summaryrefslogtreecommitdiff
path: root/gnu/gcc
diff options
context:
space:
mode:
authorMiod Vallat <miod@cvs.openbsd.org>2013-05-08 15:30:39 +0000
committerMiod Vallat <miod@cvs.openbsd.org>2013-05-08 15:30:39 +0000
commitf1c6d263359139a788d0a40df475119dbab313a8 (patch)
treeed232616cabac4bcee27cec8c394944f174ac521 /gnu/gcc
parent3a13dafeff16e2af059d65844fa8367bb061d5e8 (diff)
A port of the current gcc 3.3.6 m88k backend to gcc 4.2.1.
Main features: - md constraints rewritten in RTL - md predicates rewritten in RTL - md va_arg switched to gimple - abort() calls replaced with gcc_assert() or gcc_unreachable() for better diagnostics - support for non-ELF systems completely removed Missing: - conversion of the pipeline information from define_function_unit to define_automata not done yet (thus pipeline information currently removed) Known regressions against 3.3.6 so far: - no stack protector support yet - __builtin_setjmp doesn't restore the frame pointer correctly upon return from __builtin_longjmp - at least one case of optimization error when delay slots are not disabled. - libgcc is only built -fPIC, instead of static/fpic/fPIC.
Diffstat (limited to 'gnu/gcc')
-rw-r--r--gnu/gcc/gcc/config/m88k/constraints.md59
-rw-r--r--gnu/gcc/gcc/config/m88k/m88k-modes.def25
-rw-r--r--gnu/gcc/gcc/config/m88k/m88k-protos.h70
-rw-r--r--gnu/gcc/gcc/config/m88k/m88k.c2681
-rw-r--r--gnu/gcc/gcc/config/m88k/m88k.h1359
-rw-r--r--gnu/gcc/gcc/config/m88k/m88k.md3845
-rw-r--r--gnu/gcc/gcc/config/m88k/m88k.opt61
-rw-r--r--gnu/gcc/gcc/config/m88k/openbsdelf.h128
-rw-r--r--gnu/gcc/gcc/config/m88k/predicates.md178
-rw-r--r--gnu/gcc/gcc/config/m88k/t-openbsd3
10 files changed, 8409 insertions, 0 deletions
diff --git a/gnu/gcc/gcc/config/m88k/constraints.md b/gnu/gcc/gcc/config/m88k/constraints.md
new file mode 100644
index 00000000000..0779b9432fe
--- /dev/null
+++ b/gnu/gcc/gcc/config/m88k/constraints.md
@@ -0,0 +1,59 @@
+;; Register constraints
+
+(define_register_constraint "x" "XRF_REGS"
+ "A register from the 88110 Extended Register File.")
+
+;; Integer constraints
+
+(define_constraint "I"
+ "A non-negative 16-bit value."
+ (and (match_code "const_int")
+ (match_test "SMALL_INTVAL (ival)")))
+
+(define_constraint "J"
+ "A non-positive 16-bit value."
+ (and (match_code "const_int")
+ (match_test "SMALL_INTVAL (-ival)")))
+
+(define_constraint "K"
+ "A non-negative value < 32."
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT)ival < 32")))
+
+(define_constraint "L"
+ "A constant with only the upper 16-bits set."
+ (and (match_code "const_int")
+ (match_test "(ival & 0xffff) == 0")))
+
+(define_constraint "M"
+ "A constant value that can be formed with `set'."
+ (and (match_code "const_int")
+ (match_test "integer_ok_for_set(ival)")))
+
+(define_constraint "N"
+ "A negative value."
+ (and (match_code "const_int")
+ (match_test "ival < 0")))
+
+(define_constraint "O"
+ "Integer zero."
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+(define_constraint "P"
+ "A positive value."
+ (and (match_code "const_int")
+ (match_test "ival >= 0")))
+
+;; Floating-point constraints
+
+(define_constraint "G"
+ "Floating-point zero."
+ (and (match_code "const_double")
+ (match_test "hval == 0 && lval == 0")))
+
+;; General constraints
+
+(define_constraint "Q"
+ "An address in a call context."
+ (match_operand 0 "symbolic_operand"))
diff --git a/gnu/gcc/gcc/config/m88k/m88k-modes.def b/gnu/gcc/gcc/config/m88k/m88k-modes.def
new file mode 100644
index 00000000000..28cbd805986
--- /dev/null
+++ b/gnu/gcc/gcc/config/m88k/m88k-modes.def
@@ -0,0 +1,25 @@
+/* Definitions of target machine for GNU compiler for Motorola m88100.
+ Copyright (C) 2002 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com).
+ Currently maintained by (gcc@dg-rtp.dg.com)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Extra machine modes to represent the condition code. */
+
+CC_MODE (CCEVEN);
diff --git a/gnu/gcc/gcc/config/m88k/m88k-protos.h b/gnu/gcc/gcc/config/m88k/m88k-protos.h
new file mode 100644
index 00000000000..7eb93d62d32
--- /dev/null
+++ b/gnu/gcc/gcc/config/m88k/m88k-protos.h
@@ -0,0 +1,70 @@
+/* Definitions of target machine for GNU compiler for
+ Motorola m88100 in an 88open OCS/BCS environment.
+ Copyright (C) 2000 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com).
+ Currently maintained by (gcc@dg-rtp.dg.com)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#ifdef RTX_CODE
+extern int m88k_debugger_offset (rtx, int);
+extern void emit_bcnd (enum rtx_code, rtx);
+extern void expand_block_move (rtx, rtx, rtx *);
+extern void print_operand (FILE *, rtx, int);
+extern void print_operand_address (FILE *, rtx);
+extern const char *output_load_const_int (enum machine_mode, rtx *);
+extern const char *output_load_const_float (rtx *);
+extern const char *output_load_const_double (rtx *);
+extern const char *output_load_const_dimode (rtx *);
+extern const char *output_and (rtx[]);
+extern const char *output_ior (rtx[]);
+extern const char *output_xor (rtx[]);
+extern const char *output_call (rtx[], rtx);
+
+extern struct rtx_def *emit_test (enum rtx_code, enum machine_mode);
+extern struct rtx_def *legitimize_address (int, rtx, rtx, rtx);
+extern struct rtx_def *legitimize_operand (rtx, enum machine_mode);
+
+extern bool pic_address_needs_scratch (rtx);
+extern bool symbolic_address_p (rtx);
+extern int condition_value (rtx);
+extern int emit_move_sequence (rtx *, enum machine_mode, rtx);
+extern bool mostly_false_jump (rtx, rtx);
+extern bool real_power_of_2_operand (rtx);
+#ifdef TREE_CODE
+extern void m88k_va_start (tree, rtx);
+#endif /* TREE_CODE */
+#endif /* RTX_CODE */
+
+extern bool null_prologue (void);
+extern bool integer_ok_for_set (unsigned int);
+extern void m88k_layout_frame (void);
+extern void m88k_expand_prologue (void);
+extern void m88k_expand_epilogue (void);
+extern void output_function_profiler (FILE *, int, const char *);
+extern enum m88k_instruction classify_integer (enum machine_mode, int);
+extern bool mak_mask_p (int);
+
+#ifdef TREE_CODE
+extern struct rtx_def *m88k_function_arg (CUMULATIVE_ARGS, enum machine_mode,
+ tree, int);
+extern void m88k_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, int);
+#endif /* TREE_CODE */
+
+extern void m88k_override_options (void);
diff --git a/gnu/gcc/gcc/config/m88k/m88k.c b/gnu/gcc/gcc/config/m88k/m88k.c
new file mode 100644
index 00000000000..4701a9cbe27
--- /dev/null
+++ b/gnu/gcc/gcc/config/m88k/m88k.c
@@ -0,0 +1,2681 @@
+/* Subroutines for insn-output.c for Motorola 88000.
+ Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+ 2001, 2002 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@mcc.com)
+ Currently maintained by (gcc@dg-rtp.dg.com)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "tree.h"
+#include "function.h"
+#include "expr.h"
+#include "libfuncs.h"
+#include "c-tree.h"
+#include "flags.h"
+#include "recog.h"
+#include "toplev.h"
+#include "tm_p.h"
+#include "target.h"
+#include "target-def.h"
+#include "tree-gimple.h"
+
+#ifdef REGISTER_PREFIX
+const char *m88k_register_prefix = REGISTER_PREFIX;
+#else
+const char *m88k_register_prefix = "";
+#endif
+char m88k_volatile_code;
+
+int m88k_fp_offset = 0; /* offset of frame pointer if used */
+int m88k_stack_size = 0; /* size of allocated stack (including frame) */
+int m88k_case_index;
+
+rtx m88k_compare_reg; /* cmp output pseudo register */
+rtx m88k_compare_op0; /* cmpsi operand 0 */
+rtx m88k_compare_op1; /* cmpsi operand 1 */
+
+enum processor_type m88k_cpu; /* target cpu */
+
+static void m88k_frame_related (rtx, rtx, int);
+static void m88k_maybe_dead (rtx);
+static void m88k_output_function_epilogue (FILE *, HOST_WIDE_INT);
+static rtx m88k_struct_value_rtx (tree, int);
+static int m88k_adjust_cost (rtx, rtx, rtx, int);
+static bool m88k_handle_option (size_t, const char *, int);
+static bool m88k_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, bool);
+static bool m88k_return_in_memory (tree, tree);
+static void m88k_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, int *, int);
+static tree m88k_build_va_list (void);
+static tree m88k_gimplify_va_arg (tree, tree, tree *, tree *);
+static bool m88k_rtx_costs (rtx, int, int, int *);
+static int m88k_address_cost (rtx);
+static void m88k_output_file_start (void);
+
+/* Initialize the GCC target structure. */
+#if !defined(OBJECT_FORMAT_ELF)
+#undef TARGET_ASM_BYTE_OP
+#define TARGET_ASM_BYTE_OP "\tbyte\t"
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\thalf\t"
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\tword\t"
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#define TARGET_ASM_UNALIGNED_HI_OP "\tuahalf\t"
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#define TARGET_ASM_UNALIGNED_SI_OP "\tuaword\t"
+#endif
+
+#undef TARGET_ASM_FUNCTION_EPILOGUE
+#define TARGET_ASM_FUNCTION_EPILOGUE m88k_output_function_epilogue
+
+#undef TARGET_SCHED_ADJUST_COST
+#define TARGET_SCHED_ADJUST_COST m88k_adjust_cost
+
+#undef TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION m88k_handle_option
+
+#undef TARGET_STRUCT_VALUE_RTX
+#define TARGET_STRUCT_VALUE_RTX m88k_struct_value_rtx
+
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE m88k_pass_by_reference
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY m88k_return_in_memory
+
+#undef TARGET_PROMOTE_PROTOTYPES
+#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
+
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS m88k_setup_incoming_varargs
+
+#undef TARGET_BUILD_BUILTIN_VA_LIST
+#define TARGET_BUILD_BUILTIN_VA_LIST m88k_build_va_list
+
+#undef TARGET_GIMPLIFY_VA_ARG_EXPR
+#define TARGET_GIMPLIFY_VA_ARG_EXPR m88k_gimplify_va_arg
+
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS m88k_rtx_costs
+
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST m88k_address_cost
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START m88k_output_file_start
+/* from elfos.h
+#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
+*/
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+/* Worker function for TARGET_STRUCT_VALUE_RTX.
+   Return the hard register carrying the hidden pointer used when a
+   function returns an aggregate in memory.  The same fixed register
+   (M88K_STRUCT_VALUE_REGNUM) is used whether INCOMING is set or not,
+   so both arguments are ignored.  */
+
+static rtx
+m88k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
+ int incoming ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (Pmode, M88K_STRUCT_VALUE_REGNUM);
+}
+
+/* Determine what instructions are needed to manufacture the integer VALUE
+ in the given MODE.
+
+   The tests are ordered from cheapest (a single instruction) to most
+   expensive (an or.u/or pair).  The enumerator returned is also used as
+   the index into the template table in output_load_const_int, so the
+   order of enum m88k_instruction must match the order of these tests.  */
+
+enum m88k_instruction
+classify_integer (enum machine_mode mode, int value)
+{
+ if (value == 0)
+ return m88k_zero;
+ else if (SMALL_INTVAL (value))
+ return m88k_or;
+ else if (SMALL_INTVAL (-value))
+ return m88k_subu;
+ else if (mode == HImode)
+ return m88k_or_lo16;
+ else if (mode == QImode)
+ return m88k_or_lo8;
+ else if ((value & 0xffff) == 0)
+ return m88k_oru_hi16;
+ else if (integer_ok_for_set (value))
+ return m88k_set;
+ else
+ return m88k_oru_or;
+}
+
+/* Return the bit number in a compare word corresponding to CONDITION.
+   Only the ten integer comparison codes below are valid here; any other
+   code indicates a bug in the caller.  */
+
+int
+condition_value (rtx condition)
+{
+ switch (GET_CODE (condition))
+ {
+ case EQ: return 2;
+ case NE: return 3;
+ case GT: return 4;
+ case LE: return 5;
+ case LT: return 6;
+ case GE: return 7;
+ case GTU: return 8;
+ case LEU: return 9;
+ case LTU: return 10;
+ case GEU: return 11;
+ default: gcc_unreachable ();
+ }
+}
+
+/* Return true if VALUE can be generated by a single m88k `set'
+   instruction, i.e. if its one bits form a single non-empty contiguous
+   run.  Zero is rejected explicitly since it has no one bits.  */
+
+bool
+integer_ok_for_set (unsigned int value)
+{
+ unsigned int mask;
+
+ if (value == 0)
+ return false;
+ /* All the "one" bits must be contiguous. If so, MASK + 1 will be
+ a power of two or zero. */
+ mask = value | (value - 1);
+ return POWER_OF_2_or_0 (mask + 1);
+}
+
+/* Return the assembler template that loads the CONST_INT operands[1]
+   into the register operands[0] in mode MODE.  The table below is
+   indexed by the enum m88k_instruction value that classify_integer
+   computes for the constant, so the two must stay in sync.  */
+
+const char *
+output_load_const_int (enum machine_mode mode, rtx *operands)
+{
+ static const char *const patterns[] =
+ {
+ "or %0,%#r0,0",
+ "or %0,%#r0,%1",
+ "subu %0,%#r0,%n1",
+ "or %0,%#r0,%h1",
+ "or %0,%#r0,%q1",
+ "set %0,%#r0,%s1",
+ "or.u %0,%#r0,%X1",
+ "or.u %0,%#r0,%X1\n\tor %0,%0,%x1",
+ };
+
+ gcc_assert (REG_P (operands[0])
+ && GET_CODE (operands[1]) == CONST_INT);
+ return patterns[classify_integer (mode, INTVAL (operands[1]))];
+}
+
+/* These next two routines assume that floating point numbers are represented
+ in a manner which is consistent between host and target machines. */
+
+/* Load an SFmode constant by reinterpreting its single word as an
+   integer and emitting the corresponding integer load template.  */
+
+const char *
+output_load_const_float (rtx *operands)
+{
+ /* These can return 0 under some circumstances when cross-compiling. */
+ operands[0] = operand_subword (operands[0], 0, 0, SFmode);
+ operands[1] = operand_subword (operands[1], 0, 0, SFmode);
+
+ return output_load_const_int (SImode, operands);
+}
+
+/* Load a DFmode constant one 32-bit word at a time: emit the sequence
+   for the first word here, and return the template for the second word
+   so the caller's normal output path finishes the job.  */
+
+const char *
+output_load_const_double (rtx *operands)
+{
+ rtx latehalf[2];
+
+ /* These can return zero on some cross-compilers, but there's nothing
+ we can do about it. */
+ latehalf[0] = operand_subword (operands[0], 1, 0, DFmode);
+ latehalf[1] = operand_subword (operands[1], 1, 0, DFmode);
+
+ operands[0] = operand_subword (operands[0], 0, 0, DFmode);
+ operands[1] = operand_subword (operands[1], 0, 0, DFmode);
+
+ output_asm_insn (output_load_const_int (SImode, operands), operands);
+
+ /* Switch to the second word and let the caller emit its load.  */
+ operands[0] = latehalf[0];
+ operands[1] = latehalf[1];
+
+ return output_load_const_int (SImode, operands);
+}
+
+/* Load a DImode constant: emit the load of the first 32-bit word and
+   return the template that loads the second word.  */
+
+const char *
+output_load_const_dimode (rtx *operands)
+{
+ rtx latehalf[2];
+
+ latehalf[0] = operand_subword (operands[0], 1, 0, DImode);
+ latehalf[1] = operand_subword (operands[1], 1, 0, DImode);
+
+ operands[0] = operand_subword (operands[0], 0, 0, DImode);
+ operands[1] = operand_subword (operands[1], 0, 0, DImode);
+
+ output_asm_insn (output_load_const_int (SImode, operands), operands);
+
+ /* Switch to the second word and let the caller emit its load.  */
+ operands[0] = latehalf[0];
+ operands[1] = latehalf[1];
+
+ return output_load_const_int (SImode, operands);
+}
+
+/* Emit insns to move operands[1] into operands[0].
+
+ Return 1 if we have written out everything that needs to be done to
+ do the move. Otherwise, return 0 and the caller will emit the move
+ normally.
+
+ SCRATCH if nonzero can be used as a scratch register for the move
+ operation. It is provided by a SECONDARY_RELOAD_* macro if needed. */
+
+int
+emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch)
+{
+ rtx operand0 = operands[0];
+ rtx operand1 = operands[1];
+
+ /* If the PIC constant needs extra work to be usable as an address
+    (see pic_address_needs_scratch), legitimize it up front so the
+    rest of this function only sees a legitimate source.  */
+ if (CONSTANT_P (operand1) && flag_pic
+ && pic_address_needs_scratch (operand1))
+ operands[1] = operand1 = legitimize_address (1, operand1, NULL_RTX,
+ NULL_RTX);
+
+ /* Handle most common case first: storing into a register. */
+ if (register_operand (operand0, mode))
+ {
+ if (register_operand (operand1, mode)
+ || (GET_CODE (operand1) == CONST_INT && SMALL_INT (operand1))
+ || GET_CODE (operand1) == HIGH
+ /* Only `general_operands' can come here, so MEM is ok. */
+ || GET_CODE (operand1) == MEM)
+ {
+ /* Run this case quickly. */
+ emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
+ return 1;
+ }
+ }
+ else if (GET_CODE (operand0) == MEM)
+ {
+ if (register_operand (operand1, mode)
+ || (operand1 == const0_rtx && GET_MODE_SIZE (mode) <= UNITS_PER_WORD))
+ {
+ /* Run this case quickly. */
+ emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
+ return 1;
+ }
+ /* Outside of reload we may still create pseudos, so force the
+    source into a register and validize the destination address.  */
+ if (! reload_in_progress && ! reload_completed)
+ {
+ operands[0] = validize_mem (operand0);
+ operands[1] = operand1 = force_reg (mode, operand1);
+ }
+ }
+
+ /* Simplify the source if we need to. */
+ if (GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
+ {
+ if (GET_CODE (operand1) != CONST_INT
+ && GET_CODE (operand1) != CONST_DOUBLE)
+ {
+ rtx temp = ((reload_in_progress || reload_completed)
+ ? operand0 : NULL_RTX);
+ operands[1] = legitimize_address (flag_pic
+ && symbolic_address_p (operand1),
+ operand1, temp, scratch);
+ /* legitimize_address builds SImode expressions; wrap in a
+    SUBREG when the move is in another mode.  */
+ if (mode != SImode)
+ operands[1] = gen_rtx_SUBREG (mode, operands[1], 0);
+ }
+ }
+
+ /* Now have insn-emit do whatever it normally does. */
+ return 0;
+}
+
+/* Return a legitimate reference for ORIG (either an address or a MEM)
+ using the register REG. If PIC and the address is already
+ position-independent, use ORIG. Newly generated position-independent
+ addresses go into a reg. This is REG if nonzero, otherwise we
+ allocate register(s) as necessary. If this is called during reload,
+ and we need a second temp register, then we use SCRATCH, which is
+ provided via the SECONDARY_INPUT_RELOAD_CLASS mechanism. */
+
+struct rtx_def *
+legitimize_address (int pic, rtx orig, rtx reg, rtx scratch)
+{
+ rtx addr = (GET_CODE (orig) == MEM ? XEXP (orig, 0) : orig);
+ rtx new = orig;
+ rtx temp, insn;
+
+ if (pic)
+ {
+ /* Plain symbol or label: load its address through the GOT.  */
+ if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
+ {
+ if (reg == NULL_RTX)
+ {
+ gcc_assert (!reload_in_progress && !reload_completed);
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ if (flag_pic == 2)
+ {
+ /* If not during reload, allocate another temp reg here for
+ loading in the address, so that these instructions can be
+ optimized properly. */
+ temp = ((reload_in_progress || reload_completed)
+ ? reg : gen_reg_rtx (Pmode));
+
+ /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
+ won't get confused into thinking that these two instructions
+ are loading in the true address of the symbol. If in the
+ future a PIC rtx exists, that should be used instead. */
+ emit_insn (gen_movsi_high_pic (temp, addr));
+ emit_insn (gen_movsi_lo_sum_pic (temp, temp, addr));
+ addr = temp;
+ }
+
+ new = gen_rtx_MEM (Pmode,
+ gen_rtx_PLUS (SImode,
+ pic_offset_table_rtx, addr));
+
+ current_function_uses_pic_offset_table = 1;
+ MEM_READONLY_P (new) = 1;
+ insn = emit_move_insn (reg, new);
+ /* Put a REG_EQUAL note on this insn, so that it can be optimized
+ by loop. */
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
+ REG_NOTES (insn));
+ new = reg;
+ }
+ else if (GET_CODE (addr) == CONST)
+ {
+ rtx base;
+
+ /* Already a GOT-relative sum: nothing to do.  */
+ if (GET_CODE (XEXP (addr, 0)) == PLUS
+ && XEXP (XEXP (addr, 0), 0) == pic_offset_table_rtx)
+ return orig;
+
+ if (reg == NULL_RTX)
+ {
+ gcc_assert (!reload_in_progress && !reload_completed);
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
+
+ /* Legitimize both halves of the sum, then recombine.  */
+ base = legitimize_address (1, XEXP (XEXP (addr, 0), 0), reg,
+ NULL_RTX);
+ addr = legitimize_address (1, XEXP (XEXP (addr, 0), 1),
+ base == reg ? NULL_RTX : reg, NULL_RTX);
+
+ if (GET_CODE (addr) == CONST_INT)
+ {
+ if (ADD_INT (addr))
+ return plus_constant (base, INTVAL (addr));
+ else if (! reload_in_progress && ! reload_completed)
+ addr = force_reg (Pmode, addr);
+ /* We can't create any new registers during reload, so use the
+ SCRATCH reg provided by the reload_insi pattern. */
+ else if (scratch)
+ {
+ emit_move_insn (scratch, addr);
+ addr = scratch;
+ }
+ else
+ /* If we reach here, then the SECONDARY_INPUT_RELOAD_CLASS
+ macro needs to be adjusted so that a scratch reg is provided
+ for this address. */
+ gcc_unreachable ();
+ }
+ new = gen_rtx_PLUS (SImode, base, addr);
+ /* Should we set special REG_NOTEs here? */
+ }
+ }
+ else
+ {
+ /* Non-PIC symbolic address: build it with a HIGH/LO_SUM pair.  */
+ if (reg == NULL_RTX)
+ {
+ gcc_assert (!reload_in_progress && !reload_completed);
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode,
+ reg, gen_rtx_HIGH (SImode, addr)));
+ new = gen_rtx_LO_SUM (SImode, reg, addr);
+ }
+
+ /* If we started from a MEM, rewrap the legitimized address in a MEM
+    that keeps the original's attributes.  */
+ if (GET_CODE (orig) == MEM)
+ {
+ new = gen_rtx_MEM (GET_MODE (orig), new);
+ MEM_COPY_ATTRIBUTES (new, orig);
+ }
+ return new;
+}
+
+/* Support functions for code to emit a block move. There are two methods
+ used to perform the block move:
+ + call memcpy
+ + produce an inline sequence of ld/st instructions
+ */
+
+/* Widest integer mode usable for a move of the given byte alignment
+   (array index): 1, 2, 4 and 8 map to QImode, HImode, SImode and
+   DImode; all other indices are unusable and map to VOIDmode.  */
+static const enum machine_mode mode_from_align[] =
+ {VOIDmode, QImode, HImode, VOIDmode, SImode,
+ VOIDmode, VOIDmode, VOIDmode, DImode};
+
+static void block_move_sequence (rtx, rtx, rtx, rtx, int, int);
+#ifndef USE_GAS
+static void output_short_branch_defs (FILE *);
+#endif
+
+/* Emit code to perform a block move. Choose the best method.
+
+ OPERANDS[0] is the destination.
+ OPERANDS[1] is the source.
+ OPERANDS[2] is the size.
+ OPERANDS[3] is the alignment safe to use.
+
+   DEST_MEM and SRC_MEM are the MEM rtxes for destination and source,
+   used when the move is expanded inline.  Small constant-sized moves
+   (at most three loads of the alignment width) become ld/st pairs;
+   everything else becomes a call to memcpy.  */
+
+void
+expand_block_move (rtx dest_mem, rtx src_mem, rtx *operands)
+{
+ int align = INTVAL (operands[3]);
+ int constp = (GET_CODE (operands[2]) == CONST_INT);
+ int bytes = (constp ? INTVAL (operands[2]) : 0);
+
+ if (constp && bytes <= 0)
+ return;
+
+ /* Determine machine mode to do move with. */
+ if (align > 4 && !TARGET_88110)
+ align = 4;
+ else
+ gcc_assert (align > 0 && align != 3); /* block move invalid alignment. */
+
+ if (constp && bytes <= 3 * align)
+ block_move_sequence (operands[0], dest_mem, operands[1], src_mem,
+ bytes, align);
+
+ else
+ {
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "memcpy"), 0,
+ VOIDmode, 3,
+ operands[0], Pmode,
+ operands[1], Pmode,
+ convert_to_mode (TYPE_MODE (sizetype), operands[2],
+ TYPE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
+ }
+}
+
+/* Emit code to perform a block move with an offset sequence of ld/st
+ instructions (..., ld 0, st 1, ld 1, st 0, ...). SIZE and ALIGN are
+ known constants. DEST and SRC are registers.
+
+   The body interleaves loads and stores using two temporaries; PHASE
+   alternates between them so each chunk is loaded one iteration before
+   it is stored.  Note that the DEST and SRC register arguments are not
+   used here — the addresses come from DEST_MEM and SRC_MEM.  */
+
+static void
+block_move_sequence (rtx dest, rtx dest_mem, rtx src, rtx src_mem, int size,
+ int align)
+{
+ rtx temp[2];
+ enum machine_mode mode[2];
+ int amount[2];
+ int active[2];
+ int phase = 0;
+ int next;
+ int offset_ld = 0;
+ int offset_st = 0;
+
+ active[0] = active[1] = FALSE;
+
+ /* Establish parameters for the first load and for the second load if
+ it is known to be the same mode as the first. */
+ amount[0] = amount[1] = align;
+ mode[0] = mode_from_align[align];
+ temp[0] = gen_reg_rtx (mode[0]);
+ if (size >= 2 * align)
+ {
+ mode[1] = mode[0];
+ temp[1] = gen_reg_rtx (mode[1]);
+ }
+
+ do
+ {
+ next = phase;
+ phase = !phase;
+
+ if (size > 0)
+ {
+ /* Change modes as the sequence tails off. */
+ if (size < amount[next])
+ {
+ amount[next] = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
+ mode[next] = mode_from_align[amount[next]];
+ temp[next] = gen_reg_rtx (mode[next]);
+ }
+ size -= amount[next];
+ emit_move_insn (temp[next],
+ adjust_address (src_mem, mode[next], offset_ld));
+ offset_ld += amount[next];
+ active[next] = TRUE;
+ }
+
+ /* Store the chunk loaded on the previous iteration, if any.  */
+ if (active[phase])
+ {
+ active[phase] = FALSE;
+ emit_move_insn (adjust_address (dest_mem, mode[phase], offset_st),
+ temp[phase]);
+ offset_st += amount[phase];
+ }
+ }
+ while (active[next]);
+}
+
+/* Emit the code to do an AND operation.  Operand 2 is either a register
+   or a CONST_INT; for a constant the cheapest encoding (mask, and,
+   and.u, mask.u, clr, or a two-insn and.u/and pair) is selected by
+   inspecting which halfwords of the constant are all ones or zeroes.  */
+
+const char *
+output_and (rtx operands[])
+{
+ unsigned int value;
+
+ if (REG_P (operands[2]))
+ return "and %0,%1,%2";
+
+ value = INTVAL (operands[2]);
+ if (SMALL_INTVAL (value))
+ return "mask %0,%1,%2";
+ else if ((value & 0xffff0000) == 0xffff0000)
+ return "and %0,%1,%x2";
+ else if ((value & 0xffff) == 0xffff)
+ return "and.u %0,%1,%X2";
+ else if ((value & 0xffff) == 0)
+ return "mask.u %0,%1,%X2";
+ else if (integer_ok_for_set (~value))
+ /* The zero bits form a contiguous run: clear them with clr.  */
+ return "clr %0,%1,%S2";
+ else
+ return "and.u %0,%1,%X2\n\tand %0,%0,%x2";
+}
+
+/* Emit the code to do an inclusive OR operation.  Operand 2 is either a
+   register or a CONST_INT; for a constant the cheapest encoding (or,
+   or.u, set, or a two-insn or.u/or pair) is selected.  */
+
+const char *
+output_ior (rtx operands[])
+{
+ unsigned int value;
+
+ if (REG_P (operands[2]))
+ return "or %0,%1,%2";
+
+ value = INTVAL (operands[2]);
+ if (SMALL_INTVAL (value))
+ return "or %0,%1,%2";
+ else if ((value & 0xffff) == 0)
+ return "or.u %0,%1,%X2";
+ else if (integer_ok_for_set (value))
+ /* The one bits form a contiguous run: set them with set.  */
+ return "set %0,%1,%s2";
+ else
+ return "or.u %0,%1,%X2\n\tor %0,%0,%x2";
+}
+
+/* Emit the instructions for doing an XOR.  Operand 2 is either a
+   register or a CONST_INT; for a constant, one xor or xor.u suffices
+   when only one halfword is nonzero, otherwise both are emitted.  */
+
+const char *
+output_xor (rtx operands[])
+{
+ unsigned int value;
+
+ if (REG_P (operands[2]))
+ return "xor %0,%1,%2";
+
+ value = INTVAL (operands[2]);
+ if (SMALL_INTVAL (value))
+ return "xor %0,%1,%2";
+ else if ((value & 0xffff) == 0)
+ return "xor.u %0,%1,%X2";
+ else
+ return "xor.u %0,%1,%X2\n\txor %0,%0,%x2";
+}
+
+/* Output a call. Normally this is just bsr or jsr, but this also deals with
+ accomplishing a branch after the call by incrementing r1. This requires
+ that various assembler bugs be accommodated. The 4.30 DG/UX assembler
+ requires that forward references not occur when computing the difference of
+ two labels. The [version?] Motorola assembler computes a word difference.
+ No doubt there's more to come!
+
+ It would seem the same idea could be used to tail call, but in this case,
+ the epilogue will be non-null. */
+
+#ifndef USE_GAS
+/* Parallel chains of labels recorded by output_call when it emits the
+   short-branch form; output_short_branch_defs later walks all three and
+   emits a "def name,high-low" line per triple.  */
+static rtx sb_name = NULL_RTX;
+static rtx sb_high = NULL_RTX;
+static rtx sb_low = NULL_RTX;
+#endif
+
+/* Output a call to ADDR, possibly folding a following jump into the
+   call by adjusting r1 (the return address) in the delay slot.  See the
+   block comment above for the assembler quirks this accommodates.  */
+
+const char *
+output_call (rtx operands[], rtx addr)
+{
+ operands[0] = addr;
+ if (final_sequence)
+ {
+ rtx jump;
+ rtx seq_insn;
+
+ /* This can be generalized, but there is currently no need. */
+ gcc_assert (XVECLEN (final_sequence, 0) == 2);
+
+ /* The address of interior insns is not computed, so use the sequence. */
+ seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
+ jump = XVECEXP (final_sequence, 0, 1);
+ if (GET_CODE (jump) == JUMP_INSN)
+ {
+#ifndef USE_GAS
+ rtx low, high;
+#endif
+ const char *last;
+ rtx dest = XEXP (SET_SRC (PATTERN (jump)), 0);
+ int delta = 4 * (INSN_ADDRESSES (INSN_UID (dest))
+ - INSN_ADDRESSES (INSN_UID (seq_insn))
+ - 2);
+
+ /* Delete the jump. */
+ PUT_CODE (jump, NOTE);
+ NOTE_LINE_NUMBER (jump) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (jump) = 0;
+
+ /* We only do this optimization if -O2, modifying the value of
+ r1 in the delay slot confuses debuggers and profilers on some
+ systems.
+
+ If we lose, we must use the non-delay form. This is unlikely
+ to ever happen. If it becomes a problem, claim that a call
+ has two delay slots and only the second can be filled with
+ a jump.
+
+ The 88110 can lose when a jsr.n r1 is issued and a page fault
+ occurs accessing the delay slot. So don't use jsr.n form when
+ jumping thru r1.
+ */
+ if (optimize < 2
+ || ! ADD_INTVAL (delta)
+ || (REG_P (addr) && REGNO (addr) == 1))
+ {
+ operands[1] = dest;
+ return (REG_P (addr)
+ ? "jsr %0\n\tbr %l1"
+ : (flag_pic
+ ? "bsr %0#plt\n\tbr %l1"
+ : "bsr %0\n\tbr %l1"));
+ }
+
+ /* Output the short branch form. */
+ output_asm_insn ((REG_P (addr)
+ ? "jsr.n %0"
+ : (flag_pic ? "bsr.n %0#plt" : "bsr.n %0")),
+ operands);
+
+#ifdef USE_GAS
+ /* gas can compute the label difference itself.  */
+ last = (delta < 0
+ ? "subu %#r1,%#r1,.-%l0+4"
+ : "addu %#r1,%#r1,%l0-.-4");
+ operands[0] = dest;
+#else
+ /* Other assemblers need the difference emitted later as a "def"
+    (see output_short_branch_defs); record the labels here.  */
+ operands[0] = gen_label_rtx ();
+ operands[1] = gen_label_rtx ();
+ if (delta < 0)
+ {
+ low = dest;
+ high = operands[1];
+ last = "subu %#r1,%#r1,%l0\n%l1:";
+ }
+ else
+ {
+ low = operands[1];
+ high = dest;
+ last = "addu %#r1,%#r1,%l0\n%l1:";
+ }
+
+ /* Record the values to be computed later as "def name,high-low". */
+ sb_name = gen_rtx_EXPR_LIST (VOIDmode, operands[0], sb_name);
+ sb_high = gen_rtx_EXPR_LIST (VOIDmode, high, sb_high);
+ sb_low = gen_rtx_EXPR_LIST (VOIDmode, low, sb_low);
+#endif /* Don't USE_GAS */
+
+ return last;
+ }
+ }
+ /* No delay-slot sequence: plain call.  */
+ return (REG_P (addr)
+ ? "jsr%. %0"
+ : (flag_pic ? "bsr%. %0#plt" : "bsr%. %0"));
+}
+
+#ifndef USE_GAS
+/* Emit the deferred "def name,high-low" assembler definitions recorded
+   by output_call, consuming the three chains as we go.  The chains must
+   all be the same length; the final assert checks that they ran out
+   together.  */
+static void
+output_short_branch_defs (FILE *stream)
+{
+ char name[256], high[256], low[256];
+
+ for (; sb_name && sb_high && sb_low;
+ sb_name = XEXP (sb_name, 1),
+ sb_high = XEXP (sb_high, 1),
+ sb_low = XEXP (sb_low, 1))
+ {
+ ASM_GENERATE_INTERNAL_LABEL
+ (name, "L", CODE_LABEL_NUMBER (XEXP (sb_name, 0)));
+ ASM_GENERATE_INTERNAL_LABEL
+ (high, "L", CODE_LABEL_NUMBER (XEXP (sb_high, 0)));
+ ASM_GENERATE_INTERNAL_LABEL
+ (low, "L", CODE_LABEL_NUMBER (XEXP (sb_low, 0)));
+ /* This will change as the assembler requirements become known. */
+ fprintf (stream, "%s%s,%s-%s\n",
+ SET_ASM_OP, &name[1], &high[1], &low[1]);
+ }
+ gcc_assert (sb_name == NULL_RTX && sb_high == NULL_RTX && sb_low == NULL_RTX);
+}
+#endif
+
+/* Return truth value of the statement that this conditional branch is likely
+ to fall through. CONDITION is the condition that JUMP_INSN is testing.
+
+   Heuristics, in order: predict away from a path that returns; predict
+   loops to loop; predict backward branches taken; otherwise guess from
+   the comparison code (EQ usually false, NE usually true, and signed
+   comparisons against zero assume positive quantities).  */
+
+bool
+mostly_false_jump (rtx jump_insn, rtx condition)
+{
+ rtx target_label = JUMP_LABEL (jump_insn);
+ rtx insnt, insnj;
+
+ /* Much of this isn't computed unless we're optimizing. */
+ if (optimize == 0)
+ return false;
+
+ /* Determine if one path or the other leads to a return. */
+ for (insnt = NEXT_INSN (target_label);
+ insnt;
+ insnt = NEXT_INSN (insnt))
+ {
+ if (GET_CODE (insnt) == JUMP_INSN)
+ break;
+ else if (GET_CODE (insnt) == INSN
+ && GET_CODE (PATTERN (insnt)) == SEQUENCE
+ && GET_CODE (XVECEXP (PATTERN (insnt), 0, 0)) == JUMP_INSN)
+ {
+ insnt = XVECEXP (PATTERN (insnt), 0, 0);
+ break;
+ }
+ }
+ /* A RETURN pattern, or a jump through r1 (the return address
+    register), means the target path returns; clear INSNT to mark it. */
+ if (insnt
+ && (GET_CODE (PATTERN (insnt)) == RETURN
+ || (GET_CODE (PATTERN (insnt)) == SET
+ && GET_CODE (SET_SRC (PATTERN (insnt))) == REG
+ && REGNO (SET_SRC (PATTERN (insnt))) == 1)))
+ insnt = NULL_RTX;
+
+ for (insnj = NEXT_INSN (jump_insn);
+ insnj;
+ insnj = NEXT_INSN (insnj))
+ {
+ if (GET_CODE (insnj) == JUMP_INSN)
+ break;
+ else if (GET_CODE (insnj) == INSN
+ && GET_CODE (PATTERN (insnj)) == SEQUENCE
+ && GET_CODE (XVECEXP (PATTERN (insnj), 0, 0)) == JUMP_INSN)
+ {
+ insnj = XVECEXP (PATTERN (insnj), 0, 0);
+ break;
+ }
+ }
+ /* Same test for the fall-through path.  */
+ if (insnj
+ && (GET_CODE (PATTERN (insnj)) == RETURN
+ || (GET_CODE (PATTERN (insnj)) == SET
+ && GET_CODE (SET_SRC (PATTERN (insnj))) == REG
+ && REGNO (SET_SRC (PATTERN (insnj))) == 1)))
+ insnj = NULL_RTX;
+
+ /* Predict to not return. */
+ if ((insnt == NULL_RTX) != (insnj == NULL_RTX))
+ return (insnt == NULL_RTX);
+
+ /* Predict loops to loop. */
+ for (insnt = PREV_INSN (target_label);
+ insnt && GET_CODE (insnt) == NOTE;
+ insnt = PREV_INSN (insnt))
+ if (NOTE_LINE_NUMBER (insnt) == NOTE_INSN_LOOP_END)
+ return true;
+ else if (NOTE_LINE_NUMBER (insnt) == NOTE_INSN_LOOP_BEG)
+ return false;
+
+ /* Predict backward branches usually take. */
+ if (final_sequence)
+ insnj = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
+ else
+ insnj = jump_insn;
+ if (INSN_ADDRESSES (INSN_UID (insnj))
+ > INSN_ADDRESSES (INSN_UID (target_label)))
+ return false;
+
+ /* EQ tests are usually false and NE tests are usually true. Also,
+ most quantities are positive, so we can make the appropriate guesses
+ about signed comparisons against zero. Consider unsigned comparisons
+ to be a range check and assume quantities to be in range. */
+ switch (GET_CODE (condition))
+ {
+ case CONST_INT:
+ /* Unconditional branch. */
+ return false;
+ case EQ:
+ return true;
+ case NE:
+ return false;
+ case LE:
+ case LT:
+ case GEU:
+ case GTU: /* Must get casesi right at least. */
+ if (XEXP (condition, 1) == const0_rtx)
+ return true;
+ break;
+ case GE:
+ case GT:
+ case LEU:
+ case LTU:
+ if (XEXP (condition, 1) == const0_rtx)
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/* Return true if the operand is a power of two and is a floating
+   point type (to optimize division by power of two into multiplication).
+
+   OP must be a CONST_DOUBLE in SFmode or DFmode; anything else
+   returns false.  */
+
+bool
+real_power_of_2_operand (rtx op)
+{
+  REAL_VALUE_TYPE d;
+  union {
+    long l[2];
+    struct { /* IEEE double precision format */
+      unsigned sign : 1;
+      unsigned exponent : 11;
+      unsigned mantissa1 : 20;
+      unsigned mantissa2;
+    } s;
+    struct { /* IEEE double format to quick check */
+      unsigned sign : 1; /* if it fits in a float */
+      unsigned exponent1 : 4;
+      unsigned exponent2 : 7;
+      unsigned mantissa1 : 20;
+      unsigned mantissa2;
+    } s2;
+  } u;
+
+  /* NOTE(review): the bit-field overlays above assume the host packs
+     bit-fields so they line up with the target words filled in by
+     REAL_VALUE_TO_TARGET_DOUBLE -- confirm for cross hosts.  */
+
+  if (GET_MODE (op) != DFmode && GET_MODE (op) != SFmode)
+    return false;
+
+  if (GET_CODE (op) != CONST_DOUBLE)
+    return false;
+
+  REAL_VALUE_FROM_CONST_DOUBLE (d, op);
+  REAL_VALUE_TO_TARGET_DOUBLE (d, u.l);
+
+  /* A power of two has an all-zero mantissa; also reject 0.0, NaN and
+     infinity, and exponents outside the SFmode-representable range.  */
+  if (u.s.mantissa1 != 0 || u.s.mantissa2 != 0 /* not a power of two */
+      || u.s.exponent == 0 /* constant 0.0 */
+      || u.s.exponent == 0x7ff /* NaN */
+      || (u.s2.exponent1 != 0x8 && u.s2.exponent1 != 0x7))
+    return false; /* const won't fit in float */
+
+  return true;
+}
+
+/* Make OP legitimate for mode MODE. Currently this only deals with DFmode
+   operands, putting them in registers and making CONST_DOUBLE values
+   SFmode where possible.
+
+   Returns OP itself when it is already acceptable, a FLOAT_EXTEND of
+   an SFmode register for float-representable double constants, or OP
+   forced into a register otherwise.  */
+
+struct rtx_def *
+legitimize_operand (rtx op, enum machine_mode mode)
+{
+  rtx temp;
+  REAL_VALUE_TYPE d;
+  union {
+    long l[2];
+    struct { /* IEEE double precision format */
+      unsigned sign : 1;
+      unsigned exponent : 11;
+      unsigned mantissa1 : 20;
+      unsigned mantissa2;
+    } s;
+    struct { /* IEEE double format to quick check */
+      unsigned sign : 1; /* if it fits in a float */
+      unsigned exponent1 : 4;
+      unsigned exponent2 : 7;
+      unsigned mantissa1 : 20;
+      unsigned mantissa2;
+    } s2;
+  } u;
+
+  if (GET_CODE (op) == REG || mode != DFmode)
+    return op;
+
+  if (GET_CODE (op) == CONST_DOUBLE)
+    {
+      /* If the double constant is exactly representable as a float,
+	 materialize it as an SFmode register extended to DFmode.  */
+      REAL_VALUE_FROM_CONST_DOUBLE (d, op);
+      REAL_VALUE_TO_TARGET_DOUBLE (d, u.l);
+      if (u.s.exponent != 0x7ff /* NaN */
+	  && u.s.mantissa2 == 0 /* Mantissa fits */
+	  && (u.s2.exponent1 == 0x8 || u.s2.exponent1 == 0x7) /* Exponent fits */
+	  && (temp = simplify_unary_operation (FLOAT_TRUNCATE, SFmode,
+					       op, mode)) != 0)
+	return gen_rtx_FLOAT_EXTEND (mode, force_reg (SFmode, temp));
+    }
+  else if (register_operand (op, mode))
+    return op;
+
+  return force_reg (mode, op);
+}
+
+/* Returns true if OP is either a symbol reference or a sum of a symbol
+   reference and a constant.  */
+
+bool
+symbolic_address_p (rtx op)
+{
+  enum rtx_code code = GET_CODE (op);
+
+  /* A bare symbol or label always qualifies.  */
+  if (code == SYMBOL_REF || code == LABEL_REF)
+    return true;
+
+  if (code != CONST)
+    return false;
+
+  /* A CONST qualifies when it wraps (plus (symbol_ref/label_ref)
+     (const_int)).  */
+  op = XEXP (op, 0);
+  if (GET_CODE (XEXP (op, 1)) != CONST_INT)
+    return false;
+  return (GET_CODE (XEXP (op, 0)) == SYMBOL_REF
+	  || GET_CODE (XEXP (op, 0)) == LABEL_REF);
+}
+
+/* Nonzero if this is a bitmask filling the bottom bits, for optimizing and +
+   shift left combinations into a single mak instruction.  */
+
+bool
+mak_mask_p (int value)
+{
+  /* Zero is not a valid mask; otherwise VALUE must be one less than a
+     power of two (i.e. a run of low-order one bits).  */
+  if (value == 0)
+    return false;
+  return POWER_OF_2_or_0 (value + 1);
+}
+
+/* Output to FILE the start of the assembler file.
+
+   Implements TARGET_ASM_FILE_START; emits the 88110 requirement
+   directive before the default preamble when compiling for that
+   processor.  */
+
+static void
+m88k_output_file_start (void)
+{
+  if (TARGET_88110)
+    fprintf (asm_out_file, "%s\n", REQUIRES_88110_ASM_OP);
+
+  default_file_start ();
+}
+
+/* Generate the assembly code for function entry.
+
+ The prologue is responsible for setting up the stack frame,
+ initializing the frame pointer register, saving registers that must be
+ saved, and allocating SIZE additional bytes of storage for the
+ local variables. SIZE is an integer. FILE is a stdio
+ stream to which the assembler code should be output.
+
+ The label for the beginning of the function need not be output by this
+ macro. That has already been done when the macro is run.
+
+ To determine which registers to save, the macro can refer to the array
+ `regs_ever_live': element R is nonzero if hard register
+ R is used anywhere within the function. This implies the
+ function prologue should save register R, but not if it is one
+ of the call-used registers.
+
+ On machines where functions may or may not have frame-pointers, the
+ function entry code must vary accordingly; it must set up the frame
+ pointer if one is wanted, and not otherwise. To determine whether a
+   frame pointer is wanted, the macro can refer to the variable
+ `frame_pointer_needed'. The variable's value will be 1 at run
+ time in a function that needs a frame pointer.
+
+ On machines where an argument may be passed partly in registers and
+ partly in memory, this macro must examine the variable
+ `current_function_pretend_args_size', and allocate that many bytes
+ of uninitialized space on the stack just underneath the first argument
+ arriving on the stack. (This may not be at the very end of the stack,
+ if the calling sequence has pushed anything else since pushing the stack
+ arguments. But usually, on such machines, nothing else has been pushed
+ yet, because the function prologue itself does all the pushing.)
+
+ If `ACCUMULATE_OUTGOING_ARGS' is defined, the variable
+ `current_function_outgoing_args_size' contains the size in bytes
+ required for the outgoing arguments. This macro must add that
+ amount of uninitialized space to very bottom of the stack.
+
+ The stack frame we use looks like this:
+
+ caller callee
+ |==============================================|
+ | caller's frame |
+ |==============================================|
+ | [caller's outgoing memory arguments] |
+ sp -> |==============================================| <- ap
+ | [local variable space] |
+ |----------------------------------------------|
+ | [return address (r1)] |
+ |----------------------------------------------|
+ | [previous frame pointer (r30)] |
+ |==============================================| <- fp
+ | [preserved registers (r25..r14)] |
+ |----------------------------------------------|
+ | [preserved registers (x29..x22)] |
+ |==============================================|
+ | [dynamically allocated space (alloca)] |
+ |==============================================|
+ | [callee's outgoing memory arguments] |
+ |==============================================| <- sp
+
+ Notes:
+
+ r1 and r30 must be saved if debugging.
+
+ fp (if present) is located two words down from the local
+ variable space.
+ */
+
+static rtx emit_add (rtx, rtx, int);
+static void preserve_registers (int, int);
+static void emit_ldst (int, int, enum machine_mode, int);
+
+/* Count of general (r) and extended (x) registers the current function
+   must preserve; computed by m88k_layout_frame.  */
+static int nregs;
+static int nxregs;
+/* Nonzero entries mark hard registers that must be saved/restored.  */
+static char save_regs[FIRST_PSEUDO_REGISTER];
+/* Nonzero once m88k_layout_frame has run for the current function.  */
+static int frame_laid_out;
+/* Local variable space, as returned by get_frame_size ().  */
+static int frame_size;
+
+#define STACK_UNIT_BOUNDARY (STACK_BOUNDARY / BITS_PER_UNIT)
+/* Round BYTES up to a multiple of the stack alignment.  */
+#define ROUND_CALL_BLOCK_SIZE(BYTES) \
+  (((BYTES) + (STACK_UNIT_BOUNDARY - 1)) & ~(STACK_UNIT_BOUNDARY - 1))
+
+/* Establish the position of the FP relative to the SP. This is done
+   either during output_function_prologue() or by
+   INITIAL_ELIMINATION_OFFSET.
+
+   Fills in save_regs[], nregs, nxregs, m88k_fp_offset and
+   m88k_stack_size for the current function.  */
+
+void
+m88k_layout_frame (void)
+{
+  int regno, sp_size;
+
+  /* After reload the set of live registers is fixed, so the cached
+     layout can be reused.  */
+  if (frame_laid_out && reload_completed)
+    return;
+
+  frame_laid_out = 1;
+
+  memset ((char *) &save_regs[0], 0, sizeof (save_regs));
+  sp_size = nregs = nxregs = 0;
+  frame_size = get_frame_size ();
+
+  /* Profiling requires a stack frame. */
+  if (current_function_profile)
+    frame_pointer_needed = 1;
+
+  /* If we are producing debug information, store r1 and r30 where the
+     debugger wants to find them (r30 at r30+0, r1 at r30+4). Space has
+     already been reserved for r1/r30 in STARTING_FRAME_OFFSET. */
+  if (write_symbols != NO_DEBUG)
+    save_regs[1] = 1;
+
+  /* If we are producing PIC, save the addressing base register and r1. */
+  if (flag_pic && current_function_uses_pic_offset_table)
+    {
+      save_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
+      nregs++;
+    }
+
+  /* If a frame is requested, save the previous FP, and the return
+     address (r1), so that a traceback can be done without using tdesc
+     information. Otherwise, simply save the FP if it is used as
+     a preserve register. */
+  if (frame_pointer_needed)
+    save_regs[FRAME_POINTER_REGNUM] = save_regs[1] = 1;
+  else
+    {
+      if (regs_ever_live[FRAME_POINTER_REGNUM])
+	save_regs[FRAME_POINTER_REGNUM] = 1;
+      /* If there is a call, r1 needs to be saved as well. */
+      if (regs_ever_live[1])
+	save_regs[1] = 1;
+    }
+
+  /* Figure out which extended register(s) needs to be saved. */
+  for (regno = FIRST_EXTENDED_REGISTER + 1; regno < FIRST_PSEUDO_REGISTER;
+       regno++)
+    if (regs_ever_live[regno] && ! call_used_regs[regno])
+      {
+	save_regs[regno] = 1;
+	nxregs++;
+      }
+
+  /* Figure out which normal register(s) needs to be saved. */
+  for (regno = 2; regno < FRAME_POINTER_REGNUM; regno++)
+    if (regs_ever_live[regno] && ! call_used_regs[regno])
+      {
+	save_regs[regno] = 1;
+	nregs++;
+      }
+
+  /* Achieve greatest use of double memory ops. Either we end up saving
+     r30 or we use that slot to align the registers we do save. */
+  if (nregs >= 2 && save_regs[1] && !save_regs[FRAME_POINTER_REGNUM])
+    sp_size += 4;
+
+  nregs += save_regs[1] + save_regs[FRAME_POINTER_REGNUM];
+  /* if we need to align extended registers, add a word */
+  if (nxregs > 0 && (nregs & 1) != 0)
+    sp_size +=4;
+  sp_size += 4 * nregs;
+  sp_size += 8 * nxregs;
+  sp_size += current_function_outgoing_args_size;
+
+  /* The first two saved registers are placed above the new frame pointer
+     if any. In the only case this matters, they are r1 and r30. */
+  if (frame_pointer_needed || sp_size)
+    m88k_fp_offset = ROUND_CALL_BLOCK_SIZE (sp_size - STARTING_FRAME_OFFSET);
+  else
+    m88k_fp_offset = -STARTING_FRAME_OFFSET;
+  m88k_stack_size = m88k_fp_offset + STARTING_FRAME_OFFSET;
+
+  /* First, combine m88k_stack_size and size. If m88k_stack_size is
+     nonzero, align the frame size to 8 mod 16; otherwise align the
+     frame size to 0 mod 16. (If stacks are 8 byte aligned, this ends
+     up as a NOP.) */
+  {
+    int need
+      = ((m88k_stack_size ? STACK_UNIT_BOUNDARY - STARTING_FRAME_OFFSET : 0)
+	 - (frame_size % STACK_UNIT_BOUNDARY));
+    if (need < 0)
+      need += STACK_UNIT_BOUNDARY;
+    m88k_stack_size
+      = ROUND_CALL_BLOCK_SIZE (m88k_stack_size + frame_size + need
+			       + current_function_pretend_args_size);
+  }
+}
+
+/* Return true if this function is known to have a null prologue.  */
+
+bool
+null_prologue (void)
+{
+  /* The answer is only meaningful once register allocation is done.  */
+  if (! reload_completed)
+    return false;
+
+  m88k_layout_frame ();
+
+  /* No prologue is needed when there is no frame pointer, no saved
+     registers and no stack adjustment.  */
+  if (frame_pointer_needed)
+    return false;
+  if (nregs != 0 || nxregs != 0)
+    return false;
+  return m88k_stack_size == 0;
+}
+
+/* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
+   with (plus:P (reg 31) VAL). It would be nice if dwarf2out_frame_debug_expr
+   could deduce these equivalences by itself so it wasn't necessary to hold
+   its hand so much.
+
+   INSN must be a single SET; the rewritten pattern is attached as a
+   REG_FRAME_RELATED_EXPR note and the insn is marked frame-related.  */
+
+static void
+m88k_frame_related (rtx insn, rtx reg, int val)
+{
+  rtx real, set, temp;
+
+  real = copy_rtx (PATTERN (insn));
+
+  /* Rewrite every use of REG as stack-pointer + VAL.  */
+  real = replace_rtx (real, reg,
+		      gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
+							STACK_POINTER_REGNUM),
+				    GEN_INT (val)));
+
+  /* We expect that 'real' is a SET. */
+
+  gcc_assert (GET_CODE (real) == SET);
+  set = real;
+
+  /* Fold the rewritten expressions so the DWARF writer sees canonical
+     RTL (including inside a MEM address).  */
+  temp = simplify_rtx (SET_SRC (set));
+  if (temp)
+    SET_SRC (set) = temp;
+  temp = simplify_rtx (SET_DEST (set));
+  if (temp)
+    SET_DEST (set) = temp;
+  if (GET_CODE (SET_DEST (set)) == MEM)
+    {
+      temp = simplify_rtx (XEXP (SET_DEST (set), 0));
+      if (temp)
+	XEXP (SET_DEST (set), 0) = temp;
+    }
+
+  RTX_FRAME_RELATED_P (insn) = 1;
+  REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+					real,
+					REG_NOTES (insn));
+}
+
+/* Attach a REG_MAYBE_DEAD note to INSN so that later passes may
+   remove it if its result turns out to be unused (used for the PIC
+   register setup sequence in the prologue).  */
+
+static void
+m88k_maybe_dead (rtx insn)
+{
+  REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
+					const0_rtx,
+					REG_NOTES (insn));
+}
+
+/* Expand the prologue RTL: adjust the stack pointer, save the
+   preserved registers, set up the frame pointer, and initialize the
+   PIC base register when needed.  */
+
+void
+m88k_expand_prologue (void)
+{
+  rtx insn;
+
+  m88k_layout_frame ();
+
+  if (warn_stack_larger_than && m88k_stack_size > stack_larger_than_size)
+    warning (0, "stack usage is %d bytes", m88k_stack_size);
+
+  if (m88k_stack_size)
+    {
+      insn = emit_add (stack_pointer_rtx, stack_pointer_rtx, -m88k_stack_size);
+      RTX_FRAME_RELATED_P (insn) = 1;
+
+      /* If the stack pointer adjustment has required a temporary register,
+	 tell the DWARF code how to understand this sequence. */
+      if (! SMALL_INTVAL (m88k_stack_size))
+	REG_NOTES (insn)
+	  = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+			       gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+					    gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+							  GEN_INT (-m88k_stack_size))),
+			       REG_NOTES(insn));
+    }
+
+  if (nregs || nxregs)
+    preserve_registers (m88k_fp_offset + 4, 1);
+
+  if (frame_pointer_needed)
+    {
+      insn = emit_add (frame_pointer_rtx, stack_pointer_rtx, m88k_fp_offset);
+      RTX_FRAME_RELATED_P (insn) = 1;
+    }
+
+  if (flag_pic && save_regs[PIC_OFFSET_TABLE_REGNUM])
+    {
+      rtx return_reg = gen_rtx_REG (SImode, 1);
+      rtx label = gen_label_rtx ();
+
+      /* Load the PIC base; each insn is marked maybe-dead so the
+	 sequence can be deleted if the PIC register is never used.  */
+      m88k_maybe_dead (emit_insn (gen_locate1 (pic_offset_table_rtx, label)));
+      m88k_maybe_dead (emit_insn (gen_locate2 (pic_offset_table_rtx, label)));
+      m88k_maybe_dead (emit_insn (gen_addsi3 (pic_offset_table_rtx,
+					      pic_offset_table_rtx,
+					      return_reg)));
+    }
+  /* Keep the scheduler from moving profiling code above this point.  */
+  if (current_function_profile)
+    emit_insn (gen_blockage ());
+}
+
+/* This function generates the assembly code for function exit,
+   on machines that need it.
+
+   The function epilogue should not depend on the current stack pointer!
+   It should use the frame pointer only, if there is a frame pointer.
+   This is mandatory because of alloca; we also take advantage of it to
+   omit stack adjustments before returning. */
+
+static void
+m88k_output_function_epilogue (FILE *stream,
+			       HOST_WIDE_INT size ATTRIBUTE_UNUSED)
+{
+  rtx insn = get_last_insn ();
+
+  /* If the last insn isn't a BARRIER, we must write a return insn. This
+     should only happen if the function has no prologue and no body. */
+  if (GET_CODE (insn) == NOTE)
+    insn = prev_nonnote_insn (insn);
+  if (insn == NULL_RTX || GET_CODE (insn) != BARRIER)
+    asm_fprintf (stream, "\tjmp\t %R%s\n", reg_names[1]);
+
+#if 0
+  /* If the last insn is a barrier, and the insn before that is a call,
+     then add a nop instruction so that tdesc can walk the stack correctly
+     even though there is no epilogue. (Otherwise, the label for the
+     end of the tdesc region ends up at the start of the next function.) */
+  if (insn && GET_CODE (insn) == BARRIER)
+    {
+      insn = prev_nonnote_insn (insn);
+      if (insn && GET_CODE (insn) == CALL_INSN)
+	asm_fprintf (stream, "\tor\t %R%s,%R%s,%R%s\n",
+		     reg_names[0], reg_names[0], reg_names[0]);
+    }
+#endif
+
+#ifndef USE_GAS
+  output_short_branch_defs (stream);
+#endif
+
+  fprintf (stream, "\n");
+
+  /* Invalidate the cached frame layout for the next function.  */
+  frame_laid_out = 0;
+}
+
+/* Expand the epilogue RTL: recover the stack pointer from the frame
+   pointer if one was set up, reload the preserved registers, pop the
+   frame and jump through the saved return address.  */
+
+void
+m88k_expand_epilogue (void)
+{
+  if (frame_pointer_needed)
+    emit_add (stack_pointer_rtx, frame_pointer_rtx, -m88k_fp_offset);
+
+  if (nregs || nxregs)
+    preserve_registers (m88k_fp_offset + 4, 0);
+
+  if (m88k_stack_size)
+    emit_add (stack_pointer_rtx, stack_pointer_rtx, m88k_stack_size);
+
+  emit_insn (gen_indirect_jump (INCOMING_RETURN_ADDR_RTX));
+}
+
+/* Emit insns to set DSTREG to SRCREG + AMOUNT during the prologue or
+   epilogue.  Returns the emitted add/sub insn.  */
+
+static rtx
+emit_add (rtx dstreg, rtx srcreg, int amount)
+{
+  /* The immediate operand is always positive; subtraction handles
+     negative amounts.  NOTE(review): abs () of INT_MIN would overflow,
+     but frame sizes never approach that magnitude.  */
+  rtx incr = GEN_INT (abs (amount));
+
+  if (! ADD_INTVAL (amount))
+    {
+      /* Too large for an immediate; build the constant in the
+	 temporary register first.  */
+      rtx temp = gen_rtx_REG (SImode, TEMP_REGNUM);
+      emit_move_insn (temp, incr);
+      incr = temp;
+    }
+  return emit_insn ((amount < 0 ? gen_subsi3 : gen_addsi3) (dstreg, srcreg,
+							    incr));
+}
+
+/* Save/restore the preserve registers. base is the highest offset from
+   r31 at which a register is stored. store_p is true if stores are to
+   be done; otherwise loads.
+
+   The function makes three passes over save_regs[], collecting memory
+   operations into mem_op[]: single-word ops first, then double-word
+   ops for adjacent register pairs, then the extended registers.  The
+   collected list is emitted at the end.  */
+
+static void
+preserve_registers (int base, int store_p)
+{
+  int regno, offset;
+  struct mem_op {
+    int regno;
+    int nregs;
+    int offset;
+  } mem_op[FIRST_PSEUDO_REGISTER];
+  struct mem_op *mo_ptr = mem_op;
+
+  /* The 88open OCS mandates that preserved registers be stored in
+     increasing order. For compatibility with current practice,
+     the order is r1, r30, then the preserve registers. */
+
+  offset = base;
+  if (save_regs[1])
+    {
+      /* An extra word is given in this case to make best use of double
+	 memory ops. */
+      if (nregs > 2 && !save_regs[FRAME_POINTER_REGNUM])
+	offset -= 4;
+      /* Do not reload r1 in the epilogue unless really necessary */
+      if (store_p || regs_ever_live[1]
+	  || (flag_pic && save_regs[PIC_OFFSET_TABLE_REGNUM]))
+	emit_ldst (store_p, 1, SImode, offset);
+      offset -= 4;
+      base = offset;
+    }
+
+  /* Walk the registers to save recording all single memory operations. */
+  for (regno = FRAME_POINTER_REGNUM; regno > 1; regno--)
+    if (save_regs[regno])
+      {
+	/* A double op needs a doubleword-aligned slot and an odd/even
+	   register pair; otherwise fall back to a single word.  */
+	if ((offset & 7) != 4 || (regno & 1) != 1 || !save_regs[regno-1])
+	  {
+	    mo_ptr->nregs = 1;
+	    mo_ptr->regno = regno;
+	    mo_ptr->offset = offset;
+	    mo_ptr++;
+	    offset -= 4;
+	  }
+	else
+	  {
+	    regno--;
+	    offset -= 2*4;
+	  }
+      }
+
+  /* Walk the registers to save recording all double memory operations.
+     This avoids a delay in the epilogue (ld.d/ld). */
+  offset = base;
+  for (regno = FRAME_POINTER_REGNUM; regno > 1; regno--)
+    if (save_regs[regno])
+      {
+	if ((offset & 7) != 4 || (regno & 1) != 1 || !save_regs[regno-1])
+	  {
+	    offset -= 4;
+	  }
+	else
+	  {
+	    mo_ptr->nregs = 2;
+	    mo_ptr->regno = regno-1;
+	    mo_ptr->offset = offset-4;
+	    mo_ptr++;
+	    regno--;
+	    offset -= 2*4;
+	  }
+      }
+
+  /* Walk the extended registers to record all memory operations. */
+  /* Be sure the offset is double word aligned. */
+  offset = (offset - 1) & ~7;
+  for (regno = FIRST_PSEUDO_REGISTER - 1; regno > FIRST_EXTENDED_REGISTER;
+       regno--)
+    if (save_regs[regno])
+      {
+	mo_ptr->nregs = 2;
+	mo_ptr->regno = regno;
+	mo_ptr->offset = offset;
+	mo_ptr++;
+	offset -= 2*4;
+      }
+
+  /* Terminate the list; regno 0 is never a preserved register.  */
+  mo_ptr->regno = 0;
+
+  /* Output the memory operations. */
+  for (mo_ptr = mem_op; mo_ptr->regno; mo_ptr++)
+    {
+      if (mo_ptr->nregs)
+	emit_ldst (store_p, mo_ptr->regno,
+		   (mo_ptr->nregs > 1 ? DImode : SImode),
+		   mo_ptr->offset);
+    }
+}
+
+/* Emit a load (STORE_P zero) or store (STORE_P nonzero) of register
+   REGNO in MODE at stack-pointer offset OFFSET.  Stores are annotated
+   for the DWARF unwinder via m88k_frame_related.  */
+
+static void
+emit_ldst (int store_p, int regno, enum machine_mode mode, int offset)
+{
+  rtx reg = gen_rtx_REG (mode, regno);
+  rtx mem;
+  rtx insn;
+
+  if (SMALL_INTVAL (offset))
+    {
+      mem = gen_rtx_MEM (mode, plus_constant (stack_pointer_rtx, offset));
+    }
+  else
+    {
+      /* offset is too large for immediate index must use register */
+
+      rtx disp = GEN_INT (offset);
+      rtx temp = gen_rtx_REG (SImode, TEMP_REGNUM);
+      rtx regi = gen_rtx_PLUS (SImode, stack_pointer_rtx, temp);
+
+      emit_move_insn (temp, disp);
+      mem = gen_rtx_MEM (mode, regi);
+    }
+
+  if (store_p)
+    {
+      insn = emit_move_insn (mem, reg);
+      m88k_frame_related (insn, stack_pointer_rtx, offset);
+    }
+  else
+    emit_move_insn (reg, mem);
+}
+
+/* Convert the address expression REG to a CFA offset.
+
+   REG is either a frame/stack/arg pointer register or a PLUS of such a
+   register and a CONST_INT; OFFSET is added to the register-relative
+   displacement.  Returns 0 when REG is not based on a recognized
+   pointer.  */
+
+int
+m88k_debugger_offset (rtx reg, int offset)
+{
+  /* NOTE(review): a PLUS discards the incoming OFFSET and uses the
+     embedded constant instead -- presumably callers never pass both.  */
+  if (GET_CODE (reg) == PLUS)
+    {
+      offset = INTVAL (XEXP (reg, 1));
+      reg = XEXP (reg, 0);
+    }
+
+  /* Put the offset in terms of the CFA (arg pointer). */
+  if (reg == frame_pointer_rtx)
+    offset += m88k_fp_offset - m88k_stack_size;
+  else if (reg == stack_pointer_rtx)
+    offset -= m88k_stack_size;
+  else if (reg != arg_pointer_rtx)
+    return 0;
+
+  return offset;
+}
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+   for profiling a function entry. NAME is the mcount function name
+   (varies).
+
+   The sequence spills r2-r9 around the mcount call so argument
+   registers survive profiling; the label address is passed in r2,
+   materialized differently for non-PIC, PIC and full-PIC.  */
+
+void
+output_function_profiler (FILE *file, int labelno, const char *name)
+{
+  char label[256];
+
+  /* Remember to update FUNCTION_PROFILER_LENGTH. */
+
+  asm_fprintf (file, "\tsubu\t %R%s,%R%s,32\n", reg_names[31], reg_names[31]);
+  asm_fprintf (file, "\tst.d\t %R%s,%R%s,0\n", reg_names[2], reg_names[31]);
+  asm_fprintf (file, "\tst.d\t %R%s,%R%s,8\n", reg_names[4], reg_names[31]);
+  asm_fprintf (file, "\tst.d\t %R%s,%R%s,16\n", reg_names[6], reg_names[31]);
+  asm_fprintf (file, "\tst.d\t %R%s,%R%s,24\n", reg_names[8], reg_names[31]);
+
+  /* &label[1] skips the '*' local-label prefix for the assembler.  */
+  ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
+  if (flag_pic == 2)
+    {
+      asm_fprintf (file, "\tor.u\t %R%s,%R%s,%Rhi16(%s#got_rel)\n",
+		   reg_names[2], reg_names[0], &label[1]);
+      asm_fprintf (file, "\tor\t %R%s,%R%s,%Rlo16(%s#got_rel)\n",
+		   reg_names[2], reg_names[2], &label[1]);
+      asm_fprintf (file, "\tbsr.n\t %s#plt\n", name);
+      asm_fprintf (file, "\t ld\t %R%s,%R%s,%R%s\n", reg_names[2],
+		   reg_names[PIC_OFFSET_TABLE_REGNUM], reg_names[2]);
+    }
+  else if (flag_pic)
+    {
+      asm_fprintf (file, "\tbsr.n\t %s#plt\n", name);
+      asm_fprintf (file, "\t ld\t %R%s,%R%s,%s#got_rel\n", reg_names[2],
+		   reg_names[PIC_OFFSET_TABLE_REGNUM], &label[1]);
+    }
+  else
+    {
+      asm_fprintf (file, "\tor.u\t %R%s,%R%s,%Rhi16(%s)\n",
+		   reg_names[2], reg_names[0], &label[1]);
+      asm_fprintf (file, "\tbsr.n\t %s\n", name);
+      asm_fprintf (file, "\t or\t %R%s,%R%s,%Rlo16(%s)\n",
+		   reg_names[2], reg_names[2], &label[1]);
+    }
+
+  asm_fprintf (file, "\tld.d\t %R%s,%R%s,0\n", reg_names[2], reg_names[31]);
+  asm_fprintf (file, "\tld.d\t %R%s,%R%s,8\n", reg_names[4], reg_names[31]);
+  asm_fprintf (file, "\tld.d\t %R%s,%R%s,16\n", reg_names[6], reg_names[31]);
+  asm_fprintf (file, "\tld.d\t %R%s,%R%s,24\n", reg_names[8], reg_names[31]);
+  asm_fprintf (file, "\taddu\t %R%s,%R%s,32\n", reg_names[31], reg_names[31]);
+}
+
+/* Determine whether a function argument is passed in a register, and
+   which register.
+
+   The arguments are CUM, which summarizes all the previous
+   arguments; MODE, the machine mode of the argument; TYPE,
+   the data type of the argument as a tree node or 0 if that is not known
+   (which happens for C support library functions); and NAMED,
+   which is 1 for an ordinary argument and 0 for nameless arguments that
+   correspond to `...' in the called function's prototype.
+
+   The value of the expression should either be a `reg' RTX for the
+   hard register in which to pass the argument, or zero to pass the
+   argument on the stack.
+
+   On the m88000 the first eight words of args are normally in registers
+   and the rest are pushed. Double precision floating point must be
+   double word aligned (and if in a register, starting on an even
+   register). Structures and unions which are not 4 byte, and word
+   aligned are passed in memory rather than registers, even if they
+   would fit completely in the registers under OCS rules.
+
+   Note that FUNCTION_ARG and FUNCTION_INCOMING_ARG were different.
+   For structures that are passed in memory, but could have been
+   passed in registers, we first load the structure into the
+   register, and then when the last argument is passed, we store
+   the registers into the stack locations. This fixes some bugs
+   where GCC did not expect to have register arguments, followed
+   by stack arguments, followed by register arguments. */
+
+struct rtx_def *
+m88k_function_arg (CUMULATIVE_ARGS args_so_far, enum machine_mode mode,
+		   tree type, int named ATTRIBUTE_UNUSED)
+{
+  int bytes, words;
+
+  /* undo putting struct in register */
+  if (type != NULL_TREE && AGGREGATE_TYPE_P (type))
+    mode = BLKmode;
+
+  /* m88k_function_arg argument `type' is NULL for BLKmode. */
+  gcc_assert (type != NULL_TREE || mode != BLKmode);
+
+  bytes = (mode != BLKmode) ? GET_MODE_SIZE (mode) : int_size_in_bytes (type);
+
+  /* Variable-sized types get passed by reference, which can be passed
+     in registers. */
+  if (bytes < 0)
+    {
+      if (args_so_far > 8 - (POINTER_SIZE / BITS_PER_WORD))
+	return NULL_RTX;
+
+      return gen_rtx_REG (Pmode, 2 + args_so_far);
+    }
+
+  words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+
+  /* Doubleword-aligned arguments start in an even register.  */
+  if ((args_so_far & 1) != 0
+      && (mode == DImode || mode == DFmode
+	  || (type != NULL_TREE && TYPE_ALIGN (type) > BITS_PER_WORD)))
+    args_so_far++;
+
+  if (args_so_far + words > 8)
+    return NULL_RTX; /* args have exhausted registers */
+
+  else if (mode == BLKmode
+	   && (TYPE_ALIGN (type) != BITS_PER_WORD || bytes != UNITS_PER_WORD))
+    return NULL_RTX;
+
+  /* Argument registers start at r2.  */
+  return gen_rtx_REG (((mode == BLKmode) ? TYPE_MODE (type) : mode),
+		      2 + args_so_far);
+}
+
+/* Update the summarizer variable CUM to advance past an argument in
+   the argument list. The values MODE, TYPE and NAMED describe that
+   argument. Once this is done, the variable CUM is suitable for
+   analyzing the *following* argument with `FUNCTION_ARG', etc. (TYPE
+   is null for libcalls where that information may not be available.)
+
+   The logic mirrors m88k_function_arg: CUM is only advanced when the
+   argument is actually passed in registers.  */
+void
+m88k_function_arg_advance (CUMULATIVE_ARGS *args_so_far, enum machine_mode mode,
+			   tree type, int named ATTRIBUTE_UNUSED)
+{
+  int bytes, words;
+  int asf;
+
+  if (type != NULL_TREE && AGGREGATE_TYPE_P (type))
+    mode = BLKmode;
+
+  bytes = (mode != BLKmode) ? GET_MODE_SIZE (mode) : int_size_in_bytes (type);
+  asf = *args_so_far;
+
+  /* Variable-sized types get passed by reference, which can be passed
+     in registers. */
+  if (bytes < 0)
+    {
+      if (asf <= 8 - (POINTER_SIZE / BITS_PER_WORD))
+	*args_so_far += POINTER_SIZE / BITS_PER_WORD;
+
+      return;
+    }
+
+  words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+
+  /* Struct and unions which are not exactly the size of a register are to be
+     passed on stack. */
+  if (mode == BLKmode
+      && (TYPE_ALIGN (type) != BITS_PER_WORD || bytes != UNITS_PER_WORD))
+    return;
+
+  /* Align arguments requiring more than word alignment to a double-word
+     boundary (or an even register number if the argument will get passed
+     in registers). */
+  if ((asf & 1) != 0
+      && (mode == DImode || mode == DFmode
+	  || (type != NULL_TREE && TYPE_ALIGN (type) > BITS_PER_WORD)))
+    asf++;
+
+  if (asf + words > 8)
+    return;
+
+  (*args_so_far) = asf + words;
+}
+
+/* A C expression that indicates when an argument must be passed by
+   reference. If nonzero for an argument, a copy of that argument is
+   made in memory and a pointer to the argument is passed instead of
+   the argument itself. The pointer is passed in whatever way is
+   appropriate for passing a pointer to that type.
+
+   On m88k, only variable sized types are passed by reference. */
+
+static bool
+m88k_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
+			enum machine_mode mode ATTRIBUTE_UNUSED,
+			tree type, bool named ATTRIBUTE_UNUSED)
+{
+  if (type == NULL_TREE)
+    return false;
+
+  /* A negative size identifies a variable sized type.  */
+  return int_size_in_bytes (type) < 0;
+}
+
+/* Disable the promotion of some structures and unions to registers.
+   Note that this matches FUNCTION_ARG behaviour. */
+static bool
+m88k_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
+{
+  /* BLKmode values never come back in registers.  */
+  if (TYPE_MODE (type) == BLKmode)
+    return true;
+
+  /* A struct or union is returned in memory unless it is exactly one
+     word in size and word aligned.  */
+  if (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE)
+    return false;
+
+  return (TYPE_ALIGN (type) != BITS_PER_WORD
+	  || GET_MODE_SIZE (TYPE_MODE (type)) != UNITS_PER_WORD);
+}
+
+/* Perform any needed actions needed for a function that is receiving a
+   variable number of arguments.
+
+   CUM is a variable of type CUMULATIVE_ARGS which gives info about
+   the preceding args and about the function being called.
+
+   MODE and TYPE are the mode and type of the current parameter.
+
+   PRETEND_SIZE is a variable that should be set to the amount of stack
+   that must be pushed by the prolog to pretend that our caller pushed
+   it.
+
+   Normally, this macro will push all remaining incoming registers on the
+   stack and set PRETEND_SIZE to the length of the registers pushed. */
+
+void
+m88k_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+			     tree type, int *pretend_size, int no_rtl)
+{
+  CUMULATIVE_ARGS next_cum;
+  tree fntype;
+  int stdarg_p;
+  int regcnt, delta;
+
+  fntype = TREE_TYPE (current_function_decl);
+  stdarg_p = (TYPE_ARG_TYPES (fntype) != NULL_TREE
+	      && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
+		  != void_type_node));
+
+  /* For varargs, we do not want to skip the dummy va_dcl argument.
+     For stdargs, we do want to skip the last named argument. */
+  next_cum = *cum;
+  if (stdarg_p)
+    m88k_function_arg_advance(&next_cum, mode, type, 1);
+
+  /* Number of argument registers (r2..r9) still unclaimed; DELTA pads
+     to keep the double stores below doubleword aligned.  */
+  regcnt = next_cum < 8 ? 8 - next_cum : 0;
+  delta = regcnt & 1;
+
+  if (! no_rtl && regcnt != 0)
+    {
+      rtx mem, dst;
+      int set, regno, offs;
+
+      set = get_varargs_alias_set ();
+      mem = gen_rtx_MEM (BLKmode,
+			 plus_constant (virtual_incoming_args_rtx,
+					- (regcnt + delta) * UNITS_PER_WORD));
+      MEM_NOTRAP_P (mem) = 1;
+      set_mem_alias_set (mem, set);
+
+      /* Now store the incoming registers. */
+      /* The following is equivalent to
+	 move_block_from_reg (2 + next_cum,
+			      adjust_address (mem, Pmode,
+					      delta * UNITS_PER_WORD),
+			      regcnt, UNITS_PER_WORD * regcnt);
+	 but using double store instruction since the stack is properly
+	 aligned. */
+      regno = 2 + next_cum;
+      dst = mem;
+
+      /* An odd count needs one single-word store to reach doubleword
+	 alignment before the ld.d-friendly double stores.  */
+      if (delta != 0)
+	{
+	  dst = adjust_address (dst, Pmode, UNITS_PER_WORD);
+	  emit_move_insn (operand_subword (dst, 0, 1, BLKmode),
+			  gen_rtx_REG (SImode, regno));
+	  regno++;
+	}
+
+      offs = delta;
+      while (regno < 10)
+	{
+	  emit_move_insn (adjust_address (dst, DImode, offs * UNITS_PER_WORD),
+			  gen_rtx_REG (DImode, regno));
+	  offs += 2;
+	  regno += 2;
+	}
+
+      *pretend_size = (regcnt + delta) * UNITS_PER_WORD;
+    }
+}
+
+/* Define the `__builtin_va_list' type for the ABI.
+
+   The record has three members:
+     __va_arg  -- count of argument words consumed so far,
+     __va_stk  -- pointer to the overflow (stack) argument area,
+     __va_reg  -- pointer to the register save area.  */
+
+static tree
+m88k_build_va_list (void)
+{
+  tree field_reg, field_stk, field_arg, int_ptr_type_node, record;
+
+  int_ptr_type_node = build_pointer_type (integer_type_node);
+
+  record = make_node (RECORD_TYPE);
+
+  field_arg = build_decl (FIELD_DECL, get_identifier ("__va_arg"),
+			  integer_type_node);
+  field_stk = build_decl (FIELD_DECL, get_identifier ("__va_stk"),
+			  int_ptr_type_node);
+  field_reg = build_decl (FIELD_DECL, get_identifier ("__va_reg"),
+			  int_ptr_type_node);
+
+  DECL_FIELD_CONTEXT (field_arg) = record;
+  DECL_FIELD_CONTEXT (field_stk) = record;
+  DECL_FIELD_CONTEXT (field_reg) = record;
+
+  /* Field order must match m88k_va_start/m88k_gimplify_va_arg, which
+     walk TYPE_FIELDS in this order.  */
+  TYPE_FIELDS (record) = field_arg;
+  TREE_CHAIN (field_arg) = field_stk;
+  TREE_CHAIN (field_stk) = field_reg;
+
+  layout_type (record);
+  return record;
+}
+
+/* Implement `va_start' for varargs and stdarg.
+
+   Initializes the three va_list members: __va_arg from the number of
+   named argument words, __va_stk to the first overflow argument on the
+   stack, and __va_reg to the base of the register save area.  */
+
+void
+m88k_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
+{
+  tree field_reg, field_stk, field_arg;
+  tree reg, stk, arg, t;
+  tree fntype;
+  int stdarg_p;
+  int offset;
+
+  gcc_assert (CONSTANT_P (current_function_arg_offset_rtx));
+
+  field_arg = TYPE_FIELDS (va_list_type_node);
+  field_stk = TREE_CHAIN (field_arg);
+  field_reg = TREE_CHAIN (field_stk);
+
+  arg = build3 (COMPONENT_REF, TREE_TYPE (field_arg), valist, field_arg,
+		NULL_TREE);
+  stk = build3 (COMPONENT_REF, TREE_TYPE (field_stk), valist, field_stk,
+		NULL_TREE);
+  reg = build3 (COMPONENT_REF, TREE_TYPE (field_reg), valist, field_reg,
+		NULL_TREE);
+
+  fntype = TREE_TYPE (current_function_decl);
+  stdarg_p = (TYPE_ARG_TYPES (fntype) != NULL_TREE
+	      && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
+		  != void_type_node));
+
+  /* Fill in the __va_arg member. */
+  t = build2 (MODIFY_EXPR, TREE_TYPE (arg), arg,
+	      size_int (current_function_args_info));
+  TREE_SIDE_EFFECTS (t) = 1;
+  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+  /* Store the arg pointer in the __va_stk member. */
+  offset = XINT (current_function_arg_offset_rtx, 0);
+  /* For varargs functions with all argument registers used, the dummy
+     va_dcl word is part of the variable arguments.  */
+  if (current_function_args_info >= 8 && ! stdarg_p)
+    offset -= UNITS_PER_WORD;
+  t = make_tree (TREE_TYPE (stk), virtual_incoming_args_rtx);
+  t = build2 (PLUS_EXPR, TREE_TYPE (stk), t, size_int (offset));
+  t = build2 (MODIFY_EXPR, TREE_TYPE (stk), stk, t);
+  TREE_SIDE_EFFECTS (t) = 1;
+  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+  /* Setup __va_reg: the 8 argument registers are saved immediately
+     below the incoming args pointer.  */
+  t = make_tree (TREE_TYPE (reg), virtual_incoming_args_rtx);
+  t = build2 (PLUS_EXPR, TREE_TYPE (reg), t,
+	      build_int_cst (NULL_TREE, -8 * UNITS_PER_WORD));
+  t = build2 (MODIFY_EXPR, TREE_TYPE (reg), reg, t);
+  TREE_SIDE_EFFECTS (t) = 1;
+  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+}
+
+/* Implement `va_arg'. */
+
+tree
+m88k_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
+{
+ tree field_reg, field_stk, field_arg;
+ int size, wsize, align;
+ bool reg_p;
+ tree ptrtype = build_pointer_type (type);
+ tree lab_done;
+ tree addr;
+ tree t;
+
+ /* Arguments passed by reference are accessed through an implicit
+    pointer: fetch the pointer as a va_arg of pointer type, then
+    dereference it. */
+ if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
+ {
+ t = m88k_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
+ return build_va_arg_indirect_ref (t);
+ }
+
+ /* The va_list record fields, in declaration order:
+    __va_arg, __va_stk, __va_reg. */
+ field_arg = TYPE_FIELDS (va_list_type_node);
+ field_stk = TREE_CHAIN (field_arg);
+ field_reg = TREE_CHAIN (field_stk);
+
+ /* Size of the argument in bytes and in whole words. */
+ size = int_size_in_bytes (type);
+ wsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+ /* NOTE(review): aggregates are eligible for the register save area
+    only when exactly one word in size and word-aligned; scalars
+    whenever they fit in a register pair -- confirm against the m88k
+    argument passing convention. */
+ reg_p = (AGGREGATE_TYPE_P (type)
+ ? size == UNITS_PER_WORD && TYPE_ALIGN (type) == BITS_PER_WORD
+ : size <= 2*UNITS_PER_WORD);
+
+ addr = create_tmp_var (ptr_type_node, "addr");
+ DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
+ lab_done = NULL;
+
+ /* Decide if we should read from stack or regs if the argument could have
+ been passed in registers. */
+ if (reg_p) {
+ tree arg, arg_align, reg;
+ tree lab_stack;
+ tree u;
+
+ lab_stack = create_artificial_label ();
+ lab_done = create_artificial_label ();
+
+ /* Align __va_arg to a doubleword boundary if necessary. */
+ arg = build3 (COMPONENT_REF, TREE_TYPE (field_arg), valist, field_arg,
+ NULL_TREE);
+ align = type == NULL_TREE ? 0 : TYPE_ALIGN (type) / BITS_PER_WORD;
+ if (align > 1)
+ {
+ t = build2 (PLUS_EXPR, TREE_TYPE (arg), arg, size_int (align - 1));
+ arg_align = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
+ build_int_cst (NULL_TREE, -align));
+ gimplify_expr (&arg_align, pre_p, NULL, is_gimple_val, fb_rvalue);
+ }
+ else
+ arg_align = arg;
+
+ /* Make sure the argument fits within the remainder of the saved
+ register area, and branch to the stack logic if not. */
+ u = fold_convert (TREE_TYPE (arg), arg_align);
+ /* if (arg_align > 8 - wsize) goto lab_stack */
+ t = fold_convert (TREE_TYPE (arg), size_int (8 - wsize));
+ t = build2 (GT_EXPR, boolean_type_node, u, t);
+ u = build1 (GOTO_EXPR, void_type_node, lab_stack);
+ t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
+ gimplify_and_add (t, pre_p);
+
+ /* Compute the argument address: __va_reg + __va_arg * word size. */
+ reg = build3 (COMPONENT_REF, TREE_TYPE (field_reg), valist, field_reg,
+ NULL_TREE);
+ t = build2 (MULT_EXPR, TREE_TYPE (reg), arg_align,
+ size_int (UNITS_PER_WORD));
+ t = build2 (PLUS_EXPR, TREE_TYPE (reg), reg, t);
+
+ t = build2 (MODIFY_EXPR, void_type_node, addr, t);
+ gimplify_and_add (t, pre_p);
+
+ /* Increment __va_arg. */
+ t = build2 (PLUS_EXPR, TREE_TYPE (arg), arg_align, size_int (wsize));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (arg), arg, t);
+ gimplify_and_add (t, pre_p);
+
+ t = build1 (GOTO_EXPR, void_type_node, lab_done);
+ gimplify_and_add (t, pre_p);
+
+ t = build1 (LABEL_EXPR, void_type_node, lab_stack);
+ append_to_statement_list (t, pre_p);
+ }
+
+ /* Stack case: fetch the argument from the __va_stk overflow area.
+    Always emitted; reached directly when the argument cannot be in
+    the register save area. */
+ {
+ tree stk;
+ tree u;
+
+ stk = build3 (COMPONENT_REF, TREE_TYPE (field_stk), valist, field_stk,
+ NULL_TREE);
+
+ /* Align __va_stk to the type boundary if necessary. */
+ align = type == NULL_TREE ? 0 : TYPE_ALIGN (type) / BITS_PER_UNIT;
+ if (align > UNITS_PER_WORD)
+ {
+ t = build2 (PLUS_EXPR, TREE_TYPE (stk), stk, size_int (align - 1));
+ t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
+ build_int_cst (NULL_TREE, -align));
+ gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
+ }
+ else
+ t = stk;
+
+ /* Compute the argument address. */
+ u = build2 (MODIFY_EXPR, void_type_node, addr, t);
+ gimplify_and_add (u, pre_p);
+
+ /* Increment __va_stk. */
+ t = build2 (PLUS_EXPR, TREE_TYPE (t), t, size_int (wsize * UNITS_PER_WORD));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (stk), stk, t);
+ gimplify_and_add (t, pre_p);
+ }
+
+ /* Join point for the register path, if one was emitted. */
+ if (lab_done)
+ {
+ t = build1 (LABEL_EXPR, void_type_node, lab_done);
+ append_to_statement_list (t, pre_p);
+ }
+
+ addr = fold_convert (ptrtype, addr);
+ return build_va_arg_indirect_ref (addr);
+}
+
+/* If cmpsi has not been generated, emit code to do the test. Return the
+ expression describing the test of operator OP. */
+
+rtx
+emit_test (enum rtx_code op, enum machine_mode mode)
+{
+ /* NOTE(review): relies on m88k_compare_reg being reset to NULL_RTX
+    when a new comparison is set up (cmpsi expander) and set by the
+    test pattern -- confirm; only one test insn is emitted per
+    comparison. */
+ if (m88k_compare_reg == NULL_RTX)
+ emit_insn (gen_test (m88k_compare_op0, m88k_compare_op1));
+ return (gen_rtx_fmt_ee (op, mode, m88k_compare_reg, const0_rtx));
+}
+
+/* Determine how to best perform cmpsi/bxx, where cmpsi has a constant
+ operand. All tests with zero (albeit swapped) and all equality tests
+ with a constant are done with bcnd. The remaining cases are swapped
+ as needed. */
+
+void
+emit_bcnd (enum rtx_code op, rtx label)
+{
+ /* Comparisons against zero map directly onto bcnd, swapping the
+    operands (and hence the condition) when zero is on the left. */
+ if (m88k_compare_op1 == const0_rtx)
+ emit_jump_insn (gen_bcnd
+ (gen_rtx_fmt_ee (op, VOIDmode, m88k_compare_op0, const0_rtx),
+ label));
+ else if (m88k_compare_op0 == const0_rtx)
+ emit_jump_insn (gen_bcnd
+ (gen_rtx_fmt_ee (swap_condition (op),
+ VOIDmode, m88k_compare_op1, const0_rtx),
+ label));
+ else if (op != EQ && op != NE)
+ emit_jump_insn (gen_bxx (emit_test (op, VOIDmode), label));
+ else
+ {
+ rtx zero = gen_reg_rtx (SImode);
+ rtx reg, constant;
+ int value;
+
+ /* Sort the operands into REG (forced into a register) and
+    CONSTANT, whichever order they arrived in. */
+ if (GET_CODE (m88k_compare_op1) == CONST_INT)
+ {
+ reg = force_reg (SImode, m88k_compare_op0);
+ constant = m88k_compare_op1;
+ }
+ else
+ {
+ reg = force_reg (SImode, m88k_compare_op1);
+ constant = m88k_compare_op0;
+ }
+ value = INTVAL (constant);
+
+ /* Perform an arithmetic computation to make the compared-to value
+ zero, but avoid losing if the bcnd is later changed into sxx. */
+ if (SMALL_INTVAL (value))
+ emit_jump_insn (gen_bxx (emit_test (op, VOIDmode), label));
+ else
+ {
+ /* Subtract when the negated constant fits an immediate field,
+    otherwise use xor; either makes ZERO zero iff REG == VALUE. */
+ if (SMALL_INTVAL (-value))
+ emit_insn (gen_addsi3 (zero, reg,
+ GEN_INT (-value)));
+ else
+ emit_insn (gen_xorsi3 (zero, reg, constant));
+
+ emit_jump_insn (gen_bcnd (gen_rtx_fmt_ee (op, VOIDmode,
+ zero, const0_rtx),
+ label));
+ }
+ }
+}
+
+/* Print an operand. Recognize special options, documented below. */
+
+void
+print_operand (FILE *file, rtx x, int code)
+{
+ enum rtx_code xc = (x ? GET_CODE (x) : UNKNOWN);
+ int value = (xc == CONST_INT ? INTVAL (x) : 0);
+ /* State carried between consecutive print_operand calls: a %R or %!
+    must be immediately followed by one of %B/%C/%D/%E, possibly with
+    its condition reversed. */
+ static int sequencep;
+ static int reversep;
+
+ if (sequencep)
+ {
+ if (code < 'B' || code > 'E')
+ output_operand_lossage ("%%R not followed by %%B/C/D/E");
+ if (reversep)
+ xc = reverse_condition (xc);
+ sequencep = 0;
+ }
+
+ switch (code)
+ {
+ case '#': /* register prefix character (may be empty) */
+ fputs (m88k_register_prefix, file);
+ return;
+
+ case 'V': /* Output a serializing instruction as needed if the operand
+ (assumed to be a MEM) is a volatile load. */
+ case 'v': /* ditto for a volatile store. */
+ if (MEM_VOLATILE_P (x) && TARGET_SERIALIZE_VOLATILE)
+ {
+ /* The m88110 implements two FIFO queues, one for loads and
+ one for stores. These queues mean that loads complete in
+ their issue order as do stores. An interaction between the
+ history buffer and the store reservation station ensures
+ that a store will not bypass load. Finally, a load will not
+ bypass store, but only when they reference the same address.
+
+ To avoid this reordering (a load bypassing a store) for
+ volatile references, a serializing instruction is output.
+ We choose the fldcr instruction as it does not serialize on
+ the m88100 so that -m88000 code will not be degraded.
+
+ The mechanism below is completed by having CC_STATUS_INIT set
+ the code to the unknown value. */
+
+ /*
+ hassey 6/30/93
+ A problem with 88110 4.1 & 4.2 makes the use of fldcr for
+ this purpose undesirable. Instead we will use tb1, this will
+ cause serialization on the 88100 but such is life.
+ */
+
+ static rtx last_addr = NULL_RTX;
+ if (code == 'V' /* Only need to serialize before a load. */
+ && m88k_volatile_code != 'V' /* Loads complete in FIFO order. */
+ && !(m88k_volatile_code == 'v'
+ && GET_CODE (XEXP (x, 0)) == LO_SUM
+ && rtx_equal_p (XEXP (XEXP (x, 0), 1), last_addr)))
+ asm_fprintf (file,
+#if 0
+#ifdef AS_BUG_FLDCR
+ "fldcr\t %R%s,%Rcr63\n\t",
+#else
+ "fldcr\t %R%s,%Rfcr63\n\t",
+#endif
+ reg_names[0]);
+#else /* 0 */
+ "tb1\t 1,%R%s,0xff\n\t", reg_names[0]);
+#endif /* 0 */
+ m88k_volatile_code = code;
+ last_addr = (GET_CODE (XEXP (x, 0)) == LO_SUM
+ ? XEXP (XEXP (x, 0), 1) : 0);
+ }
+ return;
+
+ case 'X': /* print the upper 16 bits... */
+ value >>= 16;
+ /* FALLTHRU */
+ case 'x': /* print the lower 16 bits of the integer constant in hex */
+ if (xc != CONST_INT)
+ output_operand_lossage ("invalid %%x/X value");
+ fprintf (file, "0x%x", value & 0xffff); return;
+
+ case 'H': /* print the low 16 bits of the negated integer constant */
+ if (xc != CONST_INT)
+ output_operand_lossage ("invalid %%H value");
+ value = -value;
+ /* FALLTHRU */
+ case 'h': /* print the register or low 16 bits of the integer constant */
+ if (xc == REG)
+ goto reg;
+ if (xc != CONST_INT)
+ output_operand_lossage ("invalid %%h value");
+ fprintf (file, "%d", value & 0xffff);
+ return;
+
+ case 'Q': /* print the low 8 bits of the negated integer constant */
+ if (xc != CONST_INT)
+ output_operand_lossage ("invalid %%Q value");
+ value = -value;
+ /* FALLTHRU */
+ case 'q': /* print the register or low 8 bits of the integer constant */
+ if (xc == REG)
+ goto reg;
+ if (xc != CONST_INT)
+ output_operand_lossage ("invalid %%q value");
+ fprintf (file, "%d", value & 0xff);
+ return;
+
+ case 'w': /* print the integer constant (X == 32 ? 0 : 32 - X) */
+ if (xc != CONST_INT)
+ /* NOTE(review): message says %%o but this is the %%w case. */
+ output_operand_lossage ("invalid %%o value");
+ fprintf (file, "%d", value == 32 ? 0 : 32 - value);
+ return;
+
+ case 'p': /* print the logarithm of the integer constant */
+ if (xc != CONST_INT
+ || (value = exact_log2 (value)) < 0)
+ output_operand_lossage ("invalid %%p value");
+ fprintf (file, "%d", value);
+ return;
+
+ case 'S': /* complement the value and then... */
+ value = ~value;
+ /* FALLTHRU */
+ case 's': /* print the width and offset values forming the integer
+ constant with a SET instruction. See integer_ok_for_set. */
+ {
+ unsigned mask, uval = value;
+ int top, bottom;
+
+ if (xc != CONST_INT)
+ output_operand_lossage ("invalid %%s/S value");
+ /* All the "one" bits must be contiguous. If so, MASK will be
+ a power of two or zero. */
+ mask = (uval | (uval - 1)) + 1;
+ if (!(uval && POWER_OF_2_or_0 (mask)))
+ output_operand_lossage ("invalid %%s/S value");
+ top = mask ? exact_log2 (mask) : 32;
+ bottom = exact_log2 (uval & ~(uval - 1));
+ fprintf (file,"%d<%d>", top - bottom, bottom);
+ return;
+ }
+
+ case 'P': /* print nothing if pc_rtx; output label_ref */
+ if (xc == LABEL_REF)
+ output_addr_const (file, x);
+ else if (xc != PC)
+ output_operand_lossage ("invalid %%P operand");
+ return;
+
+ case 'L': /* print 0 or 1 if operand is label_ref and then... */
+ fputc (xc == LABEL_REF ? '1' : '0', file);
+ /* FALLTHRU */
+ case '.': /* print .n if delay slot is used */
+ fputs ((final_sequence
+ && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0)))
+ ? ".n\t" : "\t", file);
+ return;
+
+ case '!': /* Reverse the following condition. */
+ sequencep++;
+ reversep = 1;
+ return;
+ case 'R': /* reverse the condition of the next print_operand
+ if operand is a label_ref. */
+ sequencep++;
+ reversep = (xc == LABEL_REF);
+ return;
+
+ case 'B': /* bcnd branch values */
+ if (0) /* SVR4 */
+ fputs (m88k_register_prefix, file);
+ switch (xc)
+ {
+ case EQ: fputs ("eq0", file); return;
+ case NE: fputs ("ne0", file); return;
+ case GT: fputs ("gt0", file); return;
+ case LE: fputs ("le0", file); return;
+ case LT: fputs ("lt0", file); return;
+ case GE: fputs ("ge0", file); return;
+ default: output_operand_lossage ("invalid %%B value");
+ }
+
+ case 'C': /* bb0/bb1 branch values for comparisons */
+ if (0) /* SVR4 */
+ fputs (m88k_register_prefix, file);
+ switch (xc)
+ {
+ case EQ: fputs ("eq", file); return;
+ case NE: fputs ("ne", file); return;
+ case GT: fputs ("gt", file); return;
+ case LE: fputs ("le", file); return;
+ case LT: fputs ("lt", file); return;
+ case GE: fputs ("ge", file); return;
+ case GTU: fputs ("hi", file); return;
+ case LEU: fputs ("ls", file); return;
+ case LTU: fputs ("lo", file); return;
+ case GEU: fputs ("hs", file); return;
+ default: output_operand_lossage ("invalid %%C value");
+ }
+
+ case 'D': /* bcnd branch values for float comparisons */
+ switch (xc)
+ {
+ case EQ: fputs ("0xa", file); return;
+ case NE: fputs ("0x5", file); return;
+ case GT:
+ if (0) /* SVR4 */
+ fputs (m88k_register_prefix, file);
+ fputs ("gt0", file);
+ return;
+ case LE: fputs ("0xe", file); return;
+ case LT: fputs ("0x4", file); return;
+ case GE: fputs ("0xb", file); return;
+ default: output_operand_lossage ("invalid %%D value");
+ }
+
+ case 'E': /* bcnd branch values for special integers */
+ switch (xc)
+ {
+ case EQ: fputs ("0x8", file); return;
+ case NE: fputs ("0x7", file); return;
+ default: output_operand_lossage ("invalid %%E value");
+ }
+
+ case 'd': /* second register of a two register pair */
+ if (xc != REG)
+ output_operand_lossage ("`%%d' operand isn't a register");
+ asm_fprintf (file, "%R%s", reg_names[REGNO (x) + 1]);
+ return;
+
+ case 'r': /* an immediate 0 should be represented as `r0' */
+ if (x == const0_rtx)
+ {
+ asm_fprintf (file, "%R%s", reg_names[0]);
+ return;
+ }
+ else if (xc != REG)
+ output_operand_lossage ("invalid %%r value");
+ /* FALLTHRU */
+ case 0:
+ name:
+ if (xc == REG)
+ {
+ reg:
+ if (REGNO (x) == ARG_POINTER_REGNUM)
+ output_operand_lossage ("operand is r0");
+ else
+ asm_fprintf (file, "%R%s", reg_names[REGNO (x)]);
+ }
+ else if (xc == PLUS)
+ output_address (x);
+ else if (xc == MEM)
+ output_address (XEXP (x, 0));
+ else if (flag_pic && xc == UNSPEC)
+ {
+ output_addr_const (file, XVECEXP (x, 0, 0));
+ fputs ("#got_rel", file);
+ }
+ else if (xc == CONST_DOUBLE)
+ output_operand_lossage ("operand is const_double");
+ else
+ output_addr_const (file, x);
+ return;
+
+ case 'g': /* append #got_rel as needed */
+ if (flag_pic && (xc == SYMBOL_REF || xc == LABEL_REF))
+ {
+ output_addr_const (file, x);
+ fputs ("#got_rel", file);
+ return;
+ }
+ goto name;
+
+ case 'a': /* (standard), assume operand is an address */
+ case 'c': /* (standard), assume operand is an immediate value */
+ case 'l': /* (standard), assume operand is a label_ref */
+ case 'n': /* (standard), like %c, except negate first */
+ default:
+ output_operand_lossage ("invalid code");
+ }
+}
+
+void
+print_operand_address (FILE *file, rtx addr)
+{
+ rtx reg0, reg1;
+
+ /* m88k memory operands are always printed as a base,index (or
+    base,offset) pair; r0 stands in for a missing base. */
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ gcc_assert (REGNO (addr) != ARG_POINTER_REGNUM);
+ asm_fprintf (file, "%R%s,%R%s", reg_names[0], reg_names [REGNO (addr)]);
+ break;
+
+ case LO_SUM:
+ asm_fprintf (file, "%R%s,%Rlo16(",
+ reg_names[REGNO (XEXP (addr, 0))]);
+ output_addr_const (file, XEXP (addr, 1));
+ fputc (')', file);
+ break;
+
+ case PLUS:
+ /* Canonicalize so the register (if any) comes first. */
+ reg0 = XEXP (addr, 0);
+ reg1 = XEXP (addr, 1);
+ if (GET_CODE (reg0) == MULT || GET_CODE (reg0) == CONST_INT)
+ {
+ rtx tmp = reg0;
+ reg0 = reg1;
+ reg1 = tmp;
+ }
+
+ gcc_assert ((!REG_P (reg0) || REGNO (reg0) != ARG_POINTER_REGNUM)
+ && (!REG_P (reg1) || REGNO (reg1) != ARG_POINTER_REGNUM));
+
+ if (REG_P (reg0))
+ {
+ if (REG_P (reg1))
+ asm_fprintf (file, "%R%s,%R%s",
+ reg_names [REGNO (reg0)], reg_names [REGNO (reg1)]);
+
+ else if (GET_CODE (reg1) == CONST_INT)
+ asm_fprintf (file, "%R%s,%d",
+ reg_names [REGNO (reg0)], INTVAL (reg1));
+
+ else if (GET_CODE (reg1) == MULT)
+ {
+ /* Scaled-index form: base[index]. */
+ rtx mreg = XEXP (reg1, 0);
+ gcc_assert (REGNO (mreg) != ARG_POINTER_REGNUM);
+
+ asm_fprintf (file, "%R%s[%R%s]", reg_names[REGNO (reg0)],
+ reg_names[REGNO (mreg)]);
+ }
+
+ else if (GET_CODE (reg1) == ZERO_EXTRACT)
+ {
+ asm_fprintf (file, "%R%s,%Rlo16(",
+ reg_names[REGNO (reg0)]);
+ output_addr_const (file, XEXP (reg1, 0));
+ fputc (')', file);
+ }
+
+ else if (flag_pic)
+ {
+ asm_fprintf (file, "%R%s,", reg_names[REGNO (reg0)]);
+ output_addr_const (file, reg1);
+ fputs ("#got_rel", file);
+ }
+ else
+ gcc_unreachable ();
+ }
+
+ else
+ gcc_unreachable ();
+ break;
+
+ case MULT:
+ gcc_assert (REGNO (XEXP (addr, 0)) != ARG_POINTER_REGNUM);
+ asm_fprintf (file, "%R%s[%R%s]",
+ reg_names[0], reg_names[REGNO (XEXP (addr, 0))]);
+ break;
+
+ case CONST_INT:
+ asm_fprintf (file, "%R%s,%d", reg_names[0], INTVAL (addr));
+ break;
+
+ default:
+ asm_fprintf (file, "%R%s,", reg_names[0]);
+ output_addr_const (file, addr);
+ }
+}
+
+/* Return true if X is an address which needs a temporary register when
+ reloaded while generating PIC code. */
+
+bool
+pic_address_needs_scratch (rtx x)
+{
+ rtx sum;
+
+ /* Only a CONST wrapping a symbolic plus a non SMALL_INT offset
+    needs a temporary register when reloaded under PIC. */
+ if (GET_CODE (x) != CONST)
+ return false;
+
+ sum = XEXP (x, 0);
+ if (GET_CODE (sum) != PLUS)
+ return false;
+
+ return (GET_CODE (XEXP (sum, 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (sum, 1)) == CONST_INT
+ && ! ADD_INT (XEXP (sum, 1)));
+}
+
+/* Adjust the cost of INSN based on the relationship between INSN that
+ is dependent on DEP_INSN through the dependence LINK. The default
+ is to make no adjustment to COST.
+
+ On the m88k, ignore the cost of anti- and output-dependencies. On
+ the m88100, a store can issue two cycles before the value (not the
+ address) has finished computing. */
+
+static int
+m88k_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
+{
+ if (REG_NOTE_KIND (link) != REG_DEP_TRUE)
+ return 0; /* Anti or output dependence. */
+
+ /* An 88110 store whose source is produced by DEP may issue before
+    the value is ready; credit the store reservation station by
+    shaving two issue slots (4 cost units) off the latency. */
+ if (TARGET_88110
+ && recog_memoized (insn) >= 0
+ && get_attr_type (insn) == TYPE_STORE
+ && SET_SRC (PATTERN (insn)) == SET_DEST (PATTERN (dep)))
+ return cost - 4; /* 88110 store reservation station. */
+
+ return cost;
+}
+
+/* Return cost of address expression X.
+ Expect that X is properly formed address reference. */
+
+static int
+m88k_address_cost (rtx x)
+{
+ enum rtx_code code = GET_CODE (x);
+
+ /* Plain register, lo_sum and scaled-index addresses are cheapest. */
+ if (code == REG || code == LO_SUM || code == MULT)
+ return 1;
+
+ if (code == HIGH)
+ return 2;
+
+ /* REG+REG is made slightly more expensive because it might keep
+ a register live for longer than we might like. */
+ if (code == PLUS)
+ return (REG_P (XEXP (x, 0)) && REG_P (XEXP (x, 1))) ? 2 : 1;
+
+ return 4;
+}
+
+/* Compute the cost of computing a constant rtl expression x
+ whose rtx-code is code. */
+static bool
+m88k_rtx_costs (rtx x, int code, int outer_code ATTRIBUTE_UNUSED, int *total)
+{
+ switch (code)
+ {
+ case CONST_INT:
+ /* We assume that any 16 bit integer can easily be recreated, so we
+ indicate 0 cost, in an attempt to get GCC not to optimize things
+ like comparison against a constant. */
+ if (SMALL_INT (x))
+ *total = 0;
+ else if (SMALL_INTVAL (- INTVAL (x)))
+ *total = 2;
+ else if (classify_integer (SImode, INTVAL (x)) != m88k_oru_or)
+ *total = 4;
+ else
+ /* Fixed: this assignment used to be unconditional, clobbering
+ the 0/2/4 costs computed above and making every CONST_INT
+ cost 7. Only the oru/or worst case costs 7. */
+ *total = 7;
+ return true;
+
+ case HIGH:
+ *total = 2;
+ return true;
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ /* Symbolic references are more expensive under PIC, and more so
+ with -fPIC (flag_pic == 2). */
+ if (flag_pic)
+ *total = (flag_pic == 2) ? 11 : 8;
+ else
+ *total = 5;
+ return true;
+
+ /* The cost of CONST_DOUBLE is zero (if it can be placed in an insn,
+ it is as good as a register; since it can't be placed in any insn,
+ it won't do anything in cse, but it will cause expand_binop to
+ pass the constant to the define_expands). */
+ case CONST_DOUBLE:
+ *total = 0;
+ return true;
+
+ case MEM:
+ *total = COSTS_N_INSNS (2);
+ return true;
+
+ case MULT:
+ *total = COSTS_N_INSNS (3);
+ return true;
+
+ case DIV:
+ case UDIV:
+ case MOD:
+ case UMOD:
+ *total = COSTS_N_INSNS (38);
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static bool
+m88k_handle_option (size_t code, const char *arg, int value)
+{
+ /* -m88100 and -m88110 are mutually exclusive; the last one given
+    wins. -m88000 sets both masks, which m88k_override_options()
+    later interprets as the common subset. */
+ switch (code)
+ {
+ case OPT_m88000:
+ /* make the cpu type nonzero; will be reset in m88k_override_options() */
+ target_flags |= MASK_88100 | MASK_88110;
+ return true;
+
+ case OPT_m88100:
+ target_flags &= ~MASK_88110;
+ target_flags |= MASK_88100;
+ return true;
+
+ case OPT_m88110:
+ target_flags &= ~MASK_88100;
+ target_flags |= MASK_88110;
+ return true;
+
+ default:
+ /* All other options are valid and need no processing here. */
+ return true;
+ }
+}
+
+void
+m88k_override_options (void)
+{
+ /* No CPU selected at all: fall back to the configured default. */
+ if (!TARGET_88100 && !TARGET_88110)
+ target_flags |= CPU_DEFAULT;
+
+ /* Both masks set means -m88000: generate code for the common
+ subset of the 88100 and the 88110. */
+ if (TARGET_88100 && TARGET_88110)
+ target_flags &= ~(MASK_88100 | MASK_88110);
+
+ if (TARGET_88110)
+ {
+ target_flags |= MASK_USE_DIV;
+ target_flags &= ~MASK_CHECK_ZERO_DIV;
+ }
+
+ /* Fixed: TARGET_88110 previously selected PROCESSOR_M88100, so the
+ scheduler never saw the 88110 pipeline description. */
+ m88k_cpu = (TARGET_88110 ? PROCESSOR_M88110
+ : (TARGET_88100 ? PROCESSOR_M88100 : PROCESSOR_M88000));
+
+ if (TARGET_TRAP_LARGE_SHIFT && TARGET_HANDLE_LARGE_SHIFT)
+ error ("-mtrap-large-shift and -mhandle-large-shift are incompatible");
+
+ if (TARGET_OMIT_LEAF_FRAME_POINTER) /* keep nonleaf frame pointers */
+ flag_omit_frame_pointer = 1;
+
+ /* On the m88100, it is desirable to align functions to a cache line.
+ The m88110 cache is small, so align to an 8 byte boundary. */
+ if (align_functions == 0)
+ align_functions = TARGET_88100 ? 16 : 8;
+}
diff --git a/gnu/gcc/gcc/config/m88k/m88k.h b/gnu/gcc/gcc/config/m88k/m88k.h
new file mode 100644
index 00000000000..755702ba9d1
--- /dev/null
+++ b/gnu/gcc/gcc/config/m88k/m88k.h
@@ -0,0 +1,1359 @@
+/* Definitions of target machine for GNU compiler for
+ Motorola m88100 in an 88open OCS/BCS environment.
+ Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+ 2001, 2002 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com).
+ Currently maintained by (gcc@dg-rtp.dg.com)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* The m88100 port of GCC mostly adheres to the various standards from 88open.
+ These documents used to be available by writing to:
+
+ 88open Consortium Ltd.
+ 100 Homeland Court, Suite 800
+ San Jose, CA 95112
+ (408) 436-6600
+
+ In brief, the current standards are:
+
+ Binary Compatibility Standard, Release 1.1A, May 1991
+ This provides for portability of application-level software at the
+ executable level for AT&T System V Release 3.2.
+
+ Object Compatibility Standard, Release 1.1A, May 1991
+ This provides for portability of application-level software at the
+ object file and library level for C, Fortran, and Cobol, and again,
+ largely for SVR3.
+
+ Under development are standards for AT&T System V Release 4, based on the
+ [generic] System V Application Binary Interface from AT&T. These include:
+
+ System V Application Binary Interface, Motorola 88000 Processor Supplement
+ Another document from AT&T for SVR4 specific to the m88100.
+ Available from Prentice Hall.
+
+ System V Application Binary Interface, Motorola 88000 Processor Supplement,
+ Release 1.1, Draft H, May 6, 1991
+ A proposed update to the AT&T document from 88open.
+
+ System V ABI Implementation Guide for the M88000 Processor,
+ Release 1.0, January 1991
+ A companion ABI document from 88open. */
+
+/* External types used. */
+
+/* What instructions are needed to manufacture an integer constant. */
+enum m88k_instruction {
+ m88k_zero,
+ m88k_or,
+ m88k_subu,
+ m88k_or_lo16,
+ m88k_or_lo8,
+ m88k_set,
+ m88k_oru_hi16,
+ m88k_oru_or
+};
+
+/* Which processor to schedule for. The elements of the enumeration
+ must match exactly the cpu attribute in the m88k.md machine description. */
+
+enum processor_type {
+ PROCESSOR_M88100,
+ PROCESSOR_M88110,
+ PROCESSOR_M88000
+};
+
+/* Recast the cpu class to be the cpu attribute. */
+#define m88k_cpu_attr ((enum attr_cpu)m88k_cpu)
+
+/* External variables/functions defined in m88k.c. */
+
+extern char m88k_volatile_code;
+
+extern int m88k_fp_offset;
+extern int m88k_stack_size;
+extern int m88k_case_index;
+
+extern struct rtx_def *m88k_compare_reg;
+extern struct rtx_def *m88k_compare_op0;
+extern struct rtx_def *m88k_compare_op1;
+
+extern enum processor_type m88k_cpu;
+
+/*** Controlling the Compilation Driver, `gcc' ***/
+/* Show we can debug even without a frame pointer. */
+#define CAN_DEBUG_WITHOUT_FP
+
+/* If -m88100 is in effect, add -D__m88100__; similarly for -m88110.
+ Here, the CPU_DEFAULT is assumed to be -m88100. */
+#undef CPP_SPEC
+#define CPP_SPEC "%{!m88000:%{!m88100:%{m88110:-D__m88110__}}} \
+ %{!m88000:%{!m88110:-D__m88100__}}"
+
+/*** Run-time Target Specification ***/
+
+#define VERSION_INFO "m88k"
+#define TARGET_VERSION fprintf (stderr, " (%s)", VERSION_INFO)
+
+#define TARGET_DEFAULT (MASK_CHECK_ZERO_DIV)
+#define CPU_DEFAULT MASK_88100
+
+#define OVERRIDE_OPTIONS m88k_override_options ()
+
+/* Run-time target specifications. */
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__m88k"); \
+ builtin_define ("__m88k__"); \
+ builtin_assert ("cpu=m88k"); \
+ builtin_assert ("machine=m88k"); \
+ if (TARGET_88100) \
+ builtin_define ("__mc88100__"); \
+ else if (TARGET_88110) \
+ builtin_define ("__mc88110__"); \
+ else \
+ builtin_define ("__mc88000__"); \
+ } \
+ while (0)
+
+
+/*** Storage Layout ***/
+
+/* Sizes in bits of the various types. */
+#define SHORT_TYPE_SIZE 16
+#define INT_TYPE_SIZE 32
+#define LONG_TYPE_SIZE 32
+#define LONG_LONG_TYPE_SIZE 64
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_DOUBLE_TYPE_SIZE 64
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields.
+ Somewhat arbitrary. It matches the bit field patterns. */
+#define BITS_BIG_ENDIAN 1
+
+/* Define this if most significant byte of a word is the lowest numbered.
+ That is true on the m88000. */
+#define BYTES_BIG_ENDIAN 1
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered.
+ For the m88000 we can decide arbitrarily since there are no machine
+ instructions for them. */
+#define WORDS_BIG_ENDIAN 1
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 4
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 32
+
+/* Largest alignment for stack parameters (if greater than PARM_BOUNDARY). */
+#define MAX_PARM_BOUNDARY 64
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY 128
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 32
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 64
+
+/* The best alignment to use in cases where we have a choice. */
+#define FASTEST_ALIGNMENT (TARGET_88100 ? 32 : 64)
+
+/* Make strings 4/8 byte aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ ((TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < FASTEST_ALIGNMENT) \
+ ? FASTEST_ALIGNMENT : (ALIGN))
+
+/* Make arrays of chars 4/8 byte aligned for the same reasons. */
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ (TREE_CODE (TYPE) == ARRAY_TYPE \
+ && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
+ && (ALIGN) < FASTEST_ALIGNMENT ? FASTEST_ALIGNMENT : (ALIGN))
+
+/* Alignment of field after `int : 0' in a structure.
+ Ignored with PCC_BITFIELD_TYPE_MATTERS. */
+/* #define EMPTY_FIELD_BOUNDARY 8 */
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* Set this nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+/* A bit-field declared as `int' forces `int' alignment for the struct. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/*** Register Usage ***/
+
+/* No register prefixes by default. Will be overridden if necessary. */
+#undef REGISTER_PREFIX
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers.
+
+ The m88100 has a General Register File (GRF) of 32 32-bit registers.
+ The m88110 adds an Extended Register File (XRF) of 32 80-bit registers. */
+#define FIRST_PSEUDO_REGISTER 64
+#define FIRST_EXTENDED_REGISTER 32
+
+/* General notes on extended registers, their use and misuse.
+
+ Possible good uses:
+
+ spill area instead of memory.
+ -waste if only used once
+
+ floating point calculations
+ -probably a waste unless we have run out of general purpose registers
+
+ freeing up general purpose registers
+ -e.g. may be able to have more loop invariants if floating
+ point is moved into extended registers.
+
+
+ I've noticed wasteful moves into and out of extended registers; e.g. a load
+ into x21, then inside a loop a move into r24, then r24 used as input to
+ an fadd. Why not just load into r24 to begin with? Maybe the new cse.c
+ will address this. This wastes a move, but the load,store and move could
+ have been saved had extended registers been used throughout.
+ E.g. in the code following code, if z and xz are placed in extended
+ registers, there is no need to save preserve registers.
+
+ long c=1,d=1,e=1,f=1,g=1,h=1,i=1,j=1,k;
+
+ double z=0,xz=4.5;
+
+ foo(a,b)
+ long a,b;
+ {
+ while (a < b)
+ {
+ k = b + c + d + e + f + g + h + a + i + j++;
+ z += xz;
+ a++;
+ }
+ printf("k= %d; z=%f;\n", k, z);
+ }
+
+ I've found that it is possible to change the constraints (putting * before
+ the 'r' constraints in the fadd.ddd instruction) and get the entire
+ addition and store to go into extended registers. However, this also
+ forces simple addition and return of floating point arguments to a
+ function into extended registers. Not the correct solution.
+
+ Found the following note in local-alloc.c which may explain why I can't
+ get both registers to be in extended registers since two are allocated in
+ local-alloc and one in global-alloc. Doesn't explain (I don't believe)
+ why an extended register is used instead of just using the preserve
+ register.
+
+ from local-alloc.c:
+ We have provision to exempt registers, even when they are contained
+ within the block, that can be tied to others that are not contained in
+ it.
+ This is so that global_alloc could process them both and tie them then.
+ But this is currently disabled since tying in global_alloc is not
+ yet implemented.
+
+ The explanation of why the preserved register is not used is as follows,
+ I believe. The registers are being allocated in order. Tying is not
+ done so efficiently, so when it comes time to do the first allocation,
+ there are no registers left to use without spilling except extended
+ registers. Then when the next pseudo register needs a hard reg, there
+ are still no registers to be had for free, but this one must be a GRF
+ reg instead of an extended reg, so a preserve register is spilled. Thus
+ the move from extended to GRF is necessitated. I do not believe this can
+ be 'fixed' through the files in config/m88k.
+
+ gcc seems to sometimes make worse use of register allocation -- not
+ counting moves -- whenever extended registers are present. For example in
+ the whetstone, the simple for loop (slightly modified)
+ for(i = 1; i <= n1; i++)
+ {
+ x1 = (x1 + x2 + x3 - x4) * t;
+ x2 = (x1 + x2 - x3 + x4) * t;
+ x3 = (x1 - x2 + x3 + x4) * t;
+ x4 = (x1 + x2 + x3 + x4) * t;
+ }
+ in general loads the high bits of the addresses of x2-x4 and i into
+ registers outside the loop. Whenever extended registers are used, it loads
+ all of these inside the loop. My conjecture is that since the 88110 has so
+ many registers, and gcc makes no distinction at this point -- just that
+ they are not fixed, that in loop.c it believes it can expect a number of
+ registers to be available. Then it allocates 'too many' in local-alloc
+ which causes problems later. 'Too many' are allocated because a large
+ portion of the registers are extended registers and cannot be used for
+ certain purposes ( e.g. hold the address of a variable). When this loop is
+ compiled on its own, the problem does not occur. I don't know the solution
+ yet, though it is probably in the base sources. Possibly a different way
+ to calculate "threshold". */
+
+/* 1 for registers that have pervasive standard uses and are not available
+ for the register allocator. Registers r14-r25 and x22-x29 are expected
+ to be preserved across function calls.
+
+ On the 88000, the standard uses of the General Register File (GRF) are:
+ Reg 0 = Pseudo argument pointer (hardware fixed to 0).
+ Reg 1 = Subroutine return pointer (hardware).
+ Reg 2-9 = Parameter registers (OCS).
+ Reg 10 = OCS reserved temporary.
+ Reg 11 = Static link if needed [OCS reserved temporary].
+ Reg 12 = Address of structure return (OCS).
+ Reg 13 = OCS reserved temporary.
+ Reg 14-25 = Preserved register set.
+ Reg 26-29 = Reserved by OCS and ABI.
+ Reg 30 = Frame pointer (Common use).
+ Reg 31 = Stack pointer.
+
+ The following follows the current 88open UCS specification for the
+ Extended Register File (XRF):
+ Reg 32 = x0 Always equal to zero
+ Reg 33-53 = x1-x21 Temporary registers (Caller Save)
+ Reg 54-61 = x22-x29 Preserved registers (Callee Save)
+ Reg 62-63 = x30-x31 Reserved for future ABI use.
+
+ Note: The current 88110 extended register mapping is subject to change.
+ The bias towards caller-save registers is based on the
+ presumption that memory traffic can potentially be reduced by
+ allowing the "caller" to save only that part of the register
+ which is actually being used. (i.e. don't do a st.x if a st.d
+ is sufficient). Also, in scientific code (a.k.a. Fortran), the
+ large number of variables defined in common blocks may require
+ that almost all registers be saved across calls anyway. */
+
+#define FIXED_REGISTERS \
+ {1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, \
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+
+#define CALL_USED_REGISTERS \
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1}
+
+/* Macro to conditionally modify fixed_regs/call_used_regs. */
+#define CONDITIONAL_REGISTER_USAGE \
+ { \
+ if (! TARGET_88110) \
+ { \
+ register int i; \
+ for (i = FIRST_EXTENDED_REGISTER; i < FIRST_PSEUDO_REGISTER; \
+ i++) \
+ { \
+ fixed_regs[i] = 1; \
+ call_used_regs[i] = 1; \
+ } \
+ } \
+ if (flag_pic) \
+ { \
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ } \
+ }
+
+/* True if register is an extended register. */
+#define XRF_REGNO_P(N) \
+ ((N) < FIRST_PSEUDO_REGISTER && (N) >= FIRST_EXTENDED_REGISTER)
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ On the m88000, GRF registers hold 32-bits and XRF registers hold 80-bits.
+ An XRF register can hold any mode, but two GRF registers are required
+ for larger modes. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (XRF_REGNO_P (REGNO) \
+ ? 1 : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+
+ For double integers, we never put the value into an odd register so that
+ the operators don't run into the situation where the high part of one of
+ the inputs is the low part of the result register. (It's ok if the output
+ registers are the same as the input registers.) The XRF registers can
+ hold all modes, but only DF and SF modes can be manipulated in these
+ registers. The compiler should be allowed to use these as a fast spill
+ area. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ (XRF_REGNO_P (REGNO) \
+ ? (TARGET_88110 && GET_MODE_CLASS (MODE) == MODE_FLOAT) \
+ : (((MODE) != DImode && (MODE) != DFmode && (MODE) != DCmode) \
+ || ((REGNO) & 1) == 0))
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (((MODE1) == DFmode || (MODE1) == DCmode || (MODE1) == DImode \
+ || (TARGET_88110 && GET_MODE_CLASS (MODE1) == MODE_FLOAT)) \
+ == ((MODE2) == DFmode || (MODE2) == DCmode || (MODE2) == DImode \
+ || (TARGET_88110 && GET_MODE_CLASS (MODE2) == MODE_FLOAT)))
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* the m88000 pc isn't overloaded on a register that the compiler knows about. */
+/* #define PC_REGNUM */
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 31
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 30
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 0
+
+/* Register used in cases where a temporary is known to be safe to use. */
+#define TEMP_REGNUM 10
+
+/* Register in which static-chain is passed to a function. */
+#define STATIC_CHAIN_REGNUM 11
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+#define M88K_STRUCT_VALUE_REGNUM 12
+
+/* Register to hold the addressing base for position independent
+ code access to data items. */
+#define PIC_OFFSET_TABLE_REGNUM (flag_pic ? 25 : INVALID_REGNUM)
+
+/* Order in which registers are preferred (most to least). Use temp
+ registers, then param registers top down. Preserve registers are
+ top down to maximize use of double memory ops for register save.
+ The 88open reserved registers (r26-r29 and x30-x31) may commonly be used
+ in most environments with the -fcall-used- or -fcall-saved- options. */
+#define REG_ALLOC_ORDER \
+ { \
+ 13, 12, 11, 10, 29, 28, 27, 26, \
+ 62, 63, 9, 8, 7, 6, 5, 4, \
+ 3, 2, 1, 53, 52, 51, 50, 49, \
+ 48, 47, 46, 45, 44, 43, 42, 41, \
+ 40, 39, 38, 37, 36, 35, 34, 33, \
+ 25, 24, 23, 22, 21, 20, 19, 18, \
+ 17, 16, 15, 14, 61, 60, 59, 58, \
+ 57, 56, 55, 54, 30, 31, 0, 32}
+
+/* Order for leaf functions. */
+#define REG_LEAF_ALLOC_ORDER \
+ { \
+ 9, 8, 7, 6, 13, 12, 11, 10, \
+ 29, 28, 27, 26, 62, 63, 5, 4, \
+ 3, 2, 0, 53, 52, 51, 50, 49, \
+ 48, 47, 46, 45, 44, 43, 42, 41, \
+ 40, 39, 38, 37, 36, 35, 34, 33, \
+ 25, 24, 23, 22, 21, 20, 19, 18, \
+ 17, 16, 15, 14, 61, 60, 59, 58, \
+ 57, 56, 55, 54, 30, 31, 1, 32}
+
+/* Switch between the leaf and non-leaf orderings. The purpose is to avoid
+ write-over scoreboard delays between caller and callee. */
+#define ORDER_REGS_FOR_LOCAL_ALLOC \
+{ \
+ static const int leaf[] = REG_LEAF_ALLOC_ORDER; \
+ static const int nonleaf[] = REG_ALLOC_ORDER; \
+ \
+ memcpy (reg_alloc_order, regs_ever_live[1] ? nonleaf : leaf, \
+ FIRST_PSEUDO_REGISTER * sizeof (int)); \
+}
+
+/*** Register Classes ***/
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+/* The m88000 hardware has two kinds of registers. In addition, we denote
+ the arg pointer as a separate class. */
+
+enum reg_class { NO_REGS, AP_REG, XRF_REGS, GENERAL_REGS, AGRF_REGS,
+ XGRF_REGS, ALL_REGS, LIM_REG_CLASSES };
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+ { "NO_REGS", "AP_REG", "XRF_REGS", "GENERAL_REGS", "AGRF_REGS", \
+ "XGRF_REGS", "ALL_REGS" }
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+#define REG_CLASS_CONTENTS \
+ { { 0x00000000, 0x00000000 }, \
+ { 0x00000001, 0x00000000 }, \
+ { 0x00000000, 0xffffffff }, \
+ { 0xfffffffe, 0x00000000 }, \
+ { 0xffffffff, 0x00000000 }, \
+ { 0xfffffffe, 0xffffffff }, \
+ { 0xffffffff, 0xffffffff } }
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) ? ((REGNO) < 32 ? GENERAL_REGS : XRF_REGS) : AP_REG)
+
+/* The class value for index registers, and the one for base regs. */
+#define BASE_REG_CLASS AGRF_REGS
+#define INDEX_REG_CLASS GENERAL_REGS
+
+/* Macros to check register numbers against specific register classes.
+ These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < FIRST_EXTENDED_REGISTER \
+ || (unsigned) reg_renumber[REGNO] < FIRST_EXTENDED_REGISTER)
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ (((REGNO) && (REGNO) < FIRST_EXTENDED_REGISTER) \
+ || (reg_renumber[REGNO] \
+ && (unsigned) reg_renumber[REGNO] < FIRST_EXTENDED_REGISTER))
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class.
+ Double constants should be in a register iff they can be made cheaply. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ (CONSTANT_P (X) && ((CLASS) == XRF_REGS) ? NO_REGS : (CLASS))
+
+/* Return the register class of a scratch register needed to load IN
+ into a register of class CLASS in MODE. On the m88k, when PIC, we
+ need a temporary when loading some addresses into a register. */
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, IN) \
+ ((flag_pic \
+ && GET_CODE (IN) == CONST \
+ && GET_CODE (XEXP (IN, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (IN, 0), 0)) == CONST_INT \
+ && ! SMALL_INT (XEXP (XEXP (IN, 0), 1))) ? GENERAL_REGS : NO_REGS)
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((((CLASS) == XRF_REGS) ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
+
+/* Quick tests for certain values. */
+#define SMALL_INT(X) (SMALL_INTVAL (INTVAL (X)))
+#define SMALL_INTVAL(I) ((unsigned HOST_WIDE_INT) (I) < 0x10000)
+#define ADD_INT(X) (ADD_INTVAL (INTVAL (X)))
+#define ADD_INTVAL(I) ((unsigned HOST_WIDE_INT) (I) + 0xffff < 0x1ffff)
+#define POWER_OF_2(I) ((I) && POWER_OF_2_or_0(I))
+#define POWER_OF_2_or_0(I) (((I) & ((unsigned HOST_WIDE_INT)(I) - 1)) == 0)
+
+/*** Describing Stack Layout ***/
+
+/* Define this if pushing a word on the stack moves the stack pointer
+ to a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Define this if the addresses of local variable slots are at negative
+ offsets from the frame pointer. */
+/* #define FRAME_GROWS_DOWNWARD */
+
+/* Offset from the frame pointer to the first local variable slot to be
+ allocated. For the m88k, the debugger wants the return address (r1)
+ stored at location r30+4, and the previous frame pointer stored at
+ location r30. */
+#define STARTING_FRAME_OFFSET 8
+
+/* If we generate an insn to push BYTES bytes, this says how many the
+ stack pointer really advances by. The m88k has no push instruction. */
+/* #define PUSH_ROUNDING(BYTES) */
+
+/* If defined, the maximum amount of space required for outgoing arguments
+ will be computed and placed into the variable
+ `current_function_outgoing_args_size'. No space will be pushed
+ onto the stack for each call; instead, the function prologue should
+ increase the stack frame size by this amount. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* Offset from the stack pointer register to the first location at which
+ outgoing arguments are placed. Use the default value zero. */
+/* #define STACK_POINTER_OFFSET 0 */
+
+/* Offset of first parameter from the argument pointer register value.
+ Using an argument pointer, this is 0 for the m88k. GCC knows
+ how to eliminate the argument pointer references if necessary. */
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Define this if functions should assume that stack space has been
+ allocated for arguments even when their values are passed in
+ registers.
+
+ The value of this macro is the size, in bytes, of the area reserved for
+ arguments passed in registers.
+
+ This space can either be allocated by the caller or be a part of the
+ machine-dependent stack frame: `OUTGOING_REG_PARM_STACK_SPACE'
+ says which. */
+/* #undef REG_PARM_STACK_SPACE(FNDECL) */
+
+/* Define this macro if REG_PARM_STACK_SPACE is defined but stack
+ parameters don't skip the area specified by REG_PARM_STACK_SPACE.
+ Normally, when a parameter is not passed in registers, it is placed on
+ the stack beyond the REG_PARM_STACK_SPACE area. Defining this macro
+ suppresses this behavior and causes the parameter to be passed on the
+ stack in its natural location. */
+/* #undef STACK_PARMS_IN_REG_PARM_AREA */
+
+/* Define this if it is the responsibility of the caller to allocate the
+ area reserved for arguments passed in registers. If
+ `ACCUMULATE_OUTGOING_ARGS' is also defined, the only effect of this
+ macro is to determine whether the space is included in
+ `current_function_outgoing_args_size'. */
+/* #define OUTGOING_REG_PARM_STACK_SPACE */
+
+/* Offset from the stack pointer register to an item dynamically allocated
+ on the stack, e.g., by `alloca'.
+
+ The default value for this macro is `STACK_POINTER_OFFSET' plus the
+ length of the outgoing arguments. The default is correct for most
+ machines. See `function.c' for details. */
+/* #define STACK_DYNAMIC_OFFSET(FUNDECL) ... */
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack. */
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ gen_rtx_REG (TYPE_MODE (VALTYPE) \
+ == BLKmode ? SImode : TYPE_MODE (VALTYPE), 2)
+
+/* Define this if it differs from FUNCTION_VALUE. */
+/* #define FUNCTION_OUTGOING_VALUE(VALTYPE, FUNC) ... */
+
+/* Don't default to pcc-struct-return, because we have already specified
+ exactly how to return structures in the RETURN_IN_MEMORY macro. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+#define LIBCALL_VALUE(MODE) gen_rtx_REG (MODE, 2)
+
+/* True if N is a possible register number for a function value
+ as seen by the caller. */
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == 2)
+
+/* Determine whether a function argument is passed in a register, and
+ which register. See m88k.c. */
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ m88k_function_arg (CUM, MODE, TYPE, NAMED)
+
+/* Define this if it differs from FUNCTION_ARG. */
+/* #define FUNCTION_INCOMING_ARG(CUM, MODE, TYPE, NAMED) ... */
+
+/* A C type for declaring a variable that is used as the first argument
+ of `FUNCTION_ARG' and other related values. It suffices to count
+ the number of words of argument so far. */
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS for a call to a
+ function whose data type is FNTYPE. For a library call, FNTYPE is 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+ ((CUM) = 0)
+
+/* Update the summarizer variable to advance past an argument in an
+ argument list. See m88k.c. */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ m88k_function_arg_advance (& (CUM), MODE, TYPE, NAMED)
+
+/* True if N is a possible register number for function argument passing.
+ On the m88000, these are registers 2 through 9. */
+#define FUNCTION_ARG_REGNO_P(N) ((N) <= 9 && (N) >= 2)
+
+/* A C expression which determines whether, and in which direction,
+ to pad out an argument with extra space. The value should be of
+ type `enum direction': either `upward' to pad above the argument,
+ `downward' to pad below, or `none' to inhibit padding.
+
+ This macro does not control the *amount* of padding; that is always
+ just enough to reach the next multiple of `FUNCTION_ARG_BOUNDARY'. */
+#define FUNCTION_ARG_PADDING(MODE, TYPE) \
+ ((MODE) == BLKmode \
+ || ((TYPE) && (TREE_CODE (TYPE) == RECORD_TYPE \
+ || TREE_CODE (TYPE) == UNION_TYPE)) \
+ ? upward : GET_MODE_BITSIZE (MODE) < PARM_BOUNDARY ? downward : none)
+
+/* If defined, a C expression that gives the alignment boundary, in bits,
+ of an argument with the specified mode and type. If it is not defined,
+ `PARM_BOUNDARY' is used for all arguments. */
+#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \
+ (((TYPE) ? TYPE_ALIGN (TYPE) : GET_MODE_BITSIZE (MODE)) \
+ <= PARM_BOUNDARY ? PARM_BOUNDARY : 2 * PARM_BOUNDARY)
+
+/* Implement `va_start' for varargs and stdarg. */
+#define EXPAND_BUILTIN_VA_START(valist, nextarg) \
+ m88k_va_start (valist, nextarg)
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ output_function_profiler (FILE, LABELNO, "mcount")
+
+/* Maximum length in instructions of the code output by FUNCTION_PROFILER. */
+#define FUNCTION_PROFILER_LENGTH (5+3+1+5)
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+#define EXIT_IGNORE_STACK (1)
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms
+ may be accessed via the stack pointer) in functions that seem suitable.
+ This is computed in `reload', in reload1.c. */
+#define FRAME_POINTER_REQUIRED \
+((current_function_profile || !leaf_function_p () \
+ || !TARGET_OMIT_LEAF_FRAME_POINTER) \
+ || (write_symbols != NO_DEBUG))
+
+/* Define registers used by the epilogue and return instruction. */
+#define EPILOGUE_USES(REGNO) \
+(reload_completed && ((REGNO) == 1 \
+ || (current_function_profile \
+ && (REGNO) == FRAME_POINTER_REGNUM)))
+
+/* Before the prologue, RA is in r1. */
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, 1)
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (1)
+
+/* Definitions for register eliminations.
+
+ We have two registers that can be eliminated on the m88k. First, the
+ frame pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the argument pointer register can always be
+ eliminated; it is replaced with either the stack or frame pointer. */
+
+/* This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference. */
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+/* Given FROM and TO register numbers, say whether this elimination
+ is allowed. */
+#define CAN_ELIMINATE(FROM, TO) \
+ (!((FROM) == FRAME_POINTER_REGNUM && FRAME_POINTER_REQUIRED))
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ m88k_layout_frame (); \
+ if ((FROM) == FRAME_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) = m88k_fp_offset; \
+ else if ((FROM) == ARG_POINTER_REGNUM && (TO) == FRAME_POINTER_REGNUM)\
+ (OFFSET) = m88k_stack_size - m88k_fp_offset; \
+ else if ((FROM) == ARG_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM)\
+ (OFFSET) = m88k_stack_size; \
+ else \
+ gcc_unreachable (); \
+}
+
+/*** Trampolines for Nested Functions ***/
+
+#ifndef FINALIZE_TRAMPOLINE
+#define FINALIZE_TRAMPOLINE(TRAMP)
+#endif
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ This block is placed on the stack and filled in. It is aligned
+ 0 mod 128 and those portions that are executed are constant.
+ This should work for instruction caches that have cache lines up
+ to the aligned amount (128 is arbitrary), provided no other code
+ producer is attempting to play the same game. This of course is
+ in violation of any number of 88open standards. */
+
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ char buf[256]; \
+ static int labelno = 0; \
+ labelno++; \
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LTRMP", labelno); \
+ /* Save the return address (r1) in the static chain reg (r11). */ \
+ asm_fprintf (FILE, "\tor\t %R%s,%R%s,0\n", \
+ reg_names[11], reg_names[1]); \
+ /* Locate this block; transfer to the next instruction. */ \
+ fprintf (FILE, "\tbsr\t %s\n", &buf[1]); \
+ assemble_name (FILE, buf); \
+ fputs (":", FILE); \
+ /* Save r10; use it as the relative pointer; restore r1. */ \
+ asm_fprintf (FILE, "\tst\t %R%s,%R%s,24\n", \
+ reg_names[10], reg_names[1]); \
+ asm_fprintf (FILE, "\tor\t %R%s,%R%s,0\n", \
+ reg_names[10], reg_names[1]); \
+ asm_fprintf (FILE, "\tor\t %R%s,%R%s,0\n", \
+ reg_names[1], reg_names[11]); \
+ /* Load the function's address and go there. */ \
+ asm_fprintf (FILE, "\tld\t %R%s,%R%s,32\n", \
+ reg_names[11], reg_names[10]); \
+ asm_fprintf (FILE, "\tjmp.n\t %R%s\n", reg_names[11]); \
+ /* Restore r10 and load the static chain register. */ \
+ asm_fprintf (FILE, "\tld.d\t %R%s,%R%s,24\n", \
+ reg_names[10], reg_names[10]); \
+ /* Storage: r10 save area, static chain, function address. */ \
+ assemble_aligned_integer (UNITS_PER_WORD, const0_rtx); \
+ assemble_aligned_integer (UNITS_PER_WORD, const0_rtx); \
+ assemble_aligned_integer (UNITS_PER_WORD, const0_rtx); \
+}
+
+/* Length in units of the trampoline for entering a nested function.
+ This is really two components. The first 32 bytes are fixed and
+ must be copied; the last 12 bytes are just storage that's filled
+ in later. So for allocation purposes, it's 32+12 bytes, but for
+ initialization purposes, it's 32 bytes. */
+
+#define TRAMPOLINE_SIZE (32+12)
+
+/* Alignment required for a trampoline. 128 is used to find the
+ beginning of a line in the instruction cache and to allow for
+ instruction cache lines of up to 128 bytes. */
+
+#define TRAMPOLINE_ALIGNMENT 128
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (TRAMP, 40)), \
+ FNADDR); \
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (TRAMP, 36)), \
+ CXT); \
+ FINALIZE_TRAMPOLINE (TRAMP); \
+}
+
+/*** Addressing Modes ***/
+
+#define SELECT_CC_MODE(OP,X,Y) CCmode
+
+/* #define HAVE_POST_INCREMENT 0 */
+/* #define HAVE_POST_DECREMENT 0 */
+
+/* #define HAVE_PRE_DECREMENT 0 */
+/* #define HAVE_PRE_INCREMENT 0 */
+
+/* Recognize any constant value that is a valid address.
+ When PIC, we do not accept an address that would require a scratch reg
+ to load into a register. */
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == HIGH \
+ || (GET_CODE (X) == CONST \
+ && ! (flag_pic && pic_address_needs_scratch (X))))
+
+
+/* Maximum number of registers that can appear in a valid memory address. */
+#define MAX_REGS_PER_ADDRESS 2
+
+/* The condition for memory shift insns. */
+#define SCALED_ADDRESS_P(ADDR) \
+ (GET_CODE (ADDR) == PLUS \
+ && (GET_CODE (XEXP (ADDR, 0)) == MULT \
+ || GET_CODE (XEXP (ADDR, 1)) == MULT))
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ On the m88000, a legitimate address has the form REG, REG+REG,
+ REG+SMALLINT, REG+(REG*modesize) (REG[REG]), or SMALLINT.
+
+ The register elimination process should deal with the argument
+ pointer and frame pointer changing to REG+SMALLINT. */
+
+#define LEGITIMATE_INDEX_P(X, MODE) \
+ ((GET_CODE (X) == CONST_INT \
+ && SMALL_INT (X)) \
+ || (REG_P (X) \
+ && REG_OK_FOR_INDEX_P (X)) \
+ || (GET_CODE (X) == MULT \
+ && REG_P (XEXP (X, 0)) \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && INTVAL (XEXP (X, 1)) == GET_MODE_SIZE (MODE)))
+
+#define RTX_OK_FOR_BASE_P(X) \
+ ((GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X)) \
+ || (GET_CODE (X) == SUBREG \
+ && GET_CODE (SUBREG_REG (X)) == REG \
+ && REG_OK_FOR_BASE_P (SUBREG_REG (X))))
+
+#define RTX_OK_FOR_INDEX_P(X) \
+ ((GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X)) \
+ || (GET_CODE (X) == SUBREG \
+ && GET_CODE (SUBREG_REG (X)) == REG \
+ && REG_OK_FOR_INDEX_P (SUBREG_REG (X))))
+
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+{ \
+ if (REG_P (X)) \
+ { \
+ if (REG_OK_FOR_BASE_P (X)) \
+ goto ADDR; \
+ } \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ register rtx _x0 = XEXP (X, 0); \
+ register rtx _x1 = XEXP (X, 1); \
+ if ((flag_pic \
+ && _x0 == pic_offset_table_rtx \
+ && (flag_pic == 2 \
+ ? RTX_OK_FOR_BASE_P (_x1) \
+ : (GET_CODE (_x1) == SYMBOL_REF \
+ || GET_CODE (_x1) == LABEL_REF))) \
+ || (RTX_OK_FOR_BASE_P (_x0) \
+ && LEGITIMATE_INDEX_P (_x1, MODE)) \
+ || (RTX_OK_FOR_BASE_P (_x1) \
+ && LEGITIMATE_INDEX_P (_x0, MODE))) \
+ goto ADDR; \
+ } \
+ else if (GET_CODE (X) == LO_SUM) \
+ { \
+ register rtx _x0 = XEXP (X, 0); \
+ register rtx _x1 = XEXP (X, 1); \
+ if (RTX_OK_FOR_BASE_P (_x0) \
+ && CONSTANT_P (_x1)) \
+ goto ADDR; \
+ } \
+ else if (GET_CODE (X) == CONST_INT \
+ && SMALL_INT (X)) \
+ goto ADDR; \
+}
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. Not the argument pointer. */
+#define REG_OK_FOR_INDEX_P(X) \
+ (!XRF_REGNO_P(REGNO (X)))
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) (REG_OK_FOR_INDEX_P (X))
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+#endif
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output. */
+
+/* On the m88000, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) \
+{ \
+ if (GET_CODE (X) == PLUS && CONSTANT_ADDRESS_P (XEXP (X, 1))) \
+ (X) = gen_rtx_PLUS (SImode, XEXP (X, 0), \
+ copy_to_mode_reg (SImode, XEXP (X, 1))); \
+ if (GET_CODE (X) == PLUS && CONSTANT_ADDRESS_P (XEXP (X, 0))) \
+ (X) = gen_rtx_PLUS (SImode, XEXP (X, 1), \
+ copy_to_mode_reg (SImode, XEXP (X, 0))); \
+ if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == MULT) \
+ (X) = gen_rtx_PLUS (SImode, XEXP (X, 1), \
+ force_operand (XEXP (X, 0), 0)); \
+ if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == MULT) \
+ (X) = gen_rtx_PLUS (SImode, XEXP (X, 0), \
+ force_operand (XEXP (X, 1), 0)); \
+ if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == PLUS) \
+ (X) = gen_rtx_PLUS (Pmode, force_operand (XEXP (X, 0), NULL_RTX), \
+ XEXP (X, 1)); \
+ if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == PLUS) \
+ (X) = gen_rtx_PLUS (Pmode, XEXP (X, 0), \
+ force_operand (XEXP (X, 1), NULL_RTX)); \
+ if (GET_CODE (X) == SYMBOL_REF || GET_CODE (X) == CONST \
+ || GET_CODE (X) == LABEL_REF) \
+ (X) = legitimize_address (flag_pic, X, 0, 0); \
+ if (memory_address_p (MODE, X)) \
+ goto WIN; \
+}
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for.
+ On the m88000 this is never true. */
+
+/* Deliberately expands to nothing: m88k addresses are mode-independent. */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL)
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+ Every constant is legitimate on the m88k. */
+#define LEGITIMATE_CONSTANT_P(X) (1)
+
+/* Define this, so that when PIC, reload won't try to reload invalid
+ addresses which require two reload registers. */
+
+#define LEGITIMATE_PIC_OPERAND_P(X) (! pic_address_needs_scratch (X))
+
+
+/*** Condition Code Information ***/
+
+/* When using a register to hold the condition codes, the cc_status
+ mechanism cannot be used. */
+#define NOTICE_UPDATE_CC(EXP, INSN) (0)
+
+/*** Miscellaneous Parameters ***/
+
+/* The case table contains either words or branch instructions. This says
+ which. We always claim that the vector is PC-relative. It is position
+ independent when -fpic is used. */
+#define CASE_VECTOR_INSNS (TARGET_88100 || flag_pic)
+
+/* An alias for a machine mode name. This is the machine mode that
+ elements of a jump-table should have. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+#define CASE_VECTOR_PC_RELATIVE 1
+
+/* Define this if control falls through a `case' insn when the index
+ value is out of range. This means the specified default-label is
+ actually ignored by the `case' insn proper. */
+/* #define CASE_DROPS_THROUGH */
+
+/* Define this to be the smallest number of different values for which it
+ is best to use a jump-table instead of a tree of conditional branches.
+ The default is 4 for machines with a casesi instruction and 5 otherwise.
+ The best 88110 number is around 7, though the exact number isn't yet
+ known. A third alternative for the 88110 is to use a binary tree of
+ bb1 instructions on bits 2/1/0 if the range is dense. This may not
+ win very much though. */
+#define CASE_VALUES_THRESHOLD (TARGET_88100 ? 4 : 7)
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 1
+
+/* The 88open ABI says size_t is unsigned int. */
+#define SIZE_TYPE "unsigned int"
+
+/* Handle #pragma pack and sometimes #pragma weak. */
+#define HANDLE_SYSV_PRAGMA 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 8
+
+/* Define if normal loads of shorter-than-word items from memory clear
+ the rest of the bits in the register.
+ NOTE(review): gcc 4.x consults LOAD_EXTEND_OP rather than this old
+ macro -- confirm this definition is still referenced anywhere. */
+#define BYTE_LOADS_ZERO_EXTEND
+
+/* Zero if access to memory by bytes is faster. */
+#define SLOW_BYTE_ACCESS 1
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* Define this if addresses of constant functions
+ shouldn't be put through pseudo regs where they can be cse'd.
+ Desirable on machines where ordinary constants are expensive
+ but a CALL with constant address is cheap. */
+#define NO_FUNCTION_CSE
+
+/* We assume that the store-condition-codes instructions store 0 for false
+ and some other value for true. This is the value stored for true. */
+#define STORE_FLAG_VALUE (-1)
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode SImode
+
+/* A function address in a call instruction
+ is a word address (for indexing purposes)
+ so give the MEM rtx word mode. */
+#define FUNCTION_MODE SImode
+
+/* A barrier will be aligned so account for the possible expansion.
+ A volatile load may be preceded by a serializing instruction.
+ Account for profiling code output at NOTE_INSN_PROLOGUE_END.
+ Account for block profiling code at basic block boundaries.
+ NOTE(review): the closing "} \" below leaves a dangling line
+ continuation onto the following blank line; harmless, but the
+ trailing backslash should be dropped. */
+#define ADJUST_INSN_LENGTH(RTX, LENGTH) \
+ if (GET_CODE (RTX) == BARRIER \
+ || (TARGET_SERIALIZE_VOLATILE \
+ && GET_CODE (RTX) == INSN \
+ && GET_CODE (PATTERN (RTX)) == SET \
+ && ((GET_CODE (SET_SRC (PATTERN (RTX))) == MEM \
+ && MEM_VOLATILE_P (SET_SRC (PATTERN (RTX))))))) \
+ (LENGTH) += 1; \
+ else if (GET_CODE (RTX) == NOTE \
+ && NOTE_LINE_NUMBER (RTX) == NOTE_INSN_PROLOGUE_END) \
+ { \
+ if (current_function_profile) \
+ (LENGTH) += FUNCTION_PROFILER_LENGTH; \
+ } \
+
+/* Track the state of the last volatile memory reference. Clear the
+ state with CC_STATUS_INIT for now. */
+#define CC_STATUS_INIT \
+ do { \
+ m88k_volatile_code = '\0'; \
+ } while (0)
+
+/* A C expression returning the cost of moving data of MODE from a register
+ to or from memory. This is more costly than between registers. */
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) 4
+
+/* Provide the cost of a branch. Exact meaning under development. */
+#define BRANCH_COST (TARGET_88100 ? 1 : 2)
+
+/* Do not break .stabs pseudos into continuations. */
+#define DBX_CONTIN_LENGTH 0
+
+/*** Output of Assembler Code ***/
+
+/* Control the assembler format that we output. */
+
+/* A C string constant describing how to begin a comment in the target
+ assembler language. The compiler assumes that the comment will end at
+ the end of the line. */
+#define ASM_COMMENT_START ";"
+
+/* Emit FILE_ASM_OP followed by the quoted source file name. */
+#define ASM_OUTPUT_SOURCE_FILENAME(FILE, NAME) \
+ do { \
+ fputs (FILE_ASM_OP, FILE); \
+ output_quoted_string (FILE, NAME); \
+ putc ('\n', FILE); \
+ } while (0)
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+#define ASM_APP_ON ""
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+#define ASM_APP_OFF ""
+
+/* Format the assembly opcode so that the arguments are all aligned.
+ The maximum instruction size is 8 characters (fxxx.xxx), so a tab and a
+ space will do to align the output. Abandon the output if a `%' is
+ encountered. */
+#define ASM_OUTPUT_OPCODE(STREAM, PTR) \
+ { \
+ int ch; \
+ const char *orig_ptr; \
+ \
+ for (orig_ptr = (PTR); \
+ (ch = *(PTR)) && ch != ' ' && ch != '\t' && ch != '\n' && ch != '%'; \
+ (PTR)++) \
+ putc (ch, STREAM); \
+ \
+ if (ch == ' ' && orig_ptr != (PTR) && (PTR) - orig_ptr < 8) \
+ putc ('\t', STREAM); \
+ }
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number:
+ r0-r31 are the general registers, x0-x31 the 88110 extended
+ (floating-point) register file. */
+
+#define REGISTER_NAMES \
+ { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", \
+ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", \
+ "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", \
+ "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", \
+ "x24", "x25", "x26", "x27", "x28", "x29", "x30", "x31" }
+
+/* Define additional names for use in asm clobbers and asm declarations.
+
+ We define the fake Condition Code register as an alias for reg 0 (which
+ is our `condition code' register), so that condition codes can easily
+ be clobbered by an asm. The carry bit in the PSR is now used. */
+
+#define ADDITIONAL_REGISTER_NAMES {{"psr", 0}, {"cc", 0}}
+
+/* Change to the readonly data section for a table of addresses.
+ final_scan_insn changes back to the text section.
+ Fixed: the do-while wrapper previously ended "} while (0);" -- the
+ stray trailing semicolon left an extra empty statement at every
+ expansion site (and would break an unbraced if/else around the macro).
+ It is now "} while (0)", consistent with ASM_OUTPUT_CASE_END and
+ ASM_OUTPUT_ADDR_VEC_ELT below. */
+#undef ASM_OUTPUT_BEFORE_CASE_LABEL
+#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE, PREFIX, NUM, TABLE) \
+ do { \
+ if (! CASE_VECTOR_INSNS) \
+ { \
+ switch_to_section (readonly_data_section); \
+ ASM_OUTPUT_ALIGN ((FILE), 2); \
+ } \
+ } while (0)
+
+/* Epilogue for case labels. This jump instruction is called by casesi
+ to transfer to the appropriate branch instruction within the table.
+ The label `@L<n>e' is coined to mark the end of the table. */
+#define ASM_OUTPUT_CASE_END(FILE, NUM, TABLE) \
+ do { \
+ if (CASE_VECTOR_INSNS) \
+ { \
+ char label[256]; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", NUM); \
+ fprintf (FILE, "%se:\n", &label[1]); \
+ if (! flag_delayed_branch) \
+ asm_fprintf (FILE, "\tlda\t %R%s,%R%s[%R%s]\n", reg_names[1], \
+ reg_names[1], reg_names[m88k_case_index]); \
+ asm_fprintf (FILE, "\tjmp\t %R%s\n", reg_names[1]); \
+ } \
+ } while (0)
+
+/* This is how to output an element of a case-vector that is absolute. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ do { \
+ char buffer[256]; \
+ ASM_GENERATE_INTERNAL_LABEL (buffer, "L", VALUE); \
+ fprintf (FILE, CASE_VECTOR_INSNS ? "\tbr\t %s\n" : "\tword\t %s\n", \
+ &buffer[1]); \
+ } while (0)
+
+/* This is how to output an element of a case-vector that is relative.
+ Same output as the absolute case; the table entries are branches. */
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ ASM_OUTPUT_ADDR_VEC_ELT (FILE, VALUE)
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG) != 0) \
+ fprintf (FILE, "%s%d\n", ALIGN_ASM_OP, 1<<(LOG))
+
+/* This is how to output an insn to push a register on the stack.
+ It need not be very fast code. */
+#define ASM_OUTPUT_REG_PUSH(FILE,REGNO) \
+ asm_fprintf (FILE, "\tsubu\t %R%s,%R%s,%d\n\tst\t %R%s,%R%s,0\n", \
+ reg_names[STACK_POINTER_REGNUM], \
+ reg_names[STACK_POINTER_REGNUM], \
+ (STACK_BOUNDARY / BITS_PER_UNIT), \
+ reg_names[REGNO], \
+ reg_names[STACK_POINTER_REGNUM])
+
+/* This is how to output an insn to pop a register from the stack. */
+#define ASM_OUTPUT_REG_POP(FILE,REGNO) \
+ asm_fprintf (FILE, "\tld\t %R%s,%R%s,0\n\taddu\t %R%s,%R%s,%d\n", \
+ reg_names[REGNO], \
+ reg_names[STACK_POINTER_REGNUM], \
+ reg_names[STACK_POINTER_REGNUM], \
+ reg_names[STACK_POINTER_REGNUM], \
+ (STACK_BOUNDARY / BITS_PER_UNIT))
+
+/* Macros for debug information */
+#define DEBUGGER_AUTO_OFFSET(X) \
+ (m88k_debugger_offset (X, 0) + (m88k_stack_size - m88k_fp_offset))
+
+#define DEBUGGER_ARG_OFFSET(OFFSET, X) \
+ (m88k_debugger_offset (X, OFFSET) + (m88k_stack_size - m88k_fp_offset))
+
+/* Jump tables consist of branch instructions and should be output in
+ the text section. When we use a table of addresses, we explicitly
+ change to the readonly data section. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ For `%' followed by punctuation, CODE is the punctuation and X is null. */
+#define PRINT_OPERAND_PUNCT_VALID_P(c) \
+ ((c) == '#' || (c) == '.' || (c) == '!' || (c) == '*' || (c) == ';')
+
+#define PRINT_OPERAND(FILE, X, CODE) print_operand (FILE, X, CODE)
+
+/* Print a memory address as an operand to reference that memory location. */
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) print_operand_address (FILE, ADDR)
diff --git a/gnu/gcc/gcc/config/m88k/m88k.md b/gnu/gcc/gcc/config/m88k/m88k.md
new file mode 100644
index 00000000000..9424679b39e
--- /dev/null
+++ b/gnu/gcc/gcc/config/m88k/m88k.md
@@ -0,0 +1,3845 @@
+;;- Machine description for the Motorola 88000 for GNU C compiler
+;; Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000
+;; Free Software Foundation, Inc.
+;; Contributed by Michael Tiemann (tiemann@mcc.com)
+;; Currently maintained by (gcc@dg-rtp.dg.com)
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+(define_constants
+ [(UNSPEC_ABDIFF 0)
+ (UNSPEC_GOT_REL 1)
+ ])
+
+(include "predicates.md")
+(include "constraints.md")
+
+;; Attribute describing the processor. This attribute must match exactly
+;; with the processor_type enumeration in m88k.h.
+
+; Target CPU.
+(define_attr "cpu" "m88100,m88110,m88000"
+ (const (symbol_ref "m88k_cpu")))
+
+; Type of each instruction. Default is arithmetic.
+; I'd like to write the list as this, but genattrtab won't accept it.
+;
+; "branch,jump,call, ; flow-control instructions
+; load,store,loadd,loada, ; data unit instructions
+; spadd,dpadd,spcmp,dpcmp,spdiv,dpdiv,idiv, ; FPU add instructions
+; spmul,dpmul,imul, ; FPU multiply instructions
+; arith,bit,mov ; integer unit instructions
+; marith,weird" ; multi-word instructions
+
+; Classification of each insn. Some insns of TYPE_BRANCH are multi-word.
+(define_attr "type"
+ "branch,jump,call,load,store,loadd,loada,spadd,dpadd,spcmp,dpcmp,spdiv,dpdiv,idiv,spmul,dpmul,imul,arith,bit,mov,marith,weird"
+ (const_string "arith"))
+
+; An insn is "fpu" if its type is one of the FPU-executed classes above;
+; used below to keep FPU results out of branch delay slots.
+(define_attr "fpu" "yes,no"
+ (if_then_else
+ (eq_attr "type" "spmul,dpmul,imul,spadd,dpadd,spcmp,dpcmp,spdiv,dpdiv,idiv")
+ (const_string "yes") (const_string "no")))
+
+; Length in # of instructions of each insn. The values are not exact, but
+; are safe (an overestimate, never an underestimate).
+(define_attr "length" ""
+ (cond [(eq_attr "type" "marith,weird,branch")
+ (const_int 2)]
+ (const_int 1)))
+
+; Describe a user's asm statement.
+(define_asm_attributes
+ [(set_attr "type" "weird")])
+
+; Define the delay slot requirements for branches and calls.
+; The m88100 annuls instructions if a conditional branch is taken.
+; For insns of TYPE_BRANCH that are multi-word instructions, the
+; delay slot applies to the first instruction.
+
+; @@ For the moment, reorg.c requires that the delay slot of a branch not
+; be a call or branch.
+
+(define_delay (eq_attr "type" "branch,jump")
+ [(and (and (eq_attr "type" "!branch,jump,call,marith,weird") ; required.
+ (eq_attr "type" "!load,loadd")) ; issue as-soon-as-possible.
+ (eq_attr "fpu" "no")) ; issue as-soon-as-possible.
+ (eq_attr "type" "!call,branch,jump") (nil)])
+
+; output_call supports an unconditional branch in the delay slot of
+; a call. (@@ Support for this case is expected in reorg.c soon.)
+
+(define_delay (eq_attr "type" "call")
+ [(eq_attr "type" "!branch,call,marith,weird") ; required.
+ (nil) (nil)])
+
+;; Superoptimizer sequences
+
+;; NOTE(review): the literal unspec numbers 0 and 1 used in the splits
+;; below (carry-set compare / carry-using add-subtract) collide
+;; numerically with UNSPEC_ABDIFF (0) and UNSPEC_GOT_REL (1) declared in
+;; define_constants above -- confirm the consumers are disambiguated by
+;; mode/context, or renumber these unspecs.
+
+;; geu+: { r = ((unsigned_word) v0 >= (unsigned_word) v1) + v2; }
+;; subu.co r5,r2,r3
+;; addu.cio r6,r4,r0
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (minus:SI (match_operand:SI 1 "register_operand" "")
+ (geu:SI (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "register_operand" ""))))]
+ "GET_CODE (operands[0]) == REG
+ && GET_CODE (operands[1]) == REG
+ && GET_CODE (operands[2]) == REG
+ && GET_CODE (operands[3]) == REG"
+ [(set (reg:CC 0) (unspec:CC [(match_dup 2) (match_dup 3)] 1))
+ (set (match_dup 0)
+ (plus:SI (match_dup 1)
+ (unspec:SI [(const_int 0)
+ (reg:CC 0)] 0)))]
+ "")
+
+;; leu+: { r = ((unsigned_word) v0 <= (unsigned_word) v1) + v2; }
+;; subu.co r5,r3,r2
+;; addu.cio r6,r4,r0
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (minus:SI (match_operand:SI 1 "register_operand" "")
+ (leu:SI (match_operand:SI 3 "register_operand" "")
+ (match_operand:SI 2 "register_operand" ""))))]
+ "GET_CODE (operands[0]) == REG
+ && GET_CODE (operands[1]) == REG
+ && GET_CODE (operands[2]) == REG
+ && GET_CODE (operands[3]) == REG"
+ [(set (reg:CC 0) (unspec:CC [(match_dup 2) (match_dup 3)] 1))
+ (set (match_dup 0)
+ (plus:SI (match_dup 1)
+ (unspec:SI [(const_int 0)
+ (reg:CC 0)] 0)))]
+ "")
+
+;; eq0+: { r = (v0 == 0) + v1; }
+;; subu.co r4,r0,r2
+;; addu.cio r5,r3,r0
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (minus:SI (match_operand:SI 1 "register_operand" "")
+ (eq:SI (match_operand:SI 2 "register_operand" "")
+ (const_int 0))))]
+ "GET_CODE (operands[0]) == REG
+ && GET_CODE (operands[1]) == REG
+ && GET_CODE (operands[2]) == REG"
+ [(set (reg:CC 0) (unspec:CC [(const_int 0) (match_dup 2)] 1))
+ (set (match_dup 0)
+ (plus:SI (match_dup 1)
+ (unspec:SI [(const_int 0)
+ (reg:CC 0)] 0)))]
+ "")
+
+;; ltu-: { r = v2 - ((unsigned_word) v0 < (unsigned_word) v1); }
+;; subu.co r5,r2,r3
+;; subu.cio r6,r4,r0
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (ltu:SI (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "register_operand" ""))
+ (match_operand:SI 1 "register_operand" "")))]
+ "GET_CODE (operands[0]) == REG
+ && GET_CODE (operands[1]) == REG
+ && GET_CODE (operands[2]) == REG
+ && GET_CODE (operands[3]) == REG"
+ [(set (reg:CC 0) (unspec:CC [(match_dup 2) (match_dup 3)] 1))
+ (set (match_dup 0)
+ (minus:SI (match_dup 1)
+ (unspec:SI [(const_int 0)
+ (reg:CC 0)] 1)))]
+ "")
+
+;; gtu-: { r = v2 - ((unsigned_word) v0 > (unsigned_word) v1); }
+;; subu.co r5,r3,r2
+;; subu.cio r6,r4,r0
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (gtu:SI (match_operand:SI 3 "register_operand" "")
+ (match_operand:SI 2 "register_operand" ""))
+ (match_operand:SI 1 "register_operand" "")))]
+ "GET_CODE (operands[0]) == REG
+ && GET_CODE (operands[1]) == REG
+ && GET_CODE (operands[2]) == REG
+ && GET_CODE (operands[3]) == REG"
+ [(set (reg:CC 0) (unspec:CC [(match_dup 2) (match_dup 3)] 1))
+ (set (match_dup 0)
+ (minus:SI (match_dup 1)
+ (unspec:SI [(const_int 0)
+ (reg:CC 0)] 1)))]
+ "")
+
+;; ne0-: { r = v1 - (v0 != 0); }
+;; subu.co r4,r0,r2
+;; subu.cio r5,r3,r0
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (ne:SI (match_operand:SI 2 "register_operand" "")
+ (const_int 0))
+ (match_operand:SI 1 "register_operand" "")))]
+ "GET_CODE (operands[0]) == REG
+ && GET_CODE (operands[1]) == REG
+ && GET_CODE (operands[2]) == REG"
+ [(set (reg:CC 0) (unspec:CC [(const_int 0) (match_dup 2)] 1))
+ (set (match_dup 0)
+ (minus:SI (match_dup 1)
+ (unspec:SI [(const_int 0)
+ (reg:CC 0)] 1)))]
+ "")
+
+;; ges0-: { r = v1 - ((signed_word) v0 >= 0); }
+;; addu.co r4,r2,r2
+;; subu.cio r5,r3,r0
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (minus:SI (match_operand:SI 1 "register_operand" "")
+ (xor:SI (lshiftrt:SI
+ (match_operand:SI 2 "register_operand" "")
+ (const_int 31))
+ (const_int 1))))]
+ "GET_CODE (operands[0]) == REG
+ && GET_CODE (operands[1]) == REG
+ && GET_CODE (operands[2]) == REG"
+ [(set (reg:CC 0) (unspec:CC [(match_dup 2) (match_dup 2)] 0))
+ (set (match_dup 0)
+ (minus:SI (match_dup 1)
+ (unspec:SI [(const_int 0)
+ (reg:CC 0)] 1)))]
+ "")
+
+;; This rich set of complex patterns are mostly due to Torbjorn Granlund
+;; (tege@sics.se). They've changed since then, so don't complain to him
+;; if they don't work right.
+
+;; Regarding shifts, gen_lshlsi3 generates ASHIFT. The gen functions
+;; produce the necessary insns to support TARGET_*_LARGE_SHIFT, so nothing
+;; special needs to be done here.
+
+;; Optimize possible cases of the set instruction: `set' writes a run of
+;; one bits (-1 << n) into the destination, optionally OR-ed with another
+;; register.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashift:SI (const_int -1)
+ (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "set %0,%#r0,%1"
+ [(set_attr "type" "bit")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ior:SI (ashift:SI (const_int -1)
+ (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "set %0,%2,%1"
+ [(set_attr "type" "bit")])
+
+;; Same as above with the commuted IOR operand order.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ior:SI (match_operand:SI 1 "register_operand" "r")
+ (ashift:SI (const_int -1)
+ (match_operand:SI 2 "register_operand" "r"))))]
+ ""
+ "set %0,%1,%2"
+ [(set_attr "type" "bit")])
+
+;; Optimize possible cases of the mak instruction (make bit field:
+;; shift left then mask). The width operand is recomputed into
+;; operands[4] from the shifted mask.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "int5_operand" ""))
+ (match_operand:SI 3 "immediate_operand" "n")))]
+ "mak_mask_p (INTVAL (operands[3]) >> INTVAL (operands[2]))"
+{
+ operands[4] = GEN_INT (exact_log2 (1 + (INTVAL (operands[3])
+ >> INTVAL(operands[2]))));
+ return "mak %0,%1,%4<%2>";
+}
+ [(set_attr "type" "bit")])
+
+;; Optimize possible cases of output_and: a zero_extract followed by a
+;; shift left is an AND with a computed mask; operands[2] is rewritten
+;; into that mask before calling output_and.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashift:SI (zero_extract:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "int5_operand" "")
+ (match_operand:SI 3 "int5_operand" ""))
+ (match_operand:SI 4 "int5_operand" "")))]
+ "INTVAL (operands[2]) + INTVAL (operands[3]) + INTVAL (operands[4]) == 32"
+{
+ operands[2]
+ = GEN_INT (((1 << INTVAL (operands[2])) - 1) << INTVAL (operands[4]));
+ return output_and (operands);
+}
+ [(set_attr "type" "marith")]) ; arith,bit,marith. length is 1 or 2.
+
+;; Improve logical operations on compare words
+;;
+;; We define all logical operations on CCmode values to preserve the pairwise
+;; relationship of the compare bits. This allows a future branch prediction
+;; pass the degree of freedom needed to change and/bb0-le into or/bb1-gt.
+;; THIS IS CURRENTLY FALSE!
+;;
+;; Opportunities arise when conditional expressions using && and || are made
+;; unconditional. When these are used to branch, the sequence is
+;; cmp/cmp/extu/extu/{and,or}/bcnd-{eq0,ne0}. When these are used to create
+;; a value, the sequence is cmp/cmp/extu/extu/{and,or} for 1 or 0 or
+;; cmp/cmp/ext/ext/{and,or} for -1 or 0.
+;;
+;; When the extracted conditions are the same, the define_split patterns
+;; below change extu/extu/{and,or} into {and,or}/extu. If the reversed
+;; conditions match, one compare word can be complemented, resulting in
+;; {and.c,or.c}/extu. These changes are done for ext/ext/{and,or} as well.
+;; If the conditions don't line up, one can be rotated. To keep the pairwise
+;; relationship, it may be necessary to both rotate and complement. Rotating
+;; makes branching cheaper, but doesn't help (or hurt) creating a value, so
+;; we don't do this for ext/ext/{and,or}.
+;;
+;; These changes result in the sequence extu/bcnd-{eq0,ne0} which is combined
+;; into an alternate form of bb0 and bb1.
+
+;; ior of two negated (i.e. -1/0 valued) even/any condition extracts:
+;; combine the compare words, then extract once.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ior:SI (neg:SI
+ (match_operator 1 "even_relop"
+ [(match_operand 2 "partial_ccmode_register_operand" "%r")
+ (const_int 0)]))
+ (neg:SI
+ (match_operator 3 "relop"
+ [(match_operand 4 "partial_ccmode_register_operand" "r")
+ (const_int 0)]))))
+ (clobber (match_operand:SI 5 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG"
+ [(set (match_dup 5)
+ (ior:CCEVEN (match_dup 4)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (neg:SI (match_op_dup 1 [(match_dup 5) (const_int 0)])))]
+ "operands[5] = gen_rtx_SUBREG (CCEVENmode, operands[5], 0);
+ if (GET_CODE (operands[1]) == GET_CODE (operands[3]))
+ ; /* The conditions match. */
+ else if (GET_CODE (operands[1])
+ == reverse_condition (GET_CODE (operands[3])))
+ /* Reverse the condition by complementing the compare word. */
+ operands[4] = gen_rtx_NOT (CCmode, operands[4]);
+ else
+ {
+ /* Make the condition pairs line up by rotating the compare word. */
+ int cv1 = condition_value (operands[1]);
+ int cv2 = condition_value (operands[3]);
+
+ operands[4] = gen_rtx_ROTATE (CCEVENmode, operands[4],
+ GEN_INT (((cv2 & ~1) - (cv1 & ~1))
+ & 0x1f));
+ /* Reverse the condition if needed. */
+ if ((cv1 & 1) != (cv2 & 1))
+ operands[4] = gen_rtx_NOT (CCmode, operands[4]);
+ }")
+
+;; Same, both conditions odd: by De Morgan the compare words are ANDed.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ior:SI (neg:SI
+ (match_operator 1 "odd_relop"
+ [(match_operand 2 "partial_ccmode_register_operand" "%r")
+ (const_int 0)]))
+ (neg:SI
+ (match_operator 3 "odd_relop"
+ [(match_operand 4 "partial_ccmode_register_operand" "r")
+ (const_int 0)]))))
+ (clobber (match_operand:SI 5 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG"
+ [(set (match_dup 5)
+ (and:CCEVEN (match_dup 4)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (neg:SI (match_op_dup 1 [(match_dup 5) (const_int 0)])))]
+ "operands[5] = gen_rtx_SUBREG (CCEVENmode, operands[5], 0);
+ if (GET_CODE (operands[1]) == GET_CODE (operands[3]))
+ ; /* The conditions match. */
+ else
+ {
+ /* Make the condition pairs line up by rotating the compare word. */
+ int cv1 = condition_value (operands[1]);
+ int cv2 = condition_value (operands[3]);
+
+ operands[4] = gen_rtx_ROTATE (CCEVENmode, operands[4],
+ GEN_INT ((cv2 - cv1) & 0x1f));
+ }")
+
+;; Mixed odd/even conditions: complement one compare word first.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ior:SI (neg:SI
+ (match_operator 1 "odd_relop"
+ [(match_operand 2 "partial_ccmode_register_operand" "%r")
+ (const_int 0)]))
+ (neg:SI
+ (match_operator 3 "even_relop"
+ [(match_operand 4 "partial_ccmode_register_operand" "r")
+ (const_int 0)]))))
+ (clobber (match_operand:SI 5 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG"
+ [(set (match_dup 5)
+ (ior:CCEVEN (not:CC (match_dup 2))
+ (match_dup 4)))
+ (set (match_dup 0)
+ (neg:SI (match_op_dup 3 [(match_dup 5) (const_int 0)])))]
+ "operands[5] = gen_rtx_SUBREG (CCEVENmode, operands[5], 0);
+ if (GET_CODE (operands[1])
+ == reverse_condition (GET_CODE (operands[3])))
+ ;
+ else
+ {
+ /* Make the condition pairs line up by rotating the compare word. */
+ int cv1 = condition_value (operands[1]);
+ int cv2 = condition_value (operands[3]);
+
+ operands[2] = gen_rtx_ROTATE (CCEVENmode, operands[2],
+ GEN_INT (((cv1 & ~1) - (cv2 & ~1))
+ & 0x1f));
+ }")
+
+;; Non-negated (0/1 valued) variants of the three splits above.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ior:SI (match_operator 1 "even_relop"
+ [(match_operand 2 "partial_ccmode_register_operand" "%r")
+ (const_int 0)])
+ (match_operator 3 "relop"
+ [(match_operand 4 "partial_ccmode_register_operand" "r")
+ (const_int 0)])))
+ (clobber (match_operand:SI 5 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG
+ && (GET_CODE (operands[1]) == GET_CODE (operands[3])
+ || GET_CODE (operands[1]) == reverse_condition (GET_CODE (operands[3])))"
+ [(set (match_dup 5)
+ (ior:CCEVEN (match_dup 4)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (match_op_dup 1 [(match_dup 5) (const_int 0)]))]
+ "operands[5] = gen_rtx_SUBREG (CCEVENmode, operands[5], 0);
+ /* Reverse the condition by complementing the compare word. */
+ if (GET_CODE (operands[1]) != GET_CODE (operands[3]))
+ operands[4] = gen_rtx_NOT (CCmode, operands[4]);")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ior:SI (match_operator 1 "odd_relop"
+ [(match_operand 2 "partial_ccmode_register_operand" "%r")
+ (const_int 0)])
+ (match_operator 3 "odd_relop"
+ [(match_operand 4 "partial_ccmode_register_operand" "r")
+ (const_int 0)])))
+ (clobber (match_operand:SI 5 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG
+ && GET_CODE (operands[1]) == GET_CODE (operands[3])"
+ [(set (match_dup 5)
+ (and:CCEVEN (match_dup 4)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (match_op_dup 1 [(match_dup 5) (const_int 0)]))]
+ "operands[5] = gen_rtx_SUBREG (CCEVENmode, operands[5], 0);")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ior:SI (match_operator 1 "odd_relop"
+ [(match_operand 2 "partial_ccmode_register_operand" "%r")
+ (const_int 0)])
+ (match_operator 3 "even_relop"
+ [(match_operand 4 "partial_ccmode_register_operand" "r")
+ (const_int 0)])))
+ (clobber (match_operand:SI 5 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG
+ && GET_CODE (operands[1]) == reverse_condition (GET_CODE (operands[3]))"
+ [(set (match_dup 5)
+ (ior:CCEVEN (not:CC (match_dup 4))
+ (match_dup 2)))
+ (set (match_dup 0)
+ (match_op_dup 1 [(match_dup 5) (const_int 0)]))]
+ "operands[5] = gen_rtx_SUBREG (CCEVENmode, operands[5], 0);")
+
+;; AND counterparts of the ior-of-conditions splits above.
+;; NOTE(review): the first split below builds its rotate with
+;; gen_rtx_ROTATE (CCmode, ...) while every parallel split (including the
+;; matching ior split) uses CCEVENmode -- confirm this asymmetry is
+;; intentional.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (neg:SI
+ (match_operator 1 "even_relop"
+ [(match_operand 2 "partial_ccmode_register_operand" "%r")
+ (const_int 0)]))
+ (neg:SI
+ (match_operator 3 "relop"
+ [(match_operand 4 "partial_ccmode_register_operand" "r")
+ (const_int 0)]))))
+ (clobber (match_operand:SI 5 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG"
+ [(set (match_dup 5)
+ (and:CCEVEN (match_dup 4)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (neg:SI (match_op_dup 1 [(match_dup 5) (const_int 0)])))]
+ "operands[5] = gen_rtx_SUBREG (CCEVENmode, operands[5], 0);
+ if (GET_CODE (operands[1]) == GET_CODE (operands[3]))
+ ; /* The conditions match. */
+ else if (GET_CODE (operands[1])
+ == reverse_condition (GET_CODE (operands[3])))
+ /* Reverse the condition by complementing the compare word. */
+ operands[4] = gen_rtx_NOT (CCmode, operands[4]);
+ else
+ {
+ /* Make the condition pairs line up by rotating the compare word. */
+ int cv1 = condition_value (operands[1]);
+ int cv2 = condition_value (operands[3]);
+ operands[4] = gen_rtx_ROTATE (CCmode, operands[4],
+ GEN_INT (((cv2 & ~1) - (cv1 & ~1))
+ & 0x1f));
+ /* Reverse the condition if needed. */
+ if ((cv1 & 1) != (cv2 & 1))
+ operands[4] = gen_rtx_NOT (CCmode, operands[4]);
+ }")
+
+;; Both conditions odd: by De Morgan the compare words are ORed.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (neg:SI
+ (match_operator 1 "odd_relop"
+ [(match_operand 2 "partial_ccmode_register_operand" "%r")
+ (const_int 0)]))
+ (neg:SI
+ (match_operator 3 "odd_relop"
+ [(match_operand 4 "partial_ccmode_register_operand" "r")
+ (const_int 0)]))))
+ (clobber (match_operand:SI 5 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG"
+ [(set (match_dup 5)
+ (ior:CCEVEN (match_dup 4)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (neg:SI (match_op_dup 1 [(match_dup 5) (const_int 0)])))]
+ "operands[5] = gen_rtx_SUBREG (CCEVENmode, operands[5], 0);
+ if (GET_CODE (operands[1]) == GET_CODE (operands[3]))
+ ; /* The conditions match. */
+ else
+ {
+ /* Make the condition pairs line up by rotating the compare word. */
+ int cv1 = condition_value (operands[1]);
+ int cv2 = condition_value (operands[3]);
+ operands[4] = gen_rtx_ROTATE (CCEVENmode, operands[4],
+ GEN_INT ((cv2 - cv1) & 0x1f));
+ }")
+
+;; Mixed odd/even conditions: complement one compare word first.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (neg:SI
+ (match_operator 1 "odd_relop"
+ [(match_operand 2 "partial_ccmode_register_operand" "%r")
+ (const_int 0)]))
+ (neg:SI
+ (match_operator 3 "even_relop"
+ [(match_operand 4 "partial_ccmode_register_operand" "r")
+ (const_int 0)]))))
+ (clobber (match_operand:SI 5 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG"
+ [(set (match_dup 5)
+ (and:CCEVEN (not:CC (match_dup 2))
+ (match_dup 4)))
+ (set (match_dup 0)
+ (neg:SI (match_op_dup 3 [(match_dup 5) (const_int 0)])))]
+ "operands[5] = gen_rtx_SUBREG (CCEVENmode, operands[5], 0);
+ if (GET_CODE (operands[1])
+ == reverse_condition (GET_CODE (operands[3])))
+ ;
+ else
+ {
+ /* Make the condition pairs line up by rotating the compare word. */
+ int cv1 = condition_value (operands[1]);
+ int cv2 = condition_value (operands[3]);
+ operands[2] = gen_rtx_ROTATE (CCEVENmode, operands[2],
+ GEN_INT (((cv1 & ~1) - (cv2 & ~1))
+ & 0x1f));
+ }")
+
+;; Non-negated (0/1 valued) variants of the three splits above.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (match_operator 1 "even_relop"
+ [(match_operand 2 "partial_ccmode_register_operand" "%r")
+ (const_int 0)])
+ (match_operator 3 "relop"
+ [(match_operand 4 "partial_ccmode_register_operand" "r")
+ (const_int 0)])))
+ (clobber (match_operand:SI 5 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG
+ && (GET_CODE (operands[1]) == GET_CODE (operands[3])
+ || GET_CODE (operands[1]) == reverse_condition (GET_CODE (operands[3])))"
+ [(set (match_dup 5)
+ (and:CCEVEN (match_dup 4)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (match_op_dup 1 [(match_dup 5) (const_int 0)]))]
+ "operands[5] = gen_rtx_SUBREG (CCEVENmode, operands[5], 0);
+ /* Reverse the condition by complementing the compare word. */
+ if (GET_CODE (operands[1]) != GET_CODE (operands[3]))
+ operands[4] = gen_rtx_NOT (CCmode, operands[4]);")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (match_operator 1 "odd_relop"
+ [(match_operand 2 "partial_ccmode_register_operand" "%r")
+ (const_int 0)])
+ (match_operator 3 "odd_relop"
+ [(match_operand 4 "partial_ccmode_register_operand" "r")
+ (const_int 0)])))
+ (clobber (match_operand:SI 5 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG
+ && GET_CODE (operands[1]) == GET_CODE (operands[3])"
+ [(set (match_dup 5)
+ (ior:CCEVEN (match_dup 4)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (match_op_dup 1 [(match_dup 5) (const_int 0)]))]
+ "operands[5] = gen_rtx_SUBREG (CCEVENmode, operands[5], 0);")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (match_operator 1 "odd_relop"
+ [(match_operand 2 "partial_ccmode_register_operand" "%r")
+ (const_int 0)])
+ (match_operator 3 "even_relop"
+ [(match_operand 4 "partial_ccmode_register_operand" "r")
+ (const_int 0)])))
+ (clobber (match_operand:SI 5 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG
+ && GET_CODE (operands[1]) == reverse_condition (GET_CODE (operands[3]))"
+ [(set (match_dup 5)
+ (and:CCEVEN (not:CC (match_dup 2))
+ (match_dup 4)))
+ (set (match_dup 0)
+ (match_op_dup 3 [(match_dup 5) (const_int 0)]))]
+ "operands[5] = gen_rtx_SUBREG (CCEVENmode, operands[5], 0);")
+
+
+;; Logical operations on compare words. These are the insns the splits
+;; above generate: and/and.c/or/or.c and rot on compare-word registers.
+
+(define_insn ""
+ [(set (match_operand:CCEVEN 0 "register_operand" "=r")
+ (and:CCEVEN (not:CC (match_operand 1 "partial_ccmode_register_operand" "r"))
+ (match_operand 2 "partial_ccmode_register_operand" "r")))]
+ ""
+ "and.c %0,%2,%1")
+
+(define_insn ""
+ [(set (match_operand:CCEVEN 0 "register_operand" "=r")
+ (and:CCEVEN (match_operand 1 "partial_ccmode_register_operand" "%r")
+ (match_operand 2 "partial_ccmode_register_operand" "r")))]
+ ""
+ "and %0,%1,%2")
+
+(define_insn ""
+ [(set (match_operand:CCEVEN 0 "register_operand" "=r")
+ (ior:CCEVEN (not:CC (match_operand 1 "partial_ccmode_register_operand" "r"))
+ (match_operand 2 "partial_ccmode_register_operand" "r")))]
+ ""
+ "or.c %0,%2,%1")
+
+(define_insn ""
+ [(set (match_operand:CCEVEN 0 "register_operand" "=r")
+ (ior:CCEVEN (match_operand 1 "partial_ccmode_register_operand" "%r")
+ (match_operand 2 "partial_ccmode_register_operand" "r")))]
+ ""
+ "or %0,%1,%2")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "register_operand" "=r")
+ (rotate:CC (match_operand:CC 1 "register_operand" "r")
+ (match_operand:CC 2 "int5_operand" "")))]
+ ""
+ "rot %0,%1,%2"
+ [(set_attr "type" "bit")])
+
+(define_insn ""
+ [(set (match_operand:CCEVEN 0 "register_operand" "=r")
+ (rotate:CCEVEN (match_operand 1 "partial_ccmode_register_operand" "r")
+ (match_operand:CC 2 "int5_operand" "")))]
+ ""
+ "rot %0,%1,%2"
+ [(set_attr "type" "bit")])
+
+;; rotate/and[.c] and rotate/ior[.c]
+;; Each define_insn below just emits "#"; after reload the matching
+;; define_split breaks it into a rot into the clobbered scratch
+;; followed by the logical insn.  Four variants cover ior/and with and
+;; without the complemented (not:CC) first input.
+
+(define_split
+ [(set (match_operand:CCEVEN 0 "register_operand" "")
+ (ior:CCEVEN (rotate:CC (match_operand 1 "partial_ccmode_register_operand" "")
+ (match_operand:CC 2 "int5_operand" ""))
+ (match_operand 3 "partial_ccmode_register_operand" "")))
+ (clobber (match_operand:CCEVEN 4 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG
+ && partial_ccmode_register_operand (operands[1], VOIDmode)
+ && partial_ccmode_register_operand (operands[3], VOIDmode)"
+ [(set (match_dup 4)
+ (rotate:CC (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (ior:CCEVEN (match_dup 4) (match_dup 3)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CCEVEN 0 "register_operand" "=r")
+ (ior:CCEVEN (rotate:CC (match_operand 1 "partial_ccmode_register_operand" "r")
+ (match_operand:CC 2 "int5_operand" ""))
+ (match_operand 3 "partial_ccmode_register_operand" "r")))
+ (clobber (match_scratch:CCEVEN 4 "=r"))]
+ ""
+ "#")
+
+(define_split
+ [(set (match_operand:CCEVEN 0 "register_operand" "")
+ (ior:CCEVEN (not:CC (rotate:CC (match_operand 1 "partial_ccmode_register_operand" "")
+ (match_operand:CC 2 "int5_operand" "")))
+ (match_operand 3 "partial_ccmode_register_operand" "")))
+ (clobber (match_operand:CCEVEN 4 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG
+ && partial_ccmode_register_operand (operands[1], VOIDmode)
+ && partial_ccmode_register_operand (operands[3], VOIDmode)"
+ [(set (match_dup 4)
+ (rotate:CC (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (ior:CCEVEN (not:CC (match_dup 4)) (match_dup 3)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CCEVEN 0 "register_operand" "=r")
+ (ior:CCEVEN (not:CC (rotate:CC (match_operand 1 "partial_ccmode_register_operand" "r")
+ (match_operand:CC 2 "int5_operand" "")))
+ (match_operand 3 "partial_ccmode_register_operand" "r")))
+ (clobber (match_scratch:CCEVEN 4 "=r"))]
+ ""
+ "#")
+
+(define_split
+ [(set (match_operand:CCEVEN 0 "register_operand" "")
+ (and:CCEVEN (rotate:CC (match_operand 1 "partial_ccmode_register_operand" "")
+ (match_operand:CC 2 "int5_operand" ""))
+ (match_operand 3 "partial_ccmode_register_operand" "")))
+ (clobber (match_operand:CCEVEN 4 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG
+ && partial_ccmode_register_operand (operands[1], VOIDmode)
+ && partial_ccmode_register_operand (operands[3], VOIDmode)"
+ [(set (match_dup 4)
+ (rotate:CC (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (and:CCEVEN (match_dup 4) (match_dup 3)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CCEVEN 0 "register_operand" "=r")
+ (and:CCEVEN (rotate:CC (match_operand 1 "partial_ccmode_register_operand" "r")
+ (match_operand:CC 2 "int5_operand" ""))
+ (match_operand 3 "partial_ccmode_register_operand" "r")))
+ (clobber (match_scratch:CCEVEN 4 "=r"))]
+ ""
+ "#")
+
+(define_split
+ [(set (match_operand:CCEVEN 0 "register_operand" "")
+ (and:CCEVEN (not:CC (rotate:CC (match_operand 1 "partial_ccmode_register_operand" "")
+ (match_operand:CC 2 "int5_operand" "")))
+ (match_operand 3 "partial_ccmode_register_operand" "")))
+ (clobber (match_operand:CCEVEN 4 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG
+ && partial_ccmode_register_operand (operands[1], VOIDmode)
+ && partial_ccmode_register_operand (operands[3], VOIDmode)"
+ [(set (match_dup 4)
+ (rotate:CC (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (and:CCEVEN (not:CC (match_dup 4)) (match_dup 3)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CCEVEN 0 "register_operand" "=r")
+ (and:CCEVEN (not:CC (rotate:CC (match_operand 1 "partial_ccmode_register_operand" "r")
+ (match_operand:CC 2 "int5_operand" "")))
+ (match_operand 3 "partial_ccmode_register_operand" "r")))
+ (clobber (match_scratch:CCEVEN 4 "=r"))]
+ ""
+ "#")
+
+
+;; Recognize bcnd instructions for integer values. This is distinguished
+;; from a conditional branch instruction (below) with SImode instead of
+;; CCmode.
+;; %Rx reverses the following condition code when `x' is a LABEL_REF;
+;; %Px prints the operand only when it is a LABEL_REF (see the bb0/bb1
+;; comment further down for the same convention).
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "relop_no_unsigned"
+ [(match_operand:SI 1 "register_operand" "r")
+ (const_int 0)])
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bcnd%. %R3%B0,%1,%P2%P3"
+ [(set_attr "type" "branch")])
+
+;; Recognize tests for sign and zero.
+
+;; Equality test of a register against 0x80000000 (sign bit set, all
+;; other bits clear), emitted through the %E output code.
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "equality_op"
+ [(match_operand:SI 1 "register_operand" "r")
+ (const_int -2147483648)])
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bcnd%. %R3%E0,%1,%P2%P3"
+ [(set_attr "type" "branch")])
+
+;; Equality test on a 31-bit zero_extract at position 1 (i.e. every bit
+;; but one), emitted through the %D output code.
+;; NOTE(review): which bit is excluded depends on the target's bit
+;; numbering (BITS_BIG_ENDIAN) -- confirm against m88k.h.
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "equality_op"
+ [(zero_extract:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (const_int 31)
+ (const_int 1))
+ (const_int 0)])
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bcnd%. %R3%D0,%1,%P2%P3"
+ [(set_attr "type" "branch")])
+
+;; Recognize bcnd instructions for double integer values
+;; A sign- or zero-extended SImode value compared against zero can be
+;; tested by a single bcnd on the original 32-bit register.
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "relop_no_unsigned"
+ [(sign_extend:DI
+ (match_operand:SI 1 "register_operand" "r"))
+ (const_int 0)])
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bcnd%. %R3%B0,%1,%P2%P3"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "equality_op"
+ [(zero_extend:DI
+ (match_operand:SI 1 "register_operand" "r"))
+ (const_int 0)])
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bcnd%. %R3%B0,%1,%P2%P3"
+ [(set_attr "type" "branch")])
+
+;; Recognize bcnd instructions for single precision float values
+;; Exclude relational operations as they must signal NaNs.
+
+;; @@ These bcnd insns for float and double values don't seem to be recognized.
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "equality_op"
+ [(float_extend:DF
+ (match_operand:SF 1 "register_operand" "r"))
+ (const_int 0)])
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bcnd%. %R3%D0,%1,%P2%P3"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "equality_op"
+ [(match_operand:SF 1 "register_operand" "r")
+ (const_int 0)])
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bcnd%. %R3%D0,%1,%P2%P3"
+ [(set_attr "type" "branch")])
+
+;; Recognize bcnd instructions for double precision float values
+;; Exclude relational operations as they must signal NaNs.
+
+;; A DFmode value spans two general registers (%1 high word, %d1 low
+;; word), so the test needs two bcnd instructions.  The code is first
+;; normalized to EQ by swapping the branch targets for NE.
+;; NOTE(review): in the LABEL_REF arm both bcnd's branch to %3 ("branch
+;; when unequal"); the fallback arm emits a local label so the first
+;; bcnd can skip past the branch to %2.  The 0x5 condition mask should
+;; be confirmed against the MC88100 bcnd encodings.
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "equality_op"
+ [(match_operand:DF 1 "register_operand" "r")
+ (const_int 0)])
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+{
+ if (GET_CODE (operands[0]) == NE)
+ {
+ rtx op2 = operands[2];
+ operands[2] = operands[3];
+ operands[3] = op2;
+ }
+ if (GET_CODE (operands[3]) == LABEL_REF)
+ return "bcnd 0x5,%1,%3\;bcnd %#ne0,%d1,%3";
+
+ operands[3] = gen_label_rtx ();
+ output_asm_insn ("bcnd 0x5,%1,%3\;bcnd %#eq0,%d1,%2", operands);
+ emit_label (operands[3]);
+ return "";
+}
+ [(set_attr "type" "weird")
+ (set_attr "length" "3")])
+
+;; Recognize bb0 and bb1 instructions. These use two unusual template
+;; patterns, %Lx and %Px. %Lx outputs a 1 if operand `x' is a LABEL_REF
+;; otherwise it outputs a 0. It then may print ".n" if the delay slot
+;; is used. %Px does nothing if `x' is PC and outputs the operand if `x'
+;; is a LABEL_REF.
+
+;; One-bit sign_extract tested for non-zero; the bit number is the
+;; int5 position converted to bb0/bb1 numbering as (31-%1).
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ne (sign_extract:SI (match_operand:SI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 1 "int5_operand" ""))
+ (const_int 0))
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bb%L2 (31-%1),%0,%P2%P3"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (eq (sign_extract:SI (match_operand:SI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 1 "int5_operand" ""))
+ (const_int 0))
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bb%L3 (31-%1),%0,%P2%P3"
+ [(set_attr "type" "branch")])
+
+;; Same pair with zero_extract; a single extracted bit tests the same.
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ne (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 1 "int5_operand" ""))
+ (const_int 0))
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bb%L2 (31-%1),%0,%P2%P3"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (eq (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 1 "int5_operand" ""))
+ (const_int 0))
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bb%L3 (31-%1),%0,%P2%P3"
+ [(set_attr "type" "branch")])
+
+;; AND of a register with a single-bit mask tested against zero; the
+;; condition requires exactly one of the two operands to be the
+;; CONST_INT mask (%p1 prints its bit number).
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (eq (and:SI (match_operand:SI 0 "reg_or_bbx_mask_operand" "%r")
+ (match_operand:SI 1 "reg_or_bbx_mask_operand" "n"))
+ (const_int 0))
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ "(GET_CODE (operands[0]) == CONST_INT)
+ != (GET_CODE (operands[1]) == CONST_INT)"
+ "bb%L3 %p1,%0,%P2%P3"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ne (and:SI (match_operand:SI 0 "reg_or_bbx_mask_operand" "%r")
+ (match_operand:SI 1 "reg_or_bbx_mask_operand" "n"))
+ (const_int 0))
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ "(GET_CODE (operands[0]) == CONST_INT)
+ != (GET_CODE (operands[1]) == CONST_INT)"
+ "bb%L2 %p1,%0,%P2%P3"
+ [(set_attr "type" "branch")])
+
+;; The comparison operations store the comparison into a register and
+;; record that register. The following Bxx or Sxx insn uses that
+;; register as an input. To facilitate use of bcnd instead of cmp/bb1,
+;; cmpsi records its operands and produces no code when any operand
+;; is constant. In this case, the Bxx insns use gen_bcnd and the
+;; Sxx insns use gen_test to ensure a cmp has been emitted.
+;;
+;; This could also be done for SFmode and DFmode having only beq and bne
+;; use gen_bcnd. The others must signal NaNs. It seems though that zero
+;; has already been copied into a register.
+;;
+;; cmpsi/beq and cmpsi/bne can always be done with bcnd if any operand
+;; is a constant. (This idea is due to Torbjorn Granlund.) Others can
+;; use bcnd only if an operand is zero.
+;;
+;; It is necessary to distinguish a register holding condition codes.
+;; This is done by context.
+
+;; Emit the deferred cmp; callers must not already have a pending
+;; compare (asserted below).  Large constants are forced to registers.
+(define_expand "test"
+ [(set (match_dup 2)
+ (compare:CC (match_operand 0 "" "")
+ (match_operand 1 "" "")))]
+ ""
+ "
+{
+ gcc_assert (m88k_compare_reg == NULL_RTX);
+
+ if (GET_CODE (operands[0]) == CONST_INT
+ && ! SMALL_INT (operands[0]))
+ operands[0] = force_reg (SImode, operands[0]);
+
+ if (GET_CODE (operands[1]) == CONST_INT
+ && ! SMALL_INT (operands[1]))
+ operands[1] = force_reg (SImode, operands[1]);
+
+ operands[2] = m88k_compare_reg = gen_reg_rtx (CCmode);
+}")
+
+;; Defer the compare entirely when either operand is constant, so the
+;; branch/scc expander can try bcnd instead of cmp/bb1.
+(define_expand "cmpsi"
+ [(set (match_dup 2)
+ (compare:CC (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "arith32_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == CONST_INT
+ || GET_CODE (operands[1]) == CONST_INT)
+ {
+ m88k_compare_reg = NULL_RTX;
+ m88k_compare_op0 = operands[0];
+ m88k_compare_op1 = operands[1];
+ DONE;
+ }
+ operands[2] = m88k_compare_reg = gen_reg_rtx (CCmode);
+}")
+
+(define_expand "cmpsf"
+ [(set (match_dup 2)
+ (compare:CC (match_operand:SF 0 "register_operand" "")
+ (match_operand:SF 1 "register_operand" "")))]
+ ""
+ "operands[2] = m88k_compare_reg = gen_reg_rtx (CCmode);")
+
+(define_expand "cmpdf"
+ [(set (match_dup 2)
+ (compare:CC (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" "")))]
+ ""
+ "
+{
+ operands[0] = legitimize_operand (operands[0], DFmode);
+ operands[1] = legitimize_operand (operands[1], DFmode);
+ operands[2] = m88k_compare_reg = gen_reg_rtx (CCmode);
+}")
+
+;; The actual compare instructions.
+
+(define_insn ""
+ [(set (match_operand:CC 0 "register_operand" "=r")
+ (compare:CC (match_operand:SI 1 "register_operand" "rO")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ ""
+ "cmp %0,%r1,%2")
+
+;; SF compare; the G alternatives match literal 0.0 and substitute the
+;; hardwired zero register (r0 for GRF, x0 for XRF operands).
+(define_insn ""
+ [(set (match_operand:CC 0 "register_operand" "=r,r,r,r")
+ (compare:CC (match_operand:SF 1 "register_operand" "r,r,x,x")
+ (match_operand:SF 2 "real_or_0_operand" "r,G,x,G")))]
+ ""
+ "@
+ fcmp.sss %0,%1,%2
+ fcmp.sss %0,%1,%#r0
+ fcmp.sss %0,%1,%2
+ fcmp.sss %0,%1,%#x0"
+ [(set_attr "type" "spcmp")])
+
+;; Mixed-precision compares: fcmp.sds / fcmp.ssd take one DF and one
+;; SF source, so the float_extend needs no separate conversion insn.
+(define_insn ""
+ [(set (match_operand:CC 0 "register_operand" "=r,r")
+ (compare:CC (match_operand:DF 1 "register_operand" "r,x")
+ (float_extend:DF
+ (match_operand:SF 2 "register_operand" "r,x"))))]
+ ""
+ "fcmp.sds %0,%1,%2"
+ [(set_attr "type" "dpcmp")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "register_operand" "=r,r")
+ (compare:CC (float_extend:DF
+ (match_operand:SF 1 "register_operand" "r,x"))
+ (match_operand:DF 2 "register_operand" "r,x")))]
+ ""
+ "fcmp.ssd %0,%1,%2"
+ [(set_attr "type" "dpcmp")])
+
+;; DF compare; literal 0.0 again uses r0/x0, via the single-precision
+;; second source form fcmp.sds.
+(define_insn ""
+ [(set (match_operand:CC 0 "register_operand" "=r,r,r,r")
+ (compare:CC (match_operand:DF 1 "register_operand" "r,r,x,x")
+ (match_operand:DF 2 "real_or_0_operand" "r,G,x,G")))]
+ ""
+ "@
+ fcmp.sdd %0,%1,%2
+ fcmp.sds %0,%1,%#r0
+ fcmp.sdd %0,%1,%2
+ fcmp.sds %0,%1,%#x0"
+ [(set_attr "type" "dpcmp")])
+
+;; Store condition code insns. The compare insns set a register
+;; rather than cc0 and record that register for use here. See above
+;; for the special treatment of cmpsi with a constant operand.
+;; Each expander simply delegates to emit_test, which materializes the
+;; recorded (or deferred) compare and returns the scc expression.
+
+;; @@ For the m88110, use fcmpu for bxx sxx inequality comparisons.
+
+(define_expand "seq"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "operands[1] = emit_test (EQ, SImode);")
+
+(define_expand "sne"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "operands[1] = emit_test (NE, SImode);")
+
+(define_expand "sgt"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "operands[1] = emit_test (GT, SImode);")
+
+(define_expand "sgtu"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "operands[1] = emit_test (GTU, SImode);")
+
+(define_expand "slt"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "operands[1] = emit_test (LT, SImode);")
+
+(define_expand "sltu"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "operands[1] = emit_test (LTU, SImode);")
+
+(define_expand "sge"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "operands[1] = emit_test (GE, SImode);")
+
+(define_expand "sgeu"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "operands[1] = emit_test (GEU, SImode);")
+
+(define_expand "sle"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "operands[1] = emit_test (LE, SImode);")
+
+(define_expand "sleu"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "operands[1] = emit_test (LEU, SImode);")
+
+;; The actual set condition code instruction.
+;; The condition bit named by %C1 is pulled out of the compare word
+;; with a one-bit extract.  NOTE(review): the plain scc uses signed
+;; ext and the neg:SI form uses unsigned extu, which is consistent
+;; with STORE_FLAG_VALUE being -1 on this target -- confirm in m88k.h.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operator:SI 1 "relop"
+ [(match_operand:CC 2 "register_operand" "r")
+ (const_int 0)]))]
+ ""
+ "ext %0,%2,1<%C1>"
+ [(set_attr "type" "bit")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operator:SI 1 "even_relop"
+ [(match_operand:CCEVEN 2 "register_operand" "r")
+ (const_int 0)]))]
+ ""
+ "ext %0,%2,1<%C1>"
+ [(set_attr "type" "bit")])
+
+;; Odd relops on CCEVEN words are only valid complemented; %! selects
+;; the complementary condition bit.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (match_operator:SI 1 "odd_relop"
+ [(match_operand:CCEVEN 2 "register_operand" "r")
+ (const_int 0)])))]
+ ""
+ "ext %0,%2,1<%!%C1>"
+ [(set_attr "type" "bit")])
+
+;; Uncomplemented odd relop: extract the complemented bit into the
+;; scratch, then invert it.  The "#" insn below forces this split.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operator:SI 1 "odd_relop"
+ [(match_operand:CCEVEN 2 "register_operand" "r")
+ (const_int 0)]))
+ (clobber (match_operand:SI 3 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG"
+ [(set (match_dup 3) (not:SI (match_op_dup 1 [(match_dup 2) (const_int 0)])))
+ (set (match_dup 0) (not:SI (match_dup 3)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operator:SI 1 "odd_relop"
+ [(match_operand:CCEVEN 2 "register_operand" "r")
+ (const_int 0)]))
+ (clobber (match_scratch:SI 3 "=r"))]
+ ""
+ "#")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI
+ (match_operator:SI 1 "relop"
+ [(match_operand:CC 2 "register_operand" "r")
+ (const_int 0)])))]
+ ""
+ "extu %0,%2,1<%C1>"
+ [(set_attr "type" "bit")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI
+ (match_operator:SI 1 "even_relop"
+ [(match_operand:CCEVEN 2 "register_operand" "r")
+ (const_int 0)])))]
+ ""
+ "extu %0,%2,1<%C1>"
+ [(set_attr "type" "bit")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI
+ (not:SI (match_operator:SI 1 "odd_relop"
+ [(match_operand:CCEVEN 2 "register_operand" "r")
+ (const_int 0)]))))]
+ ""
+ "extu %0,%2,1<%!%C1>"
+ [(set_attr "type" "bit")])
+
+;; neg of an odd relop: compute neg(not(relop)) then flip the low bit.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (neg:SI (match_operator:SI 1 "odd_relop"
+ [(match_operand:CCEVEN 2 "register_operand" "r")
+ (const_int 0)])))
+ (clobber (match_operand:SI 3 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG"
+ [(set (match_dup 3) (neg:SI (not:SI (match_op_dup 1 [(match_dup 2)
+ (const_int 0)]))))
+ (set (match_dup 0) (xor:SI (match_dup 3) (const_int 1)))]
+ "")
+
+(define_insn
+ ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_operator:SI 1 "odd_relop"
+ [(match_operand:CCEVEN 2 "register_operand" "r")
+ (const_int 0)])))
+ (clobber (match_scratch:SI 3 "=r"))]
+ ""
+ "#")
+
+
+
+
+;; Conditional branch insns. The compare insns set a register
+;; rather than cc0 and record that register for use here. See above
+;; for the special case of cmpsi with a constant operand.
+;; When cmpsi deferred the compare (m88k_compare_reg is null), signed
+;; conditions branch directly with emit_bcnd; unsigned conditions have
+;; no bcnd form, so they materialize a cmp via emit_test and gen_bxx.
+
+(define_expand "bcnd"
+ [(set (pc)
+ (if_then_else (match_operand 0 "" "")
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ "gcc_assert (m88k_compare_reg == NULL_RTX);")
+
+(define_expand "bxx"
+ [(set (pc)
+ (if_then_else (match_operand 0 "" "")
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ "gcc_assert (m88k_compare_reg != NULL_RTX);")
+
+(define_expand "beq"
+ [(set (pc)
+ (if_then_else (eq (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "if (m88k_compare_reg == NULL_RTX)
+ {
+ emit_bcnd (EQ, operands[0]);
+ DONE;
+ }
+ operands[1] = m88k_compare_reg;")
+
+(define_expand "bne"
+ [(set (pc)
+ (if_then_else (ne (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "if (m88k_compare_reg == NULL_RTX)
+ {
+ emit_bcnd (NE, operands[0]);
+ DONE;
+ }
+ operands[1] = m88k_compare_reg;")
+
+(define_expand "bgt"
+ [(set (pc)
+ (if_then_else (gt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "if (m88k_compare_reg == NULL_RTX)
+ {
+ emit_bcnd (GT, operands[0]);
+ DONE;
+ }
+ operands[1] = m88k_compare_reg;")
+
+(define_expand "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "if (m88k_compare_reg == NULL_RTX)
+ {
+ emit_jump_insn (gen_bxx (emit_test (GTU, VOIDmode), operands[0]));
+ DONE;
+ }
+ operands[1] = m88k_compare_reg;")
+
+(define_expand "blt"
+ [(set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "if (m88k_compare_reg == NULL_RTX)
+ {
+ emit_bcnd (LT, operands[0]);
+ DONE;
+ }
+ operands[1] = m88k_compare_reg;")
+
+(define_expand "bltu"
+ [(set (pc)
+ (if_then_else (ltu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "if (m88k_compare_reg == NULL_RTX)
+ {
+ emit_jump_insn (gen_bxx (emit_test (LTU, VOIDmode), operands[0]));
+ DONE;
+ }
+ operands[1] = m88k_compare_reg;")
+
+(define_expand "bge"
+ [(set (pc)
+ (if_then_else (ge (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "if (m88k_compare_reg == NULL_RTX)
+ {
+ emit_bcnd (GE, operands[0]);
+ DONE;
+ }
+ operands[1] = m88k_compare_reg;")
+
+(define_expand "bgeu"
+ [(set (pc)
+ (if_then_else (geu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "if (m88k_compare_reg == NULL_RTX)
+ {
+ emit_jump_insn (gen_bxx (emit_test (GEU, VOIDmode), operands[0]));
+ DONE;
+ }
+ operands[1] = m88k_compare_reg;")
+
+(define_expand "ble"
+ [(set (pc)
+ (if_then_else (le (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "if (m88k_compare_reg == NULL_RTX)
+ {
+ emit_bcnd (LE, operands[0]);
+ DONE;
+ }
+ operands[1] = m88k_compare_reg;")
+
+(define_expand "bleu"
+ [(set (pc)
+ (if_then_else (leu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "if (m88k_compare_reg == NULL_RTX)
+ {
+ emit_jump_insn (gen_bxx (emit_test (LEU, VOIDmode), operands[0]));
+ DONE;
+ }
+ operands[1] = m88k_compare_reg;")
+
+;; The actual conditional branch instruction (both directions). This
+;; uses two unusual template patterns, %Rx and %Px. %Rx is a prefix code
+;; for the immediately following condition and reverses the condition iff
+;; operand `x' is a LABEL_REF. %Px does nothing if `x' is PC and outputs
+;; the operand if `x' is a LABEL_REF.
+
+;; Static branch prediction: mostly_false_jump picks bb0 (predicted
+;; not-taken form) versus bb1.
+(define_insn ""
+ [(set (pc) (if_then_else
+ (match_operator 0 "relop"
+ [(match_operand:CC 1 "register_operand" "r")
+ (const_int 0)])
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+{
+ if (mostly_false_jump (insn, operands[0]))
+ return "bb0%. %R2%C0,%1,%P2%P3";
+ else
+ return "bb1%. %R3%C0,%1,%P2%P3";
+}
+ [(set_attr "type" "branch")])
+
+;;
+;; Here branch prediction is sacrificed. To get it back, you need
+;; - CCODD (CC mode where the ODD bits are valid)
+;; - several define_split that can apply De Morgan's Law.
+;; - transformations between CCEVEN and CCODD modes.
+;;
+
+(define_insn ""
+ [(set (pc) (if_then_else
+ (match_operator 0 "even_relop"
+ [(match_operand:CCEVEN 1 "register_operand" "r")
+ (const_int 0)])
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bb%L2%. %C0,%1,%P2%P3"
+ [(set_attr "type" "branch")])
+
+;; Odd relops on a CCEVEN word branch on the complemented bit (%!)
+;; with the branch sense inverted (%L3 instead of %L2).
+(define_insn ""
+ [(set (pc) (if_then_else
+ (match_operator 0 "odd_relop"
+ [(match_operand:CCEVEN 1 "register_operand" "r")
+ (const_int 0)])
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bb%L3%. %!%C0,%1,%P2%P3"
+ [(set_attr "type" "branch")])
+
+;; Branch conditional on scc values. These arise from manipulations on
+;; compare words above.
+;; Are these really used ?
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ne (match_operator 0 "relop"
+ [(match_operand:CC 1 "register_operand" "r")
+ (const_int 0)])
+ (const_int 0))
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bb%L2 %C0,%1,%P2%P3"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ne (match_operator 0 "even_relop"
+ [(match_operand:CCEVEN 1 "register_operand" "r")
+ (const_int 0)])
+ (const_int 0))
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bb%L2 %C0,%1,%P2%P3"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ne (match_operator 0 "odd_relop"
+ [(match_operand:CCEVEN 1 "register_operand" "r")
+ (const_int 0)])
+ (const_int 0))
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bb%L3 %!%C0,%1,%P2%P3"
+ [(set_attr "type" "branch")])
+
+;; The eq-against-zero forms invert the branch sense relative to the
+;; ne forms above.
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (eq (match_operator 0 "relop"
+ [(match_operand:CC 1 "register_operand" "r")
+ (const_int 0)])
+ (const_int 0))
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bb%L3 %C0,%1,%P2%P3"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (eq (match_operator 0 "even_relop"
+ [(match_operand:CCEVEN 1 "register_operand" "r")
+ (const_int 0)])
+ (const_int 0))
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bb%L3 %C0,%1,%P2%P3"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (eq (match_operator 0 "odd_relop"
+ [(match_operand:CCEVEN 1 "register_operand" "r")
+ (const_int 0)])
+ (const_int 0))
+ (match_operand 2 "pc_or_label_ref" "")
+ (match_operand 3 "pc_or_label_ref" "")))]
+ ""
+ "bb%L2 %!%C0,%1,%P2%P3"
+ [(set_attr "type" "branch")])
+
+;; PIC address materialization: locate1/locate2 compute the difference
+;; between a label and the point of the bsr (the "#abdiff" relocation);
+;; locate2 also captures pc into r1 via the bsr.n delay slot.
+(define_insn "locate1"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (high:SI (unspec:SI [(label_ref (match_operand 1 "" ""))] UNSPEC_ABDIFF)))]
+ "flag_pic"
+ "or.u %0,%#r0,%#hi16(%1#abdiff)")
+
+(define_insn "locate2"
+ [(parallel [(set (reg:SI 1) (pc))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_dup 0)
+ (unspec:SI
+ [(label_ref (match_operand 1 "" ""))] UNSPEC_ABDIFF)))])]
+ "flag_pic"
+ "bsr.n %1\;or %0,%0,%#lo16(%1#abdiff)\\n%1:"
+ [(set_attr "length" "2")])
+
+;; SImode move instructions
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, SImode, NULL_RTX))
+ DONE;
+}")
+
+;; Secondary-reload path; the scratch operand 2 is passed down to
+;; emit_move_sequence, and the clobber itself is never emitted.
+(define_expand "reload_insi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "general_operand" ""))
+ (clobber (match_operand:SI 2 "register_operand" "=&r"))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, SImode, operands[2]))
+ DONE;
+
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+}")
+
+;; Main SImode move: GRF/XRF register moves, loads, stores, and the
+;; immediates reachable in one insn (small, negated small, set-bit).
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,m,r,r,r,x,x,x,m")
+ (match_operand:SI 1 "move_operand" "rI,m,rO,J,M,x,r,x,m,x"))]
+ "(register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)
+ || operands[1] == const0_rtx)"
+ "@
+ or %0,%#r0,%1
+ %V1ld\\t %0,%1
+ %v0st\\t %r1,%0
+ subu %0,%#r0,%n1
+ set %0,%#r0,%s1
+ mov.s %0,%1
+ mov.s %0,%1
+ mov %0,%1
+ %V1ld\\t %0,%1
+ %v0st\\t %1,%0"
+ [(set_attr "type" "arith,load,store,arith,bit,mov,mov,mov,load,store")])
+
+;; Constant loads; the last alternative needs the two-insn
+;; or.u/or sequence for arbitrary 32-bit constants.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r")
+ (match_operand:SI 1 "arith32_operand" "rI,J,L,M,n"))]
+ ""
+ "@
+ or %0,%#r0,%1
+ subu %0,%#r0,%n1
+ or.u %0,%#r0,%X1
+ set %0,%#r0,%s1
+ or.u %0,%#r0,%X1\;or %0,%0,%x1"
+ [(set_attr "type" "arith,arith,arith,bit,marith")])
+
+;; @@ Why the constraint "in"? Doesn't `i' include `n'?
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "in")))]
+ ""
+ "or %0,%1,%#lo16(%g2)")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (high:SI (match_operand 1 "" "")))]
+ ""
+ "or.u %0,%#r0,%#hi16(%g1)")
+
+;; For PIC, symbol_refs are put inside unspec so that the optimizer won't
+;; confuse them with real addresses.
+
+(define_insn "movsi_lo_sum_pic"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (unspec:SI [(match_operand:SI 2 "immediate_operand" "in")] UNSPEC_GOT_REL)))]
+ "flag_pic"
+ "or %0,%1,%#lo16(%g2)"
+ ;; Need to set length for this arith insn because operand2
+ ;; is not an "arith_operand".
+ [(set_attr "length" "1")])
+
+(define_insn "movsi_high_pic"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (high:SI (unspec:SI [(match_operand 1 "" "")] UNSPEC_GOT_REL)))]
+ "flag_pic"
+ "or.u %0,%#r0,%#hi16(%g1)"
+ ;; Need to set length for this arith insn because operand2
+ ;; is not an arith_operand.
+ [(set_attr "length" "1")])
+
+;; HImode move instructions
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, HImode, NULL_RTX))
+ DONE;
+}")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r")
+ (match_operand:HI 1 "move_operand" "rP,m,rO,N"))]
+ "(register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)
+ || operands[1] == const0_rtx)"
+ "@
+ or %0,%#r0,%h1
+ %V1ld.hu\\t %0,%1
+ %v0st.h\\t %r1,%0
+ subu %0,%#r0,%H1"
+ [(set_attr "type" "arith,load,store,arith")])
+
+(define_insn ""
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (subreg:HI (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "in")) 0))]
+ "!flag_pic"
+ "or %0,%1,%#lo16(%2)")
+
+;; QImode move instructions
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, QImode, NULL_RTX))
+ DONE;
+}")
+
+;; NOTE(review): the last alternative writes "subu %r0,..." where the
+;; HImode twin above uses "subu %0,...".  For a register destination
+;; the %r modifier should print the same text, but the inconsistency
+;; is worth confirming against the %r output code in m88k.c.
+(define_insn ""
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,r")
+ (match_operand:QI 1 "move_operand" "rP,m,rO,N"))]
+ "(register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)
+ || operands[1] == const0_rtx)"
+ "@
+ or %0,%#r0,%q1
+ %V1ld.bu\\t %0,%1
+ %v0st.b\\t %r1,%0
+ subu %r0,%#r0,%Q1"
+ [(set_attr "type" "arith,load,store,arith")])
+
+(define_insn ""
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (subreg:QI (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "in")) 0))]
+ "!flag_pic"
+ "or %0,%1,%#lo16(%2)")
+
+;; DImode move instructions
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, DImode, NULL_RTX))
+ DONE;
+}")
+
+;; Clear a 64-bit register: two "or rN,r0,0" for a GPR pair, or a
+;; single mov from x0 for an 88110 extended (XRF) register.
+;; %d prints the second (odd) register of a GPR pair.
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,x")
+ (const_int 0))]
+ ""
+ "@
+ or %0,%#r0,0\;or %d0,%#r0,0
+ mov %0,%#x0"
+ [(set_attr "type" "marith,mov")])
+
+;; General DImode moves among GPR pairs, XRF registers and memory.
+;; Alternatives: r<-r (two ors), r<-m, m<-r, r<-x, x<-r, x<-x, x<-m, m<-x.
+(define_insn ""
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,m,r,x,x,x,m")
+ (match_operand:DI 1 "nonimmediate_operand" "r,m,r,x,r,x,m,x"))]
+ ""
+ "@
+ or %0,%#r0,%1\;or %d0,%#r0,%d1
+ %V1ld.d\\t %0,%1
+ %v0st.d\\t %1,%0
+ mov.d %0,%1
+ mov.d %0,%1
+ mov %0,%1
+ %V1ld.d\\t %0,%1
+ %v0st.d\\t %1,%0"
+ [(set_attr "type" "marith,loadd,store,mov,mov,mov,loadd,store")])
+
+;; lo16 half of a split symbolic address, DImode flavor (non-PIC).
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (subreg:DI (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "in")) 0))]
+ "!flag_pic"
+ "or %0,%1,%#lo16(%2)")
+
+;; Load an arbitrary 64-bit integer constant; the instruction sequence
+;; is synthesized by output_load_const_dimode() in m88k.c.
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "immediate_operand" "n"))]
+ ""
+ "* return output_load_const_dimode (operands);"
+ [(set_attr "type" "marith")
+ (set_attr "length" "4")]) ; length is 2, 3 or 4.
+
+;; DFmode move instructions
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, DFmode, NULL_RTX))
+ DONE;
+}")
+
+;; After reload, split a GPR-to-GPR DFmode copy into its two
+;; word-sized halves (XRF registers are excluded: those are moved
+;; with a single mov.d by the insn below).
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (match_operand:DF 1 "register_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG && !XRF_REGNO_P (REGNO (operands[0]))
+ && GET_CODE (operands[1]) == REG && !XRF_REGNO_P (REGNO (operands[1]))"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+ "
+{ operands[2] = operand_subword (operands[0], 0, 0, DFmode);
+ operands[3] = operand_subword (operands[1], 0, 0, DFmode);
+ operands[4] = operand_subword (operands[0], 1, 0, DFmode);
+ operands[5] = operand_subword (operands[1], 1, 0, DFmode); }")
+
+;; Clear a DFmode register (value +0.0 has an all-zero bit pattern).
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (const_int 0))]
+ ""
+ "@
+ or %0,%#r0,0\;or %d0,%#r0,0
+ mov %0,%#x0"
+ [(set_attr "type" "marith,mov")])
+
+;; General DFmode moves; same register-class structure as movdi above,
+;; but with x<-r / r<-x alternatives swapped in order.
+(define_insn ""
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m,x,r,x,x,m")
+ (match_operand:DF 1 "nonimmediate_operand" "r,m,r,r,x,x,m,x"))]
+ ""
+ "@
+ or %0,%#r0,%1\;or %d0,%#r0,%d1
+ %V1ld.d\\t %0,%1
+ %v0st.d\\t %1,%0
+ mov.d %0,%1
+ mov.d %0,%1
+ mov %0,%1
+ %V1ld.d\\t %0,%1
+ %v0st.d\\t %1,%0"
+ [(set_attr "type" "marith,loadd,store,mov,mov,mov,loadd,store")])
+
+;; lo16 half of a split symbolic address, DFmode flavor (non-PIC).
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (subreg:DF (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "in")) 0))]
+ "!flag_pic"
+ "or %0,%1,%#lo16(%2)")
+
+;; Load a double constant into a GPR pair; sequence synthesized by
+;; output_load_const_double() in m88k.c.
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (match_operand:DF 1 "immediate_operand" "F"))]
+ ""
+ "* return output_load_const_double (operands);"
+ [(set_attr "type" "marith")
+ (set_attr "length" "4")]) ; length is 2, 3, or 4.
+
+;; SFmode move instructions
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, SFmode, NULL_RTX))
+ DONE;
+}")
+
+;; Clear an SFmode register (+0.0 is the all-zero bit pattern).
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=r,x")
+ (const_int 0))]
+ ""
+ "@
+ or %0,%#r0,0
+ mov %0,%#x0"
+ [(set_attr "type" "arith,mov")])
+
+;; General SFmode moves among GPRs, XRF registers and memory.
+(define_insn ""
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m,x,r,x,x,m")
+ (match_operand:SF 1 "nonimmediate_operand" "r,m,r,r,x,x,m,x"))]
+ ""
+ "@
+ or %0,%#r0,%1
+ %V1ld\\t %0,%1
+ %v0st\\t %r1,%0
+ mov.s %0,%1
+ mov.s %0,%1
+ mov %0,%1
+ %V1ld\\t %0,%1
+ %v0st\\t %r1,%0"
+ [(set_attr "type" "arith,load,store,mov,mov,mov,load,store")])
+
+;; lo16 half of a split symbolic address, SFmode flavor (non-PIC).
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (subreg:SF (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "in")) 0))]
+ "!flag_pic"
+ "or %0,%1,%#lo16(%2)")
+
+;; Load a nonzero float constant (zero is handled by the clear insn
+;; above); sequence synthesized by output_load_const_float().
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (match_operand:SF 1 "immediate_operand" "F"))]
+ "operands[1] != const0_rtx"
+ "* return output_load_const_float (operands);"
+ [(set_attr "type" "marith")]) ; length is 1 or 2.
+
+;; CCmode move instructions
+
+;; These are a subset of the SImode move instructions.  They are necessary
+;; because the reload pass may elect to store reg:CC registers in memory,
+;; and read them back.
+
+(define_expand "movcc"
+ [(set (match_operand:CC 0 "general_operand" "")
+ (match_operand:CC 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, CCmode, NULL_RTX))
+ DONE;
+}")
+
+;; CCmode reg/load/store; mirrors the SImode move alternatives.
+(define_insn ""
+ [(set (match_operand:CC 0 "nonimmediate_operand" "=r,r,m")
+ (match_operand:CC 1 "move_operand" "rI,m,rO"))]
+ "(register_operand (operands[0], CCmode)
+ || register_operand (operands[1], CCmode)
+ || operands[1] == const0_rtx)"
+ "@
+ or %0,%#r0,%1
+ %V1ld\\t %0,%1
+ %v0st\\t %r1,%0"
+ [(set_attr "type" "arith,load,store")])
+
+
+;; String/block move insn.  See m88k.c for details.
+
+;; operand 2 is the byte count, operand 3 the (known) alignment.
+;; The address operands are forced into SImode registers here;
+;; expand_block_move() emits the actual copy sequence.
+(define_expand "movstrsi"
+ [(parallel [(set (mem:BLK (match_operand:BLK 0 "" ""))
+ (mem:BLK (match_operand:BLK 1 "" "")))
+ (use (match_operand:SI 2 "arith32_operand" ""))
+ (use (match_operand:SI 3 "immediate_operand" ""))])]
+ ""
+ "
+{
+ rtx dest_mem = operands[0];
+ rtx src_mem = operands[1];
+ operands[0] = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
+ operands[1] = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+ expand_block_move (dest_mem, src_mem, operands);
+ DONE;
+}")
+
+;; Call a non-looping block move library function (e.g. __movstrSI96x64).
+;; operand 0 is the function name
+;; operand 1 is the destination pointer
+;; operand 2 is the source pointer
+;; operand 3 is the offset for the source and destination pointers
+;; operand 4 is the first value to be loaded
+;; operand 5 is the register to hold the value (r4 or r5, or r4 or r6 if DImode)
+
+;; r2/r3 are the biased destination/source pointers passed to the
+;; library routine; the first datum is preloaded so the callee can
+;; overlap its first store with the call overhead.  r1 (the return
+;; address register) is clobbered by the call itself.
+(define_expand "call_block_move"
+ [(set (reg:SI 3) (minus:SI (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "immediate_operand" "")))
+ (set (match_operand 5 "register_operand" "")
+ (match_operand 4 "memory_operand" ""))
+ (set (reg:SI 2) (minus:SI (match_operand:SI 1 "register_operand" "")
+ (match_dup 3)))
+ (use (reg:SI 2))
+ (use (reg:SI 3))
+ (use (reg:SI 4))
+ (use (reg:SI 5))
+ (parallel [(set (reg:DI 2)
+ (call (mem:SI (match_operand 0 "" ""))
+ (const_int 0)))
+ (clobber (reg:SI 1))])]
+ ""
+ "")
+
+;; Same as call_block_move, but the preloaded datum is DImode, so the
+;; value registers are the even/odd pairs r4/r5 or r6/r7.
+(define_expand "call_block_move_DI"
+ [(set (reg:SI 3) (minus:SI (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "immediate_operand" "")))
+ (set (match_operand 5 "register_operand" "")
+ (match_operand 4 "memory_operand" ""))
+ (set (reg:SI 2) (minus:SI (match_operand:SI 1 "register_operand" "")
+ (match_dup 3)))
+ (use (reg:SI 2))
+ (use (reg:SI 3))
+ (use (reg:DI 4))
+ (use (reg:DI 6))
+ (parallel [(set (reg:DI 2)
+ (call (mem:SI (match_operand 0 "" ""))
+ (const_int 0)))
+ (clobber (reg:SI 1))])]
+ ""
+ "")
+
+;; Call an SImode looping block move library function (e.g. __movstrSI64n68).
+;; operands 0-5 as in the non-looping interface
+;; operand 6 is the loop count
+
+(define_expand "call_movstrsi_loop"
+ [(set (reg:SI 3) (minus:SI (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "immediate_operand" "")))
+ (set (match_operand:SI 5 "register_operand" "")
+ (match_operand 4 "memory_operand" ""))
+ (set (reg:SI 2) (minus:SI (match_operand:SI 1 "register_operand" "")
+ (match_dup 3)))
+ (set (reg:SI 6) (match_operand:SI 6 "immediate_operand" ""))
+ (use (reg:SI 2))
+ (use (reg:SI 3))
+ (use (match_dup 5))
+ (use (reg:SI 6))
+ (parallel [(set (reg:DI 2)
+ (call (mem:SI (match_operand 0 "" ""))
+ (const_int 0)))
+ (clobber (reg:SI 1))])]
+ ""
+ "")
+
+;;- zero extension instructions
+
+;; Memory operands with symbolic addresses are legitimized first so
+;; the extending load insns below only see simple addresses.
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[1]) == MEM
+ && symbolic_operand (XEXP (operands[1], 0), SImode))
+ operands[1]
+ = legitimize_address (flag_pic, operands[1], 0, 0);
+}")
+
+;; HI->SI zero extension: mask for registers, or-immediate for known
+;; constants, unsigned halfword load for memory.  CONST_INT sources
+;; are excluded (the move patterns handle those directly).
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (zero_extend:SI (match_operand:HI 1 "move_operand" "!r,n,m")))]
+ "GET_CODE (operands[1]) != CONST_INT"
+ "@
+ mask %0,%1,0xffff
+ or %0,%#r0,%h1
+ %V1ld.hu\\t %0,%1"
+ [(set_attr "type" "arith,arith,load")])
+
+(define_expand "zero_extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[1]) == MEM
+ && symbolic_operand (XEXP (operands[1], 0), HImode))
+ operands[1]
+ = legitimize_address (flag_pic, operands[1], 0, 0);
+}")
+
+;; QI->HI zero extension, same three-way split as above.
+(define_insn ""
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r")
+ (zero_extend:HI (match_operand:QI 1 "move_operand" "r,n,m")))]
+ "GET_CODE (operands[1]) != CONST_INT"
+ "@
+ mask %0,%1,0xff
+ or %0,%#r0,%q1
+ %V1ld.bu\\t %0,%1"
+ [(set_attr "type" "arith,arith,load")])
+
+;; QI->SI: after legitimizing a symbolic address, re-emit the extend
+;; explicitly and finish, otherwise fall through to the insn below.
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[1]) == MEM
+ && symbolic_operand (XEXP (operands[1], 0), SImode))
+ {
+ operands[1]
+ = legitimize_address (flag_pic, operands[1], 0, 0);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_ZERO_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (zero_extend:SI (match_operand:QI 1 "move_operand" "r,n,m")))]
+ "GET_CODE (operands[1]) != CONST_INT"
+ "@
+ mask %0,%1,0xff
+ or %0,%#r0,%q1
+ %V1ld.bu\\t %0,%1"
+ [(set_attr "type" "arith,arith,load")])
+
+;;- sign extension instructions
+
+;; SI->DI sign extension is always split after reload into a copy of
+;; the low word plus an arithmetic shift for the high word.
+(define_insn "extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "#")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI (match_operand:SI 1 "register_operand" "")))]
+ "reload_completed
+ && GET_CODE (operands[0]) == REG
+ && GET_CODE (operands[1]) == REG"
+ [(set (subreg:SI (match_dup 0) 4) (match_dup 1))
+ (set (subreg:SI (match_dup 0) 0)
+ (ashiftrt:SI (match_dup 1) (const_int 31)))]
+ "")
+
+;; Symbolic memory addresses are legitimized here, as for zero-extend.
+(define_expand "extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[1]) == MEM
+ && symbolic_operand (XEXP (operands[1], 0), SImode))
+ operands[1]
+ = legitimize_address (flag_pic, operands[1], 0, 0);
+}")
+
+;; HI->SI sign extension: bit-field extract for registers, or/subu for
+;; positive/negative constants, signed halfword load for memory.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (sign_extend:SI (match_operand:HI 1 "move_operand" "!r,P,N,m")))]
+ "GET_CODE (operands[1]) != CONST_INT"
+ "@
+ ext %0,%1,16<0>
+ or %0,%#r0,%h1
+ subu %0,%#r0,%H1
+ %V1ld.h\\t %0,%1"
+ [(set_attr "type" "bit,arith,arith,load")])
+
+(define_expand "extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[1]) == MEM
+ && symbolic_operand (XEXP (operands[1], 0), HImode))
+ operands[1]
+ = legitimize_address (flag_pic, operands[1], 0, 0);
+}")
+
+;; QI->HI sign extension, same structure with 8-bit extract.
+(define_insn ""
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
+ (sign_extend:HI (match_operand:QI 1 "move_operand" "!r,P,N,m")))]
+ "GET_CODE (operands[1]) != CONST_INT"
+ "@
+ ext %0,%1,8<0>
+ or %0,%#r0,%q1
+ subu %0,%#r0,%Q1
+ %V1ld.b\\t %0,%1"
+ [(set_attr "type" "bit,arith,arith,load")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[1]) == MEM
+ && symbolic_operand (XEXP (operands[1], 0), SImode))
+ operands[1]
+ = legitimize_address (flag_pic, operands[1], 0, 0);
+}")
+
+;; QI->SI sign extension, identical template to QI->HI.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (sign_extend:SI (match_operand:QI 1 "move_operand" "!r,P,N,m")))]
+ "GET_CODE (operands[1]) != CONST_INT"
+ "@
+ ext %0,%1,8<0>
+ or %0,%#r0,%q1
+ subu %0,%#r0,%Q1
+ %V1ld.b\\t %0,%1"
+ [(set_attr "type" "bit,arith,arith,load")])
+
+;; Conversions between float and double.
+
+;; The fadd instruction does not conform to IEEE 754 when used to
+;; convert between float and double.  In particular, the sign of -0 is
+;; not preserved.  Interestingly, fsub does conform.
+
+(define_expand "extendsfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (float_extend:DF (match_operand:SF 1 "register_operand" "r")))]
+ ""
+ "")
+
+;; Pre-88110: widen by subtracting r0 with fsub (see IEEE note above).
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (float_extend:DF (match_operand:SF 1 "register_operand" "r")))]
+ "! TARGET_88110"
+ "fsub.dss %0,%1,%#r0"
+ [(set_attr "type" "spadd")])
+
+;; 88110 has a dedicated convert instruction, also usable on XRF regs.
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (float_extend:DF (match_operand:SF 1 "register_operand" "r,x")))]
+ "TARGET_88110"
+ "fcvt.ds %0,%1"
+ [(set_attr "type" "spadd")])
+
+(define_expand "truncdfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (float_truncate:SF (match_operand:DF 1 "register_operand" "r")))]
+ ""
+ "")
+
+;; Pre-88110 narrowing, again via fsub of r0.
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (float_truncate:SF (match_operand:DF 1 "register_operand" "r")))]
+ "! TARGET_88110"
+ "fsub.sds %0,%1,%#r0"
+ [(set_attr "type" "dpadd")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=r,x")
+ (float_truncate:SF (match_operand:DF 1 "register_operand" "r,x")))]
+ "TARGET_88110"
+ "fcvt.sd %0,%1"
+ [(set_attr "type" "dpadd")])
+
+;; Conversions between floating point and integer
+
+;; Signed SI -> DF conversion.
+(define_insn "floatsidf2"
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (float:DF (match_operand:SI 1 "register_operand" "r,r")))]
+ ""
+ "flt.ds %0,%1"
+ [(set_attr "type" "spadd,dpadd")])
+
+;; Signed SI -> SF conversion.
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "register_operand" "=r,x")
+ (float:SF (match_operand:SI 1 "register_operand" "r,r")))]
+ ""
+ "flt.ss %0,%1"
+ [(set_attr "type" "spadd,spadd")])
+
+;; Truncate toward zero: DF -> signed SI.
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (fix:SI (match_operand:DF 1 "register_operand" "r,x")))]
+ ""
+ "trnc.sd %0,%1"
+ [(set_attr "type" "dpadd,dpadd")])
+
+;; Truncate toward zero: SF -> signed SI.
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (fix:SI (match_operand:SF 1 "register_operand" "r,x")))]
+ ""
+ "trnc.ss %0,%1"
+ [(set_attr "type" "spadd,dpadd")])
+
+
+;;- arithmetic instructions
+;;- add instructions
+
+;; 32-bit add: addu for register/positive-immediate, subu of the
+;; negated immediate (%n2) when the constant matches 'J' (negatable).
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (plus:SI (match_operand:SI 1 "arith32_operand" "%r,r")
+ (match_operand:SI 2 "arith32_operand" "rI,J")))]
+ ""
+ "@
+ addu %0,%1,%2
+ subu %0,%1,%n2")
+
+;; patterns for mixed mode floating point.
+;; Do not define patterns that utilize mixed mode arithmetic that result
+;; in narrowing the precision, because it loses accuracy, since the standard
+;; requires double rounding, whereas the 88000 instruction only rounds once.
+
+;; legitimize_operand() may narrow a wide constant operand to an SF
+;; float_extend so the mixed-mode insns below can match.
+(define_expand "adddf3"
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (plus:DF (match_operand:DF 1 "general_operand" "%r,x")
+ (match_operand:DF 2 "general_operand" "r,x")))]
+ ""
+ "
+{
+ operands[1] = legitimize_operand (operands[1], DFmode);
+ operands[2] = legitimize_operand (operands[2], DFmode);
+}")
+
+;; Mixed-mode adds: the suffix encodes dest/src1/src2 precisions.
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (plus:DF (float_extend:DF (match_operand:SF 1 "register_operand" "r,x"))
+ (float_extend:DF (match_operand:SF 2 "register_operand" "r,x"))))]
+ ""
+ "fadd.dss %0,%1,%2"
+ [(set_attr "type" "spadd")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (plus:DF (match_operand:DF 1 "register_operand" "r,x")
+ (float_extend:DF (match_operand:SF 2 "register_operand" "r,x"))))]
+ ""
+ "fadd.dds %0,%1,%2"
+ [(set_attr "type" "dpadd")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (plus:DF (float_extend:DF (match_operand:SF 1 "register_operand" "r,x"))
+ (match_operand:DF 2 "register_operand" "r,x")))]
+ ""
+ "fadd.dsd %0,%1,%2"
+ [(set_attr "type" "dpadd")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (plus:DF (match_operand:DF 1 "register_operand" "%r,x")
+ (match_operand:DF 2 "register_operand" "r,x")))]
+ ""
+ "fadd.ddd %0,%1,%2"
+ [(set_attr "type" "dpadd")])
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r,x")
+ (plus:SF (match_operand:SF 1 "register_operand" "%r,x")
+ (match_operand:SF 2 "register_operand" "r,x")))]
+ ""
+ "fadd.sss %0,%1,%2"
+ [(set_attr "type" "spadd")])
+
+;; DI + zero_extend(SI): add the low words with carry out, then
+;; propagate the carry into the high word.  The carry lives in the
+;; PSR, modeled here as (reg:CC 0), hence the clobber.
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "register_operand" "r")
+ (zero_extend:DI
+ (match_operand:SI 2 "register_operand" "r"))))
+ (clobber (reg:CC 0))]
+ ""
+ "addu.co %d0,%d1,%2\;addu.ci %0,%1,%#r0"
+ [(set_attr "type" "marith")])
+
+;; Commuted form of the pattern above.
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (zero_extend:DI
+ (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:CC 0))]
+ ""
+ "addu.co %d0,%1,%d2\;addu.ci %0,%#r0,%2"
+ [(set_attr "type" "marith")])
+
+;; Full 64-bit add as a carry-out / carry-in pair.
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "register_operand" "%r")
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:CC 0))]
+ ""
+ "addu.co %d0,%d1,%d2\;addu.ci %0,%1,%2"
+ [(set_attr "type" "marith")])
+
+;; Add with carry insns.
+
+;; unspec 0 represents the carry produced/consumed by addu.co/addu.ci.
+(define_insn ""
+ [(parallel [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (match_operand:SI 1 "reg_or_0_operand" "rO")
+ (match_operand:SI 2 "reg_or_0_operand" "rO")))
+ (set (reg:CC 0)
+ (unspec:CC [(match_dup 1) (match_dup 2)] 0))])]
+ ""
+ "addu.co %r0,%r1,%r2")
+
+;; Carry-out only: the sum itself is discarded into r0.
+(define_insn ""
+ [(set (reg:CC 0) (unspec:CC [(match_operand:SI 0 "reg_or_0_operand" "rO")
+ (match_operand:SI 1 "reg_or_0_operand" "rO")]
+ 0))]
+ ""
+ "addu.co %#r0,%r0,%r1")
+
+;; Add consuming the previously set carry bit.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (match_operand:SI 1 "reg_or_0_operand" "rO")
+ (unspec:SI [(match_operand:SI 2 "reg_or_0_operand" "rO")
+ (reg:CC 0)] 0)))]
+ ""
+ "addu.ci %r0,%r1,%r2")
+
+;;- subtract instructions
+
+;; 32-bit subtract; immediate subtrahends use the 'I' range directly.
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith32_operand" "rI")))]
+ ""
+ "subu %0,%1,%2")
+
+;; patterns for mixed mode floating point
+;; Do not define patterns that utilize mixed mode arithmetic that result
+;; in narrowing the precision, because it loses accuracy, since the standard
+;; requires double rounding, whereas the 88000 instruction only rounds once.
+
+(define_expand "subdf3"
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (minus:DF (match_operand:DF 1 "general_operand" "r,x")
+ (match_operand:DF 2 "general_operand" "r,x")))]
+ ""
+ "
+{
+ operands[1] = legitimize_operand (operands[1], DFmode);
+ operands[2] = legitimize_operand (operands[2], DFmode);
+}")
+
+;; Mixed-mode subtracts; suffix encodes dest/src1/src2 precisions.
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (minus:DF (float_extend:DF (match_operand:SF 1 "register_operand" "r,x"))
+ (float_extend:DF (match_operand:SF 2 "register_operand" "r,x"))))]
+ ""
+ "fsub.dss %0,%1,%2"
+ [(set_attr "type" "spadd")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (minus:DF (match_operand:DF 1 "register_operand" "r,x")
+ (float_extend:DF (match_operand:SF 2 "register_operand" "r,x"))))]
+ ""
+ "fsub.dds %0,%1,%2"
+ [(set_attr "type" "dpadd")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (minus:DF (float_extend:DF (match_operand:SF 1 "register_operand" "r,x"))
+ (match_operand:DF 2 "register_operand" "r,x")))]
+ ""
+ "fsub.dsd %0,%1,%2"
+ [(set_attr "type" "dpadd")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (minus:DF (match_operand:DF 1 "register_operand" "r,x")
+ (match_operand:DF 2 "register_operand" "r,x")))]
+ ""
+ "fsub.ddd %0,%1,%2"
+ [(set_attr "type" "dpadd")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r,x")
+ (minus:SF (match_operand:SF 1 "register_operand" "r,x")
+ (match_operand:SF 2 "register_operand" "r,x")))]
+ ""
+ "fsub.sss %0,%1,%2"
+ [(set_attr "type" "spadd")])
+
+;; DI - zero_extend(SI): subtract low words with borrow out, then
+;; propagate the borrow into the high word (borrow modeled as reg:CC 0).
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (zero_extend:DI
+ (match_operand:SI 2 "register_operand" "r"))))
+ (clobber (reg:CC 0))]
+ ""
+ "subu.co %d0,%d1,%2\;subu.ci %0,%1,%#r0"
+ [(set_attr "type" "marith")])
+
+;; zero_extend(SI) - DI variant.
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (zero_extend:DI
+ (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:CC 0))]
+ ""
+ "subu.co %d0,%1,%d2\;subu.ci %0,%#r0,%2"
+ [(set_attr "type" "marith")])
+
+;; Full 64-bit subtract as a borrow-out / borrow-in pair.
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:CC 0))]
+ ""
+ "subu.co %d0,%d1,%d2\;subu.ci %0,%1,%2"
+ [(set_attr "type" "marith")])
+
+;; Subtract with carry insns.
+
+;; unspec 1 represents the borrow produced/consumed by subu.co/subu.ci.
+(define_insn ""
+ [(parallel [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rO")
+ (match_operand:SI 2 "reg_or_0_operand" "rO")))
+ (set (reg:CC 0)
+ (unspec:CC [(match_dup 1) (match_dup 2)] 1))])]
+ ""
+ "subu.co %r0,%r1,%r2")
+
+;; Borrow-out only; the difference is discarded into r0.
+(define_insn ""
+ [(set (reg:CC 0) (unspec:CC [(match_operand:SI 0 "reg_or_0_operand" "rO")
+ (match_operand:SI 1 "reg_or_0_operand" "rO")]
+ 1))]
+ ""
+ "subu.co %#r0,%r0,%r1")
+
+;; Subtract consuming the previously set borrow bit.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rO")
+ (unspec:SI [(match_operand:SI 2 "reg_or_0_operand" "rO")
+ (reg:CC 0)] 1)))]
+ ""
+ "subu.ci %r0,%r1,%r2")
+
+;;- multiply instructions
+;;
+;; There is an unfounded silicon errata for E.1 requiring that an
+;; immediate constant value in div/divu/mul instructions be less than
+;; 0x800.  This is no longer provided for.
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "arith32_operand" "%r")
+ (match_operand:SI 2 "arith32_operand" "rI")))]
+ ""
+ "mul %0,%1,%2"
+ [(set_attr "type" "imul")])
+
+;; 32x32 -> 64-bit unsigned widening multiply; mulu.d exists only on
+;; the 88110.
+(define_insn "umulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
+ "TARGET_88110"
+ "mulu.d %0,%1,%2"
+ [(set_attr "type" "imul")])
+
+;; patterns for mixed mode floating point
+;; Do not define patterns that utilize mixed mode arithmetic that result
+;; in narrowing the precision, because it loses accuracy, since the standard
+;; requires double rounding, whereas the 88000 instruction only rounds once.
+
+(define_expand "muldf3"
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (mult:DF (match_operand:DF 1 "general_operand" "%r,x")
+ (match_operand:DF 2 "general_operand" "r,x")))]
+ ""
+ "
+{
+ operands[1] = legitimize_operand (operands[1], DFmode);
+ operands[2] = legitimize_operand (operands[2], DFmode);
+}")
+
+;; Mixed-mode multiplies; suffix encodes dest/src1/src2 precisions.
+;; NOTE(review): the .dds/.dsd forms are typed "spmul" while fmul.ddd
+;; is "dpmul" -- presumably a scheduling-model choice; confirm against
+;; the 88100 timing tables when the pipeline description is restored.
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (mult:DF (float_extend:DF (match_operand:SF 1 "register_operand" "r,x"))
+ (float_extend:DF (match_operand:SF 2 "register_operand" "r,x"))))]
+ ""
+ "fmul.dss %0,%1,%2"
+ [(set_attr "type" "spmul")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (mult:DF (match_operand:DF 1 "register_operand" "r,x")
+ (float_extend:DF (match_operand:SF 2 "register_operand" "r,x"))))]
+ ""
+ "fmul.dds %0,%1,%2"
+ [(set_attr "type" "spmul")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (mult:DF (float_extend:DF (match_operand:SF 1 "register_operand" "r,x"))
+ (match_operand:DF 2 "register_operand" "r,x")))]
+ ""
+ "fmul.dsd %0,%1,%2"
+ [(set_attr "type" "spmul")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (mult:DF (match_operand:DF 1 "register_operand" "%r,x")
+ (match_operand:DF 2 "register_operand" "r,x")))]
+ ""
+ "fmul.ddd %0,%1,%2"
+ [(set_attr "type" "dpmul")])
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r,x")
+ (mult:SF (match_operand:SF 1 "register_operand" "%r,x")
+ (match_operand:SF 2 "register_operand" "r,x")))]
+ ""
+ "fmul.sss %0,%1,%2"
+ [(set_attr "type" "spmul")])
+
+;;- divide instructions
+;;
+;; The 88k div and divu instructions don't reliably trap on
+;; divide-by-zero. A trap to vector 503 asserts divide-by-zero. The
+;; general scheme for doing divide is to do a 4-way split based on the
+;; sign of the two operands and do the appropriate negates.
+;;
+;; The conditional trap instruction is not used as this serializes the
+;; processor. Instead a conditional branch and an unconditional trap
+;; are used, but after the divu. Since the divu takes up to 38 cycles,
+;; the conditional branch is essentially free.
+;;
+;; Two target options control how divide is done.  One option selects
+;; whether to do the branch and negate scheme instead of using the div
+;; instruction; the other option selects whether to explicitly check
+;; for divide-by-zero or take your chances. If the div instruction is
+;; used, the O/S must complete the operation if the operands are
+;; negative. The O/S will signal an overflow condition if the most
+;; negative number (-2147483648) is divided by negative 1.
+;;
+;; There is an unfounded silicon errata for E.1 requiring that an
+;; immediate constant value in div/divu/mul instructions be less than
+;; 0x800. This is no longer provided for.
+
+;; Division by 0 trap
+;; Unconditional trap to vector 503 (the divide-by-zero vector).
+(define_insn "trap_divide_by_zero"
+ [(trap_if (const_int 1) (const_int 503))]
+ ""
+ "tb0 0,%#r0,503"
+ [(set_attr "type" "weird")])
+
+;; Conditional division by 0 trap.
+;; Expands to compare/branch-around/trap rather than a conditional
+;; trap instruction, which would serialize the processor (see the
+;; divide commentary above).  Operand 1 is the branch-around label.
+(define_expand "tcnd_divide_by_zero"
+ [(set (pc)
+ (if_then_else (eq (match_operand:SI 0 "register_operand" "")
+ (const_int 0))
+ (pc)
+ (match_operand 1 "" "")))
+ (trap_if (const_int 1) (const_int 503))]
+ ""
+ "
+{
+ emit_insn (gen_cmpsi (operands[0], const0_rtx));
+ emit_jump_insn (gen_bne (operands[1]));
+ emit_insn (gen_trap_divide_by_zero ());
+ DONE;
+}")
+
+;; Signed 32-bit division.  When TARGET_USE_DIV, emit the hardware div
+;; (with an optional divide-by-zero check); otherwise synthesize signed
+;; division from unsigned divu via a sign-based 4-way split, negating
+;; operands and/or the result as needed.  gen_dummy() keeps op0 live
+;; across the emitted control flow.
+(define_expand "divsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (div:SI (match_operand:SI 1 "arith32_operand" "")
+ (match_operand:SI 2 "arith32_operand" "")))]
+ ""
+ "
+{
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+ rtx op2 = operands[2];
+ rtx join_label;
+
+ /* @@ This needs to be reworked.  Torbjorn Granlund has suggested making
+ it a runtime (perhaps quite special).  */
+
+ if (GET_CODE (op1) == CONST_INT)
+ op1 = force_reg (SImode, op1);
+
+ else if (GET_CODE (op2) == CONST_INT
+ && ! SMALL_INT (operands[2]))
+ op2 = force_reg (SImode, op2);
+
+ /* Division by a literal zero always traps.  */
+ if (op2 == const0_rtx)
+ {
+ emit_insn (gen_trap_divide_by_zero ());
+ emit_insn (gen_dummy (op0));
+ DONE;
+ }
+
+ if (TARGET_USE_DIV)
+ {
+ emit_move_insn (op0, gen_rtx_DIV (SImode, op1, op2));
+ if (TARGET_CHECK_ZERO_DIV && GET_CODE (op2) != CONST_INT)
+ {
+ rtx label = gen_label_rtx ();
+ emit_insn (gen_tcnd_divide_by_zero (op2, label));
+ emit_label (label);
+ emit_insn (gen_dummy (op0));
+ }
+ DONE;
+ }
+
+ join_label = gen_label_rtx ();
+ /* Constant dividend: only the divisor's sign needs a runtime test.  */
+ if (GET_CODE (op1) == CONST_INT)
+ {
+ int neg = FALSE;
+ rtx neg_op2 = gen_reg_rtx (SImode);
+ rtx label1 = gen_label_rtx ();
+
+ if (INTVAL (op1) < 0)
+ {
+ neg = TRUE;
+ op1 = GEN_INT (-INTVAL (op1));
+ }
+ op1 = force_reg (SImode, op1);
+
+ emit_insn (gen_negsi2 (neg_op2, op2));
+ emit_insn (gen_cmpsi (op2, const0_rtx));
+ emit_jump_insn (gen_bgt (label1));
+ /* constant / 0-or-negative */
+ emit_move_insn (op0, gen_rtx_UDIV (SImode, op1, neg_op2));
+ if (!neg)
+ emit_insn (gen_negsi2 (op0, op0));
+
+ if (TARGET_CHECK_ZERO_DIV)
+ emit_insn (gen_tcnd_divide_by_zero (op2, join_label));
+ emit_jump_insn (gen_jump (join_label));
+ emit_barrier ();
+
+ emit_label (label1); /* constant / positive */
+ emit_move_insn (op0, gen_rtx_UDIV (SImode, op1, op2));
+ if (neg)
+ emit_insn (gen_negsi2 (op0, op0));
+ }
+
+ /* Constant divisor: only the dividend's sign needs a runtime test.  */
+ else if (GET_CODE (op2) == CONST_INT)
+ {
+ int neg = FALSE;
+ rtx neg_op1 = gen_reg_rtx (SImode);
+ rtx label1 = gen_label_rtx ();
+
+ if (INTVAL (op2) < 0)
+ {
+ neg = TRUE;
+ op2 = GEN_INT (-INTVAL (op2));
+ }
+ else if (! SMALL_INT (operands[2]))
+ op2 = force_reg (SImode, op2);
+
+ emit_insn (gen_negsi2 (neg_op1, op1));
+ emit_insn (gen_cmpsi (op1, const0_rtx));
+ emit_jump_insn (gen_bge (label1));
+ /* 0-or-negative / constant */
+ emit_move_insn (op0, gen_rtx_UDIV (SImode, neg_op1, op2));
+ if (!neg)
+ emit_insn (gen_negsi2 (op0, op0));
+
+ emit_jump_insn (gen_jump (join_label));
+ emit_barrier ();
+
+ emit_label (label1); /* positive / constant */
+ emit_move_insn (op0, gen_rtx_UDIV (SImode, op1, op2));
+ if (neg)
+ emit_insn (gen_negsi2 (op0, op0));
+ }
+
+ /* General case: full 4-way split on the signs of both operands.  */
+ else
+ {
+ rtx neg_op1 = gen_reg_rtx (SImode);
+ rtx neg_op2 = gen_reg_rtx (SImode);
+ rtx label1 = gen_label_rtx ();
+ rtx label2 = gen_label_rtx ();
+ rtx label3 = gen_label_rtx ();
+ rtx label4 = NULL_RTX;
+
+ emit_insn (gen_negsi2 (neg_op2, op2));
+ emit_insn (gen_cmpsi (op2, const0_rtx));
+ emit_jump_insn (gen_bgt (label1));
+
+ emit_insn (gen_negsi2 (neg_op1, op1));
+ emit_insn (gen_cmpsi (op1, const0_rtx));
+ emit_jump_insn (gen_bge (label2));
+ /* negative / negative-or-0 */
+ emit_move_insn (op0, gen_rtx_UDIV (SImode, neg_op1, neg_op2));
+
+ if (TARGET_CHECK_ZERO_DIV)
+ {
+ label4 = gen_label_rtx ();
+ emit_insn (gen_cmpsi (op2, const0_rtx));
+ emit_jump_insn (gen_bne (join_label));
+ emit_label (label4);
+ emit_insn (gen_trap_divide_by_zero ());
+ }
+ emit_jump_insn (gen_jump (join_label));
+ emit_barrier ();
+
+ emit_label (label2); /* pos.-or-0 / neg.-or-0 */
+ emit_move_insn (op0, gen_rtx_UDIV (SImode, op1, neg_op2));
+
+ if (TARGET_CHECK_ZERO_DIV)
+ {
+ emit_insn (gen_cmpsi (op2, const0_rtx));
+ emit_jump_insn (gen_beq (label4));
+ }
+
+ emit_insn (gen_negsi2 (op0, op0));
+ emit_jump_insn (gen_jump (join_label));
+ emit_barrier ();
+
+ emit_label (label1);
+ emit_insn (gen_negsi2 (neg_op1, op1));
+ emit_insn (gen_cmpsi (op1, const0_rtx));
+ emit_jump_insn (gen_bge (label3));
+ /* negative / positive */
+ emit_move_insn (op0, gen_rtx_UDIV (SImode, neg_op1, op2));
+ emit_insn (gen_negsi2 (op0, op0));
+ emit_jump_insn (gen_jump (join_label));
+ emit_barrier ();
+
+ emit_label (label3); /* positive-or-0 / positive */
+ emit_move_insn (op0, gen_rtx_UDIV (SImode, op1, op2));
+ }
+
+ emit_label (join_label);
+
+ emit_insn (gen_dummy (op0));
+ DONE;
+}")
+
+;; Hardware signed divide; matched only when TARGET_USE_DIV let the
+;; expander above emit a raw DIV rtx.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (div:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ ""
+ "div %0,%1,%2"
+ [(set_attr "type" "idiv")])
+
+;; Unsigned 32-bit division.  A literal zero divisor traps directly;
+;; with TARGET_CHECK_ZERO_DIV a register divisor gets an explicit
+;; post-divu zero test (divu itself does not reliably trap on zero).
+(define_expand "udivsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (udiv:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "arith32_operand" "")))]
+ ""
+ "
+{
+ rtx op2 = operands[2];
+
+ if (op2 == const0_rtx)
+ {
+ emit_insn (gen_trap_divide_by_zero ());
+ emit_insn (gen_dummy (operands[0]));
+ DONE;
+ }
+ else if (GET_CODE (op2) != CONST_INT && TARGET_CHECK_ZERO_DIV)
+ {
+ rtx label = gen_label_rtx ();
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_UDIV (SImode, operands[1], op2)));
+ emit_insn (gen_tcnd_divide_by_zero (op2, label));
+ emit_label (label);
+ emit_insn (gen_dummy (operands[0]));
+ DONE;
+ }
+}")
+
+;; Hardware unsigned divide for any nonzero divisor.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith32_operand" "rI")))]
+ "operands[2] != const0_rtx"
+ "divu %0,%1,%2"
+ [(set_attr "type" "idiv")])
+
+;; udiv by a literal zero degenerates to the divide-by-zero trap.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0)))]
+ ""
+ "tb0 0,%#r0,503"
+ [(set_attr "type" "weird")])
+
+;; patterns for mixed mode floating point.
+;; Do not define patterns that utilize mixed mode arithmetic that result
+;; in narrowing the precision, because it loses accuracy, since the standard
+;; requires double rounding, whereas the 88000 instruction only rounds once.
+
+;; Division by an exact power of two is strength-reduced to a multiply
+;; by its (exact) reciprocal; otherwise the divisor is forced into a
+;; register for the fdiv insns below.
+(define_expand "divdf3"
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (div:DF (match_operand:DF 1 "general_operand" "r,x")
+ (match_operand:DF 2 "general_operand" "r,x")))]
+ ""
+ "
+{
+ operands[1] = legitimize_operand (operands[1], DFmode);
+ if (real_power_of_2_operand (operands[2]))
+ {
+ REAL_VALUE_TYPE r;
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[2]);
+ bool result;
+
+ result = exact_real_inverse (DFmode, &r);
+ gcc_assert (result);
+ emit_insn (gen_muldf3 (operands[0], operands[1],
+ CONST_DOUBLE_FROM_REAL_VALUE (r, DFmode)));
+ DONE;
+ }
+ else if (! register_operand (operands[2], DFmode))
+ operands[2] = force_reg (DFmode, operands[2]);
+}")
+
+;; Mixed-mode divides; suffix encodes dest/src1/src2 precisions.
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (div:DF (float_extend:DF (match_operand:SF 1 "register_operand" "r,x"))
+ (float_extend:DF (match_operand:SF 2 "register_operand" "r,x"))))]
+ ""
+ "fdiv.dss %0,%1,%2"
+ [(set_attr "type" "dpdiv")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (div:DF (match_operand:DF 1 "register_operand" "r,x")
+ (float_extend:DF (match_operand:SF 2 "register_operand" "r,x"))))]
+ ""
+ "fdiv.dds %0,%1,%2"
+ [(set_attr "type" "dpdiv")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (div:DF (float_extend:DF (match_operand:SF 1 "register_operand" "r,x"))
+ (match_operand:DF 2 "register_operand" "r,x")))]
+ ""
+ "fdiv.dsd %0,%1,%2"
+ [(set_attr "type" "dpdiv")])
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r,x")
+ (div:SF (match_operand:SF 1 "register_operand" "r,x")
+ (match_operand:SF 2 "register_operand" "r,x")))]
+ ""
+ "fdiv.sss %0,%1,%2"
+ [(set_attr "type" "spdiv")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,x")
+ (div:DF (match_operand:DF 1 "register_operand" "r,x")
+ (match_operand:DF 2 "register_operand" "r,x")))]
+ ""
+ "fdiv.ddd %0,%1,%2"
+ [(set_attr "type" "dpdiv")])
+
+;; - remainder instructions, don't define, since the hardware doesn't have any
+;; direct support, and GNU can synthesize them out of div/mul just fine.
+
+;;- load effective address, must come after add, so that we favor using
+;; addu reg,reg,reg instead of: lda reg,reg,reg (addu doesn't require
+;; the data unit), and also future 88k chips might not support unscaled
+;; lda instructions.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:HI 1 "address_operand" "p"))]
+ ""
+ "lda.h %0,%a1"
+ [(set_attr "type" "loada")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "address_operand" "p"))]
+ ""
+ "lda %0,%a1"
+ [(set_attr "type" "loada")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:DI 1 "address_operand" "p"))]
+ ""
+ "lda.d %0,%a1"
+ [(set_attr "type" "loada")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SF 1 "address_operand" "p"))]
+ ""
+ "lda %0,%a1"
+ [(set_attr "type" "loada")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:DF 1 "address_operand" "p"))]
+ ""
+ "lda.d %0,%a1"
+ [(set_attr "type" "loada")])
+
+;;- and instructions (with complement also)
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (and:SI (not:SI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "and.c %0,%2,%1")
+
+;; If the operation is being performed on a 32-bit constant such that
+;; it cannot be done in one insn, do it in two. We may lose a bit on
+;; CSE in pathological cases, but it seems better doing it this way.
+
+;; 32-bit AND.  A constant mask that cannot be handled by a single
+;; and/and.u/clr form (see output_and and the insn below) is split into
+;; two ANDs: first with (value | 0xffff), touching only the high half,
+;; then with (value | 0xffff0000), touching only the low half.
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (match_operand:SI 1 "arith32_operand" "")
+ (match_operand:SI 2 "arith32_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int value = INTVAL (operands[2]);
+
+ if (! (SMALL_INTVAL (value)
+ || (value & 0xffff0000) == 0xffff0000
+ || (value & 0xffff) == 0xffff
+ || (value & 0xffff) == 0
+ || integer_ok_for_set (~value)))
+ {
+ emit_insn (gen_andsi3 (operands[0], operands[1],
+ GEN_INT (value | 0xffff)));
+ operands[1] = operands[0];
+ operands[2] = GEN_INT (value | 0xffff0000);
+ }
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (and:SI (match_operand:SI 1 "arith32_operand" "%r,r")
+ (match_operand:SI 2 "arith32_operand" "rIJL,rn")))]
+ ""
+ "* return output_and (operands);"
+ [(set_attr "type" "arith,marith")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
+ (match_operand:DI 2 "register_operand" "r")))]
+ ""
+ "and.c %d0,%d2,%d1\;and.c %0,%2,%1"
+ [(set_attr "type" "marith")])
+
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (match_operand:DI 1 "arith64_operand" "%r")
+ (match_operand:DI 2 "arith64_operand" "rn")))]
+ ""
+{
+ rtx xoperands[10];
+
+ xoperands[0] = operand_subword (operands[0], 1, 0, DImode);
+ xoperands[1] = operand_subword (operands[1], 1, 0, DImode);
+ xoperands[2] = operand_subword (operands[2], 1, 0, DImode);
+
+ output_asm_insn (output_and (xoperands), xoperands);
+
+ operands[0] = operand_subword (operands[0], 0, 0, DImode);
+ operands[1] = operand_subword (operands[1], 0, 0, DImode);
+ operands[2] = operand_subword (operands[2], 0, 0, DImode);
+
+ return output_and (operands);
+}
+ [(set_attr "type" "marith")
+ (set_attr "length" "4")]) ; length is 2, 3, or 4.
+
+;;- Bit set (inclusive or) instructions (with complement also)
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ior:SI (not:SI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "or.c %0,%2,%1")
+
+;; 32-bit inclusive OR.  A constant that is neither small, nor
+;; high-half-only, nor a contiguous bit run usable by `set' (see
+;; integer_ok_for_set) is split into an OR of the high half
+;; (value & 0xffff0000) followed by an OR of the low half
+;; (value & 0xffff) -- matching the or.u/or pair in the insn below.
+(define_expand "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ior:SI (match_operand:SI 1 "arith32_operand" "")
+ (match_operand:SI 2 "arith32_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int value = INTVAL (operands[2]);
+
+ if (! (SMALL_INTVAL (value)
+ || (value & 0xffff) == 0
+ || integer_ok_for_set (value)))
+ {
+ emit_insn (gen_iorsi3 (operands[0], operands[1],
+ GEN_INT (value & 0xffff0000)));
+ operands[1] = operands[0];
+ operands[2] = GEN_INT (value & 0xffff);
+ }
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (ior:SI (match_operand:SI 1 "arith32_operand" "%r,r,r,r")
+ (match_operand:SI 2 "arith32_operand" "rI,L,M,n")))]
+ ""
+ "@
+ or %0,%1,%2
+ or.u %0,%1,%X2
+ set %0,%1,%s2
+ or.u %0,%1,%X2\;or %0,%0,%x2"
+ [(set_attr "type" "arith,arith,bit,marith")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ior:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
+ (match_operand:DI 2 "register_operand" "r")))]
+ ""
+ "or.c %d0,%d2,%d1\;or.c %0,%2,%1"
+ [(set_attr "type" "marith")])
+
+(define_insn "iordi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ior:DI (match_operand:DI 1 "arith64_operand" "%r")
+ (match_operand:DI 2 "arith64_operand" "rn")))]
+ ""
+{
+ rtx xoperands[10];
+
+ xoperands[0] = operand_subword (operands[0], 1, 0, DImode);
+ xoperands[1] = operand_subword (operands[1], 1, 0, DImode);
+ xoperands[2] = operand_subword (operands[2], 1, 0, DImode);
+
+ output_asm_insn (output_ior (xoperands), xoperands);
+
+ operands[0] = operand_subword (operands[0], 0, 0, DImode);
+ operands[1] = operand_subword (operands[1], 0, 0, DImode);
+ operands[2] = operand_subword (operands[2], 0, 0, DImode);
+
+ return output_ior (operands);
+}
+ [(set_attr "type" "marith")
+ (set_attr "length" "4")]) ; length is 2, 3, or 4.
+
+;;- xor instructions (with complement also)
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (xor:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "register_operand" "r"))))]
+ ""
+ "xor.c %0,%1,%2")
+
+;; 32-bit XOR.  Constants that are neither small nor high-half-only are
+;; split into a XOR of the high half (value & 0xffff0000) followed by a
+;; XOR of the low half (value & 0xffff), matching the xor.u/xor pair in
+;; the insn below.
+(define_expand "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (xor:SI (match_operand:SI 1 "arith32_operand" "")
+ (match_operand:SI 2 "arith32_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int value = INTVAL (operands[2]);
+
+ if (! (SMALL_INTVAL (value)
+ || (value & 0xffff) == 0))
+ {
+ emit_insn (gen_xorsi3 (operands[0], operands[1],
+ GEN_INT (value & 0xffff0000)));
+ operands[1] = operands[0];
+ operands[2] = GEN_INT (value & 0xffff);
+ }
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (xor:SI (match_operand:SI 1 "arith32_operand" "%r,r,r")
+ (match_operand:SI 2 "arith32_operand" "rI,L,n")))]
+ ""
+ "@
+ xor %0,%1,%2
+ xor.u %0,%1,%X2
+ xor.u %0,%1,%X2\;xor %0,%0,%x2"
+ [(set_attr "type" "arith,arith,marith")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (xor:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "register_operand" "r"))))]
+ ""
+ "xor.c %d0,%d1,%d2\;xor.c %0,%1,%2"
+ [(set_attr "type" "marith")])
+
+(define_insn "xordi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (xor:DI (match_operand:DI 1 "arith64_operand" "%r")
+ (match_operand:DI 2 "arith64_operand" "rn")))]
+ ""
+{
+ rtx xoperands[10];
+
+ xoperands[0] = operand_subword (operands[0], 1, 0, DImode);
+ xoperands[1] = operand_subword (operands[1], 1, 0, DImode);
+ xoperands[2] = operand_subword (operands[2], 1, 0, DImode);
+
+ output_asm_insn (output_xor (xoperands), xoperands);
+
+ operands[0] = operand_subword (operands[0], 0, 0, DImode);
+ operands[1] = operand_subword (operands[1], 0, 0, DImode);
+ operands[2] = operand_subword (operands[2], 0, 0, DImode);
+
+ return output_xor (operands);
+}
+ [(set_attr "type" "marith")
+ (set_attr "length" "4")]) ; length is 2, 3, or 4.
+
+;;- ones complement instructions
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "xor.c %0,%1,%#r0")
+
+(define_insn "one_cmpldi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (match_operand:DI 1 "register_operand" "r")))]
+ ""
+ "xor.c %d0,%d1,%#r0\;xor.c %0,%1,%#r0"
+ [(set_attr "type" "marith")])
+
+;; Optimized special cases of shifting.
+;; Must precede the general case.
+
+;; @@ What about HImode shifted by 8?
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI (match_operand:SI 1 "memory_operand" "m")
+ (const_int 24)))]
+ "! SCALED_ADDRESS_P (XEXP (operands[1], 0))"
+ "%V1ld.b\\t %0,%1"
+ [(set_attr "type" "load")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI (match_operand:SI 1 "memory_operand" "m")
+ (const_int 24)))]
+ "! SCALED_ADDRESS_P (XEXP (operands[1], 0))"
+ "%V1ld.bu\\t %0,%1"
+ [(set_attr "type" "load")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI (match_operand:SI 1 "memory_operand" "m")
+ (const_int 16)))]
+ "! SCALED_ADDRESS_P (XEXP (operands[1], 0))"
+ "%V1ld.h\\t %0,%1"
+ [(set_attr "type" "load")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI (match_operand:SI 1 "memory_operand" "m")
+ (const_int 16)))]
+ "! SCALED_ADDRESS_P (XEXP (operands[1], 0))"
+ "%V1ld.hu\\t %0,%1"
+ [(set_attr "type" "load")])
+
+;;- arithmetic shift instructions.
+
+;; @@ Do the optimized patterns with -1 get used? Perhaps operand 1 should
+;; be arith32_operand?
+
+;; Use tbnd to support TARGET_TRAP_LARGE_SHIFT.
+(define_insn "tbnd"
+ [(trap_if (gtu (match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "arith_operand" "rI"))
+ (const_int 7))]
+ ""
+ "tbnd %r0,%1"
+ [(set_attr "type" "weird")])
+
+;; Just in case the optimizer decides to fold away the test.
+(define_insn ""
+ [(trap_if (const_int 1) (const_int 7))]
+ ""
+ "tbnd %#r31,0"
+ [(set_attr "type" "weird")])
+
+;; Left shift.  A constant count above 31 either traps at compile-expanded
+;; runtime (-mtrap-large-shift, via tbnd) or simply yields zero.  For
+;; variable counts: -mtrap-large-shift emits a bounds trap on the count;
+;; -mhandle-large-shift materializes the condition (count <= 31) with
+;; sleu and ANDs the resulting mask into the shifted value, so oversized
+;; counts produce zero.
+;; NOTE(review): the masking trick assumes the scc patterns (sleu) yield
+;; an all-ones/zero value -- confirm against the sleu pattern elsewhere
+;; in this file.
+(define_expand "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "arith32_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if ((unsigned) INTVAL (operands[2]) > 31)
+ {
+ if (TARGET_TRAP_LARGE_SHIFT)
+ emit_insn (gen_tbnd (force_reg (SImode, operands[2]),
+ GEN_INT (31)));
+ else
+ emit_move_insn (operands[0], const0_rtx);
+ DONE;
+ }
+ }
+
+ else if (TARGET_TRAP_LARGE_SHIFT)
+ emit_insn (gen_tbnd (operands[2], GEN_INT (31)));
+
+ else if (TARGET_HANDLE_LARGE_SHIFT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_cmpsi (operands[2], GEN_INT (31)));
+ emit_insn (gen_sleu (reg));
+ emit_insn (gen_andsi3 (reg, operands[1], reg));
+ operands[1] = reg;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "arith5_operand" "r,K")))]
+ ""
+ "@
+ mak %0,%1,%2
+ mak %0,%1,0<%2>"
+ [(set_attr "type" "bit")])
+
+;; Arithmetic right shift.  A constant count above 31 is clamped to 31
+;; (full sign fill) unless -mtrap-large-shift, which emits the bounds
+;; trap and stops.  For variable counts, -mhandle-large-shift ORs the
+;; scc result of (count > 31) into the count so oversized counts behave
+;; as 31.
+;; NOTE(review): this relies on sgtu producing 0/all-ones and on `ext'
+;; using only the low five bits of the count register -- confirm both.
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "arith32_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if ((unsigned) INTVAL (operands[2]) > 31)
+ {
+ if (TARGET_TRAP_LARGE_SHIFT)
+ {
+ emit_insn (gen_tbnd (force_reg (SImode, operands[2]),
+ GEN_INT (31)));
+ DONE;
+ }
+ else
+ operands[2] = GEN_INT (31);
+ }
+ }
+
+ else if (TARGET_TRAP_LARGE_SHIFT)
+ emit_insn (gen_tbnd (operands[2], GEN_INT (31)));
+
+ else if (TARGET_HANDLE_LARGE_SHIFT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_cmpsi (operands[2], GEN_INT (31)));
+ emit_insn (gen_sgtu (reg));
+ emit_insn (gen_iorsi3 (reg, operands[2], reg));
+ operands[2] = reg;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "arith5_operand" "r,K")))]
+ ""
+ "@
+ ext %0,%1,%2
+ ext %0,%1,0<%2>"
+ [(set_attr "type" "bit")])
+
+;;- logical shift instructions. Logical shift left becomes arithmetic
+;; shift left.
+
+;; Logical right shift.  Structure mirrors ashlsi3: constant counts above
+;; 31 trap (-mtrap-large-shift) or yield zero; variable counts either get
+;; a bounds trap, or (-mhandle-large-shift) the value is pre-masked with
+;; the sleu-generated (count <= 31) mask so oversized counts give zero.
+(define_expand "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "arith32_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if ((unsigned) INTVAL (operands[2]) > 31)
+ {
+ if (TARGET_TRAP_LARGE_SHIFT)
+ emit_insn (gen_tbnd (force_reg (SImode, operands[2]),
+ GEN_INT (31)));
+ else
+ emit_move_insn (operands[0], const0_rtx);
+ DONE;
+ }
+ }
+
+ else if (TARGET_TRAP_LARGE_SHIFT)
+ emit_insn (gen_tbnd (operands[2], GEN_INT (31)));
+
+ else if (TARGET_HANDLE_LARGE_SHIFT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_cmpsi (operands[2], GEN_INT (31)));
+ emit_insn (gen_sleu (reg));
+ emit_insn (gen_andsi3 (reg, operands[1], reg));
+ operands[1] = reg;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "arith5_operand" "r,K")))]
+ ""
+ "@
+ extu %0,%1,%2
+ extu %0,%1,0<%2>"
+ [(set_attr "type" "bit")])
+
+;;- rotate instructions
+
+;; Rotate left, implemented as rotate right by the negated count (the
+;; hardware `rot' only rotates right).  Variable counts -- and, note,
+;; constant counts below 32 -- go through negsi2 into a register;
+;; constant counts >= 32 are folded via (32 - n) % 32.
+;; NOTE(review): for constants strictly above 32 the C `%' leaves a
+;; non-positive remainder, producing a negative rotate count; and the
+;; negsi2 path for small constants relies on `rot' using only the low
+;; five bits of the register.  Both cases deserve double-checking.
+(define_expand "rotlsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (rotatert:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "arith32_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned) INTVAL (operands[2]) >= 32)
+ operands[2] = GEN_INT ((32 - INTVAL (operands[2])) % 32);
+ else
+ {
+ rtx op = gen_reg_rtx (SImode);
+ emit_insn (gen_negsi2 (op, operands[2]));
+ operands[2] = op;
+ }
+}")
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (rotatert:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ ""
+ "rot %0,%1,%2"
+ [(set_attr "type" "bit")])
+
+;; find first set.
+
+;; The ff1 instruction searches from the most significant bit while ffs
+;; searches from the least significant bit. The bit index and treatment of
+;; zero also differ. This amazing sequence was discovered using the GNU
+;; Superoptimizer.
+
+;; Alternative 0 allows the input to be reused as the output (constraint
+;; "0") by building the intermediate in scratch %2; alternative 1 uses
+;; the early-clobber output itself as the temporary, so no scratch is
+;; needed ("X").  The carry trick (subu.co/and/addu.ci) isolates the
+;; lowest set bit before ff1 locates it.
+(define_insn "ffssi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,&r")
+ (ffs:SI (match_operand:SI 1 "register_operand" "0,r")))
+ (clobber (reg:CC 0))
+ (clobber (match_scratch:SI 2 "=r,X"))]
+ ""
+ "@
+ subu.co %2,%#r0,%1\;and %2,%2,%1\;addu.ci %2,%2,%2\;ff1 %0,%2
+ subu.co %0,%#r0,%1\;and %0,%0,%1\;addu.ci %0,%0,%0\;ff1 %0,%0"
+ [(set_attr "type" "marith")
+ (set_attr "length" "4")])
+
+;; Bit field instructions.
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extract:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 32)
+ (const_int 0)))]
+ ""
+ "or %0,%#r0,%1")
+
+(define_insn "extv"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extract:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "int5_operand" "")
+ (match_operand:SI 3 "int5_operand" "")))]
+ ""
+{
+ operands[4] = GEN_INT ((32 - INTVAL (operands[2])) - INTVAL (operands[3]));
+ return "ext %0,%1,%2<%4>"; /* <(32-%2-%3)> */
+}
+ [(set_attr "type" "bit")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extract:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 32)
+ (const_int 0)))]
+ ""
+ "or %0,%#r0,%1")
+
+(define_insn "extzv"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extract:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "int5_operand" "")
+ (match_operand:SI 3 "int5_operand" "")))]
+ ""
+{
+ operands[4] = GEN_INT ((32 - INTVAL (operands[2])) - INTVAL (operands[3]));
+ return "extu %0,%1,%2<%4>"; /* <(32-%2-%3)> */
+}
+ [(set_attr "type" "bit")])
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r")
+ (match_operand:SI 1 "int5_operand" "")
+ (match_operand:SI 2 "int5_operand" ""))
+ (const_int 0))]
+ ""
+{
+ operands[3] = GEN_INT ((32 - INTVAL (operands[1])) - INTVAL (operands[2]));
+ return "clr %0,%0,%1<%3>"; /* <(32-%1-%2)> */
+}
+ [(set_attr "type" "bit")])
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r")
+ (match_operand:SI 1 "int5_operand" "")
+ (match_operand:SI 2 "int5_operand" ""))
+ (const_int -1))]
+ ""
+{
+ operands[3] = GEN_INT ((32 - INTVAL (operands[1])) - INTVAL (operands[2]));
+ return "set %0,%0,%1<%3>"; /* <(32-%1-%2)> */
+}
+ [(set_attr "type" "bit")])
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r")
+ (match_operand:SI 1 "int5_operand" "")
+ (match_operand:SI 2 "int5_operand" ""))
+ (match_operand:SI 3 "int32_operand" "n"))]
+ ""
+{
+ int value = INTVAL (operands[3]);
+
+ if (INTVAL (operands[1]) < 32)
+ value &= (1 << INTVAL (operands[1])) - 1;
+
+ operands[2] = GEN_INT (32 - (INTVAL(operands[1]) + INTVAL(operands[2])));
+
+ value <<= INTVAL (operands[2]);
+ operands[3] = GEN_INT (value);
+
+ if (SMALL_INTVAL (value))
+ return "clr %0,%0,%1<%2>\;or %0,%0,%3";
+ else if ((value & 0x0000ffff) == 0)
+ return "clr %0,%0,%1<%2>\;or.u %0,%0,%X3";
+ else
+ return "clr %0,%0,%1<%2>\;or.u %0,%0,%X3\;or %0,%0,%x3";
+}
+ [(set_attr "type" "marith")
+ (set_attr "length" "3")]) ; may be 2 or 3.
+
+;; negate insns
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "arith_operand" "rI")))]
+ ""
+ "subu %0,%#r0,%1")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=r,x")
+ (float_truncate:SF (neg:DF (match_operand:DF 1 "register_operand" "r,x"))))]
+ ""
+ "@
+ fsub.ssd %0,%#r0,%1
+ fsub.ssd %0,%#x0,%1"
+ [(set_attr "type" "dpadd")])
+
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "register_operand" "=&r,r")
+ (neg:DF (match_operand:DF 1 "register_operand" "r,0")))]
+ ""
+ "@
+ xor.u %0,%1,0x8000\;or %d0,%#r0,%d1
+ xor.u %0,%0,0x8000"
+ [(set_attr "type" "marith,arith")])
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (neg:SF (match_operand:SF 1 "register_operand" "r")))]
+ ""
+ "xor.u %0,%1,0x8000")
+
+;; absolute value insns for floating-point (integer abs can be done using the
+;; machine-independent sequence).
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "register_operand" "=&r,r")
+ (abs:DF (match_operand:DF 1 "register_operand" "r,0")))]
+ ""
+ "@
+ and.u %0,%1,0x7fff\;or %d0,%#r0,%d1
+ and.u %0,%0,0x7fff"
+ [(set_attr "type" "marith,arith")])
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (abs:SF (match_operand:SF 1 "register_operand" "r")))]
+ ""
+ "and.u %0,%1,0x7fff")
+
+;; Subroutines of "casesi".
+
+;; Operand 0 is index
+;; operand 1 is the minimum bound
+;; operand 2 is the maximum bound - minimum bound + 1
+;; operand 3 is CODE_LABEL for the table;
+;; operand 4 is the CODE_LABEL to go to if index out of range.
+
+;; Expand a dispatch-table switch.  The index (operand 0) is biased by
+;; -minimum (operand 1), compared against the range (operand 2), and the
+;; default label (operand 4) is taken on unsigned overflow via bgtu.
+;; With CASE_VECTOR_INSNS the table entries are branch instructions and
+;; dispatch goes through casesi_enter; otherwise the table entry is
+;; loaded and jumped to (casesi_jump), with the table base hoisted into
+;; a register up front.
+(define_expand "casesi"
+ ;; We don't use these for generating the RTL, but we must describe
+ ;; the operands here.
+ [(match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "immediate_operand" "")
+ (match_operand:SI 2 "immediate_operand" "")
+ (match_operand 3 "" "")
+ (match_operand 4 "" "")]
+ ""
+ "
+{
+ register rtx index_diff = gen_reg_rtx (SImode);
+ register rtx low = GEN_INT (-INTVAL (operands[1]));
+ register rtx label = gen_rtx_LABEL_REF (Pmode, operands[3]);
+ register rtx base = NULL_RTX;
+
+ if (! CASE_VECTOR_INSNS)
+ /* These instructions are likely to be scheduled and made loop invariant.
+ This decreases the cost of the dispatch at the expense of the default
+ case. */
+ base = force_reg (SImode, memory_address_noforce (SImode, label));
+
+ /* Compute the index difference and handle the default case. */
+ emit_insn (gen_addsi3 (index_diff,
+ force_reg (SImode, operands[0]),
+ ADD_INT (low) ? low : force_reg (SImode, low)));
+ emit_insn (gen_cmpsi (index_diff, operands[2]));
+ /* It's possible to replace this branch with sgtu/iorsi3 and adding a -1
+ entry to the table. However, that doesn't seem to win on the m88110. */
+ emit_jump_insn (gen_bgtu (operands[4]));
+
+ if (CASE_VECTOR_INSNS)
+ /* Call the jump that will branch to the appropriate case. */
+ emit_jump_insn (gen_casesi_enter (label, index_diff, operands[3]));
+ else
+ /* Load the table entry and jump to it. */
+ emit_jump_insn (gen_casesi_jump (gen_reg_rtx (SImode), base, index_diff, operands[3]));
+
+ /* Claim that flow drops into the table so it will be adjacent by not
+ emitting a barrier. */
+ DONE;
+}")
+
+(define_expand "casesi_jump"
+ [(set (match_operand:SI 0 "" "")
+ (mem:SI (plus:SI (match_operand:SI 1 "" "")
+ (mult:SI (match_operand:SI 2 "" "")
+ (const_int 4)))))
+ (parallel [(set (pc) (match_dup 0))
+ (use (label_ref (match_operand 3 "" "")))])]
+ ""
+ "")
+
+(define_insn ""
+ [(set (pc) (match_operand:SI 0 "register_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "jmp%. %0"
+ [(set_attr "type" "jump")])
+
+;; The bsr.n instruction is directed to the END of the table. See
+;; ASM_OUTPUT_CASE_END.
+
+(define_insn "casesi_enter"
+ [(set (pc) (match_operand 0 "" ""))
+ (use (match_operand:SI 1 "register_operand" "r"))
+ ;; The USE here is so that at least one jump-insn will refer to the label,
+ ;; to keep it alive in jump_optimize.
+ (use (label_ref (match_operand 2 "" "")))
+ (clobber (reg:SI 1))]
+ ""
+{
+ if (flag_delayed_branch)
+ return "bsr.n %0e\;lda %#r1,%#r1[%1]";
+ m88k_case_index = REGNO (operands[1]);
+ return "bsr %0e";
+}
+ [(set_attr "type" "weird")
+ (set_attr "length" "3")]) ; Including the "jmp r1".
+
+;;- jump to subroutine
+;; Call with no returned value.  The call target must be a register or a
+;; symbolic address (call_address_operand); anything else is forced into
+;; a register.  r1 is clobbered -- it receives the return address, as the
+;; "return" pattern above (jmp %#r1) shows.
+(define_expand "call"
+ [(parallel [(call (match_operand:SI 0 "" "")
+ (match_operand 1 "" ""))
+ (clobber (reg:SI 1))])]
+ ""
+ "
+{
+ gcc_assert (GET_CODE (operands[0]) == MEM);
+
+ if (! call_address_operand (XEXP (operands[0], 0), SImode)) /* Pmode ? */
+ operands[0] = gen_rtx_MEM (GET_MODE (operands[0]),
+ force_reg (Pmode, XEXP (operands[0], 0)));
+}")
+
+(define_insn ""
+ [(parallel [(call (mem:SI (match_operand:SI 0 "call_address_operand" "rQ"))
+ (match_operand 1 "" ""))
+ (clobber (reg:SI 1))])]
+ ""
+ "* return output_call (operands, operands[0]);"
+ [(set_attr "type" "call")])
+
+;; Call returning a value in operand 0.  Same target legitimization as
+;; the plain "call" expander: non-register, non-symbolic addresses are
+;; forced into a register; r1 (return address) is clobbered.
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "register_operand" "")
+ (call (match_operand:SI 1 "" "")
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 1))])]
+ ""
+ "
+{
+ gcc_assert (GET_CODE (operands[1]) == MEM);
+
+ if (! call_address_operand (XEXP (operands[1], 0), SImode))
+ operands[1] = gen_rtx_MEM (GET_MODE (operands[1]),
+ force_reg (Pmode, XEXP (operands[1], 0)));
+}")
+
+(define_insn ""
+ [(parallel [(set (match_operand 0 "register_operand" "=r")
+ (call (mem:SI
+ (match_operand:SI 1 "call_address_operand" "rQ"))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 1))])]
+ ""
+ "* return output_call (operands, operands[1]);"
+ [(set_attr "type" "call")])
+
+;; Nop instruction and others
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "ff0 %#r0,%#r0"
+ [(set_attr "type" "bit")])
+
+(define_insn "return"
+ [(return)]
+ "null_prologue()"
+ "jmp%. %#r1"
+ [(set_attr "type" "jump")])
+
+(define_expand "prologue"
+ [(use (const_int 0))]
+ ""
+{
+ m88k_expand_prologue ();
+ DONE;
+})
+
+(define_expand "epilogue"
+ [(use (const_int 0))]
+ ""
+{
+ m88k_expand_epilogue ();
+})
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "r"))]
+ ""
+ "jmp%. %0"
+ [(set_attr "type" "jump")])
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "br%. %l0"
+ [(set_attr "type" "jump")])
+
+;; This insn is used for some loop tests, typically loops reversed when
+;; strength reduction is used. It is actually created when the instruction
+;; combination phase combines the special loop test. Since this insn
+;; is both a jump insn and has an output, it must deal with its own
+;; reloads, hence the `m' constraints. The `!' constraints direct reload
+;; to not choose the register alternatives in the event a reload is needed.
+
+(define_expand "decrement_and_branch_until_zero"
+ [(parallel [(set (pc)
+ (if_then_else
+ (match_operator 0 "relop_no_unsigned"
+ [(match_operand:SI 1 "register_operand" "")
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1)
+ (match_operand:SI 3 "add_operand" "")))
+ (clobber (match_scratch:SI 4 ""))
+ (clobber (match_scratch:SI 5 "=X,X,&r,&r"))])]
+ ""
+ "")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "relop_no_unsigned"
+ [(match_operand:SI 1 "register_operand" "+!r,!r,m,m")
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1)
+ (match_operand:SI 3 "add_operand" "rI,J,rI,J")))
+ (clobber (match_scratch:SI 4 "=X,X,&r,&r"))
+ (clobber (match_scratch:SI 5 "=X,X,&r,&r"))]
+ "find_reg_note (insn, REG_NONNEG, 0)"
+ "@
+ bcnd.n %B0,%1,%2\;addu %1,%1,%3
+ bcnd.n %B0,%1,%2\;subu %1,%1,%n3
+ ld %4,%1\;addu %5,%4,%3\;bcnd.n %B0,%4,%2\;st %5,%1
+ ld %4,%1\;subu %5,%4,%n3\;bcnd.n %B0,%4,%2\;st %5,%1"
+ [(set_attr "type" "weird")
+ (set_attr "length" "2,2,4,4")])
+
+;; Special insn to serve as the last insn of a define_expand. This insn
+;; will generate no code.
+
+(define_expand "dummy"
+ [(set (match_operand 0 "" "") (match_dup 0))]
+ ""
+ "")
diff --git a/gnu/gcc/gcc/config/m88k/m88k.opt b/gnu/gcc/gcc/config/m88k/m88k.opt
new file mode 100644
index 00000000000..3a959f2a939
--- /dev/null
+++ b/gnu/gcc/gcc/config/m88k/m88k.opt
@@ -0,0 +1,61 @@
+; Options for the Motorola 88000 port of the compiler.
+
+; Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+; 2001, 2002 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify
+; it under the terms of the GNU General Public License as published by
+; the Free Software Foundation; either version 2, or (at your option)
+; any later version.
+;
+; GCC is distributed in the hope that it will be useful,
+; but WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+; GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to
+; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+; Boston, MA 02110-1301, USA.
+
+m88110
+Target RejectNegative Mask(88110)
+Generate code for a 88110 processor
+
+m88100
+Target RejectNegative Mask(88100)
+Generate code for a 88100 processor
+
+m88000
+Target RejectNegative
+Generate code compatible with both 88100 and 88110 processors
+
+mtrap-large-shift
+Target Report RejectNegative Mask(TRAP_LARGE_SHIFT)
+Add code to trap on logical shift counts larger than 31
+
+mhandle-large-shift
+Target Report RejectNegative Mask(HANDLE_LARGE_SHIFT)
+Add code to handle logical shift counts larger than 31
+
+mcheck-zero-division
+Target Report Mask(CHECK_ZERO_DIV)
+Add code to trap on integer divide by zero
+
+muse-div-instruction
+Target Report RejectNegative Mask(USE_DIV)
+Use the \"div\" instruction for signed integer divide
+
+mserialize-volatile
+Target Report Mask(SERIALIZE_VOLATILE)
+Force serialization on volatile memory access
+
+momit-leaf-frame-pointer
+Target Report Mask(OMIT_LEAF_FRAME_POINTER)
+Do not save the frame pointer in leaf functions
+
+mmemcpy
+Target Report Mask(MEMCPY)
+Force all memory copies to use memcpy()
diff --git a/gnu/gcc/gcc/config/m88k/openbsdelf.h b/gnu/gcc/gcc/config/m88k/openbsdelf.h
new file mode 100644
index 00000000000..d7338f79050
--- /dev/null
+++ b/gnu/gcc/gcc/config/m88k/openbsdelf.h
@@ -0,0 +1,128 @@
+/* Configuration file for an m88k OpenBSD ELF target.
+ Copyright (C) 2000, 2004, 2005, 2012 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#define REGISTER_PREFIX "%"
+
+/* #define CTORS_SECTION_ASM_OP "\t.section\t .ctors" */
+/* #define DTORS_SECTION_ASM_OP "\t.section\t .dtors" */
+
+#define TEXT_SECTION_ASM_OP "\t.text"
+#define DATA_SECTION_ASM_OP "\t.data"
+#define FILE_ASM_OP "\t.file\t"
+#define BSS_ASM_OP "\t.bss\t"
+#define REQUIRES_88110_ASM_OP "\t.requires_88110\t"
+
+#undef ASM_OUTPUT_INTERNAL_LABEL
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+#undef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG) != 0) \
+ fprintf (FILE, "%s%d\n", ALIGN_ASM_OP, (LOG))
+
+#undef ASM_OUTPUT_WEAK_ALIAS
+#define ASM_OUTPUT_WEAK_ALIAS(FILE,NAME,VALUE) \
+ do \
+ { \
+ fputs ("\t.weak\t", FILE); \
+ assemble_name (FILE, NAME); \
+ if (VALUE) \
+ { \
+ fputs ("; ", FILE); \
+ assemble_name (FILE, NAME); \
+ fputs (" = ", FILE); \
+ assemble_name (FILE, VALUE); \
+ } \
+ fputc ('\n', FILE); \
+ } \
+ while (0)
+
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ output_function_profiler (FILE, LABELNO, "__mcount")
+
+/* Run-time target specifications. */
+#define TARGET_OS_CPP_BUILTINS() \
+ OPENBSD_OS_CPP_BUILTINS_ELF()
+
+/* Layout of source language data types. */
+
+/* This must agree with <machine/_types.h> */
+#undef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+#undef INTMAX_TYPE
+#define INTMAX_TYPE "long long int"
+
+#undef UINTMAX_TYPE
+#define UINTMAX_TYPE "long long unsigned int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+/* Due to the split instruction and data caches, trampolines must cause the
+ data cache to be synced before attempting to execute the trampoline code.
+ Under OpenBSD, this is done by invoking trap #451 with r2 and r3 set to
+ the address of the trampoline area and its size, respectively. */
+#undef FINALIZE_TRAMPOLINE
+#define FINALIZE_TRAMPOLINE(TRAMP) \
+ emit_library_call(gen_rtx_SYMBOL_REF (Pmode, "__dcache_sync"), \
+ 0, VOIDmode, 2, (TRAMP), Pmode, \
+ GEN_INT (TRAMPOLINE_SIZE), Pmode)
+
+#if defined(CROSS_COMPILE) && !defined(ATTRIBUTE_UNUSED)
+#define ATTRIBUTE_UNUSED
+#endif
+#undef TRANSFER_FROM_TRAMPOLINE
+#define TRANSFER_FROM_TRAMPOLINE \
+extern void __dcache_sync(int, int); \
+void \
+__dcache_sync (addr, len) \
+ int addr ATTRIBUTE_UNUSED, len ATTRIBUTE_UNUSED; \
+{ \
+ /* r2 and r3 are set by the caller and need not be modified */ \
+ __asm __volatile ("tb0 0, %r0, 451"); \
+}
+
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{!shared:%{!nostdlib:%{!r*:%{!e*:-e __start}}}} \
+ %{shared:-shared} %{R*} \
+ %{static:-Bstatic} \
+ %{!static:-Bdynamic} \
+ %{rdynamic:-export-dynamic} \
+ %{assert*} \
+ %{!dynamic-linker:-dynamic-linker /usr/libexec/ld.so}"
+
+/* As an elf system, we need crtbegin/crtend stuff. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "\
+ %{!shared: %{pg:gcrt0%O%s} %{!pg:%{p:gcrt0%O%s} %{!p:crt0%O%s}} \
+ crtbegin%O%s} %{shared:crtbeginS%O%s}"
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "%{!shared:crtend%O%s} %{shared:crtendS%O%s}"
diff --git a/gnu/gcc/gcc/config/m88k/predicates.md b/gnu/gcc/gcc/config/m88k/predicates.md
new file mode 100644
index 00000000000..980459577ff
--- /dev/null
+++ b/gnu/gcc/gcc/config/m88k/predicates.md
@@ -0,0 +1,178 @@
+;; Predicate definitions for Motorola 88000.
+;; Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+;; 2001, 2002 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+;; Return true if OP is a suitable input for a move insn: a register,
+;; a cheap-enough constant, or a memory reference whose address is
+;; valid (including the lo_sum form used for symbolic addresses).
+
+(define_predicate "move_operand"
+ (match_code "subreg, reg, const_int, lo_sum, mem")
+{
+ if (register_operand (op, mode))
+ return true;
+ /* NOTE(review): constants qualify only when classify_integer ranks
+    them below m88k_oru_hi16 -- presumably those loadable without the
+    most expensive immediate sequence; confirm against m88k.c.  */
+ if (GET_CODE (op) == CONST_INT)
+ return (classify_integer (mode, INTVAL (op)) < m88k_oru_hi16);
+ if (GET_MODE (op) != mode)
+ return false;
+ /* Strip a subreg to inspect the underlying MEM.  */
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ if (GET_CODE (op) != MEM)
+ return false;
+
+ /* Accept (lo_sum reg symbolic) addresses, else defer to the generic
+    legitimate-address check for this mode.  */
+ op = XEXP (op, 0);
+ if (GET_CODE (op) == LO_SUM)
+ return (REG_P (XEXP (op, 0))
+ && symbolic_address_p (XEXP (op, 1)));
+ return memory_address_p (mode, op);
+})
+
+;; Return true if OP is suitable for a call insn: a hard register
+;; (indirect call) or a symbolic address (direct call).
+;; NOTE(review): subreg appears in match_code but fails both REG_P and
+;; symbolic_address_p, so it looks vestigial -- confirm before removal.
+
+(define_predicate "call_address_operand"
+ (and (match_code "subreg, reg, symbol_ref, label_ref, const")
+ (match_test "REG_P (op) || symbolic_address_p (op)")))
+
+;; Return true if OP is a register or const0_rtx.  Zero is special
+;; because r0 reads as zero on the m88k, so no constant load is needed.
+
+(define_predicate "reg_or_0_operand"
+ (and (match_code "subreg, reg, const_int")
+ (match_test "op == const0_rtx || register_operand (op, mode)")))
+
+;; Nonzero if OP is a valid second operand for an arithmetic insn:
+;; a register, or an immediate satisfying SMALL_INT (presumably the
+;; instruction's immediate-field range -- see m88k.h for the macro).
+
+(define_predicate "arith_operand"
+ (and (match_code "subreg, reg, const_int")
+ (match_test "register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT && SMALL_INT (op))")))
+
+;; Return true if OP is a register or 5 bit integer (0..31, e.g. a
+;; shift count or bit number).
+
+(define_predicate "arith5_operand"
+ (and (match_code "subreg, reg, const_int")
+ (match_test "register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT
+ && (unsigned) INTVAL (op) < 32)")))
+
+(define_predicate "arith32_operand"
+ (and (match_code "subreg, reg, const_int")
+ (match_test "register_operand (op, mode)
+ || GET_CODE (op) == CONST_INT")))
+
+(define_predicate "arith64_operand"
+ (and (match_code "subreg, reg, const_int")
+ (match_test "register_operand (op, mode)
+ || GET_CODE (op) == CONST_INT")))
+
+(define_predicate "int5_operand"
+ (and (match_code "const_int")
+ (match_test "(unsigned) INTVAL (op) < 32")))
+
+(define_predicate "int32_operand"
+ (match_code "const_int"))
+
+;; Return true if OP is a register or a valid immediate operand for
+;; addu or subu.  ADD_INT (defined in m88k.h) presumably also admits
+;; negated immediates so an add can become a subtract -- confirm there.
+
+(define_predicate "add_operand"
+ (and (match_code "subreg, reg, const_int")
+ (match_test "register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT && ADD_INT (op))")))
+
+(define_predicate "reg_or_bbx_mask_operand"
+ (match_code "subreg, reg, const_int")
+{
+ int value;
+ if (register_operand (op, mode))
+ return true;
+ if (GET_CODE (op) != CONST_INT)
+ return false;
+
+ value = INTVAL (op);
+ if (POWER_OF_2 (value))
+ return true;
+
+ return false;
+})
+
+;; Return true if OP is valid to use in the context of a floating
+;; point operation. Special case 0.0, since we can use r0.
+
+(define_predicate "real_or_0_operand"
+ (match_code "subreg, reg, const_double")
+{
+ /* Only the two hardware float modes qualify.  */
+ if (mode != SFmode && mode != DFmode)
+ return false;
+
+ /* A register, or the literal constant 0.0 (which needs no load,
+    since r0 reads as zero).  */
+ return (register_operand (op, mode)
+ || (GET_CODE (op) == CONST_DOUBLE
+ && op == CONST0_RTX (mode)));
+})
+
+;; Return true if OP is valid to use in the context of logic arithmetic
+;; on condition codes: a register in either CCmode or CCEVENmode.
+;; Declared "special" so genrecog does not add its own mode test on top.
+
+(define_special_predicate "partial_ccmode_register_operand"
+ (and (match_code "subreg, reg")
+ (ior (match_test "register_operand (op, CCmode)")
+ (match_test "register_operand (op, CCEVENmode)"))))
+
+;; Return true if OP is a relational operator (signed or unsigned).
+
+(define_predicate "relop"
+ (match_code "eq, ne, lt, le, ge, gt, ltu, leu, geu, gtu"))
+
+(define_predicate "even_relop"
+ (match_code "eq, lt, gt, ltu, gtu"))
+
+(define_predicate "odd_relop"
+ (match_code "ne, le, ge, leu, geu"))
+
+;; Return true if OP is a relational operator, and is not an unsigned
+;; relational operator.
+
+(define_predicate "relop_no_unsigned"
+ (match_code "eq, ne, lt, le, ge, gt")
+{
+ /* Reject comparisons involving floating-point or DImode values,
+    checking the comparison rtx itself as well as both arms.
+    NOTE(review): inherited from the 3.3.6 port, where the original
+    comment asked why `mode' is not used instead -- the operand modes
+    are inspected directly; confirm the intent before simplifying.  */
+ if (GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
+ || GET_MODE (op) == DImode
+ || GET_MODE_CLASS (GET_MODE (XEXP (op, 0))) == MODE_FLOAT
+ || GET_MODE (XEXP (op, 0)) == DImode
+ || GET_MODE_CLASS (GET_MODE (XEXP (op, 1))) == MODE_FLOAT
+ || GET_MODE (XEXP (op, 1)) == DImode)
+ return false;
+ return true;
+})
+
+;; Return true if the code of this rtx pattern is EQ or NE.
+
+(define_predicate "equality_op"
+ (match_code "eq, ne"))
+
+;; Return true if the code of this rtx pattern is pc or label_ref
+;; (the two possible arms of an if_then_else in a branch pattern).
+;; Special predicate: pc carries no mode, so skip the mode check.
+
+(define_special_predicate "pc_or_label_ref"
+ (match_code "pc, label_ref"))
+
+;; Return true if OP is either a symbol reference or a sum of a symbol
+;; reference and a constant (the forms symbolic_address_p accepts).
+
+(define_predicate "symbolic_operand"
+ (and (match_code "symbol_ref,label_ref,const")
+ (match_test "symbolic_address_p (op)")))
diff --git a/gnu/gcc/gcc/config/m88k/t-openbsd b/gnu/gcc/gcc/config/m88k/t-openbsd
new file mode 100644
index 00000000000..fe539f7718c
--- /dev/null
+++ b/gnu/gcc/gcc/config/m88k/t-openbsd
@@ -0,0 +1,3 @@
+# Build libgcc via the multilib machinery, one variant per PIC flavor.
+# NOTE(review): only fpic/fPIC variants are listed, so no static
+# (non-PIC) libgcc is built -- a known regression vs. the 3.3.6 port.
+MULTILIB_OPTIONS = fpic/fPIC
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib