summaryrefslogtreecommitdiff
path: root/gnu/usr.bin/gcc/config/mn10200
diff options
context:
space:
mode:
Diffstat (limited to 'gnu/usr.bin/gcc/config/mn10200')
-rw-r--r--gnu/usr.bin/gcc/config/mn10200/divmod.c50
-rw-r--r--gnu/usr.bin/gcc/config/mn10200/lib1funcs.asm609
-rw-r--r--gnu/usr.bin/gcc/config/mn10200/mn10200.c1532
-rw-r--r--gnu/usr.bin/gcc/config/mn10200/mn10200.h1078
-rw-r--r--gnu/usr.bin/gcc/config/mn10200/mn10200.md1978
-rw-r--r--gnu/usr.bin/gcc/config/mn10200/t-mn1020050
-rw-r--r--gnu/usr.bin/gcc/config/mn10200/udivmod.c14
-rw-r--r--gnu/usr.bin/gcc/config/mn10200/udivmodsi4.c24
-rw-r--r--gnu/usr.bin/gcc/config/mn10200/xm-mn10200.h47
9 files changed, 5382 insertions, 0 deletions
diff --git a/gnu/usr.bin/gcc/config/mn10200/divmod.c b/gnu/usr.bin/gcc/config/mn10200/divmod.c
new file mode 100644
index 00000000000..6faa09102b5
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/mn10200/divmod.c
@@ -0,0 +1,50 @@
+long udivmodsi4 ();
+
+long
+__divsi3 (long a, long b)
+{
+ int neg = 0;
+ long res;
+
+ if (a < 0)
+ {
+ a = -a;
+ neg = !neg;
+ }
+
+ if (b < 0)
+ {
+ b = -b;
+ neg = !neg;
+ }
+
+ res = udivmodsi4 (a, b, 0);
+
+ if (neg)
+ res = -res;
+
+ return res;
+}
+
+long
+__modsi3 (long a, long b)
+{
+ int neg = 0;
+ long res;
+
+ if (a < 0)
+ {
+ a = -a;
+ neg = 1;
+ }
+
+ if (b < 0)
+ b = -b;
+
+ res = udivmodsi4 (a, b, 1);
+
+ if (neg)
+ res = -res;
+
+ return res;
+}
diff --git a/gnu/usr.bin/gcc/config/mn10200/lib1funcs.asm b/gnu/usr.bin/gcc/config/mn10200/lib1funcs.asm
new file mode 100644
index 00000000000..ff98fcc0ca2
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/mn10200/lib1funcs.asm
@@ -0,0 +1,609 @@
+/* libgcc1 routines for Matsushita mn10200.
+ Copyright (C) 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with files
+ compiled with GCC to produce an executable, this does not cause
+ the resulting executable to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+#ifdef L_divhi3
+ /* Derive signed division/modulo from unsigned "divu" instruction. */
+ .text
+ .globl ___divhi3
+ .type ___divhi3,@function
+___divhi3:
+
+ /* We're going to need some scratch registers, so save d2/d3
+ into the stack. */
+ add -8,a3
+ movx d2,(0,a3)
+ movx d3,(4,a3)
+
+ /* Loading zeros into registers now allows us to use them
+ in the compare instructions, which saves a total of
+ two bytes (egad). */
+ sub d3,d3
+ sub d2,d2
+ sub a0,a0
+
+ /* If first operand is negative, then make it positive.
+ It will be contained in d2 just before .L1.
+
+ a0 tells us if the first operand was negated. */
+ cmp d2,d0
+ bge .L0
+ sub d0,d2
+ mov 1,a0
+ bra .L1
+.L0:
+ mov d0,d2
+.L1:
+ /* If the second operand is negative, then make it positive.
+ It will be contained in d3 just before .L3.
+
+ d0 tells us if the second operand was negated. */
+ cmp d3,d1
+ bge .L2
+ sub d1,d3
+ mov 1,d0
+ bra .L3
+.L2:
+ sub d0,d0
+ mov d1,d3
+.L3:
+ /* Loading d1 with zero here allows us to save one byte
+ in the comparison below. */
+
+ sub d1,d1
+
+ /* Make sure to clear the mdr register, then do the unsigned
+ division. Result will be in d2/mdr. */
+ mov d1,mdr
+ divu d3,d2
+
+ /* Negate the remainder based on the first argument negation
+ flag only. */
+ cmp d1,a0
+ beq .L4
+ mov mdr,d3
+ sub d3,d1
+ bra .L5
+.L4:
+ mov mdr,d1
+
+.L5:
+ /* Negate the result if either, but not both of the inputs
+ were negated. */
+ mov a0,d3
+ xor d3,d0
+ beq .L6
+ sub d0,d0
+ sub d2,d0
+ bra .L7
+.L6:
+ mov d2,d0
+.L7:
+
+ /* Restore our scratch registers, deallocate our stack and return. */
+ movx (0,a3),d2
+ movx (4,a3),d3
+ add 8,a3
+ rts
+ .size ___divhi3,.-___divhi3
+#endif
+
+#ifdef L_modhi3
+ .text
+ .globl ___modhi3
+ .type ___modhi3,@function
+___modhi3:
+ jsr ___divhi3
+ mov d1,d0
+ rts
+ .size ___modhi3,.-___modhi3
+#endif
+
+#ifdef L_addsi3
+ .text
+ .globl ___addsi3
+ .type ___addsi3,@function
+___addsi3:
+ add -4,a3
+ movx d2,(0,a3)
+ mov (8,a3),d2
+ add d2,d0
+ mov (10,a3),d2
+ addc d2,d1
+ movx (0,a3),d2
+ add 4,a3
+ rts
+
+ .size ___addsi3,.-___addsi3
+#endif
+
+#ifdef L_subsi3
+ .text
+ .globl ___subsi3
+ .type ___subsi3,@function
+___subsi3:
+ add -4,a3
+ movx d2,(0,a3)
+ mov (8,a3),d2
+ sub d2,d0
+ mov (10,a3),d2
+ subc d2,d1
+ movx (0,a3),d2
+ add 4,a3
+ rts
+
+ .size ___subsi3,.-___subsi3
+#endif
+
+#ifdef L_mulsi3
+ .text
+ .globl ___mulsi3
+ .type ___mulsi3,@function
+___mulsi3:
+ add -4,a3
+ mov a1,(0,a3)
+ mov d0,a0
+ /* Multiply arg0 msb with arg1 lsb.
+ arg0 msb is in register d1,
+ arg1 lsb is in memory. */
+ mov (8,a3),d0
+ mulu d0,d1
+ mov d1,a1
+
+	/* Multiply arg0 lsb with arg1 msb.
+	   arg0 lsb is in register a0,
+	   arg1 msb is in memory.  */
+ mov a0,d0
+ mov (10,a3),d1
+ mulu d0,d1
+
+ /* Add the cross products. */
+ add d1,a1
+
+ /* Now multiply arg0 lsb with arg1 lsb. */
+ mov (8,a3),d1
+ mulu d1,d0
+
+ /* Add in the upper 16 bits to the cross product sum. */
+ mov mdr,d1
+ add a1,d1
+ mov (0,a3),a1
+ add 4,a3
+ rts
+
+ .size ___mulsi3,.-___mulsi3
+#endif
+
+#ifdef L_ashlsi3
+ .text
+ .globl ___ashlsi3
+ .type ___ashlsi3,@function
+___ashlsi3:
+ mov (4,a3),a0
+ cmp 0,a0
+ beq .L0
+.L1:
+ add d0,d0
+ addc d1,d1
+ add -1,a0
+ bne .L1
+.L0:
+ rts
+
+ .size ___ashlsi3,.-___ashlsi3
+#endif
+
+#ifdef L_lshrsi3
+ .text
+ .globl ___lshrsi3
+ .type ___lshrsi3,@function
+___lshrsi3:
+ mov (4,a3),a0
+ cmp 0,a0
+ beq .L0
+.L1:
+ lsr d1
+ ror d0
+ add -1,a0
+ bne .L1
+.L0:
+ rts
+
+ .size ___lshrsi3,.-___lshrsi3
+#endif
+
+#ifdef L_ashrsi3
+ .text
+ .globl ___ashrsi3
+ .type ___ashrsi3,@function
+___ashrsi3:
+ mov (4,a3),a0
+ cmp 0,a0
+ beq .L0
+.L1:
+ asr d1
+ ror d0
+ add -1,a0
+ bne .L1
+.L0:
+ rts
+
+ .size ___ashrsi3,.-___ashrsi3
+#endif
+
+/* All functions beyond this point pass their arguments in registers! */
+#ifdef L_negsi2_d0
+ .text
+ .globl ___negsi2_d0
+ .type ___negsi2_d0,@function
+___negsi2_d0:
+ add -8,a3
+ movx d3,(0,a3)
+ movx d2,(4,a3)
+ mov d0,d2
+ mov d1,d3
+ sub d0,d0
+ sub d1,d1
+ sub d2,d0
+ subc d3,d1
+ movx (0,a3),d3
+ movx (4,a3),d2
+ add 8,a3
+ rts
+
+ .size ___negsi2_d0,.-___negsi2_d0
+#endif
+
+#ifdef L_negsi2_d2
+ .text
+ .globl ___negsi2_d2
+ .type ___negsi2_d2,@function
+___negsi2_d2:
+ add -8,a3
+ movx d1,(0,a3)
+ movx d0,(4,a3)
+ mov d2,d0
+ mov d3,d1
+ sub d2,d2
+ sub d3,d3
+ sub d0,d2
+ subc d1,d3
+ movx (0,a3),d1
+ movx (4,a3),d0
+ add 8,a3
+ rts
+
+ .size ___negsi2_d2,.-___negsi2_d2
+#endif
+
+#ifdef L_zero_extendpsisi2_d0
+ .text
+ .globl ___zero_extendpsisi2_d0
+ .type ___zero_extendpsisi2_d0,@function
+___zero_extendpsisi2_d0:
+ add -4,a3
+ movx d0,(0,a3)
+ movbu (2,a3),d1
+ add 4,a3
+ rts
+
+ .size ___zero_extendpsisi2_d0,.-___zero_extendpsisi2_d0
+#endif
+
+#ifdef L_zero_extendpsisi2_d2
+ .text
+ .globl ___zero_extendpsisi2_d2
+ .type ___zero_extendpsisi2_d2,@function
+___zero_extendpsisi2_d2:
+ add -4,a3
+ movx d2,(0,a3)
+ movbu (2,a3),d3
+ add 4,a3
+ rts
+
+ .size ___zero_extendpsisi2_d2,.-___zero_extendpsisi2_d2
+#endif
+
+#ifdef L_sign_extendpsisi2_d0
+ .text
+ .globl ___sign_extendpsisi2_d0
+ .type ___sign_extendpsisi2_d0,@function
+___sign_extendpsisi2_d0:
+ add -4,a3
+ movx d0,(0,a3)
+ movb (2,a3),d1
+ add 4,a3
+ rts
+
+ .size ___sign_extendpsisi2_d0,.-___sign_extendpsisi2_d0
+#endif
+
+#ifdef L_sign_extendpsisi2_d2
+ .text
+ .globl ___sign_extendpsisi2_d2
+ .type ___sign_extendpsisi2_d2,@function
+___sign_extendpsisi2_d2:
+ add -4,a3
+ movx d2,(0,a3)
+ movb (2,a3),d3
+ add 4,a3
+ rts
+
+ .size ___sign_extendpsisi2_d2,.-___sign_extendpsisi2_d2
+#endif
+
+#ifdef L_truncsipsi2_d0_d0
+ .text
+ .globl ___truncsipsi2_d0_d0
+ .type ___truncsipsi2_d0_d0,@function
+___truncsipsi2_d0_d0:
+ add -4,a3
+ mov d0,(a3)
+ mov d1,(2,a3)
+ movx (0,a3),d0
+ add 4,a3
+ rts
+
+ .size ___truncsipsi2_d0_d0,.-___truncsipsi2_d0_d0
+#endif
+
+#ifdef L_truncsipsi2_d0_d1
+ .text
+ .globl ___truncsipsi2_d0_d1
+ .type ___truncsipsi2_d0_d1,@function
+___truncsipsi2_d0_d1:
+ add -4,a3
+ mov d0,(a3)
+ mov d1,(2,a3)
+ movx (0,a3),d1
+ add 4,a3
+ rts
+
+ .size ___truncsipsi2_d0_d1,.-___truncsipsi2_d0_d1
+#endif
+
+#ifdef L_truncsipsi2_d0_d2
+ .text
+ .globl ___truncsipsi2_d0_d2
+ .type ___truncsipsi2_d0_d2,@function
+___truncsipsi2_d0_d2:
+ add -4,a3
+ mov d0,(a3)
+ mov d1,(2,a3)
+ movx (0,a3),d2
+ add 4,a3
+ rts
+
+ .size ___truncsipsi2_d0_d2,.-___truncsipsi2_d0_d2
+#endif
+
+#ifdef L_truncsipsi2_d0_d3
+ .text
+ .globl ___truncsipsi2_d0_d3
+ .type ___truncsipsi2_d0_d3,@function
+___truncsipsi2_d0_d3:
+ add -4,a3
+ mov d0,(a3)
+ mov d1,(2,a3)
+ movx (0,a3),d3
+ add 4,a3
+ rts
+
+ .size ___truncsipsi2_d0_d3,.-___truncsipsi2_d0_d3
+#endif
+
+#ifdef L_truncsipsi2_d2_d0
+ .text
+ .globl ___truncsipsi2_d2_d0
+ .type ___truncsipsi2_d2_d0,@function
+___truncsipsi2_d2_d0:
+ add -4,a3
+ mov d2,(a3)
+ mov d3,(2,a3)
+ movx (0,a3),d0
+ add 4,a3
+ rts
+
+ .size ___truncsipsi2_d2_d0,.-___truncsipsi2_d2_d0
+#endif
+
+#ifdef L_truncsipsi2_d2_d1
+ .text
+ .globl ___truncsipsi2_d2_d1
+ .type ___truncsipsi2_d2_d1,@function
+___truncsipsi2_d2_d1:
+ add -4,a3
+ mov d2,(a3)
+ mov d3,(2,a3)
+ movx (0,a3),d1
+ add 4,a3
+ rts
+
+ .size ___truncsipsi2_d2_d1,.-___truncsipsi2_d2_d1
+#endif
+
+#ifdef L_truncsipsi2_d2_d2
+ .text
+ .globl ___truncsipsi2_d2_d2
+ .type ___truncsipsi2_d2_d2,@function
+___truncsipsi2_d2_d2:
+ add -4,a3
+ mov d2,(a3)
+ mov d3,(2,a3)
+ movx (0,a3),d2
+ add 4,a3
+ rts
+
+ .size ___truncsipsi2_d2_d2,.-___truncsipsi2_d2_d2
+#endif
+
+#ifdef L_truncsipsi2_d2_d3
+ .text
+ .globl ___truncsipsi2_d2_d3
+ .type ___truncsipsi2_d2_d3,@function
+___truncsipsi2_d2_d3:
+ add -4,a3
+ mov d2,(a3)
+ mov d3,(2,a3)
+ movx (0,a3),d3
+ add 4,a3
+ rts
+
+ .size ___truncsipsi2_d2_d3,.-___truncsipsi2_d2_d3
+#endif
+
+
+#ifdef L_cmpsi2
+ .text
+ .globl ___cmpsi2
+ .type ___cmpsi2,@function
+___cmpsi2:
+ add -4,a3
+ mov a1,(0,a3)
+ mov (10,a3),a1
+ mov (8,a3),a0
+ cmp a1,d1
+ blt .L9
+ bgt .L6
+ cmp a0,d0
+ bcc .L5
+.L9:
+ sub d0,d0
+ jmp .L8
+.L5:
+ cmp a0,d0
+ bhi .L6
+ mov 1,d0
+ jmp .L8
+.L6:
+ mov 2,d0
+.L8:
+ mov (0,a3),a1
+ add 4,a3
+ rts
+ .size ___cmpsi2,.-___cmpsi2
+#endif
+
+#ifdef L_ucmpsi2
+ .text
+ .globl ___ucmpsi2
+ .type ___ucmpsi2,@function
+___ucmpsi2:
+ add -4,a3
+ mov a1,(0,a3)
+ mov (10,a3),a1
+ mov (8,a3),a0
+ cmp a1,d1
+ bcs .L9
+ bhi .L6
+ cmp a0,d0
+ bcc .L5
+.L9:
+ sub d0,d0
+ jmp .L8
+.L5:
+ cmp a0,d0
+ bhi .L6
+ mov 1,d0
+ jmp .L8
+.L6:
+ mov 2,d0
+.L8:
+ mov (0,a3),a1
+ add 4,a3
+ rts
+ .size ___ucmpsi2,.-___ucmpsi2
+#endif
+
+
+#ifdef L_prologue
+ .text
+ .globl ___prologue
+ .type ___prologue,@function
+___prologue:
+ mov (0,a3),a0
+ add -16,a3
+ movx d2,(4,a3)
+ movx d3,(8,a3)
+ mov a1,(12,a3)
+ mov a2,(16,a3)
+ mov a0,(0,a3)
+ rts
+ .size ___prologue,.-___prologue
+#endif
+
+#ifdef L_epilogue_a0
+ .text
+ .globl ___epilogue_a0
+ .type ___epilogue_a0,@function
+___epilogue_a0:
+ mov (0,a3),a0
+ movx (4,a3),d2
+ movx (8,a3),d3
+ mov (12,a3),a1
+ mov (16,a3),a2
+ add 16,a3
+ mov a0,(0,a3)
+ rts
+ .size ___epilogue_a0,.-___epilogue_a0
+#endif
+
+#ifdef L_epilogue_d0
+ .text
+ .globl ___epilogue_d0
+ .type ___epilogue_d0,@function
+___epilogue_d0:
+ movx (0,a3),d0
+ movx (4,a3),d2
+ movx (8,a3),d3
+ mov (12,a3),a1
+ mov (16,a3),a2
+ add 16,a3
+ movx d0,(0,a3)
+ rts
+ .size ___epilogue_d0,.-___epilogue_d0
+#endif
+
+#ifdef L_epilogue_noreturn
+ .text
+ .globl ___epilogue_noreturn
+ .type ___epilogue_noreturn,@function
+___epilogue_noreturn:
+ movx (0,a3),d2
+ movx (4,a3),d3
+ mov (8,a3),a1
+ mov (12,a3),a2
+ add 16,a3
+ rts
+ .size ___epilogue_noreturn,.-___epilogue_noreturn
+#endif
diff --git a/gnu/usr.bin/gcc/config/mn10200/mn10200.c b/gnu/usr.bin/gcc/config/mn10200/mn10200.c
new file mode 100644
index 00000000000..5435f773c8d
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/mn10200/mn10200.c
@@ -0,0 +1,1532 @@
+/* Subroutines for insn-output.c for Matsushita MN10200 series
+ Copyright (C) 1997 Free Software Foundation, Inc.
+ Contributed by Jeff Law (law@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include <stdio.h>
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "recog.h"
+#include "expr.h"
+#include "tree.h"
+#include "obstack.h"
+
+/* Global registers known to hold the value zero.
+
+ Normally we'd depend on CSE and combine to put zero into a
+ register and re-use it.
+
+ However, on the mn10x00 processors we implicitly use the constant
+ zero in tst instructions, so we might be able to do better by
+   loading the value into a register in the prologue, then re-using
+ that register throughout the function.
+
+ We could perform similar optimizations for other constants, but with
+ gcse due soon, it doesn't seem worth the effort.
+
+ These variables hold a rtx for a register known to hold the value
+ zero throughout the entire function, or NULL if no register of
+ the appropriate class has such a value throughout the life of the
+ function. */
+rtx zero_dreg;
+rtx zero_areg;
+
+/* Note whether or not we need an out of line epilogue. */
+static int out_of_line_epilogue;
+
+/* Indicate this file was compiled by gcc and what optimization
+ level was used. */
+void
+asm_file_start (file)
+ FILE *file;
+{
+ fprintf (file, "#\tGCC For the Matsushita MN10200\n");
+ if (optimize)
+ fprintf (file, "# -O%d\n", optimize);
+ else
+ fprintf (file, "\n\n");
+ output_file_directive (file, main_input_filename);
+}
+
+/* Print operand X using operand code CODE to assembly language output file
+ FILE. */
+
+void
+print_operand (file, x, code)
+ FILE *file;
+ rtx x;
+ int code;
+{
+ switch (code)
+ {
+ case 'b':
+ case 'B':
+ /* These are normal and reversed branches. */
+ switch (code == 'b' ? GET_CODE (x) : reverse_condition (GET_CODE (x)))
+ {
+ case NE:
+ fprintf (file, "ne");
+ break;
+ case EQ:
+ fprintf (file, "eq");
+ break;
+ case GE:
+ fprintf (file, "ge");
+ break;
+ case GT:
+ fprintf (file, "gt");
+ break;
+ case LE:
+ fprintf (file, "le");
+ break;
+ case LT:
+ fprintf (file, "lt");
+ break;
+ case GEU:
+ fprintf (file, "cc");
+ break;
+ case GTU:
+ fprintf (file, "hi");
+ break;
+ case LEU:
+ fprintf (file, "ls");
+ break;
+ case LTU:
+ fprintf (file, "cs");
+ break;
+ default:
+ abort ();
+ }
+ break;
+ case 'C':
+ /* This is used for the operand to a call instruction;
+ if it's a REG, enclose it in parens, else output
+ the operand normally. */
+ if (GET_CODE (x) == REG)
+ {
+ fputc ('(', file);
+ print_operand (file, x, 0);
+ fputc (')', file);
+ }
+ else
+ print_operand (file, x, 0);
+ break;
+
+    /* These output the least significant word of a 32-bit value.
+ 'o' allows us to sign extend a constant if doing so
+ makes for more compact code. */
+ case 'L':
+ case 'o':
+ switch (GET_CODE (x))
+ {
+ case MEM:
+ fputc ('(', file);
+ output_address (XEXP (x, 0));
+ fputc (')', file);
+ break;
+
+ case REG:
+ fprintf (file, "%s", reg_names[REGNO (x)]);
+ break;
+
+ case SUBREG:
+ fprintf (file, "%s",
+ reg_names[REGNO (SUBREG_REG (x)) + SUBREG_WORD (x)]);
+ break;
+
+ case CONST_DOUBLE:
+ if (code == 'L')
+ {
+ long val;
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, val);
+ print_operand_address (file, GEN_INT (val & 0xffff));
+ }
+ else
+ {
+ long val;
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, val);
+
+ val &= 0xffff;
+ val = (((val) & 0xffff) ^ (~0x7fff)) + 0x8000;
+ print_operand_address (file, GEN_INT (val));
+ }
+ break;
+
+ case CONST_INT:
+ if (code == 'L')
+ print_operand_address (file, GEN_INT ((INTVAL (x) & 0xffff)));
+ else
+ {
+ unsigned int val = INTVAL (x) & 0xffff;
+ val = (((val) & 0xffff) ^ (~0x7fff)) + 0x8000;
+ print_operand_address (file, GEN_INT (val));
+ }
+ break;
+ default:
+ abort ();
+ }
+ break;
+
+ /* Similarly, but for the most significant word. */
+ case 'H':
+ case 'h':
+ switch (GET_CODE (x))
+ {
+ case MEM:
+ fputc ('(', file);
+ x = adj_offsettable_operand (x, 2);
+ output_address (XEXP (x, 0));
+ fputc (')', file);
+ break;
+
+ case REG:
+ fprintf (file, "%s", reg_names[REGNO (x) + 1]);
+ break;
+
+ case SUBREG:
+ fprintf (file, "%s",
+ reg_names[REGNO (SUBREG_REG (x)) + SUBREG_WORD (x)] + 1);
+ break;
+
+ case CONST_DOUBLE:
+ if (code == 'H')
+ {
+ long val;
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, val);
+
+ print_operand_address (file, GEN_INT ((val >> 16) & 0xffff));
+ }
+ else
+ {
+ long val;
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, val);
+
+ val = (val >> 16) & 0xffff;
+ val = (((val) & 0xffff) ^ (~0x7fff)) + 0x8000;
+
+ print_operand_address (file, GEN_INT (val));
+ }
+ break;
+
+ case CONST_INT:
+ if (code == 'H')
+ print_operand_address (file,
+ GEN_INT ((INTVAL (x) >> 16) & 0xffff));
+ else
+ {
+ unsigned int val = (INTVAL (x) >> 16) & 0xffff;
+ val = (((val) & 0xffff) ^ (~0x7fff)) + 0x8000;
+
+ print_operand_address (file, GEN_INT (val));
+ }
+ break;
+ default:
+ abort ();
+ }
+ break;
+
+ /* Output ~CONST_INT. */
+ case 'N':
+ if (GET_CODE (x) != CONST_INT)
+ abort ();
+ fprintf (file, "%d", ~INTVAL (x));
+ break;
+
+    /* An address which cannot be register indirect; if it is
+       register indirect, then turn it into reg + disp.  */
+ case 'A':
+ if (GET_CODE (x) != MEM)
+ abort ();
+ if (GET_CODE (XEXP (x, 0)) == REG)
+ x = gen_rtx (PLUS, PSImode, XEXP (x, 0), GEN_INT (0));
+ else
+ x = XEXP (x, 0);
+ fputc ('(', file);
+ output_address (x);
+ fputc (')', file);
+ break;
+
+ case 'Z':
+ print_operand (file, XEXP (x, 1), 0);
+ break;
+
+ /* More cases where we can sign-extend a CONST_INT if it
+ results in more compact code. */
+ case 's':
+ case 'S':
+ if (GET_CODE (x) == CONST_INT)
+ {
+ int val = INTVAL (x);
+
+ if (code == 's')
+ x = GEN_INT (((val & 0xffff) ^ (~0x7fff)) + 0x8000);
+ else
+ x = GEN_INT (((val & 0xff) ^ (~0x7f)) + 0x80);
+ }
+ /* FALL THROUGH */
+ default:
+ switch (GET_CODE (x))
+ {
+ case MEM:
+ fputc ('(', file);
+ output_address (XEXP (x, 0));
+ fputc (')', file);
+ break;
+
+ case REG:
+ fprintf (file, "%s", reg_names[REGNO (x)]);
+ break;
+
+ case SUBREG:
+ fprintf (file, "%s",
+ reg_names[REGNO (SUBREG_REG (x)) + SUBREG_WORD (x)]);
+ break;
+
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CONST:
+ case LABEL_REF:
+ case CODE_LABEL:
+ print_operand_address (file, x);
+ break;
+ default:
+ abort ();
+ }
+ break;
+ }
+}
+
+/* Output assembly language output for the address ADDR to FILE. */
+
+void
+print_operand_address (file, addr)
+ FILE *file;
+ rtx addr;
+{
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ print_operand (file, addr, 0);
+ break;
+ case PLUS:
+ {
+ rtx base, index;
+ /* The base and index could be in any order, so we have
+ to figure out which is the base and which is the index.
+ Uses the same code as GO_IF_LEGITIMATE_ADDRESS. */
+ if (REG_P (XEXP (addr, 0))
+ && REG_OK_FOR_BASE_P (XEXP (addr, 0)))
+ base = XEXP (addr, 0), index = XEXP (addr, 1);
+ else if (REG_P (XEXP (addr, 1))
+ && REG_OK_FOR_BASE_P (XEXP (addr, 1)))
+ base = XEXP (addr, 1), index = XEXP (addr, 0);
+ else
+ abort ();
+ print_operand (file, index, 0);
+ fputc (',', file);
+ print_operand (file, base, 0);;
+ break;
+ }
+ case SYMBOL_REF:
+ output_addr_const (file, addr);
+ break;
+ default:
+ output_addr_const (file, addr);
+ break;
+ }
+}
+
+/* Count the number of tst insns which compare an address register
+ with zero. */
+static void
+count_tst_insns (areg_countp)
+ int *areg_countp;
+{
+ rtx insn;
+
+ /* Assume no tst insns exist. */
+ *areg_countp = 0;
+
+ /* If not optimizing, then quit now. */
+ if (!optimize)
+ return;
+
+ /* Walk through all the insns. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ rtx pat;
+
+ /* Ignore anything that is not a normal INSN. */
+ if (GET_CODE (insn) != INSN)
+ continue;
+
+ /* Ignore anything that isn't a SET. */
+ pat = PATTERN (insn);
+ if (GET_CODE (pat) != SET)
+ continue;
+
+ /* Check for a tst insn. */
+ if (SET_DEST (pat) == cc0_rtx
+ && GET_CODE (SET_SRC (pat)) == REG
+ && REGNO_REG_CLASS (REGNO (SET_SRC (pat))) == ADDRESS_REGS)
+ (*areg_countp)++;
+ }
+}
+
+/* Return the total size (in bytes) of the current function's frame.
+ This is the size of the register save area + the size of locals,
+ spills, etc. */
+int
+total_frame_size ()
+{
+ unsigned int size = get_frame_size ();
+ unsigned int outgoing_args_size = current_function_outgoing_args_size;
+ int i;
+
+ /* First figure out if we're going to use an out of line
+ prologue, if so we have to make space for all the
+ registers, even if we don't use them. */
+ if (optimize && !current_function_needs_context && !frame_pointer_needed)
+ {
+ int inline_count, outline_count;
+
+ /* Compute how many bytes an inline prologue would take.
+
+ Each address register store takes two bytes, each data register
+ store takes three bytes. */
+ inline_count = 0;
+ if (regs_ever_live[5])
+ inline_count += 2;
+ if (regs_ever_live[6])
+ inline_count += 2;
+ if (regs_ever_live[2])
+ inline_count += 3;
+ if (regs_ever_live[3])
+ inline_count += 3;
+
+ /* If this function has any stack, then the stack adjustment
+ will take two (or more) bytes. */
+ if (size || outgoing_args_size
+ || regs_ever_live[5] || regs_ever_live[6]
+ || regs_ever_live[2] || regs_ever_live[3])
+ inline_count += 2;
+
+ /* Multiply the current count by two and add one to account for the
+ epilogue insns. */
+ inline_count = inline_count * 2 + 1;
+
+ /* Now compute how many bytes an out of line sequence would take. */
+ /* A relaxed jsr will be three bytes. */
+ outline_count = 3;
+
+ /* If there are outgoing arguments, then we will need a stack
+ pointer adjustment after the call to the prologue, two
+ more bytes. */
+ outline_count += (outgoing_args_size == 0 ? 0 : 2);
+
+ /* If there is some local frame to allocate, it will need to be
+ done before the call to the prologue, two more bytes. */
+ if (get_frame_size () != 0)
+ outline_count += 2;
+
+ /* Now account for the epilogue, multiply the base count by two,
+ then deal with optimizing away the rts instruction. */
+ outline_count = outline_count * 2 + 1;
+
+ if (get_frame_size () == 0 && outgoing_args_size == 0)
+ outline_count -= 1;
+
+ /* If an out of line prologue is smaller, use it. */
+ if (inline_count > outline_count)
+ return size + outgoing_args_size + 16;
+ }
+
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (regs_ever_live[i] && !call_used_regs[i] && ! fixed_regs[i]
+ || (i == FRAME_POINTER_REGNUM && frame_pointer_needed))
+ size += 4;
+ }
+
+ return (size + outgoing_args_size);
+}
+
+/* Expand the prologue into RTL. */
+void
+expand_prologue ()
+{
+ unsigned int size = total_frame_size ();
+ unsigned int outgoing_args_size = current_function_outgoing_args_size;
+ int offset, i;
+
+ zero_areg = NULL_RTX;
+ zero_dreg = NULL_RTX;
+
+ /* If optimizing, see if we should do an out of line prologue/epilogue
+ sequence.
+
+ We don't support out of line prologues if the current function
+ needs a context or frame pointer. */
+ if (optimize && !current_function_needs_context && !frame_pointer_needed)
+ {
+ int inline_count, outline_count, areg_count;
+
+ /* We need to end the current sequence so that count_tst_insns can
+ look at all the insns in this function. Normally this would be
+ unsafe, but it's OK in the prologue/epilogue expanders. */
+ end_sequence ();
+
+ /* Get a count of the number of tst insns which use address
+ registers (it's not profitable to try and improve tst insns
+ which use data registers). */
+ count_tst_insns (&areg_count);
+
+ /* Now start a new sequence. */
+ start_sequence ();
+
+ /* Compute how many bytes an inline prologue would take.
+
+ Each address register store takes two bytes, each data register
+ store takes three bytes. */
+ inline_count = 0;
+ if (regs_ever_live[5])
+ inline_count += 2;
+ if (regs_ever_live[6])
+ inline_count += 2;
+ if (regs_ever_live[2])
+ inline_count += 3;
+ if (regs_ever_live[3])
+ inline_count += 3;
+
+ /* If this function has any stack, then the stack adjustment
+ will take two (or more) bytes. */
+ if (size || outgoing_args_size
+ || regs_ever_live[5] || regs_ever_live[6]
+ || regs_ever_live[2] || regs_ever_live[3])
+ inline_count += 2;
+
+ /* Multiply the current count by two and add one to account for the
+ epilogue insns. */
+ inline_count = inline_count * 2 + 1;
+
+ /* Now compute how many bytes an out of line sequence would take. */
+ /* A relaxed jsr will be three bytes. */
+ outline_count = 3;
+
+ /* If there are outgoing arguments, then we will need a stack
+ pointer adjustment after the call to the prologue, two
+ more bytes. */
+ outline_count += (outgoing_args_size == 0 ? 0 : 2);
+
+ /* If there is some local frame to allocate, it will need to be
+ done before the call to the prologue, two more bytes. */
+ if (get_frame_size () != 0)
+ outline_count += 2;
+
+ /* Now account for the epilogue, multiply the base count by two,
+ then deal with optimizing away the rts instruction. */
+ outline_count = outline_count * 2 + 1;
+
+ if (get_frame_size () == 0 && outgoing_args_size == 0)
+ outline_count -= 1;
+
+ /* If an out of line prologue is smaller, use it. */
+ if (inline_count > outline_count)
+ {
+ if (get_frame_size () != 0)
+ emit_insn (gen_addpsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-size + outgoing_args_size + 16)));
+ emit_insn (gen_outline_prologue_call ());
+
+ if (outgoing_args_size)
+ emit_insn (gen_addpsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-outgoing_args_size)));
+
+ out_of_line_epilogue = 1;
+
+ /* Determine if it is profitable to put the value zero into a register
+ for the entire function. If so, set ZERO_DREG and ZERO_AREG. */
+
+ /* First see if we could load the value into a data register
+ since that's the most efficient way. */
+ if (areg_count > 1
+ && (!regs_ever_live[2] || !regs_ever_live[3]))
+ {
+ if (!regs_ever_live[2])
+ {
+ regs_ever_live[2] = 1;
+ zero_dreg = gen_rtx (REG, HImode, 2);
+ }
+ if (!regs_ever_live[3])
+ {
+ regs_ever_live[3] = 1;
+ zero_dreg = gen_rtx (REG, HImode, 3);
+ }
+ }
+
+ /* Now see if we could load the value into a address register. */
+ if (zero_dreg == NULL_RTX
+ && areg_count > 2
+ && (!regs_ever_live[5] || !regs_ever_live[6]))
+ {
+ if (!regs_ever_live[5])
+ {
+ regs_ever_live[5] = 1;
+ zero_areg = gen_rtx (REG, HImode, 5);
+ }
+ if (!regs_ever_live[6])
+ {
+ regs_ever_live[6] = 1;
+ zero_areg = gen_rtx (REG, HImode, 6);
+ }
+ }
+
+ if (zero_dreg)
+ emit_move_insn (zero_dreg, const0_rtx);
+
+ if (zero_areg)
+ emit_move_insn (zero_areg, const0_rtx);
+
+ return;
+ }
+ }
+
+ out_of_line_epilogue = 0;
+
+ /* Temporarily stuff the static chain onto the stack so we can
+ use a0 as a scratch register during the prologue. */
+ if (current_function_needs_context)
+ {
+ emit_insn (gen_addpsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-4)));
+ emit_move_insn (gen_rtx (MEM, PSImode, stack_pointer_rtx),
+ gen_rtx (REG, PSImode, STATIC_CHAIN_REGNUM));
+ }
+
+ if (frame_pointer_needed)
+ {
+ /* Store a2 into a0 temporarily. */
+ emit_move_insn (gen_rtx (REG, PSImode, 4), frame_pointer_rtx);
+
+ /* Set up the frame pointer. */
+ emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
+ }
+
+ /* Make any necessary space for the saved registers and local frame. */
+ if (size)
+ emit_insn (gen_addpsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-size)));
+
+ /* Save the callee saved registers. They're saved into the top
+ of the frame, using the stack pointer. */
+ for (i = 0, offset = outgoing_args_size;
+ i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (regs_ever_live[i] && !call_used_regs[i] && ! fixed_regs[i]
+ || (i == FRAME_POINTER_REGNUM && frame_pointer_needed))
+ {
+ int regno;
+
+ /* If we're saving the frame pointer, then it will be found in
+ register 4 (a0). */
+ regno = (i == FRAME_POINTER_REGNUM && frame_pointer_needed) ? 4 : i;
+
+ emit_move_insn (gen_rtx (MEM, PSImode,
+ gen_rtx (PLUS, Pmode,
+ stack_pointer_rtx,
+ GEN_INT (offset))),
+ gen_rtx (REG, PSImode, regno));
+ offset += 4;
+ }
+ }
+
+ /* Now put the static chain back where the rest of the function
+ expects to find it. */
+ if (current_function_needs_context)
+ {
+ emit_move_insn (gen_rtx (REG, PSImode, STATIC_CHAIN_REGNUM),
+ gen_rtx (MEM, PSImode,
+ gen_rtx (PLUS, PSImode, stack_pointer_rtx,
+ GEN_INT (size))));
+ }
+}
+
/* Expand the epilogue into RTL.

   Emits the insns which tear down the stack frame: restore the saved
   registers, deallocate the local frame and outgoing argument area,
   deallocate the static chain slot if one was made, and emit the
   return insn.  May instead emit a call/jump to an out of line
   epilogue when that was judged profitable.  */
void
expand_epilogue ()
{
  unsigned int size;
  unsigned int outgoing_args_size = current_function_outgoing_args_size;
  int offset, i, temp_regno;
  rtx basereg;

  size = total_frame_size ();

  /* Choose a temporary register (d0 or a0) that does not overlap the
     register holding this function's return value; it is used below to
     recover the saved frame pointer without clobbering the result.  */
  if (DECL_RESULT (current_function_decl)
      && DECL_RTL (DECL_RESULT (current_function_decl))
      && REG_P (DECL_RTL (DECL_RESULT (current_function_decl))))
    temp_regno = (REGNO (DECL_RTL (DECL_RESULT (current_function_decl))) == 4
		  ? 0 : 4);
  else
    temp_regno = 4;

  /* Emit an out of line epilogue sequence if it's profitable to do so. */
  if (out_of_line_epilogue)
    {
      /* If there were no outgoing arguments and no local frame, then
	 we will be able to omit the rts at the end of this function,
	 so just jump to the epilogue_noreturn routine. */
      if (get_frame_size () == 0 && outgoing_args_size == 0)
	{
	  emit_jump_insn (gen_outline_epilogue_jump ());
	  return;
	}

      /* Deallocate the outgoing argument area first; the out of line
	 code restores registers relative to the adjusted sp.  */
      if (outgoing_args_size)
	emit_insn (gen_addpsi3 (stack_pointer_rtx, stack_pointer_rtx,
				GEN_INT (outgoing_args_size)));

      /* Call the out of line routine via whichever temporary does not
	 hold the return value.  */
      if (temp_regno == 0)
	emit_insn (gen_outline_epilogue_call_d0 ());
      else if (temp_regno == 4)
	emit_insn (gen_outline_epilogue_call_a0 ());

      /* Deallocate whatever frame remains; the -16 suggests the out of
	 line code already popped 16 bytes of saved registers -- TODO
	 confirm against the out of line epilogue in lib1funcs.asm.  */
      if (get_frame_size () != 0)
	emit_insn (gen_addpsi3 (stack_pointer_rtx, stack_pointer_rtx,
				GEN_INT (size - outgoing_args_size - 16)));
      emit_jump_insn (gen_return_internal ());
      return;
    }

  /* Registers are restored from the frame pointer if we have one,
     else they're restored from the stack pointer.  Figure out
     the appropriate offset to the register save area for both cases. */
  if (frame_pointer_needed)
    {
      basereg = frame_pointer_rtx;
      offset = -(size - outgoing_args_size);
    }
  else
    {
      basereg = stack_pointer_rtx;
      offset = outgoing_args_size;
    }

  /* Restore each register. */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      if (regs_ever_live[i] && !call_used_regs[i] && ! fixed_regs[i]
	  || (i == FRAME_POINTER_REGNUM && frame_pointer_needed))
	{
	  int regno;

	  /* Restore the frame pointer (if it exists) into a temporary
	     register, since the frame pointer itself is still needed to
	     address the save area.  */
	  regno = ((i == FRAME_POINTER_REGNUM && frame_pointer_needed)
		   ? temp_regno : i);

	  emit_move_insn (gen_rtx (REG, PSImode, regno),
			  gen_rtx (MEM, PSImode,
				   gen_rtx (PLUS, Pmode,
					    basereg,
					    GEN_INT (offset))));
	  offset += 4;
	}
    }

  if (frame_pointer_needed)
    {
      /* Deallocate this frame's stack. */
      emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
      /* Restore the old frame pointer (restored above into TEMP_REGNO). */
      emit_move_insn (frame_pointer_rtx, gen_rtx (REG, PSImode, temp_regno));
    }
  else if (size)
    {
      /* Deallocate this function's stack. */
      emit_insn (gen_addpsi3 (stack_pointer_rtx, stack_pointer_rtx,
			      GEN_INT (size)));
    }

  /* If we had to allocate a slot to save the context pointer,
     then it must be deallocated here. */
  if (current_function_needs_context)
    emit_insn (gen_addpsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (4)));

  /* Emit the return insn, if this function had no stack, then we
     can use the standard return (which allows more optimizations),
     else we have to use the special one which inhibits optimizations. */
  if (size == 0 && !current_function_needs_context)
    emit_jump_insn (gen_return ());
  else
    emit_jump_insn (gen_return_internal ());
}
+
/* Update the condition code from the insn.

   BODY is the pattern of INSN.  Updates the global cc_status to
   describe what INSN leaves in the condition codes, keyed off the
   insn's `cc' attribute from the machine description.  */

void
notice_update_cc (body, insn)
     rtx body;
     rtx insn;
{
  switch (get_attr_cc (insn))
    {
    case CC_NONE:
      /* Insn does not affect CC at all. */
      break;

    case CC_NONE_0HIT:
      /* Insn does not change CC, but the 0'th operand has been changed;
	 forget any remembered value that mentions that operand.  */
      if (cc_status.value1 != 0
	  && reg_overlap_mentioned_p (recog_operand[0], cc_status.value1))
	cc_status.value1 = 0;
      break;

    case CC_SET_ZN:
      /* Insn sets the Z,N flags of CC to recog_operand[0].
	 V,C is in an unusable state. */
      CC_STATUS_INIT;
      cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
      cc_status.value1 = recog_operand[0];
      break;

    case CC_SET_ZNV:
      /* Insn sets the Z,N,V flags of CC to recog_operand[0].
	 C is in an unusable state. */
      CC_STATUS_INIT;
      cc_status.flags |= CC_NO_CARRY;
      cc_status.value1 = recog_operand[0];
      break;

    case CC_COMPARE:
      /* The insn is a compare instruction; remember what was compared.  */
      CC_STATUS_INIT;
      cc_status.value1 = SET_SRC (body);
      break;

    case CC_CLOBBER:
      /* Insn doesn't leave CC in a usable state. */
      CC_STATUS_INIT;
      break;

    default:
      /* Unknown cc attribute value: assume CC is clobbered.  */
      CC_STATUS_INIT;
      break;
    }
}
+
+/* Return true if OP is a valid call operand. Valid call operands
+ are SYMBOL_REFs and REGs. */
+int
+call_address_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == REG);
+}
+
+/* Return true if OP is a memory operand with a constant address.
+ A special PSImode move pattern uses this predicate. */
+int
+constant_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return GET_CODE (op) == MEM && CONSTANT_ADDRESS_P (XEXP (op, 0));
+}
+
+/* What (if any) secondary registers are needed to move IN with mode
+ MODE into a register from in register class CLASS.
+
+ We might be able to simplify this. */
+enum reg_class
+secondary_reload_class (class, mode, in, input)
+ enum reg_class class;
+ enum machine_mode mode;
+ rtx in;
+ int input;
+{
+ int regno;
+
+ /* Memory loads less than a full word wide can't have an
+ address or stack pointer destination. They must use
+ a data register as an intermediate register. */
+ if (input
+ && GET_CODE (in) == MEM
+ && (mode == QImode)
+ && class == ADDRESS_REGS)
+ return DATA_REGS;
+
+ /* Address register stores which are not PSImode need a scratch register. */
+ if (! input
+ && GET_CODE (in) == MEM
+ && (mode != PSImode)
+ && class == ADDRESS_REGS)
+ return DATA_REGS;
+
+ /* Otherwise assume no secondary reloads are needed. */
+ return NO_REGS;
+}
+
+
+/* Shifts.
+
+ We devote a fair bit of code to getting efficient shifts since we can only
+ shift one bit at a time, and each single bit shift may take multiple
+ instructions.
+
+ The basic shift methods:
+
+ * loop shifts -- emit a loop using one (or two on H8/S) bit shifts;
+ this is the default. SHIFT_LOOP
+
+ * inlined shifts -- emit straight line code for the shift; this is
+ used when a straight line shift is about the same size or smaller
+ than a loop. We allow the inline version to be slightly longer in
+ some cases as it saves a register. SHIFT_INLINE
+
   * There are other oddballs.  Not worth explaining.  SHIFT_SPECIAL
+
+
+ HImode shifts:
+
+ 1-4 do them inline
+
+ 5-7 If ashift, then multiply, else loop.
+
+ 8-14 - If ashift, then multiply, if lshiftrt, then divide, else loop.
+ 15 - rotate the bit we want into the carry, clear the destination,
+ (use mov 0,dst, not sub as sub will clobber the carry), then
+ move bit into place.
+
+ Don't Panic, it's not nearly as bad as the H8 shifting code!!! */
+
+int
+nshift_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ switch (GET_CODE (x))
+ {
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case ASHIFT:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
/* Called from the .md file to emit code to do shifts.

   MODE is the mode being shifted, CODE the rtx shift code (ASHIFT,
   LSHIFTRT, or ASHIFTRT) and OPERANDS the pattern's operands
   (0 = destination, 1 = source, 2 = shift count).

   Returns a boolean indicating success
   (currently this is always TRUE).  */

int
expand_a_shift (mode, code, operands)
     enum machine_mode mode;
     int code;
     rtx operands[];
{
  /* Shift in place: copy the source to the destination first.  */
  emit_move_insn (operands[0], operands[1]);

  /* need a loop to get all the bits we want  - we generate the
     code at emit time, but need to allocate a scratch reg now */

  emit_insn (gen_rtx
	     (PARALLEL, VOIDmode,
	      gen_rtvec (2,
			 gen_rtx (SET, VOIDmode, operands[0],
				  gen_rtx (code, mode,
					   operands[0], operands[2])),
			 gen_rtx (CLOBBER, VOIDmode,
				  gen_rtx (SCRATCH, HImode, 0)))));

  return 1;
}
+
/* Shift algorithm determination.

   There are various ways of doing a shift:
   SHIFT_INLINE: If the amount is small enough, just generate as many one-bit
	shifts as we need.
   SHIFT_SPECIAL: Hand crafted assembler.
   SHIFT_LOOP:    If the above methods fail, just loop.  */

enum shift_alg
{
  SHIFT_INLINE,		/* Repeat the one-bit shift insn COUNT times.  */
  SHIFT_SPECIAL,	/* Emit a hand-crafted sequence.  */
  SHIFT_LOOP,		/* Emit a counted loop of one-bit shifts.  */
  SHIFT_MAX		/* Number of algorithms, not a real algorithm.  */
};

/* Symbols of the various shifts which can be used as indices.  */

enum shift_type
  {
    SHIFT_ASHIFT, SHIFT_LSHIFTRT, SHIFT_ASHIFTRT
  };

/* Symbols of the various modes which can be used as indices.
   Only HImode shifts are handled here.  */

enum shift_mode
  {
    HIshift,
  };

/* For single bit shift insns, record assembler and what bits of the
   condition code are valid afterwards (represented as various CC_FOO
   bits, 0 means CC isn't left in a usable state).  */

struct shift_insn
{
  char *assembler;	/* Output template for one single-bit shift.  */
  int cc_valid;		/* CC_* flag bits valid afterwards, or 0.  */
};
+
/* Assembler instruction shift table.

   These tables are used to look up the basic single-bit shifts.
   They are indexed by shift_type and shift_mode (unlike other ports
   there is no cpu dimension; only one cpu variant is handled here).
   Unlisted entries are zero-filled.  */

static const struct shift_insn shift_one[3][3] =
{
/* SHIFT_ASHIFT */
  {
    { "add\t%0,%0", CC_OVERFLOW_UNUSABLE | CC_NO_CARRY },
  },
/* SHIFT_LSHIFTRT */
  {
    { "lsr\t%0", CC_NO_CARRY },
  },
/* SHIFT_ASHIFTRT */
  {
    { "asr\t%0", CC_NO_CARRY },
  },
};
+
+/* Given CPU, MODE, SHIFT_TYPE, and shift count COUNT, determine the best
+ algorithm for doing the shift. The assembler code is stored in ASSEMBLER.
+ We don't achieve maximum efficiency in all cases, but the hooks are here
+ to do so.
+
+ For now we just use lots of switch statements. Since we don't even come
+ close to supporting all the cases, this is simplest. If this function ever
+ gets too big, perhaps resort to a more table based lookup. Of course,
+ at this point you may just wish to do it all in rtl. */
+
+static enum shift_alg
+get_shift_alg (shift_type, mode, count, assembler_p, cc_valid_p)
+ enum shift_type shift_type;
+ enum machine_mode mode;
+ int count;
+ const char **assembler_p;
+ int *cc_valid_p;
+{
+ /* The default is to loop. */
+ enum shift_alg alg = SHIFT_LOOP;
+ enum shift_mode shift_mode;
+
+ /* We don't handle negative shifts or shifts greater than the word size,
+ they should have been handled already. */
+
+ if (count < 0 || count > GET_MODE_BITSIZE (mode))
+ abort ();
+
+ switch (mode)
+ {
+ case HImode:
+ shift_mode = HIshift;
+ break;
+ default:
+ abort ();
+ }
+
+ /* Assume either SHIFT_LOOP or SHIFT_INLINE.
+ It is up to the caller to know that looping clobbers cc. */
+ *assembler_p = shift_one[shift_type][shift_mode].assembler;
+ *cc_valid_p = shift_one[shift_type][shift_mode].cc_valid;
+
+ /* Now look for cases we want to optimize. */
+
+ switch (shift_mode)
+ {
+ case HIshift:
+ if (count <= 4)
+ return SHIFT_INLINE;
+ else if (count < 15 && shift_type != SHIFT_ASHIFTRT)
+ {
+ switch (count)
+ {
+ case 5:
+ if (shift_type == SHIFT_ASHIFT)
+ *assembler_p = "mov 32,%4\n\tmul %4,%0";
+ else if (shift_type == SHIFT_LSHIFTRT)
+ *assembler_p
+ = "sub %4,%4\n\tmov %4,mdr\n\tmov 32,%4\n\tdivu %4,%0";
+ *cc_valid_p = CC_NO_CARRY;
+ return SHIFT_SPECIAL;
+ case 6:
+ if (shift_type == SHIFT_ASHIFT)
+ *assembler_p = "mov 64,%4\n\tmul %4,%0";
+ else if (shift_type == SHIFT_LSHIFTRT)
+ *assembler_p
+ = "sub %4,%4\n\tmov %4,mdr\n\tmov 64,%4\n\tdivu %4,%0";
+ *cc_valid_p = CC_NO_CARRY;
+ return SHIFT_SPECIAL;
+ case 7:
+ if (shift_type == SHIFT_ASHIFT)
+ *assembler_p = "mov 128,%4\n\tmul %4,%0";
+ else if (shift_type == SHIFT_LSHIFTRT)
+ *assembler_p
+ = "sub %4,%4\n\tmov %4,mdr\n\tmov 128,%4\n\tdivu %4,%0";
+ *cc_valid_p = CC_NO_CARRY;
+ return SHIFT_SPECIAL;
+ case 8:
+ if (shift_type == SHIFT_ASHIFT)
+ *assembler_p = "mov 256,%4\n\tmul %4,%0";
+ else if (shift_type == SHIFT_LSHIFTRT)
+ *assembler_p
+ = "sub %4,%4\n\tmov %4,mdr\n\tmov 256,%4\n\tdivu %4,%0";
+ *cc_valid_p = CC_NO_CARRY;
+ return SHIFT_SPECIAL;
+ case 9:
+ if (shift_type == SHIFT_ASHIFT)
+ *assembler_p = "mov 512,%4\n\tmul %4,%0";
+ else if (shift_type == SHIFT_LSHIFTRT)
+ *assembler_p
+ = "sub %4,%4\n\tmov %4,mdr\n\tmov 512,%4\n\tdivu %4,%0";
+ *cc_valid_p = CC_NO_CARRY;
+ return SHIFT_SPECIAL;
+ case 10:
+ if (shift_type == SHIFT_ASHIFT)
+ *assembler_p = "mov 1024,%4\n\tmul %4,%0";
+ else if (shift_type == SHIFT_LSHIFTRT)
+ *assembler_p
+ = "sub %4,%4\n\tmov %4,mdr\n\tmov 1024,%4\n\tdivu %4,%0";
+ *cc_valid_p = CC_NO_CARRY;
+ return SHIFT_SPECIAL;
+ case 11:
+ if (shift_type == SHIFT_ASHIFT)
+ *assembler_p = "mov 2048,%4\n\tmul %4,%0";
+ else if (shift_type == SHIFT_LSHIFTRT)
+ *assembler_p
+ = "sub %4,%4\n\tmov %4,mdr\n\tmov 2048,%4\n\tdivu %4,%0";
+ *cc_valid_p = CC_NO_CARRY;
+ return SHIFT_SPECIAL;
+ case 12:
+ if (shift_type == SHIFT_ASHIFT)
+ *assembler_p = "mov 4096,%4\n\tmul %4,%0";
+ else if (shift_type == SHIFT_LSHIFTRT)
+ *assembler_p
+ = "sub %4,%4\n\tmov %4,mdr\n\tmov 4096,%4\n\tdivu %4,%0";
+ *cc_valid_p = CC_NO_CARRY;
+ return SHIFT_SPECIAL;
+ case 13:
+ if (shift_type == SHIFT_ASHIFT)
+ *assembler_p = "mov 8192,%4\n\tmul %4,%0";
+ else if (shift_type == SHIFT_LSHIFTRT)
+ *assembler_p
+ = "sub %4,%4\n\tmov %4,mdr\n\tmov 8192,%4\n\tdivu %4,%0";
+ *cc_valid_p = CC_NO_CARRY;
+ return SHIFT_SPECIAL;
+ case 14:
+ if (shift_type == SHIFT_ASHIFT)
+ *assembler_p = "mov 16384,%4\n\tmul %4,%0";
+ else if (shift_type == SHIFT_LSHIFTRT)
+ *assembler_p
+ = "sub %4,%4\n\tmov %4,mdr\n\tmov 16384,%4\n\tdivu %4,%0";
+ *cc_valid_p = CC_NO_CARRY;
+ return SHIFT_SPECIAL;
+ }
+ }
+ else if (count == 15)
+ {
+ if (shift_type == SHIFT_ASHIFTRT)
+ {
+ *assembler_p = "add\t%0,%0\n\tsubc\t%0,%0\n";
+ *cc_valid_p = CC_NO_CARRY;
+ return SHIFT_SPECIAL;
+ }
+ if (shift_type == SHIFT_LSHIFTRT)
+ {
+ *assembler_p = "add\t%0,%0\n\tmov 0,%0\n\trol %0\n";
+ *cc_valid_p = CC_NO_CARRY;
+ return SHIFT_SPECIAL;
+ }
+ if (shift_type == SHIFT_ASHIFT)
+ {
+ *assembler_p = "ror\t%0\n\tmov 0,%0\n\tror %0\n";
+ *cc_valid_p = CC_NO_CARRY;
+ return SHIFT_SPECIAL;
+ }
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ return alg;
+}
+
/* Emit the assembler code for doing shifts.

   INSN is the shift insn being output; OPERANDS are its operands:
   0 = destination, 2 = shift count (REG or CONST_INT), 3 = the shift
   rtx itself, 4 = a scratch register.  Always returns "" since all
   assembler output is emitted here directly.  */

char *
emit_a_shift (insn, operands)
     rtx insn;
     rtx *operands;
{
  static int loopend_lab;	/* Counter giving each shift unique labels.  */
  char *assembler;
  int cc_valid;
  rtx inside = PATTERN (insn);	/* NOTE(review): appears unused here.  */
  rtx shift = operands[3];
  enum machine_mode mode = GET_MODE (shift);
  enum rtx_code code = GET_CODE (shift);
  enum shift_type shift_type;
  enum shift_mode shift_mode;

  loopend_lab++;

  switch (mode)
    {
    case HImode:
      shift_mode = HIshift;
      break;
    default:
      abort ();
    }

  switch (code)
    {
    case ASHIFTRT:
      shift_type = SHIFT_ASHIFTRT;
      break;
    case LSHIFTRT:
      shift_type = SHIFT_LSHIFTRT;
      break;
    case ASHIFT:
      shift_type = SHIFT_ASHIFT;
      break;
    default:
      abort ();
    }

  if (GET_CODE (operands[2]) != CONST_INT)
    {
      /* Indexing by reg, so have to loop and test at top */
      output_asm_insn ("mov %2,%4", operands);
      output_asm_insn ("cmp 0,%4", operands);
      /* A count <= 0 shifts by nothing: branch around the loop.  */
      fprintf (asm_out_file, "\tble .Lle%d\n", loopend_lab);

      /* Get the assembler code to do one shift. */
      get_shift_alg (shift_type, mode, 1, &assembler, &cc_valid);
    }
  else
    {
      int n = INTVAL (operands[2]);
      enum shift_alg alg;

      /* If the count is negative, make it 0. */
      if (n < 0)
	n = 0;
      /* If the count is too big, truncate it.
         ANSI says shifts of GET_MODE_BITSIZE are undefined - we choose to
	 do the intuitive thing. */
      else if (n > GET_MODE_BITSIZE (mode))
	n = GET_MODE_BITSIZE (mode);

      alg = get_shift_alg (shift_type, mode, n, &assembler, &cc_valid);

      switch (alg)
	{
	case SHIFT_INLINE:
	  /* Emit one bit shifts. */
	  while (n > 0)
	    {
	      output_asm_insn (assembler, operands);
	      n -= 1;
	    }

	  /* Keep track of CC. */
	  if (cc_valid)
	    {
	      cc_status.value1 = operands[0];
	      cc_status.flags |= cc_valid;
	    }
	  return "";

	case SHIFT_SPECIAL:
	  /* The whole shift is one hand-crafted sequence.  */
	  output_asm_insn (assembler, operands);

	  /* Keep track of CC. */
	  if (cc_valid)
	    {
	      cc_status.value1 = operands[0];
	      cc_status.flags |= cc_valid;
	    }
	  return "";
	}

      /* SHIFT_LOOP with a known constant count: load the count into
	 the scratch and emit a counted loop of one-bit shifts.  No
	 skip label is needed since the count is known positive here
	 (small counts were handled inline above).  */
      {
	fprintf (asm_out_file, "\tmov %d,%s\n", n,
		 reg_names[REGNO (operands[4])]);
	fprintf (asm_out_file, ".Llt%d:\n", loopend_lab);
	output_asm_insn (assembler, operands);
	output_asm_insn ("add -1,%4", operands);
	fprintf (asm_out_file, "\tbne .Llt%d\n", loopend_lab);
	return "";
      }
    }

  /* Register count case: loop body, decrement, branch back; .Lle is
     the label branched to above when the count was not positive.  */
  fprintf (asm_out_file, ".Llt%d:\n", loopend_lab);
  output_asm_insn (assembler, operands);
  output_asm_insn ("add -1,%4", operands);
  fprintf (asm_out_file, "\tbne .Llt%d\n", loopend_lab);
  fprintf (asm_out_file, ".Lle%d:\n", loopend_lab);

  return "";
}
+
+/* Return an RTX to represent where a value with mode MODE will be returned
+ from a function. If the result is 0, the argument is pushed. */
+
+rtx
+function_arg (cum, mode, type, named)
+ CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+{
+ rtx result = 0;
+ int size, align;
+
+ /* We only support using 2 data registers as argument registers. */
+ int nregs = 2;
+
+ /* Only pass named arguments in registers. */
+ if (!named)
+ return NULL_RTX;
+
+ /* Figure out the size of the object to be passed. We lie and claim
+ PSImode values are only two bytes since they fit in a single
+ register. */
+ if (mode == BLKmode)
+ size = int_size_in_bytes (type);
+ else if (mode == PSImode)
+ size = 2;
+ else
+ size = GET_MODE_SIZE (mode);
+
+ /* Figure out the alignment of the object to be passed. */
+ align = size;
+
+ cum->nbytes = (cum->nbytes + 1) & ~1;
+
+ /* Don't pass this arg via a register if all the argument registers
+ are used up. */
+ if (cum->nbytes + size > nregs * UNITS_PER_WORD)
+ return 0;
+
+ switch (cum->nbytes / UNITS_PER_WORD)
+ {
+ case 0:
+ result = gen_rtx (REG, mode, 0);
+ break;
+ case 1:
+ result = gen_rtx (REG, mode, 1);
+ break;
+ default:
+ result = 0;
+ }
+
+ return result;
+}
+
/* Return the number of registers to use for an argument passed partially
   in registers and partially in memory.

   NOTE(review): the unconditional `return 0' below disables partial
   register passing entirely; everything after it is dead code,
   presumably kept in case the decision is revisited -- confirm before
   removing.  */

int
function_arg_partial_nregs (cum, mode, type, named)
     CUMULATIVE_ARGS *cum;
     enum machine_mode mode;
     tree type;
     int named;
{
  int size, align;

  /* We only support using 2 data registers as argument registers. */
  int nregs = 2;

  /* Partial register passing is disabled -- see note above.  */
  return 0;
  /* Only pass named arguments in registers. */
  if (!named)
    return 0;

  /* Figure out the size of the object to be passed. */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else if (mode == PSImode)
    size = 2;
  else
    size = GET_MODE_SIZE (mode);

  /* Figure out the alignment of the object to be passed.
     (Computed but never used below.)  */
  align = size;

  /* Keep the argument area 16-bit aligned.  */
  cum->nbytes = (cum->nbytes + 1) & ~1;

  /* Don't pass this arg via a register if all the argument registers
     are used up. */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return 0;

  /* Fits entirely in registers: nothing is passed partially.  */
  if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
    return 0;

  /* Don't pass this arg via a register if it would be split between
     registers and memory. */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return 0;

  return (nregs * UNITS_PER_WORD - cum->nbytes) / UNITS_PER_WORD;
}
+
/* Output a test-against-zero of OPERAND for the tst patterns; INSN is
   the test insn itself.  Tries to avoid the `cmp 0,an' form by
   comparing against a register already known to hold zero.  Returns
   the assembler template still to be output (possibly "").  */
char *
output_tst (operand, insn)
     rtx operand, insn;
{

  rtx temp;
  int past_call = 0;

  /* Only tst insns using address registers can be optimized. */
  if (REGNO_REG_CLASS (REGNO (operand)) != ADDRESS_REGS)
    return "cmp 0,%0";

  /* If testing an address register against zero, we can do better if
     we know there's a register already holding the value zero.  First
     see if a global register has been set to zero, else we do a search
     for a register holding zero, if both of those fail, then we use a
     compare against zero. */
  if (zero_dreg || zero_areg)
    {
      rtx xoperands[2];
      xoperands[0] = operand;
      xoperands[1] = zero_dreg ? zero_dreg : zero_areg;

      output_asm_insn ("cmp %1,%0", xoperands);
      return "";
    }

  /* We can save a byte if we can find a register which has the value
     zero in it.  Scan backwards through the preceding insns.  */
  temp = PREV_INSN (insn);
  while (temp)
    {
      rtx set;

      /* We allow the search to go through call insns.  We record
	 the fact that we've passed a CALL_INSN and reject matches which
	 use call clobbered registers.  Stop at anything that could
	 change control flow.  */
      if (GET_CODE (temp) == CODE_LABEL
	  || GET_CODE (temp) == JUMP_INSN
	  || GET_CODE (temp) == BARRIER)
	break;

      if (GET_CODE (temp) == CALL_INSN)
	past_call = 1;

      /* Notes set nothing; skip them.  */
      if (GET_CODE (temp) == NOTE)
	{
	  temp = PREV_INSN (temp);
	  continue;
	}

      /* It must be an insn, see if it is a simple set. */
      set = single_set (temp);
      if (!set)
	{
	  temp = PREV_INSN (temp);
	  continue;
	}

      /* Are we setting a register to zero, and is it still zero at
	 the test insn?

	 If it's a call clobbered register, have we passed a call? */
      if (REG_P (SET_DEST (set))
	  && SET_SRC (set) == CONST0_RTX (GET_MODE (SET_DEST (set)))
	  && !reg_set_between_p (SET_DEST (set), temp, insn)
	  && (!past_call
	      || !call_used_regs[REGNO (SET_DEST (set))]))
	{
	  rtx xoperands[2];
	  xoperands[0] = operand;
	  xoperands[1] = SET_DEST (set);

	  output_asm_insn ("cmp %1,%0", xoperands);
	  return "";
	}
      temp = PREV_INSN (temp);
    }
  return "cmp 0,%0";
}
+
+/* Return nonzero if OP is a valid operand for a {zero,sign}_extendpsisi
+ instruction.
+
+ It accepts anything that is a general operand or the sum of the
+ stack pointer and a general operand. */
+extendpsi_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (general_operand (op, mode)
+ || (GET_CODE (op) == PLUS
+ && XEXP (op, 0) == stack_pointer_rtx
+ && general_operand (XEXP (op, 1), VOIDmode)));
+}
diff --git a/gnu/usr.bin/gcc/config/mn10200/mn10200.h b/gnu/usr.bin/gcc/config/mn10200/mn10200.h
new file mode 100644
index 00000000000..7eb26dd7c0d
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/mn10200/mn10200.h
@@ -0,0 +1,1078 @@
+/* Definitions of target machine for GNU compiler. Matsushita MN10200 series
+ Copyright (C) 1997 Free Software Foundation, Inc.
+ Contributed by Jeff Law (law@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "svr4.h"
+
+/* Get rid of svr4.h stuff we don't want/need. */
+#undef ASM_SPEC
+#undef ASM_FINAL_SPEC
+#undef LIB_SPEC
+#undef ENDFILE_SPEC
+#undef LINK_SPEC
+#undef STARTFILE_SPEC
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#define CPP_PREDEFINES "-D__mn10200__ -D__MN10200__ -D__LONG_MAX__=2147483647L -D__LONG_LONG_MAX__=2147483647L -D__INT_MAX__=32767"
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+
+/* We don't have any switched on the mn10200. Though there are some things
+ that might be worth a switch:
+
+ -mspace to optimize even more for space.
+
+ -mrelax to enable the relaxing linker. */
+
+extern int target_flags;
+
+/* Macros used in the machine description to test the flags. */
+
+/* Macro to define tables used to set the flags.
+ This is a list in braces of pairs in braces,
+ each pair being { "NAME", VALUE }
+ where VALUE is the bits to set or minus the bits to clear.
+ An empty string NAME is used to identify the default VALUE. */
+
+#define TARGET_SWITCHES \
+ {{ "", TARGET_DEFAULT}}
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 0
+#endif
+
+/* Print subsidiary information on the compiler version in use. */
+
+#define TARGET_VERSION fprintf (stderr, " (MN10200)");
+
+
+/* Target machine storage layout */
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields.
   This is not true on the Matsushita MN10200. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+/* This is not true on the Matsushita MN10200. */
+#define BYTES_BIG_ENDIAN 0
+
+/* Define this if most significant word of a multiword number is lowest
+ numbered.
+ This is not true on the Matsushita MN10200. */
+#define WORDS_BIG_ENDIAN 0
+
+/* Number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+/* Width in bits of a "word", which is the contents of a machine register.
+ Note that this is not necessarily the width of data type `int';
+ if using 16-bit ints on a 68000, this would still be 32.
+ But on a machine with 16-bit registers, this would be 16.
+
+ This is a white lie. Registers are really 24bits, but most operations
+ only operate on 16 bits. GCC chokes badly if we set this to a value
+ that is not a power of two. */
+#define BITS_PER_WORD 16
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 2
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below.
+
+ This differs from Pmode because we need to allocate 32bits of space
+ to hold the 24bit pointers on this machine. */
+#define POINTER_SIZE 32
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 16
+
+/* The stack goes in 16 bit lumps. */
+#define STACK_BOUNDARY 16
+
+/* Allocation boundary (in *bits*) for the code of a function.
+ 8 is the minimum boundary; it's unclear if bigger alignments
+ would improve performance. */
+#define FUNCTION_BOUNDARY 8
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 16
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 16
+
+/* Seems to be how the Matsushita compiler does things, and there's
+ no real reason to be different. */
+#define STRUCTURE_SIZE_BOUNDARY 16
+#undef PCC_BITFIELD_TYPE_MATTERS
+
+/* Define this if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 0
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers.
+
+ XXX Long term we should probably expose the MDR register, we use
+ it for division, multiplication, and some extension operations. */
+
+#define FIRST_PSEUDO_REGISTER 8
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+
+#define FIXED_REGISTERS \
+ { 0, 0, 0, 0, 0, 0, 0, 1}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you
+ like. */
+
+#define CALL_USED_REGISTERS \
+ { 1, 1, 0, 0, 1, 0, 0, 1}
+
+#define REG_ALLOC_ORDER \
+ { 0, 1, 4, 2, 3, 5, 6, 7}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers. */
+
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((MODE) == PSImode ? 1 : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD))
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode
+ MODE.
+
+ We allow any register to hold a PSImode value. We allow any register
+ to hold values <= 16 bits. For values > 16 bits we require aligned
+ register pairs. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((MODE) == PSImode ? 1 : ((REGNO) & 1) == 0 || GET_MODE_SIZE (MODE) <= 2)
+
/* Value is 1 if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
   for any hard reg, then this must be 0 for correct output.

   (Fix: parenthesize the macro arguments so expression arguments
   expand safely.)  */
#define MODES_TIEABLE_P(MODE1, MODE2) \
  ((MODE1) == (MODE2) \
   || (GET_MODE_SIZE (MODE1) <= 2 && GET_MODE_SIZE (MODE2) <= 2))
+
+/* 4 data, and effectively 2 address registers is small as far as I'm
+ concerned. Especially since we use 2 data registers for argument
+ passing and return values.
+
+ We used to define CLASS_LIKELY_SPILLED_P as true for DATA_REGS too,
+ but we've made improvements to the port which greatly reduce register
+ pressure. As a result we no longer need to define CLASS_LIKELY_SPILLED_P
+ for DATA_REGS (and by not defining it we get significantly better code). */
+#define SMALL_REGISTER_CLASSES 1
+#define CLASS_LIKELY_SPILLED_P(CLASS) (CLASS == ADDRESS_REGS)
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+enum reg_class {
+ NO_REGS, DATA_REGS, ADDRESS_REGS, GENERAL_REGS, ALL_REGS, LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+
+#define REG_CLASS_NAMES \
+{ "NO_REGS", "DATA_REGS", "ADDRESS_REGS", \
+ "GENERAL_REGS", "ALL_REGS", "LIM_REGS" }
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+#define REG_CLASS_CONTENTS \
+{ 0, /* No regs */ \
+ 0x0f, /* DATA_REGS */ \
+ 0xf0, /* ADDRESS_REGS */ \
+ 0xff, /* GENERAL_REGS */ \
+ 0xff, /* ALL_REGS */ \
+}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) < 4 ? DATA_REGS : ADDRESS_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+
+#define INDEX_REG_CLASS DATA_REGS
+#define BASE_REG_CLASS ADDRESS_REGS
+
+/* Get reg_class from a letter such as appears in the machine description. */
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C) == 'd' ? DATA_REGS : \
+ (C) == 'a' ? ADDRESS_REGS : NO_REGS)
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+
/* A register is a valid base if it is a hard address register (a0-a3,
   regnos 4..7) or a pseudo allocated to one; a valid index if it is a
   hard data register (d0-d3, regnos 0..3) or a pseudo allocated to one.
   (Fix: every use of the argument is parenthesized for macro hygiene.)  */
#define REGNO_OK_FOR_BASE_P(regno) \
  (((regno) > 3 && (regno) < FIRST_PSEUDO_REGISTER) \
   || (reg_renumber[(regno)] > 3 \
       && reg_renumber[(regno)] < FIRST_PSEUDO_REGISTER))

#define REGNO_OK_FOR_INDEX_P(regno) \
  (((regno) >= 0 && (regno) < 4) \
   || (reg_renumber[(regno)] >= 0 && reg_renumber[(regno)] < 4))
+
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((GET_MODE (X) != PSImode) ? DATA_REGS : CLASS)
+
/* We want to use DATA_REGS for anything that is not PSImode.
   (Fix: parenthesize the macro arguments for macro hygiene.)  */
#define LIMIT_RELOAD_CLASS(MODE, CLASS) \
  (((MODE) != PSImode) ? DATA_REGS : (CLASS))
+
+/* We have/need secondary reloads on the mn10200. Mostly to deal
+ with problems using address registers. */
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,IN) \
+ secondary_reload_class(CLASS,MODE,IN, 1)
+
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,IN) \
+ secondary_reload_class(CLASS,MODE,IN, 0)
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((MODE) == PSImode ? 1 : (GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* The letters I, J, K, L, M, N, O, P in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C. */
+
+#define INT_8_BITS(VALUE) ((unsigned) (VALUE) + 0x80 < 0x100)
+#define INT_16_BITS(VALUE) ((unsigned) (VALUE) + 0x8000 < 0x10000)
+
+#define CONST_OK_FOR_I(VALUE) ((VALUE) == 0)
+#define CONST_OK_FOR_J(VALUE) ((VALUE) >= 1 && (VALUE) <= 3)
+#define CONST_OK_FOR_K(VALUE) ((VALUE) >= 1 && (VALUE) <= 4)
+#define CONST_OK_FOR_L(VALUE) ((VALUE) == 15)
+#define CONST_OK_FOR_M(VALUE) ((VALUE) == 255)
+
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? CONST_OK_FOR_I (VALUE) : \
+ (C) == 'J' ? CONST_OK_FOR_J (VALUE) : \
+ (C) == 'K' ? CONST_OK_FOR_K (VALUE) : \
+ (C) == 'L' ? CONST_OK_FOR_L (VALUE) : \
+ (C) == 'M' ? CONST_OK_FOR_M (VALUE) : 0)
+
/* Similar, but for floating constants, and defining letters G and H.
   Here VALUE is the CONST_DOUBLE rtx itself.

   `G' is a floating-point zero.  `H' is mentioned above but is not
   implemented for this port; any letter other than G matches nothing.  */

#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
  ((C) == 'G' ? (GET_MODE_CLASS (GET_MODE (VALUE)) == MODE_FLOAT \
		 && (VALUE) == CONST0_RTX (GET_MODE (VALUE))) \
   : 0)
+
+
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+
+#define STACK_GROWS_DOWNWARD
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+
+#define FRAME_GROWS_DOWNWARD
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+
+#define STARTING_FRAME_OFFSET 0
+
+/* Offset of first parameter from the argument pointer register value. */
+/* Is equal to the size of the saved fp + pc, even if an fp isn't
+ saved since the value is used before we know. */
+
+#define FIRST_PARM_OFFSET(FNDECL) (current_function_needs_context ? 8 : 4)
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 7
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 6
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 6
+
+/* Register in which static-chain is passed to a function. */
+#define STATIC_CHAIN_REGNUM 4
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms
+ may be accessed via the stack pointer) in functions that seem suitable.
+ This is computed in `reload', in reload1.c.
+
+ We allow frame pointers to be eliminated when not having one will
+ not interfere with debugging. */
+#define ACCUMULATE_OUTGOING_ARGS
+#define FRAME_POINTER_REQUIRED 0
+#define CAN_DEBUG_WITHOUT_FP
+
+/* Store in the variable DEPTH the initial difference between the
+ frame pointer reg contents and the stack pointer reg contents,
+ as of the start of the function body. This depends on the layout
+ of the fixed parts of the stack frame and on how registers are saved. */
+
+#define INITIAL_FRAME_POINTER_OFFSET(DEPTH) (DEPTH) = total_frame_size()
+
+/* Various type size information.
+
+ The mn10200 has a limited number of small registers. Sizes of basic
+ data types are adjusted accordingly. */
+#define SHORT_TYPE_SIZE 16
+#define INT_TYPE_SIZE 16
+#define LONG_TYPE_SIZE 32
+#define LONG_LONG_TYPE_SIZE 32
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 32
+#define LONG_DOUBLE_TYPE_SIZE DOUBLE_TYPE_SIZE
+
+/* Any size less than 64bits will work; but a smarter definition
+ can make G++ code smaller and faster. Most operations on the
+ mn10200 occur on 16bit hunks, so the best size for a boolean
+ is 16bits. */
+#define BOOL_TYPE_SIZE 16
+
+/* The difference of two pointers must be at least 24bits since pointers
+ are 24bits; however, no basic data type is 24bits, so we have to round
+ up to a 32bits for the difference of pointers. */
+#undef SIZE_TYPE
+#undef PTRDIFF_TYPE
+#define SIZE_TYPE "long unsigned int"
+#define PTRDIFF_TYPE "long unsigned int"
+
+/* Note sizeof (WCHAR_TYPE) must be equal to the value of WCHAR_TYPE_SIZE! */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#define MAX_FIXED_MODE_SIZE 32
+
+/* A guess for the MN10200. */
+#define PROMOTE_PROTOTYPES 1
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack. */
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* 1 if N is a possible register number for function argument passing. */
+
+#define FUNCTION_ARG_REGNO_P(N) ((N) <= 1)
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go. */
+
+#define CUMULATIVE_ARGS struct cum_arg
+struct cum_arg { int nbytes; };
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+
+ On the MN10200, the offset starts at 0. */
+
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,INDIRECT) \
+ ((CUM).nbytes = 0)
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ ((CUM).nbytes += ((MODE) != BLKmode \
+ ? (MODE) == PSImode ? 2 : \
+ (GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) & -UNITS_PER_WORD \
+ : (int_size_in_bytes (TYPE) + UNITS_PER_WORD - 1) & -UNITS_PER_WORD))
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis). */
+
+extern struct rtx_def *function_arg();
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ function_arg (&CUM, MODE, TYPE, NAMED)
+
+
+/* For "large" items, we pass them by invisible reference, and the
+ callee is responsible for copying the data item if it might be
+ modified. */
+#define FUNCTION_ARG_PASS_BY_REFERENCE(CUM, MODE, TYPE, NAMED) \
+ ((TYPE) && int_size_in_bytes (TYPE) > 8)
+
+#define FUNCTION_ARG_CALLEE_COPIES(CUM, MODE, TYPE, NAMED) \
+ ((TYPE) && int_size_in_bytes (TYPE) > 8)
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ gen_rtx (REG, TYPE_MODE (VALTYPE), TYPE_MODE (VALTYPE) == PSImode ? 4 : 0)
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, MODE, (MODE) == PSImode ? 4 : 0)
+
+/* 1 if N is a possible register number for a function value. */
+
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == 0 || (N) == 4)
+
+/* Return values > 8 bytes in length in memory. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+#define RETURN_IN_MEMORY(TYPE) \
+ (int_size_in_bytes (TYPE) > 8 || TYPE_MODE (TYPE) == BLKmode)
+
+/* Register in which address to store a structure value
+ is passed to a function. On the MN10200 it's passed as
+ the first parameter. */
+
+#define STRUCT_VALUE 0
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+
+#define EXIT_IGNORE_STACK 1
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry.
+
+ ?!? Profiling is not currently supported. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) ;
+
/* Yes, we actually support trampolines on this machine, even though
   nobody is likely to ever use them.

   This emits the fixed part of every trampoline.  The two trailing
   ".long 0" words are the variable slots that INITIALIZE_TRAMPOLINE
   later fills in with the static chain and the target function
   address; the instruction sequence loads them relative to the
   trampoline's own address.

   NOTE(review): the byte displacements in the "mov (21,a0)" and
   "mov (17,a0)" loads are tied to the exact encoded length of the
   preceding instructions -- verify against the mn10200 instruction
   encoding before changing anything in this template.  */
#define TRAMPOLINE_TEMPLATE(FILE) \
  do { \
    fprintf (FILE, "\t.byte 0xfd\n"); \
    fprintf (FILE, "\t.byte 0x00\n"); \
    fprintf (FILE, "\t.byte 0x00\n"); \
    fprintf (FILE, "\tmov (a3),a0\n"); \
    fprintf (FILE, "\tadd -4,a3\n"); \
    fprintf (FILE, "\tmov a0,(0,a3)\n"); \
    fprintf (FILE, "\tmov (21,a0),a0\n"); \
    fprintf (FILE, "\tmov a0,(4,a3)\n"); \
    fprintf (FILE, "\tmov (0,a3),a0\n"); \
    fprintf (FILE, "\tmov (17,a0),a0\n"); \
    fprintf (FILE, "\tadd 4,a3\n"); \
    fprintf (FILE, "\trts\n"); \
    fprintf (FILE, "\t.long 0\n"); \
    fprintf (FILE, "\t.long 0\n"); \
  } while (0)
+
/* Length in units of the trampoline for entering a nested function.
   0x1c (28) bytes: the code sequence plus the two 32-bit data slots.  */

#define TRAMPOLINE_SIZE 0x1c

/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   The static chain is stored at byte offset 20 and the function
   address at byte offset 24 -- presumably the two ".long 0" slots at
   the end of TRAMPOLINE_TEMPLATE; verify against the template if the
   code sequence ever changes.  */

#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
{ \
  emit_move_insn (gen_rtx (MEM, PSImode, plus_constant ((TRAMP), 20)), \
		  (CXT)); \
  emit_move_insn (gen_rtx (MEM, PSImode, plus_constant ((TRAMP), 24)), \
		  (FNADDR)); \
}
+
/* A C expression whose value is RTL representing the value of the return
   address for the frame COUNT steps up from the current frame.  Only the
   current frame (COUNT == 0) is supported; the return address is taken
   from the word at the frame pointer.  COUNT is parenthesized so that
   expression arguments expand safely.  */

#define RETURN_ADDR_RTX(COUNT, FRAME) \
  (((COUNT) == 0) \
   ? gen_rtx (MEM, Pmode, frame_pointer_rtx) \
   : (rtx) 0)
+
+
+/* Addressing modes, and classification of registers for them. */
+
+
+/* 1 if X is an rtx for a constant that is a valid address. */
+
+#define CONSTANT_ADDRESS_P(X) CONSTANT_P (X)
+
+/* Extra constraints. */
+#define OK_FOR_R(OP) \
+ (GET_CODE (OP) == MEM \
+ && GET_MODE (OP) == QImode \
+ && REG_P (XEXP (OP, 0)))
+
+/* Q is used for sp + <something> in the {zero,sign}_extendpsisi2 patterns. */
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'R' ? OK_FOR_R (OP) : \
+ (C) == 'S' ? GET_CODE (OP) == SYMBOL_REF : \
+ (C) == 'Q' ? GET_CODE (OP) == PLUS : 0)
+
+/* Maximum number of registers that can appear in a valid memory address. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
#ifndef REG_OK_STRICT
/* Nonzero if X is a hard reg that can be used as an index
   or if it is a pseudo reg.  Hard regs 0-3 (the data registers)
   may index, and only in PSImode.  */
#define REG_OK_FOR_INDEX_P(X) \
  (GET_MODE (X) == PSImode \
   && ((REGNO (X) >= 0 && REGNO(X) <= 3) || REGNO (X) >= FIRST_PSEUDO_REGISTER))
/* Nonzero if X is a hard reg that can be used as a base reg
   or if it is a pseudo reg.  Hard regs 4-8 may serve as a base,
   and only in PSImode.  */
#define REG_OK_FOR_BASE_P(X) \
  (GET_MODE (X) == PSImode \
   && ((REGNO (X) >= 4 && REGNO(X) <= 8) || REGNO (X) >= FIRST_PSEUDO_REGISTER))
#else
/* Nonzero if X is a hard reg that can be used as an index.
   The whole expansion is parenthesized (the old version was not) so
   the macro composes correctly with `!', `&&', etc. at use sites.  */
#define REG_OK_FOR_INDEX_P(X) \
  (GET_MODE (X) == PSImode && REGNO_OK_FOR_INDEX_P (REGNO (X)))
/* Nonzero if X is a hard reg that can be used as a base reg.  */
#define REG_OK_FOR_BASE_P(X) \
  (GET_MODE (X) == PSImode && REGNO_OK_FOR_BASE_P (REGNO (X)))
#endif
+
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ We used to allow reg+reg addresses for QImode and HImode; however,
+ they tended to cause the register allocator to run out of registers.
+ Basically, an indexed load/store always keeps 2 data and one address
+ register live, which is just too many for this machine.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS,
+ except for CONSTANT_ADDRESS_P which is actually machine-independent. */
+
/* Accept either REG or SUBREG where a register is valid.  */

#define RTX_OK_FOR_BASE_P(X) \
  ((REG_P (X) && REG_OK_FOR_BASE_P (X)) \
   || (GET_CODE (X) == SUBREG && REG_P (SUBREG_REG (X)) \
       && REG_OK_FOR_BASE_P (SUBREG_REG (X))))

/* Accept three address forms:
     - a constant, but only for modes narrower than a pointer
       (PSImode pointers themselves must come from a register);
     - a bare base register (or a SUBREG of one);
     - base register + CONST_INT displacement.
   Despite the variable name "index" below, reg+reg addressing is
   deliberately rejected: only a CONST_INT may accompany the base
   (see the comment above on register pressure).  */
#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
{ \
  if ((MODE != PSImode) && CONSTANT_ADDRESS_P (X)) \
    goto ADDR; \
  if (RTX_OK_FOR_BASE_P (X)) \
    goto ADDR; \
  if (GET_CODE (X) == PLUS) \
    { \
      rtx base = 0, index = 0; \
      /* Whichever PLUS operand is a valid base register becomes the \
	 base; the other operand becomes the candidate displacement.  */ \
      if (REG_P (XEXP (X, 0)) \
	  && REG_OK_FOR_BASE_P (XEXP (X, 0))) \
	base = XEXP (X, 0), index = XEXP (X, 1); \
      if (REG_P (XEXP (X, 1)) \
	  && REG_OK_FOR_BASE_P (XEXP (X, 1))) \
	base = XEXP (X, 1), index = XEXP (X, 0); \
      if (base != 0 && index != 0) \
	{ \
	  if (GET_CODE (index) == CONST_INT) \
	    goto ADDR; \
	} \
    } \
}
+
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output. */
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) {}
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) {}
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
+
+#define LEGITIMATE_CONSTANT_P(X) 1
+
+
/* Tell final.c how to eliminate redundant test instructions.  */

/* Here we define machine-dependent flags and fields in cc_status
   (see `conditions.h').  CC_OVERFLOW_UNUSABLE is the one port-specific
   flag; CC_NO_CARRY is simply an alias for CC_NO_OVERFLOW.  (The old
   "needed for the vax" wording here was boilerplate copied from the
   VAX port.)  */

/* Store in cc_status the expressions
   that the condition codes will describe
   after execution of an instruction whose pattern is EXP.
   Do not alter them if the instruction would not alter the cc's.
   The real work is done by notice_update_cc in mn10200.c.  */

#define CC_OVERFLOW_UNUSABLE 0x200
#define CC_NO_CARRY CC_NO_OVERFLOW
#define NOTICE_UPDATE_CC(EXP, INSN) notice_update_cc(EXP, INSN)
+
+/* The mn10200 has a limited number of registers, so CSE of function
+ addresses generally makes code worse due to register pressure. */
+#define NO_FUNCTION_CSE
+
/* Compute the cost of computing a constant rtl expression RTX
   whose rtx-code is CODE.  The body of this macro is a portion
   of a switch statement.  If the code is computed here,
   return it with a return statement.  Otherwise, break from the switch.

   NOTE(review): the `& 0xffff0000' test assumes 32-bit INTVALs; on a
   host with wider HOST_WIDE_INT, negative values carry extra sign bits
   and will not match -- confirm before reusing this pattern.  */

#define CONST_COSTS(RTX,CODE,OUTER_CODE) \
  case CONST_INT: \
    /* Zeros are extremely cheap. */ \
    if (INTVAL (RTX) == 0) \
      return 0; \
    /* If it fits in 8 bits, then it's still relatively cheap. */ \
    if (INT_8_BITS (INTVAL (RTX))) \
      return 1; \
    /* This is the "base" cost, includes constants where either the \
       upper or lower 16bits are all zeros. */ \
    if (INT_16_BITS (INTVAL (RTX)) \
	|| (INTVAL (RTX) & 0xffff) == 0 \
	|| (INTVAL (RTX) & 0xffff0000) == 0) \
      return 2; \
    return 4; \
  /* These are more costly than a CONST_INT, but we can relax them, \
     so they're less costly than a CONST_DOUBLE. */ \
  case CONST: \
  case LABEL_REF: \
  case SYMBOL_REF: \
    return 6; \
  /* We don't optimize CONST_DOUBLEs well nor do we relax them well, \
     so their cost is very high. */ \
  case CONST_DOUBLE: \
    return 8;
+
+/* Make moves between different classes more expensive than moves
+ within the same class. */
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) (CLASS1 != CLASS2 ? 4 : 2)
+
+/* Provide the costs of a rtl expression. This is in the body of a
+ switch on CODE.
+
+ ?!? This probably needs more work. The definitions below were first
+ taken from the H8 port, then tweaked slightly to improve code density
+ on various sample codes. */
+
+#define RTX_COSTS(RTX,CODE,OUTER_CODE) \
+ case MOD: \
+ case DIV: \
+ return 8; \
+ case MULT: \
+ return (GET_MODE (RTX) == SImode ? 20 : 8);
+
+/* Nonzero if access to memory by bytes or half words is no faster
+ than accessing full words. */
+#define SLOW_BYTE_ACCESS 1
+
/* According to expr.c, a value of around 6 should minimize code size,
   and for the MN10200 series, code size is our primary concern.  */
#define MOVE_RATIO 6
+
+#define TEXT_SECTION_ASM_OP "\t.section .text"
+#define DATA_SECTION_ASM_OP "\t.section .data"
+#define BSS_SECTION_ASM_OP "\t.section .bss"
+
+/* Output at beginning/end of assembler file. */
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) asm_file_start(FILE)
+
+#define ASM_COMMENT_START "#"
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+
+#define ASM_APP_ON "#APP\n"
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+
+#define ASM_APP_OFF "#NO_APP\n"
+
/* This is how to output an assembler line defining a `double' constant:
   a ".double" directive with a 20-significant-digit decimal image of the
   value.  (The old ".dfloat or .gfloat, depending" comment was stale
   boilerplate from another port.)  */

#define ASM_OUTPUT_DOUBLE(FILE, VALUE) \
do { char dstr[30]; \
     REAL_VALUE_TO_DECIMAL ((VALUE), "%.20e", dstr); \
     fprintf (FILE, "\t.double %s\n", dstr); \
   } while (0)


/* This is how to output an assembler line defining a `float' constant.  */
#define ASM_OUTPUT_FLOAT(FILE, VALUE) \
do { char dstr[30]; \
     REAL_VALUE_TO_DECIMAL ((VALUE), "%.20e", dstr); \
     fprintf (FILE, "\t.float %s\n", dstr); \
   } while (0)
+
/* This is how to output an assembler line defining an `int' constant.
   Note these three macros are comma expressions, not statements, so
   they remain usable in expression context.  ".long" is 32 bits,
   ".hword" 16 bits and ".byte" 8 bits for this target.  */

#define ASM_OUTPUT_INT(FILE, VALUE) \
( fprintf (FILE, "\t.long "), \
  output_addr_const (FILE, (VALUE)), \
  fprintf (FILE, "\n"))

/* Likewise for `char' and `short' constants.  */

#define ASM_OUTPUT_SHORT(FILE, VALUE) \
( fprintf (FILE, "\t.hword "), \
  output_addr_const (FILE, (VALUE)), \
  fprintf (FILE, "\n"))

#define ASM_OUTPUT_CHAR(FILE, VALUE) \
( fprintf (FILE, "\t.byte "), \
  output_addr_const (FILE, (VALUE)), \
  fprintf (FILE, "\n"))

/* This is how to output an assembler line for a numeric constant byte.  */
#define ASM_OUTPUT_BYTE(FILE, VALUE) \
  fprintf (FILE, "\t.byte 0x%x\n", (VALUE))
+
+/* Define the parentheses used to group arithmetic operations
+ in assembler code. */
+
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+/* This says how to output the assembler to define a global
+ uninitialized but not common symbol.
+ Try to use asm_output_bss to implement this macro. */
+
+#define ASM_OUTPUT_BSS(FILE, DECL, NAME, SIZE, ROUNDED) \
+ asm_output_bss ((FILE), (DECL), (NAME), (SIZE), (ROUNDED))
+
+/* This is how to output the definition of a user-level label named NAME,
+ such as the label on a static function or variable NAME. */
+
+#define ASM_OUTPUT_LABEL(FILE, NAME) \
+ do { assemble_name (FILE, NAME); fputs (":\n", FILE); } while (0)
+
+/* This is how to output a command to make the user-level label named NAME
+ defined for reference from other files. */
+
+#define ASM_GLOBALIZE_LABEL(FILE, NAME) \
+ do { fputs ("\t.global ", FILE); assemble_name (FILE, NAME); fputs ("\n", FILE);} while (0)
+
+/* This is how to output a reference to a user-level label named NAME.
+ `assemble_name' uses this. */
+
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(FILE, NAME) \
+ do { \
+ char* real_name; \
+ STRIP_NAME_ENCODING (real_name, (NAME)); \
+ fprintf (FILE, "_%s", real_name); \
+ } while (0)
+
/* Store in OUTPUT a string (made with alloca) containing
   an assembler-name for a local static variable named NAME.
   LABELNO is an integer which is different for each call.
   Allocate room for the three underscores, up to ten digits of a
   positive int LABELNO, and the terminating NUL -- 14 bytes beyond
   NAME.  (The old "+ 10" was short for ten-digit label numbers and
   could overflow the alloca'd buffer.)  */

#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
( (OUTPUT) = (char *) alloca (strlen ((NAME)) + 15), \
  sprintf ((OUTPUT), "%s___%d", (NAME), (LABELNO)))
+
+/* This is how we tell the assembler that two symbols have the same value. */
+
+#define ASM_OUTPUT_DEF(FILE,NAME1,NAME2) \
+ do { assemble_name(FILE, NAME1); \
+ fputs(" = ", FILE); \
+ assemble_name(FILE, NAME2); \
+ fputc('\n', FILE); } while (0)
+
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+
+#define REGISTER_NAMES \
+{ "d0", "d1", "d2", "d3", "a0", "a1", "a2", "a3"}
+
/* Print an instruction operand X on file FILE.
   Look in mn10200.c for details.  */

#define PRINT_OPERAND(FILE, X, CODE) print_operand(FILE,X,CODE)

/* Print a memory operand whose address is X, on file FILE.
   (The old comment pointing at output-vax.c was stale boilerplate;
   print_operand_address is declared below and presumably implemented
   in mn10200.c.)  */

#define PRINT_OPERAND_ADDRESS(FILE, ADDR) print_operand_address (FILE, ADDR)

/* Defined empty: no assembler is emitted for single-register
   push/pop requests on this port.  */
#define ASM_OUTPUT_REG_PUSH(FILE,REGNO)
#define ASM_OUTPUT_REG_POP(FILE,REGNO)
+
+/* This is how to output an element of a case-vector that is absolute. */
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ asm_fprintf (FILE, "\t%s .L%d\n", ".long", VALUE)
+
+/* This is how to output an element of a case-vector that is relative. */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+ fprintf (FILE, "\t%s .L%d-.L%d\n", ".long", VALUE, REL)
+
/* Emit a ".align LOG" directive, but only for a non-trivial alignment.
   Wrapped in do/while(0) so the macro expands to a single statement
   and nests safely inside if/else at the use site -- the old bare
   `if' could capture a following `else' (dangling-else hazard).  */
#define ASM_OUTPUT_ALIGN(FILE,LOG) \
  do { \
    if ((LOG) != 0) \
      fprintf (FILE, "\t.align %d\n", (LOG)); \
  } while (0)
+
+/* We don't have to worry about dbx compatibility for the mn10200. */
+#define DEFAULT_GDB_EXTENSIONS 1
+
+/* Use stabs debugging info by default. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#define DBX_REGISTER_NUMBER(REGNO) REGNO
+
+/* GDB always assumes the current function's frame begins at the value
+ of the stack pointer upon entry to the current function. Accessing
+ local variables and parameters passed on the stack is done using the
+ base of the frame + an offset provided by GCC.
+
+ For functions which have frame pointers this method works fine;
+ the (frame pointer) == (stack pointer at function entry) and GCC provides
+ an offset relative to the frame pointer.
+
+ This loses for functions without a frame pointer; GCC provides an offset
+ which is relative to the stack pointer after adjusting for the function's
+ frame size. GDB would prefer the offset to be relative to the value of
+ the stack pointer at the function's entry. Yuk! */
+#define DEBUGGER_AUTO_OFFSET(X) \
+ ((GET_CODE (X) == PLUS ? INTVAL (XEXP (X, 1)) : 0) \
+ + (frame_pointer_needed ? 0 : -total_frame_size ()))
+
+#define DEBUGGER_ARG_OFFSET(OFFSET, X) \
+ ((GET_CODE (X) == PLUS ? OFFSET : 0) \
+ + (frame_pointer_needed ? 0 : -total_frame_size ()))
+
+/* Define to use software floating point emulator for REAL_ARITHMETIC and
+ decimal <-> binary conversion. */
+#define REAL_ARITHMETIC
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE Pmode
+
+/* Define this if the case instruction drops through after the table
+ when the index is out of range. Don't define it if the case insn
+ jumps to the default label instead. */
+#define CASE_DROPS_THROUGH
+
+/* Dispatch tables on the mn10200 are extremely expensive in terms of code
+ and readonly data size. So we crank up the case threshold value to
+ encourage a series of if/else comparisons to implement many small switch
+ statements. In theory, this value could be increased much more if we
+ were solely optimizing for space, but we keep it "reasonable" to avoid
+ serious code efficiency lossage. */
+#define CASE_VALUES_THRESHOLD 8
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
/* We could define this either way.  Using ZERO_EXTEND for QImode makes
   for slightly faster and more compact code.  */
#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This flag, if defined, says the same insns that convert to a signed fixnum
+ also convert validly to an unsigned one. */
+#define FIXUNS_TRUNC_LIKE_FIX_TRUNC
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 2
+
+/* Define if shifts truncate the shift count
+ which implies one can omit a sign-extension or zero-extension
+ of a shift count. */
+#define SHIFT_COUNT_TRUNCATED 1
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) (OUTPREC != 32)
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode PSImode
+
+/* A function address in a call instruction
+ is a byte address (for indexing purposes)
+ so give the MEM rtx a byte's mode. */
+#define FUNCTION_MODE QImode
+
+/* Perform target dependent optabs initialization. */
+#define MODHI3_LIBCALL "__modhi3"
+#define DIVHI3_LIBCALL "__divhi3"
+
+#define INIT_TARGET_OPTABS \
+ do { \
+ sdiv_optab->handlers[(int) HImode].libfunc \
+ = gen_rtx (SYMBOL_REF, Pmode, DIVHI3_LIBCALL); \
+ smod_optab->handlers[(int) HImode].libfunc \
+ = gen_rtx (SYMBOL_REF, Pmode, MODHI3_LIBCALL); \
+ } while (0)
+
+/* The assembler op to get a word. */
+
+#define FILE_ASM_OP "\t.file\n"
+
+extern void asm_file_start ();
+extern void print_operand ();
+extern void print_operand_address ();
+extern void expand_prologue ();
+extern void expand_epilogue ();
+extern void notice_update_cc ();
+extern int call_address_operand ();
+extern enum reg_class secondary_reload_class ();
+extern char *emit_a_shift ();
+extern int current_function_needs_context;
+extern char *output_tst ();
+extern int extendpsi_operand ();
+extern int rtx_equal_function_value_matters;
+extern struct rtx_def *zero_dreg;
+extern struct rtx_def *zero_areg;
diff --git a/gnu/usr.bin/gcc/config/mn10200/mn10200.md b/gnu/usr.bin/gcc/config/mn10200/mn10200.md
new file mode 100644
index 00000000000..9dc753c3023
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/mn10200/mn10200.md
@@ -0,0 +1,1978 @@
+;; GCC machine description for Matsushita MN10200
+;; Copyright (C) 1997 Free Software Foundation, Inc.
+
+;; Contributed by Jeff Law (law@cygnus.com).
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;; The original PO technology requires these to be ordered by speed,
+;; so that assigner will pick the fastest.
+
+;; See file "rtl.def" for documentation on define_insn, match_*, et. al.
+
+;; Condition code settings.
+;; none - insn does not affect cc
+;; none_0hit - insn does not affect cc but it does modify operand 0
+;; This attribute is used to keep track of when operand 0 changes.
+;; See the description of NOTICE_UPDATE_CC for more info.
+;; set_znv - sets z,n,v to usable values; c is unknown.
+;; set_zn - sets z,n to usable values; v,c is unknown.
+;; compare - compare instruction
+;; clobber - value of cc is unknown
+;; The default is "clobber" -- the conservative choice for any insn
+;; that does not explicitly say otherwise.
+(define_attr "cc" "none,none_0hit,set_znv,set_zn,compare,clobber"
+ (const_string "clobber"))
+
+;; ----------------------------------------------------------------------
+;; MOVE INSTRUCTIONS
+;; ----------------------------------------------------------------------
+;;
+;; Some general notes on move instructions.
+;;
+;; The hardware can't encode nop moves involving data registers, so
+;; we catch them and emit a nop instead.
+;;
+;; Loads/stores to/from address registers must be 16bit aligned,
+;; thus we avoid them for QImode.
+;;
+;; Stores from address registers always store 24bits, so avoid
+;; stores from address registers in HImode, SImode, and SFmode.
+;;
+;; As a result of the various problems using address registers in
+;; QImode, HImode, SImode, and SFmode, we discourage their use via
+;; '*' in their constraints. They're still allowed, but they're never
+;; the preferred class for insns with those modes.
+
+;; movqi
+
+;; The expander only guarantees that at least one operand ends up in
+;; a register; the anonymous pattern below does the real work.
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* One of the ops has to be in a register */
+ if (!register_operand (operand0, QImode)
+ && !register_operand (operand1, QImode))
+ operands[1] = copy_to_mode_reg (QImode, operand1);
+}")
+
+;; We avoid memory operations involving address registers because we
+;; can't be sure they'll be suitably aligned.
+;;
+;; We also discourage holding QImode values in address registers.
+;; Alternative 0 is a same-register move (encoded as nop); 1 and 2
+;; load the constant zero by subtracting a register from itself.
+(define_insn ""
+ [(set (match_operand:QI 0 "general_operand" "=d,d,*a,d,d,m,d,*a,*a")
+ (match_operand:QI 1 "general_operand" "0,I,I,di,m,d,*a,d,i*a"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ nop
+ sub %0,%0
+ sub %0,%0
+ mov %S1,%0
+ movbu %1,%0
+ movb %1,%0
+ mov %1,%0
+ mov %1,%0
+ mov %1,%0"
+ [(set_attr "cc" "none,clobber,clobber,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")])
+
+;; movhi
+
+;; As with movqi, force at least one operand into a register.
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* One of the ops has to be in a register */
+ if (!register_operand (operand1, HImode)
+ && !register_operand (operand0, HImode))
+ operands[1] = copy_to_mode_reg (HImode, operand1);
+}")
+
+;; Same structure as the QImode pattern; the extra final alternative
+;; allows loading an address register directly from memory.
+(define_insn ""
+ [(set (match_operand:HI 0 "general_operand" "=d,d,*a,d,d,m,d,*a,*a,*a")
+ (match_operand:HI 1 "general_operand" "0,I,I,di,m,d,*a,d,i*a,m"))]
+ "register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)"
+ "@
+ nop
+ sub %0,%0
+ sub %0,%0
+ mov %s1,%0
+ mov %1,%0
+ mov %1,%0
+ mov %1,%0
+ mov %1,%0
+ mov %1,%0
+ mov %A1,%0"
+ [(set_attr "cc" "none,clobber,clobber,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")])
+
+;; movpsi and helpers
+
+(define_expand "movpsi"
+ [(set (match_operand:PSI 0 "general_operand" "")
+ (match_operand:PSI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* One of the ops has to be in a register */
+ if (!register_operand (operand1, PSImode)
+ && !register_operand (operand0, PSImode))
+ operands[1] = copy_to_mode_reg (PSImode, operand1);
+}")
+
+
+;; Constant and indexed addresses are not valid addresses for PSImode,
+;; therefore they won't be matched by the general movpsi pattern below.
+;; ??? We had patterns to handle indexed addresses, but they kept making
+;; us run out of regs, so they were eliminated.
+
+;; Load a PSImode value from a constant (absolute) memory address.
+(define_insn ""
+ [(set (match_operand:PSI 0 "register_operand" "=a")
+ (match_operand:PSI 1 "constant_memory_operand" ""))]
+ ""
+ "mov %A1,%0"
+ [(set_attr "cc" "none_0hit")])
+
+;; Store a PSImode value to a constant (absolute) memory address.
+(define_insn ""
+ [(set (match_operand:PSI 0 "constant_memory_operand" "=X")
+ (match_operand:PSI 1 "register_operand" "a"))]
+ ""
+ "mov %1,%A0"
+ [(set_attr "cc" "none_0hit")])
+
+;; We want to prefer address registers here because 24bit moves to/from
+;; memory are shorter and faster when done via address registers.
+;; The movx alternatives handle data-register memory accesses.
+(define_insn ""
+ [(set (match_operand:PSI 0 "general_operand" "=d,a?d,?da,a,m,?d,m")
+ (match_operand:PSI 1 "general_operand" "0,I,?dai,m,a,m,?d"))]
+ "register_operand (operands[0], PSImode)
+ || register_operand (operands[1], PSImode)"
+ "@
+ nop
+ sub %0,%0
+ mov %1,%0
+ mov %A1,%0
+ mov %1,%A0
+ movx %A1,%0
+ movx %1,%A0"
+ [(set_attr "cc" "none,clobber,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")])
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* One of the ops has to be in a register */
+ if (!register_operand (operand1, SImode)
+ && !register_operand (operand0, SImode))
+ operands[1] = copy_to_mode_reg (SImode, operand1);
+}")
+
+;; SImode values occupy a register pair; %H and %L address the high
+;; and low halves, %h/%o the high/low halves of a constant operand.
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=d,d,*a,dm,d,d,*a,*a,*a")
+ (match_operand:SI 1 "general_operand" "0,I,I,d,dim,*a,d,*a,i"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return \"nop\";
+ case 1:
+ case 2:
+ return \"sub %H0,%H0\;sub %L0,%L0\";
+ case 3:
+ case 5:
+ case 6:
+ case 7:
+ return \"mov %H1,%H0\;mov %L1,%L0\";
+
+ /* The next two cases try to optimize cases where one half
+ of the constant is all zeros, or when the two halves are
+ the same. */
+ case 4:
+ case 8:
+ if (REG_P (operands[0])
+ && GET_CODE (operands[1]) == CONST_INT
+ && (INTVAL (operands[1]) & 0xffff0000) == 0)
+ output_asm_insn (\"sub %H0,%H0\", operands);
+ else
+ output_asm_insn (\"mov %h1,%H0\", operands);
+
+ if (GET_CODE (operands[1]) == CONST_INT
+ && ((INTVAL (operands[1]) & 0xffff)
+ == ((INTVAL (operands[1]) >> 16) & 0xffff)))
+ output_asm_insn (\"mov %H0,%L0\", operands);
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && (INTVAL (operands[1]) & 0xffff) == 0)
+ output_asm_insn (\"sub %L0,%L0\", operands);
+ else
+ output_asm_insn (\"mov %o1,%L0\", operands);
+ return \"\";
+ }
+}"
+ [(set_attr "cc" "none,clobber,clobber,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* One of the ops has to be in a register */
+ if (!register_operand (operand1, SFmode)
+ && !register_operand (operand0, SFmode))
+ operands[1] = copy_to_mode_reg (SFmode, operand1);
+}")
+
+;; Like the SImode move, but a CONST_DOUBLE source must first be
+;; converted to its target single-precision bit pattern (val) before
+;; the half-is-zero optimizations can be applied.
+(define_insn ""
+ [(set (match_operand:SF 0 "general_operand" "=d,d,*a,dm,d,d,*a,*a,*a")
+ (match_operand:SF 1 "general_operand" "0,G,G,d,dim,*a,d,*a,i"))]
+ "register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return \"nop\";
+
+ case 1:
+ case 2:
+ return \"sub %H0,%H0\;sub %L0,%L0\";
+
+ default:
+ {
+ long val;
+ REAL_VALUE_TYPE rv;
+
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, val);
+ }
+
+ if (GET_CODE (operands[1]) == CONST_INT)
+ val = INTVAL (operands[1]);
+
+ if ((GET_CODE (operands[1]) == CONST_INT
+ || GET_CODE (operands[1]) == CONST_DOUBLE)
+ && (val & 0xffff0000) == 0)
+ output_asm_insn (\"sub %H0,%H0\", operands);
+ else
+ output_asm_insn (\"mov %h1,%H0\", operands);
+
+ if (GET_CODE (operands[1]) == CONST_INT
+ && ((INTVAL (operands[1]) & 0xffff)
+ == ((INTVAL (operands[1]) >> 16) & 0xffff)))
+ output_asm_insn (\"mov %H0,%L0\", operands);
+ else if ((GET_CODE (operands[1]) == CONST_INT
+ || GET_CODE (operands[1]) == CONST_DOUBLE)
+ && (val & 0x0000ffff) == 0)
+ output_asm_insn (\"sub %L0,%L0\", operands);
+ else
+ output_asm_insn (\"mov %o1,%L0\", operands);
+ return \"\";
+ }
+ }
+}"
+ [(set_attr "cc" "none,clobber,clobber,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")])
+
+
+;; ----------------------------------------------------------------------
+;; TEST INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; Go ahead and define tsthi and tstpsi so we can eliminate redundant tst insns
+;; when we start trying to optimize this port.
+(define_insn "tsthi"
+ [(set (cc0) (match_operand:HI 0 "general_operand" "da"))]
+ ""
+ "* return output_tst (operands[0], insn);"
+ [(set_attr "cc" "set_znv")])
+
+(define_insn "tstpsi"
+ [(set (cc0) (match_operand:PSI 0 "general_operand" "da"))]
+ ""
+ "* return output_tst (operands[0], insn);"
+ [(set_attr "cc" "set_znv")])
+
+;; NOTE(review): the next two patterns pair a "memory_operand"
+;; predicate with a "d" (data register) constraint -- that mismatch
+;; looks suspicious; confirm against mn10200.c's output_tst.
+(define_insn ""
+ [(set (cc0) (zero_extend:HI (match_operand:QI 0 "memory_operand" "d")))]
+ ""
+ "* return output_tst (operands[0], insn);"
+ [(set_attr "cc" "set_znv")])
+
+(define_insn ""
+ [(set (cc0) (zero_extend:PSI (match_operand:QI 0 "memory_operand" "d")))]
+ ""
+ "* return output_tst (operands[0], insn);"
+ [(set_attr "cc" "set_znv")])
+
+(define_insn "cmphi"
+ [(set (cc0)
+ (compare:HI (match_operand:HI 0 "general_operand" "da")
+ (match_operand:HI 1 "general_operand" "dai")))]
+ ""
+ "cmp %1,%0"
+ [(set_attr "cc" "compare")])
+
+(define_insn "cmppsi"
+ [(set (cc0)
+ (compare:PSI (match_operand:PSI 0 "general_operand" "da")
+ (match_operand:PSI 1 "general_operand" "dai")))]
+ ""
+ "cmp %1,%0"
+ [(set_attr "cc" "compare")])
+
+;; ----------------------------------------------------------------------
+;; ADD INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; HImode and PSImode adds map directly onto the hardware add insn;
+;; '%' marks operand 1 as commutative with operand 2.
+(define_insn "addhi3"
+ [(set (match_operand:HI 0 "general_operand" "=d")
+ (plus:HI (match_operand:HI 1 "general_operand" "%0")
+ (match_operand:HI 2 "general_operand" "dai")))]
+ ""
+ "add %2,%0"
+ [(set_attr "cc" "set_zn")])
+
+(define_insn "addpsi3"
+ [(set (match_operand:PSI 0 "general_operand" "=da")
+ (plus:PSI (match_operand:PSI 1 "general_operand" "%0")
+ (match_operand:PSI 2 "general_operand" "dai")))]
+ ""
+ "add %2,%0"
+ [(set_attr "cc" "set_zn")])
+
+;; We want to avoid using explicit registers; reload won't tell us
+;; if it has to spill them and may generate incorrect code in such
+;; cases.
+;;
+;; So we call out to a library routine to perform 32bit add or
+;; subtract operations.
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (plus:SI (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" "")))]
+ ""
+ "
+{
+ /* If adding a CONST_INT, we are better off generating code ourselves.
+
+ During RTL generation we call out to library routines.
+
+ After RTL generation we can not call the library routines as
+ they need to push arguments via virtual_outgoing_args_rtx which
+ has already been instantiated. So, after RTL generation we just
+ FAIL and open code the operation. */
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if (!rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+ emit_insn (gen_addsi3_const (operands[0], operands[0], operands[2]));
+ DONE;
+ }
+ else if (rtx_equal_function_value_matters)
+ {
+ rtx ret, insns;
+ extern rtx emit_library_call_value ();
+
+ start_sequence ();
+ ret = emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"__addsi3\"),
+ NULL_RTX, 1, SImode, 2, operands[1],
+ SImode, operands[2], SImode);
+ insns = get_insns ();
+ end_sequence ();
+ /* The equivalent-value expression recorded by emit_libcall_block
+ (as a REG_EQUAL note) must describe the value computed: a PLUS.
+ The previous ASHIFT here was a cut-and-paste error from a shift
+ expander and could mislead CSE/combine into treating the sum
+ as a shift. */
+ emit_libcall_block (insns, operands[0], ret,
+ gen_rtx (PLUS, SImode, operands[1], operands[2]));
+ DONE;
+ }
+ else
+ FAIL;
+}")
+
+;; Open-coded SImode add of a constant, using a scratch data register
+;; for the carry propagation into the high half.
+(define_insn "addsi3_const"
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (plus:SI (match_operand:SI 1 "general_operand" "0")
+ (match_operand:SI 2 "const_int_operand" "i")))
+ (clobber (match_scratch:SI 3 "=&d"))]
+ ""
+ "*
+{
+ unsigned long value = INTVAL (operands[2]);
+
+ /* If only the high bits are set in the constant, then we only
+ need a single add operation. It might be better to catch this
+ at RTL expansion time. */
+ if ((value & 0xffff) == 0)
+ return \"add %h2,%H0\";
+
+ value >>= 16;
+ value &= 0xffff;
+
+ if (value == 0)
+ return \"sub %3,%3\;add %o2,%L0\;addc %3,%H0\";
+ else
+ return \"mov %h2,%3\;add %o2,%L0\;addc %3,%H0\";
+}"
+ [(set_attr "cc" "clobber")])
+
+;; ----------------------------------------------------------------------
+;; SUBTRACT INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; HImode and PSImode subtracts map directly onto the hardware sub insn.
+(define_insn "subhi3"
+ [(set (match_operand:HI 0 "general_operand" "=d")
+ (minus:HI (match_operand:HI 1 "general_operand" "0")
+ (match_operand:HI 2 "general_operand" "dai")))]
+ ""
+ "sub %2,%0"
+ [(set_attr "cc" "set_zn")])
+
+(define_insn "subpsi3"
+ [(set (match_operand:PSI 0 "general_operand" "=da")
+ (minus:PSI (match_operand:PSI 1 "general_operand" "0")
+ (match_operand:PSI 2 "general_operand" "dai")))]
+ ""
+ "sub %2,%0"
+ [(set_attr "cc" "set_zn")])
+
+;; SImode subtraction is performed by a libgcc call during RTL
+;; generation; afterwards we FAIL so the operation is open coded.
+(define_expand "subsi3"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (minus:SI (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" "")))]
+ ""
+ "
+{
+ /* During RTL generation we call out to library routines.
+
+ After RTL generation we can not call the library routines as
+ they need to push arguments via virtual_outgoing_args_rtx which
+ has already been instantiated. So, after RTL generation we just
+ FAIL and open code the operation. */
+ if (rtx_equal_function_value_matters)
+ {
+ rtx ret, insns;
+ extern rtx emit_library_call_value ();
+
+ start_sequence ();
+ ret = emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"__subsi3\"),
+ NULL_RTX, 1, SImode, 2, operands[1],
+ SImode, operands[2], SImode);
+ insns = get_insns ();
+ end_sequence ();
+ /* The equivalent-value expression recorded by emit_libcall_block
+ (as a REG_EQUAL note) must describe the value computed: a MINUS.
+ The previous ASHIFT here was a cut-and-paste error from a shift
+ expander and could mislead CSE/combine into treating the
+ difference as a shift. */
+ emit_libcall_block (insns, operands[0], ret,
+ gen_rtx (MINUS, SImode, operands[1], operands[2]));
+ DONE;
+ }
+ else
+ FAIL;
+}")
+
+;; There isn't a negate instruction, so we fake it.
+;;
+;; We used to expand this into patterns, but a single pattern
+;; actually generates better overall code.
+;;
+;; We could do HImode negations with a "not;add" sequence, but
+;; generally it's generated slightly worse code.
+;;
+;; The second alternative is not strictly necessary, but helps
+;; when the register allocators start running short of registers.
+(define_insn "neghi2"
+ [(set (match_operand:HI 0 "general_operand" "=&d,d")
+ (neg:HI (match_operand:HI 1 "general_operand" "d,0")))]
+ ""
+ "@
+ sub %0,%0\;sub %1,%0
+ not %0\;add 1,%0"
+ [(set_attr "cc" "set_zn")])
+
+;; The not/and sequence won't work here. It's not clear if we'll
+;; ever need to provide an alternate sequence since this should
+;; be used much less frequently than neghi2.
+(define_insn "negpsi2"
+ [(set (match_operand:PSI 0 "general_operand" "=&d")
+ (neg:PSI (match_operand:PSI 1 "general_operand" "d")))]
+ ""
+ "sub %0,%0\;sub %1,%0"
+ [(set_attr "cc" "set_zn")])
+
+;; Using a magic libcall that accepts its arguments in any
+;; data register pair has proven to be the most efficient
+;; and most compact way to represent negsi2.
+;; The register-pair-specific entry point is selected by
+;; appending the operand's register name to the symbol.
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (neg:SI (match_operand:SI 1 "general_operand" "0")))]
+ ""
+ "jsr ___negsi2_%0"
+ [(set_attr "cc" "clobber")])
+
+;; ----------------------------------------------------------------------
+;; MULTIPLY INSTRUCTIONS
+;; ----------------------------------------------------------------------
+;;
+;; The mn10200 has HIxHI->SI widening multiply, but we get _severe_
+;; code density regressions if we enable such a pattern.
+
+(define_insn "mulhi3"
+ [(set (match_operand:HI 0 "general_operand" "=d")
+ (mult:HI (match_operand:HI 1 "general_operand" "%0")
+ (match_operand:HI 2 "general_operand" "d")))]
+ ""
+ "mul %2,%0"
+ [(set_attr "cc" "set_zn")])
+
+;; Combined unsigned divide/modulus. mdr must be zeroed first; if a
+;; known-zero data register (zero_dreg) exists we use it, otherwise we
+;; zero the remainder register and use that. Note the &zero_dreg trick:
+;; output_asm_insn is handed a pointer to zero_dreg as its operand
+;; array, so %0 in that template prints zero_dreg, not operands[0].
+;; The mdr->%3 copy is skipped when the remainder is unused.
+(define_insn "udivmodhi4"
+ [(set (match_operand:HI 0 "general_operand" "=d")
+ (udiv:HI (match_operand:HI 1 "general_operand" "0")
+ (match_operand:HI 2 "general_operand" "d")))
+ (set (match_operand:HI 3 "general_operand" "=&d")
+ (umod:HI (match_dup 1) (match_dup 2)))]
+ ""
+ "*
+{
+ if (zero_dreg)
+ output_asm_insn (\"mov %0,mdr\", &zero_dreg);
+ else
+ output_asm_insn (\"sub %3,%3\;mov %3,mdr\", operands);
+
+ if (find_reg_note (insn, REG_UNUSED, operands[3]))
+ return \"divu %2,%0\";
+ else
+ return \"divu %2,%0\;mov mdr,%3\";
+}"
+ [(set_attr "cc" "set_zn")])
+
+
+;; ----------------------------------------------------------------------
+;; AND INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; Special-case masks with shorter encodings: 0xff via extxbu,
+;; 0x7fff via shift-left-then-right, 0xfffe via shift-right-then-left.
+(define_insn "andhi3"
+ [(set (match_operand:HI 0 "general_operand" "=d,d")
+ (and:HI (match_operand:HI 1 "general_operand" "%0,0")
+ (match_operand:HI 2 "general_operand" "M,di")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 0xff)
+ return \"extxbu %0\";
+ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 0x7fff)
+ return \"add %0,%0\;lsr %0\";
+ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 0xfffe)
+ return \"lsr %0\;add %0,%0\";
+ return \"and %2,%0\";
+}"
+ [(set_attr "cc" "none_0hit,set_znv")])
+
+;; This expander + pattern exist only to allow trampolines to be aligned
+;; in the stack.
+(define_expand "andpsi3"
+ [(set (match_operand:PSI 0 "general_operand" "")
+ (and:PSI (match_operand:PSI 1 "general_operand" "")
+ (match_operand:PSI 2 "const_int_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) != CONST_INT
+ || (INTVAL (operands[2]) & 0xff0000) != 0xff0000)
+ FAIL;
+}")
+
+;; Only masks whose top byte is all ones are accepted (see expander).
+(define_insn ""
+ [(set (match_operand:PSI 0 "general_operand" "=d")
+ (and:PSI (match_operand:PSI 1 "general_operand" "%0")
+ (match_operand:PSI 2 "const_int_operand" "i")))]
+ "GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) & 0xff0000) == 0xff0000"
+ "and %2,%0"
+ [(set_attr "cc" "clobber")])
+
+;; ----------------------------------------------------------------------
+;; OR INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+(define_insn "iorhi3"
+ [(set (match_operand:HI 0 "general_operand" "=d")
+ (ior:HI (match_operand:HI 1 "general_operand" "%0")
+ (match_operand:HI 2 "general_operand" "di")))]
+ ""
+ "or %2,%0"
+ [(set_attr "cc" "set_znv")])
+
+;; ----------------------------------------------------------------------
+;; XOR INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+(define_insn "xorhi3"
+ [(set (match_operand:HI 0 "general_operand" "=d")
+ (xor:HI (match_operand:HI 1 "general_operand" "%0")
+ (match_operand:HI 2 "general_operand" "di")))]
+ ""
+ "xor %2,%0"
+ [(set_attr "cc" "set_znv")])
+
+;; ----------------------------------------------------------------------
+;; NOT INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; One's complement; the hardware insn operates in place.
+(define_insn "one_cmplhi2"
+ [(set (match_operand:HI 0 "general_operand" "=d")
+ (not:HI (match_operand:HI 1 "general_operand" "0")))]
+ ""
+ "not %0"
+ [(set_attr "cc" "set_znv")])
+
+
+;; -----------------------------------------------------------------
+;; BIT INSTRUCTIONS
+;; -----------------------------------------------------------------
+
+;; These clear a constant set of bits in memory or in a register.
+;; We must support register destinations to make reload happy.
+;; %N1 prints the one's complement of the constant mask.
+(define_insn ""
+ [(set (match_operand:QI 0 "general_operand" "R,d")
+ (subreg:QI
+ (and:HI (subreg:HI (match_dup 0) 0)
+ (match_operand 1 "const_int_operand" "")) 0))
+ (clobber (match_scratch:HI 2 "=&d,X"))]
+ ""
+ "@
+ mov %N1,%2\;bclr %2,%0
+ and %1,%0"
+ [(set_attr "cc" "clobber")])
+
+;; This clears a variable set of bits in memory or in a register.
+(define_insn ""
+ [(set (match_operand:QI 0 "general_operand" "R,d")
+ (subreg:QI
+ (and:HI (subreg:HI (match_dup 0) 0)
+ (not:HI (match_operand:HI 1 "general_operand" "d,d"))) 0))
+ (clobber (match_scratch:HI 2 "=X,&d"))]
+ ""
+ "@
+ bclr %1,%0
+ mov %1,%2\;not %2\;and %2,%0"
+ [(set_attr "cc" "clobber")])
+
+;; Same as above with the AND operands commuted (combine may produce
+;; either order).
+(define_insn ""
+ [(set (match_operand:QI 0 "general_operand" "R,d")
+ (subreg:QI
+ (and:HI (not:HI (match_operand:HI 1 "general_operand" "d,d"))
+ (subreg:HI (match_dup 0) 0)) 0))
+ (clobber (match_scratch:HI 2 "=X,&d"))]
+ ""
+ "@
+ bclr %1,%0
+ mov %1,%2\;not %2\;and %2,%0"
+ [(set_attr "cc" "clobber")])
+
+;; These set bits in memory.
+(define_insn ""
+ [(set (match_operand:QI 0 "general_operand" "R,d")
+ (subreg:QI
+ (ior:HI (subreg:HI (match_dup 0) 0)
+ (match_operand:HI 1 "general_operand" "d,d")) 0))]
+ ""
+ "@
+ bset %1,%0
+ or %1,%0"
+ [(set_attr "cc" "clobber")])
+
+;; Commuted form of the pattern above.
+(define_insn ""
+ [(set (match_operand:QI 0 "general_operand" "R,d")
+ (subreg:QI
+ (ior:HI (match_operand:HI 1 "general_operand" "d,d")
+ (subreg:HI (match_dup 0) 0)) 0))]
+ ""
+ "@
+ bset %1,%0
+ or %1,%0"
+ [(set_attr "cc" "clobber")])
+
+;; Not any shorter/faster than using cmp, but it might save a
+;; register if the result of the AND isn't ever used.
+
+;; Test a contiguous bitfield: build the mask from the length
+;; (operand 1) and starting bit (operand 2), then btst it.
+(define_insn ""
+ [(set (cc0)
+ (zero_extract:HI (match_operand:HI 0 "general_operand" "d")
+ (match_operand 1 "const_int_operand" "")
+ (match_operand 2 "const_int_operand" "")))]
+ ""
+ "*
+{
+ int len = INTVAL (operands[1]);
+ int bit = INTVAL (operands[2]);
+ int mask = 0;
+ rtx xoperands[2];
+
+ while (len > 0)
+ {
+ mask |= (1 << bit);
+ bit++;
+ len--;
+ }
+
+ xoperands[0] = operands[0];
+ xoperands[1] = GEN_INT (mask);
+ output_asm_insn (\"btst %1,%0\", xoperands);
+ return \"\";
+}"
+ [(set_attr "cc" "set_znv")])
+
+(define_insn ""
+ [(set (cc0) (and:HI (match_operand:HI 0 "general_operand" "d")
+ (match_operand:HI 1 "const_int_operand" "i")))]
+ ""
+ "btst %1,%0"
+ [(set_attr "cc" "set_znv")])
+
+
+;; ----------------------------------------------------------------------
+;; JUMP INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; Conditional jump instructions
+
+;; The b<cond> expanders are all trivial; the anonymous patterns at
+;; the end of this group emit the actual branch instruction.
+(define_expand "ble"
+ [(set (pc)
+ (if_then_else (le (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bleu"
+ [(set (pc)
+ (if_then_else (leu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bge"
+ [(set (pc)
+ (if_then_else (ge (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgeu"
+ [(set (pc)
+ (if_then_else (geu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "blt"
+ [(set (pc)
+ (if_then_else (lt (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bltu"
+ [(set (pc)
+ (if_then_else (ltu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgt"
+ [(set (pc)
+ (if_then_else (gt (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "beq"
+ [(set (pc)
+ (if_then_else (eq (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bne"
+ [(set (pc)
+ (if_then_else (ne (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+;; Branch on condition true. Signed branches are refused when the
+;; overflow flag is unusable. NOTE(review): `return 0` from an output
+;; template aborts compilation in final -- presumably a deliberate
+;; "can't happen" assertion; confirm against NOTICE_UPDATE_CC.
+;; The "x" suffix selects the 24bit (PSImode) form of the branch.
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0
+ && (GET_CODE (operands[1]) == GT
+ || GET_CODE (operands[1]) == GE
+ || GET_CODE (operands[1]) == LE
+ || GET_CODE (operands[1]) == LT))
+ return 0;
+
+ if (GET_MODE (SET_SRC (PATTERN (PREV_INSN (insn)))) == PSImode)
+ return \"b%b1x %0\";
+ else
+ return \"b%b1 %0\";
+}"
+ [(set_attr "cc" "none")])
+
+;; Branch on condition false (arms of the if_then_else reversed);
+;; %B prints the inverted condition.
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0
+ && (GET_CODE (operands[1]) == GT
+ || GET_CODE (operands[1]) == GE
+ || GET_CODE (operands[1]) == LE
+ || GET_CODE (operands[1]) == LT))
+ return 0;
+
+ if (GET_MODE (SET_SRC (PATTERN (PREV_INSN (insn)))) == PSImode)
+ return \"b%B1x %0\";
+ else
+ return \"b%B1 %0\";
+}"
+ [(set_attr "cc" "none")])
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "jmp %l0"
+ [(set_attr "cc" "none")])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:PSI 0 "general_operand" "a"))]
+ ""
+ "jmp (%0)"
+ [(set_attr "cc" "none")])
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:PSI 0 "general_operand" "a"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "jmp (%0)"
+ [(set_attr "cc" "none")])
+
+;; Call subroutine with no return value.
+
+;; Force non-immediate call targets into an address register before
+;; handing off to the internal pattern.
+(define_expand "call"
+ [(call (match_operand:QI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! call_address_operand (XEXP (operands[0], 0)))
+ XEXP (operands[0], 0) = force_reg (PSImode, XEXP (operands[0], 0));
+ emit_call_insn (gen_call_internal (XEXP (operands[0], 0), operands[1]));
+ DONE;
+}")
+
+(define_insn "call_internal"
+ [(call (mem:QI (match_operand:PSI 0 "call_address_operand" "aS"))
+ (match_operand:HI 1 "general_operand" "g"))]
+ ""
+ "jsr %C0"
+ [(set_attr "cc" "clobber")])
+
+;; Call subroutine, returning value in operand 0
+;; (which must be a hard register).
+
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand:QI 1 "general_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+ ""
+ "
+{
+ if (! call_address_operand (XEXP (operands[1], 0)))
+ XEXP (operands[1], 0) = force_reg (PSImode, XEXP (operands[1], 0));
+ emit_call_insn (gen_call_value_internal (operands[0],
+ XEXP (operands[1], 0),
+ operands[2]));
+ DONE;
+}")
+
+(define_insn "call_value_internal"
+ [(set (match_operand 0 "" "=da")
+ (call (mem:QI (match_operand:PSI 1 "call_address_operand" "aS"))
+ (match_operand:HI 2 "general_operand" "g")))]
+ ""
+ "jsr %C1"
+ [(set_attr "cc" "clobber")])
+
+;; Call that returns values in multiple locations: emit the call,
+;; then copy each result location named by operand 2 into place.
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+ "
+{
+ int i;
+
+ emit_call_insn (gen_call (operands[0], const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+ DONE;
+}")
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop"
+ [(set_attr "cc" "none")])
+
+;; ----------------------------------------------------------------------
+;; EXTEND INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+(define_insn "zero_extendqihi2"
+ [(set (match_operand:HI 0 "general_operand" "=d,d,d")
+ (zero_extend:HI
+ (match_operand:QI 1 "general_operand" "0,di,m")))]
+ ""
+ "@
+ extxbu %0
+ mov %1,%0\;extxbu %0
+ movbu %1,%0"
+ [(set_attr "cc" "none_0hit")])
+
+(define_insn "zero_extendqipsi2"
+ [(set (match_operand:PSI 0 "general_operand" "=d,d,d")
+ (zero_extend:PSI
+ (match_operand:QI 1 "general_operand" "0,di,m")))]
+ ""
+ "@
+ extxbu %0
+ mov %1,%0\;extxbu %0
+ movbu %1,%0"
+ [(set_attr "cc" "none_0hit")])
+
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "general_operand" "=d,d,d")
+ (zero_extend:SI
+ (match_operand:QI 1 "general_operand" "0,di,m")))]
+ ""
+ "@
+ extxbu %L0\;sub %H0,%H0
+ mov %1,%L0\;extxbu %L0\;sub %H0,%H0
+ movbu %1,%L0\;sub %H0,%H0"
+ [(set_attr "cc" "none_0hit")])
+
+(define_insn "zero_extendhipsi2"
+ [(set (match_operand:PSI 0 "general_operand" "=d,d,d")
+ (zero_extend:PSI
+ (match_operand:HI 1 "general_operand" "0,di,m")))]
+ ""
+ "@
+ extxu %0
+ mov %1,%0\;extxu %0
+ mov %1,%0\;extxu %0"
+ [(set_attr "cc" "none_0hit")])
+
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "general_operand" "=d,d")
+ (zero_extend:SI
+ (match_operand:HI 1 "general_operand" "0,dim")))]
+ ""
+ "@
+ sub %H0,%H0
+ mov %1,%L0\;sub %H0,%H0"
+ [(set_attr "cc" "none_0hit")])
+
+;; The last alternative is necessary because the second operand might
+;; have been the frame pointer. The frame pointer would get replaced
+;; by (plus (stack_pointer) (const_int)).
+;;
+;; Reload would think that it only needed a PSImode register in
+;; push_reload and at the start of allocate_reload_regs. However,
+;; at the end of allocate_reload_reg it would realize that the
+;; reload register must also be valid for SImode, and if it was
+;; not valid reload would abort.
+(define_insn "zero_extendpsisi2"
+ [(set (match_operand:SI 0 "register_operand" "=d,?d,?*d,?*d")
+ (zero_extend:SI (match_operand:PSI 1 "extendpsi_operand"
+ "m,?0,?*dai,Q")))]
+ ""
+ "@
+ mov %L1,%L0\;movbu %H1,%H0
+ jsr ___zero_extendpsisi2_%0
+ mov %1,%L0\;jsr ___zero_extendpsisi2_%0
+ mov a3,%L0\;add %Z1,%L0\;jsr ___zero_extendpsisi2_%0"
+ [(set_attr "cc" "clobber")])
+
+;;- sign extension instructions
+
+(define_insn "extendqihi2"
+ [(set (match_operand:HI 0 "general_operand" "=d,d,d")
+ (sign_extend:HI
+ (match_operand:QI 1 "general_operand" "0,di,m")))]
+ ""
+ "*
+{
+ if (which_alternative == 0)
+ return \"extxb %0\";
+ else if (which_alternative == 1)
+ return \"mov %1,%0\;extxb %0\";
+ else if (GET_CODE (XEXP (operands[1], 0)) == REG)
+ return \"movbu %1,%0\;extxb %0\";
+ else
+ return \"movb %1,%0\";
+}"
+ [(set_attr "cc" "none_0hit")])
+
+(define_insn "extendqipsi2"
+ [(set (match_operand:PSI 0 "general_operand" "=d,d,d")
+ (sign_extend:PSI
+ (match_operand:QI 1 "general_operand" "0,di,m")))]
+ ""
+ "*
+{
+ if (which_alternative == 0)
+ return \"extxb %0\";
+ else if (which_alternative == 1)
+ return \"mov %1,%0\;extxb %0\";
+ else if (GET_CODE (XEXP (operands[1], 0)) == REG)
+ return \"movbu %1,%0\;extxb %0\";
+ else
+ return \"movb %1,%0\";
+}"
+ [(set_attr "cc" "none_0hit")])
+
+(define_insn "extendqisi2"
+ [(set (match_operand:SI 0 "general_operand" "=d,d,d")
+ (sign_extend:SI
+ (match_operand:QI 1 "general_operand" "0,di,m")))]
+ ""
+ "*
+{
+ if (which_alternative == 0)
+ return \"extxb %L0\;mov %L0,%H0\;add %H0,%H0\;subc %H0,%H0\";
+ else if (which_alternative == 1)
+ return \"mov %1,%L0\;extxb %L0\;mov %L0,%H0\;add %H0,%H0\;subc %H0,%H0\";
+ else if (GET_CODE (XEXP (operands[1], 0)) == REG)
+ return \"movbu %1,%L0\;extxb %L0\;mov %L0,%H0\;add %H0,%H0\;subc %H0,%H0\";
+ else
+ return \"movb %1,%L0\;mov %L0,%H0\;add %H0,%H0\;subc %H0,%H0\";
+}"
+ [(set_attr "cc" "none_0hit")])
+
+(define_insn "extendhipsi2"
+ [(set (match_operand:PSI 0 "general_operand" "=d,d,d")
+ (sign_extend:PSI
+ (match_operand:HI 1 "general_operand" "0,di,m")))]
+ ""
+ "@
+ extx %0
+ mov %1,%0\;extx %0
+ mov %1,%0"
+ [(set_attr "cc" "none_0hit")])
+
+(define_insn "extendhisi2"
+ [(set (match_operand:SI 0 "general_operand" "=d,d,d")
+ (sign_extend:SI
+ (match_operand:HI 1 "general_operand" "0,di,m")))]
+ ""
+ "@
+ mov %L0,%H0\;add %H0,%H0\;subc %H0,%H0
+ mov %1,%L0\;mov %L0,%H0\;add %H0,%H0\;subc %H0,%H0
+ mov %1,%L0\;mov %L0,%H0\;add %H0,%H0\;subc %H0,%H0"
+ [(set_attr "cc" "none_0hit")])
+
+;; The last alternative is necessary because the second operand might
+;; have been the frame pointer. The frame pointer would get replaced
+;; by (plus (stack_pointer) (const_int)).
+;;
+;; Reload would think that it only needed a PSImode register in
+;; push_reload and at the start of allocate_reload_reg. However,
+;; at the end of allocate_reload_reg it would realize that the
+;; reload register must also be valid for SImode, and if it was
+;; not valid reload would abort.
+(define_insn "extendpsisi2"
+ [(set (match_operand:SI 0 "general_operand" "=d,?d,?*d,?*d")
+ (sign_extend:SI (match_operand:PSI 1 "extendpsi_operand"
+ "m,?0,?*dai,Q")))]
+ ""
+ "@
+ mov %L1,%L0\;movb %H1,%H0
+ jsr ___sign_extendpsisi2_%0
+ mov %1,%L0\;jsr ___sign_extendpsisi2_%0
+ mov a3,%L0\;add %Z1,%L0\;jsr ___sign_extendpsisi2_%0"
+ [(set_attr "cc" "clobber")])
+
+(define_insn "truncsipsi2"
+ [(set (match_operand:PSI 0 "general_operand" "=a,?d,?*d,da")
+ (truncate:PSI (match_operand:SI 1 "general_operand" "m,?m,?*d,i")))]
+ ""
+ "@
+ mov %1,%0
+ movx %A1,%0
+ jsr ___truncsipsi2_%1_%0
+ mov %1,%0"
+ [(set_attr "cc" "clobber")])
+
+
+;; Combine should be simplifying this stuff, but isn't.
+;;
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=d,d,d")
+ (sign_extend:SI
+ (zero_extend:HI (match_operand:QI 1 "general_operand" "0,di,m"))))]
+ ""
+ "@
+ extxbu %L0\;sub %H0,%H0
+ mov %1,%L0\;extxbu %L0\;sub %H0,%H0
+ movbu %1,%L0\;sub %H0,%H0"
+ [(set_attr "cc" "none_0hit")])
+
+(define_insn ""
+ [(set (match_operand:PSI 0 "general_operand" "=d,d,d")
+ (truncate:PSI
+ (sign_extend:SI (match_operand:QI 1 "general_operand" "0,di,m"))))]
+ ""
+ "*
+{
+ if (which_alternative == 0)
+ return \"extxb %0\";
+ else if (which_alternative == 1)
+ return \"mov %1,%0\;extxb %0\";
+ else if (GET_CODE (XEXP (operands[1], 0)) == REG)
+ return \"movbu %1,%0\;extxb %0\";
+ else
+ return \"movb %1,%0\";
+}"
+ [(set_attr "cc" "none_0hit")])
+
+(define_insn ""
+ [(set (match_operand:PSI 0 "general_operand" "=d,d,d")
+ (truncate:PSI
+ (sign_extend:SI (match_operand:HI 1 "general_operand" "0,di,m"))))]
+ ""
+ "@
+ extx %0
+ mov %1,%0\;extx %0
+ mov %1,%0"
+ [(set_attr "cc" "none_0hit")])
+
+(define_insn ""
+ [(set (match_operand:PSI 0 "general_operand" "=d,d,d")
+ (truncate:PSI
+ (sign_extend:SI
+ (zero_extend:HI (match_operand:QI 1 "general_operand" "0,di,m")))))]
+ ""
+ "@
+ extxbu %0
+ mov %1,%0\;extxbu %0
+ movbu %1,%0"
+ [(set_attr "cc" "none_0hit")])
+
+(define_insn ""
+ [(set (match_operand:PSI 0 "general_operand" "=d,d,d")
+ (truncate:PSI
+ (zero_extend:SI (match_operand:HI 1 "general_operand" "0,di,m"))))]
+ ""
+ "@
+ extxu %0
+ mov %1,%0\;extxu %0
+ mov %1,%0\;extxu %0"
+ [(set_attr "cc" "none_0hit")])
+
+(define_insn ""
+ [(set (match_operand:PSI 0 "general_operand" "=d,d,d")
+ (truncate:PSI
+ (zero_extend:SI (match_operand:QI 1 "general_operand" "0,di,m"))))]
+ ""
+ "@
+ extxbu %0
+ mov %1,%0\;extxbu %0
+ movbu %1,%0"
+ [(set_attr "cc" "none_0hit")])
+
+;; ----------------------------------------------------------------------
+;; SHIFTS
+;; ----------------------------------------------------------------------
+
+;; If the shift count is small, we expand it into several single bit
+;; shift insns. Otherwise we expand into a generic shift insn which
+;; handles larger shift counts, shift by variable amounts, etc.
+(define_expand "ashlhi3"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (ashift:HI (match_operand:HI 1 "general_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+ ""
+ "
+{
+ /* This is an experiment to see if exposing more of the underlying
+ operations results in better code. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) <= 4)
+ {
+ int count = INTVAL (operands[2]);
+ emit_move_insn (operands[0], operands[1]);
+ while (count > 0)
+ {
+ emit_insn (gen_rtx (SET, HImode, operands[0],
+ gen_rtx (ASHIFT, HImode,
+ operands[0], GEN_INT (1))));
+ count--;
+ }
+ DONE;
+ }
+ else
+ {
+ expand_a_shift (HImode, ASHIFT, operands);
+ DONE;
+ }
+}")
+
+;; ASHIFT one bit.
+(define_insn ""
+ [(set (match_operand:HI 0 "general_operand" "=d")
+ (ashift:HI (match_operand:HI 1 "general_operand" "0")
+ (const_int 1)))]
+ ""
+ "add %0,%0"
+ [(set_attr "cc" "set_zn")])
+
+(define_expand "lshrhi3"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (lshiftrt:HI (match_operand:HI 1 "general_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+ ""
+ "
+{
+ /* This is an experiment to see if exposing more of the underlying
+ operations results in better code. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) <= 4)
+ {
+ int count = INTVAL (operands[2]);
+ emit_move_insn (operands[0], operands[1]);
+ while (count > 0)
+ {
+ emit_insn (gen_rtx (SET, HImode, operands[0],
+ gen_rtx (LSHIFTRT, HImode,
+ operands[0], GEN_INT (1))));
+ count--;
+ }
+ DONE;
+ }
+ else
+ {
+ expand_a_shift (HImode, LSHIFTRT, operands);
+ DONE;
+ }
+}")
+
+;; LSHIFTRT one bit.
+(define_insn ""
+ [(set (match_operand:HI 0 "general_operand" "=d")
+ (lshiftrt:HI (match_operand:HI 1 "general_operand" "0")
+ (const_int 1)))]
+ ""
+ "lsr %0"
+ [(set_attr "cc" "set_znv")])
+
+(define_expand "ashrhi3"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (ashiftrt:HI (match_operand:HI 1 "general_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+ ""
+ "
+{
+ /* This is an experiment to see if exposing more of the underlying
+ operations results in better code. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) <= 4)
+ {
+ int count = INTVAL (operands[2]);
+ emit_move_insn (operands[0], operands[1]);
+ while (count > 0)
+ {
+ emit_insn (gen_rtx (SET, HImode, operands[0],
+ gen_rtx (ASHIFTRT, HImode,
+ operands[0], GEN_INT (1))));
+ count--;
+ }
+ DONE;
+ }
+ else
+ {
+ expand_a_shift (HImode, ASHIFTRT, operands);
+ DONE;
+ }
+}")
+
+;; ASHIFTRT one bit.
+(define_insn ""
+ [(set (match_operand:HI 0 "general_operand" "=d")
+ (ashiftrt:HI (match_operand:HI 1 "general_operand" "0")
+ (const_int 1)))]
+ ""
+ "asr %0"
+ [(set_attr "cc" "set_znv")])
+
+;; And the general HImode shift pattern. Handles both shift by constants
+;; and shift by variable counts.
+(define_insn ""
+ [(set (match_operand:HI 0 "general_operand" "=d,d")
+ (match_operator:HI 3 "nshift_operator"
+ [ (match_operand:HI 1 "general_operand" "0,0")
+ (match_operand:HI 2 "general_operand" "KL,dan")]))
+ (clobber (match_scratch:HI 4 "=X,&d"))]
+ ""
+ "* return emit_a_shift (insn, operands);"
+ [(set_attr "cc" "clobber")])
+
+;; We expect only ASHIFT with constant shift counts to be common for
+;; PSImode, so we optimize just that case. For all other cases we
+;; extend the value to SImode and perform the shift in SImode.
+(define_expand "ashlpsi3"
+ [(set (match_operand:PSI 0 "general_operand" "")
+ (ashift:PSI (match_operand:PSI 1 "general_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+ ""
+ "
+{
+ /* This is an experiment to see if exposing more of the underlying
+ operations results in better code. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) <= 7)
+ {
+ int count = INTVAL (operands[2]);
+ emit_move_insn (operands[0], operands[1]);
+ while (count > 0)
+ {
+ emit_insn (gen_rtx (SET, PSImode, operands[0],
+ gen_rtx (ASHIFT, PSImode,
+ operands[0], GEN_INT (1))));
+ count--;
+ }
+ DONE;
+ }
+ else
+ {
+ expand_a_shift (PSImode, ASHIFT, operands);
+ DONE;
+ }
+}")
+
+;; ASHIFT one bit.
+(define_insn ""
+ [(set (match_operand:PSI 0 "general_operand" "=d")
+ (ashift:PSI (match_operand:PSI 1 "general_operand" "0")
+ (const_int 1)))]
+ ""
+ "add %0,%0"
+ [(set_attr "cc" "set_zn")])
+
+(define_expand "lshrpsi3"
+ [(set (match_operand:PSI 0 "general_operand" "")
+ (lshiftrt:PSI (match_operand:PSI 1 "general_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+ ""
+ "
+{
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_zero_extendpsisi2 (reg, operands[1]));
+ reg = expand_binop (SImode, lshr_optab, reg,
+ operands[2], reg, 1, OPTAB_WIDEN);
+ emit_insn (gen_truncsipsi2 (operands[0], reg));
+ DONE;
+}")
+
+(define_expand "ashrpsi3"
+ [(set (match_operand:PSI 0 "general_operand" "")
+ (ashiftrt:PSI (match_operand:PSI 1 "general_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+ ""
+ "
+{
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_extendpsisi2 (reg, operands[1]));
+ reg = expand_binop (SImode, ashr_optab, reg,
+ operands[2], reg, 0, OPTAB_WIDEN);
+ emit_insn (gen_truncsipsi2 (operands[0], reg));
+ DONE;
+}")
+
+(define_expand "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ashift:SI (match_operand:SI 1 "nonmemory_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+ ""
+ "
+{
+ /* For small shifts, just emit a series of single bit shifts inline.
+
+ For other constant shift counts smaller than a word or non-constant
+ shift counts we call out to a library call during RTL generation time;
+ after RTL generation time we allow optabs.c to open code the operation.
+ See comments in addsi3/subsi3 expanders.
+
+ Otherwise we allow optabs.c to open code the operation. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) <= 3))
+ {
+ int count = INTVAL (operands[2]);
+ emit_move_insn (operands[0], operands[1]);
+ while (count > 0)
+ {
+ emit_insn (gen_rtx (SET, SImode, operands[0],
+ gen_rtx (ASHIFT, SImode,
+ operands[0], GEN_INT (1))));
+ count--;
+ }
+ DONE;
+ }
+ else if (rtx_equal_function_value_matters
+ && (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) <= 15))
+ {
+ rtx ret, insns;
+ extern rtx emit_library_call_value ();
+
+ start_sequence ();
+ ret = emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"__ashlsi3\"),
+ NULL_RTX, 1, SImode, 2, operands[1],
+ SImode, operands[2], HImode);
+ insns = get_insns ();
+ end_sequence ();
+ emit_libcall_block (insns, operands[0], ret,
+ gen_rtx (ASHIFT, SImode, operands[1], operands[2]));
+ DONE;
+ }
+ else
+ FAIL;
+}")
+
+;; ASHIFT one bit.
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (ashift:SI (match_operand:SI 1 "general_operand" "0")
+ (const_int 1)))]
+ ""
+ "add %L0,%L0\;addc %H0,%H0"
+ [(set_attr "cc" "clobber")])
+
+(define_expand "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_operand:SI 1 "general_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+ ""
+ "
+{
+ /* For small shifts, just emit a series of single bit shifts inline.
+
+ For other constant shift counts smaller than a word or non-constant
+ shift counts we call out to a library call during RTL generation time;
+ after RTL generation time we allow optabs.c to open code the operation.
+ See comments in addsi3/subsi3 expanders.
+
+ Otherwise we allow optabs.c to open code the operation. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) <= 2))
+ {
+ int count = INTVAL (operands[2]);
+ emit_move_insn (operands[0], operands[1]);
+ while (count > 0)
+ {
+ emit_insn (gen_rtx (SET, SImode, operands[0],
+ gen_rtx (LSHIFTRT, SImode,
+ operands[0], GEN_INT (1))));
+ count--;
+ }
+ DONE;
+ }
+ else if (rtx_equal_function_value_matters
+ && (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) <= 15))
+ {
+ rtx ret, insns;
+ extern rtx emit_library_call_value ();
+
+ start_sequence ();
+ ret = emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"__lshrsi3\"),
+ NULL_RTX, 1, SImode, 2, operands[1],
+ SImode, operands[2], HImode);
+ insns = get_insns ();
+ end_sequence ();
+ emit_libcall_block (insns, operands[0], ret,
+ gen_rtx (LSHIFTRT, SImode, operands[1], operands[2]));
+ DONE;
+ }
+ else
+ FAIL;
+}")
+
+;; LSHIFTRT one bit.
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (lshiftrt:SI (match_operand:SI 1 "general_operand" "0")
+ (const_int 1)))]
+ ""
+ "lsr %H0\;ror %L0"
+ [(set_attr "cc" "clobber")])
+
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+ ""
+ "
+{
+ /* For small shifts, just emit a series of single bit shifts inline.
+
+ For other constant shift counts smaller than a word or non-constant
+ shift counts we call out to a library call during RTL generation time;
+ after RTL generation time we allow optabs.c to open code the operation.
+ See comments in addsi3/subsi3 expanders.
+
+ Otherwise we allow optabs.c to open code the operation. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) <= 2))
+ {
+ int count = INTVAL (operands[2]);
+ emit_move_insn (operands[0], operands[1]);
+ while (count > 0)
+ {
+ emit_insn (gen_rtx (SET, SImode, operands[0],
+ gen_rtx (ASHIFTRT, SImode,
+ operands[0], GEN_INT (1))));
+ count--;
+ }
+ DONE;
+ }
+ else if (rtx_equal_function_value_matters
+ && (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) <= 15))
+ {
+ rtx ret, insns;
+ extern rtx emit_library_call_value ();
+
+ start_sequence ();
+ ret = emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"__ashrsi3\"),
+ NULL_RTX, 1, SImode, 2, operands[1],
+ SImode, operands[2], HImode);
+ insns = get_insns ();
+ end_sequence ();
+ emit_libcall_block (insns, operands[0], ret,
+ gen_rtx (ASHIFTRT, SImode, operands[1], operands[2]));
+ DONE;
+ }
+ else
+ FAIL;
+}")
+
+;; ASHIFTRT one bit.
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=d")
+ (ashiftrt:SI (match_operand:SI 1 "general_operand" "0")
+ (const_int 1)))]
+ ""
+ "asr %H0\;ror %L0"
+ [(set_attr "cc" "clobber")])
+
+;; ----------------------------------------------------------------------
+;; PROLOGUE/EPILOGUE
+;; ----------------------------------------------------------------------
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "expand_prologue (); DONE;")
+
+(define_insn "outline_prologue_call"
+ [(const_int 1)]
+ ""
+ "jsr ___prologue"
+ [(set_attr "cc" "clobber")])
+
+(define_expand "epilogue"
+ [(return)]
+ ""
+ "
+{
+ expand_epilogue ();
+ DONE;
+}")
+
+(define_insn "outline_epilogue_call_a0"
+ [(const_int 2)]
+ ""
+ "jsr ___epilogue_a0"
+ [(set_attr "cc" "clobber")])
+
+(define_insn "outline_epilogue_call_d0"
+ [(const_int 3)]
+ ""
+ "jsr ___epilogue_d0"
+ [(set_attr "cc" "clobber")])
+
+(define_insn "outline_epilogue_jump"
+ [(const_int 4)]
+ ""
+ "jmp ___epilogue_noreturn"
+ [(set_attr "cc" "clobber")])
+
+(define_insn "return"
+ [(return)]
+ "reload_completed && total_frame_size () == 0
+ && !current_function_needs_context"
+ "*
+{
+ rtx next = next_active_insn (insn);
+
+ if (next
+ && GET_CODE (next) == JUMP_INSN
+ && GET_CODE (PATTERN (next)) == RETURN)
+ return \"\";
+ return \"rts\";
+}"
+ [(set_attr "cc" "clobber")])
+
+(define_insn "return_internal"
+ [(const_int 0)
+ (return)]
+ ""
+ "rts"
+ [(set_attr "cc" "clobber")])
+
+;; These are special combiner patterns to improve array/pointer accesses.
+;;
+;; A typical sequence involves extending an integer/char, shifting it left
+;; a few times, then truncating the value to PSImode.
+;;
+;; This first pattern combines the shifting & truncation operations, by
+;; itself it is a win because the shifts end up occurring in PSImode instead
+;; of SImode. However, it has the secondary effect of giving us the
+;; opportunity to match patterns which allow us to remove the initial
+;; extension completely, which is a big win.
+(define_insn ""
+ [(set (match_operand:PSI 0 "general_operand" "=d,d,a")
+ (truncate:PSI
+ (ashift:SI (match_operand:SI 1 "general_operand" "d,m,m")
+ (match_operand:HI 2 "const_int_operand" "i,i,i"))))]
+ ""
+ "*
+{
+ int count = INTVAL (operands[2]);
+ if (which_alternative == 0)
+ output_asm_insn (\"jsr ___truncsipsi2_%1_%0\", operands);
+ else if (which_alternative == 1)
+ output_asm_insn (\"movx %A1,%0\", operands);
+ else
+ output_asm_insn (\" mov %1,%0\", operands);
+
+ while (count)
+ {
+ output_asm_insn (\"add %0,%0\", operands);
+ count--;
+ }
+ return \"\";
+}"
+ [(set_attr "cc" "clobber")])
+
+;; Similarly, except that we also have zero/sign extension of the
+;; original operand.
+(define_insn ""
+ [(set (match_operand:PSI 0 "general_operand" "=d,d")
+ (truncate:PSI
+ (ashift:SI
+ (zero_extend:SI (match_operand:HI 1 "general_operand" "0,dim"))
+ (match_operand:HI 2 "const_int_operand" "i,i"))))]
+ ""
+ "*
+{
+ int count = INTVAL (operands[2]);
+
+ /* First extend operand 1 to PSImode. */
+ if (which_alternative == 0)
+ output_asm_insn (\"extxu %0\", operands);
+ else
+ output_asm_insn (\"mov %1,%0\;extxu %0\", operands);
+
+ /* Now do the shifting. */
+ while (count)
+ {
+ output_asm_insn (\"add %0,%0\", operands);
+ count--;
+ }
+ return \"\";
+}"
+ [(set_attr "cc" "clobber")])
+
+(define_insn ""
+ [(set (match_operand:PSI 0 "general_operand" "=d,d,d")
+ (truncate:PSI
+ (ashift:SI
+ (sign_extend:SI (match_operand:HI 1 "general_operand" "0,di,m"))
+ (match_operand:HI 2 "const_int_operand" "i,i,i"))))]
+ ""
+ "*
+{
+ int count = INTVAL (operands[2]);
+
+ /* First extend operand 1 to PSImode. */
+ if (which_alternative == 0)
+ output_asm_insn (\"extx %0\", operands);
+ else if (which_alternative == 1)
+ output_asm_insn (\"mov %1,%0\;extx %0\", operands);
+ else
+ output_asm_insn (\"mov %1,%0\", operands);
+
+ /* Now do the shifting. */
+ while (count)
+ {
+ output_asm_insn (\"add %0,%0\", operands);
+ count--;
+ }
+ return \"\";
+}"
+ [(set_attr "cc" "clobber")])
+
+(define_insn ""
+ [(set (match_operand:PSI 0 "general_operand" "=d,d,d")
+ (truncate:PSI
+ (ashift:SI
+ (sign_extend:SI
+ (zero_extend:HI (match_operand:QI 1 "general_operand" "0,di,m")))
+ (match_operand:HI 2 "const_int_operand" "i,i,i"))))]
+ ""
+ "*
+{
+ int count = INTVAL (operands[2]);
+
+ /* First extend operand 1 to PSImode. */
+ if (which_alternative == 0)
+ output_asm_insn (\"extxbu %0\", operands);
+ else if (which_alternative == 1)
+ output_asm_insn (\"mov %1,%0\;extxbu %0\", operands);
+ else
+ output_asm_insn (\"movbu %1,%0\", operands);
+
+ /* Now do the shifting. */
+ while (count)
+ {
+ output_asm_insn (\"add %0,%0\", operands);
+ count--;
+ }
+ return \"\";
+}"
+ [(set_attr "cc" "clobber")])
+
+(define_insn ""
+ [(set (match_operand:PSI 0 "general_operand" "=d,d,d")
+ (truncate:PSI
+ (ashift:SI
+ (sign_extend:SI
+ (match_operand:QI 1 "general_operand" "0,di,m"))
+ (match_operand:HI 2 "const_int_operand" "i,i,i"))))]
+ ""
+ "*
+{
+ int count = INTVAL (operands[2]);
+
+ /* First extend operand 1 to PSImode. */
+ if (which_alternative == 0)
+ output_asm_insn (\"extxb %0\", operands);
+ else if (which_alternative == 1)
+ output_asm_insn (\"mov %1,%0\;extxb %0\", operands);
+ else if (GET_CODE (XEXP (operands[1], 0)) == REG)
+ output_asm_insn (\"movbu %1,%0\;extxb %0\", operands);
+ else
+ output_asm_insn (\"movb %1,%0\", operands);
+
+ /* Now do the shifting. */
+ while (count)
+ {
+ output_asm_insn (\"add %0,%0\", operands);
+ count--;
+ }
+ return \"\";
+}"
+ [(set_attr "cc" "clobber")])
+
+;; Try to combine consecutive updates of the stack pointer (or any
+;; other register for that matter).
+(define_peephole
+ [(set (match_operand:PSI 0 "register_operand" "=da")
+ (plus:PSI (match_dup 0)
+ (match_operand 1 "const_int_operand" "")))
+ (set (match_dup 0)
+ (plus:PSI (match_dup 0)
+ (match_operand 2 "const_int_operand" "")))]
+ ""
+ "*
+{
+ operands[1] = GEN_INT (INTVAL (operands[2]) + INTVAL (operands[1]));
+ return \"add %1,%0\";
+}"
+ [(set_attr "cc" "clobber")])
+
+;;
+;; We had patterns to check eq/ne, but they don't work because
+;; 0x80000000 + 0x80000000 = 0x0 with a carry out.
+;;
+;; The Z flag and C flag would be set, and we have no way to
+;; check for the Z flag set and C flag clear.
+;;
+;; This will work on the mn10200 because we can check the ZX flag
+;; if the comparison is in HImode.
+(define_peephole
+ [(set (cc0) (match_operand:HI 0 "register_operand" "d"))
+ (set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (match_operand 1 "" "")
+ (pc)))]
+ "dead_or_set_p (ins1, operands[0]) && REG_OK_FOR_INDEX_P (operands[0])"
+ "add %0,%0\;bcc %1"
+ [(set_attr "cc" "clobber")])
+
+(define_peephole
+ [(set (cc0) (match_operand:HI 0 "register_operand" "d"))
+ (set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (match_operand 1 "" "")
+ (pc)))]
+ "dead_or_set_p (ins1, operands[0]) && REG_OK_FOR_INDEX_P (operands[0])"
+ "add %0,%0\;bcs %1"
+ [(set_attr "cc" "clobber")])
+
+(define_peephole
+ [(set (cc0) (match_operand:HI 0 "register_operand" "d"))
+ (set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (pc)
+ (match_operand 1 "" "")))]
+ "dead_or_set_p (ins1, operands[0]) && REG_OK_FOR_INDEX_P (operands[0])"
+ "add %0,%0\;bcs %1"
+ [(set_attr "cc" "clobber")])
+
+(define_peephole
+ [(set (cc0) (match_operand:HI 0 "register_operand" "d"))
+ (set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (pc)
+ (match_operand 1 "" "")))]
+ "dead_or_set_p (ins1, operands[0]) && REG_OK_FOR_INDEX_P (operands[0])"
+ "add %0,%0\;bcc %1"
+ [(set_attr "cc" "clobber")])
+
+(define_peephole
+ [(set (cc0) (match_operand:PSI 0 "register_operand" "d"))
+ (set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (match_operand 1 "" "")
+ (pc)))]
+ "dead_or_set_p (ins1, operands[0]) && REG_OK_FOR_INDEX_P (operands[0])"
+ "add %0,%0\;bccx %1"
+ [(set_attr "cc" "clobber")])
+
+(define_peephole
+ [(set (cc0) (match_operand:PSI 0 "register_operand" "d"))
+ (set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (match_operand 1 "" "")
+ (pc)))]
+ "dead_or_set_p (ins1, operands[0]) && REG_OK_FOR_INDEX_P (operands[0])"
+ "add %0,%0\;bcsx %1"
+ [(set_attr "cc" "clobber")])
+
+(define_peephole
+ [(set (cc0) (match_operand:PSI 0 "register_operand" "d"))
+ (set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (pc)
+ (match_operand 1 "" "")))]
+ "dead_or_set_p (ins1, operands[0]) && REG_OK_FOR_INDEX_P (operands[0])"
+ "add %0,%0\;bcsx %1"
+ [(set_attr "cc" "clobber")])
+
+(define_peephole
+ [(set (cc0) (match_operand:PSI 0 "register_operand" "d"))
+ (set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (pc)
+ (match_operand 1 "" "")))]
+ "dead_or_set_p (ins1, operands[0]) && REG_OK_FOR_INDEX_P (operands[0])"
+ "add %0,%0\;bccx %1"
+ [(set_attr "cc" "clobber")])
+
+;; We call out to library routines to perform 32bit addition and subtraction
+;; operations (see addsi3/subsi3 expanders for why). These peepholes catch
+;; the trivial case where the operation could be done with an add;addc or
+;; sub;subc sequence.
+(define_peephole
+ [(set (mem:SI (reg:PSI 7)) (reg:SI 2))
+ (set (reg:SI 0) (call (match_operand:QI 1 "general_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+ "GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
+ && strcmp (XSTR (XEXP (operands[1], 0), 0), \"__addsi3\") == 0"
+ "add d2,d0\;addc d3,d1"
+ [(set_attr "cc" "clobber")])
+
+(define_peephole
+ [(set (mem:SI (reg:PSI 7)) (reg:SI 2))
+ (set (reg:SI 0) (call (match_operand:QI 1 "general_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+ "GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
+ && strcmp (XSTR (XEXP (operands[1], 0), 0), \"__subsi3\") == 0"
+ "sub d2,d0\;subc d3,d1"
+ [(set_attr "cc" "clobber")])
diff --git a/gnu/usr.bin/gcc/config/mn10200/t-mn10200 b/gnu/usr.bin/gcc/config/mn10200/t-mn10200
new file mode 100644
index 00000000000..9486837cd9a
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/mn10200/t-mn10200
@@ -0,0 +1,50 @@
+LIBGCC1=libgcc1.null
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = mn10200/lib1funcs.asm
+LIB1ASMFUNCS = _divhi3 \
+ _modhi3 \
+ _addsi3 \
+ _subsi3 \
+ _mulsi3 \
+ _ashlsi3 \
+ _lshrsi3 \
+ _ashrsi3 \
+ _negsi2_d0 \
+ _negsi2_d2 \
+ _zero_extendpsisi2_d0 \
+ _zero_extendpsisi2_d2 \
+ _sign_extendpsisi2_d0 \
+ _sign_extendpsisi2_d2 \
+ _truncsipsi2_d0_d0 \
+ _truncsipsi2_d0_d1 \
+ _truncsipsi2_d0_d2 \
+ _truncsipsi2_d0_d3 \
+ _truncsipsi2_d2_d0 \
+ _truncsipsi2_d2_d1 \
+ _truncsipsi2_d2_d2 \
+ _truncsipsi2_d2_d3 \
+ _cmpsi2 \
+ _ucmpsi2 \
+ _prologue \
+ _epilogue_a0 \
+ _epilogue_d0 \
+ _epilogue_noreturn
+
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+# We do not have DF or DI types, so fake out the libgcc2 compilation.
+TARGET_LIBGCC2_CFLAGS=-DDF=SF -DDI=SI
+LIB2FUNCS_EXTRA = fp-bit.c $(srcdir)/config/mn10200/udivmodsi4.c \
+ $(srcdir)/config/mn10200/divmod.c $(srcdir)/config/mn10200/udivmod.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#define FLOAT_ONLY' >> fp-bit.c
+ echo '#define SMALL_MACHINE' >> fp-bit.c
+ echo '#define CMPtype HItype' >> fp-bit.c
+ echo '#ifdef __LITTLE_ENDIAN__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >>fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
diff --git a/gnu/usr.bin/gcc/config/mn10200/udivmod.c b/gnu/usr.bin/gcc/config/mn10200/udivmod.c
new file mode 100644
index 00000000000..1395e9cc940
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/mn10200/udivmod.c
@@ -0,0 +1,14 @@
+/* 32-bit unsigned divide/modulus entry points.  Both defer to
+   udivmodsi4 (see udivmodsi4.c); its third argument selects the
+   remainder (non-zero) or the quotient (zero).  */
+long udivmodsi4 ();
+
+/* Unsigned divide: a / b.  Declared long for ABI purposes; the bits
+   are treated as unsigned by udivmodsi4.  */
+long
+__udivsi3 (long a, long b)
+{
+  return udivmodsi4 (a, b, 0);
+}
+
+/* Unsigned modulus: a % b.  */
+long
+__umodsi3 (long a, long b)
+{
+  return udivmodsi4 (a, b, 1);
+}
+
diff --git a/gnu/usr.bin/gcc/config/mn10200/udivmodsi4.c b/gnu/usr.bin/gcc/config/mn10200/udivmodsi4.c
new file mode 100644
index 00000000000..83c2340c2f8
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/mn10200/udivmodsi4.c
@@ -0,0 +1,24 @@
+/* Binary shift-and-subtract unsigned 32-bit division.
+   Returns num % den when modwanted is non-zero, otherwise num / den.
+   Division by zero is not checked for.  */
+unsigned long
+udivmodsi4(unsigned long num, unsigned long den, int modwanted)
+{
+  unsigned long bit = 1;
+  unsigned long res = 0;
+
+  /* Scale den up to (at least) the magnitude of num, tracking the
+     scale factor in bit.  Stop before shifting a set bit out of the
+     top of den.  */
+  while (den < num && bit && !(den & (1L<<31)))
+    {
+      den <<=1;
+      bit <<=1;
+    }
+  /* Subtract out each scaled copy of den that fits; the corresponding
+     quotient bit accumulates in res.  */
+  while (bit)
+    {
+      if (num >= den)
+	{
+	  num -= den;
+	  res |= bit;
+	}
+      bit >>=1;
+      den >>=1;
+    }
+  /* num now holds the remainder, res the quotient.  */
+  if (modwanted) return num;
+  return res;
+}
diff --git a/gnu/usr.bin/gcc/config/mn10200/xm-mn10200.h b/gnu/usr.bin/gcc/config/mn10200/xm-mn10200.h
new file mode 100644
index 00000000000..7ebac70ed3f
--- /dev/null
+++ b/gnu/usr.bin/gcc/config/mn10200/xm-mn10200.h
@@ -0,0 +1,47 @@
+/* Configuration for Matsushita MN10200.
+ Copyright (C) 1997 Free Software Foundation, Inc.
+ Contributed by Cygnus Support.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 16
+#define HOST_BITS_PER_LONG 32
+#define HOST_BITS_PER_LONGLONG 64
+
+/* Arguments to use with `exit'. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+
+#include "tm.h"
+
+#ifndef __STDC__
+extern char *malloc (), *realloc (), *calloc ();
+#else
+extern void *malloc (), *realloc (), *calloc ();
+#endif
+extern void free ();