author    | Thomas Graichen <graichen@cvs.openbsd.org> | 1997-04-04 13:21:36 +0000
committer | Thomas Graichen <graichen@cvs.openbsd.org> | 1997-04-04 13:21:36 +0000
commit    | 50325cbab454647a313ba68279c844e2bc6143af (patch)
tree      | 0e52e902317bb4442448c5c61ab6d2162111a240 /gnu/usr.bin/gcc/f/gbe/2.7.2.2.diff
parent    | b2ad87cb6f8d3d16576e4e93251e0228f0672cdc (diff)
sync g77 to version 0.5.20 - I hope I got everything right, because there
is no patch from 0.5.19 to 0.5.20, so I did it by diffing two gcc trees and
looking carefully at the results.
What does the new g77 give us:
* it now works completely on the alpha (64-bit)
* faster
* fewer bugs :-)
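For reference, the per-file headers in the patch below ("diff -rcp2N gcc-2.7.2.2/... gcc-2.7.2.2.f.2/...")
show how such a tree-to-tree diff can be produced: a recursive context diff between a pristine
gcc-2.7.2.2 tree and one with the g77 changes merged in. A minimal sketch only - the two directory
names come from the patch headers, while the output file name here is an assumption:

  # recursive context diff of the two trees; -N includes files present in only one tree
  diff -rcp2N gcc-2.7.2.2 gcc-2.7.2.2.f.2 > 2.7.2.2.diff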
Diffstat (limited to 'gnu/usr.bin/gcc/f/gbe/2.7.2.2.diff')
-rw-r--r-- | gnu/usr.bin/gcc/f/gbe/2.7.2.2.diff | 4100 |
1 file changed, 4100 insertions, 0 deletions
diff --git a/gnu/usr.bin/gcc/f/gbe/2.7.2.2.diff b/gnu/usr.bin/gcc/f/gbe/2.7.2.2.diff
new file mode 100644
index 00000000000..80903bed46f
--- /dev/null
+++ b/gnu/usr.bin/gcc/f/gbe/2.7.2.2.diff
@@ -0,0 +1,4100 @@
+IMPORTANT: After applying this patch, you must rebuild the
+Info documentation derived from the Texinfo files in the
+gcc distribution, as this patch does not include patches
+to any derived files (due to differences in the way gcc
+version 2.7.2.2 is obtained by users). Use the following
+command sequence after applying this patch:
+
+  cd gcc-2.7.2.2; make -f Makefile.in gcc.info
+
+If that fails due to `makeinfo' not being installed, obtain
+texinfo-3.9.tar.gz from a GNU distribution site, unpack,
+build, and install it, and try the above command sequence
+again.
+
+
+diff -rcp2N gcc-2.7.2.2/ChangeLog gcc-2.7.2.2.f.2/ChangeLog
+*** gcc-2.7.2.2/ChangeLog Thu Feb 20 19:24:10 1997
+--- gcc-2.7.2.2.f.2/ChangeLog Thu Feb 27 23:04:00 1997
+***************
+*** 1,2 ****
+--- 1,69 ----
++ Wed Feb 26 13:09:33 1997 Michael Meissner <meissner@cygnus.com>
++
++ * reload.c (debug_reload): Fix format string to print
++ reload_nocombine[r].
++
++ Sun Feb 23 15:26:53 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * fold-const.c (multiple_of_p): Clean up and improve.
++ (fold): Clean up invocation of multiple_of_p.
++
++ Sat Feb 8 04:53:27 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ From <jfc@jfc.tiac.net> Fri, 07 Feb 1997 22:02:21 -0500:
++ * alias.c (init_alias_analysis): Reduce amount of time
++ needed to simplify the reg_base_value array in the
++ typical case (especially involving function inlining).
++
++ Fri Jan 10 17:22:17 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ Minor improvements/fixes to better alias handling:
++ * Makefile.in (alias.o): Fix typo in rule (was RLT_H).
++ * cse.c, sched.c: Fix up some indenting.
++ * toplev.c: Add -fargument-alias flag, so Fortran users
++ can turn C-style aliasing on once g77 defaults to
++ -fargument-noalias-global.
++
++ Integrate patch for better alias handling from
++ John Carr <jfc@mit.edu>:
++ * Makefile.in (OBJS, alias.o): New module and rule.
++ * alias.c: New source module.
++ * calls.c (expand_call): Recognize alias status of calls
++ to malloc().
++ * combine.c (distribute_notes): New REG_NOALIAS note.
++ * rtl.h (REG_NOALIAS): Ditto.
++ Many other changes for new alias.c module.
++ * cse.c: Many changes, and much code moved into alias.c.
++ * flags.h (flag_alias_check, flag_argument_noalias):
++ New flags.
++ * toplev.c: New flags and related options.
++ * local-alloc.c (validate_equiv_mem_from_store):
++ Caller of true_dependence changed.
++ * loop.c (NUM_STORES): Increase to 50 from 20.
++ (prescan_loop): "const" functions don't alter unknown addresses.
++ (invariant_p): Caller of true_dependence changed.
++ (record_giv): Zero new unrolled and shared flags.
++ (emit_iv_add_mult): Record base value for register.
++ * sched.c: Many changes, mostly moving code to alias.c.
++ (sched_note_set): SCHED_SORT macro def form, but not function,
++ inexplicably changed.
++ * unroll.c: Record base values for registers, etc.
++
++ Fri Jan 3 04:01:00 1997 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * loop.c (check_final_value): Handle insns with no luid's
++ appropriately, instead of crashing on INSN_LUID macro
++ invocations.
++
++ Mon Dec 23 00:49:19 1996 Craig Burley <burley@gnu.ai.mit.edu>
++
++ * config/alpha/alpha.md: Fix pattern that matches if_then_else
++ involving DF target, DF comparison, SF source.
++ ++ Fri Dec 20 15:42:52 1996 Craig Burley <burley@gnu.ai.mit.edu> ++ ++ * fold-const.c (multiple_of_p): New function. ++ (fold): Use new function to turn *_DIV_EXPR into EXACT_DIV_EXPR. ++ + Sat Jun 29 12:33:39 1996 Richard Kenner <kenner@vlsi1.ultra.nyu.edu> + +diff -rcp2N gcc-2.7.2.2/Makefile.in gcc-2.7.2.2.f.2/Makefile.in +*** gcc-2.7.2.2/Makefile.in Sun Nov 26 14:44:25 1995 +--- gcc-2.7.2.2.f.2/Makefile.in Sun Feb 23 16:36:34 1997 +*************** OBJS = toplev.o version.o tree.o print-t +*** 519,523 **** + integrate.o jump.o cse.o loop.o unroll.o flow.o stupid.o combine.o \ + regclass.o local-alloc.o global.o reload.o reload1.o caller-save.o \ +! insn-peep.o reorg.o sched.o final.o recog.o reg-stack.o \ + insn-opinit.o insn-recog.o insn-extract.o insn-output.o insn-emit.o \ + insn-attrtab.o $(out_object_file) getpwd.o convert.o $(EXTRA_OBJS) +--- 519,523 ---- + integrate.o jump.o cse.o loop.o unroll.o flow.o stupid.o combine.o \ + regclass.o local-alloc.o global.o reload.o reload1.o caller-save.o \ +! insn-peep.o reorg.o alias.o sched.o final.o recog.o reg-stack.o \ + insn-opinit.o insn-recog.o insn-extract.o insn-output.o insn-emit.o \ + insn-attrtab.o $(out_object_file) getpwd.o convert.o $(EXTRA_OBJS) +*************** reorg.o : reorg.c $(CONFIG_H) $(RTL_H) c +*** 1238,1241 **** +--- 1238,1242 ---- + basic-block.h regs.h insn-config.h insn-attr.h insn-flags.h recog.h \ + flags.h output.h ++ alias.o : $(CONFIG_H) $(RTL_H) flags.h hard-reg-set.h regs.h + sched.o : sched.c $(CONFIG_H) $(RTL_H) basic-block.h regs.h hard-reg-set.h \ + flags.h insn-config.h insn-attr.h +diff -rcp2N gcc-2.7.2.2/alias.c gcc-2.7.2.2.f.2/alias.c +*** gcc-2.7.2.2/alias.c Wed Dec 31 19:00:00 1969 +--- gcc-2.7.2.2.f.2/alias.c Sat Feb 8 04:53:07 1997 +*************** +*** 0 **** +--- 1,989 ---- ++ /* Alias analysis for GNU C, by John Carr (jfc@mit.edu). ++ Derived in part from sched.c */ ++ #include "config.h" ++ #include "rtl.h" ++ #include "expr.h" ++ #include "regs.h" ++ #include "hard-reg-set.h" ++ #include "flags.h" ++ ++ static rtx canon_rtx PROTO((rtx)); ++ static int rtx_equal_for_memref_p PROTO((rtx, rtx)); ++ static rtx find_symbolic_term PROTO((rtx)); ++ static int memrefs_conflict_p PROTO((int, rtx, int, rtx, ++ HOST_WIDE_INT)); ++ ++ /* Set up all info needed to perform alias analysis on memory references. */ ++ ++ #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X))) ++ ++ /* reg_base_value[N] gives an address to which register N is related. ++ If all sets after the first add or subtract to the current value ++ or otherwise modify it so it does not point to a different top level ++ object, reg_base_value[N] is equal to the address part of the source ++ of the first set. The value will be a SYMBOL_REF, a LABEL_REF, or ++ (address (reg)) to indicate that the address is derived from an ++ argument or fixed register. */ ++ rtx *reg_base_value; ++ unsigned int reg_base_value_size; /* size of reg_base_value array */ ++ #define REG_BASE_VALUE(X) \ ++ (REGNO (X) < reg_base_value_size ? reg_base_value[REGNO (X)] : 0) ++ ++ /* Vector indexed by N giving the initial (unchanging) value known ++ for pseudo-register N. */ ++ rtx *reg_known_value; ++ ++ /* Indicates number of valid entries in reg_known_value. */ ++ static int reg_known_value_size; ++ ++ /* Vector recording for each reg_known_value whether it is due to a ++ REG_EQUIV note. Future passes (viz., reload) may replace the ++ pseudo with the equivalent expression and so we account for the ++ dependences that would be introduced if that happens. 
*/ ++ /* ??? This is a problem only on the Convex. The REG_EQUIV notes created in ++ assign_parms mention the arg pointer, and there are explicit insns in the ++ RTL that modify the arg pointer. Thus we must ensure that such insns don't ++ get scheduled across each other because that would invalidate the REG_EQUIV ++ notes. One could argue that the REG_EQUIV notes are wrong, but solving ++ the problem in the scheduler will likely give better code, so we do it ++ here. */ ++ char *reg_known_equiv_p; ++ ++ /* Inside SRC, the source of a SET, find a base address. */ ++ ++ /* When copying arguments into pseudo-registers, record the (ADDRESS) ++ expression for the argument directly so that even if the argument ++ register is changed later (e.g. for a function call) the original ++ value is noted. */ ++ static int copying_arguments; ++ ++ static rtx ++ find_base_value (src) ++ register rtx src; ++ { ++ switch (GET_CODE (src)) ++ { ++ case SYMBOL_REF: ++ case LABEL_REF: ++ return src; ++ ++ case REG: ++ if (copying_arguments && REGNO (src) < FIRST_PSEUDO_REGISTER) ++ return reg_base_value[REGNO (src)]; ++ return src; ++ ++ case MEM: ++ /* Check for an argument passed in memory. Only record in the ++ copying-arguments block; it is too hard to track changes ++ otherwise. */ ++ if (copying_arguments ++ && (XEXP (src, 0) == arg_pointer_rtx ++ || (GET_CODE (XEXP (src, 0)) == PLUS ++ && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx))) ++ return gen_rtx (ADDRESS, VOIDmode, src); ++ return 0; ++ ++ case CONST: ++ src = XEXP (src, 0); ++ if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS) ++ break; ++ /* fall through */ ++ case PLUS: ++ case MINUS: ++ /* Guess which operand to set the register equivalent to. */ ++ /* If the first operand is a symbol or the second operand is ++ an integer, the first operand is the base address. */ ++ if (GET_CODE (XEXP (src, 0)) == SYMBOL_REF ++ || GET_CODE (XEXP (src, 0)) == LABEL_REF ++ || GET_CODE (XEXP (src, 1)) == CONST_INT) ++ return XEXP (src, 0); ++ /* If an operand is a register marked as a pointer, it is the base. */ ++ if (GET_CODE (XEXP (src, 0)) == REG ++ && REGNO_POINTER_FLAG (REGNO (XEXP (src, 0)))) ++ src = XEXP (src, 0); ++ else if (GET_CODE (XEXP (src, 1)) == REG ++ && REGNO_POINTER_FLAG (REGNO (XEXP (src, 1)))) ++ src = XEXP (src, 1); ++ else ++ return 0; ++ if (copying_arguments && REGNO (src) < FIRST_PSEUDO_REGISTER) ++ return reg_base_value[REGNO (src)]; ++ return src; ++ ++ case AND: ++ /* If the second operand is constant set the base ++ address to the first operand. */ ++ if (GET_CODE (XEXP (src, 1)) == CONST_INT ++ && GET_CODE (XEXP (src, 0)) == REG) ++ { ++ src = XEXP (src, 0); ++ if (copying_arguments && REGNO (src) < FIRST_PSEUDO_REGISTER) ++ return reg_base_value[REGNO (src)]; ++ return src; ++ } ++ return 0; ++ ++ case HIGH: ++ return XEXP (src, 0); ++ } ++ ++ return 0; ++ } ++ ++ /* Called from init_alias_analysis indirectly through note_stores. */ ++ ++ /* while scanning insns to find base values, reg_seen[N] is nonzero if ++ register N has been set in this function. */ ++ static char *reg_seen; ++ ++ static ++ void record_set (dest, set) ++ rtx dest, set; ++ { ++ register int regno; ++ rtx src; ++ ++ if (GET_CODE (dest) != REG) ++ return; ++ ++ regno = REGNO (dest); ++ ++ if (set) ++ { ++ /* A CLOBBER wipes out any old value but does not prevent a previously ++ unset register from acquiring a base address (i.e. reg_seen is not ++ set). 
*/ ++ if (GET_CODE (set) == CLOBBER) ++ { ++ reg_base_value[regno] = 0; ++ return; ++ } ++ src = SET_SRC (set); ++ } ++ else ++ { ++ static int unique_id; ++ if (reg_seen[regno]) ++ { ++ reg_base_value[regno] = 0; ++ return; ++ } ++ reg_seen[regno] = 1; ++ reg_base_value[regno] = gen_rtx (ADDRESS, Pmode, ++ GEN_INT (unique_id++)); ++ return; ++ } ++ ++ /* This is not the first set. If the new value is not related to the ++ old value, forget the base value. Note that the following code is ++ not detected: ++ extern int x, y; int *p = &x; p += (&y-&x); ++ ANSI C does not allow computing the difference of addresses ++ of distinct top level objects. */ ++ if (reg_base_value[regno]) ++ switch (GET_CODE (src)) ++ { ++ case PLUS: ++ case MINUS: ++ if (XEXP (src, 0) != dest && XEXP (src, 1) != dest) ++ reg_base_value[regno] = 0; ++ break; ++ case AND: ++ if (XEXP (src, 0) != dest || GET_CODE (XEXP (src, 1)) != CONST_INT) ++ reg_base_value[regno] = 0; ++ break; ++ case LO_SUM: ++ if (XEXP (src, 0) != dest) ++ reg_base_value[regno] = 0; ++ break; ++ default: ++ reg_base_value[regno] = 0; ++ break; ++ } ++ /* If this is the first set of a register, record the value. */ ++ else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno]) ++ && ! reg_seen[regno] && reg_base_value[regno] == 0) ++ reg_base_value[regno] = find_base_value (src); ++ ++ reg_seen[regno] = 1; ++ } ++ ++ /* Called from loop optimization when a new pseudo-register is created. */ ++ void ++ record_base_value (regno, val) ++ int regno; ++ rtx val; ++ { ++ if (!flag_alias_check || regno >= reg_base_value_size) ++ return; ++ if (GET_CODE (val) == REG) ++ { ++ if (REGNO (val) < reg_base_value_size) ++ reg_base_value[regno] = reg_base_value[REGNO (val)]; ++ return; ++ } ++ reg_base_value[regno] = find_base_value (val); ++ } ++ ++ static rtx ++ canon_rtx (x) ++ rtx x; ++ { ++ /* Recursively look for equivalences. */ ++ if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER ++ && REGNO (x) < reg_known_value_size) ++ return reg_known_value[REGNO (x)] == x ++ ? x : canon_rtx (reg_known_value[REGNO (x)]); ++ else if (GET_CODE (x) == PLUS) ++ { ++ rtx x0 = canon_rtx (XEXP (x, 0)); ++ rtx x1 = canon_rtx (XEXP (x, 1)); ++ ++ if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1)) ++ { ++ /* We can tolerate LO_SUMs being offset here; these ++ rtl are used for nothing other than comparisons. */ ++ if (GET_CODE (x0) == CONST_INT) ++ return plus_constant_for_output (x1, INTVAL (x0)); ++ else if (GET_CODE (x1) == CONST_INT) ++ return plus_constant_for_output (x0, INTVAL (x1)); ++ return gen_rtx (PLUS, GET_MODE (x), x0, x1); ++ } ++ } ++ /* This gives us much better alias analysis when called from ++ the loop optimizer. Note we want to leave the original ++ MEM alone, but need to return the canonicalized MEM with ++ all the flags with their original values. */ ++ else if (GET_CODE (x) == MEM) ++ { ++ rtx addr = canon_rtx (XEXP (x, 0)); ++ if (addr != XEXP (x, 0)) ++ { ++ rtx new = gen_rtx (MEM, GET_MODE (x), addr); ++ MEM_VOLATILE_P (new) = MEM_VOLATILE_P (x); ++ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x); ++ MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (x); ++ x = new; ++ } ++ } ++ return x; ++ } ++ ++ /* Return 1 if X and Y are identical-looking rtx's. ++ ++ We use the data in reg_known_value above to see if two registers with ++ different numbers are, in fact, equivalent. 
*/ ++ ++ static int ++ rtx_equal_for_memref_p (x, y) ++ rtx x, y; ++ { ++ register int i; ++ register int j; ++ register enum rtx_code code; ++ register char *fmt; ++ ++ if (x == 0 && y == 0) ++ return 1; ++ if (x == 0 || y == 0) ++ return 0; ++ x = canon_rtx (x); ++ y = canon_rtx (y); ++ ++ if (x == y) ++ return 1; ++ ++ code = GET_CODE (x); ++ /* Rtx's of different codes cannot be equal. */ ++ if (code != GET_CODE (y)) ++ return 0; ++ ++ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. ++ (REG:SI x) and (REG:HI x) are NOT equivalent. */ ++ ++ if (GET_MODE (x) != GET_MODE (y)) ++ return 0; ++ ++ /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively. */ ++ ++ if (code == REG) ++ return REGNO (x) == REGNO (y); ++ if (code == LABEL_REF) ++ return XEXP (x, 0) == XEXP (y, 0); ++ if (code == SYMBOL_REF) ++ return XSTR (x, 0) == XSTR (y, 0); ++ ++ /* For commutative operations, the RTX match if the operand match in any ++ order. Also handle the simple binary and unary cases without a loop. */ ++ if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c') ++ return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0)) ++ && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1))) ++ || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1)) ++ && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0)))); ++ else if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == '2') ++ return (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0)) ++ && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1))); ++ else if (GET_RTX_CLASS (code) == '1') ++ return rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0)); ++ ++ /* Compare the elements. If any pair of corresponding elements ++ fail to match, return 0 for the whole things. */ ++ ++ fmt = GET_RTX_FORMAT (code); ++ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) ++ { ++ switch (fmt[i]) ++ { ++ case 'w': ++ if (XWINT (x, i) != XWINT (y, i)) ++ return 0; ++ break; ++ ++ case 'n': ++ case 'i': ++ if (XINT (x, i) != XINT (y, i)) ++ return 0; ++ break; ++ ++ case 'V': ++ case 'E': ++ /* Two vectors must have the same length. */ ++ if (XVECLEN (x, i) != XVECLEN (y, i)) ++ return 0; ++ ++ /* And the corresponding elements must match. */ ++ for (j = 0; j < XVECLEN (x, i); j++) ++ if (rtx_equal_for_memref_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0) ++ return 0; ++ break; ++ ++ case 'e': ++ if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0) ++ return 0; ++ break; ++ ++ case 'S': ++ case 's': ++ if (strcmp (XSTR (x, i), XSTR (y, i))) ++ return 0; ++ break; ++ ++ case 'u': ++ /* These are just backpointers, so they don't matter. */ ++ break; ++ ++ case '0': ++ break; ++ ++ /* It is believed that rtx's at this level will never ++ contain anything but integers and other rtx's, ++ except for within LABEL_REFs and SYMBOL_REFs. */ ++ default: ++ abort (); ++ } ++ } ++ return 1; ++ } ++ ++ /* Given an rtx X, find a SYMBOL_REF or LABEL_REF within ++ X and return it, or return 0 if none found. 
*/ ++ ++ static rtx ++ find_symbolic_term (x) ++ rtx x; ++ { ++ register int i; ++ register enum rtx_code code; ++ register char *fmt; ++ ++ code = GET_CODE (x); ++ if (code == SYMBOL_REF || code == LABEL_REF) ++ return x; ++ if (GET_RTX_CLASS (code) == 'o') ++ return 0; ++ ++ fmt = GET_RTX_FORMAT (code); ++ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) ++ { ++ rtx t; ++ ++ if (fmt[i] == 'e') ++ { ++ t = find_symbolic_term (XEXP (x, i)); ++ if (t != 0) ++ return t; ++ } ++ else if (fmt[i] == 'E') ++ break; ++ } ++ return 0; ++ } ++ ++ static rtx ++ find_base_term (x) ++ rtx x; ++ { ++ switch (GET_CODE (x)) ++ { ++ case REG: ++ return REG_BASE_VALUE (x); ++ ++ case HIGH: ++ return find_base_value (XEXP (x, 0)); ++ ++ case CONST: ++ x = XEXP (x, 0); ++ if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS) ++ return 0; ++ /* fall through */ ++ case LO_SUM: ++ case PLUS: ++ case MINUS: ++ { ++ rtx tmp = find_base_term (XEXP (x, 0)); ++ if (tmp) ++ return tmp; ++ return find_base_term (XEXP (x, 1)); ++ } ++ ++ case AND: ++ if (GET_CODE (XEXP (x, 0)) == REG && GET_CODE (XEXP (x, 1)) == CONST_INT) ++ return REG_BASE_VALUE (XEXP (x, 0)); ++ return 0; ++ ++ case SYMBOL_REF: ++ case LABEL_REF: ++ return x; ++ ++ default: ++ return 0; ++ } ++ } ++ ++ /* Return 0 if the addresses X and Y are known to point to different ++ objects, 1 if they might be pointers to the same object. */ ++ ++ static int ++ base_alias_check (x, y) ++ rtx x, y; ++ { ++ rtx x_base = find_base_term (x); ++ rtx y_base = find_base_term (y); ++ ++ /* If either base address is unknown or the base addresses are equal, ++ nothing is known about aliasing. */ ++ if (x_base == 0 || y_base == 0 || rtx_equal_p (x_base, y_base)) ++ return 1; ++ ++ /* The base addresses of the read and write are different ++ expressions. If they are both symbols there is no ++ conflict. */ ++ if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS) ++ return 0; ++ ++ /* If one address is a stack reference there can be no alias: ++ stack references using different base registers do not alias, ++ a stack reference can not alias a parameter, and a stack reference ++ can not alias a global. */ ++ if ((GET_CODE (x_base) == ADDRESS && GET_MODE (x_base) == Pmode) ++ || (GET_CODE (y_base) == ADDRESS && GET_MODE (y_base) == Pmode)) ++ return 0; ++ ++ if (! flag_argument_noalias) ++ return 1; ++ ++ if (flag_argument_noalias > 1) ++ return 0; ++ ++ /* Weak noalias assertion (arguments are distinct, but may match globals). */ ++ return ! (GET_MODE (x_base) == VOIDmode && GET_MODE (y_base) == VOIDmode); ++ } ++ ++ /* Return nonzero if X and Y (memory addresses) could reference the ++ same location in memory. C is an offset accumulator. When ++ C is nonzero, we are testing aliases between X and Y + C. ++ XSIZE is the size in bytes of the X reference, ++ similarly YSIZE is the size in bytes for Y. ++ ++ If XSIZE or YSIZE is zero, we do not know the amount of memory being ++ referenced (the reference was BLKmode), so make the most pessimistic ++ assumptions. ++ ++ We recognize the following cases of non-conflicting memory: ++ ++ (1) addresses involving the frame pointer cannot conflict ++ with addresses involving static variables. ++ (2) static variables with different addresses cannot conflict. ++ ++ Nice to notice that varying addresses cannot conflict with fp if no ++ local variables had their addresses taken, but that's too hard now. 
*/ ++ ++ ++ static int ++ memrefs_conflict_p (xsize, x, ysize, y, c) ++ register rtx x, y; ++ int xsize, ysize; ++ HOST_WIDE_INT c; ++ { ++ if (GET_CODE (x) == HIGH) ++ x = XEXP (x, 0); ++ else if (GET_CODE (x) == LO_SUM) ++ x = XEXP (x, 1); ++ else ++ x = canon_rtx (x); ++ if (GET_CODE (y) == HIGH) ++ y = XEXP (y, 0); ++ else if (GET_CODE (y) == LO_SUM) ++ y = XEXP (y, 1); ++ else ++ y = canon_rtx (y); ++ ++ if (rtx_equal_for_memref_p (x, y)) ++ { ++ if (xsize == 0 || ysize == 0) ++ return 1; ++ if (c >= 0 && xsize > c) ++ return 1; ++ if (c < 0 && ysize+c > 0) ++ return 1; ++ return 0; ++ } ++ ++ if (y == frame_pointer_rtx || y == hard_frame_pointer_rtx ++ || y == stack_pointer_rtx) ++ { ++ rtx t = y; ++ int tsize = ysize; ++ y = x; ysize = xsize; ++ x = t; xsize = tsize; ++ } ++ ++ if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx ++ || x == stack_pointer_rtx) ++ { ++ rtx y1; ++ ++ if (CONSTANT_P (y)) ++ return 0; ++ ++ if (GET_CODE (y) == PLUS ++ && canon_rtx (XEXP (y, 0)) == x ++ && (y1 = canon_rtx (XEXP (y, 1))) ++ && GET_CODE (y1) == CONST_INT) ++ { ++ c += INTVAL (y1); ++ return (xsize == 0 || ysize == 0 ++ || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)); ++ } ++ ++ if (GET_CODE (y) == PLUS ++ && (y1 = canon_rtx (XEXP (y, 0))) ++ && CONSTANT_P (y1)) ++ return 0; ++ ++ return 1; ++ } ++ ++ if (GET_CODE (x) == PLUS) ++ { ++ /* The fact that X is canonicalized means that this ++ PLUS rtx is canonicalized. */ ++ rtx x0 = XEXP (x, 0); ++ rtx x1 = XEXP (x, 1); ++ ++ if (GET_CODE (y) == PLUS) ++ { ++ /* The fact that Y is canonicalized means that this ++ PLUS rtx is canonicalized. */ ++ rtx y0 = XEXP (y, 0); ++ rtx y1 = XEXP (y, 1); ++ ++ if (rtx_equal_for_memref_p (x1, y1)) ++ return memrefs_conflict_p (xsize, x0, ysize, y0, c); ++ if (rtx_equal_for_memref_p (x0, y0)) ++ return memrefs_conflict_p (xsize, x1, ysize, y1, c); ++ if (GET_CODE (x1) == CONST_INT) ++ if (GET_CODE (y1) == CONST_INT) ++ return memrefs_conflict_p (xsize, x0, ysize, y0, ++ c - INTVAL (x1) + INTVAL (y1)); ++ else ++ return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1)); ++ else if (GET_CODE (y1) == CONST_INT) ++ return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1)); ++ ++ /* Handle case where we cannot understand iteration operators, ++ but we notice that the base addresses are distinct objects. */ ++ /* ??? Is this still necessary? */ ++ x = find_symbolic_term (x); ++ if (x == 0) ++ return 1; ++ y = find_symbolic_term (y); ++ if (y == 0) ++ return 1; ++ return rtx_equal_for_memref_p (x, y); ++ } ++ else if (GET_CODE (x1) == CONST_INT) ++ return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1)); ++ } ++ else if (GET_CODE (y) == PLUS) ++ { ++ /* The fact that Y is canonicalized means that this ++ PLUS rtx is canonicalized. */ ++ rtx y0 = XEXP (y, 0); ++ rtx y1 = XEXP (y, 1); ++ ++ if (GET_CODE (y1) == CONST_INT) ++ return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1)); ++ else ++ return 1; ++ } ++ ++ if (GET_CODE (x) == GET_CODE (y)) ++ switch (GET_CODE (x)) ++ { ++ case MULT: ++ { ++ /* Handle cases where we expect the second operands to be the ++ same, and check only whether the first operand would conflict ++ or not. */ ++ rtx x0, y0; ++ rtx x1 = canon_rtx (XEXP (x, 1)); ++ rtx y1 = canon_rtx (XEXP (y, 1)); ++ if (! 
rtx_equal_for_memref_p (x1, y1)) ++ return 1; ++ x0 = canon_rtx (XEXP (x, 0)); ++ y0 = canon_rtx (XEXP (y, 0)); ++ if (rtx_equal_for_memref_p (x0, y0)) ++ return (xsize == 0 || ysize == 0 ++ || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)); ++ ++ /* Can't properly adjust our sizes. */ ++ if (GET_CODE (x1) != CONST_INT) ++ return 1; ++ xsize /= INTVAL (x1); ++ ysize /= INTVAL (x1); ++ c /= INTVAL (x1); ++ return memrefs_conflict_p (xsize, x0, ysize, y0, c); ++ } ++ } ++ ++ /* Treat an access through an AND (e.g. a subword access on an Alpha) ++ as an access with indeterminate size. */ ++ if (GET_CODE (x) == AND && GET_CODE (XEXP (x, 1)) == CONST_INT) ++ return memrefs_conflict_p (0, XEXP (x, 0), ysize, y, c); ++ if (GET_CODE (y) == AND && GET_CODE (XEXP (y, 1)) == CONST_INT) ++ return memrefs_conflict_p (xsize, x, 0, XEXP (y, 0), c); ++ ++ if (CONSTANT_P (x)) ++ { ++ if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT) ++ { ++ c += (INTVAL (y) - INTVAL (x)); ++ return (xsize == 0 || ysize == 0 ++ || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)); ++ } ++ ++ if (GET_CODE (x) == CONST) ++ { ++ if (GET_CODE (y) == CONST) ++ return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), ++ ysize, canon_rtx (XEXP (y, 0)), c); ++ else ++ return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), ++ ysize, y, c); ++ } ++ if (GET_CODE (y) == CONST) ++ return memrefs_conflict_p (xsize, x, ysize, ++ canon_rtx (XEXP (y, 0)), c); ++ ++ if (CONSTANT_P (y)) ++ return (rtx_equal_for_memref_p (x, y) ++ && (xsize == 0 || ysize == 0 ++ || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0))); ++ ++ return 1; ++ } ++ return 1; ++ } ++ ++ /* Functions to compute memory dependencies. ++ ++ Since we process the insns in execution order, we can build tables ++ to keep track of what registers are fixed (and not aliased), what registers ++ are varying in known ways, and what registers are varying in unknown ++ ways. ++ ++ If both memory references are volatile, then there must always be a ++ dependence between the two references, since their order can not be ++ changed. A volatile and non-volatile reference can be interchanged ++ though. ++ ++ A MEM_IN_STRUCT reference at a non-QImode varying address can never ++ conflict with a non-MEM_IN_STRUCT reference at a fixed address. We must ++ allow QImode aliasing because the ANSI C standard allows character ++ pointers to alias anything. We are assuming that characters are ++ always QImode here. */ ++ ++ /* Read dependence: X is read after read in MEM takes place. There can ++ only be a dependence here if both reads are volatile. */ ++ ++ int ++ read_dependence (mem, x) ++ rtx mem; ++ rtx x; ++ { ++ return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem); ++ } ++ ++ /* True dependence: X is read after store in MEM takes place. */ ++ ++ int ++ true_dependence (mem, mem_mode, x, varies) ++ rtx mem; ++ enum machine_mode mem_mode; ++ rtx x; ++ int (*varies)(); ++ { ++ rtx x_addr, mem_addr; ++ ++ if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem)) ++ return 1; ++ ++ x_addr = XEXP (x, 0); ++ mem_addr = XEXP (mem, 0); ++ ++ if (flag_alias_check && ! base_alias_check (x_addr, mem_addr)) ++ return 0; ++ ++ /* If X is an unchanging read, then it can't possibly conflict with any ++ non-unchanging store. It may conflict with an unchanging write though, ++ because there may be a single store to this address to initialize it. ++ Just fall through to the code below to resolve the case where we have ++ both an unchanging read and an unchanging write. 
This won't handle all ++ cases optimally, but the possible performance loss should be ++ negligible. */ ++ if (RTX_UNCHANGING_P (x) && ! RTX_UNCHANGING_P (mem)) ++ return 0; ++ ++ x_addr = canon_rtx (x_addr); ++ mem_addr = canon_rtx (mem_addr); ++ if (mem_mode == VOIDmode) ++ mem_mode = GET_MODE (mem); ++ ++ if (! memrefs_conflict_p (mem_mode, mem_addr, SIZE_FOR_MODE (x), x_addr, 0)) ++ return 0; ++ ++ /* If both references are struct references, or both are not, nothing ++ is known about aliasing. ++ ++ If either reference is QImode or BLKmode, ANSI C permits aliasing. ++ ++ If both addresses are constant, or both are not, nothing is known ++ about aliasing. */ ++ if (MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (mem) ++ || mem_mode == QImode || mem_mode == BLKmode ++ || GET_MODE (x) == QImode || GET_MODE (mem) == BLKmode ++ || varies (x_addr) == varies (mem_addr)) ++ return 1; ++ ++ /* One memory reference is to a constant address, one is not. ++ One is to a structure, the other is not. ++ ++ If either memory reference is a variable structure the other is a ++ fixed scalar and there is no aliasing. */ ++ if ((MEM_IN_STRUCT_P (mem) && varies (mem_addr)) ++ || (MEM_IN_STRUCT_P (x) && varies (x))) ++ return 0; ++ ++ return 1; ++ } ++ ++ /* Anti dependence: X is written after read in MEM takes place. */ ++ ++ int ++ anti_dependence (mem, x) ++ rtx mem; ++ rtx x; ++ { ++ if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem)) ++ return 1; ++ ++ if (flag_alias_check && ! base_alias_check (XEXP (x, 0), XEXP (mem, 0))) ++ return 0; ++ ++ /* If MEM is an unchanging read, then it can't possibly conflict with ++ the store to X, because there is at most one store to MEM, and it must ++ have occurred somewhere before MEM. */ ++ x = canon_rtx (x); ++ mem = canon_rtx (mem); ++ if (RTX_UNCHANGING_P (mem)) ++ return 0; ++ ++ return (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0), ++ SIZE_FOR_MODE (x), XEXP (x, 0), 0) ++ && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem) ++ && GET_MODE (mem) != QImode ++ && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x)) ++ && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x) ++ && GET_MODE (x) != QImode ++ && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))); ++ } ++ ++ /* Output dependence: X is written after store in MEM takes place. */ ++ ++ int ++ output_dependence (mem, x) ++ register rtx mem; ++ register rtx x; ++ { ++ if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem)) ++ return 1; ++ ++ if (flag_alias_check && !base_alias_check (XEXP (x, 0), XEXP (mem, 0))) ++ return 0; ++ ++ x = canon_rtx (x); ++ mem = canon_rtx (mem); ++ return (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0), ++ SIZE_FOR_MODE (x), XEXP (x, 0), 0) ++ && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem) ++ && GET_MODE (mem) != QImode ++ && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x)) ++ && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x) ++ && GET_MODE (x) != QImode ++ && ! MEM_IN_STRUCT_P (mem) && ! 
rtx_addr_varies_p (mem))); ++ } ++ ++ void ++ init_alias_analysis () ++ { ++ int maxreg = max_reg_num (); ++ register int i; ++ register rtx insn; ++ rtx note; ++ rtx set; ++ int changed; ++ ++ reg_known_value_size = maxreg; ++ ++ reg_known_value ++ = (rtx *) oballoc ((maxreg - FIRST_PSEUDO_REGISTER) * sizeof (rtx)) ++ - FIRST_PSEUDO_REGISTER; ++ reg_known_equiv_p = ++ oballoc (maxreg - FIRST_PSEUDO_REGISTER) - FIRST_PSEUDO_REGISTER; ++ bzero ((char *) (reg_known_value + FIRST_PSEUDO_REGISTER), ++ (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx)); ++ bzero (reg_known_equiv_p + FIRST_PSEUDO_REGISTER, ++ (maxreg - FIRST_PSEUDO_REGISTER) * sizeof (char)); ++ ++ if (flag_alias_check) ++ { ++ /* Overallocate reg_base_value to allow some growth during loop ++ optimization. Loop unrolling can create a large number of ++ registers. */ ++ reg_base_value_size = maxreg * 2; ++ reg_base_value = (rtx *)oballoc (reg_base_value_size * sizeof (rtx)); ++ reg_seen = (char *)alloca (reg_base_value_size); ++ bzero (reg_base_value, reg_base_value_size * sizeof (rtx)); ++ bzero (reg_seen, reg_base_value_size); ++ ++ /* Mark all hard registers which may contain an address. ++ The stack, frame and argument pointers may contain an address. ++ An argument register which can hold a Pmode value may contain ++ an address even if it is not in BASE_REGS. ++ ++ The address expression is VOIDmode for an argument and ++ Pmode for other registers. */ ++ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) ++ if (FUNCTION_ARG_REGNO_P (i) && HARD_REGNO_MODE_OK (i, Pmode)) ++ reg_base_value[i] = gen_rtx (ADDRESS, VOIDmode, ++ gen_rtx (REG, Pmode, i)); ++ ++ reg_base_value[STACK_POINTER_REGNUM] ++ = gen_rtx (ADDRESS, Pmode, stack_pointer_rtx); ++ reg_base_value[ARG_POINTER_REGNUM] ++ = gen_rtx (ADDRESS, Pmode, arg_pointer_rtx); ++ reg_base_value[FRAME_POINTER_REGNUM] ++ = gen_rtx (ADDRESS, Pmode, frame_pointer_rtx); ++ reg_base_value[HARD_FRAME_POINTER_REGNUM] ++ = gen_rtx (ADDRESS, Pmode, hard_frame_pointer_rtx); ++ } ++ ++ copying_arguments = 1; ++ /* Fill in the entries with known constant values. */ ++ for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) ++ { ++ if (flag_alias_check && GET_RTX_CLASS (GET_CODE (insn)) == 'i') ++ { ++ /* If this insn has a noalias note, process it, Otherwise, ++ scan for sets. A simple set will have no side effects ++ which could change the base value of any other register. */ ++ rtx noalias_note; ++ if (GET_CODE (PATTERN (insn)) == SET ++ && (noalias_note = find_reg_note (insn, REG_NOALIAS, NULL_RTX))) ++ record_set(SET_DEST (PATTERN (insn)), 0); ++ else ++ note_stores (PATTERN (insn), record_set); ++ } ++ else if (GET_CODE (insn) == NOTE ++ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_BEG) ++ copying_arguments = 0; ++ ++ if ((set = single_set (insn)) != 0 ++ && GET_CODE (SET_DEST (set)) == REG ++ && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER ++ && (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0 ++ && reg_n_sets[REGNO (SET_DEST (set))] == 1) ++ || (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != 0) ++ && GET_CODE (XEXP (note, 0)) != EXPR_LIST) ++ { ++ int regno = REGNO (SET_DEST (set)); ++ reg_known_value[regno] = XEXP (note, 0); ++ reg_known_equiv_p[regno] = REG_NOTE_KIND (note) == REG_EQUIV; ++ } ++ } ++ ++ /* Fill in the remaining entries. */ ++ for (i = FIRST_PSEUDO_REGISTER; i < maxreg; i++) ++ if (reg_known_value[i] == 0) ++ reg_known_value[i] = regno_reg_rtx[i]; ++ ++ if (! 
flag_alias_check) ++ return; ++ ++ /* Simplify the reg_base_value array so that no register refers to ++ another register, except to special registers indirectly through ++ ADDRESS expressions. ++ ++ In theory this loop can take as long as O(registers^2), but unless ++ there are very long dependency chains it will run in close to linear ++ time. */ ++ do ++ { ++ changed = 0; ++ for (i = FIRST_PSEUDO_REGISTER; i < reg_base_value_size; i++) ++ { ++ rtx base = reg_base_value[i]; ++ if (base && GET_CODE (base) == REG) ++ { ++ int base_regno = REGNO (base); ++ if (base_regno == i) /* register set from itself */ ++ reg_base_value[i] = 0; ++ else ++ reg_base_value[i] = reg_base_value[base_regno]; ++ changed = 1; ++ } ++ } ++ } ++ while (changed); ++ ++ reg_seen = 0; ++ } ++ ++ void ++ end_alias_analysis () ++ { ++ reg_known_value = 0; ++ reg_base_value = 0; ++ reg_base_value_size = 0; ++ } +diff -rcp2N gcc-2.7.2.2/calls.c gcc-2.7.2.2.f.2/calls.c +*** gcc-2.7.2.2/calls.c Thu Oct 26 21:53:43 1995 +--- gcc-2.7.2.2.f.2/calls.c Fri Jan 10 23:18:21 1997 +*************** expand_call (exp, target, ignore) +*** 564,567 **** +--- 564,569 ---- + /* Nonzero if it is plausible that this is a call to alloca. */ + int may_be_alloca; ++ /* Nonzero if this is a call to malloc or a related function. */ ++ int is_malloc; + /* Nonzero if this is a call to setjmp or a related function. */ + int returns_twice; +*************** expand_call (exp, target, ignore) +*** 852,855 **** +--- 854,858 ---- + returns_twice = 0; + is_longjmp = 0; ++ is_malloc = 0; + + if (name != 0 && IDENTIFIER_LENGTH (DECL_NAME (fndecl)) <= 15) +*************** expand_call (exp, target, ignore) +*** 891,894 **** +--- 894,901 ---- + && ! strcmp (tname, "longjmp")) + is_longjmp = 1; ++ /* Only recognize malloc when alias analysis is enabled. */ ++ else if (tname[0] == 'm' && flag_alias_check ++ && ! strcmp(tname, "malloc")) ++ is_malloc = 1; + } + +*************** expand_call (exp, target, ignore) +*** 1363,1367 **** + /* Now we are about to start emitting insns that can be deleted + if a libcall is deleted. */ +! if (is_const) + start_sequence (); + +--- 1370,1374 ---- + /* Now we are about to start emitting insns that can be deleted + if a libcall is deleted. */ +! if (is_const || is_malloc) + start_sequence (); + +*************** expand_call (exp, target, ignore) +*** 1951,1954 **** +--- 1958,1975 ---- + end_sequence (); + emit_insns (insns); ++ } ++ else if (is_malloc) ++ { ++ rtx temp = gen_reg_rtx (GET_MODE (valreg)); ++ rtx last, insns; ++ ++ emit_move_insn (temp, valreg); ++ last = get_last_insn (); ++ REG_NOTES (last) = ++ gen_rtx (EXPR_LIST, REG_NOALIAS, temp, REG_NOTES (last)); ++ insns = get_insns (); ++ end_sequence (); ++ emit_insns (insns); ++ valreg = temp; + } + +diff -rcp2N gcc-2.7.2.2/combine.c gcc-2.7.2.2.f.2/combine.c +*** gcc-2.7.2.2/combine.c Sun Nov 26 14:32:07 1995 +--- gcc-2.7.2.2.f.2/combine.c Fri Jan 10 23:18:21 1997 +*************** distribute_notes (notes, from_insn, i3, +*** 10648,10651 **** +--- 10648,10652 ---- + case REG_EQUIV: + case REG_NONNEG: ++ case REG_NOALIAS: + /* These notes say something about results of an insn. 
We can + only support them if they used to be on I3 in which case they +diff -rcp2N gcc-2.7.2.2/config/alpha/alpha.c gcc-2.7.2.2.f.2/config/alpha/alpha.c +*** gcc-2.7.2.2/config/alpha/alpha.c Thu Feb 20 19:24:11 1997 +--- gcc-2.7.2.2.f.2/config/alpha/alpha.c Sun Feb 23 15:35:33 1997 +*************** output_prolog (file, size) +*** 1370,1373 **** +--- 1370,1378 ---- + + alpha_function_needs_gp = 0; ++ #ifdef __linux__ ++ if(profile_flag) { ++ alpha_function_needs_gp = 1; ++ } ++ #endif + for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) + if ((GET_CODE (insn) == CALL_INSN) +diff -rcp2N gcc-2.7.2.2/config/alpha/alpha.h gcc-2.7.2.2.f.2/config/alpha/alpha.h +*** gcc-2.7.2.2/config/alpha/alpha.h Thu Feb 20 19:24:12 1997 +--- gcc-2.7.2.2.f.2/config/alpha/alpha.h Sun Feb 23 15:35:34 1997 +*************** extern int target_flags; +*** 112,116 **** +--- 112,118 ---- + {"", TARGET_DEFAULT | TARGET_CPU_DEFAULT} } + ++ #ifndef TARGET_DEFAULT + #define TARGET_DEFAULT 3 ++ #endif + + #ifndef TARGET_CPU_DEFAULT +diff -rcp2N gcc-2.7.2.2/config/alpha/alpha.md gcc-2.7.2.2.f.2/config/alpha/alpha.md +*** gcc-2.7.2.2/config/alpha/alpha.md Fri Oct 27 06:49:59 1995 +--- gcc-2.7.2.2.f.2/config/alpha/alpha.md Mon Dec 23 00:43:55 1996 +*************** +*** 1746,1752 **** + (if_then_else:DF + (match_operator 3 "signed_comparison_operator" +! [(match_operand:DF 1 "reg_or_fp0_operand" "fG,fG") + (match_operand:DF 2 "fp0_operand" "G,G")]) +! (float_extend:DF (match_operand:SF 4 "reg_or_fp0_operand" "fG,0")) + (match_operand:DF 5 "reg_or_fp0_operand" "0,fG")))] + "TARGET_FP" +--- 1746,1752 ---- + (if_then_else:DF + (match_operator 3 "signed_comparison_operator" +! [(match_operand:DF 4 "reg_or_fp0_operand" "fG,fG") + (match_operand:DF 2 "fp0_operand" "G,G")]) +! (float_extend:DF (match_operand:SF 1 "reg_or_fp0_operand" "fG,0")) + (match_operand:DF 5 "reg_or_fp0_operand" "0,fG")))] + "TARGET_FP" +diff -rcp2N gcc-2.7.2.2/config/alpha/linux.h gcc-2.7.2.2.f.2/config/alpha/linux.h +*** gcc-2.7.2.2/config/alpha/linux.h Wed Dec 31 19:00:00 1969 +--- gcc-2.7.2.2.f.2/config/alpha/linux.h Thu Dec 19 12:31:08 1996 +*************** +*** 0 **** +--- 1,72 ---- ++ /* Definitions of target machine for GNU compiler, for Alpha Linux, ++ using ECOFF. ++ Copyright (C) 1995 Free Software Foundation, Inc. ++ Contributed by Bob Manson. ++ Derived from work contributed by Cygnus Support, ++ (c) 1993 Free Software Foundation. ++ ++ This file is part of GNU CC. ++ ++ GNU CC is free software; you can redistribute it and/or modify ++ it under the terms of the GNU General Public License as published by ++ the Free Software Foundation; either version 2, or (at your option) ++ any later version. ++ ++ GNU CC is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ GNU General Public License for more details. ++ ++ You should have received a copy of the GNU General Public License ++ along with GNU CC; see the file COPYING. If not, write to ++ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ ++ ++ #define TARGET_DEFAULT (3 | MASK_GAS) ++ ++ #include "alpha/alpha.h" ++ ++ #undef TARGET_VERSION ++ #define TARGET_VERSION fprintf (stderr, " (Linux/Alpha)"); ++ ++ #undef CPP_PREDEFINES ++ #define CPP_PREDEFINES "\ ++ -D__alpha -D__alpha__ -D__linux__ -D__linux -D_LONGLONG -Dlinux -Dunix \ ++ -Asystem(linux) -Acpu(alpha) -Amachine(alpha)" ++ ++ /* We don't actually need any of these; the MD_ vars are ignored ++ anyway for cross-compilers, and the other specs won't get picked up ++ 'coz the user is supposed to do ld -r (hmm, perhaps that should be ++ the default). In any case, setting them thus will catch some ++ common user errors. */ ++ ++ #undef MD_EXEC_PREFIX ++ #undef MD_STARTFILE_PREFIX ++ ++ #undef LIB_SPEC ++ #define LIB_SPEC "%{pg:-lgmon} %{pg:-lc_p} %{!pg:-lc}" ++ ++ #undef LINK_SPEC ++ #define LINK_SPEC \ ++ "-G 8 %{O*:-O3} %{!O*:-O1}" ++ ++ #undef ASM_SPEC ++ #define ASM_SPEC "-nocpp" ++ ++ /* Can't do stabs */ ++ #undef SDB_DEBUGGING_INFO ++ ++ /* Prefer dbx. */ ++ #undef PREFERRED_DEBUGGING_TYPE ++ #define PREFERRED_DEBUGGING_TYPE DBX_DEBUG ++ ++ #undef FUNCTION_PROFILER ++ ++ #define FUNCTION_PROFILER(FILE, LABELNO) \ ++ do { \ ++ fputs ("\tlda $27,_mcount\n", (FILE)); \ ++ fputs ("\tjsr $26,($27),_mcount\n", (FILE)); \ ++ fputs ("\tldgp $29,0($26)\n", (FILE)); \ ++ } while (0); ++ ++ /* Generate calls to memcpy, etc., not bcopy, etc. */ ++ #define TARGET_MEM_FUNCTIONS +diff -rcp2N gcc-2.7.2.2/config/alpha/t-linux gcc-2.7.2.2.f.2/config/alpha/t-linux +*** gcc-2.7.2.2/config/alpha/t-linux Wed Dec 31 19:00:00 1969 +--- gcc-2.7.2.2.f.2/config/alpha/t-linux Thu Dec 19 12:31:08 1996 +*************** +*** 0 **** +--- 1,3 ---- ++ # Our header files are supposed to be correct, nein? ++ FIXINCLUDES = ++ STMP_FIXPROTO = +diff -rcp2N gcc-2.7.2.2/config/alpha/x-linux gcc-2.7.2.2.f.2/config/alpha/x-linux +*** gcc-2.7.2.2/config/alpha/x-linux Wed Dec 31 19:00:00 1969 +--- gcc-2.7.2.2.f.2/config/alpha/x-linux Thu Dec 19 12:31:08 1996 +*************** +*** 0 **** +--- 1 ---- ++ CLIB=-lbfd -liberty +diff -rcp2N gcc-2.7.2.2/config/alpha/xm-alpha.h gcc-2.7.2.2.f.2/config/alpha/xm-alpha.h +*** gcc-2.7.2.2/config/alpha/xm-alpha.h Thu Aug 31 17:52:27 1995 +--- gcc-2.7.2.2.f.2/config/alpha/xm-alpha.h Thu Dec 19 12:31:08 1996 +*************** Boston, MA 02111-1307, USA. */ +*** 46,51 **** +--- 46,53 ---- + #include <alloca.h> + #else ++ #ifndef __alpha__ + extern void *alloca (); + #endif ++ #endif + + /* The host compiler has problems with enum bitfields since it makes +*************** extern void *malloc (), *realloc (), *ca +*** 68,72 **** +--- 70,76 ---- + /* OSF/1 has vprintf. */ + ++ #ifndef linux /* 1996/02/22 mauro@craftwork.com -- unreliable with Linux */ + #define HAVE_VPRINTF ++ #endif + + /* OSF/1 has putenv. */ +diff -rcp2N gcc-2.7.2.2/config/alpha/xm-linux.h gcc-2.7.2.2.f.2/config/alpha/xm-linux.h +*** gcc-2.7.2.2/config/alpha/xm-linux.h Wed Dec 31 19:00:00 1969 +--- gcc-2.7.2.2.f.2/config/alpha/xm-linux.h Thu Dec 19 12:31:08 1996 +*************** +*** 0 **** +--- 1,8 ---- ++ #ifndef _XM_LINUX_H ++ #define _XM_LINUX_H ++ ++ #include "xm-alpha.h" ++ ++ #define DONT_DECLARE_SYS_SIGLIST ++ #define USE_BFD ++ #endif +diff -rcp2N gcc-2.7.2.2/config/x-linux gcc-2.7.2.2.f.2/config/x-linux +*** gcc-2.7.2.2/config/x-linux Tue Mar 28 07:43:37 1995 +--- gcc-2.7.2.2.f.2/config/x-linux Thu Dec 19 12:31:08 1996 +*************** BOOT_CFLAGS = -O $(CFLAGS) -Iinclude +*** 13,14 **** +--- 13,17 ---- + # Don't run fixproto + STMP_FIXPROTO = ++ ++ # Don't install "assert.h" in gcc. 
We use the one in glibc. ++ INSTALL_ASSERT_H = +diff -rcp2N gcc-2.7.2.2/config/x-linux-aout gcc-2.7.2.2.f.2/config/x-linux-aout +*** gcc-2.7.2.2/config/x-linux-aout Wed Dec 31 19:00:00 1969 +--- gcc-2.7.2.2.f.2/config/x-linux-aout Thu Dec 19 12:31:08 1996 +*************** +*** 0 **** +--- 1,14 ---- ++ # It is defined in config/xm-linux.h. ++ # X_CFLAGS = -DPOSIX ++ ++ # The following is needed when compiling stages 2 and 3 because gcc's ++ # limits.h must be picked up before /usr/include/limits.h. This is because ++ # each does an #include_next of the other if the other hasn't been included. ++ # /usr/include/limits.h loses if it gets found first because /usr/include is ++ # at the end of the search order. When a new version of gcc is released, ++ # gcc's limits.h hasn't been installed yet and hence isn't found. ++ ++ BOOT_CFLAGS = -O $(CFLAGS) -Iinclude ++ ++ # Don't run fixproto ++ STMP_FIXPROTO = +diff -rcp2N gcc-2.7.2.2/configure gcc-2.7.2.2.f.2/configure +*** gcc-2.7.2.2/configure Thu Feb 20 19:24:33 1997 +--- gcc-2.7.2.2.f.2/configure Sun Feb 23 16:15:12 1997 +*************** exec_prefix='$(prefix)' +*** 82,85 **** +--- 82,86 ---- + # The default g++ include directory is $(libdir)/g++-include. + gxx_include_dir='$(libdir)/g++-include' ++ #gxx_include_dir='$(exec_prefix)/include/g++' + + # Default --program-transform-name to nothing. +*************** for machine in $canon_build $canon_host +*** 548,551 **** +--- 549,559 ---- + use_collect2=yes + ;; ++ alpha-*-linux*) ++ tm_file=alpha/linux.h ++ tmake_file=alpha/t-linux ++ xmake_file=alpha/x-linux ++ fixincludes=Makefile.in ++ xm_file=alpha/xm-linux.h ++ ;; + alpha-dec-osf[23456789]*) + tm_file=alpha/osf2.h +*************** for machine in $canon_build $canon_host +*** 985,989 **** + cpu_type=i386 # with a.out format using pre BFD linkers + xm_file=i386/xm-linux.h +! xmake_file=x-linux + tm_file=i386/linux-oldld.h + fixincludes=Makefile.in # The headers are ok already. +--- 993,997 ---- + cpu_type=i386 # with a.out format using pre BFD linkers + xm_file=i386/xm-linux.h +! xmake_file=x-linux-aout + tm_file=i386/linux-oldld.h + fixincludes=Makefile.in # The headers are ok already. +*************** for machine in $canon_build $canon_host +*** 994,998 **** + cpu_type=i386 # with a.out format + xm_file=i386/xm-linux.h +! xmake_file=x-linux + tm_file=i386/linux-aout.h + fixincludes=Makefile.in # The headers are ok already. +--- 1002,1006 ---- + cpu_type=i386 # with a.out format + xm_file=i386/xm-linux.h +! xmake_file=x-linux-aout + tm_file=i386/linux-aout.h + fixincludes=Makefile.in # The headers are ok already. +*************** for machine in $canon_build $canon_host +*** 1003,1007 **** + cpu_type=i386 # with ELF format, using GNU libc v1. + xm_file=i386/xm-linux.h +! xmake_file=x-linux + tmake_file=t-linux-libc1 + tm_file=i386/linux.h +--- 1011,1015 ---- + cpu_type=i386 # with ELF format, using GNU libc v1. + xm_file=i386/xm-linux.h +! xmake_file=x-linux-aout + tmake_file=t-linux-libc1 + tm_file=i386/linux.h +diff -rcp2N gcc-2.7.2.2/cse.c gcc-2.7.2.2.f.2/cse.c +*** gcc-2.7.2.2/cse.c Sun Nov 26 14:47:05 1995 +--- gcc-2.7.2.2.f.2/cse.c Fri Jan 10 23:18:22 1997 +*************** static struct table_elt *last_jump_equiv +*** 520,544 **** + static int constant_pool_entries_cost; + +- /* Bits describing what kind of values in memory must be invalidated +- for a particular instruction. If all three bits are zero, +- no memory refs need to be invalidated. 
Each bit is more powerful +- than the preceding ones, and if a bit is set then the preceding +- bits are also set. +- +- Here is how the bits are set: +- Pushing onto the stack invalidates only the stack pointer, +- writing at a fixed address invalidates only variable addresses, +- writing in a structure element at variable address +- invalidates all but scalar variables, +- and writing in anything else at variable address invalidates everything. */ +- +- struct write_data +- { +- int sp : 1; /* Invalidate stack pointer. */ +- int var : 1; /* Invalidate variable addresses. */ +- int nonscalar : 1; /* Invalidate all but scalar variables. */ +- int all : 1; /* Invalidate all memory refs. */ +- }; +- + /* Define maximum length of a branch path. */ + +--- 520,523 ---- +*************** static void merge_equiv_classes PROTO((s +*** 626,632 **** + struct table_elt *)); + static void invalidate PROTO((rtx, enum machine_mode)); + static void remove_invalid_refs PROTO((int)); + static void rehash_using_reg PROTO((rtx)); +! static void invalidate_memory PROTO((struct write_data *)); + static void invalidate_for_call PROTO((void)); + static rtx use_related_value PROTO((rtx, struct table_elt *)); +--- 605,612 ---- + struct table_elt *)); + static void invalidate PROTO((rtx, enum machine_mode)); ++ static int cse_rtx_varies_p PROTO((rtx)); + static void remove_invalid_refs PROTO((int)); + static void rehash_using_reg PROTO((rtx)); +! static void invalidate_memory PROTO((void)); + static void invalidate_for_call PROTO((void)); + static rtx use_related_value PROTO((rtx, struct table_elt *)); +*************** static void set_nonvarying_address_compo +*** 638,644 **** + HOST_WIDE_INT *)); + static int refers_to_p PROTO((rtx, rtx)); +- static int refers_to_mem_p PROTO((rtx, rtx, HOST_WIDE_INT, +- HOST_WIDE_INT)); +- static int cse_rtx_addr_varies_p PROTO((rtx)); + static rtx canon_reg PROTO((rtx, rtx)); + static void find_best_addr PROTO((rtx, rtx *)); +--- 618,621 ---- +*************** static void record_jump_cond PROTO((enum +*** 656,661 **** + rtx, rtx, int)); + static void cse_insn PROTO((rtx, int)); +! static void note_mem_written PROTO((rtx, struct write_data *)); +! static void invalidate_from_clobbers PROTO((struct write_data *, rtx)); + static rtx cse_process_notes PROTO((rtx, rtx)); + static void cse_around_loop PROTO((rtx)); +--- 633,638 ---- + rtx, rtx, int)); + static void cse_insn PROTO((rtx, int)); +! static int note_mem_written PROTO((rtx)); +! static void invalidate_from_clobbers PROTO((rtx)); + static rtx cse_process_notes PROTO((rtx, rtx)); + static void cse_around_loop PROTO((rtx)); +*************** invalidate (x, full_mode) +*** 1512,1517 **** + register int i; + register struct table_elt *p; +- rtx base; +- HOST_WIDE_INT start, end; + + /* If X is a register, dependencies on its contents +--- 1489,1492 ---- +*************** invalidate (x, full_mode) +*** 1605,1611 **** + full_mode = GET_MODE (x); + +- set_nonvarying_address_components (XEXP (x, 0), GET_MODE_SIZE (full_mode), +- &base, &start, &end); +- + for (i = 0; i < NBUCKETS; i++) + { +--- 1580,1583 ---- +*************** invalidate (x, full_mode) +*** 1614,1618 **** + { + next = p->next_same_hash; +! if (refers_to_mem_p (p->exp, base, start, end)) + remove_from_table (p, i); + } +--- 1586,1594 ---- + { + next = p->next_same_hash; +! /* Invalidate ASM_OPERANDS which reference memory (this is easier +! than checking all the aliases). */ +! if (p->in_memory +! && (GET_CODE (p->exp) != MEM +! 
|| true_dependence (x, full_mode, p->exp, cse_rtx_varies_p))) + remove_from_table (p, i); + } +*************** rehash_using_reg (x) +*** 1695,1722 **** + } + +- /* Remove from the hash table all expressions that reference memory, +- or some of them as specified by *WRITES. */ +- +- static void +- invalidate_memory (writes) +- struct write_data *writes; +- { +- register int i; +- register struct table_elt *p, *next; +- int all = writes->all; +- int nonscalar = writes->nonscalar; +- +- for (i = 0; i < NBUCKETS; i++) +- for (p = table[i]; p; p = next) +- { +- next = p->next_same_hash; +- if (p->in_memory +- && (all +- || (nonscalar && p->in_struct) +- || cse_rtx_addr_varies_p (p->exp))) +- remove_from_table (p, i); +- } +- } +- + /* Remove from the hash table any expression that is a call-clobbered + register. Also update their TICK values. */ +--- 1671,1674 ---- +*************** invalidate_for_call () +*** 1756,1759 **** +--- 1708,1717 ---- + next = p->next_same_hash; + ++ if (p->in_memory) ++ { ++ remove_from_table (p, hash); ++ continue; ++ } ++ + if (GET_CODE (p->exp) != REG + || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER) +*************** set_nonvarying_address_components (addr, +*** 2395,2477 **** + } + +! /* Return 1 iff any subexpression of X refers to memory +! at an address of BASE plus some offset +! such that any of the bytes' offsets fall between START (inclusive) +! and END (exclusive). +! +! The value is undefined if X is a varying address (as determined by +! cse_rtx_addr_varies_p). This function is not used in such cases. +! +! When used in the cse pass, `qty_const' is nonzero, and it is used +! to treat an address that is a register with a known constant value +! as if it were that constant value. +! In the loop pass, `qty_const' is zero, so this is not done. */ +! +! static int +! refers_to_mem_p (x, base, start, end) +! rtx x, base; +! HOST_WIDE_INT start, end; +! { +! register HOST_WIDE_INT i; +! register enum rtx_code code; +! register char *fmt; +! +! repeat: +! if (x == 0) +! return 0; +! +! code = GET_CODE (x); +! if (code == MEM) +! { +! register rtx addr = XEXP (x, 0); /* Get the address. */ +! rtx mybase; +! HOST_WIDE_INT mystart, myend; +! +! set_nonvarying_address_components (addr, GET_MODE_SIZE (GET_MODE (x)), +! &mybase, &mystart, &myend); +! +! +! /* refers_to_mem_p is never called with varying addresses. +! If the base addresses are not equal, there is no chance +! of the memory addresses conflicting. */ +! if (! rtx_equal_p (mybase, base)) +! return 0; +! +! return myend > start && mystart < end; +! } +! +! /* X does not match, so try its subexpressions. */ +! +! fmt = GET_RTX_FORMAT (code); +! for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) +! if (fmt[i] == 'e') +! { +! if (i == 0) +! { +! x = XEXP (x, 0); +! goto repeat; +! } +! else +! if (refers_to_mem_p (XEXP (x, i), base, start, end)) +! return 1; +! } +! else if (fmt[i] == 'E') +! { +! int j; +! for (j = 0; j < XVECLEN (x, i); j++) +! if (refers_to_mem_p (XVECEXP (x, i, j), base, start, end)) +! return 1; +! } +! +! return 0; +! } +! +! /* Nonzero if X refers to memory at a varying address; + except that a register which has at the moment a known constant value + isn't considered variable. */ + + static int +! cse_rtx_addr_varies_p (x) +! rtx x; + { + /* We need not check for X and the equivalence class being of the same +--- 2353,2363 ---- + } + +! 
/* Nonzero if X, a memory address, refers to a varying address; + except that a register which has at the moment a known constant value + isn't considered variable. */ + + static int +! cse_rtx_varies_p (x) +! register rtx x; + { + /* We need not check for X and the equivalence class being of the same +*************** cse_rtx_addr_varies_p (x) +*** 2479,2497 **** + doesn't vary in any mode. */ + +! if (GET_CODE (x) == MEM +! && GET_CODE (XEXP (x, 0)) == REG +! && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))) +! && GET_MODE (XEXP (x, 0)) == qty_mode[reg_qty[REGNO (XEXP (x, 0))]] +! && qty_const[reg_qty[REGNO (XEXP (x, 0))]] != 0) + return 0; + +! if (GET_CODE (x) == MEM +! && GET_CODE (XEXP (x, 0)) == PLUS +! && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT +! && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG +! && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0))) +! && (GET_MODE (XEXP (XEXP (x, 0), 0)) +! == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]]) +! && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]]) + return 0; + +--- 2365,2381 ---- + doesn't vary in any mode. */ + +! if (GET_CODE (x) == REG +! && REGNO_QTY_VALID_P (REGNO (x)) +! && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]] +! && qty_const[reg_qty[REGNO (x)]] != 0) + return 0; + +! if (GET_CODE (x) == PLUS +! && GET_CODE (XEXP (x, 1)) == CONST_INT +! && GET_CODE (XEXP (x, 0)) == REG +! && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))) +! && (GET_MODE (XEXP (x, 0)) +! == qty_mode[reg_qty[REGNO (XEXP (x, 0))]]) +! && qty_const[reg_qty[REGNO (XEXP (x, 0))]]) + return 0; + +*************** cse_rtx_addr_varies_p (x) +*** 2501,2519 **** + load fp minus a constant into a register, then a MEM which is the + sum of the two `constant' registers. */ +! if (GET_CODE (x) == MEM +! && GET_CODE (XEXP (x, 0)) == PLUS +! && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG +! && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG +! && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0))) +! && (GET_MODE (XEXP (XEXP (x, 0), 0)) +! == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]]) +! && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]] +! && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 1))) +! && (GET_MODE (XEXP (XEXP (x, 0), 1)) +! == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 1))]]) +! && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 1))]]) + return 0; + +! return rtx_addr_varies_p (x); + } + +--- 2385,2402 ---- + load fp minus a constant into a register, then a MEM which is the + sum of the two `constant' registers. */ +! if (GET_CODE (x) == PLUS +! && GET_CODE (XEXP (x, 0)) == REG +! && GET_CODE (XEXP (x, 1)) == REG +! && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))) +! && (GET_MODE (XEXP (x, 0)) +! == qty_mode[reg_qty[REGNO (XEXP (x, 0))]]) +! && qty_const[reg_qty[REGNO (XEXP (x, 0))]] +! && REGNO_QTY_VALID_P (REGNO (XEXP (x, 1))) +! && (GET_MODE (XEXP (x, 1)) +! == qty_mode[reg_qty[REGNO (XEXP (x, 1))]]) +! && qty_const[reg_qty[REGNO (XEXP (x, 1))]]) + return 0; + +! return rtx_varies_p (x); + } + +*************** cse_insn (insn, in_libcall_block) +*** 6105,6110 **** + rtx this_insn_cc0 = 0; + enum machine_mode this_insn_cc0_mode; +- struct write_data writes_memory; +- static struct write_data init = {0, 0, 0, 0}; + + rtx src_eqv = 0; +--- 5988,5991 ---- +*************** cse_insn (insn, in_libcall_block) +*** 6118,6122 **** + + this_insn = insn; +- writes_memory = init; + + /* Find all the SETs and CLOBBERs in this instruction. +--- 5999,6002 ---- +*************** cse_insn (insn, in_libcall_block) +*** 6220,6225 **** + else if (GET_CODE (y) == CLOBBER) + { +! 
/* If we clobber memory, take note of that, +! and canon the address. + This does nothing when a register is clobbered + because we have already invalidated the reg. */ +--- 6100,6104 ---- + else if (GET_CODE (y) == CLOBBER) + { +! /* If we clobber memory, canon the address. + This does nothing when a register is clobbered + because we have already invalidated the reg. */ +*************** cse_insn (insn, in_libcall_block) +*** 6227,6231 **** + { + canon_reg (XEXP (y, 0), NULL_RTX); +! note_mem_written (XEXP (y, 0), &writes_memory); + } + } +--- 6106,6110 ---- + { + canon_reg (XEXP (y, 0), NULL_RTX); +! note_mem_written (XEXP (y, 0)); + } + } +*************** cse_insn (insn, in_libcall_block) +*** 6249,6253 **** + { + canon_reg (XEXP (x, 0), NULL_RTX); +! note_mem_written (XEXP (x, 0), &writes_memory); + } + } +--- 6128,6132 ---- + { + canon_reg (XEXP (x, 0), NULL_RTX); +! note_mem_written (XEXP (x, 0)); + } + } +*************** cse_insn (insn, in_libcall_block) +*** 6674,6678 **** + } + #endif /* LOAD_EXTEND_OP */ +! + if (src == src_folded) + src_folded = 0; +--- 6553,6557 ---- + } + #endif /* LOAD_EXTEND_OP */ +! + if (src == src_folded) + src_folded = 0; +*************** cse_insn (insn, in_libcall_block) +*** 6860,6864 **** + || (GET_CODE (src_folded) != MEM + && ! src_folded_force_flag)) +! && GET_MODE_CLASS (mode) != MODE_CC) + { + src_folded_force_flag = 1; +--- 6739,6744 ---- + || (GET_CODE (src_folded) != MEM + && ! src_folded_force_flag)) +! && GET_MODE_CLASS (mode) != MODE_CC +! && mode != VOIDmode) + { + src_folded_force_flag = 1; +*************** cse_insn (insn, in_libcall_block) +*** 6984,6993 **** + { + dest = fold_rtx (dest, insn); +! +! /* Decide whether we invalidate everything in memory, +! or just things at non-fixed places. +! Writing a large aggregate must invalidate everything +! because we don't know how long it is. */ +! note_mem_written (dest, &writes_memory); + } + +--- 6864,6868 ---- + { + dest = fold_rtx (dest, insn); +! note_mem_written (dest); + } + +*************** cse_insn (insn, in_libcall_block) +*** 7234,7238 **** + sets[i].src_elt = src_eqv_elt; + +! invalidate_from_clobbers (&writes_memory, x); + + /* Some registers are invalidated by subroutine calls. Memory is +--- 7109,7113 ---- + sets[i].src_elt = src_eqv_elt; + +! invalidate_from_clobbers (x); + + /* Some registers are invalidated by subroutine calls. Memory is +*************** cse_insn (insn, in_libcall_block) +*** 7241,7248 **** + if (GET_CODE (insn) == CALL_INSN) + { +- static struct write_data everything = {0, 1, 1, 1}; +- + if (! CONST_CALL_P (insn)) +! invalidate_memory (&everything); + invalidate_for_call (); + } +--- 7116,7121 ---- + if (GET_CODE (insn) == CALL_INSN) + { + if (! CONST_CALL_P (insn)) +! invalidate_memory (); + invalidate_for_call (); + } +*************** cse_insn (insn, in_libcall_block) +*** 7265,7270 **** + we have just done an invalidate_memory that covers even those. */ + if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG +! || (GET_CODE (dest) == MEM && ! writes_memory.all +! && ! cse_rtx_addr_varies_p (dest))) + invalidate (dest, VOIDmode); + else if (GET_CODE (dest) == STRICT_LOW_PART +--- 7138,7142 ---- + we have just done an invalidate_memory that covers even those. */ + if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG +! 
|| GET_CODE (dest) == MEM) + invalidate (dest, VOIDmode); + else if (GET_CODE (dest) == STRICT_LOW_PART +*************** cse_insn (insn, in_libcall_block) +*** 7532,7580 **** + } + +- /* Store 1 in *WRITES_PTR for those categories of memory ref +- that must be invalidated when the expression WRITTEN is stored in. +- If WRITTEN is null, say everything must be invalidated. */ +- + static void +! note_mem_written (written, writes_ptr) +! rtx written; +! struct write_data *writes_ptr; +! { +! static struct write_data everything = {0, 1, 1, 1}; +! +! if (written == 0) +! *writes_ptr = everything; +! else if (GET_CODE (written) == MEM) +! { +! /* Pushing or popping the stack invalidates just the stack pointer. */ +! rtx addr = XEXP (written, 0); +! if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC +! || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC) +! && GET_CODE (XEXP (addr, 0)) == REG +! && REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM) +! { +! writes_ptr->sp = 1; +! return; +! } +! else if (GET_MODE (written) == BLKmode) +! *writes_ptr = everything; +! /* (mem (scratch)) means clobber everything. */ +! else if (GET_CODE (addr) == SCRATCH) +! *writes_ptr = everything; +! else if (cse_rtx_addr_varies_p (written)) +! { +! /* A varying address that is a sum indicates an array element, +! and that's just as good as a structure element +! in implying that we need not invalidate scalar variables. +! However, we must allow QImode aliasing of scalars, because the +! ANSI C standard allows character pointers to alias anything. */ +! if (! ((MEM_IN_STRUCT_P (written) +! || GET_CODE (XEXP (written, 0)) == PLUS) +! && GET_MODE (written) != QImode)) +! writes_ptr->all = 1; +! writes_ptr->nonscalar = 1; +! } +! writes_ptr->var = 1; + } + } + +--- 7404,7447 ---- + } + + static void +! invalidate_memory () +! { +! register int i; +! register struct table_elt *p, *next; +! +! for (i = 0; i < NBUCKETS; i++) +! for (p = table[i]; p; p = next) +! { +! next = p->next_same_hash; +! if (p->in_memory) +! remove_from_table (p, i); +! } +! } +! +! static int +! note_mem_written (mem) +! register rtx mem; +! { +! if (mem == 0 || GET_CODE(mem) != MEM ) +! return 0; +! else +! { +! register rtx addr = XEXP (mem, 0); +! /* Pushing or popping the stack invalidates just the stack pointer. */ +! if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC +! || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC) +! && GET_CODE (XEXP (addr, 0)) == REG +! && REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM) +! { +! if (reg_tick[STACK_POINTER_REGNUM] >= 0) +! reg_tick[STACK_POINTER_REGNUM]++; +! +! /* This should be *very* rare. */ +! if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM)) +! invalidate (stack_pointer_rtx, VOIDmode); +! return 1; + } ++ return 0; ++ } + } + +*************** note_mem_written (written, writes_ptr) +*** 7584,7612 **** + alias with something that is SET or CLOBBERed. + +- W points to the writes_memory for this insn, a struct write_data +- saying which kinds of memory references must be invalidated. + X is the pattern of the insn. */ + + static void +! invalidate_from_clobbers (w, x) +! struct write_data *w; + rtx x; + { +- /* If W->var is not set, W specifies no action. +- If W->all is set, this step gets all memory refs +- so they can be ignored in the rest of this function. */ +- if (w->var) +- invalidate_memory (w); +- +- if (w->sp) +- { +- if (reg_tick[STACK_POINTER_REGNUM] >= 0) +- reg_tick[STACK_POINTER_REGNUM]++; +- +- /* This should be *very* rare. 
*/ +- if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM)) +- invalidate (stack_pointer_rtx, VOIDmode); +- } +- + if (GET_CODE (x) == CLOBBER) + { +--- 7451,7460 ---- + alias with something that is SET or CLOBBERed. + + X is the pattern of the insn. */ + + static void +! invalidate_from_clobbers (x) + rtx x; + { + if (GET_CODE (x) == CLOBBER) + { +*************** invalidate_from_clobbers (w, x) +*** 7615,7619 **** + { + if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG +! || (GET_CODE (ref) == MEM && ! w->all)) + invalidate (ref, VOIDmode); + else if (GET_CODE (ref) == STRICT_LOW_PART +--- 7463,7467 ---- + { + if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG +! || GET_CODE (ref) == MEM) + invalidate (ref, VOIDmode); + else if (GET_CODE (ref) == STRICT_LOW_PART +*************** invalidate_from_clobbers (w, x) +*** 7634,7638 **** + { + if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG +! || (GET_CODE (ref) == MEM && !w->all)) + invalidate (ref, VOIDmode); + else if (GET_CODE (ref) == STRICT_LOW_PART +--- 7482,7486 ---- + { + if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG +! || GET_CODE (ref) == MEM) + invalidate (ref, VOIDmode); + else if (GET_CODE (ref) == STRICT_LOW_PART +*************** cse_around_loop (loop_start) +*** 7800,7807 **** + } + +- /* Variable used for communications between the next two routines. */ +- +- static struct write_data skipped_writes_memory; +- + /* Process one SET of an insn that was skipped. We ignore CLOBBERs + since they are done elsewhere. This function is called via note_stores. */ +--- 7648,7651 ---- +*************** invalidate_skipped_set (dest, set) +*** 7812,7815 **** +--- 7656,7675 ---- + rtx dest; + { ++ enum rtx_code code = GET_CODE (dest); ++ ++ if (code == MEM ++ && ! note_mem_written (dest) /* If this is not a stack push ... */ ++ /* There are times when an address can appear varying and be a PLUS ++ during this scan when it would be a fixed address were we to know ++ the proper equivalences. So invalidate all memory if there is ++ a BLKmode or nonscalar memory reference or a reference to a ++ variable address. */ ++ && (MEM_IN_STRUCT_P (dest) || GET_MODE (dest) == BLKmode ++ || cse_rtx_varies_p (XEXP (dest, 0)))) ++ { ++ invalidate_memory (); ++ return; ++ } ++ + if (GET_CODE (set) == CLOBBER + #ifdef HAVE_cc0 +*************** invalidate_skipped_set (dest, set) +*** 7819,7837 **** + return; + +! if (GET_CODE (dest) == MEM) +! note_mem_written (dest, &skipped_writes_memory); +! +! /* There are times when an address can appear varying and be a PLUS +! during this scan when it would be a fixed address were we to know +! the proper equivalences. So promote "nonscalar" to be "all". */ +! if (skipped_writes_memory.nonscalar) +! skipped_writes_memory.all = 1; +! +! if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG +! || (! skipped_writes_memory.all && ! cse_rtx_addr_varies_p (dest))) +! invalidate (dest, VOIDmode); +! else if (GET_CODE (dest) == STRICT_LOW_PART +! || GET_CODE (dest) == ZERO_EXTRACT) + invalidate (XEXP (dest, 0), GET_MODE (dest)); + } + +--- 7679,7686 ---- + return; + +! 
if (code == STRICT_LOW_PART || code == ZERO_EXTRACT) + invalidate (XEXP (dest, 0), GET_MODE (dest)); ++ else if (code == REG || code == SUBREG || code == MEM) ++ invalidate (dest, VOIDmode); + } + +*************** invalidate_skipped_block (start) +*** 7845,7850 **** + { + rtx insn; +- static struct write_data init = {0, 0, 0, 0}; +- static struct write_data everything = {0, 1, 1, 1}; + + for (insn = start; insn && GET_CODE (insn) != CODE_LABEL; +--- 7694,7697 ---- +*************** invalidate_skipped_block (start) +*** 7854,7867 **** + continue; + +- skipped_writes_memory = init; +- + if (GET_CODE (insn) == CALL_INSN) + { + invalidate_for_call (); +- skipped_writes_memory = everything; + } + + note_stores (PATTERN (insn), invalidate_skipped_set); +- invalidate_from_clobbers (&skipped_writes_memory, PATTERN (insn)); + } + } +--- 7701,7712 ---- + continue; + + if (GET_CODE (insn) == CALL_INSN) + { ++ if (! CONST_CALL_P (insn)) ++ invalidate_memory (); + invalidate_for_call (); + } + + note_stores (PATTERN (insn), invalidate_skipped_set); + } + } +*************** cse_set_around_loop (x, insn, loop_start +*** 7913,7920 **** + { + struct table_elt *src_elt; +- static struct write_data init = {0, 0, 0, 0}; +- struct write_data writes_memory; +- +- writes_memory = init; + + /* If this is a SET, see if we can replace SET_SRC, but ignore SETs that +--- 7758,7761 ---- +*************** cse_set_around_loop (x, insn, loop_start +*** 7976,7991 **** + + /* Now invalidate anything modified by X. */ +! note_mem_written (SET_DEST (x), &writes_memory); +! +! if (writes_memory.var) +! invalidate_memory (&writes_memory); +! +! /* See comment on similar code in cse_insn for explanation of these tests. */ +! if (GET_CODE (SET_DEST (x)) == REG || GET_CODE (SET_DEST (x)) == SUBREG +! || (GET_CODE (SET_DEST (x)) == MEM && ! writes_memory.all +! && ! cse_rtx_addr_varies_p (SET_DEST (x)))) +! invalidate (SET_DEST (x), VOIDmode); +! else if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART +! || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT) + invalidate (XEXP (SET_DEST (x), 0), GET_MODE (SET_DEST (x))); + } +--- 7817,7828 ---- + + /* Now invalidate anything modified by X. */ +! note_mem_written (SET_DEST (x)); +! +! /* See comment on similar code in cse_insn for explanation of these tests. */ +! if (GET_CODE (SET_DEST (x)) == REG || GET_CODE (SET_DEST (x)) == SUBREG +! || GET_CODE (SET_DEST (x)) == MEM) +! invalidate (SET_DEST (x), VOIDmode); +! else if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART +! || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT) + invalidate (XEXP (SET_DEST (x), 0), GET_MODE (SET_DEST (x))); + } +*************** cse_main (f, nregs, after_loop, file) +*** 8234,8237 **** +--- 8071,8075 ---- + + init_recog (); ++ init_alias_analysis (); + + max_reg = nregs; +diff -rcp2N gcc-2.7.2.2/flags.h gcc-2.7.2.2.f.2/flags.h +*** gcc-2.7.2.2/flags.h Thu Jun 15 07:34:11 1995 +--- gcc-2.7.2.2.f.2/flags.h Fri Jan 10 23:18:22 1997 +*************** extern int flag_unroll_loops; +*** 204,207 **** +--- 204,221 ---- + extern int flag_unroll_all_loops; + ++ /* Nonzero forces all invariant computations in loops to be moved ++ outside the loop. */ ++ ++ extern int flag_move_all_movables; ++ ++ /* Nonzero forces all general induction variables in loops to be ++ strength reduced. */ ++ ++ extern int flag_reduce_all_givs; ++ ++ /* Nonzero gets another run of loop_optimize performed. */ ++ ++ extern int flag_rerun_loop_opt; ++ + /* Nonzero for -fcse-follow-jumps: + have cse follow jumps to do a more extensive job. 
*/ +*************** extern int flag_gnu_linker; +*** 339,342 **** +--- 353,369 ---- + /* Tag all structures with __attribute__(packed) */ + extern int flag_pack_struct; ++ ++ /* 1 if alias checking is enabled: symbols do not alias each other ++ and parameters do not alias the current stack frame. */ ++ extern int flag_alias_check; ++ ++ /* This flag is only tested if alias checking is enabled. ++ 0 if pointer arguments may alias each other. True in C. ++ 1 if pointer arguments may not alias each other but may alias ++ global variables. ++ 2 if pointer arguments may not alias each other and may not ++ alias global variables. True in Fortran. ++ The value is ignored if flag_alias_check is 0. */ ++ extern int flag_argument_noalias; + + /* Other basic status info about current function. */ +diff -rcp2N gcc-2.7.2.2/fold-const.c gcc-2.7.2.2.f.2/fold-const.c +*** gcc-2.7.2.2/fold-const.c Fri Sep 15 18:26:12 1995 +--- gcc-2.7.2.2.f.2/fold-const.c Sun Feb 23 15:25:58 1997 +*************** static tree unextend PROTO((tree, int, i +*** 80,83 **** +--- 80,84 ---- + static tree fold_truthop PROTO((enum tree_code, tree, tree, tree)); + static tree strip_compound_expr PROTO((tree, tree)); ++ static int multiple_of_p PROTO((tree, tree, tree)); + + #ifndef BRANCH_COST +*************** strip_compound_expr (t, s) +*** 3065,3068 **** +--- 3066,3169 ---- + } + ++ /* Determine if first argument is a multiple of second argument. ++ Return 0 if it is not, or is not easily determined to so be. ++ ++ An example of the sort of thing we care about (at this point -- ++ this routine could surely be made more general, and expanded ++ to do what the *_DIV_EXPR's fold() cases do now) is discovering ++ that ++ ++ SAVE_EXPR (I) * SAVE_EXPR (J * 8) ++ ++ is a multiple of ++ ++ SAVE_EXPR (J * 8) ++ ++ when we know that the two `SAVE_EXPR (J * 8)' nodes are the ++ same node (which means they will have the same value at run ++ time, even though we don't know when they'll be assigned). ++ ++ This code also handles discovering that ++ ++ SAVE_EXPR (I) * SAVE_EXPR (J * 8) ++ ++ is a multiple of ++ ++ 8 ++ ++ (of course) so we don't have to worry about dealing with a ++ possible remainder. ++ ++ Note that we _look_ inside a SAVE_EXPR only to determine ++ how it was calculated; it is not safe for fold() to do much ++ of anything else with the internals of a SAVE_EXPR, since ++ fold() cannot know when it will be evaluated at run time. ++ For example, the latter example above _cannot_ be implemented ++ as ++ ++ SAVE_EXPR (I) * J ++ ++ or any variant thereof, since the value of J at evaluation time ++ of the original SAVE_EXPR is not necessarily the same at the time ++ the new expression is evaluated. The only optimization of this ++ sort that would be valid is changing ++ ++ SAVE_EXPR (I) * SAVE_EXPR (SAVE_EXPR (J) * 8) ++ divided by ++ 8 ++ ++ to ++ ++ SAVE_EXPR (I) * SAVE_EXPR (J) ++ ++ (where the same SAVE_EXPR (J) is used in the original and the ++ transformed version). 
*/ ++ ++ static int ++ multiple_of_p (type, top, bottom) ++ tree type; ++ tree top; ++ tree bottom; ++ { ++ if (operand_equal_p (top, bottom, 0)) ++ return 1; ++ ++ if (TREE_CODE (type) != INTEGER_TYPE) ++ return 0; ++ ++ switch (TREE_CODE (top)) ++ { ++ case MULT_EXPR: ++ return (multiple_of_p (type, TREE_OPERAND (top, 0), bottom) ++ || multiple_of_p (type, TREE_OPERAND (top, 1), bottom)); ++ ++ case PLUS_EXPR: ++ case MINUS_EXPR: ++ return (multiple_of_p (type, TREE_OPERAND (top, 0), bottom) ++ && multiple_of_p (type, TREE_OPERAND (top, 1), bottom)); ++ ++ case NOP_EXPR: ++ /* Punt if conversion from non-integral or wider integral type. */ ++ if ((TREE_CODE (TREE_TYPE (TREE_OPERAND (top, 0))) != INTEGER_TYPE) ++ || (TYPE_PRECISION (type) ++ < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (top, 0))))) ++ return 0; ++ /* Fall through. */ ++ case SAVE_EXPR: ++ return multiple_of_p (type, TREE_OPERAND (top, 0), bottom); ++ ++ case INTEGER_CST: ++ if ((TREE_CODE (bottom) != INTEGER_CST) ++ || (tree_int_cst_sgn (top) < 0) ++ || (tree_int_cst_sgn (bottom) < 0)) ++ return 0; ++ return integer_zerop (const_binop (TRUNC_MOD_EXPR, ++ top, bottom, 0)); ++ ++ default: ++ return 0; ++ } ++ } ++ + /* Perform constant folding and related simplification of EXPR. + The related simplifications include x*1 => x, x*0 => 0, etc., +*************** fold (expr) +*** 4010,4013 **** +--- 4111,4121 ---- + case FLOOR_DIV_EXPR: + case CEIL_DIV_EXPR: ++ if (integer_onep (arg1)) ++ return non_lvalue (convert (type, arg0)); ++ /* If arg0 is a multiple of arg1, then rewrite to the fastest div ++ operation, EXACT_DIV_EXPR. Otherwise, handle folding of ++ general divide. */ ++ if (multiple_of_p (type, arg0, arg1)) ++ return fold (build (EXACT_DIV_EXPR, type, arg0, arg1)); + case EXACT_DIV_EXPR: + if (integer_onep (arg1)) +diff -rcp2N gcc-2.7.2.2/gcc.texi gcc-2.7.2.2.f.2/gcc.texi +*** gcc-2.7.2.2/gcc.texi Thu Feb 20 19:24:19 1997 +--- gcc-2.7.2.2.f.2/gcc.texi Sun Feb 23 16:16:49 1997 +*************** original English. +*** 149,152 **** +--- 149,153 ---- + @sp 3 + @center Last updated 29 June 1996 ++ @center (Revised for GNU Fortran 1997-01-10) + @sp 1 + @c The version number appears twice more in this file. +diff -rcp2N gcc-2.7.2.2/glimits.h gcc-2.7.2.2.f.2/glimits.h +*** gcc-2.7.2.2/glimits.h Wed Sep 29 17:30:54 1993 +--- gcc-2.7.2.2.f.2/glimits.h Thu Dec 19 12:31:08 1996 +*************** +*** 64,68 **** + (Same as `int'). */ + #ifndef __LONG_MAX__ +! #define __LONG_MAX__ 2147483647L + #endif + #undef LONG_MIN +--- 64,72 ---- + (Same as `int'). */ + #ifndef __LONG_MAX__ +! # ifndef __alpha__ +! # define __LONG_MAX__ 2147483647L +! # else +! # define __LONG_MAX__ 9223372036854775807LL +! # endif /* __alpha__ */ + #endif + #undef LONG_MIN +diff -rcp2N gcc-2.7.2.2/invoke.texi gcc-2.7.2.2.f.2/invoke.texi +*** gcc-2.7.2.2/invoke.texi Tue Oct 3 11:40:43 1995 +--- gcc-2.7.2.2.f.2/invoke.texi Sun Feb 23 16:18:06 1997 +*************** +*** 1,3 **** +! @c Copyright (C) 1988, 89, 92, 93, 94, 1995 Free Software Foundation, Inc. + @c This is part of the GCC manual. + @c For copying conditions, see the file gcc.texi. +--- 1,3 ---- +! @c Copyright (C) 1988, 89, 92-95, 1997 Free Software Foundation, Inc. + @c This is part of the GCC manual. + @c For copying conditions, see the file gcc.texi. +*************** in the following sections. 
+*** 149,152 **** +--- 149,153 ---- + -fschedule-insns2 -fstrength-reduce -fthread-jumps + -funroll-all-loops -funroll-loops ++ -fmove-all-movables -freduce-all-givs -frerun-loop-opt + -O -O0 -O1 -O2 -O3 + @end smallexample +*************** in addition to the above: +*** 331,334 **** +--- 332,337 ---- + -fshort-double -fvolatile -fvolatile-global + -fverbose-asm -fpack-struct +e0 +e1 ++ -fargument-alias -fargument-noalias ++ -fargument-noalias-global + @end smallexample + @end table +*************** and usually makes programs run more slow +*** 1941,1944 **** +--- 1944,1992 ---- + implies @samp{-fstrength-reduce} as well as @samp{-frerun-cse-after-loop}. + ++ @item -fmove-all-movables ++ Forces all invariant computations in loops to be moved ++ outside the loop. ++ This option is provided primarily to improve performance ++ for some Fortran code, though it might improve code written ++ in other languages. ++ ++ @emph{Note:} When compiling programs written in Fortran, ++ this option is enabled by default. ++ ++ Analysis of Fortran code optimization and the resulting ++ optimizations triggered by this option, and the ++ @samp{-freduce-all-givs} and @samp{-frerun-loop-opt} ++ options as well, were ++ contributed by Toon Moene (@code{toon@@moene.indiv.nluug.nl}). ++ ++ These three options are intended to be removed someday, once ++ they have helped determine the efficacy of various ++ approaches to improving the performance of Fortran code. ++ ++ Please let us (@code{fortran@@gnu.ai.mit.edu}) ++ know how use of these options affects ++ the performance of your production code. ++ We're very interested in code that runs @emph{slower} ++ when these options are @emph{enabled}. ++ ++ @item -freduce-all-givs ++ Forces all general-induction variables in loops to be ++ strength-reduced. ++ This option is provided primarily to improve performance ++ for some Fortran code, though it might improve code written ++ in other languages. ++ ++ @emph{Note:} When compiling programs written in Fortran, ++ this option is enabled by default. ++ ++ @item -frerun-loop-opt ++ Runs loop optimizations a second time. ++ This option is provided primarily to improve performance ++ for some Fortran code, though it might improve code written ++ in other languages. ++ ++ @emph{Note:} When compiling programs written in Fortran, ++ this option is enabled by default. ++ + @item -fno-peephole + Disable any machine-specific peephole optimizations. +*************** compilation). +*** 4229,4232 **** +--- 4277,4352 ---- + With @samp{+e1}, G++ actually generates the code implementing virtual + functions defined in the code, and makes them publicly visible. ++ ++ @cindex aliasing of parameters ++ @cindex parameters, aliased ++ @item -fargument-alias ++ @item -fargument-noalias ++ @item -fargument-noalias-global ++ Specify the possible relationships among parameters and between ++ parameters and global data. ++ ++ @samp{-fargument-alias} specifies that arguments (parameters) may ++ alias each other and may alias global storage. ++ @samp{-fargument-noalias} specifies that arguments do not alias ++ each other, but may alias global storage. ++ @samp{-fargument-noalias-global} specifies that arguments do not ++ alias each other and do not alias global storage. ++ ++ For code written in C, C++, and Objective-C, @samp{-fargument-alias} ++ is the default. ++ For code written in Fortran, @samp{-fargument-noalias-global} is ++ the default, though this is pertinent only on systems where ++ @code{g77} is installed. 
++ (See the documentation for other compilers for information on the ++ defaults for their respective languages.) ++ ++ Normally, @code{gcc} assumes that a write through a pointer ++ passed as a parameter to the current function might modify a ++ value pointed to by another pointer passed as a parameter, or ++ in global storage. ++ ++ For example, consider this code: ++ ++ @example ++ void x(int *i, int *j) ++ @{ ++ extern int k; ++ ++ ++*i; ++ ++*j; ++ ++k; ++ @} ++ @end example ++ ++ When compiling the above function, @code{gcc} assumes that @samp{i} might ++ be a pointer to the same variable as @samp{j}, and that either @samp{i}, ++ @samp{j}, or both might be a pointer to @samp{k}. ++ ++ Therefore, @code{gcc} does not assume it can generate code to read ++ @samp{*i}, @samp{*j}, and @samp{k} into separate registers, increment ++ each register, then write the incremented values back out. ++ ++ Instead, @code{gcc} must generate code that reads @samp{*i}, ++ increments it, and writes it back before reading @samp{*j}, ++ in case @samp{i} and @samp{j} are aliased, and, similarly, ++ that writes @samp{*j} before reading @samp{k}. ++ The result is code that, on many systems, takes longer to execute, ++ due to the way many processors schedule instruction execution. ++ ++ Compiling the above code with the @samp{-fargument-noalias} option ++ allows @code{gcc} to assume that @samp{i} and @samp{j} do not alias ++ each other, but either might alias @samp{k}. ++ ++ Compiling the above code with the @samp{-fargument-noalias-global} ++ option allows @code{gcc} to assume that no combination of @samp{i}, ++ @samp{j}, and @samp{k} are aliases for each other. ++ ++ @emph{Note:} Use the @samp{-fargument-noalias} and ++ @samp{-fargument-noalias-global} options with care. ++ While they can result in faster executables, they can ++ also result in executables with subtle bugs, bugs that ++ show up only when compiled for specific target systems, ++ or bugs that show up only when compiled by specific versions ++ of @code{g77}. + @end table + +diff -rcp2N gcc-2.7.2.2/local-alloc.c gcc-2.7.2.2.f.2/local-alloc.c +*** gcc-2.7.2.2/local-alloc.c Mon Aug 21 13:15:44 1995 +--- gcc-2.7.2.2.f.2/local-alloc.c Fri Jan 10 23:18:22 1997 +*************** validate_equiv_mem_from_store (dest, set +*** 545,549 **** + && reg_overlap_mentioned_p (dest, equiv_mem)) + || (GET_CODE (dest) == MEM +! && true_dependence (dest, equiv_mem))) + equiv_mem_modified = 1; + } +--- 545,549 ---- + && reg_overlap_mentioned_p (dest, equiv_mem)) + || (GET_CODE (dest) == MEM +! && true_dependence (dest, VOIDmode, equiv_mem, rtx_varies_p))) + equiv_mem_modified = 1; + } +*************** memref_referenced_p (memref, x) +*** 630,634 **** + + case MEM: +! if (true_dependence (memref, x)) + return 1; + break; +--- 630,634 ---- + + case MEM: +! if (true_dependence (memref, VOIDmode, x, rtx_varies_p)) + return 1; + break; +diff -rcp2N gcc-2.7.2.2/loop.c gcc-2.7.2.2.f.2/loop.c +*** gcc-2.7.2.2/loop.c Thu Feb 20 19:24:20 1997 +--- gcc-2.7.2.2.f.2/loop.c Sun Feb 23 15:35:42 1997 +*************** int *loop_number_exit_count; +*** 111,116 **** + unsigned HOST_WIDE_INT loop_n_iterations; + +! /* Nonzero if there is a subroutine call in the current loop. +! (unknown_address_altered is also nonzero in this case.) */ + + static int loop_has_call; +--- 111,115 ---- + unsigned HOST_WIDE_INT loop_n_iterations; + +! /* Nonzero if there is a subroutine call in the current loop. 
*/ + + static int loop_has_call; +*************** static char *moved_once; +*** 160,164 **** + here, we just turn on unknown_address_altered. */ + +! #define NUM_STORES 20 + static rtx loop_store_mems[NUM_STORES]; + +--- 159,163 ---- + here, we just turn on unknown_address_altered. */ + +! #define NUM_STORES 50 + static rtx loop_store_mems[NUM_STORES]; + +*************** move_movables (movables, threshold, insn +*** 1629,1632 **** +--- 1628,1632 ---- + + if (already_moved[regno] ++ || flag_move_all_movables + || (threshold * savings * m->lifetime) >= insn_count + || (m->forces && m->forces->done +*************** prescan_loop (start, end) +*** 2199,2203 **** + else if (GET_CODE (insn) == CALL_INSN) + { +! unknown_address_altered = 1; + loop_has_call = 1; + } +--- 2199,2204 ---- + else if (GET_CODE (insn) == CALL_INSN) + { +! if (! CONST_CALL_P (insn)) +! unknown_address_altered = 1; + loop_has_call = 1; + } +*************** invariant_p (x) +*** 2777,2781 **** + /* See if there is any dependence between a store and this load. */ + for (i = loop_store_mems_idx - 1; i >= 0; i--) +! if (true_dependence (loop_store_mems[i], x)) + return 0; + +--- 2778,2782 ---- + /* See if there is any dependence between a store and this load. */ + for (i = loop_store_mems_idx - 1; i >= 0; i--) +! if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p)) + return 0; + +*************** strength_reduce (scan_start, end, loop_t +*** 3821,3826 **** + exit. */ + +! if (v->lifetime * threshold * benefit < insn_count +! && ! bl->reversed) + { + if (loop_dump_stream) +--- 3822,3827 ---- + exit. */ + +! if ( ! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count +! && ! bl->reversed ) + { + if (loop_dump_stream) +*************** record_giv (v, insn, src_reg, dest_reg, +*** 4375,4378 **** +--- 4376,4381 ---- + v->final_value = 0; + v->same_insn = 0; ++ v->unrolled = 0; ++ v->shared = 0; + + /* The v->always_computable field is used in update_giv_derive, to +*************** check_final_value (v, loop_start, loop_e +*** 4652,4657 **** + if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) + && LABEL_NAME (JUMP_LABEL (p)) +! && ((INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn) +! && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start)) + || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use) + && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end)))) +--- 4655,4663 ---- + if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) + && LABEL_NAME (JUMP_LABEL (p)) +! && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop) +! || (INSN_UID (v->insn) >= max_uid_for_loop) +! || (INSN_UID (last_giv_use) >= max_uid_for_loop) +! || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn) +! && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start)) + || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use) + && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end)))) +*************** emit_iv_add_mult (b, m, a, reg, insert_b +*** 5560,5563 **** +--- 5566,5571 ---- + + emit_insn_before (seq, insert_before); ++ ++ record_base_value (REGNO (reg), b); + } + +diff -rcp2N gcc-2.7.2.2/loop.h gcc-2.7.2.2.f.2/loop.h +*** gcc-2.7.2.2/loop.h Fri Jul 14 08:23:28 1995 +--- gcc-2.7.2.2.f.2/loop.h Fri Jan 10 23:18:23 1997 +*************** struct induction +*** 89,92 **** +--- 89,95 ---- + we won't use it to eliminate a biv, it + would probably lose. */ ++ unsigned unrolled : 1; /* 1 if new register has been allocated in ++ unrolled loop. 
*/ ++ unsigned shared : 1; + int lifetime; /* Length of life of this giv */ + int times_used; /* # times this giv is used. */ +diff -rcp2N gcc-2.7.2.2/real.c gcc-2.7.2.2.f.2/real.c +*** gcc-2.7.2.2/real.c Tue Aug 15 17:57:18 1995 +--- gcc-2.7.2.2.f.2/real.c Thu Dec 19 12:31:09 1996 +*************** make_nan (nan, sign, mode) +*** 5625,5633 **** + } + +! /* Convert an SFmode target `float' value to a REAL_VALUE_TYPE. +! This is the inverse of the function `etarsingle' invoked by + REAL_VALUE_TO_TARGET_SINGLE. */ + + REAL_VALUE_TYPE + ereal_from_float (f) + HOST_WIDE_INT f; +--- 5625,5699 ---- + } + +! /* This is the inverse of the function `etarsingle' invoked by + REAL_VALUE_TO_TARGET_SINGLE. */ + + REAL_VALUE_TYPE ++ ereal_unto_float (f) ++ long f; ++ { ++ REAL_VALUE_TYPE r; ++ unsigned EMUSHORT s[2]; ++ unsigned EMUSHORT e[NE]; ++ ++ /* Convert 32 bit integer to array of 16 bit pieces in target machine order. ++ This is the inverse operation to what the function `endian' does. */ ++ if (REAL_WORDS_BIG_ENDIAN) ++ { ++ s[0] = (unsigned EMUSHORT) (f >> 16); ++ s[1] = (unsigned EMUSHORT) f; ++ } ++ else ++ { ++ s[0] = (unsigned EMUSHORT) f; ++ s[1] = (unsigned EMUSHORT) (f >> 16); ++ } ++ /* Convert and promote the target float to E-type. */ ++ e24toe (s, e); ++ /* Output E-type to REAL_VALUE_TYPE. */ ++ PUT_REAL (e, &r); ++ return r; ++ } ++ ++ ++ /* This is the inverse of the function `etardouble' invoked by ++ REAL_VALUE_TO_TARGET_DOUBLE. */ ++ ++ REAL_VALUE_TYPE ++ ereal_unto_double (d) ++ long d[]; ++ { ++ REAL_VALUE_TYPE r; ++ unsigned EMUSHORT s[4]; ++ unsigned EMUSHORT e[NE]; ++ ++ /* Convert array of HOST_WIDE_INT to equivalent array of 16-bit pieces. */ ++ if (REAL_WORDS_BIG_ENDIAN) ++ { ++ s[0] = (unsigned EMUSHORT) (d[0] >> 16); ++ s[1] = (unsigned EMUSHORT) d[0]; ++ s[2] = (unsigned EMUSHORT) (d[1] >> 16); ++ s[3] = (unsigned EMUSHORT) d[1]; ++ } ++ else ++ { ++ /* Target float words are little-endian. */ ++ s[0] = (unsigned EMUSHORT) d[0]; ++ s[1] = (unsigned EMUSHORT) (d[0] >> 16); ++ s[2] = (unsigned EMUSHORT) d[1]; ++ s[3] = (unsigned EMUSHORT) (d[1] >> 16); ++ } ++ /* Convert target double to E-type. */ ++ e53toe (s, e); ++ /* Output E-type to REAL_VALUE_TYPE. */ ++ PUT_REAL (e, &r); ++ return r; ++ } ++ ++ ++ /* Convert an SFmode target `float' value to a REAL_VALUE_TYPE. ++ This is somewhat like ereal_unto_float, but the input types ++ for these are different. */ ++ ++ REAL_VALUE_TYPE + ereal_from_float (f) + HOST_WIDE_INT f; +*************** ereal_from_float (f) +*** 5658,5663 **** + + /* Convert a DFmode target `double' value to a REAL_VALUE_TYPE. +! This is the inverse of the function `etardouble' invoked by +! REAL_VALUE_TO_TARGET_DOUBLE. + + The DFmode is stored as an array of HOST_WIDE_INT in the target's +--- 5724,5729 ---- + + /* Convert a DFmode target `double' value to a REAL_VALUE_TYPE. +! This is somewhat like ereal_unto_double, but the input types +! for these are different. 
+ + The DFmode is stored as an array of HOST_WIDE_INT in the target's +diff -rcp2N gcc-2.7.2.2/real.h gcc-2.7.2.2.f.2/real.h +*** gcc-2.7.2.2/real.h Thu Jun 15 07:57:56 1995 +--- gcc-2.7.2.2.f.2/real.h Thu Dec 19 12:31:09 1996 +*************** extern void ereal_to_decimal PROTO((REAL +*** 152,155 **** +--- 152,157 ---- + extern int ereal_cmp PROTO((REAL_VALUE_TYPE, REAL_VALUE_TYPE)); + extern int ereal_isneg PROTO((REAL_VALUE_TYPE)); ++ extern REAL_VALUE_TYPE ereal_unto_float PROTO((long)); ++ extern REAL_VALUE_TYPE ereal_unto_double PROTO((long *)); + extern REAL_VALUE_TYPE ereal_from_float PROTO((HOST_WIDE_INT)); + extern REAL_VALUE_TYPE ereal_from_double PROTO((HOST_WIDE_INT *)); +*************** extern REAL_VALUE_TYPE real_value_trunca +*** 197,200 **** +--- 199,208 ---- + /* IN is a REAL_VALUE_TYPE. OUT is a long. */ + #define REAL_VALUE_TO_TARGET_SINGLE(IN, OUT) ((OUT) = etarsingle ((IN))) ++ ++ /* Inverse of REAL_VALUE_TO_TARGET_DOUBLE. */ ++ #define REAL_VALUE_UNTO_TARGET_DOUBLE(d) (ereal_unto_double (d)) ++ ++ /* Inverse of REAL_VALUE_TO_TARGET_SINGLE. */ ++ #define REAL_VALUE_UNTO_TARGET_SINGLE(f) (ereal_unto_float (f)) + + /* d is an array of HOST_WIDE_INT that holds a double precision +diff -rcp2N gcc-2.7.2.2/reload.c gcc-2.7.2.2.f.2/reload.c +*** gcc-2.7.2.2/reload.c Sat Nov 11 08:23:54 1995 +--- gcc-2.7.2.2.f.2/reload.c Thu Feb 27 23:03:05 1997 +*************** +*** 1,4 **** + /* Search an insn for pseudo regs that must be in hard regs and are not. +! Copyright (C) 1987, 88, 89, 92, 93, 94, 1995 Free Software Foundation, Inc. + + This file is part of GNU CC. +--- 1,4 ---- + /* Search an insn for pseudo regs that must be in hard regs and are not. +! Copyright (C) 1987, 88, 89, 92-5, 1996 Free Software Foundation, Inc. + + This file is part of GNU CC. +*************** static int push_secondary_reload PROTO(( +*** 292,295 **** +--- 292,296 ---- + enum machine_mode, enum reload_type, + enum insn_code *)); ++ static enum reg_class find_valid_class PROTO((enum machine_mode, int)); + static int push_reload PROTO((rtx, rtx, rtx *, rtx *, enum reg_class, + enum machine_mode, enum machine_mode, +*************** push_secondary_reload (in_p, x, opnum, o +*** 361,364 **** +--- 362,368 ---- + mode and object being reloaded. */ + if (GET_CODE (x) == SUBREG ++ #ifdef CLASS_CANNOT_CHANGE_SIZE ++ && reload_class != CLASS_CANNOT_CHANGE_SIZE ++ #endif + && (GET_MODE_SIZE (GET_MODE (x)) + > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))) +*************** clear_secondary_mem () +*** 689,692 **** +--- 693,728 ---- + #endif /* SECONDARY_MEMORY_NEEDED */ + ++ /* Find the largest class for which every register number plus N is valid in ++ M1 (if in range). Abort if no such class exists. */ ++ ++ static enum reg_class ++ find_valid_class (m1, n) ++ enum machine_mode m1; ++ int n; ++ { ++ int class; ++ int regno; ++ enum reg_class best_class; ++ int best_size = 0; ++ ++ for (class = 1; class < N_REG_CLASSES; class++) ++ { ++ int bad = 0; ++ for (regno = 0; regno < FIRST_PSEUDO_REGISTER && ! bad; regno++) ++ if (TEST_HARD_REG_BIT (reg_class_contents[class], regno) ++ && TEST_HARD_REG_BIT (reg_class_contents[class], regno + n) ++ && ! HARD_REGNO_MODE_OK (regno + n, m1)) ++ bad = 1; ++ ++ if (! bad && reg_class_size[class] > best_size) ++ best_class = class, best_size = reg_class_size[class]; ++ } ++ ++ if (best_size == 0) ++ abort (); ++ ++ return best_class; ++ } ++ + /* Record one reload that needs to be performed. + IN is an rtx saying where the data are to be found before this instruction. 
+*************** push_reload (in, out, inloc, outloc, cla +*** 894,898 **** + && GET_CODE (SUBREG_REG (in)) == REG + && REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER +! && (! HARD_REGNO_MODE_OK (REGNO (SUBREG_REG (in)), inmode) + || (GET_MODE_SIZE (inmode) <= UNITS_PER_WORD + && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))) +--- 930,935 ---- + && GET_CODE (SUBREG_REG (in)) == REG + && REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER +! && (! HARD_REGNO_MODE_OK (REGNO (SUBREG_REG (in)) + SUBREG_WORD (in), +! inmode) + || (GET_MODE_SIZE (inmode) <= UNITS_PER_WORD + && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))) +*************** push_reload (in, out, inloc, outloc, cla +*** 909,913 **** + output before the outer reload. */ + push_reload (SUBREG_REG (in), NULL_RTX, &SUBREG_REG (in), NULL_PTR, +! GENERAL_REGS, VOIDmode, VOIDmode, 0, 0, opnum, type); + dont_remove_subreg = 1; + } +--- 946,951 ---- + output before the outer reload. */ + push_reload (SUBREG_REG (in), NULL_RTX, &SUBREG_REG (in), NULL_PTR, +! find_valid_class (inmode, SUBREG_WORD (in)), +! VOIDmode, VOIDmode, 0, 0, opnum, type); + dont_remove_subreg = 1; + } +*************** push_reload (in, out, inloc, outloc, cla +*** 982,986 **** + && GET_CODE (SUBREG_REG (out)) == REG + && REGNO (SUBREG_REG (out)) < FIRST_PSEUDO_REGISTER +! && (! HARD_REGNO_MODE_OK (REGNO (SUBREG_REG (out)), outmode) + || (GET_MODE_SIZE (outmode) <= UNITS_PER_WORD + && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))) +--- 1020,1025 ---- + && GET_CODE (SUBREG_REG (out)) == REG + && REGNO (SUBREG_REG (out)) < FIRST_PSEUDO_REGISTER +! && (! HARD_REGNO_MODE_OK (REGNO (SUBREG_REG (out)) + SUBREG_WORD (out), +! outmode) + || (GET_MODE_SIZE (outmode) <= UNITS_PER_WORD + && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))) +*************** push_reload (in, out, inloc, outloc, cla +*** 998,1002 **** + dont_remove_subreg = 1; + push_reload (SUBREG_REG (out), SUBREG_REG (out), &SUBREG_REG (out), +! &SUBREG_REG (out), ALL_REGS, VOIDmode, VOIDmode, 0, 0, + opnum, RELOAD_OTHER); + } +--- 1037,1043 ---- + dont_remove_subreg = 1; + push_reload (SUBREG_REG (out), SUBREG_REG (out), &SUBREG_REG (out), +! &SUBREG_REG (out), +! find_valid_class (outmode, SUBREG_WORD (out)), +! VOIDmode, VOIDmode, 0, 0, + opnum, RELOAD_OTHER); + } +*************** find_equiv_reg (goal, insn, class, other +*** 5518,5522 **** + and is also a register that appears in the address of GOAL. */ + +! if (goal_mem && value == SET_DEST (PATTERN (where)) + && refers_to_regno_for_reload_p (valueno, + (valueno +--- 5559,5563 ---- + and is also a register that appears in the address of GOAL. */ + +! if (goal_mem && value == SET_DEST (single_set (where)) + && refers_to_regno_for_reload_p (valueno, + (valueno +*************** debug_reload() +*** 5900,5904 **** + + if (reload_nocombine[r]) +! fprintf (stderr, ", can combine", reload_nocombine[r]); + + if (reload_secondary_p[r]) +--- 5941,5945 ---- + + if (reload_nocombine[r]) +! fprintf (stderr, ", can't combine %d", reload_nocombine[r]); + + if (reload_secondary_p[r]) +diff -rcp2N gcc-2.7.2.2/rtl.h gcc-2.7.2.2.f.2/rtl.h +*** gcc-2.7.2.2/rtl.h Thu Jun 15 08:03:16 1995 +--- gcc-2.7.2.2.f.2/rtl.h Fri Jan 10 23:18:23 1997 +*************** enum reg_note { REG_DEAD = 1, REG_INC = +*** 349,353 **** + REG_NONNEG = 8, REG_NO_CONFLICT = 9, REG_UNUSED = 10, + REG_CC_SETTER = 11, REG_CC_USER = 12, REG_LABEL = 13, +! REG_DEP_ANTI = 14, REG_DEP_OUTPUT = 15 }; + + /* Define macros to extract and insert the reg-note kind in an EXPR_LIST. 
*/ +--- 349,353 ---- + REG_NONNEG = 8, REG_NO_CONFLICT = 9, REG_UNUSED = 10, + REG_CC_SETTER = 11, REG_CC_USER = 12, REG_LABEL = 13, +! REG_DEP_ANTI = 14, REG_DEP_OUTPUT = 15, REG_NOALIAS = 16 }; + + /* Define macros to extract and insert the reg-note kind in an EXPR_LIST. */ +*************** extern char *reg_note_name[]; +*** 432,436 **** + #define NOTE_INSN_FUNCTION_BEG -13 + +- + #if 0 /* These are not used, and I don't know what they were for. --rms. */ + #define NOTE_DECL_NAME(INSN) ((INSN)->fld[3].rtstr) +--- 432,435 ---- +*************** extern char *note_insn_name[]; +*** 576,579 **** +--- 575,579 ---- + /* For a TRAP_IF rtx, TRAP_CONDITION is an expression. */ + #define TRAP_CONDITION(RTX) ((RTX)->fld[0].rtx) ++ #define TRAP_CODE(RTX) ((RTX)->fld[1].rtint) + + /* 1 in a SYMBOL_REF if it addresses this function's constants pool. */ +*************** extern rtx eliminate_constant_term PROTO +*** 817,820 **** +--- 817,830 ---- + extern rtx expand_complex_abs PROTO((enum machine_mode, rtx, rtx, int)); + extern enum machine_mode choose_hard_reg_mode PROTO((int, int)); ++ extern int rtx_varies_p PROTO((rtx)); ++ extern int may_trap_p PROTO((rtx)); ++ extern int side_effects_p PROTO((rtx)); ++ extern int volatile_refs_p PROTO((rtx)); ++ extern int volatile_insn_p PROTO((rtx)); ++ extern void remove_note PROTO((rtx, rtx)); ++ extern void note_stores PROTO((rtx, void (*)())); ++ extern int refers_to_regno_p PROTO((int, int, rtx, rtx *)); ++ extern int reg_overlap_mentioned_p PROTO((rtx, rtx)); ++ + + /* Maximum number of parallel sets and clobbers in any insn in this fn. +*************** extern rtx *regno_reg_rtx; +*** 967,968 **** +--- 977,985 ---- + + extern int rtx_to_tree_code PROTO((enum rtx_code)); ++ ++ extern int true_dependence PROTO((rtx, enum machine_mode, rtx, int (*)())); ++ extern int read_dependence PROTO((rtx, rtx)); ++ extern int anti_dependence PROTO((rtx, rtx)); ++ extern int output_dependence PROTO((rtx, rtx)); ++ extern void init_alias_analysis PROTO((void)); ++ extern void end_alias_analysis PROTO((void)); +diff -rcp2N gcc-2.7.2.2/sched.c gcc-2.7.2.2.f.2/sched.c +*** gcc-2.7.2.2/sched.c Thu Jun 15 08:06:39 1995 +--- gcc-2.7.2.2.f.2/sched.c Fri Jan 10 23:18:24 1997 +*************** Boston, MA 02111-1307, USA. */ +*** 126,129 **** +--- 126,132 ---- + #include "insn-attr.h" + ++ extern char *reg_known_equiv_p; ++ extern rtx *reg_known_value; ++ + #ifdef INSN_SCHEDULING + /* Arrays set up by scheduling for the same respective purposes as +*************** static int *sched_reg_live_length; +*** 143,146 **** +--- 146,150 ---- + by splitting insns. */ + static rtx *reg_last_uses; ++ static int reg_last_uses_size; + static rtx *reg_last_sets; + static regset reg_pending_sets; +*************** struct sometimes +*** 294,302 **** + + /* Forward declarations. */ +- static rtx canon_rtx PROTO((rtx)); +- static int rtx_equal_for_memref_p PROTO((rtx, rtx)); +- static rtx find_symbolic_term PROTO((rtx)); +- static int memrefs_conflict_p PROTO((int, rtx, int, rtx, +- HOST_WIDE_INT)); + static void add_dependence PROTO((rtx, rtx, enum reg_note)); + static void remove_dependence PROTO((rtx, rtx)); +--- 298,301 ---- +*************** void schedule_insns PROTO((FILE *)); +*** 346,885 **** + #endif /* INSN_SCHEDULING */ + +- #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X))) +- +- /* Vector indexed by N giving the initial (unchanging) value known +- for pseudo-register N. 
*/ +- static rtx *reg_known_value; +- +- /* Vector recording for each reg_known_value whether it is due to a +- REG_EQUIV note. Future passes (viz., reload) may replace the +- pseudo with the equivalent expression and so we account for the +- dependences that would be introduced if that happens. */ +- /* ??? This is a problem only on the Convex. The REG_EQUIV notes created in +- assign_parms mention the arg pointer, and there are explicit insns in the +- RTL that modify the arg pointer. Thus we must ensure that such insns don't +- get scheduled across each other because that would invalidate the REG_EQUIV +- notes. One could argue that the REG_EQUIV notes are wrong, but solving +- the problem in the scheduler will likely give better code, so we do it +- here. */ +- static char *reg_known_equiv_p; +- +- /* Indicates number of valid entries in reg_known_value. */ +- static int reg_known_value_size; +- +- static rtx +- canon_rtx (x) +- rtx x; +- { +- if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER +- && REGNO (x) <= reg_known_value_size) +- return reg_known_value[REGNO (x)]; +- else if (GET_CODE (x) == PLUS) +- { +- rtx x0 = canon_rtx (XEXP (x, 0)); +- rtx x1 = canon_rtx (XEXP (x, 1)); +- +- if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1)) +- { +- /* We can tolerate LO_SUMs being offset here; these +- rtl are used for nothing other than comparisons. */ +- if (GET_CODE (x0) == CONST_INT) +- return plus_constant_for_output (x1, INTVAL (x0)); +- else if (GET_CODE (x1) == CONST_INT) +- return plus_constant_for_output (x0, INTVAL (x1)); +- return gen_rtx (PLUS, GET_MODE (x), x0, x1); +- } +- } +- return x; +- } +- +- /* Set up all info needed to perform alias analysis on memory references. */ +- +- void +- init_alias_analysis () +- { +- int maxreg = max_reg_num (); +- rtx insn; +- rtx note; +- rtx set; +- +- reg_known_value_size = maxreg; +- +- reg_known_value +- = (rtx *) oballoc ((maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx)) +- - FIRST_PSEUDO_REGISTER; +- bzero ((char *) (reg_known_value + FIRST_PSEUDO_REGISTER), +- (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx)); +- +- reg_known_equiv_p +- = (char *) oballoc ((maxreg -FIRST_PSEUDO_REGISTER) * sizeof (char)) +- - FIRST_PSEUDO_REGISTER; +- bzero (reg_known_equiv_p + FIRST_PSEUDO_REGISTER, +- (maxreg - FIRST_PSEUDO_REGISTER) * sizeof (char)); +- +- /* Fill in the entries with known constant values. */ +- for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) +- if ((set = single_set (insn)) != 0 +- && GET_CODE (SET_DEST (set)) == REG +- && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER +- && (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0 +- && reg_n_sets[REGNO (SET_DEST (set))] == 1) +- || (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != 0) +- && GET_CODE (XEXP (note, 0)) != EXPR_LIST) +- { +- int regno = REGNO (SET_DEST (set)); +- reg_known_value[regno] = XEXP (note, 0); +- reg_known_equiv_p[regno] = REG_NOTE_KIND (note) == REG_EQUIV; +- } +- +- /* Fill in the remaining entries. */ +- while (--maxreg >= FIRST_PSEUDO_REGISTER) +- if (reg_known_value[maxreg] == 0) +- reg_known_value[maxreg] = regno_reg_rtx[maxreg]; +- } +- +- /* Return 1 if X and Y are identical-looking rtx's. +- +- We use the data in reg_known_value above to see if two registers with +- different numbers are, in fact, equivalent. 
*/ +- +- static int +- rtx_equal_for_memref_p (x, y) +- rtx x, y; +- { +- register int i; +- register int j; +- register enum rtx_code code; +- register char *fmt; +- +- if (x == 0 && y == 0) +- return 1; +- if (x == 0 || y == 0) +- return 0; +- x = canon_rtx (x); +- y = canon_rtx (y); +- +- if (x == y) +- return 1; +- +- code = GET_CODE (x); +- /* Rtx's of different codes cannot be equal. */ +- if (code != GET_CODE (y)) +- return 0; +- +- /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. +- (REG:SI x) and (REG:HI x) are NOT equivalent. */ +- +- if (GET_MODE (x) != GET_MODE (y)) +- return 0; +- +- /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively. */ +- +- if (code == REG) +- return REGNO (x) == REGNO (y); +- if (code == LABEL_REF) +- return XEXP (x, 0) == XEXP (y, 0); +- if (code == SYMBOL_REF) +- return XSTR (x, 0) == XSTR (y, 0); +- +- /* For commutative operations, the RTX match if the operand match in any +- order. Also handle the simple binary and unary cases without a loop. */ +- if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c') +- return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0)) +- && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1))) +- || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1)) +- && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0)))); +- else if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == '2') +- return (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0)) +- && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1))); +- else if (GET_RTX_CLASS (code) == '1') +- return rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0)); +- +- /* Compare the elements. If any pair of corresponding elements +- fail to match, return 0 for the whole things. */ +- +- fmt = GET_RTX_FORMAT (code); +- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) +- { +- switch (fmt[i]) +- { +- case 'w': +- if (XWINT (x, i) != XWINT (y, i)) +- return 0; +- break; +- +- case 'n': +- case 'i': +- if (XINT (x, i) != XINT (y, i)) +- return 0; +- break; +- +- case 'V': +- case 'E': +- /* Two vectors must have the same length. */ +- if (XVECLEN (x, i) != XVECLEN (y, i)) +- return 0; +- +- /* And the corresponding elements must match. */ +- for (j = 0; j < XVECLEN (x, i); j++) +- if (rtx_equal_for_memref_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0) +- return 0; +- break; +- +- case 'e': +- if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0) +- return 0; +- break; +- +- case 'S': +- case 's': +- if (strcmp (XSTR (x, i), XSTR (y, i))) +- return 0; +- break; +- +- case 'u': +- /* These are just backpointers, so they don't matter. */ +- break; +- +- case '0': +- break; +- +- /* It is believed that rtx's at this level will never +- contain anything but integers and other rtx's, +- except for within LABEL_REFs and SYMBOL_REFs. */ +- default: +- abort (); +- } +- } +- return 1; +- } +- +- /* Given an rtx X, find a SYMBOL_REF or LABEL_REF within +- X and return it, or return 0 if none found. 
*/ +- +- static rtx +- find_symbolic_term (x) +- rtx x; +- { +- register int i; +- register enum rtx_code code; +- register char *fmt; +- +- code = GET_CODE (x); +- if (code == SYMBOL_REF || code == LABEL_REF) +- return x; +- if (GET_RTX_CLASS (code) == 'o') +- return 0; +- +- fmt = GET_RTX_FORMAT (code); +- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) +- { +- rtx t; +- +- if (fmt[i] == 'e') +- { +- t = find_symbolic_term (XEXP (x, i)); +- if (t != 0) +- return t; +- } +- else if (fmt[i] == 'E') +- break; +- } +- return 0; +- } +- +- /* Return nonzero if X and Y (memory addresses) could reference the +- same location in memory. C is an offset accumulator. When +- C is nonzero, we are testing aliases between X and Y + C. +- XSIZE is the size in bytes of the X reference, +- similarly YSIZE is the size in bytes for Y. +- +- If XSIZE or YSIZE is zero, we do not know the amount of memory being +- referenced (the reference was BLKmode), so make the most pessimistic +- assumptions. +- +- We recognize the following cases of non-conflicting memory: +- +- (1) addresses involving the frame pointer cannot conflict +- with addresses involving static variables. +- (2) static variables with different addresses cannot conflict. +- +- Nice to notice that varying addresses cannot conflict with fp if no +- local variables had their addresses taken, but that's too hard now. */ +- +- /* ??? In Fortran, references to a array parameter can never conflict with +- another array parameter. */ +- +- static int +- memrefs_conflict_p (xsize, x, ysize, y, c) +- rtx x, y; +- int xsize, ysize; +- HOST_WIDE_INT c; +- { +- if (GET_CODE (x) == HIGH) +- x = XEXP (x, 0); +- else if (GET_CODE (x) == LO_SUM) +- x = XEXP (x, 1); +- else +- x = canon_rtx (x); +- if (GET_CODE (y) == HIGH) +- y = XEXP (y, 0); +- else if (GET_CODE (y) == LO_SUM) +- y = XEXP (y, 1); +- else +- y = canon_rtx (y); +- +- if (rtx_equal_for_memref_p (x, y)) +- return (xsize == 0 || ysize == 0 || +- (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)); +- +- if (y == frame_pointer_rtx || y == hard_frame_pointer_rtx +- || y == stack_pointer_rtx) +- { +- rtx t = y; +- int tsize = ysize; +- y = x; ysize = xsize; +- x = t; xsize = tsize; +- } +- +- if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx +- || x == stack_pointer_rtx) +- { +- rtx y1; +- +- if (CONSTANT_P (y)) +- return 0; +- +- if (GET_CODE (y) == PLUS +- && canon_rtx (XEXP (y, 0)) == x +- && (y1 = canon_rtx (XEXP (y, 1))) +- && GET_CODE (y1) == CONST_INT) +- { +- c += INTVAL (y1); +- return (xsize == 0 || ysize == 0 +- || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)); +- } +- +- if (GET_CODE (y) == PLUS +- && (y1 = canon_rtx (XEXP (y, 0))) +- && CONSTANT_P (y1)) +- return 0; +- +- return 1; +- } +- +- if (GET_CODE (x) == PLUS) +- { +- /* The fact that X is canonicalized means that this +- PLUS rtx is canonicalized. */ +- rtx x0 = XEXP (x, 0); +- rtx x1 = XEXP (x, 1); +- +- if (GET_CODE (y) == PLUS) +- { +- /* The fact that Y is canonicalized means that this +- PLUS rtx is canonicalized. 
*/ +- rtx y0 = XEXP (y, 0); +- rtx y1 = XEXP (y, 1); +- +- if (rtx_equal_for_memref_p (x1, y1)) +- return memrefs_conflict_p (xsize, x0, ysize, y0, c); +- if (rtx_equal_for_memref_p (x0, y0)) +- return memrefs_conflict_p (xsize, x1, ysize, y1, c); +- if (GET_CODE (x1) == CONST_INT) +- if (GET_CODE (y1) == CONST_INT) +- return memrefs_conflict_p (xsize, x0, ysize, y0, +- c - INTVAL (x1) + INTVAL (y1)); +- else +- return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1)); +- else if (GET_CODE (y1) == CONST_INT) +- return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1)); +- +- /* Handle case where we cannot understand iteration operators, +- but we notice that the base addresses are distinct objects. */ +- x = find_symbolic_term (x); +- if (x == 0) +- return 1; +- y = find_symbolic_term (y); +- if (y == 0) +- return 1; +- return rtx_equal_for_memref_p (x, y); +- } +- else if (GET_CODE (x1) == CONST_INT) +- return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1)); +- } +- else if (GET_CODE (y) == PLUS) +- { +- /* The fact that Y is canonicalized means that this +- PLUS rtx is canonicalized. */ +- rtx y0 = XEXP (y, 0); +- rtx y1 = XEXP (y, 1); +- +- if (GET_CODE (y1) == CONST_INT) +- return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1)); +- else +- return 1; +- } +- +- if (GET_CODE (x) == GET_CODE (y)) +- switch (GET_CODE (x)) +- { +- case MULT: +- { +- /* Handle cases where we expect the second operands to be the +- same, and check only whether the first operand would conflict +- or not. */ +- rtx x0, y0; +- rtx x1 = canon_rtx (XEXP (x, 1)); +- rtx y1 = canon_rtx (XEXP (y, 1)); +- if (! rtx_equal_for_memref_p (x1, y1)) +- return 1; +- x0 = canon_rtx (XEXP (x, 0)); +- y0 = canon_rtx (XEXP (y, 0)); +- if (rtx_equal_for_memref_p (x0, y0)) +- return (xsize == 0 || ysize == 0 +- || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)); +- +- /* Can't properly adjust our sizes. */ +- if (GET_CODE (x1) != CONST_INT) +- return 1; +- xsize /= INTVAL (x1); +- ysize /= INTVAL (x1); +- c /= INTVAL (x1); +- return memrefs_conflict_p (xsize, x0, ysize, y0, c); +- } +- } +- +- if (CONSTANT_P (x)) +- { +- if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT) +- { +- c += (INTVAL (y) - INTVAL (x)); +- return (xsize == 0 || ysize == 0 +- || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)); +- } +- +- if (GET_CODE (x) == CONST) +- { +- if (GET_CODE (y) == CONST) +- return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), +- ysize, canon_rtx (XEXP (y, 0)), c); +- else +- return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), +- ysize, y, c); +- } +- if (GET_CODE (y) == CONST) +- return memrefs_conflict_p (xsize, x, ysize, +- canon_rtx (XEXP (y, 0)), c); +- +- if (CONSTANT_P (y)) +- return (rtx_equal_for_memref_p (x, y) +- && (xsize == 0 || ysize == 0 +- || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0))); +- +- return 1; +- } +- return 1; +- } +- +- /* Functions to compute memory dependencies. +- +- Since we process the insns in execution order, we can build tables +- to keep track of what registers are fixed (and not aliased), what registers +- are varying in known ways, and what registers are varying in unknown +- ways. +- +- If both memory references are volatile, then there must always be a +- dependence between the two references, since their order can not be +- changed. A volatile and non-volatile reference can be interchanged +- though. 
+- +- A MEM_IN_STRUCT reference at a non-QImode varying address can never +- conflict with a non-MEM_IN_STRUCT reference at a fixed address. We must +- allow QImode aliasing because the ANSI C standard allows character +- pointers to alias anything. We are assuming that characters are +- always QImode here. */ +- +- /* Read dependence: X is read after read in MEM takes place. There can +- only be a dependence here if both reads are volatile. */ +- +- int +- read_dependence (mem, x) +- rtx mem; +- rtx x; +- { +- return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem); +- } +- +- /* True dependence: X is read after store in MEM takes place. */ +- +- int +- true_dependence (mem, x) +- rtx mem; +- rtx x; +- { +- /* If X is an unchanging read, then it can't possibly conflict with any +- non-unchanging store. It may conflict with an unchanging write though, +- because there may be a single store to this address to initialize it. +- Just fall through to the code below to resolve the case where we have +- both an unchanging read and an unchanging write. This won't handle all +- cases optimally, but the possible performance loss should be +- negligible. */ +- if (RTX_UNCHANGING_P (x) && ! RTX_UNCHANGING_P (mem)) +- return 0; +- +- return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem)) +- || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0), +- SIZE_FOR_MODE (x), XEXP (x, 0), 0) +- && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem) +- && GET_MODE (mem) != QImode +- && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x)) +- && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x) +- && GET_MODE (x) != QImode +- && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem)))); +- } +- +- /* Anti dependence: X is written after read in MEM takes place. */ +- +- int +- anti_dependence (mem, x) +- rtx mem; +- rtx x; +- { +- /* If MEM is an unchanging read, then it can't possibly conflict with +- the store to X, because there is at most one store to MEM, and it must +- have occurred somewhere before MEM. */ +- if (RTX_UNCHANGING_P (mem)) +- return 0; +- +- return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem)) +- || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0), +- SIZE_FOR_MODE (x), XEXP (x, 0), 0) +- && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem) +- && GET_MODE (mem) != QImode +- && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x)) +- && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x) +- && GET_MODE (x) != QImode +- && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem)))); +- } +- +- /* Output dependence: X is written after store in MEM takes place. */ +- +- int +- output_dependence (mem, x) +- rtx mem; +- rtx x; +- { +- return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem)) +- || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0), +- SIZE_FOR_MODE (x), XEXP (x, 0), 0) +- && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem) +- && GET_MODE (mem) != QImode +- && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x)) +- && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x) +- && GET_MODE (x) != QImode +- && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem)))); +- } +- + /* Helper functions for instruction scheduling. */ + +--- 345,348 ---- +*************** sched_analyze_2 (x, insn) +*** 1922,1926 **** + /* If a dependency already exists, don't create a new one. */ + if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn))) +! if (true_dependence (XEXP (pending_mem, 0), x)) + add_dependence (insn, XEXP (pending, 0), 0); + +--- 1385,1390 ---- + /* If a dependency already exists, don't create a new one. 
*/ + if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn))) +! if (true_dependence (XEXP (pending_mem, 0), VOIDmode, +! x, rtx_varies_p)) + add_dependence (insn, XEXP (pending, 0), 0); + +*************** sched_analyze_insn (x, insn, loop_notes) +*** 2021,2025 **** + register RTX_CODE code = GET_CODE (x); + rtx link; +! int maxreg = max_reg_num (); + int i; + +--- 1485,1489 ---- + register RTX_CODE code = GET_CODE (x); + rtx link; +! int maxreg = reg_last_uses_size; + int i; + +*************** sched_analyze_insn (x, insn, loop_notes) +*** 2058,2062 **** + if (loop_notes) + { +! int max_reg = max_reg_num (); + rtx link; + +--- 1522,1526 ---- + if (loop_notes) + { +! int max_reg = reg_last_uses_size; + rtx link; + +*************** sched_analyze (head, tail) +*** 2202,2207 **** + && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP) + { +! int max_reg = max_reg_num (); +! for (i = 0; i < max_reg; i++) + { + for (u = reg_last_uses[i]; u; u = XEXP (u, 1)) +--- 1666,1670 ---- + && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP) + { +! for (i = 0; i < reg_last_uses_size; i++) + { + for (u = reg_last_uses[i]; u; u = XEXP (u, 1)) +*************** sched_note_set (b, x, death) +*** 2372,2380 **** + + #define SCHED_SORT(READY, NEW_READY, OLD_READY) \ +! do { if ((NEW_READY) - (OLD_READY) == 1) \ +! swap_sort (READY, NEW_READY); \ +! else if ((NEW_READY) - (OLD_READY) > 1) \ +! qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \ +! while (0) + + /* Returns a positive value if y is preferred; returns a negative value if +--- 1835,1842 ---- + + #define SCHED_SORT(READY, NEW_READY, OLD_READY) \ +! if ((NEW_READY) - (OLD_READY) == 1) \ +! swap_sort (READY, NEW_READY); \ +! else if ((NEW_READY) - (OLD_READY) > 1) \ +! qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); else \ + + /* Returns a positive value if y is preferred; returns a negative value if +*************** schedule_block (b, file) +*** 3174,3178 **** + b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b])); + +! i = max_reg_num (); + reg_last_uses = (rtx *) alloca (i * sizeof (rtx)); + bzero ((char *) reg_last_uses, i * sizeof (rtx)); +--- 2636,2640 ---- + b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b])); + +! reg_last_uses_size = i = max_reg_num (); + reg_last_uses = (rtx *) alloca (i * sizeof (rtx)); + bzero ((char *) reg_last_uses, i * sizeof (rtx)); +*************** schedule_insns (dump_file) +*** 4718,4721 **** +--- 4180,4198 ---- + max_regno * sizeof (short)); + init_alias_analysis (); ++ #if 0 ++ if (dump_file) ++ { ++ extern rtx *reg_base_value; ++ extern int reg_base_value_size; ++ int i; ++ for (i = 0; i < reg_base_value_size; i++) ++ if (reg_base_value[i]) ++ { ++ fprintf (dump_file, ";; reg_base_value[%d] = ", i); ++ print_rtl (dump_file, reg_base_value[i]); ++ fputc ('\n', dump_file); ++ } ++ } ++ #endif + } + else +*************** schedule_insns (dump_file) +*** 4726,4731 **** + bb_dead_regs = 0; + bb_live_regs = 0; +! if (! flag_schedule_insns) +! init_alias_analysis (); + } + +--- 4203,4207 ---- + bb_dead_regs = 0; + bb_live_regs = 0; +! init_alias_analysis (); + } + +diff -rcp2N gcc-2.7.2.2/toplev.c gcc-2.7.2.2.f.2/toplev.c +*** gcc-2.7.2.2/toplev.c Fri Oct 20 17:56:35 1995 +--- gcc-2.7.2.2.f.2/toplev.c Fri Jan 10 23:18:24 1997 +*************** int flag_unroll_loops; +*** 388,391 **** +--- 388,405 ---- + int flag_unroll_all_loops; + ++ /* Nonzero forces all invariant computations in loops to be moved ++ outside the loop. 
*/ ++ ++ int flag_move_all_movables = 0; ++ ++ /* Nonzero forces all general induction variables in loops to be ++ strength reduced. */ ++ ++ int flag_reduce_all_givs = 0; ++ ++ /* Nonzero gets another run of loop_optimize performed. */ ++ ++ int flag_rerun_loop_opt = 0; ++ + /* Nonzero for -fwritable-strings: + store string constants in data segment and don't uniquize them. */ +*************** int flag_gnu_linker = 1; +*** 522,525 **** +--- 536,550 ---- + int flag_pack_struct = 0; + ++ /* 1 if alias checking is on (by default, when -O). */ ++ int flag_alias_check = 0; ++ ++ /* 0 if pointer arguments may alias each other. True in C. ++ 1 if pointer arguments may not alias each other but may alias ++ global variables. ++ 2 if pointer arguments may not alias each other and may not ++ alias global variables. True in Fortran. ++ This defaults to 0 for C. */ ++ int flag_argument_noalias = 0; ++ + /* Table of language-independent -f options. + STRING is the option name. VARIABLE is the address of the variable. +*************** struct { char *string; int *variable; in +*** 542,545 **** +--- 567,573 ---- + {"unroll-loops", &flag_unroll_loops, 1}, + {"unroll-all-loops", &flag_unroll_all_loops, 1}, ++ {"move-all-movables", &flag_move_all_movables, 1}, ++ {"reduce-all-givs", &flag_reduce_all_givs, 1}, ++ {"rerun-loop-opt", &flag_rerun_loop_opt, 1}, + {"writable-strings", &flag_writable_strings, 1}, + {"peephole", &flag_no_peephole, 0}, +*************** struct { char *string; int *variable; in +*** 568,572 **** + {"gnu-linker", &flag_gnu_linker, 1}, + {"pack-struct", &flag_pack_struct, 1}, +! {"bytecode", &output_bytecode, 1} + }; + +--- 596,604 ---- + {"gnu-linker", &flag_gnu_linker, 1}, + {"pack-struct", &flag_pack_struct, 1}, +! {"bytecode", &output_bytecode, 1}, +! {"alias-check", &flag_alias_check, 1}, +! {"argument-alias", &flag_argument_noalias, 0}, +! {"argument-noalias", &flag_argument_noalias, 1}, +! {"argument-noalias-global", &flag_argument_noalias, 2} + }; + +*************** rest_of_compilation (decl) +*** 2894,2897 **** +--- 2926,2931 ---- + { + loop_optimize (insns, loop_dump_file); ++ if (flag_rerun_loop_opt) ++ loop_optimize (insns, loop_dump_file); + }); + } +*************** main (argc, argv, envp) +*** 3383,3386 **** +--- 3417,3421 ---- + flag_omit_frame_pointer = 1; + #endif ++ flag_alias_check = 1; + } + +diff -rcp2N gcc-2.7.2.2/unroll.c gcc-2.7.2.2.f.2/unroll.c +*** gcc-2.7.2.2/unroll.c Sat Aug 19 17:33:26 1995 +--- gcc-2.7.2.2.f.2/unroll.c Fri Jan 10 23:18:24 1997 +*************** unroll_loop (loop_end, insn_count, loop_ +*** 995,1000 **** + for (j = FIRST_PSEUDO_REGISTER; j < max_reg_before_loop; j++) + if (local_regno[j]) +! map->reg_map[j] = gen_reg_rtx (GET_MODE (regno_reg_rtx[j])); +! + /* The last copy needs the compare/branch insns at the end, + so reset copy_end here if the loop ends with a conditional +--- 995,1003 ---- + for (j = FIRST_PSEUDO_REGISTER; j < max_reg_before_loop; j++) + if (local_regno[j]) +! { +! map->reg_map[j] = gen_reg_rtx (GET_MODE (regno_reg_rtx[j])); +! record_base_value (REGNO (map->reg_map[j]), +! regno_reg_rtx[j]); +! } + /* The last copy needs the compare/branch insns at the end, + so reset copy_end here if the loop ends with a conditional +*************** unroll_loop (loop_end, insn_count, loop_ +*** 1136,1140 **** + for (j = FIRST_PSEUDO_REGISTER; j < max_reg_before_loop; j++) + if (local_regno[j]) +! 
map->reg_map[j] = gen_reg_rtx (GET_MODE (regno_reg_rtx[j])); + + /* If loop starts with a branch to the test, then fix it so that +--- 1139,1147 ---- + for (j = FIRST_PSEUDO_REGISTER; j < max_reg_before_loop; j++) + if (local_regno[j]) +! { +! map->reg_map[j] = gen_reg_rtx (GET_MODE (regno_reg_rtx[j])); +! record_base_value (REGNO (map->reg_map[j]), +! regno_reg_rtx[j]); +! } + + /* If loop starts with a branch to the test, then fix it so that +*************** copy_loop_body (copy_start, copy_end, ma +*** 1631,1635 **** + incrementing the shared pseudo reg more than + once. */ +! if (! tv->same_insn) + { + /* tv->dest_reg may actually be a (PLUS (REG) +--- 1638,1642 ---- + incrementing the shared pseudo reg more than + once. */ +! if (! tv->same_insn && ! tv->shared) + { + /* tv->dest_reg may actually be a (PLUS (REG) +*************** copy_loop_body (copy_start, copy_end, ma +*** 1757,1760 **** +--- 1764,1768 ---- + giv_dest_reg = tem; + map->reg_map[regno] = tem; ++ record_base_value (REGNO (tem), giv_src_reg); + } + else +*************** find_splittable_regs (unroll_type, loop_ +*** 2443,2447 **** + { + rtx tem = gen_reg_rtx (bl->biv->mode); +! + emit_insn_before (gen_move_insn (tem, bl->biv->src_reg), + loop_start); +--- 2451,2456 ---- + { + rtx tem = gen_reg_rtx (bl->biv->mode); +! +! record_base_value (REGNO (tem), bl->biv->add_val); + emit_insn_before (gen_move_insn (tem, bl->biv->src_reg), + loop_start); +*************** find_splittable_regs (unroll_type, loop_ +*** 2500,2503 **** +--- 2509,2514 ---- + exits. */ + rtx tem = gen_reg_rtx (bl->biv->mode); ++ record_base_value (REGNO (tem), bl->biv->add_val); ++ + emit_insn_before (gen_move_insn (tem, bl->biv->src_reg), + loop_start); +*************** find_splittable_givs (bl, unroll_type, l +*** 2675,2678 **** +--- 2686,2690 ---- + rtx tem = gen_reg_rtx (bl->biv->mode); + ++ record_base_value (REGNO (tem), bl->biv->add_val); + emit_insn_before (gen_move_insn (tem, bl->biv->src_reg), + loop_start); +*************** find_splittable_givs (bl, unroll_type, l +*** 2716,2719 **** +--- 2728,2732 ---- + { + rtx tem = gen_reg_rtx (v->mode); ++ record_base_value (REGNO (tem), v->add_val); + emit_iv_add_mult (bl->initial_value, v->mult_val, + v->add_val, tem, loop_start); +*************** find_splittable_givs (bl, unroll_type, l +*** 2734,2747 **** + register for the split addr giv, just to be safe. */ + +! /* ??? If there are multiple address givs which have been +! combined with the same dest_reg giv, then we may only need +! one new register for them. Pulling out constants below will +! catch some of the common cases of this. Currently, I leave +! the work of simplifying multiple address givs to the +! following cse pass. */ +! +! /* As a special case, if we have multiple identical address givs +! within a single instruction, then we do use a single pseudo +! reg for both. This is necessary in case one is a match_dup + of the other. */ + +--- 2747,2753 ---- + register for the split addr giv, just to be safe. */ + +! /* If we have multiple identical address givs within a +! single instruction, then use a single pseudo reg for +! both. This is necessary in case one is a match_dup + of the other. */ + +*************** find_splittable_givs (bl, unroll_type, l +*** 2756,2759 **** +--- 2762,2776 ---- + INSN_UID (v->insn)); + } ++ /* If multiple address GIVs have been combined with the ++ same dest_reg GIV, do not create a new register for ++ each. 
*/ ++ else if (unroll_type != UNROLL_COMPLETELY ++ && v->giv_type == DEST_ADDR ++ && v->same && v->same->giv_type == DEST_ADDR ++ && v->same->unrolled) ++ { ++ v->dest_reg = v->same->dest_reg; ++ v->shared = 1; ++ } + else if (unroll_type != UNROLL_COMPLETELY) + { +*************** find_splittable_givs (bl, unroll_type, l +*** 2761,2765 **** + register to hold the split value of the DEST_ADDR giv. + Emit insn to initialize its value before loop start. */ +! tem = gen_reg_rtx (v->mode); + + /* If the address giv has a constant in its new_reg value, +--- 2778,2785 ---- + register to hold the split value of the DEST_ADDR giv. + Emit insn to initialize its value before loop start. */ +! +! rtx tem = gen_reg_rtx (v->mode); +! record_base_value (REGNO (tem), v->add_val); +! v->unrolled = 1; + + /* If the address giv has a constant in its new_reg value, +*************** find_splittable_givs (bl, unroll_type, l +*** 2772,2776 **** + v->dest_reg + = plus_constant (tem, INTVAL (XEXP (v->new_reg,1))); +! + /* Only succeed if this will give valid addresses. + Try to validate both the first and the last +--- 2792,2796 ---- + v->dest_reg + = plus_constant (tem, INTVAL (XEXP (v->new_reg,1))); +! + /* Only succeed if this will give valid addresses. + Try to validate both the first and the last +*************** final_biv_value (bl, loop_start, loop_en +*** 3061,3064 **** +--- 3081,3085 ---- + + tem = gen_reg_rtx (bl->biv->mode); ++ record_base_value (REGNO (tem), bl->biv->add_val); + /* Make sure loop_end is not the last insn. */ + if (NEXT_INSN (loop_end) == 0) +*************** final_giv_value (v, loop_start, loop_end +*** 3154,3157 **** +--- 3175,3179 ---- + /* Put the final biv value in tem. */ + tem = gen_reg_rtx (bl->biv->mode); ++ record_base_value (REGNO (tem), bl->biv->add_val); + emit_iv_add_mult (increment, GEN_INT (loop_n_iterations), + bl->initial_value, tem, insert_before); +diff -rcp2N gcc-2.7.2.2/version.c gcc-2.7.2.2.f.2/version.c +*** gcc-2.7.2.2/version.c Thu Feb 20 19:24:33 1997 +--- gcc-2.7.2.2.f.2/version.c Sun Feb 23 16:30:36 1997 +*************** +*** 1 **** +! char *version_string = "2.7.2.2"; +--- 1 ---- +! char *version_string = "2.7.2.2.f.2"; |
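
Illustrative note on the new aliasing flags (a sketch only; the function
below is invented for illustration and does not appear anywhere in the
patch): the toplev.c hunks above document flag_argument_noalias as 0 when
pointer arguments may alias each other (the C default, selected by
-fargument-alias), 1 when they may not alias each other but may still
alias global variables (-fargument-noalias), and 2 when they may alias
neither, as in Fortran (-fargument-noalias-global).  In a loop like the
one below, if one of the noalias options is in effect, the compiler may
treat loads through `src' and stores through `dst' as independent when
scheduling and unrolling; under the default it must assume the two
arrays might overlap.

    /* Hypothetical example, not taken from the gcc sources.  */
    void
    scale (dst, src, factor, n)
         double *dst;
         double *src;
         double factor;
         int n;
    {
      int i;

      for (i = 0; i < n; i++)
        /* With pointer arguments assumed not to overlap, src[i] need not
           be reloaded after each store to dst[i], so the scheduler and
           unroller have more freedom here.  */
        dst[i] = src[i] * factor;
    }

Per the comment added to toplev.c above, the related -falias-check flag
is switched on by default when optimizing, so the alias information
implied by these options is only expected to matter in optimizing
builds.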