author	Miod Vallat <miod@cvs.openbsd.org>	2011-11-25 05:21:45 +0000
committer	Miod Vallat <miod@cvs.openbsd.org>	2011-11-25 05:21:45 +0000
commit	188539c0c1642ac2e2f1dfa76b61b3b7fa3bb587 (patch)
tree	10b7af479263e6f12e00f3b98baae369cb1da49c
parent	6b8998c60d4d424cca620f34cfdffbd784a0a224 (diff)
Allow MD backend to prevent the optimization of a bcopy() or memmove() of
size 1 (the size being known at compile time) into an inline mempcpy()
expansion, which will in turn expand into a byte load and store operation.

This expansion loses precious address alignment information at some point
(because everybody knows that you can read a byte from any address,
right?), and this loses big time on strict alignment platforms which lack
the ability to access bytes directly, such as alpha (unless compiling with
-mbwx and running on a BWX-capable cpu).

An example of such a call with size 1 is lib/libkvm/kvm.c line 780.
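
[Editor's note: a minimal sketch of how an MD backend might opt in to the
byte expansion. The macro name SUBWORD_ACCESS_P comes from the diff below;
tying it to TARGET_BWX in the alpha target header is an assumption about
how a strict-alignment backend could use this hook, not part of this commit.]

	/* Hypothetically placed in gnu/gcc/gcc/config/alpha/alpha.h.
	   True when the target can load and store individual bytes,
	   i.e. when the BWX extension is available.  Backends that do
	   not define this macro keep the old unconditional behaviour.  */
	#define SUBWORD_ACCESS_P	TARGET_BWX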
-rw-r--r--	gnu/gcc/gcc/builtins.c	17
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/gnu/gcc/gcc/builtins.c b/gnu/gcc/gcc/builtins.c
index f6c00447aa6..7a927d2d6da 100644
--- a/gnu/gcc/gcc/builtins.c
+++ b/gnu/gcc/gcc/builtins.c
@@ -3060,10 +3060,19 @@ expand_builtin_memmove (tree arglist, tree type, rtx target,
 	 it is ok to use memcpy as well.  */
       if (integer_onep (len))
 	{
-	  rtx ret = expand_builtin_mempcpy (arglist, type, target, mode,
-					    /*endp=*/0);
-	  if (ret)
-	    return ret;
+#if defined(SUBWORD_ACCESS_P)
+	  if (SUBWORD_ACCESS_P
+	      || (src_align >= BIGGEST_ALIGNMENT
+		  && dest_align >= BIGGEST_ALIGNMENT))
+	    {
+#endif
+	      rtx ret = expand_builtin_mempcpy (arglist, type, target, mode,
+						/*endp=*/0);
+	      if (ret)
+		return ret;
+#if defined(SUBWORD_ACCESS_P)
+	    }
+#endif
 	}
 
       /* Otherwise, call the normal function.  */
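
[Editor's note: for reference, a self-contained example of the source
pattern this change gates. This is illustrative only and is not the actual
lib/libkvm/kvm.c code cited in the commit message.]

	/* A memmove() whose size is the compile-time constant 1.  Before
	   this change, GCC would expand it inline via mempcpy() into a
	   byte load and store, even on targets that cannot access bytes
	   directly and where the pointers may be unaligned.  */
	#include <string.h>

	void
	copy_one_byte(char *dst, const char *src)
	{
		memmove(dst, src, 1);	/* size known at compile time */
	}

With the patch applied, a backend defining SUBWORD_ACCESS_P as false falls
through to the normal library call here unless both operands are known to be
sufficiently aligned (src_align and dest_align at least BIGGEST_ALIGNMENT).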