author    Patrick Wildt <patrick@cvs.openbsd.org>  2020-08-03 14:30:26 +0000
committer Patrick Wildt <patrick@cvs.openbsd.org>  2020-08-03 14:30:26 +0000
commit    a3338ca1c6dfa80761ba39575dd9d81c052f7774 (patch)
tree      b80a576a66f63638f0b1573d419d3f83e6793135 /gnu
parent    6d04c3cdb07eefa10680452eabd5755b32c0e018 (diff)
Import LLVM 10.0.0 release including clang, lld and lldb.
ok hackroom tested by plenty
Diffstat (limited to 'gnu')
-rw-r--r--  gnu/llvm/llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 41
1 file changed, 6 insertions(+), 35 deletions(-)
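
Relative to the copy of this file previously in the tree, the hunks below remove the ISA 3.0 half-precision (f16) load/store legality block, an explicit SELECT_CC expand for v4i32, explicit alignment tracking in LowerFP_TO_INTForReuse, the hasFPCVT/i32 qualification on FP_TO_UINT in canReuseLoadAddress, and the FP_EXTEND case in ReplaceNodeResults; they also switch combineStoreFPToInt from hasP8Vector() to hasP8Altivec() and extend the unaligned-FP restriction in allowsMisalignedMemoryAccesses to vector types. A minimal sketch of the Legal/Expand pattern behind the first hunk follows the diff.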
diff --git a/gnu/llvm/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/gnu/llvm/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index abbd94b55f0..eff14419c37 100644
--- a/gnu/llvm/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/gnu/llvm/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -167,23 +167,6 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
}
- if (Subtarget.isISA3_0()) {
- setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
- setTruncStoreAction(MVT::f64, MVT::f16, Legal);
- setTruncStoreAction(MVT::f32, MVT::f16, Legal);
- } else {
- // No extending loads from f16 or HW conversions back and forth.
- setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
- setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
- setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
- setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
- setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
- setTruncStoreAction(MVT::f64, MVT::f16, Expand);
- setTruncStoreAction(MVT::f32, MVT::f16, Expand);
- }
-
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
// PowerPC has pre-inc load and store's.
@@ -694,7 +677,6 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
}
}
- setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
if (!Subtarget.hasP8Vector()) {
setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
@@ -7741,17 +7723,15 @@ void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
// Emit a store to the stack slot.
SDValue Chain;
- unsigned Alignment = DAG.getEVTAlignment(Tmp.getValueType());
if (i32Stack) {
MachineFunction &MF = DAG.getMachineFunction();
- Alignment = 4;
MachineMemOperand *MMO =
- MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
+ MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4);
SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
} else
- Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI, Alignment);
+ Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI);
// Result is a load from the stack slot. If loading 4 bytes, make sure to
// add in a bias on big endian.
@@ -7764,7 +7744,6 @@ void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
RLI.Chain = Chain;
RLI.Ptr = FIPtr;
RLI.MPI = MPI;
- RLI.Alignment = Alignment;
}
/// Custom lowers floating point to integer conversions to use
@@ -7872,10 +7851,9 @@ bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
SelectionDAG &DAG,
ISD::LoadExtType ET) const {
SDLoc dl(Op);
- bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
- (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
if (ET == ISD::NON_EXTLOAD &&
- (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
+ (Op.getOpcode() == ISD::FP_TO_UINT ||
+ Op.getOpcode() == ISD::FP_TO_SINT) &&
isOperationLegalOrCustom(Op.getOpcode(),
Op.getOperand(0).getValueType())) {
@@ -10382,7 +10360,6 @@ SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
assert(Op.getOpcode() == ISD::FP_EXTEND &&
"Should only be called for ISD::FP_EXTEND");
- // FIXME: handle extends from half precision float vectors on P9.
// We only want to custom lower an extend from v2f32 to v2f64.
if (Op.getValueType() != MVT::v2f64 ||
Op.getOperand(0).getValueType() != MVT::v2f32)
@@ -10596,11 +10573,6 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
case ISD::BITCAST:
// Don't handle bitcast here.
return;
- case ISD::FP_EXTEND:
- SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
- if (Lowered)
- Results.push_back(Lowered);
- return;
}
}
@@ -13619,7 +13591,7 @@ SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
(Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
(Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
- if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
+ if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Altivec() ||
cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
return SDValue();
@@ -15282,8 +15254,7 @@ bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
if (!VT.isSimple())
return false;
- if (VT.isFloatingPoint() && !VT.isVector() &&
- !Subtarget.allowsUnalignedFPAccess())
+ if (VT.isFloatingPoint() && !Subtarget.allowsUnalignedFPAccess())
return false;
if (VT.getSimpleVT().isVector()) {
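
A note on the first hunk: LLVM targets declare per-type legalization actions, and anything marked Expand is rewritten by the legalizer (for f16 conversions, typically into a runtime libcall), while Legal means the hardware handles it directly. Below is a minimal, self-contained sketch of that dispatch pattern in plain C++. The VT, Action, and setLoadExtAction names mirror LLVM's for readability, but the types here are stand-ins, not LLVM's API.

#include <cstdio>
#include <map>
#include <utility>

enum class Action { Legal, Expand };
enum class VT { f16, f32, f64 };

// Stand-in for TargetLowering's per-type action tables.
struct Lowering {
  std::map<std::pair<VT, VT>, Action> extLoad; // (result, memory) -> action

  void setLoadExtAction(VT res, VT mem, Action a) { extLoad[{res, mem}] = a; }

  Action getLoadExtAction(VT res, VT mem) const {
    auto it = extLoad.find({res, mem});
    // Unlisted combinations default to Expand here, for simplicity.
    return it == extLoad.end() ? Action::Expand : it->second;
  }
};

int main() {
  bool isISA3_0 = false; // pre-POWER9 core: no half-precision hardware

  Lowering tl;
  // Mirrors the removed hunk: Legal when the hardware converts f16
  // directly, Expand (legalizer/libcall path) otherwise.
  Action a = isISA3_0 ? Action::Legal : Action::Expand;
  tl.setLoadExtAction(VT::f64, VT::f16, a);
  tl.setLoadExtAction(VT::f32, VT::f16, a);

  std::printf("f16 -> f64 extending load: %s\n",
              tl.getLoadExtAction(VT::f64, VT::f16) == Action::Legal
                  ? "legal (hardware)"
                  : "expand (legalizer/libcall)");
  return 0;
}

On an ISA 3.0 (POWER9-class) target the same pattern would select Legal, so an extending load from f16 folds into hardware conversion instructions instead of a call.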