author     Patrick Wildt <patrick@cvs.openbsd.org>  2020-08-03 14:30:26 +0000
committer  Patrick Wildt <patrick@cvs.openbsd.org>  2020-08-03 14:30:26 +0000
commit     07f815ede6cd74d12038282b7da52b262307c25c
tree       3884b7ceb74ec4e193117fc42f9dd8a82b336c52
parent     9d64bf78c75cae1e68ab6c71fe8e734db4344291
Import LLVM 10.0.0 release including clang, lld and lldb.
ok hackroom tested by plenty
-rw-r--r--  gnu/llvm/llvm/lib/Target/RISCV/RISCVISelLowering.cpp  179
1 file changed, 34 insertions(+), 145 deletions(-)
diff --git a/gnu/llvm/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/gnu/llvm/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 03d9eefd59d..5a2cffbc824 100644
--- a/gnu/llvm/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/gnu/llvm/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -33,7 +33,6 @@
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -149,27 +148,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
- if (!(Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp())) {
- setOperationAction(ISD::ROTL, XLenVT, Expand);
- setOperationAction(ISD::ROTR, XLenVT, Expand);
- }
-
- if (!Subtarget.hasStdExtZbp())
- setOperationAction(ISD::BSWAP, XLenVT, Expand);
-
- if (!Subtarget.hasStdExtZbb()) {
- setOperationAction(ISD::CTTZ, XLenVT, Expand);
- setOperationAction(ISD::CTLZ, XLenVT, Expand);
- setOperationAction(ISD::CTPOP, XLenVT, Expand);
- }
-
- if (Subtarget.hasStdExtZbp())
- setOperationAction(ISD::BITREVERSE, XLenVT, Legal);
-
- if (Subtarget.hasStdExtZbt()) {
- setOperationAction(ISD::FSHL, XLenVT, Legal);
- setOperationAction(ISD::FSHR, XLenVT, Legal);
- }
+ setOperationAction(ISD::ROTL, XLenVT, Expand);
+ setOperationAction(ISD::ROTR, XLenVT, Expand);
+ setOperationAction(ISD::BSWAP, XLenVT, Expand);
+ setOperationAction(ISD::CTTZ, XLenVT, Expand);
+ setOperationAction(ISD::CTLZ, XLenVT, Expand);
+ setOperationAction(ISD::CTPOP, XLenVT, Expand);
ISD::CondCode FPCCToExtend[] = {
ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
@@ -213,14 +197,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setTruncStoreAction(MVT::f64, MVT::f16, Expand);
}
- if (Subtarget.is64Bit() &&
- !(Subtarget.hasStdExtD() || Subtarget.hasStdExtF())) {
- setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
- setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
- setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
- setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
- }
-
setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
setOperationAction(ISD::BlockAddress, XLenVT, Custom);
setOperationAction(ISD::ConstantPool, XLenVT, Custom);
@@ -234,7 +210,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::TRAP, MVT::Other, Legal);
setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
- setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
if (Subtarget.hasStdExtA()) {
setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
@@ -252,12 +227,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
// Effectively disable jump table generation.
setMinimumJumpTableEntries(INT_MAX);
-
- // Jumps are expensive, compared to logic
- setJumpIsExpensive();
-
- // We can use any register for comparisons
- setHasMultipleConditionRegisters();
}
EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
@@ -367,17 +336,6 @@ bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}
-bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
- bool ForCodeSize) const {
- if (VT == MVT::f32 && !Subtarget.hasStdExtF())
- return false;
- if (VT == MVT::f64 && !Subtarget.hasStdExtD())
- return false;
- if (Imm.isNegZero())
- return false;
- return Imm.isZero();
-}
-
bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
return (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
(VT == MVT::f64 && Subtarget.hasStdExtD());
@@ -460,8 +418,6 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
SDValue FPConv = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
return FPConv;
}
- case ISD::INTRINSIC_WO_CHAIN:
- return LowerINTRINSIC_WO_CHAIN(Op, DAG);
}
}
@@ -478,7 +434,7 @@ static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
SelectionDAG &DAG, unsigned Flags) {
- return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
+ return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
N->getOffset(), Flags);
}
@@ -865,20 +821,6 @@ SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
return DAG.getMergeValues(Parts, DL);
}
-SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
- SelectionDAG &DAG) const {
- unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- SDLoc DL(Op);
- switch (IntNo) {
- default:
- return SDValue(); // Don't custom lower most intrinsics.
- case Intrinsic::thread_pointer: {
- EVT PtrVT = getPointerTy(DAG.getDataLayout());
- return DAG.getRegister(RISCV::X4, PtrVT);
- }
- }
-}
-
// Returns the opcode of the target-specific SDNode that implements the 32-bit
// form of the given Opcode.
static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
@@ -934,32 +876,6 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
switch (N->getOpcode()) {
default:
llvm_unreachable("Don't know how to custom type legalize this operation!");
- case ISD::STRICT_FP_TO_SINT:
- case ISD::STRICT_FP_TO_UINT:
- case ISD::FP_TO_SINT:
- case ISD::FP_TO_UINT: {
- bool IsStrict = N->isStrictFPOpcode();
- assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
- "Unexpected custom legalisation");
- SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
- RTLIB::Libcall LC;
- if (N->getOpcode() == ISD::FP_TO_SINT ||
- N->getOpcode() == ISD::STRICT_FP_TO_SINT)
- LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
- else
- LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
- MakeLibCallOptions CallOptions;
- EVT OpVT = Op0.getValueType();
- CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
- SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
- SDValue Result;
- std::tie(Result, Chain) =
- makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
- Results.push_back(Result);
- if (IsStrict)
- Results.push_back(Chain);
- break;
- }
case ISD::READCYCLECOUNTER: {
assert(!Subtarget.is64Bit() &&
"READCYCLECOUNTER only has custom type legalization on riscv32");
@@ -968,8 +884,8 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
SDValue RCW =
DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
- Results.push_back(
- DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
+ Results.push_back(RCW);
+ Results.push_back(RCW.getValue(1));
Results.push_back(RCW.getValue(2));
break;
}
@@ -1256,13 +1172,13 @@ static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
Register HiReg = MI.getOperand(1).getReg();
Register SrcReg = MI.getOperand(2).getReg();
const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
- int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
+ int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();
TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
RI);
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
- MachineMemOperand::MOLoad, 8, Align(8));
+ MachineMemOperand::MOLoad, 8, 8);
BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
.addFrameIndex(FI)
.addImm(0)
@@ -1288,11 +1204,11 @@ static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
Register LoReg = MI.getOperand(1).getReg();
Register HiReg = MI.getOperand(2).getReg();
const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
- int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
+ int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
- MachineMemOperand::MOStore, 8, Align(8));
+ MachineMemOperand::MOStore, 8, 8);
BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
.addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
.addFrameIndex(FI)
@@ -1514,15 +1430,14 @@ static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
VA1.getLocVT(), CCValAssign::Full));
} else {
// Both halves must be passed on the stack, with proper alignment.
- Align StackAlign =
- std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
+ unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign());
State.addLoc(
CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
State.AllocateStack(XLenInBytes, StackAlign),
VA1.getLocVT(), CCValAssign::Full));
State.addLoc(CCValAssign::getMem(
- ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
- LocVT2, CCValAssign::Full));
+ ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
+ CCValAssign::Full));
return false;
}
@@ -1533,8 +1448,8 @@ static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
} else {
// The second half is passed via the stack, without additional alignment.
State.addLoc(CCValAssign::getMem(
- ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
- LocVT2, CCValAssign::Full));
+ ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
+ CCValAssign::Full));
}
return false;
@@ -1602,7 +1517,7 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
// original type is larger than 2*XLEN, so the register alignment rule does
// not apply.
unsigned TwoXLenInBytes = (2 * XLen) / 8;
- if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
+ if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes &&
DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
// Skip 'odd' register if necessary.
@@ -1629,13 +1544,13 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
Register Reg = State.AllocateReg(ArgGPRs);
LocVT = MVT::i32;
if (!Reg) {
- unsigned StackOffset = State.AllocateStack(8, Align(8));
+ unsigned StackOffset = State.AllocateStack(8, 8);
State.addLoc(
CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
return false;
}
if (!State.AllocateReg(ArgGPRs))
- State.AllocateStack(4, Align(4));
+ State.AllocateStack(4, 4);
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
return false;
}
@@ -1675,8 +1590,7 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
Reg = State.AllocateReg(ArgFPR64s, ArgFPR32s);
else
Reg = State.AllocateReg(ArgGPRs);
- unsigned StackOffset =
- Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
+ unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8);
// If we reach this point and PendingLocs is non-empty, we must be at the
// end of a split argument that must be passed indirectly.
@@ -1731,7 +1645,7 @@ void RISCVTargetLowering::analyzeInputArgs(
RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
- ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
+ ArgFlags, CCInfo, /*IsRet=*/true, IsRet, ArgTy)) {
LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
<< EVT(ArgVT).getEVTString() << '\n');
llvm_unreachable(nullptr);
@@ -1945,13 +1859,13 @@ static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
}
if (LocVT == MVT::i32 || LocVT == MVT::f32) {
- unsigned Offset4 = State.AllocateStack(4, Align(4));
+ unsigned Offset4 = State.AllocateStack(4, 4);
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
return false;
}
if (LocVT == MVT::i64 || LocVT == MVT::f64) {
- unsigned Offset5 = State.AllocateStack(8, Align(8));
+ unsigned Offset5 = State.AllocateStack(8, 8);
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
return false;
}
@@ -2210,7 +2124,7 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
if (IsTailCall)
++NumTailCalls;
- else if (CLI.CB && CLI.CB->isMustTailCall())
+ else if (CLI.CS && CLI.CS.isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
"site marked musttail");
@@ -2226,17 +2140,17 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
SDValue Arg = OutVals[i];
unsigned Size = Flags.getByValSize();
- Align Alignment = Flags.getNonZeroByValAlign();
+ unsigned Align = Flags.getByValAlign();
- int FI =
- MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
+ int FI = MF.getFrameInfo().CreateStackObject(Size, Align, /*isSS=*/false);
SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
- Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
+ Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Align,
/*IsVolatile=*/false,
- /*AlwaysInline=*/false, IsTailCall,
- MachinePointerInfo(), MachinePointerInfo());
+ /*AlwaysInline=*/false,
+ IsTailCall, MachinePointerInfo(),
+ MachinePointerInfo());
ByValArgs.push_back(FIPtr);
}
@@ -2411,7 +2325,6 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
}
Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
- DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
Glue = Chain.getValue(1);
// Mark the end of the call, which is glued to the call itself.
@@ -2581,10 +2494,6 @@ void RISCVTargetLowering::validateCCReservedRegs(
F, "Argument register required, but has been reserved."});
}
-bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
- return CI->isTailCall();
-}
-
const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch ((RISCVISD::NodeType)Opcode) {
case RISCVISD::FIRST_NUMBER:
@@ -2974,12 +2883,12 @@ Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
return Result;
}
-Register RISCVTargetLowering::getExceptionPointerRegister(
+unsigned RISCVTargetLowering::getExceptionPointerRegister(
const Constant *PersonalityFn) const {
return RISCV::X10;
}
-Register RISCVTargetLowering::getExceptionSelectorRegister(
+unsigned RISCVTargetLowering::getExceptionSelectorRegister(
const Constant *PersonalityFn) const {
return RISCV::X11;
}
@@ -2994,26 +2903,6 @@ bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
return true;
}
-bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
- SDValue C) const {
- // Check integral scalar types.
- if (VT.isScalarInteger()) {
- // Do not perform the transformation on riscv32 with the M extension.
- if (!Subtarget.is64Bit() && Subtarget.hasStdExtM())
- return false;
- if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
- if (ConstNode->getAPIntValue().getBitWidth() > 8 * sizeof(int64_t))
- return false;
- int64_t Imm = ConstNode->getSExtValue();
- if (isPowerOf2_64(Imm + 1) || isPowerOf2_64(Imm - 1) ||
- isPowerOf2_64(1 - Imm) || isPowerOf2_64(-1 - Imm))
- return true;
- }
- }
-
- return false;
-}
-
#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"