Diffstat (limited to 'gnu/llvm/lib')
-rw-r--r--   gnu/llvm/lib/CodeGen/CMakeLists.txt            |   87
-rw-r--r--   gnu/llvm/lib/CodeGen/PrologEpilogInserter.cpp  |  836
-rw-r--r--   gnu/llvm/lib/Target/X86/X86FrameLowering.cpp   | 1123
-rw-r--r--   gnu/llvm/lib/Target/X86/X86FrameLowering.h     |   82
-rw-r--r--   gnu/llvm/lib/Target/X86/X86InstrCompiler.td    |  653
5 files changed, 1111 insertions(+), 1670 deletions(-)
diff --git a/gnu/llvm/lib/CodeGen/CMakeLists.txt b/gnu/llvm/lib/CodeGen/CMakeLists.txt
index 865de4f47af..a078c3c707a 100644
--- a/gnu/llvm/lib/CodeGen/CMakeLists.txt
+++ b/gnu/llvm/lib/CodeGen/CMakeLists.txt
@@ -1,3 +1,8 @@
+set(system_libs)
+if(CMAKE_HOST_UNIX AND LLVM_ENABLE_THREADS AND HAVE_LIBPTHREAD)
+ set(system_libs ${system_libs} pthread)
+endif()
+
add_llvm_library(LLVMCodeGen
AggressiveAntiDepBreaker.cpp
AllocationOrder.cpp
@@ -5,26 +10,23 @@ add_llvm_library(LLVMCodeGen
AtomicExpandPass.cpp
BasicTargetTransformInfo.cpp
BranchFolding.cpp
- BranchRelaxation.cpp
- BuiltinGCs.cpp
CalcSpillWeights.cpp
CallingConvLower.cpp
CodeGen.cpp
CodeGenPrepare.cpp
+ CoreCLRGC.cpp
CriticalAntiDepBreaker.cpp
- DeadMachineInstructionElim.cpp
- DetectDeadLanes.cpp
DFAPacketizer.cpp
+ DeadMachineInstructionElim.cpp
DwarfEHPrepare.cpp
EarlyIfConversion.cpp
EdgeBundles.cpp
+ ErlangGC.cpp
ExecutionDepsFix.cpp
ExpandISelPseudos.cpp
- ExpandMemCmp.cpp
ExpandPostRAPseudos.cpp
- ExpandReductions.cpp
+ LiveDebugValues.cpp
FaultMaps.cpp
- FEntryInserter.cpp
FuncletLayout.cpp
GCMetadata.cpp
GCMetadataPrinter.cpp
@@ -33,75 +35,62 @@ add_llvm_library(LLVMCodeGen
GlobalMerge.cpp
IfConversion.cpp
ImplicitNullChecks.cpp
- IndirectBrExpandPass.cpp
InlineSpiller.cpp
InterferenceCache.cpp
InterleavedAccessPass.cpp
IntrinsicLowering.cpp
+ LLVMTargetMachine.cpp
LatencyPriorityQueue.cpp
- LazyMachineBlockFrequencyInfo.cpp
LexicalScopes.cpp
- LiveDebugValues.cpp
LiveDebugVariables.cpp
- LiveIntervals.cpp
LiveInterval.cpp
+ LiveIntervalAnalysis.cpp
LiveIntervalUnion.cpp
- LivePhysRegs.cpp
LiveRangeCalc.cpp
LiveRangeEdit.cpp
- LiveRangeShrink.cpp
LiveRegMatrix.cpp
- LiveRegUnits.cpp
- LiveStacks.cpp
+ LivePhysRegs.cpp
+ LiveStackAnalysis.cpp
LiveVariables.cpp
- LLVMTargetMachine.cpp
LocalStackSlotAllocation.cpp
- LowLevelType.cpp
- LowerEmuTLS.cpp
MachineBasicBlock.cpp
MachineBlockFrequencyInfo.cpp
MachineBlockPlacement.cpp
MachineBranchProbabilityInfo.cpp
+ MachineCSE.cpp
MachineCombiner.cpp
MachineCopyPropagation.cpp
- MachineCSE.cpp
- MachineDominanceFrontier.cpp
MachineDominators.cpp
- MachineFrameInfo.cpp
+ MachineDominanceFrontier.cpp
MachineFunction.cpp
+ MachineFunctionAnalysis.cpp
MachineFunctionPass.cpp
MachineFunctionPrinterPass.cpp
- MachineInstrBundle.cpp
MachineInstr.cpp
+ MachineInstrBundle.cpp
MachineLICM.cpp
MachineLoopInfo.cpp
MachineModuleInfo.cpp
MachineModuleInfoImpls.cpp
- MachineOperand.cpp
- MachineOptimizationRemarkEmitter.cpp
- MachineOutliner.cpp
MachinePassRegistry.cpp
- MachinePipeliner.cpp
MachinePostDominators.cpp
- MachineRegionInfo.cpp
MachineRegisterInfo.cpp
+ MachineRegionInfo.cpp
+ MachineSSAUpdater.cpp
MachineScheduler.cpp
MachineSink.cpp
- MachineSSAUpdater.cpp
MachineTraceMetrics.cpp
MachineVerifier.cpp
- PatchableFunction.cpp
MIRPrinter.cpp
MIRPrintingPass.cpp
- MacroFusion.cpp
+ OcamlGC.cpp
OptimizePHIs.cpp
- ParallelCG.cpp
- PeepholeOptimizer.cpp
PHIElimination.cpp
PHIEliminationUtils.cpp
- PostRAHazardRecognizer.cpp
+ ParallelCG.cpp
+ Passes.cpp
+ PeepholeOptimizer.cpp
PostRASchedulerList.cpp
- PreISelIntrinsicLowering.cpp
ProcessImplicitDefs.cpp
PrologEpilogInserter.cpp
PseudoSourceValue.cpp
@@ -114,59 +103,45 @@ add_llvm_library(LLVMCodeGen
RegisterCoalescer.cpp
RegisterPressure.cpp
RegisterScavenging.cpp
- RenameIndependentSubregs.cpp
- MIRCanonicalizerPass.cpp
- RegisterUsageInfo.cpp
- RegUsageInfoCollector.cpp
- RegUsageInfoPropagate.cpp
- ResetMachineFunctionPass.cpp
- SafeStack.cpp
- SafeStackColoring.cpp
- SafeStackLayout.cpp
- ScalarizeMaskedMemIntrin.cpp
ScheduleDAG.cpp
ScheduleDAGInstrs.cpp
ScheduleDAGPrinter.cpp
ScoreboardHazardRecognizer.cpp
- ShadowStackGCLowering.cpp
ShrinkWrap.cpp
+ ShadowStackGC.cpp
+ ShadowStackGCLowering.cpp
SjLjEHPrepare.cpp
SlotIndexes.cpp
SpillPlacement.cpp
SplitKit.cpp
StackColoring.cpp
- StackMapLivenessAnalysis.cpp
- StackMaps.cpp
StackProtector.cpp
StackSlotColoring.cpp
+ StackMapLivenessAnalysis.cpp
+ StackMaps.cpp
+ StatepointExampleGC.cpp
TailDuplication.cpp
- TailDuplicator.cpp
TargetFrameLoweringImpl.cpp
TargetInstrInfo.cpp
TargetLoweringBase.cpp
TargetLoweringObjectFileImpl.cpp
TargetOptionsImpl.cpp
- TargetPassConfig.cpp
TargetRegisterInfo.cpp
TargetSchedule.cpp
- TargetSubtargetInfo.cpp
TwoAddressInstructionPass.cpp
UnreachableBlockElim.cpp
VirtRegMap.cpp
WinEHPrepare.cpp
- XRayInstrumentation.cpp
ADDITIONAL_HEADER_DIRS
${LLVM_MAIN_INCLUDE_DIR}/llvm/CodeGen
${LLVM_MAIN_INCLUDE_DIR}/llvm/CodeGen/PBQP
- LINK_LIBS ${LLVM_PTHREAD_LIB}
-
- DEPENDS
- intrinsics_gen
+ LINK_LIBS ${system_libs}
)
+add_dependencies(LLVMCodeGen intrinsics_gen)
+
add_subdirectory(SelectionDAG)
add_subdirectory(AsmPrinter)
add_subdirectory(MIRParser)
-add_subdirectory(GlobalISel)
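
Note on the CMakeLists.txt hunk above: it replaces LINK_LIBS ${LLVM_PTHREAD_LIB} with a locally computed system_libs list that adds pthread only when the host is Unix, LLVM_ENABLE_THREADS is on, and HAVE_LIBPTHREAD was detected. A minimal standalone sketch of that conditional-link pattern follows; the DEMO_* names, the demo_codegen target, and the demo.cpp source are placeholders for illustration, not part of the patch.

    cmake_minimum_required(VERSION 3.5)
    project(conditional_pthread_demo CXX)

    # Stand-ins for LLVM_ENABLE_THREADS and HAVE_LIBPTHREAD.
    option(DEMO_ENABLE_THREADS "Build with thread support" ON)
    include(CheckLibraryExists)
    check_library_exists(pthread pthread_create "" DEMO_HAVE_LIBPTHREAD)

    # Accumulate system libraries the same way the patch does for LLVMCodeGen.
    set(system_libs)
    if(CMAKE_HOST_UNIX AND DEMO_ENABLE_THREADS AND DEMO_HAVE_LIBPTHREAD)
      set(system_libs ${system_libs} pthread)
    endif()

    add_library(demo_codegen STATIC demo.cpp)   # assumes a demo.cpp source file
    target_link_libraries(demo_codegen ${system_libs})
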
diff --git a/gnu/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/gnu/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index a8d8ad8ac7d..939c50027b0 100644
--- a/gnu/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/gnu/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -1,4 +1,4 @@
-//===- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function ---===//
+//===-- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function --===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,72 +16,43 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
-#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/OptimizationRemarkEmitter.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/MachineOperand.h"
-#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/StackProtector.h"
-#include "llvm/CodeGen/TargetFrameLowering.h"
-#include "llvm/CodeGen/TargetInstrInfo.h"
-#include "llvm/CodeGen/TargetOpcodes.h"
-#include "llvm/CodeGen/TargetRegisterInfo.h"
-#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
-#include "llvm/IR/Attributes.h"
-#include "llvm/IR/CallingConv.h"
-#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
-#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include <algorithm>
-#include <cassert>
-#include <cstdint>
-#include <functional>
-#include <limits>
-#include <utility>
-#include <vector>
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include <climits>
using namespace llvm;
-#define DEBUG_TYPE "prologepilog"
-
-using MBBVector = SmallVector<MachineBasicBlock *, 4>;
+#define DEBUG_TYPE "pei"
namespace {
-
class PEI : public MachineFunctionPass {
public:
static char ID;
-
PEI() : MachineFunctionPass(ID) {
initializePEIPass(*PassRegistry::getPassRegistry());
}
@@ -90,6 +61,7 @@ public:
/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
+ ///
bool runOnMachineFunction(MachineFunction &Fn) override;
private:
@@ -97,41 +69,33 @@ private:
// MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved
// stack frame indexes.
- unsigned MinCSFrameIndex = std::numeric_limits<unsigned>::max();
- unsigned MaxCSFrameIndex = 0;
+ unsigned MinCSFrameIndex, MaxCSFrameIndex;
// Save and Restore blocks of the current function. Typically there is a
// single save block, unless Windows EH funclets are involved.
- MBBVector SaveBlocks;
- MBBVector RestoreBlocks;
+ SmallVector<MachineBasicBlock *, 1> SaveBlocks;
+ SmallVector<MachineBasicBlock *, 4> RestoreBlocks;
// Flag to control whether to use the register scavenger to resolve
// frame index materialization registers. Set according to
// TRI->requiresFrameIndexScavenging() for the current function.
bool FrameIndexVirtualScavenging;
- // Flag to control whether the scavenger should be passed even though
- // FrameIndexVirtualScavenging is used.
- bool FrameIndexEliminationScavenging;
-
- // Emit remarks.
- MachineOptimizationRemarkEmitter *ORE = nullptr;
-
- void calculateCallFrameInfo(MachineFunction &Fn);
- void calculateSaveRestoreBlocks(MachineFunction &Fn);
- void spillCalleeSavedRegs(MachineFunction &MF);
-
+ void calculateSets(MachineFunction &Fn);
+ void calculateCallsInformation(MachineFunction &Fn);
+ void assignCalleeSavedSpillSlots(MachineFunction &Fn,
+ const BitVector &SavedRegs);
+ void insertCSRSpillsAndRestores(MachineFunction &Fn);
void calculateFrameObjectOffsets(MachineFunction &Fn);
void replaceFrameIndices(MachineFunction &Fn);
void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
int &SPAdj);
+ void scavengeFrameVirtualRegs(MachineFunction &Fn);
void insertPrologEpilogCode(MachineFunction &Fn);
};
-
-} // end anonymous namespace
+} // namespace
char PEI::ID = 0;
-
char &llvm::PrologEpilogCodeInserterID = PEI::ID;
static cl::opt<unsigned>
@@ -139,20 +103,17 @@ WarnStackSize("warn-stack-size", cl::Hidden, cl::init((unsigned)-1),
cl::desc("Warn for stack size bigger than the given"
" number"));
-INITIALIZE_PASS_BEGIN(PEI, DEBUG_TYPE, "Prologue/Epilogue Insertion", false,
- false)
+INITIALIZE_PASS_BEGIN(PEI, "prologepilog",
+ "Prologue/Epilogue Insertion", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
-INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
-INITIALIZE_PASS_END(PEI, DEBUG_TYPE,
- "Prologue/Epilogue Insertion & Frame Finalization", false,
- false)
-
-MachineFunctionPass *llvm::createPrologEpilogInserterPass() {
- return new PEI();
-}
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_END(PEI, "prologepilog",
+ "Prologue/Epilogue Insertion & Frame Finalization",
+ false, false)
+STATISTIC(NumScavengedRegs, "Number of frame index regs scavenged");
STATISTIC(NumBytesStackSpace,
"Number of bytes used for stack in all functions");
@@ -161,38 +122,76 @@ void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<MachineLoopInfo>();
AU.addPreserved<MachineDominatorTree>();
AU.addRequired<StackProtector>();
- AU.addRequired<MachineOptimizationRemarkEmitterPass>();
+ AU.addRequired<TargetPassConfig>();
MachineFunctionPass::getAnalysisUsage(AU);
}
+/// Compute the set of return blocks
+void PEI::calculateSets(MachineFunction &Fn) {
+ const MachineFrameInfo *MFI = Fn.getFrameInfo();
+
+ // Even when we do not change any CSR, we still want to insert the
+ // prologue and epilogue of the function.
+ // So set the save points for those.
+
+ // Use the points found by shrink-wrapping, if any.
+ if (MFI->getSavePoint()) {
+ SaveBlocks.push_back(MFI->getSavePoint());
+ assert(MFI->getRestorePoint() && "Both restore and save must be set");
+ MachineBasicBlock *RestoreBlock = MFI->getRestorePoint();
+ // If RestoreBlock does not have any successor and is not a return block
+ // then the end point is unreachable and we do not need to insert any
+ // epilogue.
+ if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
+ RestoreBlocks.push_back(RestoreBlock);
+ return;
+ }
+
+ // Save refs to entry and return blocks.
+ SaveBlocks.push_back(&Fn.front());
+ for (MachineBasicBlock &MBB : Fn) {
+ if (MBB.isEHFuncletEntry())
+ SaveBlocks.push_back(&MBB);
+ if (MBB.isReturnBlock())
+ RestoreBlocks.push_back(&MBB);
+ }
+}
+
/// StackObjSet - A set of stack object indexes
-using StackObjSet = SmallSetVector<int, 8>;
+typedef SmallSetVector<int, 8> StackObjSet;
/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
+///
bool PEI::runOnMachineFunction(MachineFunction &Fn) {
- const Function &F = Fn.getFunction();
+ const Function* F = Fn.getFunction();
const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
+ assert(!Fn.getRegInfo().getNumVirtRegs() && "Regalloc must assign all vregs");
+
RS = TRI->requiresRegisterScavenging(Fn) ? new RegScavenger() : nullptr;
FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(Fn);
- FrameIndexEliminationScavenging = (RS && !FrameIndexVirtualScavenging) ||
- TRI->requiresFrameIndexReplacementScavenging(Fn);
- ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();
// Calculate the MaxCallFrameSize and AdjustsStack variables for the
// function's frame information. Also eliminates call frame pseudo
// instructions.
- calculateCallFrameInfo(Fn);
+ calculateCallsInformation(Fn);
- // Determine placement of CSR spill/restore code and prolog/epilog code:
+ // Determine which of the registers in the callee save list should be saved.
+ BitVector SavedRegs;
+ TFI->determineCalleeSaves(Fn, SavedRegs, RS);
+
+ // Insert spill code for any callee saved registers that are modified.
+ assignCalleeSavedSpillSlots(Fn, SavedRegs);
+
+ // Determine placement of CSR spill/restore code:
// place all spills in the entry block, all restores in return blocks.
- calculateSaveRestoreBlocks(Fn);
+ calculateSets(Fn);
- // Handle CSR spilling and restoring, for targets that need it.
- if (Fn.getTarget().usesPhysRegsForPEI())
- spillCalleeSavedRegs(Fn);
+ // Add the code to save and restore the callee saved registers.
+ if (!F->hasFnAttribute(Attribute::Naked))
+ insertCSRSpillsAndRestores(Fn);
// Allow the target machine to make final modifications to the function
// before the frame layout is finalized.
@@ -206,7 +205,7 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
// called functions. Because of this, calculateCalleeSavedRegisters()
// must be called before this function in order to set the AdjustsStack
// and MaxCallFrameSize variables.
- if (!F.hasFnAttribute(Attribute::Naked))
+ if (!F->hasFnAttribute(Attribute::Naked))
insertPrologEpilogCode(Fn);
// Replace all MO_FrameIndex operands with physical register references
@@ -218,34 +217,35 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
// post-pass, scavenge the virtual registers that frame index elimination
// inserted.
if (TRI->requiresRegisterScavenging(Fn) && FrameIndexVirtualScavenging)
- scavengeFrameVirtualRegs(Fn, *RS);
+ scavengeFrameVirtualRegs(Fn);
+
+ // Clear any vregs created by virtual scavenging.
+ Fn.getRegInfo().clearVirtRegs();
// Warn on stack size when we exceeds the given limit.
- MachineFrameInfo &MFI = Fn.getFrameInfo();
- uint64_t StackSize = MFI.getStackSize();
+ MachineFrameInfo *MFI = Fn.getFrameInfo();
+ uint64_t StackSize = MFI->getStackSize();
if (WarnStackSize.getNumOccurrences() > 0 && WarnStackSize < StackSize) {
- DiagnosticInfoStackSize DiagStackSize(F, StackSize);
- F.getContext().diagnose(DiagStackSize);
+ DiagnosticInfoStackSize DiagStackSize(*F, StackSize);
+ F->getContext().diagnose(DiagStackSize);
}
delete RS;
SaveBlocks.clear();
RestoreBlocks.clear();
- MFI.setSavePoint(nullptr);
- MFI.setRestorePoint(nullptr);
return true;
}
-/// Calculate the MaxCallFrameSize and AdjustsStack
+/// calculateCallsInformation - Calculate the MaxCallFrameSize and AdjustsStack
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
-void PEI::calculateCallFrameInfo(MachineFunction &Fn) {
+void PEI::calculateCallsInformation(MachineFunction &Fn) {
const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
- MachineFrameInfo &MFI = Fn.getFrameInfo();
+ MachineFrameInfo *MFI = Fn.getFrameInfo();
unsigned MaxCallFrameSize = 0;
- bool AdjustsStack = MFI.adjustsStack();
+ bool AdjustsStack = MFI->adjustsStack();
// Get the function call frame set-up and tear-down instruction opcode
unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
@@ -259,8 +259,11 @@ void PEI::calculateCallFrameInfo(MachineFunction &Fn) {
std::vector<MachineBasicBlock::iterator> FrameSDOps;
for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB)
for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
- if (TII.isFrameInstr(*I)) {
- unsigned Size = TII.getFrameSize(*I);
+ if (I->getOpcode() == FrameSetupOpcode ||
+ I->getOpcode() == FrameDestroyOpcode) {
+ assert(I->getNumOperands() >= 1 && "Call Frame Setup/Destroy Pseudo"
+ " instructions should have a single immediate argument!");
+ unsigned Size = I->getOperand(0).getImm();
if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
AdjustsStack = true;
FrameSDOps.push_back(I);
@@ -271,11 +274,8 @@ void PEI::calculateCallFrameInfo(MachineFunction &Fn) {
AdjustsStack = true;
}
- assert(!MFI.isMaxCallFrameSizeComputed() ||
- (MFI.getMaxCallFrameSize() == MaxCallFrameSize &&
- MFI.adjustsStack() == AdjustsStack));
- MFI.setAdjustsStack(AdjustsStack);
- MFI.setMaxCallFrameSize(MaxCallFrameSize);
+ MFI->setAdjustsStack(AdjustsStack);
+ MFI->setMaxCallFrameSize(MaxCallFrameSize);
for (std::vector<MachineBasicBlock::iterator>::iterator
i = FrameSDOps.begin(), e = FrameSDOps.end(); i != e; ++i) {
@@ -290,47 +290,17 @@ void PEI::calculateCallFrameInfo(MachineFunction &Fn) {
}
}
-/// Compute the sets of entry and return blocks for saving and restoring
-/// callee-saved registers, and placing prolog and epilog code.
-void PEI::calculateSaveRestoreBlocks(MachineFunction &Fn) {
- const MachineFrameInfo &MFI = Fn.getFrameInfo();
-
- // Even when we do not change any CSR, we still want to insert the
- // prologue and epilogue of the function.
- // So set the save points for those.
-
- // Use the points found by shrink-wrapping, if any.
- if (MFI.getSavePoint()) {
- SaveBlocks.push_back(MFI.getSavePoint());
- assert(MFI.getRestorePoint() && "Both restore and save must be set");
- MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
- // If RestoreBlock does not have any successor and is not a return block
- // then the end point is unreachable and we do not need to insert any
- // epilogue.
- if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
- RestoreBlocks.push_back(RestoreBlock);
- return;
- }
-
- // Save refs to entry and return blocks.
- SaveBlocks.push_back(&Fn.front());
- for (MachineBasicBlock &MBB : Fn) {
- if (MBB.isEHFuncletEntry())
- SaveBlocks.push_back(&MBB);
- if (MBB.isReturnBlock())
- RestoreBlocks.push_back(&MBB);
- }
-}
+void PEI::assignCalleeSavedSpillSlots(MachineFunction &F,
+ const BitVector &SavedRegs) {
+ // These are used to keep track the callee-save area. Initialize them.
+ MinCSFrameIndex = INT_MAX;
+ MaxCSFrameIndex = 0;
-static void assignCalleeSavedSpillSlots(MachineFunction &F,
- const BitVector &SavedRegs,
- unsigned &MinCSFrameIndex,
- unsigned &MaxCSFrameIndex) {
if (SavedRegs.empty())
return;
const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
- const MCPhysReg *CSRegs = F.getRegInfo().getCalleeSavedRegs();
+ const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&F);
std::vector<CalleeSavedInfo> CSI;
for (unsigned i = 0; CSRegs[i]; ++i) {
@@ -340,7 +310,7 @@ static void assignCalleeSavedSpillSlots(MachineFunction &F,
}
const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
- MachineFrameInfo &MFI = F.getFrameInfo();
+ MachineFrameInfo *MFI = F.getFrameInfo();
if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI)) {
// If target doesn't implement this, use generic code.
@@ -353,13 +323,14 @@ static void assignCalleeSavedSpillSlots(MachineFunction &F,
// Now that we know which registers need to be saved and restored, allocate
// stack slots for them.
- for (auto &CS : CSI) {
- unsigned Reg = CS.getReg();
+ for (std::vector<CalleeSavedInfo>::iterator I = CSI.begin(), E = CSI.end();
+ I != E; ++I) {
+ unsigned Reg = I->getReg();
const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
int FrameIdx;
if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
- CS.setFrameIdx(FrameIdx);
+ I->setFrameIdx(FrameIdx);
continue;
}
@@ -370,35 +341,35 @@ static void assignCalleeSavedSpillSlots(MachineFunction &F,
FixedSlot->Reg != Reg)
++FixedSlot;
- unsigned Size = RegInfo->getSpillSize(*RC);
if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
// Nope, just spill it anywhere convenient.
- unsigned Align = RegInfo->getSpillAlignment(*RC);
+ unsigned Align = RC->getAlignment();
unsigned StackAlign = TFI->getStackAlignment();
// We may not be able to satisfy the desired alignment specification of
// the TargetRegisterClass if the stack alignment is smaller. Use the
// min.
Align = std::min(Align, StackAlign);
- FrameIdx = MFI.CreateStackObject(Size, Align, true);
+ FrameIdx = MFI->CreateStackObject(RC->getSize(), Align, true);
if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
} else {
// Spill it to the stack where we must.
- FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
+ FrameIdx =
+ MFI->CreateFixedSpillStackObject(RC->getSize(), FixedSlot->Offset);
}
- CS.setFrameIdx(FrameIdx);
+ I->setFrameIdx(FrameIdx);
}
}
- MFI.setCalleeSavedInfo(CSI);
+ MFI->setCalleeSavedInfo(CSI);
}
/// Helper function to update the liveness information for the callee-saved
/// registers.
static void updateLiveness(MachineFunction &MF) {
- MachineFrameInfo &MFI = MF.getFrameInfo();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
// Visited will contain all the basic blocks that are in the region
// where the callee saved registers are alive:
// - Anything that is not Save or Restore -> LiveThrough.
@@ -409,7 +380,7 @@ static void updateLiveness(MachineFunction &MF) {
SmallPtrSet<MachineBasicBlock *, 8> Visited;
SmallVector<MachineBasicBlock *, 8> WorkList;
MachineBasicBlock *Entry = &MF.front();
- MachineBasicBlock *Save = MFI.getSavePoint();
+ MachineBasicBlock *Save = MFI->getSavePoint();
if (!Save)
Save = Entry;
@@ -420,7 +391,7 @@ static void updateLiveness(MachineFunction &MF) {
}
Visited.insert(Save);
- MachineBasicBlock *Restore = MFI.getRestorePoint();
+ MachineBasicBlock *Restore = MFI->getRestorePoint();
if (Restore)
// By construction Restore cannot be visited, otherwise it
// means there exists a path to Restore that does not go
@@ -440,239 +411,125 @@ static void updateLiveness(MachineFunction &MF) {
WorkList.push_back(SuccBB);
}
- const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- MachineRegisterInfo &MRI = MF.getRegInfo();
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
for (MachineBasicBlock *MBB : Visited) {
MCPhysReg Reg = CSI[i].getReg();
// Add the callee-saved register as live-in.
// It's killed at the spill.
- if (!MRI.isReserved(Reg) && !MBB->isLiveIn(Reg))
+ if (!MBB->isLiveIn(Reg))
MBB->addLiveIn(Reg);
}
}
}
-/// Insert restore code for the callee-saved registers used in the function.
-static void insertCSRSaves(MachineBasicBlock &SaveBlock,
- ArrayRef<CalleeSavedInfo> CSI) {
- MachineFunction &Fn = *SaveBlock.getParent();
- const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
- const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
- const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
+/// insertCSRSpillsAndRestores - Insert spill and restore code for
+/// callee saved registers used in the function.
+///
+void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
+ // Get callee saved register information.
+ MachineFrameInfo *MFI = Fn.getFrameInfo();
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- MachineBasicBlock::iterator I = SaveBlock.begin();
- if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
- for (const CalleeSavedInfo &CS : CSI) {
- // Insert the spill to the stack frame.
- unsigned Reg = CS.getReg();
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.storeRegToStackSlot(SaveBlock, I, Reg, true, CS.getFrameIdx(), RC,
- TRI);
- }
- }
-}
+ MFI->setCalleeSavedInfoValid(true);
+
+ // Early exit if no callee saved registers are modified!
+ if (CSI.empty())
+ return;
-/// Insert restore code for the callee-saved registers used in the function.
-static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
- std::vector<CalleeSavedInfo> &CSI) {
- MachineFunction &Fn = *RestoreBlock.getParent();
const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
-
- // Restore all registers immediately before the return and any
- // terminators that precede it.
- MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator();
-
- if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
- for (const CalleeSavedInfo &CI : reverse(CSI)) {
- unsigned Reg = CI.getReg();
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC, TRI);
- assert(I != RestoreBlock.begin() &&
- "loadRegFromStackSlot didn't insert any code!");
- // Insert in reverse order. loadRegFromStackSlot can insert
- // multiple instructions.
+ MachineBasicBlock::iterator I;
+
+ // Spill using target interface.
+ for (MachineBasicBlock *SaveBlock : SaveBlocks) {
+ I = SaveBlock->begin();
+ if (!TFI->spillCalleeSavedRegisters(*SaveBlock, I, CSI, TRI)) {
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ // Insert the spill to the stack frame.
+ unsigned Reg = CSI[i].getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.storeRegToStackSlot(*SaveBlock, I, Reg, true, CSI[i].getFrameIdx(),
+ RC, TRI);
+ }
}
+ // Update the live-in information of all the blocks up to the save point.
+ updateLiveness(Fn);
}
-}
-
-void PEI::spillCalleeSavedRegs(MachineFunction &Fn) {
- // We can't list this requirement in getRequiredProperties because some
- // targets (WebAssembly) use virtual registers past this point, and the pass
- // pipeline is set up without giving the passes a chance to look at the
- // TargetMachine.
- // FIXME: Find a way to express this in getRequiredProperties.
- assert(Fn.getProperties().hasProperty(
- MachineFunctionProperties::Property::NoVRegs));
-
- const Function &F = Fn.getFunction();
- const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
- MachineFrameInfo &MFI = Fn.getFrameInfo();
- MinCSFrameIndex = std::numeric_limits<unsigned>::max();
- MaxCSFrameIndex = 0;
-
- // Determine which of the registers in the callee save list should be saved.
- BitVector SavedRegs;
- TFI->determineCalleeSaves(Fn, SavedRegs, RS);
- // Assign stack slots for any callee-saved registers that must be spilled.
- assignCalleeSavedSpillSlots(Fn, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);
-
- // Add the code to save and restore the callee saved registers.
- if (!F.hasFnAttribute(Attribute::Naked)) {
- MFI.setCalleeSavedInfoValid(true);
-
- std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
- if (!CSI.empty()) {
- for (MachineBasicBlock *SaveBlock : SaveBlocks) {
- insertCSRSaves(*SaveBlock, CSI);
- // Update the live-in information of all the blocks up to the save
- // point.
- updateLiveness(Fn);
+ // Restore using target interface.
+ for (MachineBasicBlock *MBB : RestoreBlocks) {
+ I = MBB->end();
+
+ // Skip over all terminator instructions, which are part of the return
+ // sequence.
+ MachineBasicBlock::iterator I2 = I;
+ while (I2 != MBB->begin() && (--I2)->isTerminator())
+ I = I2;
+
+ bool AtStart = I == MBB->begin();
+ MachineBasicBlock::iterator BeforeI = I;
+ if (!AtStart)
+ --BeforeI;
+
+ // Restore all registers immediately before the return and any
+ // terminators that precede it.
+ if (!TFI->restoreCalleeSavedRegisters(*MBB, I, CSI, TRI)) {
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.loadRegFromStackSlot(*MBB, I, Reg, CSI[i].getFrameIdx(), RC, TRI);
+ assert(I != MBB->begin() &&
+ "loadRegFromStackSlot didn't insert any code!");
+ // Insert in reverse order. loadRegFromStackSlot can insert
+ // multiple instructions.
+ if (AtStart)
+ I = MBB->begin();
+ else {
+ I = BeforeI;
+ ++I;
+ }
}
- for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
- insertCSRRestores(*RestoreBlock, CSI);
}
}
}
/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
static inline void
-AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
+AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx,
bool StackGrowsDown, int64_t &Offset,
unsigned &MaxAlign, unsigned Skew) {
// If the stack grows down, add the object size to find the lowest address.
if (StackGrowsDown)
- Offset += MFI.getObjectSize(FrameIdx);
+ Offset += MFI->getObjectSize(FrameIdx);
- unsigned Align = MFI.getObjectAlignment(FrameIdx);
+ unsigned Align = MFI->getObjectAlignment(FrameIdx);
// If the alignment of this object is greater than that of the stack, then
// increase the stack alignment to match.
MaxAlign = std::max(MaxAlign, Align);
// Adjust to alignment boundary.
- Offset = alignTo(Offset, Align, Skew);
+ Offset = RoundUpToAlignment(Offset, Align, Skew);
if (StackGrowsDown) {
DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
- MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
+ MFI->setObjectOffset(FrameIdx, -Offset); // Set the computed offset
} else {
DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset << "]\n");
- MFI.setObjectOffset(FrameIdx, Offset);
- Offset += MFI.getObjectSize(FrameIdx);
- }
-}
-
-/// Compute which bytes of fixed and callee-save stack area are unused and keep
-/// track of them in StackBytesFree.
-static inline void
-computeFreeStackSlots(MachineFrameInfo &MFI, bool StackGrowsDown,
- unsigned MinCSFrameIndex, unsigned MaxCSFrameIndex,
- int64_t FixedCSEnd, BitVector &StackBytesFree) {
- // Avoid undefined int64_t -> int conversion below in extreme case.
- if (FixedCSEnd > std::numeric_limits<int>::max())
- return;
-
- StackBytesFree.resize(FixedCSEnd, true);
-
- SmallVector<int, 16> AllocatedFrameSlots;
- // Add fixed objects.
- for (int i = MFI.getObjectIndexBegin(); i != 0; ++i)
- AllocatedFrameSlots.push_back(i);
- // Add callee-save objects.
- for (int i = MinCSFrameIndex; i <= (int)MaxCSFrameIndex; ++i)
- AllocatedFrameSlots.push_back(i);
-
- for (int i : AllocatedFrameSlots) {
- // These are converted from int64_t, but they should always fit in int
- // because of the FixedCSEnd check above.
- int ObjOffset = MFI.getObjectOffset(i);
- int ObjSize = MFI.getObjectSize(i);
- int ObjStart, ObjEnd;
- if (StackGrowsDown) {
- // ObjOffset is negative when StackGrowsDown is true.
- ObjStart = -ObjOffset - ObjSize;
- ObjEnd = -ObjOffset;
- } else {
- ObjStart = ObjOffset;
- ObjEnd = ObjOffset + ObjSize;
- }
- // Ignore fixed holes that are in the previous stack frame.
- if (ObjEnd > 0)
- StackBytesFree.reset(ObjStart, ObjEnd);
+ MFI->setObjectOffset(FrameIdx, Offset);
+ Offset += MFI->getObjectSize(FrameIdx);
}
}
-/// Assign frame object to an unused portion of the stack in the fixed stack
-/// object range. Return true if the allocation was successful.
-static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
- bool StackGrowsDown, unsigned MaxAlign,
- BitVector &StackBytesFree) {
- if (MFI.isVariableSizedObjectIndex(FrameIdx))
- return false;
-
- if (StackBytesFree.none()) {
- // clear it to speed up later scavengeStackSlot calls to
- // StackBytesFree.none()
- StackBytesFree.clear();
- return false;
- }
-
- unsigned ObjAlign = MFI.getObjectAlignment(FrameIdx);
- if (ObjAlign > MaxAlign)
- return false;
-
- int64_t ObjSize = MFI.getObjectSize(FrameIdx);
- int FreeStart;
- for (FreeStart = StackBytesFree.find_first(); FreeStart != -1;
- FreeStart = StackBytesFree.find_next(FreeStart)) {
-
- // Check that free space has suitable alignment.
- unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart;
- if (alignTo(ObjStart, ObjAlign) != ObjStart)
- continue;
-
- if (FreeStart + ObjSize > StackBytesFree.size())
- return false;
-
- bool AllBytesFree = true;
- for (unsigned Byte = 0; Byte < ObjSize; ++Byte)
- if (!StackBytesFree.test(FreeStart + Byte)) {
- AllBytesFree = false;
- break;
- }
- if (AllBytesFree)
- break;
- }
-
- if (FreeStart == -1)
- return false;
-
- if (StackGrowsDown) {
- int ObjStart = -(FreeStart + ObjSize);
- DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP[" << ObjStart
- << "]\n");
- MFI.setObjectOffset(FrameIdx, ObjStart);
- } else {
- DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP[" << FreeStart
- << "]\n");
- MFI.setObjectOffset(FrameIdx, FreeStart);
- }
-
- StackBytesFree.reset(FreeStart, FreeStart + ObjSize);
- return true;
-}
-
/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
/// those required to be close to the Stack Protector) to stack offsets.
static void
AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
SmallSet<int, 16> &ProtectedObjs,
- MachineFrameInfo &MFI, bool StackGrowsDown,
+ MachineFrameInfo *MFI, bool StackGrowsDown,
int64_t &Offset, unsigned &MaxAlign, unsigned Skew) {
for (StackObjSet::const_iterator I = UnassignedObjs.begin(),
@@ -685,6 +542,7 @@ AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
+///
void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
StackProtector *SP = &getAnalysis<StackProtector>();
@@ -693,7 +551,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
// Loop over all of the stack objects, assigning sequential addresses...
- MachineFrameInfo &MFI = Fn.getFrameInfo();
+ MachineFrameInfo *MFI = Fn.getFrameInfo();
// Start at the beginning of the local area.
// The Offset is the distance from the stack top in the direction
@@ -710,19 +568,20 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// If there are fixed sized objects that are preallocated in the local area,
// non-fixed objects can't be allocated right at the start of local area.
- // Adjust 'Offset' to point to the end of last fixed sized preallocated
- // object.
- for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) {
+ // We currently don't support filling in holes in between fixed sized
+ // objects, so we adjust 'Offset' to point to the end of last fixed sized
+ // preallocated object.
+ for (int i = MFI->getObjectIndexBegin(); i != 0; ++i) {
int64_t FixedOff;
if (StackGrowsDown) {
// The maximum distance from the stack pointer is at lower address of
// the object -- which is given by offset. For down growing stack
// the offset is negative, so we negate the offset to get the distance.
- FixedOff = -MFI.getObjectOffset(i);
+ FixedOff = -MFI->getObjectOffset(i);
} else {
// The maximum distance from the start pointer is at the upper
// address of the object.
- FixedOff = MFI.getObjectOffset(i) + MFI.getObjectSize(i);
+ FixedOff = MFI->getObjectOffset(i) + MFI->getObjectSize(i);
}
if (FixedOff > Offset) Offset = FixedOff;
}
@@ -733,35 +592,27 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
for (unsigned i = MinCSFrameIndex; i <= MaxCSFrameIndex; ++i) {
// If the stack grows down, we need to add the size to find the lowest
// address of the object.
- Offset += MFI.getObjectSize(i);
+ Offset += MFI->getObjectSize(i);
- unsigned Align = MFI.getObjectAlignment(i);
+ unsigned Align = MFI->getObjectAlignment(i);
// Adjust to alignment boundary
- Offset = alignTo(Offset, Align, Skew);
+ Offset = RoundUpToAlignment(Offset, Align, Skew);
- DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << -Offset << "]\n");
- MFI.setObjectOffset(i, -Offset); // Set the computed offset
+ MFI->setObjectOffset(i, -Offset); // Set the computed offset
}
- } else if (MaxCSFrameIndex >= MinCSFrameIndex) {
- // Be careful about underflow in comparisons agains MinCSFrameIndex.
- for (unsigned i = MaxCSFrameIndex; i != MinCSFrameIndex - 1; --i) {
- if (MFI.isDeadObjectIndex(i))
- continue;
-
- unsigned Align = MFI.getObjectAlignment(i);
+ } else {
+ int MaxCSFI = MaxCSFrameIndex, MinCSFI = MinCSFrameIndex;
+ for (int i = MaxCSFI; i >= MinCSFI ; --i) {
+ unsigned Align = MFI->getObjectAlignment(i);
// Adjust to alignment boundary
- Offset = alignTo(Offset, Align, Skew);
+ Offset = RoundUpToAlignment(Offset, Align, Skew);
- DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << Offset << "]\n");
- MFI.setObjectOffset(i, Offset);
- Offset += MFI.getObjectSize(i);
+ MFI->setObjectOffset(i, Offset);
+ Offset += MFI->getObjectSize(i);
}
}
- // FixedCSEnd is the stack offset to the end of the fixed and callee-save
- // stack area.
- int64_t FixedCSEnd = Offset;
- unsigned MaxAlign = MFI.getMaxAlignment();
+ unsigned MaxAlign = MFI->getMaxAlignment();
// Make sure the special register scavenging spill slot is closest to the
// incoming stack pointer if a frame pointer is required and is closer
@@ -783,60 +634,54 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// check for whether the frame is large enough to want to use virtual
// frame index registers. Functions which don't want/need this optimization
// will continue to use the existing code path.
- if (MFI.getUseLocalStackAllocationBlock()) {
- unsigned Align = MFI.getLocalFrameMaxAlign();
+ if (MFI->getUseLocalStackAllocationBlock()) {
+ unsigned Align = MFI->getLocalFrameMaxAlign();
// Adjust to alignment boundary.
- Offset = alignTo(Offset, Align, Skew);
+ Offset = RoundUpToAlignment(Offset, Align, Skew);
DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
// Resolve offsets for objects in the local block.
- for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
- std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
+ for (unsigned i = 0, e = MFI->getLocalFrameObjectCount(); i != e; ++i) {
+ std::pair<int, int64_t> Entry = MFI->getLocalFrameObjectMap(i);
int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" <<
FIOffset << "]\n");
- MFI.setObjectOffset(Entry.first, FIOffset);
+ MFI->setObjectOffset(Entry.first, FIOffset);
}
// Allocate the local block
- Offset += MFI.getLocalFrameSize();
+ Offset += MFI->getLocalFrameSize();
MaxAlign = std::max(Align, MaxAlign);
}
- // Retrieve the Exception Handler registration node.
- int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
- if (const WinEHFuncInfo *FuncInfo = Fn.getWinEHFuncInfo())
- EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;
-
// Make sure that the stack protector comes before the local variables on the
// stack.
SmallSet<int, 16> ProtectedObjs;
- if (MFI.getStackProtectorIndex() >= 0) {
+ if (MFI->getStackProtectorIndex() >= 0) {
StackObjSet LargeArrayObjs;
StackObjSet SmallArrayObjs;
StackObjSet AddrOfObjs;
- AdjustStackOffset(MFI, MFI.getStackProtectorIndex(), StackGrowsDown,
+ AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), StackGrowsDown,
Offset, MaxAlign, Skew);
// Assign large stack objects first.
- for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
- if (MFI.isObjectPreAllocated(i) &&
- MFI.getUseLocalStackAllocationBlock())
+ for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
+ if (MFI->isObjectPreAllocated(i) &&
+ MFI->getUseLocalStackAllocationBlock())
continue;
if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
continue;
if (RS && RS->isScavengingFrameIndex((int)i))
continue;
- if (MFI.isDeadObjectIndex(i))
+ if (MFI->isDeadObjectIndex(i))
continue;
- if (MFI.getStackProtectorIndex() == (int)i ||
- EHRegNodeFrameIndex == (int)i)
+ if (MFI->getStackProtectorIndex() == (int)i)
continue;
- switch (SP->getSSPLayout(MFI.getObjectAllocation(i))) {
+ switch (SP->getSSPLayout(MFI->getObjectAllocation(i))) {
case StackProtector::SSPLK_None:
continue;
case StackProtector::SSPLK_SmallArray:
@@ -860,56 +705,26 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
Offset, MaxAlign, Skew);
}
- SmallVector<int, 8> ObjectsToAllocate;
-
- // Then prepare to assign frame offsets to stack objects that are not used to
- // spill callee saved registers.
- for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
- if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
+ // Then assign frame offsets to stack objects that are not used to spill
+ // callee saved registers.
+ for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
+ if (MFI->isObjectPreAllocated(i) &&
+ MFI->getUseLocalStackAllocationBlock())
continue;
if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
continue;
if (RS && RS->isScavengingFrameIndex((int)i))
continue;
- if (MFI.isDeadObjectIndex(i))
+ if (MFI->isDeadObjectIndex(i))
continue;
- if (MFI.getStackProtectorIndex() == (int)i ||
- EHRegNodeFrameIndex == (int)i)
+ if (MFI->getStackProtectorIndex() == (int)i)
continue;
if (ProtectedObjs.count(i))
continue;
- // Add the objects that we need to allocate to our working set.
- ObjectsToAllocate.push_back(i);
+ AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
}
- // Allocate the EH registration node first if one is present.
- if (EHRegNodeFrameIndex != std::numeric_limits<int>::max())
- AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
- MaxAlign, Skew);
-
- // Give the targets a chance to order the objects the way they like it.
- if (Fn.getTarget().getOptLevel() != CodeGenOpt::None &&
- Fn.getTarget().Options.StackSymbolOrdering)
- TFI.orderFrameObjects(Fn, ObjectsToAllocate);
-
- // Keep track of which bytes in the fixed and callee-save range are used so we
- // can use the holes when allocating later stack objects. Only do this if
- // stack protector isn't being used and the target requests it and we're
- // optimizing.
- BitVector StackBytesFree;
- if (!ObjectsToAllocate.empty() &&
- Fn.getTarget().getOptLevel() != CodeGenOpt::None &&
- MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(Fn))
- computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex, MaxCSFrameIndex,
- FixedCSEnd, StackBytesFree);
-
- // Now walk the objects and actually assign base offsets to them.
- for (auto &Object : ObjectsToAllocate)
- if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
- StackBytesFree))
- AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign, Skew);
-
// Make sure the special register scavenging spill slot is closest to the
// stack pointer.
if (RS && !EarlyScavengingSlots) {
@@ -924,8 +739,8 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// If we have reserved argument space for call sites in the function
// immediately on entry to the current function, count it as part of the
// overall stack size.
- if (MFI.adjustsStack() && TFI.hasReservedCallFrame(Fn))
- Offset += MFI.getMaxCallFrameSize();
+ if (MFI->adjustsStack() && TFI.hasReservedCallFrame(Fn))
+ Offset += MFI->getMaxCallFrameSize();
// Round up the size to a multiple of the alignment. If the function has
// any calls or alloca's, align to the target's StackAlignment value to
@@ -933,8 +748,8 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// otherwise, for leaf functions, align to the TransientStackAlignment
// value.
unsigned StackAlign;
- if (MFI.adjustsStack() || MFI.hasVarSizedObjects() ||
- (RegInfo->needsStackRealignment(Fn) && MFI.getObjectIndexEnd() != 0))
+ if (MFI->adjustsStack() || MFI->hasVarSizedObjects() ||
+ (RegInfo->needsStackRealignment(Fn) && MFI->getObjectIndexEnd() != 0))
StackAlign = TFI.getStackAlignment();
else
StackAlign = TFI.getTransientStackAlignment();
@@ -942,25 +757,19 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// If the frame pointer is eliminated, all frame offsets will be relative to
// SP not FP. Align to MaxAlign so this works.
StackAlign = std::max(StackAlign, MaxAlign);
- Offset = alignTo(Offset, StackAlign, Skew);
+ Offset = RoundUpToAlignment(Offset, StackAlign, Skew);
}
// Update frame info to pretend that this is part of the stack...
int64_t StackSize = Offset - LocalAreaOffset;
- MFI.setStackSize(StackSize);
+ MFI->setStackSize(StackSize);
NumBytesStackSpace += StackSize;
-
- ORE->emit([&]() {
- return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize",
- Fn.getFunction().getSubprogram(),
- &Fn.front())
- << ore::NV("NumStackBytes", StackSize) << " stack bytes in function";
- });
}
/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
+///
void PEI::insertPrologEpilogCode(MachineFunction &Fn) {
const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
@@ -982,24 +791,21 @@ void PEI::insertPrologEpilogCode(MachineFunction &Fn) {
if (Fn.shouldSplitStack()) {
for (MachineBasicBlock *SaveBlock : SaveBlocks)
TFI.adjustForSegmentedStacks(Fn, *SaveBlock);
- // Record that there are split-stack functions, so we will emit a
- // special section to tell the linker.
- Fn.getMMI().setHasSplitStack(true);
- } else
- Fn.getMMI().setHasNosplitStack(true);
+ }
// Emit additional code that is required to explicitly handle the stack in
// HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
// approach is rather similar to that of Segmented Stacks, but it uses a
// different conditional check and another BIF for allocating more stack
// space.
- if (Fn.getFunction().getCallingConv() == CallingConv::HiPE)
+ if (Fn.getFunction()->getCallingConv() == CallingConv::HiPE)
for (MachineBasicBlock *SaveBlock : SaveBlocks)
TFI.adjustForHiPEPrologue(Fn, *SaveBlock);
}
/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
+///
void PEI::replaceFrameIndices(MachineFunction &Fn) {
const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
if (!TFI.needsFrameIndexResolution(Fn)) return;
@@ -1007,7 +813,7 @@ void PEI::replaceFrameIndices(MachineFunction &Fn) {
// Store SPAdj at exit of a basic block.
SmallVector<int, 8> SPState;
SPState.resize(Fn.getNumBlockIDs());
- df_iterator_default_set<MachineBasicBlock*> Reachable;
+ SmallPtrSet<MachineBasicBlock*, 8> Reachable;
// Iterate over the reachable blocks in DFS order.
for (auto DFI = df_ext_begin(&Fn, Reachable), DFE = df_ext_end(&Fn, Reachable);
@@ -1042,40 +848,50 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
const TargetRegisterInfo &TRI = *Fn.getSubtarget().getRegisterInfo();
const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
+ unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
+ unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
- if (RS && FrameIndexEliminationScavenging)
- RS->enterBasicBlock(*BB);
+ if (RS && !FrameIndexVirtualScavenging) RS->enterBasicBlock(BB);
bool InsideCallSequence = false;
for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
- if (TII.isFrameInstr(*I)) {
- InsideCallSequence = TII.isFrameSetup(*I);
- SPAdj += TII.getSPAdjust(*I);
- I = TFI->eliminateCallFramePseudoInstr(Fn, *BB, I);
+
+ if (I->getOpcode() == FrameSetupOpcode ||
+ I->getOpcode() == FrameDestroyOpcode) {
+ InsideCallSequence = (I->getOpcode() == FrameSetupOpcode);
+ SPAdj += TII.getSPAdjust(I);
+
+ MachineBasicBlock::iterator PrevI = BB->end();
+ if (I != BB->begin()) PrevI = std::prev(I);
+ TFI->eliminateCallFramePseudoInstr(Fn, *BB, I);
+
+ // Visit the instructions created by eliminateCallFramePseudoInstr().
+ if (PrevI == BB->end())
+ I = BB->begin(); // The replaced instr was the first in the block.
+ else
+ I = std::next(PrevI);
continue;
}
- MachineInstr &MI = *I;
+ MachineInstr *MI = I;
bool DoIncr = true;
- bool DidFinishLoop = true;
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- if (!MI.getOperand(i).isFI())
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ if (!MI->getOperand(i).isFI())
continue;
// Frame indices in debug values are encoded in a target independent
// way with simply the frame index and offset rather than any
// target-specific addressing mode.
- if (MI.isDebugValue()) {
+ if (MI->isDebugValue()) {
assert(i == 0 && "Frame indices can only appear as the first "
"operand of a DBG_VALUE machine instruction");
unsigned Reg;
- int64_t Offset =
- TFI->getFrameIndexReference(Fn, MI.getOperand(0).getIndex(), Reg);
- MI.getOperand(0).ChangeToRegister(Reg, false /*isDef*/);
- auto *DIExpr = DIExpression::prepend(MI.getDebugExpression(),
- DIExpression::NoDeref, Offset);
- MI.getOperand(3).setMetadata(DIExpr);
+ MachineOperand &Offset = MI->getOperand(1);
+ Offset.setImm(Offset.getImm() +
+ TFI->getFrameIndexReference(
+ Fn, MI->getOperand(0).getIndex(), Reg));
+ MI->getOperand(0).ChangeToRegister(Reg, false /*isDef*/);
continue;
}
@@ -1084,16 +900,18 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
// implementation other than historical accident. The only
// remaining difference is the unconditional use of the stack
// pointer as the base register.
- if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
- assert((!MI.isDebugValue() || i == 0) &&
+ if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
+ assert((!MI->isDebugValue() || i == 0) &&
"Frame indicies can only appear as the first operand of a "
"DBG_VALUE machine instruction");
unsigned Reg;
- MachineOperand &Offset = MI.getOperand(i + 1);
- int refOffset = TFI->getFrameIndexReferencePreferSP(
- Fn, MI.getOperand(i).getIndex(), Reg, /*IgnoreSPUpdates*/ false);
+ MachineOperand &Offset = MI->getOperand(i + 1);
+ const unsigned refOffset =
+ TFI->getFrameIndexReferenceFromSP(Fn, MI->getOperand(i).getIndex(),
+ Reg);
+
Offset.setImm(Offset.getImm() + refOffset);
- MI.getOperand(i).ChangeToRegister(Reg, false /*isDef*/);
+ MI->getOperand(i).ChangeToRegister(Reg, false /*isDef*/);
continue;
}
@@ -1111,7 +929,7 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
// use that target machine register info object to eliminate
// it.
TRI.eliminateFrameIndex(MI, SPAdj, i,
- FrameIndexEliminationScavenging ? RS : nullptr);
+ FrameIndexVirtualScavenging ? nullptr : RS);
// Reset the iterator if we were at the beginning of the BB.
if (AtBeginning) {
@@ -1119,7 +937,7 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
DoIncr = false;
}
- DidFinishLoop = false;
+ MI = nullptr;
break;
}
@@ -1127,16 +945,106 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
// the SP adjustment made by each instruction in the sequence.
// This includes both the frame setup/destroy pseudos (handled above),
// as well as other instructions that have side effects w.r.t the SP.
- // Note that this must come after eliminateFrameIndex, because
+ // Note that this must come after eliminateFrameIndex, because
// if I itself referred to a frame index, we shouldn't count its own
// adjustment.
- if (DidFinishLoop && InsideCallSequence)
+ if (MI && InsideCallSequence)
SPAdj += TII.getSPAdjust(MI);
if (DoIncr && I != BB->end()) ++I;
// Update register states.
- if (RS && FrameIndexEliminationScavenging && DidFinishLoop)
- RS->forward(MI);
+ if (RS && !FrameIndexVirtualScavenging && MI) RS->forward(MI);
+ }
+}
+
+/// scavengeFrameVirtualRegs - Replace all frame index virtual registers
+/// with physical registers. Use the register scavenger to find an
+/// appropriate register to use.
+///
+/// FIXME: Iterating over the instruction stream is unnecessary. We can simply
+/// iterate over the vreg use list, which at this point only contains machine
+/// operands for which eliminateFrameIndex need a new scratch reg.
+void
+PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) {
+ // Run through the instructions and find any virtual registers.
+ for (MachineFunction::iterator BB = Fn.begin(),
+ E = Fn.end(); BB != E; ++BB) {
+ RS->enterBasicBlock(&*BB);
+
+ int SPAdj = 0;
+
+ // The instruction stream may change in the loop, so check BB->end()
+ // directly.
+ for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
+ // We might end up here again with a NULL iterator if we scavenged a
+ // register for which we inserted spill code for definition by what was
+ // originally the first instruction in BB.
+ if (I == MachineBasicBlock::iterator(nullptr))
+ I = BB->begin();
+
+ MachineInstr *MI = I;
+ MachineBasicBlock::iterator J = std::next(I);
+ MachineBasicBlock::iterator P =
+ I == BB->begin() ? MachineBasicBlock::iterator(nullptr)
+ : std::prev(I);
+
+ // RS should process this instruction before we might scavenge at this
+ // location. This is because we might be replacing a virtual register
+ // defined by this instruction, and if so, registers killed by this
+ // instruction are available, and defined registers are not.
+ RS->forward(I);
+
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ if (MI->getOperand(i).isReg()) {
+ MachineOperand &MO = MI->getOperand(i);
+ unsigned Reg = MO.getReg();
+ if (Reg == 0)
+ continue;
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ continue;
+
+ // When we first encounter a new virtual register, it
+ // must be a definition.
+ assert(MI->getOperand(i).isDef() &&
+ "frame index virtual missing def!");
+ // Scavenge a new scratch register
+ const TargetRegisterClass *RC = Fn.getRegInfo().getRegClass(Reg);
+ unsigned ScratchReg = RS->scavengeRegister(RC, J, SPAdj);
+
+ ++NumScavengedRegs;
+
+ // Replace this reference to the virtual register with the
+ // scratch register.
+ assert (ScratchReg && "Missing scratch register!");
+ Fn.getRegInfo().replaceRegWith(Reg, ScratchReg);
+
+ // Because this instruction was processed by the RS before this
+ // register was allocated, make sure that the RS now records the
+ // register as being used.
+ RS->setRegUsed(ScratchReg);
+ }
+ }
+
+ // If the scavenger needed to use one of its spill slots, the
+ // spill code will have been inserted in between I and J. This is a
+ // problem because we need the spill code before I: Move I to just
+ // prior to J.
+ if (I != std::prev(J)) {
+ BB->splice(J, &*BB, I);
+
+ // Before we move I, we need to prepare the RS to visit I again.
+ // Specifically, RS will assert if it sees uses of registers that
+ // it believes are undefined. Because we have already processed
+ // register kills in I, when it visits I again, it will believe that
+ // those registers are undefined. To avoid this situation, unprocess
+ // the instruction I.
+ assert(RS->getCurrentPosition() == I &&
+ "The register scavenger has an unexpected position");
+ I = P;
+ RS->unprocess(P);
+ } else
+ ++I;
+ }
}
}
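
Note on the frame-layout hunks above: several of them swap alignTo(Offset, Align, Skew) back to the older RoundUpToAlignment(Offset, Align, Skew) in AdjustStackOffset and calculateFrameObjectOffsets. Both helpers round an offset up to the next alignment boundary, optionally skewed, and on a downward-growing stack the rounded running offset is stored negated. The sketch below is a self-contained illustration of that arithmetic, not LLVM's code; the helper name and the example sizes are local to the example.

    #include <cstdint>
    #include <cstdio>

    // Round Value up to the next multiple of Align, offset by Skew bytes,
    // mirroring the semantics of LLVM's RoundUpToAlignment/alignTo helpers.
    static uint64_t roundUpToAlignment(uint64_t Value, uint64_t Align,
                                       uint64_t Skew = 0) {
      Skew %= Align;
      return (Value + Align - 1 - Skew) / Align * Align + Skew;
    }

    int main() {
      // Lay out two objects on a stack that grows down: sizes 12 and 32 bytes,
      // alignments 4 and 16.  Offset is the distance from the top of the frame;
      // the stored frame-index offset is its negation, as in AdjustStackOffset.
      uint64_t Offset = 0;
      const uint64_t Sizes[]  = {12, 32};
      const uint64_t Aligns[] = {4, 16};
      for (int i = 0; i < 2; ++i) {
        Offset += Sizes[i];                              // grow toward lower addresses
        Offset = roundUpToAlignment(Offset, Aligns[i]);  // align the object's low address
        std::printf("alloc FI(%d) at SP[%lld]\n", i, -(long long)Offset);
      }
      return 0;   // prints SP[-12] and SP[-48] for this example
    }
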
diff --git a/gnu/llvm/lib/Target/X86/X86FrameLowering.cpp b/gnu/llvm/lib/Target/X86/X86FrameLowering.cpp
index 11808f8995f..3348e56bc77 100644
--- a/gnu/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/gnu/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -29,8 +29,8 @@
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/Debug.h"
#include <cstdlib>
using namespace llvm;
@@ -50,7 +50,7 @@ X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
}
bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
- return !MF.getFrameInfo().hasVarSizedObjects() &&
+ return !MF.getFrameInfo()->hasVarSizedObjects() &&
!MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}
@@ -74,7 +74,7 @@ X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
// when there are no stack objects.
bool
X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
- return MF.getFrameInfo().hasStackObjects() ||
+ return MF.getFrameInfo()->hasStackObjects() ||
MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}
@@ -82,15 +82,17 @@ X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo &MFI = MF.getFrameInfo();
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MachineModuleInfo &MMI = MF.getMMI();
+
return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
TRI->needsStackRealignment(MF) ||
- MFI.hasVarSizedObjects() ||
- MFI.isFrameAddressTaken() || MFI.hasOpaqueSPAdjustment() ||
+ MFI->hasVarSizedObjects() ||
+ MFI->isFrameAddressTaken() || MFI->hasOpaqueSPAdjustment() ||
MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
- MF.callsUnwindInit() || MF.hasEHFunclets() || MF.callsEHReturn() ||
- MFI.hasStackMap() || MFI.hasPatchPoint() ||
- MFI.hasCopyImplyingStackAdjustment());
+ MMI.callsUnwindInit() || MMI.hasEHFunclets() || MMI.callsEHReturn() ||
+ MFI->hasStackMap() || MFI->hasPatchPoint() ||
+ MFI->hasCopyImplyingStackAdjustment());
}
static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
@@ -148,18 +150,15 @@ static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
const X86RegisterInfo *TRI,
bool Is64Bit) {
const MachineFunction *MF = MBB.getParent();
- if (MF->callsEHReturn())
+ const Function *F = MF->getFunction();
+ if (!F || MF->getMMI().callsEHReturn())
return 0;
const TargetRegisterClass &AvailableRegs = *TRI->getGPRsForTailCall(*MF);
- if (MBBI == MBB.end())
- return 0;
-
- switch (MBBI->getOpcode()) {
+ unsigned Opc = MBBI->getOpcode();
+ switch (Opc) {
default: return 0;
- case TargetOpcode::PATCHABLE_RET:
- case X86::RET:
case X86::RETL:
case X86::RETQ:
case X86::RETIL:
@@ -251,76 +250,40 @@ void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
int64_t NumBytes, bool InEpilogue) const {
bool isSub = NumBytes < 0;
uint64_t Offset = isSub ? -NumBytes : NumBytes;
- MachineInstr::MIFlag Flag =
- isSub ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy;
uint64_t Chunk = (1LL << 31) - 1;
DebugLoc DL = MBB.findDebugLoc(MBBI);
- if (Offset > Chunk) {
- // Rather than emit a long series of instructions for large offsets,
- // load the offset into a register and do one sub/add
- unsigned Reg = 0;
- unsigned Rax = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);
+ while (Offset) {
+ if (Offset > Chunk) {
+ // Rather than emit a long series of instructions for large offsets,
+ // load the offset into a register and do one sub/add
+ unsigned Reg = 0;
- if (isSub && !isEAXLiveIn(MBB))
- Reg = Rax;
- else
- Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
-
- unsigned MovRIOpc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
- unsigned AddSubRROpc =
- isSub ? getSUBrrOpcode(Is64Bit) : getADDrrOpcode(Is64Bit);
- if (Reg) {
- BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Reg)
- .addImm(Offset)
- .setMIFlag(Flag);
- MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AddSubRROpc), StackPtr)
- .addReg(StackPtr)
- .addReg(Reg);
- MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
- return;
- } else if (Offset > 8 * Chunk) {
- // If we would need more than 8 add or sub instructions (a >16GB stack
- // frame), it's worth spilling RAX to materialize this immediate.
- // pushq %rax
- // movabsq +-$Offset+-SlotSize, %rax
- // addq %rsp, %rax
- // xchg %rax, (%rsp)
- // movq (%rsp), %rsp
- assert(Is64Bit && "can't have 32-bit 16GB stack frame");
- BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
- .addReg(Rax, RegState::Kill)
- .setMIFlag(Flag);
- // Subtract is not commutative, so negate the offset and always use add.
- // Subtract 8 less and add 8 more to account for the PUSH we just did.
- if (isSub)
- Offset = -(Offset - SlotSize);
+ if (isSub && !isEAXLiveIn(MBB))
+ Reg = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);
else
- Offset = Offset + SlotSize;
- BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Rax)
- .addImm(Offset)
- .setMIFlag(Flag);
- MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(X86::ADD64rr), Rax)
- .addReg(Rax)
- .addReg(StackPtr);
- MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
- // Exchange the new SP in RAX with the top of the stack.
- addRegOffset(
- BuildMI(MBB, MBBI, DL, TII.get(X86::XCHG64rm), Rax).addReg(Rax),
- StackPtr, false, 0);
- // Load new SP from the top of the stack into RSP.
- addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), StackPtr),
- StackPtr, false, 0);
- return;
+ Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
+
+ if (Reg) {
+ unsigned Opc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
+ BuildMI(MBB, MBBI, DL, TII.get(Opc), Reg)
+ .addImm(Offset);
+ Opc = isSub
+ ? getSUBrrOpcode(Is64Bit)
+ : getADDrrOpcode(Is64Bit);
+ MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr)
+ .addReg(Reg);
+ MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
+ Offset = 0;
+ continue;
+ }
}
- }
- while (Offset) {
uint64_t ThisVal = std::min(Offset, Chunk);
- if (ThisVal == SlotSize) {
- // Use push / pop for slot sized adjustments as a size optimization. We
- // need to find a dead register when using pop.
+ if (ThisVal == (Is64Bit ? 8 : 4)) {
+ // Use push / pop instead.
unsigned Reg = isSub
? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
: findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
@@ -328,24 +291,31 @@ void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
unsigned Opc = isSub
? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
: (Is64Bit ? X86::POP64r : X86::POP32r);
- BuildMI(MBB, MBBI, DL, TII.get(Opc))
- .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub))
- .setMIFlag(Flag);
+ MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
+ .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
+ if (isSub)
+ MI->setFlag(MachineInstr::FrameSetup);
+ else
+ MI->setFlag(MachineInstr::FrameDestroy);
Offset -= ThisVal;
continue;
}
}
- BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue)
- .setMIFlag(Flag);
+ MachineInstrBuilder MI = BuildStackAdjustment(
+ MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue);
+ if (isSub)
+ MI.setMIFlag(MachineInstr::FrameSetup);
+ else
+ MI.setMIFlag(MachineInstr::FrameDestroy);
Offset -= ThisVal;
}
}
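
For reference, the reverted chunking loop above reduces a large SP adjustment to a series of steps no larger than a 32-bit immediate, and uses a single push or pop when exactly one slot remains. A rough standalone model of that plan follows; the helper name is invented, and it assumes no dead scratch register is ever found (so the large-offset register path is skipped) and that a register is always available for the pop case.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Print the SP adjustments the loop would emit for a given byte count,
// taking only the plain SUB/ADD and push/pop paths.
void planSPUpdate(uint64_t Offset, bool Is64Bit, bool IsSub) {
  const uint64_t Chunk = (1ULL << 31) - 1;   // largest 32-bit immediate
  const uint64_t SlotSize = Is64Bit ? 8 : 4;
  while (Offset) {
    uint64_t ThisVal = std::min(Offset, Chunk);
    if (ThisVal == SlotSize)                 // one slot left: push/pop is shorter
      std::printf("%s one slot\n", IsSub ? "push" : "pop");
    else
      std::printf("%s $%llu, %%sp\n", IsSub ? "sub" : "add",
                  (unsigned long long)ThisVal);
    Offset -= ThisVal;
  }
}

int main() {
  planSPUpdate((1ULL << 31) - 1 + 8, /*Is64Bit=*/true, /*IsSub=*/true);
  // sub $2147483647, %sp
  // push one slot
}
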
MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL, int64_t Offset, bool InEpilogue) const {
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
+ int64_t Offset, bool InEpilogue) const {
assert(Offset != 0 && "zero offset stack adjustment requested");
// On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
@@ -401,40 +371,19 @@ int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr
: std::next(MBBI);
- PI = skipDebugInstructionsBackward(PI, MBB.begin());
- if (NI != nullptr)
- NI = skipDebugInstructionsForward(NI, MBB.end());
-
unsigned Opc = PI->getOpcode();
int Offset = 0;
- if (!doMergeWithPrevious && NI != MBB.end() &&
- NI->getOpcode() == TargetOpcode::CFI_INSTRUCTION) {
- // Don't merge with the next instruction if it has CFI.
- return Offset;
- }
-
if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
- Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
+ Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
PI->getOperand(0).getReg() == StackPtr){
- assert(PI->getOperand(1).getReg() == StackPtr);
Offset += PI->getOperand(2).getImm();
MBB.erase(PI);
if (!doMergeWithPrevious) MBBI = NI;
- } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
- PI->getOperand(0).getReg() == StackPtr &&
- PI->getOperand(1).getReg() == StackPtr &&
- PI->getOperand(2).getImm() == 1 &&
- PI->getOperand(3).getReg() == X86::NoRegister &&
- PI->getOperand(5).getReg() == X86::NoRegister) {
- // For LEAs we have: def = lea SP, FI, noreg, Offset, noreg.
- Offset += PI->getOperand(4).getImm();
- MBB.erase(PI);
- if (!doMergeWithPrevious) MBBI = NI;
} else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
PI->getOperand(0).getReg() == StackPtr) {
- assert(PI->getOperand(1).getReg() == StackPtr);
Offset -= PI->getOperand(2).getImm();
MBB.erase(PI);
if (!doMergeWithPrevious) MBBI = NI;
@@ -444,31 +393,31 @@ int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
}
void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL,
- const MCCFIInstruction &CFIInst) const {
+ MachineBasicBlock::iterator MBBI, DebugLoc DL,
+ MCCFIInstruction CFIInst) const {
MachineFunction &MF = *MBB.getParent();
- unsigned CFIIndex = MF.addFrameInst(CFIInst);
+ unsigned CFIIndex = MF.getMMI().addFrameInst(CFIInst);
BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex);
}
-void X86FrameLowering::emitCalleeSavedFrameMoves(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL) const {
+void
+X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL) const {
MachineFunction &MF = *MBB.getParent();
- MachineFrameInfo &MFI = MF.getFrameInfo();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
MachineModuleInfo &MMI = MF.getMMI();
const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
// Add callee saved registers to move list.
- const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
if (CSI.empty()) return;
// Calculate offsets.
for (std::vector<CalleeSavedInfo>::const_iterator
I = CSI.begin(), E = CSI.end(); I != E; ++I) {
- int64_t Offset = MFI.getObjectOffset(I->getFrameIdx());
+ int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
unsigned Reg = I->getReg();
unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
@@ -477,19 +426,20 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(
}
}
-void X86FrameLowering::emitStackProbe(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL, bool InProlog) const {
+MachineInstr *X86FrameLowering::emitStackProbe(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL,
+ bool InProlog) const {
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
if (STI.isTargetWindowsCoreCLR()) {
if (InProlog) {
- emitStackProbeInlineStub(MF, MBB, MBBI, DL, true);
+ return emitStackProbeInlineStub(MF, MBB, MBBI, DL, true);
} else {
- emitStackProbeInline(MF, MBB, MBBI, DL, false);
+ return emitStackProbeInline(MF, MBB, MBBI, DL, false);
}
} else {
- emitStackProbeCall(MF, MBB, MBBI, DL, InProlog);
+ return emitStackProbeCall(MF, MBB, MBBI, DL, InProlog);
}
}
@@ -507,22 +457,18 @@ void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
}
if (ChkStkStub != nullptr) {
- assert(!ChkStkStub->isBundled() &&
- "Not expecting bundled instructions here");
MachineBasicBlock::iterator MBBI = std::next(ChkStkStub->getIterator());
- assert(std::prev(MBBI) == ChkStkStub &&
- "MBBI expected after __chkstk_stub.");
+ assert(std::prev(MBBI).operator==(ChkStkStub) &&
+ "MBBI expected after __chkstk_stub.");
DebugLoc DL = PrologMBB.findDebugLoc(MBBI);
emitStackProbeInline(MF, PrologMBB, MBBI, DL, true);
ChkStkStub->eraseFromParent();
}
}
-void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL,
- bool InProlog) const {
+MachineInstr *X86FrameLowering::emitStackProbeInline(
+ MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
assert(STI.is64Bit() && "different expansion needed for 32 bit");
assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
@@ -645,7 +591,7 @@ void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
// lowest touched page on the stack, not the point at which the OS
// will cause an overflow exception, so this is just an optimization
// to avoid unnecessarily touching pages that are below the current
- // SP but already committed to the stack by the OS.
+      // SP but already committed to the stack by the OS.

BuildMI(&MBB, DL, TII.get(X86::MOV64rm), LimitReg)
.addReg(0)
.addImm(1)
@@ -732,27 +678,32 @@ void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
}
// Possible TODO: physreg liveness for InProlog case.
+
+ return ContinueMBBI;
}
-void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL,
- bool InProlog) const {
+MachineInstr *X86FrameLowering::emitStackProbeCall(
+ MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
- // FIXME: Add retpoline support and remove this.
- if (Is64Bit && IsLargeCodeModel && STI.useRetpoline())
- report_fatal_error("Emitting stack probe calls on 64-bit with the large "
- "code model and retpoline not yet implemented.");
-
unsigned CallOp;
if (Is64Bit)
CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
else
CallOp = X86::CALLpcrel32;
- StringRef Symbol = STI.getTargetLowering()->getStackProbeSymbolName(MF);
+ const char *Symbol;
+ if (Is64Bit) {
+ if (STI.isTargetCygMing()) {
+ Symbol = "___chkstk_ms";
+ } else {
+ Symbol = "__chkstk";
+ }
+ } else if (STI.isTargetCygMing())
+ Symbol = "_alloca";
+ else
+ Symbol = "_chkstk";
MachineInstrBuilder CI;
MachineBasicBlock::iterator ExpansionMBBI = std::prev(MBBI);
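
The reverted code above hard-codes the probe symbol per target instead of asking getStackProbeSymbolName. Restated as a tiny standalone helper (the function name is invented for illustration):

#include <cstdio>

// 64-bit targets call __chkstk (___chkstk_ms on cygwin/mingw);
// 32-bit targets call _chkstk (_alloca on cygwin/mingw).
const char *stackProbeSymbol(bool Is64Bit, bool IsCygMing) {
  if (Is64Bit)
    return IsCygMing ? "___chkstk_ms" : "__chkstk";
  return IsCygMing ? "_alloca" : "_chkstk";
}

int main() {
  std::printf("%s\n", stackProbeSymbol(true, false));   // __chkstk
  std::printf("%s\n", stackProbeSymbol(false, true));   // _alloca
}
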
@@ -763,11 +714,10 @@ void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
// For the large code model, we have to call through a register. Use R11,
// as it is scratch in all supported calling conventions.
BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
- .addExternalSymbol(MF.createExternalSymbolName(Symbol));
+ .addExternalSymbol(Symbol);
CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
} else {
- CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp))
- .addExternalSymbol(MF.createExternalSymbolName(Symbol));
+ CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addExternalSymbol(Symbol);
}
unsigned AX = Is64Bit ? X86::RAX : X86::EAX;
@@ -778,16 +728,13 @@ void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
.addReg(SP, RegState::Define | RegState::Implicit)
.addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
- if (STI.isTargetWin64() || !STI.isOSWindows()) {
- // MSVC x32's _chkstk and cygwin/mingw's _alloca adjust %esp themselves.
+ if (Is64Bit) {
// MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
- // themselves. They also does not clobber %rax so we can reuse it when
+ // themselves. It also does not clobber %rax so we can reuse it when
// adjusting %rsp.
- // All other platforms do not specify a particular ABI for the stack probe
- // function, so we arbitrarily define it to not adjust %esp/%rsp itself.
- BuildMI(MBB, MBBI, DL, TII.get(getSUBrrOpcode(Is64Bit)), SP)
- .addReg(SP)
- .addReg(AX);
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
+ .addReg(X86::RSP)
+ .addReg(X86::RAX);
}
if (InProlog) {
@@ -795,16 +742,20 @@ void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
}
+
+ return MBBI;
}
-void X86FrameLowering::emitStackProbeInlineStub(
+MachineInstr *X86FrameLowering::emitStackProbeInlineStub(
MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
+ MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
assert(InProlog && "ChkStkStub called outside prolog!");
BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
.addExternalSymbol("__chkstk_stub");
+
+ return MBBI;
}
static unsigned calculateSetFPREG(uint64_t SPAdjust) {
@@ -821,11 +772,11 @@ static unsigned calculateSetFPREG(uint64_t SPAdjust) {
// have a call out. Otherwise just make sure we have some alignment - we'll
// go with the minimum SlotSize.
uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
- const MachineFrameInfo &MFI = MF.getFrameInfo();
- uint64_t MaxAlign = MFI.getMaxAlignment(); // Desired stack alignment.
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
unsigned StackAlign = getStackAlignment();
- if (MF.getFunction().hasFnAttribute("stackrealign")) {
- if (MFI.hasCalls())
+ if (MF.getFunction()->hasFnAttribute("stackrealign")) {
+ if (MFI->hasCalls())
MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
else if (MaxAlign < SlotSize)
MaxAlign = SlotSize;
@@ -835,7 +786,7 @@ uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) con
void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL, unsigned Reg,
+ DebugLoc DL, unsigned Reg,
uint64_t MaxAlign) const {
uint64_t Val = -MaxAlign;
unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);
@@ -928,7 +879,6 @@ void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
Notes:
- .seh directives are emitted only for Windows 64 ABI
- - .cv_fpo directives are emitted on win32 when emitting CodeView
- .cfi directives are emitted for all other ABIs
- for 32-bit code, substitute %e?? registers for %r??
*/
@@ -938,36 +888,31 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
"MF used frame lowering for wrong subtarget");
MachineBasicBlock::iterator MBBI = MBB.begin();
- MachineFrameInfo &MFI = MF.getFrameInfo();
- const Function &Fn = MF.getFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const Function *Fn = MF.getFunction();
MachineModuleInfo &MMI = MF.getMMI();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
- uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
+ uint64_t StackSize = MFI->getStackSize(); // Number of bytes to allocate.
bool IsFunclet = MBB.isEHFuncletEntry();
EHPersonality Personality = EHPersonality::Unknown;
- if (Fn.hasPersonalityFn())
- Personality = classifyEHPersonality(Fn.getPersonalityFn());
+ if (Fn->hasPersonalityFn())
+ Personality = classifyEHPersonality(Fn->getPersonalityFn());
bool FnHasClrFunclet =
- MF.hasEHFunclets() && Personality == EHPersonality::CoreCLR;
+ MMI.hasEHFunclets() && Personality == EHPersonality::CoreCLR;
bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
bool HasFP = hasFP(MF);
- bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv());
+ bool IsWin64CC = STI.isCallingConvWin64(Fn->getCallingConv());
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
- bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
- // FIXME: Emit FPO data for EH funclets.
- bool NeedsWinFPO =
- !IsFunclet && STI.isTargetWin32() && MMI.getModule()->getCodeViewFlag();
- bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
+ bool NeedsWinCFI = IsWin64Prologue && Fn->needsUnwindTableEntry();
bool NeedsDwarfCFI =
- !IsWin64Prologue && (MMI.hasDebugInfo() || Fn.needsUnwindTableEntry());
+ !IsWin64Prologue && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
unsigned FramePtr = TRI->getFrameRegister(MF);
const unsigned MachineFramePtr =
STI.isTarget64BitILP32()
? getX86SubSuperRegister(FramePtr, 64) : FramePtr;
unsigned BasePtr = TRI->getBaseRegister();
- bool HasWinCFI = false;
-
+
// Debug location must be unknown since the first debug location is used
// to determine the end of the prologue.
DebugLoc DL;
@@ -981,44 +926,32 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
X86FI->setCalleeSavedFrameSize(
X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
- bool UseStackProbe = !STI.getTargetLowering()->getStackProbeSymbolName(MF).empty();
+ bool UseStackProbe = (STI.isOSWindows() && !STI.isTargetMachO());
// The default stack probe size is 4096 if the function has no stackprobesize
// attribute.
unsigned StackProbeSize = 4096;
- if (Fn.hasFnAttribute("stack-probe-size"))
- Fn.getFnAttribute("stack-probe-size")
+ if (Fn->hasFnAttribute("stack-probe-size"))
+ Fn->getFnAttribute("stack-probe-size")
.getValueAsString()
.getAsInteger(0, StackProbeSize);
- // Re-align the stack on 64-bit if the x86-interrupt calling convention is
- // used and an error code was pushed, since the x86-64 ABI requires a 16-byte
- // stack alignment.
- if (Fn.getCallingConv() == CallingConv::X86_INTR && Is64Bit &&
- Fn.arg_size() == 2) {
- StackSize += 8;
- MFI.setStackSize(StackSize);
- emitSPUpdate(MBB, MBBI, -8, /*InEpilogue=*/false);
- }
-
// If this is x86-64 and the Red Zone is not disabled, if we are a leaf
// function, and use up to 128 bytes of stack space, don't have a frame
// pointer, calls, or dynamic alloca then we do not need to adjust the
// stack pointer (we fit in the Red Zone). We also check that we don't
// push and pop from the stack.
- if (Is64Bit && !Fn.hasFnAttribute(Attribute::NoRedZone) &&
+ if (Is64Bit && !Fn->hasFnAttribute(Attribute::NoRedZone) &&
!TRI->needsStackRealignment(MF) &&
- !MFI.hasVarSizedObjects() && // No dynamic alloca.
- !MFI.adjustsStack() && // No calls.
- !UseStackProbe && // No stack probes.
- !IsWin64CC && // Win64 has no Red Zone
- !MFI.hasCopyImplyingStackAdjustment() && // Don't push and pop.
- !MF.shouldSplitStack()) { // Regular stack
+ !MFI->hasVarSizedObjects() && // No dynamic alloca.
+ !MFI->adjustsStack() && // No calls.
+ !IsWin64CC && // Win64 has no Red Zone
+ !MFI->hasCopyImplyingStackAdjustment() && // Don't push and pop.
+ !MF.shouldSplitStack()) { // Regular stack
uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
if (HasFP) MinSize += SlotSize;
- X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
- MFI.setStackSize(StackSize);
+ MFI->setStackSize(StackSize);
}
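
A quick standalone check of the red-zone shrink just above (the function name and sample numbers are invented): a qualifying leaf frame may use the 128 bytes below RSP without adjusting it, so the allocation is reduced by 128 but never below the callee-saved area.

#include <algorithm>
#include <cstdint>
#include <cstdio>

uint64_t redZoneStackSize(uint64_t StackSize, uint64_t CalleeSavedFrameSize,
                          bool HasFP, uint64_t SlotSize = 8) {
  uint64_t MinSize = CalleeSavedFrameSize + (HasFP ? SlotSize : 0);
  return std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
}

int main() {
  // 120-byte leaf frame, no CSRs, no FP: fits entirely in the red zone.
  std::printf("%llu\n", (unsigned long long)redZoneStackSize(120, 0, false)); // 0
  // 200-byte frame, 16 bytes of CSRs plus saved FP: shrinks to 72.
  std::printf("%llu\n", (unsigned long long)redZoneStackSize(200, 16, true)); // 72
}
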
// Insert stack pointer adjustment for later moving of return addr. Only
@@ -1066,8 +999,6 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
}
if (HasFP) {
- assert(MF.getRegInfo().isReserved(MachineFramePtr) && "FP reserved");
-
// Calculate required stack adjustment.
uint64_t FrameSize = StackSize - SlotSize;
// If required, include space for extra hidden slot for stashing base pointer.
@@ -1078,15 +1009,15 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// Callee-saved registers are pushed on stack before the stack is realigned.
if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
- NumBytes = alignTo(NumBytes, MaxAlign);
+ NumBytes = RoundUpToAlignment(NumBytes, MaxAlign);
// Get the offset of the stack slot for the EBP register, which is
// guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
// Update the frame offset adjustment.
if (!IsFunclet)
- MFI.setOffsetAdjustment(-NumBytes);
+ MFI->setOffsetAdjustment(-NumBytes);
else
- assert(MFI.getOffsetAdjustment() == -(int)NumBytes &&
+ assert(MFI->getOffsetAdjustment() == -(int)NumBytes &&
"should calculate same local variable offset for funclets");
// Save EBP/RBP into the appropriate stack slot.
@@ -1108,7 +1039,6 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
}
if (NeedsWinCFI) {
- HasWinCFI = true;
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
.addImm(FramePtr)
.setMIFlag(MachineInstr::FrameSetup);
@@ -1129,15 +1059,13 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaRegister(
nullptr, DwarfFramePtr));
}
+ }
- if (NeedsWinFPO) {
- // .cv_fpo_setframe $FramePtr
- HasWinCFI = true;
- BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
- .addImm(FramePtr)
- .addImm(0)
- .setMIFlag(MachineInstr::FrameSetup);
- }
+ // Mark the FramePtr as live-in in every block. Don't do this again for
+ // funclet prologues.
+ if (!IsFunclet) {
+ for (MachineBasicBlock &EveryMBB : MF)
+ EveryMBB.addLiveIn(MachineFramePtr);
}
} else {
assert(!IsFunclet && "funclets without FPs not yet implemented");
@@ -1172,10 +1100,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
}
if (NeedsWinCFI) {
- HasWinCFI = true;
- BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
- .addImm(Reg)
- .setMIFlag(MachineInstr::FrameSetup);
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)).addImm(Reg).setMIFlag(
+ MachineInstr::FrameSetup);
}
}
@@ -1204,11 +1130,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// virtual memory manager are allocated in correct sequence.
uint64_t AlignedNumBytes = NumBytes;
if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
- AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
+ AlignedNumBytes = RoundUpToAlignment(AlignedNumBytes, MaxAlign);
if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
- assert(!X86FI->getUsesRedZone() &&
- "The Red Zone is not accounted for in stack probes");
-
// Check whether EAX is livein for this block.
bool isEAXAlive = isEAXLiveIn(MBB);
@@ -1262,12 +1185,10 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
emitSPUpdate(MBB, MBBI, -(int64_t)NumBytes, /*InEpilogue=*/false);
}
- if (NeedsWinCFI && NumBytes) {
- HasWinCFI = true;
+ if (NeedsWinCFI && NumBytes)
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
.addImm(NumBytes)
.setMIFlag(MachineInstr::FrameSetup);
- }
int SEHFrameOffset = 0;
unsigned SPOrEstablisher;
@@ -1314,8 +1235,6 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// If this is not a funclet, emit the CFI describing our frame pointer.
if (NeedsWinCFI && !IsFunclet) {
- assert(!NeedsWinFPO && "this setframe incompatible with FPO data");
- HasWinCFI = true;
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
.addImm(FramePtr)
.addImm(SEHFrameOffset)
@@ -1341,7 +1260,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
}
while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
- const MachineInstr &FrameInstr = *MBBI;
+ const MachineInstr *FrameInstr = &*MBBI;
++MBBI;
if (NeedsWinCFI) {
@@ -1352,8 +1271,6 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
int Offset = getFrameIndexReference(MF, FI, IgnoredFrameReg);
Offset += SEHFrameOffset;
- HasWinCFI = true;
- assert(!NeedsWinFPO && "SEH_SaveXMM incompatible with FPO data");
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
.addImm(Reg)
.addImm(Offset)
@@ -1363,7 +1280,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
}
}
- if (NeedsWinCFI && HasWinCFI)
+ if (NeedsWinCFI)
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
.setMIFlag(MachineInstr::FrameSetup);
@@ -1440,38 +1357,24 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
}
// Emit DWARF info specifying the offsets of the callee-saved registers.
- emitCalleeSavedFrameMoves(MBB, MBBI, DL);
+ if (PushedRegs)
+ emitCalleeSavedFrameMoves(MBB, MBBI, DL);
}
-
- // X86 Interrupt handling function cannot assume anything about the direction
- // flag (DF in EFLAGS register). Clear this flag by creating "cld" instruction
- // in each prologue of interrupt handler function.
- //
- // FIXME: Create "cld" instruction only in these cases:
- // 1. The interrupt handling function uses any of the "rep" instructions.
- // 2. Interrupt handling function calls another function.
- //
- if (Fn.getCallingConv() == CallingConv::X86_INTR)
- BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
- .setMIFlag(MachineInstr::FrameSetup);
-
- // At this point we know if the function has WinCFI or not.
- MF.setHasWinCFI(HasWinCFI);
}
bool X86FrameLowering::canUseLEAForSPInEpilogue(
const MachineFunction &MF) const {
- // We can't use LEA instructions for adjusting the stack pointer if we don't
- // have a frame pointer in the Win64 ABI. Only ADD instructions may be used
- // to deallocate the stack.
+ // We can't use LEA instructions for adjusting the stack pointer if this is a
+ // leaf function in the Win64 ABI. Only ADD instructions may be used to
+ // deallocate the stack.
// This means that we can use LEA for SP in two situations:
// 1. We *aren't* using the Win64 ABI which means we are free to use LEA.
// 2. We *have* a frame pointer which means we are permitted to use LEA.
return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
}
-static bool isFuncletReturnInstr(MachineInstr &MI) {
- switch (MI.getOpcode()) {
+static bool isFuncletReturnInstr(MachineInstr *MI) {
+ switch (MI->getOpcode()) {
case X86::CATCHRET:
case X86::CLEANUPRET:
return true;
@@ -1497,10 +1400,11 @@ static bool isFuncletReturnInstr(MachineInstr &MI) {
unsigned
X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo();
+ // getFrameIndexReferenceFromSP has an out ref parameter for the stack
+ // pointer register; pass a dummy that we ignore
unsigned SPReg;
- int Offset = getFrameIndexReferencePreferSP(MF, Info.PSPSymFrameIdx, SPReg,
- /*IgnoreSPUpdates*/ true);
- assert(Offset >= 0 && SPReg == TRI->getStackRegister());
+ int Offset = getFrameIndexReferenceFromSP(MF, Info.PSPSymFrameIdx, SPReg);
+ assert(Offset >= 0);
return static_cast<unsigned>(Offset);
}
@@ -1512,7 +1416,7 @@ X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
// This is the amount of stack a funclet needs to allocate.
unsigned UsedSize;
EHPersonality Personality =
- classifyEHPersonality(MF.getFunction().getPersonalityFn());
+ classifyEHPersonality(MF.getFunction()->getPersonalityFn());
if (Personality == EHPersonality::CoreCLR) {
// CLR funclets need to hold enough space to include the PSPSym, at the
// same offset from the stack pointer (immediately after the prolog) as it
@@ -1520,30 +1424,23 @@ X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize;
} else {
// Other funclets just need enough stack for outgoing call arguments.
- UsedSize = MF.getFrameInfo().getMaxCallFrameSize();
+ UsedSize = MF.getFrameInfo()->getMaxCallFrameSize();
}
// RBP is not included in the callee saved register block. After pushing RBP,
// everything is 16 byte aligned. Everything we allocate before an outgoing
// call must also be 16 byte aligned.
- unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlignment());
+ unsigned FrameSizeMinusRBP =
+ RoundUpToAlignment(CSSize + UsedSize, getStackAlignment());
// Subtract out the size of the callee saved registers. This is how much stack
// each funclet will allocate.
return FrameSizeMinusRBP - CSSize;
}
-static bool isTailCallOpcode(unsigned Opc) {
- return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
- Opc == X86::TCRETURNmi ||
- Opc == X86::TCRETURNri64 || Opc == X86::TCRETURNdi64 ||
- Opc == X86::TCRETURNmi64;
-}
-
void X86FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
- const MachineFrameInfo &MFI = MF.getFrameInfo();
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- MachineBasicBlock::iterator Terminator = MBB.getFirstTerminator();
- MachineBasicBlock::iterator MBBI = Terminator;
+ MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
DebugLoc DL;
if (MBBI != MBB.end())
DL = MBBI->getDebugLoc();
@@ -1554,21 +1451,38 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
Is64BitILP32 ? getX86SubSuperRegister(FramePtr, 64) : FramePtr;
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
- bool NeedsWin64CFI =
- IsWin64Prologue && MF.getFunction().needsUnwindTableEntry();
- bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI);
+ bool NeedsWinCFI =
+ IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry();
+ bool IsFunclet = isFuncletReturnInstr(MBBI);
+ MachineBasicBlock *TargetMBB = nullptr;
// Get the number of bytes to allocate from the FrameInfo.
- uint64_t StackSize = MFI.getStackSize();
+ uint64_t StackSize = MFI->getStackSize();
uint64_t MaxAlign = calculateMaxStackAlign(MF);
unsigned CSSize = X86FI->getCalleeSavedFrameSize();
- bool HasFP = hasFP(MF);
uint64_t NumBytes = 0;
- if (IsFunclet) {
- assert(HasFP && "EH funclets without FP not yet implemented");
+ if (MBBI->getOpcode() == X86::CATCHRET) {
+ // SEH shouldn't use catchret.
+ assert(!isAsynchronousEHPersonality(
+ classifyEHPersonality(MF.getFunction()->getPersonalityFn())) &&
+ "SEH should not use CATCHRET");
+
NumBytes = getWinEHFuncletFrameSize(MF);
- } else if (HasFP) {
+ assert(hasFP(MF) && "EH funclets without FP not yet implemented");
+ TargetMBB = MBBI->getOperand(0).getMBB();
+
+ // Pop EBP.
+ BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
+ MachineFramePtr)
+ .setMIFlag(MachineInstr::FrameDestroy);
+ } else if (MBBI->getOpcode() == X86::CLEANUPRET) {
+ NumBytes = getWinEHFuncletFrameSize(MF);
+ assert(hasFP(MF) && "EH funclets without FP not yet implemented");
+ BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
+ MachineFramePtr)
+ .setMIFlag(MachineInstr::FrameDestroy);
+ } else if (hasFP(MF)) {
// Calculate required stack adjustment.
uint64_t FrameSize = StackSize - SlotSize;
NumBytes = FrameSize - CSSize;
@@ -1576,52 +1490,65 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// Callee-saved registers were pushed on stack before the stack was
// realigned.
if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
- NumBytes = alignTo(FrameSize, MaxAlign);
- } else {
- NumBytes = StackSize - CSSize;
- }
- uint64_t SEHStackAllocAmt = NumBytes;
+ NumBytes = RoundUpToAlignment(FrameSize, MaxAlign);
- if (HasFP) {
// Pop EBP.
- BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
- MachineFramePtr)
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Is64Bit ? X86::POP64r : X86::POP32r), MachineFramePtr)
.setMIFlag(MachineInstr::FrameDestroy);
+ } else {
+ NumBytes = StackSize - CSSize;
}
+ uint64_t SEHStackAllocAmt = NumBytes;
- MachineBasicBlock::iterator FirstCSPop = MBBI;
// Skip the callee-saved pop instructions.
while (MBBI != MBB.begin()) {
MachineBasicBlock::iterator PI = std::prev(MBBI);
unsigned Opc = PI->getOpcode();
- if (Opc != X86::DBG_VALUE && !PI->isTerminator()) {
- if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
- (Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)))
- break;
- FirstCSPop = PI;
- }
+ if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
+ (Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
+ Opc != X86::DBG_VALUE && !PI->isTerminator())
+ break;
--MBBI;
}
- MBBI = FirstCSPop;
+ MachineBasicBlock::iterator FirstCSPop = MBBI;
- if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET)
- emitCatchRetReturnValue(MBB, FirstCSPop, &*Terminator);
+ if (TargetMBB) {
+ // Fill EAX/RAX with the address of the target block.
+ unsigned ReturnReg = STI.is64Bit() ? X86::RAX : X86::EAX;
+ if (STI.is64Bit()) {
+ // LEA64r TargetMBB(%rip), %rax
+ BuildMI(MBB, FirstCSPop, DL, TII.get(X86::LEA64r), ReturnReg)
+ .addReg(X86::RIP)
+ .addImm(0)
+ .addReg(0)
+ .addMBB(TargetMBB)
+ .addReg(0);
+ } else {
+ // MOV32ri $TargetMBB, %eax
+ BuildMI(MBB, FirstCSPop, DL, TII.get(X86::MOV32ri), ReturnReg)
+ .addMBB(TargetMBB);
+ }
+ // Record that we've taken the address of TargetMBB and no longer just
+ // reference it in a terminator.
+ TargetMBB->setHasAddressTaken();
+ }
if (MBBI != MBB.end())
DL = MBBI->getDebugLoc();
// If there is an ADD32ri or SUB32ri of ESP immediately before this
// instruction, merge the two instructions.
- if (NumBytes || MFI.hasVarSizedObjects())
+ if (NumBytes || MFI->hasVarSizedObjects())
NumBytes += mergeSPUpdates(MBB, MBBI, true);
// If dynamic alloca is used, then reset esp to point to the last callee-saved
// slot before popping them off! Same applies for the case, when stack was
// realigned. Don't do this if this was a funclet epilogue, since the funclets
// will not do realignment or dynamic stack allocation.
- if ((TRI->needsStackRealignment(MF) || MFI.hasVarSizedObjects()) &&
+ if ((TRI->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) &&
!IsFunclet) {
if (TRI->needsStackRealignment(MF))
MBBI = FirstCSPop;
@@ -1659,33 +1586,36 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// into the epilogue. To cope with that, we insert an epilogue marker here,
// then replace it with a 'nop' if it ends up immediately after a CALL in the
// final emitted code.
- if (NeedsWin64CFI && MF.hasWinCFI())
+ if (NeedsWinCFI)
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
- if (Terminator == MBB.end() || !isTailCallOpcode(Terminator->getOpcode())) {
- // Add the return addr area delta back since we are not tail calling.
- int Offset = -1 * X86FI->getTCReturnAddrDelta();
- assert(Offset >= 0 && "TCDelta should never be positive");
- if (Offset) {
- // Check for possible merge with preceding ADD instruction.
- Offset += mergeSPUpdates(MBB, Terminator, true);
- emitSPUpdate(MBB, Terminator, Offset, /*InEpilogue=*/true);
- }
+ // Add the return addr area delta back since we are not tail calling.
+ int Offset = -1 * X86FI->getTCReturnAddrDelta();
+ assert(Offset >= 0 && "TCDelta should never be positive");
+ if (Offset) {
+ MBBI = MBB.getFirstTerminator();
+
+ // Check for possible merge with preceding ADD instruction.
+ Offset += mergeSPUpdates(MBB, MBBI, true);
+ emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
}
}
+// NOTE: this only has a subset of the full frame index logic. In
+// particular, the FI < 0 and AfterFPPop logic is handled in
+// X86RegisterInfo::eliminateFrameIndex, but not here. Possibly
+// (probably?) it should be moved into here.
int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const {
- const MachineFrameInfo &MFI = MF.getFrameInfo();
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
- bool IsFixed = MFI.isFixedObjectIndex(FI);
// We can't calculate offset from frame pointer if the stack is realigned,
// so enforce usage of stack/base pointer. The base pointer is used when we
// have dynamic allocas in addition to dynamic realignment.
if (TRI->hasBasePointer(MF))
- FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getBaseRegister();
+ FrameReg = TRI->getBaseRegister();
else if (TRI->needsStackRealignment(MF))
- FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getStackRegister();
+ FrameReg = TRI->getStackRegister();
else
FrameReg = TRI->getFrameRegister(MF);
@@ -1693,16 +1623,16 @@ int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
// object.
// We need to factor in additional offsets applied during the prologue to the
// frame, base, and stack pointer depending on which is used.
- int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea();
+ int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
unsigned CSSize = X86FI->getCalleeSavedFrameSize();
- uint64_t StackSize = MFI.getStackSize();
+ uint64_t StackSize = MFI->getStackSize();
bool HasFP = hasFP(MF);
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
int64_t FPDelta = 0;
if (IsWin64Prologue) {
- assert(!MFI.hasCalls() || (StackSize % 16) == 8);
+ assert(!MFI->hasCalls() || (StackSize % 16) == 8);
// Calculate required stack adjustment.
uint64_t FrameSize = StackSize - SlotSize;
@@ -1720,7 +1650,7 @@ int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
// restricted Win64 prologue.
// Add FPDelta to all offsets below that go through the frame pointer.
FPDelta = FrameSize - SEHFrameOffset;
- assert((!MFI.hasCalls() || (FPDelta % 16) == 0) &&
+ assert((!MFI->hasCalls() || (FPDelta % 16) == 0) &&
"FPDelta isn't aligned per the Win64 ABI!");
}
@@ -1731,7 +1661,7 @@ int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
// Skip the saved EBP.
return Offset + SlotSize + FPDelta;
} else {
- assert((-(Offset + StackSize)) % MFI.getObjectAlignment(FI) == 0);
+ assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
return Offset + StackSize;
}
} else if (TRI->needsStackRealignment(MF)) {
@@ -1739,7 +1669,7 @@ int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
// Skip the saved EBP.
return Offset + SlotSize + FPDelta;
} else {
- assert((-(Offset + StackSize)) % MFI.getObjectAlignment(FI) == 0);
+ assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
return Offset + StackSize;
}
// FIXME: Support tail calls
@@ -1759,69 +1689,61 @@ int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
return Offset + FPDelta;
}
-int X86FrameLowering::getFrameIndexReferenceSP(const MachineFunction &MF,
- int FI, unsigned &FrameReg,
- int Adjustment) const {
- const MachineFrameInfo &MFI = MF.getFrameInfo();
- FrameReg = TRI->getStackRegister();
- return MFI.getObjectOffset(FI) - getOffsetOfLocalArea() + Adjustment;
-}
-
-int
-X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF,
- int FI, unsigned &FrameReg,
- bool IgnoreSPUpdates) const {
-
- const MachineFrameInfo &MFI = MF.getFrameInfo();
+// Simplified from getFrameIndexReference keeping only StackPointer cases
+int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
+ int FI,
+ unsigned &FrameReg) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
// Does not include any dynamic realign.
- const uint64_t StackSize = MFI.getStackSize();
- // LLVM arranges the stack as follows:
- // ...
- // ARG2
- // ARG1
- // RETADDR
- // PUSH RBP <-- RBP points here
- // PUSH CSRs
- // ~~~~~~~ <-- possible stack realignment (non-win64)
- // ...
- // STACK OBJECTS
- // ... <-- RSP after prologue points here
- // ~~~~~~~ <-- possible stack realignment (win64)
- //
- // if (hasVarSizedObjects()):
- // ... <-- "base pointer" (ESI/RBX) points here
- // DYNAMIC ALLOCAS
- // ... <-- RSP points here
- //
- // Case 1: In the simple case of no stack realignment and no dynamic
- // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
- // with fixed offsets from RSP.
- //
- // Case 2: In the case of stack realignment with no dynamic allocas, fixed
- // stack objects are addressed with RBP and regular stack objects with RSP.
- //
- // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
- // to address stack arguments for outgoing calls and nothing else. The "base
- // pointer" points to local variables, and RBP points to fixed objects.
- //
- // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
- // answer we give is relative to the SP after the prologue, and not the
- // SP in the middle of the function.
-
- if (MFI.isFixedObjectIndex(FI) && TRI->needsStackRealignment(MF) &&
- !STI.isTargetWin64())
- return getFrameIndexReference(MF, FI, FrameReg);
-
- // If !hasReservedCallFrame the function might have SP adjustement in the
- // body. So, even though the offset is statically known, it depends on where
- // we are in the function.
- const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
- if (!IgnoreSPUpdates && !TFI->hasReservedCallFrame(MF))
- return getFrameIndexReference(MF, FI, FrameReg);
+ const uint64_t StackSize = MFI->getStackSize();
+ {
+#ifndef NDEBUG
+ // LLVM arranges the stack as follows:
+ // ...
+ // ARG2
+ // ARG1
+ // RETADDR
+ // PUSH RBP <-- RBP points here
+ // PUSH CSRs
+ // ~~~~~~~ <-- possible stack realignment (non-win64)
+ // ...
+ // STACK OBJECTS
+ // ... <-- RSP after prologue points here
+ // ~~~~~~~ <-- possible stack realignment (win64)
+ //
+ // if (hasVarSizedObjects()):
+ // ... <-- "base pointer" (ESI/RBX) points here
+ // DYNAMIC ALLOCAS
+ // ... <-- RSP points here
+ //
+ // Case 1: In the simple case of no stack realignment and no dynamic
+ // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
+ // with fixed offsets from RSP.
+ //
+ // Case 2: In the case of stack realignment with no dynamic allocas, fixed
+ // stack objects are addressed with RBP and regular stack objects with RSP.
+ //
+ // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
+ // to address stack arguments for outgoing calls and nothing else. The "base
+ // pointer" points to local variables, and RBP points to fixed objects.
+ //
+ // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
+ // answer we give is relative to the SP after the prologue, and not the
+ // SP in the middle of the function.
+
+ assert((!MFI->isFixedObjectIndex(FI) || !TRI->needsStackRealignment(MF) ||
+ STI.isTargetWin64()) &&
+ "offset from fixed object to SP is not static");
+
+ // We don't handle tail calls, and shouldn't be seeing them either.
+ int TailCallReturnAddrDelta =
+ MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta();
+ assert(!(TailCallReturnAddrDelta < 0) && "we don't handle this case!");
+#endif
+ }
- // We don't handle tail calls, and shouldn't be seeing them either.
- assert(MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta() >= 0 &&
- "we don't handle this case!");
+ // Fill in FrameReg output argument.
+ FrameReg = TRI->getStackRegister();
// This is how the math works out:
//
@@ -1837,7 +1759,7 @@ X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF,
//
// A is the incoming stack pointer.
// (B - A) is the local area offset (-8 for x86-64) [1]
- // (C - A) is the Offset returned by MFI.getObjectOffset for Obj0 [2]
+ // (C - A) is the Offset returned by MFI->getObjectOffset for Obj0 [2]
//
// |(E - B)| is the StackSize (absolute value, positive). For a
// stack that grown down, this works out to be (B - E). [3]
@@ -1847,14 +1769,18 @@ X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF,
// (C - E) == (C - A) - (B - A) + (B - E)
// { Using [1], [2] and [3] above }
// == getObjectOffset - LocalAreaOffset + StackSize
+ //
+
+ // Get the Offset from the StackPointer
+ int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
- return getFrameIndexReferenceSP(MF, FI, FrameReg, StackSize);
+ return Offset + StackSize;
}
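
The offset identity the comment block above derives, (C - E) == (C - A) - (B - A) + (B - E), is easy to sanity-check with concrete numbers; the values below are invented, with -8 being the x86-64 local area offset mentioned in the comment.

#include <cassert>
#include <cstdint>

int main() {
  const int64_t LocalAreaOffset = -8;   // (B - A): the pushed return address
  const int64_t ObjectOffset    = -24;  // (C - A): MFI offset of the object
  const int64_t StackSize       = 64;   // (B - E): bytes the prologue allocated
  int64_t OffsetFromSP = ObjectOffset - LocalAreaOffset + StackSize;  // (C - E)
  assert(OffsetFromSP == 48);
  return 0;
}
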
bool X86FrameLowering::assignCalleeSavedSpillSlots(
MachineFunction &MF, const TargetRegisterInfo *TRI,
std::vector<CalleeSavedInfo> &CSI) const {
- MachineFrameInfo &MFI = MF.getFrameInfo();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
unsigned CalleeSavedFrameSize = 0;
@@ -1863,7 +1789,7 @@ bool X86FrameLowering::assignCalleeSavedSpillSlots(
if (hasFP(MF)) {
// emitPrologue always spills frame register the first thing.
SpillSlotOffset -= SlotSize;
- MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
+ MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
// Since emitPrologue and emitEpilogue will handle spilling and restoring of
// the frame register, we can delete it from CSI list and not have to worry
@@ -1887,7 +1813,7 @@ bool X86FrameLowering::assignCalleeSavedSpillSlots(
SpillSlotOffset -= SlotSize;
CalleeSavedFrameSize += SlotSize;
- int SlotIndex = MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
+ int SlotIndex = MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
CSI[i - 1].setFrameIdx(SlotIndex);
}
@@ -1900,15 +1826,14 @@ bool X86FrameLowering::assignCalleeSavedSpillSlots(
continue;
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- unsigned Size = TRI->getSpillSize(*RC);
- unsigned Align = TRI->getSpillAlignment(*RC);
// ensure alignment
- SpillSlotOffset -= std::abs(SpillSlotOffset) % Align;
+ SpillSlotOffset -= std::abs(SpillSlotOffset) % RC->getAlignment();
// spill into slot
- SpillSlotOffset -= Size;
- int SlotIndex = MFI.CreateFixedSpillStackObject(Size, SpillSlotOffset);
+ SpillSlotOffset -= RC->getSize();
+ int SlotIndex =
+ MFI->CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);
CSI[i - 1].setFrameIdx(SlotIndex);
- MFI.ensureMaxAlignment(Align);
+ MFI->ensureMaxAlignment(RC->getAlignment());
}
return true;
@@ -1926,37 +1851,16 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
return true;
// Push GPRs. It increases frame size.
- const MachineFunction &MF = *MBB.getParent();
unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
for (unsigned i = CSI.size(); i != 0; --i) {
unsigned Reg = CSI[i - 1].getReg();
if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
continue;
+ // Add the callee-saved register as live-in. It's killed at the spill.
+ MBB.addLiveIn(Reg);
- const MachineRegisterInfo &MRI = MF.getRegInfo();
- bool isLiveIn = MRI.isLiveIn(Reg);
- if (!isLiveIn)
- MBB.addLiveIn(Reg);
-
- // Decide whether we can add a kill flag to the use.
- bool CanKill = !isLiveIn;
- // Check if any subregister is live-in
- if (CanKill) {
- for (MCRegAliasIterator AReg(Reg, TRI, false); AReg.isValid(); ++AReg) {
- if (MRI.isLiveIn(*AReg)) {
- CanKill = false;
- break;
- }
- }
- }
-
- // Do not set a kill flag on values that are also marked as live-in. This
- // happens with the @llvm-returnaddress intrinsic and with arguments
- // passed in callee saved registers.
- // Omitting the kill flags is conservatively correct even if the live-in
- // is not used after all.
- BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, getKillRegState(CanKill))
+ BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
.setMIFlag(MachineInstr::FrameSetup);
}
@@ -1980,44 +1884,14 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
return true;
}
-void X86FrameLowering::emitCatchRetReturnValue(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- MachineInstr *CatchRet) const {
- // SEH shouldn't use catchret.
- assert(!isAsynchronousEHPersonality(classifyEHPersonality(
- MBB.getParent()->getFunction().getPersonalityFn())) &&
- "SEH should not use CATCHRET");
- DebugLoc DL = CatchRet->getDebugLoc();
- MachineBasicBlock *CatchRetTarget = CatchRet->getOperand(0).getMBB();
-
- // Fill EAX/RAX with the address of the target block.
- if (STI.is64Bit()) {
- // LEA64r CatchRetTarget(%rip), %rax
- BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), X86::RAX)
- .addReg(X86::RIP)
- .addImm(0)
- .addReg(0)
- .addMBB(CatchRetTarget)
- .addReg(0);
- } else {
- // MOV32ri $CatchRetTarget, %eax
- BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
- .addMBB(CatchRetTarget);
- }
-
- // Record that we've taken the address of CatchRetTarget and no longer just
- // reference it in a terminator.
- CatchRetTarget->setHasAddressTaken();
-}
-
bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
- std::vector<CalleeSavedInfo> &CSI,
+ const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const {
if (CSI.empty())
return false;
- if (MI != MBB.end() && isFuncletReturnInstr(*MI) && STI.isOSWindows()) {
+ if (isFuncletReturnInstr(MI) && STI.isOSWindows()) {
// Don't restore CSRs in 32-bit EH funclets. Matches
// spillCalleeSavedRegisters.
if (STI.is32Bit())
@@ -2025,9 +1899,9 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
// Don't restore CSRs before an SEH catchret. SEH except blocks do not form
// funclets. emitEpilogue transforms these to normal jumps.
if (MI->getOpcode() == X86::CATCHRET) {
- const Function &F = MBB.getParent()->getFunction();
+ const Function *Func = MBB.getParent()->getFunction();
bool IsSEH = isAsynchronousEHPersonality(
- classifyEHPersonality(F.getPersonalityFn()));
+ classifyEHPersonality(Func->getPersonalityFn()));
if (IsSEH)
return true;
}
@@ -2065,7 +1939,7 @@ void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
RegScavenger *RS) const {
TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
- MachineFrameInfo &MFI = MF.getFrameInfo();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
@@ -2080,7 +1954,7 @@ void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
// ...
// }
// [EBP]
- MFI.CreateFixedObject(-TailCallReturnAddrDelta,
+ MFI->CreateFixedObject(-TailCallReturnAddrDelta,
TailCallReturnAddrDelta - SlotSize, true);
}
@@ -2089,8 +1963,8 @@ void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
SavedRegs.set(TRI->getBaseRegister());
// Allocate a spill slot for EBP if we have a base pointer and EH funclets.
- if (MF.hasEHFunclets()) {
- int FI = MFI.CreateSpillStackObject(SlotSize, SlotSize);
+ if (MF.getMMI().hasEHFunclets()) {
+ int FI = MFI->CreateSpillStackObject(SlotSize, SlotSize);
X86FI->setHasSEHFramePtrSave(true);
X86FI->setSEHFramePtrSaveIndex(FI);
}
@@ -2099,8 +1973,8 @@ void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
static bool
HasNestArgument(const MachineFunction *MF) {
- const Function &F = MF->getFunction();
- for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
+ const Function *F = MF->getFunction();
+ for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
I != E; I++) {
if (I->hasNestAttr())
return true;
@@ -2114,7 +1988,7 @@ HasNestArgument(const MachineFunction *MF) {
/// needed. Set primary to true for the first register, false for the second.
static unsigned
GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) {
- CallingConv::ID CallingConvention = MF.getFunction().getCallingConv();
+ CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
// Erlang stuff.
if (CallingConvention == CallingConv::HiPE) {
@@ -2151,7 +2025,7 @@ static const uint64_t kSplitStackAvailable = 256;
void X86FrameLowering::adjustForSegmentedStacks(
MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
- MachineFrameInfo &MFI = MF.getFrameInfo();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
uint64_t StackSize;
unsigned TlsReg, TlsOffset;
DebugLoc DL;
@@ -2164,7 +2038,7 @@ void X86FrameLowering::adjustForSegmentedStacks(
assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
"Scratch register is live-in");
- if (MF.getFunction().isVarArg())
+ if (MF.getFunction()->isVarArg())
report_fatal_error("Segmented stacks do not support vararg functions.");
if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
!STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
@@ -2174,7 +2048,7 @@ void X86FrameLowering::adjustForSegmentedStacks(
// Eventually StackSize will be calculated by a link-time pass; which will
// also decide whether checking code needs to be injected into this particular
// prologue.
- StackSize = MFI.getStackSize();
+ StackSize = MFI->getStackSize();
// Do not generate a prologue for functions with a stack of size zero
if (StackSize == 0)
@@ -2350,10 +2224,6 @@ void X86FrameLowering::adjustForSegmentedStacks(
// This solution is not perfect, as it assumes that the .rodata section
// is laid out within 2^31 bytes of each function body, but this seems
// to be sufficient for JIT.
- // FIXME: Add retpoline support and remove the error here..
- if (STI.useRetpoline())
- report_fatal_error("Emitting morestack calls on 64-bit with the large "
- "code model and retpoline not yet implemented.");
BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
.addReg(X86::RIP)
.addImm(0)
@@ -2380,33 +2250,11 @@ void X86FrameLowering::adjustForSegmentedStacks(
checkMBB->addSuccessor(allocMBB);
checkMBB->addSuccessor(&PrologueMBB);
-#ifdef EXPENSIVE_CHECKS
+#ifdef XDEBUG
MF.verify();
#endif
}
-/// Lookup an ERTS parameter in the !hipe.literals named metadata node.
-/// HiPE provides Erlang Runtime System-internal parameters, such as PCB offsets
-/// to fields it needs, through a named metadata node "hipe.literals" containing
-/// name-value pairs.
-static unsigned getHiPELiteral(
- NamedMDNode *HiPELiteralsMD, const StringRef LiteralName) {
- for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) {
- MDNode *Node = HiPELiteralsMD->getOperand(i);
- if (Node->getNumOperands() != 2) continue;
- MDString *NodeName = dyn_cast<MDString>(Node->getOperand(0));
- ValueAsMetadata *NodeVal = dyn_cast<ValueAsMetadata>(Node->getOperand(1));
- if (!NodeName || !NodeVal) continue;
- ConstantInt *ValConst = dyn_cast_or_null<ConstantInt>(NodeVal->getValue());
- if (ValConst && NodeName->getString() == LiteralName) {
- return ValConst->getZExtValue();
- }
- }
-
- report_fatal_error("HiPE literal " + LiteralName
- + " required but not provided");
-}
-
/// Erlang programs may need a special prologue to handle the stack size they
/// might need at runtime. That is because Erlang/OTP does not implement a C
/// stack but uses a custom implementation of hybrid stack/heap architecture.
@@ -2424,7 +2272,7 @@ static unsigned getHiPELiteral(
/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
void X86FrameLowering::adjustForHiPEPrologue(
MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
- MachineFrameInfo &MFI = MF.getFrameInfo();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
DebugLoc DL;
// To support shrink-wrapping we would need to insert the new blocks
@@ -2432,19 +2280,12 @@ void X86FrameLowering::adjustForHiPEPrologue(
assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
// HiPE-specific values
- NamedMDNode *HiPELiteralsMD = MF.getMMI().getModule()
- ->getNamedMetadata("hipe.literals");
- if (!HiPELiteralsMD)
- report_fatal_error(
- "Can't generate HiPE prologue without runtime parameters");
- const unsigned HipeLeafWords
- = getHiPELiteral(HiPELiteralsMD,
- Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
+ const unsigned HipeLeafWords = 24;
const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
const unsigned Guaranteed = HipeLeafWords * SlotSize;
- unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs ?
- MF.getFunction().arg_size() - CCRegisteredArgs : 0;
- unsigned MaxStack = MFI.getStackSize() + CallerStkArity*SlotSize + SlotSize;
+ unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
+ MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
+ unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;
assert(STI.isTargetLinux() &&
"HiPE prologue is only supported on Linux operating systems.");
@@ -2456,16 +2297,18 @@ void X86FrameLowering::adjustForHiPEPrologue(
// b) outgoing on-stack parameter areas, and
// c) the minimum stack space this function needs to make available for the
// functions it calls (a tunable ABI property).
- if (MFI.hasCalls()) {
+ if (MFI->hasCalls()) {
unsigned MoreStackForCalls = 0;
- for (auto &MBB : MF) {
- for (auto &MI : MBB) {
- if (!MI.isCall())
+ for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
+ MBBI != MBBE; ++MBBI)
+ for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
+ MI != ME; ++MI) {
+ if (!MI->isCall())
continue;
// Get callee operand.
- const MachineOperand &MO = MI.getOperand(0);
+ const MachineOperand &MO = MI->getOperand(0);
// Only take account of global function calls (no closures etc.).
if (!MO.isGlobal())
@@ -2491,7 +2334,6 @@ void X86FrameLowering::adjustForHiPEPrologue(
MoreStackForCalls = std::max(MoreStackForCalls,
(HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
}
- }
MaxStack += MoreStackForCalls;
}
@@ -2511,19 +2353,20 @@ void X86FrameLowering::adjustForHiPEPrologue(
unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
unsigned LEAop, CMPop, CALLop;
- SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");
if (Is64Bit) {
SPReg = X86::RSP;
PReg = X86::RBP;
LEAop = X86::LEA64r;
CMPop = X86::CMP64rm;
CALLop = X86::CALL64pcrel32;
+ SPLimitOffset = 0x90;
} else {
SPReg = X86::ESP;
PReg = X86::EBP;
LEAop = X86::LEA32r;
CMPop = X86::CMP32rm;
CALLop = X86::CALLpcrel32;
+ SPLimitOffset = 0x4c;
}
ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
@@ -2552,15 +2395,13 @@ void X86FrameLowering::adjustForHiPEPrologue(
incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
incStackMBB->addSuccessor(incStackMBB, {1, 100});
}
-#ifdef EXPENSIVE_CHECKS
+#ifdef XDEBUG
MF.verify();
#endif
}
bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL,
- int Offset) const {
+ MachineBasicBlock::iterator MBBI, DebugLoc DL, int Offset) const {
if (Offset <= 0)
return false;
@@ -2584,7 +2425,6 @@ bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
unsigned Regs[2];
unsigned FoundRegs = 0;
- auto &MRI = MBB.getParent()->getRegInfo();
auto RegMask = Prev->getOperand(1);
auto &RegClass =
@@ -2598,14 +2438,11 @@ bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
if (!RegMask.clobbersPhysReg(Candidate))
continue;
- // Don't clobber reserved registers
- if (MRI.isReserved(Candidate))
- continue;
-
bool IsDef = false;
for (const MachineOperand &MO : Prev->implicit_operands()) {
if (MO.isReg() && MO.isDef() &&
- TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
+ (TRI->isSubRegisterEq(MO.getReg(), Candidate) ||
+ TRI->isSuperRegister(MO.getReg(), Candidate))) {
IsDef = true;
break;
}
@@ -2633,17 +2470,16 @@ bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
return true;
}
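(Editorial note: for context on adjustStackWithPops, each POP into a scratch register bumps ESP/RSP by SlotSize, so a small positive adjustment that is a multiple of SlotSize can be encoded as a handful of one-byte pops when enough clobberable, non-live registers exist. A hedged sketch of that applicability check, not the LLVM code:

// Returns how many pops a +Offset adjustment would take, or 0 if pops do not
// apply and an ADD/LEA-based adjustment should be used instead.
unsigned popsForAdjustment(int Offset, unsigned SlotSize) {
  if (Offset <= 0 || Offset % static_cast<int>(SlotSize) != 0)
    return 0;
  return static_cast<unsigned>(Offset) / SlotSize;
}
)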
-MachineBasicBlock::iterator X86FrameLowering::
+void X86FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
bool reserveCallFrame = hasReservedCallFrame(MF);
unsigned Opcode = I->getOpcode();
bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
DebugLoc DL = I->getDebugLoc();
- uint64_t Amount = !reserveCallFrame ? TII.getFrameSize(*I) : 0;
- uint64_t InternalAmt = (isDestroy || Amount) ? TII.getFrameAdjustment(*I) : 0;
+ uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
+ uint64_t InternalAmt = (isDestroy || Amount) ? I->getOperand(1).getImm() : 0;
I = MBB.erase(I);
- auto InsertPos = skipDebugInstructionsForward(I, MBB.end());
if (!reserveCallFrame) {
// If the stack pointer can be changed after prologue, turn the
@@ -2654,13 +2490,13 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
unsigned StackAlign = getStackAlignment();
- Amount = alignTo(Amount, StackAlign);
+ Amount = RoundUpToAlignment(Amount, StackAlign);
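(Editorial note: RoundUpToAlignment here is the usual round-up-to-a-multiple; with the default 16-byte stack alignment, a 20-byte outgoing-argument area is padded to 32. A hedged one-liner showing the same arithmetic:

#include <cstdint>

// roundUpToAlignment(20, 16) == 32; already-aligned values are unchanged.
uint64_t roundUpToAlignment(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) / Align * Align;
}
)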
MachineModuleInfo &MMI = MF.getMMI();
- const Function &F = MF.getFunction();
+ const Function *Fn = MF.getFunction();
bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
- bool DwarfCFI = !WindowsCFI &&
- (MMI.hasDebugInfo() || F.needsUnwindTableEntry());
+ bool DwarfCFI = !WindowsCFI &&
+ (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
// If we have any exception handlers in this function, and we adjust
// the SP before calls, we may need to indicate this to the unwinder
@@ -2669,15 +2505,16 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// GNU_ARGS_SIZE.
// TODO: We don't need to reset this between subsequent functions,
// if it didn't change.
- bool HasDwarfEHHandlers = !WindowsCFI && !MF.getLandingPads().empty();
+ bool HasDwarfEHHandlers = !WindowsCFI &&
+ !MF.getMMI().getLandingPads().empty();
if (HasDwarfEHHandlers && !isDestroy &&
MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences())
- BuildCFI(MBB, InsertPos, DL,
+ BuildCFI(MBB, I, DL,
MCCFIInstruction::createGnuArgsSize(nullptr, Amount));
if (Amount == 0)
- return I;
+ return;
// Factor out the amount that gets handled inside the sequence
// (Pushes of argument for frame setup, callee pops for frame destroy)
@@ -2687,26 +2524,16 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// If this is a callee-pop calling convention, emit a CFA adjust for
// the amount the callee popped.
if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF))
- BuildCFI(MBB, InsertPos, DL,
+ BuildCFI(MBB, I, DL,
MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));
- // Add Amount to SP to destroy a frame, or subtract to setup.
- int64_t StackAdjustment = isDestroy ? Amount : -Amount;
- int64_t CfaAdjustment = -StackAdjustment;
-
- if (StackAdjustment) {
- // Merge with any previous or following adjustment instruction. Note: the
- // instructions merged with here do not have CFI, so their stack
- // adjustments do not feed into CfaAdjustment.
- StackAdjustment += mergeSPUpdates(MBB, InsertPos, true);
- StackAdjustment += mergeSPUpdates(MBB, InsertPos, false);
-
- if (StackAdjustment) {
- if (!(F.optForMinSize() &&
- adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
- BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
- /*InEpilogue=*/false);
- }
+ if (Amount) {
+ // Add Amount to SP to destroy a frame, and subtract to setup.
+ int Offset = isDestroy ? Amount : -Amount;
+
+ if (!(Fn->optForMinSize() &&
+ adjustStackWithPops(MBB, I, DL, Offset)))
+ BuildStackAdjustment(MBB, I, DL, Offset, /*InEpilogue=*/false);
}
if (DwarfCFI && !hasFP(MF)) {
@@ -2716,17 +2543,18 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// CFI only for EH purposes or for debugging. EH only requires the CFA
// offset to be correct at each call site, while for debugging we want
// it to be more precise.
-
+ int CFAOffset = Amount;
// TODO: When not using precise CFA, we also need to adjust for the
// InternalAmt here.
- if (CfaAdjustment) {
- BuildCFI(MBB, InsertPos, DL,
- MCCFIInstruction::createAdjustCfaOffset(nullptr,
- CfaAdjustment));
+
+ if (CFAOffset) {
+ CFAOffset = isDestroy ? -CFAOffset : CFAOffset;
+ BuildCFI(MBB, I, DL,
+ MCCFIInstruction::createAdjustCfaOffset(nullptr, CFAOffset));
}
}
- return I;
+ return;
}
if (isDestroy && InternalAmt) {
@@ -2736,14 +2564,11 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// We are not tracking the stack pointer adjustment by the callee, so make
// sure we restore the stack pointer immediately after the call, there may
// be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
- MachineBasicBlock::iterator CI = I;
MachineBasicBlock::iterator B = MBB.begin();
- while (CI != B && !std::prev(CI)->isCall())
- --CI;
- BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false);
+ while (I != B && !std::prev(I)->isCall())
+ --I;
+ BuildStackAdjustment(MBB, I, DL, -InternalAmt, /*InEpilogue=*/false);
}
-
- return I;
}
bool X86FrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
@@ -2775,19 +2600,19 @@ bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
// If we may need to emit frameless compact unwind information, give
// up as this is currently broken: PR25614.
- return (MF.getFunction().hasFnAttribute(Attribute::NoUnwind) || hasFP(MF)) &&
+ return (MF.getFunction()->hasFnAttribute(Attribute::NoUnwind) || hasFP(MF)) &&
// The lowering of segmented stack and HiPE only support entry blocks
// as prologue blocks: PR26107.
// This limitation may be lifted if we fix:
// - adjustForSegmentedStacks
// - adjustForHiPEPrologue
- MF.getFunction().getCallingConv() != CallingConv::HiPE &&
+ MF.getFunction()->getCallingConv() != CallingConv::HiPE &&
!MF.shouldSplitStack();
}
MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL, bool RestoreSP) const {
+ DebugLoc DL, bool RestoreSP) const {
assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
assert(STI.is32Bit() && !Uses64BitFramePtr &&
@@ -2798,12 +2623,12 @@ MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
unsigned BasePtr = TRI->getBaseRegister();
WinEHFuncInfo &FuncInfo = *MF.getWinEHFuncInfo();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- MachineFrameInfo &MFI = MF.getFrameInfo();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
// FIXME: Don't set FrameSetup flag in catchret case.
int FI = FuncInfo.EHRegNodeFrameIndex;
- int EHRegSize = MFI.getObjectSize(FI);
+ int EHRegSize = MFI->getObjectSize(FI);
if (RestoreSP) {
// MOV32rm -EHRegSize(%ebp), %esp
@@ -2847,150 +2672,6 @@ MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
return MBBI;
}
-namespace {
-// Struct used by orderFrameObjects to help sort the stack objects.
-struct X86FrameSortingObject {
- bool IsValid = false; // true if we care about this Object.
- unsigned ObjectIndex = 0; // Index of Object into MFI list.
- unsigned ObjectSize = 0; // Size of Object in bytes.
- unsigned ObjectAlignment = 1; // Alignment of Object in bytes.
- unsigned ObjectNumUses = 0; // Object static number of uses.
-};
-
-// The comparison function we use for std::sort to order our local
-// stack symbols. The current algorithm is to use an estimated
-// "density". This takes into consideration the size and number of
-// uses each object has in order to roughly minimize code size.
-// So, for example, an object of size 16B that is referenced 5 times
-// will get higher priority than 4 4B objects referenced 1 time each.
-// It's not perfect and we may be able to squeeze a few more bytes out of
-// it (for example : 0(esp) requires fewer bytes, symbols allocated at the
-// fringe end can have special consideration, given their size is less
-// important, etc.), but the algorithmic complexity grows too much to be
-// worth the extra gains we get. This gets us pretty close.
-// The final order leaves us with objects with highest priority going
-// at the end of our list.
-struct X86FrameSortingComparator {
- inline bool operator()(const X86FrameSortingObject &A,
- const X86FrameSortingObject &B) {
- uint64_t DensityAScaled, DensityBScaled;
-
- // For consistency in our comparison, all invalid objects are placed
- // at the end. This also allows us to stop walking when we hit the
- // first invalid item after it's all sorted.
- if (!A.IsValid)
- return false;
- if (!B.IsValid)
- return true;
-
- // The density is calculated by doing :
- // (double)DensityA = A.ObjectNumUses / A.ObjectSize
- // (double)DensityB = B.ObjectNumUses / B.ObjectSize
- // Since this approach may cause inconsistencies in
- // the floating point <, >, == comparisons, depending on the floating
- // point model with which the compiler was built, we're going
- // to scale both sides by multiplying with
- // A.ObjectSize * B.ObjectSize. This ends up factoring away
- // the division and, with it, the need for any floating point
- // arithmetic.
- DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
- static_cast<uint64_t>(B.ObjectSize);
- DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
- static_cast<uint64_t>(A.ObjectSize);
-
- // If the two densities are equal, prioritize highest alignment
- // objects. This allows for similar alignment objects
- // to be packed together (given the same density).
- // There's room for improvement here, also, since we can pack
- // similar alignment (different density) objects next to each
- // other to save padding. This will also require further
- // complexity/iterations, and the overall gain isn't worth it,
- // in general. Something to keep in mind, though.
- if (DensityAScaled == DensityBScaled)
- return A.ObjectAlignment < B.ObjectAlignment;
-
- return DensityAScaled < DensityBScaled;
- }
-};
-} // namespace
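(Editorial note: the comparator removed above avoids floating-point density comparisons by cross-multiplying; a hedged standalone sketch of the same trick:

#include <cstdint>

// usesA/sizeA < usesB/sizeB  <=>  usesA*sizeB < usesB*sizeA  (sizes > 0),
// so the sort needs no floating point. For example, 5 uses of a 16-byte
// object beat 1 use of a 4-byte object because 5*4 > 1*16.
bool lessDense(unsigned UsesA, unsigned SizeA, unsigned UsesB, unsigned SizeB) {
  return static_cast<uint64_t>(UsesA) * SizeB <
         static_cast<uint64_t>(UsesB) * SizeA;
}
)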
-
-// Order the symbols in the local stack.
-// We want to place the local stack objects in some sort of sensible order.
-// The heuristic we use is to try and pack them according to static number
-// of uses and size of object in order to minimize code size.
-void X86FrameLowering::orderFrameObjects(
- const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
- const MachineFrameInfo &MFI = MF.getFrameInfo();
-
- // Don't waste time if there's nothing to do.
- if (ObjectsToAllocate.empty())
- return;
-
- // Create an array of all MFI objects. We won't need all of these
- // objects, but we're going to create a full array of them to make
- // it easier to index into when we're counting "uses" down below.
- // We want to be able to easily/cheaply access an object by simply
- // indexing into it, instead of having to search for it every time.
- std::vector<X86FrameSortingObject> SortingObjects(MFI.getObjectIndexEnd());
-
- // Walk the objects we care about and mark them as such in our working
- // struct.
- for (auto &Obj : ObjectsToAllocate) {
- SortingObjects[Obj].IsValid = true;
- SortingObjects[Obj].ObjectIndex = Obj;
- SortingObjects[Obj].ObjectAlignment = MFI.getObjectAlignment(Obj);
- // Set the size.
- int ObjectSize = MFI.getObjectSize(Obj);
- if (ObjectSize == 0)
- // Variable size. Just use 4.
- SortingObjects[Obj].ObjectSize = 4;
- else
- SortingObjects[Obj].ObjectSize = ObjectSize;
- }
-
- // Count the number of uses for each object.
- for (auto &MBB : MF) {
- for (auto &MI : MBB) {
- if (MI.isDebugValue())
- continue;
- for (const MachineOperand &MO : MI.operands()) {
- // Check to see if it's a local stack symbol.
- if (!MO.isFI())
- continue;
- int Index = MO.getIndex();
- // Check to see if it falls within our range, and is tagged
- // to require ordering.
- if (Index >= 0 && Index < MFI.getObjectIndexEnd() &&
- SortingObjects[Index].IsValid)
- SortingObjects[Index].ObjectNumUses++;
- }
- }
- }
-
- // Sort the objects using X86FrameSortingAlgorithm (see its comment for
- // info).
- std::stable_sort(SortingObjects.begin(), SortingObjects.end(),
- X86FrameSortingComparator());
-
- // Now modify the original list to represent the final order that
- // we want. The order will depend on whether we're going to access them
- // from the stack pointer or the frame pointer. For SP, the list should
- // end up with the END containing objects that we want with smaller offsets.
- // For FP, it should be flipped.
- int i = 0;
- for (auto &Obj : SortingObjects) {
- // All invalid items are sorted at the end, so it's safe to stop.
- if (!Obj.IsValid)
- break;
- ObjectsToAllocate[i++] = Obj.ObjectIndex;
- }
-
- // Flip it if we're accessing off of the FP.
- if (!TRI->needsStackRealignment(MF) && hasFP(MF))
- std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());
-}
-
-
unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
// RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
unsigned Offset = 16;
@@ -3005,15 +2686,11 @@ unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF)
void X86FrameLowering::processFunctionBeforeFrameFinalized(
MachineFunction &MF, RegScavenger *RS) const {
- // Mark the function as not having WinCFI. We will set it back to true in
- // emitPrologue if it gets called and emits CFI.
- MF.setHasWinCFI(false);
-
// If this function isn't doing Win64-style C++ EH, we don't need to do
// anything.
- const Function &F = MF.getFunction();
- if (!STI.is64Bit() || !MF.hasEHFunclets() ||
- classifyEHPersonality(F.getPersonalityFn()) != EHPersonality::MSVC_CXX)
+ const Function *Fn = MF.getFunction();
+ if (!STI.is64Bit() || !MF.getMMI().hasEHFunclets() ||
+ classifyEHPersonality(Fn->getPersonalityFn()) != EHPersonality::MSVC_CXX)
return;
// Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset
@@ -3021,31 +2698,15 @@ void X86FrameLowering::processFunctionBeforeFrameFinalized(
// object, so that we can allocate a slot immediately following it. If there
// were no fixed objects, use offset -SlotSize, which is immediately after the
// return address. Fixed objects have negative frame indices.
- MachineFrameInfo &MFI = MF.getFrameInfo();
- WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
int64_t MinFixedObjOffset = -SlotSize;
- for (int I = MFI.getObjectIndexBegin(); I < 0; ++I)
- MinFixedObjOffset = std::min(MinFixedObjOffset, MFI.getObjectOffset(I));
-
- for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
- for (WinEHHandlerType &H : TBME.HandlerArray) {
- int FrameIndex = H.CatchObj.FrameIndex;
- if (FrameIndex != INT_MAX) {
- // Ensure alignment.
- unsigned Align = MFI.getObjectAlignment(FrameIndex);
- MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
- MinFixedObjOffset -= MFI.getObjectSize(FrameIndex);
- MFI.setObjectOffset(FrameIndex, MinFixedObjOffset);
- }
- }
- }
+ for (int I = MFI->getObjectIndexBegin(); I < 0; ++I)
+ MinFixedObjOffset = std::min(MinFixedObjOffset, MFI->getObjectOffset(I));
- // Ensure alignment.
- MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
int UnwindHelpFI =
- MFI.CreateFixedObject(SlotSize, UnwindHelpOffset, /*Immutable=*/false);
- EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;
+ MFI->CreateFixedObject(SlotSize, UnwindHelpOffset, /*Immutable=*/false);
+ MF.getWinEHFuncInfo()->UnwindHelpFrameIdx = UnwindHelpFI;
// Store -2 into UnwindHelp on function entry. We have to scan forwards past
// other frame setup instructions.
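(Editorial note: a hedged numeric illustration of the UnwindHelp placement above, with made-up offsets:

#include <algorithm>
#include <cstdint>

// Fixed objects have negative offsets: with SlotSize = 8 and fixed objects at
// -8 and -24, MinFixedObjOffset ends up -24 and UnwindHelp is placed at
// -24 - 8 = -32, immediately below the lowest fixed object.
int64_t unwindHelpOffset(const int64_t *FixedOffsets, int NumFixed,
                         int64_t SlotSize) {
  int64_t MinOff = -SlotSize;
  for (int I = 0; I < NumFixed; ++I)
    MinOff = std::min(MinOff, FixedOffsets[I]);
  return MinOff - SlotSize;
}
)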
diff --git a/gnu/llvm/lib/Target/X86/X86FrameLowering.h b/gnu/llvm/lib/Target/X86/X86FrameLowering.h
index 909319fc18f..65fed3ddb29 100644
--- a/gnu/llvm/lib/Target/X86/X86FrameLowering.h
+++ b/gnu/llvm/lib/Target/X86/X86FrameLowering.h
@@ -14,13 +14,12 @@
#ifndef LLVM_LIB_TARGET_X86_X86FRAMELOWERING_H
#define LLVM_LIB_TARGET_X86_X86FRAMELOWERING_H
-#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
class MachineInstrBuilder;
class MCCFIInstruction;
-class X86InstrInfo;
class X86Subtarget;
class X86RegisterInfo;
@@ -31,7 +30,7 @@ public:
// Cached subtarget predicates.
const X86Subtarget &STI;
- const X86InstrInfo &TII;
+ const TargetInstrInfo &TII;
const X86RegisterInfo *TRI;
unsigned SlotSize;
@@ -50,10 +49,11 @@ public:
/// Emit target stack probe code. This is required for all
/// large stack allocations on Windows. The caller is required to materialize
- /// the number of bytes to probe in RAX/EAX.
- void emitStackProbe(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
- bool InProlog) const;
+ /// the number of bytes to probe in RAX/EAX. Returns instruction just
+ /// after the expansion.
+ MachineInstr *emitStackProbe(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, DebugLoc DL,
+ bool InProlog) const;
/// Replace a StackProbe inline-stub with the actual probe code inline.
void inlineStackProbe(MachineFunction &MF,
@@ -61,7 +61,7 @@ public:
void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL) const;
+ DebugLoc DL) const;
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
@@ -89,7 +89,7 @@ public:
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
- std::vector<CalleeSavedInfo> &CSI,
+ const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const override;
bool hasFP(const MachineFunction &MF) const override;
@@ -100,15 +100,12 @@ public:
int getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const override;
- int getFrameIndexReferenceSP(const MachineFunction &MF,
- int FI, unsigned &SPReg, int Adjustment) const;
- int getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI,
- unsigned &FrameReg,
- bool IgnoreSPUpdates) const override;
+ int getFrameIndexReferenceFromSP(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const override;
- MachineBasicBlock::iterator
- eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const override;
+ void eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const override;
unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const override;
@@ -150,66 +147,65 @@ public:
/// Returns true if the target will correctly handle shrink wrapping.
bool enableShrinkWrapping(const MachineFunction &MF) const override;
- /// Order the symbols in the local stack.
- /// We want to place the local stack objects in some sort of sensible order.
- /// The heuristic we use is to try and pack them according to static number
- /// of uses and size in order to minimize code size.
- void orderFrameObjects(const MachineFunction &MF,
- SmallVectorImpl<int> &ObjectsToAllocate) const override;
+ /// convertArgMovsToPushes - This method tries to convert a call sequence
+ /// that uses sub and mov instructions to put the argument onto the stack
+ /// into a series of pushes.
+ /// Returns true if the transformation succeeded, false if not.
+ bool convertArgMovsToPushes(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ uint64_t Amount) const;
/// Wraps up getting a CFI index and building a MachineInstr for it.
void BuildCFI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL, const MCCFIInstruction &CFIInst) const;
+ DebugLoc DL, MCCFIInstruction CFIInst) const;
/// Sets up EBP and optionally ESI based on the incoming EBP value. Only
/// needed for 32-bit. Used in funclet prologues and at catchret destinations.
MachineBasicBlock::iterator
restoreWin32EHStackPointers(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL, bool RestoreSP = false) const;
+ MachineBasicBlock::iterator MBBI, DebugLoc DL,
+ bool RestoreSP = false) const;
private:
uint64_t calculateMaxStackAlign(const MachineFunction &MF) const;
/// Emit target stack probe as a call to a helper function
- void emitStackProbeCall(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
- bool InProlog) const;
+ MachineInstr *emitStackProbeCall(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL, bool InProlog) const;
/// Emit target stack probe as an inline sequence.
- void emitStackProbeInline(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL, bool InProlog) const;
+ MachineInstr *emitStackProbeInline(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL, bool InProlog) const;
/// Emit a stub to later inline the target stack probe.
- void emitStackProbeInlineStub(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL, bool InProlog) const;
+ MachineInstr *emitStackProbeInlineStub(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL, bool InProlog) const;
/// Aligns the stack pointer by ANDing it with -MaxAlign.
void BuildStackAlignAND(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
+ MachineBasicBlock::iterator MBBI, DebugLoc DL,
unsigned Reg, uint64_t MaxAlign) const;
/// Make small positive stack adjustments using POPs.
bool adjustStackWithPops(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
+ MachineBasicBlock::iterator MBBI, DebugLoc DL,
int Offset) const;
/// Adjusts the stack pointer using LEA, SUB, or ADD.
MachineInstrBuilder BuildStackAdjustment(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL, int64_t Offset,
+ DebugLoc DL, int64_t Offset,
bool InEpilogue) const;
unsigned getPSPSlotOffsetFromSP(const MachineFunction &MF) const;
unsigned getWinEHFuncletFrameSize(const MachineFunction &MF) const;
-
- /// Materialize the catchret target MBB in RAX.
- void emitCatchRetReturnValue(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- MachineInstr *CatchRet) const;
};
} // End llvm namespace
diff --git a/gnu/llvm/lib/Target/X86/X86InstrCompiler.td b/gnu/llvm/lib/Target/X86/X86InstrCompiler.td
index d66d9258e96..1cee25a26e7 100644
--- a/gnu/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/gnu/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -32,10 +32,9 @@ def GetLo8XForm : SDNodeXForm<imm, [{
// PIC base construction. This expands to code that looks like this:
// call $next_inst
// popl %destreg"
-let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP, SSP],
- SchedRW = [WriteJump] in
+let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP] in
def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
- "", [], IIC_CALL_RI>;
+ "", []>;
// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
@@ -43,18 +42,18 @@ let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP, SSP],
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
-let Defs = [ESP, EFLAGS, SSP], Uses = [ESP, SSP], SchedRW = [WriteALU] in {
-def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs),
- (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
- "#ADJCALLSTACKDOWN", [], IIC_ALU_NONMEM>,
- Requires<[NotLP64]>;
+let Defs = [ESP, EFLAGS], Uses = [ESP] in {
+def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "#ADJCALLSTACKDOWN",
+ []>,
+ Requires<[NotLP64]>;
def ADJCALLSTACKUP32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
"#ADJCALLSTACKUP",
- [(X86callseq_end timm:$amt1, timm:$amt2)],
- IIC_ALU_NONMEM>, Requires<[NotLP64]>;
+ [(X86callseq_end timm:$amt1, timm:$amt2)]>,
+ Requires<[NotLP64]>;
}
-def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
- (ADJCALLSTACKDOWN32 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[NotLP64]>;
+def : Pat<(X86callseq_start timm:$amt1),
+ (ADJCALLSTACKDOWN32 i32imm:$amt1, 0)>, Requires<[NotLP64]>;
// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
@@ -62,20 +61,19 @@ def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
-let Defs = [RSP, EFLAGS, SSP], Uses = [RSP, SSP], SchedRW = [WriteALU] in {
-def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs),
- (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
+let Defs = [RSP, EFLAGS], Uses = [RSP] in {
+def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
"#ADJCALLSTACKDOWN",
- [], IIC_ALU_NONMEM>, Requires<[IsLP64]>;
+ []>,
+ Requires<[IsLP64]>;
def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
"#ADJCALLSTACKUP",
- [(X86callseq_end timm:$amt1, timm:$amt2)],
- IIC_ALU_NONMEM>, Requires<[IsLP64]>;
+ [(X86callseq_end timm:$amt1, timm:$amt2)]>,
+ Requires<[IsLP64]>;
}
-def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
- (ADJCALLSTACKDOWN64 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[IsLP64]>;
+def : Pat<(X86callseq_start timm:$amt1),
+ (ADJCALLSTACKDOWN64 i32imm:$amt1, 0)>, Requires<[IsLP64]>;
-let SchedRW = [WriteSystem] in {
// x86-64 va_start lowering magic.
let usesCustomInserter = 1, Defs = [EFLAGS] in {
@@ -101,6 +99,18 @@ def VAARG_64 : I<0, Pseudo,
(X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
(implicit EFLAGS)]>;
+// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
+// targets. These calls are needed to probe the stack when allocating more than
+// 4k bytes in one go. Touching the stack at 4K increments is necessary to
+// ensure that the guard pages used by the OS virtual memory manager are
+// allocated in correct sequence.
+// The main point of having separate instruction are extra unmodelled effects
+// (compared to ordinary calls) like stack pointer change.
+
+let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
+ def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
+ "# dynamic stack allocation",
+ [(X86WinAlloca)]>;
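(Editorial note: the 4 KiB probing requirement described in the comment above can be pictured with a small hedged C++ sketch; this is the idea behind _chkstk/_alloca probing, not its implementation.

#include <cstddef>

// Touch one byte in every 4 KiB page the allocation will cover, walking down
// from the current stack pointer, so guard pages are committed in order.
void probePages(volatile char *StackPtr, std::size_t Bytes) {
  const std::size_t PageSize = 4096;
  for (std::size_t Off = PageSize; Off < Bytes; Off += PageSize)
    StackPtr[-static_cast<std::ptrdiff_t>(Off)] = 0;
  StackPtr[-static_cast<std::ptrdiff_t>(Bytes)] = 0; // final page
}
)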
// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
@@ -122,39 +132,6 @@ def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
Requires<[In64BitMode]>;
}
-// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
-// targets. These calls are needed to probe the stack when allocating more than
-// 4k bytes in one go. Touching the stack at 4K increments is necessary to
-// ensure that the guard pages used by the OS virtual memory manager are
-// allocated in correct sequence.
-// The main point of having separate instruction are extra unmodelled effects
-// (compared to ordinary calls) like stack pointer change.
-
-let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
-def WIN_ALLOCA_32 : I<0, Pseudo, (outs), (ins GR32:$size),
- "# dynamic stack allocation",
- [(X86WinAlloca GR32:$size)]>,
- Requires<[NotLP64]>;
-
-let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
-def WIN_ALLOCA_64 : I<0, Pseudo, (outs), (ins GR64:$size),
- "# dynamic stack allocation",
- [(X86WinAlloca GR64:$size)]>,
- Requires<[In64BitMode]>;
-} // SchedRW
-
-// These instructions XOR the frame pointer into a GPR. They are used in some
-// stack protection schemes. These are post-RA pseudos because we only know the
-// frame register after register allocation.
-let Constraints = "$src = $dst", isPseudo = 1, Defs = [EFLAGS] in {
- def XOR32_FP : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src),
- "xorl\t$$FP, $src", [], IIC_BIN_NONMEM>,
- Requires<[NotLP64]>, Sched<[WriteALU]>;
- def XOR64_FP : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src),
- "xorq\t$$FP $src", [], IIC_BIN_NONMEM>,
- Requires<[In64BitMode]>, Sched<[WriteALU]>;
-}
-
//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
@@ -219,17 +196,17 @@ let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
Requires<[In64BitMode]>;
}
}
+} // SchedRW
let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
"#EH_SjLj_Setup\t$dst", []>;
}
-} // SchedRW
//===----------------------------------------------------------------------===//
// Pseudo instructions used by unwind info.
//
-let isPseudo = 1, SchedRW = [WriteSystem] in {
+let isPseudo = 1 in {
def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
"#SEH_PushReg $reg", []>;
def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
@@ -255,15 +232,15 @@ let isPseudo = 1, SchedRW = [WriteSystem] in {
// This is lowered into a RET instruction by MCInstLower. We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
-let isPseudo = 1, SchedRW = [WriteJumpLd] in {
+let isPseudo = 1 in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins),
- "", [], IIC_RET>;
+ "", []>;
// This instruction is lowered to a RET followed by a MOV. The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
- "", [], IIC_RET>;
+ "", []>;
}
//===----------------------------------------------------------------------===//
@@ -273,54 +250,40 @@ def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
- isPseudo = 1, AddedComplexity = 10 in
+ isPseudo = 1 in
def MOV32r0 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
[(set GR32:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>;
// Other widths can also make use of the 32-bit xor, which may have a smaller
// encoding and avoid partial register updates.
-let AddedComplexity = 10 in {
def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
-def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)>;
+def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)> {
+ let AddedComplexity = 20;
}
-let Predicates = [OptForSize, Not64BitMode],
- AddedComplexity = 10 in {
- let SchedRW = [WriteALU] in {
+let Predicates = [OptForSize, NotSlowIncDec, Not64BitMode],
+ AddedComplexity = 1 in {
// Pseudo instructions for materializing 1 and -1 using XOR+INC/DEC,
// which only require 3 bytes compared to MOV32ri which requires 5.
let Defs = [EFLAGS], isReMaterializable = 1, isPseudo = 1 in {
def MOV32r1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
- [(set GR32:$dst, 1)], IIC_ALU_NONMEM>;
+ [(set GR32:$dst, 1)]>;
def MOV32r_1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
- [(set GR32:$dst, -1)], IIC_ALU_NONMEM>;
+ [(set GR32:$dst, -1)]>;
}
- } // SchedRW
// MOV16ri is 4 bytes, so the instructions above are smaller.
def : Pat<(i16 1), (EXTRACT_SUBREG (MOV32r1), sub_16bit)>;
def : Pat<(i16 -1), (EXTRACT_SUBREG (MOV32r_1), sub_16bit)>;
}
-let isReMaterializable = 1, isPseudo = 1, AddedComplexity = 5,
- SchedRW = [WriteALU] in {
-// AddedComplexity higher than MOV64ri but lower than MOV32r0 and MOV32r1.
-def MOV32ImmSExti8 : I<0, Pseudo, (outs GR32:$dst), (ins i32i8imm:$src), "",
- [(set GR32:$dst, i32immSExt8:$src)], IIC_ALU_NONMEM>,
- Requires<[OptForMinSize, NotWin64WithoutFP]>;
-def MOV64ImmSExti8 : I<0, Pseudo, (outs GR64:$dst), (ins i64i8imm:$src), "",
- [(set GR64:$dst, i64immSExt8:$src)], IIC_ALU_NONMEM>,
- Requires<[OptForMinSize, NotWin64WithoutFP]>;
-}
-
// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let isReMaterializable = 1, isAsCheapAsAMove = 1,
- isPseudo = 1, hasSideEffects = 0, SchedRW = [WriteALU] in
-def MOV32ri64 : I<0, Pseudo, (outs GR32:$dst), (ins i64i32imm:$src), "", [],
- IIC_ALU_NONMEM>;
+ isPseudo = 1, hasSideEffects = 0 in
+def MOV32ri64 : I<0, Pseudo, (outs GR32:$dst), (ins i64i32imm:$src), "", []>;
// This 64-bit pseudo-move can be used for both a 64-bit constant that is
// actually the zero-extension of a 32-bit constant and for labels in the
@@ -463,7 +426,6 @@ let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//
-let SchedRW = [WriteSystem] in {
// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
@@ -474,7 +436,7 @@ let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
- usesCustomInserter = 1, Uses = [ESP, SSP] in {
+ usesCustomInserter = 1, Uses = [ESP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
"# TLS_addr32",
[(X86tlsaddr tls32addr:$sym)]>,
@@ -494,7 +456,7 @@ let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
- usesCustomInserter = 1, Uses = [RSP, SSP] in {
+ usesCustomInserter = 1, Uses = [RSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
"# TLS_addr64",
[(X86tlsaddr tls64addr:$sym)]>,
@@ -510,26 +472,23 @@ def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
// address of the variable is in %eax. %ecx is trashed during the function
// call. All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS],
- Uses = [ESP, SSP],
+ Uses = [ESP],
usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
"# TLSCall_32",
[(X86TLSCall addr:$sym)]>,
Requires<[Not64BitMode]>;
-// For x86_64, the address of the thunk is passed in %rdi, but the
-// pseudo directly use the symbol, so do not add an implicit use of
-// %rdi. The lowering will do the right thing with RDI.
-// On return the address of the variable is in %rax. All other
-// registers are preserved.
+// For x86_64, the address of the thunk is passed in %rdi, on return
+// the address of the variable is in %rax. All other registers are preserved.
let Defs = [RAX, EFLAGS],
- Uses = [RSP, SSP],
+ Uses = [RSP, RDI],
usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
"# TLSCall_64",
[(X86TLSCall addr:$sym)]>,
Requires<[In64BitMode]>;
-} // SchedRW
+
//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions
@@ -544,7 +503,7 @@ multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
EFLAGS)))]>;
}
-let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] in {
+let usesCustomInserter = 1, Uses = [EFLAGS] in {
// X86 doesn't have 8-bit conditional moves. Use a customInserter to
// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
// however that requires promoting the operands, and can induce additional
@@ -582,7 +541,7 @@ let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] in {
defm _V16I1 : CMOVrr_PSEUDO<VK16, v16i1>;
defm _V32I1 : CMOVrr_PSEUDO<VK32, v32i1>;
defm _V64I1 : CMOVrr_PSEUDO<VK64, v64i1>;
-} // usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS]
+} // usesCustomInserter = 1, Uses = [EFLAGS]
//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
@@ -609,7 +568,7 @@ def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
- Format ImmMod, SDNode Op, string mnemonic> {
+ Format ImmMod, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
SchedRW = [WriteALULd, WriteRMW] in {
@@ -618,152 +577,112 @@ def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
!strconcat(mnemonic, "{b}\t",
"{$src2, $dst|$dst, $src2}"),
- [(set EFLAGS, (Op addr:$dst, GR8:$src2))],
- IIC_ALU_NONMEM>, LOCK;
-
+ [], IIC_ALU_NONMEM>, LOCK;
def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
!strconcat(mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- [(set EFLAGS, (Op addr:$dst, GR16:$src2))],
- IIC_ALU_NONMEM>, OpSize16, LOCK;
-
+ [], IIC_ALU_NONMEM>, OpSize16, LOCK;
def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
!strconcat(mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
- [(set EFLAGS, (Op addr:$dst, GR32:$src2))],
- IIC_ALU_NONMEM>, OpSize32, LOCK;
-
+ [], IIC_ALU_NONMEM>, OpSize32, LOCK;
def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
!strconcat(mnemonic, "{q}\t",
"{$src2, $dst|$dst, $src2}"),
- [(set EFLAGS, (Op addr:$dst, GR64:$src2))],
- IIC_ALU_NONMEM>, LOCK;
+ [], IIC_ALU_NONMEM>, LOCK;
def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
!strconcat(mnemonic, "{b}\t",
"{$src2, $dst|$dst, $src2}"),
- [(set EFLAGS, (Op addr:$dst, (i8 imm:$src2)))],
- IIC_ALU_MEM>, LOCK;
+ [], IIC_ALU_MEM>, LOCK;
def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
!strconcat(mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- [(set EFLAGS, (Op addr:$dst, (i16 imm:$src2)))],
- IIC_ALU_MEM>, OpSize16, LOCK;
+ [], IIC_ALU_MEM>, OpSize16, LOCK;
def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
!strconcat(mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
- [(set EFLAGS, (Op addr:$dst, (i32 imm:$src2)))],
- IIC_ALU_MEM>, OpSize32, LOCK;
+ [], IIC_ALU_MEM>, OpSize32, LOCK;
def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
!strconcat(mnemonic, "{q}\t",
"{$src2, $dst|$dst, $src2}"),
- [(set EFLAGS, (Op addr:$dst, i64immSExt32:$src2))],
- IIC_ALU_MEM>, LOCK;
+ [], IIC_ALU_MEM>, LOCK;
def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
!strconcat(mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- [(set EFLAGS, (Op addr:$dst, i16immSExt8:$src2))],
- IIC_ALU_MEM>, OpSize16, LOCK;
-
+ [], IIC_ALU_MEM>, OpSize16, LOCK;
def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
!strconcat(mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
- [(set EFLAGS, (Op addr:$dst, i32immSExt8:$src2))],
- IIC_ALU_MEM>, OpSize32, LOCK;
-
+ [], IIC_ALU_MEM>, OpSize32, LOCK;
def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
!strconcat(mnemonic, "{q}\t",
"{$src2, $dst|$dst, $src2}"),
- [(set EFLAGS, (Op addr:$dst, i64immSExt8:$src2))],
- IIC_ALU_MEM>, LOCK;
+ [], IIC_ALU_MEM>, LOCK;
}
}
-defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, X86lock_add, "add">;
-defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, X86lock_sub, "sub">;
-defm LOCK_OR : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">;
-defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
-defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;
+defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, "add">;
+defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, "sub">;
+defm LOCK_OR : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, "or">;
+defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;
+defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;
+// Optimized codegen when the non-memory output is not used.
multiclass LOCK_ArithUnOp<bits<8> Opc8, bits<8> Opc, Format Form,
- string frag, string mnemonic> {
+ string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
SchedRW = [WriteALULd, WriteRMW] in {
+
def NAME#8m : I<Opc8, Form, (outs), (ins i8mem :$dst),
!strconcat(mnemonic, "{b}\t$dst"),
- [(set EFLAGS, (!cast<PatFrag>(frag # "_8") addr:$dst))],
- IIC_UNARY_MEM>, LOCK;
+ [], IIC_UNARY_MEM>, LOCK;
def NAME#16m : I<Opc, Form, (outs), (ins i16mem:$dst),
!strconcat(mnemonic, "{w}\t$dst"),
- [(set EFLAGS, (!cast<PatFrag>(frag # "_16") addr:$dst))],
- IIC_UNARY_MEM>, OpSize16, LOCK;
+ [], IIC_UNARY_MEM>, OpSize16, LOCK;
def NAME#32m : I<Opc, Form, (outs), (ins i32mem:$dst),
!strconcat(mnemonic, "{l}\t$dst"),
- [(set EFLAGS, (!cast<PatFrag>(frag # "_32") addr:$dst))],
- IIC_UNARY_MEM>, OpSize32, LOCK;
+ [], IIC_UNARY_MEM>, OpSize32, LOCK;
def NAME#64m : RI<Opc, Form, (outs), (ins i64mem:$dst),
!strconcat(mnemonic, "{q}\t$dst"),
- [(set EFLAGS, (!cast<PatFrag>(frag # "_64") addr:$dst))],
- IIC_UNARY_MEM>, LOCK;
+ [], IIC_UNARY_MEM>, LOCK;
}
}
-multiclass unary_atomic_intrin<SDNode atomic_op> {
- def _8 : PatFrag<(ops node:$ptr),
- (atomic_op node:$ptr), [{
- return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
- }]>;
- def _16 : PatFrag<(ops node:$ptr),
- (atomic_op node:$ptr), [{
- return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
- }]>;
- def _32 : PatFrag<(ops node:$ptr),
- (atomic_op node:$ptr), [{
- return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
- }]>;
- def _64 : PatFrag<(ops node:$ptr),
- (atomic_op node:$ptr), [{
- return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
- }]>;
-}
-
-defm X86lock_inc : unary_atomic_intrin<X86lock_inc>;
-defm X86lock_dec : unary_atomic_intrin<X86lock_dec>;
-
-defm LOCK_INC : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, "X86lock_inc", "inc">;
-defm LOCK_DEC : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, "X86lock_dec", "dec">;
+defm LOCK_INC : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, "inc">;
+defm LOCK_DEC : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, "dec">;
// Atomic compare and swap.
multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic,
SDPatternOperator frag, X86MemOperand x86memop,
InstrItinClass itin> {
-let isCodeGenOnly = 1, usesCustomInserter = 1 in {
+let isCodeGenOnly = 1 in {
def NAME : I<Opc, Form, (outs), (ins x86memop:$ptr),
!strconcat(mnemonic, "\t$ptr"),
[(frag addr:$ptr)], itin>, TB, LOCK;
@@ -805,18 +724,18 @@ defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b",
// register and the register allocator will ignore any use/def of
// it. In other words, the register will not fix the clobbering of
// RBX that will happen when setting the arguments for the instrucion.
-//
+//
// Unlike the actual related instuction, we mark that this one
// defines EBX (instead of using EBX).
// The rationale is that we will define RBX during the expansion of
// the pseudo. The argument feeding EBX is ebx_input.
//
// The additional argument, $ebx_save, is a temporary register used to
-// save the value of RBX across the actual instruction.
+// save the value of RBX accross the actual instruction.
//
// To make sure the register assigned to $ebx_save does not interfere with
// the definition of the actual instruction, we use a definition $dst which
-// is tied to $rbx_save. That way, the live-range of $rbx_save spans across
+// is tied to $rbx_save. That way, the live-range of $rbx_save spans accross
// the instruction and we are sure we will have a valid register to restore
// the value of RBX.
let Defs = [EAX, EDX, EBX, EFLAGS], Uses = [EAX, ECX, EDX],
@@ -933,7 +852,7 @@ multiclass RELEASE_BINOP_MI<SDNode op> {
[(atomic_store_64 addr:$dst, (op
(atomic_load_64 addr:$dst), GR64:$src))]>;
}
-let Defs = [EFLAGS], SchedRW = [WriteMicrocoded] in {
+let Defs = [EFLAGS] in {
defm RELEASE_ADD : RELEASE_BINOP_MI<add>;
defm RELEASE_AND : RELEASE_BINOP_MI<and>;
defm RELEASE_OR : RELEASE_BINOP_MI<or>;
@@ -946,20 +865,20 @@ let Defs = [EFLAGS], SchedRW = [WriteMicrocoded] in {
// FIXME: imm version.
// FIXME: Version that doesn't clobber $src, using AVX's VADDSS.
// FIXME: This could also handle SIMD operations with *ps and *pd instructions.
-let usesCustomInserter = 1, SchedRW = [WriteMicrocoded] in {
+let usesCustomInserter = 1 in {
multiclass RELEASE_FP_BINOP_MI<SDNode op> {
def NAME#32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, FR32:$src),
"#BINOP "#NAME#"32mr PSEUDO!",
[(atomic_store_32 addr:$dst,
- (i32 (bitconvert (op
+ (i32 (bitconvert (op
(f32 (bitconvert (i32 (atomic_load_32 addr:$dst)))),
- FR32:$src))))]>, Requires<[HasSSE1]>;
+ FR32:$src))))]>, Requires<[HasSSE1]>;
def NAME#64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, FR64:$src),
"#BINOP "#NAME#"64mr PSEUDO!",
[(atomic_store_64 addr:$dst,
- (i64 (bitconvert (op
+ (i64 (bitconvert (op
(f64 (bitconvert (i64 (atomic_load_64 addr:$dst)))),
- FR64:$src))))]>, Requires<[HasSSE2]>;
+ FR64:$src))))]>, Requires<[HasSSE2]>;
}
defm RELEASE_FADD : RELEASE_FP_BINOP_MI<fadd>;
// FIXME: Add fsub, fmul, fdiv, ...
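(Editorial note: the RELEASE_FADD pattern above expresses a floating-point RMW as an integer-typed atomic load, a bitwise reinterpretation, the FP op, and an integer-typed atomic store. A hedged C++ analogue, with memory orderings chosen for illustration only:

#include <atomic>
#include <cstdint>
#include <cstring>

void releaseFAdd(std::atomic<uint32_t> &Mem, float Rhs) {
  uint32_t Bits = Mem.load(std::memory_order_relaxed); // atomic_load_32
  float Val;
  std::memcpy(&Val, &Bits, sizeof Val);                // bitconvert i32 -> f32
  Val += Rhs;                                          // the fadd
  std::memcpy(&Bits, &Val, sizeof Bits);               // bitconvert f32 -> i32
  Mem.store(Bits, std::memory_order_release);          // atomic_store_32
}
)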
@@ -980,17 +899,17 @@ multiclass RELEASE_UNOP<dag dag8, dag dag16, dag dag32, dag dag64> {
[(atomic_store_64 addr:$dst, dag64)]>;
}
-let Defs = [EFLAGS], Predicates = [UseIncDec], SchedRW = [WriteMicrocoded] in {
+let Defs = [EFLAGS] in {
defm RELEASE_INC : RELEASE_UNOP<
(add (atomic_load_8 addr:$dst), (i8 1)),
(add (atomic_load_16 addr:$dst), (i16 1)),
(add (atomic_load_32 addr:$dst), (i32 1)),
- (add (atomic_load_64 addr:$dst), (i64 1))>;
+ (add (atomic_load_64 addr:$dst), (i64 1))>, Requires<[NotSlowIncDec]>;
defm RELEASE_DEC : RELEASE_UNOP<
(add (atomic_load_8 addr:$dst), (i8 -1)),
(add (atomic_load_16 addr:$dst), (i16 -1)),
(add (atomic_load_32 addr:$dst), (i32 -1)),
- (add (atomic_load_64 addr:$dst), (i64 -1))>;
+ (add (atomic_load_64 addr:$dst), (i64 -1))>, Requires<[NotSlowIncDec]>;
}
/*
TODO: These don't work because the type inference of TableGen fails.
@@ -1010,19 +929,18 @@ defm RELEASE_NOT : RELEASE_UNOP<
(not (atomic_load_64 addr:$dst))>;
*/
-let SchedRW = [WriteMicrocoded] in {
def RELEASE_MOV8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
- "#RELEASE_MOV8mi PSEUDO!",
- [(atomic_store_8 addr:$dst, (i8 imm:$src))]>;
+ "#RELEASE_MOV8mi PSEUDO!",
+ [(atomic_store_8 addr:$dst, (i8 imm:$src))]>;
def RELEASE_MOV16mi : I<0, Pseudo, (outs), (ins i16mem:$dst, i16imm:$src),
- "#RELEASE_MOV16mi PSEUDO!",
- [(atomic_store_16 addr:$dst, (i16 imm:$src))]>;
+ "#RELEASE_MOV16mi PSEUDO!",
+ [(atomic_store_16 addr:$dst, (i16 imm:$src))]>;
def RELEASE_MOV32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
- "#RELEASE_MOV32mi PSEUDO!",
- [(atomic_store_32 addr:$dst, (i32 imm:$src))]>;
+ "#RELEASE_MOV32mi PSEUDO!",
+ [(atomic_store_32 addr:$dst, (i32 imm:$src))]>;
def RELEASE_MOV64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
- "#RELEASE_MOV64mi32 PSEUDO!",
- [(atomic_store_64 addr:$dst, i64immSExt32:$src)]>;
+ "#RELEASE_MOV64mi32 PSEUDO!",
+ [(atomic_store_64 addr:$dst, i64immSExt32:$src)]>;
def RELEASE_MOV8mr : I<0, Pseudo, (outs), (ins i8mem :$dst, GR8 :$src),
"#RELEASE_MOV8mr PSEUDO!",
@@ -1049,23 +967,57 @@ def ACQUIRE_MOV32rm : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$src),
def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
"#ACQUIRE_MOV64rm PSEUDO!",
[(set GR64:$dst, (atomic_load_64 addr:$src))]>;
-} // SchedRW
//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//
-// Use AND/OR to store 0/-1 in memory when optimizing for minsize. This saves
-// binary size compared to a regular MOV, but it introduces an unnecessary
-// load, so is not suitable for regular or optsize functions.
-let Predicates = [OptForMinSize] in {
-def : Pat<(store (i16 0), addr:$dst), (AND16mi8 addr:$dst, 0)>;
-def : Pat<(store (i32 0), addr:$dst), (AND32mi8 addr:$dst, 0)>;
-def : Pat<(store (i64 0), addr:$dst), (AND64mi8 addr:$dst, 0)>;
-def : Pat<(store (i16 -1), addr:$dst), (OR16mi8 addr:$dst, -1)>;
-def : Pat<(store (i32 -1), addr:$dst), (OR32mi8 addr:$dst, -1)>;
-def : Pat<(store (i64 -1), addr:$dst), (OR64mi8 addr:$dst, -1)>;
-}
+// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
+def : Pat<(i32 (X86Wrapper tconstpool :$dst)), (MOV32ri tconstpool :$dst)>;
+def : Pat<(i32 (X86Wrapper tjumptable :$dst)), (MOV32ri tjumptable :$dst)>;
+def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
+def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
+def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
+def : Pat<(i32 (X86Wrapper mcsym:$dst)), (MOV32ri mcsym:$dst)>;
+def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;
+
+def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
+ (ADD32ri GR32:$src1, tconstpool:$src2)>;
+def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
+ (ADD32ri GR32:$src1, tjumptable:$src2)>;
+def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
+ (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
+def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
+ (ADD32ri GR32:$src1, texternalsym:$src2)>;
+def : Pat<(add GR32:$src1, (X86Wrapper mcsym:$src2)),
+ (ADD32ri GR32:$src1, mcsym:$src2)>;
+def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
+ (ADD32ri GR32:$src1, tblockaddress:$src2)>;
+
+def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
+ (MOV32mi addr:$dst, tglobaladdr:$src)>;
+def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
+ (MOV32mi addr:$dst, texternalsym:$src)>;
+def : Pat<(store (i32 (X86Wrapper mcsym:$src)), addr:$dst),
+ (MOV32mi addr:$dst, mcsym:$src)>;
+def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
+ (MOV32mi addr:$dst, tblockaddress:$src)>;
+
+// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable when not in small
+// code model mode, should use 'movabs'. FIXME: This is really a hack, the
+// 'movabs' predicate should handle this sort of thing.
+def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
+ (MOV64ri tconstpool :$dst)>, Requires<[FarData]>;
+def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
+ (MOV64ri tjumptable :$dst)>, Requires<[FarData]>;
+def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
+ (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
+def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
+ (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
+def : Pat<(i64 (X86Wrapper mcsym:$dst)),
+ (MOV64ri mcsym:$dst)>, Requires<[FarData]>;
+def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
+ (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;
// In kernel code model, we can get the address of a label
// into a register with 'movq'. FIXME: This is a hack, the 'imm' predicate of
@@ -1088,22 +1040,22 @@ def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tconstpool:$src)>,
- Requires<[NearData, IsNotPIC]>;
+ Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tjumptable:$src)>,
- Requires<[NearData, IsNotPIC]>;
+ Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tglobaladdr:$src)>,
- Requires<[NearData, IsNotPIC]>;
+ Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
(MOV64mi32 addr:$dst, texternalsym:$src)>,
- Requires<[NearData, IsNotPIC]>;
+ Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst),
(MOV64mi32 addr:$dst, mcsym:$src)>,
- Requires<[NearData, IsNotPIC]>;
+ Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tblockaddress:$src)>,
- Requires<[NearData, IsNotPIC]>;
+ Requires<[NearData, IsStatic]>;
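
The NearData/IsStatic requirement works because, in the kernel code model, symbols live in the top 2 GiB of the address space, so every such address is exactly the sign-extension of its own low 32 bits and fits the imm32 operand of MOV64mi32. A minimal C++ check of that arithmetic fact, using an invented kernel-space address:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t kern_addr = UINT64_C(0xffffffff80001234);  // hypothetical top-2GiB address
    uint64_t lo = kern_addr & 0xffffffffu;
    uint64_t sext = (lo & 0x80000000u) ? (lo | UINT64_C(0xffffffff00000000)) : lo;
    assert(sext == kern_addr);  // round-trips through a sign-extended 32-bit immediate
    return 0;
  }
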
def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>;
@@ -1146,14 +1098,14 @@ def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
(TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,
- Requires<[Not64BitMode, NotUseRetpoline]>;
+ Requires<[Not64BitMode]>;
// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
(TCRETURNmi addr:$dst, imm:$off)>,
- Requires<[Not64BitMode, IsNotPIC, NotUseRetpoline]>;
+ Requires<[Not64BitMode, IsNotPIC]>;
def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
(TCRETURNdi tglobaladdr:$dst, imm:$off)>,
@@ -1165,21 +1117,13 @@ def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
(TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
- Requires<[In64BitMode, NotUseRetpoline]>;
+ Requires<[In64BitMode]>;
// Don't fold loads into X86tcret requiring more than 6 regs.
// There wouldn't be enough scratch registers for base+index.
def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off),
(TCRETURNmi64 addr:$dst, imm:$off)>,
- Requires<[In64BitMode, NotUseRetpoline]>;
-
-def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
- (RETPOLINE_TCRETURN64 ptr_rc_tailcall:$dst, imm:$off)>,
- Requires<[In64BitMode, UseRetpoline]>;
-
-def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
- (RETPOLINE_TCRETURN32 ptr_rc_tailcall:$dst, imm:$off)>,
- Requires<[Not64BitMode, UseRetpoline]>;
+ Requires<[In64BitMode]>;
def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
(TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
@@ -1241,13 +1185,12 @@ defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;
// zextload bool -> zextload byte
-// i1 stored in one byte in zero-extended form.
-// Upper bits cleanup should be executed before Store.
-def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>;
-def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
-def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
+def : Pat<(zextloadi8i1 addr:$src), (AND8ri (MOV8rm addr:$src), (i8 1))>;
+def : Pat<(zextloadi16i1 addr:$src), (AND16ri8 (MOVZX16rm8 addr:$src), (i16 1))>;
+def : Pat<(zextloadi32i1 addr:$src), (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1))>;
def : Pat<(zextloadi64i1 addr:$src),
- (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
+ (SUBREG_TO_REG (i64 0),
+ (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1)), sub_32bit)>;
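
The restored patterns above deliberately AND the loaded byte with 1 instead of assuming the bool was stored in zero-extended form (the assumption stated by the removed comment). A tiny C++ illustration, with a deliberately contrived backing byte, of why the extra mask makes the i1 load robust:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint8_t in_memory = 0xff;      // contrived byte backing an i1 "true"; upper bits are junk
    uint32_t loaded = in_memory;   // zero-extending byte load (MOVZX32rm8)
    uint32_t as_i1 = loaded & 1;   // the AND32ri8 ..., 1 from the pattern
    assert(as_i1 == 1);            // correct even though bits 7:1 of the byte were set
    return 0;
  }
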
// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
@@ -1287,20 +1230,20 @@ def : Pat<(i64 (anyext GR8 :$src)),
def : Pat<(i64 (anyext GR16:$src)),
(SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, sub_32bit)>;
+ (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
// Any instruction that defines a 32-bit result leaves the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
-// be copying from a truncate. Any other 32-bit operation will zero-extend
-// up to 64 bits. AssertSext/AssertZext aren't saying anything about the upper
-// 32 bits, they're probably just qualifying a CopyFromReg.
+// be copying from a truncate. And x86's cmov doesn't do anything if the
+// condition is false. But any other 32-bit operation will zero-extend
+// up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
return N->getOpcode() != ISD::TRUNCATE &&
N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
N->getOpcode() != ISD::CopyFromReg &&
N->getOpcode() != ISD::AssertSext &&
- N->getOpcode() != ISD::AssertZext;
+ N->getOpcode() != X86ISD::CMOV;
}]>;
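
def32 captures the x86-64 rule that a genuine 32-bit arithmetic result has zero upper bits when viewed in the containing 64-bit register, which is why the zext pattern that follows can use a plain SUBREG_TO_REG with a zero high half. A short C++ sketch of that invariant; widen_sum is an invented helper, not an LLVM API:

  #include <cassert>
  #include <cstdint>

  // Widening a computed 32-bit result: the upper 32 bits are zero by
  // construction, so no extra zero-extending move is required.
  static uint64_t widen_sum(uint32_t a, uint32_t b) {
    uint32_t s = a + b;     // 32-bit add; a 32-bit register write also clears bits 63:32 on x86-64
    return (uint64_t)s;     // zext i32 -> i64 of a def32 value
  }

  int main() {
    assert(widen_sum(0xffffffffu, 1u) == 0);                  // wraps in 32 bits, high half stays 0
    assert((widen_sum(0x80000000u, 0x80000000u) >> 32) == 0); // never leaks into the upper bits
    return 0;
  }
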
// In the case of a 32-bit def that is known to implicitly zero-extend,
@@ -1323,11 +1266,11 @@ def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
- KnownBits Known0;
- CurDAG->computeKnownBits(N->getOperand(0), Known0, 0);
- KnownBits Known1;
- CurDAG->computeKnownBits(N->getOperand(1), Known1, 0);
- return (~Known0.Zero & ~Known1.Zero) == 0;
+ APInt KnownZero0, KnownOne0;
+ CurDAG->computeKnownBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
+ APInt KnownZero1, KnownOne1;
+ CurDAG->computeKnownBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
+ return (~KnownZero0 & ~KnownZero1) == 0;
}]>;
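
or_is_add accepts an OR whose operands provably share no set bits (via the known-bits query above), because in that case OR and ADD compute the same value and the node can be selected with the add forms. A standalone C++ check of the identity, with made-up operand values:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t a = 0xf0f0f000u, b = 0x0000000fu;
    assert((a & b) == 0);        // disjoint bits: the property computeKnownBits establishes
    assert((a | b) == a + b);    // so the OR may be treated as an ADD
    return 0;
  }
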
@@ -1408,7 +1351,7 @@ def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
(SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
-def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
+def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
(SUB64mi32 addr:$dst, 0xffffffff80000000)>;
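
The rewrite above relies on +0x80000000 not being encodable as a sign-extended 32-bit immediate, while -0x80000000 is; adding the former equals subtracting the latter modulo 2^64, so SUB64ri32/SUB64mi32 with 0xffffffff80000000 produces the same result. A one-assert C++ check of that wraparound identity:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t x = UINT64_C(0x123456789abcdef0);   // arbitrary example value
    assert(x + UINT64_C(0x0000000080000000) ==
           x - UINT64_C(0xffffffff80000000));    // identical modulo 2^64
    return 0;
  }
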
// To avoid needing to materialize an immediate in a register, use a 32-bit and
@@ -1447,11 +1390,16 @@ def : Pat<(and GR32:$src1, 0xffff),
(MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
- (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>;
+ (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
+ GR32_ABCD)),
+ sub_8bit))>,
+ Requires<[Not64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
- (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)),
- sub_16bit)>;
+ (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG
+ (i16 (COPY_TO_REGCLASS GR16:$src1, GR16_ABCD)), sub_8bit)),
+ sub_16bit)>,
+ Requires<[Not64BitMode]>;
// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
@@ -1468,6 +1416,15 @@ def : Pat<(and GR64:$src, 0xff),
(SUBREG_TO_REG (i64 0),
(MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
sub_32bit)>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR32:$src1, 0xff),
+ (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
+ Requires<[In64BitMode]>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR16:$src1, 0xff),
+ (EXTRACT_SUBREG (MOVZX32rr8 (i8
+ (EXTRACT_SUBREG GR16:$src1, sub_8bit))), sub_16bit)>,
+ Requires<[In64BitMode]>;
} // AddedComplexity = 1
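
The duplicated patterns guarded by Not64BitMode and In64BitMode reflect an encoding detail: without a REX prefix only the A/B/C/D registers have an addressable low-byte subregister, hence the COPY_TO_REGCLASS to GR32_ABCD/GR16_ABCD in 32-bit mode. The value-level identity both variants rely on is simply that masking with 0xff (or 0xffff) equals zero-extending the low subregister; a small C++ check:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t r = 0xcafef00du;                    // arbitrary example value
    assert((r & 0xffu)   == (uint32_t)(uint8_t)r);   // r & (2^8-1)  == movzx of the low byte
    assert((r & 0xffffu) == (uint32_t)(uint16_t)r);  // r & (2^16-1) == movzx of the low word
    return 0;
  }
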
@@ -1475,11 +1432,16 @@ def : Pat<(and GR64:$src, 0xff),
def : Pat<(sext_inreg GR32:$src, i16),
(MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
- (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>;
+ (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
+ GR32_ABCD)),
+ sub_8bit))>,
+ Requires<[Not64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
- (EXTRACT_SUBREG (MOVSX32rr8 (EXTRACT_SUBREG GR16:$src, sub_8bit)),
- sub_16bit)>;
+ (EXTRACT_SUBREG (i32 (MOVSX32rr8 (EXTRACT_SUBREG
+ (i32 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), sub_8bit))),
+ sub_16bit)>,
+ Requires<[Not64BitMode]>;
def : Pat<(sext_inreg GR64:$src, i32),
(MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
@@ -1487,6 +1449,13 @@ def : Pat<(sext_inreg GR64:$src, i16),
(MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
(MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
+def : Pat<(sext_inreg GR32:$src, i8),
+ (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
+ Requires<[In64BitMode]>;
+def : Pat<(sext_inreg GR16:$src, i8),
+ (EXTRACT_SUBREG (MOVSX32rr8
+ (EXTRACT_SUBREG GR16:$src, sub_8bit)), sub_16bit)>,
+ Requires<[In64BitMode]>;
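
These sext_inreg patterns lower an in-register sign extension from i8/i16 to a MOVSX of the corresponding subregister (again needing an ABCD copy outside 64-bit mode). The underlying arithmetic is plain 8-to-32-bit sign extension of the low byte; a portable C++ check, written without implementation-defined casts:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t r = 0x12345680u;                          // low byte has its sign bit set
    int32_t sext = ((int32_t)(r & 0xff) ^ 0x80) - 0x80; // branchless 8->32 sign extension,
                                                         // i.e. the value MOVSX32rr8 produces
    assert((uint32_t)sext == 0xffffff80u);
    return 0;
  }
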
// sext, sext_load, zext, zext_load
def: Pat<(i16 (sext GR8:$src)),
@@ -1524,26 +1493,40 @@ def : Pat<(i8 (trunc GR16:$src)),
// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
- (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
- Requires<[Not64BitMode]>;
-def : Pat<(i8 (trunc (srl_su (i32 (anyext GR16:$src)), (i8 8)))),
- (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)>,
Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
- (EXTRACT_SUBREG GR32:$src, sub_8bit_hi)>,
+ (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
+ sub_8bit_hi)>,
Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
(EXTRACT_SUBREG
- (MOVZX32_NOREXrr8 (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
- sub_16bit)>;
+ (MOVZX32rr8
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)),
+ sub_16bit)>,
+ Requires<[Not64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
- (MOVZX32_NOREXrr8 (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
+ (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
+ GR16_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[Not64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
- (MOVZX32_NOREXrr8 (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
+ (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
+ GR16_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[Not64BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
- (MOVZX32_NOREXrr8 (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
+ (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
+ GR32_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[Not64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
- (MOVZX32_NOREXrr8 (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
+ (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
+ GR32_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[Not64BitMode]>;
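
The "h-register trick" above is possible because bits 15:8 of a value are directly readable as the high-byte register (AH/BH/CH/DH), so (x >> 8) & 0xff needs no shift at all; only the A/B/C/D registers have that subregister, which is what the COPY_TO_REGCLASS to GR16_ABCD/GR32_ABCD enforces, and the 64-bit-mode variants further below use the _NOREX instruction forms because a high-byte register cannot be encoded together with a REX prefix. A minimal C++ check of the value being extracted:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t x = 0x00c0ffeeu;               // arbitrary example value
    assert(((x >> 8) & 0xffu) == 0xffu);    // bits 15:8, i.e. what reading "xH" yields
    return 0;
  }
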
// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
@@ -1557,35 +1540,68 @@ def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
(SUBREG_TO_REG
(i64 0),
(MOVZX32_NOREXrr8
- (EXTRACT_SUBREG GR64:$src, sub_8bit_hi)),
+ (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
+ sub_8bit_hi)),
sub_32bit)>;
+def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
+def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
+ (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
+ GR32_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
+def : Pat<(srl GR16:$src, (i8 8)),
+ (EXTRACT_SUBREG
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)),
+ sub_16bit)>,
+ Requires<[In64BitMode]>;
+def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
+def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
(SUBREG_TO_REG
(i64 0),
(MOVZX32_NOREXrr8
- (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)),
sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
(SUBREG_TO_REG
(i64 0),
(MOVZX32_NOREXrr8
- (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)),
sub_32bit)>;
// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
(MOV8mr_NOREX
addr:$dst,
- (EXTRACT_SUBREG GR64:$src, sub_8bit_hi))>;
+ (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
+ sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
(MOV8mr_NOREX
addr:$dst,
- (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>,
+ (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
+ sub_8bit_hi))>,
Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
(MOV8mr_NOREX
addr:$dst,
- (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>,
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi))>,
Requires<[In64BitMode]>;
@@ -1600,13 +1616,7 @@ def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
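
The three patterns above use the identity that shifting left by one is the same as adding a value to itself, letting the backend pick a register-register ADD where that may be preferable to a shift by 1. A one-line C++ check, including the wrapping case:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t x = UINT64_C(0x8000000000000001);
    assert((x << 1) == x + x);   // both wrap identically modulo 2^64
    return 0;
  }
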
-// Helper imms to check if a mask doesn't change significant shift/rotate bits.
-def immShift8 : ImmLeaf<i8, [{
- return countTrailingOnes<uint64_t>(Imm) >= 3;
-}]>;
-def immShift16 : ImmLeaf<i8, [{
- return countTrailingOnes<uint64_t>(Imm) >= 4;
-}]>;
+// Helper imms that check if a mask doesn't change significant shift bits.
def immShift32 : ImmLeaf<i8, [{
return countTrailingOnes<uint64_t>(Imm) >= 5;
}]>;
@@ -1633,121 +1643,15 @@ multiclass MaskedShiftAmountPats<SDNode frag, string name> {
// (shift x (and y, 63)) ==> (shift x, y)
def : Pat<(frag GR64:$src1, (and CL, immShift64)),
(!cast<Instruction>(name # "64rCL") GR64:$src1)>;
- def : Pat<(store (frag (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
+ def : Pat<(store (frag (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
(!cast<Instruction>(name # "64mCL") addr:$dst)>;
}
defm : MaskedShiftAmountPats<shl, "SHL">;
defm : MaskedShiftAmountPats<srl, "SHR">;
defm : MaskedShiftAmountPats<sra, "SAR">;
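
The immShift32/immShift64 leaves accept any mask whose low 5 (respectively 6) bits are all ones: such a mask cannot change the bits the hardware actually uses as the shift count when it is taken from CL, so the explicit AND can be dropped. A brute-force C++ check of that masking identity over all byte-sized counts:

  #include <cassert>
  #include <cstdint>

  int main() {
    for (uint32_t c = 0; c < 256; ++c) {
      assert(((c & 0x3fu) & 31u) == (c & 31u));  // a mask with >=5 trailing ones leaves the low 5 bits intact
      assert(((c & 0xffu) & 63u) == (c & 63u));  // likewise for 64-bit shifts and the low 6 bits
    }
    return 0;
  }
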
-
-// ROL/ROR instructions allow a stronger mask optimization than shift for 8- and
-// 16-bit. We can remove a mask of any (bitwidth - 1) on the rotation amount
-// because over-rotating produces the same result. This is noted in the Intel
-// docs with: "tempCOUNT <- (COUNT & COUNTMASK) MOD SIZE". Masking the rotation
-// amount could affect EFLAGS results, but that does not matter because we are
-// not tracking flags for these nodes.
-multiclass MaskedRotateAmountPats<SDNode frag, string name> {
- // (rot x (and y, BitWidth - 1)) ==> (rot x, y)
- def : Pat<(frag GR8:$src1, (and CL, immShift8)),
- (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
- def : Pat<(frag GR16:$src1, (and CL, immShift16)),
- (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
- def : Pat<(frag GR32:$src1, (and CL, immShift32)),
- (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
- def : Pat<(store (frag (loadi8 addr:$dst), (and CL, immShift8)), addr:$dst),
- (!cast<Instruction>(name # "8mCL") addr:$dst)>;
- def : Pat<(store (frag (loadi16 addr:$dst), (and CL, immShift16)), addr:$dst),
- (!cast<Instruction>(name # "16mCL") addr:$dst)>;
- def : Pat<(store (frag (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
- (!cast<Instruction>(name # "32mCL") addr:$dst)>;
-
- // (rot x (and y, 63)) ==> (rot x, y)
- def : Pat<(frag GR64:$src1, (and CL, immShift64)),
- (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
- def : Pat<(store (frag (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
- (!cast<Instruction>(name # "64mCL") addr:$dst)>;
-}
-
-
-defm : MaskedRotateAmountPats<rotl, "ROL">;
-defm : MaskedRotateAmountPats<rotr, "ROR">;
-
-// Double shift amount is implicitly masked.
-multiclass MaskedDoubleShiftAmountPats<SDNode frag, string name> {
- // (shift x (and y, 31)) ==> (shift x, y)
- def : Pat<(frag GR16:$src1, GR16:$src2, (and CL, immShift32)),
- (!cast<Instruction>(name # "16rrCL") GR16:$src1, GR16:$src2)>;
- def : Pat<(frag GR32:$src1, GR32:$src2, (and CL, immShift32)),
- (!cast<Instruction>(name # "32rrCL") GR32:$src1, GR32:$src2)>;
-
- // (shift x (and y, 63)) ==> (shift x, y)
- def : Pat<(frag GR64:$src1, GR64:$src2, (and CL, immShift64)),
- (!cast<Instruction>(name # "64rrCL") GR64:$src1, GR64:$src2)>;
-}
-
-defm : MaskedDoubleShiftAmountPats<X86shld, "SHLD">;
-defm : MaskedDoubleShiftAmountPats<X86shrd, "SHRD">;
-
-let Predicates = [HasBMI2] in {
- let AddedComplexity = 1 in {
- def : Pat<(sra GR32:$src1, (and GR8:$src2, immShift32)),
- (SARX32rr GR32:$src1,
- (INSERT_SUBREG
- (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
- def : Pat<(sra GR64:$src1, (and GR8:$src2, immShift64)),
- (SARX64rr GR64:$src1,
- (INSERT_SUBREG
- (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
-
- def : Pat<(srl GR32:$src1, (and GR8:$src2, immShift32)),
- (SHRX32rr GR32:$src1,
- (INSERT_SUBREG
- (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
- def : Pat<(srl GR64:$src1, (and GR8:$src2, immShift64)),
- (SHRX64rr GR64:$src1,
- (INSERT_SUBREG
- (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
-
- def : Pat<(shl GR32:$src1, (and GR8:$src2, immShift32)),
- (SHLX32rr GR32:$src1,
- (INSERT_SUBREG
- (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
- def : Pat<(shl GR64:$src1, (and GR8:$src2, immShift64)),
- (SHLX64rr GR64:$src1,
- (INSERT_SUBREG
- (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
- }
-
- let AddedComplexity = -20 in {
- def : Pat<(sra (loadi32 addr:$src1), (and GR8:$src2, immShift32)),
- (SARX32rm addr:$src1,
- (INSERT_SUBREG
- (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
- def : Pat<(sra (loadi64 addr:$src1), (and GR8:$src2, immShift64)),
- (SARX64rm addr:$src1,
- (INSERT_SUBREG
- (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
-
- def : Pat<(srl (loadi32 addr:$src1), (and GR8:$src2, immShift32)),
- (SHRX32rm addr:$src1,
- (INSERT_SUBREG
- (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
- def : Pat<(srl (loadi64 addr:$src1), (and GR8:$src2, immShift64)),
- (SHRX64rm addr:$src1,
- (INSERT_SUBREG
- (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
-
- def : Pat<(shl (loadi32 addr:$src1), (and GR8:$src2, immShift32)),
- (SHLX32rm addr:$src1,
- (INSERT_SUBREG
- (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
- def : Pat<(shl (loadi64 addr:$src1), (and GR8:$src2, immShift64)),
- (SHLX64rm addr:$src1,
- (INSERT_SUBREG
- (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
- }
-}
+defm : MaskedShiftAmountPats<rotl, "ROL">;
+defm : MaskedShiftAmountPats<rotr, "ROR">;
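
Reusing the masked-shift patterns for ROL/ROR is sound because rotation is periodic in the bit width: rotating a 32-bit value by c and by c & 31 gives the same result, so dropping the mask cannot change the value. A small C++ demonstration using an invented reference rotate that applies one-bit rotations repeatedly and never masks its count:

  #include <cassert>
  #include <cstdint>

  static uint32_t rotl1(uint32_t x) { return (x << 1) | (x >> 31); }

  static uint32_t rotl_by(uint32_t x, unsigned c) {  // rotate left c times, no masking anywhere
    while (c--) x = rotl1(x);
    return x;
  }

  int main() {
    uint32_t x = 0x12345678u;
    for (unsigned c = 0; c < 256; ++c)
      assert(rotl_by(x, c) == rotl_by(x, c & 31));   // over-rotating is harmless
    return 0;
  }
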
// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
@@ -1757,6 +1661,9 @@ def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
(SETB_C32r)>;
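
X86setcc_c with COND_B materializes "carry set ? -1 : 0" (the SETB_C*r pseudos are, to my understanding, later expanded to an SBB of a register with itself), and any-extending that all-ones/all-zeros byte does not change the value, which is what the three patterns above express. A portable C++ sketch of the mask being produced; borrow_mask is an invented helper name:

  #include <cassert>
  #include <cstdint>

  static uint32_t borrow_mask(uint32_t a, uint32_t b) {
    return (uint32_t)0 - (uint32_t)(a < b);   // -1 if a - b would borrow, else 0
  }

  int main() {
    assert(borrow_mask(1, 2) == 0xffffffffu); // borrow: all ones
    assert(borrow_mask(2, 1) == 0);           // no borrow: zero
    return 0;
  }
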
+
+
+
//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//
@@ -1814,12 +1721,6 @@ def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;
-// sub reg, relocImm
-def : Pat<(X86sub_flag GR64:$src1, i64relocImmSExt8_su:$src2),
- (SUB64ri8 GR64:$src1, i64relocImmSExt8_su:$src2)>;
-def : Pat<(X86sub_flag GR64:$src1, i64relocImmSExt32_su:$src2),
- (SUB64ri32 GR64:$src1, i64relocImmSExt32_su:$src2)>;
-
// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
(IMUL16rr GR16:$src1, GR16:$src2)>;
@@ -1890,7 +1791,7 @@ def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
// Increment/Decrement reg.
// Do not make INC/DEC if it is slow
-let Predicates = [UseIncDec] in {
+let Predicates = [NotSlowIncDec] in {
def : Pat<(add GR8:$src, 1), (INC8r GR8:$src)>;
def : Pat<(add GR16:$src, 1), (INC16r GR16:$src)>;
def : Pat<(add GR32:$src, 1), (INC32r GR32:$src)>;