author     Pascal Stumpf <pascal@cvs.openbsd.org>  2016-09-03 22:47:03 +0000
committer  Pascal Stumpf <pascal@cvs.openbsd.org>  2016-09-03 22:47:03 +0000
commit     c596f2c8274a8ab05a1e5fe0a58385c8df2ee5af (patch)
tree       6cef10c394eb179397f257ad93cabd305ad0ccb1 /gnu
parent     75d5280649f1c48f0877d595fa40e60842a24cbe (diff)
Use the space freed up by sparc and zaurus to import LLVM.
ok hackroom@
Diffstat (limited to 'gnu')
-rw-r--r--  gnu/llvm/include/llvm/CodeGen/CommandFlags.h | 194
-rw-r--r--  gnu/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h | 229
-rw-r--r--  gnu/llvm/include/llvm/CodeGen/LiveStackAnalysis.h | 23
-rw-r--r--  gnu/llvm/include/llvm/DebugInfo/CodeView/TypeRecordBuilder.h | 25
-rw-r--r--  gnu/llvm/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h | 137
-rw-r--r--  gnu/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h | 10
-rw-r--r--  gnu/llvm/include/llvm/Support/GCOV.h | 111
-rw-r--r--  gnu/llvm/include/llvm/Target/CostTable.h | 15
-rw-r--r--  gnu/llvm/include/llvm/Target/TargetCallingConv.h | 190
-rw-r--r--  gnu/llvm/include/llvm/Target/TargetFrameLowering.h | 78
-rw-r--r--  gnu/llvm/include/llvm/Target/TargetInstrInfo.h | 641
-rw-r--r--  gnu/llvm/include/llvm/Target/TargetLowering.h | 1248
-rw-r--r--  gnu/llvm/include/llvm/Target/TargetLoweringObjectFile.h | 124
-rw-r--r--  gnu/llvm/include/llvm/Target/TargetOpcodes.h | 127
-rw-r--r--  gnu/llvm/include/llvm/Target/TargetRegisterInfo.h | 348
-rw-r--r--  gnu/llvm/include/llvm/Target/TargetSubtargetInfo.h | 93
-rw-r--r--  gnu/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h | 27
-rw-r--r--  gnu/llvm/lib/Analysis/SparsePropagation.cpp | 6
-rw-r--r--  gnu/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp | 825
-rw-r--r--  gnu/llvm/lib/CodeGen/LiveStackAnalysis.cpp | 6
-rw-r--r--  gnu/llvm/lib/Fuzzer/CMakeLists.txt | 90
-rw-r--r--  gnu/llvm/lib/Fuzzer/FuzzerCrossOver.cpp | 5
-rw-r--r--  gnu/llvm/lib/Fuzzer/FuzzerDriver.cpp | 613
-rw-r--r--  gnu/llvm/lib/Fuzzer/FuzzerFlags.def | 113
-rw-r--r--  gnu/llvm/lib/Fuzzer/FuzzerIO.cpp | 97
-rw-r--r--  gnu/llvm/lib/Fuzzer/FuzzerInterface.h | 233
-rw-r--r--  gnu/llvm/lib/Fuzzer/FuzzerInternal.h | 236
-rw-r--r--  gnu/llvm/lib/Fuzzer/FuzzerLoop.cpp | 887
-rw-r--r--  gnu/llvm/lib/Fuzzer/FuzzerMain.cpp | 9
-rw-r--r--  gnu/llvm/lib/Fuzzer/FuzzerMutate.cpp | 543
-rw-r--r--  gnu/llvm/lib/Fuzzer/FuzzerSHA1.cpp | 24
-rw-r--r--  gnu/llvm/lib/Fuzzer/FuzzerUtil.cpp | 121
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/CMakeLists.txt | 295
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/CallerCalleeTest.cpp | 5
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/CounterTest.cpp | 3
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/FourIndependentBranchesTest.cpp | 6
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/FullCoverageSetTest.cpp | 5
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/FuzzerUnittest.cpp | 467
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/MemcmpTest.cpp | 7
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/NullDerefTest.cpp | 5
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/SimpleCmpTest.cpp | 48
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/SimpleDictionaryTest.cpp | 5
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/SimpleHashTest.cpp | 7
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/SimpleTest.cpp | 5
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/StrcmpTest.cpp | 13
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/StrncmpTest.cpp | 7
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/SwitchTest.cpp | 7
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/ThreadedTest.cpp | 5
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/TimeoutTest.cpp | 5
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/fuzzer-timeout.test | 10
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/fuzzer.test | 54
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/lit.cfg | 40
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/lit.site.cfg.in | 2
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/merge.test | 52
-rw-r--r--  gnu/llvm/lib/Fuzzer/test/uninstrumented/CMakeLists.txt | 19
-rw-r--r--  gnu/llvm/lib/IR/GCOV.cpp | 95
-rw-r--r--  gnu/llvm/lib/Target/AMDGPU/CIInstructions.td | 320
-rw-r--r--  gnu/llvm/lib/Target/AMDGPU/SIFixControlFlowLiveIntervals.cpp | 4
-rw-r--r--  gnu/llvm/lib/Target/Hexagon/HexagonIntrinsicsDerived.td | 18
-rw-r--r--  gnu/llvm/lib/Target/Mips/MicroMips64r6InstrFormats.td | 187
-rw-r--r--  gnu/llvm/lib/Target/Mips/MicroMips64r6InstrInfo.td | 477
-rw-r--r--  gnu/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp | 76
-rw-r--r--  gnu/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h | 8
-rw-r--r--  gnu/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h | 153
-rw-r--r--  gnu/llvm/tools/clang/lib/Analysis/BodyFarm.h | 2
65 files changed, 3354 insertions(+), 6486 deletions(-)
diff --git a/gnu/llvm/include/llvm/CodeGen/CommandFlags.h b/gnu/llvm/include/llvm/CodeGen/CommandFlags.h
index 0d898827efc..0d37dc00422 100644
--- a/gnu/llvm/include/llvm/CodeGen/CommandFlags.h
+++ b/gnu/llvm/include/llvm/CodeGen/CommandFlags.h
@@ -27,6 +27,7 @@
#include "llvm/Support/Host.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRecip.h"
#include <string>
using namespace llvm;
@@ -45,28 +46,20 @@ MAttrs("mattr",
cl::desc("Target specific attributes (-mattr=help for details)"),
cl::value_desc("a1,+a2,-a3,..."));
-cl::opt<Reloc::Model> RelocModel(
- "relocation-model", cl::desc("Choose relocation model"),
- cl::values(
- clEnumValN(Reloc::Static, "static", "Non-relocatable code"),
- clEnumValN(Reloc::PIC_, "pic",
- "Fully relocatable, position independent code"),
- clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
- "Relocatable external references, non-relocatable code"),
- clEnumValN(Reloc::ROPI, "ropi",
- "Code and read-only data relocatable, accessed PC-relative"),
- clEnumValN(Reloc::RWPI, "rwpi",
- "Read-write data relocatable, accessed relative to static base"),
- clEnumValN(Reloc::ROPI_RWPI, "ropi-rwpi",
- "Combination of ropi and rwpi")));
-
-static inline Optional<Reloc::Model> getRelocModel() {
- if (RelocModel.getNumOccurrences()) {
- Reloc::Model R = RelocModel;
- return R;
- }
- return None;
-}
+cl::opt<Reloc::Model>
+RelocModel("relocation-model",
+ cl::desc("Choose relocation model"),
+ cl::init(Reloc::Default),
+ cl::values(
+ clEnumValN(Reloc::Default, "default",
+ "Target default relocation model"),
+ clEnumValN(Reloc::Static, "static",
+ "Non-relocatable code"),
+ clEnumValN(Reloc::PIC_, "pic",
+ "Fully relocatable, position independent code"),
+ clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
+ "Relocatable external references, non-relocatable code"),
+ clEnumValEnd));
cl::opt<ThreadModel::Model>
TMModel("thread-model",
@@ -75,7 +68,8 @@ TMModel("thread-model",
cl::values(clEnumValN(ThreadModel::POSIX, "posix",
"POSIX thread model"),
clEnumValN(ThreadModel::Single, "single",
- "Single thread model")));
+ "Single thread model"),
+ clEnumValEnd));
cl::opt<llvm::CodeModel::Model>
CMModel("code-model",
@@ -90,22 +84,8 @@ CMModel("code-model",
clEnumValN(CodeModel::Medium, "medium",
"Medium code model"),
clEnumValN(CodeModel::Large, "large",
- "Large code model")));
-
-cl::opt<llvm::ExceptionHandling>
-ExceptionModel("exception-model",
- cl::desc("exception model"),
- cl::init(ExceptionHandling::None),
- cl::values(clEnumValN(ExceptionHandling::None, "default",
- "default exception handling model"),
- clEnumValN(ExceptionHandling::DwarfCFI, "dwarf",
- "DWARF-like CFI based exception handling"),
- clEnumValN(ExceptionHandling::SjLj, "sjlj",
- "SjLj exception handling"),
- clEnumValN(ExceptionHandling::ARM, "arm",
- "ARM EHABI exceptions"),
- clEnumValN(ExceptionHandling::WinEH, "wineh",
- "Windows exception model")));
+ "Large code model"),
+ clEnumValEnd));
cl::opt<TargetMachine::CodeGenFileType>
FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
@@ -116,7 +96,13 @@ FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
clEnumValN(TargetMachine::CGFT_ObjectFile, "obj",
"Emit a native object ('.o') file"),
clEnumValN(TargetMachine::CGFT_Null, "null",
- "Emit nothing, for performance testing")));
+ "Emit nothing, for performance testing"),
+ clEnumValEnd));
+
+cl::opt<bool>
+EnableFPMAD("enable-fp-mad",
+ cl::desc("Enable less precise MAD instructions to be generated"),
+ cl::init(false));
cl::opt<bool>
DisableFPElim("disable-fp-elim",
@@ -139,31 +125,6 @@ EnableNoNaNsFPMath("enable-no-nans-fp-math",
cl::init(false));
cl::opt<bool>
-EnableNoSignedZerosFPMath("enable-no-signed-zeros-fp-math",
- cl::desc("Enable FP math optimizations that assume "
- "the sign of 0 is insignificant"),
- cl::init(false));
-
-cl::opt<bool>
-EnableNoTrappingFPMath("enable-no-trapping-fp-math",
- cl::desc("Enable setting the FP exceptions build "
- "attribute not to use exceptions"),
- cl::init(false));
-
-cl::opt<llvm::FPDenormal::DenormalMode>
-DenormalMode("denormal-fp-math",
- cl::desc("Select which denormal numbers the code is permitted to require"),
- cl::init(FPDenormal::IEEE),
- cl::values(
- clEnumValN(FPDenormal::IEEE, "ieee",
- "IEEE 754 denormal numbers"),
- clEnumValN(FPDenormal::PreserveSign, "preserve-sign",
- "the sign of a flushed-to-zero number is preserved "
- "in the sign of 0"),
- clEnumValN(FPDenormal::PositiveZero, "positive-zero",
- "denormals are flushed to positive zero")));
-
-cl::opt<bool>
EnableHonorSignDependentRoundingFPMath("enable-sign-dependent-rounding-fp-math",
cl::Hidden,
cl::desc("Force codegen to assume rounding mode can change dynamically"),
@@ -179,7 +140,8 @@ FloatABIForCalls("float-abi",
clEnumValN(FloatABI::Soft, "soft",
"Soft float ABI (implied by -soft-float)"),
clEnumValN(FloatABI::Hard, "hard",
- "Hard float ABI (uses FP registers)")));
+ "Hard float ABI (uses FP registers)"),
+ clEnumValEnd));
cl::opt<llvm::FPOpFusion::FPOpFusionMode>
FuseFPOps("fp-contract",
@@ -191,7 +153,14 @@ FuseFPOps("fp-contract",
clEnumValN(FPOpFusion::Standard, "on",
"Only fuse 'blessed' FP ops."),
clEnumValN(FPOpFusion::Strict, "off",
- "Only fuse FP ops when the result won't be affected.")));
+ "Only fuse FP ops when the result won't be affected."),
+ clEnumValEnd));
+
+cl::list<std::string>
+ReciprocalOps("recip",
+ cl::CommaSeparated,
+ cl::desc("Choose reciprocal operation types and parameters."),
+ cl::value_desc("all,none,default,divf,!vec-sqrtd,vec-divd:0,sqrt:9..."));
cl::opt<bool>
DontPlaceZerosInBSS("nozero-initialized-in-bss",
@@ -208,11 +177,6 @@ DisableTailCalls("disable-tail-calls",
cl::desc("Never emit tail calls"),
cl::init(false));
-cl::opt<bool>
-StackSymbolOrdering("stack-symbol-ordering",
- cl::desc("Order local stack symbols."),
- cl::init(true));
-
cl::opt<unsigned>
OverrideStackAlignment("stack-alignment",
cl::desc("Override default stack alignment"),
@@ -229,14 +193,27 @@ TrapFuncName("trap-func", cl::Hidden,
cl::init(""));
cl::opt<bool>
+EnablePIE("enable-pie",
+ cl::desc("Assume the creation of a position independent executable."),
+ cl::init(false));
+
+cl::opt<bool>
UseCtors("use-ctors",
cl::desc("Use .ctors instead of .init_array."),
cl::init(false));
-cl::opt<bool> RelaxELFRelocations(
- "relax-elf-relocations",
- cl::desc("Emit GOTPCRELX/REX_GOTPCRELX instead of GOTPCREL on x86-64 ELF"),
- cl::init(false));
+cl::opt<std::string> StopAfter("stop-after",
+ cl::desc("Stop compilation after a specific pass"),
+ cl::value_desc("pass-name"),
+ cl::init(""));
+cl::opt<std::string> StartAfter("start-after",
+ cl::desc("Resume compilation after a specific pass"),
+ cl::value_desc("pass-name"),
+ cl::init(""));
+
+cl::opt<std::string>
+ RunPass("run-pass", cl::desc("Run compiler only for one specific pass"),
+ cl::value_desc("pass-name"), cl::init(""));
cl::opt<bool> DataSections("data-sections",
cl::desc("Emit data into separate sections"),
@@ -255,6 +232,21 @@ cl::opt<bool> UniqueSectionNames("unique-section-names",
cl::desc("Give unique names to every section"),
cl::init(true));
+cl::opt<llvm::JumpTable::JumpTableType>
+JTableType("jump-table-type",
+ cl::desc("Choose the type of Jump-Instruction Table for jumptable."),
+ cl::init(JumpTable::Single),
+ cl::values(
+ clEnumValN(JumpTable::Single, "single",
+ "Create a single table for all jumptable functions"),
+ clEnumValN(JumpTable::Arity, "arity",
+ "Create one table per number of parameters."),
+ clEnumValN(JumpTable::Simplified, "simplified",
+ "Create one table per simplified function type."),
+ clEnumValN(JumpTable::Full, "full",
+ "Create one table per unique function type."),
+ clEnumValEnd));
+
cl::opt<llvm::EABI> EABIVersion(
"meabi", cl::desc("Set EABI type (default depends on triple):"),
cl::init(EABI::Default),
@@ -262,7 +254,7 @@ cl::opt<llvm::EABI> EABIVersion(
"Triple default EABI version"),
clEnumValN(EABI::EABI4, "4", "EABI version 4"),
clEnumValN(EABI::EABI5, "5", "EABI version 5"),
- clEnumValN(EABI::GNU, "gnu", "EABI GNU")));
+ clEnumValN(EABI::GNU, "gnu", "EABI GNU"), clEnumValEnd));
cl::opt<DebuggerKind>
DebuggerTuningOpt("debugger-tune",
@@ -272,19 +264,19 @@ DebuggerTuningOpt("debugger-tune",
clEnumValN(DebuggerKind::GDB, "gdb", "gdb"),
clEnumValN(DebuggerKind::LLDB, "lldb", "lldb"),
clEnumValN(DebuggerKind::SCE, "sce",
- "SCE targets (e.g. PS4)")));
+ "SCE targets (e.g. PS4)"),
+ clEnumValEnd));
// Common utility function tightly tied to the options listed here. Initializes
// a TargetOptions object with CodeGen flags and returns it.
static inline TargetOptions InitTargetOptionsFromCodeGenFlags() {
TargetOptions Options;
+ Options.LessPreciseFPMADOption = EnableFPMAD;
Options.AllowFPOpFusion = FuseFPOps;
+ Options.Reciprocals = TargetRecip(ReciprocalOps);
Options.UnsafeFPMath = EnableUnsafeFPMath;
Options.NoInfsFPMath = EnableNoInfsFPMath;
Options.NoNaNsFPMath = EnableNoNaNsFPMath;
- Options.NoSignedZerosFPMath = EnableNoSignedZerosFPMath;
- Options.NoTrappingFPMath = EnableNoTrappingFPMath;
- Options.FPDenormalMode = DenormalMode;
Options.HonorSignDependentRoundingFPMathOption =
EnableHonorSignDependentRoundingFPMath;
if (FloatABIForCalls != FloatABI::Default)
@@ -292,16 +284,15 @@ static inline TargetOptions InitTargetOptionsFromCodeGenFlags() {
Options.NoZerosInBSS = DontPlaceZerosInBSS;
Options.GuaranteedTailCallOpt = EnableGuaranteedTailCallOpt;
Options.StackAlignmentOverride = OverrideStackAlignment;
- Options.StackSymbolOrdering = StackSymbolOrdering;
+ Options.PositionIndependentExecutable = EnablePIE;
Options.UseInitArray = !UseCtors;
- Options.RelaxELFRelocations = RelaxELFRelocations;
Options.DataSections = DataSections;
Options.FunctionSections = FunctionSections;
Options.UniqueSectionNames = UniqueSectionNames;
Options.EmulatedTLS = EmulatedTLS;
- Options.ExceptionModel = ExceptionModel;
Options.MCOptions = InitMCTargetOptionsFromFlags();
+ Options.JTType = JTableType;
Options.ThreadModel = TMModel;
Options.EABIVersion = EABIVersion;
@@ -346,21 +337,29 @@ static inline void setFunctionAttributes(StringRef CPU, StringRef Features,
Module &M) {
for (auto &F : M) {
auto &Ctx = F.getContext();
- AttributeList Attrs = F.getAttributes();
- AttrBuilder NewAttrs;
+ AttributeSet Attrs = F.getAttributes(), NewAttrs;
if (!CPU.empty())
- NewAttrs.addAttribute("target-cpu", CPU);
+ NewAttrs = NewAttrs.addAttribute(Ctx, AttributeSet::FunctionIndex,
+ "target-cpu", CPU);
+
if (!Features.empty())
- NewAttrs.addAttribute("target-features", Features);
+ NewAttrs = NewAttrs.addAttribute(Ctx, AttributeSet::FunctionIndex,
+ "target-features", Features);
+
if (DisableFPElim.getNumOccurrences() > 0)
- NewAttrs.addAttribute("no-frame-pointer-elim",
- DisableFPElim ? "true" : "false");
+ NewAttrs = NewAttrs.addAttribute(Ctx, AttributeSet::FunctionIndex,
+ "no-frame-pointer-elim",
+ DisableFPElim ? "true" : "false");
+
if (DisableTailCalls.getNumOccurrences() > 0)
- NewAttrs.addAttribute("disable-tail-calls",
- toStringRef(DisableTailCalls));
+ NewAttrs = NewAttrs.addAttribute(Ctx, AttributeSet::FunctionIndex,
+ "disable-tail-calls",
+ toStringRef(DisableTailCalls));
+
if (StackRealign)
- NewAttrs.addAttribute("stackrealign");
+ NewAttrs = NewAttrs.addAttribute(Ctx, AttributeSet::FunctionIndex,
+ "stackrealign");
if (TrapFuncName.getNumOccurrences() > 0)
for (auto &B : F)
@@ -369,13 +368,12 @@ static inline void setFunctionAttributes(StringRef CPU, StringRef Features,
if (const auto *F = Call->getCalledFunction())
if (F->getIntrinsicID() == Intrinsic::debugtrap ||
F->getIntrinsicID() == Intrinsic::trap)
- Call->addAttribute(
- llvm::AttributeList::FunctionIndex,
- Attribute::get(Ctx, "trap-func-name", TrapFuncName));
+ Call->addAttribute(llvm::AttributeSet::FunctionIndex,
+ "trap-func-name", TrapFuncName);
// Let NewAttrs override Attrs.
- F.setAttributes(
- Attrs.addAttributes(Ctx, AttributeList::FunctionIndex, NewAttrs));
+ NewAttrs = Attrs.addAttributes(Ctx, AttributeSet::FunctionIndex, NewAttrs);
+ F.setAttributes(NewAttrs);
}
}
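
The bulk of the hunks above are one mechanical change: the imported release predates LLVM 4.0, so every cl::values() list must still be closed with the clEnumValEnd sentinel, and options like -relocation-model regain explicit defaults. A minimal sketch of that older CommandLine style, assuming 3.8-era LLVM headers (the option and enum are invented for illustration):

    #include "llvm/Support/CommandLine.h"

    using namespace llvm;

    enum ExampleLevel { L0, L1, L2 };

    // Enum options enumerate their values with clEnumValN(); pre-4.0
    // headers require the terminating clEnumValEnd sentinel shown in
    // the hunks above.
    static cl::opt<ExampleLevel> Level(
        "example-level", cl::desc("Choose example level"),
        cl::init(L0),
        cl::values(clEnumValN(L0, "l0", "No extra work"),
                   clEnumValN(L1, "l1", "Some extra work"),
                   clEnumValN(L2, "l2", "All the extra work"),
                   clEnumValEnd));

    int main(int argc, char **argv) {
      cl::ParseCommandLineOptions(argc, argv);
      return Level == L2 ? 0 : 1;
    }
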
diff --git a/gnu/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h b/gnu/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
index 820e8836248..87421e2f83b 100644
--- a/gnu/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
+++ b/gnu/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -1,4 +1,4 @@
-//===- LiveIntervalAnalysis.h - Live Interval Analysis ----------*- C++ -*-===//
+//===-- LiveIntervalAnalysis.h - Live Interval Analysis ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,20 +7,19 @@
//
//===----------------------------------------------------------------------===//
//
-/// \file This file implements the LiveInterval analysis pass. Given some
-/// numbering of each of the machine instructions (in this implementation depth-first
-/// order) an interval [i, j) is said to be a live interval for register v if
-/// there is no instruction with number j' > j such that v is live at j' and
-/// there is no instruction with number i' < i such that v is live at i'. In
-/// this implementation intervals can have holes, i.e. an interval might look
-/// like [1,20), [50,65), [1000,1001).
+// This file implements the LiveInterval analysis pass. Given some numbering of
+// each of the machine instructions (in this implementation depth-first order) an
+// interval [i, j) is said to be a live interval for register v if there is no
+// instruction with number j' > j such that v is live at j' and there is no
+// instruction with number i' < i such that v is live at i'. In this
+// implementation intervals can have holes, i.e. an interval might look like
+// [1,20), [50,65), [1000,1001).
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LIVEINTERVALANALYSIS_H
#define LLVM_CODEGEN_LIVEINTERVALANALYSIS_H
-#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
@@ -28,29 +27,28 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SlotIndexes.h"
-#include "llvm/MC/LaneBitmask.h"
+#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include <cassert>
-#include <cstdint>
-#include <utility>
+#include <cmath>
+#include <iterator>
namespace llvm {
extern cl::opt<bool> UseSegmentSetForPhysRegs;
-class BitVector;
-class LiveRangeCalc;
-class MachineBlockFrequencyInfo;
-class MachineDominatorTree;
-class MachineFunction;
-class MachineInstr;
-class MachineRegisterInfo;
-class raw_ostream;
-class TargetInstrInfo;
-class VirtRegMap;
+ class BitVector;
+ class BlockFrequency;
+ class LiveRangeCalc;
+ class LiveVariables;
+ class MachineDominatorTree;
+ class MachineLoopInfo;
+ class TargetRegisterInfo;
+ class MachineRegisterInfo;
+ class TargetInstrInfo;
+ class TargetRegisterClass;
+ class VirtRegMap;
+ class MachineBlockFrequencyInfo;
class LiveIntervals : public MachineFunctionPass {
MachineFunction* MF;
@@ -59,21 +57,24 @@ class VirtRegMap;
const TargetInstrInfo* TII;
AliasAnalysis *AA;
SlotIndexes* Indexes;
- MachineDominatorTree *DomTree = nullptr;
- LiveRangeCalc *LRCalc = nullptr;
+ MachineDominatorTree *DomTree;
+ LiveRangeCalc *LRCalc;
/// Special pool allocator for VNInfo's (LiveInterval val#).
+ ///
VNInfo::Allocator VNInfoAllocator;
/// Live interval pointers for all the virtual registers.
IndexedMap<LiveInterval*, VirtReg2IndexFunctor> VirtRegIntervals;
- /// Sorted list of instructions with register mask operands. Always use the
- /// 'r' slot, RegMasks are normal clobbers, not early clobbers.
+ /// RegMaskSlots - Sorted list of instructions with register mask operands.
+ /// Always use the 'r' slot, RegMasks are normal clobbers, not early
+ /// clobbers.
SmallVector<SlotIndex, 8> RegMaskSlots;
- /// This vector is parallel to RegMaskSlots, it holds a pointer to the
- /// corresponding register mask. This pointer can be recomputed as:
+ /// RegMaskBits - This vector is parallel to RegMaskSlots, it holds a
+ /// pointer to the corresponding register mask. This pointer can be
+ /// recomputed as:
///
/// MI = Indexes->getInstructionFromIndex(RegMaskSlot[N]);
/// unsigned OpNum = findRegMaskOperand(MI);
@@ -97,15 +98,14 @@ class VirtRegMap;
SmallVector<LiveRange*, 0> RegUnitRanges;
public:
- static char ID;
-
+ static char ID; // Pass identification, replacement for typeid
LiveIntervals();
~LiveIntervals() override;
- /// Calculate the spill weight to assign to a single instruction.
+ // Calculate the spill weight to assign to a single instruction.
static float getSpillWeight(bool isDef, bool isUse,
const MachineBlockFrequencyInfo *MBFI,
- const MachineInstr &Instr);
+ const MachineInstr *Instr);
LiveInterval &getInterval(unsigned Reg) {
if (hasInterval(Reg))
@@ -122,7 +122,7 @@ class VirtRegMap;
return VirtRegIntervals.inBounds(Reg) && VirtRegIntervals[Reg];
}
- /// Interval creation.
+ // Interval creation.
LiveInterval &createEmptyInterval(unsigned Reg) {
assert(!hasInterval(Reg) && "Interval already exists!");
VirtRegIntervals.grow(Reg);
@@ -136,7 +136,7 @@ class VirtRegMap;
return LI;
}
- /// Interval removal.
+ // Interval removal.
void removeInterval(unsigned Reg) {
delete VirtRegIntervals[Reg];
VirtRegIntervals[Reg] = nullptr;
@@ -145,7 +145,7 @@ class VirtRegMap;
/// Given a register and an instruction, adds a live segment from that
/// instruction to the end of its MBB.
LiveInterval::Segment addSegmentToEndOfBlock(unsigned reg,
- MachineInstr &startInst);
+ MachineInstr* startInst);
/// After removing some uses of a register, shrink its live range to just
/// the remaining uses. This method does not compute reaching defs for new
@@ -164,26 +164,18 @@ class VirtRegMap;
/// LiveInterval::removeEmptySubranges() afterwards.
void shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg);
- /// Extend the live range \p LR to reach all points in \p Indices. The
- /// points in the \p Indices array must be jointly dominated by the union
- /// of the existing defs in \p LR and points in \p Undefs.
- ///
- /// PHI-defs are added as needed to maintain SSA form.
+ /// extendToIndices - Extend the live range of LI to reach all points in
+ /// Indices. The points in the Indices array must be jointly dominated by
+ /// existing defs in LI. PHI-defs are added as needed to maintain SSA form.
///
- /// If a SlotIndex in \p Indices is the end index of a basic block, \p LR
- /// will be extended to be live out of the basic block.
- /// If a SlotIndex in \p Indices is jointly dominated only by points in
- /// \p Undefs, the live range will not be extended to that point.
+ /// If a SlotIndex in Indices is the end index of a basic block, LI will be
+ /// extended to be live out of the basic block.
///
/// See also LiveRangeCalc::extend().
- void extendToIndices(LiveRange &LR, ArrayRef<SlotIndex> Indices,
- ArrayRef<SlotIndex> Undefs);
+ void extendToIndices(LiveRange &LR, ArrayRef<SlotIndex> Indices);
- void extendToIndices(LiveRange &LR, ArrayRef<SlotIndex> Indices) {
- extendToIndices(LR, Indices, /*Undefs=*/{});
- }
- /// If \p LR has a live value at \p Kill, prune its live range by removing
+ /// If @p LR has a live value at @p Kill, prune its live range by removing
/// any liveness reachable from Kill. Add live range end points to
/// EndPoints such that extendToIndices(LI, EndPoints) will reconstruct the
/// value's live range.
@@ -193,16 +185,6 @@ class VirtRegMap;
void pruneValue(LiveRange &LR, SlotIndex Kill,
SmallVectorImpl<SlotIndex> *EndPoints);
- /// This function should not be used. Its intent is to tell you that
- /// you are doing something wrong if you call pruneValue directly on a
- /// LiveInterval. Indeed, you are supposed to call pruneValue on the main
- /// LiveRange and on all the LiveRanges of the subranges, if any.
- LLVM_ATTRIBUTE_UNUSED void pruneValue(LiveInterval &, SlotIndex,
- SmallVectorImpl<SlotIndex> *) {
- llvm_unreachable(
- "Use pruneValue on the main LiveRange and on each subrange");
- }
-
SlotIndexes *getSlotIndexes() const {
return Indexes;
}
@@ -211,15 +193,15 @@ class VirtRegMap;
return AA;
}
- /// Returns true if the specified machine instr has been removed or was
- /// never entered in the map.
- bool isNotInMIMap(const MachineInstr &Instr) const {
+ /// isNotInMIMap - returns true if the specified machine instr has been
+ /// removed or was never entered in the map.
+ bool isNotInMIMap(const MachineInstr* Instr) const {
return !Indexes->hasIndex(Instr);
}
/// Returns the base index of the given instruction.
- SlotIndex getInstructionIndex(const MachineInstr &Instr) const {
- return Indexes->getInstructionIndex(Instr);
+ SlotIndex getInstructionIndex(const MachineInstr *instr) const {
+ return Indexes->getInstructionIndex(instr);
}
/// Returns the instruction associated with the given index.
@@ -258,22 +240,22 @@ class VirtRegMap;
RegMaskBlocks.push_back(std::make_pair(RegMaskSlots.size(), 0));
}
- SlotIndex InsertMachineInstrInMaps(MachineInstr &MI) {
+ SlotIndex InsertMachineInstrInMaps(MachineInstr *MI) {
return Indexes->insertMachineInstrInMaps(MI);
}
void InsertMachineInstrRangeInMaps(MachineBasicBlock::iterator B,
MachineBasicBlock::iterator E) {
for (MachineBasicBlock::iterator I = B; I != E; ++I)
- Indexes->insertMachineInstrInMaps(*I);
+ Indexes->insertMachineInstrInMaps(I);
}
- void RemoveMachineInstrFromMaps(MachineInstr &MI) {
+ void RemoveMachineInstrFromMaps(MachineInstr *MI) {
Indexes->removeMachineInstrFromMaps(MI);
}
- SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI) {
- return Indexes->replaceMachineInstrInMaps(MI, NewMI);
+ void ReplaceMachineInstrInMaps(MachineInstr *MI, MachineInstr *NewMI) {
+ Indexes->replaceMachineInstrInMaps(MI, NewMI);
}
VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }
@@ -281,44 +263,48 @@ class VirtRegMap;
void getAnalysisUsage(AnalysisUsage &AU) const override;
void releaseMemory() override;
- /// Pass entry point; Calculates LiveIntervals.
+ /// runOnMachineFunction - pass entry point
bool runOnMachineFunction(MachineFunction&) override;
- /// Implement the dump method.
+ /// print - Implement the dump method.
void print(raw_ostream &O, const Module* = nullptr) const override;
- /// If LI is confined to a single basic block, return a pointer to that
- /// block. If LI is live in to or out of any block, return NULL.
+ /// intervalIsInOneMBB - If LI is confined to a single basic block, return
+ /// a pointer to that block. If LI is live in to or out of any block,
+ /// return NULL.
MachineBasicBlock *intervalIsInOneMBB(const LiveInterval &LI) const;
/// Returns true if VNI is killed by any PHI-def values in LI.
/// This may conservatively return true to avoid expensive computations.
bool hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const;
- /// Add kill flags to any instruction that kills a virtual register.
+ /// addKillFlags - Add kill flags to any instruction that kills a virtual
+ /// register.
void addKillFlags(const VirtRegMap*);
- /// Call this method to notify LiveIntervals that instruction \p MI has been
- /// moved within a basic block. This will update the live intervals for all
- /// operands of \p MI. Moves between basic blocks are not supported.
+ /// handleMove - call this method to notify LiveIntervals that
+ /// instruction 'mi' has been moved within a basic block. This will update
+ /// the live intervals for all operands of mi. Moves between basic blocks
+ /// are not supported.
///
/// \param UpdateFlags Update live intervals for nonallocatable physregs.
- void handleMove(MachineInstr &MI, bool UpdateFlags = false);
+ void handleMove(MachineInstr* MI, bool UpdateFlags = false);
- /// Update intervals for operands of \p MI so that they begin/end on the
- /// SlotIndex for \p BundleStart.
+ /// moveIntoBundle - Update intervals for operands of MI so that they
+ /// begin/end on the SlotIndex for BundleStart.
///
/// \param UpdateFlags Update live intervals for nonallocatable physregs.
///
/// Requires MI and BundleStart to have SlotIndexes, and assumes
/// existing liveness is accurate. BundleStart should be the first
/// instruction in the Bundle.
- void handleMoveIntoBundle(MachineInstr &MI, MachineInstr &BundleStart,
+ void handleMoveIntoBundle(MachineInstr* MI, MachineInstr* BundleStart,
bool UpdateFlags = false);
- /// Update live intervals for instructions in a range of iterators. It is
- /// intended for use after target hooks that may insert or remove
- /// instructions, and is only efficient for a small number of instructions.
+ /// repairIntervalsInRange - Update live intervals for instructions in a
+ /// range of iterators. It is intended for use after target hooks that may
+ /// insert or remove instructions, and is only efficient for a small number
+ /// of instructions.
///
/// OrigRegs is a vector of registers that were originally used by the
/// instructions in the range between the two iterators.
@@ -341,33 +327,34 @@ class VirtRegMap;
// LiveIntervalAnalysis maintains a sorted list of instructions with
// register mask operands.
- /// Returns a sorted array of slot indices of all instructions with
- /// register mask operands.
+ /// getRegMaskSlots - Returns a sorted array of slot indices of all
+ /// instructions with register mask operands.
ArrayRef<SlotIndex> getRegMaskSlots() const { return RegMaskSlots; }
- /// Returns a sorted array of slot indices of all instructions with register
- /// mask operands in the basic block numbered \p MBBNum.
+ /// getRegMaskSlotsInBlock - Returns a sorted array of slot indices of all
+ /// instructions with register mask operands in the basic block numbered
+ /// MBBNum.
ArrayRef<SlotIndex> getRegMaskSlotsInBlock(unsigned MBBNum) const {
std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
return getRegMaskSlots().slice(P.first, P.second);
}
- /// Returns an array of register mask pointers corresponding to
- /// getRegMaskSlots().
+ /// getRegMaskBits() - Returns an array of register mask pointers
+ /// corresponding to getRegMaskSlots().
ArrayRef<const uint32_t*> getRegMaskBits() const { return RegMaskBits; }
- /// Returns an array of mask pointers corresponding to
- /// getRegMaskSlotsInBlock(MBBNum).
+ /// getRegMaskBitsInBlock - Returns an array of mask pointers corresponding
+ /// to getRegMaskSlotsInBlock(MBBNum).
ArrayRef<const uint32_t*> getRegMaskBitsInBlock(unsigned MBBNum) const {
std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
return getRegMaskBits().slice(P.first, P.second);
}
- /// Test if \p LI is live across any register mask instructions, and
- /// compute a bit mask of physical registers that are not clobbered by any
- /// of them.
+ /// checkRegMaskInterference - Test if LI is live across any register mask
+ /// instructions, and compute a bit mask of physical registers that are not
+ /// clobbered by any of them.
///
- /// Returns false if \p LI doesn't cross any register mask instructions. In
+ /// Returns false if LI doesn't cross any register mask instructions. In
/// that case, the bit vector is not filled in.
bool checkRegMaskInterference(LiveInterval &LI,
BitVector &UsableRegs);
@@ -383,8 +370,8 @@ class VirtRegMap;
// track liveness per register unit to handle aliasing registers more
// efficiently.
- /// Return the live range for register unit \p Unit. It will be computed if
- /// it doesn't exist.
+ /// getRegUnit - Return the live range for Unit.
+ /// It will be computed if it doesn't exist.
LiveRange &getRegUnit(unsigned Unit) {
LiveRange *LR = RegUnitRanges[Unit];
if (!LR) {
@@ -396,8 +383,8 @@ class VirtRegMap;
return *LR;
}
- /// Return the live range for register unit \p Unit if it has already been
- /// computed, or nullptr if it hasn't been computed yet.
+ /// getCachedRegUnit - Return the live range for Unit if it has already
+ /// been computed, or NULL if it hasn't been computed yet.
LiveRange *getCachedRegUnit(unsigned Unit) {
return RegUnitRanges[Unit];
}
@@ -406,31 +393,19 @@ class VirtRegMap;
return RegUnitRanges[Unit];
}
- /// Remove computed live range for register unit \p Unit. Subsequent uses
- /// should rely on on-demand recomputation.
- void removeRegUnit(unsigned Unit) {
- delete RegUnitRanges[Unit];
- RegUnitRanges[Unit] = nullptr;
- }
-
/// Remove value numbers and related live segments starting at position
- /// \p Pos that are part of any liverange of physical register \p Reg or one
+ /// @p Pos that are part of any liverange of physical register @p Reg or one
/// of its subregisters.
void removePhysRegDefAt(unsigned Reg, SlotIndex Pos);
- /// Remove value number and related live segments of \p LI and its subranges
- /// that start at position \p Pos.
+ /// Remove value number and related live segments of @p LI and its subranges
+ /// that start at position @p Pos.
void removeVRegDefAt(LiveInterval &LI, SlotIndex Pos);
/// Split separate components in LiveInterval \p LI into separate intervals.
void splitSeparateComponents(LiveInterval &LI,
SmallVectorImpl<LiveInterval*> &SplitLIs);
- /// For live interval \p LI with correct SubRanges construct matching
- /// information for the main live range. Expects the main live range to not
- /// have any segments or value numbers.
- void constructMainRangeFromSubranges(LiveInterval &LI);
-
private:
/// Compute live intervals for all virtual registers.
void computeVirtRegs();
@@ -438,10 +413,10 @@ class VirtRegMap;
/// Compute RegMaskSlots and RegMaskBits.
void computeRegMasks();
- /// Walk the values in \p LI and check for dead values:
+ /// Walk the values in @p LI and check for dead values:
/// - Dead PHIDef values are marked as unused.
/// - Dead operands are marked as such.
- /// - Completely dead machine instructions are added to the \p dead vector
+ /// - Completely dead machine instructions are added to the @p dead vector
/// if it is not nullptr.
/// Returns true if any PHI value numbers have been removed which may
/// have separated the interval into multiple connected components.
@@ -459,18 +434,16 @@ class VirtRegMap;
/// Helper function for repairIntervalsInRange(), walks backwards and
- /// creates/modifies live segments in \p LR to match the operands found.
- /// Only full operands or operands with subregisters matching \p LaneMask
+ /// creates/modifies live segments in @p LR to match the operands found.
+ /// Only full operands or operands with subregisters matching @p LaneMask
/// are considered.
void repairOldRegInRange(MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
const SlotIndex endIdx, LiveRange &LR,
- unsigned Reg,
- LaneBitmask LaneMask = LaneBitmask::getAll());
+ unsigned Reg, LaneBitmask LaneMask = ~0u);
class HMEditor;
};
+} // End llvm namespace
-} // end namespace llvm
-
-#endif // LLVM_CODEGEN_LIVEINTERVALANALYSIS_H
+#endif
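
Aside from comment style, the dominant change in this header is interface shape: handleMove(), getInstructionIndex() and friends go back to taking MachineInstr* rather than MachineInstr&, because the imported release passes instructions by pointer (later LLVM moved to references to encode non-null in the type). A self-contained sketch of the two shapes, with a placeholder Instr type standing in for MachineInstr:

    #include <cassert>

    struct Instr { int Id; };  // stand-in for llvm::MachineInstr

    // Imported (pointer) style: null must be ruled out at run time.
    int getIndexOld(const Instr *I) {
      assert(I && "instruction must not be null");
      return I->Id;
    }

    // Later (reference) style: non-null is part of the signature.
    int getIndexNew(const Instr &I) { return I.Id; }

    int main() {
      Instr MI{42};
      return getIndexOld(&MI) == getIndexNew(MI) ? 0 : 1;
    }
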
diff --git a/gnu/llvm/include/llvm/CodeGen/LiveStackAnalysis.h b/gnu/llvm/include/llvm/CodeGen/LiveStackAnalysis.h
index c90ae7b184f..3ffbe3d775b 100644
--- a/gnu/llvm/include/llvm/CodeGen/LiveStackAnalysis.h
+++ b/gnu/llvm/include/llvm/CodeGen/LiveStackAnalysis.h
@@ -1,4 +1,4 @@
-//===- LiveStackAnalysis.h - Live Stack Slot Analysis -----------*- C++ -*-===//
+//===-- LiveStackAnalysis.h - Live Stack Slot Analysis ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -18,16 +18,13 @@
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/Pass.h"
-#include <cassert>
+#include "llvm/Support/Allocator.h"
+#include "llvm/Target/TargetRegisterInfo.h"
#include <map>
#include <unordered_map>
namespace llvm {
-class TargetRegisterClass;
-class TargetRegisterInfo;
-
class LiveStacks : public MachineFunctionPass {
const TargetRegisterInfo *TRI;
@@ -36,7 +33,8 @@ class LiveStacks : public MachineFunctionPass {
VNInfo::Allocator VNInfoAllocator;
/// S2IMap - Stack slot indices to live interval mapping.
- using SS2IntervalMap = std::unordered_map<int, LiveInterval>;
+ ///
+ typedef std::unordered_map<int, LiveInterval> SS2IntervalMap;
SS2IntervalMap S2IMap;
/// S2RCMap - Stack slot indices to register class mapping.
@@ -44,14 +42,12 @@ class LiveStacks : public MachineFunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
-
LiveStacks() : MachineFunctionPass(ID) {
initializeLiveStacksPass(*PassRegistry::getPassRegistry());
}
- using iterator = SS2IntervalMap::iterator;
- using const_iterator = SS2IntervalMap::const_iterator;
-
+ typedef SS2IntervalMap::iterator iterator;
+ typedef SS2IntervalMap::const_iterator const_iterator;
const_iterator begin() const { return S2IMap.begin(); }
const_iterator end() const { return S2IMap.end(); }
iterator begin() { return S2IMap.begin(); }
@@ -97,7 +93,6 @@ public:
/// print - Implement the dump method.
void print(raw_ostream &O, const Module * = nullptr) const override;
};
+}
-} // end namespace llvm
-
-#endif // LLVM_CODEGEN_LIVESTACK_ANALYSIS_H
+#endif /* LLVM_CODEGEN_LIVESTACK_ANALYSIS_H */
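
This header, like several below, also trades C++11 `using` aliases back for `typedef`s; for non-template aliases the two spellings are interchangeable. A quick self-contained check, with placeholder names:

    #include <type_traits>
    #include <unordered_map>

    typedef std::unordered_map<int, long> MapTypedef;  // style restored by the import
    using MapUsing = std::unordered_map<int, long>;    // upstream's later spelling

    static_assert(std::is_same<MapTypedef, MapUsing>::value,
                  "both declarations name the same type");

    int main() { return 0; }
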
diff --git a/gnu/llvm/include/llvm/DebugInfo/CodeView/TypeRecordBuilder.h b/gnu/llvm/include/llvm/DebugInfo/CodeView/TypeRecordBuilder.h
index 5a6507ee7f5..1f48cf70666 100644
--- a/gnu/llvm/include/llvm/DebugInfo/CodeView/TypeRecordBuilder.h
+++ b/gnu/llvm/include/llvm/DebugInfo/CodeView/TypeRecordBuilder.h
@@ -10,10 +10,9 @@
#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPERECORDBUILDER_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPERECORDBUILDER_H
-#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
-#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/raw_ostream.h"
@@ -40,34 +39,14 @@ public:
void writeEncodedInteger(int64_t Value);
void writeEncodedSignedInteger(int64_t Value);
void writeEncodedUnsignedInteger(uint64_t Value);
+ void writeNullTerminatedString(const char *Value);
void writeNullTerminatedString(StringRef Value);
- void writeGuid(StringRef Guid);
- void writeBytes(StringRef Value) { Stream << Value; }
llvm::StringRef str();
uint64_t size() const { return Stream.tell(); }
- TypeRecordKind kind() const { return Kind; }
-
- /// Returns the number of bytes remaining before this record is larger than
- /// the maximum record length. Accounts for the extra two byte size field in
- /// the header.
- size_t maxBytesRemaining() const { return MaxRecordLength - size() - 2; }
-
- void truncate(uint64_t Size) {
- // This works because raw_svector_ostream is not buffered.
- assert(Size < Buffer.size());
- Buffer.resize(Size);
- }
-
- void reset(TypeRecordKind K) {
- Buffer.clear();
- Kind = K;
- writeTypeRecordKind(K);
- }
private:
- TypeRecordKind Kind;
llvm::SmallVector<char, 256> Buffer;
llvm::raw_svector_ostream Stream;
llvm::support::endian::Writer<llvm::support::endianness::little> Writer;
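
The restored writeNullTerminatedString(const char *) overload appends a string's bytes plus a trailing NUL to the record buffer, since CodeView type records store strings NUL-terminated. A minimal sketch of that operation against a plain std::string buffer (an illustration, not the LLVM implementation):

    #include <cassert>
    #include <string>

    // Append the characters of Value followed by a single NUL byte.
    static void writeNullTerminatedString(std::string &Buffer, const char *Value) {
      Buffer.append(Value);
      Buffer.push_back('\0');
    }

    int main() {
      std::string Buf;
      writeNullTerminatedString(Buf, "Foo");
      assert(Buf.size() == 4 && Buf[3] == '\0');
      return 0;
    }
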
diff --git a/gnu/llvm/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h b/gnu/llvm/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h
index 1069dcd4533..2c950e8af79 100644
--- a/gnu/llvm/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h
+++ b/gnu/llvm/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h
@@ -10,128 +10,51 @@
#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPETABLEBUILDER_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPETABLEBUILDER_H
-#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
-#include "llvm/DebugInfo/CodeView/TypeSerializer.h"
-#include "llvm/Support/Allocator.h"
-#include "llvm/Support/Error.h"
-#include <algorithm>
-#include <cassert>
-#include <cstdint>
-#include <type_traits>
+#include "llvm/Support/Compiler.h"
namespace llvm {
namespace codeview {
+class FieldListRecordBuilder;
+class MethodListRecordBuilder;
+class TypeRecordBuilder;
+
class TypeTableBuilder {
private:
- TypeIndex handleError(Error EC) const {
- assert(false && "Couldn't write Type!");
- consumeError(std::move(EC));
- return TypeIndex();
- }
-
- BumpPtrAllocator &Allocator;
- TypeSerializer Serializer;
-
-public:
- explicit TypeTableBuilder(BumpPtrAllocator &Allocator,
- bool WriteUnique = true)
- : Allocator(Allocator), Serializer(Allocator, WriteUnique) {}
TypeTableBuilder(const TypeTableBuilder &) = delete;
TypeTableBuilder &operator=(const TypeTableBuilder &) = delete;
- bool empty() const { return Serializer.records().empty(); }
-
- BumpPtrAllocator &getAllocator() const { return Allocator; }
-
- template <typename T> TypeIndex writeKnownType(T &Record) {
- static_assert(!std::is_same<T, FieldListRecord>::value,
- "Can't serialize FieldList!");
-
- CVType Type;
- Type.Type = static_cast<TypeLeafKind>(Record.getKind());
- if (auto EC = Serializer.visitTypeBegin(Type))
- return handleError(std::move(EC));
- if (auto EC = Serializer.visitKnownRecord(Type, Record))
- return handleError(std::move(EC));
-
- auto ExpectedIndex = Serializer.visitTypeEndGetIndex(Type);
- if (!ExpectedIndex)
- return handleError(ExpectedIndex.takeError());
-
- return *ExpectedIndex;
- }
-
- TypeIndex writeSerializedRecord(ArrayRef<uint8_t> Record) {
- return Serializer.insertRecordBytes(Record);
- }
-
- TypeIndex writeSerializedRecord(const RemappedType &Record) {
- return Serializer.insertRecord(Record);
- }
-
- template <typename TFunc> void ForEachRecord(TFunc Func) {
- uint32_t Index = TypeIndex::FirstNonSimpleIndex;
-
- for (auto Record : Serializer.records()) {
- Func(TypeIndex(Index), Record);
- ++Index;
- }
- }
-
- ArrayRef<ArrayRef<uint8_t>> records() const { return Serializer.records(); }
-};
-
-class FieldListRecordBuilder {
- TypeTableBuilder &TypeTable;
- BumpPtrAllocator Allocator;
- TypeSerializer TempSerializer;
- CVType Type;
+protected:
+ TypeTableBuilder();
public:
- explicit FieldListRecordBuilder(TypeTableBuilder &TypeTable)
- : TypeTable(TypeTable), TempSerializer(Allocator, false) {
- Type.Type = TypeLeafKind::LF_FIELDLIST;
- }
-
- void begin() {
- TempSerializer.reset();
-
- if (auto EC = TempSerializer.visitTypeBegin(Type))
- consumeError(std::move(EC));
- }
+ virtual ~TypeTableBuilder();
- template <typename T> void writeMemberType(T &Record) {
- CVMemberRecord CVMR;
- CVMR.Kind = static_cast<TypeLeafKind>(Record.getKind());
- if (auto EC = TempSerializer.visitMemberBegin(CVMR))
- consumeError(std::move(EC));
- if (auto EC = TempSerializer.visitKnownMember(CVMR, Record))
- consumeError(std::move(EC));
- if (auto EC = TempSerializer.visitMemberEnd(CVMR))
- consumeError(std::move(EC));
- }
-
- TypeIndex end(bool Write) {
- TypeIndex Index;
- if (auto EC = TempSerializer.visitTypeEnd(Type)) {
- consumeError(std::move(EC));
- return TypeIndex();
- }
-
- if (Write) {
- for (auto Record : TempSerializer.records())
- Index = TypeTable.writeSerializedRecord(Record);
- }
+public:
+ TypeIndex writeModifier(const ModifierRecord &Record);
+ TypeIndex writeProcedure(const ProcedureRecord &Record);
+ TypeIndex writeMemberFunction(const MemberFunctionRecord &Record);
+ TypeIndex writeArgumentList(const ArgumentListRecord &Record);
+ TypeIndex writeRecord(TypeRecordBuilder &builder);
+ TypeIndex writePointer(const PointerRecord &Record);
+ TypeIndex writePointerToMember(const PointerToMemberRecord &Record);
+ TypeIndex writeArray(const ArrayRecord &Record);
+ TypeIndex writeAggregate(const AggregateRecord &Record);
+ TypeIndex writeEnum(const EnumRecord &Record);
+ TypeIndex writeBitField(const BitFieldRecord &Record);
+ TypeIndex writeVirtualTableShape(const VirtualTableShapeRecord &Record);
+
+ TypeIndex writeFieldList(FieldListRecordBuilder &FieldList);
+ TypeIndex writeMethodList(MethodListRecordBuilder &MethodList);
- return Index;
- }
+private:
+ virtual TypeIndex writeRecord(llvm::StringRef record) = 0;
};
+}
+}
-} // end namespace codeview
-} // end namespace llvm
-
-#endif // LLVM_DEBUGINFO_CODEVIEW_TYPETABLEBUILDER_H
+#endif
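
The interface restored here is a classic abstract-builder design: typed writeModifier()/writePointer()/... entry points all funnel into one pure-virtual writeRecord() hook that a concrete table implements. A compact sketch of that shape, with record payloads simplified to strings (0x1000 is CodeView's first non-simple type index):

    #include <cstdint>
    #include <string>
    #include <vector>

    struct TypeIndex { uint32_t Value; };

    class TableBuilderBase {
    public:
      virtual ~TableBuilderBase() {}
      // Typed entry points; each serializes and defers to writeRecord().
      TypeIndex writeModifier(const std::string &R) { return writeRecord(R); }
      TypeIndex writePointer(const std::string &R) { return writeRecord(R); }

    private:
      virtual TypeIndex writeRecord(const std::string &Record) = 0;
    };

    class VectorTable : public TableBuilderBase {
      std::vector<std::string> Records;
      TypeIndex writeRecord(const std::string &Record) override {
        Records.push_back(Record);
        return TypeIndex{0x1000 + static_cast<uint32_t>(Records.size() - 1)};
      }
    };

    int main() {
      VectorTable T;
      return T.writePointer("LF_POINTER payload").Value == 0x1000 ? 0 : 1;
    }
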
diff --git a/gnu/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h b/gnu/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h
index 96179be3b8b..5180208d33b 100644
--- a/gnu/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h
+++ b/gnu/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h
@@ -26,7 +26,8 @@ cl::opt<MCTargetOptions::AsmInstrumentation> AsmInstrumentation(
cl::values(clEnumValN(MCTargetOptions::AsmInstrumentationNone, "none",
"no instrumentation at all"),
clEnumValN(MCTargetOptions::AsmInstrumentationAddress, "address",
- "instrument instructions with memory arguments")));
+ "instrument instructions with memory arguments"),
+ clEnumValEnd));
cl::opt<bool> RelaxAll("mc-relax-all",
cl::desc("When used with filetype=obj, "
@@ -38,8 +39,6 @@ cl::opt<bool> IncrementalLinkerCompatible(
"When used with filetype=obj, "
"emit an object file which can be used with an incremental linker"));
-cl::opt<bool> PIECopyRelocations("pie-copy-relocations", cl::desc("PIE Copy Relocations"));
-
cl::opt<int> DwarfVersion("dwarf-version", cl::desc("Dwarf version"),
cl::init(0));
@@ -53,9 +52,6 @@ cl::opt<bool> FatalWarnings("fatal-warnings",
cl::opt<bool> NoWarn("no-warn", cl::desc("Suppress all warnings"));
cl::alias NoWarnW("W", cl::desc("Alias for --no-warn"), cl::aliasopt(NoWarn));
-cl::opt<bool> NoDeprecatedWarn("no-deprecated-warn",
- cl::desc("Suppress all deprecated warnings"));
-
cl::opt<std::string>
ABIName("target-abi", cl::Hidden,
cl::desc("The name of the ABI to be targeted from the backend."),
@@ -67,13 +63,11 @@ static inline MCTargetOptions InitMCTargetOptionsFromFlags() {
(AsmInstrumentation == MCTargetOptions::AsmInstrumentationAddress);
Options.MCRelaxAll = RelaxAll;
Options.MCIncrementalLinkerCompatible = IncrementalLinkerCompatible;
- Options.MCPIECopyRelocations = PIECopyRelocations;
Options.DwarfVersion = DwarfVersion;
Options.ShowMCInst = ShowMCInst;
Options.ABIName = ABIName;
Options.MCFatalWarnings = FatalWarnings;
Options.MCNoWarn = NoWarn;
- Options.MCNoDeprecatedWarn = NoDeprecatedWarn;
return Options;
}
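
Like CommandFlags.h, this header keeps its cl::opt globals next to a header-inline Init*FromFlags() helper that copies parsed flag values into an options struct. A minimal sketch of the pattern with an invented options struct, assuming LLVM's CommandLine.h:

    #include "llvm/Support/CommandLine.h"

    using namespace llvm;

    struct MyMCOptions {
      bool RelaxAll = false;
      int DwarfVersion = 0;
    };

    static cl::opt<bool> MyRelaxAll("my-relax-all",
                                    cl::desc("Relax all fixups"));
    static cl::opt<int> MyDwarfVersion("my-dwarf-version",
                                       cl::desc("Dwarf version"), cl::init(0));

    // Copy parsed flag values into the options struct, mirroring
    // InitMCTargetOptionsFromFlags() above.
    static inline MyMCOptions InitMyMCOptionsFromFlags() {
      MyMCOptions Options;
      Options.RelaxAll = MyRelaxAll;
      Options.DwarfVersion = MyDwarfVersion;
      return Options;
    }

    int main(int argc, char **argv) {
      cl::ParseCommandLineOptions(argc, argv);
      return InitMyMCOptionsFromFlags().RelaxAll ? 0 : 1;
    }
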
diff --git a/gnu/llvm/include/llvm/Support/GCOV.h b/gnu/llvm/include/llvm/Support/GCOV.h
index 02016e7dbd6..544434f036a 100644
--- a/gnu/llvm/include/llvm/Support/GCOV.h
+++ b/gnu/llvm/include/llvm/Support/GCOV.h
@@ -1,4 +1,4 @@
-//===- GCOV.h - LLVM coverage tool ------------------------------*- C++ -*-===//
+//===- GCOV.h - LLVM coverage tool ----------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -19,17 +19,9 @@
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
-#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
-#include <cassert>
-#include <cstddef>
-#include <cstdint>
-#include <memory>
-#include <string>
-#include <utility>
namespace llvm {
@@ -38,7 +30,6 @@ class GCOVBlock;
class FileInfo;
namespace GCOV {
-
enum GCOVVersion { V402, V404, V704 };
/// \brief A struct for passing gcov options between functions.
@@ -56,14 +47,13 @@ struct Options {
bool LongFileNames;
bool NoOutput;
};
-
-} // end namespace GCOV
+} // end GCOV namespace
/// GCOVBuffer - A wrapper around MemoryBuffer to provide GCOV specific
/// read operations.
class GCOVBuffer {
public:
- GCOVBuffer(MemoryBuffer *B) : Buffer(B) {}
+ GCOVBuffer(MemoryBuffer *B) : Buffer(B), Cursor(0) {}
/// readGCNOFormat - Check GCNO signature is valid at the beginning of buffer.
bool readGCNOFormat() {
@@ -234,48 +224,47 @@ public:
private:
MemoryBuffer *Buffer;
- uint64_t Cursor = 0;
+ uint64_t Cursor;
};
/// GCOVFile - Collects coverage information for one pair of coverage file
/// (.gcno and .gcda).
class GCOVFile {
public:
- GCOVFile() = default;
-
+ GCOVFile()
+ : GCNOInitialized(false), Checksum(0), Functions(), RunCount(0),
+ ProgramCount(0) {}
bool readGCNO(GCOVBuffer &Buffer);
bool readGCDA(GCOVBuffer &Buffer);
uint32_t getChecksum() const { return Checksum; }
- void print(raw_ostream &OS) const;
void dump() const;
void collectLineCounts(FileInfo &FI);
private:
- bool GCNOInitialized = false;
+ bool GCNOInitialized;
GCOV::GCOVVersion Version;
- uint32_t Checksum = 0;
+ uint32_t Checksum;
SmallVector<std::unique_ptr<GCOVFunction>, 16> Functions;
- uint32_t RunCount = 0;
- uint32_t ProgramCount = 0;
+ uint32_t RunCount;
+ uint32_t ProgramCount;
};
/// GCOVEdge - Collects edge information.
struct GCOVEdge {
- GCOVEdge(GCOVBlock &S, GCOVBlock &D) : Src(S), Dst(D) {}
+ GCOVEdge(GCOVBlock &S, GCOVBlock &D) : Src(S), Dst(D), Count(0) {}
GCOVBlock &Src;
GCOVBlock &Dst;
- uint64_t Count = 0;
+ uint64_t Count;
};
/// GCOVFunction - Collects function information.
class GCOVFunction {
public:
- using BlockIterator = pointee_iterator<SmallVectorImpl<
- std::unique_ptr<GCOVBlock>>::const_iterator>;
-
- GCOVFunction(GCOVFile &P) : Parent(P) {}
+ typedef pointee_iterator<SmallVectorImpl<
+ std::unique_ptr<GCOVBlock>>::const_iterator> BlockIterator;
+ GCOVFunction(GCOVFile &P) : Parent(P), Ident(0), LineNumber(0) {}
bool readGCNO(GCOVBuffer &Buffer, GCOV::GCOVVersion Version);
bool readGCDA(GCOVBuffer &Buffer, GCOV::GCOVVersion Version);
StringRef getName() const { return Name; }
@@ -290,15 +279,14 @@ public:
return make_range(block_begin(), block_end());
}
- void print(raw_ostream &OS) const;
void dump() const;
void collectLineCounts(FileInfo &FI);
private:
GCOVFile &Parent;
- uint32_t Ident = 0;
+ uint32_t Ident;
uint32_t Checksum;
- uint32_t LineNumber = 0;
+ uint32_t LineNumber;
StringRef Name;
StringRef Filename;
SmallVector<std::unique_ptr<GCOVBlock>, 16> Blocks;
@@ -308,10 +296,10 @@ private:
/// GCOVBlock - Collects block information.
class GCOVBlock {
struct EdgeWeight {
- EdgeWeight(GCOVBlock *D) : Dst(D) {}
+ EdgeWeight(GCOVBlock *D) : Dst(D), Count(0) {}
GCOVBlock *Dst;
- uint64_t Count = 0;
+ uint64_t Count;
};
struct SortDstEdgesFunctor {
@@ -321,11 +309,12 @@ class GCOVBlock {
};
public:
- using EdgeIterator = SmallVectorImpl<GCOVEdge *>::const_iterator;
+ typedef SmallVectorImpl<GCOVEdge *>::const_iterator EdgeIterator;
- GCOVBlock(GCOVFunction &P, uint32_t N) : Parent(P), Number(N) {}
+ GCOVBlock(GCOVFunction &P, uint32_t N)
+ : Parent(P), Number(N), Counter(0), DstEdgesAreSorted(true), SrcEdges(),
+ DstEdges(), Lines() {}
~GCOVBlock();
-
const GCOVFunction &getParent() const { return Parent; }
void addLine(uint32_t N) { Lines.push_back(N); }
uint32_t getLastLine() const { return Lines.back(); }
@@ -336,7 +325,6 @@ public:
assert(&Edge->Dst == this); // up to caller to ensure edge is valid
SrcEdges.push_back(Edge);
}
-
void addDstEdge(GCOVEdge *Edge) {
assert(&Edge->Src == this); // up to caller to ensure edge is valid
// Check if adding this edge causes list to become unsorted.
@@ -344,7 +332,6 @@ public:
DstEdgesAreSorted = false;
DstEdges.push_back(Edge);
}
-
size_t getNumSrcEdges() const { return SrcEdges.size(); }
size_t getNumDstEdges() const { return DstEdges.size(); }
void sortDstEdges();
@@ -361,15 +348,14 @@ public:
return make_range(dst_begin(), dst_end());
}
- void print(raw_ostream &OS) const;
void dump() const;
void collectLineCounts(FileInfo &FI);
private:
GCOVFunction &Parent;
uint32_t Number;
- uint64_t Counter = 0;
- bool DstEdgesAreSorted = true;
+ uint64_t Counter;
+ bool DstEdgesAreSorted;
SmallVector<GCOVEdge *, 16> SrcEdges;
SmallVector<GCOVEdge *, 16> DstEdges;
SmallVector<uint32_t, 16> Lines;
@@ -381,48 +367,48 @@ class FileInfo {
// Therefore this typedef allows LineData.Functions to store multiple
// functions
// per instance. This is rare, however, so optimize for the common case.
- using FunctionVector = SmallVector<const GCOVFunction *, 1>;
- using FunctionLines = DenseMap<uint32_t, FunctionVector>;
- using BlockVector = SmallVector<const GCOVBlock *, 4>;
- using BlockLines = DenseMap<uint32_t, BlockVector>;
+ typedef SmallVector<const GCOVFunction *, 1> FunctionVector;
+ typedef DenseMap<uint32_t, FunctionVector> FunctionLines;
+ typedef SmallVector<const GCOVBlock *, 4> BlockVector;
+ typedef DenseMap<uint32_t, BlockVector> BlockLines;
struct LineData {
- LineData() = default;
-
+ LineData() : LastLine(0) {}
BlockLines Blocks;
FunctionLines Functions;
- uint32_t LastLine = 0;
+ uint32_t LastLine;
};
struct GCOVCoverage {
- GCOVCoverage(StringRef Name) : Name(Name) {}
+ GCOVCoverage(StringRef Name)
+ : Name(Name), LogicalLines(0), LinesExec(0), Branches(0),
+ BranchesExec(0), BranchesTaken(0) {}
StringRef Name;
- uint32_t LogicalLines = 0;
- uint32_t LinesExec = 0;
+ uint32_t LogicalLines;
+ uint32_t LinesExec;
- uint32_t Branches = 0;
- uint32_t BranchesExec = 0;
- uint32_t BranchesTaken = 0;
+ uint32_t Branches;
+ uint32_t BranchesExec;
+ uint32_t BranchesTaken;
};
public:
- FileInfo(const GCOV::Options &Options) : Options(Options) {}
+ FileInfo(const GCOV::Options &Options)
+ : Options(Options), LineInfo(), RunCount(0), ProgramCount(0) {}
void addBlockLine(StringRef Filename, uint32_t Line, const GCOVBlock *Block) {
if (Line > LineInfo[Filename].LastLine)
LineInfo[Filename].LastLine = Line;
LineInfo[Filename].Blocks[Line - 1].push_back(Block);
}
-
void addFunctionLine(StringRef Filename, uint32_t Line,
const GCOVFunction *Function) {
if (Line > LineInfo[Filename].LastLine)
LineInfo[Filename].LastLine = Line;
LineInfo[Filename].Functions[Line - 1].push_back(Function);
}
-
void setRunCount(uint32_t Runs) { RunCount = Runs; }
void setProgramCount(uint32_t Programs) { ProgramCount = Programs; }
void print(raw_ostream &OS, StringRef MainFilename, StringRef GCNOFile,
@@ -445,16 +431,15 @@ private:
const GCOV::Options &Options;
StringMap<LineData> LineInfo;
- uint32_t RunCount = 0;
- uint32_t ProgramCount = 0;
+ uint32_t RunCount;
+ uint32_t ProgramCount;
- using FileCoverageList = SmallVector<std::pair<std::string, GCOVCoverage>, 4>;
- using FuncCoverageMap = MapVector<const GCOVFunction *, GCOVCoverage>;
+ typedef SmallVector<std::pair<std::string, GCOVCoverage>, 4> FileCoverageList;
+ typedef MapVector<const GCOVFunction *, GCOVCoverage> FuncCoverageMap;
FileCoverageList FileCoverages;
FuncCoverageMap FuncCoverages;
};
+}
-} // end namespace llvm
-
-#endif // LLVM_SUPPORT_GCOV_H
+#endif
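
Most of the GCOV.h hunks are one idiom reverted: C++11 in-class default member initializers become explicit constructor initializer lists, which the imported release uses throughout. Both forms produce identically initialized objects, as this standalone illustration shows (names invented):

    #include <cstdint>

    struct EdgeNew {            // upstream's later style
      uint64_t Count = 0;       // in-class default initializer
    };

    struct EdgeOld {            // style restored by the import
      EdgeOld() : Count(0) {}   // constructor initializer list
      uint64_t Count;
    };

    int main() {
      return EdgeNew().Count == EdgeOld().Count ? 0 : 1;
    }
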
diff --git a/gnu/llvm/include/llvm/Target/CostTable.h b/gnu/llvm/include/llvm/Target/CostTable.h
index b7d9240a91f..2499f5c3189 100644
--- a/gnu/llvm/include/llvm/Target/CostTable.h
+++ b/gnu/llvm/include/llvm/Target/CostTable.h
@@ -16,7 +16,6 @@
#define LLVM_TARGET_COSTTABLE_H_
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineValueType.h"
namespace llvm {
@@ -31,9 +30,9 @@ struct CostTblEntry {
/// Find in cost table, TypeTy must be comparable to CompareTy by ==
inline const CostTblEntry *CostTableLookup(ArrayRef<CostTblEntry> Tbl,
int ISD, MVT Ty) {
- auto I = find_if(Tbl, [=](const CostTblEntry &Entry) {
- return ISD == Entry.ISD && Ty == Entry.Type;
- });
+ auto I = std::find_if(Tbl.begin(), Tbl.end(),
+ [=](const CostTblEntry &Entry) {
+ return ISD == Entry.ISD && Ty == Entry.Type; });
if (I != Tbl.end())
return I;
@@ -54,9 +53,11 @@ struct TypeConversionCostTblEntry {
inline const TypeConversionCostTblEntry *
ConvertCostTableLookup(ArrayRef<TypeConversionCostTblEntry> Tbl,
int ISD, MVT Dst, MVT Src) {
- auto I = find_if(Tbl, [=](const TypeConversionCostTblEntry &Entry) {
- return ISD == Entry.ISD && Src == Entry.Src && Dst == Entry.Dst;
- });
+ auto I = std::find_if(Tbl.begin(), Tbl.end(),
+ [=](const TypeConversionCostTblEntry &Entry) {
+ return ISD == Entry.ISD && Src == Entry.Src &&
+ Dst == Entry.Dst;
+ });
if (I != Tbl.end())
return I;
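
Both hunks in this file swap llvm::find_if, an STLExtras range helper that postdates the imported release, for plain std::find_if over begin()/end(); the lookup logic is untouched. The same pattern in self-contained form, with CostTblEntry reduced to a local struct:

    #include <algorithm>
    #include <vector>

    struct Entry { int ISD; int Type; };  // simplified stand-in for CostTblEntry

    static const Entry *lookup(const std::vector<Entry> &Tbl, int ISD, int Ty) {
      auto I = std::find_if(Tbl.begin(), Tbl.end(), [=](const Entry &E) {
        return ISD == E.ISD && Ty == E.Type;
      });
      return I != Tbl.end() ? &*I : nullptr;
    }

    int main() {
      std::vector<Entry> Tbl = {{1, 2}, {3, 4}};
      return lookup(Tbl, 3, 4) ? 0 : 1;
    }
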
diff --git a/gnu/llvm/include/llvm/Target/TargetCallingConv.h b/gnu/llvm/include/llvm/Target/TargetCallingConv.h
index 4f750b8a289..0c6c1f1468c 100644
--- a/gnu/llvm/include/llvm/Target/TargetCallingConv.h
+++ b/gnu/llvm/include/llvm/Target/TargetCallingConv.h
@@ -14,120 +14,118 @@
#ifndef LLVM_TARGET_TARGETCALLINGCONV_H
#define LLVM_TARGET_TARGETCALLINGCONV_H
-#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MathExtras.h"
-#include <cassert>
-#include <climits>
-#include <cstdint>
+#include <string>
+#include <limits.h>
namespace llvm {
-namespace ISD {
+namespace ISD {
struct ArgFlagsTy {
private:
- unsigned IsZExt : 1; ///< Zero extended
- unsigned IsSExt : 1; ///< Sign extended
- unsigned IsInReg : 1; ///< Passed in register
- unsigned IsSRet : 1; ///< Hidden struct-ret ptr
- unsigned IsByVal : 1; ///< Struct passed by value
- unsigned IsNest : 1; ///< Nested fn static chain
- unsigned IsReturned : 1; ///< Always returned
- unsigned IsSplit : 1;
- unsigned IsInAlloca : 1; ///< Passed with inalloca
- unsigned IsSplitEnd : 1; ///< Last part of a split
- unsigned IsSwiftSelf : 1; ///< Swift self parameter
- unsigned IsSwiftError : 1; ///< Swift error parameter
- unsigned IsHva : 1; ///< HVA field for
- unsigned IsHvaStart : 1; ///< HVA structure start
- unsigned IsSecArgPass : 1; ///< Second argument
- unsigned ByValAlign : 4; ///< Log 2 of byval alignment
- unsigned OrigAlign : 5; ///< Log 2 of original alignment
- unsigned IsInConsecutiveRegsLast : 1;
- unsigned IsInConsecutiveRegs : 1;
- unsigned IsCopyElisionCandidate : 1; ///< Argument copy elision candidate
-
- unsigned ByValSize; ///< Byval struct size
-
+ static const uint64_t NoFlagSet = 0ULL;
+ static const uint64_t ZExt = 1ULL<<0; ///< Zero extended
+ static const uint64_t ZExtOffs = 0;
+ static const uint64_t SExt = 1ULL<<1; ///< Sign extended
+ static const uint64_t SExtOffs = 1;
+ static const uint64_t InReg = 1ULL<<2; ///< Passed in register
+ static const uint64_t InRegOffs = 2;
+ static const uint64_t SRet = 1ULL<<3; ///< Hidden struct-ret ptr
+ static const uint64_t SRetOffs = 3;
+ static const uint64_t ByVal = 1ULL<<4; ///< Struct passed by value
+ static const uint64_t ByValOffs = 4;
+ static const uint64_t Nest = 1ULL<<5; ///< Nested fn static chain
+ static const uint64_t NestOffs = 5;
+ static const uint64_t Returned = 1ULL<<6; ///< Always returned
+ static const uint64_t ReturnedOffs = 6;
+ static const uint64_t ByValAlign = 0xFULL<<7; ///< Struct alignment
+ static const uint64_t ByValAlignOffs = 7;
+ static const uint64_t Split = 1ULL<<11;
+ static const uint64_t SplitOffs = 11;
+ static const uint64_t InAlloca = 1ULL<<12; ///< Passed with inalloca
+ static const uint64_t InAllocaOffs = 12;
+ static const uint64_t SplitEnd = 1ULL<<13; ///< Last part of a split
+ static const uint64_t SplitEndOffs = 13;
+ static const uint64_t OrigAlign = 0x1FULL<<27;
+ static const uint64_t OrigAlignOffs = 27;
+ static const uint64_t ByValSize = 0x3fffffffULL<<32; ///< Struct size
+ static const uint64_t ByValSizeOffs = 32;
+ static const uint64_t InConsecutiveRegsLast = 0x1ULL<<62; ///< Last part of consecutive regs
+ static const uint64_t InConsecutiveRegsLastOffs = 62;
+ static const uint64_t InConsecutiveRegs = 0x1ULL<<63; ///< Passed in consecutive regs
+ static const uint64_t InConsecutiveRegsOffs = 63;
+
+ static const uint64_t One = 1ULL; ///< 1 of this type, for shifts
+
+ uint64_t Flags;
public:
- ArgFlagsTy()
- : IsZExt(0), IsSExt(0), IsInReg(0), IsSRet(0), IsByVal(0), IsNest(0),
- IsReturned(0), IsSplit(0), IsInAlloca(0), IsSplitEnd(0),
- IsSwiftSelf(0), IsSwiftError(0), IsHva(0), IsHvaStart(0),
- IsSecArgPass(0), ByValAlign(0), OrigAlign(0),
- IsInConsecutiveRegsLast(0), IsInConsecutiveRegs(0),
- IsCopyElisionCandidate(0), ByValSize(0) {
- static_assert(sizeof(*this) == 2 * sizeof(unsigned), "flags are too big");
- }
-
- bool isZExt() const { return IsZExt; }
- void setZExt() { IsZExt = 1; }
-
- bool isSExt() const { return IsSExt; }
- void setSExt() { IsSExt = 1; }
-
- bool isInReg() const { return IsInReg; }
- void setInReg() { IsInReg = 1; }
-
- bool isSRet() const { return IsSRet; }
- void setSRet() { IsSRet = 1; }
-
- bool isByVal() const { return IsByVal; }
- void setByVal() { IsByVal = 1; }
+ ArgFlagsTy() : Flags(0) { }
- bool isInAlloca() const { return IsInAlloca; }
- void setInAlloca() { IsInAlloca = 1; }
+ bool isZExt() const { return Flags & ZExt; }
+ void setZExt() { Flags |= One << ZExtOffs; }
- bool isSwiftSelf() const { return IsSwiftSelf; }
- void setSwiftSelf() { IsSwiftSelf = 1; }
+ bool isSExt() const { return Flags & SExt; }
+ void setSExt() { Flags |= One << SExtOffs; }
- bool isSwiftError() const { return IsSwiftError; }
- void setSwiftError() { IsSwiftError = 1; }
+ bool isInReg() const { return Flags & InReg; }
+ void setInReg() { Flags |= One << InRegOffs; }
- bool isHva() const { return IsHva; }
- void setHva() { IsHva = 1; }
+ bool isSRet() const { return Flags & SRet; }
+ void setSRet() { Flags |= One << SRetOffs; }
- bool isHvaStart() const { return IsHvaStart; }
- void setHvaStart() { IsHvaStart = 1; }
+ bool isByVal() const { return Flags & ByVal; }
+ void setByVal() { Flags |= One << ByValOffs; }
- bool isSecArgPass() const { return IsSecArgPass; }
- void setSecArgPass() { IsSecArgPass = 1; }
+ bool isInAlloca() const { return Flags & InAlloca; }
+ void setInAlloca() { Flags |= One << InAllocaOffs; }
- bool isNest() const { return IsNest; }
- void setNest() { IsNest = 1; }
+ bool isNest() const { return Flags & Nest; }
+ void setNest() { Flags |= One << NestOffs; }
- bool isReturned() const { return IsReturned; }
- void setReturned() { IsReturned = 1; }
+ bool isReturned() const { return Flags & Returned; }
+ void setReturned() { Flags |= One << ReturnedOffs; }
- bool isInConsecutiveRegs() const { return IsInConsecutiveRegs; }
- void setInConsecutiveRegs() { IsInConsecutiveRegs = 1; }
+ bool isInConsecutiveRegs() const { return Flags & InConsecutiveRegs; }
+ void setInConsecutiveRegs() { Flags |= One << InConsecutiveRegsOffs; }
- bool isInConsecutiveRegsLast() const { return IsInConsecutiveRegsLast; }
- void setInConsecutiveRegsLast() { IsInConsecutiveRegsLast = 1; }
+ bool isInConsecutiveRegsLast() const { return Flags & InConsecutiveRegsLast; }
+ void setInConsecutiveRegsLast() { Flags |= One << InConsecutiveRegsLastOffs; }
- bool isSplit() const { return IsSplit; }
- void setSplit() { IsSplit = 1; }
+ unsigned getByValAlign() const {
+ return (unsigned)
+ ((One << ((Flags & ByValAlign) >> ByValAlignOffs)) / 2);
+ }
+ void setByValAlign(unsigned A) {
+ Flags = (Flags & ~ByValAlign) |
+ (uint64_t(Log2_32(A) + 1) << ByValAlignOffs);
+ }
- bool isSplitEnd() const { return IsSplitEnd; }
- void setSplitEnd() { IsSplitEnd = 1; }
+ bool isSplit() const { return Flags & Split; }
+ void setSplit() { Flags |= One << SplitOffs; }
- bool isCopyElisionCandidate() const { return IsCopyElisionCandidate; }
- void setCopyElisionCandidate() { IsCopyElisionCandidate = 1; }
+ bool isSplitEnd() const { return Flags & SplitEnd; }
+ void setSplitEnd() { Flags |= One << SplitEndOffs; }
- unsigned getByValAlign() const { return (1U << ByValAlign) / 2; }
- void setByValAlign(unsigned A) {
- ByValAlign = Log2_32(A) + 1;
- assert(getByValAlign() == A && "bitfield overflow");
+ unsigned getOrigAlign() const {
+ return (unsigned)
+ ((One << ((Flags & OrigAlign) >> OrigAlignOffs)) / 2);
}
-
- unsigned getOrigAlign() const { return (1U << OrigAlign) / 2; }
void setOrigAlign(unsigned A) {
- OrigAlign = Log2_32(A) + 1;
- assert(getOrigAlign() == A && "bitfield overflow");
+ Flags = (Flags & ~OrigAlign) |
+ (uint64_t(Log2_32(A) + 1) << OrigAlignOffs);
}
- unsigned getByValSize() const { return ByValSize; }
- void setByValSize(unsigned S) { ByValSize = S; }
+ unsigned getByValSize() const {
+ return (unsigned)((Flags & ByValSize) >> ByValSizeOffs);
+ }
+ void setByValSize(unsigned S) {
+ Flags = (Flags & ~ByValSize) | (uint64_t(S) << ByValSizeOffs);
+ }
+
+ /// getRawBits - Represent the flags as a bunch of bits.
+ uint64_t getRawBits() const { return Flags; }
};
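The byval/orig alignment fields above store log2(alignment) + 1 rather than the alignment itself, so an all-zero field decodes to 0 ("unset") instead of 1. A minimal standalone sketch of the round trip, assuming power-of-two alignments (the helper names are hypothetical stand-ins for setByValAlign/getByValAlign):

  #include <cassert>

  static unsigned encodeAlign(unsigned A) {  // A is a power of two
    unsigned Log2 = 0;
    while ((1u << Log2) < A)
      ++Log2;                                // stand-in for Log2_32()
    return Log2 + 1;                         // reserve 0 for "no alignment"
  }
  static unsigned decodeAlign(unsigned Field) {
    return (1u << Field) / 2;                // (1 << (log2 + 1)) / 2 == A
  }

  int main() {
    for (unsigned A = 1; A <= 16; A <<= 1)
      assert(decodeAlign(encodeAlign(A)) == A);
    assert(decodeAlign(0) == 0);             // zeroed field reads back as 0
  }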
/// InputArg - This struct carries flags and type information about a
@@ -136,9 +134,9 @@ namespace ISD {
///
struct InputArg {
ArgFlagsTy Flags;
- MVT VT = MVT::Other;
+ MVT VT;
EVT ArgVT;
- bool Used = false;
+ bool Used;
/// Index original Function's argument.
unsigned OrigArgIndex;
@@ -150,7 +148,7 @@ namespace ISD {
/// registers, we got 4 InputArgs with PartOffsets 0, 4, 8 and 12.
unsigned PartOffset;
- InputArg() = default;
+ InputArg() : VT(MVT::Other), Used(false) {}
InputArg(ArgFlagsTy flags, EVT vt, EVT argvt, bool used,
unsigned origIdx, unsigned partOffs)
: Flags(flags), Used(used), OrigArgIndex(origIdx), PartOffset(partOffs) {
@@ -178,7 +176,7 @@ namespace ISD {
EVT ArgVT;
/// IsFixed - Is this a "fixed" value, ie not passed through a vararg "...".
- bool IsFixed = false;
+ bool IsFixed;
/// Index original Function's argument.
unsigned OrigArgIndex;
@@ -188,7 +186,7 @@ namespace ISD {
/// registers, we got 4 OutputArgs with PartOffsets 0, 4, 8 and 12.
unsigned PartOffset;
- OutputArg() = default;
+ OutputArg() : IsFixed(false) {}
OutputArg(ArgFlagsTy flags, EVT vt, EVT argvt, bool isfixed,
unsigned origIdx, unsigned partOffs)
: Flags(flags), IsFixed(isfixed), OrigArgIndex(origIdx),
@@ -197,8 +195,8 @@ namespace ISD {
ArgVT = argvt;
}
};
+}
-} // end namespace ISD
-} // end namespace llvm
+} // end llvm namespace
-#endif // LLVM_TARGET_TARGETCALLINGCONV_H
+#endif
diff --git a/gnu/llvm/include/llvm/Target/TargetFrameLowering.h b/gnu/llvm/include/llvm/Target/TargetFrameLowering.h
index 4576f8c7582..cadd07d71f1 100644
--- a/gnu/llvm/include/llvm/Target/TargetFrameLowering.h
+++ b/gnu/llvm/include/llvm/Target/TargetFrameLowering.h
@@ -75,9 +75,9 @@ public:
///
int alignSPAdjust(int SPAdj) const {
if (SPAdj < 0) {
- SPAdj = -alignTo(-SPAdj, StackAlignment);
+ SPAdj = -RoundUpToAlignment(-SPAdj, StackAlignment);
} else {
- SPAdj = alignTo(SPAdj, StackAlignment);
+ SPAdj = RoundUpToAlignment(SPAdj, StackAlignment);
}
return SPAdj;
}
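RoundUpToAlignment(V, A) (from MathExtras.h) rounds V up to the next multiple of A, so alignSPAdjust rounds the magnitude of the adjustment up in both directions: allocations grow and deallocations shrink by a matching aligned amount. A hedged standalone sketch, with roundUp as a stand-in for the real helper:

  #include <cassert>
  #include <cstdint>

  static int64_t roundUp(int64_t Value, int64_t Align) {
    return (Value + Align - 1) / Align * Align;  // next multiple of Align
  }
  static int alignSPAdjust(int SPAdj, int StackAlignment) {
    return SPAdj < 0 ? -(int)roundUp(-SPAdj, StackAlignment)
                     : (int)roundUp(SPAdj, StackAlignment);
  }

  int main() {
    assert(alignSPAdjust(12, 16) == 16);    // allocate: round up to 16
    assert(alignSPAdjust(-12, 16) == -16);  // deallocate: mirror the rounding
  }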
@@ -151,13 +151,6 @@ public:
return false;
}
- /// Returns true if the stack slot holes in the fixed and callee-save stack
- /// area should be used when allocating other stack locations to reduce stack
- /// size.
- virtual bool enableStackSlotScavenging(const MachineFunction &MF) const {
- return false;
- }
-
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
virtual void emitPrologue(MachineFunction &MF,
@@ -179,6 +172,12 @@ public:
virtual void adjustForHiPEPrologue(MachineFunction &MF,
MachineBasicBlock &PrologueMBB) const {}
+ /// Adjust the prologue to add an allocation at a fixed offset from the frame
+ /// pointer.
+ virtual void
+ adjustForFrameAllocatePrologue(MachineFunction &MF,
+ MachineBasicBlock &PrologueMBB) const {}
+
/// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
/// saved registers and returns true if it isn't possible / profitable to do
/// so by issuing a series of store instructions via
@@ -240,17 +239,15 @@ public:
virtual int getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const;
- /// Same as \c getFrameIndexReference, except that the stack pointer (as
- /// opposed to the frame pointer) will be the preferred value for \p
- /// FrameReg. This is generally used for emitting statepoint or EH tables that
- /// use offsets from RSP. If \p IgnoreSPUpdates is true, the returned
- /// offset is only guaranteed to be valid with respect to the value of SP at
- /// the end of the prologue.
- virtual int getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI,
- unsigned &FrameReg,
- bool IgnoreSPUpdates) const {
- // Always safe to dispatch to getFrameIndexReference.
- return getFrameIndexReference(MF, FI, FrameReg);
+ /// Same as above, except that the 'base register' will always be RSP, not
+ /// RBP on x86. This is generally used for emitting statepoint or EH tables
+ /// that use offsets from RSP.
+ /// TODO: This should really be a parameterizable choice.
+ virtual int getFrameIndexReferenceFromSP(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const {
+ // No default implementation; only x86 overrides this hook.
+ llvm_unreachable("unimplemented for non-x86");
+ return 0;
}
/// This method determines which of the registers reported by
@@ -276,13 +273,14 @@ public:
report_fatal_error("WinEH not implemented for this target");
}
- /// This method is called during prolog/epilog code insertion to eliminate
- /// call frame setup and destroy pseudo instructions (but only if the Target
- /// is using them). It is responsible for eliminating these instructions,
- /// replacing them with concrete instructions. This method need only be
- /// implemented if using call frame setup/destroy pseudo instructions.
- /// Returns an iterator pointing to the instruction after the replaced one.
- virtual MachineBasicBlock::iterator
+ /// eliminateCallFramePseudoInstr - This method is called during prolog/epilog
+ /// code insertion to eliminate call frame setup and destroy pseudo
+ /// instructions (but only if the Target is using them). It is responsible
+ /// for eliminating these instructions, replacing them with concrete
+ /// instructions. This method need only be implemented if using call frame
+ /// setup/destroy pseudo instructions.
+ ///
+ virtual void
eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const {
@@ -290,18 +288,6 @@ public:
"target!");
}
-
- /// Order the symbols in the local stack frame.
- /// The list of objects that we want to order is in \p objectsToAllocate as
- /// indices into the MachineFrameInfo. The array can be reordered in any way
- /// upon return. The contents of the array, however, may not be modified (i.e.
- /// only their order may be changed).
- /// By default, just maintain the original order.
- virtual void
- orderFrameObjects(const MachineFunction &MF,
- SmallVectorImpl<int> &objectsToAllocate) const {
- }
-
/// Check whether or not the given \p MBB can be used as a prologue
/// for the target.
/// The prologue will be inserted first in this basic block.
@@ -325,20 +311,6 @@ public:
virtual bool canUseAsEpilogue(const MachineBasicBlock &MBB) const {
return true;
}
-
- /// Check if given function is safe for not having callee saved registers.
- /// This is used when interprocedural register allocation is enabled.
- static bool isSafeForNoCSROpt(const Function *F) {
- if (!F->hasLocalLinkage() || F->hasAddressTaken() ||
- !F->hasFnAttribute(Attribute::NoRecurse))
- return false;
- // Function should not be optimized as tail call.
- for (const User *U : F->users())
- if (auto CS = ImmutableCallSite(U))
- if (CS.isTailCall())
- return false;
- return true;
- }
};
} // End llvm namespace
diff --git a/gnu/llvm/include/llvm/Target/TargetInstrInfo.h b/gnu/llvm/include/llvm/Target/TargetInstrInfo.h
index 1843a2eed9b..0cebcf1c6b5 100644
--- a/gnu/llvm/include/llvm/Target/TargetInstrInfo.h
+++ b/gnu/llvm/include/llvm/Target/TargetInstrInfo.h
@@ -1,4 +1,4 @@
-//===- llvm/Target/TargetInstrInfo.h - Instruction Info ---------*- C++ -*-===//
+//===-- llvm/Target/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,63 +14,52 @@
#ifndef LLVM_TARGET_TARGETINSTRINFO_H
#define LLVM_TARGET_TARGETINSTRINFO_H
-#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/DenseMapInfo.h"
-#include "llvm/ADT/None.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Support/BranchProbability.h"
-#include "llvm/Support/ErrorHandling.h"
-#include <cassert>
-#include <cstddef>
-#include <cstdint>
-#include <utility>
-#include <vector>
+#include "llvm/Target/TargetRegisterInfo.h"
namespace llvm {
-class DFAPacketizer;
class InstrItineraryData;
class LiveVariables;
+class MCAsmInfo;
class MachineMemOperand;
class MachineRegisterInfo;
-class MCAsmInfo;
+class MDNode;
class MCInst;
struct MCSchedModel;
-class Module;
-class ScheduleDAG;
-class ScheduleHazardRecognizer;
+class MCSymbolRefExpr;
class SDNode;
+class ScheduleHazardRecognizer;
class SelectionDAG;
-class RegScavenger;
+class ScheduleDAG;
class TargetRegisterClass;
class TargetRegisterInfo;
-class TargetSchedModel;
class TargetSubtargetInfo;
+class TargetSchedModel;
+class DFAPacketizer;
template<class T> class SmallVectorImpl;
+
//---------------------------------------------------------------------------
///
/// TargetInstrInfo - Interface to description of machine instruction set
///
class TargetInstrInfo : public MCInstrInfo {
+ TargetInstrInfo(const TargetInstrInfo &) = delete;
+ void operator=(const TargetInstrInfo &) = delete;
public:
TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
- unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u)
+ unsigned CatchRetOpcode = ~0u)
: CallFrameSetupOpcode(CFSetupOpcode),
CallFrameDestroyOpcode(CFDestroyOpcode),
- CatchRetOpcode(CatchRetOpcode),
- ReturnOpcode(ReturnOpcode) {}
- TargetInstrInfo(const TargetInstrInfo &) = delete;
- TargetInstrInfo &operator=(const TargetInstrInfo &) = delete;
+ CatchRetOpcode(CatchRetOpcode) {}
+
virtual ~TargetInstrInfo();
static bool isGenericOpcode(unsigned Opc) {
@@ -89,10 +78,10 @@ public:
/// This means the only allowed uses are constants and unallocatable physical
/// registers so that the instruction's result is independent of the place
/// in the function.
- bool isTriviallyReMaterializable(const MachineInstr &MI,
+ bool isTriviallyReMaterializable(const MachineInstr *MI,
AliasAnalysis *AA = nullptr) const {
- return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF ||
- (MI.getDesc().isRematerializable() &&
+ return MI->getOpcode() == TargetOpcode::IMPLICIT_DEF ||
+ (MI->getDesc().isRematerializable() &&
(isReallyTriviallyReMaterializable(MI, AA) ||
isReallyTriviallyReMaterializableGeneric(MI, AA)));
}
@@ -105,7 +94,7 @@ protected:
/// than producing a value, or if it requires any address registers that are
/// not always available.
/// Requirements must be checked as stated in isTriviallyReMaterializable().
- virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
+ virtual bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
AliasAnalysis *AA) const {
return false;
}
@@ -125,7 +114,8 @@ protected:
/// Do not call this method for a non-commutable instruction.
/// Even though the instruction is commutable, the method may still
/// fail to commute the operands, null pointer is returned in such cases.
- virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
+ virtual MachineInstr *commuteInstructionImpl(MachineInstr *MI,
+ bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const;
@@ -149,7 +139,7 @@ private:
/// set and the target hook isReallyTriviallyReMaterializable returns false,
/// this function does target-independent tests to determine if the
/// instruction is really trivially rematerializable.
- bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI,
+ bool isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
AliasAnalysis *AA) const;
public:
@@ -161,50 +151,13 @@ public:
unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }
- /// Returns true if the argument is a frame pseudo instruction.
- bool isFrameInstr(const MachineInstr &I) const {
- return I.getOpcode() == getCallFrameSetupOpcode() ||
- I.getOpcode() == getCallFrameDestroyOpcode();
- }
-
- /// Returns true if the argument is a frame setup pseudo instruction.
- bool isFrameSetup(const MachineInstr &I) const {
- return I.getOpcode() == getCallFrameSetupOpcode();
- }
-
- /// Returns size of the frame associated with the given frame instruction.
- /// For a frame setup instruction this is the frame space set up after the
- /// instruction. For a frame destroy instruction this is the frame freed by
- /// the caller.
- /// Note, in some cases a call frame (or a part of it) may be prepared prior
- /// to the frame setup instruction. It occurs in the calls that involve
- /// inalloca arguments. This function reports only the size of the frame part
- /// that is set up between the frame setup and destroy pseudo instructions.
- int64_t getFrameSize(const MachineInstr &I) const {
- assert(isFrameInstr(I) && "Not a frame instruction");
- assert(I.getOperand(0).getImm() >= 0);
- return I.getOperand(0).getImm();
- }
-
- /// Returns the total frame size, which is made up of the space set up inside
- /// the pair of frame start-stop instructions and the space that is set up
- /// prior to the pair.
- int64_t getFrameTotalSize(const MachineInstr &I) const {
- if (isFrameSetup(I)) {
- assert(I.getOperand(1).getImm() >= 0 && "Frame size must not be negative");
- return getFrameSize(I) + I.getOperand(1).getImm();
- }
- return getFrameSize(I);
- }
-
unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
- unsigned getReturnOpcode() const { return ReturnOpcode; }
/// Returns the actual stack pointer adjustment made by an instruction
/// as part of a call sequence. By default, only call frame setup/destroy
/// instructions adjust the stack, but targets may want to override this
/// to enable more fine-grained adjustment, or adjust by a different value.
- virtual int getSPAdjust(const MachineInstr &MI) const;
+ virtual int getSPAdjust(const MachineInstr *MI) const;
/// Return true if the instruction is a "coalescable" extension instruction.
/// That is, it's like a copy where it's legal for the source to overlap the
@@ -222,14 +175,14 @@ public:
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
- virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
+ virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
return 0;
}
/// Check for post-frame ptr elimination stack locations as well.
/// This uses a heuristic so it isn't reliable for correctness.
- virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
+ virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
int &FrameIndex) const {
return 0;
}
@@ -240,7 +193,7 @@ public:
/// If not, return false. Unlike isLoadFromStackSlot, this returns true for
/// any instruction that loads from the stack. This is just a hint, as some
/// cases may be missed.
- virtual bool hasLoadFromStackSlot(const MachineInstr &MI,
+ virtual bool hasLoadFromStackSlot(const MachineInstr *MI,
const MachineMemOperand *&MMO,
int &FrameIndex) const;
@@ -249,14 +202,14 @@ public:
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
- virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
+ virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
return 0;
}
/// Check for post-frame ptr elimination stack locations as well.
/// This uses a heuristic, so it isn't reliable for correctness.
- virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
+ virtual unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
int &FrameIndex) const {
return 0;
}
@@ -267,14 +220,14 @@ public:
/// If not, return false. Unlike isStoreToStackSlot,
/// this returns true for any instruction that stores to the
/// stack. This is just a hint, as some cases may be missed.
- virtual bool hasStoreToStackSlot(const MachineInstr &MI,
+ virtual bool hasStoreToStackSlot(const MachineInstr *MI,
const MachineMemOperand *&MMO,
int &FrameIndex) const;
/// Return true if the specified machine instruction
/// is a copy of one stack slot to another and has no other effect.
/// Provide the identity of the two frame indices.
- virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
+ virtual bool isStackSlotCopy(const MachineInstr *MI, int &DestFrameIndex,
int &SrcFrameIndex) const {
return false;
}
@@ -296,27 +249,12 @@ public:
unsigned &Size, unsigned &Offset,
const MachineFunction &MF) const;
- /// Returns the size in bytes of the specified MachineInstr, or ~0U
- /// when this function is not implemented by a target.
- virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
- return ~0U;
- }
-
/// Return true if the instruction is as cheap as a move instruction.
///
/// Targets for different archs need to override this, and different
/// micro-architectures can also be finely tuned inside.
- virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
- return MI.isAsCheapAsAMove();
- }
-
- /// Return true if the instruction should be sunk by MachineSink.
- ///
- /// MachineSink determines on its own whether the instruction is safe to sink;
- /// this gives the target a hook to override the default behavior with regards
- /// to which instructions should be sunk.
- virtual bool shouldSink(const MachineInstr &MI) const {
- return true;
+ virtual bool isAsCheapAsAMove(const MachineInstr *MI) const {
+ return MI->isAsCheapAsAMove();
}
/// Re-issue the specified 'original' instruction at the
@@ -325,8 +263,9 @@ public:
/// DestReg:SubIdx. Any existing subreg index is preserved or composed with
/// SubIdx.
virtual void reMaterialize(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, unsigned DestReg,
- unsigned SubIdx, const MachineInstr &Orig,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, unsigned SubIdx,
+ const MachineInstr *Orig,
const TargetRegisterInfo &TRI) const;
/// Create a duplicate of the Orig instruction in MF. This is like
@@ -334,7 +273,7 @@ public:
/// that are required to be unique.
///
/// The instruction must be duplicable as indicated by isNotDuplicable().
- virtual MachineInstr *duplicate(MachineInstr &Orig,
+ virtual MachineInstr *duplicate(MachineInstr *Orig,
MachineFunction &MF) const;
/// This method must be implemented by targets that
@@ -347,9 +286,9 @@ public:
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the last new instruction.
///
- virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineInstr &MI,
- LiveVariables *LV) const {
+ virtual MachineInstr *
+ convertToThreeAddress(MachineFunction::iterator &MFI,
+ MachineBasicBlock::iterator &MBBI, LiveVariables *LV) const {
return nullptr;
}
@@ -376,7 +315,8 @@ public:
/// Even though the instruction is commutable, the method may still
/// fail to commute the operands, null pointer is returned in such cases.
MachineInstr *
- commuteInstruction(MachineInstr &MI, bool NewMI = false,
+ commuteInstruction(MachineInstr *MI,
+ bool NewMI = false,
unsigned OpIdx1 = CommuteAnyOperandIndex,
unsigned OpIdx2 = CommuteAnyOperandIndex) const;
@@ -397,7 +337,7 @@ public:
/// findCommutedOpIndices(MI, Op1, Op2);
/// can be interpreted as a query asking to find an operand that would be
/// commutable with the operand#1.
- virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
+ virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const;
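A hedged sketch of the query pattern described above, with operand 1 pinned and the partner left to the target; TII and MI stand for a TargetInstrInfo pointer and the instruction being commuted:

  unsigned OpIdx1 = 1;
  unsigned OpIdx2 = TargetInstrInfo::CommuteAnyOperandIndex;
  // On success, OpIdx2 is filled in with an operand commutable with #1.
  if (TII->findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    // Commute MI in place (NewMI = false); may still fail and return null.
    TII->commuteInstruction(MI, /*NewMI=*/false, OpIdx1, OpIdx2);
  }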
/// A pair composed of a register and a sub-register index.
@@ -405,17 +345,14 @@ public:
struct RegSubRegPair {
unsigned Reg;
unsigned SubReg;
-
RegSubRegPair(unsigned Reg = 0, unsigned SubReg = 0)
: Reg(Reg), SubReg(SubReg) {}
};
-
/// A pair composed of a pair of a register and a sub-register index,
/// and another sub-register index.
/// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
struct RegSubRegPairAndIdx : RegSubRegPair {
unsigned SubIdx;
-
RegSubRegPairAndIdx(unsigned Reg = 0, unsigned SubReg = 0,
unsigned SubIdx = 0)
: RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {}
@@ -481,40 +418,16 @@ public:
RegSubRegPair &BaseReg,
RegSubRegPairAndIdx &InsertedReg) const;
+
/// Return true if two machine instructions would produce identical values.
/// By default, this is only true when the two instructions
/// are deemed identical except for defs. If this function is called when the
/// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
/// aggressive checks.
- virtual bool produceSameValue(const MachineInstr &MI0,
- const MachineInstr &MI1,
+ virtual bool produceSameValue(const MachineInstr *MI0,
+ const MachineInstr *MI1,
const MachineRegisterInfo *MRI = nullptr) const;
- /// \returns true if a branch from an instruction with opcode \p BranchOpc
- /// is capable of jumping to a position \p BrOffset bytes away.
- virtual bool isBranchOffsetInRange(unsigned BranchOpc,
- int64_t BrOffset) const {
- llvm_unreachable("target did not implement");
- }
-
- /// \returns The block that branch instruction \p MI jumps to.
- virtual MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const {
- llvm_unreachable("target did not implement");
- }
-
- /// Insert an unconditional indirect branch at the end of \p MBB to \p
- /// NewDestBB. \p BrOffset indicates the offset of \p NewDestBB relative to
- /// the offset of the position to insert the new branch.
- ///
- /// \returns The number of bytes added to the block.
- virtual unsigned insertIndirectBranch(MachineBasicBlock &MBB,
- MachineBasicBlock &NewDestBB,
- const DebugLoc &DL,
- int64_t BrOffset = 0,
- RegScavenger *RS = nullptr) const {
- llvm_unreachable("target did not implement");
- }
-
/// Analyze the branching code at the end of MBB, returning
/// true if it cannot be understood (e.g. it's a switch dispatch or isn't
/// implemented for a target). Upon success, this returns false and returns
@@ -534,15 +447,13 @@ public:
/// condition. These operands can be passed to other TargetInstrInfo
/// methods to create new branches.
///
- /// Note that removeBranch and insertBranch must be implemented to support
+ /// Note that RemoveBranch and InsertBranch must be implemented to support
/// cases where this method returns success.
///
/// If AllowModify is true, then this routine is allowed to modify the basic
/// block (e.g. delete instructions after the unconditional branch).
///
- /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
- /// before calling this function.
- virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify = false) const {
@@ -562,19 +473,23 @@ public:
PRED_INVALID // Sentinel value
};
- ComparePredicate Predicate = PRED_INVALID;
- MachineOperand LHS = MachineOperand::CreateImm(0);
- MachineOperand RHS = MachineOperand::CreateImm(0);
- MachineBasicBlock *TrueDest = nullptr;
- MachineBasicBlock *FalseDest = nullptr;
- MachineInstr *ConditionDef = nullptr;
+ ComparePredicate Predicate;
+ MachineOperand LHS;
+ MachineOperand RHS;
+ MachineBasicBlock *TrueDest;
+ MachineBasicBlock *FalseDest;
+ MachineInstr *ConditionDef;
/// SingleUseCondition is true if ConditionDef is dead except for the
/// branch(es) at the end of the basic block.
///
- bool SingleUseCondition = false;
+ bool SingleUseCondition;
- explicit MachineBranchPredicate() = default;
+ explicit MachineBranchPredicate()
+ : Predicate(PRED_INVALID), LHS(MachineOperand::CreateImm(0)),
+ RHS(MachineOperand::CreateImm(0)), TrueDest(nullptr),
+ FalseDest(nullptr), ConditionDef(nullptr), SingleUseCondition(false) {
+ }
};
/// Analyze the branching code at the end of MBB and parse it into the
@@ -584,7 +499,7 @@ public:
/// If AllowModify is true, then this routine is allowed to modify the basic
/// block (e.g. delete instructions after the unconditional branch).
///
- virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB,
+ virtual bool AnalyzeBranchPredicate(MachineBasicBlock &MBB,
MachineBranchPredicate &MBP,
bool AllowModify = false) const {
return true;
@@ -593,60 +508,25 @@ public:
/// Remove the branching code at the end of the specific MBB.
/// This is only invoked in cases where AnalyzeBranch returns success. It
/// returns the number of instructions that were removed.
- /// If \p BytesRemoved is non-null, report the change in code size from the
- /// removed instructions.
- virtual unsigned removeBranch(MachineBasicBlock &MBB,
- int *BytesRemoved = nullptr) const {
- llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
+ virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const {
+ llvm_unreachable("Target didn't implement TargetInstrInfo::RemoveBranch!");
}
- /// Insert branch code into the end of the specified MachineBasicBlock. The
- /// operands to this method are the same as those returned by AnalyzeBranch.
- /// This is only invoked in cases where AnalyzeBranch returns success. It
- /// returns the number of instructions inserted. If \p BytesAdded is non-null,
- /// report the change in code size from the added instructions.
+ /// Insert branch code into the end of the specified MachineBasicBlock.
+ /// The operands to this method are the same as those
+ /// returned by AnalyzeBranch. This is only invoked in cases where
+ /// AnalyzeBranch returns success. It returns the number of instructions
+ /// inserted.
///
/// It is also invoked by tail merging to add unconditional branches in
/// cases where AnalyzeBranch doesn't apply because there was no original
/// branch to analyze. At least this much must be implemented, else tail
/// merging needs to be disabled.
- ///
- /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
- /// before calling this function.
- virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
- const DebugLoc &DL,
- int *BytesAdded = nullptr) const {
- llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
- }
-
- unsigned insertUnconditionalBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *DestBB,
- const DebugLoc &DL,
- int *BytesAdded = nullptr) const {
- return insertBranch(MBB, DestBB, nullptr,
- ArrayRef<MachineOperand>(), DL, BytesAdded);
- }
-
- /// Analyze the loop code, return true if it cannot be understood. Upon
- /// success, this function returns false and returns information about the
- /// induction variable and compare instruction used at the end.
- virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
- MachineInstr *&CmpInst) const {
- return true;
- }
-
- /// Generate code to reduce the loop iteration by one and check if the loop is
- /// finished. Return the value/register of the new loop count. We need
- /// this function when peeling off one or more iterations of a loop. This
- /// function assumes the nth iteration is peeled first.
- virtual unsigned reduceLoopCount(MachineBasicBlock &MBB,
- MachineInstr *IndVar, MachineInstr &Cmp,
- SmallVectorImpl<MachineOperand> &Cond,
- SmallVectorImpl<MachineInstr *> &PrevInsts,
- unsigned Iter, unsigned MaxIter) const {
- llvm_unreachable("Target didn't implement ReduceLoopCount");
+ DebugLoc DL) const {
+ llvm_unreachable("Target didn't implement TargetInstrInfo::InsertBranch!");
}
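Clients such as branch folding drive these hooks as a unit: AnalyzeBranch parses the block's terminators, RemoveBranch deletes them, and InsertBranch re-emits the rewritten form. A hedged sketch of that pattern (MBB, NewDest, and TII are assumed to be in scope):

  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  // AnalyzeBranch returns false on success and fills in TBB/FBB/Cond.
  if (!TII->AnalyzeBranch(MBB, TBB, FBB, Cond, /*AllowModify=*/false) &&
      TBB && Cond.empty()) {
    // Lone unconditional branch: retarget it at NewDest.
    TII->RemoveBranch(MBB);
    TII->InsertBranch(MBB, NewDest, nullptr, ArrayRef<MachineOperand>(),
                      DebugLoc());
  }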
/// Delete the instruction OldInst and everything after it, replacing it with
@@ -654,6 +534,40 @@ public:
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
MachineBasicBlock *NewDest) const;
+ /// Get an instruction that performs an unconditional branch to the given
+ /// symbol.
+ virtual void
+ getUnconditionalBranch(MCInst &MI,
+ const MCSymbolRefExpr *BranchTarget) const {
+ llvm_unreachable("Target didn't implement "
+ "TargetInstrInfo::getUnconditionalBranch!");
+ }
+
+ /// Get a machine trap instruction.
+ virtual void getTrap(MCInst &MI) const {
+ llvm_unreachable("Target didn't implement TargetInstrInfo::getTrap!");
+ }
+
+ /// Get a number of bytes that suffices to hold
+ /// either the instruction returned by getUnconditionalBranch or the
+ /// instruction returned by getTrap. This only makes sense because
+ /// getUnconditionalBranch returns a single, specific instruction. This
+ /// information is needed by the jumptable construction code, since it must
+ /// decide how many bytes to use for a jumptable entry so it can generate the
+ /// right mask.
+ ///
+ /// Note that if the jumptable instruction requires alignment, then that
+ /// alignment should be factored into this required bound so that the
+ /// resulting bound gives the right alignment for the instruction.
+ virtual unsigned getJumpInstrTableEntryBound() const {
+ // This method gets called by LLVMTargetMachine always, so it can't fail
+ // just because there happens to be no implementation for this target.
+ // Any code that tries to use a jumptable annotation without defining
+ // getUnconditionalBranch on the appropriate Target will fail anyway, and
+ // the value returned here won't matter in that case.
+ return 0;
+ }
+
/// Return true if it's legal to split the given basic
/// block at the specified instruction (i.e. instruction would be the start
/// of a new basic block).
@@ -758,7 +672,7 @@ public:
/// @param TrueReg Virtual register to copy when Cond is true.
/// @param FalseReg Virtual register to copy when Cond is false.
virtual void insertSelect(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, const DebugLoc &DL,
+ MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DstReg, ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg) const {
llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
@@ -782,11 +696,11 @@ public:
/// @param FalseOp Operand number of the value selected when Cond is false.
/// @param Optimizable Returned as true if MI is optimizable.
/// @returns False on success.
- virtual bool analyzeSelect(const MachineInstr &MI,
+ virtual bool analyzeSelect(const MachineInstr *MI,
SmallVectorImpl<MachineOperand> &Cond,
unsigned &TrueOp, unsigned &FalseOp,
bool &Optimizable) const {
- assert(MI.getDesc().isSelect() && "MI must be a select instruction");
+ assert(MI && MI->getDesc().isSelect() && "MI must be a select instruction");
return true;
}
@@ -805,7 +719,7 @@ public:
/// MI. Has to be updated with any newly created MI or deleted ones.
/// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
/// @returns Optimized instruction or NULL.
- virtual MachineInstr *optimizeSelect(MachineInstr &MI,
+ virtual MachineInstr *optimizeSelect(MachineInstr *MI,
SmallPtrSetImpl<MachineInstr *> &NewMIs,
bool PreferFalse = false) const {
// This function must be implemented if Optimizable is ever set.
@@ -821,7 +735,7 @@ public:
/// careful implementation when multiple copy instructions are required for
/// large registers. See for example the ARM target.
virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, const DebugLoc &DL,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
@@ -858,21 +772,9 @@ public:
/// into real instructions. The target can edit MI in place, or it can insert
/// new instructions and erase MI. The function should return true if
/// anything was changed.
- virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }
-
- /// Check whether the target can fold a load that feeds a subreg operand
- /// (or a subreg operand that feeds a store).
- /// For example, X86 may want to return true if it can fold
- /// movl (%esp), %eax
- /// subb, %al, ...
- /// Into:
- /// subb (%esp), ...
- ///
- /// Ideally, we'd like the target implementation of foldMemoryOperand() to
- /// reject subregs - but since this behavior used to be enforced in the
- /// target-independent code, moving this responsibility to the targets
- /// has the potential of causing nasty silent breakage in out-of-tree targets.
- virtual bool isSubregFoldable() const { return false; }
+ virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
+ return false;
+ }
/// Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
@@ -880,15 +782,14 @@ public:
/// operand folded, otherwise NULL is returned.
/// The new instruction is inserted before MI, and the client is responsible
/// for removing the old instruction.
- MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
- int FrameIndex,
- LiveIntervals *LIS = nullptr) const;
+ MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
+ ArrayRef<unsigned> Ops, int FrameIndex) const;
/// Same as the previous version except it allows folding of any load and
/// store from / to any address, not just from a specific stack slot.
- MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
- MachineInstr &LoadMI,
- LiveIntervals *LIS = nullptr) const;
+ MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
+ ArrayRef<unsigned> Ops,
+ MachineInstr *LoadMI) const;
/// Return true when there is potentially a faster code sequence
/// for an instruction chain ending in \p Root. All potential patterns are
@@ -901,11 +802,6 @@ public:
MachineInstr &Root,
SmallVectorImpl<MachineCombinerPattern> &Patterns) const;
- /// Return true when a code sequence can improve throughput. It
- /// should be called only for instructions in loops.
- /// \param Pattern - combiner pattern
- virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const;
-
/// Return true if the input \P Inst is part of a chain of dependent ops
/// that are suitable for reassociation, otherwise return false.
/// If the instruction's operands must be commuted to have a previous
@@ -954,7 +850,8 @@ public:
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
MachineInstr &NewMI1,
MachineInstr &NewMI2) const {
- }
+ return;
+ };
/// Return true when a target supports MachineCombiner.
virtual bool useMachineCombiner() const { return false; }
@@ -965,11 +862,9 @@ protected:
/// take care of adding a MachineMemOperand to the newly created instruction.
/// The instruction and any auxiliary instructions necessary will be inserted
/// at InsertPt.
- virtual MachineInstr *
- foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
- ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt, int FrameIndex,
- LiveIntervals *LIS = nullptr) const {
+ virtual MachineInstr *foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
return nullptr;
}
@@ -979,9 +874,8 @@ protected:
/// The instruction and any auxiliary instructions necessary will be inserted
/// at InsertPt.
virtual MachineInstr *foldMemoryOperandImpl(
- MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
- LiveIntervals *LIS = nullptr) const {
+ MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
return nullptr;
}
@@ -1032,10 +926,9 @@ public:
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instructions. If this is
/// possible, returns true as well as the new instructions by reference.
- virtual bool
- unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg,
- bool UnfoldLoad, bool UnfoldStore,
- SmallVectorImpl<MachineInstr *> &NewMIs) const {
+ virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
+ unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const {
return false;
}
@@ -1081,42 +974,31 @@ public:
/// Get the base register and byte offset of an instruction that reads/writes
/// memory.
- virtual bool getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg,
- int64_t &Offset,
+ virtual bool getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
+ unsigned &Offset,
const TargetRegisterInfo *TRI) const {
return false;
}
- /// Return true if the instruction contains a base register and offset. If
- /// true, the function also sets the operand position in the instruction
- /// for the base register and offset.
- virtual bool getBaseAndOffsetPosition(const MachineInstr &MI,
- unsigned &BasePos,
- unsigned &OffsetPos) const {
- return false;
- }
+ virtual bool enableClusterLoads() const { return false; }
- /// If the instruction is an increment of a constant value, return the amount.
- virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
+ virtual bool shouldClusterLoads(MachineInstr *FirstLdSt,
+ MachineInstr *SecondLdSt,
+ unsigned NumLoads) const {
return false;
}
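A hedged sketch of a target opting in to load clustering via the two hooks above; the class name and the pairing threshold are illustrative only:

  bool MyTargetInstrInfo::enableClusterLoads() const { return true; }

  bool MyTargetInstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                             MachineInstr *SecondLdSt,
                                             unsigned NumLoads) const {
    // Keep runs of up to four loads adjacent in the schedule.
    return NumLoads <= 4;
  }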
- /// Returns true if the two given memory operations should be scheduled
- /// adjacent. Note that you have to add:
- /// DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
- /// or
- /// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
- /// to TargetPassConfig::createMachineScheduler() to have an effect.
- virtual bool shouldClusterMemOps(MachineInstr &FirstLdSt,
- MachineInstr &SecondLdSt,
- unsigned NumLoads) const {
- llvm_unreachable("target did not implement shouldClusterMemOps()");
+ /// Can this target fuse the given instructions if they are scheduled
+ /// adjacent.
+ virtual bool shouldScheduleAdjacent(MachineInstr* First,
+ MachineInstr *Second) const {
+ return false;
}
/// Reverses the branch condition of the specified condition list,
/// returning false on success and true if it cannot be reversed.
virtual
- bool reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
+ bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
return true;
}
@@ -1124,46 +1006,25 @@ public:
virtual void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
+
/// Return the noop instruction to use for a noop.
- virtual void getNoop(MCInst &NopInst) const;
+ virtual void getNoopForMachoTarget(MCInst &NopInst) const;
- /// Return true for post-incremented instructions.
- virtual bool isPostIncrement(const MachineInstr &MI) const {
- return false;
- }
/// Returns true if the instruction is already predicated.
- virtual bool isPredicated(const MachineInstr &MI) const {
+ virtual bool isPredicated(const MachineInstr *MI) const {
return false;
}
/// Returns true if the instruction is a
/// terminator instruction that has not been predicated.
- virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const;
-
- /// Returns true if MI is an unconditional tail call.
- virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
- return false;
- }
-
- /// Returns true if the tail call can be made conditional on BranchCond.
- virtual bool
- canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond,
- const MachineInstr &TailCall) const {
- return false;
- }
-
- /// Replace the conditional branch in MBB with a conditional tail call.
- virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB,
- SmallVectorImpl<MachineOperand> &Cond,
- const MachineInstr &TailCall) const {
- llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
- }
+ virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const;
/// Convert the instruction into a predicated instruction.
/// It returns true if the operation was successful.
- virtual bool PredicateInstruction(MachineInstr &MI,
- ArrayRef<MachineOperand> Pred) const;
+ virtual
+ bool PredicateInstruction(MachineInstr *MI,
+ ArrayRef<MachineOperand> Pred) const;
/// Returns true if the first specified predicate
/// subsumes the second, e.g. GE subsumes GT.
@@ -1176,7 +1037,7 @@ public:
/// If the specified instruction defines any predicate
/// or condition code register(s) used for predication, returns true as well
/// as the definition predicate(s) by reference.
- virtual bool DefinesPredicate(MachineInstr &MI,
+ virtual bool DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const {
return false;
}
@@ -1184,8 +1045,8 @@ public:
/// Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
- virtual bool isPredicable(const MachineInstr &MI) const {
- return MI.getDesc().isPredicable();
+ virtual bool isPredicable(MachineInstr *MI) const {
+ return MI->getDesc().isPredicable();
}
/// Return true if it's safe to move a machine
@@ -1196,7 +1057,7 @@ public:
/// Test if the given instruction should be considered a scheduling boundary.
/// This primarily includes labels and terminators.
- virtual bool isSchedulingBoundary(const MachineInstr &MI,
+ virtual bool isSchedulingBoundary(const MachineInstr *MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const;
@@ -1223,13 +1084,6 @@ public:
CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
const ScheduleDAG *DAG) const;
- /// Allocate and return a hazard recognizer to use by non-scheduling
- /// passes.
- virtual ScheduleHazardRecognizer*
- CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
- return nullptr;
- }
-
/// Provide a global flag for disabling the PreRA hazard recognizer that
/// targets may choose to honor.
bool usePreRAHazardRecognizer() const;
@@ -1238,20 +1092,22 @@ public:
/// in SrcReg and SrcReg2 if having two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
- virtual bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
- unsigned &SrcReg2, int &Mask, int &Value) const {
+ virtual bool analyzeCompare(const MachineInstr *MI,
+ unsigned &SrcReg, unsigned &SrcReg2,
+ int &Mask, int &Value) const {
return false;
}
/// See if the comparison instruction can be converted
/// into something more efficient. E.g., on ARM most instructions can set the
/// flags register, obviating the need for a separate CMP.
- virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
- unsigned SrcReg2, int Mask, int Value,
+ virtual bool optimizeCompareInstr(MachineInstr *CmpInstr,
+ unsigned SrcReg, unsigned SrcReg2,
+ int Mask, int Value,
const MachineRegisterInfo *MRI) const {
return false;
}
- virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }
+ virtual bool optimizeCondBranch(MachineInstr *MI) const { return false; }
/// Try to remove the load by folding it to a register operand at the use.
/// We fold the load instructions if and only if the
@@ -1260,10 +1116,10 @@ public:
/// defined by the load we are trying to fold. DefMI returns the machine
/// instruction that defines FoldAsLoadDefReg, and the function returns
/// the machine instruction generated due to folding.
- virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI,
- const MachineRegisterInfo *MRI,
- unsigned &FoldAsLoadDefReg,
- MachineInstr *&DefMI) const {
+ virtual MachineInstr* optimizeLoadInstr(MachineInstr *MI,
+ const MachineRegisterInfo *MRI,
+ unsigned &FoldAsLoadDefReg,
+ MachineInstr *&DefMI) const {
return nullptr;
}
@@ -1273,7 +1129,7 @@ public:
/// then the caller may assume that DefMI has been erased from its parent
/// block. The caller may assume that it will not be erased by this
/// function otherwise.
- virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
+ virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
unsigned Reg, MachineRegisterInfo *MRI) const {
return false;
}
@@ -1283,7 +1139,7 @@ public:
/// IssueWidth is the number of microops that can be dispatched each
/// cycle. An instruction with zero microops takes no dispatch resources.
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
- const MachineInstr &MI) const;
+ const MachineInstr *MI) const;
/// Return true for pseudo instructions that don't consume any
/// machine resources in their current form. These are common cases that the
@@ -1306,28 +1162,35 @@ public:
/// by a target. Use computeOperandLatency to get the best estimate of
/// latency.
virtual int getOperandLatency(const InstrItineraryData *ItinData,
- const MachineInstr &DefMI, unsigned DefIdx,
- const MachineInstr &UseMI,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI,
unsigned UseIdx) const;
+ /// Compute and return the latency of the given data
+ /// dependent def and use when the operand indices are already known.
+ unsigned computeOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx)
+ const;
+
/// Compute the instruction latency of a given instruction.
/// If the instruction has higher cost when predicated, it's returned via
/// PredCost.
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
- const MachineInstr &MI,
+ const MachineInstr *MI,
unsigned *PredCost = nullptr) const;
- virtual unsigned getPredicationCost(const MachineInstr &MI) const;
+ virtual unsigned getPredicationCost(const MachineInstr *MI) const;
virtual int getInstrLatency(const InstrItineraryData *ItinData,
SDNode *Node) const;
- /// Return the default expected latency for a def based on its opcode.
+ /// Return the default expected latency for a def based on it's opcode.
unsigned defaultDefLatency(const MCSchedModel &SchedModel,
- const MachineInstr &DefMI) const;
+ const MachineInstr *DefMI) const;
int computeDefOperandLatency(const InstrItineraryData *ItinData,
- const MachineInstr &DefMI) const;
+ const MachineInstr *DefMI) const;
/// Return true if this opcode has high latency to its result.
virtual bool isHighLatencyDef(int opc) const { return false; }
@@ -1337,23 +1200,23 @@ public:
/// it 'high'. This is used by optimization passes such as machine LICM to
/// determine whether it makes sense to hoist an instruction out even in a
/// high register pressure situation.
- virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
- const MachineRegisterInfo *MRI,
- const MachineInstr &DefMI, unsigned DefIdx,
- const MachineInstr &UseMI,
- unsigned UseIdx) const {
+ virtual
+ bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
+ const MachineRegisterInfo *MRI,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx) const {
return false;
}
/// Compute operand latency of a def of 'Reg'. Return true
/// if the target considered it 'low'.
- virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
- const MachineInstr &DefMI,
- unsigned DefIdx) const;
+ virtual
+ bool hasLowDefLatency(const TargetSchedModel &SchedModel,
+ const MachineInstr *DefMI, unsigned DefIdx) const;
/// Perform target-specific instruction verification.
- virtual bool verifyInstruction(const MachineInstr &MI,
- StringRef &ErrInfo) const {
+ virtual
+ bool verifyInstruction(const MachineInstr *MI, StringRef &ErrInfo) const {
return true;
}
@@ -1377,7 +1240,7 @@ public:
/// execution domain.
///
virtual std::pair<uint16_t, uint16_t>
- getExecutionDomain(const MachineInstr &MI) const {
+ getExecutionDomain(const MachineInstr *MI) const {
return std::make_pair(0, 0);
}
@@ -1385,7 +1248,8 @@ public:
///
/// The bit (1 << Domain) must be set in the mask returned from
/// getExecutionDomain(MI).
- virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}
+ virtual void setExecutionDomain(MachineInstr *MI, unsigned Domain) const {}
+
/// Returns the preferred minimum clearance
/// before an instruction with an unwanted partial register update.
@@ -1427,7 +1291,7 @@ public:
/// allows the target to insert a dependency breaking instruction.
///
virtual unsigned
- getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
+ getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const {
// The default implementation returns 0 for no partial register dependency.
return 0;
@@ -1447,7 +1311,7 @@ public:
/// This hook works similarly to getPartialRegUpdateClearance, except that it
/// does not take an operand index. Instead sets \p OpNum to the index of the
/// unused register.
- virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
+ virtual unsigned getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
const TargetRegisterInfo *TRI) const {
// The default implementation returns 0 for no undef register dependency.
return 0;
@@ -1470,8 +1334,9 @@ public:
/// An <imp-kill> operand should be added to MI if an instruction was
/// inserted. This ties the instructions together in the post-ra scheduler.
///
- virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
- const TargetRegisterInfo *TRI) const {}
+ virtual void
+ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {}
/// Create machine specific model for scheduling.
virtual DFAPacketizer *
@@ -1479,23 +1344,16 @@ public:
return nullptr;
}
- /// Sometimes, it is possible for the target
- /// to tell, even without aliasing information, that two MIs access different
- /// memory addresses. This function returns true if two MIs access different
- /// memory addresses and false otherwise.
- ///
- /// Assumes any physical registers used to compute addresses have the same
- /// value for both instructions. (This is the most useful assumption for
- /// post-RA scheduling.)
- ///
- /// See also MachineInstr::mayAlias, which is implemented on top of this
- /// function.
+ // Sometimes, it is possible for the target
+ // to tell, even without aliasing information, that two MIs access different
+ // memory addresses. This function returns true if two MIs access different
+ // memory addresses and false otherwise.
virtual bool
- areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+ areMemAccessesTriviallyDisjoint(MachineInstr *MIa, MachineInstr *MIb,
AliasAnalysis *AA = nullptr) const {
- assert((MIa.mayLoad() || MIa.mayStore()) &&
+ assert(MIa && (MIa->mayLoad() || MIa->mayStore()) &&
"MIa must load from or modify a memory location");
- assert((MIb.mayLoad() || MIb.mayStore()) &&
+ assert(MIb && (MIb->mayLoad() || MIb->mayStore()) &&
"MIb must load from or modify a memory location");
return false;
}
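// A minimal sketch of a backend override, assuming a hypothetical
// MyTargetInstrInfo; it proves disjointness only when both instructions
// carry a single memory operand on the same IR value with constant,
// non-overlapping offset ranges:

bool MyTargetInstrInfo::areMemAccessesTriviallyDisjoint(
    MachineInstr *MIa, MachineInstr *MIb, AliasAnalysis *AA) const {
  if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand())
    return false;
  const MachineMemOperand *A = *MIa->memoperands_begin();
  const MachineMemOperand *B = *MIb->memoperands_begin();
  if (!A->getValue() || A->getValue() != B->getValue())
    return false;
  int64_t LowOff = std::min(A->getOffset(), B->getOffset());
  int64_t HighOff = std::max(A->getOffset(), B->getOffset());
  uint64_t LowSize = (LowOff == A->getOffset()) ? A->getSize() : B->getSize();
  // Disjoint if the lower access ends at or before the higher one starts.
  return LowOff + (int64_t)LowSize <= HighOff;
}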
@@ -1545,110 +1403,24 @@ public:
return None;
}
- /// Return an array that contains the MMO target flag values and their
- /// names.
- ///
- /// MIR Serialization is able to serialize only the MMO target flags that are
- /// defined by this method.
- virtual ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
- getSerializableMachineMemOperandTargetFlags() const {
- return None;
- }
-
- /// Determines whether \p Inst is a tail call instruction. Override this
- /// method on targets that do not properly set MCID::Return and MCID::Call on
- /// tail call instructions.
- virtual bool isTailCall(const MachineInstr &Inst) const {
- return Inst.isReturn() && Inst.isCall();
- }
-
- /// True if the instruction is bound to the top of its basic block and no
- /// other instructions shall be inserted before it. This can be implemented
- /// to prevent the register allocator from inserting spills before such
- /// instructions.
- virtual bool isBasicBlockPrologue(const MachineInstr &MI) const {
- return false;
- }
-
- /// \brief Return how many instructions would be saved by outlining a
- /// sequence containing \p SequenceSize instructions that appears
- /// \p Occurrences times in a module.
- virtual unsigned getOutliningBenefit(size_t SequenceSize, size_t Occurrences,
- bool CanBeTailCall) const {
- llvm_unreachable(
- "Target didn't implement TargetInstrInfo::getOutliningBenefit!");
- }
-
- /// Represents how an instruction should be mapped by the outliner.
- /// \p Legal instructions are those which are safe to outline.
- /// \p Illegal instructions are those which cannot be outlined.
- /// \p Invisible instructions are instructions which can be outlined, but
- /// shouldn't actually impact the outlining result.
- enum MachineOutlinerInstrType {Legal, Illegal, Invisible};
-
- /// Returns how or if \p MI should be outlined.
- virtual MachineOutlinerInstrType getOutliningType(MachineInstr &MI) const {
- llvm_unreachable(
- "Target didn't implement TargetInstrInfo::getOutliningType!");
- }
-
- /// Insert a custom epilogue for outlined functions.
- /// This may be empty, in which case no epilogue or return statement will be
- /// emitted.
- virtual void insertOutlinerEpilogue(MachineBasicBlock &MBB,
- MachineFunction &MF,
- bool IsTailCall) const {
- llvm_unreachable(
- "Target didn't implement TargetInstrInfo::insertOutlinerEpilogue!");
- }
-
- /// Insert a call to an outlined function into the program.
- /// Returns an iterator to the spot where we inserted the call. This must be
- /// implemented by the target.
- virtual MachineBasicBlock::iterator
- insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &It, MachineFunction &MF,
- bool IsTailCall) const {
- llvm_unreachable(
- "Target didn't implement TargetInstrInfo::insertOutlinedCall!");
- }
-
- /// Insert a custom prologue for outlined functions.
- /// This may be empty, in which case no prologue will be emitted.
- virtual void insertOutlinerPrologue(MachineBasicBlock &MBB,
- MachineFunction &MF,
- bool IsTailCall) const {
- llvm_unreachable(
- "Target didn't implement TargetInstrInfo::insertOutlinerPrologue!");
- }
-
- /// Return true if the function can safely be outlined from.
- /// By default, this means that the function has no red zone.
- virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF) const {
- llvm_unreachable("Target didn't implement "
- "TargetInstrInfo::isFunctionSafeToOutlineFrom!");
- }
-
private:
unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
unsigned CatchRetOpcode;
- unsigned ReturnOpcode;
};
/// \brief Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
template<>
struct DenseMapInfo<TargetInstrInfo::RegSubRegPair> {
- using RegInfo = DenseMapInfo<unsigned>;
+ typedef DenseMapInfo<unsigned> RegInfo;
static inline TargetInstrInfo::RegSubRegPair getEmptyKey() {
return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(),
RegInfo::getEmptyKey());
}
-
static inline TargetInstrInfo::RegSubRegPair getTombstoneKey() {
return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(),
RegInfo::getTombstoneKey());
}
-
/// \brief Reuse getHashValue implementation from
/// std::pair<unsigned, unsigned>.
static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
@@ -1656,7 +1428,6 @@ struct DenseMapInfo<TargetInstrInfo::RegSubRegPair> {
std::make_pair(Val.Reg, Val.SubReg);
return DenseMapInfo<std::pair<unsigned, unsigned>>::getHashValue(PairVal);
}
-
static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS,
const TargetInstrInfo::RegSubRegPair &RHS) {
return RegInfo::isEqual(LHS.Reg, RHS.Reg) &&
@@ -1664,6 +1435,6 @@ struct DenseMapInfo<TargetInstrInfo::RegSubRegPair> {
}
};
-} // end namespace llvm
+} // End llvm namespace
-#endif // LLVM_TARGET_TARGETINSTRINFO_H
+#endif
diff --git a/gnu/llvm/include/llvm/Target/TargetLowering.h b/gnu/llvm/include/llvm/Target/TargetLowering.h
index 23711d636c9..304da4f8751 100644
--- a/gnu/llvm/include/llvm/Target/TargetLowering.h
+++ b/gnu/llvm/include/llvm/Target/TargetLowering.h
@@ -1,4 +1,4 @@
-//===- llvm/Target/TargetLowering.h - Target Lowering Info ------*- C++ -*-===//
+//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -23,88 +23,63 @@
#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H
-#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/DAGCombine.h"
-#include "llvm/CodeGen/ISDOpcodes.h"
-#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
-#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
-#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
-#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Type.h"
#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/Support/AtomicOrdering.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
-#include <algorithm>
-#include <cassert>
#include <climits>
-#include <cstdint>
-#include <iterator>
#include <map>
-#include <string>
-#include <utility>
#include <vector>
namespace llvm {
-
-class BranchProbability;
-class CCState;
-class CCValAssign;
-class Constant;
-class FastISel;
-class FunctionLoweringInfo;
-class GlobalValue;
-class IntrinsicInst;
-struct KnownBits;
-class LLVMContext;
-class MachineBasicBlock;
-class MachineFunction;
-class MachineInstr;
-class MachineJumpTableInfo;
-class MachineLoop;
-class MachineRegisterInfo;
-class MCContext;
-class MCExpr;
-class Module;
-class TargetRegisterClass;
-class TargetLibraryInfo;
-class TargetRegisterInfo;
-class Value;
-
-namespace Sched {
-
- enum Preference {
- None, // No preference
- Source, // Follow source order.
- RegPressure, // Scheduling for lowest register pressure.
- Hybrid, // Scheduling for both latency and register pressure.
- ILP, // Scheduling for ILP in low register pressure mode.
- VLIW // Scheduling for VLIW targets.
- };
-
-} // end namespace Sched
+ class CallInst;
+ class CCState;
+ class FastISel;
+ class FunctionLoweringInfo;
+ class ImmutableCallSite;
+ class IntrinsicInst;
+ class MachineBasicBlock;
+ class MachineFunction;
+ class MachineInstr;
+ class MachineJumpTableInfo;
+ class MachineLoop;
+ class Mangler;
+ class MCContext;
+ class MCExpr;
+ class MCSymbol;
+ template<typename T> class SmallVectorImpl;
+ class DataLayout;
+ class TargetRegisterClass;
+ class TargetLibraryInfo;
+ class TargetLoweringObjectFile;
+ class Value;
+
+ namespace Sched {
+ enum Preference {
+ None, // No preference
+ Source, // Follow source order.
+ RegPressure, // Scheduling for lowest register pressure.
+ Hybrid, // Scheduling for both latency and register pressure.
+ ILP, // Scheduling for ILP in low register pressure mode.
+ VLIW // Scheduling for VLIW targets.
+ };
+ }
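// A minimal usage sketch, assuming a hypothetical MyTargetLowering: the
// preference is normally chosen once in the target's constructor, via the
// setSchedulingPreference() setter on TargetLoweringBase.

MyTargetLowering::MyTargetLowering(const TargetMachine &TM)
    : TargetLowering(TM) {
  // Few allocatable registers on this target: schedule for low pressure.
  setSchedulingPreference(Sched::RegPressure);
}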
/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
+ TargetLoweringBase(const TargetLoweringBase&) = delete;
+ void operator=(const TargetLoweringBase&) = delete;
+
public:
/// This enum indicates whether operations are valid for a target, and if not,
/// what action should be used to make them valid.
@@ -133,7 +108,7 @@ public:
/// LegalizeKind holds the legalization kind that needs to happen to EVT
/// in order to type-legalize it.
- using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;
+ typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
/// Enum that describes how the target represents true/false values.
enum BooleanContent {
@@ -164,42 +139,6 @@ public:
CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
};
- /// Enum that specifies when a multiplication should be expanded.
- enum class MulExpansionKind {
- Always, // Always expand the instruction.
- OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
- // or custom.
- };
-
- class ArgListEntry {
- public:
- Value *Val = nullptr;
- SDValue Node = SDValue();
- Type *Ty = nullptr;
- bool IsSExt : 1;
- bool IsZExt : 1;
- bool IsInReg : 1;
- bool IsSRet : 1;
- bool IsNest : 1;
- bool IsByVal : 1;
- bool IsInAlloca : 1;
- bool IsReturned : 1;
- bool IsSwiftSelf : 1;
- bool IsSwiftError : 1;
- uint16_t Alignment = 0;
-
- ArgListEntry()
- : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
- IsNest(false), IsByVal(false), IsInAlloca(false), IsReturned(false),
- IsSwiftSelf(false), IsSwiftError(false) {}
-
- void setAttributes(ImmutableCallSite *CS, unsigned ArgIdx);
- };
- using ArgListTy = std::vector<ArgListEntry>;
-
- virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
- ArgListTy &Args) const {};
-
static ISD::NodeType getExtendForContent(BooleanContent Content) {
switch (Content) {
case UndefinedBooleanContent:
@@ -217,9 +156,7 @@ public:
/// NOTE: The TargetMachine owns TLOF.
explicit TargetLoweringBase(const TargetMachine &TM);
- TargetLoweringBase(const TargetLoweringBase &) = delete;
- TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
- virtual ~TargetLoweringBase() = default;
+ virtual ~TargetLoweringBase() {}
protected:
/// \brief Initialize all of the actions to default values.
@@ -237,18 +174,6 @@ public:
return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
}
- /// Return the type for frame index, which is determined by
- /// the alloca address space specified through the data layout.
- MVT getFrameIndexTy(const DataLayout &DL) const {
- return getPointerTy(DL, DL.getAllocaAddrSpace());
- }
-
- /// Return the type for operands of fence.
- /// TODO: Let fence operands be of i32 type and remove this.
- virtual MVT getFenceOperandTy(const DataLayout &DL) const {
- return getPointerTy(DL);
- }
-
/// EVT is not used in-tree, but is used by out-of-tree targets.
/// Documentation for this function would be nice...
virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
@@ -262,6 +187,9 @@ public:
return getPointerTy(DL);
}
+ /// Return true if the select operation is expensive for this target.
+ bool isSelectExpensive() const { return SelectIsExpensive; }
+
virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
return true;
}
@@ -303,49 +231,14 @@ public:
/// several shifts, adds, and multiplies for this target.
/// The definition of "cheaper" may depend on whether we're optimizing
/// for speed or for size.
- virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }
-
- /// Return true if the target can handle a standalone remainder operation.
- virtual bool hasStandaloneRem(EVT VT) const {
- return true;
- }
-
- /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
- virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
- // Default behavior is to replace SQRT(X) with X*RSQRT(X).
+ virtual bool isIntDivCheap(EVT VT, AttributeSet Attr) const {
return false;
}
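// A minimal override sketch, assuming a hypothetical target where integer
// division is only worth keeping when the function optimizes for size (the
// AttributeSet query mirrors what backends of this era do):

bool MyTargetLowering::isIntDivCheap(EVT VT, AttributeSet Attr) const {
  return Attr.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
}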
- /// Reciprocal estimate status values used by the functions below.
- enum ReciprocalEstimate : int {
- Unspecified = -1,
- Disabled = 0,
- Enabled = 1
- };
-
- /// Return a ReciprocalEstimate enum value for a square root of the given type
- /// based on the function's attributes. If the operation is not overridden by
- /// the function's attributes, "Unspecified" is returned and target defaults
- /// are expected to be used for instruction selection.
- int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;
-
- /// Return a ReciprocalEstimate enum value for a division of the given type
- /// based on the function's attributes. If the operation is not overridden by
- /// the function's attributes, "Unspecified" is returned and target defaults
- /// are expected to be used for instruction selection.
- int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;
-
- /// Return the refinement step count for a square root of the given type based
- /// on the function's attributes. If the operation is not overridden by
- /// the function's attributes, "Unspecified" is returned and target defaults
- /// are expected to be used for instruction selection.
- int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;
-
- /// Return the refinement step count for a division of the given type based
- /// on the function's attributes. If the operation is not overridden by
- /// the function's attributes, "Unspecified" is returned and target defaults
- /// are expected to be used for instruction selection.
- int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;
+ /// Return true if sqrt(x) is as cheap or cheaper than 1 / rsqrt(x)
+ bool isFsqrtCheap() const {
+ return FsqrtIsCheap;
+ }
/// Returns true if target has indicated at least one type should be bypassed.
bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
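// Targets seed BypassSlowDivWidths from their constructor. A sketch using
// the addBypassSlowDiv() setter on this class; the widths are illustrative:

addBypassSlowDiv(32, 8); // probe for an 8-bit udiv before a full 32-bit div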
@@ -366,41 +259,18 @@ public:
return PredictableSelectIsExpensive;
}
- /// If a branch or a select condition is skewed in one direction by more than
- /// this factor, it is very likely to be predicted correctly.
- virtual BranchProbability getPredictableBranchThreshold() const;
-
- /// Return true if the following transform is beneficial:
+ /// isLoadBitCastBeneficial() - Return true if the following transform
+ /// is beneficial.
/// fold (conv (load x)) -> (load (conv*)x)
/// On architectures that don't natively support some vector loads
/// efficiently, casting the load to a smaller vector of larger types and
/// loading is more efficient; however, this can be undone by optimizations in
/// dag combiner.
- virtual bool isLoadBitCastBeneficial(EVT LoadVT,
- EVT BitcastVT) const {
- // Don't do if we could do an indexed load on the original type, but not on
- // the new one.
- if (!LoadVT.isSimple() || !BitcastVT.isSimple())
- return true;
-
- MVT LoadMVT = LoadVT.getSimpleVT();
-
- // Don't bother doing this if it's just going to be promoted again later, as
- // doing so might interfere with other combines.
- if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
- getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
- return false;
-
+ virtual bool isLoadBitCastBeneficial(EVT /* Load */,
+ EVT /* Bitcast */) const {
return true;
}
- /// Return true if the following transform is beneficial:
- /// (store (y (conv x)), y*)) -> (store x, (x*))
- virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT) const {
- // Default to the same logic as loads.
- return isLoadBitCastBeneficial(StoreVT, BitcastVT);
- }
-
/// Return true if it is expected to be cheaper to do a store of a non-zero
/// vector constant with the given size and type for the address space than to
/// store the individual scalar element constants.
@@ -410,16 +280,6 @@ public:
return false;
}
- /// Should we merge stores after Legalization (generally
- /// better quality) or before (simpler)
- virtual bool mergeStoresAfterLegalization() const { return false; }
-
- /// Returns if it's reasonable to merge stores to MemVT size.
- virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
- const SelectionDAG &DAG) const {
- return true;
- }
-
/// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
virtual bool isCheapToSpeculateCttz() const {
return false;
@@ -430,80 +290,19 @@ public:
return false;
}
- /// \brief Return true if ctlz instruction is fast.
- virtual bool isCtlzFast() const {
- return false;
- }
-
- /// Return true if it is safe to transform an integer-domain bitwise operation
- /// into the equivalent floating-point operation. This should be set to true
- /// if the target has IEEE-754-compliant fabs/fneg operations for the input
- /// type.
- virtual bool hasBitPreservingFPLogic(EVT VT) const {
- return false;
- }
-
- /// \brief Return true if it is cheaper to split the store of a merged int val
- /// from a pair of smaller values into multiple stores.
- virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
- return false;
- }
-
/// \brief Return if the target supports combining a
/// chain like:
/// \code
- /// %andResult = and %val1, #mask
+ /// %andResult = and %val1, #imm-with-one-bit-set;
/// %icmpResult = icmp %andResult, 0
+ /// br i1 %icmpResult, label %dest1, label %dest2
/// \endcode
/// into a single machine instruction of a form like:
/// \code
- /// cc = test %register, #mask
+ /// brOnBitSet %register, #bitNumber, dest
/// \endcode
- virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
- return false;
- }
-
- /// Use bitwise logic to make pairs of compares more efficient. For example:
- /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
- /// This should be true when it takes more than one instruction to lower
- /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
- /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
- virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
- return false;
- }
-
- /// Return the preferred operand type if the target has a quick way to compare
- /// integer values of the given size. Assume that any legal integer type can
- /// be compared efficiently. Targets may override this to allow illegal wide
- /// types to return a vector type if there is support to compare that type.
- virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
- MVT VT = MVT::getIntegerVT(NumBits);
- return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
- }
-
- /// Return true if the target should transform:
- /// (X & Y) == Y ---> (~X & Y) == 0
- /// (X & Y) != Y ---> (~X & Y) != 0
- ///
- /// This may be profitable if the target has a bitwise and-not operation that
- /// sets comparison flags. A target may want to limit the transformation based
- /// on the type of Y or if Y is a constant.
- ///
- /// Note that the transform will not occur if Y is known to be a power-of-2
- /// because a mask and compare of a single bit can be handled by inverting the
- /// predicate, for example:
- /// (X & 8) == 8 ---> (X & 8) != 0
- virtual bool hasAndNotCompare(SDValue Y) const {
- return false;
- }
-
- /// Return true if the target has a bitwise and-not operation:
- /// X = ~A & B
- /// This can be used to simplify select or other instructions.
- virtual bool hasAndNot(SDValue X) const {
- // If the target has the more complex version of this operation, assume that
- // it has this operation too.
- return hasAndNotCompare(X);
+ bool isMaskAndBranchFoldingLegal() const {
+ return MaskAndBranchFoldingIsLegal;
}
/// \brief Return true if the target wants to use the optimization that
@@ -688,29 +487,20 @@ public:
unsigned &NumIntermediates,
MVT &RegisterVT) const;
- /// Certain targets such as MIPS require that some types such as vectors are
- /// always broken down into scalars in some contexts. This occurs even if the
- /// vector type is legal.
- virtual unsigned getVectorTypeBreakdownForCallingConv(
- LLVMContext &Context, EVT VT, EVT &IntermediateVT,
- unsigned &NumIntermediates, MVT &RegisterVT) const {
- return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
- RegisterVT);
- }
-
struct IntrinsicInfo {
- unsigned opc = 0; // target opcode
- EVT memVT; // memory VT
- const Value* ptrVal = nullptr; // value representing memory location
- int offset = 0; // offset off of ptrVal
- unsigned size = 0; // the size of the memory location
- // (taken from memVT if zero)
- unsigned align = 1; // alignment
- bool vol = false; // is volatile?
- bool readMem = false; // reads memory?
- bool writeMem = false; // writes memory?
-
- IntrinsicInfo() = default;
+ unsigned opc; // target opcode
+ EVT memVT; // memory VT
+ const Value* ptrVal; // value representing memory location
+ int offset; // offset off of ptrVal
+ unsigned size; // the size of the memory location
+ // (taken from memVT if zero)
+ unsigned align; // alignment
+ bool vol; // is volatile?
+ bool readMem; // reads memory?
+ bool writeMem; // writes memory?
+
+ IntrinsicInfo() : opc(0), ptrVal(nullptr), offset(0), size(0), align(1),
+ vol(false), readMem(false), writeMem(false) {}
};
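// A minimal getTgtMemIntrinsic() sketch for a hypothetical target intrinsic
// @llvm.mytgt.vload that reads a 16-byte vector through its first argument:

bool MyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &I,
                                          unsigned Intrinsic) const {
  if (Intrinsic != Intrinsic::mytgt_vload) // hypothetical intrinsic ID
    return false;
  Info.opc = ISD::INTRINSIC_W_CHAIN;
  Info.memVT = MVT::v4i32;          // size is taken from memVT (size == 0)
  Info.ptrVal = I.getArgOperand(0); // memory location operand
  Info.offset = 0;
  Info.align = 16;
  Info.vol = false;
  Info.readMem = true;
  Info.writeMem = false;
  return true;
}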
/// Given an intrinsic, checks if on the target the intrinsic will need to map
@@ -741,7 +531,7 @@ public:
/// Returns true if the operation can trap for the value type.
///
/// VT must be a legal type. By default, we optimistically assume most
- /// operations don't trap except for integer divide and remainder.
+ /// operations don't trap except for divide and remainder.
virtual bool canOpTrap(unsigned Op, EVT VT) const;
/// Similar to isShuffleMaskLegal. Targets can use this to
@@ -759,7 +549,7 @@ public:
if (VT.isExtended()) return Expand;
// If a target-specific SDNode requires legalization, require the target
// to provide custom legalization for it.
- if (Op >= array_lengthof(OpActions[0])) return Custom;
+ if (Op > array_lengthof(OpActions[0])) return Custom;
return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
}
@@ -781,91 +571,6 @@ public:
getOperationAction(Op, VT) == Promote);
}
- /// Return true if the specified operation is legal on this target or can be
- /// made legal with custom lowering or using promotion. This is used to help
- /// guide high-level lowering decisions.
- bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT) const {
- return (VT == MVT::Other || isTypeLegal(VT)) &&
- (getOperationAction(Op, VT) == Legal ||
- getOperationAction(Op, VT) == Custom ||
- getOperationAction(Op, VT) == Promote);
- }
-
- /// Return true if the specified operation is illegal but has a custom lowering
- /// on that type. This is used to help guide high-level lowering
- /// decisions.
- bool isOperationCustom(unsigned Op, EVT VT) const {
- return (!isTypeLegal(VT) && getOperationAction(Op, VT) == Custom);
- }
-
- /// Return true if lowering to a jump table is allowed.
- bool areJTsAllowed(const Function *Fn) const {
- if (Fn->getFnAttribute("no-jump-tables").getValueAsString() == "true")
- return false;
-
- return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
- isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
- }
-
- /// Check whether the range [Low,High] fits in a machine word.
- bool rangeFitsInWord(const APInt &Low, const APInt &High,
- const DataLayout &DL) const {
- // FIXME: Using the pointer type doesn't seem ideal.
- uint64_t BW = DL.getPointerSizeInBits();
- uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
- return Range <= BW;
- }
-
- /// Return true if lowering to a jump table is suitable for a set of case
- /// clusters which may contain \p NumCases cases spanning \p Range values.
- /// FIXME: This function checks the maximum table size and density, but the
- /// minimum size is not checked. It would be nice if the minimum size check
- /// were also combined into this function. Currently, the minimum size check
- /// is performed in findJumpTable() in SelectionDAGBuilder and
- /// getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
- bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
- uint64_t Range) const {
- const bool OptForSize = SI->getParent()->getParent()->optForSize();
- const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
- const unsigned MaxJumpTableSize =
- OptForSize || getMaximumJumpTableSize() == 0
- ? UINT_MAX
- : getMaximumJumpTableSize();
- // Check whether a range of clusters is dense enough for a jump table.
- if (Range <= MaxJumpTableSize &&
- (NumCases * 100 >= Range * MinDensity)) {
- return true;
- }
- return false;
- }
-
- /// Return true if lowering to a bit test is suitable for a set of case
- /// clusters which contains \p NumDests unique destinations, \p Low and
- /// \p High as its lowest and highest case values, and expects \p NumCmps
- /// case value comparisons. Check if the number of destinations, comparison
- /// metric, and range are all suitable.
- bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
- const APInt &Low, const APInt &High,
- const DataLayout &DL) const {
- // FIXME: I don't think NumCmps is the correct metric: a single case and a
- // range of cases both require only one branch to lower. Just looking at the
- // number of clusters and destinations should be enough to decide whether to
- // build bit tests.
-
- // To lower a range with bit tests, the range must fit the bitwidth of a
- // machine word.
- if (!rangeFitsInWord(Low, High, DL))
- return false;
-
- // Decide whether it's profitable to lower this range with bit tests. Each
- // destination requires a bit test and branch, and there is an overall range
- // check branch. For a small number of clusters, separate comparisons might
- // be cheaper, and for many destinations, splitting the range might be
- // better.
- return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
- (NumDests == 3 && NumCmps >= 6);
- }
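// Worked numbers for the two removed heuristics above: a switch with 40
// cases spread over a range of 100 has 40% density, so with MinDensity = 40
// the jump-table test (40 * 100 >= 100 * 40) just passes; and a switch with
// 2 unique destinations needing 5 case-value comparisons over a word-sized
// range satisfies (NumDests == 2 && NumCmps >= 5), so it is lowered with
// bit tests instead of a comparison chain.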
-
/// Return true if the specified operation is illegal on this target or
/// unlikely to be made legal with custom lowering. This is used to help guide
/// high-level lowering decisions.
@@ -889,20 +594,21 @@ public:
unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
- unsigned Shift = 4 * ExtType;
- return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
+ return LoadExtActions[ValI][MemI][ExtType];
}
/// Return true if the specified load with extension is legal on this target.
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
- return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
+ return ValVT.isSimple() && MemVT.isSimple() &&
+ getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
}
/// Return true if the specified load with extension is legal or custom
/// on this target.
bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
- return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
- getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
+ return ValVT.isSimple() && MemVT.isSimple() &&
+ (getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
+ getLoadExtAction(ExtType, ValVT, MemVT) == Custom);
}
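// Usage sketch (target and types illustrative): the target constructor
// records that an i8->i32 zero-extending load is natively supported, and
// DAG combines later query that fact before folding an extension into a
// load:

// in MyTargetLowering's constructor
setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i8, Legal);

// in a DAG combine
if (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MVT::i32, MVT::i8)) {
  // safe to fold (zext (load i8)) into a single extending load
}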
/// Return how this store with truncation should be treated: either it is
@@ -920,15 +626,8 @@ public:
/// Return true if the specified store with truncation is legal on this
/// target.
bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
- return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
- }
-
- /// Return true if the specified store with truncation has solution on this
- /// target.
- bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
- return isTypeLegal(ValVT) &&
- (getTruncStoreAction(ValVT, MemVT) == Legal ||
- getTruncStoreAction(ValVT, MemVT) == Custom);
+ return isTypeLegal(ValVT) && MemVT.isSimple() &&
+ getTruncStoreAction(ValVT.getSimpleVT(), MemVT.getSimpleVT()) == Legal;
}
/// Return how the indexed load should be treated: either it is legal, needs
@@ -973,7 +672,7 @@ public:
LegalizeAction
getCondCodeAction(ISD::CondCode CC, MVT VT) const {
assert((unsigned)CC < array_lengthof(CondCodeActions) &&
- ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) &&
+ ((unsigned)VT.SimpleTy >> 4) < array_lengthof(CondCodeActions[0]) &&
"Table isn't big enough!");
// See setCondCodeAction for how this is encoded.
uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
@@ -990,6 +689,7 @@ public:
getCondCodeAction(CC, VT) == Custom;
}
+
/// If the action for this operation is to promote, this method returns the
/// ValueType to promote to.
MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
@@ -1106,33 +806,6 @@ public:
llvm_unreachable("Unsupported extended type!");
}
- /// Certain combinations of ABIs, Targets and features require that types
- /// are legal for some operations and not for other operations.
- /// For MIPS all vector types must be passed through the integer register set.
- virtual MVT getRegisterTypeForCallingConv(MVT VT) const {
- return getRegisterType(VT);
- }
-
- virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
- EVT VT) const {
- return getRegisterType(Context, VT);
- }
-
- /// Certain targets require unusual breakdowns of certain types. For MIPS,
- /// this occurs when a vector type is used, as vector are passed through the
- /// integer register set.
- virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
- EVT VT) const {
- return getNumRegisters(Context, VT);
- }
-
- /// Certain targets have context-sensitive alignment requirements, where one
- /// type has the alignment requirement of another type.
- virtual unsigned getABIAlignmentForCallingConv(Type *ArgTy,
- DataLayout DL) const {
- return DL.getABITypeAlignment(ArgTy);
- }
-
/// If true, then instruction selection should seek to shrink the FP constant
/// of the specified type to a smaller type in order to save space and / or
/// reduce runtime.
@@ -1166,11 +839,6 @@ public:
return GatherAllAliasesMaxDepth;
}
- /// Returns the size of the platform's va_list object.
- virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
- return getPointerTy(DL).getSizeInBits();
- }
-
/// \brief Get maximum # of store operations permitted for llvm.memset
///
/// This function returns the maximum number of store operations permitted
@@ -1191,16 +859,6 @@ public:
return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
}
- /// Get maximum # of load operations permitted for memcmp
- ///
- /// This function returns the maximum number of load operations permitted
- /// to replace a call to memcmp. The value is set by the target at the
- /// performance threshold for such a replacement. If OptSize is true,
- /// return the limit for functions that have OptSize attribute.
- unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
- return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
- }
-
/// \brief Get maximum # of store operations permitted for llvm.memmove
///
/// This function returns the maximum number of store operations permitted
@@ -1274,18 +932,10 @@ public:
return UseUnderscoreLongJmp;
}
- /// Return lower limit for number of blocks in a jump table.
- unsigned getMinimumJumpTableEntries() const;
-
- /// Return lower limit of the density in a jump table.
- unsigned getMinimumJumpTableDensity(bool OptForSize) const;
-
- /// Return upper limit for number of entries in a jump table.
- /// Zero if no limit.
- unsigned getMaximumJumpTableSize() const;
-
- virtual bool isJumpTableRelative() const {
- return TM.isPositionIndependent();
+ /// Return the integer threshold on the number of blocks at which to use
+ /// jump tables rather than an if sequence.
+ int getMinimumJumpTableEntries() const {
+ return MinimumJumpTableEntries;
}
/// If a physical register, this specifies the register that
@@ -1310,10 +960,6 @@ public:
return 0;
}
- virtual bool needsFixedCatchObjects() const {
- report_fatal_error("Funclet EH is not implemented for this target");
- }
-
/// Returns the target's jmp_buf size in bytes (if never set, the default is
/// 200)
unsigned getJumpBufSize() const {
@@ -1346,52 +992,29 @@ public:
return PrefLoopAlignment;
}
- /// If the target has a standard location for the stack protector guard,
- /// returns the address of that location. Otherwise, returns nullptr.
- /// DEPRECATED: please override useLoadStackGuardNode and customize
- /// LOAD_STACK_GUARD, or customize @llvm.stackguard().
- virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;
-
- /// Inserts necessary declarations for SSP (stack protection) purpose.
- /// Should be used only when getIRStackGuard returns nullptr.
- virtual void insertSSPDeclarations(Module &M) const;
-
- /// Return the variable that's previously inserted by insertSSPDeclarations,
- /// if any, otherwise return nullptr. Should be used only when
- /// getIRStackGuard returns nullptr.
- virtual Value *getSDagStackGuard(const Module &M) const;
-
- /// If the target has a standard stack protection check function that
- /// performs validation and error handling, returns the function. Otherwise,
- /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
- /// Should be used only when getIRStackGuard returns nullptr.
- virtual Value *getSSPStackGuardCheck(const Module &M) const;
+ /// Return whether the DAG builder should automatically insert fences and
+ /// reduce ordering for atomics.
+ bool getInsertFencesForAtomic() const {
+ return InsertFencesForAtomic;
+ }
-protected:
- Value *getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
- bool UseTLS) const;
+ /// Return true if the target stores stack protector cookies at a fixed offset
+ /// in some non-standard address space, and populates the address space and
+ /// offset as appropriate.
+ virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
+ unsigned &/*Offset*/) const {
+ return false;
+ }
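// A sketch modeled on the x86-64 override, where the cookie sits in a
// TLS-relative slot (%fs:0x28, encoded as LLVM address space 257):

bool MyTargetLowering::getStackCookieLocation(unsigned &AddressSpace,
                                              unsigned &Offset) const {
  AddressSpace = 257; // %fs-relative on x86-64
  Offset = 0x28;      // glibc TCB stack_guard slot
  return true;
}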
-public:
- /// Returns the target-specific address of the unsafe stack pointer.
+ /// If the target has a standard location for the unsafe stack pointer,
+ /// returns the address of that location. Otherwise, returns nullptr.
virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;
- /// Returns the name of the symbol used to emit stack probes or the empty
- /// string if not applicable.
- virtual StringRef getStackProbeSymbolName(MachineFunction &MF) const {
- return "";
- }
-
/// Returns true if a cast between SrcAS and DestAS is a noop.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
return false;
}
- /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
- /// are happy to sink it into basic blocks.
- virtual bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
- return isNoopAddrSpaceCast(SrcAS, DestAS);
- }
-
/// Return true if the pointer arguments to CI should be aligned by aligning
/// the object whose address is being passed. If so then MinSize is set to the
/// minimum size the object must be to be aligned and PrefAlign is set to the
@@ -1418,30 +1041,6 @@ public:
/// \name Helpers for atomic expansion.
/// @{
- /// Returns the maximum atomic operation size (in bits) supported by
- /// the backend. Atomic operations greater than this size (as well
- /// as ones that are not naturally aligned), will be expanded by
- /// AtomicExpandPass into an __atomic_* library call.
- unsigned getMaxAtomicSizeInBitsSupported() const {
- return MaxAtomicSizeInBitsSupported;
- }
-
- /// Returns the size of the smallest cmpxchg or ll/sc instruction
- /// the backend supports. Any smaller operations are widened in
- /// AtomicExpandPass.
- ///
- /// Note that *unlike* operations above the maximum size, atomic ops
- /// are still natively supported below the minimum; they just
- /// require a more complex expansion.
- unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
-
- /// Whether AtomicExpandPass should automatically insert fences and reduce
- /// ordering for this atomic. This should be true for most architectures with
- /// weak memory ordering. Defaults to false.
- virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
- return false;
- }
-
/// Perform a load-linked operation on Addr, returning a "Value *" with the
/// corresponding pointee type. This may entail some non-trivial operations to
/// truncate or reconstruct types that will be illegal in the backend. See
@@ -1460,15 +1059,12 @@ public:
/// Inserts in the IR a target-specific intrinsic specifying a fence.
/// It is called by AtomicExpandPass before expanding an
- /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
- /// if shouldInsertFencesForAtomic returns true.
- ///
- /// Inst is the original atomic instruction, prior to other expansions that
- /// may be performed.
- ///
+ /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad.
+ /// RMW and CmpXchg set both IsStore and IsLoad to true.
/// This function should either return a nullptr, or a pointer to an IR-level
/// Instruction*. Even complex fence sequences can be represented by a
/// single Instruction* through an intrinsic to be lowered later.
+ /// Backends with !getInsertFencesForAtomic() should keep a no-op here.
/// Backends should override this method to produce target-specific intrinsic
/// for their fences.
/// FIXME: Please note that the default implementation here in terms of
@@ -1491,18 +1087,25 @@ public:
/// seq_cst. But if they are lowered to monotonic accesses, no amount of
/// IR-level fences can prevent it.
/// @{
- virtual Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
- AtomicOrdering Ord) const {
- if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
+ virtual Instruction *emitLeadingFence(IRBuilder<> &Builder,
+ AtomicOrdering Ord, bool IsStore,
+ bool IsLoad) const {
+ if (!getInsertFencesForAtomic())
+ return nullptr;
+
+ if (isAtLeastRelease(Ord) && IsStore)
return Builder.CreateFence(Ord);
else
return nullptr;
}
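// A sketch of a weakly-ordered target overriding the leading fence with its
// own barrier intrinsic (MyTargetLowering and mytgt_barrier are
// hypothetical; a real backend may also want the IsStore/IsLoad checks the
// default implementation applies):

Instruction *MyTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                AtomicOrdering Ord,
                                                bool IsStore,
                                                bool IsLoad) const {
  if (!getInsertFencesForAtomic() || !isAtLeastRelease(Ord))
    return nullptr;
  Module *M = Builder.GetInsertBlock()->getModule();
  return Builder.CreateCall(
      Intrinsic::getDeclaration(M, Intrinsic::mytgt_barrier));
}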
virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
- Instruction *Inst,
- AtomicOrdering Ord) const {
- if (isAcquireOrStronger(Ord))
+ AtomicOrdering Ord, bool IsStore,
+ bool IsLoad) const {
+ if (!getInsertFencesForAtomic())
+ return nullptr;
+
+ if (isAtLeastAcquire(Ord))
return Builder.CreateFence(Ord);
else
return nullptr;
@@ -1563,14 +1166,6 @@ public:
return nullptr;
}
- /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
- /// SIGN_EXTEND, or ANY_EXTEND).
- virtual ISD::NodeType getExtendForAtomicOps() const {
- return ISD::ZERO_EXTEND;
- }
-
- /// @}
-
/// Returns true if we should normalize
/// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
/// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
@@ -1589,13 +1184,6 @@ public:
Action != TypeSplitVector;
}
- /// Return true if a select of constants (select Cond, C1, C2) should be
- /// transformed into simple math ops with the condition value. For example:
- /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
- virtual bool convertSelectOfConstantsToMath() const {
- return false;
- }
-
//===--------------------------------------------------------------------===//
// TargetLowering Configuration Methods - These methods should be invoked by
// the derived class constructor to configure this object for the target.
@@ -1638,12 +1226,11 @@ protected:
UseUnderscoreLongJmp = Val;
}
- /// Indicate the minimum number of blocks to generate jump tables.
- void setMinimumJumpTableEntries(unsigned Val);
-
- /// Indicate the maximum number of entries in jump tables.
- /// Set to zero to generate unlimited jump tables.
- void setMaximumJumpTableSize(unsigned);
+ /// Indicate the number of blocks at which to generate jump tables rather
+ /// than an if sequence.
+ void setMinimumJumpTableEntries(int Val) {
+ MinimumJumpTableEntries = Val;
+ }
/// If set to a physical register, this specifies the register that
/// llvm.savestack/llvm.restorestack should save and restore.
@@ -1651,6 +1238,12 @@ protected:
StackPointerRegisterToSaveRestore = R;
}
+ /// Tells the code generator to avoid expanding operations into sequences
+ /// that use select operations, where possible.
+ void setSelectIsExpensive(bool isExpensive = true) {
+ SelectIsExpensive = isExpensive;
+ }
+
/// Tells the code generator that the target has multiple (allocatable)
/// condition registers that can be used to store the results of comparisons
/// for use by selects and conditional branches. With multiple condition
@@ -1673,6 +1266,10 @@ protected:
/// control.
void setJumpIsExpensive(bool isExpensive = true);
+ /// Tells the code generator that fsqrt is cheap, and should not be replaced
+ /// with an alternative sequence of instructions.
+ void setFsqrtIsCheap(bool isCheap = true) { FsqrtIsCheap = isCheap; }
+
/// Tells the code generator that this target supports floating point
/// exceptions and cares about preserving floating point exception behavior.
void setHasFloatingPointExceptions(bool FPExceptions = true) {
@@ -1689,9 +1286,21 @@ protected:
/// that class natively.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
+ AvailableRegClasses.push_back(std::make_pair(VT, RC));
RegClassForVT[VT.SimpleTy] = RC;
}
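// Typical constructor usage (register class names hypothetical): register
// each legal type, then let computeRegisterProperties() derive the rest:

addRegisterClass(MVT::i32, &MyTgt::GPR32RegClass);
addRegisterClass(MVT::f32, &MyTgt::FPR32RegClass);
computeRegisterProperties(Subtarget.getRegisterInfo());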
+ /// Remove all register classes.
+ void clearRegisterClasses() {
+ std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
+
+ AvailableRegClasses.clear();
+ }
+
+ /// \brief Remove all operation actions.
+ void clearOperationActions() {
+ }
+
/// Return the largest legal super-reg register class of the register class
/// for the specified type and its associated "cost".
virtual std::pair<const TargetRegisterClass *, uint8_t>
@@ -1702,8 +1311,7 @@ protected:
void computeRegisterProperties(const TargetRegisterInfo *TRI);
/// Indicate that the specified operation does not work with the specified
- /// type and indicate what to do about it. Note that VT may refer to either
- /// the type of a result or that of an operand of Op.
+ /// type and indicate what to do about it.
void setOperationAction(unsigned Op, MVT VT,
LegalizeAction Action) {
assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
@@ -1716,10 +1324,7 @@ protected:
LegalizeAction Action) {
assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
MemVT.isValid() && "Table isn't big enough!");
- assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
- unsigned Shift = 4 * ExtType;
- LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
- LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
+ LoadExtActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy][ExtType] = Action;
}
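// Worked example of the packing removed above: with Shift = 4 * ExtType,
// ISD::SEXTLOAD (ExtType 2) lives in bits [8,11] of the 16-bit entry, so a
// write clears (0xF << 8) and ORs in (Action << 8), and getLoadExtAction()
// reads it back with (entry >> 8) & 0xf.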
/// Indicate that the specified truncating store does not work with the
@@ -1781,13 +1386,6 @@ protected:
PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
}
- /// Convenience method to set an operation to Promote and specify the type
- /// in a single call.
- void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
- setOperationAction(Opc, OrigVT, Promote);
- AddPromotedToType(Opc, OrigVT, DestVT);
- }
-
/// Targets should invoke this method for each target independent node that
/// they want to provide a custom DAG combiner for by implementing the
/// PerformDAGCombine virtual method.
@@ -1832,17 +1430,10 @@ protected:
MinStackArgumentAlignment = Align;
}
- /// Set the maximum atomic operation size supported by the
- /// backend. Atomic operations greater than this size (as well as
- /// ones that are not naturally aligned), will be expanded by
- /// AtomicExpandPass into an __atomic_* library call.
- void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
- MaxAtomicSizeInBitsSupported = SizeInBits;
- }
-
- // Sets the minimum cmpxchg or ll/sc size supported by the backend.
- void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
- MinCmpXchgSizeInBits = SizeInBits;
+ /// Set if the DAG builder should automatically insert fences and reduce the
+ /// order of atomic memory operations to Monotonic.
+ void setInsertFencesForAtomic(bool fence) {
+ InsertFencesForAtomic = fence;
}
public:
@@ -1855,9 +1446,10 @@ public:
/// possible to be done in the address mode for that operand. This hook lets
/// targets also pass back when this should be done on intrinsics which
/// load/store.
- virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
+ virtual bool GetAddrModeArguments(IntrinsicInst * /*I*/,
SmallVectorImpl<Value*> &/*Ops*/,
- Type *&/*AccessTy*/) const {
+ Type *&/*AccessTy*/,
+ unsigned AddrSpace = 0) const {
return false;
}
@@ -1869,11 +1461,11 @@ public:
/// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
/// no scale.
struct AddrMode {
- GlobalValue *BaseGV = nullptr;
- int64_t BaseOffs = 0;
- bool HasBaseReg = false;
- int64_t Scale = 0;
- AddrMode() = default;
+ GlobalValue *BaseGV;
+ int64_t BaseOffs;
+ bool HasBaseReg;
+ int64_t Scale;
+ AddrMode() : BaseGV(nullptr), BaseOffs(0), HasBaseReg(false), Scale(0) {}
};
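// Example: the x86-style address "base + 4*index + 12" is described by
// (BaseGV = nullptr, BaseOffs = 12, HasBaseReg = true, Scale = 4). A target
// override sketch, assuming this era's isLegalAddressingMode() signature
// and a hypothetical machine with reg+imm16 addressing only:

bool MyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS) const {
  if (AM.Scale != 0 || AM.BaseGV) // no scaled index, no global base
    return false;
  return isInt<16>(AM.BaseOffs);  // 16-bit signed displacement only
}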
/// Return true if the addressing mode represented by AM is legal for this
@@ -1904,10 +1496,6 @@ public:
return -1;
}
- virtual bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) const {
- return true;
- }
-
/// Return true if the specified immediate is a legal icmp immediate, that is
/// the target has icmp instructions which can compare a register against the
/// immediate without having to materialize the immediate into a register.
@@ -1930,38 +1518,6 @@ public:
return false;
}
- /// Returns true if the opcode is a commutative binary operation.
- virtual bool isCommutativeBinOp(unsigned Opcode) const {
- // FIXME: This should get its info from the td file.
- switch (Opcode) {
- case ISD::ADD:
- case ISD::SMIN:
- case ISD::SMAX:
- case ISD::UMIN:
- case ISD::UMAX:
- case ISD::MUL:
- case ISD::MULHU:
- case ISD::MULHS:
- case ISD::SMUL_LOHI:
- case ISD::UMUL_LOHI:
- case ISD::FADD:
- case ISD::FMUL:
- case ISD::AND:
- case ISD::OR:
- case ISD::XOR:
- case ISD::SADDO:
- case ISD::UADDO:
- case ISD::ADDC:
- case ISD::ADDE:
- case ISD::FMINNUM:
- case ISD::FMAXNUM:
- case ISD::FMINNAN:
- case ISD::FMAXNAN:
- return true;
- default: return false;
- }
- }
-
/// Return true if it's free to truncate a value of type FromTy to type
/// ToTy. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
/// by referencing its sub-register AX.
@@ -2012,35 +1568,6 @@ public:
return isExtFreeImpl(I);
}
- /// Return true if \p Load and \p Ext can form an ExtLoad.
- /// For example, in AArch64
- /// %L = load i8, i8* %ptr
- /// %E = zext i8 %L to i32
- /// can be lowered into one load instruction
- /// ldrb w0, [x0]
- bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
- const DataLayout &DL) const {
- EVT VT = getValueType(DL, Ext->getType());
- EVT LoadVT = getValueType(DL, Load->getType());
-
- // If the load has other users and the truncate is not free, the ext
- // probably isn't free.
- if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
- !isTruncateFree(Ext->getType(), Load->getType()))
- return false;
-
- // Check whether the target supports casts folded into loads.
- unsigned LType;
- if (isa<ZExtInst>(Ext))
- LType = ISD::ZEXTLOAD;
- else {
- assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
- LType = ISD::SEXTLOAD;
- }
-
- return isLoadExtLegal(LType, VT, LoadVT);
- }
-
/// Return true if any actual instruction that defines a value of type FromTy
/// implicitly zero-extends the value to ToTy in the result register.
///
@@ -2080,8 +1607,13 @@ public:
/// In other words, unless the target performs a post-isel load combining,
/// this information should not be provided because it will generate more
/// loads.
+ virtual bool hasPairedLoad(Type * /*LoadedType*/,
+ unsigned & /*RequiredAlignment*/) const {
+ return false;
+ }
+
virtual bool hasPairedLoad(EVT /*LoadedType*/,
- unsigned & /*RequiredAlignment*/) const {
+ unsigned & /*RequiredAligment*/) const {
return false;
}
@@ -2226,15 +1758,13 @@ public:
return LibcallCallingConvs[Call];
}
- /// Execute target specific actions to finalize target lowering.
- /// This is used to set extra flags in MachineFrameInformation and freezing
- /// the set of reserved registers.
- /// The default implementation just freezes the set of reserved registers.
- virtual void finalizeLowering(MachineFunction &MF) const;
-
private:
const TargetMachine &TM;
+ /// Tells the code generator to avoid expanding operations into sequences
+ /// that use select operations, where possible.
+ bool SelectIsExpensive;
+
/// Tells the code generator that the target has multiple (allocatable)
/// condition registers that can be used to store the results of comparisons
/// for use by selects and conditional branches. With multiple condition
@@ -2248,6 +1778,9 @@ private:
/// combined with "shift" to BitExtract instructions.
bool HasExtractBitsInsn;
+ // Don't expand fsqrt with an approximation based on the inverse sqrt.
+ bool FsqrtIsCheap;
+
/// Tells the code generator to bypass slow divide or remainder
/// instructions. For example, BypassSlowDivWidths[32,8] tells the code
/// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
@@ -2273,6 +1806,9 @@ private:
/// Defaults to false.
bool UseUnderscoreLongJmp;
+ /// Threshold number of blocks above which jump tables are used.
+ int MinimumJumpTableEntries;
+
/// Information about the contents of the high-bits in boolean values held in
/// a type wider than i1. See getBooleanContents.
BooleanContent BooleanContents;
@@ -2309,13 +1845,10 @@ private:
/// The preferred loop alignment.
unsigned PrefLoopAlignment;
- /// Size in bits of the maximum atomics size the backend supports.
- /// Accesses larger than this will be expanded by AtomicExpandPass.
- unsigned MaxAtomicSizeInBitsSupported;
-
- /// Size in bits of the minimum cmpxchg or ll/sc operation the
- /// backend supports.
- unsigned MinCmpXchgSizeInBits;
+ /// Whether the DAG builder should automatically insert fences and reduce
+ /// ordering for atomics. (This will be set for most architectures with
+ /// weak memory ordering.)
+ bool InsertFencesForAtomic;
/// If set to a physical register, this specifies the register that
/// llvm.savestack/llvm.restorestack should save and restore.
@@ -2356,9 +1889,9 @@ private:
/// For each load extension type and each value type, keep a LegalizeAction
/// that indicates how instruction selection should deal with a load of a
- /// specific value type and extension type. Uses 4-bits to store the action
- /// for each of the 4 load ext types.
- uint16_t LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
+ /// specific value type and extension type.
+ LegalizeAction LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE]
+ [ISD::LAST_LOADEXT_TYPE];
/// For each value type pair keep a LegalizeAction that indicates whether a
/// truncating store of a specific value type and truncating type is legal.
@@ -2386,6 +1919,9 @@ protected:
private:
LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
+private:
+ std::vector<std::pair<MVT, const TargetRegisterClass*> > AvailableRegClasses;
+
/// Targets can specify ISD nodes that they would like PerformDAGCombine
/// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
/// array.
@@ -2418,7 +1954,7 @@ protected:
virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
/// Depth that GatherAllAliases should continue looking for chain
- /// dependencies when trying to find a more preferable chain. As an
+ /// dependencies when trying to find a more preferrable chain. As an
/// approximation, this should be more than the number of consecutive stores
/// expected to be merged.
unsigned GatherAllAliasesMaxDepth;
@@ -2455,8 +1991,6 @@ protected:
/// Maximum number of store operations that may be substituted for a call to
/// memcpy, used for functions with OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize;
- unsigned MaxLoadsPerMemcmp;
- unsigned MaxLoadsPerMemcmpOptSize;
/// \brief Specify maximum bytes of store instructions per memmove call.
///
@@ -2478,17 +2012,21 @@ protected:
/// the branch is usually predicted right.
bool PredictableSelectIsExpensive;
+ /// MaskAndBranchFoldingIsLegal - Indicates if the target supports folding
+ /// a mask of a single bit, a compare, and a branch into a single instruction.
+ bool MaskAndBranchFoldingIsLegal;
+
/// \see enableExtLdPromotion.
bool EnableExtLdPromotion;
+protected:
/// Return true if the value types that can be represented by the specified
/// register class are all legal.
- bool isLegalRC(const TargetRegisterInfo &TRI,
- const TargetRegisterClass &RC) const;
+ bool isLegalRC(const TargetRegisterClass *RC) const;
/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
- MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
+ MachineBasicBlock *emitPatchPoint(MachineInstr *MI,
MachineBasicBlock *MBB) const;
};
@@ -2498,17 +2036,13 @@ protected:
/// This class also defines callbacks that targets must implement to lower
/// target-specific constructs to SelectionDAG operators.
class TargetLowering : public TargetLoweringBase {
-public:
- struct DAGCombinerInfo;
-
- TargetLowering(const TargetLowering &) = delete;
- TargetLowering &operator=(const TargetLowering &) = delete;
+ TargetLowering(const TargetLowering&) = delete;
+ void operator=(const TargetLowering&) = delete;
+public:
/// NOTE: The TargetMachine owns TLOF.
explicit TargetLowering(const TargetMachine &TM);
- bool isPositionIndependent() const;
-
/// Returns true by value, base pointer and offset pointer and addressing mode
/// by reference if the node's address can be legally represented as
/// pre-indexed load / store address.
@@ -2558,26 +2092,18 @@ public:
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
SDValue &Chain) const;
- void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
- SDValue &NewRHS, ISD::CondCode &CCCode,
- const SDLoc &DL) const;
+ void softenSetCCOperands(SelectionDAG &DAG, EVT VT,
+ SDValue &NewLHS, SDValue &NewRHS,
+ ISD::CondCode &CCCode, SDLoc DL) const;
/// Returns a pair of (return value, chain).
/// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
EVT RetVT, ArrayRef<SDValue> Ops,
- bool isSigned, const SDLoc &dl,
+ bool isSigned, SDLoc dl,
bool doesNotReturn = false,
bool isReturnValueUsed = true) const;
- /// Check whether parameters to a call that are passed in callee saved
- /// registers are the same as from the calling function. This needs to be
- /// checked for tail call eligibility.
- bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
- const uint32_t *CallerPreservedMask,
- const SmallVectorImpl<CCValAssign> &ArgLocs,
- const SmallVectorImpl<SDValue> &OutVals) const;
-
//===--------------------------------------------------------------------===//
// TargetLowering Optimization Methods
//
@@ -2604,38 +2130,19 @@ public:
New = N;
return true;
}
- };
-
- /// Check to see if the specified operand of the specified instruction is a
- /// constant integer. If so, check to see if there are any bits set in the
- /// constant that are not demanded. If so, shrink the constant and return
- /// true.
- bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
- TargetLoweringOpt &TLO) const;
-
- // Target hook to do target-specific const optimization, which is called by
- // ShrinkDemandedConstant. This function should return true if the target
- // doesn't want ShrinkDemandedConstant to further optimize the constant.
- virtual bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
- TargetLoweringOpt &TLO) const {
- return false;
- }
- /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
- /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
- /// generalized for targets with other types of implicit widening casts.
- bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
- TargetLoweringOpt &TLO) const;
-
- /// Helper for SimplifyDemandedBits that can simplify an operation with
- /// multiple uses. This function simplifies operand \p OpIdx of \p User and
- /// then updates \p User with the simplified version. No other uses of
- /// \p OpIdx are updated. If \p User is the only user of \p OpIdx, this
- /// function behaves exactly like function SimplifyDemandedBits declared
- /// below except that it also updates the DAG by calling
- /// DCI.CommitTargetLoweringOpt.
- bool SimplifyDemandedBits(SDNode *User, unsigned OpIdx, const APInt &Demanded,
- DAGCombinerInfo &DCI, TargetLoweringOpt &TLO) const;
+ /// Check to see if the specified operand of the specified instruction is a
+ /// constant integer. If so, check to see if there are any bits set in the
+ /// constant that are not demanded. If so, shrink the constant and return
+ /// true.
+ bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
+
+ /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
+ /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
+ /// generalized for targets with other types of implicit widening casts.
+ bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
+ SDLoc dl);
+ };
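
The demanded-bits idea behind ShrinkDemandedConstant is easiest to see with
plain integers; a minimal sketch, not the in-tree APInt implementation:

    #include <cstdint>

    // If the consumer of an AND/OR only reads the bits in DemandedMask, the
    // set bits of C outside that mask can never affect the observed result,
    // so clearing them may expose a cheaper immediate encoding.
    uint64_t shrinkDemandedConstant(uint64_t C, uint64_t DemandedMask) {
      return C & DemandedMask;
    }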
/// Look at Op. At this point, we know that only the DemandedMask bits of the
/// result of Op are ever used downstream. If we can use this information to
@@ -2644,38 +2151,21 @@ public:
/// expression and return a mask of KnownOne and KnownZero bits for the
/// expression (used to simplify the caller). The KnownZero/One bits may only
/// be accurate for those bits in the DemandedMask.
- /// \p AssumeSingleUse When this parameter is true, this function will
- /// attempt to simplify \p Op even if there are multiple uses.
- /// Callers are responsible for correctly updating the DAG based on the
- /// results of this function, because simply replacing TLO.Old
- /// with TLO.New will be incorrect when this parameter is true and TLO.Old
- /// has multiple uses.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
- KnownBits &Known,
- TargetLoweringOpt &TLO,
- unsigned Depth = 0,
- bool AssumeSingleUse = false) const;
-
- /// Helper wrapper around SimplifyDemandedBits
- bool SimplifyDemandedBits(SDValue Op, APInt &DemandedMask,
- DAGCombinerInfo &DCI) const;
+ APInt &KnownZero, APInt &KnownOne,
+ TargetLoweringOpt &TLO, unsigned Depth = 0) const;
/// Determine which of the bits specified in Mask are known to be either zero
- /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
- /// argument allows us to only collect the known bits that are shared by the
- /// requested vector elements.
+ /// or one and return them in the KnownZero/KnownOne bitsets.
virtual void computeKnownBitsForTargetNode(const SDValue Op,
- KnownBits &Known,
- const APInt &DemandedElts,
+ APInt &KnownZero,
+ APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth = 0) const;
/// This method can be implemented by targets that want to expose additional
- /// information about sign bits to the DAG Combiner. The DemandedElts
- /// argument allows us to only collect the minimum sign bits that are shared
- /// by the requested vector elements.
+ /// information about sign bits to the DAG Combiner.
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
- const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth = 0) const;
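
As a concrete illustration of the KnownZero/KnownOne contract (a sketch with
fixed-width integers in place of APInt): after zero-extending an 8-bit value
to 32 bits, the upper 24 bits are known zero and the low byte keeps whatever
was already known about it.

    #include <cstdint>

    struct Known { uint32_t Zero = 0, One = 0; }; // bit set => bit is known

    Known zext8To32(Known In) {
      Known Out;
      Out.One  = In.One & 0xFFu;             // known-one bits carry over
      Out.Zero = (In.Zero & 0xFFu) | ~0xFFu; // high 24 bits become known zero
      return Out;
    }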
@@ -2683,7 +2173,6 @@ public:
void *DC; // The DAG Combiner object.
CombineLevel Level;
bool CalledByLegalizer;
-
public:
SelectionDAG &DAG;
@@ -2699,6 +2188,7 @@ public:
bool isCalledByLegalizer() const { return CalledByLegalizer; }
void AddToWorklist(SDNode *N);
+ void RemoveFromWorklist(SDNode *N);
SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
@@ -2714,18 +2204,11 @@ public:
/// from getBooleanContents().
bool isConstFalseVal(const SDNode *N) const;
- /// Return a constant of type VT that contains a true value that respects
- /// getBooleanContents()
- SDValue getConstTrueVal(SelectionDAG &DAG, EVT VT, const SDLoc &DL) const;
-
- /// Return if \p N is a True value when extended to \p VT.
- bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool Signed) const;
-
/// Try to simplify a setcc built with the specified operands and cc. If it is
/// unable to simplify it, return a null SDValue.
- SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
- bool foldBooleans, DAGCombinerInfo &DCI,
- const SDLoc &dl) const;
+ SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
+ ISD::CondCode Cond, bool foldBooleans,
+ DAGCombinerInfo &DCI, SDLoc dl) const;
/// Returns true (and the GlobalValue and the offset) if the node is a
/// GlobalAddress + offset.
@@ -2752,22 +2235,10 @@ public:
// This transformation may not be desirable if it disrupts a particularly
// auspicious target-specific tree (e.g. bitfield extraction in AArch64).
// By default, it returns true.
- virtual bool isDesirableToCommuteWithShift(const SDNode *N) const {
+ virtual bool isDesirableToCommuteWithShift(const SDNode *N /*Op*/) const {
return true;
}
- // Return true if it is profitable to combine a BUILD_VECTOR to a TRUNCATE.
- // Example of such a combine:
- // v4i32 build_vector((extract_elt V, 0),
- // (extract_elt V, 2),
- // (extract_elt V, 4),
- // (extract_elt V, 6))
- // -->
- // v4i32 truncate (bitcast V to v4i64)
- virtual bool isDesirableToCombineBuildVectorToTruncate() const {
- return false;
- }
-
/// Return true if the target has native support for the specified value type
/// and it is 'desirable' to use the type for the given node type. e.g. On x86
/// i16 is legal, but undesirable since i16 instruction encodings are longer
@@ -2792,18 +2263,18 @@ public:
return false;
}
- /// Return true if the target supports swifterror attribute. It optimizes
- /// loads and stores to reading and writing a specific register.
- virtual bool supportSwiftError() const {
- return false;
- }
-
/// Return true if the target supports that a subset of CSRs for the given
/// machine function is handled explicitly via copies.
virtual bool supportSplitCSR(MachineFunction *MF) const {
return false;
}
+ /// Return true if the MachineFunction contains a COPY which would imply
+ /// HasCopyImplyingStackAdjustment.
+ virtual bool hasCopyImplyingStackAdjustment(MachineFunction *MF) const {
+ return false;
+ }
+
/// Perform necessary initialization to handle a subset of CSRs explicitly
/// via copies. This function is called at the beginning of instruction
/// selection.
@@ -2830,54 +2301,74 @@ public:
/// described by the Ins array, into the specified DAG. The implementation
/// should fill in the InVals array with legal-type argument values, and
/// return the resulting token chain value.
- virtual SDValue LowerFormalArguments(
- SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
- const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
- SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
+ ///
+ virtual SDValue
+ LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
+ bool /*isVarArg*/,
+ const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
+ SDLoc /*dl*/, SelectionDAG &/*DAG*/,
+ SmallVectorImpl<SDValue> &/*InVals*/) const {
llvm_unreachable("Not Implemented");
}
+ struct ArgListEntry {
+ SDValue Node;
+ Type* Ty;
+ bool isSExt : 1;
+ bool isZExt : 1;
+ bool isInReg : 1;
+ bool isSRet : 1;
+ bool isNest : 1;
+ bool isByVal : 1;
+ bool isInAlloca : 1;
+ bool isReturned : 1;
+ uint16_t Alignment;
+
+ ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
+ isSRet(false), isNest(false), isByVal(false), isInAlloca(false),
+ isReturned(false), Alignment(0) { }
+
+ void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx);
+ };
+ typedef std::vector<ArgListEntry> ArgListTy;
+
/// This structure contains all information that is necessary for lowering
/// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
/// needs to lower a call, and targets will see this struct in their LowerCall
/// implementation.
struct CallLoweringInfo {
SDValue Chain;
- Type *RetTy = nullptr;
+ Type *RetTy;
bool RetSExt : 1;
bool RetZExt : 1;
bool IsVarArg : 1;
bool IsInReg : 1;
bool DoesNotReturn : 1;
bool IsReturnValueUsed : 1;
- bool IsConvergent : 1;
- bool IsPatchPoint : 1;
// IsTailCall should be modified by implementations of
// TargetLowering::LowerCall that perform tail call conversions.
- bool IsTailCall = false;
-
- // Is Call lowering done post SelectionDAG type legalization.
- bool IsPostTypeLegalization = false;
+ bool IsTailCall;
- unsigned NumFixedArgs = -1;
- CallingConv::ID CallConv = CallingConv::C;
+ unsigned NumFixedArgs;
+ CallingConv::ID CallConv;
SDValue Callee;
ArgListTy Args;
SelectionDAG &DAG;
SDLoc DL;
- ImmutableCallSite *CS = nullptr;
+ ImmutableCallSite *CS;
+ bool IsPatchPoint;
SmallVector<ISD::OutputArg, 32> Outs;
SmallVector<SDValue, 32> OutVals;
SmallVector<ISD::InputArg, 32> Ins;
- SmallVector<SDValue, 4> InVals;
CallLoweringInfo(SelectionDAG &DAG)
- : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
- DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
- IsPatchPoint(false), DAG(DAG) {}
+ : RetTy(nullptr), RetSExt(false), RetZExt(false), IsVarArg(false),
+ IsInReg(false), DoesNotReturn(false), IsReturnValueUsed(true),
+ IsTailCall(false), NumFixedArgs(-1), CallConv(CallingConv::C),
+ DAG(DAG), CS(nullptr), IsPatchPoint(false) {}
- CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
+ CallLoweringInfo &setDebugLoc(SDLoc dl) {
DL = dl;
return *this;
}
@@ -2887,26 +2378,14 @@ public:
return *this;
}
- // setCallee with target/module-specific attributes
- CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
- SDValue Target, ArgListTy &&ArgsList) {
- RetTy = ResultType;
- Callee = Target;
- CallConv = CC;
- NumFixedArgs = Args.size();
- Args = std::move(ArgsList);
-
- DAG.getTargetLoweringInfo().markLibCallAttributes(
- &(DAG.getMachineFunction()), CC, Args);
- return *this;
- }
-
CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
- SDValue Target, ArgListTy &&ArgsList) {
+ SDValue Target, ArgListTy &&ArgsList,
+ unsigned FixedArgs = -1) {
RetTy = ResultType;
Callee = Target;
CallConv = CC;
- NumFixedArgs = Args.size();
+ NumFixedArgs =
+ (FixedArgs == static_cast<unsigned>(-1) ? Args.size() : FixedArgs);
Args = std::move(ArgsList);
return *this;
}
@@ -2916,15 +2395,12 @@ public:
ImmutableCallSite &Call) {
RetTy = ResultType;
- IsInReg = Call.hasRetAttr(Attribute::InReg);
- DoesNotReturn =
- Call.doesNotReturn() ||
- (!Call.isInvoke() &&
- isa<UnreachableInst>(Call.getInstruction()->getNextNode()));
+ IsInReg = Call.paramHasAttr(0, Attribute::InReg);
+ DoesNotReturn = Call.doesNotReturn();
IsVarArg = FTy->isVarArg();
IsReturnValueUsed = !Call.getInstruction()->use_empty();
- RetSExt = Call.hasRetAttr(Attribute::SExt);
- RetZExt = Call.hasRetAttr(Attribute::ZExt);
+ RetSExt = Call.paramHasAttr(0, Attribute::SExt);
+ RetZExt = Call.paramHasAttr(0, Attribute::ZExt);
Callee = Target;
@@ -2962,11 +2438,6 @@ public:
return *this;
}
- CallLoweringInfo &setConvergent(bool Value = true) {
- IsConvergent = Value;
- return *this;
- }
-
CallLoweringInfo &setSExtResult(bool Value = true) {
RetSExt = Value;
return *this;
@@ -2982,14 +2453,10 @@ public:
return *this;
}
- CallLoweringInfo &setIsPostTypeLegalization(bool Value=true) {
- IsPostTypeLegalization = Value;
- return *this;
- }
-
ArgListTy &getArgs() {
return Args;
}
+
};
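
A hypothetical use of the fluent setters above, as a target's lowering code
might write it (Chain, Callee, RetTy, Args and dl are assumed to be in
scope, and setChain is one of the setters elided by this hunk):

    TargetLowering::CallLoweringInfo CLI(DAG);
    CLI.setDebugLoc(dl)
       .setChain(Chain)
       .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
    std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
    // CallResult.first is the return value, CallResult.second the chain.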
/// This function lowers an abstract call to a function into an actual call.
@@ -3027,12 +2494,12 @@ public:
/// This hook must be implemented to lower outgoing return values, described
/// by the Outs array, into the specified DAG. The implementation should
/// return the resulting token chain value.
- virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
- bool /*isVarArg*/,
- const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
- const SmallVectorImpl<SDValue> & /*OutVals*/,
- const SDLoc & /*dl*/,
- SelectionDAG & /*DAG*/) const {
+ virtual SDValue
+ LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
+ bool /*isVarArg*/,
+ const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
+ const SmallVectorImpl<SDValue> &/*OutVals*/,
+ SDLoc /*dl*/, SelectionDAG &/*DAG*/) const {
llvm_unreachable("Not Implemented");
}
@@ -3048,7 +2515,7 @@ public:
/// Return true if the target may be able to emit the call instruction as a tail
/// call. This is used by optimization passes to determine if it's profitable
/// to duplicate return instructions to enable tailcall optimization.
- virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
+ virtual bool mayBeEmittedAsTailCall(CallInst *) const {
return false;
}
@@ -3067,12 +2534,12 @@ public:
}
/// Return the type that should be used to zero or sign extend a
- /// zeroext/signext integer return value. FIXME: Some C calling conventions
- /// require the return type to be promoted, but this is not true all the time,
- /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
- /// conventions. The frontend should handle this and include all of the
- /// necessary information.
- virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
+ /// zeroext/signext integer argument or return value. FIXME: Most C calling
+  /// conventions require the return type to be promoted, but this is not true
+ /// all the time, e.g. i1 on x86-64. It is also not necessary for non-C
+ /// calling conventions. The frontend should handle this and include all of
+ /// the necessary information.
+ virtual EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
ISD::NodeType /*ExtendKind*/) const {
EVT MinVT = getRegisterType(Context, MVT::i32);
return VT.bitsLT(MinVT) ? MinVT : VT;
@@ -3100,18 +2567,11 @@ public:
/// which allows a CPU to reuse the result of a previous load indefinitely,
/// even if a cache-coherent store is performed by another CPU. The default
/// implementation does nothing.
- virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
+ virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL,
SelectionDAG &DAG) const {
return Chain;
}
- /// This callback is used to inspect load/store instructions and add
- /// target-specific MachineMemOperand flags to them. The default
- /// implementation does nothing.
- virtual MachineMemOperand::Flags getMMOFlags(const Instruction &I) const {
- return MachineMemOperand::MONone;
- }
-
/// This callback is invoked by the type legalizer to legalize nodes with an
/// illegal operand type but legal result types. It replaces the
/// LowerOperation callback in the type Legalizer. The reason we can not do
@@ -3161,6 +2621,7 @@ public:
return nullptr;
}
+
bool verifyReturnAddressArgumentIsConstant(SDValue Op,
SelectionDAG &DAG) const;
@@ -3209,19 +2670,15 @@ public:
/// Information about the constraint code, e.g. Register, RegisterClass,
/// Memory, Other, Unknown.
- TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown;
+ TargetLowering::ConstraintType ConstraintType;
/// If this is the result output operand or a clobber, this is null,
/// otherwise it is the incoming operand to the CallInst. This gets
/// modified as the asm is processed.
- Value *CallOperandVal = nullptr;
+ Value *CallOperandVal;
/// The ValueType for the operand value.
- MVT ConstraintVT = MVT::Other;
-
- /// Copy constructor for copying from a ConstraintInfo.
- AsmOperandInfo(InlineAsm::ConstraintInfo Info)
- : InlineAsm::ConstraintInfo(std::move(Info)) {}
+ MVT ConstraintVT;
/// Return true of this is an input operand that is a matching constraint
/// like "4".
@@ -3230,9 +2687,15 @@ public:
/// If this is an input matching constraint, this method returns the output
/// operand it matches.
unsigned getMatchedOperand() const;
+
+ /// Copy constructor for copying from a ConstraintInfo.
+ AsmOperandInfo(InlineAsm::ConstraintInfo Info)
+ : InlineAsm::ConstraintInfo(std::move(Info)),
+ ConstraintType(TargetLowering::C_Unknown), CallOperandVal(nullptr),
+ ConstraintVT(MVT::Other) {}
};
- using AsmOperandInfoVector = std::vector<AsmOperandInfo>;
+ typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
/// Split up the constraint string from the inline assembly value into the
/// specific constraints and their prefixes, and also tie in the associated
@@ -3324,39 +2787,32 @@ public:
/// Hooks for building estimates in place of slower divisions and square
/// roots.
- /// Return either a square root or its reciprocal estimate value for the input
- /// operand.
- /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
- /// 'Enabled' as set by a potential default override attribute.
- /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
- /// refinement iterations required to generate a sufficient (though not
- /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
+ /// Return a reciprocal square root estimate value for the input operand.
+ /// The RefinementSteps output is the number of Newton-Raphson refinement
+ /// iterations required to generate a sufficient (though not necessarily
+ /// IEEE-754 compliant) estimate for the value type.
/// The boolean UseOneConstNR output is used to select a Newton-Raphson
- /// algorithm implementation that uses either one or two constants.
- /// The boolean Reciprocal is used to select whether the estimate is for the
- /// square root of the input operand or the reciprocal of its square root.
+ /// algorithm implementation that uses one constant or two constants.
/// A target may choose to implement its own refinement within this function.
/// If that's true, then return '0' as the number of RefinementSteps to avoid
/// any further refinement of the estimate.
/// An empty SDValue return means no estimate sequence can be created.
- virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
- int Enabled, int &RefinementSteps,
- bool &UseOneConstNR, bool Reciprocal) const {
+ virtual SDValue getRsqrtEstimate(SDValue Operand, DAGCombinerInfo &DCI,
+ unsigned &RefinementSteps,
+ bool &UseOneConstNR) const {
return SDValue();
}
/// Return a reciprocal estimate value for the input operand.
- /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
- /// 'Enabled' as set by a potential default override attribute.
- /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
- /// refinement iterations required to generate a sufficient (though not
- /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
+ /// The RefinementSteps output is the number of Newton-Raphson refinement
+ /// iterations required to generate a sufficient (though not necessarily
+ /// IEEE-754 compliant) estimate for the value type.
/// A target may choose to implement its own refinement within this function.
/// If that's true, then return '0' as the number of RefinementSteps to avoid
/// any further refinement of the estimate.
/// An empty SDValue return means no estimate sequence can be created.
- virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
- int Enabled, int &RefinementSteps) const {
+ virtual SDValue getRecipEstimate(SDValue Operand, DAGCombinerInfo &DCI,
+ unsigned &RefinementSteps) const {
return SDValue();
}
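
The Newton-Raphson refinement both hooks describe is the classic iteration
x1 = x0 * (1.5 - 0.5 * a * x0 * x0), which converges to 1/sqrt(a); a scalar
sketch of one step:

    // Each step roughly doubles the number of correct bits in the estimate X.
    float refineRsqrtStep(float A, float X) {
      return X * (1.5f - 0.5f * A * X * X);
    }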
@@ -3364,22 +2820,6 @@ public:
// Legalization utility functions
//
- /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
- /// respectively, each computing an n/2-bit part of the result.
- /// \param Result A vector that will be filled with the parts of the result
- /// in little-endian order.
- /// \param LL Low bits of the LHS of the MUL. You can use this parameter
- /// if you want to control how low bits are extracted from the LHS.
- /// \param LH High bits of the LHS of the MUL. See LL for meaning.
- /// \param RL Low bits of the RHS of the MUL. See LL for meaning
- /// \param RH High bits of the RHS of the MUL. See LL for meaning.
- /// \returns true if the node has been expanded, false if it has not
- bool expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl, SDValue LHS,
- SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
- SelectionDAG &DAG, MulExpansionKind Kind,
- SDValue LL = SDValue(), SDValue LH = SDValue(),
- SDValue RL = SDValue(), SDValue RH = SDValue()) const;
-
/// Expand a MUL into two nodes. One that computes the high bits of
/// the result and one that computes the low bits.
/// \param HiLoVT The value type to use for the Lo and Hi nodes.
@@ -3390,9 +2830,9 @@ public:
/// \param RH High bits of the RHS of the MUL. See LL for meaning.
/// \returns true if the node has been expanded. false if it has not
bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
- SelectionDAG &DAG, MulExpansionKind Kind,
- SDValue LL = SDValue(), SDValue LH = SDValue(),
- SDValue RL = SDValue(), SDValue RH = SDValue()) const;
+ SelectionDAG &DAG, SDValue LL = SDValue(),
+ SDValue LH = SDValue(), SDValue RL = SDValue(),
+ SDValue RH = SDValue()) const;
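
A plain-integer sketch of the half-width decomposition that expandMUL emits
as DAG nodes, here a 64x64->128 multiply built from 32-bit halves:

    #include <cstdint>

    void mulLoHi64(uint64_t A, uint64_t B, uint64_t &Lo, uint64_t &Hi) {
      uint64_t AL = A & 0xFFFFFFFFu, AH = A >> 32;
      uint64_t BL = B & 0xFFFFFFFFu, BH = B >> 32;
      uint64_t LL = AL * BL;                       // low x low
      uint64_t Mid1 = AL * BH + (LL >> 32);        // cross term plus carry
      uint64_t Mid2 = AH * BL + (Mid1 & 0xFFFFFFFFu);
      Lo = (LL & 0xFFFFFFFFu) | (Mid2 << 32);
      Hi = AH * BH + (Mid1 >> 32) + (Mid2 >> 32);
    }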
/// Expand float(f32) to SINT(i64) conversion
/// \param N Node to expand
@@ -3400,43 +2840,6 @@ public:
/// \returns True, if the expansion was successful, false otherwise
bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
- /// Turn load of vector type into a load of the individual elements.
- /// \param LD load to expand
- /// \returns MERGE_VALUEs of the scalar loads with their chains.
- SDValue scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const;
-
- // Turn a store of a vector type into stores of the individual elements.
- /// \param ST Store with a vector value type
- /// \returns MERGE_VALUs of the individual store chains.
- SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;
-
- /// Expands an unaligned load to 2 half-size loads for an integer, and
- /// possibly more for vectors.
- std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
- SelectionDAG &DAG) const;
-
- /// Expands an unaligned store to 2 half-size stores for integer values, and
- /// possibly more for vectors.
- SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;
-
- /// Increments memory address \p Addr according to the type of the value
- /// \p DataVT that should be stored. If the data is stored in compressed
- /// form, the memory address should be incremented according to the number of
- /// the stored elements. This number is equal to the number of '1's bits
- /// in the \p Mask.
- /// \p DataVT is a vector type. \p Mask is a vector value.
- /// \p DataVT and \p Mask have the same number of vector elements.
- SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
- EVT DataVT, SelectionDAG &DAG,
- bool IsCompressedMemory) const;
-
- /// Get a pointer to vector element \p Idx located in memory for a vector of
- /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
- /// bounds the returned pointer is unspecified, but will be within the vector
- /// bounds.
- SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
- SDValue Idx) const;
-
//===--------------------------------------------------------------------===//
// Instruction Emitting Hooks
//
@@ -3450,14 +2853,14 @@ public:
/// As long as the returned basic block is different (i.e., we created a new
/// one), the custom inserter is free to modify the rest of \p MBB.
virtual MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
+ EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
/// This method should be implemented by targets that mark instructions with
/// the 'hasPostISelHook' flag. These instructions must be adjusted after
/// instruction selection by target hooks. e.g. To fill in optional defs for
/// ARM 's' setting instructions.
- virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
- SDNode *Node) const;
+ virtual void
+ AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
/// If this function returns true, SelectionDAGBuilder emits a
/// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
@@ -3468,26 +2871,15 @@ public:
/// Lower TLS global address SDNode for target independent emulated TLS model.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
SelectionDAG &DAG) const;
-
- // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
- // If we're comparing for equality to zero and isCtlzFast is true, expose the
- // fact that this can be implemented as a ctlz/srl pair, so that the dag
- // combiner can fold the new nodes.
- SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
-
-private:
- SDValue simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
- ISD::CondCode Cond, DAGCombinerInfo &DCI,
- const SDLoc &DL) const;
};
/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
-void GetReturnInfo(Type *ReturnType, AttributeList attr,
+void GetReturnInfo(Type *ReturnType, AttributeSet attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
const TargetLowering &TLI, const DataLayout &DL);
-} // end namespace llvm
+} // end llvm namespace
-#endif // LLVM_TARGET_TARGETLOWERING_H
+#endif
diff --git a/gnu/llvm/include/llvm/Target/TargetLoweringObjectFile.h b/gnu/llvm/include/llvm/Target/TargetLoweringObjectFile.h
index 80d4d8e42e5..cb52698c58b 100644
--- a/gnu/llvm/include/llvm/Target/TargetLoweringObjectFile.h
+++ b/gnu/llvm/include/llvm/Target/TargetLoweringObjectFile.h
@@ -16,52 +16,44 @@
#define LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/SectionKind.h"
-#include <cstdint>
namespace llvm {
-
-class GlobalValue;
-class MachineModuleInfo;
-class Mangler;
-class MCContext;
-class MCExpr;
-class MCSection;
-class MCSymbol;
-class MCSymbolRefExpr;
-class MCStreamer;
-class MCValue;
-class TargetMachine;
+ class MachineModuleInfo;
+ class Mangler;
+ class MCContext;
+ class MCExpr;
+ class MCSection;
+ class MCSymbol;
+ class MCSymbolRefExpr;
+ class MCStreamer;
+ class MCValue;
+ class ConstantExpr;
+ class GlobalValue;
+ class TargetMachine;
class TargetLoweringObjectFile : public MCObjectFileInfo {
- MCContext *Ctx = nullptr;
+ MCContext *Ctx;
- /// Name-mangler for global names.
- Mangler *Mang = nullptr;
+ TargetLoweringObjectFile(
+ const TargetLoweringObjectFile&) = delete;
+ void operator=(const TargetLoweringObjectFile&) = delete;
protected:
- bool SupportIndirectSymViaGOTPCRel = false;
- bool SupportGOTPCRelWithOffset = true;
+ bool SupportIndirectSymViaGOTPCRel;
+ bool SupportGOTPCRelWithOffset;
- /// This section contains the static constructor pointer list.
- MCSection *StaticCtorSection;
+public:
+ MCContext &getContext() const { return *Ctx; }
- /// This section contains the static destructor pointer list.
- MCSection *StaticDtorSection;
+ TargetLoweringObjectFile()
+ : MCObjectFileInfo(), Ctx(nullptr), SupportIndirectSymViaGOTPCRel(false),
+ SupportGOTPCRelWithOffset(true) {}
-public:
- TargetLoweringObjectFile() = default;
- TargetLoweringObjectFile(const TargetLoweringObjectFile &) = delete;
- TargetLoweringObjectFile &
- operator=(const TargetLoweringObjectFile &) = delete;
virtual ~TargetLoweringObjectFile();
- MCContext &getContext() const { return *Ctx; }
- Mangler &getMangler() const { return *Mang; }
-
/// This method must be called before any actual lowering is done. This
/// specifies the current context for codegen, and gives the lowering
/// implementations a chance to set up their default sections.
@@ -70,41 +62,41 @@ public:
virtual void emitPersonalityValue(MCStreamer &Streamer, const DataLayout &TM,
const MCSymbol *Sym) const;
- /// Emit the module-level metadata that the platform cares about.
- virtual void emitModuleMetadata(MCStreamer &Streamer, Module &M,
- const TargetMachine &TM) const {}
+ /// Emit the module flags that the platform cares about.
+ virtual void emitModuleFlags(MCStreamer &Streamer,
+ ArrayRef<Module::ModuleFlagEntry> Flags,
+ Mangler &Mang, const TargetMachine &TM) const {}
/// Given a constant with the SectionKind, return a section that it should be
/// placed in.
virtual MCSection *getSectionForConstant(const DataLayout &DL,
SectionKind Kind,
- const Constant *C,
- unsigned &Align) const;
+ const Constant *C) const;
/// Classify the specified global variable into a set of target independent
/// categories embodied in SectionKind.
- static SectionKind getKindForGlobal(const GlobalObject *GO,
+ static SectionKind getKindForGlobal(const GlobalValue *GV,
const TargetMachine &TM);
/// This method computes the appropriate section to emit the specified global
/// variable or function definition. This should not be passed external (or
/// available externally) globals.
- MCSection *SectionForGlobal(const GlobalObject *GO, SectionKind Kind,
- const TargetMachine &TM) const;
+ MCSection *SectionForGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler &Mang, const TargetMachine &TM) const;
/// This method computes the appropriate section to emit the specified global
/// variable or function definition. This should not be passed external (or
/// available externally) globals.
- MCSection *SectionForGlobal(const GlobalObject *GO,
+ MCSection *SectionForGlobal(const GlobalValue *GV, Mangler &Mang,
const TargetMachine &TM) const {
- return SectionForGlobal(GO, getKindForGlobal(GO, TM), TM);
+ return SectionForGlobal(GV, getKindForGlobal(GV, TM), Mang, TM);
}
virtual void getNameWithPrefix(SmallVectorImpl<char> &OutName,
- const GlobalValue *GV,
+ const GlobalValue *GV, Mangler &Mang,
const TargetMachine &TM) const;
- virtual MCSection *getSectionForJumpTable(const Function &F,
+ virtual MCSection *getSectionForJumpTable(const Function &F, Mangler &Mang,
const TargetMachine &TM) const;
virtual bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
@@ -112,32 +104,40 @@ public:
/// Targets should implement this method to assign a section to globals with
/// an explicit section specified. The implementation of this method can
- /// assume that GO->hasSection() is true.
+ /// assume that GV->hasSection() is true.
virtual MCSection *
- getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
- const TargetMachine &TM) const = 0;
+ getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler &Mang, const TargetMachine &TM) const = 0;
+
+ /// Allow the target to completely override section assignment of a global.
+ virtual const MCSection *getSpecialCasedSectionGlobals(const GlobalValue *GV,
+ SectionKind Kind,
+ Mangler &Mang) const {
+ return nullptr;
+ }
/// Return an MCExpr to use for a reference to the specified global variable
/// from exception handling information.
- virtual const MCExpr *getTTypeGlobalReference(const GlobalValue *GV,
- unsigned Encoding,
- const TargetMachine &TM,
- MachineModuleInfo *MMI,
- MCStreamer &Streamer) const;
+ virtual const MCExpr *
+ getTTypeGlobalReference(const GlobalValue *GV, unsigned Encoding,
+ Mangler &Mang, const TargetMachine &TM,
+ MachineModuleInfo *MMI, MCStreamer &Streamer) const;
/// Return the MCSymbol for a private symbol with global value name as its
/// base, with the specified suffix.
MCSymbol *getSymbolWithGlobalValueBase(const GlobalValue *GV,
- StringRef Suffix,
+ StringRef Suffix, Mangler &Mang,
const TargetMachine &TM) const;
// The symbol that gets passed to .cfi_personality.
virtual MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV,
+ Mangler &Mang,
const TargetMachine &TM,
MachineModuleInfo *MMI) const;
- const MCExpr *getTTypeReference(const MCSymbolRefExpr *Sym, unsigned Encoding,
- MCStreamer &Streamer) const;
+ const MCExpr *
+ getTTypeReference(const MCSymbolRefExpr *Sym, unsigned Encoding,
+ MCStreamer &Streamer) const;
virtual MCSection *getStaticCtorSection(unsigned Priority,
const MCSymbol *KeySym) const {
@@ -153,9 +153,9 @@ public:
/// emitting the address in debug info.
virtual const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const;
- virtual const MCExpr *lowerRelativeReference(const GlobalValue *LHS,
- const GlobalValue *RHS,
- const TargetMachine &TM) const {
+ virtual const MCExpr *
+ getExecutableRelativeSymbol(const ConstantExpr *CE, Mangler &Mang,
+ const TargetMachine &TM) const {
return nullptr;
}
@@ -180,15 +180,15 @@ public:
return nullptr;
}
- virtual void emitLinkerFlagsForGlobal(raw_ostream &OS,
- const GlobalValue *GV) const {}
+ virtual void emitLinkerFlagsForGlobal(raw_ostream &OS, const GlobalValue *GV,
+ const Mangler &Mang) const {}
protected:
- virtual MCSection *SelectSectionForGlobal(const GlobalObject *GO,
- SectionKind Kind,
+ virtual MCSection *SelectSectionForGlobal(const GlobalValue *GV,
+ SectionKind Kind, Mangler &Mang,
const TargetMachine &TM) const = 0;
};
} // end namespace llvm
-#endif // LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H
+#endif
diff --git a/gnu/llvm/include/llvm/Target/TargetOpcodes.h b/gnu/llvm/include/llvm/Target/TargetOpcodes.h
index 33df133a4d5..db37bdb6258 100644
--- a/gnu/llvm/include/llvm/Target/TargetOpcodes.h
+++ b/gnu/llvm/include/llvm/Target/TargetOpcodes.h
@@ -18,25 +18,122 @@ namespace llvm {
/// Invariant opcodes: All instruction sets have these as their low opcodes.
///
+/// Every instruction defined here must also appear in Target.td and the order
+/// must be the same as in CodeGenTarget.cpp.
+///
namespace TargetOpcode {
enum {
-#define HANDLE_TARGET_OPCODE(OPC) OPC,
-#define HANDLE_TARGET_OPCODE_MARKER(IDENT, OPC) IDENT = OPC,
-#include "llvm/Target/TargetOpcodes.def"
+ PHI = 0,
+ INLINEASM = 1,
+ CFI_INSTRUCTION = 2,
+ EH_LABEL = 3,
+ GC_LABEL = 4,
+
+ /// KILL - This instruction is a noop that is used only to adjust the
+ /// liveness of registers. This can be useful when dealing with
+ /// sub-registers.
+ KILL = 5,
+
+ /// EXTRACT_SUBREG - This instruction takes two operands: a register
+ /// that has subregisters, and a subregister index. It returns the
+ /// extracted subregister value. This is commonly used to implement
+ /// truncation operations on target architectures which support it.
+ EXTRACT_SUBREG = 6,
+
+ /// INSERT_SUBREG - This instruction takes three operands: a register that
+ /// has subregisters, a register providing an insert value, and a
+ /// subregister index. It returns the value of the first register with the
+ /// value of the second register inserted. The first register is often
+ /// defined by an IMPLICIT_DEF, because it is commonly used to implement
+ /// anyext operations on target architectures which support it.
+ INSERT_SUBREG = 7,
+
+ /// IMPLICIT_DEF - This is the MachineInstr-level equivalent of undef.
+ IMPLICIT_DEF = 8,
+
+ /// SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except that
+ /// the first operand is an immediate integer constant. This constant is
+ /// often zero, because it is commonly used to assert that the instruction
+ /// defining the register implicitly clears the high bits.
+ SUBREG_TO_REG = 9,
+
+ /// COPY_TO_REGCLASS - This instruction is a placeholder for a plain
+ /// register-to-register copy into a specific register class. This is only
+ /// used between instruction selection and MachineInstr creation, before
+ /// virtual registers have been created for all the instructions, and it's
+ /// only needed in cases where the register classes implied by the
+ /// instructions are insufficient. It is emitted as a COPY MachineInstr.
+ COPY_TO_REGCLASS = 10,
+
+ /// DBG_VALUE - a mapping of the llvm.dbg.value intrinsic
+ DBG_VALUE = 11,
+
+ /// REG_SEQUENCE - This variadic instruction is used to form a register that
+ /// represents a consecutive sequence of sub-registers. It's used as a
+ /// register coalescing / allocation aid and must be eliminated before code
+ /// emission.
+ // In SDNode form, the first operand encodes the register class created by
+ // the REG_SEQUENCE, while each subsequent pair names a vreg + subreg index
+ // pair. Once it has been lowered to a MachineInstr, the regclass operand
+ // is no longer present.
+ /// e.g. v1027 = REG_SEQUENCE v1024, 3, v1025, 4, v1026, 5
+  /// After register coalescing references to v1024 should be replaced with
+ /// v1027:3, v1025 with v1027:4, etc.
+ REG_SEQUENCE = 12,
+
+ /// COPY - Target-independent register copy. This instruction can also be
+ /// used to copy between subregisters of virtual registers.
+ COPY = 13,
+
+ /// BUNDLE - This instruction represents an instruction bundle. Instructions
+ /// which immediately follow a BUNDLE instruction which are marked with
+ /// 'InsideBundle' flag are inside the bundle.
+ BUNDLE = 14,
+
+ /// Lifetime markers.
+ LIFETIME_START = 15,
+ LIFETIME_END = 16,
+
+ /// A Stackmap instruction captures the location of live variables at its
+ /// position in the instruction stream. It is followed by a shadow of bytes
+ /// that must lie within the function and not contain another stackmap.
+ STACKMAP = 17,
+
+ /// Patchable call instruction - this instruction represents a call to a
+ /// constant address, followed by a series of NOPs. It is intended to
+ /// support optimizations for dynamic languages (such as javascript) that
+ /// rewrite calls to runtimes with more efficient code sequences.
+ /// This also implies a stack map.
+ PATCHPOINT = 18,
+
+ /// This pseudo-instruction loads the stack guard value. Targets which need
+ /// to prevent the stack guard value or address from being spilled to the
+ /// stack should override TargetLowering::emitLoadStackGuardNode and
+ /// additionally expand this pseudo after register allocation.
+ LOAD_STACK_GUARD = 19,
+
+ /// Call instruction with associated vm state for deoptimization and list
+ /// of live pointers for relocation by the garbage collector. It is
+ /// intended to support garbage collection with fully precise relocating
+ /// collectors and deoptimizations in either the callee or caller.
+ STATEPOINT = 20,
+
+ /// Instruction that records the offset of a local stack allocation passed to
+ /// llvm.localescape. It has two arguments: the symbol for the label and the
+ /// frame index of the local stack allocation.
+ LOCAL_ESCAPE = 21,
+
+ /// Loading instruction that may page fault, bundled with associated
+ /// information on how to handle such a page fault. It is intended to support
+ /// "zero cost" null checks in managed languages by allowing LLVM to fold
+ /// comparisons into existing memory operations.
+ FAULTING_LOAD_OP = 22,
+
+ /// BUILTIN_OP_END - This must be the last enum value in this list.
+ /// The target-specific post-isel opcode values start here.
+ GENERIC_OP_END = FAULTING_LOAD_OP,
};
} // end namespace TargetOpcode
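
A hypothetical helper showing how these invariant opcodes are typically
consumed, e.g. when walking MachineInstrs and skipping bookkeeping
instructions that emit no machine code:

    static bool isCodegenNoop(unsigned Opcode) {
      switch (Opcode) {
      case TargetOpcode::PHI:
      case TargetOpcode::KILL:
      case TargetOpcode::IMPLICIT_DEF:
      case TargetOpcode::LIFETIME_START:
      case TargetOpcode::LIFETIME_END:
        return true;   // liveness/bookkeeping only, no encoding
      default:
        return false;
      }
    }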
-
-/// Check whether the given Opcode is a generic opcode that is not supposed
-/// to appear after ISel.
-static inline bool isPreISelGenericOpcode(unsigned Opcode) {
- return Opcode >= TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START &&
- Opcode <= TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
-}
-
-/// Check whether the given Opcode is a target-specific opcode.
-static inline bool isTargetSpecificOpcode(unsigned Opcode) {
- return Opcode > TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
-}
} // end namespace llvm
#endif
diff --git a/gnu/llvm/include/llvm/Target/TargetRegisterInfo.h b/gnu/llvm/include/llvm/Target/TargetRegisterInfo.h
index b6839dad106..fccaad4705d 100644
--- a/gnu/llvm/include/llvm/Target/TargetRegisterInfo.h
+++ b/gnu/llvm/include/llvm/Target/TargetRegisterInfo.h
@@ -1,4 +1,4 @@
-//==- Target/TargetRegisterInfo.h - Target Register Information --*- C++ -*-==//
+//=== Target/TargetRegisterInfo.h - Target Register Information -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -17,40 +17,52 @@
#define LLVM_TARGET_TARGETREGISTERINFO_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/IR/CallingConv.h"
-#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Printable.h"
#include <cassert>
-#include <cstdint>
#include <functional>
namespace llvm {
class BitVector;
-class LiveRegMatrix;
class MachineFunction;
-class MachineInstr;
class RegScavenger;
+template<class T> class SmallVectorImpl;
class VirtRegMap;
+class raw_ostream;
+class LiveRegMatrix;
+
+/// A bitmask representing the covering of a register with sub-registers.
+///
+/// This is typically used to track liveness at sub-register granularity.
+/// Lane masks for sub-register indices are similar to register units for
+/// physical registers. The individual bits in a lane mask can't be assigned
+/// any specific meaning. They can be used to check if two sub-register
+/// indices overlap.
+///
+/// Iff the target has a register such that:
+///
+/// getSubReg(Reg, A) overlaps getSubReg(Reg, B)
+///
+/// then:
+///
+/// (getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B)) != 0
+typedef unsigned LaneBitmask;
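
The overlap property above in code form (a sketch; TRI is assumed to be a
TargetRegisterInfo reference and A, B two sub-register indices):

    bool subRegIndicesOverlap(const TargetRegisterInfo &TRI,
                              unsigned A, unsigned B) {
      // A nonzero intersection of the lane masks means the two
      // sub-registers share at least one lane of the covering register.
      return (TRI.getSubRegIndexLaneMask(A) &
              TRI.getSubRegIndexLaneMask(B)) != 0;
    }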
class TargetRegisterClass {
public:
- using iterator = const MCPhysReg *;
- using const_iterator = const MCPhysReg *;
- using sc_iterator = const TargetRegisterClass* const *;
+ typedef const MCPhysReg* iterator;
+ typedef const MCPhysReg* const_iterator;
+ typedef const MVT::SimpleValueType* vt_iterator;
+ typedef const TargetRegisterClass* const * sc_iterator;
// Instance variables filled by tablegen, do not use!
const MCRegisterClass *MC;
- const uint16_t SpillSize, SpillAlignment;
- const MVT::SimpleValueType *VTs;
+ const vt_iterator VTs;
const uint32_t *SubClassMask;
const uint16_t *SuperRegIndices;
const LaneBitmask LaneMask;
@@ -59,9 +71,6 @@ public:
const uint8_t AllocationPriority;
/// Whether the class supports two (or more) disjunct subregister indices.
const bool HasDisjunctSubRegs;
- /// Whether a combination of subregisters can cover every register in the
- /// class. See also the CoveredBySubRegs description in Target.td.
- const bool CoveredBySubRegs;
const sc_iterator SuperClasses;
ArrayRef<MCPhysReg> (*OrderFunc)(const MachineFunction&);
@@ -76,11 +85,6 @@ public:
/// Return the number of registers in this class.
unsigned getNumRegs() const { return MC->getNumRegs(); }
- iterator_range<SmallVectorImpl<MCPhysReg>::const_iterator>
- getRegisters() const {
- return make_range(MC->begin(), MC->end());
- }
-
/// Return the specified register in the class.
unsigned getRegister(unsigned i) const {
return MC->getRegister(i);
@@ -97,6 +101,13 @@ public:
return MC->contains(Reg1, Reg2);
}
+ /// Return the size of the register in bytes, which is also the size
+ /// of a stack slot allocated to hold a spilled copy of this register.
+ unsigned getSize() const { return MC->getSize(); }
+
+ /// Return the minimum required alignment for a register of this class.
+ unsigned getAlignment() const { return MC->getAlignment(); }
+
/// Return the cost of copying a value between two registers in this class.
/// A negative number means the register class is very expensive
/// to copy e.g. status flag register classes.
@@ -106,6 +117,26 @@ public:
/// registers.
bool isAllocatable() const { return MC->isAllocatable(); }
+ /// Return true if this TargetRegisterClass has the ValueType vt.
+ bool hasType(MVT vt) const {
+ for(int i = 0; VTs[i] != MVT::Other; ++i)
+ if (MVT(VTs[i]) == vt)
+ return true;
+ return false;
+ }
+
+ /// vt_begin / vt_end - Loop over all of the value types that can be
+ /// represented by values in this register class.
+ vt_iterator vt_begin() const {
+ return VTs;
+ }
+
+ vt_iterator vt_end() const {
+ vt_iterator I = VTs;
+ while (*I != MVT::Other) ++I;
+ return I;
+ }
+
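
A small usage sketch of the iterator pair declared above, counting the value
types a class can hold (RC is assumed non-null; the list is terminated by
MVT::Other):

    unsigned numLegalTypes(const TargetRegisterClass *RC) {
      unsigned N = 0;
      for (TargetRegisterClass::vt_iterator I = RC->vt_begin(),
                                            E = RC->vt_end(); I != E; ++I)
        ++N;
      return N;
    }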
/// Return true if the specified TargetRegisterClass
/// is a proper sub-class of this TargetRegisterClass.
bool hasSubClass(const TargetRegisterClass *RC) const {
@@ -130,21 +161,8 @@ public:
}
/// Returns a bit vector of subclasses, including this one.
- /// The vector is indexed by class IDs.
- ///
- /// To use it, consider the returned array as a chunk of memory that
- /// contains an array of bits of size NumRegClasses. Each 32-bit chunk
- /// contains a bitset of the ID of the subclasses in big-endian style.
-
- /// I.e., the representation of the memory from left to right at the
- /// bit level looks like:
- /// [31 30 ... 1 0] [ 63 62 ... 33 32] ...
- /// [ XXX NumRegClasses NumRegClasses - 1 ... ]
- /// Where the number represents the class ID and XXX bits that
- /// should be ignored.
- ///
- /// See the implementation of hasSubClassEq for an example of how it
- /// can be used.
+  /// The vector is indexed by class IDs; see hasSubClassEq() above for how to
+ /// use it.
const uint32_t *getSubClassMask() const {
return SubClassMask;
}
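
Reading the 32-bit-chunked bit vector returned above, as hasSubClassEq()
does (a sketch; ClassID is a register class ID below getNumRegClasses()):

    bool maskContainsClass(const uint32_t *Mask, unsigned ClassID) {
      // Chunk ClassID / 32 holds this class's bit at position ClassID % 32.
      return (Mask[ClassID / 32] >> (ClassID % 32)) & 1;
    }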
@@ -156,6 +174,7 @@ public:
/// There exists SuperRC where:
/// For all Reg in SuperRC:
/// this->contains(Reg:Idx)
+ ///
const uint16_t *getSuperRegIndices() const {
return SuperRegIndices;
}
@@ -186,13 +205,14 @@ public:
/// other criteria.
///
/// By default, this method returns all registers in the class.
+ ///
ArrayRef<MCPhysReg> getRawAllocationOrder(const MachineFunction &MF) const {
return OrderFunc ? OrderFunc(MF) : makeArrayRef(begin(), getNumRegs());
}
/// Returns the combination of all lane masks of register in this class.
/// The lane masks of the registers are the combination of all lane masks
- /// of their subregisters. Returns 1 if there are no subregisters.
+ /// of their subregisters.
LaneBitmask getLaneMask() const {
return LaneMask;
}
@@ -220,9 +240,7 @@ struct RegClassWeight {
///
class TargetRegisterInfo : public MCRegisterInfo {
public:
- using regclass_iterator = const TargetRegisterClass * const *;
- using vt_iterator = const MVT::SimpleValueType *;
-
+ typedef const TargetRegisterClass * const * regclass_iterator;
private:
const TargetRegisterInfoDesc *InfoDesc; // Extra desc array for codegen
const char *const *SubRegIndexNames; // Names of subreg indexes.
@@ -230,7 +248,7 @@ private:
const LaneBitmask *SubRegIndexLaneMasks;
regclass_iterator RegClassBegin, RegClassEnd; // List of regclasses
- LaneBitmask CoveringLanes;
+ unsigned CoveringLanes;
protected:
TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
@@ -238,10 +256,10 @@ protected:
regclass_iterator RegClassEnd,
const char *const *SRINames,
const LaneBitmask *SRILaneMasks,
- LaneBitmask CoveringLanes);
+ unsigned CoveringLanes);
virtual ~TargetRegisterInfo();
-
public:
+
// Register numbers can represent physical registers, virtual registers, and
// sometimes stack slots. The unsigned values are divided into these ranges:
//
@@ -304,44 +322,6 @@ public:
return Index | (1u << 31);
}
- /// Return the size in bits of a register from class RC.
- unsigned getRegSizeInBits(const TargetRegisterClass &RC) const {
- return RC.SpillSize * 8;
- }
-
- /// Return the size in bytes of the stack slot allocated to hold a spilled
- /// copy of a register from class RC.
- unsigned getSpillSize(const TargetRegisterClass &RC) const {
- return RC.SpillSize;
- }
-
- /// Return the minimum required alignment for a spill slot for a register
- /// of this class.
- unsigned getSpillAlignment(const TargetRegisterClass &RC) const {
- return RC.SpillAlignment;
- }
-
- /// Return true if the given TargetRegisterClass has the ValueType T.
- bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const {
- for (int i = 0; RC.VTs[i] != MVT::Other; ++i)
- if (MVT(RC.VTs[i]) == T)
- return true;
- return false;
- }
-
- /// Loop over all of the value types that can be represented by values
- // in the given register class.
- vt_iterator legalclasstypes_begin(const TargetRegisterClass &RC) const {
- return RC.VTs;
- }
-
- vt_iterator legalclasstypes_end(const TargetRegisterClass &RC) const {
- vt_iterator I = RC.VTs;
- while (*I != MVT::Other)
- ++I;
- return I;
- }
-
/// Returns the Register Class of a physical register of the given type,
/// picking the most sub register class of the right type that contains this
/// physreg.
@@ -442,12 +422,15 @@ public:
/// this target. The register should be in the order of desired callee-save
/// stack frame offset. The first register is closest to the incoming stack
/// pointer if stack grows down, and vice versa.
- /// Notice: This function does not take into account disabled CSRs.
- /// In most cases you will want to use instead the function
- /// getCalleeSavedRegs that is implemented in MachineRegisterInfo.
+ ///
virtual const MCPhysReg*
getCalleeSavedRegs(const MachineFunction *MF) const = 0;
+ virtual const MCPhysReg*
+ getCalleeSavedRegsViaCopy(const MachineFunction *MF) const {
+ return nullptr;
+ }
+
/// Return a mask of call-preserved registers for the given calling convention
/// on the current function. The mask should include all call-preserved
/// aliases. This is used by the register allocator to determine which
@@ -474,47 +457,23 @@ public:
/// Return a register mask that clobbers everything.
virtual const uint32_t *getNoPreservedMask() const {
- llvm_unreachable("target does not provide no preserved mask");
+    llvm_unreachable("target does not provide no preserved mask");
}
- /// Return true if all bits that are set in mask \p mask0 are also set in
- /// \p mask1.
- bool regmaskSubsetEqual(const uint32_t *mask0, const uint32_t *mask1) const;
-
/// Return all the call-preserved register masks defined for this target.
virtual ArrayRef<const uint32_t *> getRegMasks() const = 0;
virtual ArrayRef<const char *> getRegMaskNames() const = 0;
/// Returns a bitset indexed by physical register number indicating if a
/// register is a special register that has particular uses and should be
- /// considered unavailable at all times, e.g. stack pointer, return address.
- /// A reserved register:
- /// - is not allocatable
- /// - is considered always live
- /// - is ignored by liveness tracking
- /// It is often necessary to reserve the super registers of a reserved
- /// register as well, to avoid them getting allocated indirectly. You may use
- /// markSuperRegs() and checkAllSuperRegsMarked() in this case.
+ /// considered unavailable at all times, e.g. SP, RA. This is
+  /// used by the register scavenger to determine what registers are free.
virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;
- /// Returns true if PhysReg is unallocatable and constant throughout the
- /// function. Used by MachineRegisterInfo::isConstantPhysReg().
- virtual bool isConstantPhysReg(unsigned PhysReg) const { return false; }
-
- /// Physical registers that may be modified within a function but are
- /// guaranteed to be restored before any uses. This is useful for targets that
- /// have call sequences where a GOT register may be updated by the caller
- /// prior to a call and is guaranteed to be restored (also by the caller)
- /// after the call.
- virtual bool isCallerPreservedPhysReg(unsigned PhysReg,
- const MachineFunction &MF) const {
- return false;
- }
-
/// Prior to adding the live-out mask to a stackmap or patchpoint
/// instruction, provide the target the opportunity to adjust it (mainly to
/// remove pseudo-registers that should be ignored).
- virtual void adjustStackMapLiveOutMask(uint32_t *Mask) const {}
+ virtual void adjustStackMapLiveOutMask(uint32_t *Mask) const { }
/// Return a super-register of the specified register
/// Reg so its sub-register of index SubIdx is Reg.
@@ -534,7 +493,7 @@ public:
// For a copy-like instruction that defines a register of class DefRC with
// subreg index DefSubReg, reading from another source with class SrcRC and
- // subregister SrcSubReg return true if this is a preferable copy
+  // subregister SrcSubReg return true if this is a preferable copy
// instruction or an earlier use should be used.
virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
unsigned DefSubReg,
@@ -572,6 +531,7 @@ public:
/// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
/// ssub_0:S0 - ssub_3:S3 subregs.
/// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
+ ///
unsigned composeSubRegIndices(unsigned a, unsigned b) const {
if (!a) return b;
if (!b) return a;
@@ -588,20 +548,6 @@ public:
return composeSubRegIndexLaneMaskImpl(IdxA, Mask);
}
- /// Transform a lanemask given for a virtual register to the corresponding
- /// lanemask before using subregister with index \p IdxA.
- /// This is the reverse of composeSubRegIndexLaneMask(), assuming Mask is a
- /// valid lane mask (no invalid bits set), the following holds:
- /// X0 = composeSubRegIndexLaneMask(Idx, Mask)
- /// X1 = reverseComposeSubRegIndexLaneMask(Idx, X0)
- /// => X1 == Mask
- LaneBitmask reverseComposeSubRegIndexLaneMask(unsigned IdxA,
- LaneBitmask LaneMask) const {
- if (!IdxA)
- return LaneMask;
- return reverseComposeSubRegIndexLaneMaskImpl(IdxA, LaneMask);
- }
-
/// Debugging helper: dump register in human readable form to dbgs() stream.
static void dumpReg(unsigned Reg, unsigned SubRegIndex = 0,
const TargetRegisterInfo* TRI = nullptr);
@@ -618,11 +564,6 @@ protected:
llvm_unreachable("Target has no sub-registers");
}
- virtual LaneBitmask reverseComposeSubRegIndexLaneMaskImpl(unsigned,
- LaneBitmask) const {
- llvm_unreachable("Target has no sub-registers");
- }
-
public:
/// Find a common super-register class if it exists.
///
@@ -646,6 +587,7 @@ public:
/// corresponding argument register class.
///
/// The function returns NULL if no register class can be found.
+ ///
const TargetRegisterClass*
getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
const TargetRegisterClass *RCB, unsigned SubB,
@@ -656,11 +598,9 @@ public:
//
/// Register class iterators
+ ///
regclass_iterator regclass_begin() const { return RegClassBegin; }
regclass_iterator regclass_end() const { return RegClassEnd; }
- iterator_range<regclass_iterator> regclasses() const {
- return make_range(regclass_begin(), regclass_end());
- }
unsigned getNumRegClasses() const {
return (unsigned)(regclass_end()-regclass_begin());
@@ -830,13 +770,6 @@ public:
return false;
}
- /// Returns true if the target requires using the RegScavenger directly for
- /// frame elimination despite using requiresFrameIndexScavenging.
- virtual bool requiresFrameIndexReplacementScavenging(
- const MachineFunction &MF) const {
- return false;
- }
-
/// Returns true if the target wants the LocalStackAllocation pass to be run
/// and virtual base registers used for more efficient stack access.
virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
@@ -910,6 +843,7 @@ public:
/// Return true if the register was spilled, false otherwise.
/// If this function does not spill the register, the scavenger
/// will instead spill it to the emergency spill slot.
+ ///
virtual bool saveScavengerRegister(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
MachineBasicBlock::iterator &UseMI,
@@ -929,17 +863,6 @@ public:
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS = nullptr) const = 0;
- /// Return the assembly name for \p Reg.
- virtual StringRef getRegAsmName(unsigned Reg) const {
- // FIXME: We are assuming that the assembly name is equal to the TableGen
- // name converted to lower case
- //
- // The TableGen name is the name of the definition for this register in the
- // target's tablegen files. For example, the TableGen name of
- // def EAX : Register <...>; is "EAX"
- return StringRef(getName(Reg));
- }
-
//===--------------------------------------------------------------------===//
/// Subtarget Hooks
@@ -958,16 +881,9 @@ public:
/// getFrameRegister - This method should return the register used as a base
/// for values allocated in the current stack frame.
virtual unsigned getFrameRegister(const MachineFunction &MF) const = 0;
-
- /// Mark a register and all its aliases as reserved in the given set.
- void markSuperRegs(BitVector &RegisterSet, unsigned Reg) const;
-
- /// Returns true if for every register in the set all super registers are part
- /// of the set as well.
- bool checkAllSuperRegsMarked(const BitVector &RegisterSet,
- ArrayRef<MCPhysReg> Exceptions = ArrayRef<MCPhysReg>()) const;
};
+
//===----------------------------------------------------------------------===//
// SuperRegClassIterator
//===----------------------------------------------------------------------===//
@@ -986,7 +902,7 @@ public:
//
class SuperRegClassIterator {
const unsigned RCMaskWords;
- unsigned SubReg = 0;
+ unsigned SubReg;
const uint16_t *Idx;
const uint32_t *Mask;
@@ -997,7 +913,9 @@ public:
const TargetRegisterInfo *TRI,
bool IncludeSelf = false)
: RCMaskWords((TRI->getNumRegClasses() + 31) / 32),
- Idx(RC->getSuperRegIndices()), Mask(RC->getSubClassMask()) {
+ SubReg(0),
+ Idx(RC->getSuperRegIndices()),
+ Mask(RC->getSubClassMask()) {
if (!IncludeSelf)
++*this;
}
@@ -1008,9 +926,8 @@ public:
/// Returns the current sub-register index.
unsigned getSubReg() const { return SubReg; }
- /// Returns the bit mask of register classes that getSubReg() projects into
+ /// Returns the bit mask of register classes that getSubReg() projects into
/// RC.
- /// See TargetRegisterClass::getSubClassMask() for how to use it.
const uint32_t *getMask() const { return Mask; }
/// Advance iterator to the next entry.
@@ -1023,96 +940,6 @@ public:
}
};
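A minimal usage sketch for the iterator above, assuming an LLVM tree matching this header; RC and TRI are placeholders a target pass would supply:

    #include "llvm/Target/TargetRegisterInfo.h"
    using namespace llvm;

    // Sketch: enumerate the (sub-register index, class mask) pairs for RC.
    // RC and TRI are assumed to come from the target in a real pass.
    void walkSuperRegClasses(const TargetRegisterClass *RC,
                             const TargetRegisterInfo *TRI) {
      for (SuperRegClassIterator SRI(RC, TRI, /*IncludeSelf=*/false);
           SRI.isValid(); ++SRI) {
        unsigned SubIdx = SRI.getSubReg();     // current sub-register index
        const uint32_t *Mask = SRI.getMask();  // classes projecting into RC
        (void)SubIdx;
        (void)Mask;
      }
    }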
-//===----------------------------------------------------------------------===//
-// BitMaskClassIterator
-//===----------------------------------------------------------------------===//
-/// This class encapsulates the logic to iterate over a bitmask returned by
-/// the various RegClass related APIs.
-/// E.g., this class can be used to iterate over the subclasses provided by
-/// TargetRegisterClass::getSubClassMask or SuperRegClassIterator::getMask.
-class BitMaskClassIterator {
- /// Total number of register classes.
- const unsigned NumRegClasses;
- /// Base index of CurrentChunk.
- /// In other words, the number of bits we read to get to the
- /// beginning of that chunk.
- unsigned Base = 0;
- /// Adjusted base index of CurrentChunk:
- /// Base index + how many bits we have read within CurrentChunk.
- unsigned Idx = 0;
- /// Current register class ID.
- unsigned ID = 0;
- /// Mask we are iterating over.
- const uint32_t *Mask;
- /// Current chunk of the Mask we are traversing.
- uint32_t CurrentChunk;
-
- /// Move ID to the next set bit.
- void moveToNextID() {
- // If the current chunk of memory is empty, move to the next one,
- // while making sure we do not go past the number of register
- // classes.
- while (!CurrentChunk) {
- // Move to the next chunk.
- Base += 32;
- if (Base >= NumRegClasses) {
- ID = NumRegClasses;
- return;
- }
- CurrentChunk = *++Mask;
- Idx = Base;
- }
- // Otherwise look for the first bit set from the right
- // (representation of the class ID is big endian).
- // See getSubClassMask for more details on the representation.
- unsigned Offset = countTrailingZeros(CurrentChunk);
- // Add the Offset to the adjusted base number of this chunk: Idx.
- // This is the ID of the register class.
- ID = Idx + Offset;
-
- // Consume the zeros, if any, and the bit we just read
- // so that we are at the right spot for the next call.
- // Do not shift by Offset + 1, because Offset may be 31 and a
- // 32-bit shift is UB. In that case we could instead have set the
- // chunk to 0, but that would have introduced an if statement.
- moveNBits(Offset);
- moveNBits(1);
- }
-
- /// Move \p NumBits bits forward in CurrentChunk.
- void moveNBits(unsigned NumBits) {
- assert(NumBits < 32 && "Undefined behavior spotted!");
- // Consume the bit we read for the next call.
- CurrentChunk >>= NumBits;
- // Adjust the base for the chunk.
- Idx += NumBits;
- }
-
-public:
- /// Create a BitMaskClassIterator that visits all the register classes
- /// represented by \p Mask.
- ///
- /// \pre \p Mask != nullptr
- BitMaskClassIterator(const uint32_t *Mask, const TargetRegisterInfo &TRI)
- : NumRegClasses(TRI.getNumRegClasses()), Mask(Mask), CurrentChunk(*Mask) {
- // Move to the first ID.
- moveToNextID();
- }
-
- /// Returns true if this iterator is still pointing at a valid entry.
- bool isValid() const { return getID() != NumRegClasses; }
-
- /// Returns the current register class ID.
- unsigned getID() const { return ID; }
-
- /// Advance iterator to the next entry.
- void operator++() {
- assert(isValid() && "Cannot move iterator past end.");
- moveToNextID();
- }
-};
-
// This is useful when building IndexedMaps keyed on virtual registers
struct VirtReg2IndexFunctor : public std::unary_function<unsigned, unsigned> {
unsigned operator()(unsigned Reg) const {
@@ -1147,6 +974,9 @@ Printable PrintRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);
/// registers on a \ref raw_ostream.
Printable PrintVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *TRI);
-} // end namespace llvm
+/// Create a Printable object to print LaneBitmasks on a \ref raw_ostream.
+Printable PrintLaneMask(LaneBitmask LaneMask);
+
+} // End llvm namespace
-#endif // LLVM_TARGET_TARGETREGISTERINFO_H
+#endif
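The removed BitMaskClassIterator walks set bits across 32-bit mask chunks; a standalone model of that walk (plain C++, not the LLVM class) is sketched below. GCC/Clang's __builtin_ctz stands in for LLVM's countTrailingZeros, and clearing the consumed bit with Chunk &= Chunk - 1 sidesteps the 32-bit shift UB the original comments work around with two shifts:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Read the mask 32 bits at a time, emitting the ID of each set bit.
    static std::vector<unsigned> collectIDs(const uint32_t *Mask,
                                            unsigned NumIDs) {
      std::vector<unsigned> IDs;
      for (unsigned Base = 0; Base < NumIDs; Base += 32) {
        uint32_t Chunk = *Mask++;
        while (Chunk) {
          unsigned ID = Base + __builtin_ctz(Chunk); // first set bit, right
          if (ID >= NumIDs)
            break;
          IDs.push_back(ID);
          Chunk &= Chunk - 1;  // clear the bit we just consumed
        }
      }
      return IDs;
    }

    int main() {
      const uint32_t Mask[2] = {0x80000005u, 0x1u};  // bits 0, 2, 31, 32
      for (unsigned ID : collectIDs(Mask, 64))
        std::cout << ID << ' ';  // prints: 0 2 31 32
      std::cout << '\n';
    }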
diff --git a/gnu/llvm/include/llvm/Target/TargetSubtargetInfo.h b/gnu/llvm/include/llvm/Target/TargetSubtargetInfo.h
index 9440c56dcf1..d50aa4932f8 100644
--- a/gnu/llvm/include/llvm/Target/TargetSubtargetInfo.h
+++ b/gnu/llvm/include/llvm/Target/TargetSubtargetInfo.h
@@ -1,4 +1,4 @@
-//===- llvm/Target/TargetSubtargetInfo.h - Target Information ---*- C++ -*-===//
+//==-- llvm/Target/TargetSubtargetInfo.h - Target Information ----*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -14,35 +14,17 @@
#ifndef LLVM_TARGET_TARGETSUBTARGETINFO_H
#define LLVM_TARGET_TARGETSUBTARGETINFO_H
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/PBQPRAConstraint.h"
-#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/CodeGen.h"
-#include <memory>
-#include <vector>
-
namespace llvm {
-class CallLowering;
-class InstrItineraryData;
-struct InstrStage;
-class InstructionSelector;
-class LegalizerInfo;
+class DataLayout;
+class MachineFunction;
class MachineInstr;
-struct MachineSchedPolicy;
-struct MCReadAdvanceEntry;
-struct MCWriteLatencyEntry;
-struct MCWriteProcResEntry;
-class RegisterBankInfo;
class SDep;
-class SelectionDAGTargetInfo;
-struct SubtargetFeatureKV;
-struct SubtargetInfoKV;
class SUnit;
class TargetFrameLowering;
class TargetInstrInfo;
@@ -50,7 +32,9 @@ class TargetLowering;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetSchedModel;
-class Triple;
+class TargetSelectionDAGInfo;
+struct MachineSchedPolicy;
+template <typename T> class SmallVectorImpl;
//===----------------------------------------------------------------------===//
///
@@ -59,6 +43,10 @@ class Triple;
/// be exposed through a TargetSubtargetInfo-derived class.
///
class TargetSubtargetInfo : public MCSubtargetInfo {
+ TargetSubtargetInfo(const TargetSubtargetInfo &) = delete;
+ void operator=(const TargetSubtargetInfo &) = delete;
+ TargetSubtargetInfo() = delete;
+
protected: // Can only create subclasses...
TargetSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS,
ArrayRef<SubtargetFeatureKV> PF,
@@ -72,15 +60,10 @@ protected: // Can only create subclasses...
public:
// AntiDepBreakMode - Type of anti-dependence breaking that should
// be performed before post-RA scheduling.
- using AntiDepBreakMode = enum { ANTIDEP_NONE, ANTIDEP_CRITICAL, ANTIDEP_ALL };
- using RegClassVector = SmallVectorImpl<const TargetRegisterClass *>;
+ typedef enum { ANTIDEP_NONE, ANTIDEP_CRITICAL, ANTIDEP_ALL } AntiDepBreakMode;
+ typedef SmallVectorImpl<const TargetRegisterClass *> RegClassVector;
- TargetSubtargetInfo() = delete;
- TargetSubtargetInfo(const TargetSubtargetInfo &) = delete;
- TargetSubtargetInfo &operator=(const TargetSubtargetInfo &) = delete;
- ~TargetSubtargetInfo() override;
-
- virtual bool isXRaySupported() const { return false; }
+ virtual ~TargetSubtargetInfo();
// Interfaces to the major aspects of target machine information:
//
@@ -88,7 +71,6 @@ public:
// -- Pipelines and scheduling information
// -- Stack frame information
// -- Selection DAG lowering information
- // -- Call lowering information
//
// N.B. These objects may change during compilation. It's not safe to cache
// them between functions.
@@ -97,37 +79,24 @@ public:
return nullptr;
}
virtual const TargetLowering *getTargetLowering() const { return nullptr; }
- virtual const SelectionDAGTargetInfo *getSelectionDAGInfo() const {
- return nullptr;
- }
- virtual const CallLowering *getCallLowering() const { return nullptr; }
-
- // FIXME: This lets targets specialize the selector by subtarget (which lets
- // us do things like a dedicated avx512 selector). However, we might want
- // to also specialize selectors by MachineFunction, which would let us be
- // aware of optsize/optnone and such.
- virtual const InstructionSelector *getInstructionSelector() const {
+ virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const {
return nullptr;
}
-
/// Target can subclass this hook to select a different DAG scheduler.
virtual RegisterScheduler::FunctionPassCtor
getDAGScheduler(CodeGenOpt::Level) const {
return nullptr;
}
- virtual const LegalizerInfo *getLegalizerInfo() const { return nullptr; }
-
/// getRegisterInfo - If register information is available, return it. If
- /// not, return null.
+ /// not, return null. This is kept separate from RegInfo until RegInfo has
+ /// details of graph coloring register allocation removed from it.
+ ///
virtual const TargetRegisterInfo *getRegisterInfo() const { return nullptr; }
- /// If the information for the register banks is available, return it.
- /// Otherwise return nullptr.
- virtual const RegisterBankInfo *getRegBankInfo() const { return nullptr; }
-
/// getInstrItineraryData - Returns instruction itinerary data for the target
/// or specific subtarget.
+ ///
virtual const InstrItineraryData *getInstrItineraryData() const {
return nullptr;
}
@@ -150,9 +119,6 @@ public:
/// TargetLowering preference). It does not yet disable the postRA scheduler.
virtual bool enableMachineScheduler() const;
- /// \brief Support printing of [latency:throughput] comment in output .S file.
- virtual bool supportPrintSchedInfo() const { return false; }
-
/// \brief True if the machine scheduler should disable the TLI preference
/// for preRA scheduling with the source level scheduler.
virtual bool enableMachineSchedDefaultSched() const { return true; }
@@ -178,6 +144,7 @@ public:
/// scheduling heuristics (no custom MachineSchedStrategy) to make
/// changes to the generic scheduling policy.
virtual void overrideSchedPolicy(MachineSchedPolicy &Policy,
+ MachineInstr *begin, MachineInstr *end,
unsigned NumRegionInstrs) const {}
// \brief Perform target specific adjustments to the latency of a schedule
@@ -195,18 +162,6 @@ public:
return CriticalPathRCs.clear();
}
- // \brief Provide an ordered list of schedule DAG mutations for the post-RA
- // scheduler.
- virtual void getPostRAMutations(
- std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
- }
-
- // \brief Provide an ordered list of schedule DAG mutations for the machine
- // pipeliner.
- virtual void getSMSMutations(
- std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
- }
-
// For use with PostRAScheduling: get the minimum optimization level needed
// to enable post-RA scheduling.
virtual CodeGenOpt::Level getOptLevelToEnablePostRAScheduler() const {
@@ -234,15 +189,9 @@ public:
}
/// Enable tracking of subregister liveness in register allocator.
- /// Please use MachineRegisterInfo::subRegLivenessEnabled() instead where
- /// possible.
virtual bool enableSubRegLiveness() const { return false; }
-
- /// Returns string representation of scheduler comment
- std::string getSchedInfoStr(const MachineInstr &MI) const override;
- std::string getSchedInfoStr(MCInst const &MCI) const override;
};
-} // end namespace llvm
+} // End llvm namespace
-#endif // LLVM_TARGET_TARGETSUBTARGETINFO_H
+#endif
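A toy model of the overrideSchedPolicy hook whose older signature appears above: a target inspects the scheduling region and adjusts the policy. All types here are local stand-ins, not LLVM's; the OnlyBottomUp field merely mirrors a real MachineSchedPolicy flag:

    struct MachineSchedPolicy { bool OnlyBottomUp = false; };
    struct MachineInstr {};

    struct SubtargetBase {
      virtual ~SubtargetBase() = default;
      virtual void overrideSchedPolicy(MachineSchedPolicy &, MachineInstr *,
                                       MachineInstr *, unsigned) const {}
    };

    struct MyTargetSubtarget : SubtargetBase {
      void overrideSchedPolicy(MachineSchedPolicy &Policy, MachineInstr *,
                               MachineInstr *,
                               unsigned NumRegionInstrs) const override {
        if (NumRegionInstrs < 8)  // small region: restrict to bottom-up
          Policy.OnlyBottomUp = true;
      }
    };

    int main() {
      MachineSchedPolicy P;
      MyTargetSubtarget ST;
      ST.overrideSchedPolicy(P, nullptr, nullptr, /*NumRegionInstrs=*/4);
      return P.OnlyBottomUp ? 0 : 1;
    }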
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h b/gnu/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h
index 5ec3888d453..73c15e42c35 100644
--- a/gnu/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h
+++ b/gnu/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h
@@ -21,13 +21,13 @@ namespace llvm {
class ICmpInst;
class Value;
- /// Encode an icmp predicate into a three-bit mask. These bits are carefully
- /// arranged to allow folding of expressions such as:
+ /// getICmpCode - Encode an icmp predicate into a three-bit mask. These bits
+ /// are carefully arranged to allow folding of expressions such as:
///
/// (A < B) | (A > B) --> (A != B)
///
/// Note that this is only valid if the first and second predicates have the
- /// same sign. It is illegal to do: (A u< B) | (A s> B)
+ /// same sign. It is illegal to do: (A u< B) | (A s> B)
///
/// Three bits are used to represent the condition, as follows:
/// 0 A > B
@@ -46,25 +46,20 @@ namespace llvm {
///
unsigned getICmpCode(const ICmpInst *ICI, bool InvertPred = false);
- /// This is the complement of getICmpCode, which turns an opcode and two
- /// operands into either a constant true or false, or the predicate for a new
- /// ICmp instruction. The sign is passed in to determine which kind of
- /// predicate to use in the new icmp instruction.
+ /// getICmpValue - This is the complement of getICmpCode, which turns an
+ /// opcode and two operands into either a constant true or false, or the
+ /// predicate for a new ICmp instruction. The sign is passed in to determine
+ /// which kind of predicate to use in the new icmp instruction.
/// Non-NULL return value will be a true or false constant.
- /// NULL return means a new ICmp is needed. The predicate for which is output
- /// in NewICmpPred.
+ /// NULL return means a new ICmp is needed; the predicate for it is
+ /// output in NewICmpPred.
Value *getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
CmpInst::Predicate &NewICmpPred);
- /// Return true if both predicates match sign or if at least one of them is an
- /// equality comparison (which is signless).
+ /// PredicatesFoldable - Return true if both predicates match sign or if at
+ /// least one of them is an equality comparison (which is signless).
bool PredicatesFoldable(CmpInst::Predicate p1, CmpInst::Predicate p2);
- /// Decompose an icmp into the form ((X & Y) pred Z) if possible. The returned
- /// predicate is either == or !=. Returns false if decomposition fails.
- bool decomposeBitTestICmp(const ICmpInst *I, CmpInst::Predicate &Pred,
- Value *&X, Value *&Y, Value *&Z);
-
} // end namespace llvm
#endif
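A standalone model (plain C++, not the LLVM API) of the three-bit predicate code described above, with bit 0 meaning "A > B", bit 1 "A == B", and bit 2 "A < B"; folding an OR of two same-signed comparisons is then just an OR of their codes:

    #include <cassert>

    enum : unsigned { GT = 1u << 0, EQ = 1u << 1, LT = 1u << 2 };

    int main() {
      assert((LT | GT) == 5u);  // (A < B) | (A > B) --> code 5, i.e. A != B
      assert((LT | EQ) == 6u);  // (A < B) | (A == B) --> code 6, i.e. A <= B
      return 0;
    }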
diff --git a/gnu/llvm/lib/Analysis/SparsePropagation.cpp b/gnu/llvm/lib/Analysis/SparsePropagation.cpp
index 470f4bee1e0..f5a927b8052 100644
--- a/gnu/llvm/lib/Analysis/SparsePropagation.cpp
+++ b/gnu/llvm/lib/Analysis/SparsePropagation.cpp
@@ -195,7 +195,7 @@ void SparseSolver::getFeasibleSuccessors(TerminatorInst &TI,
Succs.assign(TI.getNumSuccessors(), true);
return;
}
- SwitchInst::CaseHandle Case = *SI.findCaseValue(cast<ConstantInt>(C));
+ SwitchInst::CaseIt Case = SI.findCaseValue(cast<ConstantInt>(C));
Succs[Case.getSuccessorIndex()] = true;
}
@@ -320,8 +320,8 @@ void SparseSolver::Solve(Function &F) {
// Notify all instructions in this basic block that they are newly
// executable.
- for (Instruction &I : *BB)
- visitInst(I);
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
+ visitInst(*I);
}
}
}
diff --git a/gnu/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp b/gnu/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
index 0e240f482a1..a506e0571c0 100644
--- a/gnu/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/gnu/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -1,4 +1,4 @@
-//===- LiveIntervalAnalysis.cpp - Live Interval Analysis ------------------===//
+//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,52 +7,38 @@
//
//===----------------------------------------------------------------------===//
//
-/// \file This file implements the LiveInterval analysis pass which is used
-/// by the Linear Scan Register allocator. This pass linearizes the
-/// basic blocks of the function in DFS order and computes live intervals for
-/// each virtual and physical register.
+// This file implements the LiveInterval analysis pass which is used
+// by the Linear Scan Register allocator. This pass linearizes the
+// basic blocks of the function in DFS order and uses the
+// LiveVariables pass to conservatively compute live intervals for
+// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "LiveRangeCalc.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveVariables.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineInstrBundle.h"
-#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/VirtRegMap.h"
-#include "llvm/MC/LaneBitmask.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/Pass.h"
+#include "llvm/IR/Value.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
-#include <cassert>
-#include <cstdint>
-#include <iterator>
-#include <tuple>
-#include <utility>
-
+#include <cmath>
+#include <limits>
using namespace llvm;
#define DEBUG_TYPE "regalloc"
@@ -62,6 +48,7 @@ char &llvm::LiveIntervalsID = LiveIntervals::ID;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
"Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
@@ -75,19 +62,25 @@ static cl::opt<bool> EnablePrecomputePhysRegs(
static bool EnablePrecomputePhysRegs = false;
#endif // NDEBUG
-namespace llvm {
+static cl::opt<bool> EnableSubRegLiveness(
+ "enable-subreg-liveness", cl::Hidden, cl::init(true),
+ cl::desc("Enable subregister liveness tracking."));
+namespace llvm {
cl::opt<bool> UseSegmentSetForPhysRegs(
"use-segment-set-for-physregs", cl::Hidden, cl::init(true),
cl::desc(
"Use segment set for the computation of the live ranges of physregs."));
-
-} // end namespace llvm
+}
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<AAResultsWrapperPass>();
AU.addPreserved<AAResultsWrapperPass>();
+ // LiveVariables isn't really required by this analysis; it is only required
+ // here to make sure it is live during TwoAddressInstructionPass and
+ // PHIElimination. This is temporary.
+ AU.addRequired<LiveVariables>();
AU.addPreserved<LiveVariables>();
AU.addPreservedID(MachineLoopInfoID);
AU.addRequiredTransitiveID(MachineDominatorsID);
@@ -97,7 +90,8 @@ void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
}
-LiveIntervals::LiveIntervals() : MachineFunctionPass(ID) {
+LiveIntervals::LiveIntervals() : MachineFunctionPass(ID),
+ DomTree(nullptr), LRCalc(nullptr) {
initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
}
@@ -114,14 +108,16 @@ void LiveIntervals::releaseMemory() {
RegMaskBits.clear();
RegMaskBlocks.clear();
- for (LiveRange *LR : RegUnitRanges)
- delete LR;
+ for (unsigned i = 0, e = RegUnitRanges.size(); i != e; ++i)
+ delete RegUnitRanges[i];
RegUnitRanges.clear();
// Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
VNInfoAllocator.Reset();
}
+/// runOnMachineFunction - calculates LiveIntervals
+///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
MF = &fn;
MRI = &MF->getRegInfo();
@@ -131,6 +127,9 @@ bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
Indexes = &getAnalysis<SlotIndexes>();
DomTree = &getAnalysis<MachineDominatorTree>();
+ if (EnableSubRegLiveness && MF->getSubtarget().enableSubRegLiveness())
+ MRI->enableSubRegLiveness(true);
+
if (!LRCalc)
LRCalc = new LiveRangeCalc();
@@ -151,13 +150,14 @@ bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
return true;
}
+/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
OS << "********** INTERVALS **********\n";
// Dump the regunits.
- for (unsigned Unit = 0, UnitE = RegUnitRanges.size(); Unit != UnitE; ++Unit)
- if (LiveRange *LR = RegUnitRanges[Unit])
- OS << PrintRegUnit(Unit, TRI) << ' ' << *LR << '\n';
+ for (unsigned i = 0, e = RegUnitRanges.size(); i != e; ++i)
+ if (LiveRange *LR = RegUnitRanges[i])
+ OS << PrintRegUnit(i, TRI) << ' ' << *LR << '\n';
// Dump the virtregs.
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
@@ -167,8 +167,8 @@ void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
}
OS << "RegMasks:";
- for (SlotIndex Idx : RegMaskSlots)
- OS << ' ' << Idx;
+ for (unsigned i = 0, e = RegMaskSlots.size(); i != e; ++i)
+ OS << ' ' << RegMaskSlots[i];
OS << '\n';
printInstrs(OS);
@@ -180,23 +180,33 @@ void LiveIntervals::printInstrs(raw_ostream &OS) const {
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-LLVM_DUMP_METHOD void LiveIntervals::dumpInstrs() const {
+void LiveIntervals::dumpInstrs() const {
printInstrs(dbgs());
}
#endif
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
- float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? huge_valf : 0.0F;
+ float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
+ llvm::huge_valf : 0.0F;
return new LiveInterval(reg, Weight);
}
-/// Compute the live interval of a virtual register, based on defs and uses.
+
+/// computeVirtRegInterval - Compute the live interval of a virtual register,
+/// based on defs and uses.
void LiveIntervals::computeVirtRegInterval(LiveInterval &LI) {
assert(LRCalc && "LRCalc not initialized.");
assert(LI.empty() && "Should only compute empty intervals.");
+ bool ShouldTrackSubRegLiveness = MRI->shouldTrackSubRegLiveness(LI.reg);
LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
- LRCalc->calculate(LI, MRI->shouldTrackSubRegLiveness(LI.reg));
- computeDeadValues(LI, nullptr);
+ LRCalc->calculate(LI, ShouldTrackSubRegLiveness);
+ bool SeparatedComponents = computeDeadValues(LI, nullptr);
+ if (SeparatedComponents) {
+ assert(ShouldTrackSubRegLiveness
+ && "Separated components should only occur for unused subreg defs");
+ SmallVector<LiveInterval*, 8> SplitLIs;
+ splitSeparateComponents(LI, SplitLIs);
+ }
}
void LiveIntervals::computeVirtRegs() {
@@ -212,7 +222,7 @@ void LiveIntervals::computeRegMasks() {
RegMaskBlocks.resize(MF->getNumBlockIDs());
// Find all instructions with regmask operands.
- for (const MachineBasicBlock &MBB : *MF) {
+ for (MachineBasicBlock &MBB : *MF) {
std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB.getNumber()];
RMB.first = RegMaskSlots.size();
@@ -222,22 +232,18 @@ void LiveIntervals::computeRegMasks() {
RegMaskBits.push_back(Mask);
}
- for (const MachineInstr &MI : MBB) {
+ for (MachineInstr &MI : MBB) {
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isRegMask())
continue;
- RegMaskSlots.push_back(Indexes->getInstructionIndex(MI).getRegSlot());
+ RegMaskSlots.push_back(Indexes->getInstructionIndex(&MI).getRegSlot());
RegMaskBits.push_back(MO.getRegMask());
}
}
- // Some block ends, such as funclet returns, create masks. Put the mask on
- // the last instruction of the block, because MBB slot index intervals are
- // half-open.
+ // Some block ends, such as funclet returns, create masks.
if (const uint32_t *Mask = MBB.getEndClobberMask(TRI)) {
- assert(!MBB.empty() && "empty return block?");
- RegMaskSlots.push_back(
- Indexes->getInstructionIndex(MBB.back()).getRegSlot());
+ RegMaskSlots.push_back(Indexes->getMBBEndIdx(&MBB));
RegMaskBits.push_back(Mask);
}
@@ -257,9 +263,9 @@ void LiveIntervals::computeRegMasks() {
// interference.
//
-/// Compute the live range of a register unit, based on the uses and defs of
-/// aliasing registers. The range should be empty, or contain only dead
-/// phi-defs from ABI blocks.
+/// computeRegUnitRange - Compute the live range of a register unit, based
+/// on the uses and defs of aliasing registers. The range should be empty,
+/// or contain only dead phi-defs from ABI blocks.
void LiveIntervals::computeRegUnitRange(LiveRange &LR, unsigned Unit) {
assert(LRCalc && "LRCalc not initialized.");
LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
@@ -269,34 +275,22 @@ void LiveIntervals::computeRegUnitRange(LiveRange &LR, unsigned Unit) {
// may share super-registers. That's OK because createDeadDefs() is
// idempotent. It is very rare for a register unit to have multiple roots, so
// uniquing super-registers is probably not worthwhile.
- bool IsReserved = false;
- for (MCRegUnitRootIterator Root(Unit, TRI); Root.isValid(); ++Root) {
- bool IsRootReserved = true;
- for (MCSuperRegIterator Super(*Root, TRI, /*IncludeSelf=*/true);
- Super.isValid(); ++Super) {
- unsigned Reg = *Super;
- if (!MRI->reg_empty(Reg))
- LRCalc->createDeadDefs(LR, Reg);
- // A register unit is considered reserved if all its roots and all their
- // super registers are reserved.
- if (!MRI->isReserved(Reg))
- IsRootReserved = false;
+ for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
+ for (MCSuperRegIterator Supers(*Roots, TRI, /*IncludeSelf=*/true);
+ Supers.isValid(); ++Supers) {
+ if (!MRI->reg_empty(*Supers))
+ LRCalc->createDeadDefs(LR, *Supers);
}
- IsReserved |= IsRootReserved;
}
- assert(IsReserved == MRI->isReservedRegUnit(Unit) &&
- "reserved computation mismatch");
// Now extend LR to reach all uses.
// Ignore uses of reserved registers. We only track defs of those.
- if (!IsReserved) {
- for (MCRegUnitRootIterator Root(Unit, TRI); Root.isValid(); ++Root) {
- for (MCSuperRegIterator Super(*Root, TRI, /*IncludeSelf=*/true);
- Super.isValid(); ++Super) {
- unsigned Reg = *Super;
- if (!MRI->reg_empty(Reg))
- LRCalc->extendToUses(LR, Reg);
- }
+ for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
+ for (MCSuperRegIterator Supers(*Roots, TRI, /*IncludeSelf=*/true);
+ Supers.isValid(); ++Supers) {
+ unsigned Reg = *Supers;
+ if (!MRI->isReserved(Reg) && !MRI->reg_empty(Reg))
+ LRCalc->extendToUses(LR, Reg);
}
}
@@ -305,9 +299,11 @@ void LiveIntervals::computeRegUnitRange(LiveRange &LR, unsigned Unit) {
LR.flushSegmentSet();
}
-/// Precompute the live ranges of any register units that are live-in to an ABI
-/// block somewhere. Register values can appear without a corresponding def when
-/// entering the entry block or a landing pad.
+
+/// computeLiveInRegUnits - Precompute the live ranges of any register units
+/// that are live-in to an ABI block somewhere. Register values can appear
+/// without a corresponding def when entering the entry block or a landing pad.
+///
void LiveIntervals::computeLiveInRegUnits() {
RegUnitRanges.resize(TRI->getNumRegUnits());
DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n");
@@ -316,15 +312,18 @@ void LiveIntervals::computeLiveInRegUnits() {
SmallVector<unsigned, 8> NewRanges;
// Check all basic blocks for live-ins.
- for (const MachineBasicBlock &MBB : *MF) {
+ for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
+ MFI != MFE; ++MFI) {
+ const MachineBasicBlock *MBB = &*MFI;
+
// We only care about ABI blocks: Entry + landing pads.
- if ((&MBB != &MF->front() && !MBB.isEHPad()) || MBB.livein_empty())
+ if ((MFI != MF->begin() && !MBB->isEHPad()) || MBB->livein_empty())
continue;
// Create phi-defs at Begin for all live-in registers.
- SlotIndex Begin = Indexes->getMBBStartIdx(&MBB);
- DEBUG(dbgs() << Begin << "\tBB#" << MBB.getNumber());
- for (const auto &LI : MBB.liveins()) {
+ SlotIndex Begin = Indexes->getMBBStartIdx(MBB);
+ DEBUG(dbgs() << Begin << "\tBB#" << MBB->getNumber());
+ for (const auto &LI : MBB->liveins()) {
for (MCRegUnitIterator Units(LI.PhysReg, TRI); Units.isValid(); ++Units) {
unsigned Unit = *Units;
LiveRange *LR = RegUnitRanges[Unit];
@@ -343,13 +342,16 @@ void LiveIntervals::computeLiveInRegUnits() {
DEBUG(dbgs() << "Created " << NewRanges.size() << " new intervals.\n");
// Compute the 'normal' part of the ranges.
- for (unsigned Unit : NewRanges)
+ for (unsigned i = 0, e = NewRanges.size(); i != e; ++i) {
+ unsigned Unit = NewRanges[i];
computeRegUnitRange(*RegUnitRanges[Unit], Unit);
+ }
}
+
static void createSegmentsForValues(LiveRange &LR,
- iterator_range<LiveInterval::vni_iterator> VNIs) {
- for (VNInfo *VNI : VNIs) {
+ iterator_range<LiveInterval::vni_iterator> VNIs) {
+ for (auto VNI : VNIs) {
if (VNI->isUnused())
continue;
SlotIndex Def = VNI->def;
@@ -357,7 +359,7 @@ static void createSegmentsForValues(LiveRange &LR,
}
}
-using ShrinkToUsesWorkList = SmallVector<std::pair<SlotIndex, VNInfo*>, 16>;
+typedef SmallVector<std::pair<SlotIndex, VNInfo*>, 16> ShrinkToUsesWorkList;
static void extendSegmentsToUses(LiveRange &LR, const SlotIndexes &Indexes,
ShrinkToUsesWorkList &WorkList,
@@ -365,7 +367,7 @@ static void extendSegmentsToUses(LiveRange &LR, const SlotIndexes &Indexes,
// Keep track of the PHIs that are in use.
SmallPtrSet<VNInfo*, 8> UsedPHIs;
// Blocks that have already been added to WorkList as live-out.
- SmallPtrSet<const MachineBasicBlock*, 16> LiveOut;
+ SmallPtrSet<MachineBasicBlock*, 16> LiveOut;
// Extend intervals to reach all uses in WorkList.
while (!WorkList.empty()) {
@@ -384,7 +386,7 @@ static void extendSegmentsToUses(LiveRange &LR, const SlotIndexes &Indexes,
!UsedPHIs.insert(VNI).second)
continue;
// The PHI is live, make sure the predecessors are live-out.
- for (const MachineBasicBlock *Pred : MBB->predecessors()) {
+ for (auto &Pred : MBB->predecessors()) {
if (!LiveOut.insert(Pred).second)
continue;
SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
@@ -400,7 +402,7 @@ static void extendSegmentsToUses(LiveRange &LR, const SlotIndexes &Indexes,
LR.addSegment(LiveRange::Segment(BlockStart, Idx, VNI));
// Make sure VNI is live-out from the predecessors.
- for (const MachineBasicBlock *Pred : MBB->predecessors()) {
+ for (auto &Pred : MBB->predecessors()) {
if (!LiveOut.insert(Pred).second)
continue;
SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
@@ -431,9 +433,11 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
ShrinkToUsesWorkList WorkList;
// Visit all instructions reading li->reg.
- unsigned Reg = li->reg;
- for (MachineInstr &UseMI : MRI->reg_instructions(Reg)) {
- if (UseMI.isDebugValue() || !UseMI.readsVirtualRegister(Reg))
+ for (MachineRegisterInfo::reg_instr_iterator
+ I = MRI->reg_instr_begin(li->reg), E = MRI->reg_instr_end();
+ I != E; ) {
+ MachineInstr *UseMI = &*(I++);
+ if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
continue;
SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
LiveQueryResult LRQ = li->Query(Idx);
@@ -442,9 +446,9 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
// This shouldn't happen: readsVirtualRegister returns true, but there is
// no live value. It is likely caused by a target getting <undef> flags
// wrong.
- DEBUG(dbgs() << Idx << '\t' << UseMI
+ DEBUG(dbgs() << Idx << '\t' << *UseMI
<< "Warning: Instr claims to read non-existent value in "
- << *li << '\n');
+ << *li << '\n');
continue;
}
// Special case: An early-clobber tied operand reads and writes the
@@ -472,7 +476,7 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
bool LiveIntervals::computeDeadValues(LiveInterval &LI,
SmallVectorImpl<MachineInstr*> *dead) {
bool MayHaveSplitComponents = false;
- for (VNInfo *VNI : LI.valnos) {
+ for (auto VNI : LI.valnos) {
if (VNI->isUnused())
continue;
SlotIndex Def = VNI->def;
@@ -481,11 +485,13 @@ bool LiveIntervals::computeDeadValues(LiveInterval &LI,
// Is the register live before? Otherwise we may have to add a read-undef
// flag for subregister defs.
+ bool DeadBeforeDef = false;
unsigned VReg = LI.reg;
if (MRI->shouldTrackSubRegLiveness(VReg)) {
if ((I == LI.begin() || std::prev(I)->end < Def) && !VNI->isPHIDef()) {
MachineInstr *MI = getInstructionFromIndex(Def);
MI->setRegisterDefReadUndef(VReg);
+ DeadBeforeDef = true;
}
}
@@ -501,7 +507,15 @@ bool LiveIntervals::computeDeadValues(LiveInterval &LI,
// This is a dead def. Make sure the instruction knows.
MachineInstr *MI = getInstructionFromIndex(Def);
assert(MI && "No instruction defining live value");
- MI->addRegisterDead(LI.reg, TRI);
+ MI->addRegisterDead(VReg, TRI);
+
+ // If we have a dead def that is completely separate from the rest of
+ // the liverange then we rewrite it to use a different VReg to not violate
+ // the rule that the liveness of a virtual register forms a connected
+ // component. This should only happen if subregister liveness is tracked.
+ if (DeadBeforeDef)
+ MayHaveSplitComponents = true;
+
if (dead && MI->allDefsAreDead()) {
DEBUG(dbgs() << "All defs dead: " << Def << '\t' << *MI);
dead->push_back(MI);
@@ -511,7 +525,8 @@ bool LiveIntervals::computeDeadValues(LiveInterval &LI,
return MayHaveSplitComponents;
}
-void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg) {
+void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg)
+{
DEBUG(dbgs() << "Shrink: " << SR << '\n');
assert(TargetRegisterInfo::isVirtualRegister(Reg)
&& "Can only shrink virtual registers");
@@ -520,20 +535,19 @@ void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg) {
// Visit all instructions reading Reg.
SlotIndex LastIdx;
- for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
- // Skip "undef" uses.
- if (!MO.readsReg())
+ for (MachineOperand &MO : MRI->reg_operands(Reg)) {
+ MachineInstr *UseMI = MO.getParent();
+ if (UseMI->isDebugValue())
continue;
// Maybe the operand is for a subregister we don't care about.
unsigned SubReg = MO.getSubReg();
if (SubReg != 0) {
LaneBitmask LaneMask = TRI->getSubRegIndexLaneMask(SubReg);
- if ((LaneMask & SR.LaneMask).none())
+ if ((LaneMask & SR.LaneMask) == 0)
continue;
}
// We only need to visit each instruction once.
- MachineInstr *UseMI = MO.getParent();
- SlotIndex Idx = getInstructionIndex(*UseMI).getRegSlot();
+ SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
if (Idx == LastIdx)
continue;
LastIdx = Idx;
@@ -562,7 +576,7 @@ void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg) {
SR.segments.swap(NewLR.segments);
// Remove dead PHI value numbers
- for (VNInfo *VNI : SR.valnos) {
+ for (auto VNI : SR.valnos) {
if (VNI->isUnused())
continue;
const LiveRange::Segment *Segment = SR.getSegmentContaining(VNI->def);
@@ -571,9 +585,9 @@ void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg) {
continue;
if (VNI->isPHIDef()) {
// This is a dead PHI. Remove it.
- DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
VNI->markUnused();
SR.removeSegment(*Segment);
+ DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
}
}
@@ -581,12 +595,11 @@ void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg) {
}
void LiveIntervals::extendToIndices(LiveRange &LR,
- ArrayRef<SlotIndex> Indices,
- ArrayRef<SlotIndex> Undefs) {
+ ArrayRef<SlotIndex> Indices) {
assert(LRCalc && "LRCalc not initialized.");
LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
- for (SlotIndex Idx : Indices)
- LRCalc->extend(LR, Idx, /*PhysReg=*/0, Undefs);
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i)
+ LRCalc->extend(LR, Indices[i]);
}
void LiveIntervals::pruneValue(LiveRange &LR, SlotIndex Kill,
@@ -613,11 +626,13 @@ void LiveIntervals::pruneValue(LiveRange &LR, SlotIndex Kill,
// Find all blocks that are reachable from KillMBB without leaving VNI's live
// range. It is possible that KillMBB itself is reachable, so start a DFS
// from each successor.
- using VisitedTy = df_iterator_default_set<MachineBasicBlock*,9>;
+ typedef SmallPtrSet<MachineBasicBlock*, 9> VisitedTy;
VisitedTy Visited;
- for (MachineBasicBlock *Succ : KillMBB->successors()) {
+ for (MachineBasicBlock::succ_iterator
+ SuccI = KillMBB->succ_begin(), SuccE = KillMBB->succ_end();
+ SuccI != SuccE; ++SuccI) {
for (df_ext_iterator<MachineBasicBlock*, VisitedTy>
- I = df_ext_begin(Succ, Visited), E = df_ext_end(Succ, Visited);
+ I = df_ext_begin(*SuccI, Visited), E = df_ext_end(*SuccI, Visited);
I != E;) {
MachineBasicBlock *MBB = *I;
@@ -669,9 +684,9 @@ void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
// Find the regunit intervals for the assigned register. They may overlap
// the virtual register live range, cancelling any kills.
RU.clear();
- for (MCRegUnitIterator Unit(VRM->getPhys(Reg), TRI); Unit.isValid();
- ++Unit) {
- const LiveRange &RURange = getRegUnit(*Unit);
+ for (MCRegUnitIterator Units(VRM->getPhys(Reg), TRI); Units.isValid();
+ ++Units) {
+ const LiveRange &RURange = getRegUnit(*Units);
if (RURange.empty())
continue;
RU.push_back(std::make_pair(&RURange, RURange.find(LI.begin()->end)));
@@ -730,7 +745,7 @@ void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
LaneBitmask DefinedLanesMask;
if (!SRs.empty()) {
// Compute a mask of lanes that are defined.
- DefinedLanesMask = LaneBitmask::getNone();
+ DefinedLanesMask = 0;
for (auto &SRP : SRs) {
const LiveInterval::SubRange &SR = *SRP.first;
LiveRange::const_iterator &I = SRP.second;
@@ -743,7 +758,7 @@ void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
DefinedLanesMask |= SR.LaneMask;
}
} else
- DefinedLanesMask = LaneBitmask::getAll();
+ DefinedLanesMask = ~0u;
bool IsFullWrite = false;
for (const MachineOperand &MO : MI->operands()) {
@@ -752,7 +767,7 @@ void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
if (MO.isUse()) {
// Reading any undefined lanes?
LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
- if ((UseMask & ~DefinedLanesMask).any())
+ if ((UseMask & ~DefinedLanesMask) != 0)
goto CancelKill;
} else if (MO.getSubReg() == 0) {
// Writing to the full register?
@@ -814,34 +829,38 @@ LiveIntervals::hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const {
// Conservatively return true instead of scanning huge predecessor lists.
if (PHIMBB->pred_size() > 100)
return true;
- for (const MachineBasicBlock *Pred : PHIMBB->predecessors())
- if (VNI == LI.getVNInfoBefore(Indexes->getMBBEndIdx(Pred)))
+ for (MachineBasicBlock::const_pred_iterator
+ PI = PHIMBB->pred_begin(), PE = PHIMBB->pred_end(); PI != PE; ++PI)
+ if (VNI == LI.getVNInfoBefore(Indexes->getMBBEndIdx(*PI)))
return true;
}
return false;
}
-float LiveIntervals::getSpillWeight(bool isDef, bool isUse,
- const MachineBlockFrequencyInfo *MBFI,
- const MachineInstr &MI) {
- BlockFrequency Freq = MBFI->getBlockFreq(MI.getParent());
+float
+LiveIntervals::getSpillWeight(bool isDef, bool isUse,
+ const MachineBlockFrequencyInfo *MBFI,
+ const MachineInstr *MI) {
+ BlockFrequency Freq = MBFI->getBlockFreq(MI->getParent());
const float Scale = 1.0f / MBFI->getEntryFreq();
return (isDef + isUse) * (Freq.getFrequency() * Scale);
}
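A worked example of the weight formula above, with made-up frequencies: an operand that is both a def and a use, in a block four times hotter than the entry block (512 vs. 128), gets weight (1 + 1) * 4 = 8:

    #include <cassert>
    #include <cstdint>

    static float spillWeight(bool isDef, bool isUse,
                             uint64_t blockFreq, uint64_t entryFreq) {
      const float scale = 1.0f / entryFreq;  // mirrors Scale above
      return (isDef + isUse) * (blockFreq * scale);
    }

    int main() {
      assert(spillWeight(true, true, 512, 128) == 8.0f);
      return 0;
    }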
LiveRange::Segment
-LiveIntervals::addSegmentToEndOfBlock(unsigned reg, MachineInstr &startInst) {
+LiveIntervals::addSegmentToEndOfBlock(unsigned reg, MachineInstr* startInst) {
LiveInterval& Interval = createEmptyInterval(reg);
- VNInfo *VN = Interval.getNextValue(
- SlotIndex(getInstructionIndex(startInst).getRegSlot()),
- getVNInfoAllocator());
- LiveRange::Segment S(SlotIndex(getInstructionIndex(startInst).getRegSlot()),
- getMBBEndIdx(startInst.getParent()), VN);
+ VNInfo* VN = Interval.getNextValue(
+ SlotIndex(getInstructionIndex(startInst).getRegSlot()),
+ getVNInfoAllocator());
+ LiveRange::Segment S(
+ SlotIndex(getInstructionIndex(startInst).getRegSlot()),
+ getMBBEndIdx(startInst->getParent()), VN);
Interval.addSegment(S);
return S;
}
+
//===----------------------------------------------------------------------===//
// Register mask functions
//===----------------------------------------------------------------------===//
@@ -874,7 +893,7 @@ bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
return false;
bool Found = false;
- while (true) {
+ for (;;) {
assert(*SlotI >= LiveI->start);
// Loop over all slots overlapping this segment.
while (*SlotI < LiveI->end) {
@@ -905,7 +924,7 @@ bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
// IntervalUpdate class.
//===----------------------------------------------------------------------===//
-/// Toolkit used by handleMove to trim or extend live intervals.
+// HMEditor is a toolkit used by handleMove to trim or extend live intervals.
class LiveIntervals::HMEditor {
private:
LiveIntervals& LIS;
@@ -928,7 +947,7 @@ public:
// kill flags. This is wasteful. Eventually, LiveVariables will strip all kill
// flags, and postRA passes will use a live register utility instead.
LiveRange *getRegUnitLI(unsigned Unit) {
- if (UpdateFlags && !MRI.isReservedRegUnit(Unit))
+ if (UpdateFlags)
return &LIS.getRegUnit(Unit);
return LIS.getCachedRegUnit(Unit);
}
@@ -943,13 +962,10 @@ public:
hasRegMask = true;
if (!MO.isReg())
continue;
- if (MO.isUse()) {
- if (!MO.readsReg())
- continue;
- // Aggressively clear all kill flags.
- // They are reinserted by VirtRegRewriter.
+ // Aggressively clear all kill flags.
+ // They are reinserted by VirtRegRewriter.
+ if (MO.isUse())
MO.setIsKill(false);
- }
unsigned Reg = MO.getReg();
if (!Reg)
@@ -958,15 +974,14 @@ public:
LiveInterval &LI = LIS.getInterval(Reg);
if (LI.hasSubRanges()) {
unsigned SubReg = MO.getSubReg();
- LaneBitmask LaneMask = SubReg ? TRI.getSubRegIndexLaneMask(SubReg)
- : MRI.getMaxLaneMaskForVReg(Reg);
+ LaneBitmask LaneMask = TRI.getSubRegIndexLaneMask(SubReg);
for (LiveInterval::SubRange &S : LI.subranges()) {
- if ((S.LaneMask & LaneMask).none())
+ if ((S.LaneMask & LaneMask) == 0)
continue;
updateRange(S, Reg, S.LaneMask);
}
}
- updateRange(LI, Reg, LaneBitmask::getNone());
+ updateRange(LI, Reg, 0);
continue;
}
@@ -974,7 +989,7 @@ public:
// precomputed live range.
for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
if (LiveRange *LR = getRegUnitLI(*Units))
- updateRange(*LR, *Units, LaneBitmask::getNone());
+ updateRange(*LR, *Units, 0);
}
if (hasRegMask)
updateRegMaskSlots();
@@ -990,7 +1005,7 @@ private:
dbgs() << " ";
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
dbgs() << PrintReg(Reg);
- if (LaneMask.any())
+ if (LaneMask != 0)
dbgs() << " L" << PrintLaneMask(LaneMask);
} else {
dbgs() << PrintRegUnit(Reg, &TRI);
@@ -1006,300 +1021,172 @@ private:
}
/// Update LR to reflect that an instruction has been moved downwards from OldIdx
- /// to NewIdx (OldIdx < NewIdx).
+ /// to NewIdx.
+ ///
+ /// 1. Live def at OldIdx:
+ /// Move def to NewIdx, assert endpoint after NewIdx.
+ ///
+ /// 2. Live def at OldIdx, killed at NewIdx:
+ /// Change to dead def at NewIdx.
+ /// (Happens when bundling def+kill together).
+ ///
+ /// 3. Dead def at OldIdx:
+ /// Move def to NewIdx, possibly across another live value.
+ ///
+ /// 4. Def at OldIdx AND at NewIdx:
+ /// Remove segment [OldIdx;NewIdx) and value defined at OldIdx.
+ /// (Happens when bundling multiple defs together).
+ ///
+ /// 5. Value read at OldIdx, killed before NewIdx:
+ /// Extend kill to NewIdx.
+ ///
void handleMoveDown(LiveRange &LR) {
+ // First look for a kill at OldIdx.
+ LiveRange::iterator I = LR.find(OldIdx.getBaseIndex());
LiveRange::iterator E = LR.end();
- // Segment going into OldIdx.
- LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());
-
- // No value live before or after OldIdx? Nothing to do.
- if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
+ // Is LR even live at OldIdx?
+ if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start))
return;
- LiveRange::iterator OldIdxOut;
- // Do we have a value live-in to OldIdx?
- if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
+ // Handle a live-in value.
+ if (!SlotIndex::isSameInstr(I->start, OldIdx)) {
+ bool isKill = SlotIndex::isSameInstr(OldIdx, I->end);
// If the live-in value already extends to NewIdx, there is nothing to do.
- if (SlotIndex::isEarlierEqualInstr(NewIdx, OldIdxIn->end))
+ if (!SlotIndex::isEarlierInstr(I->end, NewIdx))
return;
// Aggressively remove all kill flags from the old kill point.
// Kill flags shouldn't be used while live intervals exist, they will be
// reinserted by VirtRegRewriter.
- if (MachineInstr *KillMI = LIS.getInstructionFromIndex(OldIdxIn->end))
- for (MIBundleOperands MO(*KillMI); MO.isValid(); ++MO)
+ if (MachineInstr *KillMI = LIS.getInstructionFromIndex(I->end))
+ for (MIBundleOperands MO(KillMI); MO.isValid(); ++MO)
if (MO->isReg() && MO->isUse())
MO->setIsKill(false);
-
- // Is there a def before NewIdx which is not OldIdx?
- LiveRange::iterator Next = std::next(OldIdxIn);
- if (Next != E && !SlotIndex::isSameInstr(OldIdx, Next->start) &&
- SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
- // If we are here then OldIdx was just a use but not a def. We only have
- // to ensure liveness extends to NewIdx.
- LiveRange::iterator NewIdxIn =
- LR.advanceTo(Next, NewIdx.getBaseIndex());
- // Extend the segment before NewIdx if necessary.
- if (NewIdxIn == E ||
- !SlotIndex::isEarlierInstr(NewIdxIn->start, NewIdx)) {
- LiveRange::iterator Prev = std::prev(NewIdxIn);
- Prev->end = NewIdx.getRegSlot();
- }
- // Extend OldIdxIn.
- OldIdxIn->end = Next->start;
- return;
- }
-
- // Adjust OldIdxIn->end to reach NewIdx. This may temporarily make LR
- // invalid by overlapping ranges.
- bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
- OldIdxIn->end = NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber());
- // If this was not a kill, then there was no def and we're done.
+ // Adjust I->end to reach NewIdx. This may temporarily make LR invalid by
+ // overlapping ranges. Case 5 above.
+ I->end = NewIdx.getRegSlot(I->end.isEarlyClobber());
+ // If this was a kill, there may also be a def. Otherwise we're done.
if (!isKill)
return;
-
- // Did we have a Def at OldIdx?
- OldIdxOut = Next;
- if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
- return;
- } else {
- OldIdxOut = OldIdxIn;
+ ++I;
}
- // If we are here then there is a Definition at OldIdx. OldIdxOut points
- // to the segment starting there.
- assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
- "No def?");
- VNInfo *OldIdxVNI = OldIdxOut->valno;
- assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");
-
- // If the defined value extends beyond NewIdx, just move the beginning
- // of the segment to NewIdx.
- SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
- if (SlotIndex::isEarlierInstr(NewIdxDef, OldIdxOut->end)) {
- OldIdxVNI->def = NewIdxDef;
- OldIdxOut->start = OldIdxVNI->def;
+ // Check for a def at OldIdx.
+ if (I == E || !SlotIndex::isSameInstr(OldIdx, I->start))
return;
- }
-
- // If we are here then we have a Definition at OldIdx which ends before
- // NewIdx.
-
- // Is there an existing Def at NewIdx?
- LiveRange::iterator AfterNewIdx
- = LR.advanceTo(OldIdxOut, NewIdx.getRegSlot());
- bool OldIdxDefIsDead = OldIdxOut->end.isDead();
- if (!OldIdxDefIsDead &&
- SlotIndex::isEarlierInstr(OldIdxOut->end, NewIdxDef)) {
- // OldIdx is not a dead def, and NewIdxDef is inside a new interval.
- VNInfo *DefVNI;
- if (OldIdxOut != LR.begin() &&
- !SlotIndex::isEarlierInstr(std::prev(OldIdxOut)->end,
- OldIdxOut->start)) {
- // There is no gap between OldIdxOut and its predecessor anymore,
- // merge them.
- LiveRange::iterator IPrev = std::prev(OldIdxOut);
- DefVNI = OldIdxVNI;
- IPrev->end = OldIdxOut->end;
- } else {
- // The value is live in to OldIdx
- LiveRange::iterator INext = std::next(OldIdxOut);
- assert(INext != E && "Must have following segment");
- // We merge OldIdxOut and its successor. As we're dealing with subreg
- // reordering, there is always a successor to OldIdxOut in the same BB
- // We don't need INext->valno anymore and will reuse for the new segment
- // we create later.
- DefVNI = OldIdxVNI;
- INext->start = OldIdxOut->end;
- INext->valno->def = INext->start;
- }
- // If NewIdx is behind the last segment, extend that and append a new one.
- if (AfterNewIdx == E) {
- // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
- // one position.
- // |- ?/OldIdxOut -| |- X0 -| ... |- Xn -| end
- // => |- X0/OldIdxOut -| ... |- Xn -| |- undef/NewS -| end
- std::copy(std::next(OldIdxOut), E, OldIdxOut);
- // The last segment is undefined now, reuse it for a dead def.
- LiveRange::iterator NewSegment = std::prev(E);
- *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
- DefVNI);
- DefVNI->def = NewIdxDef;
-
- LiveRange::iterator Prev = std::prev(NewSegment);
- Prev->end = NewIdxDef;
- } else {
- // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
- // one position.
- // |- ?/OldIdxOut -| |- X0 -| ... |- Xn/AfterNewIdx -| |- Next -|
- // => |- X0/OldIdxOut -| ... |- Xn -| |- Xn/AfterNewIdx -| |- Next -|
- std::copy(std::next(OldIdxOut), std::next(AfterNewIdx), OldIdxOut);
- LiveRange::iterator Prev = std::prev(AfterNewIdx);
- // We have two cases:
- if (SlotIndex::isEarlierInstr(Prev->start, NewIdxDef)) {
- // Case 1: NewIdx is inside a liverange. Split this liverange at
- // NewIdxDef into the segment "Prev" followed by "NewSegment".
- LiveRange::iterator NewSegment = AfterNewIdx;
- *NewSegment = LiveRange::Segment(NewIdxDef, Prev->end, Prev->valno);
- Prev->valno->def = NewIdxDef;
-
- *Prev = LiveRange::Segment(Prev->start, NewIdxDef, DefVNI);
- DefVNI->def = Prev->start;
- } else {
- // Case 2: NewIdx is in a lifetime hole. Keep AfterNewIdx as is and
- // turn Prev into a segment from NewIdx to AfterNewIdx->start.
- *Prev = LiveRange::Segment(NewIdxDef, AfterNewIdx->start, DefVNI);
- DefVNI->def = NewIdxDef;
- assert(DefVNI != AfterNewIdx->valno);
- }
- }
+ // We have a def at OldIdx.
+ VNInfo *DefVNI = I->valno;
+ assert(DefVNI->def == I->start && "Inconsistent def");
+ DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber());
+ // If the defined value extends beyond NewIdx, just move the def down.
+ // This is case 1 above.
+ if (SlotIndex::isEarlierInstr(NewIdx, I->end)) {
+ I->start = DefVNI->def;
return;
}
-
- if (AfterNewIdx != E &&
- SlotIndex::isSameInstr(AfterNewIdx->start, NewIdxDef)) {
- // There is an existing def at NewIdx. The def at OldIdx is coalesced into
- // that value.
- assert(AfterNewIdx->valno != OldIdxVNI && "Multiple defs of value?");
- LR.removeValNo(OldIdxVNI);
- } else {
- // There was no existing def at NewIdx. We need to create a dead def
- // at NewIdx. Shift segments over the old OldIdxOut segment, this frees
- // a new segment at the place where we want to construct the dead def.
- // |- OldIdxOut -| |- X0 -| ... |- Xn -| |- AfterNewIdx -|
- // => |- X0/OldIdxOut -| ... |- Xn -| |- undef/NewS. -| |- AfterNewIdx -|
- assert(AfterNewIdx != OldIdxOut && "Inconsistent iterators");
- std::copy(std::next(OldIdxOut), AfterNewIdx, OldIdxOut);
- // We can reuse OldIdxVNI now.
- LiveRange::iterator NewSegment = std::prev(AfterNewIdx);
- VNInfo *NewSegmentVNI = OldIdxVNI;
- NewSegmentVNI->def = NewIdxDef;
- *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
- NewSegmentVNI);
+ // The remaining possibilities are now:
+ // 2. Live def at OldIdx, killed at NewIdx: isSameInstr(I->end, NewIdx).
+ // 3. Dead def at OldIdx: I->end = OldIdx.getDeadSlot().
+ // In either case, it is possible that there is an existing def at NewIdx.
+ assert((I->end == OldIdx.getDeadSlot() ||
+ SlotIndex::isSameInstr(I->end, NewIdx)) &&
+ "Cannot move def below kill");
+ LiveRange::iterator NewI = LR.advanceTo(I, NewIdx.getRegSlot());
+ if (NewI != E && SlotIndex::isSameInstr(NewI->start, NewIdx)) {
+ // There is an existing def at NewIdx, case 4 above. The def at OldIdx is
+ // coalesced into that value.
+ assert(NewI->valno != DefVNI && "Multiple defs of value?");
+ LR.removeValNo(DefVNI);
+ return;
}
+ // There was no existing def at NewIdx. Turn *I into a dead def at NewIdx.
+ // If the def at OldIdx was dead, we allow it to be moved across other LR
+ // values. The new range should be placed immediately before NewI, move any
+ // intermediate ranges up.
+ assert(NewI != I && "Inconsistent iterators");
+ std::copy(std::next(I), NewI, I);
+ *std::prev(NewI)
+ = LiveRange::Segment(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
}
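A toy model (plain structs, not LLVM's LiveRange) of case 5 in the comment above handleMoveDown: a value whose last use sits at OldIdx has its segment end extended to NewIdx when that instruction moves down. Segments are half-open [start, end):

    #include <cstdio>

    struct Segment { unsigned start, end; };

    static void moveKillDown(Segment &S, unsigned OldIdx, unsigned NewIdx) {
      if (S.end == OldIdx && NewIdx > OldIdx)
        S.end = NewIdx;  // extend the kill to the instruction's new slot
    }

    int main() {
      Segment S{4, 16};        // value live on [4,16), killed at 16
      moveKillDown(S, 16, 24); // the instruction at 16 moves down to 24
      std::printf("[%u,%u)\n", S.start, S.end);  // prints: [4,24)
      return 0;
    }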
/// Update LR to reflect that an instruction has been moved upwards from OldIdx
- /// to NewIdx (NewIdx < OldIdx).
+ /// to NewIdx.
+ ///
+ /// 1. Live def at OldIdx:
+ /// Hoist def to NewIdx.
+ ///
+ /// 2. Dead def at OldIdx:
+ /// Hoist def+end to NewIdx, possibly move across other values.
+ ///
+ /// 3. Dead def at OldIdx AND existing def at NewIdx:
+ /// Remove value defined at OldIdx, coalescing it with existing value.
+ ///
+ /// 4. Live def at OldIdx AND existing def at NewIdx:
+ /// Remove value defined at NewIdx, hoist OldIdx def to NewIdx.
+ /// (Happens when bundling multiple defs together).
+ ///
+ /// 5. Value killed at OldIdx:
+ /// Hoist kill to NewIdx, then scan for last kill between NewIdx and
+ /// OldIdx.
+ ///
void handleMoveUp(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
+ // First look for a kill at OldIdx.
+ LiveRange::iterator I = LR.find(OldIdx.getBaseIndex());
LiveRange::iterator E = LR.end();
- // Segment going into OldIdx.
- LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());
-
- // No value live before or after OldIdx? Nothing to do.
- if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
+ // Is LR even live at OldIdx?
+ if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start))
return;
- LiveRange::iterator OldIdxOut;
- // Do we have a value live-in to OldIdx?
- if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
- // If the live-in value isn't killed here, then we have no Def at
- // OldIdx, moreover the value must be live at NewIdx so there is nothing
- // to do.
- bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
- if (!isKill)
+ // Handle a live-in value.
+ if (!SlotIndex::isSameInstr(I->start, OldIdx)) {
+ // If the live-in value isn't killed here, there is nothing to do.
+ if (!SlotIndex::isSameInstr(OldIdx, I->end))
return;
-
- // At this point we have to move OldIdxIn->end back to the nearest
- // previous use or (dead-)def but no further than NewIdx.
- SlotIndex DefBeforeOldIdx
- = std::max(OldIdxIn->start.getDeadSlot(),
- NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber()));
- OldIdxIn->end = findLastUseBefore(DefBeforeOldIdx, Reg, LaneMask);
-
- // Did we have a Def at OldIdx? If not we are done now.
- OldIdxOut = std::next(OldIdxIn);
- if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
+ // Adjust I->end to end at NewIdx. If we are hoisting a kill above
+ // another use, we need to search for that use. Case 5 above.
+ I->end = NewIdx.getRegSlot(I->end.isEarlyClobber());
+ ++I;
+ // If OldIdx also defines a value, there couldn't have been another use.
+ if (I == E || !SlotIndex::isSameInstr(I->start, OldIdx)) {
+ // No def, search for the new kill.
+ // This can never be an early clobber kill since there is no def.
+ std::prev(I)->end = findLastUseBefore(Reg, LaneMask).getRegSlot();
return;
- } else {
- OldIdxOut = OldIdxIn;
- OldIdxIn = OldIdxOut != LR.begin() ? std::prev(OldIdxOut) : E;
+ }
}
- // If we are here then there is a Definition at OldIdx. OldIdxOut points
- // to the segment starting there.
- assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
- "No def?");
- VNInfo *OldIdxVNI = OldIdxOut->valno;
- assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");
- bool OldIdxDefIsDead = OldIdxOut->end.isDead();
-
- // Is there an existing def at NewIdx?
- SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
- LiveRange::iterator NewIdxOut = LR.find(NewIdx.getRegSlot());
- if (SlotIndex::isSameInstr(NewIdxOut->start, NewIdx)) {
- assert(NewIdxOut->valno != OldIdxVNI &&
- "Same value defined more than once?");
- // If OldIdx was a dead def remove it.
- if (!OldIdxDefIsDead) {
- // Remove segment starting at NewIdx and move begin of OldIdxOut to
- // NewIdx so it can take its place.
- OldIdxVNI->def = NewIdxDef;
- OldIdxOut->start = NewIdxDef;
- LR.removeValNo(NewIdxOut->valno);
- } else {
- // Simply remove the dead def at OldIdx.
- LR.removeValNo(OldIdxVNI);
- }
- } else {
- // Previously nothing was live after NewIdx, so all we have to do now is
- // move the begin of OldIdxOut to NewIdx.
- if (!OldIdxDefIsDead) {
- // Do we have any intermediate Defs between OldIdx and NewIdx?
- if (OldIdxIn != E &&
- SlotIndex::isEarlierInstr(NewIdxDef, OldIdxIn->start)) {
- // OldIdx is not a dead def and NewIdx is before predecessor start.
- LiveRange::iterator NewIdxIn = NewIdxOut;
- assert(NewIdxIn == LR.find(NewIdx.getBaseIndex()));
- const SlotIndex SplitPos = NewIdxDef;
- OldIdxVNI = OldIdxIn->valno;
-
- // Merge the OldIdxIn and OldIdxOut segments into OldIdxOut.
- OldIdxOut->valno->def = OldIdxIn->start;
- *OldIdxOut = LiveRange::Segment(OldIdxIn->start, OldIdxOut->end,
- OldIdxOut->valno);
- // OldIdxIn and OldIdxVNI are now undef and can be overridden.
-        // We slide [NewIdxIn, OldIdxIn) down one position.
-        //    |- X0/NewIdxIn -| ... |- Xn-1 -||- Xn/OldIdxIn -||- OldIdxOut -|
-        // => |- undef/NewIdxIn -| |- X0 -| ... |- Xn-1 -| |- Xn/OldIdxOut -|
- std::copy_backward(NewIdxIn, OldIdxIn, OldIdxOut);
- // NewIdxIn is now considered undef so we can reuse it for the moved
- // value.
- LiveRange::iterator NewSegment = NewIdxIn;
- LiveRange::iterator Next = std::next(NewSegment);
- if (SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
- // There is no gap between NewSegment and its predecessor.
- *NewSegment = LiveRange::Segment(Next->start, SplitPos,
- Next->valno);
- *Next = LiveRange::Segment(SplitPos, Next->end, OldIdxVNI);
- Next->valno->def = SplitPos;
- } else {
-          // There is a gap between NewSegment and its predecessor, so the
-          // value becomes live-in.
- *NewSegment = LiveRange::Segment(SplitPos, Next->start, OldIdxVNI);
- NewSegment->valno->def = SplitPos;
- }
- } else {
- // Leave the end point of a live def.
- OldIdxOut->start = NewIdxDef;
- OldIdxVNI->def = NewIdxDef;
- if (OldIdxIn != E && SlotIndex::isEarlierInstr(NewIdx, OldIdxIn->end))
- OldIdxIn->end = NewIdx.getRegSlot();
- }
- } else {
- // OldIdxVNI is a dead def. It may have been moved across other values
- // in LR, so move OldIdxOut up to NewIdxOut. Slide [NewIdxOut;OldIdxOut)
- // down one position.
- // |- X0/NewIdxOut -| ... |- Xn-1 -| |- Xn/OldIdxOut -| |- next - |
- // => |- undef/NewIdxOut -| |- X0 -| ... |- Xn-1 -| |- next -|
- std::copy_backward(NewIdxOut, OldIdxOut, std::next(OldIdxOut));
- // OldIdxVNI can be reused now to build a new dead def segment.
- LiveRange::iterator NewSegment = NewIdxOut;
- VNInfo *NewSegmentVNI = OldIdxVNI;
- *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
- NewSegmentVNI);
- NewSegmentVNI->def = NewIdxDef;
+ // Now deal with the def at OldIdx.
+ assert(I != E && SlotIndex::isSameInstr(I->start, OldIdx) && "No def?");
+ VNInfo *DefVNI = I->valno;
+ assert(DefVNI->def == I->start && "Inconsistent def");
+ DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber());
+
+ // Check for an existing def at NewIdx.
+ LiveRange::iterator NewI = LR.find(NewIdx.getRegSlot());
+ if (SlotIndex::isSameInstr(NewI->start, NewIdx)) {
+ assert(NewI->valno != DefVNI && "Same value defined more than once?");
+ // There is an existing def at NewIdx.
+ if (I->end.isDead()) {
+ // Case 3: Remove the dead def at OldIdx.
+ LR.removeValNo(DefVNI);
+ return;
}
+ // Case 4: Replace def at NewIdx with live def at OldIdx.
+ I->start = DefVNI->def;
+ LR.removeValNo(NewI->valno);
+ return;
+ }
+
+ // There is no existing def at NewIdx. Hoist DefVNI.
+ if (!I->end.isDead()) {
+ // Leave the end point of a live def.
+ I->start = DefVNI->def;
+ return;
}
+
+ // DefVNI is a dead def. It may have been moved across other values in LR,
+ // so move I up to NewI. Slide [NewI;I) down one position.
+ std::copy_backward(NewI, I, std::next(I));
+ *NewI = LiveRange::Segment(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
}
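The "slide [NewI;I) down one position" step used for the dead def in case 2
is the subtle part of this function. A minimal, self-contained sketch of the
std::copy_backward idiom, using a toy Seg type with plain integer slots in
place of the real LiveRange::Segment/SlotIndex types (all values here are
hypothetical, chosen only for illustration):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Seg { int start, end, id; };  // stand-in for LiveRange::Segment

    int main() {
      // Value 9 is a dead def at OldIdx (slot 8) that must be hoisted to
      // NewIdx (slot 2), across the two intermediate values 2 and 3.
      std::vector<Seg> LR = {{0,1,1}, {2,3,2}, {4,5,3}, {8,9,9}};
      auto NewI = LR.begin() + 1;  // what LR.find(NewIdx.getRegSlot()) yields
      auto I    = LR.begin() + 3;  // segment starting at OldIdx
      Seg Moved = *I;
      // Slide [NewI, I) down one position; *NewI becomes dead storage.
      std::copy_backward(NewI, I, std::next(I));
      // Reuse the freed slot for the hoisted dead def at NewIdx.
      *NewI = Seg{2, 2, Moved.id};
      assert(LR[1].id == 9 && LR[2].id == 2 && LR[3].id == 3);
      return 0;
    }

The dead-def path above performs exactly this slide before rebuilding the
segment at NewIdx with the reused VNInfo.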
void updateRegMaskSlots() {
@@ -1318,31 +1205,29 @@ private:
}
// Return the last use of reg between NewIdx and OldIdx.
- SlotIndex findLastUseBefore(SlotIndex Before, unsigned Reg,
- LaneBitmask LaneMask) {
+ SlotIndex findLastUseBefore(unsigned Reg, LaneBitmask LaneMask) {
+
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
- SlotIndex LastUse = Before;
+ SlotIndex LastUse = NewIdx;
for (MachineOperand &MO : MRI.use_nodbg_operands(Reg)) {
- if (MO.isUndef())
- continue;
unsigned SubReg = MO.getSubReg();
- if (SubReg != 0 && LaneMask.any()
- && (TRI.getSubRegIndexLaneMask(SubReg) & LaneMask).none())
+ if (SubReg != 0 && LaneMask != 0
+ && (TRI.getSubRegIndexLaneMask(SubReg) & LaneMask) == 0)
continue;
- const MachineInstr &MI = *MO.getParent();
+ const MachineInstr *MI = MO.getParent();
SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
if (InstSlot > LastUse && InstSlot < OldIdx)
- LastUse = InstSlot.getRegSlot();
+ LastUse = InstSlot;
}
return LastUse;
}
// This is a regunit interval, so scanning the use list could be very
// expensive. Scan upwards from OldIdx instead.
- assert(Before < OldIdx && "Expected upwards move");
+ assert(NewIdx < OldIdx && "Expected upwards move");
SlotIndexes *Indexes = LIS.getSlotIndexes();
- MachineBasicBlock *MBB = Indexes->getMBBFromIndex(Before);
+ MachineBasicBlock *MBB = Indexes->getMBBFromIndex(NewIdx);
// OldIdx may not correspond to an instruction any longer, so set MII to
// point to the next instruction after OldIdx, or MBB->end().
@@ -1356,44 +1241,44 @@ private:
while (MII != Begin) {
if ((--MII)->isDebugValue())
continue;
- SlotIndex Idx = Indexes->getInstructionIndex(*MII);
+ SlotIndex Idx = Indexes->getInstructionIndex(MII);
- // Stop searching when Before is reached.
- if (!SlotIndex::isEarlierInstr(Before, Idx))
- return Before;
+ // Stop searching when NewIdx is reached.
+ if (!SlotIndex::isEarlierInstr(NewIdx, Idx))
+ return NewIdx;
// Check if MII uses Reg.
- for (MIBundleOperands MO(*MII); MO.isValid(); ++MO)
- if (MO->isReg() && !MO->isUndef() &&
+ for (MIBundleOperands MO(MII); MO.isValid(); ++MO)
+ if (MO->isReg() &&
TargetRegisterInfo::isPhysicalRegister(MO->getReg()) &&
TRI.hasRegUnit(MO->getReg(), Reg))
- return Idx.getRegSlot();
+ return Idx;
}
- // Didn't reach Before. It must be the first instruction in the block.
- return Before;
+ // Didn't reach NewIdx. It must be the first instruction in the block.
+ return NewIdx;
}
};
-void LiveIntervals::handleMove(MachineInstr &MI, bool UpdateFlags) {
- assert(!MI.isBundled() && "Can't handle bundled instructions yet.");
+void LiveIntervals::handleMove(MachineInstr* MI, bool UpdateFlags) {
+ assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
Indexes->removeMachineInstrFromMaps(MI);
SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI);
- assert(getMBBStartIdx(MI.getParent()) <= OldIndex &&
- OldIndex < getMBBEndIdx(MI.getParent()) &&
+ assert(getMBBStartIdx(MI->getParent()) <= OldIndex &&
+ OldIndex < getMBBEndIdx(MI->getParent()) &&
"Cannot handle moves across basic block boundaries.");
HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
- HME.updateAllRanges(&MI);
+ HME.updateAllRanges(MI);
}
-void LiveIntervals::handleMoveIntoBundle(MachineInstr &MI,
- MachineInstr &BundleStart,
+void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI,
+ MachineInstr* BundleStart,
bool UpdateFlags) {
SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart);
HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
- HME.updateAllRanges(&MI);
+ HME.updateAllRanges(MI);
}
void LiveIntervals::repairOldRegInRange(const MachineBasicBlock::iterator Begin,
@@ -1403,11 +1288,6 @@ void LiveIntervals::repairOldRegInRange(const MachineBasicBlock::iterator Begin,
LaneBitmask LaneMask) {
LiveInterval::iterator LII = LR.find(endIdx);
SlotIndex lastUseIdx;
- if (LII == LR.begin()) {
- // This happens when the function is called for a subregister that only
- // occurs _after_ the range that is to be repaired.
- return;
- }
if (LII != LR.end() && LII->start < endIdx)
lastUseIdx = LII->end;
else
@@ -1415,8 +1295,8 @@ void LiveIntervals::repairOldRegInRange(const MachineBasicBlock::iterator Begin,
for (MachineBasicBlock::iterator I = End; I != Begin;) {
--I;
- MachineInstr &MI = *I;
- if (MI.isDebugValue())
+ MachineInstr *MI = I;
+ if (MI->isDebugValue())
continue;
SlotIndex instrIdx = getInstructionIndex(MI);
@@ -1425,16 +1305,15 @@ void LiveIntervals::repairOldRegInRange(const MachineBasicBlock::iterator Begin,
// FIXME: This doesn't currently handle early-clobber or multiple removed
// defs inside of the region to repair.
- for (MachineInstr::mop_iterator OI = MI.operands_begin(),
- OE = MI.operands_end();
- OI != OE; ++OI) {
+ for (MachineInstr::mop_iterator OI = MI->operands_begin(),
+ OE = MI->operands_end(); OI != OE; ++OI) {
const MachineOperand &MO = *OI;
if (!MO.isReg() || MO.getReg() != Reg)
continue;
unsigned SubReg = MO.getSubReg();
LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubReg);
- if ((Mask & LaneMask).none())
+ if ((Mask & LaneMask) == 0)
continue;
if (MO.isDef()) {
@@ -1497,27 +1376,26 @@ LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB,
ArrayRef<unsigned> OrigRegs) {
// Find anchor points, which are at the beginning/end of blocks or at
// instructions that already have indexes.
- while (Begin != MBB->begin() && !Indexes->hasIndex(*Begin))
+ while (Begin != MBB->begin() && !Indexes->hasIndex(Begin))
--Begin;
- while (End != MBB->end() && !Indexes->hasIndex(*End))
+ while (End != MBB->end() && !Indexes->hasIndex(End))
++End;
SlotIndex endIdx;
if (End == MBB->end())
endIdx = getMBBEndIdx(MBB).getPrevSlot();
else
- endIdx = getInstructionIndex(*End);
+ endIdx = getInstructionIndex(End);
Indexes->repairIndexesInRange(MBB, Begin, End);
for (MachineBasicBlock::iterator I = End; I != Begin;) {
--I;
- MachineInstr &MI = *I;
- if (MI.isDebugValue())
+ MachineInstr *MI = I;
+ if (MI->isDebugValue())
continue;
- for (MachineInstr::const_mop_iterator MOI = MI.operands_begin(),
- MOE = MI.operands_end();
- MOI != MOE; ++MOI) {
+ for (MachineInstr::const_mop_iterator MOI = MI->operands_begin(),
+ MOE = MI->operands_end(); MOI != MOE; ++MOI) {
if (MOI->isReg() &&
TargetRegisterInfo::isVirtualRegister(MOI->getReg()) &&
!hasInterval(MOI->getReg())) {
@@ -1526,7 +1404,8 @@ LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB,
}
}
- for (unsigned Reg : OrigRegs) {
+ for (unsigned i = 0, e = OrigRegs.size(); i != e; ++i) {
+ unsigned Reg = OrigRegs[i];
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
@@ -1535,35 +1414,31 @@ LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB,
if (!LI.hasAtLeastOneValue())
continue;
- for (LiveInterval::SubRange &S : LI.subranges())
+ for (LiveInterval::SubRange &S : LI.subranges()) {
repairOldRegInRange(Begin, End, endIdx, S, Reg, S.LaneMask);
-
+ }
repairOldRegInRange(Begin, End, endIdx, LI, Reg);
}
}
void LiveIntervals::removePhysRegDefAt(unsigned Reg, SlotIndex Pos) {
- for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit) {
- if (LiveRange *LR = getCachedRegUnit(*Unit))
+ for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
+ if (LiveRange *LR = getCachedRegUnit(*Units))
if (VNInfo *VNI = LR->getVNInfoAt(Pos))
LR->removeValNo(VNI);
}
}
void LiveIntervals::removeVRegDefAt(LiveInterval &LI, SlotIndex Pos) {
- // LI may not have the main range computed yet, but its subranges may
- // be present.
VNInfo *VNI = LI.getVNInfoAt(Pos);
- if (VNI != nullptr) {
- assert(VNI->def.getBaseIndex() == Pos.getBaseIndex());
- LI.removeValNo(VNI);
- }
+ if (VNI == nullptr)
+ return;
+ LI.removeValNo(VNI);
- // Also remove the value defined in subranges.
+ // Also remove the value in subranges.
for (LiveInterval::SubRange &S : LI.subranges()) {
if (VNInfo *SVNI = S.getVNInfoAt(Pos))
- if (SVNI->def.getBaseIndex() == Pos.getBaseIndex())
- S.removeValNo(SVNI);
+ S.removeValNo(SVNI);
}
LI.removeEmptySubRanges();
}
@@ -1584,9 +1459,3 @@ void LiveIntervals::splitSeparateComponents(LiveInterval &LI,
}
ConEQ.Distribute(LI, SplitLIs.data(), *MRI);
}
-
-void LiveIntervals::constructMainRangeFromSubranges(LiveInterval &LI) {
- assert(LRCalc && "LRCalc not initialized.");
- LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
- LRCalc->constructMainRangeFromSubranges(LI);
-}
diff --git a/gnu/llvm/lib/CodeGen/LiveStackAnalysis.cpp b/gnu/llvm/lib/CodeGen/LiveStackAnalysis.cpp
index b51f8b0aa6b..5c9c679e97b 100644
--- a/gnu/llvm/lib/CodeGen/LiveStackAnalysis.cpp
+++ b/gnu/llvm/lib/CodeGen/LiveStackAnalysis.cpp
@@ -14,21 +14,23 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LiveStackAnalysis.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
+#include <limits>
using namespace llvm;
#define DEBUG_TYPE "livestacks"
char LiveStacks::ID = 0;
-INITIALIZE_PASS_BEGIN(LiveStacks, DEBUG_TYPE,
+INITIALIZE_PASS_BEGIN(LiveStacks, "livestacks",
"Live Stack Slot Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
-INITIALIZE_PASS_END(LiveStacks, DEBUG_TYPE,
+INITIALIZE_PASS_END(LiveStacks, "livestacks",
"Live Stack Slot Analysis", false, false)
char &llvm::LiveStacksID = LiveStacks::ID;
diff --git a/gnu/llvm/lib/Fuzzer/CMakeLists.txt b/gnu/llvm/lib/Fuzzer/CMakeLists.txt
index bc744890b99..d4d85041d21 100644
--- a/gnu/llvm/lib/Fuzzer/CMakeLists.txt
+++ b/gnu/llvm/lib/Fuzzer/CMakeLists.txt
@@ -1,68 +1,34 @@
-include(CheckCXXSourceCompiles)
-
-if( APPLE )
- CHECK_CXX_SOURCE_COMPILES("
- static thread_local int blah;
- int main() {
- return 0;
- }
- " HAS_THREAD_LOCAL)
-
- if( NOT HAS_THREAD_LOCAL )
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Dthread_local=__thread")
- endif()
-endif()
-
-set(LIBFUZZER_FLAGS_BASE "${CMAKE_CXX_FLAGS}")
+set(LIBFUZZER_FLAGS_BASE "${CMAKE_CXX_FLAGS_RELEASE}")
+# Disable the coverage and sanitizer instrumentation for the fuzzer itself.
+set(CMAKE_CXX_FLAGS_RELEASE "${LIBFUZZER_FLAGS_BASE} -O2 -fno-sanitize=all")
if( LLVM_USE_SANITIZE_COVERAGE )
- if(NOT "${LLVM_USE_SANITIZER}" STREQUAL "Address")
- message(FATAL_ERROR
- "LibFuzzer and its tests require LLVM_USE_SANITIZER=Address and "
- "LLVM_USE_SANITIZE_COVERAGE=YES to be set."
- )
- endif()
-
- # Disable the coverage and sanitizer instrumentation for the fuzzer itself.
- set(CMAKE_CXX_FLAGS "${LIBFUZZER_FLAGS_BASE} -fno-sanitize-coverage=trace-pc-guard,edge,trace-cmp,indirect-calls,8bit-counters -Werror")
-endif()
-
-# Compile libFuzzer if the compilation is specifically requested, OR
-# if the platform is known to be working.
-if ( LLVM_USE_SANITIZE_COVERAGE OR CMAKE_SYSTEM_NAME MATCHES "Darwin|Linux" )
add_library(LLVMFuzzerNoMainObjects OBJECT
- FuzzerCrossOver.cpp
- FuzzerDriver.cpp
- FuzzerExtFunctionsDlsym.cpp
- FuzzerExtFunctionsDlsymWin.cpp
- FuzzerExtFunctionsWeak.cpp
- FuzzerExtraCounters.cpp
- FuzzerIO.cpp
- FuzzerIOPosix.cpp
- FuzzerIOWindows.cpp
- FuzzerLoop.cpp
- FuzzerMerge.cpp
- FuzzerMutate.cpp
- FuzzerSHA1.cpp
- FuzzerShmemPosix.cpp
- FuzzerShmemWindows.cpp
- FuzzerTracePC.cpp
- FuzzerUtil.cpp
- FuzzerUtilDarwin.cpp
- FuzzerUtilLinux.cpp
- FuzzerUtilPosix.cpp
- FuzzerUtilWindows.cpp
- )
+ FuzzerCrossOver.cpp
+ FuzzerInterface.cpp
+ FuzzerTraceState.cpp
+ FuzzerDriver.cpp
+ FuzzerIO.cpp
+ FuzzerLoop.cpp
+ FuzzerMutate.cpp
+ FuzzerSanitizerOptions.cpp
+ FuzzerSHA1.cpp
+ FuzzerUtil.cpp
+ )
add_library(LLVMFuzzerNoMain STATIC
- $<TARGET_OBJECTS:LLVMFuzzerNoMainObjects>
- )
- target_link_libraries(LLVMFuzzerNoMain ${LLVM_PTHREAD_LIB})
+ $<TARGET_OBJECTS:LLVMFuzzerNoMainObjects>
+ )
+ if( HAVE_LIBPTHREAD )
+ target_link_libraries(LLVMFuzzerNoMain pthread)
+ endif()
add_library(LLVMFuzzer STATIC
- FuzzerMain.cpp
- $<TARGET_OBJECTS:LLVMFuzzerNoMainObjects>
- )
- target_link_libraries(LLVMFuzzer ${LLVM_PTHREAD_LIB})
-endif()
+ FuzzerMain.cpp
+ $<TARGET_OBJECTS:LLVMFuzzerNoMainObjects>
+ )
+ if( HAVE_LIBPTHREAD )
+ target_link_libraries(LLVMFuzzer pthread)
+ endif()
-if( LLVM_USE_SANITIZE_COVERAGE AND LLVM_INCLUDE_TESTS )
- add_subdirectory(test)
+ if( LLVM_INCLUDE_TESTS )
+ add_subdirectory(test)
+ endif()
endif()
diff --git a/gnu/llvm/lib/Fuzzer/FuzzerCrossOver.cpp b/gnu/llvm/lib/Fuzzer/FuzzerCrossOver.cpp
index 8b0fd7d529a..5203deaf912 100644
--- a/gnu/llvm/lib/Fuzzer/FuzzerCrossOver.cpp
+++ b/gnu/llvm/lib/Fuzzer/FuzzerCrossOver.cpp
@@ -9,11 +9,10 @@
// Cross over test inputs.
//===----------------------------------------------------------------------===//
-#include "FuzzerDefs.h"
-#include "FuzzerMutate.h"
-#include "FuzzerRandom.h"
#include <cstring>
+#include "FuzzerInternal.h"
+
namespace fuzzer {
// Cross Data1 and Data2, store the result (up to MaxOutSize bytes) in Out.
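The crossover roughly follows an alternating-chunk scheme. A hedged sketch of
the general idea (this is not the exact upstream implementation, which draws
chunk lengths from the fuzzer's own random source; rand() is used here only
to keep the sketch self-contained, and CrossOverSketch is an illustrative
name):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    // Alternately copy random-length chunks from Data1 and Data2 into Out,
    // stopping once MaxOutSize bytes are written or both inputs run dry.
    size_t CrossOverSketch(const uint8_t *Data1, size_t Size1,
                           const uint8_t *Data2, size_t Size2,
                           uint8_t *Out, size_t MaxOutSize) {
      size_t Pos1 = 0, Pos2 = 0, OutPos = 0;
      bool UseFirst = true;
      while (OutPos < MaxOutSize && (Pos1 < Size1 || Pos2 < Size2)) {
        const uint8_t *Src = UseFirst ? Data1 : Data2;
        size_t &Pos = UseFirst ? Pos1 : Pos2;
        size_t Size = UseFirst ? Size1 : Size2;
        if (Pos < Size) {
          size_t Chunk = 1 + rand() % (Size - Pos);  // random extent
          if (Chunk > MaxOutSize - OutPos)
            Chunk = MaxOutSize - OutPos;
          for (size_t i = 0; i < Chunk; i++)
            Out[OutPos++] = Src[Pos++];
        }
        UseFirst = !UseFirst;
      }
      return OutPos;
    }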
diff --git a/gnu/llvm/lib/Fuzzer/FuzzerDriver.cpp b/gnu/llvm/lib/Fuzzer/FuzzerDriver.cpp
index fd8cab38a7b..66e46dbf3aa 100644
--- a/gnu/llvm/lib/Fuzzer/FuzzerDriver.cpp
+++ b/gnu/llvm/lib/Fuzzer/FuzzerDriver.cpp
@@ -9,25 +9,19 @@
// FuzzerDriver and flag parsing.
//===----------------------------------------------------------------------===//
-#include "FuzzerCorpus.h"
-#include "FuzzerIO.h"
#include "FuzzerInterface.h"
#include "FuzzerInternal.h"
-#include "FuzzerMutate.h"
-#include "FuzzerRandom.h"
-#include "FuzzerShmem.h"
-#include "FuzzerTracePC.h"
-#include <algorithm>
-#include <atomic>
-#include <chrono>
+
#include <cstring>
+#include <chrono>
+#include <unistd.h>
+#include <thread>
+#include <atomic>
#include <mutex>
#include <string>
-#include <thread>
-
-// This function should be present in libFuzzer so that the client
-// binary can test for its existence.
-extern "C" __attribute__((used)) void __libfuzzer_is_present() {}
+#include <sstream>
+#include <algorithm>
+#include <iterator>
namespace fuzzer {
@@ -42,20 +36,16 @@ struct FlagDescription {
};
struct {
-#define FUZZER_DEPRECATED_FLAG(Name)
#define FUZZER_FLAG_INT(Name, Default, Description) int Name;
#define FUZZER_FLAG_UNSIGNED(Name, Default, Description) unsigned int Name;
#define FUZZER_FLAG_STRING(Name, Description) const char *Name;
#include "FuzzerFlags.def"
-#undef FUZZER_DEPRECATED_FLAG
#undef FUZZER_FLAG_INT
#undef FUZZER_FLAG_UNSIGNED
#undef FUZZER_FLAG_STRING
} Flags;
static const FlagDescription FlagDescriptions [] {
-#define FUZZER_DEPRECATED_FLAG(Name) \
- {#Name, "Deprecated; don't use", 0, nullptr, nullptr, nullptr},
#define FUZZER_FLAG_INT(Name, Default, Description) \
{#Name, Description, Default, &Flags.Name, nullptr, nullptr},
#define FUZZER_FLAG_UNSIGNED(Name, Default, Description) \
@@ -64,7 +54,6 @@ static const FlagDescription FlagDescriptions [] {
#define FUZZER_FLAG_STRING(Name, Description) \
{#Name, Description, 0, nullptr, &Flags.Name, nullptr},
#include "FuzzerFlags.def"
-#undef FUZZER_DEPRECATED_FLAG
#undef FUZZER_FLAG_INT
#undef FUZZER_FLAG_UNSIGNED
#undef FUZZER_FLAG_STRING
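The double inclusion of FuzzerFlags.def above is the classic X-macro
technique: the same flag list expands once into storage (the anonymous Flags
struct) and once into the FlagDescriptions table that drives parsing and
-help output. A standalone sketch of the pattern, with hypothetical flag
names and an inline macro list instead of a separate .def file:

    #include <cstdio>

    #define MY_FLAGS(X)                     \
      X(verbosity, 1, "Verbosity level.")   \
      X(runs, -1, "Number of test runs.")

    struct {  // storage: one int per flag
    #define DECL(Name, Default, Desc) int Name;
      MY_FLAGS(DECL)
    #undef DECL
    } MyFlags;

    struct FlagDesc { const char *Name; const char *Desc; int Default; int *Var; };
    static const FlagDesc MyFlagDescs[] = {  // table over the same list
    #define DESC(Name, Default, Desc) {#Name, Desc, Default, &MyFlags.Name},
      MY_FLAGS(DESC)
    #undef DESC
    };

    int main() {
      for (const auto &D : MyFlagDescs) {
        *D.Var = D.Default;  // apply defaults, as ParseFlags does
        std::printf("%s = %d  (%s)\n", D.Name, *D.Var, D.Desc);
      }
    }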
@@ -77,14 +66,8 @@ static std::vector<std::string> *Inputs;
static std::string *ProgName;
static void PrintHelp() {
- Printf("Usage:\n");
- auto Prog = ProgName->c_str();
- Printf("\nTo run fuzzing pass 0 or more directories.\n");
- Printf("%s [-flag1=val1 [-flag2=val2 ...] ] [dir1 [dir2 ...] ]\n", Prog);
-
- Printf("\nTo run individual tests without fuzzing pass 1 or more files:\n");
- Printf("%s [-flag1=val1 [-flag2=val2 ...] ] file1 [file2 ...]\n", Prog);
-
+ Printf("Usage: %s [-flag1=val1 [-flag2=val2 ...] ] [dir1 [dir2 ...] ]\n",
+ ProgName->c_str());
Printf("\nFlags: (strictly in form -flag=value)\n");
size_t MaxFlagLen = 0;
for (size_t F = 0; F < kNumFlags; F++)
@@ -92,7 +75,6 @@ static void PrintHelp() {
for (size_t F = 0; F < kNumFlags; F++) {
const auto &D = FlagDescriptions[F];
- if (strstr(D.Description, "internal flag") == D.Description) continue;
Printf(" %s", D.Name);
for (size_t i = 0, n = MaxFlagLen - strlen(D.Name); i < n; i++)
Printf(" ");
@@ -111,34 +93,14 @@ static const char *FlagValue(const char *Param, const char *Name) {
return nullptr;
}
-// Avoid calling stol as it triggers a bug in the clang/glibc build.
-static long MyStol(const char *Str) {
- long Res = 0;
- long Sign = 1;
- if (*Str == '-') {
- Str++;
- Sign = -1;
- }
- for (size_t i = 0; Str[i]; i++) {
- char Ch = Str[i];
- if (Ch < '0' || Ch > '9')
- return Res;
- Res = Res * 10 + (Ch - '0');
- }
- return Res * Sign;
-}
-
static bool ParseOneFlag(const char *Param) {
if (Param[0] != '-') return false;
if (Param[1] == '-') {
static bool PrintedWarning = false;
if (!PrintedWarning) {
PrintedWarning = true;
- Printf("INFO: libFuzzer ignores flags that start with '--'\n");
+ Printf("WARNING: libFuzzer ignores flags that start with '--'\n");
}
- for (size_t F = 0; F < kNumFlags; F++)
- if (FlagValue(Param + 1, FlagDescriptions[F].Name))
- Printf("WARNING: did you mean '%s' (single dash)?\n", Param + 1);
return true;
}
for (size_t F = 0; F < kNumFlags; F++) {
@@ -146,10 +108,10 @@ static bool ParseOneFlag(const char *Param) {
const char *Str = FlagValue(Param, Name);
if (Str) {
if (FlagDescriptions[F].IntFlag) {
- int Val = MyStol(Str);
+ int Val = std::stol(Str);
*FlagDescriptions[F].IntFlag = Val;
if (Flags.verbosity >= 2)
- Printf("Flag: %s %d\n", Name, Val);
+          Printf("Flag: %s %d\n", Name, Val);
return true;
} else if (FlagDescriptions[F].UIntFlag) {
unsigned int Val = std::stoul(Str);
@@ -162,15 +124,11 @@ static bool ParseOneFlag(const char *Param) {
if (Flags.verbosity >= 2)
Printf("Flag: %s %s\n", Name, Str);
return true;
- } else { // Deprecated flag.
- Printf("Flag: %s: deprecated, don't use\n", Name);
- return true;
}
}
}
- Printf("\n\nWARNING: unrecognized flag '%s'; "
- "use -help=1 to list all flags\n\n", Param);
- return true;
+ PrintHelp();
+ exit(1);
}
// We don't use any library to minimize dependencies.
@@ -186,11 +144,7 @@ static void ParseFlags(const std::vector<std::string> &Args) {
}
Inputs = new std::vector<std::string>;
for (size_t A = 1; A < Args.size(); A++) {
- if (ParseOneFlag(Args[A].c_str())) {
- if (Flags.ignore_remaining_args)
- break;
- continue;
- }
+ if (ParseOneFlag(Args[A].c_str())) continue;
Inputs->push_back(Args[A]);
}
}
@@ -199,396 +153,120 @@ static std::mutex Mu;
static void PulseThread() {
while (true) {
- SleepSeconds(600);
+ std::this_thread::sleep_for(std::chrono::seconds(600));
std::lock_guard<std::mutex> Lock(Mu);
Printf("pulse...\n");
}
}
-static void WorkerThread(const std::string &Cmd, std::atomic<unsigned> *Counter,
- unsigned NumJobs, std::atomic<bool> *HasErrors) {
+static void WorkerThread(const std::string &Cmd, std::atomic<int> *Counter,
+ int NumJobs, std::atomic<bool> *HasErrors) {
while (true) {
- unsigned C = (*Counter)++;
+ int C = (*Counter)++;
if (C >= NumJobs) break;
std::string Log = "fuzz-" + std::to_string(C) + ".log";
std::string ToRun = Cmd + " > " + Log + " 2>&1\n";
if (Flags.verbosity)
Printf("%s", ToRun.c_str());
- int ExitCode = ExecuteCommand(ToRun);
+ int ExitCode = ExecuteCommand(ToRun.c_str());
if (ExitCode != 0)
*HasErrors = true;
std::lock_guard<std::mutex> Lock(Mu);
- Printf("================== Job %u exited with exit code %d ============\n",
+ Printf("================== Job %d exited with exit code %d ============\n",
C, ExitCode);
fuzzer::CopyFileToErr(Log);
}
}
-std::string CloneArgsWithoutX(const std::vector<std::string> &Args,
- const char *X1, const char *X2) {
+static int RunInMultipleProcesses(const std::vector<std::string> &Args,
+ int NumWorkers, int NumJobs) {
+ std::atomic<int> Counter(0);
+ std::atomic<bool> HasErrors(false);
std::string Cmd;
for (auto &S : Args) {
- if (FlagValue(S.c_str(), X1) || FlagValue(S.c_str(), X2))
+ if (FlagValue(S.c_str(), "jobs") || FlagValue(S.c_str(), "workers"))
continue;
Cmd += S + " ";
}
- return Cmd;
-}
-
-static int RunInMultipleProcesses(const std::vector<std::string> &Args,
- unsigned NumWorkers, unsigned NumJobs) {
- std::atomic<unsigned> Counter(0);
- std::atomic<bool> HasErrors(false);
- std::string Cmd = CloneArgsWithoutX(Args, "jobs", "workers");
std::vector<std::thread> V;
std::thread Pulse(PulseThread);
Pulse.detach();
- for (unsigned i = 0; i < NumWorkers; i++)
+ for (int i = 0; i < NumWorkers; i++)
V.push_back(std::thread(WorkerThread, Cmd, &Counter, NumJobs, &HasErrors));
for (auto &T : V)
T.join();
return HasErrors ? 1 : 0;
}
-static void RssThread(Fuzzer *F, size_t RssLimitMb) {
- while (true) {
- SleepSeconds(1);
- size_t Peak = GetPeakRSSMb();
- if (Peak > RssLimitMb)
- F->RssLimitCallback();
- }
-}
-
-static void StartRssThread(Fuzzer *F, size_t RssLimitMb) {
- if (!RssLimitMb) return;
- std::thread T(RssThread, F, RssLimitMb);
- T.detach();
-}
-
-int RunOneTest(Fuzzer *F, const char *InputFilePath, size_t MaxLen) {
+int RunOneTest(Fuzzer *F, const char *InputFilePath) {
Unit U = FileToVector(InputFilePath);
- if (MaxLen && MaxLen < U.size())
- U.resize(MaxLen);
- F->ExecuteCallback(U.data(), U.size());
- F->TryDetectingAMemoryLeak(U.data(), U.size(), true);
- return 0;
-}
-
-static bool AllInputsAreFiles() {
- if (Inputs->empty()) return false;
- for (auto &Path : *Inputs)
- if (!IsFile(Path))
- return false;
- return true;
-}
-
-static std::string GetDedupTokenFromFile(const std::string &Path) {
- auto S = FileToString(Path);
- auto Beg = S.find("DEDUP_TOKEN:");
- if (Beg == std::string::npos)
- return "";
- auto End = S.find('\n', Beg);
- if (End == std::string::npos)
- return "";
- return S.substr(Beg, End - Beg);
-}
-
-int CleanseCrashInput(const std::vector<std::string> &Args,
- const FuzzingOptions &Options) {
- if (Inputs->size() != 1 || !Flags.exact_artifact_path) {
- Printf("ERROR: -cleanse_crash should be given one input file and"
- " -exact_artifact_path\n");
- exit(1);
- }
- std::string InputFilePath = Inputs->at(0);
- std::string OutputFilePath = Flags.exact_artifact_path;
- std::string BaseCmd =
- CloneArgsWithoutX(Args, "cleanse_crash", "cleanse_crash");
-
- auto InputPos = BaseCmd.find(" " + InputFilePath + " ");
- assert(InputPos != std::string::npos);
- BaseCmd.erase(InputPos, InputFilePath.size() + 1);
-
- auto LogFilePath = DirPlusFile(
- TmpDir(), "libFuzzerTemp." + std::to_string(GetPid()) + ".txt");
- auto TmpFilePath = DirPlusFile(
- TmpDir(), "libFuzzerTemp." + std::to_string(GetPid()) + ".repro");
- auto LogFileRedirect = " > " + LogFilePath + " 2>&1 ";
-
- auto Cmd = BaseCmd + " " + TmpFilePath + LogFileRedirect;
-
- std::string CurrentFilePath = InputFilePath;
- auto U = FileToVector(CurrentFilePath);
- size_t Size = U.size();
-
- const std::vector<uint8_t> ReplacementBytes = {' ', 0xff};
- for (int NumAttempts = 0; NumAttempts < 5; NumAttempts++) {
- bool Changed = false;
- for (size_t Idx = 0; Idx < Size; Idx++) {
- Printf("CLEANSE[%d]: Trying to replace byte %zd of %zd\n", NumAttempts,
- Idx, Size);
- uint8_t OriginalByte = U[Idx];
- if (ReplacementBytes.end() != std::find(ReplacementBytes.begin(),
- ReplacementBytes.end(),
- OriginalByte))
- continue;
- for (auto NewByte : ReplacementBytes) {
- U[Idx] = NewByte;
- WriteToFile(U, TmpFilePath);
- auto ExitCode = ExecuteCommand(Cmd);
- RemoveFile(TmpFilePath);
- if (!ExitCode) {
- U[Idx] = OriginalByte;
- } else {
- Changed = true;
- Printf("CLEANSE: Replaced byte %zd with 0x%x\n", Idx, NewByte);
- WriteToFile(U, OutputFilePath);
- break;
- }
- }
- }
- if (!Changed) break;
- }
- RemoveFile(LogFilePath);
+ Unit PreciseSizedU(U);
+ assert(PreciseSizedU.size() == PreciseSizedU.capacity());
+ F->ExecuteCallback(PreciseSizedU);
return 0;
}
-int MinimizeCrashInput(const std::vector<std::string> &Args,
- const FuzzingOptions &Options) {
- if (Inputs->size() != 1) {
- Printf("ERROR: -minimize_crash should be given one input file\n");
- exit(1);
- }
- std::string InputFilePath = Inputs->at(0);
- auto BaseCmd = SplitBefore(
- "-ignore_remaining_args=1",
- CloneArgsWithoutX(Args, "minimize_crash", "exact_artifact_path"));
- auto InputPos = BaseCmd.first.find(" " + InputFilePath + " ");
- assert(InputPos != std::string::npos);
- BaseCmd.first.erase(InputPos, InputFilePath.size() + 1);
- if (Flags.runs <= 0 && Flags.max_total_time == 0) {
- Printf("INFO: you need to specify -runs=N or "
- "-max_total_time=N with -minimize_crash=1\n"
- "INFO: defaulting to -max_total_time=600\n");
- BaseCmd.first += " -max_total_time=600";
- }
-
- auto LogFilePath = DirPlusFile(
- TmpDir(), "libFuzzerTemp." + std::to_string(GetPid()) + ".txt");
- auto LogFileRedirect = " > " + LogFilePath + " 2>&1 ";
-
- std::string CurrentFilePath = InputFilePath;
- while (true) {
- Unit U = FileToVector(CurrentFilePath);
- Printf("CRASH_MIN: minimizing crash input: '%s' (%zd bytes)\n",
- CurrentFilePath.c_str(), U.size());
-
- auto Cmd = BaseCmd.first + " " + CurrentFilePath + LogFileRedirect + " " +
- BaseCmd.second;
-
- Printf("CRASH_MIN: executing: %s\n", Cmd.c_str());
- int ExitCode = ExecuteCommand(Cmd);
- if (ExitCode == 0) {
- Printf("ERROR: the input %s did not crash\n", CurrentFilePath.c_str());
- exit(1);
- }
- Printf("CRASH_MIN: '%s' (%zd bytes) caused a crash. Will try to minimize "
- "it further\n",
- CurrentFilePath.c_str(), U.size());
- auto DedupToken1 = GetDedupTokenFromFile(LogFilePath);
- if (!DedupToken1.empty())
- Printf("CRASH_MIN: DedupToken1: %s\n", DedupToken1.c_str());
-
- std::string ArtifactPath =
- Flags.exact_artifact_path
- ? Flags.exact_artifact_path
- : Options.ArtifactPrefix + "minimized-from-" + Hash(U);
- Cmd += " -minimize_crash_internal_step=1 -exact_artifact_path=" +
- ArtifactPath;
- Printf("CRASH_MIN: executing: %s\n", Cmd.c_str());
- ExitCode = ExecuteCommand(Cmd);
- CopyFileToErr(LogFilePath);
- if (ExitCode == 0) {
- if (Flags.exact_artifact_path) {
- CurrentFilePath = Flags.exact_artifact_path;
- WriteToFile(U, CurrentFilePath);
- }
-    Printf("CRASH_MIN: failed to minimize beyond %s (%zd bytes), exiting\n",
- CurrentFilePath.c_str(), U.size());
- break;
- }
- auto DedupToken2 = GetDedupTokenFromFile(LogFilePath);
- if (!DedupToken2.empty())
- Printf("CRASH_MIN: DedupToken2: %s\n", DedupToken2.c_str());
-
- if (DedupToken1 != DedupToken2) {
- if (Flags.exact_artifact_path) {
- CurrentFilePath = Flags.exact_artifact_path;
- WriteToFile(U, CurrentFilePath);
- }
- Printf("CRASH_MIN: mismatch in dedup tokens"
- " (looks like a different bug). Won't minimize further\n");
- break;
- }
-
- CurrentFilePath = ArtifactPath;
- Printf("*********************************\n");
- }
- RemoveFile(LogFilePath);
- return 0;
+int FuzzerDriver(int argc, char **argv, UserCallback Callback) {
+ FuzzerRandomLibc Rand(0);
+ SimpleUserSuppliedFuzzer SUSF(&Rand, Callback);
+ return FuzzerDriver(argc, argv, SUSF);
}
-int MinimizeCrashInputInternalStep(Fuzzer *F, InputCorpus *Corpus) {
- assert(Inputs->size() == 1);
- std::string InputFilePath = Inputs->at(0);
- Unit U = FileToVector(InputFilePath);
- Printf("INFO: Starting MinimizeCrashInputInternalStep: %zd\n", U.size());
- if (U.size() < 2) {
- Printf("INFO: The input is small enough, exiting\n");
- exit(0);
- }
- F->SetMaxInputLen(U.size());
- F->SetMaxMutationLen(U.size() - 1);
- F->MinimizeCrashLoop(U);
- Printf("INFO: Done MinimizeCrashInputInternalStep, no crashes found\n");
- exit(0);
- return 0;
+int FuzzerDriver(int argc, char **argv, UserSuppliedFuzzer &USF) {
+ std::vector<std::string> Args(argv, argv + argc);
+ return FuzzerDriver(Args, USF);
}
-int AnalyzeDictionary(Fuzzer *F, const std::vector<Unit>& Dict,
- UnitVector& Corpus) {
-  Printf("Started dictionary minimization (up to %zd tests)\n",
- Dict.size() * Corpus.size() * 2);
-
- // Scores and usage count for each dictionary unit.
- std::vector<int> Scores(Dict.size());
- std::vector<int> Usages(Dict.size());
-
- std::vector<size_t> InitialFeatures;
- std::vector<size_t> ModifiedFeatures;
- for (auto &C : Corpus) {
- // Get coverage for the testcase without modifications.
- F->ExecuteCallback(C.data(), C.size());
- InitialFeatures.clear();
- TPC.CollectFeatures([&](size_t Feature) -> bool {
- InitialFeatures.push_back(Feature);
- return true;
- });
-
- for (size_t i = 0; i < Dict.size(); ++i) {
- auto Data = C;
- auto StartPos = std::search(Data.begin(), Data.end(),
- Dict[i].begin(), Dict[i].end());
- // Skip dictionary unit, if the testcase does not contain it.
- if (StartPos == Data.end())
- continue;
-
- ++Usages[i];
- while (StartPos != Data.end()) {
- // Replace all occurrences of dictionary unit in the testcase.
- auto EndPos = StartPos + Dict[i].size();
- for (auto It = StartPos; It != EndPos; ++It)
- *It ^= 0xFF;
-
- StartPos = std::search(EndPos, Data.end(),
- Dict[i].begin(), Dict[i].end());
- }
-
- // Get coverage for testcase with masked occurrences of dictionary unit.
- F->ExecuteCallback(Data.data(), Data.size());
- ModifiedFeatures.clear();
- TPC.CollectFeatures([&](size_t Feature) -> bool {
- ModifiedFeatures.push_back(Feature);
- return true;
- });
-
- if (InitialFeatures == ModifiedFeatures)
- --Scores[i];
- else
- Scores[i] += 2;
- }
- }
-
- Printf("###### Useless dictionary elements. ######\n");
- for (size_t i = 0; i < Dict.size(); ++i) {
- // Dictionary units with positive score are treated as useful ones.
- if (Scores[i] > 0)
- continue;
-
- Printf("\"");
- PrintASCII(Dict[i].data(), Dict[i].size(), "\"");
- Printf(" # Score: %d, Used: %d\n", Scores[i], Usages[i]);
- }
- Printf("###### End of useless dictionary elements. ######\n");
- return 0;
+int FuzzerDriver(const std::vector<std::string> &Args, UserCallback Callback) {
+ FuzzerRandomLibc Rand(0);
+ SimpleUserSuppliedFuzzer SUSF(&Rand, Callback);
+ return FuzzerDriver(Args, SUSF);
}
-int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
+int FuzzerDriver(const std::vector<std::string> &Args,
+ UserSuppliedFuzzer &USF) {
using namespace fuzzer;
- assert(argc && argv && "Argument pointers cannot be nullptr");
- std::string Argv0((*argv)[0]);
- EF = new ExternalFunctions();
- if (EF->LLVMFuzzerInitialize)
- EF->LLVMFuzzerInitialize(argc, argv);
- const std::vector<std::string> Args(*argv, *argv + *argc);
assert(!Args.empty());
ProgName = new std::string(Args[0]);
- if (Argv0 != *ProgName) {
- Printf("ERROR: argv[0] has been modified in LLVMFuzzerInitialize\n");
- exit(1);
- }
ParseFlags(Args);
if (Flags.help) {
PrintHelp();
return 0;
}
- if (Flags.close_fd_mask & 2)
- DupAndCloseStderr();
- if (Flags.close_fd_mask & 1)
- CloseStdout();
-
if (Flags.jobs > 0 && Flags.workers == 0) {
Flags.workers = std::min(NumberOfCpuCores() / 2, Flags.jobs);
if (Flags.workers > 1)
- Printf("Running %u workers\n", Flags.workers);
+ Printf("Running %d workers\n", Flags.workers);
}
if (Flags.workers > 0 && Flags.jobs > 0)
return RunInMultipleProcesses(Args, Flags.workers, Flags.jobs);
- const size_t kMaxSaneLen = 1 << 20;
- const size_t kMinDefaultLen = 4096;
- FuzzingOptions Options;
+ Fuzzer::FuzzingOptions Options;
Options.Verbosity = Flags.verbosity;
Options.MaxLen = Flags.max_len;
- Options.ExperimentalLenControl = Flags.experimental_len_control;
- if (Flags.experimental_len_control && Flags.max_len == kMinDefaultLen)
- Options.MaxLen = 1 << 20;
Options.UnitTimeoutSec = Flags.timeout;
- Options.ErrorExitCode = Flags.error_exitcode;
- Options.TimeoutExitCode = Flags.timeout_exitcode;
Options.MaxTotalTimeSec = Flags.max_total_time;
Options.DoCrossOver = Flags.cross_over;
Options.MutateDepth = Flags.mutate_depth;
+ Options.ExitOnFirst = Flags.exit_on_first;
Options.UseCounters = Flags.use_counters;
Options.UseIndirCalls = Flags.use_indir_calls;
- Options.UseMemmem = Flags.use_memmem;
- Options.UseCmp = Flags.use_cmp;
- Options.UseValueProfile = Flags.use_value_profile;
- Options.Shrink = Flags.shrink;
- Options.ReduceInputs = Flags.reduce_inputs;
+ Options.UseTraces = Flags.use_traces;
Options.ShuffleAtStartUp = Flags.shuffle;
- Options.PreferSmall = Flags.prefer_small;
- Options.ReloadIntervalSec = Flags.reload;
+ Options.PreferSmallDuringInitialShuffle =
+ Flags.prefer_small_during_initial_shuffle;
+ Options.Reload = Flags.reload;
Options.OnlyASCII = Flags.only_ascii;
- Options.DetectLeaks = Flags.detect_leaks;
- Options.TraceMalloc = Flags.trace_malloc;
- Options.RssLimitMb = Flags.rss_limit_mb;
+ Options.OutputCSV = Flags.output_csv;
if (Flags.runs >= 0)
Options.MaxNumberOfRuns = Flags.runs;
- if (!Inputs->empty() && !Flags.minimize_crash_internal_step)
+ if (!Inputs->empty())
Options.OutputCorpus = (*Inputs)[0];
+ if (Flags.sync_command)
+ Options.SyncCommand = Flags.sync_command;
+ Options.SyncTimeout = Flags.sync_timeout;
Options.ReportSlowUnits = Flags.report_slow_units;
if (Flags.artifact_prefix)
Options.ArtifactPrefix = Flags.artifact_prefix;
@@ -600,164 +278,59 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
return 1;
if (Flags.verbosity > 0 && !Dictionary.empty())
Printf("Dictionary: %zd entries\n", Dictionary.size());
- bool DoPlainRun = AllInputsAreFiles();
- Options.SaveArtifacts =
- !DoPlainRun || Flags.minimize_crash_internal_step;
- Options.PrintNewCovPcs = Flags.print_pcs;
- Options.PrintFinalStats = Flags.print_final_stats;
- Options.PrintCorpusStats = Flags.print_corpus_stats;
- Options.PrintCoverage = Flags.print_coverage;
- Options.DumpCoverage = Flags.dump_coverage;
- if (Flags.exit_on_src_pos)
- Options.ExitOnSrcPos = Flags.exit_on_src_pos;
- if (Flags.exit_on_item)
- Options.ExitOnItem = Flags.exit_on_item;
+ Options.SaveArtifacts = !Flags.test_single_input;
+ Options.PrintNewCovPcs = Flags.print_new_cov_pcs;
- unsigned Seed = Flags.seed;
- // Initialize Seed.
- if (Seed == 0)
- Seed =
- std::chrono::system_clock::now().time_since_epoch().count() + GetPid();
- if (Flags.verbosity)
- Printf("INFO: Seed: %u\n", Seed);
-
- Random Rand(Seed);
- auto *MD = new MutationDispatcher(Rand, Options);
- auto *Corpus = new InputCorpus(Options.OutputCorpus);
- auto *F = new Fuzzer(Callback, *Corpus, *MD, Options);
+ Fuzzer F(USF, Options);
for (auto &U: Dictionary)
- if (U.size() <= Word::GetMaxSize())
- MD->AddWordToManualDictionary(Word(U.data(), U.size()));
-
- StartRssThread(F, Flags.rss_limit_mb);
-
- Options.HandleAbrt = Flags.handle_abrt;
- Options.HandleBus = Flags.handle_bus;
- Options.HandleFpe = Flags.handle_fpe;
- Options.HandleIll = Flags.handle_ill;
- Options.HandleInt = Flags.handle_int;
- Options.HandleSegv = Flags.handle_segv;
- Options.HandleTerm = Flags.handle_term;
- Options.HandleXfsz = Flags.handle_xfsz;
- SetSignalHandler(Options);
-
- if (Flags.minimize_crash)
- return MinimizeCrashInput(Args, Options);
-
- if (Flags.minimize_crash_internal_step)
- return MinimizeCrashInputInternalStep(F, Corpus);
-
- if (Flags.cleanse_crash)
- return CleanseCrashInput(Args, Options);
-
- if (auto Name = Flags.run_equivalence_server) {
- SMR.Destroy(Name);
- if (!SMR.Create(Name)) {
- Printf("ERROR: can't create shared memory region\n");
- return 1;
- }
- Printf("INFO: EQUIVALENCE SERVER UP\n");
- while (true) {
- SMR.WaitClient();
- size_t Size = SMR.ReadByteArraySize();
- SMR.WriteByteArray(nullptr, 0);
- const Unit tmp(SMR.GetByteArray(), SMR.GetByteArray() + Size);
- F->ExecuteCallback(tmp.data(), tmp.size());
- SMR.PostServer();
- }
- return 0;
- }
+ USF.GetMD().AddWordToManualDictionary(U);
- if (auto Name = Flags.use_equivalence_server) {
- if (!SMR.Open(Name)) {
- Printf("ERROR: can't open shared memory region\n");
- return 1;
- }
- Printf("INFO: EQUIVALENCE CLIENT UP\n");
- }
+ // Timer
+ if (Flags.timeout > 0)
+ SetTimer(Flags.timeout / 2 + 1);
- if (DoPlainRun) {
- Options.SaveArtifacts = false;
- int Runs = std::max(1, Flags.runs);
- Printf("%s: Running %zd inputs %d time(s) each.\n", ProgName->c_str(),
- Inputs->size(), Runs);
- for (auto &Path : *Inputs) {
- auto StartTime = system_clock::now();
- Printf("Running: %s\n", Path.c_str());
- for (int Iter = 0; Iter < Runs; Iter++)
- RunOneTest(F, Path.c_str(), Options.MaxLen);
- auto StopTime = system_clock::now();
- auto MS = duration_cast<milliseconds>(StopTime - StartTime).count();
- Printf("Executed %s in %zd ms\n", Path.c_str(), (long)MS);
- }
- Printf("***\n"
- "*** NOTE: fuzzing was not performed, you have only\n"
- "*** executed the target code on a fixed set of inputs.\n"
- "***\n");
- F->PrintFinalStats();
- exit(0);
- }
-
- if (Flags.merge) {
- if (Options.MaxLen == 0)
- F->SetMaxInputLen(kMaxSaneLen);
- if (Flags.merge_control_file)
- F->CrashResistantMergeInternalStep(Flags.merge_control_file);
- else
- F->CrashResistantMerge(Args, *Inputs,
- Flags.load_coverage_summary,
- Flags.save_coverage_summary);
+ if (Flags.test_single_input) {
+ RunOneTest(&F, Flags.test_single_input);
exit(0);
}
- size_t TemporaryMaxLen = Options.MaxLen ? Options.MaxLen : kMaxSaneLen;
-
- UnitVector InitialCorpus;
- for (auto &Inp : *Inputs) {
- Printf("Loading corpus dir: %s\n", Inp.c_str());
- ReadDirToVectorOfUnits(Inp.c_str(), &InitialCorpus, nullptr,
- TemporaryMaxLen, /*ExitOnError=*/false);
+ if (Flags.save_minimized_corpus) {
+ Printf("The flag -save_minimized_corpus is deprecated; use -merge=1\n");
+ exit(1);
}
- if (Flags.analyze_dict) {
- if (Dictionary.empty() || Inputs->empty()) {
- Printf("ERROR: can't analyze dict without dict and corpus provided\n");
- return 1;
- }
- if (AnalyzeDictionary(F, Dictionary, InitialCorpus)) {
- Printf("Dictionary analysis failed\n");
- exit(1);
- }
-    Printf("Dictionary analysis succeeded\n");
+ if (Flags.merge) {
+ F.Merge(*Inputs);
exit(0);
}
- if (Options.MaxLen == 0) {
- size_t MaxLen = 0;
- for (auto &U : InitialCorpus)
- MaxLen = std::max(U.size(), MaxLen);
- F->SetMaxInputLen(std::min(std::max(kMinDefaultLen, MaxLen), kMaxSaneLen));
- }
-
- if (InitialCorpus.empty()) {
- InitialCorpus.push_back(Unit({'\n'})); // Valid ASCII input.
- if (Options.Verbosity)
- Printf("INFO: A corpus is not provided, starting from an empty corpus\n");
- }
- F->ShuffleAndMinimize(&InitialCorpus);
- InitialCorpus.clear(); // Don't need this memory any more.
- F->Loop();
+ unsigned Seed = Flags.seed;
+ // Initialize Seed.
+ if (Seed == 0)
+ Seed = time(0) * 10000 + getpid();
+ if (Flags.verbosity)
+ Printf("Seed: %u\n", Seed);
+ USF.GetRand().ResetSeed(Seed);
+
+ F.RereadOutputCorpus();
+ for (auto &inp : *Inputs)
+ if (inp != Options.OutputCorpus)
+ F.ReadDir(inp, nullptr);
+
+ if (F.CorpusSize() == 0)
+ F.AddToCorpus(Unit()); // Can't fuzz empty corpus, so add an empty input.
+ F.ShuffleAndMinimize();
+ if (Flags.drill)
+ F.Drill();
+ else
+ F.Loop();
if (Flags.verbosity)
- Printf("Done %zd runs in %zd second(s)\n", F->getTotalNumberOfRuns(),
- F->secondsSinceProcessStartUp());
- F->PrintFinalStats();
+ Printf("Done %d runs in %zd second(s)\n", F.getTotalNumberOfRuns(),
+ F.secondsSinceProcessStartUp());
exit(0); // Don't let F destroy itself.
}
-// Storage for global ExternalFunctions object.
-ExternalFunctions *EF = nullptr;
-
} // namespace fuzzer
diff --git a/gnu/llvm/lib/Fuzzer/FuzzerFlags.def b/gnu/llvm/lib/Fuzzer/FuzzerFlags.def
index 526805705b2..977efb76922 100644
--- a/gnu/llvm/lib/Fuzzer/FuzzerFlags.def
+++ b/gnu/llvm/lib/Fuzzer/FuzzerFlags.def
@@ -14,74 +14,49 @@ FUZZER_FLAG_INT(verbosity, 1, "Verbosity level.")
FUZZER_FLAG_UNSIGNED(seed, 0, "Random seed. If 0, seed is generated.")
FUZZER_FLAG_INT(runs, -1,
"Number of individual test runs (-1 for infinite runs).")
-FUZZER_FLAG_INT(max_len, 0, "Maximum length of the test input. "
- "If 0, libFuzzer tries to guess a good value based on the corpus "
- "and reports it. ")
-FUZZER_FLAG_INT(experimental_len_control, 0, "experimental flag")
+FUZZER_FLAG_INT(max_len, 64, "Maximum length of the test input.")
FUZZER_FLAG_INT(cross_over, 1, "If 1, cross over inputs.")
FUZZER_FLAG_INT(mutate_depth, 5,
"Apply this number of consecutive mutations to each input.")
FUZZER_FLAG_INT(shuffle, 1, "Shuffle inputs at startup")
-FUZZER_FLAG_INT(prefer_small, 1,
- "If 1, always prefer smaller inputs during the corpus shuffle.")
+FUZZER_FLAG_INT(
+ prefer_small_during_initial_shuffle, -1,
+ "If 1, always prefer smaller inputs during the initial corpus shuffle."
+ " If 0, never do that. If -1, do it sometimes.")
+FUZZER_FLAG_INT(exit_on_first, 0,
+ "If 1, exit after the first new interesting input is found.")
FUZZER_FLAG_INT(
timeout, 1200,
"Timeout in seconds (if positive). "
"If one unit runs more than this number of seconds the process will abort.")
-FUZZER_FLAG_INT(error_exitcode, 77, "When libFuzzer itself reports a bug "
- "this exit code will be used.")
-FUZZER_FLAG_INT(timeout_exitcode, 77, "When libFuzzer reports a timeout "
- "this exit code will be used.")
FUZZER_FLAG_INT(max_total_time, 0, "If positive, indicates the maximal total "
"time in seconds to run the fuzzer.")
FUZZER_FLAG_INT(help, 0, "Print help.")
+FUZZER_FLAG_INT(save_minimized_corpus, 0, "Deprecated. Use -merge=1")
FUZZER_FLAG_INT(merge, 0, "If 1, the 2nd, 3rd, etc. corpora will be "
- "merged into the 1-st corpus. Only interesting units will be taken. "
- "This flag can be used to minimize a corpus.")
-FUZZER_FLAG_STRING(merge_control_file, "internal flag")
-FUZZER_FLAG_STRING(save_coverage_summary, "Experimental:"
- " save coverage summary to a given file."
- " Used with -merge=1")
-FUZZER_FLAG_STRING(load_coverage_summary, "Experimental:"
- " load coverage summary from a given file."
- " Treat this coverage as belonging to the first corpus. "
- " Used with -merge=1")
-FUZZER_FLAG_INT(minimize_crash, 0, "If 1, minimizes the provided"
- " crash input. Use with -runs=N or -max_total_time=N to limit "
-  "the number of attempts."
- " Use with -exact_artifact_path to specify the output."
- " Combine with ASAN_OPTIONS=dedup_token_length=3 (or similar) to ensure that"
- " the minimized input triggers the same crash."
- )
-FUZZER_FLAG_INT(cleanse_crash, 0, "If 1, tries to cleanse the provided"
- " crash input to make it contain fewer original bytes."
- " Use with -exact_artifact_path to specify the output."
- )
-FUZZER_FLAG_INT(minimize_crash_internal_step, 0, "internal flag")
+ "merged into the 1-st corpus. Only interesting units will be taken.")
FUZZER_FLAG_INT(use_counters, 1, "Use coverage counters")
FUZZER_FLAG_INT(use_indir_calls, 1, "Use indirect caller-callee counters")
-FUZZER_FLAG_INT(use_memmem, 1,
- "Use hints from intercepting memmem, strstr, etc")
-FUZZER_FLAG_INT(use_value_profile, 0,
- "Experimental. Use value profile to guide fuzzing.")
-FUZZER_FLAG_INT(use_cmp, 1, "Use CMP traces to guide mutations")
-FUZZER_FLAG_INT(shrink, 0, "Experimental. Try to shrink corpus inputs.")
-FUZZER_FLAG_INT(reduce_inputs, 0, "Experimental. "
-  "Try to reduce the size of inputs while preserving their full feature sets")
-FUZZER_FLAG_UNSIGNED(jobs, 0, "Number of jobs to run. If jobs >= 1 we spawn"
+FUZZER_FLAG_INT(use_traces, 0, "Experimental: use instruction traces")
+FUZZER_FLAG_INT(jobs, 0, "Number of jobs to run. If jobs >= 1 we spawn"
" this number of jobs in separate worker processes"
" with stdout/stderr redirected to fuzz-JOB.log.")
-FUZZER_FLAG_UNSIGNED(workers, 0,
+FUZZER_FLAG_INT(workers, 0,
"Number of simultaneous worker processes to run the jobs."
" If zero, \"min(jobs,NumberOfCpuCores()/2)\" is used.")
FUZZER_FLAG_INT(reload, 1,
- "Reload the main corpus every <N> seconds to get new units"
- " discovered by other processes. If 0, disabled")
+ "Reload the main corpus periodically to get new units"
+ " discovered by other processes.")
+FUZZER_FLAG_STRING(sync_command, "Execute an external command "
+ "\"<sync_command> <test_corpus>\" "
+ "to synchronize the test corpus.")
+FUZZER_FLAG_INT(sync_timeout, 600, "Minimum timeout between syncs.")
FUZZER_FLAG_INT(report_slow_units, 10,
"Report slowest units if they run for more than this number of seconds.")
FUZZER_FLAG_INT(only_ascii, 0,
"If 1, generate only ASCII (isprint+isspace) inputs.")
FUZZER_FLAG_STRING(dict, "Experimental. Use the dictionary file.")
+FUZZER_FLAG_STRING(test_single_input, "Use specified file as test input.")
FUZZER_FLAG_STRING(artifact_prefix, "Write fuzzing artifacts (crash, "
"timeout, or slow inputs) as "
"$(artifact_prefix)file")
@@ -90,50 +65,8 @@ FUZZER_FLAG_STRING(exact_artifact_path,
"as $(exact_artifact_path). This overrides -artifact_prefix "
"and will not use checksum in the file name. Do not "
"use the same path for several parallel processes.")
-FUZZER_FLAG_INT(print_pcs, 0, "If 1, print out newly covered PCs.")
-FUZZER_FLAG_INT(print_final_stats, 0, "If 1, print statistics at exit.")
-FUZZER_FLAG_INT(print_corpus_stats, 0,
- "If 1, print statistics on corpus elements at exit.")
-FUZZER_FLAG_INT(print_coverage, 0, "If 1, print coverage information as text"
- " at exit.")
-FUZZER_FLAG_INT(dump_coverage, 0, "If 1, dump coverage information as a"
- " .sancov file at exit.")
-FUZZER_FLAG_INT(handle_segv, 1, "If 1, try to intercept SIGSEGV.")
-FUZZER_FLAG_INT(handle_bus, 1, "If 1, try to intercept SIGBUS.")
-FUZZER_FLAG_INT(handle_abrt, 1, "If 1, try to intercept SIGABRT.")
-FUZZER_FLAG_INT(handle_ill, 1, "If 1, try to intercept SIGILL.")
-FUZZER_FLAG_INT(handle_fpe, 1, "If 1, try to intercept SIGFPE.")
-FUZZER_FLAG_INT(handle_int, 1, "If 1, try to intercept SIGINT.")
-FUZZER_FLAG_INT(handle_term, 1, "If 1, try to intercept SIGTERM.")
-FUZZER_FLAG_INT(handle_xfsz, 1, "If 1, try to intercept SIGXFSZ.")
-FUZZER_FLAG_INT(close_fd_mask, 0, "If 1, close stdout at startup; "
- "if 2, close stderr; if 3, close both. "
- "Be careful, this will also close e.g. asan's stderr/stdout.")
-FUZZER_FLAG_INT(detect_leaks, 1, "If 1, and if LeakSanitizer is enabled "
- "try to detect memory leaks during fuzzing (i.e. not only at shut down).")
-FUZZER_FLAG_INT(trace_malloc, 0, "If >= 1 will print all mallocs/frees. "
- "If >= 2 will also print stack traces.")
-FUZZER_FLAG_INT(rss_limit_mb, 2048, "If non-zero, the fuzzer will exit upon "
- "reaching this limit of RSS memory usage.")
-FUZZER_FLAG_STRING(exit_on_src_pos, "Exit if a newly found PC originates"
- " from the given source location. Example: -exit_on_src_pos=foo.cc:123. "
- "Used primarily for testing libFuzzer itself.")
-FUZZER_FLAG_STRING(exit_on_item, "Exit if an item with a given sha1 sum"
- " was added to the corpus. "
- "Used primarily for testing libFuzzer itself.")
-FUZZER_FLAG_INT(ignore_remaining_args, 0, "If 1, ignore all arguments passed "
- "after this one. Useful for fuzzers that need to do their own "
- "argument parsing.")
-
-FUZZER_FLAG_STRING(run_equivalence_server, "Experimental")
-FUZZER_FLAG_STRING(use_equivalence_server, "Experimental")
-FUZZER_FLAG_INT(analyze_dict, 0, "Experimental")
+FUZZER_FLAG_INT(drill, 0, "Experimental: fuzz using a single unit as the seed "
+ "corpus, then merge with the initial corpus")
+FUZZER_FLAG_INT(output_csv, 0, "Enable pulse output in CSV format.")
+FUZZER_FLAG_INT(print_new_cov_pcs, 0, "If 1, print out newly covered PCs.")
-FUZZER_DEPRECATED_FLAG(exit_on_first)
-FUZZER_DEPRECATED_FLAG(save_minimized_corpus)
-FUZZER_DEPRECATED_FLAG(sync_command)
-FUZZER_DEPRECATED_FLAG(sync_timeout)
-FUZZER_DEPRECATED_FLAG(test_single_input)
-FUZZER_DEPRECATED_FLAG(drill)
-FUZZER_DEPRECATED_FLAG(truncate_units)
-FUZZER_DEPRECATED_FLAG(output_csv)
diff --git a/gnu/llvm/lib/Fuzzer/FuzzerIO.cpp b/gnu/llvm/lib/Fuzzer/FuzzerIO.cpp
index e3f609ed8a8..043fad396d5 100644
--- a/gnu/llvm/lib/Fuzzer/FuzzerIO.cpp
+++ b/gnu/llvm/lib/Fuzzer/FuzzerIO.cpp
@@ -8,44 +8,54 @@
//===----------------------------------------------------------------------===//
// IO functions.
//===----------------------------------------------------------------------===//
-
-#include "FuzzerIO.h"
-#include "FuzzerDefs.h"
-#include "FuzzerExtFunctions.h"
-#include <algorithm>
-#include <cstdarg>
-#include <fstream>
+#include "FuzzerInternal.h"
#include <iterator>
-#include <sys/stat.h>
+#include <fstream>
+#include <dirent.h>
#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <cstdarg>
+#include <cstdio>
namespace fuzzer {
-static FILE *OutputFile = stderr;
-
-long GetEpoch(const std::string &Path) {
+static long GetEpoch(const std::string &Path) {
struct stat St;
if (stat(Path.c_str(), &St))
return 0; // Can't stat, be conservative.
return St.st_mtime;
}
-Unit FileToVector(const std::string &Path, size_t MaxSize, bool ExitOnError) {
+static std::vector<std::string> ListFilesInDir(const std::string &Dir,
+ long *Epoch) {
+ std::vector<std::string> V;
+ if (Epoch) {
+ auto E = GetEpoch(Dir);
+ if (*Epoch >= E) return V;
+ *Epoch = E;
+ }
+ DIR *D = opendir(Dir.c_str());
+ if (!D) {
+ Printf("No such directory: %s; exiting\n", Dir.c_str());
+ exit(1);
+ }
+ while (auto E = readdir(D)) {
+ if (E->d_type == DT_REG || E->d_type == DT_LNK)
+ V.push_back(E->d_name);
+ }
+ closedir(D);
+ return V;
+}
+
+Unit FileToVector(const std::string &Path) {
std::ifstream T(Path);
- if (ExitOnError && !T) {
+ if (!T) {
Printf("No such directory: %s; exiting\n", Path.c_str());
exit(1);
}
-
- T.seekg(0, T.end);
- size_t FileLen = T.tellg();
- if (MaxSize)
- FileLen = std::min(FileLen, MaxSize);
-
- T.seekg(0, T.beg);
- Unit Res(FileLen);
- T.read(reinterpret_cast<char *>(Res.data()), FileLen);
- return Res;
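+  // Read the entire stream into a byte vector in one pass.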
+ return Unit((std::istreambuf_iterator<char>(T)),
+ std::istreambuf_iterator<char>());
}
std::string FileToString(const std::string &Path) {
@@ -67,52 +77,25 @@ void WriteToFile(const Unit &U, const std::string &Path) {
}
void ReadDirToVectorOfUnits(const char *Path, std::vector<Unit> *V,
- long *Epoch, size_t MaxSize, bool ExitOnError) {
+ long *Epoch) {
long E = Epoch ? *Epoch : 0;
- std::vector<std::string> Files;
- ListFilesInDirRecursive(Path, Epoch, &Files, /*TopDir*/true);
- size_t NumLoaded = 0;
- for (size_t i = 0; i < Files.size(); i++) {
- auto &X = Files[i];
- if (Epoch && GetEpoch(X) < E) continue;
- NumLoaded++;
- if ((NumLoaded & (NumLoaded - 1)) == 0 && NumLoaded >= 1024)
- Printf("Loaded %zd/%zd files from %s\n", NumLoaded, Files.size(), Path);
- auto S = FileToVector(X, MaxSize, ExitOnError);
- if (!S.empty())
- V->push_back(S);
+ for (auto &X : ListFilesInDir(Path, Epoch)) {
+ auto FilePath = DirPlusFile(Path, X);
+ if (Epoch && GetEpoch(FilePath) < E) continue;
+ V->push_back(FileToVector(FilePath));
}
}
std::string DirPlusFile(const std::string &DirPath,
const std::string &FileName) {
- return DirPath + GetSeparator() + FileName;
-}
-
-void DupAndCloseStderr() {
- int OutputFd = DuplicateFile(2);
- if (OutputFd > 0) {
- FILE *NewOutputFile = OpenFile(OutputFd, "w");
- if (NewOutputFile) {
- OutputFile = NewOutputFile;
- if (EF->__sanitizer_set_report_fd)
- EF->__sanitizer_set_report_fd(
- reinterpret_cast<void *>(GetHandleFromFd(OutputFd)));
- DiscardOutput(2);
- }
- }
-}
-
-void CloseStdout() {
- DiscardOutput(1);
+ return DirPath + "/" + FileName;
}
void Printf(const char *Fmt, ...) {
va_list ap;
va_start(ap, Fmt);
- vfprintf(OutputFile, Fmt, ap);
+ vfprintf(stderr, Fmt, ap);
va_end(ap);
- fflush(OutputFile);
}
} // namespace fuzzer
diff --git a/gnu/llvm/lib/Fuzzer/FuzzerInterface.h b/gnu/llvm/lib/Fuzzer/FuzzerInterface.h
index c2c0a39843c..e22b27a3dd2 100644
--- a/gnu/llvm/lib/Fuzzer/FuzzerInterface.h
+++ b/gnu/llvm/lib/Fuzzer/FuzzerInterface.h
@@ -6,62 +6,193 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-// Define the interface between libFuzzer and the library being tested.
+// Define the interface between the Fuzzer and the library being tested.
//===----------------------------------------------------------------------===//
-// NOTE: the libFuzzer interface is thin and in the majority of cases
-// you should not include this file into your target. In 95% of cases
-// all you need is to define the following function in your file:
-// extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size);
-
-// WARNING: keep the interface in C.
+// WARNING: keep the interface free of STL or any other header-based C++ lib,
+// to avoid bad interactions between the code used in the fuzzer and
+// the code used in the target function.
#ifndef LLVM_FUZZER_INTERFACE_H
#define LLVM_FUZZER_INTERFACE_H
-#include <stddef.h>
-#include <stdint.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif // __cplusplus
-
-// Mandatory user-provided target function.
-// Executes the code under test with [Data, Data+Size) as the input.
-// libFuzzer will invoke this function *many* times with different inputs.
-// Must return 0.
-int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size);
-
-// Optional user-provided initialization function.
-// If provided, this function will be called by libFuzzer once at startup.
-// It may read and modify argc/argv.
-// Must return 0.
-int LLVMFuzzerInitialize(int *argc, char ***argv);
-
-// Optional user-provided custom mutator.
-// Mutates raw data in [Data, Data+Size) inplace.
-// Returns the new size, which is not greater than MaxSize.
-// Given the same Seed produces the same mutation.
-size_t LLVMFuzzerCustomMutator(uint8_t *Data, size_t Size, size_t MaxSize,
- unsigned int Seed);
-
-// Optional user-provided custom cross-over function.
-// Combines pieces of Data1 & Data2 together into Out.
-// Returns the new size, which is not greater than MaxOutSize.
-// Should produce the same mutation given the same Seed.
-size_t LLVMFuzzerCustomCrossOver(const uint8_t *Data1, size_t Size1,
- const uint8_t *Data2, size_t Size2,
- uint8_t *Out, size_t MaxOutSize,
- unsigned int Seed);
-
-// Experimental, may go away in future.
-// libFuzzer-provided function to be used inside LLVMFuzzerCustomMutator.
-// Mutates raw data in [Data, Data+Size) inplace.
-// Returns the new size, which is not greater than MaxSize.
-size_t LLVMFuzzerMutate(uint8_t *Data, size_t Size, size_t MaxSize);
-
-#ifdef __cplusplus
-} // extern "C"
-#endif // __cplusplus
+#include <limits>
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+#include <string>
+
+namespace fuzzer {
+typedef std::vector<uint8_t> Unit;
+
+/// Must return 0; values other than zero are reserved for future use.
+typedef int (*UserCallback)(const uint8_t *Data, size_t Size);
+/** Simple C-like interface with a single user-supplied callback.
+
+Usage:
+
+#\code
+#include "FuzzerInterface.h"
+
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
+ DoStuffWithData(Data, Size);
+ return 0;
+}
+
+// Implement your own main() or use the one from FuzzerMain.cpp.
+int main(int argc, char **argv) {
+ InitializeMeIfNeeded();
+ return fuzzer::FuzzerDriver(argc, argv, LLVMFuzzerTestOneInput);
+}
+#\endcode
+*/
+int FuzzerDriver(int argc, char **argv, UserCallback Callback);
+
+class FuzzerRandomBase {
+ public:
+ FuzzerRandomBase(){}
+  virtual ~FuzzerRandomBase() {}
+ virtual void ResetSeed(unsigned int seed) = 0;
+ // Return a random number.
+ virtual size_t Rand() = 0;
+ // Return a random number in range [0,n).
+ size_t operator()(size_t n) { return n ? Rand() % n : 0; }
+ bool RandBool() { return Rand() % 2; }
+};
+
+class FuzzerRandomLibc : public FuzzerRandomBase {
+ public:
+ FuzzerRandomLibc(unsigned int seed) { ResetSeed(seed); }
+ void ResetSeed(unsigned int seed) override;
+ ~FuzzerRandomLibc() override {}
+ size_t Rand() override;
+};
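
The two classes above are the whole RNG surface of the fuzzer, so a deterministic generator can be swapped in for reproducible experiments. A hypothetical sketch (not part of the patch), using Knuth's MMIX LCG constants and returning the high bits, since the low bits of an LCG are weak:

    #include "FuzzerInterface.h"
    #include <cstdint>

    // Hypothetical drop-in replacement for FuzzerRandomLibc.
    class FuzzerRandomLCG : public fuzzer::FuzzerRandomBase {
     public:
      explicit FuzzerRandomLCG(unsigned int seed) { ResetSeed(seed); }
      void ResetSeed(unsigned int seed) override { State = seed; }
      size_t Rand() override {
        State = State * 6364136223846793005ULL + 1442695040888963407ULL;
        return static_cast<size_t>(State >> 33);
      }
     private:
      uint64_t State = 0;
    };
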
+
+class MutationDispatcher {
+ public:
+ MutationDispatcher(FuzzerRandomBase &Rand);
+ ~MutationDispatcher();
+ /// Indicate that we are about to start a new sequence of mutations.
+ void StartMutationSequence();
+ /// Print the current sequence of mutations.
+ void PrintMutationSequence();
+ /// Mutates data by shuffling bytes.
+ size_t Mutate_ShuffleBytes(uint8_t *Data, size_t Size, size_t MaxSize);
+ /// Mutates data by erasing a byte.
+ size_t Mutate_EraseByte(uint8_t *Data, size_t Size, size_t MaxSize);
+ /// Mutates data by inserting a byte.
+ size_t Mutate_InsertByte(uint8_t *Data, size_t Size, size_t MaxSize);
+  /// Mutates data by changing one byte.
+ size_t Mutate_ChangeByte(uint8_t *Data, size_t Size, size_t MaxSize);
+  /// Mutates data by changing one bit.
+ size_t Mutate_ChangeBit(uint8_t *Data, size_t Size, size_t MaxSize);
+
+ /// Mutates data by adding a word from the manual dictionary.
+ size_t Mutate_AddWordFromManualDictionary(uint8_t *Data, size_t Size,
+ size_t MaxSize);
+
+ /// Mutates data by adding a word from the automatic dictionary.
+ size_t Mutate_AddWordFromAutoDictionary(uint8_t *Data, size_t Size,
+ size_t MaxSize);
+
+ /// Tries to find an ASCII integer in Data, changes it to another ASCII int.
+ size_t Mutate_ChangeASCIIInteger(uint8_t *Data, size_t Size, size_t MaxSize);
+
+  /// Crosses over Data with some other element of the corpus.
+ size_t Mutate_CrossOver(uint8_t *Data, size_t Size, size_t MaxSize);
+
+ /// Applies one of the above mutations.
+ /// Returns the new size of data which could be up to MaxSize.
+ size_t Mutate(uint8_t *Data, size_t Size, size_t MaxSize);
+
+ /// Creates a cross-over of two pieces of Data, returns its size.
+ size_t CrossOver(const uint8_t *Data1, size_t Size1, const uint8_t *Data2,
+ size_t Size2, uint8_t *Out, size_t MaxOutSize);
+
+ void AddWordToManualDictionary(const Unit &Word);
+
+ void AddWordToAutoDictionary(const Unit &Word, size_t PositionHint);
+ void ClearAutoDictionary();
+
+ void SetCorpus(const std::vector<Unit> *Corpus);
+
+ private:
+ FuzzerRandomBase &Rand;
+ struct Impl;
+ Impl *MDImpl;
+};
+
+// For backward compatibility only, deprecated.
+static inline size_t Mutate(uint8_t *Data, size_t Size, size_t MaxSize,
+ FuzzerRandomBase &Rand) {
+ MutationDispatcher MD(Rand);
+ return MD.Mutate(Data, Size, MaxSize);
+}
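
A hypothetical standalone driver for the dispatcher declared above, showing the sequence bookkeeping around Mutate (buffer contents and iteration count are illustrative only):

    #include "FuzzerInterface.h"
    #include <cstdint>

    int main() {
      fuzzer::FuzzerRandomLibc Rand(1);
      fuzzer::MutationDispatcher MD(Rand);
      uint8_t Buf[64] = "hello";
      size_t Size = 5;
      MD.StartMutationSequence();
      for (int i = 0; i < 5; i++)
        Size = MD.Mutate(Buf, Size, sizeof(Buf));  // One random mutator per call.
      MD.PrintMutationSequence();  // e.g. "InsertByte-ChangeBit-CrossOver-"
      return 0;
    }
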
+
+/** An abstract class that allows to use user-supplied mutators with libFuzzer.
+
+Usage:
+
+#\code
+#include "FuzzerInterface.h"
+class MyFuzzer : public fuzzer::UserSuppliedFuzzer {
+ public:
+ MyFuzzer(fuzzer::FuzzerRandomBase *Rand);
+ // Must define the target function.
+ int TargetFunction(...) { ...; return 0; }
+ // Optionally define the mutator.
+ size_t Mutate(...) { ... }
+ // Optionally define the CrossOver method.
+ size_t CrossOver(...) { ... }
+};
+
+int main(int argc, char **argv) {
+ MyFuzzer F;
+ fuzzer::FuzzerDriver(argc, argv, F);
+}
+#\endcode
+*/
+class UserSuppliedFuzzer {
+ public:
+ UserSuppliedFuzzer(FuzzerRandomBase *Rand);
+ /// Executes the target function on 'Size' bytes of 'Data'.
+ virtual int TargetFunction(const uint8_t *Data, size_t Size) = 0;
+ virtual void StartMutationSequence() { MD.StartMutationSequence(); }
+ virtual void PrintMutationSequence() { MD.PrintMutationSequence(); }
+ virtual void SetCorpus(const std::vector<Unit> *Corpus) {
+ MD.SetCorpus(Corpus);
+ }
+ /// Mutates 'Size' bytes of data in 'Data' inplace into up to 'MaxSize' bytes,
+ /// returns the new size of the data, which should be positive.
+ virtual size_t Mutate(uint8_t *Data, size_t Size, size_t MaxSize) {
+ return MD.Mutate(Data, Size, MaxSize);
+ }
+ /// Crosses 'Data1' and 'Data2', writes up to 'MaxOutSize' bytes into Out,
+ /// returns the number of bytes written, which should be positive.
+ virtual size_t CrossOver(const uint8_t *Data1, size_t Size1,
+ const uint8_t *Data2, size_t Size2,
+ uint8_t *Out, size_t MaxOutSize) {
+ return MD.CrossOver(Data1, Size1, Data2, Size2, Out, MaxOutSize);
+ }
+ virtual ~UserSuppliedFuzzer();
+
+ FuzzerRandomBase &GetRand() { return *Rand; }
+
+ MutationDispatcher &GetMD() { return MD; }
+
+ private:
+ bool OwnRand = false;
+ FuzzerRandomBase *Rand;
+ MutationDispatcher MD;
+};
+
+/// Runs the fuzzing with the UserSuppliedFuzzer.
+int FuzzerDriver(int argc, char **argv, UserSuppliedFuzzer &USF);
+
+/// More C++-ish interface.
+int FuzzerDriver(const std::vector<std::string> &Args, UserSuppliedFuzzer &USF);
+int FuzzerDriver(const std::vector<std::string> &Args, UserCallback Callback);
+
+} // namespace fuzzer
#endif // LLVM_FUZZER_INTERFACE_H
diff --git a/gnu/llvm/lib/Fuzzer/FuzzerInternal.h b/gnu/llvm/lib/Fuzzer/FuzzerInternal.h
index 3fc3fe004ce..c1e9daac980 100644
--- a/gnu/llvm/lib/Fuzzer/FuzzerInternal.h
+++ b/gnu/llvm/lib/Fuzzer/FuzzerInternal.h
@@ -12,132 +12,196 @@
#ifndef LLVM_FUZZER_INTERNAL_H
#define LLVM_FUZZER_INTERNAL_H
-#include "FuzzerDefs.h"
-#include "FuzzerExtFunctions.h"
-#include "FuzzerInterface.h"
-#include "FuzzerOptions.h"
-#include "FuzzerSHA1.h"
-#include "FuzzerValueBitMap.h"
-#include <algorithm>
-#include <atomic>
-#include <chrono>
+#include <cassert>
#include <climits>
+#include <chrono>
+#include <cstddef>
#include <cstdlib>
-#include <string.h>
+#include <string>
+#include <vector>
+#include <unordered_set>
-namespace fuzzer {
+#include "FuzzerInterface.h"
+namespace fuzzer {
using namespace std::chrono;
-class Fuzzer {
-public:
+std::string FileToString(const std::string &Path);
+Unit FileToVector(const std::string &Path);
+void ReadDirToVectorOfUnits(const char *Path, std::vector<Unit> *V,
+ long *Epoch);
+void WriteToFile(const Unit &U, const std::string &Path);
+void CopyFileToErr(const std::string &Path);
+// Returns "Dir/FileName" or equivalent for the current OS.
+std::string DirPlusFile(const std::string &DirPath,
+ const std::string &FileName);
+
+void Printf(const char *Fmt, ...);
+void Print(const Unit &U, const char *PrintAfter = "");
+void PrintASCII(const uint8_t *Data, size_t Size, const char *PrintAfter = "");
+void PrintASCII(const Unit &U, const char *PrintAfter = "");
+std::string Hash(const Unit &U);
+void SetTimer(int Seconds);
+std::string Base64(const Unit &U);
+int ExecuteCommand(const std::string &Command);
+
+// Private copy of SHA1 implementation.
+static const int kSHA1NumBytes = 20;
+// Computes SHA1 hash of 'Len' bytes in 'Data', writes kSHA1NumBytes to 'Out'.
+void ComputeSHA1(const uint8_t *Data, size_t Len, uint8_t *Out);
+
+// Changes U to contain only ASCII (isprint+isspace) characters.
+// Returns true iff U has been changed.
+bool ToASCII(Unit &U);
+bool IsASCII(const Unit &U);
+
+int NumberOfCpuCores();
+int GetPid();
+
+// Dictionary.
+
+// Parses one dictionary entry.
+// If successful, writes the entry to Unit and returns true;
+// otherwise returns false.
+bool ParseOneDictionaryEntry(const std::string &Str, Unit *U);
+// Parses the dictionary file, fills Units, returns true iff all lines
+// were parsed successfully.
+bool ParseDictionaryFile(const std::string &Text, std::vector<Unit> *Units);
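
For illustration, a sketch of driving the parser declared above, assuming the AFL-style entry syntax (optional name, quoted value, \xNN escapes) that libFuzzer dictionaries use:

    #include "FuzzerInternal.h"
    #include <cassert>

    int main() {
      fuzzer::Unit U;
      // One dictionary line: kw1="foo\x0Abar"
      bool Ok = fuzzer::ParseOneDictionaryEntry("kw1=\"foo\\x0Abar\"", &U);
      assert(Ok && U.size() == 7);  // 'f' 'o' 'o' '\n' 'b' 'a' 'r'
      return 0;
    }
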
- Fuzzer(UserCallback CB, InputCorpus &Corpus, MutationDispatcher &MD,
- FuzzingOptions Options);
- ~Fuzzer();
+class Fuzzer {
+ public:
+ struct FuzzingOptions {
+ int Verbosity = 1;
+ int MaxLen = 0;
+ int UnitTimeoutSec = 300;
+ int MaxTotalTimeSec = 0;
+ bool DoCrossOver = true;
+ int MutateDepth = 5;
+ bool ExitOnFirst = false;
+ bool UseCounters = false;
+ bool UseIndirCalls = true;
+ bool UseTraces = false;
+ bool UseFullCoverageSet = false;
+ bool Reload = true;
+ bool ShuffleAtStartUp = true;
+ int PreferSmallDuringInitialShuffle = -1;
+ size_t MaxNumberOfRuns = ULONG_MAX;
+ int SyncTimeout = 600;
+ int ReportSlowUnits = 10;
+ bool OnlyASCII = false;
+ std::string OutputCorpus;
+ std::string SyncCommand;
+ std::string ArtifactPrefix = "./";
+ std::string ExactArtifactPath;
+ bool SaveArtifacts = true;
+    bool PrintNEW = true;  // Print a status line when new units are found.
+ bool OutputCSV = false;
+ bool PrintNewCovPcs = false;
+ };
+ Fuzzer(UserSuppliedFuzzer &USF, FuzzingOptions Options);
+ void AddToCorpus(const Unit &U) { Corpus.push_back(U); }
+ size_t ChooseUnitIdxToMutate();
+ const Unit &ChooseUnitToMutate() { return Corpus[ChooseUnitIdxToMutate()]; };
void Loop();
- void MinimizeCrashLoop(const Unit &U);
- void ShuffleAndMinimize(UnitVector *V);
- void RereadOutputCorpus(size_t MaxSize);
+ void Drill();
+ void ShuffleAndMinimize();
+ void InitializeTraceState();
+ size_t CorpusSize() const { return Corpus.size(); }
+ void ReadDir(const std::string &Path, long *Epoch) {
+ Printf("Loading corpus: %s\n", Path.c_str());
+ ReadDirToVectorOfUnits(Path.c_str(), &Corpus, Epoch);
+ }
+ void RereadOutputCorpus();
+ // Save the current corpus to OutputCorpus.
+ void SaveCorpus();
size_t secondsSinceProcessStartUp() {
return duration_cast<seconds>(system_clock::now() - ProcessStartTime)
.count();
}
- bool TimedOut() {
- return Options.MaxTotalTimeSec > 0 &&
- secondsSinceProcessStartUp() >
- static_cast<size_t>(Options.MaxTotalTimeSec);
- }
-
- size_t execPerSec() {
- size_t Seconds = secondsSinceProcessStartUp();
- return Seconds ? TotalNumberOfRuns / Seconds : 0;
- }
-
size_t getTotalNumberOfRuns() { return TotalNumberOfRuns; }
static void StaticAlarmCallback();
- static void StaticCrashSignalCallback();
- static void StaticInterruptCallback();
- static void StaticFileSizeExceedCallback();
- void ExecuteCallback(const uint8_t *Data, size_t Size);
- bool RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile = false,
- InputInfo *II = nullptr);
+ void ExecuteCallback(const Unit &U);
// Merge Corpora[1:] into Corpora[0].
void Merge(const std::vector<std::string> &Corpora);
- void CrashResistantMerge(const std::vector<std::string> &Args,
- const std::vector<std::string> &Corpora,
- const char *CoverageSummaryInputPathOrNull,
- const char *CoverageSummaryOutputPathOrNull);
- void CrashResistantMergeInternalStep(const std::string &ControlFilePath);
- MutationDispatcher &GetMD() { return MD; }
- void PrintFinalStats();
- void SetMaxInputLen(size_t MaxInputLen);
- void SetMaxMutationLen(size_t MaxMutationLen);
- void RssLimitCallback();
-
- bool InFuzzingThread() const { return IsMyThread; }
- size_t GetCurrentUnitInFuzzingThead(const uint8_t **Data) const;
- void TryDetectingAMemoryLeak(const uint8_t *Data, size_t Size,
- bool DuringInitialCorpusExecution);
-
- void HandleMalloc(size_t Size);
- void AnnounceOutput(const uint8_t *Data, size_t Size);
-
-private:
+
+ private:
void AlarmCallback();
- void CrashCallback();
- void CrashOnOverwrittenData();
- void InterruptCallback();
void MutateAndTestOne();
- void ReportNewCoverage(InputInfo *II, const Unit &U);
- void PrintPulseAndReportSlowInput(const uint8_t *Data, size_t Size);
+ void ReportNewCoverage(const Unit &U);
+ bool RunOne(const Unit &U);
+ void RunOneAndUpdateCorpus(Unit &U);
void WriteToOutputCorpus(const Unit &U);
void WriteUnitToFileWithPrefix(const Unit &U, const char *Prefix);
- void PrintStats(const char *Where, const char *End = "\n", size_t Units = 0);
- void PrintStatusForNewUnit(const Unit &U, const char *Text);
- void ShuffleCorpus(UnitVector *V);
- void CheckExitOnSrcPosOrItem();
+ void PrintStats(const char *Where, const char *End = "\n");
+ void PrintStatusForNewUnit(const Unit &U);
+ void PrintUnitInASCII(const Unit &U, const char *PrintAfter = "");
+
+ void SyncCorpus();
+
+ size_t RecordBlockCoverage();
+ size_t RecordCallerCalleeCoverage();
+ void PrepareCoverageBeforeRun();
+ bool CheckCoverageAfterRun();
+
+
+ // Trace-based fuzzing: we run a unit with some kind of tracing
+  // enabled and record potentially useful mutations. Then we
+  // apply these mutations one by one to the unit and run it again.
+
+ // Start tracing; forget all previously proposed mutations.
+ void StartTraceRecording();
+ // Stop tracing.
+ void StopTraceRecording();
+ void SetDeathCallback();
static void StaticDeathCallback();
- void DumpCurrentUnit(const char *Prefix);
void DeathCallback();
-
- void AllocateCurrentUnitData();
- uint8_t *CurrentUnitData = nullptr;
- std::atomic<size_t> CurrentUnitSize;
- uint8_t BaseSha1[kSHA1NumBytes]; // Checksum of the base unit.
- bool RunningCB = false;
+ Unit CurrentUnit;
size_t TotalNumberOfRuns = 0;
- size_t NumberOfNewUnitsAdded = 0;
+ size_t TotalNumberOfExecutedTraceBasedMutations = 0;
- bool HasMoreMallocsThanFrees = false;
- size_t NumberOfLeakDetectionAttempts = 0;
+ std::vector<Unit> Corpus;
+ std::unordered_set<std::string> UnitHashesAddedToCorpus;
- UserCallback CB;
- InputCorpus &Corpus;
- MutationDispatcher &MD;
- FuzzingOptions Options;
+ // For UseCounters
+ std::vector<uint8_t> CounterBitmap;
+ size_t TotalBits() { // Slow. Call it only for printing stats.
+ size_t Res = 0;
+ for (auto x : CounterBitmap) Res += __builtin_popcount(x);
+ return Res;
+ }
+ UserSuppliedFuzzer &USF;
+ FuzzingOptions Options;
system_clock::time_point ProcessStartTime = system_clock::now();
- system_clock::time_point UnitStartTime, UnitStopTime;
+ system_clock::time_point LastExternalSync = system_clock::now();
+ system_clock::time_point UnitStartTime;
long TimeOfLongestUnitInSeconds = 0;
long EpochOfLastReadOfOutputCorpus = 0;
+ size_t LastRecordedBlockCoverage = 0;
+ size_t LastRecordedCallerCalleeCoverage = 0;
+ size_t LastCoveragePcBufferLen = 0;
+};
- size_t MaxInputLen = 0;
- size_t MaxMutationLen = 0;
+class SimpleUserSuppliedFuzzer: public UserSuppliedFuzzer {
+ public:
+ SimpleUserSuppliedFuzzer(FuzzerRandomBase *Rand, UserCallback Callback)
+ : UserSuppliedFuzzer(Rand), Callback(Callback) {}
- std::vector<uint32_t> UniqFeatureSetTmp;
+ virtual int TargetFunction(const uint8_t *Data, size_t Size) override {
+ return Callback(Data, Size);
+ }
- // Need to know our own thread.
- static thread_local bool IsMyThread;
+ private:
+ UserCallback Callback = nullptr;
};
-} // namespace fuzzer
+} // namespace fuzzer
#endif // LLVM_FUZZER_INTERNAL_H
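
Taken together, the declarations in this header wire up as in the following hypothetical sketch (the toy target and the option values are illustrative, not from the patch):

    #include "FuzzerInternal.h"

    static int TestOneInput(const uint8_t *Data, size_t Size) {
      return 0;  // Exercise the code under test here; must return 0.
    }

    int main() {
      fuzzer::FuzzerRandomLibc Rand(0);
      fuzzer::SimpleUserSuppliedFuzzer USF(&Rand, TestOneInput);
      fuzzer::Fuzzer::FuzzingOptions Opts;
      Opts.MaxLen = 64;           // Cap inputs at 64 bytes.
      Opts.MaxTotalTimeSec = 10;  // Stop after roughly ten seconds.
      fuzzer::Fuzzer F(USF, Opts);
      F.AddToCorpus(fuzzer::Unit{'h', 'i'});
      F.ShuffleAndMinimize();     // Execute and filter the seed corpus.
      F.Loop();                   // Mutate and run until a limit is hit.
      return 0;
    }
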
diff --git a/gnu/llvm/lib/Fuzzer/FuzzerLoop.cpp b/gnu/llvm/lib/Fuzzer/FuzzerLoop.cpp
index 8ac7a847aef..5237682ff24 100644
--- a/gnu/llvm/lib/Fuzzer/FuzzerLoop.cpp
+++ b/gnu/llvm/lib/Fuzzer/FuzzerLoop.cpp
@@ -9,136 +9,65 @@
// Fuzzer's main loop.
//===----------------------------------------------------------------------===//
-#include "FuzzerCorpus.h"
-#include "FuzzerIO.h"
#include "FuzzerInternal.h"
-#include "FuzzerMutate.h"
-#include "FuzzerRandom.h"
-#include "FuzzerShmem.h"
-#include "FuzzerTracePC.h"
#include <algorithm>
-#include <cstring>
-#include <memory>
-#include <set>
#if defined(__has_include)
-#if __has_include(<sanitizer / lsan_interface.h>)
-#include <sanitizer/lsan_interface.h>
-#endif
+# if __has_include(<sanitizer/coverage_interface.h>)
+# include <sanitizer/coverage_interface.h>
+# endif
#endif
-#define NO_SANITIZE_MEMORY
-#if defined(__has_feature)
-#if __has_feature(memory_sanitizer)
-#undef NO_SANITIZE_MEMORY
-#define NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
-#endif
-#endif
+extern "C" {
+// Re-declare some of the sanitizer functions as "weak" so that
+// libFuzzer can be linked w/o the sanitizers and sanitizer-coverage
+// (in which case it will complain at start-up time).
+__attribute__((weak)) void __sanitizer_print_stack_trace();
+__attribute__((weak)) void __sanitizer_reset_coverage();
+__attribute__((weak)) size_t __sanitizer_get_total_unique_caller_callee_pairs();
+__attribute__((weak)) size_t __sanitizer_get_total_unique_coverage();
+__attribute__((weak))
+void __sanitizer_set_death_callback(void (*callback)(void));
+__attribute__((weak)) size_t __sanitizer_get_number_of_counters();
+__attribute__((weak))
+uintptr_t __sanitizer_update_counter_bitset_and_clear_counters(uint8_t *bitset);
+__attribute__((weak)) uintptr_t
+__sanitizer_get_coverage_pc_buffer(uintptr_t **data);
+}
namespace fuzzer {
static const size_t kMaxUnitSizeToPrint = 256;
-thread_local bool Fuzzer::IsMyThread;
+static void MissingWeakApiFunction(const char *FnName) {
+ Printf("ERROR: %s is not defined. Exiting.\n"
+ "Did you use -fsanitize-coverage=... to build your code?\n", FnName);
+ exit(1);
+}
-SharedMemoryRegion SMR;
+#define CHECK_WEAK_API_FUNCTION(fn) \
+ do { \
+ if (!fn) \
+ MissingWeakApiFunction(#fn); \
+ } while (false)
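
The weak declarations above rely on an unresolved weak symbol evaluating to null, which is exactly what CHECK_WEAK_API_FUNCTION tests. A minimal standalone sketch of the same pattern (assumes GCC/Clang on ELF; the function name is made up):

    #include <cstdio>

    // No definition is provided, so the symbol's address is null at link time.
    extern "C" __attribute__((weak)) void maybe_present();

    int main() {
      if (maybe_present)
        maybe_present();  // A definition was linked in.
      else
        printf("maybe_present missing; bail out as MissingWeakApiFunction does\n");
      return 0;
    }
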
// Only one Fuzzer per process.
static Fuzzer *F;
-// Leak detection is expensive, so we first check if there were more mallocs
-// than frees (using the sanitizer malloc hooks) and only then try to call lsan.
-struct MallocFreeTracer {
- void Start(int TraceLevel) {
- this->TraceLevel = TraceLevel;
- if (TraceLevel)
- Printf("MallocFreeTracer: START\n");
- Mallocs = 0;
- Frees = 0;
- }
- // Returns true if there were more mallocs than frees.
- bool Stop() {
- if (TraceLevel)
- Printf("MallocFreeTracer: STOP %zd %zd (%s)\n", Mallocs.load(),
- Frees.load(), Mallocs == Frees ? "same" : "DIFFERENT");
- bool Result = Mallocs > Frees;
- Mallocs = 0;
- Frees = 0;
- TraceLevel = 0;
- return Result;
- }
- std::atomic<size_t> Mallocs;
- std::atomic<size_t> Frees;
- int TraceLevel = 0;
-};
-
-static MallocFreeTracer AllocTracer;
-
-ATTRIBUTE_NO_SANITIZE_MEMORY
-void MallocHook(const volatile void *ptr, size_t size) {
- size_t N = AllocTracer.Mallocs++;
- F->HandleMalloc(size);
- if (int TraceLevel = AllocTracer.TraceLevel) {
- Printf("MALLOC[%zd] %p %zd\n", N, ptr, size);
- if (TraceLevel >= 2 && EF)
- EF->__sanitizer_print_stack_trace();
- }
-}
-
-ATTRIBUTE_NO_SANITIZE_MEMORY
-void FreeHook(const volatile void *ptr) {
- size_t N = AllocTracer.Frees++;
- if (int TraceLevel = AllocTracer.TraceLevel) {
- Printf("FREE[%zd] %p\n", N, ptr);
- if (TraceLevel >= 2 && EF)
- EF->__sanitizer_print_stack_trace();
- }
-}
-
-// Crash on a single malloc that exceeds the rss limit.
-void Fuzzer::HandleMalloc(size_t Size) {
- if (!Options.RssLimitMb || (Size >> 20) < (size_t)Options.RssLimitMb)
- return;
- Printf("==%d== ERROR: libFuzzer: out-of-memory (malloc(%zd))\n", GetPid(),
- Size);
- Printf(" To change the out-of-memory limit use -rss_limit_mb=<N>\n\n");
- if (EF->__sanitizer_print_stack_trace)
- EF->__sanitizer_print_stack_trace();
- DumpCurrentUnit("oom-");
- Printf("SUMMARY: libFuzzer: out-of-memory\n");
- PrintFinalStats();
- _Exit(Options.ErrorExitCode); // Stop right now.
-}
-
-Fuzzer::Fuzzer(UserCallback CB, InputCorpus &Corpus, MutationDispatcher &MD,
- FuzzingOptions Options)
- : CB(CB), Corpus(Corpus), MD(MD), Options(Options) {
- if (EF->__sanitizer_set_death_callback)
- EF->__sanitizer_set_death_callback(StaticDeathCallback);
+Fuzzer::Fuzzer(UserSuppliedFuzzer &USF, FuzzingOptions Options)
+ : USF(USF), Options(Options) {
+ SetDeathCallback();
+ InitializeTraceState();
assert(!F);
F = this;
- TPC.ResetMaps();
- IsMyThread = true;
- if (Options.DetectLeaks && EF->__sanitizer_install_malloc_and_free_hooks)
- EF->__sanitizer_install_malloc_and_free_hooks(MallocHook, FreeHook);
- TPC.SetUseCounters(Options.UseCounters);
- TPC.SetUseValueProfile(Options.UseValueProfile);
- TPC.SetPrintNewPCs(Options.PrintNewCovPcs);
-
- if (Options.Verbosity)
- TPC.PrintModuleInfo();
- if (!Options.OutputCorpus.empty() && Options.ReloadIntervalSec)
- EpochOfLastReadOfOutputCorpus = GetEpoch(Options.OutputCorpus);
- MaxInputLen = MaxMutationLen = Options.MaxLen;
- AllocateCurrentUnitData();
- CurrentUnitSize = 0;
- memset(BaseSha1, 0, sizeof(BaseSha1));
}
-Fuzzer::~Fuzzer() { }
+void Fuzzer::SetDeathCallback() {
+ CHECK_WEAK_API_FUNCTION(__sanitizer_set_death_callback);
+ __sanitizer_set_death_callback(StaticDeathCallback);
+}
-void Fuzzer::AllocateCurrentUnitData() {
- if (CurrentUnitData || MaxInputLen == 0) return;
- CurrentUnitData = new uint8_t[MaxInputLen];
+void Fuzzer::PrintUnitInASCII(const Unit &U, const char *PrintAfter) {
+ PrintASCII(U, PrintAfter);
}
void Fuzzer::StaticDeathCallback() {
@@ -146,23 +75,13 @@ void Fuzzer::StaticDeathCallback() {
F->DeathCallback();
}
-void Fuzzer::DumpCurrentUnit(const char *Prefix) {
- if (!CurrentUnitData) return; // Happens when running individual inputs.
- MD.PrintMutationSequence();
- Printf("; base unit: %s\n", Sha1ToString(BaseSha1).c_str());
- size_t UnitSize = CurrentUnitSize;
- if (UnitSize <= kMaxUnitSizeToPrint) {
- PrintHexArray(CurrentUnitData, UnitSize, "\n");
- PrintASCII(CurrentUnitData, UnitSize, "\n");
- }
- WriteUnitToFileWithPrefix({CurrentUnitData, CurrentUnitData + UnitSize},
- Prefix);
-}
-
-NO_SANITIZE_MEMORY
void Fuzzer::DeathCallback() {
- DumpCurrentUnit("crash-");
- PrintFinalStats();
+ Printf("DEATH:\n");
+ if (CurrentUnit.size() <= kMaxUnitSizeToPrint) {
+ Print(CurrentUnit, "\n");
+ PrintUnitInASCII(CurrentUnit, "\n");
+ }
+ WriteUnitToFileWithPrefix(CurrentUnit, "crash-");
}
void Fuzzer::StaticAlarmCallback() {
@@ -170,328 +89,224 @@ void Fuzzer::StaticAlarmCallback() {
F->AlarmCallback();
}
-void Fuzzer::StaticCrashSignalCallback() {
- assert(F);
- F->CrashCallback();
-}
-
-void Fuzzer::StaticInterruptCallback() {
- assert(F);
- F->InterruptCallback();
-}
-
-void Fuzzer::StaticFileSizeExceedCallback() {
- Printf("==%lu== ERROR: libFuzzer: file size exceeded\n", GetPid());
- exit(1);
-}
-
-void Fuzzer::CrashCallback() {
- Printf("==%lu== ERROR: libFuzzer: deadly signal\n", GetPid());
- if (EF->__sanitizer_print_stack_trace)
- EF->__sanitizer_print_stack_trace();
- Printf("NOTE: libFuzzer has rudimentary signal handlers.\n"
- " Combine libFuzzer with AddressSanitizer or similar for better "
- "crash reports.\n");
- Printf("SUMMARY: libFuzzer: deadly signal\n");
- DumpCurrentUnit("crash-");
- PrintFinalStats();
- _Exit(Options.ErrorExitCode); // Stop right now.
-}
-
-void Fuzzer::InterruptCallback() {
- Printf("==%lu== libFuzzer: run interrupted; exiting\n", GetPid());
- PrintFinalStats();
- _Exit(0); // Stop right now, don't perform any at-exit actions.
-}
-
-NO_SANITIZE_MEMORY
void Fuzzer::AlarmCallback() {
assert(Options.UnitTimeoutSec > 0);
- // In Windows Alarm callback is executed by a different thread.
-#if !LIBFUZZER_WINDOWS
- if (!InFuzzingThread()) return;
-#endif
- if (!RunningCB)
- return; // We have not started running units yet.
size_t Seconds =
duration_cast<seconds>(system_clock::now() - UnitStartTime).count();
- if (Seconds == 0)
- return;
+ if (Seconds == 0) return;
if (Options.Verbosity >= 2)
Printf("AlarmCallback %zd\n", Seconds);
if (Seconds >= (size_t)Options.UnitTimeoutSec) {
Printf("ALARM: working on the last Unit for %zd seconds\n", Seconds);
Printf(" and the timeout value is %d (use -timeout=N to change)\n",
Options.UnitTimeoutSec);
- DumpCurrentUnit("timeout-");
- Printf("==%lu== ERROR: libFuzzer: timeout after %d seconds\n", GetPid(),
+ if (CurrentUnit.size() <= kMaxUnitSizeToPrint) {
+ Print(CurrentUnit, "\n");
+ PrintUnitInASCII(CurrentUnit, "\n");
+ }
+ WriteUnitToFileWithPrefix(CurrentUnit, "timeout-");
+ Printf("==%d== ERROR: libFuzzer: timeout after %d seconds\n", GetPid(),
Seconds);
- if (EF->__sanitizer_print_stack_trace)
- EF->__sanitizer_print_stack_trace();
+ if (__sanitizer_print_stack_trace)
+ __sanitizer_print_stack_trace();
Printf("SUMMARY: libFuzzer: timeout\n");
- PrintFinalStats();
- _Exit(Options.TimeoutExitCode); // Stop right now.
+ exit(1);
}
}
-void Fuzzer::RssLimitCallback() {
- Printf(
- "==%lu== ERROR: libFuzzer: out-of-memory (used: %zdMb; limit: %zdMb)\n",
- GetPid(), GetPeakRSSMb(), Options.RssLimitMb);
- Printf(" To change the out-of-memory limit use -rss_limit_mb=<N>\n\n");
- if (EF->__sanitizer_print_memory_profile)
- EF->__sanitizer_print_memory_profile(95, 8);
- DumpCurrentUnit("oom-");
- Printf("SUMMARY: libFuzzer: out-of-memory\n");
- PrintFinalStats();
- _Exit(Options.ErrorExitCode); // Stop right now.
-}
+void Fuzzer::PrintStats(const char *Where, const char *End) {
+ size_t Seconds = secondsSinceProcessStartUp();
+ size_t ExecPerSec = (Seconds ? TotalNumberOfRuns / Seconds : 0);
-void Fuzzer::PrintStats(const char *Where, const char *End, size_t Units) {
- size_t ExecPerSec = execPerSec();
- if (!Options.Verbosity)
- return;
- Printf("#%zd\t%s", TotalNumberOfRuns, Where);
- if (size_t N = TPC.GetTotalPCCoverage())
- Printf(" cov: %zd", N);
- if (size_t N = Corpus.NumFeatures())
- Printf( " ft: %zd", N);
- if (!Corpus.empty()) {
- Printf(" corp: %zd", Corpus.NumActiveUnits());
- if (size_t N = Corpus.SizeInBytes()) {
- if (N < (1<<14))
- Printf("/%zdb", N);
- else if (N < (1 << 24))
- Printf("/%zdKb", N >> 10);
- else
- Printf("/%zdMb", N >> 20);
+ if (Options.OutputCSV) {
+ static bool csvHeaderPrinted = false;
+ if (!csvHeaderPrinted) {
+ csvHeaderPrinted = true;
+ Printf("runs,block_cov,bits,cc_cov,corpus,execs_per_sec,tbms,reason\n");
}
+ Printf("%zd,%zd,%zd,%zd,%zd,%zd,%zd,%s\n", TotalNumberOfRuns,
+ LastRecordedBlockCoverage, TotalBits(),
+ LastRecordedCallerCalleeCoverage, Corpus.size(), ExecPerSec,
+ TotalNumberOfExecutedTraceBasedMutations, Where);
}
- if (Units)
- Printf(" units: %zd", Units);
- Printf(" exec/s: %zd", ExecPerSec);
- Printf(" rss: %zdMb", GetPeakRSSMb());
+ if (!Options.Verbosity)
+ return;
+ Printf("#%zd\t%s", TotalNumberOfRuns, Where);
+ if (LastRecordedBlockCoverage)
+ Printf(" cov: %zd", LastRecordedBlockCoverage);
+ if (auto TB = TotalBits())
+ Printf(" bits: %zd", TB);
+ if (LastRecordedCallerCalleeCoverage)
+ Printf(" indir: %zd", LastRecordedCallerCalleeCoverage);
+ Printf(" units: %zd exec/s: %zd", Corpus.size(), ExecPerSec);
+ if (TotalNumberOfExecutedTraceBasedMutations)
+ Printf(" tbm: %zd", TotalNumberOfExecutedTraceBasedMutations);
Printf("%s", End);
}
-void Fuzzer::PrintFinalStats() {
- if (Options.PrintCoverage)
- TPC.PrintCoverage();
- if (Options.DumpCoverage)
- TPC.DumpCoverage();
- if (Options.PrintCorpusStats)
- Corpus.PrintStats();
- if (!Options.PrintFinalStats) return;
- size_t ExecPerSec = execPerSec();
- Printf("stat::number_of_executed_units: %zd\n", TotalNumberOfRuns);
- Printf("stat::average_exec_per_sec: %zd\n", ExecPerSec);
- Printf("stat::new_units_added: %zd\n", NumberOfNewUnitsAdded);
- Printf("stat::slowest_unit_time_sec: %zd\n", TimeOfLongestUnitInSeconds);
- Printf("stat::peak_rss_mb: %zd\n", GetPeakRSSMb());
-}
-
-void Fuzzer::SetMaxInputLen(size_t MaxInputLen) {
- assert(this->MaxInputLen == 0); // Can only reset MaxInputLen from 0 to non-0.
- assert(MaxInputLen);
- this->MaxInputLen = MaxInputLen;
- this->MaxMutationLen = MaxInputLen;
- AllocateCurrentUnitData();
- Printf("INFO: -max_len is not provided; "
- "libFuzzer will not generate inputs larger than %zd bytes\n",
- MaxInputLen);
-}
-
-void Fuzzer::SetMaxMutationLen(size_t MaxMutationLen) {
- assert(MaxMutationLen && MaxMutationLen <= MaxInputLen);
- this->MaxMutationLen = MaxMutationLen;
-}
-
-void Fuzzer::CheckExitOnSrcPosOrItem() {
- if (!Options.ExitOnSrcPos.empty()) {
- static auto *PCsSet = new std::set<uintptr_t>;
- for (size_t i = 1, N = TPC.GetNumPCs(); i < N; i++) {
- uintptr_t PC = TPC.GetPC(i);
- if (!PC) continue;
- if (!PCsSet->insert(PC).second) continue;
- std::string Descr = DescribePC("%L", PC);
- if (Descr.find(Options.ExitOnSrcPos) != std::string::npos) {
- Printf("INFO: found line matching '%s', exiting.\n",
- Options.ExitOnSrcPos.c_str());
- _Exit(0);
- }
- }
- }
- if (!Options.ExitOnItem.empty()) {
- if (Corpus.HasUnit(Options.ExitOnItem)) {
- Printf("INFO: found item with checksum '%s', exiting.\n",
- Options.ExitOnItem.c_str());
- _Exit(0);
- }
- }
-}
-
-void Fuzzer::RereadOutputCorpus(size_t MaxSize) {
- if (Options.OutputCorpus.empty() || !Options.ReloadIntervalSec) return;
+void Fuzzer::RereadOutputCorpus() {
+ if (Options.OutputCorpus.empty()) return;
std::vector<Unit> AdditionalCorpus;
ReadDirToVectorOfUnits(Options.OutputCorpus.c_str(), &AdditionalCorpus,
- &EpochOfLastReadOfOutputCorpus, MaxSize,
- /*ExitOnError*/ false);
+ &EpochOfLastReadOfOutputCorpus);
+ if (Corpus.empty()) {
+ Corpus = AdditionalCorpus;
+ return;
+ }
+ if (!Options.Reload) return;
if (Options.Verbosity >= 2)
- Printf("Reload: read %zd new units.\n", AdditionalCorpus.size());
- bool Reloaded = false;
- for (auto &U : AdditionalCorpus) {
- if (U.size() > MaxSize)
- U.resize(MaxSize);
- if (!Corpus.HasUnit(U)) {
- if (RunOne(U.data(), U.size()))
- Reloaded = true;
+ Printf("Reload: read %zd new units.\n", AdditionalCorpus.size());
+ for (auto &X : AdditionalCorpus) {
+ if (X.size() > (size_t)Options.MaxLen)
+ X.resize(Options.MaxLen);
+ if (UnitHashesAddedToCorpus.insert(Hash(X)).second) {
+ CurrentUnit.clear();
+ CurrentUnit.insert(CurrentUnit.begin(), X.begin(), X.end());
+ if (RunOne(CurrentUnit)) {
+ Corpus.push_back(X);
+ PrintStats("RELOAD");
+ }
}
}
- if (Reloaded)
- PrintStats("RELOAD");
}
-void Fuzzer::ShuffleCorpus(UnitVector *V) {
- std::shuffle(V->begin(), V->end(), MD.GetRand());
- if (Options.PreferSmall)
- std::stable_sort(V->begin(), V->end(), [](const Unit &A, const Unit &B) {
- return A.size() < B.size();
- });
-}
-
-void Fuzzer::ShuffleAndMinimize(UnitVector *InitialCorpus) {
- Printf("#0\tREAD units: %zd\n", InitialCorpus->size());
- if (Options.ShuffleAtStartUp)
- ShuffleCorpus(InitialCorpus);
-
- // Test the callback with empty input and never try it again.
- uint8_t dummy;
- ExecuteCallback(&dummy, 0);
-
- for (const auto &U : *InitialCorpus) {
- RunOne(U.data(), U.size());
- TryDetectingAMemoryLeak(U.data(), U.size(),
- /*DuringInitialCorpusExecution*/ true);
+void Fuzzer::ShuffleAndMinimize() {
+ bool PreferSmall = (Options.PreferSmallDuringInitialShuffle == 1 ||
+ (Options.PreferSmallDuringInitialShuffle == -1 &&
+ USF.GetRand().RandBool()));
+ if (Options.Verbosity)
+ Printf("PreferSmall: %d\n", PreferSmall);
+ PrintStats("READ ");
+ std::vector<Unit> NewCorpus;
+ if (Options.ShuffleAtStartUp) {
+ std::random_shuffle(Corpus.begin(), Corpus.end(), USF.GetRand());
+ if (PreferSmall)
+ std::stable_sort(
+ Corpus.begin(), Corpus.end(),
+ [](const Unit &A, const Unit &B) { return A.size() < B.size(); });
}
- PrintStats("INITED");
- if (Corpus.empty()) {
- Printf("ERROR: no interesting inputs were found. "
- "Is the code instrumented for coverage? Exiting.\n");
- exit(1);
+ Unit &U = CurrentUnit;
+ for (const auto &C : Corpus) {
+ for (size_t First = 0; First < 1; First++) {
+ U.clear();
+ size_t Last = std::min(First + Options.MaxLen, C.size());
+ U.insert(U.begin(), C.begin() + First, C.begin() + Last);
+ if (Options.OnlyASCII)
+ ToASCII(U);
+ if (RunOne(U)) {
+ NewCorpus.push_back(U);
+ if (Options.Verbosity >= 2)
+ Printf("NEW0: %zd L %zd\n", LastRecordedBlockCoverage, U.size());
+ }
+ }
}
+ Corpus = NewCorpus;
+ for (auto &X : Corpus)
+ UnitHashesAddedToCorpus.insert(Hash(X));
+ PrintStats("INITED");
}
-void Fuzzer::PrintPulseAndReportSlowInput(const uint8_t *Data, size_t Size) {
+bool Fuzzer::RunOne(const Unit &U) {
+ UnitStartTime = system_clock::now();
+ TotalNumberOfRuns++;
+
+ PrepareCoverageBeforeRun();
+ ExecuteCallback(U);
+ bool Res = CheckCoverageAfterRun();
+
+ auto UnitStopTime = system_clock::now();
auto TimeOfUnit =
duration_cast<seconds>(UnitStopTime - UnitStartTime).count();
if (!(TotalNumberOfRuns & (TotalNumberOfRuns - 1)) &&
secondsSinceProcessStartUp() >= 2)
PrintStats("pulse ");
- if (TimeOfUnit > TimeOfLongestUnitInSeconds * 1.1 &&
+ if (TimeOfUnit > TimeOfLongestUnitInSeconds &&
TimeOfUnit >= Options.ReportSlowUnits) {
TimeOfLongestUnitInSeconds = TimeOfUnit;
Printf("Slowest unit: %zd s:\n", TimeOfLongestUnitInSeconds);
- WriteUnitToFileWithPrefix({Data, Data + Size}, "slow-unit-");
+ WriteUnitToFileWithPrefix(U, "slow-unit-");
}
+ return Res;
}
-bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,
- InputInfo *II) {
- if (!Size) return false;
-
- ExecuteCallback(Data, Size);
-
- UniqFeatureSetTmp.clear();
- size_t FoundUniqFeaturesOfII = 0;
- size_t NumUpdatesBefore = Corpus.NumFeatureUpdates();
- TPC.CollectFeatures([&](size_t Feature) {
- if (Corpus.AddFeature(Feature, Size, Options.Shrink))
- UniqFeatureSetTmp.push_back(Feature);
- if (Options.ReduceInputs && II)
- if (std::binary_search(II->UniqFeatureSet.begin(),
- II->UniqFeatureSet.end(), Feature))
- FoundUniqFeaturesOfII++;
- });
- PrintPulseAndReportSlowInput(Data, Size);
- size_t NumNewFeatures = Corpus.NumFeatureUpdates() - NumUpdatesBefore;
- if (NumNewFeatures) {
- Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures, MayDeleteFile,
- UniqFeatureSetTmp);
- CheckExitOnSrcPosOrItem();
- return true;
- }
- if (II && FoundUniqFeaturesOfII &&
- FoundUniqFeaturesOfII == II->UniqFeatureSet.size() &&
- II->U.size() > Size) {
- Corpus.Replace(II, {Data, Data + Size});
- CheckExitOnSrcPosOrItem();
- return true;
- }
- return false;
+void Fuzzer::RunOneAndUpdateCorpus(Unit &U) {
+ if (TotalNumberOfRuns >= Options.MaxNumberOfRuns)
+ return;
+ if (Options.OnlyASCII)
+ ToASCII(U);
+ if (RunOne(U))
+ ReportNewCoverage(U);
+}
+
+void Fuzzer::ExecuteCallback(const Unit &U) {
+ const uint8_t *Data = U.data();
+ uint8_t EmptyData;
+ if (!Data)
+ Data = &EmptyData;
+ int Res = USF.TargetFunction(Data, U.size());
+ (void)Res;
+ assert(Res == 0);
}
-size_t Fuzzer::GetCurrentUnitInFuzzingThead(const uint8_t **Data) const {
- assert(InFuzzingThread());
- *Data = CurrentUnitData;
- return CurrentUnitSize;
+size_t Fuzzer::RecordBlockCoverage() {
+ CHECK_WEAK_API_FUNCTION(__sanitizer_get_total_unique_coverage);
+ uintptr_t PrevCoverage = LastRecordedBlockCoverage;
+ LastRecordedBlockCoverage = __sanitizer_get_total_unique_coverage();
+
+ if (PrevCoverage == LastRecordedBlockCoverage || !Options.PrintNewCovPcs)
+ return LastRecordedBlockCoverage;
+
+ uintptr_t PrevBufferLen = LastCoveragePcBufferLen;
+ uintptr_t *CoverageBuf;
+ LastCoveragePcBufferLen = __sanitizer_get_coverage_pc_buffer(&CoverageBuf);
+ assert(CoverageBuf);
+ for (size_t i = PrevBufferLen; i < LastCoveragePcBufferLen; ++i) {
+ Printf("0x%x\n", CoverageBuf[i]);
+ }
+
+ return LastRecordedBlockCoverage;
}
-void Fuzzer::CrashOnOverwrittenData() {
- Printf("==%d== ERROR: libFuzzer: fuzz target overwrites it's const input\n",
- GetPid());
- DumpCurrentUnit("crash-");
- Printf("SUMMARY: libFuzzer: out-of-memory\n");
- _Exit(Options.ErrorExitCode); // Stop right now.
+size_t Fuzzer::RecordCallerCalleeCoverage() {
+ if (!Options.UseIndirCalls)
+ return 0;
+ if (!__sanitizer_get_total_unique_caller_callee_pairs)
+ return 0;
+ return LastRecordedCallerCalleeCoverage =
+ __sanitizer_get_total_unique_caller_callee_pairs();
}
-// Compare two arrays, but not all bytes if the arrays are large.
-static bool LooseMemeq(const uint8_t *A, const uint8_t *B, size_t Size) {
- const size_t Limit = 64;
- if (Size <= 64)
- return !memcmp(A, B, Size);
- // Compare first and last Limit/2 bytes.
- return !memcmp(A, B, Limit / 2) &&
- !memcmp(A + Size - Limit / 2, B + Size - Limit / 2, Limit / 2);
+void Fuzzer::PrepareCoverageBeforeRun() {
+ if (Options.UseCounters) {
+ size_t NumCounters = __sanitizer_get_number_of_counters();
+ CounterBitmap.resize(NumCounters);
+ __sanitizer_update_counter_bitset_and_clear_counters(0);
+ }
+ RecordBlockCoverage();
+ RecordCallerCalleeCoverage();
}
-void Fuzzer::ExecuteCallback(const uint8_t *Data, size_t Size) {
- TotalNumberOfRuns++;
- assert(InFuzzingThread());
- if (SMR.IsClient())
- SMR.WriteByteArray(Data, Size);
- // We copy the contents of Unit into a separate heap buffer
- // so that we reliably find buffer overflows in it.
- uint8_t *DataCopy = new uint8_t[Size];
- memcpy(DataCopy, Data, Size);
- if (CurrentUnitData && CurrentUnitData != Data)
- memcpy(CurrentUnitData, Data, Size);
- CurrentUnitSize = Size;
- AllocTracer.Start(Options.TraceMalloc);
- UnitStartTime = system_clock::now();
- TPC.ResetMaps();
- RunningCB = true;
- int Res = CB(DataCopy, Size);
- RunningCB = false;
- UnitStopTime = system_clock::now();
- (void)Res;
- assert(Res == 0);
- HasMoreMallocsThanFrees = AllocTracer.Stop();
- if (!LooseMemeq(DataCopy, Data, Size))
- CrashOnOverwrittenData();
- CurrentUnitSize = 0;
- delete[] DataCopy;
+bool Fuzzer::CheckCoverageAfterRun() {
+ size_t OldCoverage = LastRecordedBlockCoverage;
+ size_t NewCoverage = RecordBlockCoverage();
+ size_t OldCallerCalleeCoverage = LastRecordedCallerCalleeCoverage;
+ size_t NewCallerCalleeCoverage = RecordCallerCalleeCoverage();
+ size_t NumNewBits = 0;
+ if (Options.UseCounters)
+ NumNewBits = __sanitizer_update_counter_bitset_and_clear_counters(
+ CounterBitmap.data());
+ return NewCoverage > OldCoverage ||
+ NewCallerCalleeCoverage > OldCallerCalleeCoverage || NumNewBits;
}
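
In outline, RunOne composes the two helpers above with ExecuteCallback into a classic coverage-feedback step. A self-contained sketch of that shape (snapshot and run_target are stand-ins, not real API):

    #include <cstddef>
    #include <cstdint>

    // Stand-ins for the sanitizer coverage counters and the user callback.
    static size_t FakeCoverage = 0;
    static size_t snapshot() { return FakeCoverage; }
    static void run_target(const uint8_t *Data, size_t Size) {
      if (Size && Data[0] == 'z') FakeCoverage++;  // Pretend a new block ran.
    }

    static bool RunOneSketch(const uint8_t *Data, size_t Size) {
      size_t Before = snapshot();  // PrepareCoverageBeforeRun()
      run_target(Data, Size);      // ExecuteCallback()
      return snapshot() > Before;  // CheckCoverageAfterRun()
    }

    int main() {
      const uint8_t A[] = {'a'}, Z[] = {'z'};
      return (!RunOneSketch(A, 1) && RunOneSketch(Z, 1)) ? 0 : 1;
    }
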
void Fuzzer::WriteToOutputCorpus(const Unit &U) {
- if (Options.OnlyASCII)
- assert(IsASCII(U));
- if (Options.OutputCorpus.empty())
- return;
+ if (Options.OutputCorpus.empty()) return;
std::string Path = DirPlusFile(Options.OutputCorpus, Hash(U));
WriteToFile(U, Path);
if (Options.Verbosity >= 2)
Printf("Written to %s\n", Path.c_str());
+ assert(!Options.OnlyASCII || IsASCII(U));
}
void Fuzzer::WriteUnitToFileWithPrefix(const Unit &U, const char *Prefix) {
@@ -499,7 +314,7 @@ void Fuzzer::WriteUnitToFileWithPrefix(const Unit &U, const char *Prefix) {
return;
std::string Path = Options.ArtifactPrefix + Prefix + Hash(U);
if (!Options.ExactArtifactPath.empty())
- Path = Options.ExactArtifactPath; // Overrides ArtifactPrefix.
+ Path = Options.ExactArtifactPath; // Overrides ArtifactPrefix.
WriteToFile(U, Path);
Printf("artifact_prefix='%s'; Test unit written to %s\n",
Options.ArtifactPrefix.c_str(), Path.c_str());
@@ -507,189 +322,191 @@ void Fuzzer::WriteUnitToFileWithPrefix(const Unit &U, const char *Prefix) {
Printf("Base64: %s\n", Base64(U).c_str());
}
-void Fuzzer::PrintStatusForNewUnit(const Unit &U, const char *Text) {
+void Fuzzer::SaveCorpus() {
+ if (Options.OutputCorpus.empty()) return;
+ for (const auto &U : Corpus)
+ WriteToFile(U, DirPlusFile(Options.OutputCorpus, Hash(U)));
+ if (Options.Verbosity)
+ Printf("Written corpus of %zd files to %s\n", Corpus.size(),
+ Options.OutputCorpus.c_str());
+}
+
+void Fuzzer::PrintStatusForNewUnit(const Unit &U) {
if (!Options.PrintNEW)
return;
- PrintStats(Text, "");
+ PrintStats("NEW ", "");
if (Options.Verbosity) {
Printf(" L: %zd ", U.size());
- MD.PrintMutationSequence();
+ USF.PrintMutationSequence();
Printf("\n");
}
}
-void Fuzzer::ReportNewCoverage(InputInfo *II, const Unit &U) {
- II->NumSuccessfullMutations++;
- MD.RecordSuccessfulMutationSequence();
- PrintStatusForNewUnit(U, II->Reduced ? "REDUCE" :
- "NEW ");
+void Fuzzer::ReportNewCoverage(const Unit &U) {
+ Corpus.push_back(U);
+ UnitHashesAddedToCorpus.insert(Hash(U));
+ PrintStatusForNewUnit(U);
WriteToOutputCorpus(U);
- NumberOfNewUnitsAdded++;
- TPC.PrintNewPCs();
+ if (Options.ExitOnFirst)
+ exit(0);
}
-// Tries detecting a memory leak on the particular input that we have just
-// executed before calling this function.
-void Fuzzer::TryDetectingAMemoryLeak(const uint8_t *Data, size_t Size,
- bool DuringInitialCorpusExecution) {
- if (!HasMoreMallocsThanFrees) return; // mallocs==frees, a leak is unlikely.
- if (!Options.DetectLeaks) return;
- if (!&(EF->__lsan_enable) || !&(EF->__lsan_disable) ||
- !(EF->__lsan_do_recoverable_leak_check))
- return; // No lsan.
- // Run the target once again, but with lsan disabled so that if there is
- // a real leak we do not report it twice.
- EF->__lsan_disable();
- ExecuteCallback(Data, Size);
- EF->__lsan_enable();
- if (!HasMoreMallocsThanFrees) return; // a leak is unlikely.
- if (NumberOfLeakDetectionAttempts++ > 1000) {
- Options.DetectLeaks = false;
- Printf("INFO: libFuzzer disabled leak detection after every mutation.\n"
- " Most likely the target function accumulates allocated\n"
- " memory in a global state w/o actually leaking it.\n"
- " You may try running this binary with -trace_malloc=[12]"
- " to get a trace of mallocs and frees.\n"
- " If LeakSanitizer is enabled in this process it will still\n"
- " run on the process shutdown.\n");
+void Fuzzer::Merge(const std::vector<std::string> &Corpora) {
+ if (Corpora.size() <= 1) {
+ Printf("Merge requires two or more corpus dirs\n");
return;
}
- // Now perform the actual lsan pass. This is expensive and we must ensure
- // we don't call it too often.
- if (EF->__lsan_do_recoverable_leak_check()) { // Leak is found, report it.
- if (DuringInitialCorpusExecution)
- Printf("\nINFO: a leak has been found in the initial corpus.\n\n");
- Printf("INFO: to ignore leaks on libFuzzer side use -detect_leaks=0.\n\n");
- CurrentUnitSize = Size;
- DumpCurrentUnit("leak-");
- PrintFinalStats();
- _Exit(Options.ErrorExitCode); // not exit() to disable lsan further on.
+ auto InitialCorpusDir = Corpora[0];
+ ReadDir(InitialCorpusDir, nullptr);
+ Printf("Merge: running the initial corpus '%s' of %d units\n",
+ InitialCorpusDir.c_str(), Corpus.size());
+ for (auto &U : Corpus)
+ RunOne(U);
+
+ std::vector<std::string> ExtraCorpora(Corpora.begin() + 1, Corpora.end());
+
+ size_t NumTried = 0;
+ size_t NumMerged = 0;
+ for (auto &C : ExtraCorpora) {
+ Corpus.clear();
+ ReadDir(C, nullptr);
+ Printf("Merge: merging the extra corpus '%s' of %zd units\n", C.c_str(),
+ Corpus.size());
+ for (auto &U : Corpus) {
+ NumTried++;
+ if (RunOne(U)) {
+ WriteToOutputCorpus(U);
+ NumMerged++;
+ }
+ }
}
-}
-
-static size_t ComputeMutationLen(size_t MaxInputSize, size_t MaxMutationLen,
- Random &Rand) {
- assert(MaxInputSize <= MaxMutationLen);
- if (MaxInputSize == MaxMutationLen) return MaxMutationLen;
- size_t Result = MaxInputSize;
- size_t R = Rand.Rand();
- if ((R % (1U << 7)) == 0)
- Result++;
- if ((R % (1U << 15)) == 0)
- Result += 10 + Result / 2;
- return Min(Result, MaxMutationLen);
+ Printf("Merge: written %zd out of %zd units\n", NumMerged, NumTried);
}
void Fuzzer::MutateAndTestOne() {
- MD.StartMutationSequence();
-
- auto &II = Corpus.ChooseUnitToMutate(MD.GetRand());
- const auto &U = II.U;
- memcpy(BaseSha1, II.Sha1, sizeof(BaseSha1));
- assert(CurrentUnitData);
- size_t Size = U.size();
- assert(Size <= MaxInputLen && "Oversized Unit");
- memcpy(CurrentUnitData, U.data(), Size);
+ auto &U = CurrentUnit;
+ USF.StartMutationSequence();
- assert(MaxMutationLen > 0);
-
- size_t CurrentMaxMutationLen =
- Options.ExperimentalLenControl
- ? ComputeMutationLen(Corpus.MaxInputSize(), MaxMutationLen,
- MD.GetRand())
- : MaxMutationLen;
+ U = ChooseUnitToMutate();
for (int i = 0; i < Options.MutateDepth; i++) {
- if (TotalNumberOfRuns >= Options.MaxNumberOfRuns)
- break;
- size_t NewSize = 0;
- NewSize = MD.Mutate(CurrentUnitData, Size, CurrentMaxMutationLen);
+ size_t Size = U.size();
+ U.resize(Options.MaxLen);
+ size_t NewSize = USF.Mutate(U.data(), Size, U.size());
assert(NewSize > 0 && "Mutator returned empty unit");
- assert(NewSize <= CurrentMaxMutationLen && "Mutator return overisized unit");
- Size = NewSize;
- II.NumExecutedMutations++;
- if (RunOne(CurrentUnitData, Size, /*MayDeleteFile=*/true, &II))
- ReportNewCoverage(&II, {CurrentUnitData, CurrentUnitData + Size});
-
- TryDetectingAMemoryLeak(CurrentUnitData, Size,
- /*DuringInitialCorpusExecution*/ false);
+ assert(NewSize <= (size_t)Options.MaxLen &&
+           "Mutator returned oversized unit");
+ U.resize(NewSize);
+ if (i == 0)
+ StartTraceRecording();
+ RunOneAndUpdateCorpus(U);
+ StopTraceRecording();
}
}
+// Returns the index of a random unit from the corpus to mutate.
+// Hypothesis: units added to the corpus last are more likely to be interesting.
+// This function gives more weight to the more recent units.
+size_t Fuzzer::ChooseUnitIdxToMutate() {
+ size_t N = Corpus.size();
+ size_t Total = (N + 1) * N / 2;
+ size_t R = USF.GetRand()(Total);
+ size_t IdxBeg = 0, IdxEnd = N;
+ // Binary search.
+ while (IdxEnd - IdxBeg >= 2) {
+ size_t Idx = IdxBeg + (IdxEnd - IdxBeg) / 2;
+ if (R > (Idx + 1) * Idx / 2)
+ IdxBeg = Idx;
+ else
+ IdxEnd = Idx;
+ }
+ assert(IdxBeg < N);
+ return IdxBeg;
+}
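
As a concrete check of the weighting (hypothetical test, not from the patch): for N = 4 the triangular total is 10, and enumerating every value that USF.GetRand()(Total) can produce shows the later, more recent units winning more often (counts 2, 2, 3, 3 — roughly proportional to the index):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t N = 4, Total = N * (N + 1) / 2;  // Total == 10
      size_t Hits[N] = {};
      for (size_t R = 0; R < Total; R++) {  // Enumerate each outcome once.
        size_t IdxBeg = 0, IdxEnd = N;
        while (IdxEnd - IdxBeg >= 2) {      // Same binary search as above.
          size_t Idx = IdxBeg + (IdxEnd - IdxBeg) / 2;
          if (R > (Idx + 1) * Idx / 2)
            IdxBeg = Idx;
          else
            IdxEnd = Idx;
        }
        Hits[IdxBeg]++;
      }
      for (size_t i = 0; i < N; i++)
        printf("unit %zu chosen for %zu of %zu values\n", i, Hits[i], Total);
      return 0;
    }
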
+
+// Experimental search heuristic: drilling.
+// - Read, shuffle, execute and minimize the corpus.
+// - Choose one random unit.
+// - Reset the coverage.
+// - Start fuzzing as if the chosen unit was the only element of the corpus.
+// - When done, reset the coverage again.
+// - Merge the newly created corpus into the original one.
+void Fuzzer::Drill() {
+ // The corpus is already read, shuffled, and minimized.
+ assert(!Corpus.empty());
+ Options.PrintNEW = false; // Don't print NEW status lines when drilling.
+
+ Unit U = ChooseUnitToMutate();
+
+ CHECK_WEAK_API_FUNCTION(__sanitizer_reset_coverage);
+ __sanitizer_reset_coverage();
+
+ std::vector<Unit> SavedCorpus;
+ SavedCorpus.swap(Corpus);
+ Corpus.push_back(U);
+ assert(Corpus.size() == 1);
+ RunOne(U);
+ PrintStats("DRILL ");
+ std::string SavedOutputCorpusPath; // Don't write new units while drilling.
+ SavedOutputCorpusPath.swap(Options.OutputCorpus);
+ Loop();
+
+ __sanitizer_reset_coverage();
+
+ PrintStats("REINIT");
+ SavedOutputCorpusPath.swap(Options.OutputCorpus);
+ for (auto &U : SavedCorpus) {
+ CurrentUnit = U;
+ RunOne(U);
+ }
+ PrintStats("MERGE ");
+ Options.PrintNEW = true;
+ size_t NumMerged = 0;
+ for (auto &U : Corpus) {
+ CurrentUnit = U;
+ if (RunOne(U)) {
+ PrintStatusForNewUnit(U);
+ NumMerged++;
+ WriteToOutputCorpus(U);
+ }
+ }
+ PrintStats("MERGED");
+ if (NumMerged && Options.Verbosity)
+ Printf("Drilling discovered %zd new units\n", NumMerged);
+}
+
void Fuzzer::Loop() {
- TPC.InitializePrintNewPCs();
system_clock::time_point LastCorpusReload = system_clock::now();
if (Options.DoCrossOver)
- MD.SetCorpus(&Corpus);
+ USF.SetCorpus(&Corpus);
while (true) {
+ SyncCorpus();
auto Now = system_clock::now();
- if (duration_cast<seconds>(Now - LastCorpusReload).count() >=
- Options.ReloadIntervalSec) {
- RereadOutputCorpus(MaxInputLen);
- LastCorpusReload = system_clock::now();
+ if (duration_cast<seconds>(Now - LastCorpusReload).count()) {
+ RereadOutputCorpus();
+ LastCorpusReload = Now;
}
if (TotalNumberOfRuns >= Options.MaxNumberOfRuns)
break;
- if (TimedOut()) break;
+ if (Options.MaxTotalTimeSec > 0 &&
+ secondsSinceProcessStartUp() >
+ static_cast<size_t>(Options.MaxTotalTimeSec))
+ break;
// Perform several mutations and runs.
MutateAndTestOne();
}
PrintStats("DONE ", "\n");
- MD.PrintRecommendedDictionary();
-}
-
-void Fuzzer::MinimizeCrashLoop(const Unit &U) {
- if (U.size() <= 1) return;
- while (!TimedOut() && TotalNumberOfRuns < Options.MaxNumberOfRuns) {
- MD.StartMutationSequence();
- memcpy(CurrentUnitData, U.data(), U.size());
- for (int i = 0; i < Options.MutateDepth; i++) {
- size_t NewSize = MD.Mutate(CurrentUnitData, U.size(), MaxMutationLen);
- assert(NewSize > 0 && NewSize <= MaxMutationLen);
- ExecuteCallback(CurrentUnitData, NewSize);
- PrintPulseAndReportSlowInput(CurrentUnitData, NewSize);
- TryDetectingAMemoryLeak(CurrentUnitData, NewSize,
- /*DuringInitialCorpusExecution*/ false);
- }
- }
-}
-
-void Fuzzer::AnnounceOutput(const uint8_t *Data, size_t Size) {
- if (SMR.IsServer()) {
- SMR.WriteByteArray(Data, Size);
- } else if (SMR.IsClient()) {
- SMR.PostClient();
- SMR.WaitServer();
- size_t OtherSize = SMR.ReadByteArraySize();
- uint8_t *OtherData = SMR.GetByteArray();
- if (Size != OtherSize || memcmp(Data, OtherData, Size) != 0) {
- size_t i = 0;
- for (i = 0; i < Min(Size, OtherSize); i++)
- if (Data[i] != OtherData[i])
- break;
- Printf("==%lu== ERROR: libFuzzer: equivalence-mismatch. Sizes: %zd %zd; "
- "offset %zd\n", GetPid(), Size, OtherSize, i);
- DumpCurrentUnit("mismatch-");
- Printf("SUMMARY: libFuzzer: equivalence-mismatch\n");
- PrintFinalStats();
- _Exit(Options.ErrorExitCode);
- }
- }
}
-} // namespace fuzzer
-
-extern "C" {
-
-size_t LLVMFuzzerMutate(uint8_t *Data, size_t Size, size_t MaxSize) {
- assert(fuzzer::F);
- return fuzzer::F->GetMD().DefaultMutate(Data, Size, MaxSize);
+void Fuzzer::SyncCorpus() {
+ if (Options.SyncCommand.empty() || Options.OutputCorpus.empty()) return;
+ auto Now = system_clock::now();
+ if (duration_cast<seconds>(Now - LastExternalSync).count() <
+ Options.SyncTimeout)
+ return;
+ LastExternalSync = Now;
+ ExecuteCommand(Options.SyncCommand + " " + Options.OutputCorpus);
}
-// Experimental
-void LLVMFuzzerAnnounceOutput(const uint8_t *Data, size_t Size) {
- assert(fuzzer::F);
- fuzzer::F->AnnounceOutput(Data, Size);
-}
-} // extern "C"
+} // namespace fuzzer
diff --git a/gnu/llvm/lib/Fuzzer/FuzzerMain.cpp b/gnu/llvm/lib/Fuzzer/FuzzerMain.cpp
index af8657200be..c5af5b05909 100644
--- a/gnu/llvm/lib/Fuzzer/FuzzerMain.cpp
+++ b/gnu/llvm/lib/Fuzzer/FuzzerMain.cpp
@@ -9,13 +9,12 @@
// main() and flags.
//===----------------------------------------------------------------------===//
-#include "FuzzerDefs.h"
+#include "FuzzerInterface.h"
+#include "FuzzerInternal.h"
-extern "C" {
// This function should be defined by the user.
-int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size);
-} // extern "C"
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size);
int main(int argc, char **argv) {
- return fuzzer::FuzzerDriver(&argc, &argv, LLVMFuzzerTestOneInput);
+ return fuzzer::FuzzerDriver(argc, argv, LLVMFuzzerTestOneInput);
}
diff --git a/gnu/llvm/lib/Fuzzer/FuzzerMutate.cpp b/gnu/llvm/lib/Fuzzer/FuzzerMutate.cpp
index 5998ef9d319..30e5b43c083 100644
--- a/gnu/llvm/lib/Fuzzer/FuzzerMutate.cpp
+++ b/gnu/llvm/lib/Fuzzer/FuzzerMutate.cpp
@@ -9,113 +9,95 @@
// Mutate a test input.
//===----------------------------------------------------------------------===//
-#include "FuzzerMutate.h"
-#include "FuzzerCorpus.h"
-#include "FuzzerDefs.h"
-#include "FuzzerExtFunctions.h"
-#include "FuzzerIO.h"
-#include "FuzzerOptions.h"
+#include <cstring>
-namespace fuzzer {
-
-const size_t Dictionary::kMaxDictSize;
+#include "FuzzerInternal.h"
-static void PrintASCII(const Word &W, const char *PrintAfter) {
- PrintASCII(W.data(), W.size(), PrintAfter);
-}
+#include <algorithm>
-MutationDispatcher::MutationDispatcher(Random &Rand,
- const FuzzingOptions &Options)
- : Rand(Rand), Options(Options) {
- DefaultMutators.insert(
- DefaultMutators.begin(),
- {
- {&MutationDispatcher::Mutate_EraseBytes, "EraseBytes"},
- {&MutationDispatcher::Mutate_InsertByte, "InsertByte"},
- {&MutationDispatcher::Mutate_InsertRepeatedBytes,
- "InsertRepeatedBytes"},
- {&MutationDispatcher::Mutate_ChangeByte, "ChangeByte"},
- {&MutationDispatcher::Mutate_ChangeBit, "ChangeBit"},
- {&MutationDispatcher::Mutate_ShuffleBytes, "ShuffleBytes"},
- {&MutationDispatcher::Mutate_ChangeASCIIInteger, "ChangeASCIIInt"},
- {&MutationDispatcher::Mutate_ChangeBinaryInteger, "ChangeBinInt"},
- {&MutationDispatcher::Mutate_CopyPart, "CopyPart"},
- {&MutationDispatcher::Mutate_CrossOver, "CrossOver"},
- {&MutationDispatcher::Mutate_AddWordFromManualDictionary,
- "ManualDict"},
- {&MutationDispatcher::Mutate_AddWordFromPersistentAutoDictionary,
- "PersAutoDict"},
- });
- if(Options.UseCmp)
- DefaultMutators.push_back(
- {&MutationDispatcher::Mutate_AddWordFromTORC, "CMP"});
+namespace fuzzer {
- if (EF->LLVMFuzzerCustomMutator)
- Mutators.push_back({&MutationDispatcher::Mutate_Custom, "Custom"});
+struct Mutator {
+ size_t (MutationDispatcher::*Fn)(uint8_t *Data, size_t Size, size_t Max);
+ const char *Name;
+};
+
+struct DictionaryEntry {
+ Unit Word;
+ size_t PositionHint;
+};
+
+struct MutationDispatcher::Impl {
+ std::vector<DictionaryEntry> ManualDictionary;
+ std::vector<DictionaryEntry> AutoDictionary;
+ std::vector<Mutator> Mutators;
+ std::vector<Mutator> CurrentMutatorSequence;
+ std::vector<DictionaryEntry> CurrentDictionaryEntrySequence;
+ const std::vector<Unit> *Corpus = nullptr;
+ FuzzerRandomBase &Rand;
+
+ void Add(Mutator M) { Mutators.push_back(M); }
+ Impl(FuzzerRandomBase &Rand) : Rand(Rand) {
+ Add({&MutationDispatcher::Mutate_EraseByte, "EraseByte"});
+ Add({&MutationDispatcher::Mutate_InsertByte, "InsertByte"});
+ Add({&MutationDispatcher::Mutate_ChangeByte, "ChangeByte"});
+ Add({&MutationDispatcher::Mutate_ChangeBit, "ChangeBit"});
+ Add({&MutationDispatcher::Mutate_ShuffleBytes, "ShuffleBytes"});
+ Add({&MutationDispatcher::Mutate_ChangeASCIIInteger, "ChangeASCIIInt"});
+ Add({&MutationDispatcher::Mutate_CrossOver, "CrossOver"});
+ Add({&MutationDispatcher::Mutate_AddWordFromManualDictionary,
+ "AddFromManualDict"});
+ Add({&MutationDispatcher::Mutate_AddWordFromAutoDictionary,
+ "AddFromAutoDict"});
+ }
+ void SetCorpus(const std::vector<Unit> *Corpus) { this->Corpus = Corpus; }
+ size_t AddWordFromDictionary(const std::vector<DictionaryEntry> &D,
+ uint8_t *Data, size_t Size, size_t MaxSize);
+};
+
+static char FlipRandomBit(char X, FuzzerRandomBase &Rand) {
+ int Bit = Rand(8);
+ char Mask = 1 << Bit;
+ char R;
+ if (X & (1 << Bit))
+ R = X & ~Mask;
else
- Mutators = DefaultMutators;
-
- if (EF->LLVMFuzzerCustomCrossOver)
- Mutators.push_back(
- {&MutationDispatcher::Mutate_CustomCrossOver, "CustomCrossOver"});
+ R = X | Mask;
+ assert(R != X);
+ return R;
}
-static char RandCh(Random &Rand) {
+static char RandCh(FuzzerRandomBase &Rand) {
if (Rand.RandBool()) return Rand(256);
- const char *Special = "!*'();:@&=+$,/?%#[]012Az-`~.\xff\x00";
+ const char *Special = "!*'();:@&=+$,/?%#[]123ABCxyz-`~.";
return Special[Rand(sizeof(Special) - 1)];
}
-size_t MutationDispatcher::Mutate_Custom(uint8_t *Data, size_t Size,
- size_t MaxSize) {
- return EF->LLVMFuzzerCustomMutator(Data, Size, MaxSize, Rand.Rand());
-}
-
-size_t MutationDispatcher::Mutate_CustomCrossOver(uint8_t *Data, size_t Size,
- size_t MaxSize) {
- if (!Corpus || Corpus->size() < 2 || Size == 0)
- return 0;
- size_t Idx = Rand(Corpus->size());
- const Unit &Other = (*Corpus)[Idx];
- if (Other.empty())
- return 0;
- CustomCrossOverInPlaceHere.resize(MaxSize);
- auto &U = CustomCrossOverInPlaceHere;
- size_t NewSize = EF->LLVMFuzzerCustomCrossOver(
- Data, Size, Other.data(), Other.size(), U.data(), U.size(), Rand.Rand());
- if (!NewSize)
- return 0;
- assert(NewSize <= MaxSize && "CustomCrossOver returned overisized unit");
- memcpy(Data, U.data(), NewSize);
- return NewSize;
-}
-
size_t MutationDispatcher::Mutate_ShuffleBytes(uint8_t *Data, size_t Size,
size_t MaxSize) {
- if (Size > MaxSize || Size == 0) return 0;
+ assert(Size);
size_t ShuffleAmount =
Rand(std::min(Size, (size_t)8)) + 1; // [1,8] and <= Size.
size_t ShuffleStart = Rand(Size - ShuffleAmount);
assert(ShuffleStart + ShuffleAmount <= Size);
- std::shuffle(Data + ShuffleStart, Data + ShuffleStart + ShuffleAmount, Rand);
+ std::random_shuffle(Data + ShuffleStart, Data + ShuffleStart + ShuffleAmount,
+ Rand);
return Size;
}
-size_t MutationDispatcher::Mutate_EraseBytes(uint8_t *Data, size_t Size,
- size_t MaxSize) {
- if (Size <= 1) return 0;
- size_t N = Rand(Size / 2) + 1;
- assert(N < Size);
- size_t Idx = Rand(Size - N + 1);
- // Erase Data[Idx:Idx+N].
- memmove(Data + Idx, Data + Idx + N, Size - Idx - N);
- // Printf("Erase: %zd %zd => %zd; Idx %zd\n", N, Size, Size - N, Idx);
- return Size - N;
+size_t MutationDispatcher::Mutate_EraseByte(uint8_t *Data, size_t Size,
+ size_t MaxSize) {
+ assert(Size);
+ if (Size == 1) return 0;
+ size_t Idx = Rand(Size);
+ // Erase Data[Idx].
+ memmove(Data + Idx, Data + Idx + 1, Size - Idx - 1);
+ return Size - 1;
}
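The memmove in Mutate_EraseByte shifts the tail left over the erased byte; the same idiom in a self-contained check (values are illustrative):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint8_t Data[] = {0x00, 0x11, 0x22, 0x33};
      size_t Size = sizeof(Data), Idx = 1;               // erase Data[1]
      std::memmove(Data + Idx, Data + Idx + 1, Size - Idx - 1);
      assert(Data[0] == 0x00 && Data[1] == 0x22 && Data[2] == 0x33);
      return 0;                                          // logical size is now 3
    }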
size_t MutationDispatcher::Mutate_InsertByte(uint8_t *Data, size_t Size,
size_t MaxSize) {
- if (Size >= MaxSize) return 0;
+ if (Size == MaxSize) return 0;
size_t Idx = Rand(Size + 1);
// Insert new value at Data[Idx].
memmove(Data + Idx + 1, Data + Idx, Size - Idx);
@@ -123,27 +105,8 @@ size_t MutationDispatcher::Mutate_InsertByte(uint8_t *Data, size_t Size,
return Size + 1;
}
-size_t MutationDispatcher::Mutate_InsertRepeatedBytes(uint8_t *Data,
- size_t Size,
- size_t MaxSize) {
- const size_t kMinBytesToInsert = 3;
- if (Size + kMinBytesToInsert >= MaxSize) return 0;
- size_t MaxBytesToInsert = std::min(MaxSize - Size, (size_t)128);
- size_t N = Rand(MaxBytesToInsert - kMinBytesToInsert + 1) + kMinBytesToInsert;
- assert(Size + N <= MaxSize && N);
- size_t Idx = Rand(Size + 1);
- // Insert new values at Data[Idx].
- memmove(Data + Idx + N, Data + Idx, Size - Idx);
- // Give preference to 0x00 and 0xff.
- uint8_t Byte = Rand.RandBool() ? Rand(256) : (Rand.RandBool() ? 0 : 255);
- for (size_t i = 0; i < N; i++)
- Data[Idx + i] = Byte;
- return Size + N;
-}
-
size_t MutationDispatcher::Mutate_ChangeByte(uint8_t *Data, size_t Size,
size_t MaxSize) {
- if (Size > MaxSize) return 0;
size_t Idx = Rand(Size);
Data[Idx] = RandCh(Rand);
return Size;
@@ -151,203 +114,51 @@ size_t MutationDispatcher::Mutate_ChangeByte(uint8_t *Data, size_t Size,
size_t MutationDispatcher::Mutate_ChangeBit(uint8_t *Data, size_t Size,
size_t MaxSize) {
- if (Size > MaxSize) return 0;
size_t Idx = Rand(Size);
- Data[Idx] ^= 1 << Rand(8);
+ Data[Idx] = FlipRandomBit(Data[Idx], Rand);
return Size;
}
size_t MutationDispatcher::Mutate_AddWordFromManualDictionary(uint8_t *Data,
size_t Size,
size_t MaxSize) {
- return AddWordFromDictionary(ManualDictionary, Data, Size, MaxSize);
+ return MDImpl->AddWordFromDictionary(MDImpl->ManualDictionary, Data, Size,
+ MaxSize);
}
-size_t MutationDispatcher::ApplyDictionaryEntry(uint8_t *Data, size_t Size,
- size_t MaxSize,
- DictionaryEntry &DE) {
- const Word &W = DE.GetW();
- bool UsePositionHint = DE.HasPositionHint() &&
- DE.GetPositionHint() + W.size() < Size &&
- Rand.RandBool();
- if (Rand.RandBool()) { // Insert W.
- if (Size + W.size() > MaxSize) return 0;
- size_t Idx = UsePositionHint ? DE.GetPositionHint() : Rand(Size + 1);
- memmove(Data + Idx + W.size(), Data + Idx, Size - Idx);
- memcpy(Data + Idx, W.data(), W.size());
- Size += W.size();
- } else { // Overwrite some bytes with W.
- if (W.size() > Size) return 0;
- size_t Idx = UsePositionHint ? DE.GetPositionHint() : Rand(Size - W.size());
- memcpy(Data + Idx, W.data(), W.size());
- }
- return Size;
-}
-
-// Somewhere in the past we have observed a comparison instruction
-// with arguments Arg1 Arg2. This function tries to guess a dictionary
-// entry that will satisfy that comparison.
-// It first tries to find one of the arguments (possibly swapped) in the
-// input and, if it succeeds, creates a DE with a position hint.
-// Otherwise it creates a DE with one of the arguments w/o a position hint.
-DictionaryEntry MutationDispatcher::MakeDictionaryEntryFromCMP(
- const void *Arg1, const void *Arg2,
- const void *Arg1Mutation, const void *Arg2Mutation,
- size_t ArgSize, const uint8_t *Data,
- size_t Size) {
- ScopedDoingMyOwnMemOrStr scoped_doing_my_own_mem_os_str;
- bool HandleFirst = Rand.RandBool();
- const void *ExistingBytes, *DesiredBytes;
- Word W;
- const uint8_t *End = Data + Size;
- for (int Arg = 0; Arg < 2; Arg++) {
- ExistingBytes = HandleFirst ? Arg1 : Arg2;
- DesiredBytes = HandleFirst ? Arg2Mutation : Arg1Mutation;
- HandleFirst = !HandleFirst;
- W.Set(reinterpret_cast<const uint8_t*>(DesiredBytes), ArgSize);
- const size_t kMaxNumPositions = 8;
- size_t Positions[kMaxNumPositions];
- size_t NumPositions = 0;
- for (const uint8_t *Cur = Data;
- Cur < End && NumPositions < kMaxNumPositions; Cur++) {
- Cur =
- (const uint8_t *)SearchMemory(Cur, End - Cur, ExistingBytes, ArgSize);
- if (!Cur) break;
- Positions[NumPositions++] = Cur - Data;
- }
- if (!NumPositions) continue;
- return DictionaryEntry(W, Positions[Rand(NumPositions)]);
- }
- DictionaryEntry DE(W);
- return DE;
-}
-
-
-template <class T>
-DictionaryEntry MutationDispatcher::MakeDictionaryEntryFromCMP(
- T Arg1, T Arg2, const uint8_t *Data, size_t Size) {
- if (Rand.RandBool()) Arg1 = Bswap(Arg1);
- if (Rand.RandBool()) Arg2 = Bswap(Arg2);
- T Arg1Mutation = Arg1 + Rand(-1, 1);
- T Arg2Mutation = Arg2 + Rand(-1, 1);
- return MakeDictionaryEntryFromCMP(&Arg1, &Arg2, &Arg1Mutation, &Arg2Mutation,
- sizeof(Arg1), Data, Size);
-}
-
-DictionaryEntry MutationDispatcher::MakeDictionaryEntryFromCMP(
- const Word &Arg1, const Word &Arg2, const uint8_t *Data, size_t Size) {
- return MakeDictionaryEntryFromCMP(Arg1.data(), Arg2.data(), Arg1.data(),
- Arg2.data(), Arg1.size(), Data, Size);
-}
-
-size_t MutationDispatcher::Mutate_AddWordFromTORC(
- uint8_t *Data, size_t Size, size_t MaxSize) {
- Word W;
- DictionaryEntry DE;
- switch (Rand(4)) {
- case 0: {
- auto X = TPC.TORC8.Get(Rand.Rand());
- DE = MakeDictionaryEntryFromCMP(X.A, X.B, Data, Size);
- } break;
- case 1: {
- auto X = TPC.TORC4.Get(Rand.Rand());
- if ((X.A >> 16) == 0 && (X.B >> 16) == 0 && Rand.RandBool())
- DE = MakeDictionaryEntryFromCMP((uint16_t)X.A, (uint16_t)X.B, Data, Size);
- else
- DE = MakeDictionaryEntryFromCMP(X.A, X.B, Data, Size);
- } break;
- case 2: {
- auto X = TPC.TORCW.Get(Rand.Rand());
- DE = MakeDictionaryEntryFromCMP(X.A, X.B, Data, Size);
- } break;
- case 3: if (Options.UseMemmem) {
- auto X = TPC.MMT.Get(Rand.Rand());
- DE = DictionaryEntry(X);
- } break;
- default:
- assert(0);
- }
- if (!DE.GetW().size()) return 0;
- Size = ApplyDictionaryEntry(Data, Size, MaxSize, DE);
- if (!Size) return 0;
- DictionaryEntry &DERef =
- CmpDictionaryEntriesDeque[CmpDictionaryEntriesDequeIdx++ %
- kCmpDictionaryEntriesDequeSize];
- DERef = DE;
- CurrentDictionaryEntrySequence.push_back(&DERef);
- return Size;
+size_t MutationDispatcher::Mutate_AddWordFromAutoDictionary(uint8_t *Data,
+ size_t Size,
+ size_t MaxSize) {
+ return MDImpl->AddWordFromDictionary(MDImpl->AutoDictionary, Data, Size,
+ MaxSize);
}
-size_t MutationDispatcher::Mutate_AddWordFromPersistentAutoDictionary(
- uint8_t *Data, size_t Size, size_t MaxSize) {
- return AddWordFromDictionary(PersistentAutoDictionary, Data, Size, MaxSize);
-}
-
-size_t MutationDispatcher::AddWordFromDictionary(Dictionary &D, uint8_t *Data,
- size_t Size, size_t MaxSize) {
- if (Size > MaxSize) return 0;
+size_t MutationDispatcher::Impl::AddWordFromDictionary(
+ const std::vector<DictionaryEntry> &D, uint8_t *Data, size_t Size,
+ size_t MaxSize) {
if (D.empty()) return 0;
- DictionaryEntry &DE = D[Rand(D.size())];
- Size = ApplyDictionaryEntry(Data, Size, MaxSize, DE);
- if (!Size) return 0;
- DE.IncUseCount();
- CurrentDictionaryEntrySequence.push_back(&DE);
- return Size;
-}
-
-// Overwrites part of To[0,ToSize) with a part of From[0,FromSize).
-// Returns ToSize.
-size_t MutationDispatcher::CopyPartOf(const uint8_t *From, size_t FromSize,
- uint8_t *To, size_t ToSize) {
- // Copy From[FromBeg, FromBeg + CopySize) into To[ToBeg, ToBeg + CopySize).
- size_t ToBeg = Rand(ToSize);
- size_t CopySize = Rand(ToSize - ToBeg) + 1;
- assert(ToBeg + CopySize <= ToSize);
- CopySize = std::min(CopySize, FromSize);
- size_t FromBeg = Rand(FromSize - CopySize + 1);
- assert(FromBeg + CopySize <= FromSize);
- memmove(To + ToBeg, From + FromBeg, CopySize);
- return ToSize;
-}
-
-// Inserts part of From[0,FromSize) into To.
-// Returns new size of To on success or 0 on failure.
-size_t MutationDispatcher::InsertPartOf(const uint8_t *From, size_t FromSize,
- uint8_t *To, size_t ToSize,
- size_t MaxToSize) {
- if (ToSize >= MaxToSize) return 0;
- size_t AvailableSpace = MaxToSize - ToSize;
- size_t MaxCopySize = std::min(AvailableSpace, FromSize);
- size_t CopySize = Rand(MaxCopySize) + 1;
- size_t FromBeg = Rand(FromSize - CopySize + 1);
- assert(FromBeg + CopySize <= FromSize);
- size_t ToInsertPos = Rand(ToSize + 1);
- assert(ToInsertPos + CopySize <= MaxToSize);
- size_t TailSize = ToSize - ToInsertPos;
- if (To == From) {
- MutateInPlaceHere.resize(MaxToSize);
- memcpy(MutateInPlaceHere.data(), From + FromBeg, CopySize);
- memmove(To + ToInsertPos + CopySize, To + ToInsertPos, TailSize);
- memmove(To + ToInsertPos, MutateInPlaceHere.data(), CopySize);
- } else {
- memmove(To + ToInsertPos + CopySize, To + ToInsertPos, TailSize);
- memmove(To + ToInsertPos, From + FromBeg, CopySize);
+ const DictionaryEntry &DE = D[Rand(D.size())];
+ const Unit &Word = DE.Word;
+ size_t PositionHint = DE.PositionHint;
+ bool UsePositionHint = PositionHint != std::numeric_limits<size_t>::max() &&
+ PositionHint + Word.size() < Size && Rand.RandBool();
+ if (Rand.RandBool()) { // Insert Word.
+ if (Size + Word.size() > MaxSize) return 0;
+ size_t Idx = UsePositionHint ? PositionHint : Rand(Size + 1);
+ memmove(Data + Idx + Word.size(), Data + Idx, Size - Idx);
+ memcpy(Data + Idx, Word.data(), Word.size());
+ Size += Word.size();
+ } else { // Overwrite some bytes with Word.
+ if (Word.size() > Size) return 0;
+ size_t Idx = UsePositionHint ? PositionHint : Rand(Size - Word.size());
+ memcpy(Data + Idx, Word.data(), Word.size());
}
- return ToSize + CopySize;
-}
-
-size_t MutationDispatcher::Mutate_CopyPart(uint8_t *Data, size_t Size,
- size_t MaxSize) {
- if (Size > MaxSize || Size == 0) return 0;
- if (Rand.RandBool())
- return CopyPartOf(Data, Size, Data, Size);
- else
- return InsertPartOf(Data, Size, Data, Size, MaxSize);
+ CurrentDictionaryEntrySequence.push_back(DE);
+ return Size;
}
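In the insert branch above, the tail is shifted right by Word.size() before the word is copied in at the hinted (or random) index. The same splice with fixed illustrative values:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      uint8_t Data[16] = {'a', 'b', 'c', 'd', 'e', 'f'};
      size_t Size = 6;
      const uint8_t W[] = {'X', 'Y'};                    // dictionary word
      size_t Idx = 2;                                    // acts as the position hint
      std::memmove(Data + Idx + sizeof(W), Data + Idx, Size - Idx);
      std::memcpy(Data + Idx, W, sizeof(W));
      Size += sizeof(W);
      fwrite(Data, 1, Size, stdout);                     // prints "abXYcdef"
      return 0;
    }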
size_t MutationDispatcher::Mutate_ChangeASCIIInteger(uint8_t *Data, size_t Size,
size_t MaxSize) {
- if (Size > MaxSize) return 0;
size_t B = Rand(Size);
while (B < Size && !isdigit(Data[B])) B++;
if (B == Size) return 0;
@@ -379,69 +190,16 @@ size_t MutationDispatcher::Mutate_ChangeASCIIInteger(uint8_t *Data, size_t Size,
return Size;
}
-template<class T>
-size_t ChangeBinaryInteger(uint8_t *Data, size_t Size, Random &Rand) {
- if (Size < sizeof(T)) return 0;
- size_t Off = Rand(Size - sizeof(T) + 1);
- assert(Off + sizeof(T) <= Size);
- T Val;
- if (Off < 64 && !Rand(4)) {
- Val = Size;
- if (Rand.RandBool())
- Val = Bswap(Val);
- } else {
- memcpy(&Val, Data + Off, sizeof(Val));
- T Add = Rand(21);
- Add -= 10;
- if (Rand.RandBool())
-      Val = Bswap(T(Bswap(Val) + Add)); // Add assuming different endianness.
-    else
-      Val = Val + Add; // Add assuming current endianness.
- if (Add == 0 || Rand.RandBool()) // Maybe negate.
- Val = -Val;
- }
- memcpy(Data + Off, &Val, sizeof(Val));
- return Size;
-}
-
-size_t MutationDispatcher::Mutate_ChangeBinaryInteger(uint8_t *Data,
- size_t Size,
- size_t MaxSize) {
- if (Size > MaxSize) return 0;
- switch (Rand(4)) {
- case 3: return ChangeBinaryInteger<uint64_t>(Data, Size, Rand);
- case 2: return ChangeBinaryInteger<uint32_t>(Data, Size, Rand);
- case 1: return ChangeBinaryInteger<uint16_t>(Data, Size, Rand);
- case 0: return ChangeBinaryInteger<uint8_t>(Data, Size, Rand);
- default: assert(0);
- }
- return 0;
-}
-
size_t MutationDispatcher::Mutate_CrossOver(uint8_t *Data, size_t Size,
size_t MaxSize) {
- if (Size > MaxSize) return 0;
+ auto Corpus = MDImpl->Corpus;
if (!Corpus || Corpus->size() < 2 || Size == 0) return 0;
size_t Idx = Rand(Corpus->size());
- const Unit &O = (*Corpus)[Idx];
- if (O.empty()) return 0;
- MutateInPlaceHere.resize(MaxSize);
- auto &U = MutateInPlaceHere;
- size_t NewSize = 0;
- switch(Rand(3)) {
- case 0:
- NewSize = CrossOver(Data, Size, O.data(), O.size(), U.data(), U.size());
- break;
- case 1:
- NewSize = InsertPartOf(O.data(), O.size(), U.data(), U.size(), MaxSize);
- if (!NewSize)
- NewSize = CopyPartOf(O.data(), O.size(), U.data(), U.size());
- break;
- case 2:
- NewSize = CopyPartOf(O.data(), O.size(), U.data(), U.size());
- break;
- default: assert(0);
- }
+ const Unit &Other = (*Corpus)[Idx];
+ if (Other.empty()) return 0;
+ Unit U(MaxSize);
+ size_t NewSize =
+ CrossOver(Data, Size, Other.data(), Other.size(), U.data(), U.size());
assert(NewSize > 0 && "CrossOver returned empty unit");
   assert(NewSize <= MaxSize && "CrossOver returned oversized unit");
memcpy(Data, U.data(), NewSize);
@@ -449,85 +207,72 @@ size_t MutationDispatcher::Mutate_CrossOver(uint8_t *Data, size_t Size,
}
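Mutate_CrossOver delegates the actual recombination to CrossOver, which lives in FuzzerCrossOver.cpp and is outside this hunk. Purely to illustrate the idea (this is not the library's implementation), a chunk-interleaving crossover could be sketched as:

    #include <algorithm>
    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    // Alternately copy short random-length chunks from two parents into Out
    // until it is full or both parents are exhausted; returns bytes written.
    size_t SketchCrossOver(const uint8_t *A, size_t ASize, const uint8_t *B,
                           size_t BSize, uint8_t *Out, size_t OutSize) {
      size_t Pos = 0, IA = 0, IB = 0;
      bool FromA = true;
      while (Pos < OutSize && (IA < ASize || IB < BSize)) {
        size_t &Idx = FromA ? IA : IB;
        size_t Avail = (FromA ? ASize : BSize) - Idx;
        if (Avail) {
          size_t N = std::min({Avail, OutSize - Pos, (size_t)(1 + rand() % 4)});
          std::memcpy(Out + Pos, (FromA ? A : B) + Idx, N);
          Pos += N;
          Idx += N;
        }
        FromA = !FromA;                 // switch parents between chunks
      }
      return Pos;
    }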
void MutationDispatcher::StartMutationSequence() {
- CurrentMutatorSequence.clear();
- CurrentDictionaryEntrySequence.clear();
-}
-
-// Copy successful dictionary entries to PersistentAutoDictionary.
-void MutationDispatcher::RecordSuccessfulMutationSequence() {
- for (auto DE : CurrentDictionaryEntrySequence) {
- // PersistentAutoDictionary.AddWithSuccessCountOne(DE);
- DE->IncSuccessCount();
- assert(DE->GetW().size());
- // Linear search is fine here as this happens seldom.
- if (!PersistentAutoDictionary.ContainsWord(DE->GetW()))
- PersistentAutoDictionary.push_back({DE->GetW(), 1});
- }
-}
-
-void MutationDispatcher::PrintRecommendedDictionary() {
- std::vector<DictionaryEntry> V;
- for (auto &DE : PersistentAutoDictionary)
- if (!ManualDictionary.ContainsWord(DE.GetW()))
- V.push_back(DE);
- if (V.empty()) return;
- Printf("###### Recommended dictionary. ######\n");
- for (auto &DE: V) {
- assert(DE.GetW().size());
- Printf("\"");
- PrintASCII(DE.GetW(), "\"");
- Printf(" # Uses: %zd\n", DE.GetUseCount());
- }
- Printf("###### End of recommended dictionary. ######\n");
+ MDImpl->CurrentMutatorSequence.clear();
+ MDImpl->CurrentDictionaryEntrySequence.clear();
}
void MutationDispatcher::PrintMutationSequence() {
- Printf("MS: %zd ", CurrentMutatorSequence.size());
- for (auto M : CurrentMutatorSequence)
+ Printf("MS: %zd ", MDImpl->CurrentMutatorSequence.size());
+ for (auto M : MDImpl->CurrentMutatorSequence)
Printf("%s-", M.Name);
- if (!CurrentDictionaryEntrySequence.empty()) {
+ if (!MDImpl->CurrentDictionaryEntrySequence.empty()) {
Printf(" DE: ");
- for (auto DE : CurrentDictionaryEntrySequence) {
+ for (auto DE : MDImpl->CurrentDictionaryEntrySequence) {
Printf("\"");
- PrintASCII(DE->GetW(), "\"-");
+ PrintASCII(DE.Word, "\"-");
}
}
}
-size_t MutationDispatcher::Mutate(uint8_t *Data, size_t Size, size_t MaxSize) {
- return MutateImpl(Data, Size, MaxSize, Mutators);
-}
-
-size_t MutationDispatcher::DefaultMutate(uint8_t *Data, size_t Size,
- size_t MaxSize) {
- return MutateImpl(Data, Size, MaxSize, DefaultMutators);
-}
-
// Mutates Data in place, returns new size.
-size_t MutationDispatcher::MutateImpl(uint8_t *Data, size_t Size,
- size_t MaxSize,
- const std::vector<Mutator> &Mutators) {
+size_t MutationDispatcher::Mutate(uint8_t *Data, size_t Size, size_t MaxSize) {
assert(MaxSize > 0);
+ assert(Size <= MaxSize);
+ if (Size == 0) {
+ for (size_t i = 0; i < MaxSize; i++)
+ Data[i] = RandCh(Rand);
+ return MaxSize;
+ }
+ assert(Size > 0);
// Some mutations may fail (e.g. can't insert more bytes if Size == MaxSize),
// in which case they will return 0.
// Try several times before returning un-mutated data.
- for (int Iter = 0; Iter < 100; Iter++) {
- auto M = Mutators[Rand(Mutators.size())];
+ for (int Iter = 0; Iter < 10; Iter++) {
+ size_t MutatorIdx = Rand(MDImpl->Mutators.size());
+ auto M = MDImpl->Mutators[MutatorIdx];
size_t NewSize = (this->*(M.Fn))(Data, Size, MaxSize);
- if (NewSize && NewSize <= MaxSize) {
- if (Options.OnlyASCII)
- ToASCII(Data, NewSize);
- CurrentMutatorSequence.push_back(M);
+ if (NewSize) {
+ MDImpl->CurrentMutatorSequence.push_back(M);
return NewSize;
}
}
- *Data = ' ';
- return 1; // Fallback, should not happen frequently.
+ return Size;
+}
+
+void MutationDispatcher::SetCorpus(const std::vector<Unit> *Corpus) {
+ MDImpl->SetCorpus(Corpus);
+}
+
+void MutationDispatcher::AddWordToManualDictionary(const Unit &Word) {
+ MDImpl->ManualDictionary.push_back(
+ {Word, std::numeric_limits<size_t>::max()});
}
-void MutationDispatcher::AddWordToManualDictionary(const Word &W) {
- ManualDictionary.push_back(
- {W, std::numeric_limits<size_t>::max()});
+void MutationDispatcher::AddWordToAutoDictionary(const Unit &Word,
+ size_t PositionHint) {
+ static const size_t kMaxAutoDictSize = 1 << 14;
+ if (MDImpl->AutoDictionary.size() >= kMaxAutoDictSize) return;
+ MDImpl->AutoDictionary.push_back({Word, PositionHint});
}
+void MutationDispatcher::ClearAutoDictionary() {
+ MDImpl->AutoDictionary.clear();
+}
+
+MutationDispatcher::MutationDispatcher(FuzzerRandomBase &Rand) : Rand(Rand) {
+ MDImpl = new Impl(Rand);
+}
+
+MutationDispatcher::~MutationDispatcher() { delete MDImpl; }
+
} // namespace fuzzer
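Taken together, the new MutationDispatcher API boils down to: construct with a FuzzerRandomBase, optionally seed dictionaries and a corpus, then call Mutate in a loop. A minimal standalone harness sketch, assuming the declarations above are reachable via FuzzerInternal.h (buffer contents and iteration count are illustrative):

    #include "FuzzerInternal.h"
    #include <cstdint>
    #include <cstdio>

    int main() {
      fuzzer::FuzzerRandomLibc Rand(0);            // libc-backed RNG, seed 0
      fuzzer::MutationDispatcher MD(Rand);
      uint8_t Buf[64] = {'s', 'e', 'e', 'd'};
      size_t Size = 4;
      MD.StartMutationSequence();
      for (int i = 0; i < 10; i++)
        Size = MD.Mutate(Buf, Size, sizeof(Buf));  // mutates Buf in place
      MD.PrintMutationSequence();
      printf("\n");
      fuzzer::Print(fuzzer::Unit(Buf, Buf + Size), "\n");
      return 0;
    }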
diff --git a/gnu/llvm/lib/Fuzzer/FuzzerSHA1.cpp b/gnu/llvm/lib/Fuzzer/FuzzerSHA1.cpp
index d2f8e811bbf..b42a04854cd 100644
--- a/gnu/llvm/lib/Fuzzer/FuzzerSHA1.cpp
+++ b/gnu/llvm/lib/Fuzzer/FuzzerSHA1.cpp
@@ -16,15 +16,12 @@
// For the same reason we do not want to depend on SHA1 from the LLVM tree.
//===----------------------------------------------------------------------===//
-#include "FuzzerSHA1.h"
-#include "FuzzerDefs.h"
+#include "FuzzerInternal.h"
/* This code is public-domain - it is based on libcrypt
* placed in the public domain by Wei Dai and other contributors.
*/
-#include <iomanip>
-#include <sstream>
#include <stdint.h>
#include <string.h>
@@ -196,27 +193,10 @@ uint8_t* sha1_result(sha1nfo *s) {
} // namespace; Added for LibFuzzer
-namespace fuzzer {
-
// The rest is added for LibFuzzer
-void ComputeSHA1(const uint8_t *Data, size_t Len, uint8_t *Out) {
+void fuzzer::ComputeSHA1(const uint8_t *Data, size_t Len, uint8_t *Out) {
sha1nfo s;
sha1_init(&s);
sha1_write(&s, (const char*)Data, Len);
memcpy(Out, sha1_result(&s), HASH_LENGTH);
}
-
-std::string Sha1ToString(const uint8_t Sha1[kSHA1NumBytes]) {
- std::stringstream SS;
- for (int i = 0; i < kSHA1NumBytes; i++)
- SS << std::hex << std::setfill('0') << std::setw(2) << (unsigned)Sha1[i];
- return SS.str();
-}
-
-std::string Hash(const Unit &U) {
- uint8_t Hash[kSHA1NumBytes];
- ComputeSHA1(U.data(), U.size(), Hash);
- return Sha1ToString(Hash);
-}
-
-}
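The wrapper above can be sanity-checked against the standard SHA-1 test vector: SHA1("abc") is a9993e364706816aba3e25717850c26c9cd0d89d. A minimal sketch, assuming ComputeSHA1 and kSHA1NumBytes are visible through FuzzerInternal.h:

    #include "FuzzerInternal.h"
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint8_t Msg[] = {'a', 'b', 'c'};
      uint8_t Out[fuzzer::kSHA1NumBytes];
      fuzzer::ComputeSHA1(Msg, sizeof(Msg), Out);
      for (size_t i = 0; i < fuzzer::kSHA1NumBytes; i++)
        printf("%02x", Out[i]);                    // expect a9993e36...9cd0d89d
      printf("\n");
      return 0;
    }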
diff --git a/gnu/llvm/lib/Fuzzer/FuzzerUtil.cpp b/gnu/llvm/lib/Fuzzer/FuzzerUtil.cpp
index 2d95f40e46a..d7226cfce96 100644
--- a/gnu/llvm/lib/Fuzzer/FuzzerUtil.cpp
+++ b/gnu/llvm/lib/Fuzzer/FuzzerUtil.cpp
@@ -9,30 +9,22 @@
// Misc utils.
//===----------------------------------------------------------------------===//
-#include "FuzzerUtil.h"
-#include "FuzzerIO.h"
#include "FuzzerInternal.h"
+#include <sstream>
+#include <iomanip>
+#include <sys/time.h>
#include <cassert>
-#include <chrono>
#include <cstring>
-#include <errno.h>
#include <signal.h>
#include <sstream>
-#include <stdio.h>
-#include <sys/types.h>
-#include <thread>
+#include <unistd.h>
namespace fuzzer {
-void PrintHexArray(const uint8_t *Data, size_t Size,
- const char *PrintAfter) {
- for (size_t i = 0; i < Size; i++)
- Printf("0x%x,", (unsigned)Data[i]);
- Printf("%s", PrintAfter);
-}
-
void Print(const Unit &v, const char *PrintAfter) {
- PrintHexArray(v.data(), v.size(), PrintAfter);
+ for (auto x : v)
+ Printf("0x%x,", (unsigned) x);
+ Printf("%s", PrintAfter);
}
void PrintASCIIByte(uint8_t Byte) {
@@ -53,13 +45,50 @@ void PrintASCII(const uint8_t *Data, size_t Size, const char *PrintAfter) {
}
void PrintASCII(const Unit &U, const char *PrintAfter) {
- PrintASCII(U.data(), U.size(), PrintAfter);
+ for (auto X : U)
+ PrintASCIIByte(X);
+ Printf("%s", PrintAfter);
}
-bool ToASCII(uint8_t *Data, size_t Size) {
+std::string Hash(const Unit &U) {
+ uint8_t Hash[kSHA1NumBytes];
+ ComputeSHA1(U.data(), U.size(), Hash);
+ std::stringstream SS;
+ for (int i = 0; i < kSHA1NumBytes; i++)
+ SS << std::hex << std::setfill('0') << std::setw(2) << (unsigned)Hash[i];
+ return SS.str();
+}
+
+static void AlarmHandler(int, siginfo_t *, void *) {
+ Fuzzer::StaticAlarmCallback();
+}
+
+void SetTimer(int Seconds) {
+ struct itimerval T {{Seconds, 0}, {Seconds, 0}};
+ int Res = setitimer(ITIMER_REAL, &T, nullptr);
+ assert(Res == 0);
+ struct sigaction sigact;
+ memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_sigaction = AlarmHandler;
+ Res = sigaction(SIGALRM, &sigact, 0);
+ assert(Res == 0);
+}
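One caveat in SetTimer: it assigns sa_sigaction without setting SA_SIGINFO in sa_flags, which POSIX requires for the three-argument handler form (most platforms tolerate the omission because the two handler fields share storage). A registration sketch with the flag set, illustrative only and not part of the import:

    #include <csignal>
    #include <cstring>

    static void AlarmSigactionHandler(int, siginfo_t *, void *) {
      // handle SIGALRM here
    }

    void SetAlarmHandlerSketch() {
      struct sigaction sigact;
      std::memset(&sigact, 0, sizeof(sigact));
      sigact.sa_flags = SA_SIGINFO;       // required when using sa_sigaction
      sigact.sa_sigaction = AlarmSigactionHandler;
      sigaction(SIGALRM, &sigact, nullptr);
    }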
+
+int NumberOfCpuCores() {
+ FILE *F = popen("nproc", "r");
+ int N = 0;
+ fscanf(F, "%d", &N);
+ fclose(F);
+ return N;
+}
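NumberOfCpuCores shells out to nproc; note also that a stream opened with popen is conventionally closed with pclose rather than fclose. A subprocess-free alternative using sysconf, which is widely available on Linux and the BSDs though not mandated by POSIX (a sketch, not part of the import):

    #include <unistd.h>

    static int NumberOfCpuCoresSysconf() {
      long N = sysconf(_SC_NPROCESSORS_ONLN);  // CPUs currently online
      return N > 0 ? (int)N : 1;               // fall back to 1 on failure
    }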
+
+int ExecuteCommand(const std::string &Command) {
+ return system(Command.c_str());
+}
+
+bool ToASCII(Unit &U) {
bool Changed = false;
- for (size_t i = 0; i < Size; i++) {
- uint8_t &X = Data[i];
+ for (auto &X : U) {
auto NewX = X;
NewX &= 127;
if (!isspace(NewX) && !isprint(NewX))
@@ -70,11 +99,9 @@ bool ToASCII(uint8_t *Data, size_t Size) {
return Changed;
}
-bool IsASCII(const Unit &U) { return IsASCII(U.data(), U.size()); }
-
-bool IsASCII(const uint8_t *Data, size_t Size) {
- for (size_t i = 0; i < Size; i++)
- if (!(isprint(Data[i]) || isspace(Data[i]))) return false;
+bool IsASCII(const Unit &U) {
+ for (auto X : U)
+ if (!(isprint(X) || isspace(X))) return false;
return true;
}
@@ -151,6 +178,9 @@ bool ParseDictionaryFile(const std::string &Text, std::vector<Unit> *Units) {
return true;
}
+int GetPid() { return getpid(); }
+
+
std::string Base64(const Unit &U) {
static const char Table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
@@ -179,47 +209,4 @@ std::string Base64(const Unit &U) {
return Res;
}
-std::string DescribePC(const char *SymbolizedFMT, uintptr_t PC) {
- if (!EF->__sanitizer_symbolize_pc) return "<can not symbolize>";
- char PcDescr[1024];
- EF->__sanitizer_symbolize_pc(reinterpret_cast<void*>(PC),
- SymbolizedFMT, PcDescr, sizeof(PcDescr));
- PcDescr[sizeof(PcDescr) - 1] = 0; // Just in case.
- return PcDescr;
-}
-
-void PrintPC(const char *SymbolizedFMT, const char *FallbackFMT, uintptr_t PC) {
- if (EF->__sanitizer_symbolize_pc)
- Printf("%s", DescribePC(SymbolizedFMT, PC).c_str());
- else
- Printf(FallbackFMT, PC);
-}
-
-unsigned NumberOfCpuCores() {
- unsigned N = std::thread::hardware_concurrency();
- if (!N) {
- Printf("WARNING: std::thread::hardware_concurrency not well defined for "
- "your platform. Assuming CPU count of 1.\n");
- N = 1;
- }
- return N;
-}
-
-bool ExecuteCommandAndReadOutput(const std::string &Command, std::string *Out) {
- FILE *Pipe = OpenProcessPipe(Command.c_str(), "r");
- if (!Pipe) return false;
- char Buff[1024];
- size_t N;
- while ((N = fread(Buff, 1, sizeof(Buff), Pipe)) > 0)
- Out->append(Buff, N);
- return true;
-}
-
-size_t SimpleFastHash(const uint8_t *Data, size_t Size) {
- size_t Res = 0;
- for (size_t i = 0; i < Size; i++)
- Res = Res * 11 + Data[i];
- return Res;
-}
-
} // namespace fuzzer
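Only the tail of ParseDictionaryFile is visible in this hunk; it consumes AFL-style dictionary syntax (one kw="value" entry per line, with \xAB escapes and # comments). A usage sketch under that assumption:

    #include "FuzzerInternal.h"
    #include <cstdio>

    int main() {
      std::vector<fuzzer::Unit> Units;
      const char *Dict = "# comment\nkw1=\"FUZZ\"\nkw2=\"\\x41\\x42\"\n";
      if (fuzzer::ParseDictionaryFile(Dict, &Units))
        printf("parsed %zu dictionary entries\n", Units.size());
      return 0;
    }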
diff --git a/gnu/llvm/lib/Fuzzer/test/CMakeLists.txt b/gnu/llvm/lib/Fuzzer/test/CMakeLists.txt
index 43aea2b7a18..cd0b167eb38 100644
--- a/gnu/llvm/lib/Fuzzer/test/CMakeLists.txt
+++ b/gnu/llvm/lib/Fuzzer/test/CMakeLists.txt
@@ -1,277 +1,118 @@
# Build all these tests with -O0, otherwise optimizations may merge some
# basic blocks and we'll fail to discover the targets.
-# We change the flags for every build type because we might be doing
-# a multi-configuration build (e.g. Xcode) where CMAKE_BUILD_TYPE doesn't
-# mean anything.
-set(variables_to_filter
- CMAKE_CXX_FLAGS_RELEASE
- CMAKE_CXX_FLAGS_DEBUG
- CMAKE_CXX_FLAGS_RELWITHDEBINFO
- CMAKE_CXX_FLAGS_MINSIZEREL
- LIBFUZZER_FLAGS_BASE
- )
-foreach (VARNAME ${variables_to_filter})
- string(REGEX REPLACE "([-/]O)[123s]" "\\10" ${VARNAME} "${${VARNAME}}")
-endforeach()
-
-# Enable the coverage instrumentation (it is disabled for the Fuzzer lib).
-set(CMAKE_CXX_FLAGS "${LIBFUZZER_FLAGS_BASE} -fsanitize-coverage=trace-pc-guard,indirect-calls,trace-cmp,trace-div,trace-gep -gline-tables-only")
-
-if(MSVC)
- # For tests use the CRT specified for release build
- # (asan doesn't support MDd and MTd)
- if ("${LLVM_USE_CRT_RELEASE}" STREQUAL "")
- set(CRT_FLAG " /MD ")
- else()
- set(CRT_FLAG " /${LLVM_USE_CRT_RELEASE} ")
- endif()
-  # In order to use the sanitizers on Windows, we need to link against many
-  # runtime libraries, which depend on the target being created
-  # (executable or dll) and the C runtime library used (MT/MD).
-  # By default, cmake uses link.exe for linking, which fails because we don't
-  # specify the appropriate dependencies.
-  # As we don't want to handle every possible situation, which depends
-  # on the implementation of compiler-rt, the simplest option is to change
-  # the rules for linking executables and shared libraries, using the compiler
- # instead of link.exe. Clang will consider the sanitizer flags, and
- # automatically provide the required libraries to the linker.
- set(CMAKE_CXX_LINK_EXECUTABLE "<CMAKE_CXX_COMPILER> <FLAGS> ${CMAKE_CXX_FLAGS} ${CRT_FLAG} <OBJECTS> -o <TARGET> <LINK_LIBRARIES> /link <CMAKE_CXX_LINK_FLAGS> <LINK_FLAGS>")
- set(CMAKE_CXX_CREATE_SHARED_LIBRARY "<CMAKE_CXX_COMPILER> ${CMAKE_CXX_FLAGS} ${CRT_FLAG} /LD <CMAKE_SHARED_LIBRARY_CXX_FLAGS> <CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS> <CMAKE_SHARED_LIBRARY_SONAME_CXX_FLAG> <TARGET_SONAME> -o <TARGET> <OBJECTS> <LINK_LIBRARIES> /link <LINK_FLAGS>")
-endif()
-
-add_custom_target(TestBinaries)
-
-# add_libfuzzer_test(<name>
-# SOURCES source0.cpp [source1.cpp ...]
-# )
-#
-# Declares a LibFuzzer test executable with target name LLVMFuzzer-<name>.
-#
-# One or more source files to be compiled into the binary must be declared
-# after the SOURCES keyword.
-function(add_libfuzzer_test name)
- set(multi_arg_options "SOURCES")
- cmake_parse_arguments(
- "add_libfuzzer_test" "" "" "${multi_arg_options}" ${ARGN})
- if ("${add_libfuzzer_test_SOURCES}" STREQUAL "")
- message(FATAL_ERROR "Source files must be specified")
- endif()
- add_executable(LLVMFuzzer-${name}
- ${add_libfuzzer_test_SOURCES}
- )
- target_link_libraries(LLVMFuzzer-${name} LLVMFuzzer)
- # Place binary where llvm-lit expects to find it
- set_target_properties(LLVMFuzzer-${name}
- PROPERTIES RUNTIME_OUTPUT_DIRECTORY
- "${CMAKE_BINARY_DIR}/lib/Fuzzer/test"
- )
- add_dependencies(TestBinaries LLVMFuzzer-${name})
-endfunction()
+# Also re-enable the coverage instrumentation (it is disabled
+# for the Fuzzer lib).
+set(CMAKE_CXX_FLAGS_RELEASE "${LIBFUZZER_FLAGS_BASE} -O0 -fsanitize-coverage=edge,indirect-calls")
-###############################################################################
-# Basic tests
-###############################################################################
+set(DFSanTests
+ MemcmpTest
+ SimpleCmpTest
+ StrcmpTest
+ StrncmpTest
+ SwitchTest
+ )
set(Tests
- AbsNegAndConstantTest
- AbsNegAndConstant64Test
- AccumulateAllocationsTest
- BadStrcmpTest
- BogusInitializeTest
- BufferOverflowOnInput
CallerCalleeTest
- CleanseTest
CounterTest
- CustomCrossOverAndMutateTest
- CustomCrossOverTest
- CustomMutatorTest
- CxxStringEqTest
- DivTest
- EmptyTest
- EquivalenceATest
- EquivalenceBTest
- FlagsTest
FourIndependentBranchesTest
FullCoverageSetTest
- InitializeTest
- Memcmp64BytesTest
MemcmpTest
- LeakTest
- LeakTimeoutTest
- LoadTest
NullDerefTest
- NullDerefOnEmptyTest
- NthRunCrashTest
- OneHugeAllocTest
- OutOfMemoryTest
- OutOfMemorySingleLargeMallocTest
- OverwriteInputTest
- RepeatedMemcmp
- RepeatedBytesTest
SimpleCmpTest
SimpleDictionaryTest
SimpleHashTest
SimpleTest
- SimpleThreadedTest
- SingleByteInputTest
- SingleMemcmpTest
- SingleStrcmpTest
- SingleStrncmpTest
- SpamyTest
- ShrinkControlFlowTest
- ShrinkControlFlowSimpleTest
- ShrinkValueProfileTest
StrcmpTest
- StrncmpOOBTest
StrncmpTest
- StrstrTest
- SwapCmpTest
SwitchTest
- Switch2Test
- TableLookupTest
- ThreadedLeakTest
ThreadedTest
TimeoutTest
- TimeoutEmptyTest
- TraceMallocTest
- TwoDifferentBugsTest
)
-if(APPLE OR MSVC)
- # LeakSanitizer is not supported on OSX and Windows right now
- set(HAS_LSAN 0)
- message(WARNING "LeakSanitizer is not supported."
- " Building and running LibFuzzer LeakSanitizer tests is disabled."
- )
-else()
- set(HAS_LSAN 1)
-endif()
+set(CustomMainTests
+ UserSuppliedFuzzerTest
+ )
+
+set(UninstrumentedTests
+ UninstrumentedTest
+ )
+
+set(TraceBBTests
+ SimpleTest
+ )
+
+set(TestBinaries)
foreach(Test ${Tests})
- add_libfuzzer_test(${Test} SOURCES ${Test}.cpp)
+ add_executable(LLVMFuzzer-${Test}
+ ${Test}.cpp
+ )
+ target_link_libraries(LLVMFuzzer-${Test}
+ LLVMFuzzer
+ )
+ set(TestBinaries ${TestBinaries} LLVMFuzzer-${Test})
endforeach()
-function(test_export_symbol target symbol)
- if(MSVC)
- set_target_properties(LLVMFuzzer-${target} PROPERTIES LINK_FLAGS
- "-export:${symbol}")
- endif()
-endfunction()
+foreach(Test ${CustomMainTests})
+ add_executable(LLVMFuzzer-${Test}
+ ${Test}.cpp
+ )
+ target_link_libraries(LLVMFuzzer-${Test}
+ LLVMFuzzerNoMain
+ )
+ set(TestBinaries ${TestBinaries} LLVMFuzzer-${Test})
+endforeach()
-test_export_symbol(InitializeTest "LLVMFuzzerInitialize")
-test_export_symbol(BogusInitializeTest "LLVMFuzzerInitialize")
-test_export_symbol(CustomCrossOverTest "LLVMFuzzerCustomCrossOver")
-test_export_symbol(CustomMutatorTest "LLVMFuzzerCustomMutator")
-###############################################################################
-# Unit tests
-###############################################################################
+configure_lit_site_cfg(
+ ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in
+ ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg
+ )
-add_executable(LLVMFuzzer-Unittest
- FuzzerUnittest.cpp
+configure_lit_site_cfg(
+ ${CMAKE_CURRENT_SOURCE_DIR}/unit/lit.site.cfg.in
+ ${CMAKE_CURRENT_BINARY_DIR}/unit/lit.site.cfg
)
-add_executable(LLVMFuzzer-StandaloneInitializeTest
- InitializeTest.cpp
- ../standalone/StandaloneFuzzTargetMain.c
+include_directories(..)
+include_directories(${LLVM_MAIN_SRC_DIR}/utils/unittest/googletest/include)
+
+add_executable(LLVMFuzzer-Unittest
+ FuzzerUnittest.cpp
+ $<TARGET_OBJECTS:LLVMFuzzerNoMainObjects>
)
target_link_libraries(LLVMFuzzer-Unittest
gtest
gtest_main
- LLVMFuzzerNoMain
- )
-
-target_include_directories(LLVMFuzzer-Unittest PRIVATE
- "${LLVM_MAIN_SRC_DIR}/utils/unittest/googletest/include"
)
-add_dependencies(TestBinaries LLVMFuzzer-Unittest)
-set_target_properties(LLVMFuzzer-Unittest
- PROPERTIES RUNTIME_OUTPUT_DIRECTORY
- "${CMAKE_CURRENT_BINARY_DIR}"
-)
-
-add_dependencies(TestBinaries LLVMFuzzer-StandaloneInitializeTest)
-set_target_properties(LLVMFuzzer-StandaloneInitializeTest
- PROPERTIES RUNTIME_OUTPUT_DIRECTORY
- "${CMAKE_CURRENT_BINARY_DIR}"
-)
-
-###############################################################################
-# Additional tests
-###############################################################################
-
-include_directories(..)
-
-# add_subdirectory(uninstrumented)
-add_subdirectory(no-coverage)
-add_subdirectory(trace-pc)
-add_subdirectory(ubsan)
-if (NOT MSVC)
- add_subdirectory(inline-8bit-counters)
-endif()
-
-add_library(LLVMFuzzer-DSO1 SHARED DSO1.cpp)
-add_library(LLVMFuzzer-DSO2 SHARED DSO2.cpp)
+set(TestBinaries ${TestBinaries} LLVMFuzzer-Unittest)
-add_executable(LLVMFuzzer-DSOTest
- DSOTestMain.cpp
- DSOTestExtra.cpp)
-
-target_link_libraries(LLVMFuzzer-DSOTest
- LLVMFuzzer-DSO1
- LLVMFuzzer-DSO2
- LLVMFuzzer
- )
+add_subdirectory(dfsan)
-set_target_properties(LLVMFuzzer-DSOTest PROPERTIES RUNTIME_OUTPUT_DIRECTORY
- "${CMAKE_BINARY_DIR}/lib/Fuzzer/test")
+foreach(Test ${DFSanTests})
+ set(TestBinaries ${TestBinaries} LLVMFuzzer-${Test}-DFSan)
+endforeach()
-if(MSVC)
- set_output_directory(LLVMFuzzer-DSO1
- BINARY_DIR "${CMAKE_BINARY_DIR}/lib/Fuzzer/test"
- LIBRARY_DIR "${CMAKE_BINARY_DIR}/lib/Fuzzer/test")
- set_output_directory(LLVMFuzzer-DSO2
- BINARY_DIR "${CMAKE_BINARY_DIR}/lib/Fuzzer/test"
- LIBRARY_DIR "${CMAKE_BINARY_DIR}/lib/Fuzzer/test")
-else(MSVC)
- set_output_directory(LLVMFuzzer-DSO1
- LIBRARY_DIR "${CMAKE_BINARY_DIR}/lib/Fuzzer/lib")
- set_output_directory(LLVMFuzzer-DSO2
- LIBRARY_DIR "${CMAKE_BINARY_DIR}/lib/Fuzzer/lib")
-endif()
+add_subdirectory(uninstrumented)
-add_dependencies(TestBinaries LLVMFuzzer-DSOTest)
+foreach(Test ${UninstrumentedTests})
+ set(TestBinaries ${TestBinaries} LLVMFuzzer-${Test}-Uninstrumented)
+endforeach()
-###############################################################################
-# Configure lit to run the tests
-#
-# Note this is done after declaring all tests so we can inform lit if any tests
-# need to be disabled.
-###############################################################################
-set(LIBFUZZER_POSIX 1)
-if (MSVC)
- set(LIBFUZZER_POSIX 0)
-endif()
+add_subdirectory(trace-bb)
-configure_lit_site_cfg(
- ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in
- ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg
- )
+foreach(Test ${TraceBBTests})
+ set(TestBinaries ${TestBinaries} LLVMFuzzer-${Test}-TraceBB)
+endforeach()
-configure_lit_site_cfg(
- ${CMAKE_CURRENT_SOURCE_DIR}/unit/lit.site.cfg.in
- ${CMAKE_CURRENT_BINARY_DIR}/unit/lit.site.cfg
+set_target_properties(${TestBinaries}
+ PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)
add_lit_testsuite(check-fuzzer "Running Fuzzer tests"
${CMAKE_CURRENT_BINARY_DIR}
- DEPENDS TestBinaries
+ DEPENDS ${TestBinaries} FileCheck not
)
-
-# Don't add dependencies on Windows: the linker step would fail there,
-# since cmake uses link.exe for linking and won't include the compiler-rt libs.
-if(NOT MSVC)
- add_dependencies(check-fuzzer FileCheck sancov not)
-endif()
diff --git a/gnu/llvm/lib/Fuzzer/test/CallerCalleeTest.cpp b/gnu/llvm/lib/Fuzzer/test/CallerCalleeTest.cpp
index ed9f37cc152..150b2fc0405 100644
--- a/gnu/llvm/lib/Fuzzer/test/CallerCalleeTest.cpp
+++ b/gnu/llvm/lib/Fuzzer/test/CallerCalleeTest.cpp
@@ -1,11 +1,8 @@
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
// Simple test for a fuzzer.
// Try to find the target using the indirect caller-callee pairs.
-#include <cstddef>
#include <cstdint>
#include <cstdlib>
+#include <cstddef>
#include <cstring>
#include <iostream>
diff --git a/gnu/llvm/lib/Fuzzer/test/CounterTest.cpp b/gnu/llvm/lib/Fuzzer/test/CounterTest.cpp
index 4917934c62e..b61f419c499 100644
--- a/gnu/llvm/lib/Fuzzer/test/CounterTest.cpp
+++ b/gnu/llvm/lib/Fuzzer/test/CounterTest.cpp
@@ -1,6 +1,3 @@
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
// Test for a fuzzer: must find the case where a particular basic block is
// executed many times.
#include <iostream>
diff --git a/gnu/llvm/lib/Fuzzer/test/FourIndependentBranchesTest.cpp b/gnu/llvm/lib/Fuzzer/test/FourIndependentBranchesTest.cpp
index ba963d9b1de..6007dd4a027 100644
--- a/gnu/llvm/lib/Fuzzer/test/FourIndependentBranchesTest.cpp
+++ b/gnu/llvm/lib/Fuzzer/test/FourIndependentBranchesTest.cpp
@@ -1,14 +1,10 @@
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
// Simple test for a fuzzer. The fuzzer must find the string "FUZZ".
-#include <cstddef>
#include <cstdint>
#include <cstdlib>
+#include <cstddef>
#include <iostream>
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
- if (Size > 64) return 0;
int bits = 0;
if (Size > 0 && Data[0] == 'F') bits |= 1;
if (Size > 1 && Data[1] == 'U') bits |= 2;
diff --git a/gnu/llvm/lib/Fuzzer/test/FullCoverageSetTest.cpp b/gnu/llvm/lib/Fuzzer/test/FullCoverageSetTest.cpp
index 6d7e48fe51f..a868084a0ce 100644
--- a/gnu/llvm/lib/Fuzzer/test/FullCoverageSetTest.cpp
+++ b/gnu/llvm/lib/Fuzzer/test/FullCoverageSetTest.cpp
@@ -1,10 +1,7 @@
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
// Simple test for a fuzzer. The fuzzer must find the string "FUZZER".
-#include <cstddef>
#include <cstdint>
#include <cstdlib>
+#include <cstddef>
#include <iostream>
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
diff --git a/gnu/llvm/lib/Fuzzer/test/FuzzerUnittest.cpp b/gnu/llvm/lib/Fuzzer/test/FuzzerUnittest.cpp
index eba2663029b..b33e0c96145 100644
--- a/gnu/llvm/lib/Fuzzer/test/FuzzerUnittest.cpp
+++ b/gnu/llvm/lib/Fuzzer/test/FuzzerUnittest.cpp
@@ -1,38 +1,18 @@
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
-// Avoid ODR violations (LibFuzzer is built without ASan and this test is built
-// with ASan) involving C++ standard library types when using libcxx.
-#define _LIBCPP_HAS_NO_ASAN
-
-// Do not attempt to use LLVM ostream from gtest.
-#define GTEST_NO_LLVM_RAW_OSTREAM 1
-
-#include "FuzzerCorpus.h"
-#include "FuzzerDictionary.h"
#include "FuzzerInternal.h"
-#include "FuzzerMerge.h"
-#include "FuzzerMutate.h"
-#include "FuzzerRandom.h"
-#include "FuzzerTracePC.h"
#include "gtest/gtest.h"
-#include <memory>
#include <set>
-#include <sstream>
using namespace fuzzer;
// For now, have LLVMFuzzerTestOneInput just to make it link.
// Later we may want to make unittests that actually call LLVMFuzzerTestOneInput.
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
+extern "C" void LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
abort();
}
TEST(Fuzzer, CrossOver) {
- std::unique_ptr<ExternalFunctions> t(new ExternalFunctions());
- fuzzer::EF = t.get();
- Random Rand(0);
- MutationDispatcher MD(Rand, {});
+ FuzzerRandomLibc Rand(0);
+ MutationDispatcher MD(Rand);
Unit A({0, 1, 2}), B({5, 6, 7});
Unit C;
Unit Expected[] = {
@@ -98,9 +78,7 @@ TEST(Fuzzer, Hash) {
typedef size_t (MutationDispatcher::*Mutator)(uint8_t *Data, size_t Size,
size_t MaxSize);
-void TestEraseBytes(Mutator M, int NumIter) {
- std::unique_ptr<ExternalFunctions> t(new ExternalFunctions());
- fuzzer::EF = t.get();
+void TestEraseByte(Mutator M, int NumIter) {
uint8_t REM0[8] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
uint8_t REM1[8] = {0x00, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
uint8_t REM2[8] = {0x00, 0x11, 0x33, 0x44, 0x55, 0x66, 0x77};
@@ -109,18 +87,8 @@ void TestEraseBytes(Mutator M, int NumIter) {
uint8_t REM5[8] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x66, 0x77};
uint8_t REM6[8] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x77};
uint8_t REM7[8] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66};
-
- uint8_t REM8[6] = {0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
- uint8_t REM9[6] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
- uint8_t REM10[6] = {0x00, 0x11, 0x22, 0x55, 0x66, 0x77};
-
- uint8_t REM11[5] = {0x33, 0x44, 0x55, 0x66, 0x77};
- uint8_t REM12[5] = {0x00, 0x11, 0x22, 0x33, 0x44};
- uint8_t REM13[5] = {0x00, 0x44, 0x55, 0x66, 0x77};
-
-
- Random Rand(0);
- MutationDispatcher MD(Rand, {});
+ FuzzerRandomLibc Rand(0);
+ MutationDispatcher MD(Rand);
int FoundMask = 0;
for (int i = 0; i < NumIter; i++) {
uint8_t T[8] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
@@ -133,30 +101,20 @@ void TestEraseBytes(Mutator M, int NumIter) {
if (NewSize == 7 && !memcmp(REM5, T, 7)) FoundMask |= 1 << 5;
if (NewSize == 7 && !memcmp(REM6, T, 7)) FoundMask |= 1 << 6;
if (NewSize == 7 && !memcmp(REM7, T, 7)) FoundMask |= 1 << 7;
-
- if (NewSize == 6 && !memcmp(REM8, T, 6)) FoundMask |= 1 << 8;
- if (NewSize == 6 && !memcmp(REM9, T, 6)) FoundMask |= 1 << 9;
- if (NewSize == 6 && !memcmp(REM10, T, 6)) FoundMask |= 1 << 10;
-
- if (NewSize == 5 && !memcmp(REM11, T, 5)) FoundMask |= 1 << 11;
- if (NewSize == 5 && !memcmp(REM12, T, 5)) FoundMask |= 1 << 12;
- if (NewSize == 5 && !memcmp(REM13, T, 5)) FoundMask |= 1 << 13;
}
- EXPECT_EQ(FoundMask, (1 << 14) - 1);
+ EXPECT_EQ(FoundMask, 255);
}
-TEST(FuzzerMutate, EraseBytes1) {
- TestEraseBytes(&MutationDispatcher::Mutate_EraseBytes, 200);
+TEST(FuzzerMutate, EraseByte1) {
+ TestEraseByte(&MutationDispatcher::Mutate_EraseByte, 100);
}
-TEST(FuzzerMutate, EraseBytes2) {
- TestEraseBytes(&MutationDispatcher::Mutate, 2000);
+TEST(FuzzerMutate, EraseByte2) {
+ TestEraseByte(&MutationDispatcher::Mutate, 1000);
}
void TestInsertByte(Mutator M, int NumIter) {
- std::unique_ptr<ExternalFunctions> t(new ExternalFunctions());
- fuzzer::EF = t.get();
- Random Rand(0);
- MutationDispatcher MD(Rand, {});
+ FuzzerRandomLibc Rand(0);
+ MutationDispatcher MD(Rand);
int FoundMask = 0;
uint8_t INS0[8] = {0xF1, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66};
uint8_t INS1[8] = {0x00, 0xF2, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66};
@@ -188,55 +146,9 @@ TEST(FuzzerMutate, InsertByte2) {
TestInsertByte(&MutationDispatcher::Mutate, 1 << 17);
}
-void TestInsertRepeatedBytes(Mutator M, int NumIter) {
- std::unique_ptr<ExternalFunctions> t(new ExternalFunctions());
- fuzzer::EF = t.get();
- Random Rand(0);
- MutationDispatcher MD(Rand, {});
- int FoundMask = 0;
- uint8_t INS0[7] = {0x00, 0x11, 0x22, 0x33, 'a', 'a', 'a'};
- uint8_t INS1[7] = {0x00, 0x11, 0x22, 'a', 'a', 'a', 0x33};
- uint8_t INS2[7] = {0x00, 0x11, 'a', 'a', 'a', 0x22, 0x33};
- uint8_t INS3[7] = {0x00, 'a', 'a', 'a', 0x11, 0x22, 0x33};
- uint8_t INS4[7] = {'a', 'a', 'a', 0x00, 0x11, 0x22, 0x33};
-
- uint8_t INS5[8] = {0x00, 0x11, 0x22, 0x33, 'b', 'b', 'b', 'b'};
- uint8_t INS6[8] = {0x00, 0x11, 0x22, 'b', 'b', 'b', 'b', 0x33};
- uint8_t INS7[8] = {0x00, 0x11, 'b', 'b', 'b', 'b', 0x22, 0x33};
- uint8_t INS8[8] = {0x00, 'b', 'b', 'b', 'b', 0x11, 0x22, 0x33};
- uint8_t INS9[8] = {'b', 'b', 'b', 'b', 0x00, 0x11, 0x22, 0x33};
-
- for (int i = 0; i < NumIter; i++) {
- uint8_t T[8] = {0x00, 0x11, 0x22, 0x33};
- size_t NewSize = (MD.*M)(T, 4, 8);
- if (NewSize == 7 && !memcmp(INS0, T, 7)) FoundMask |= 1 << 0;
- if (NewSize == 7 && !memcmp(INS1, T, 7)) FoundMask |= 1 << 1;
- if (NewSize == 7 && !memcmp(INS2, T, 7)) FoundMask |= 1 << 2;
- if (NewSize == 7 && !memcmp(INS3, T, 7)) FoundMask |= 1 << 3;
- if (NewSize == 7 && !memcmp(INS4, T, 7)) FoundMask |= 1 << 4;
-
- if (NewSize == 8 && !memcmp(INS5, T, 8)) FoundMask |= 1 << 5;
- if (NewSize == 8 && !memcmp(INS6, T, 8)) FoundMask |= 1 << 6;
- if (NewSize == 8 && !memcmp(INS7, T, 8)) FoundMask |= 1 << 7;
- if (NewSize == 8 && !memcmp(INS8, T, 8)) FoundMask |= 1 << 8;
- if (NewSize == 8 && !memcmp(INS9, T, 8)) FoundMask |= 1 << 9;
-
- }
- EXPECT_EQ(FoundMask, (1 << 10) - 1);
-}
-
-TEST(FuzzerMutate, InsertRepeatedBytes1) {
- TestInsertRepeatedBytes(&MutationDispatcher::Mutate_InsertRepeatedBytes, 10000);
-}
-TEST(FuzzerMutate, InsertRepeatedBytes2) {
- TestInsertRepeatedBytes(&MutationDispatcher::Mutate, 300000);
-}
-
void TestChangeByte(Mutator M, int NumIter) {
- std::unique_ptr<ExternalFunctions> t(new ExternalFunctions());
- fuzzer::EF = t.get();
- Random Rand(0);
- MutationDispatcher MD(Rand, {});
+ FuzzerRandomLibc Rand(0);
+ MutationDispatcher MD(Rand);
int FoundMask = 0;
uint8_t CH0[8] = {0xF0, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
uint8_t CH1[8] = {0x00, 0xF1, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
@@ -269,10 +181,8 @@ TEST(FuzzerMutate, ChangeByte2) {
}
void TestChangeBit(Mutator M, int NumIter) {
- std::unique_ptr<ExternalFunctions> t(new ExternalFunctions());
- fuzzer::EF = t.get();
- Random Rand(0);
- MutationDispatcher MD(Rand, {});
+ FuzzerRandomLibc Rand(0);
+ MutationDispatcher MD(Rand);
int FoundMask = 0;
uint8_t CH0[8] = {0x01, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
uint8_t CH1[8] = {0x00, 0x13, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
@@ -305,10 +215,8 @@ TEST(FuzzerMutate, ChangeBit2) {
}
void TestShuffleBytes(Mutator M, int NumIter) {
- std::unique_ptr<ExternalFunctions> t(new ExternalFunctions());
- fuzzer::EF = t.get();
- Random Rand(0);
- MutationDispatcher MD(Rand, {});
+ FuzzerRandomLibc Rand(0);
+ MutationDispatcher MD(Rand);
int FoundMask = 0;
uint8_t CH0[7] = {0x00, 0x22, 0x11, 0x33, 0x44, 0x55, 0x66};
uint8_t CH1[7] = {0x11, 0x00, 0x33, 0x22, 0x44, 0x55, 0x66};
@@ -328,69 +236,19 @@ void TestShuffleBytes(Mutator M, int NumIter) {
}
TEST(FuzzerMutate, ShuffleBytes1) {
- TestShuffleBytes(&MutationDispatcher::Mutate_ShuffleBytes, 1 << 16);
+ TestShuffleBytes(&MutationDispatcher::Mutate_ShuffleBytes, 1 << 15);
}
TEST(FuzzerMutate, ShuffleBytes2) {
- TestShuffleBytes(&MutationDispatcher::Mutate, 1 << 20);
-}
-
-void TestCopyPart(Mutator M, int NumIter) {
- std::unique_ptr<ExternalFunctions> t(new ExternalFunctions());
- fuzzer::EF = t.get();
- Random Rand(0);
- MutationDispatcher MD(Rand, {});
- int FoundMask = 0;
- uint8_t CH0[7] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x00, 0x11};
- uint8_t CH1[7] = {0x55, 0x66, 0x22, 0x33, 0x44, 0x55, 0x66};
- uint8_t CH2[7] = {0x00, 0x55, 0x66, 0x33, 0x44, 0x55, 0x66};
- uint8_t CH3[7] = {0x00, 0x11, 0x22, 0x00, 0x11, 0x22, 0x66};
- uint8_t CH4[7] = {0x00, 0x11, 0x11, 0x22, 0x33, 0x55, 0x66};
-
- for (int i = 0; i < NumIter; i++) {
- uint8_t T[7] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66};
- size_t NewSize = (MD.*M)(T, 7, 7);
- if (NewSize == 7 && !memcmp(CH0, T, 7)) FoundMask |= 1 << 0;
- if (NewSize == 7 && !memcmp(CH1, T, 7)) FoundMask |= 1 << 1;
- if (NewSize == 7 && !memcmp(CH2, T, 7)) FoundMask |= 1 << 2;
- if (NewSize == 7 && !memcmp(CH3, T, 7)) FoundMask |= 1 << 3;
- if (NewSize == 7 && !memcmp(CH4, T, 7)) FoundMask |= 1 << 4;
- }
-
- uint8_t CH5[8] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x00, 0x11, 0x22};
- uint8_t CH6[8] = {0x22, 0x33, 0x44, 0x00, 0x11, 0x22, 0x33, 0x44};
- uint8_t CH7[8] = {0x00, 0x11, 0x22, 0x00, 0x11, 0x22, 0x33, 0x44};
- uint8_t CH8[8] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x22, 0x33, 0x44};
- uint8_t CH9[8] = {0x00, 0x11, 0x22, 0x22, 0x33, 0x44, 0x33, 0x44};
-
- for (int i = 0; i < NumIter; i++) {
- uint8_t T[8] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
- size_t NewSize = (MD.*M)(T, 5, 8);
- if (NewSize == 8 && !memcmp(CH5, T, 8)) FoundMask |= 1 << 5;
- if (NewSize == 8 && !memcmp(CH6, T, 8)) FoundMask |= 1 << 6;
- if (NewSize == 8 && !memcmp(CH7, T, 8)) FoundMask |= 1 << 7;
- if (NewSize == 8 && !memcmp(CH8, T, 8)) FoundMask |= 1 << 8;
- if (NewSize == 8 && !memcmp(CH9, T, 8)) FoundMask |= 1 << 9;
- }
-
- EXPECT_EQ(FoundMask, 1023);
-}
-
-TEST(FuzzerMutate, CopyPart1) {
- TestCopyPart(&MutationDispatcher::Mutate_CopyPart, 1 << 10);
-}
-TEST(FuzzerMutate, CopyPart2) {
- TestCopyPart(&MutationDispatcher::Mutate, 1 << 13);
+ TestShuffleBytes(&MutationDispatcher::Mutate, 1 << 19);
}
void TestAddWordFromDictionary(Mutator M, int NumIter) {
- std::unique_ptr<ExternalFunctions> t(new ExternalFunctions());
- fuzzer::EF = t.get();
- Random Rand(0);
- MutationDispatcher MD(Rand, {});
+ FuzzerRandomLibc Rand(0);
+ MutationDispatcher MD(Rand);
uint8_t Word1[4] = {0xAA, 0xBB, 0xCC, 0xDD};
uint8_t Word2[3] = {0xFF, 0xEE, 0xEF};
- MD.AddWordToManualDictionary(Word(Word1, sizeof(Word1)));
- MD.AddWordToManualDictionary(Word(Word2, sizeof(Word2)));
+ MD.AddWordToManualDictionary(Unit(Word1, Word1 + sizeof(Word1)));
+ MD.AddWordToManualDictionary(Unit(Word2, Word2 + sizeof(Word2)));
int FoundMask = 0;
uint8_t CH0[7] = {0x00, 0x11, 0x22, 0xAA, 0xBB, 0xCC, 0xDD};
uint8_t CH1[7] = {0x00, 0x11, 0xAA, 0xBB, 0xCC, 0xDD, 0x22};
@@ -424,11 +282,36 @@ TEST(FuzzerMutate, AddWordFromDictionary2) {
TestAddWordFromDictionary(&MutationDispatcher::Mutate, 1 << 15);
}
+void TestAddWordFromDictionaryWithHint(Mutator M, int NumIter) {
+ FuzzerRandomLibc Rand(0);
+ MutationDispatcher MD(Rand);
+ uint8_t Word[] = {0xAA, 0xBB, 0xCC, 0xDD, 0xFF, 0xEE, 0xEF};
+ size_t PosHint = 7777;
+ MD.AddWordToAutoDictionary(Unit(Word, Word + sizeof(Word)), PosHint);
+ int FoundMask = 0;
+ for (int i = 0; i < NumIter; i++) {
+ uint8_t T[10000];
+ memset(T, 0, sizeof(T));
+ size_t NewSize = (MD.*M)(T, 9000, 10000);
+ if (NewSize >= PosHint + sizeof(Word) &&
+ !memcmp(Word, T + PosHint, sizeof(Word)))
+ FoundMask = 1;
+ }
+ EXPECT_EQ(FoundMask, 1);
+}
+
+TEST(FuzzerMutate, AddWordFromDictionaryWithHint1) {
+ TestAddWordFromDictionaryWithHint(
+ &MutationDispatcher::Mutate_AddWordFromAutoDictionary, 1 << 5);
+}
+
+TEST(FuzzerMutate, AddWordFromDictionaryWithHint2) {
+ TestAddWordFromDictionaryWithHint(&MutationDispatcher::Mutate, 1 << 10);
+}
+
void TestChangeASCIIInteger(Mutator M, int NumIter) {
- std::unique_ptr<ExternalFunctions> t(new ExternalFunctions());
- fuzzer::EF = t.get();
- Random Rand(0);
- MutationDispatcher MD(Rand, {});
+ FuzzerRandomLibc Rand(0);
+ MutationDispatcher MD(Rand);
uint8_t CH0[8] = {'1', '2', '3', '4', '5', '6', '7', '7'};
uint8_t CH1[8] = {'1', '2', '3', '4', '5', '6', '7', '9'};
@@ -456,46 +339,6 @@ TEST(FuzzerMutate, ChangeASCIIInteger2) {
TestChangeASCIIInteger(&MutationDispatcher::Mutate, 1 << 15);
}
-void TestChangeBinaryInteger(Mutator M, int NumIter) {
- std::unique_ptr<ExternalFunctions> t(new ExternalFunctions());
- fuzzer::EF = t.get();
- Random Rand(0);
- MutationDispatcher MD(Rand, {});
-
- uint8_t CH0[8] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x79};
- uint8_t CH1[8] = {0x00, 0x11, 0x22, 0x31, 0x44, 0x55, 0x66, 0x77};
- uint8_t CH2[8] = {0xff, 0x10, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
- uint8_t CH3[8] = {0x00, 0x11, 0x2a, 0x33, 0x44, 0x55, 0x66, 0x77};
- uint8_t CH4[8] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x4f, 0x66, 0x77};
- uint8_t CH5[8] = {0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88};
- uint8_t CH6[8] = {0x00, 0x11, 0x22, 0x00, 0x00, 0x00, 0x08, 0x77}; // Size
- uint8_t CH7[8] = {0x00, 0x08, 0x00, 0x33, 0x44, 0x55, 0x66, 0x77}; // Sw(Size)
-
- int FoundMask = 0;
- for (int i = 0; i < NumIter; i++) {
- uint8_t T[8] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
- size_t NewSize = (MD.*M)(T, 8, 8);
- /**/ if (NewSize == 8 && !memcmp(CH0, T, 8)) FoundMask |= 1 << 0;
- else if (NewSize == 8 && !memcmp(CH1, T, 8)) FoundMask |= 1 << 1;
- else if (NewSize == 8 && !memcmp(CH2, T, 8)) FoundMask |= 1 << 2;
- else if (NewSize == 8 && !memcmp(CH3, T, 8)) FoundMask |= 1 << 3;
- else if (NewSize == 8 && !memcmp(CH4, T, 8)) FoundMask |= 1 << 4;
- else if (NewSize == 8 && !memcmp(CH5, T, 8)) FoundMask |= 1 << 5;
- else if (NewSize == 8 && !memcmp(CH6, T, 8)) FoundMask |= 1 << 6;
- else if (NewSize == 8 && !memcmp(CH7, T, 8)) FoundMask |= 1 << 7;
- }
- EXPECT_EQ(FoundMask, 255);
-}
-
-TEST(FuzzerMutate, ChangeBinaryInteger1) {
- TestChangeBinaryInteger(&MutationDispatcher::Mutate_ChangeBinaryInteger,
- 1 << 12);
-}
-
-TEST(FuzzerMutate, ChangeBinaryInteger2) {
- TestChangeBinaryInteger(&MutationDispatcher::Mutate, 1 << 15);
-}
-
TEST(FuzzerDictionary, ParseOneDictionaryEntry) {
Unit U;
@@ -557,205 +400,3 @@ TEST(FuzzerUtil, Base64) {
EXPECT_EQ("YWJjeHk=", Base64({'a', 'b', 'c', 'x', 'y'}));
EXPECT_EQ("YWJjeHl6", Base64({'a', 'b', 'c', 'x', 'y', 'z'}));
}
-
-TEST(Corpus, Distribution) {
- Random Rand(0);
- std::unique_ptr<InputCorpus> C(new InputCorpus(""));
- size_t N = 10;
- size_t TriesPerUnit = 1<<16;
- for (size_t i = 0; i < N; i++)
- C->AddToCorpus(Unit{ static_cast<uint8_t>(i) }, 1, false, {});
-
- std::vector<size_t> Hist(N);
- for (size_t i = 0; i < N * TriesPerUnit; i++) {
- Hist[C->ChooseUnitIdxToMutate(Rand)]++;
- }
- for (size_t i = 0; i < N; i++) {
- // A weak sanity check that every unit gets invoked.
- EXPECT_GT(Hist[i], TriesPerUnit / N / 3);
- }
-}
-
-TEST(Merge, Bad) {
- const char *kInvalidInputs[] = {
- "",
- "x",
- "3\nx",
- "2\n3",
- "2\n2",
- "2\n2\nA\n",
- "2\n2\nA\nB\nC\n",
- "0\n0\n",
- "1\n1\nA\nDONE 0",
- "1\n1\nA\nSTARTED 1",
- };
- Merger M;
- for (auto S : kInvalidInputs) {
- // fprintf(stderr, "TESTING:\n%s\n", S);
- EXPECT_FALSE(M.Parse(S, false));
- }
-}
-
-void EQ(const std::vector<uint32_t> &A, const std::vector<uint32_t> &B) {
- EXPECT_EQ(A, B);
-}
-
-void EQ(const std::vector<std::string> &A, const std::vector<std::string> &B) {
- std::set<std::string> a(A.begin(), A.end());
- std::set<std::string> b(B.begin(), B.end());
- EXPECT_EQ(a, b);
-}
-
-static void Merge(const std::string &Input,
- const std::vector<std::string> Result,
- size_t NumNewFeatures) {
- Merger M;
- std::vector<std::string> NewFiles;
- EXPECT_TRUE(M.Parse(Input, true));
- std::stringstream SS;
- M.PrintSummary(SS);
- EXPECT_EQ(NumNewFeatures, M.Merge(&NewFiles));
- EXPECT_EQ(M.AllFeatures(), M.ParseSummary(SS));
- EQ(NewFiles, Result);
-}
-
-TEST(Merge, Good) {
- Merger M;
-
- EXPECT_TRUE(M.Parse("1\n0\nAA\n", false));
- EXPECT_EQ(M.Files.size(), 1U);
- EXPECT_EQ(M.NumFilesInFirstCorpus, 0U);
- EXPECT_EQ(M.Files[0].Name, "AA");
- EXPECT_TRUE(M.LastFailure.empty());
- EXPECT_EQ(M.FirstNotProcessedFile, 0U);
-
- EXPECT_TRUE(M.Parse("2\n1\nAA\nBB\nSTARTED 0 42\n", false));
- EXPECT_EQ(M.Files.size(), 2U);
- EXPECT_EQ(M.NumFilesInFirstCorpus, 1U);
- EXPECT_EQ(M.Files[0].Name, "AA");
- EXPECT_EQ(M.Files[1].Name, "BB");
- EXPECT_EQ(M.LastFailure, "AA");
- EXPECT_EQ(M.FirstNotProcessedFile, 1U);
-
- EXPECT_TRUE(M.Parse("3\n1\nAA\nBB\nC\n"
- "STARTED 0 1000\n"
- "DONE 0 1 2 3\n"
- "STARTED 1 1001\n"
- "DONE 1 4 5 6 \n"
- "STARTED 2 1002\n"
- "", true));
- EXPECT_EQ(M.Files.size(), 3U);
- EXPECT_EQ(M.NumFilesInFirstCorpus, 1U);
- EXPECT_EQ(M.Files[0].Name, "AA");
- EXPECT_EQ(M.Files[0].Size, 1000U);
- EXPECT_EQ(M.Files[1].Name, "BB");
- EXPECT_EQ(M.Files[1].Size, 1001U);
- EXPECT_EQ(M.Files[2].Name, "C");
- EXPECT_EQ(M.Files[2].Size, 1002U);
- EXPECT_EQ(M.LastFailure, "C");
- EXPECT_EQ(M.FirstNotProcessedFile, 3U);
- EQ(M.Files[0].Features, {1, 2, 3});
- EQ(M.Files[1].Features, {4, 5, 6});
-
-
- std::vector<std::string> NewFiles;
-
- EXPECT_TRUE(M.Parse("3\n2\nAA\nBB\nC\n"
- "STARTED 0 1000\nDONE 0 1 2 3\n"
- "STARTED 1 1001\nDONE 1 4 5 6 \n"
- "STARTED 2 1002\nDONE 2 6 1 3 \n"
- "", true));
- EXPECT_EQ(M.Files.size(), 3U);
- EXPECT_EQ(M.NumFilesInFirstCorpus, 2U);
- EXPECT_TRUE(M.LastFailure.empty());
- EXPECT_EQ(M.FirstNotProcessedFile, 3U);
- EQ(M.Files[0].Features, {1, 2, 3});
- EQ(M.Files[1].Features, {4, 5, 6});
- EQ(M.Files[2].Features, {1, 3, 6});
- EXPECT_EQ(0U, M.Merge(&NewFiles));
- EQ(NewFiles, {});
-
- EXPECT_TRUE(M.Parse("3\n1\nA\nB\nC\n"
- "STARTED 0 1000\nDONE 0 1 2 3\n"
- "STARTED 1 1001\nDONE 1 4 5 6 \n"
- "STARTED 2 1002\nDONE 2 6 1 3\n"
- "", true));
- EQ(M.Files[0].Features, {1, 2, 3});
- EQ(M.Files[1].Features, {4, 5, 6});
- EQ(M.Files[2].Features, {1, 3, 6});
- EXPECT_EQ(3U, M.Merge(&NewFiles));
- EQ(NewFiles, {"B"});
-
- // Same as the above, but with InitialFeatures.
- EXPECT_TRUE(M.Parse("2\n0\nB\nC\n"
- "STARTED 0 1001\nDONE 0 4 5 6 \n"
- "STARTED 1 1002\nDONE 1 6 1 3\n"
- "", true));
- EQ(M.Files[0].Features, {4, 5, 6});
- EQ(M.Files[1].Features, {1, 3, 6});
- EXPECT_EQ(3U, M.Merge({1, 2, 3}, &NewFiles));
- EQ(NewFiles, {"B"});
-}
-
-TEST(Merge, Merge) {
-
- Merge("3\n1\nA\nB\nC\n"
- "STARTED 0 1000\nDONE 0 1 2 3\n"
- "STARTED 1 1001\nDONE 1 4 5 6 \n"
- "STARTED 2 1002\nDONE 2 6 1 3 \n",
- {"B"}, 3);
-
- Merge("3\n0\nA\nB\nC\n"
- "STARTED 0 2000\nDONE 0 1 2 3\n"
- "STARTED 1 1001\nDONE 1 4 5 6 \n"
- "STARTED 2 1002\nDONE 2 6 1 3 \n",
- {"A", "B", "C"}, 6);
-
- Merge("4\n0\nA\nB\nC\nD\n"
- "STARTED 0 2000\nDONE 0 1 2 3\n"
- "STARTED 1 1101\nDONE 1 4 5 6 \n"
- "STARTED 2 1102\nDONE 2 6 1 3 100 \n"
- "STARTED 3 1000\nDONE 3 1 \n",
- {"A", "B", "C", "D"}, 7);
-
- Merge("4\n1\nA\nB\nC\nD\n"
- "STARTED 0 2000\nDONE 0 4 5 6 7 8\n"
- "STARTED 1 1100\nDONE 1 1 2 3 \n"
- "STARTED 2 1100\nDONE 2 2 3 \n"
- "STARTED 3 1000\nDONE 3 1 \n",
- {"B", "D"}, 3);
-}
-
-TEST(Fuzzer, ForEachNonZeroByte) {
- const size_t N = 64;
- alignas(64) uint8_t Ar[N + 8] = {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 2, 0, 0, 0, 0, 0, 0,
- 0, 0, 3, 0, 4, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 5, 0, 6, 0, 0,
- 0, 0, 0, 0, 0, 0, 7, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 8,
- 9, 9, 9, 9, 9, 9, 9, 9,
- };
- typedef std::vector<std::pair<size_t, uint8_t> > Vec;
- Vec Res, Expected;
- auto CB = [&](size_t Idx, uint8_t V) { Res.push_back({Idx, V}); };
- ForEachNonZeroByte(Ar, Ar + N, 100, CB);
- Expected = {{108, 1}, {109, 2}, {118, 3}, {120, 4},
- {135, 5}, {137, 6}, {146, 7}, {163, 8}};
- EXPECT_EQ(Res, Expected);
-
- Res.clear();
- ForEachNonZeroByte(Ar + 9, Ar + N, 109, CB);
- Expected = { {109, 2}, {118, 3}, {120, 4},
- {135, 5}, {137, 6}, {146, 7}, {163, 8}};
- EXPECT_EQ(Res, Expected);
-
- Res.clear();
- ForEachNonZeroByte(Ar + 9, Ar + N - 9, 109, CB);
- Expected = { {109, 2}, {118, 3}, {120, 4},
- {135, 5}, {137, 6}, {146, 7}};
- EXPECT_EQ(Res, Expected);
-}
diff --git a/gnu/llvm/lib/Fuzzer/test/MemcmpTest.cpp b/gnu/llvm/lib/Fuzzer/test/MemcmpTest.cpp
index 8dbb7d84fbb..c19c95717bb 100644
--- a/gnu/llvm/lib/Fuzzer/test/MemcmpTest.cpp
+++ b/gnu/llvm/lib/Fuzzer/test/MemcmpTest.cpp
@@ -1,18 +1,15 @@
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
// Simple test for a fuzzer. The fuzzer must find a particular string.
+#include <cstring>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
-#include <cstring>
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
// TODO: check other sizes.
if (Size >= 8 && memcmp(Data, "01234567", 8) == 0) {
if (Size >= 12 && memcmp(Data + 8, "ABCD", 4) == 0) {
if (Size >= 14 && memcmp(Data + 12, "XY", 2) == 0) {
- if (Size >= 17 && memcmp(Data + 14, "KLM", 3) == 0) {
+ if (Size >= 16 && memcmp(Data + 14, "KLM", 3) == 0) {
if (Size >= 27 && memcmp(Data + 17, "ABCDE-GHIJ", 10) == 0){
fprintf(stderr, "BINGO %zd\n", Size);
for (size_t i = 0; i < Size; i++) {
diff --git a/gnu/llvm/lib/Fuzzer/test/NullDerefTest.cpp b/gnu/llvm/lib/Fuzzer/test/NullDerefTest.cpp
index 1b44b682ace..200c56ccbbc 100644
--- a/gnu/llvm/lib/Fuzzer/test/NullDerefTest.cpp
+++ b/gnu/llvm/lib/Fuzzer/test/NullDerefTest.cpp
@@ -1,10 +1,7 @@
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
// Simple test for a fuzzer. The fuzzer must find the string "Hi!".
-#include <cstddef>
#include <cstdint>
#include <cstdlib>
+#include <cstddef>
#include <iostream>
static volatile int Sink;
diff --git a/gnu/llvm/lib/Fuzzer/test/SimpleCmpTest.cpp b/gnu/llvm/lib/Fuzzer/test/SimpleCmpTest.cpp
index 8acad4ac77e..8568c737efb 100644
--- a/gnu/llvm/lib/Fuzzer/test/SimpleCmpTest.cpp
+++ b/gnu/llvm/lib/Fuzzer/test/SimpleCmpTest.cpp
@@ -1,47 +1,31 @@
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
// Simple test for a fuzzer. The fuzzer must find several narrow ranges.
#include <cstdint>
-#include <cstdio>
#include <cstdlib>
#include <cstring>
-
-extern int AllLines[];
-
-bool PrintOnce(int Line) {
- if (!AllLines[Line])
- fprintf(stderr, "Seen line %d\n", Line);
- AllLines[Line] = 1;
- return true;
-}
+#include <cstdio>
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
- if (Size != 22) return 0;
+ if (Size < 14) return 0;
uint64_t x = 0;
int64_t y = 0;
- int32_t z = 0;
- uint16_t a = 0;
- memcpy(&x, Data, 8); // 8
- memcpy(&y, Data + 8, 8); // 16
- memcpy(&z, Data + 16, sizeof(z)); // 20
- memcpy(&a, Data + 20, sizeof(a)); // 22
- const bool k32bit = sizeof(void*) == 4;
+ int z = 0;
+ unsigned short a = 0;
+ memcpy(&x, Data, 8);
+ memcpy(&y, Data + Size - 8, 8);
+ memcpy(&z, Data + Size / 2, sizeof(z));
+ memcpy(&a, Data + Size / 2 + 4, sizeof(a));
- if ((k32bit || x > 1234567890) && PrintOnce(__LINE__) &&
- (k32bit || x < 1234567895) && PrintOnce(__LINE__) &&
- a == 0x4242 && PrintOnce(__LINE__) &&
- (k32bit || y >= 987654321) && PrintOnce(__LINE__) &&
- (k32bit || y <= 987654325) && PrintOnce(__LINE__) &&
- z < -10000 && PrintOnce(__LINE__) &&
- z >= -10005 && PrintOnce(__LINE__) &&
- z != -10003 && PrintOnce(__LINE__) &&
- true) {
+ if (x > 1234567890 &&
+ x < 1234567895 &&
+ y >= 987654321 &&
+ y <= 987654325 &&
+ z < -10000 &&
+ z >= -10005 &&
+ z != -10003 &&
+ a == 4242) {
fprintf(stderr, "BINGO; Found the target: size %zd (%zd, %zd, %d, %d), exiting.\n",
Size, x, y, z, a);
exit(1);
}
return 0;
}
-
-int AllLines[__LINE__ + 1]; // Must be the last line.
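
One concrete input satisfying the rewritten predicate, as a hedged sketch (Size 28 is chosen so the four fields no longer overlap; MakeBingoInput is illustrative, not part of the test):

#include <cstdint>
#include <cstring>
#include <vector>

// Field layout at Size == 28: x in [0,8), z in [14,18), a in [18,20),
// y in [20,28) -- all disjoint, so each constraint can be set directly.
std::vector<uint8_t> MakeBingoInput() {
  std::vector<uint8_t> Data(28, 0);
  uint64_t X = 1234567892;   // 1234567890 < x < 1234567895
  int64_t Y = 987654323;     // 987654321 <= y <= 987654325
  int Z = -10001;            // -10005 <= z < -10000, z != -10003
  unsigned short A = 4242;
  memcpy(Data.data(), &X, 8);
  memcpy(Data.data() + Data.size() / 2, &Z, sizeof(Z));
  memcpy(Data.data() + Data.size() / 2 + 4, &A, sizeof(A));
  memcpy(Data.data() + Data.size() - 8, &Y, 8);
  return Data;
}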
diff --git a/gnu/llvm/lib/Fuzzer/test/SimpleDictionaryTest.cpp b/gnu/llvm/lib/Fuzzer/test/SimpleDictionaryTest.cpp
index a1cd2004722..b9cb2f0270a 100644
--- a/gnu/llvm/lib/Fuzzer/test/SimpleDictionaryTest.cpp
+++ b/gnu/llvm/lib/Fuzzer/test/SimpleDictionaryTest.cpp
@@ -1,13 +1,10 @@
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
// Simple test for a fuzzer.
// The fuzzer must find a string based on dictionary words:
// "Elvis"
// "Presley"
-#include <cstddef>
#include <cstdint>
#include <cstdlib>
+#include <cstddef>
#include <cstring>
#include <iostream>
diff --git a/gnu/llvm/lib/Fuzzer/test/SimpleHashTest.cpp b/gnu/llvm/lib/Fuzzer/test/SimpleHashTest.cpp
index a3f4211ebee..5bab3fa7f64 100644
--- a/gnu/llvm/lib/Fuzzer/test/SimpleHashTest.cpp
+++ b/gnu/llvm/lib/Fuzzer/test/SimpleHashTest.cpp
@@ -1,13 +1,10 @@
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
// This test computes a checksum of the data (all but the last 4 bytes),
// and then compares the last 4 bytes with the computed value.
// A fuzzer with cmp traces is expected to defeat this check.
#include <cstdint>
-#include <cstdio>
#include <cstdlib>
#include <cstring>
+#include <cstdio>
// A modified jenkins_one_at_a_time_hash initialized by non-zero,
// so that simple_hash(0) != 0. See also
@@ -26,7 +23,7 @@ static uint32_t simple_hash(const uint8_t *Data, size_t Size) {
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
- if (Size < 14 || Size > 64)
+ if (Size < 14)
return 0;
uint32_t Hash = simple_hash(&Data[0], Size - 4);
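
Constructing an input that passes this trailing-checksum scheme is mechanical once the hash is known; a hedged sketch (MakeSelfChecked is illustrative and simply encodes the property the test checks):

#include <cstdint>
#include <cstring>
#include <vector>

// Append the 4-byte hash of the payload, which is exactly what
// LLVMFuzzerTestOneInput verifies against the last 4 bytes.
std::vector<uint8_t> MakeSelfChecked(std::vector<uint8_t> Payload,
                                     uint32_t (*Hash)(const uint8_t *, size_t)) {
  uint32_t H = Hash(Payload.data(), Payload.size());
  uint8_t Tail[sizeof(H)];
  memcpy(Tail, &H, sizeof(H));
  Payload.insert(Payload.end(), Tail, Tail + sizeof(H));
  return Payload;
}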
diff --git a/gnu/llvm/lib/Fuzzer/test/SimpleTest.cpp b/gnu/llvm/lib/Fuzzer/test/SimpleTest.cpp
index a8b4988dff1..04225a889f5 100644
--- a/gnu/llvm/lib/Fuzzer/test/SimpleTest.cpp
+++ b/gnu/llvm/lib/Fuzzer/test/SimpleTest.cpp
@@ -1,11 +1,8 @@
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
// Simple test for a fuzzer. The fuzzer must find the string "Hi!".
#include <assert.h>
-#include <cstddef>
#include <cstdint>
#include <cstdlib>
+#include <cstddef>
#include <iostream>
static volatile int Sink;
diff --git a/gnu/llvm/lib/Fuzzer/test/StrcmpTest.cpp b/gnu/llvm/lib/Fuzzer/test/StrcmpTest.cpp
index e7636e8812f..835819ae2f4 100644
--- a/gnu/llvm/lib/Fuzzer/test/StrcmpTest.cpp
+++ b/gnu/llvm/lib/Fuzzer/test/StrcmpTest.cpp
@@ -1,12 +1,9 @@
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
// Break through a series of strcmp.
-#include <cassert>
+#include <cstring>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
-#include <cstring>
+#include <cassert>
bool Eq(const uint8_t *Data, size_t Size, const char *Str) {
char Buff[1024];
@@ -20,9 +17,9 @@ bool Eq(const uint8_t *Data, size_t Size, const char *Str) {
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
- if (Eq(Data, Size, "ABC") &&
- Size >= 3 && Eq(Data + 3, Size - 3, "QWER") &&
- Size >= 7 && Eq(Data + 7, Size - 7, "ZXCVN") &&
+ if (Eq(Data, Size, "AAA") &&
+ Size >= 3 && Eq(Data + 3, Size - 3, "BBBB") &&
+ Size >= 7 && Eq(Data + 7, Size - 7, "CCCCCC") &&
Size >= 14 && Data[13] == 42
) {
fprintf(stderr, "BINGO\n");
diff --git a/gnu/llvm/lib/Fuzzer/test/StrncmpTest.cpp b/gnu/llvm/lib/Fuzzer/test/StrncmpTest.cpp
index f71f01ee309..55344d75e0b 100644
--- a/gnu/llvm/lib/Fuzzer/test/StrncmpTest.cpp
+++ b/gnu/llvm/lib/Fuzzer/test/StrncmpTest.cpp
@@ -1,11 +1,8 @@
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
// Simple test for a fuzzer. The fuzzer must find a particular string.
+#include <cstring>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
-#include <cstring>
static volatile int sink;
@@ -17,7 +14,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
if (Size >= 8 && strncmp(S, "01234567", 8) == 0) {
if (Size >= 12 && strncmp(S + 8, "ABCD", 4) == 0) {
if (Size >= 14 && strncmp(S + 12, "XY", 2) == 0) {
- if (Size >= 17 && strncmp(S + 14, "KLM", 3) == 0) {
+ if (Size >= 16 && strncmp(S + 14, "KLM", 3) == 0) {
fprintf(stderr, "BINGO\n");
exit(1);
}
diff --git a/gnu/llvm/lib/Fuzzer/test/SwitchTest.cpp b/gnu/llvm/lib/Fuzzer/test/SwitchTest.cpp
index 86944cad21c..5de7fff7452 100644
--- a/gnu/llvm/lib/Fuzzer/test/SwitchTest.cpp
+++ b/gnu/llvm/lib/Fuzzer/test/SwitchTest.cpp
@@ -1,12 +1,9 @@
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
// Simple test for a fuzzer. The fuzzer must find the interesting switch value.
-#include <cstddef>
#include <cstdint>
-#include <cstdio>
#include <cstdlib>
+#include <cstdio>
#include <cstring>
+#include <cstddef>
static volatile int Sink;
diff --git a/gnu/llvm/lib/Fuzzer/test/ThreadedTest.cpp b/gnu/llvm/lib/Fuzzer/test/ThreadedTest.cpp
index bb51ba764eb..7aa114a41f3 100644
--- a/gnu/llvm/lib/Fuzzer/test/ThreadedTest.cpp
+++ b/gnu/llvm/lib/Fuzzer/test/ThreadedTest.cpp
@@ -1,10 +1,7 @@
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
// Threaded test for a fuzzer. The fuzzer should not crash.
#include <assert.h>
-#include <cstddef>
#include <cstdint>
+#include <cstddef>
#include <cstring>
#include <thread>
diff --git a/gnu/llvm/lib/Fuzzer/test/TimeoutTest.cpp b/gnu/llvm/lib/Fuzzer/test/TimeoutTest.cpp
index e3cdba3eec3..71790ded95a 100644
--- a/gnu/llvm/lib/Fuzzer/test/TimeoutTest.cpp
+++ b/gnu/llvm/lib/Fuzzer/test/TimeoutTest.cpp
@@ -1,10 +1,7 @@
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
// Simple test for a fuzzer. The fuzzer must find the string "Hi!".
-#include <cstddef>
#include <cstdint>
#include <cstdlib>
+#include <cstddef>
#include <iostream>
static volatile int Sink;
diff --git a/gnu/llvm/lib/Fuzzer/test/fuzzer-timeout.test b/gnu/llvm/lib/Fuzzer/test/fuzzer-timeout.test
index beb08671183..c3a9e8a3a9e 100644
--- a/gnu/llvm/lib/Fuzzer/test/fuzzer-timeout.test
+++ b/gnu/llvm/lib/Fuzzer/test/fuzzer-timeout.test
@@ -7,13 +7,7 @@ TimeoutTest: #1
TimeoutTest: #2
TimeoutTest: SUMMARY: libFuzzer: timeout
-RUN: not LLVMFuzzer-TimeoutTest -timeout=1 %S/hi.txt 2>&1 | FileCheck %s --check-prefix=SingleInputTimeoutTest
-SingleInputTimeoutTest: ALARM: working on the last Unit for {{[1-3]}} seconds
+RUN: not LLVMFuzzer-TimeoutTest -timeout=1 -test_single_input=%S/hi.txt 2>&1 | FileCheck %s --check-prefix=SingleInputTimeoutTest
+SingleInputTimeoutTest: ALARM: working on the last Unit for
SingleInputTimeoutTest-NOT: Test unit written to ./timeout-
-RUN: LLVMFuzzer-TimeoutTest -timeout=1 -timeout_exitcode=0
-
-RUN: not LLVMFuzzer-TimeoutEmptyTest -timeout=1 2>&1 | FileCheck %s --check-prefix=TimeoutEmptyTest
-TimeoutEmptyTest: ALARM: working on the last Unit for
-TimeoutEmptyTest: == ERROR: libFuzzer: timeout after
-TimeoutEmptyTest: SUMMARY: libFuzzer: timeout
diff --git a/gnu/llvm/lib/Fuzzer/test/fuzzer.test b/gnu/llvm/lib/Fuzzer/test/fuzzer.test
index ff46d32b387..c63014f59d6 100644
--- a/gnu/llvm/lib/Fuzzer/test/fuzzer.test
+++ b/gnu/llvm/lib/Fuzzer/test/fuzzer.test
@@ -2,59 +2,35 @@ CHECK: BINGO
Done1000000: Done 1000000 runs in
RUN: LLVMFuzzer-SimpleTest 2>&1 | FileCheck %s
+RUN: not LLVMFuzzer-NullDerefTest -test_single_input=%S/hi.txt 2>&1 | FileCheck %s --check-prefix=SingleInput
+SingleInput-NOT: Test unit written to ./crash-
-# only_ascii mode. Will perform some minimal self-validation.
-RUN: LLVMFuzzer-SimpleTest -only_ascii=1 2>&1
-
-RUN: LLVMFuzzer-SimpleCmpTest -max_total_time=1 -use_cmp=0 2>&1 | FileCheck %s --check-prefix=MaxTotalTime
+RUN: LLVMFuzzer-SimpleCmpTest -max_total_time=1 2>&1 | FileCheck %s --check-prefix=MaxTotalTime
MaxTotalTime: Done {{.*}} runs in {{.}} second(s)
-RUN: not LLVMFuzzer-NullDerefTest 2>&1 | FileCheck %s --check-prefix=NullDerefTest
-RUN: not LLVMFuzzer-NullDerefTest -close_fd_mask=3 2>&1 | FileCheck %s --check-prefix=NullDerefTest
-NullDerefTest: ERROR: AddressSanitizer: {{SEGV|access-violation}} on unknown address
+RUN: not LLVMFuzzer-NullDerefTest 2>&1 | FileCheck %s --check-prefix=NullDerefTest
NullDerefTest: Test unit written to ./crash-
RUN: not LLVMFuzzer-NullDerefTest -artifact_prefix=ZZZ 2>&1 | FileCheck %s --check-prefix=NullDerefTestPrefix
NullDerefTestPrefix: Test unit written to ZZZcrash-
RUN: not LLVMFuzzer-NullDerefTest -artifact_prefix=ZZZ -exact_artifact_path=FOOBAR 2>&1 | FileCheck %s --check-prefix=NullDerefTestExactPath
NullDerefTestExactPath: Test unit written to FOOBAR
-RUN: not LLVMFuzzer-NullDerefOnEmptyTest -print_final_stats=1 2>&1 | FileCheck %s --check-prefix=NULL_DEREF_ON_EMPTY
-NULL_DEREF_ON_EMPTY: stat::number_of_executed_units:
-
#not LLVMFuzzer-FullCoverageSetTest -timeout=15 -seed=1 -mutate_depth=2 -use_full_coverage_set=1 2>&1 | FileCheck %s
-RUN: not LLVMFuzzer-CounterTest -max_len=6 -seed=1 -timeout=15 2>&1 | FileCheck %s --check-prefix=COUNTERS
+RUN: not LLVMFuzzer-CounterTest -use_counters=1 -max_len=6 -seed=1 -timeout=15 2>&1 | FileCheck %s
-COUNTERS: INITED {{.*}} {{bits:|ft:}}
-COUNTERS: NEW {{.*}} {{bits:|ft:}} {{[1-9]*}}
-COUNTERS: NEW {{.*}} {{bits:|ft:}} {{[1-9]*}}
-COUNTERS: BINGO
-
-# Don't run UninstrumentedTest for now since we build libFuzzer itself with asan.
-DISABLED: not LLVMFuzzer-UninstrumentedTest-Uninstrumented 2>&1 | FileCheck %s --check-prefix=UNINSTRUMENTED
-UNINSTRUMENTED: ERROR: __sanitizer_set_death_callback is not defined. Exiting.
+RUN: not LLVMFuzzer-CallerCalleeTest -cross_over=0 -max_len=6 -seed=1 -timeout=15 2>&1 | FileCheck %s
+# This one is flaky, may actually find the goal even w/o use_indir_calls.
+# LLVMFuzzer-CallerCalleeTest -use_indir_calls=0 -cross_over=0 -max_len=6 -seed=1 -runs=1000000 2>&1 | FileCheck %s --check-prefix=Done1000000
-RUN: not LLVMFuzzer-NotinstrumentedTest-NoCoverage 2>&1 | FileCheck %s --check-prefix=NO_COVERAGE
-NO_COVERAGE: ERROR: no interesting inputs were found. Is the code instrumented for coverage? Exiting
-RUN: not LLVMFuzzer-BufferOverflowOnInput 2>&1 | FileCheck %s --check-prefix=OOB
-OOB: AddressSanitizer: heap-buffer-overflow
-OOB: is located 0 bytes to the right of 3-byte region
+RUN: not LLVMFuzzer-UserSuppliedFuzzerTest -seed=1 -timeout=15 2>&1 | FileCheck %s
-RUN: not LLVMFuzzer-InitializeTest -use_value_profile=1 2>&1 | FileCheck %s
-
-RUN: not LLVMFuzzer-DSOTest 2>&1 | FileCheck %s --check-prefix=DSO
-DSO: INFO: Loaded 3 modules
-DSO: BINGO
-
-RUN: LLVMFuzzer-SimpleTest -exit_on_src_pos=SimpleTest.cpp:17 2>&1 | FileCheck %s --check-prefix=EXIT_ON_SRC_POS
-RUN: LLVMFuzzer-ShrinkControlFlowTest -exit_on_src_pos=ShrinkControlFlowTest.cpp:23 2>&1 | FileCheck %s --check-prefix=EXIT_ON_SRC_POS
-EXIT_ON_SRC_POS: INFO: found line matching '{{.*}}', exiting.
+RUN: not LLVMFuzzer-UninstrumentedTest-Uninstrumented 2>&1 | FileCheck %s --check-prefix=UNINSTRUMENTED
+UNINSTRUMENTED: ERROR: __sanitizer_set_death_callback is not defined. Exiting.
-RUN: env ASAN_OPTIONS=strict_string_checks=1 not LLVMFuzzer-StrncmpOOBTest -seed=1 -runs=1000000 2>&1 | FileCheck %s --check-prefix=STRNCMP
-STRNCMP: AddressSanitizer: heap-buffer-overflow
-STRNCMP-NOT: __sanitizer_weak_hook_strncmp
-STRNCMP: in LLVMFuzzerTestOneInput
+RUN: LLVMFuzzer-SimpleTest -print_new_cov_pcs=1 2>&1 | FileCheck %s --check-prefix=PCS
+PCS:{{^0x[a-f0-9]+}}
+PCS:NEW
+PCS:BINGO
-RUN: not LLVMFuzzer-BogusInitializeTest 2>&1 | FileCheck %s --check-prefix=BOGUS_INITIALIZE
-BOGUS_INITIALIZE: argv[0] has been modified in LLVMFuzzerInitialize
diff --git a/gnu/llvm/lib/Fuzzer/test/lit.cfg b/gnu/llvm/lib/Fuzzer/test/lit.cfg
index 85c95b42d1e..2140a97668b 100644
--- a/gnu/llvm/lib/Fuzzer/test/lit.cfg
+++ b/gnu/llvm/lib/Fuzzer/test/lit.cfg
@@ -1,28 +1,10 @@
import lit.formats
-import sys
config.name = "LLVMFuzzer"
config.test_format = lit.formats.ShTest(True)
config.suffixes = ['.test']
config.test_source_root = os.path.dirname(__file__)
-# Choose between lit's internal shell pipeline runner and a real shell. If
-# LIT_USE_INTERNAL_SHELL is in the environment, we use that as an override.
-use_lit_shell = os.environ.get("LIT_USE_INTERNAL_SHELL")
-if use_lit_shell:
- # 0 is external, "" is default, and everything else is internal.
- execute_external = (use_lit_shell == "0")
-else:
- # Otherwise we default to internal on Windows and external elsewhere, as
- # bash on Windows is usually very slow.
- execute_external = (not sys.platform in ['win32'])
-
-# testFormat: The test format to use to interpret tests.
-#
-# For now we require '&&' between commands, until they get globally killed and
-# the test runner updated.
-config.test_format = lit.formats.ShTest(execute_external)
-
# Tweak PATH to include llvm tools dir and current exec dir.
llvm_tools_dir = getattr(config, 'llvm_tools_dir', None)
if (not llvm_tools_dir) or (not os.path.exists(llvm_tools_dir)):
@@ -31,25 +13,3 @@ path = os.path.pathsep.join((llvm_tools_dir, config.test_exec_root,
config.environment['PATH']))
config.environment['PATH'] = path
-if config.has_lsan:
- lit_config.note('lsan feature available')
- config.available_features.add('lsan')
-else:
- lit_config.note('lsan feature unavailable')
-
-if sys.platform.startswith('win') or sys.platform.startswith('cygwin'):
- config.available_features.add('windows')
-
-if sys.platform.startswith('darwin'):
- config.available_features.add('darwin')
-
-if config.is_posix:
- config.available_features.add('posix')
-
-if sys.platform.startswith('linux'):
- # Note the value of ``sys.platform`` is not consistent
- # between python 2 and 3, hence the use of ``.startswith()``.
- lit_config.note('linux feature available')
- config.available_features.add('linux')
-else:
- lit_config.note('linux feature unavailable')
diff --git a/gnu/llvm/lib/Fuzzer/test/lit.site.cfg.in b/gnu/llvm/lib/Fuzzer/test/lit.site.cfg.in
index 069f2b72c0d..e520db8e881 100644
--- a/gnu/llvm/lib/Fuzzer/test/lit.site.cfg.in
+++ b/gnu/llvm/lib/Fuzzer/test/lit.site.cfg.in
@@ -1,5 +1,3 @@
config.test_exec_root = "@CMAKE_CURRENT_BINARY_DIR@"
config.llvm_tools_dir = "@LLVM_TOOLS_DIR@"
-config.has_lsan = True if @HAS_LSAN@ == 1 else False
-config.is_posix = @LIBFUZZER_POSIX@
lit_config.load_config(config, "@CMAKE_CURRENT_SOURCE_DIR@/lit.cfg")
diff --git a/gnu/llvm/lib/Fuzzer/test/merge.test b/gnu/llvm/lib/Fuzzer/test/merge.test
index e59da8c3e09..57ecc141bbf 100644
--- a/gnu/llvm/lib/Fuzzer/test/merge.test
+++ b/gnu/llvm/lib/Fuzzer/test/merge.test
@@ -1,16 +1,15 @@
CHECK: BINGO
-RUN: rm -rf %tmp/T0 %tmp/T1 %tmp/T2
-RUN: mkdir -p %tmp/T0 %tmp/T1 %tmp/T2
-RUN: echo F..... > %tmp/T0/1
-RUN: echo .U.... > %tmp/T0/2
-RUN: echo ..Z... > %tmp/T0/3
+RUN: rm -rf %tmp/T1 %tmp/T2
+RUN: mkdir -p %tmp/T1 %tmp/T2
+RUN: echo F..... > %tmp/T1/1
+RUN: echo .U.... > %tmp/T1/2
+RUN: echo ..Z... > %tmp/T1/3
# T1 has 3 elements, T2 is empty.
-RUN: cp %tmp/T0/* %tmp/T1/
-RUN: LLVMFuzzer-FullCoverageSetTest -merge=1 %tmp/T1 %tmp/T2 2>&1 | FileCheck %s --check-prefix=CHECK1
-CHECK1: MERGE-OUTER: 3 files, 3 in the initial corpus
-CHECK1: MERGE-OUTER: 0 new files with 0 new features added
+RUN: LLVMFuzzer-FullCoverageSetTest -merge=1 %tmp/T1 %tmp/T2 2>&1 | FileCheck %s --check-prefix=CHECK1
+CHECK1: Merge: running the initial corpus {{.*}} of 3 units
+CHECK1: Merge: written 0 out of 0 units
RUN: echo ...Z.. > %tmp/T2/1
RUN: echo ....E. > %tmp/T2/2
@@ -20,34 +19,11 @@ RUN: echo .U.... > %tmp/T2/b
RUN: echo ..Z... > %tmp/T2/c
# T1 has 3 elements, T2 has 6 elements, only 3 are new.
-RUN: LLVMFuzzer-FullCoverageSetTest -merge=1 %tmp/T1 %tmp/T2 2>&1 | FileCheck %s --check-prefix=CHECK2
-CHECK2: MERGE-OUTER: 9 files, 3 in the initial corpus
-CHECK2: MERGE-OUTER: 3 new files with 3 new features added
+RUN: LLVMFuzzer-FullCoverageSetTest -merge=1 %tmp/T1 %tmp/T2 2>&1 | FileCheck %s --check-prefix=CHECK2
+CHECK2: Merge: running the initial corpus {{.*}} of 3 units
+CHECK2: Merge: written 3 out of 6 units
# Now, T1 has 6 units and T2 has no new interesting units.
-RUN: LLVMFuzzer-FullCoverageSetTest -merge=1 %tmp/T1 %tmp/T2 2>&1 | FileCheck %s --check-prefix=CHECK3
-CHECK3: MERGE-OUTER: 12 files, 6 in the initial corpus
-CHECK3: MERGE-OUTER: 0 new files with 0 new features added
-
-# Check that we respect max_len during the merge and don't crash.
-RUN: rm %tmp/T1/*
-RUN: cp %tmp/T0/* %tmp/T1/
-RUN: echo looooooooong > %tmp/T2/looooooooong
-RUN: LLVMFuzzer-FullCoverageSetTest -merge=1 %tmp/T1 %tmp/T2 -max_len=6 2>&1 | FileCheck %s --check-prefix=MAX_LEN
-MAX_LEN: MERGE-OUTER: 3 new files
-
-# Check that merge tolerates failures.
-RUN: rm %tmp/T1/*
-RUN: cp %tmp/T0/* %tmp/T1/
-RUN: echo 'FUZZER' > %tmp/T2/FUZZER
-RUN: LLVMFuzzer-FullCoverageSetTest -merge=1 %tmp/T1 %tmp/T2 2>&1 | FileCheck %s --check-prefix=MERGE_WITH_CRASH
-MERGE_WITH_CRASH: MERGE-OUTER: succesfull in 2 attempt(s)
-MERGE_WITH_CRASH: MERGE-OUTER: 3 new files
-
-# Check that we actually limit the size with max_len
-RUN: LLVMFuzzer-FullCoverageSetTest -merge=1 %tmp/T1 %tmp/T2 -max_len=5 2>&1 | FileCheck %s --check-prefix=MERGE_LEN5
-MERGE_LEN5: MERGE-OUTER: succesfull in 1 attempt(s)
-
-RUN: rm -rf %tmp/T1/* %tmp/T2/*
-RUN: not LLVMFuzzer-FullCoverageSetTest -merge=1 %tmp/T1 %tmp/T2 2>&1 | FileCheck %s --check-prefix=EMPTY
-EMPTY: MERGE-OUTER: zero succesfull attempts, exiting
+RUN: LLVMFuzzer-FullCoverageSetTest -merge=1 %tmp/T1 %tmp/T2 2>&1 | FileCheck %s --check-prefix=CHECK3
+CHECK3: Merge: running the initial corpus {{.*}} of 6 units
+CHECK3: Merge: written 0 out of 6 units
diff --git a/gnu/llvm/lib/Fuzzer/test/uninstrumented/CMakeLists.txt b/gnu/llvm/lib/Fuzzer/test/uninstrumented/CMakeLists.txt
index f4ab59e5b18..443ba3716f6 100644
--- a/gnu/llvm/lib/Fuzzer/test/uninstrumented/CMakeLists.txt
+++ b/gnu/llvm/lib/Fuzzer/test/uninstrumented/CMakeLists.txt
@@ -1,13 +1,14 @@
-# These tests are not instrumented with coverage and don't
-# have coverage rt in the binary.
+# These tests are not instrumented with coverage.
-set(CMAKE_CXX_FLAGS
- "${LIBFUZZER_FLAGS_BASE} -fno-sanitize=all -fno-sanitize-coverage=edge,trace-cmp,indirect-calls,8bit-counters,trace-pc-guard")
-
-set(UninstrumentedTests
- UninstrumentedTest
- )
+set(CMAKE_CXX_FLAGS_RELEASE
+ "${LIBFUZZER_FLAGS_BASE} -O0 -fno-sanitize=all")
foreach(Test ${UninstrumentedTests})
- add_libfuzzer_test(${Test}-Uninstrumented SOURCES ../${Test}.cpp)
+ add_executable(LLVMFuzzer-${Test}-Uninstrumented
+ ../${Test}.cpp
+ )
+ target_link_libraries(LLVMFuzzer-${Test}-Uninstrumented
+ LLVMFuzzer
+ )
endforeach()
+
diff --git a/gnu/llvm/lib/IR/GCOV.cpp b/gnu/llvm/lib/IR/GCOV.cpp
index d4b45522822..35b8157751b 100644
--- a/gnu/llvm/lib/IR/GCOV.cpp
+++ b/gnu/llvm/lib/IR/GCOV.cpp
@@ -17,11 +17,11 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryObject.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <system_error>
-
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -103,17 +103,11 @@ bool GCOVFile::readGCDA(GCOVBuffer &Buffer) {
return true;
}
-void GCOVFile::print(raw_ostream &OS) const {
- for (const auto &FPtr : Functions)
- FPtr->print(OS);
-}
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dump - Dump GCOVFile content to dbgs() for debugging purposes.
-LLVM_DUMP_METHOD void GCOVFile::dump() const {
- print(dbgs());
+void GCOVFile::dump() const {
+ for (const auto &FPtr : Functions)
+ FPtr->dump();
}
-#endif
/// collectLineCounts - Collect line counts. This must be used after
/// reading .gcno and .gcda files.
@@ -253,12 +247,10 @@ bool GCOVFunction::readGCNO(GCOVBuffer &Buff, GCOV::GCOVVersion Version) {
/// readGCDA - Read a function from the GCDA buffer. Return false if an error
/// occurs.
bool GCOVFunction::readGCDA(GCOVBuffer &Buff, GCOV::GCOVVersion Version) {
- uint32_t HeaderLength;
- if (!Buff.readInt(HeaderLength))
+ uint32_t Dummy;
+ if (!Buff.readInt(Dummy))
return false; // Function header length
- uint64_t EndPos = Buff.getCursor() + HeaderLength * sizeof(uint32_t);
-
uint32_t GCDAIdent;
if (!Buff.readInt(GCDAIdent))
return false;
@@ -288,15 +280,13 @@ bool GCOVFunction::readGCDA(GCOVBuffer &Buff, GCOV::GCOVVersion Version) {
}
}
- if (Buff.getCursor() < EndPos) {
- StringRef GCDAName;
- if (!Buff.readString(GCDAName))
- return false;
- if (Name != GCDAName) {
- errs() << "Function names do not match: " << Name << " != " << GCDAName
- << ".\n";
- return false;
- }
+ StringRef GCDAName;
+ if (!Buff.readString(GCDAName))
+ return false;
+ if (Name != GCDAName) {
+ errs() << "Function names do not match: " << Name << " != " << GCDAName
+ << ".\n";
+ return false;
}
if (!Buff.readArcTag()) {
@@ -349,19 +339,13 @@ uint64_t GCOVFunction::getExitCount() const {
return Blocks.back()->getCount();
}
-void GCOVFunction::print(raw_ostream &OS) const {
- OS << "===== " << Name << " (" << Ident << ") @ " << Filename << ":"
- << LineNumber << "\n";
- for (const auto &Block : Blocks)
- Block->print(OS);
-}
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dump - Dump GCOVFunction content to dbgs() for debugging purposes.
-LLVM_DUMP_METHOD void GCOVFunction::dump() const {
- print(dbgs());
+void GCOVFunction::dump() const {
+ dbgs() << "===== " << Name << " (" << Ident << ") @ " << Filename << ":"
+ << LineNumber << "\n";
+ for (const auto &Block : Blocks)
+ Block->dump();
}
-#endif
/// collectLineCounts - Collect line counts. This must be used after
/// reading .gcno and .gcda files.
@@ -412,35 +396,29 @@ void GCOVBlock::collectLineCounts(FileInfo &FI) {
FI.addBlockLine(Parent.getFilename(), N, this);
}
-void GCOVBlock::print(raw_ostream &OS) const {
- OS << "Block : " << Number << " Counter : " << Counter << "\n";
+/// dump - Dump GCOVBlock content to dbgs() for debugging purposes.
+void GCOVBlock::dump() const {
+ dbgs() << "Block : " << Number << " Counter : " << Counter << "\n";
if (!SrcEdges.empty()) {
- OS << "\tSource Edges : ";
+ dbgs() << "\tSource Edges : ";
for (const GCOVEdge *Edge : SrcEdges)
- OS << Edge->Src.Number << " (" << Edge->Count << "), ";
- OS << "\n";
+ dbgs() << Edge->Src.Number << " (" << Edge->Count << "), ";
+ dbgs() << "\n";
}
if (!DstEdges.empty()) {
- OS << "\tDestination Edges : ";
+ dbgs() << "\tDestination Edges : ";
for (const GCOVEdge *Edge : DstEdges)
- OS << Edge->Dst.Number << " (" << Edge->Count << "), ";
- OS << "\n";
+ dbgs() << Edge->Dst.Number << " (" << Edge->Count << "), ";
+ dbgs() << "\n";
}
if (!Lines.empty()) {
- OS << "\tLines : ";
+ dbgs() << "\tLines : ";
for (uint32_t N : Lines)
- OS << (N) << ",";
- OS << "\n";
+ dbgs() << (N) << ",";
+ dbgs() << "\n";
}
}
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-/// dump - Dump GCOVBlock content to dbgs() for debugging purposes.
-LLVM_DUMP_METHOD void GCOVBlock::dump() const {
- print(dbgs());
-}
-#endif
-
//===----------------------------------------------------------------------===//
// FileInfo implementation.
@@ -518,7 +496,7 @@ public:
OS << format("%5u:", LineNum) << Line << "\n";
}
};
-} // end anonymous namespace
+}
/// Convert a path to a gcov filename. If PreservePaths is true, this
/// translates "/" to "#", ".." to "^", and drops ".", to match gcov.
@@ -589,12 +567,8 @@ FileInfo::openCoveragePath(StringRef CoveragePath) {
/// print - Print source files with collected line count information.
void FileInfo::print(raw_ostream &InfoOS, StringRef MainFilename,
StringRef GCNOFile, StringRef GCDAFile) {
- SmallVector<StringRef, 4> Filenames;
- for (const auto &LI : LineInfo)
- Filenames.push_back(LI.first());
- std::sort(Filenames.begin(), Filenames.end());
-
- for (StringRef Filename : Filenames) {
+ for (const auto &LI : LineInfo) {
+ StringRef Filename = LI.first();
auto AllLines = LineConsumer(Filename);
std::string CoveragePath = getCoveragePath(Filename, MainFilename);
@@ -607,7 +581,7 @@ void FileInfo::print(raw_ostream &InfoOS, StringRef MainFilename,
CovOS << " -: 0:Runs:" << RunCount << "\n";
CovOS << " -: 0:Programs:" << ProgramCount << "\n";
- const LineData &Line = LineInfo[Filename];
+ const LineData &Line = LI.second;
GCOVCoverage FileCoverage(Filename);
for (uint32_t LineIndex = 0; LineIndex < Line.LastLine || !AllLines.empty();
++LineIndex) {
@@ -709,6 +683,7 @@ void FileInfo::print(raw_ostream &InfoOS, StringRef MainFilename,
if (Options.FuncCoverage)
printFuncCoverage(InfoOS);
printFileCoverage(InfoOS);
+ return;
}
/// printFunctionSummary - Print function and block summary.
diff --git a/gnu/llvm/lib/Target/AMDGPU/CIInstructions.td b/gnu/llvm/lib/Target/AMDGPU/CIInstructions.td
index 26a483a8abf..c543814cae0 100644
--- a/gnu/llvm/lib/Target/AMDGPU/CIInstructions.td
+++ b/gnu/llvm/lib/Target/AMDGPU/CIInstructions.td
@@ -12,4 +12,322 @@
// S_CBRANCH_CDBGUSER
// S_CBRANCH_CDBGSYS
// S_CBRANCH_CDBGSYS_OR_USER
-// S_CBRANCH_CDBGSYS_AND_USER
\ No newline at end of file
+// S_CBRANCH_CDBGSYS_AND_USER
+// DS_NOP
+// DS_GWS_SEMA_RELEASE_ALL
+// DS_WRAP_RTN_B32
+// DS_CNDXCHG32_RTN_B64
+// DS_WRITE_B96
+// DS_WRITE_B128
+// DS_CONDXCHG32_RTN_B128
+// DS_READ_B96
+// DS_READ_B128
+// BUFFER_LOAD_DWORDX3
+// BUFFER_STORE_DWORDX3
+
+
+def isCIVI : Predicate <
+ "Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS || "
+ "Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS"
+>, AssemblerPredicate<"FeatureCIInsts">;
+
+def HasFlatAddressSpace : Predicate<"Subtarget->hasFlatAddressSpace()">;
+
+//===----------------------------------------------------------------------===//
+// VOP1 Instructions
+//===----------------------------------------------------------------------===//
+
+let SubtargetPredicate = isCIVI in {
+
+let SchedRW = [WriteDoubleAdd] in {
+defm V_TRUNC_F64 : VOP1Inst <vop1<0x17>, "v_trunc_f64",
+ VOP_F64_F64, ftrunc
+>;
+defm V_CEIL_F64 : VOP1Inst <vop1<0x18>, "v_ceil_f64",
+ VOP_F64_F64, fceil
+>;
+defm V_FLOOR_F64 : VOP1Inst <vop1<0x1A>, "v_floor_f64",
+ VOP_F64_F64, ffloor
+>;
+defm V_RNDNE_F64 : VOP1Inst <vop1<0x19>, "v_rndne_f64",
+ VOP_F64_F64, frint
+>;
+} // End SchedRW = [WriteDoubleAdd]
+
+let SchedRW = [WriteQuarterRate32] in {
+defm V_LOG_LEGACY_F32 : VOP1Inst <vop1<0x45, 0x4c>, "v_log_legacy_f32",
+ VOP_F32_F32
+>;
+defm V_EXP_LEGACY_F32 : VOP1Inst <vop1<0x46, 0x4b>, "v_exp_legacy_f32",
+ VOP_F32_F32
+>;
+} // End SchedRW = [WriteQuarterRate32]
+
+//===----------------------------------------------------------------------===//
+// VOP3 Instructions
+//===----------------------------------------------------------------------===//
+
+defm V_QSAD_PK_U16_U8 : VOP3Inst <vop3<0x173>, "v_qsad_pk_u16_u8",
+ VOP_I32_I32_I32
+>;
+defm V_MQSAD_U16_U8 : VOP3Inst <vop3<0x172>, "v_mqsad_u16_u8",
+ VOP_I32_I32_I32
+>;
+defm V_MQSAD_U32_U8 : VOP3Inst <vop3<0x175>, "v_mqsad_u32_u8",
+ VOP_I32_I32_I32
+>;
+
+let isCommutable = 1 in {
+defm V_MAD_U64_U32 : VOP3Inst <vop3<0x176>, "v_mad_u64_u32",
+ VOP_I64_I32_I32_I64
+>;
+
+// XXX - Does this set VCC?
+defm V_MAD_I64_I32 : VOP3Inst <vop3<0x177>, "v_mad_i64_i32",
+ VOP_I64_I32_I32_I64
+>;
+} // End isCommutable = 1
+
+
+//===----------------------------------------------------------------------===//
+// DS Instructions
+//===----------------------------------------------------------------------===//
+defm DS_WRAP_RTN_F32 : DS_1A1D_RET <0x34, "ds_wrap_rtn_f32", VGPR_32, "ds_wrap_f32">;
+
+// DS_CONDXCHG32_RTN_B64
+// DS_CONDXCHG32_RTN_B128
+
+//===----------------------------------------------------------------------===//
+// SMRD Instructions
+//===----------------------------------------------------------------------===//
+
+defm S_DCACHE_INV_VOL : SMRD_Inval <smrd<0x1d, 0x22>,
+ "s_dcache_inv_vol", int_amdgcn_s_dcache_inv_vol>;
+
+//===----------------------------------------------------------------------===//
+// MUBUF Instructions
+//===----------------------------------------------------------------------===//
+
+defm BUFFER_WBINVL1_VOL : MUBUF_Invalidate <mubuf<0x70, 0x3f>,
+ "buffer_wbinvl1_vol", int_amdgcn_buffer_wbinvl1_vol
+>;
+
+//===----------------------------------------------------------------------===//
+// Flat Instructions
+//===----------------------------------------------------------------------===//
+
+defm FLAT_LOAD_UBYTE : FLAT_Load_Helper <
+ flat<0x8, 0x10>, "flat_load_ubyte", VGPR_32
+>;
+defm FLAT_LOAD_SBYTE : FLAT_Load_Helper <
+ flat<0x9, 0x11>, "flat_load_sbyte", VGPR_32
+>;
+defm FLAT_LOAD_USHORT : FLAT_Load_Helper <
+ flat<0xa, 0x12>, "flat_load_ushort", VGPR_32
+>;
+defm FLAT_LOAD_SSHORT : FLAT_Load_Helper <
+  flat<0xb, 0x13>, "flat_load_sshort", VGPR_32
+>;
+defm FLAT_LOAD_DWORD : FLAT_Load_Helper <
+ flat<0xc, 0x14>, "flat_load_dword", VGPR_32
+>;
+defm FLAT_LOAD_DWORDX2 : FLAT_Load_Helper <
+ flat<0xd, 0x15>, "flat_load_dwordx2", VReg_64
+>;
+defm FLAT_LOAD_DWORDX4 : FLAT_Load_Helper <
+ flat<0xe, 0x17>, "flat_load_dwordx4", VReg_128
+>;
+defm FLAT_LOAD_DWORDX3 : FLAT_Load_Helper <
+ flat<0xf, 0x16>, "flat_load_dwordx3", VReg_96
+>;
+defm FLAT_STORE_BYTE : FLAT_Store_Helper <
+ flat<0x18>, "flat_store_byte", VGPR_32
+>;
+defm FLAT_STORE_SHORT : FLAT_Store_Helper <
+ flat <0x1a>, "flat_store_short", VGPR_32
+>;
+defm FLAT_STORE_DWORD : FLAT_Store_Helper <
+ flat<0x1c>, "flat_store_dword", VGPR_32
+>;
+defm FLAT_STORE_DWORDX2 : FLAT_Store_Helper <
+ flat<0x1d>, "flat_store_dwordx2", VReg_64
+>;
+defm FLAT_STORE_DWORDX4 : FLAT_Store_Helper <
+ flat<0x1e, 0x1f>, "flat_store_dwordx4", VReg_128
+>;
+defm FLAT_STORE_DWORDX3 : FLAT_Store_Helper <
+ flat<0x1f, 0x1e>, "flat_store_dwordx3", VReg_96
+>;
+defm FLAT_ATOMIC_SWAP : FLAT_ATOMIC <
+ flat<0x30, 0x40>, "flat_atomic_swap", VGPR_32
+>;
+defm FLAT_ATOMIC_CMPSWAP : FLAT_ATOMIC <
+ flat<0x31, 0x41>, "flat_atomic_cmpswap", VGPR_32, VReg_64
+>;
+defm FLAT_ATOMIC_ADD : FLAT_ATOMIC <
+ flat<0x32, 0x42>, "flat_atomic_add", VGPR_32
+>;
+defm FLAT_ATOMIC_SUB : FLAT_ATOMIC <
+ flat<0x33, 0x43>, "flat_atomic_sub", VGPR_32
+>;
+defm FLAT_ATOMIC_SMIN : FLAT_ATOMIC <
+ flat<0x35, 0x44>, "flat_atomic_smin", VGPR_32
+>;
+defm FLAT_ATOMIC_UMIN : FLAT_ATOMIC <
+ flat<0x36, 0x45>, "flat_atomic_umin", VGPR_32
+>;
+defm FLAT_ATOMIC_SMAX : FLAT_ATOMIC <
+ flat<0x37, 0x46>, "flat_atomic_smax", VGPR_32
+>;
+defm FLAT_ATOMIC_UMAX : FLAT_ATOMIC <
+ flat<0x38, 0x47>, "flat_atomic_umax", VGPR_32
+>;
+defm FLAT_ATOMIC_AND : FLAT_ATOMIC <
+ flat<0x39, 0x48>, "flat_atomic_and", VGPR_32
+>;
+defm FLAT_ATOMIC_OR : FLAT_ATOMIC <
+ flat<0x3a, 0x49>, "flat_atomic_or", VGPR_32
+>;
+defm FLAT_ATOMIC_XOR : FLAT_ATOMIC <
+ flat<0x3b, 0x4a>, "flat_atomic_xor", VGPR_32
+>;
+defm FLAT_ATOMIC_INC : FLAT_ATOMIC <
+ flat<0x3c, 0x4b>, "flat_atomic_inc", VGPR_32
+>;
+defm FLAT_ATOMIC_DEC : FLAT_ATOMIC <
+ flat<0x3d, 0x4c>, "flat_atomic_dec", VGPR_32
+>;
+defm FLAT_ATOMIC_SWAP_X2 : FLAT_ATOMIC <
+ flat<0x50, 0x60>, "flat_atomic_swap_x2", VReg_64
+>;
+defm FLAT_ATOMIC_CMPSWAP_X2 : FLAT_ATOMIC <
+ flat<0x51, 0x61>, "flat_atomic_cmpswap_x2", VReg_64, VReg_128
+>;
+defm FLAT_ATOMIC_ADD_X2 : FLAT_ATOMIC <
+ flat<0x52, 0x62>, "flat_atomic_add_x2", VReg_64
+>;
+defm FLAT_ATOMIC_SUB_X2 : FLAT_ATOMIC <
+ flat<0x53, 0x63>, "flat_atomic_sub_x2", VReg_64
+>;
+defm FLAT_ATOMIC_SMIN_X2 : FLAT_ATOMIC <
+ flat<0x55, 0x64>, "flat_atomic_smin_x2", VReg_64
+>;
+defm FLAT_ATOMIC_UMIN_X2 : FLAT_ATOMIC <
+ flat<0x56, 0x65>, "flat_atomic_umin_x2", VReg_64
+>;
+defm FLAT_ATOMIC_SMAX_X2 : FLAT_ATOMIC <
+ flat<0x57, 0x66>, "flat_atomic_smax_x2", VReg_64
+>;
+defm FLAT_ATOMIC_UMAX_X2 : FLAT_ATOMIC <
+ flat<0x58, 0x67>, "flat_atomic_umax_x2", VReg_64
+>;
+defm FLAT_ATOMIC_AND_X2 : FLAT_ATOMIC <
+ flat<0x59, 0x68>, "flat_atomic_and_x2", VReg_64
+>;
+defm FLAT_ATOMIC_OR_X2 : FLAT_ATOMIC <
+ flat<0x5a, 0x69>, "flat_atomic_or_x2", VReg_64
+>;
+defm FLAT_ATOMIC_XOR_X2 : FLAT_ATOMIC <
+ flat<0x5b, 0x6a>, "flat_atomic_xor_x2", VReg_64
+>;
+defm FLAT_ATOMIC_INC_X2 : FLAT_ATOMIC <
+ flat<0x5c, 0x6b>, "flat_atomic_inc_x2", VReg_64
+>;
+defm FLAT_ATOMIC_DEC_X2 : FLAT_ATOMIC <
+ flat<0x5d, 0x6c>, "flat_atomic_dec_x2", VReg_64
+>;
+
+} // End SubtargetPredicate = isCIVI
+
+// CI Only flat instructions
+
+let SubtargetPredicate = isCI, VIAssemblerPredicate = DisableInst in {
+
+defm FLAT_ATOMIC_FCMPSWAP : FLAT_ATOMIC <
+ flat<0x3e>, "flat_atomic_fcmpswap", VGPR_32, VReg_64
+>;
+defm FLAT_ATOMIC_FMIN : FLAT_ATOMIC <
+ flat<0x3f>, "flat_atomic_fmin", VGPR_32
+>;
+defm FLAT_ATOMIC_FMAX : FLAT_ATOMIC <
+ flat<0x40>, "flat_atomic_fmax", VGPR_32
+>;
+defm FLAT_ATOMIC_FCMPSWAP_X2 : FLAT_ATOMIC <
+ flat<0x5e>, "flat_atomic_fcmpswap_x2", VReg_64, VReg_128
+>;
+defm FLAT_ATOMIC_FMIN_X2 : FLAT_ATOMIC <
+ flat<0x5f>, "flat_atomic_fmin_x2", VReg_64
+>;
+defm FLAT_ATOMIC_FMAX_X2 : FLAT_ATOMIC <
+ flat<0x60>, "flat_atomic_fmax_x2", VReg_64
+>;
+
+} // End let SubtargetPredicate = isCI, VIAssemblerPredicate = DisableInst
+
+let Predicates = [isCI] in {
+
+// Convert (x - floor(x)) to fract(x)
+def : Pat <
+ (f32 (fsub (f32 (VOP3Mods f32:$x, i32:$mods)),
+ (f32 (ffloor (f32 (VOP3Mods f32:$x, i32:$mods)))))),
+ (V_FRACT_F32_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
+>;
+
+// Convert (x + (-floor(x))) to fract(x)
+def : Pat <
+ (f64 (fadd (f64 (VOP3Mods f64:$x, i32:$mods)),
+ (f64 (fneg (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))))))),
+ (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
+>;
+
+} // End Predicates = [isCI]
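+
+// Both patterns above rely on the same identity, shown here as a plain C++
+// reference (not LLVM code): fract(x) == x - floor(x) == x + (-floor(x)).
+// The f32 pattern matches the subtraction form, the f64 pattern the
+// negated-addition form.
+//
+//   #include <cmath>
+//   double FractRef(double X) { return X - std::floor(X); }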
+
+
+//===----------------------------------------------------------------------===//
+// Flat Patterns
+//===----------------------------------------------------------------------===//
+
+let Predicates = [isCIVI] in {
+
+// Patterns for global loads with no offset
+class FlatLoadPat <FLAT inst, SDPatternOperator node, ValueType vt> : Pat <
+ (vt (node i64:$addr)),
+ (inst $addr, 0, 0, 0)
+>;
+
+def : FlatLoadPat <FLAT_LOAD_UBYTE, flat_az_extloadi8, i32>;
+def : FlatLoadPat <FLAT_LOAD_SBYTE, flat_sextloadi8, i32>;
+def : FlatLoadPat <FLAT_LOAD_USHORT, flat_az_extloadi16, i32>;
+def : FlatLoadPat <FLAT_LOAD_SSHORT, flat_sextloadi16, i32>;
+def : FlatLoadPat <FLAT_LOAD_DWORD, flat_load, i32>;
+def : FlatLoadPat <FLAT_LOAD_DWORDX2, flat_load, v2i32>;
+def : FlatLoadPat <FLAT_LOAD_DWORDX4, flat_load, v4i32>;
+
+class FlatStorePat <FLAT inst, SDPatternOperator node, ValueType vt> : Pat <
+ (node vt:$data, i64:$addr),
+ (inst $data, $addr, 0, 0, 0)
+>;
+
+def : FlatStorePat <FLAT_STORE_BYTE, flat_truncstorei8, i32>;
+def : FlatStorePat <FLAT_STORE_SHORT, flat_truncstorei16, i32>;
+def : FlatStorePat <FLAT_STORE_DWORD, flat_store, i32>;
+def : FlatStorePat <FLAT_STORE_DWORDX2, flat_store, v2i32>;
+def : FlatStorePat <FLAT_STORE_DWORDX4, flat_store, v4i32>;
+
+class FlatAtomicPat <FLAT inst, SDPatternOperator node, ValueType vt> : Pat <
+ (vt (node i64:$addr, vt:$data)),
+ (inst $addr, $data, 0, 0)
+>;
+
+def : FlatAtomicPat <FLAT_ATOMIC_ADD_RTN, atomic_add_global, i32>;
+def : FlatAtomicPat <FLAT_ATOMIC_AND_RTN, atomic_and_global, i32>;
+def : FlatAtomicPat <FLAT_ATOMIC_SUB_RTN, atomic_sub_global, i32>;
+def : FlatAtomicPat <FLAT_ATOMIC_SMAX_RTN, atomic_max_global, i32>;
+def : FlatAtomicPat <FLAT_ATOMIC_UMAX_RTN, atomic_umax_global, i32>;
+def : FlatAtomicPat <FLAT_ATOMIC_SMIN_RTN, atomic_min_global, i32>;
+def : FlatAtomicPat <FLAT_ATOMIC_UMIN_RTN, atomic_umin_global, i32>;
+def : FlatAtomicPat <FLAT_ATOMIC_OR_RTN, atomic_or_global, i32>;
+def : FlatAtomicPat <FLAT_ATOMIC_SWAP_RTN, atomic_swap_global, i32>;
+def : FlatAtomicPat <FLAT_ATOMIC_XOR_RTN, atomic_xor_global, i32>;
+
+} // End Predicates = [isCIVI]
diff --git a/gnu/llvm/lib/Target/AMDGPU/SIFixControlFlowLiveIntervals.cpp b/gnu/llvm/lib/Target/AMDGPU/SIFixControlFlowLiveIntervals.cpp
index d4d3959658e..636750dcfba 100644
--- a/gnu/llvm/lib/Target/AMDGPU/SIFixControlFlowLiveIntervals.cpp
+++ b/gnu/llvm/lib/Target/AMDGPU/SIFixControlFlowLiveIntervals.cpp
@@ -37,7 +37,9 @@ public:
bool runOnMachineFunction(MachineFunction &MF) override;
- StringRef getPassName() const override { return "SI Fix CF Live Intervals"; }
+ const char *getPassName() const override {
+ return "SI Fix CF Live Intervals";
+ }
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<LiveIntervals>();
diff --git a/gnu/llvm/lib/Target/Hexagon/HexagonIntrinsicsDerived.td b/gnu/llvm/lib/Target/Hexagon/HexagonIntrinsicsDerived.td
index 400c17333f7..4c28b28337f 100644
--- a/gnu/llvm/lib/Target/Hexagon/HexagonIntrinsicsDerived.td
+++ b/gnu/llvm/lib/Target/Hexagon/HexagonIntrinsicsDerived.td
@@ -20,21 +20,21 @@ def : Pat <(mul DoubleRegs:$src1, DoubleRegs:$src2),
(EXTRACT_SUBREG
(i64
(M2_dpmpyuu_s0 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
- isub_lo)),
+ subreg_loreg)),
(i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
- isub_lo)))),
- isub_hi)),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), isub_lo)),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), isub_hi))),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), isub_lo)),
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), isub_hi))),
+ subreg_loreg)))),
+ subreg_hireg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg))),
(i32
(EXTRACT_SUBREG
(i64
(M2_dpmpyuu_s0
- (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), isub_lo)),
+ (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)),
(i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
- isub_lo)))), isub_lo))))>;
+ subreg_loreg)))), subreg_loreg))))>;
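
The pattern above builds a 64-bit multiply out of 32-bit halves. A plain C++ sketch of the same decomposition (Mul64ViaHalves is a made-up name, not a Hexagon intrinsic):

#include <cstdint>

// 64x64 -> 64 multiply from 32-bit halves, mirroring the structure above:
// a*b = aL*bL + ((aL*bH + aH*bL) << 32)  (mod 2^64).
uint64_t Mul64ViaHalves(uint64_t A, uint64_t B) {
  uint32_t AL = static_cast<uint32_t>(A), AH = static_cast<uint32_t>(A >> 32);
  uint32_t BL = static_cast<uint32_t>(B), BH = static_cast<uint32_t>(B >> 32);
  uint64_t Low = static_cast<uint64_t>(AL) * BL;
  uint32_t High = static_cast<uint32_t>(static_cast<uint64_t>(AL) * BH +
                                        static_cast<uint64_t>(AH) * BL +
                                        (Low >> 32));
  return (static_cast<uint64_t>(High) << 32) | static_cast<uint32_t>(Low);
}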
diff --git a/gnu/llvm/lib/Target/Mips/MicroMips64r6InstrFormats.td b/gnu/llvm/lib/Target/Mips/MicroMips64r6InstrFormats.td
index 26062bfb2b8..da305a2d508 100644
--- a/gnu/llvm/lib/Target/Mips/MicroMips64r6InstrFormats.td
+++ b/gnu/llvm/lib/Target/Mips/MicroMips64r6InstrFormats.td
@@ -71,197 +71,16 @@ class POOL32S_DALIGN_FM_MMR6 {
class POOL32A_DIVMOD_FM_MMR6<string instr_asm, bits<9> funct>
: MMR6Arch<instr_asm> {
- bits<5> rt;
- bits<5> rs;
- bits<5> rd;
-
- bits<32> Inst;
-
- let Inst{31-26} = 0b010110;
- let Inst{25-21} = rt;
- let Inst{20-16} = rs;
- let Inst{15-11} = rd;
- let Inst{10-9} = 0b00;
- let Inst{8-0} = funct;
-}
-
-class POOL32S_DMFTC0_FM_MMR6<string instr_asm, bits<5> funct>
- : MMR6Arch<instr_asm>, MipsR6Inst {
- bits<5> rt;
- bits<5> rs;
- bits<3> sel;
-
- bits<32> Inst;
-
- let Inst{31-26} = 0b010110;
- let Inst{25-21} = rt;
- let Inst{20-16} = rs;
- let Inst{15-14} = 0;
- let Inst{13-11} = sel;
- let Inst{10-6} = funct;
- let Inst{5-0} = 0b111100;
-}
-
-class POOL32S_ARITH_FM_MMR6<string opstr, bits<9> funct>
- : MMR6Arch<opstr> {
- bits<5> rt;
- bits<5> rs;
bits<5> rd;
-
- bits<32> Inst;
-
- let Inst{31-26} = 0b010110;
- let Inst{25-21} = rt;
- let Inst{20-16} = rs;
- let Inst{15-11} = rd;
- let Inst{10-9} = 0b00;
- let Inst{8-0} = funct;
-}
-
-class DADDIU_FM_MMR6<string opstr> : MMR6Arch<opstr> {
- bits<5> rt;
- bits<5> rs;
- bits<16> imm16;
-
- bits<32> Inst;
-
- let Inst{31-26} = 0b010111;
- let Inst{25-21} = rt;
- let Inst{20-16} = rs;
- let Inst{15-0} = imm16;
-}
-
-class PCREL18_FM_MMR6<bits<3> funct> : MipsR6Inst {
- bits<5> rt;
- bits<18> imm;
-
- bits<32> Inst;
-
- let Inst{31-26} = 0b011110;
- let Inst{25-21} = rt;
- let Inst{20-18} = funct;
- let Inst{17-0} = imm;
-}
-
-class POOL32S_2R_FM_MMR6<string instr_asm, bits<10> funct>
- : MMR6Arch<instr_asm>, MipsR6Inst {
- bits<5> rt;
- bits<5> rs;
-
- bits<32> Inst;
-
- let Inst{31-26} = 0b010110;
- let Inst{25-21} = rt;
- let Inst{20-16} = rs;
- let Inst{15-6} = funct;
- let Inst{5-0} = 0b111100;
-}
-
-class POOL32S_2RSA5B0_FM_MMR6<string instr_asm, bits<9> funct>
- : MMR6Arch<instr_asm>, MipsR6Inst {
- bits<5> rt;
bits<5> rs;
- bits<5> sa;
-
- bits<32> Inst;
-
- let Inst{31-26} = 0b010110;
- let Inst{25-21} = rt;
- let Inst{20-16} = rs;
- let Inst{15-11} = sa;
- let Inst{10-9} = 0b00;
- let Inst{8-0} = funct;
-}
-
-class LD_SD_32_2R_OFFSET16_FM_MMR6<string instr_asm, bits<6> op>
- : MMR6Arch<instr_asm>, MipsR6Inst {
- bits<5> rt;
- bits<21> addr;
- bits<5> base = addr{20-16};
- bits<16> offset = addr{15-0};
-
- bits<32> Inst;
-
- let Inst{31-26} = op;
- let Inst{25-21} = rt;
- let Inst{20-16} = base;
- let Inst{15-0} = offset;
-}
-
-class POOL32C_2R_OFFSET12_FM_MMR6<string instr_asm, bits<4> funct>
- : MMR6Arch<instr_asm>, MipsR6Inst {
bits<5> rt;
- bits<21> addr;
- bits<5> base = addr{20-16};
- bits<12> offset = addr{11-0};
-
- bits<32> Inst;
-
- let Inst{31-26} = 0b011000;
- let Inst{25-21} = rt;
- let Inst{20-16} = base;
- let Inst{15-12} = funct;
- let Inst{11-0} = offset;
-}
-
-class POOL32S_3R_FM_MMR6<string instr_asm, bits<9> funct>
- : MMR6Arch<instr_asm>, MipsR6Inst {
- bits<5> rt;
- bits<5> rs;
- bits<5> rd;
bits<32> Inst;
let Inst{31-26} = 0b010110;
- let Inst{25-21} = rt;
+ let Inst{25-21} = rd;
let Inst{20-16} = rs;
- let Inst{15-11} = rd;
+ let Inst{15-11} = rt;
let Inst{10-9} = 0b00;
- let Inst{8-0} = funct;
-}
-
-class POOL32S_DBITSWAP_FM_MMR6<string instr_asm> : MMR6Arch<instr_asm>,
- MipsR6Inst {
- bits<5> rt;
- bits<5> rd;
-
- bits<32> Inst;
-
- let Inst{31-26} = 0b010110;
- let Inst{25-21} = rt;
- let Inst{20-16} = rd;
- let Inst{15-12} = 0b0000;
- let Inst{11-6} = 0b101100;
- let Inst{5-0} = 0b111100;
-}
-
-class POOL32S_3RSA_FM_MMR6<string instr_asm> : MMR6Arch<instr_asm>,
- MipsR6Inst {
- bits<5> rt;
- bits<5> rs;
- bits<5> rd;
- bits<2> sa;
-
- bits<32> Inst;
-
- let Inst{31-26} = 0b010110;
- let Inst{25-21} = rt;
- let Inst{20-16} = rs;
- let Inst{15-11} = rd;
- let Inst{10-9} = sa;
- let Inst{8-6} = 0b100;
- let Inst{5-0} = 0b000100;
-}
-
-class PCREL_1ROFFSET19_FM_MMR6<string instr_asm> : MMR6Arch<instr_asm>,
- MipsR6Inst {
- bits<5> rt;
- bits<19> offset;
-
- bits<32> Inst;
-
- let Inst{31-26} = 0b011110;
- let Inst{25-21} = rt;
- let Inst{20-19} = 0b10;
- let Inst{18-0} = offset;
+ let Inst{8-0} = funct;
}
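
The surviving class packs its fields into a fixed 32-bit word; as a hedged C++ illustration of that packing (EncodeDivMod is a made-up name):

#include <cstdint>

// Mirrors POOL32A_DIVMOD_FM_MMR6 after this change: opcode 0b010110 in
// bits 31-26, rd in 25-21, rs in 20-16, rt in 15-11, 0b00 in 10-9,
// and the 9-bit funct in 8-0.
uint32_t EncodeDivMod(uint32_t Rd, uint32_t Rs, uint32_t Rt, uint32_t Funct) {
  return (0b010110u << 26) | ((Rd & 0x1F) << 21) | ((Rs & 0x1F) << 16) |
         ((Rt & 0x1F) << 11) | (Funct & 0x1FF);
}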
diff --git a/gnu/llvm/lib/Target/Mips/MicroMips64r6InstrInfo.td b/gnu/llvm/lib/Target/Mips/MicroMips64r6InstrInfo.td
index 38b09d105dd..ec1aef86a94 100644
--- a/gnu/llvm/lib/Target/Mips/MicroMips64r6InstrInfo.td
+++ b/gnu/llvm/lib/Target/Mips/MicroMips64r6InstrInfo.td
@@ -28,48 +28,6 @@ class DDIV_MM64R6_ENC : POOL32A_DIVMOD_FM_MMR6<"ddiv", 0b100011000>;
class DMOD_MM64R6_ENC : POOL32A_DIVMOD_FM_MMR6<"dmod", 0b101011000>;
class DDIVU_MM64R6_ENC : POOL32A_DIVMOD_FM_MMR6<"ddivu", 0b110011000>;
class DMODU_MM64R6_ENC : POOL32A_DIVMOD_FM_MMR6<"dmodu", 0b111011000>;
-class DINSU_MM64R6_ENC : POOL32S_EXTBITS_FM_MMR6<0b110100>;
-class DINSM_MM64R6_ENC : POOL32S_EXTBITS_FM_MMR6<0b000100>;
-class DINS_MM64R6_ENC : POOL32S_EXTBITS_FM_MMR6<0b001100>;
-class DMTC0_MM64R6_ENC : POOL32S_DMFTC0_FM_MMR6<"dmtc0", 0b01011>;
-class DMTC1_MM64R6_ENC : POOL32F_MFTC1_FM_MMR6<"dmtc1", 0b10110000>;
-class DMTC2_MM64R6_ENC : POOL32A_MFTC2_FM_MMR6<"dmtc2", 0b0111110100>;
-class DMFC0_MM64R6_ENC : POOL32S_DMFTC0_FM_MMR6<"dmfc0", 0b00011>;
-class DMFC1_MM64R6_ENC : POOL32F_MFTC1_FM_MMR6<"dmfc1", 0b10010000>;
-class DMFC2_MM64R6_ENC : POOL32A_MFTC2_FM_MMR6<"dmfc2", 0b0110110100>;
-class DADD_MM64R6_ENC : POOL32S_ARITH_FM_MMR6<"dadd", 0b100010000>;
-class DADDIU_MM64R6_ENC : DADDIU_FM_MMR6<"daddiu">;
-class DADDU_MM64R6_ENC : POOL32S_ARITH_FM_MMR6<"daddu", 0b101010000>;
-class LDPC_MMR646_ENC : PCREL18_FM_MMR6<0b110>;
-class DSUB_MM64R6_ENC : POOL32S_ARITH_FM_MMR6<"dsub", 0b110010000>;
-class DSUBU_MM64R6_ENC : POOL32S_ARITH_FM_MMR6<"dsubu", 0b111010000>;
-class DMUL_MM64R6_ENC : POOL32S_ARITH_FM_MMR6<"dmul", 0b000011000>;
-class DMUH_MM64R6_ENC : POOL32S_ARITH_FM_MMR6<"dmuh", 0b001011000>;
-class DMULU_MM64R6_ENC : POOL32S_ARITH_FM_MMR6<"dmulu", 0b010011000>;
-class DMUHU_MM64R6_ENC : POOL32S_ARITH_FM_MMR6<"dmuhu", 0b011011000>;
-class DSBH_MM64R6_ENC : POOL32S_2R_FM_MMR6<"dsbh", 0b0111101100>;
-class DSHD_MM64R6_ENC : POOL32S_2R_FM_MMR6<"dshd", 0b1111101100>;
-class DSLL_MM64R6_ENC : POOL32S_2RSA5B0_FM_MMR6<"dsll", 0b000000000>;
-class DSLL32_MM64R6_ENC : POOL32S_2RSA5B0_FM_MMR6<"dsll32", 0b000001000>;
-class DSLLV_MM64R6_ENC : POOL32S_3R_FM_MMR6<"dsllv", 0b000010000>;
-class DSRAV_MM64R6_ENC : POOL32S_3R_FM_MMR6<"dsrav", 0b010010000>;
-class DSRA_MM64R6_ENC : POOL32S_2RSA5B0_FM_MMR6<"dsra", 0b010000000>;
-class DSRA32_MM64R6_ENC : POOL32S_2RSA5B0_FM_MMR6<"dsra32", 0b010000100>;
-class DCLO_MM64R6_ENC : POOL32S_2R_FM_MMR6<"dclo", 0b0100101100>;
-class DCLZ_MM64R6_ENC : POOL32S_2R_FM_MMR6<"dclz", 0b0101101100>;
-class DROTR_MM64R6_ENC : POOL32S_2RSA5B0_FM_MMR6<"drotr", 0b011000000>;
-class DROTR32_MM64R6_ENC : POOL32S_2RSA5B0_FM_MMR6<"drotr32", 0b011001000>;
-class DROTRV_MM64R6_ENC : POOL32S_3R_FM_MMR6<"drotrv", 0b011010000>;
-class LD_MM64R6_ENC : LD_SD_32_2R_OFFSET16_FM_MMR6<"ld", 0b110111>;
-class LLD_MM64R6_ENC : POOL32C_2R_OFFSET12_FM_MMR6<"lld", 0b0111>;
-class LWU_MM64R6_ENC : POOL32C_2R_OFFSET12_FM_MMR6<"lwu", 0b1110>;
-class SD_MM64R6_ENC : LD_SD_32_2R_OFFSET16_FM_MMR6<"sd", 0b110110>;
-class DSRL_MM64R6_ENC : POOL32S_2RSA5B0_FM_MMR6<"dsrl", 0b001000000>;
-class DSRL32_MM64R6_ENC : POOL32S_2RSA5B0_FM_MMR6<"dsrl32", 0b001001000>;
-class DSRLV_MM64R6_ENC : POOL32S_3R_FM_MMR6<"dsrlv", 0b001010000>;
-class DBITSWAP_MM64R6_ENC : POOL32S_DBITSWAP_FM_MMR6<"dbitswap">;
-class DLSA_MM64R6_ENC : POOL32S_3RSA_FM_MMR6<"dlsa">;
-class LWUPC_MM64R6_ENC : PCREL_1ROFFSET19_FM_MMR6<"lwupc">;
//===----------------------------------------------------------------------===//
//
@@ -77,28 +35,24 @@ class LWUPC_MM64R6_ENC : PCREL_1ROFFSET19_FM_MMR6<"lwupc">;
//
//===----------------------------------------------------------------------===//
-class DAUI_MMR6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
- InstrItinClass Itin>
+class DAUI_MMR6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd>
: MMR6Arch<instr_asm>, MipsR6Inst {
dag OutOperandList = (outs GPROpnd:$rt);
- dag InOperandList = (ins GPROpnd:$rs, uimm16:$imm);
+ dag InOperandList = (ins GPROpnd:$rs, simm16:$imm);
string AsmString = !strconcat(instr_asm, "\t$rt, $rs, $imm");
list<dag> Pattern = [];
- InstrItinClass Itinerary = Itin;
}
-class DAUI_MMR6_DESC : DAUI_MMR6_DESC_BASE<"daui", GPR64Opnd, II_DAUI>;
+class DAUI_MMR6_DESC : DAUI_MMR6_DESC_BASE<"daui", GPR64Opnd>;
-class DAHI_DATI_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
- InstrItinClass Itin>
+class DAHI_DATI_DESC_BASE<string instr_asm, RegisterOperand GPROpnd>
: MMR6Arch<instr_asm>, MipsR6Inst {
dag OutOperandList = (outs GPROpnd:$rs);
- dag InOperandList = (ins GPROpnd:$rt, uimm16:$imm);
- string AsmString = !strconcat(instr_asm, "\t$rt, $rs, $imm");
+ dag InOperandList = (ins GPROpnd:$rt, simm16:$imm);
+ string AsmString = !strconcat(instr_asm, "\t$rt, $imm");
string Constraints = "$rs = $rt";
- InstrItinClass Itinerary = Itin;
}
-class DAHI_MMR6_DESC : DAHI_DATI_DESC_BASE<"dahi", GPR64Opnd, II_DAHI>;
-class DATI_MMR6_DESC : DAHI_DATI_DESC_BASE<"dati", GPR64Opnd, II_DATI>;
+class DAHI_MMR6_DESC : DAHI_DATI_DESC_BASE<"dahi", GPR64Opnd>;
+class DATI_MMR6_DESC : DAHI_DATI_DESC_BASE<"dati", GPR64Opnd>;
class EXTBITS_DESC_BASE<string instr_asm, RegisterOperand RO, Operand PosOpnd,
Operand SizeOpnd, SDPatternOperator Op = null_frag>
@@ -114,7 +68,7 @@ class EXTBITS_DESC_BASE<string instr_asm, RegisterOperand RO, Operand PosOpnd,
// TODO: Add 'pos + size' constraint check to dext* instructions
// DEXT: 0 < pos + size <= 63
// DEXTM, DEXTU: 32 < pos + size <= 64
-class DEXT_MMR6_DESC : EXTBITS_DESC_BASE<"dext", GPR64Opnd, uimm5_report_uimm6,
+class DEXT_MMR6_DESC : EXTBITS_DESC_BASE<"dext", GPR64Opnd, uimm5,
uimm5_plus1, MipsExt>;
class DEXTM_MMR6_DESC : EXTBITS_DESC_BASE<"dextm", GPR64Opnd, uimm5,
uimm5_plus33, MipsExt>;
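
The TODO in the comment above states the dext* legality rule without code; a hedged sketch of the check it asks for:

// DEXT:         0 < pos + size <= 63
// DEXTM, DEXTU: 32 < pos + size <= 64
bool IsValidDextPair(unsigned Pos, unsigned Size, bool IsDextmOrDextu) {
  unsigned Sum = Pos + Size;
  return IsDextmOrDextu ? (Sum > 32 && Sum <= 64) : (Sum > 0 && Sum <= 63);
}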
@@ -122,235 +76,19 @@ class DEXTU_MMR6_DESC : EXTBITS_DESC_BASE<"dextu", GPR64Opnd, uimm5_plus32,
uimm5_plus1, MipsExt>;
class DALIGN_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
- Operand ImmOpnd, InstrItinClass itin>
- : MMR6Arch<instr_asm>, MipsR6Inst {
+ Operand ImmOpnd> : MMR6Arch<instr_asm>, MipsR6Inst {
dag OutOperandList = (outs GPROpnd:$rd);
dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt, ImmOpnd:$bp);
string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt, $bp");
list<dag> Pattern = [];
- InstrItinClass Itinerary = itin;
-}
-
-class DALIGN_MMR6_DESC : DALIGN_DESC_BASE<"dalign", GPR64Opnd, uimm3,
- II_DALIGN>;
-
-class DDIV_MM64R6_DESC : DIVMOD_MMR6_DESC_BASE<"ddiv", GPR64Opnd, II_DDIV,
- sdiv>;
-class DMOD_MM64R6_DESC : DIVMOD_MMR6_DESC_BASE<"dmod", GPR64Opnd, II_DMOD,
- srem>;
-class DDIVU_MM64R6_DESC : DIVMOD_MMR6_DESC_BASE<"ddivu", GPR64Opnd, II_DDIVU,
- udiv>;
-class DMODU_MM64R6_DESC : DIVMOD_MMR6_DESC_BASE<"dmodu", GPR64Opnd, II_DMODU,
- urem>;
-
-class DCLO_MM64R6_DESC {
- dag OutOperandList = (outs GPR64Opnd:$rt);
- dag InOperandList = (ins GPR64Opnd:$rs);
- string AsmString = !strconcat("dclo", "\t$rt, $rs");
- list<dag> Pattern = [(set GPR64Opnd:$rt, (ctlz (not GPR64Opnd:$rs)))];
- InstrItinClass Itinerary = II_DCLO;
- Format Form = FrmR;
- string BaseOpcode = "dclo";
-}
-
-class DCLZ_MM64R6_DESC {
- dag OutOperandList = (outs GPR64Opnd:$rt);
- dag InOperandList = (ins GPR64Opnd:$rs);
- string AsmString = !strconcat("dclz", "\t$rt, $rs");
- list<dag> Pattern = [(set GPR64Opnd:$rt, (ctlz GPR64Opnd:$rs))];
- InstrItinClass Itinerary = II_DCLZ;
- Format Form = FrmR;
- string BaseOpcode = "dclz";
-}
-
-class DINSU_MM64R6_DESC : InsBase<"dinsu", GPR64Opnd, uimm5_plus32,
- uimm5_inssize_plus1, MipsIns>;
-class DINSM_MM64R6_DESC : InsBase<"dinsm", GPR64Opnd, uimm5, uimm_range_2_64>;
-class DINS_MM64R6_DESC : InsBase<"dins", GPR64Opnd, uimm5, uimm5_inssize_plus1,
- MipsIns>;
-class DMTC0_MM64R6_DESC : MTC0_MMR6_DESC_BASE<"dmtc0", COP0Opnd, GPR64Opnd,
- II_DMTC0>;
-class DMTC1_MM64R6_DESC : MTC1_MMR6_DESC_BASE<"dmtc1", FGR64Opnd, GPR64Opnd,
- II_DMTC1, bitconvert>;
-class DMTC2_MM64R6_DESC : MTC2_MMR6_DESC_BASE<"dmtc2", COP2Opnd, GPR64Opnd,
- II_DMTC2>;
-class DMFC0_MM64R6_DESC : MFC0_MMR6_DESC_BASE<"dmfc0", GPR64Opnd, COP0Opnd,
- II_DMFC0>;
-class DMFC1_MM64R6_DESC : MFC1_MMR6_DESC_BASE<"dmfc1", GPR64Opnd, FGR64Opnd,
- II_DMFC1, bitconvert>;
-class DMFC2_MM64R6_DESC : MFC2_MMR6_DESC_BASE<"dmfc2", GPR64Opnd, COP2Opnd,
- II_DMFC2>;
-class DADD_MM64R6_DESC : ArithLogicR<"dadd", GPR64Opnd, 1, II_DADD>;
-class DADDIU_MM64R6_DESC : ArithLogicI<"daddiu", simm16_64, GPR64Opnd,
- II_DADDIU, immSExt16, add>,
- IsAsCheapAsAMove;
-class DADDU_MM64R6_DESC : ArithLogicR<"daddu", GPR64Opnd, 1, II_DADDU, add>;
-
-class DSUB_DESC_BASE<string instr_asm, RegisterOperand RO,
- InstrItinClass Itin = NoItinerary,
- SDPatternOperator OpNode = null_frag>
- : MipsR6Inst {
- dag OutOperandList = (outs RO:$rd);
- dag InOperandList = (ins RO:$rs, RO:$rt);
- string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
- list<dag> Pattern = [(set RO:$rd, (OpNode RO:$rs, RO:$rt))];
- InstrItinClass Itinerary = Itin;
- Format Form = FrmR;
- string BaseOpcode = instr_asm;
- let isCommutable = 0;
- let isReMaterializable = 1;
- let TwoOperandAliasConstraint = "$rd = $rs";
-}
-class DSUB_MM64R6_DESC : DSUB_DESC_BASE<"dsub", GPR64Opnd, II_DSUB>;
-class DSUBU_MM64R6_DESC : DSUB_DESC_BASE<"dsubu", GPR64Opnd, II_DSUBU, sub>;
-
-class LDPC_MM64R6_DESC : PCREL_MMR6_DESC_BASE<"ldpc", GPR64Opnd, simm18_lsl3,
- II_LDPC>;
-
-class MUL_MM64R6_DESC_BASE<string opstr, RegisterOperand GPROpnd,
- InstrItinClass Itin = NoItinerary,
- SDPatternOperator Op = null_frag> : MipsR6Inst {
- dag OutOperandList = (outs GPROpnd:$rd);
- dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt);
- string AsmString = !strconcat(opstr, "\t$rd, $rs, $rt");
- InstrItinClass Itinerary = Itin;
- list<dag> Pattern = [(set GPROpnd:$rd, (Op GPROpnd:$rs, GPROpnd:$rt))];
-}
-
-class DMUL_MM64R6_DESC : MUL_MM64R6_DESC_BASE<"dmul", GPR64Opnd, II_DMUL, mul>;
-class DMUH_MM64R6_DESC : MUL_MM64R6_DESC_BASE<"dmuh", GPR64Opnd, II_DMUH,
- mulhs>;
-class DMULU_MM64R6_DESC : MUL_MM64R6_DESC_BASE<"dmulu", GPR64Opnd, II_DMULU>;
-class DMUHU_MM64R6_DESC : MUL_MM64R6_DESC_BASE<"dmuhu", GPR64Opnd, II_DMUHU,
- mulhu>;
-
-class DSBH_DSHD_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
- InstrItinClass Itin> {
- dag OutOperandList = (outs GPROpnd:$rt);
- dag InOperandList = (ins GPROpnd:$rs);
- string AsmString = !strconcat(instr_asm, "\t$rt, $rs");
- bit hasSideEffects = 0;
- list<dag> Pattern = [];
- InstrItinClass Itinerary = Itin;
- Format Form = FrmR;
- string BaseOpcode = instr_asm;
-}
-
-class DSBH_MM64R6_DESC : DSBH_DSHD_DESC_BASE<"dsbh", GPR64Opnd, II_DSBH>;
-class DSHD_MM64R6_DESC : DSBH_DSHD_DESC_BASE<"dshd", GPR64Opnd, II_DSHD>;
-
-class SHIFT_ROTATE_IMM_MM64R6<string instr_asm, Operand ImmOpnd,
- InstrItinClass itin,
- SDPatternOperator OpNode = null_frag,
- SDPatternOperator PO = null_frag> {
- dag OutOperandList = (outs GPR64Opnd:$rt);
- dag InOperandList = (ins GPR64Opnd:$rs, ImmOpnd:$sa);
- string AsmString = !strconcat(instr_asm, "\t$rt, $rs, $sa");
- list<dag> Pattern = [(set GPR64Opnd:$rt, (OpNode GPR64Opnd:$rs, PO:$sa))];
- InstrItinClass Itinerary = itin;
- Format Form = FrmR;
- string TwoOperandAliasConstraint = "$rs = $rt";
- string BaseOpcode = instr_asm;
-}
-
-class SHIFT_ROTATE_REG_MM64R6<string instr_asm, InstrItinClass itin,
- SDPatternOperator OpNode = null_frag> {
- dag OutOperandList = (outs GPR64Opnd:$rd);
- dag InOperandList = (ins GPR64Opnd:$rt, GPR32Opnd:$rs);
- string AsmString = !strconcat(instr_asm, "\t$rd, $rt, $rs");
- list<dag> Pattern = [(set GPR64Opnd:$rd,
- (OpNode GPR64Opnd:$rt, GPR32Opnd:$rs))];
- InstrItinClass Itinerary = itin;
- Format Form = FrmR;
- string BaseOpcode = instr_asm;
-}
-
-class DSLL_MM64R6_DESC : SHIFT_ROTATE_IMM_MM64R6<"dsll", uimm6, II_DSLL, shl,
- immZExt6>;
-class DSLL32_MM64R6_DESC : SHIFT_ROTATE_IMM_MM64R6<"dsll32", uimm5, II_DSLL32>;
-class DSLLV_MM64R6_DESC : SHIFT_ROTATE_REG_MM64R6<"dsllv", II_DSLLV, shl>;
-class DSRAV_MM64R6_DESC : SHIFT_ROTATE_REG_MM64R6<"dsrav", II_DSRAV, sra>;
-class DSRA_MM64R6_DESC : SHIFT_ROTATE_IMM_MM64R6<"dsra", uimm6, II_DSRA, sra,
- immZExt6>;
-class DSRA32_MM64R6_DESC : SHIFT_ROTATE_IMM_MM64R6<"dsra32", uimm5, II_DSRA32>;
-class DROTR_MM64R6_DESC : SHIFT_ROTATE_IMM_MM64R6<"drotr", uimm6, II_DROTR,
- rotr, immZExt6>;
-class DROTR32_MM64R6_DESC : SHIFT_ROTATE_IMM_MM64R6<"drotr32", uimm5,
- II_DROTR32>;
-class DROTRV_MM64R6_DESC : SHIFT_ROTATE_REG_MM64R6<"drotrv", II_DROTRV, rotr>;
-class DSRL_MM64R6_DESC : SHIFT_ROTATE_IMM_MM64R6<"dsrl", uimm6, II_DSRL, srl,
- immZExt6>;
-class DSRL32_MM64R6_DESC : SHIFT_ROTATE_IMM_MM64R6<"dsrl32", uimm5, II_DSRL32>;
-class DSRLV_MM64R6_DESC : SHIFT_ROTATE_REG_MM64R6<"dsrlv", II_DSRLV, srl>;
-
-class Load_MM64R6<string instr_asm, Operand MemOpnd, InstrItinClass itin,
- SDPatternOperator OpNode = null_frag> {
- dag OutOperandList = (outs GPR64Opnd:$rt);
- dag InOperandList = (ins MemOpnd:$addr);
- string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
- list<dag> Pattern = [(set GPR64Opnd:$rt, (OpNode addr:$addr))];
- InstrItinClass Itinerary = itin;
- Format Form = FrmI;
- bit mayLoad = 1;
- bit canFoldAsLoad = 1;
- string BaseOpcode = instr_asm;
-}
-
-class LD_MM64R6_DESC : Load_MM64R6<"ld", mem_simm16, II_LD, load> {
- string DecoderMethod = "DecodeMemMMImm16";
-}
-class LWU_MM64R6_DESC : Load_MM64R6<"lwu", mem_simm12, II_LWU, zextloadi32> {
- string DecoderMethod = "DecodeMemMMImm12";
-}
-
-class LLD_MM64R6_DESC {
- dag OutOperandList = (outs GPR64Opnd:$rt);
- dag InOperandList = (ins mem_simm12:$addr);
- string AsmString = "lld\t$rt, $addr";
- list<dag> Pattern = [];
- bit mayLoad = 1;
- InstrItinClass Itinerary = II_LLD;
- string BaseOpcode = "lld";
- string DecoderMethod = "DecodeMemMMImm12";
-}
-
-class SD_MM64R6_DESC {
- dag OutOperandList = (outs);
- dag InOperandList = (ins GPR64Opnd:$rt, mem_simm16:$addr);
- string AsmString = "sd\t$rt, $addr";
- list<dag> Pattern = [(store GPR64Opnd:$rt, addr:$addr)];
- InstrItinClass Itinerary = II_SD;
- Format Form = FrmI;
- bit mayStore = 1;
- string BaseOpcode = "sd";
- string DecoderMethod = "DecodeMemMMImm16";
}
-class DBITSWAP_MM64R6_DESC {
- dag OutOperandList = (outs GPR64Opnd:$rd);
- dag InOperandList = (ins GPR64Opnd:$rt);
- string AsmString = !strconcat("dbitswap", "\t$rd, $rt");
- list<dag> Pattern = [];
- InstrItinClass Itinerary = II_DBITSWAP;
-}
-
-class DLSA_MM64R6_DESC {
- dag OutOperandList = (outs GPR64Opnd:$rd);
- dag InOperandList = (ins GPR64Opnd:$rt, GPR64Opnd:$rs, uimm2_plus1:$sa);
- string AsmString = "dlsa\t$rt, $rs, $rd, $sa";
- list<dag> Pattern = [];
- InstrItinClass Itinerary = II_DLSA;
-}
+class DALIGN_MMR6_DESC : DALIGN_DESC_BASE<"dalign", GPR64Opnd, uimm3>;
-class LWUPC_MM64R6_DESC {
- dag OutOperandList = (outs GPR64Opnd:$rt);
- dag InOperandList = (ins simm19_lsl2:$offset);
- string AsmString = "lwupc\t$rt, $offset";
- list<dag> Pattern = [];
- InstrItinClass Itinerary = II_LWUPC;
- bit mayLoad = 1;
- bit IsPCRelativeLoad = 1;
-}
+class DDIV_MM64R6_DESC : ArithLogicR<"ddiv", GPR32Opnd>;
+class DMOD_MM64R6_DESC : ArithLogicR<"dmod", GPR32Opnd>;
+class DDIVU_MM64R6_DESC : ArithLogicR<"ddivu", GPR32Opnd>;
+class DMODU_MM64R6_DESC : ArithLogicR<"dmodu", GPR32Opnd>;
//===----------------------------------------------------------------------===//
//
@@ -360,10 +98,8 @@ class LWUPC_MM64R6_DESC {
let DecoderNamespace = "MicroMipsR6" in {
def DAUI_MM64R6 : StdMMR6Rel, DAUI_MMR6_DESC, DAUI_MMR6_ENC, ISA_MICROMIPS64R6;
- let DecoderMethod = "DecodeDAHIDATIMMR6" in {
- def DAHI_MM64R6 : StdMMR6Rel, DAHI_MMR6_DESC, DAHI_MMR6_ENC, ISA_MICROMIPS64R6;
- def DATI_MM64R6 : StdMMR6Rel, DATI_MMR6_DESC, DATI_MMR6_ENC, ISA_MICROMIPS64R6;
- }
+ def DAHI_MM64R6 : StdMMR6Rel, DAHI_MMR6_DESC, DAHI_MMR6_ENC, ISA_MICROMIPS64R6;
+ def DATI_MM64R6 : StdMMR6Rel, DATI_MMR6_DESC, DATI_MMR6_ENC, ISA_MICROMIPS64R6;
def DEXT_MM64R6 : StdMMR6Rel, DEXT_MMR6_DESC, DEXT_MMR6_ENC,
ISA_MICROMIPS64R6;
def DEXTM_MM64R6 : StdMMR6Rel, DEXTM_MMR6_DESC, DEXTM_MMR6_ENC,
@@ -380,183 +116,4 @@ let DecoderNamespace = "MicroMipsR6" in {
ISA_MICROMIPS64R6;
def DMODU_MM64R6 : R6MMR6Rel, DMODU_MM64R6_DESC, DMODU_MM64R6_ENC,
ISA_MICROMIPS64R6;
- def DINSU_MM64R6: R6MMR6Rel, DINSU_MM64R6_DESC, DINSU_MM64R6_ENC,
- ISA_MICROMIPS64R6;
- def DINSM_MM64R6: R6MMR6Rel, DINSM_MM64R6_DESC, DINSM_MM64R6_ENC,
- ISA_MICROMIPS64R6;
- def DINS_MM64R6: R6MMR6Rel, DINS_MM64R6_DESC, DINS_MM64R6_ENC,
- ISA_MICROMIPS64R6;
- def DMTC0_MM64R6 : StdMMR6Rel, DMTC0_MM64R6_ENC, DMTC0_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DMTC1_MM64R6 : StdMMR6Rel, DMTC1_MM64R6_DESC, DMTC1_MM64R6_ENC,
- ISA_MICROMIPS64R6;
- def DMTC2_MM64R6 : StdMMR6Rel, DMTC2_MM64R6_ENC, DMTC2_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DMFC0_MM64R6 : StdMMR6Rel, DMFC0_MM64R6_ENC, DMFC0_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DMFC1_MM64R6 : StdMMR6Rel, DMFC1_MM64R6_DESC, DMFC1_MM64R6_ENC,
- ISA_MICROMIPS64R6;
- def DMFC2_MM64R6 : StdMMR6Rel, DMFC2_MM64R6_ENC, DMFC2_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DADD_MM64R6: StdMMR6Rel, DADD_MM64R6_DESC, DADD_MM64R6_ENC,
- ISA_MICROMIPS64R6;
- def DADDIU_MM64R6: StdMMR6Rel, DADDIU_MM64R6_DESC, DADDIU_MM64R6_ENC,
- ISA_MICROMIPS64R6;
- def DADDU_MM64R6: StdMMR6Rel, DADDU_MM64R6_DESC, DADDU_MM64R6_ENC,
- ISA_MICROMIPS64R6;
- def LDPC_MM64R6 : R6MMR6Rel, LDPC_MMR646_ENC, LDPC_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DSUB_MM64R6 : StdMMR6Rel, DSUB_MM64R6_DESC, DSUB_MM64R6_ENC,
- ISA_MICROMIPS64R6;
- def DSUBU_MM64R6 : StdMMR6Rel, DSUBU_MM64R6_DESC, DSUBU_MM64R6_ENC,
- ISA_MICROMIPS64R6;
- def DMUL_MM64R6 : R6MMR6Rel, DMUL_MM64R6_DESC, DMUL_MM64R6_ENC,
- ISA_MICROMIPS64R6;
- def DMUH_MM64R6 : R6MMR6Rel, DMUH_MM64R6_DESC, DMUH_MM64R6_ENC,
- ISA_MICROMIPS64R6;
- def DMULU_MM64R6 : R6MMR6Rel, DMULU_MM64R6_DESC, DMULU_MM64R6_ENC,
- ISA_MICROMIPS64R6;
- def DMUHU_MM64R6 : R6MMR6Rel, DMUHU_MM64R6_DESC, DMUHU_MM64R6_ENC,
- ISA_MICROMIPS64R6;
- def DSBH_MM64R6 : R6MMR6Rel, DSBH_MM64R6_ENC, DSBH_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DSHD_MM64R6 : R6MMR6Rel, DSHD_MM64R6_ENC, DSHD_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DSLL_MM64R6 : StdMMR6Rel, DSLL_MM64R6_ENC, DSLL_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DSLL32_MM64R6 : StdMMR6Rel, DSLL32_MM64R6_ENC, DSLL32_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DSLLV_MM64R6 : StdMMR6Rel, DSLLV_MM64R6_ENC, DSLLV_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DSRAV_MM64R6 : StdMMR6Rel, DSRAV_MM64R6_ENC, DSRAV_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DSRA_MM64R6 : StdMMR6Rel, DSRA_MM64R6_ENC, DSRA_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DSRA32_MM64R6 : StdMMR6Rel, DSRA32_MM64R6_ENC, DSRA32_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DCLO_MM64R6 : StdMMR6Rel, R6MMR6Rel, DCLO_MM64R6_ENC, DCLO_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DCLZ_MM64R6 : StdMMR6Rel, R6MMR6Rel, DCLZ_MM64R6_ENC, DCLZ_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DROTR_MM64R6 : StdMMR6Rel, DROTR_MM64R6_ENC, DROTR_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DROTR32_MM64R6 : StdMMR6Rel, DROTR32_MM64R6_ENC, DROTR32_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DROTRV_MM64R6 : StdMMR6Rel, DROTRV_MM64R6_ENC, DROTRV_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def LD_MM64R6 : StdMMR6Rel, LD_MM64R6_ENC, LD_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def LLD_MM64R6 : StdMMR6Rel, R6MMR6Rel, LLD_MM64R6_ENC, LLD_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def LWU_MM64R6 : StdMMR6Rel, LWU_MM64R6_ENC, LWU_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def SD_MM64R6 : StdMMR6Rel, SD_MM64R6_ENC, SD_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DSRL_MM64R6 : StdMMR6Rel, DSRL_MM64R6_ENC, DSRL_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DSRL32_MM64R6 : StdMMR6Rel, DSRL32_MM64R6_ENC, DSRL32_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DSRLV_MM64R6 : StdMMR6Rel, DSRLV_MM64R6_ENC, DSRLV_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DBITSWAP_MM64R6 : R6MMR6Rel, DBITSWAP_MM64R6_ENC, DBITSWAP_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def DLSA_MM64R6 : R6MMR6Rel, DLSA_MM64R6_ENC, DLSA_MM64R6_DESC,
- ISA_MICROMIPS64R6;
- def LWUPC_MM64R6 : R6MMR6Rel, LWUPC_MM64R6_ENC, LWUPC_MM64R6_DESC,
- ISA_MICROMIPS64R6;
}
-
-let AdditionalPredicates = [InMicroMips] in
-defm : MaterializeImms<i64, ZERO_64, DADDIU_MM64R6, LUi64, ORi64>;
-
-//===----------------------------------------------------------------------===//
-//
-// Arbitrary patterns that map to one or more instructions
-//
-//===----------------------------------------------------------------------===//
-
-defm : MipsHiLoRelocs<LUi64, DADDIU_MM64R6, ZERO_64, GPR64Opnd>, SYM_32,
- ISA_MICROMIPS64R6;
-
-defm : MipsHighestHigherHiLoRelocs<LUi64, DADDIU_MM64R6>, SYM_64,
- ISA_MICROMIPS64R6;
-
-def : MipsPat<(addc GPR64:$lhs, GPR64:$rhs),
- (DADDU_MM64R6 GPR64:$lhs, GPR64:$rhs)>, ISA_MICROMIPS64R6;
-def : MipsPat<(addc GPR64:$lhs, immSExt16:$imm),
- (DADDIU_MM64R6 GPR64:$lhs, imm:$imm)>, ISA_MICROMIPS64R6;
-
-
-def : MipsPat<(rotr GPR64:$rt, (i32 (trunc GPR64:$rs))),
- (DROTRV_MM64R6 GPR64:$rt, (EXTRACT_SUBREG GPR64:$rs, sub_32))>,
- ISA_MICROMIPS64R6;
-
-
-def : WrapperPat<tglobaladdr, DADDIU_MM64R6, GPR64>, ISA_MICROMIPS64R6;
-def : WrapperPat<tconstpool, DADDIU_MM64R6, GPR64>, ISA_MICROMIPS64R6;
-def : WrapperPat<texternalsym, DADDIU_MM64R6, GPR64>, ISA_MICROMIPS64R6;
-def : WrapperPat<tblockaddress, DADDIU_MM64R6, GPR64>, ISA_MICROMIPS64R6;
-def : WrapperPat<tjumptable, DADDIU_MM64R6, GPR64>, ISA_MICROMIPS64R6;
-def : WrapperPat<tglobaltlsaddr, DADDIU_MM64R6, GPR64>, ISA_MICROMIPS64R6;
-
-// Carry pattern
-def : MipsPat<(subc GPR64:$lhs, GPR64:$rhs),
- (DSUBU_MM64R6 GPR64:$lhs, GPR64:$rhs)>, ISA_MICROMIPS64R6;
-
-def : MipsPat<(atomic_load_64 addr:$a), (LD_MM64R6 addr:$a)>, ISA_MICROMIPS64R6;
-
-//===----------------------------------------------------------------------===//
-//
-// Instruction aliases
-//
-//===----------------------------------------------------------------------===//
-
-def : MipsInstAlias<"dmtc0 $rt, $rd",
- (DMTC0_MM64R6 COP0Opnd:$rd, GPR64Opnd:$rt, 0), 0>;
-def : MipsInstAlias<"dmfc0 $rt, $rd",
- (DMFC0_MM64R6 GPR64Opnd:$rt, COP0Opnd:$rd, 0), 0>,
- ISA_MICROMIPS64R6;
-def : MipsInstAlias<"daddu $rs, $rt, $imm",
- (DADDIU_MM64R6 GPR64Opnd:$rs,
- GPR64Opnd:$rt,
- simm16_64:$imm),
- 0>, ISA_MICROMIPS64R6;
-def : MipsInstAlias<"daddu $rs, $imm",
- (DADDIU_MM64R6 GPR64Opnd:$rs,
- GPR64Opnd:$rs,
- simm16_64:$imm),
- 0>, ISA_MICROMIPS64R6;
-def : MipsInstAlias<"dsubu $rt, $rs, $imm",
- (DADDIU_MM64R6 GPR64Opnd:$rt,
- GPR64Opnd:$rs,
- InvertedImOperand64:$imm),
- 0>, ISA_MICROMIPS64R6;
-def : MipsInstAlias<"dsubu $rs, $imm",
- (DADDIU_MM64R6 GPR64Opnd:$rs,
- GPR64Opnd:$rs,
- InvertedImOperand64:$imm),
- 0>, ISA_MICROMIPS64R6;
-def : MipsInstAlias<"dneg $rt, $rs",
- (DSUB_MM64R6 GPR64Opnd:$rt, ZERO_64, GPR64Opnd:$rs), 1>,
- ISA_MICROMIPS64R6;
-def : MipsInstAlias<"dneg $rt",
- (DSUB_MM64R6 GPR64Opnd:$rt, ZERO_64, GPR64Opnd:$rt), 1>,
- ISA_MICROMIPS64R6;
-def : MipsInstAlias<"dnegu $rt, $rs",
- (DSUBU_MM64R6 GPR64Opnd:$rt, ZERO_64, GPR64Opnd:$rs), 1>,
- ISA_MICROMIPS64R6;
-def : MipsInstAlias<"dnegu $rt",
- (DSUBU_MM64R6 GPR64Opnd:$rt, ZERO_64, GPR64Opnd:$rt), 1>,
- ISA_MICROMIPS64R6;
-def : MipsInstAlias<"dsll $rd, $rt, $rs",
- (DSLLV_MM64R6 GPR64Opnd:$rd, GPR64Opnd:$rt,
- GPR32Opnd:$rs), 0>, ISA_MICROMIPS64R6;
-def : MipsInstAlias<"dsrl $rd, $rt, $rs",
- (DSRLV_MM64R6 GPR64Opnd:$rd, GPR64Opnd:$rt,
- GPR32Opnd:$rs), 0>, ISA_MICROMIPS64R6;
-def : MipsInstAlias<"dsrl $rd, $rt",
- (DSRLV_MM64R6 GPR64Opnd:$rd, GPR64Opnd:$rd,
- GPR32Opnd:$rt), 0>, ISA_MICROMIPS64R6;
-def : MipsInstAlias<"dsll $rd, $rt",
- (DSLLV_MM64R6 GPR64Opnd:$rd, GPR64Opnd:$rd,
- GPR32Opnd:$rt), 0>, ISA_MICROMIPS64R6;
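
The alias definitions above encode two common assembler conveniences: "dsubu"
with an immediate is emitted as "daddiu" with the negated immediate (via
InvertedImOperand64), and "dneg"/"dnegu" expand to a subtraction from the
$zero register. A minimal standalone C++ sketch of the arithmetic behind those
expansions follows; the helper names (fitsSimm16, daddiu, dsubuAlias,
dnegAlias) are illustrative stand-ins for the assembler's alias expansion, not
the Mips backend API.

    #include <cassert>
    #include <cstdint>

    // 16-bit signed immediate field check, as for simm16_64 above.
    static bool fitsSimm16(int64_t v) { return v >= -32768 && v <= 32767; }

    static int64_t daddiu(int64_t rs, int64_t imm) {
      assert(fitsSimm16(imm));
      return rs + imm;
    }

    // "dsubu $rt, $rs, $imm" has no hardware encoding; the alias emits
    // daddiu with the negated immediate (InvertedImOperand64 in the .td).
    static int64_t dsubuAlias(int64_t rs, int64_t imm) {
      return daddiu(rs, -imm);
    }

    // "dneg $rt, $rs" expands to "dsub $rt, $zero, $rs".
    static int64_t dsub(int64_t rs, int64_t rt) { return rs - rt; }
    static int64_t dnegAlias(int64_t rs) { return dsub(/*$zero*/ 0, rs); }

    int main() {
      assert(dsubuAlias(100, 7) == 93);  // dsubu == daddiu with -7
      assert(dnegAlias(42) == -42);      // dneg == dsub from $zero
      return 0;
    }

The same negation trick is the reason MIPS defines no subtract-immediate
opcode: one immediate-add instruction covers both directions, up to the slight
asymmetry of the two's-complement immediate range.
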
diff --git a/gnu/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp b/gnu/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp
index d9294c49930..3b15a0a3e60 100644
--- a/gnu/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp
+++ b/gnu/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp
@@ -18,6 +18,29 @@
using namespace llvm;
+/// getICmpCode - Encode an icmp predicate into a three-bit mask. These bits
+/// are carefully arranged to allow folding of expressions such as:
+///
+/// (A < B) | (A > B) --> (A != B)
+///
+/// Note that this is only valid if the first and second predicates have the
+/// same sign. It is illegal to do: (A u< B) | (A s> B)
+///
+/// Three bits are used to represent the condition, as follows:
+/// 0 A > B
+/// 1 A == B
+/// 2 A < B
+///
+/// <=> Value Definition
+/// 000 0 Always false
+/// 001 1 A > B
+/// 010 2 A == B
+/// 011 3 A >= B
+/// 100 4 A < B
+/// 101 5 A != B
+/// 110 6 A <= B
+/// 111 7 Always true
+///
unsigned llvm::getICmpCode(const ICmpInst *ICI, bool InvertPred) {
ICmpInst::Predicate Pred = InvertPred ? ICI->getInversePredicate()
: ICI->getPredicate();
@@ -39,6 +62,13 @@ unsigned llvm::getICmpCode(const ICmpInst *ICI, bool InvertPred) {
}
}
+/// getICmpValue - This is the complement of getICmpCode: it turns a
+/// predicate code and two operands into either a constant true or false, or
+/// the predicate for a new ICmp instruction. The sign is passed in to
+/// determine which kind of predicate to use in the new icmp instruction.
+/// A non-null return value is a true or false constant; a null return means
+/// a new ICmp is needed, and its predicate is output in NewICmpPred.
Value *llvm::getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
CmpInst::Predicate &NewICmpPred) {
switch (Code) {
@@ -57,52 +87,10 @@ Value *llvm::getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
return nullptr;
}
+/// PredicatesFoldable - Return true if both predicates match sign or if at
+/// least one of them is an equality comparison (which is signless).
bool llvm::PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
return (CmpInst::isSigned(p1) == CmpInst::isSigned(p2)) ||
(CmpInst::isSigned(p1) && ICmpInst::isEquality(p2)) ||
(CmpInst::isSigned(p2) && ICmpInst::isEquality(p1));
}
-
-bool llvm::decomposeBitTestICmp(const ICmpInst *I, CmpInst::Predicate &Pred,
- Value *&X, Value *&Y, Value *&Z) {
- ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1));
- if (!C)
- return false;
-
- switch (I->getPredicate()) {
- default:
- return false;
- case ICmpInst::ICMP_SLT:
- // X < 0 is equivalent to (X & SignMask) != 0.
- if (!C->isZero())
- return false;
- Y = ConstantInt::get(I->getContext(), APInt::getSignMask(C->getBitWidth()));
- Pred = ICmpInst::ICMP_NE;
- break;
- case ICmpInst::ICMP_SGT:
- // X > -1 is equivalent to (X & SignMask) == 0.
- if (!C->isMinusOne())
- return false;
- Y = ConstantInt::get(I->getContext(), APInt::getSignMask(C->getBitWidth()));
- Pred = ICmpInst::ICMP_EQ;
- break;
- case ICmpInst::ICMP_ULT:
- // X <u 2^n is equivalent to (X & ~(2^n-1)) == 0.
- if (!C->getValue().isPowerOf2())
- return false;
- Y = ConstantInt::get(I->getContext(), -C->getValue());
- Pred = ICmpInst::ICMP_EQ;
- break;
- case ICmpInst::ICMP_UGT:
- // X >u 2^n-1 is equivalent to (X & ~(2^n-1)) != 0.
- if (!(C->getValue() + 1).isPowerOf2())
- return false;
- Y = ConstantInt::get(I->getContext(), ~C->getValue());
- Pred = ICmpInst::ICMP_NE;
- break;
- }
-
- X = I->getOperand(0);
- Z = ConstantInt::getNullValue(C->getType());
- return true;
-}
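
The three-bit predicate encoding documented in the getICmpCode comment above
can be checked independently of LLVM. The following standalone C++ sketch
mirrors the table (bit 0: A > B, bit 1: A == B, bit 2: A < B) and verifies the
advertised fold; the enum and helper names are illustrative only, not the
CmpInstAnalysis API.

    #include <cassert>
    #include <cstdio>

    // Same bit layout as the table: bit 0 = A > B, bit 1 = A == B,
    // bit 2 = A < B.
    enum PredCode : unsigned {
      AlwaysFalse = 0, // 000
      Greater     = 1, // 001  A >  B
      Equal       = 2, // 010  A == B
      GreaterEq   = 3, // 011  A >= B
      Less        = 4, // 100  A <  B
      NotEqual    = 5, // 101  A != B
      LessEq      = 6, // 110  A <= B
      AlwaysTrue  = 7, // 111
    };

    // Evaluate a code against concrete signed operands.
    static bool evalCode(unsigned Code, int A, int B) {
      return ((Code & 1) && A > B) ||
             ((Code & 2) && A == B) ||
             ((Code & 4) && A < B);
    }

    int main() {
      // The fold from the comment: (A < B) | (A > B) --> (A != B).
      assert((Less | Greater) == NotEqual);
      // And a dual: (A < B) | (A == B) --> (A <= B).
      assert((Less | Equal) == LessEq);

      // Exhaustive sanity check: OR on codes matches OR on results.
      for (int A = -2; A <= 2; ++A)
        for (int B = -2; B <= 2; ++B)
          assert(evalCode(Less | Greater, A, B) ==
                 (evalCode(Less, A, B) || evalCode(Greater, A, B)));

      std::puts("icmp code encoding checks passed");
      return 0;
    }

Because the three outcomes of a comparison are mutually exclusive, OR on the
codes corresponds exactly to OR on the predicate results (for predicates of
the same signedness, per PredicatesFoldable above); this is what lets a
combined code be handed straight back to getICmpValue to produce a single
folded icmp.
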
diff --git a/gnu/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h b/gnu/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
index ec7549d4535..931190e43a6 100644
--- a/gnu/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
+++ b/gnu/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
@@ -201,10 +201,6 @@ public:
}
return static_cast<T*>(data);
}
-
- /// Returns true if the root namespace of the given declaration is the 'std'
- /// C++ namespace.
- static bool isInStdNamespace(const Decl *D);
private:
ManagedAnalysis *&getAnalysisImpl(const void* tag);
@@ -406,8 +402,7 @@ private:
};
class AnalysisDeclContextManager {
- typedef llvm::DenseMap<const Decl *, std::unique_ptr<AnalysisDeclContext>>
- ContextMap;
+ typedef llvm::DenseMap<const Decl*, AnalysisDeclContext*> ContextMap;
ContextMap Contexts;
LocationContextManager LocContexts;
@@ -426,7 +421,6 @@ public:
bool addImplicitDtors = false,
bool addInitializers = false,
bool addTemporaryDtors = false,
- bool addLifetime = false,
bool synthesizeBodies = false,
bool addStaticInitBranches = false,
bool addCXXNewAllocator = true,
diff --git a/gnu/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h b/gnu/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h
index b72bce5fc9f..197d27a2f37 100644
--- a/gnu/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h
+++ b/gnu/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h
@@ -19,7 +19,6 @@
#include "llvm/ADT/FoldingSet.h"
namespace clang {
-class CFGBlock;
namespace ento {
@@ -59,9 +58,10 @@ public:
///
/// The last parameter can be used to register a new visitor with the given
/// BugReport while processing a node.
- virtual std::shared_ptr<PathDiagnosticPiece>
- VisitNode(const ExplodedNode *Succ, const ExplodedNode *Pred,
- BugReporterContext &BRC, BugReport &BR) = 0;
+ virtual PathDiagnosticPiece *VisitNode(const ExplodedNode *Succ,
+ const ExplodedNode *Pred,
+ BugReporterContext &BRC,
+ BugReport &BR) = 0;
/// \brief Provide custom definition for the final diagnostic piece on the
/// path - the piece, which is displayed before the path is expanded.
@@ -120,10 +120,10 @@ public:
void Profile(llvm::FoldingSetNodeID &ID) const override;
- std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
- BugReporterContext &BRC,
- BugReport &BR) override;
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
};
class TrackConstraintBRVisitor final
@@ -149,10 +149,10 @@ public:
/// to make all PathDiagnosticPieces created by this visitor.
static const char *getTag();
- std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
- BugReporterContext &BRC,
- BugReport &BR) override;
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
private:
/// Checks if the constraint is valid in the current state.
@@ -171,10 +171,10 @@ public:
ID.AddPointer(&x);
}
- std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
- BugReporterContext &BRC,
- BugReport &BR) override;
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
/// If the statement is a message send expression with nil receiver, returns
/// the receiver expression. Returns NULL otherwise.
@@ -184,11 +184,6 @@ public:
/// Visitor that tries to report interesting diagnostics from conditions.
class ConditionBRVisitor final
: public BugReporterVisitorImpl<ConditionBRVisitor> {
-
- // FIXME: constexpr initialization isn't supported by MSVC2013.
- static const char *const GenericTrueMessage;
- static const char *const GenericFalseMessage;
-
public:
void Profile(llvm::FoldingSetNodeID &ID) const override {
static int x = 0;
@@ -199,48 +194,56 @@ public:
/// to make all PathDiagnosticPieces created by this visitor.
static const char *getTag();
- std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *Prev,
- BugReporterContext &BRC,
- BugReport &BR) override;
-
- std::shared_ptr<PathDiagnosticPiece> VisitNodeImpl(const ExplodedNode *N,
- const ExplodedNode *Prev,
- BugReporterContext &BRC,
- BugReport &BR);
-
- std::shared_ptr<PathDiagnosticPiece>
- VisitTerminator(const Stmt *Term, const ExplodedNode *N,
- const CFGBlock *srcBlk, const CFGBlock *dstBlk, BugReport &R,
- BugReporterContext &BRC);
-
- std::shared_ptr<PathDiagnosticPiece>
- VisitTrueTest(const Expr *Cond, bool tookTrue, BugReporterContext &BRC,
- BugReport &R, const ExplodedNode *N);
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *Prev,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
- std::shared_ptr<PathDiagnosticPiece>
- VisitTrueTest(const Expr *Cond, const DeclRefExpr *DR, const bool tookTrue,
- BugReporterContext &BRC, BugReport &R, const ExplodedNode *N);
-
- std::shared_ptr<PathDiagnosticPiece>
- VisitTrueTest(const Expr *Cond, const BinaryOperator *BExpr,
- const bool tookTrue, BugReporterContext &BRC, BugReport &R,
- const ExplodedNode *N);
-
- std::shared_ptr<PathDiagnosticPiece>
- VisitConditionVariable(StringRef LhsString, const Expr *CondVarExpr,
- const bool tookTrue, BugReporterContext &BRC,
- BugReport &R, const ExplodedNode *N);
+ PathDiagnosticPiece *VisitNodeImpl(const ExplodedNode *N,
+ const ExplodedNode *Prev,
+ BugReporterContext &BRC,
+ BugReport &BR);
+
+ PathDiagnosticPiece *VisitTerminator(const Stmt *Term,
+ const ExplodedNode *N,
+ const CFGBlock *srcBlk,
+ const CFGBlock *dstBlk,
+ BugReport &R,
+ BugReporterContext &BRC);
+
+ PathDiagnosticPiece *VisitTrueTest(const Expr *Cond,
+ bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &R,
+ const ExplodedNode *N);
+
+ PathDiagnosticPiece *VisitTrueTest(const Expr *Cond,
+ const DeclRefExpr *DR,
+ const bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &R,
+ const ExplodedNode *N);
+
+ PathDiagnosticPiece *VisitTrueTest(const Expr *Cond,
+ const BinaryOperator *BExpr,
+ const bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &R,
+ const ExplodedNode *N);
+
+ PathDiagnosticPiece *VisitConditionVariable(StringRef LhsString,
+ const Expr *CondVarExpr,
+ const bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &R,
+ const ExplodedNode *N);
bool patternMatch(const Expr *Ex,
- const Expr *ParentEx,
raw_ostream &Out,
BugReporterContext &BRC,
BugReport &R,
const ExplodedNode *N,
Optional<bool> &prunable);
-
- static bool isPieceMessageGeneric(const PathDiagnosticPiece *Piece);
};
/// \brief Suppress reports that might lead to known false positives.
@@ -258,10 +261,10 @@ public:
ID.AddPointer(getTag());
}
- std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *Prev,
- BugReporterContext &BRC,
- BugReport &BR) override {
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *Prev,
+ BugReporterContext &BRC,
+ BugReport &BR) override {
return nullptr;
}
@@ -290,10 +293,10 @@ public:
ID.AddPointer(R);
}
- std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
- BugReporterContext &BRC,
- BugReport &BR) override;
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
};
class SuppressInlineDefensiveChecksVisitor final
@@ -321,26 +324,10 @@ public:
/// to make all PathDiagnosticPieces created by this visitor.
static const char *getTag();
- std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *Succ,
- const ExplodedNode *Pred,
- BugReporterContext &BRC,
- BugReport &BR) override;
-};
-
-class CXXSelfAssignmentBRVisitor final
- : public BugReporterVisitorImpl<CXXSelfAssignmentBRVisitor> {
-
- bool Satisfied;
-
-public:
- CXXSelfAssignmentBRVisitor() : Satisfied(false) {}
-
- void Profile(llvm::FoldingSetNodeID &ID) const override {}
-
- std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *Succ,
- const ExplodedNode *Pred,
- BugReporterContext &BRC,
- BugReport &BR) override;
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *Succ,
+ const ExplodedNode *Pred,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
};
namespace bugreporter {
diff --git a/gnu/llvm/tools/clang/lib/Analysis/BodyFarm.h b/gnu/llvm/tools/clang/lib/Analysis/BodyFarm.h
index edbe9962465..91379437231 100644
--- a/gnu/llvm/tools/clang/lib/Analysis/BodyFarm.h
+++ b/gnu/llvm/tools/clang/lib/Analysis/BodyFarm.h
@@ -15,7 +15,6 @@
#ifndef LLVM_CLANG_LIB_ANALYSIS_BODYFARM_H
#define LLVM_CLANG_LIB_ANALYSIS_BODYFARM_H
-#include "clang/AST/DeclBase.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
@@ -23,6 +22,7 @@
namespace clang {
class ASTContext;
+class Decl;
class FunctionDecl;
class ObjCMethodDecl;
class ObjCPropertyDecl;