author     mortimer <mortimer@cvs.openbsd.org>    2021-01-01 16:43:06 +0000
committer  mortimer <mortimer@cvs.openbsd.org>    2021-01-01 16:43:06 +0000
commit     87ab0880b95e8d21f6689d6e1a738c98124d772d (patch)
tree       725eb83238ac0b2915ee8a69ee1e497a447542df /gnu/llvm
parent     45f4f2a4bb08e495ee18bbd2f086bdfcc19df3ae (diff)
Skip leaf function optimization under some circumstances.
On architectures that do not always spill the return address to the stack, corruption of local stack frame data in a leaf function can span stack frames undetected if the retguard cookie is not spilled in the function. In leaf functions on these architectures, now spill the retguard cookie if the function contains arrays or has variables which have their address taken.
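As a hypothetical illustration of the affected pattern (this example is not part of the commit), consider a leaf function of roughly this shape on such an architecture:

/*
 * Hypothetical example. On an architecture that keeps the return address in
 * a register for leaf functions, an overflow of buf[] could previously
 * corrupt the caller's frame without disturbing the return address or an
 * unspilled retguard cookie. With this change, the array allocation forces
 * the cookie to be spilled to the stack so the corruption is detected.
 */
int sum16(const int *src) {
  int buf[16];              // array alloca: triggers the new spill heuristic
  for (int i = 0; i < 16; ++i)
    buf[i] = src[i];        // leaf function: no calls, RA may stay in a register
  int s = 0;
  for (int i = 0; i < 16; ++i)
    s += buf[i];
  return s;
}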
Diffstat (limited to 'gnu/llvm')
-rw-r--r--  gnu/llvm/llvm/lib/CodeGen/ReturnProtectorLowering.cpp  111
1 file changed, 110 insertions(+), 1 deletion(-)
diff --git a/gnu/llvm/llvm/lib/CodeGen/ReturnProtectorLowering.cpp b/gnu/llvm/llvm/lib/CodeGen/ReturnProtectorLowering.cpp
index 49ca3441b42..cff5b886133 100644
--- a/gnu/llvm/llvm/lib/CodeGen/ReturnProtectorLowering.cpp
+++ b/gnu/llvm/llvm/lib/CodeGen/ReturnProtectorLowering.cpp
@@ -17,6 +17,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCRegisterInfo.h"
@@ -47,6 +48,70 @@ static void markUsedRegsInSuccessors(MachineBasicBlock &MBB,
     markUsedRegsInSuccessors(*SuccMBB, Used, Visited);
 }
 
+static bool containsProtectableData(Type *Ty) {
+  if (!Ty)
+    return false;
+
+  if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
+    return true;
+
+  if (StructType *ST = dyn_cast<StructType>(Ty)) {
+    for (StructType::element_iterator I = ST->element_begin(),
+                                      E = ST->element_end();
+         I != E; ++I) {
+      if (containsProtectableData(*I))
+        return true;
+    }
+  }
+  return false;
+}
+
+// Mostly the same as StackProtector::HasAddressTaken
+static bool hasAddressTaken(const Instruction *AI,
+                            SmallPtrSet<const PHINode *, 16> &visitedPHI) {
+  for (const User *U : AI->users()) {
+    const auto *I = cast<Instruction>(U);
+    switch (I->getOpcode()) {
+    case Instruction::Store:
+      if (AI == cast<StoreInst>(I)->getValueOperand())
+        return true;
+      break;
+    case Instruction::AtomicCmpXchg:
+      if (AI == cast<AtomicCmpXchgInst>(I)->getNewValOperand())
+        return true;
+      break;
+    case Instruction::PtrToInt:
+      if (AI == cast<PtrToIntInst>(I)->getOperand(0))
+        return true;
+      break;
+    case Instruction::BitCast:
+    case Instruction::GetElementPtr:
+    case Instruction::Select:
+    case Instruction::AddrSpaceCast:
+      if (hasAddressTaken(I, visitedPHI))
+        return true;
+      break;
+    case Instruction::PHI: {
+      const auto *PN = cast<PHINode>(I);
+      if (visitedPHI.insert(PN).second)
+        if (hasAddressTaken(PN, visitedPHI))
+          return true;
+      break;
+    }
+    case Instruction::Load:
+    case Instruction::AtomicRMW:
+    case Instruction::Ret:
+      // These uses do not let the address escape; keep scanning other users.
+      break;
+    default:
+      // Conservatively return true for any instruction that takes an address
+      // operand, but is not handled above.
+      return true;
+    }
+  }
+  return false;
+}
+
 /// setupReturnProtector - Checks the function for ROP friendly return
 /// instructions and sets ReturnProtectorNeeded if found.
 void ReturnProtectorLowering::setupReturnProtector(MachineFunction &MF) const {
@@ -115,7 +180,51 @@ bool ReturnProtectorLowering::determineReturnProtectorRegister(
       break;
   }
 
-  if (!hasCalls) {
+  // If the return address is always on the stack, then we
+  // want to try to keep the return protector cookie unspilled.
+  // This prevents a single stack smash from corrupting both the
+  // return protector cookie and the return address.
+  llvm::Triple::ArchType arch = MF.getTarget().getTargetTriple().getArch();
+  bool returnAddrOnStack = arch == llvm::Triple::ArchType::x86 ||
+                           arch == llvm::Triple::ArchType::x86_64;
+
+  // For architectures which do not spill a return address
+  // to the stack by default, it is possible that in a leaf
+  // function neither the return address nor the retguard cookie
+  // will be spilled, and stack corruption may be missed.
+  // Here, we check leaf functions on these kinds of architectures
+  // to see if they have any variable sized local allocations,
+  // array type allocations, allocations which contain array
+  // types, or allocations that have their address taken. If any
+  // of these conditions are met, then we skip the leaf function
+  // optimization and spill the retguard cookie to the stack.
+  bool hasLocals = MFI.hasVarSizedObjects();
+  if (!hasCalls && !hasLocals && !returnAddrOnStack) {
+    for (const BasicBlock &BB : MF.getFunction()) {
+      for (const Instruction &I : BB) {
+        if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
+          // Check for array allocations
+          Type *Ty = AI->getAllocatedType();
+          if (AI->isArrayAllocation() || containsProtectableData(Ty)) {
+            hasLocals = true;
+            break;
+          }
+          // Check for address taken
+          SmallPtrSet<const PHINode *, 16> visitedPHIs;
+          if (hasAddressTaken(AI, visitedPHIs)) {
+            hasLocals = true;
+            break;
+          }
+        }
+      }
+      if (hasLocals)
+        break;
+    }
+  }
+
+  bool tryLeafOptimize = !hasCalls && (returnAddrOnStack || !hasLocals);
+
+  if (tryLeafOptimize) {
     SmallSet<unsigned, 16> LeafUsed;
     SmallSet<int, 24> LeafVisited;
     markUsedRegsInSuccessors(MF.front(), LeafUsed, LeafVisited);
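A note on the type walk above: containsProtectableData() recurses through struct elements, so an array nested anywhere in an aggregate counts as protectable. A minimal sketch of the idea (hypothetical snippet, not part of the commit; assumes an llvm::LLVMContext named Ctx is in scope):

// Build some types and consider what the predicate would report for each.
llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
llvm::Type *Arr = llvm::ArrayType::get(I32, 8);           // [8 x i32]
llvm::Type *Rec = llvm::StructType::get(Ctx, {I32, Arr}); // { i32, [8 x i32] }
// containsProtectableData(I32) -> false (plain scalar)
// containsProtectableData(Arr) -> true  (array type)
// containsProtectableData(Rec) -> true  (array found via element recursion)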
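And a hedged illustration of the address-taken path (hypothetical example, not from the commit): in unoptimized IR, casting a local's address to an integer appears as a PtrToInt use of the alloca, so hasAddressTaken() reports true and the retguard cookie is spilled even in a leaf function with no arrays.

// No calls, no arrays, but the local's address escapes via the cast,
// so the leaf optimization is skipped and the cookie is spilled.
unsigned long leak_frame_addr(void) {
  long x = 0;
  return (unsigned long)&x;   // PtrToInt use of the alloca for x
}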