Diffstat (limited to 'llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp')
 -rw-r--r--   llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp   366
 1 file changed, 289 insertions(+), 77 deletions(-)
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 73b763710fdf..87cc60d51bc2 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -23,6 +23,7 @@
 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
 #include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 #include "llvm/CodeGen/LowLevelType.h"
 #include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
@@ -32,6 +33,7 @@
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
 #include "llvm/CodeGen/StackProtector.h"
 #include "llvm/CodeGen/SwitchLoweringUtils.h"
 #include "llvm/CodeGen/TargetFrameLowering.h"
@@ -47,6 +49,7 @@
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/DebugInfo.h"
 #include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DiagnosticInfo.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/GetElementPtrTypeIterator.h"
 #include "llvm/IR/InlineAsm.h"
@@ -114,7 +117,7 @@ static void reportTranslationError(MachineFunction &MF,
   R << (" (in function: " + MF.getName() + ")").str();
 
   if (TPC.isGlobalISelAbortEnabled())
-    report_fatal_error(R.getMsg());
+    report_fatal_error(Twine(R.getMsg()));
   else
     ORE.emit(R);
 }
@@ -566,7 +569,7 @@ bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
 
   if (BrInst.isUnconditional()) {
     // If the unconditional target is the layout successor, fallthrough.
-    if (!CurMBB.isLayoutSuccessor(Succ0MBB))
+    if (OptLevel == CodeGenOpt::None || !CurMBB.isLayoutSuccessor(Succ0MBB))
       MIRBuilder.buildBr(*Succ0MBB);
 
     // Link successors.
@@ -739,8 +742,7 @@ bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
   // FIXME: At the moment we don't do any splitting optimizations here like
   // SelectionDAG does, so this worklist only has one entry.
   while (!WorkList.empty()) {
-    SwitchWorkListItem W = WorkList.back();
-    WorkList.pop_back();
+    SwitchWorkListItem W = WorkList.pop_back_val();
     if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
       return false;
   }
@@ -784,7 +786,7 @@ bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
 
   JT.Reg = Sub.getReg(0);
 
-  if (JTH.OmitRangeCheck) {
+  if (JTH.FallthroughUnreachable) {
     if (JT.MBB != HeaderBB->getNextNode())
       MIB.buildBr(*JT.MBB);
     return true;
@@ -936,11 +938,10 @@ bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
     }
   }
 
-  // Skip the range check if the fallthrough block is unreachable.
   if (FallthroughUnreachable)
-    JTH->OmitRangeCheck = true;
+    JTH->FallthroughUnreachable = true;
 
-  if (!JTH->OmitRangeCheck)
+  if (!JTH->FallthroughUnreachable)
     addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
   addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
   CurMBB->normalizeSuccProbs();
@@ -1004,14 +1005,22 @@ void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
   Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
   auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
 
-  // Ensure that the type will fit the mask value.
+  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
+  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
+
   LLT MaskTy = SwitchOpTy;
-  for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
-    if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
-      // Switch table case range are encoded into series of masks.
-      // Just use pointer type, it's guaranteed to fit.
-      MaskTy = LLT::scalar(64);
-      break;
+  if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
+      !isPowerOf2_32(MaskTy.getSizeInBits()))
+    MaskTy = LLT::scalar(PtrTy.getSizeInBits());
+  else {
+    // Ensure that the type will fit the mask value.
+    for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
+      if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
+        // Switch table case range are encoded into series of masks.
+        // Just use pointer type, it's guaranteed to fit.
+        MaskTy = LLT::scalar(PtrTy.getSizeInBits());
+        break;
+      }
     }
   }
   Register SubReg = RangeSub.getReg(0);
@@ -1023,13 +1032,13 @@ void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
 
   MachineBasicBlock *MBB = B.Cases[0].ThisBB;
 
-  if (!B.OmitRangeCheck)
+  if (!B.FallthroughUnreachable)
     addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
 
   SwitchBB->normalizeSuccProbs();
 
-  if (!B.OmitRangeCheck) {
+  if (!B.FallthroughUnreachable) {
     // Conditional branch to the default block.
     auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
     auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
@@ -1129,10 +1138,8 @@ bool IRTranslator::lowerBitTestWorkItem(
     BTB->DefaultProb -= DefaultProb / 2;
   }
 
-  if (FallthroughUnreachable) {
-    // Skip the range check if the fallthrough block is unreachable.
-    BTB->OmitRangeCheck = true;
-  }
+  if (FallthroughUnreachable)
+    BTB->FallthroughUnreachable = true;
 
   // If we're in the right place, emit the bit test header right now.
   if (CurMBB == SwitchMBB) {
@@ -1297,11 +1304,9 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
 
     MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
     Align BaseAlign = getMemOpAlign(LI);
-    AAMDNodes AAMetadata;
-    LI.getAAMetadata(AAMetadata);
     auto MMO = MF->getMachineMemOperand(
         Ptr, Flags, MRI->getType(Regs[i]),
-        commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges,
+        commonAlignment(BaseAlign, Offsets[i] / 8), LI.getAAMetadata(), Ranges,
         LI.getSyncScopeID(), LI.getOrdering());
     MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
   }
@@ -1339,11 +1344,9 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
 
     MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
     Align BaseAlign = getMemOpAlign(SI);
-    AAMDNodes AAMetadata;
-    SI.getAAMetadata(AAMetadata);
     auto MMO = MF->getMachineMemOperand(
         Ptr, Flags, MRI->getType(Vals[i]),
-        commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr,
+        commonAlignment(BaseAlign, Offsets[i] / 8), SI.getAAMetadata(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
     MIRBuilder.buildStore(Vals[i], Addr, *MMO);
   }
@@ -1590,8 +1593,7 @@ bool IRTranslator::translateMemFunc(const CallInst &CI,
   Align DstAlign;
   Align SrcAlign;
   unsigned IsVol =
-      cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1))
-          ->getZExtValue();
+      cast<ConstantInt>(CI.getArgOperand(CI.arg_size() - 1))->getZExtValue();
 
   if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
     DstAlign = MCI->getDestAlign().valueOrOne();
@@ -1763,6 +1765,10 @@ unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
     return TargetOpcode::G_VECREDUCE_UMAX;
   case Intrinsic::vector_reduce_umin:
     return TargetOpcode::G_VECREDUCE_UMIN;
+  case Intrinsic::lround:
+    return TargetOpcode::G_LROUND;
+  case Intrinsic::llround:
+    return TargetOpcode::G_LLROUND;
   }
   return Intrinsic::not_intrinsic;
 }
@@ -1779,7 +1785,7 @@ bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
 
   // Yes. Let's translate it.
   SmallVector<llvm::SrcOp, 4> VRegs;
-  for (auto &Arg : CI.arg_operands())
+  for (auto &Arg : CI.args())
     VRegs.push_back(getOrCreateVReg(*Arg));
 
   MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
@@ -2172,7 +2178,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
 
     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
     // is the same on all targets.
-    for (unsigned Idx = 0, E = CI.getNumArgOperands(); Idx < E; ++Idx) {
+    for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
       Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
       if (isa<ConstantPointerNull>(Arg))
         continue; // Skip null pointers. They represent a hole in index space.
@@ -2228,6 +2234,23 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     return true;
   }
+  case Intrinsic::trap:
+  case Intrinsic::debugtrap:
+  case Intrinsic::ubsantrap: {
+    StringRef TrapFuncName =
+        CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
+    if (TrapFuncName.empty())
+      break; // Use the default handling.
+
+    CallLowering::CallLoweringInfo Info;
+    if (ID == Intrinsic::ubsantrap) {
+      Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),
+                               CI.getArgOperand(0)->getType(), 0});
+    }
+    Info.Callee = MachineOperand::CreateES(TrapFuncName.data());
+    Info.CB = &CI;
+    Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
+    return CLI->lowerCall(MIRBuilder, Info);
+  }
 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
   case Intrinsic::INTRINSIC:
 #include "llvm/IR/ConstrainedOps.def"
@@ -2321,6 +2344,8 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
   if (CI.isInlineAsm())
     return translateInlineAsm(CI, MIRBuilder);
 
+  diagnoseDontCall(CI);
+
   Intrinsic::ID ID = Intrinsic::not_intrinsic;
   if (F && F->isIntrinsic()) {
     ID = F->getIntrinsicID();
@@ -2347,7 +2372,7 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
   if (isa<FPMathOperator>(CI))
     MIB->copyIRFlags(CI);
 
-  for (auto &Arg : enumerate(CI.arg_operands())) {
+  for (auto &Arg : enumerate(CI.args())) {
     // If this is required to be an immediate, don't materialize it in a
     // register.
     if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
@@ -2360,10 +2385,15 @@
       } else {
         MIB.addFPImm(cast<ConstantFP>(Arg.value()));
       }
-    } else if (auto MD = dyn_cast<MetadataAsValue>(Arg.value())) {
-      auto *MDN = dyn_cast<MDNode>(MD->getMetadata());
-      if (!MDN) // This was probably an MDString.
-        return false;
+    } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {
+      auto *MD = MDVal->getMetadata();
+      auto *MDN = dyn_cast<MDNode>(MD);
+      if (!MDN) {
+        if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))
+          MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);
+        else // This was probably an MDString.
+          return false;
+      }
       MIB.addMetadata(MDN);
     } else {
       ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
@@ -2472,32 +2502,19 @@ bool IRTranslator::translateInvoke(const User &U,
   if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI()))
     return false;
 
-  bool LowerInlineAsm = false;
-  if (I.isInlineAsm()) {
-    const InlineAsm *IA = cast<InlineAsm>(I.getCalledOperand());
-    if (!IA->canThrow()) {
-      // Fast path without emitting EH_LABELs.
-
-      if (!translateInlineAsm(I, MIRBuilder))
-        return false;
-
-      MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB(),
-                        *ReturnMBB = &getMBB(*ReturnBB);
-
-      // Update successor info.
-      addSuccessorWithProb(InvokeMBB, ReturnMBB, BranchProbability::getOne());
-
-      MIRBuilder.buildBr(*ReturnMBB);
-      return true;
-    } else {
-      LowerInlineAsm = true;
-    }
-  }
+  bool LowerInlineAsm = I.isInlineAsm();
+  bool NeedEHLabel = true;
+  // If it can't throw then use a fast-path without emitting EH labels.
+  if (LowerInlineAsm)
+    NeedEHLabel = (cast<InlineAsm>(I.getCalledOperand()))->canThrow();
 
   // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
   // the region covered by the try.
-  MCSymbol *BeginSymbol = Context.createTempSymbol();
-  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
+  MCSymbol *BeginSymbol = nullptr;
+  if (NeedEHLabel) {
+    BeginSymbol = Context.createTempSymbol();
+    MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
+  }
 
   if (LowerInlineAsm) {
     if (!translateInlineAsm(I, MIRBuilder))
       return false;
   } else if (!translateCallBase(I, MIRBuilder))
     return false;
 
-  MCSymbol *EndSymbol = Context.createTempSymbol();
-  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
+  MCSymbol *EndSymbol = nullptr;
+  if (NeedEHLabel) {
+    EndSymbol = Context.createTempSymbol();
+    MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
+  }
 
   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
   BranchProbabilityInfo *BPI = FuncInfo.BPI;
@@ -2528,7 +2548,12 @@ bool IRTranslator::translateInvoke(const User &U,
   }
   InvokeMBB->normalizeSuccProbs();
 
-  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
+  if (NeedEHLabel) {
+    assert(BeginSymbol && "Expected a begin symbol!");
+    assert(EndSymbol && "Expected an end symbol!");
+    MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
+  }
+
   MIRBuilder.buildBr(ReturnMBB);
   return true;
 }
@@ -2670,6 +2695,28 @@ bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
   return true;
 }
 
+bool IRTranslator::translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
+  if (!MF->getTarget().Options.TrapUnreachable)
+    return true;
+
+  auto &UI = cast<UnreachableInst>(U);
+  // We may be able to ignore unreachable behind a noreturn call.
+  if (MF->getTarget().Options.NoTrapAfterNoreturn) {
+    const BasicBlock &BB = *UI.getParent();
+    if (&UI != &BB.front()) {
+      BasicBlock::const_iterator PredI =
+          std::prev(BasicBlock::const_iterator(UI));
+      if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
+        if (Call->doesNotReturn())
+          return true;
+      }
+    }
+  }
+
+  MIRBuilder.buildIntrinsic(Intrinsic::trap, ArrayRef<Register>(), true);
+  return true;
+}
+
 bool IRTranslator::translateInsertElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
   // If it is a <1 x Ty> vector, use the scalar as it is
@@ -2757,14 +2804,11 @@ bool IRTranslator::translateAtomicCmpXchg(const User &U,
   Register Cmp = getOrCreateVReg(*I.getCompareOperand());
   Register NewVal = getOrCreateVReg(*I.getNewValOperand());
 
-  AAMDNodes AAMetadata;
-  I.getAAMetadata(AAMetadata);
-
   MIRBuilder.buildAtomicCmpXchgWithSuccess(
       OldValRes, SuccessRes, Addr, Cmp, NewVal,
       *MF->getMachineMemOperand(
           MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
-          getMemOpAlign(I), AAMetadata, nullptr, I.getSyncScopeID(),
+          getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
          I.getSuccessOrdering(), I.getFailureOrdering()));
   return true;
 }
@@ -2824,14 +2868,11 @@ bool IRTranslator::translateAtomicRMW(const User &U,
     break;
   }
 
-  AAMDNodes AAMetadata;
-  I.getAAMetadata(AAMetadata);
-
   MIRBuilder.buildAtomicRMW(
       Opcode, Res, Addr, Val,
       *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                 Flags, MRI->getType(Val), getMemOpAlign(I),
-                                AAMetadata, nullptr, I.getSyncScopeID(),
+                                I.getAAMetadata(), nullptr, I.getSyncScopeID(),
                                 I.getOrdering()));
   return true;
 }
@@ -2985,7 +3026,8 @@ bool IRTranslator::translate(const Constant &C, Register Reg) {
   return true;
 }
 
-void IRTranslator::finalizeBasicBlock() {
+bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
+                                      MachineBasicBlock &MBB) {
   for (auto &BTB : SL->BitTestCases) {
     // Emit header first, if it wasn't already emitted.
     if (!BTB.Emitted)
@@ -3005,7 +3047,7 @@ void IRTranslator::finalizeBasicBlock() {
       // test, and delete the last bit test.
 
       MachineBasicBlock *NextMBB;
-      if (BTB.ContiguousRange && j + 2 == ej) {
+      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
         // Second-to-last bit-test with contiguous range: fall through to the
         // target of the final bit test.
         NextMBB = BTB.Cases[j + 1].TargetBB;
@@ -3019,7 +3061,7 @@ void IRTranslator::finalizeBasicBlock() {
       emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j],
                       MBB);
 
-      if (BTB.ContiguousRange && j + 2 == ej) {
+      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
         // We need to record the replacement phi edge here that normally
         // happens in emitBitTestCase before we delete the case, otherwise the
         // phi edge will be lost.
@@ -3054,6 +3096,176 @@ void IRTranslator::finalizeBasicBlock() {
   for (auto &SwCase : SL->SwitchCases)
     emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
   SL->SwitchCases.clear();
+
+  // Check if we need to generate stack-protector guard checks.
+  StackProtector &SP = getAnalysis<StackProtector>();
+  if (SP.shouldEmitSDCheck(BB)) {
+    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
+    bool FunctionBasedInstrumentation =
+        TLI.getSSPStackGuardCheck(*MF->getFunction().getParent());
+    SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
+  }
+  // Handle stack protector.
+  if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
+    LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");
+    return false;
+  } else if (SPDescriptor.shouldEmitStackProtector()) {
+    MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
+    MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
+
+    // Find the split point to split the parent mbb. At the same time copy all
+    // physical registers used in the tail of parent mbb into virtual registers
+    // before the split point and back into physical registers after the split
+    // point. This prevents us needing to deal with Live-ins and many other
+    // register allocation issues caused by us splitting the parent mbb. The
+    // register allocator will clean up said virtual copies later on.
+    MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
+        ParentMBB, *MF->getSubtarget().getInstrInfo());
+
+    // Splice the terminator of ParentMBB into SuccessMBB.
+    SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
+                       ParentMBB->end());
+
+    // Add compare/jump on neq/jump to the parent BB.
+    if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
+      return false;
+
+    // CodeGen Failure MBB if we have not codegened it yet.
+    MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
+    if (FailureMBB->empty()) {
+      if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
+        return false;
+    }
+
+    // Clear the Per-BB State.
+    SPDescriptor.resetPerBBState();
+  }
+  return true;
+}
+
+bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
+                                          MachineBasicBlock *ParentBB) {
+  CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
+  // First create the loads to the guard/stack slot for the comparison.
+  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
+  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
+  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
+  LLT PtrMemTy = getLLTForMVT(TLI.getPointerMemTy(*DL));
+
+  MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
+  int FI = MFI.getStackProtectorIndex();
+
+  Register Guard;
+  Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
+  const Module &M = *ParentBB->getParent()->getFunction().getParent();
+  Align Align = DL->getPrefTypeAlign(Type::getInt8PtrTy(M.getContext()));
+
+  // Generate code to load the content of the guard slot.
+  Register GuardVal =
+      CurBuilder
+          ->buildLoad(PtrMemTy, StackSlotPtr,
+                      MachinePointerInfo::getFixedStack(*MF, FI), Align,
+                      MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
+          .getReg(0);
+
+  if (TLI.useStackGuardXorFP()) {
+    LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
+    return false;
+  }
+
+  // Retrieve guard check function, nullptr if instrumentation is inlined.
+  if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
+    // This path is currently untestable on GlobalISel, since the only platform
+    // that needs this seems to be Windows, and we fall back on that currently.
+    // The code still lives here in case that changes.
+    // Silence warning about unused variable until the code below that uses
+    // 'GuardCheckFn' is enabled.
+    (void)GuardCheckFn;
+    return false;
+#if 0
+    // The target provides a guard check function to validate the guard value.
+    // Generate a call to that function with the content of the guard slot as
+    // argument.
+    FunctionType *FnTy = GuardCheckFn->getFunctionType();
+    assert(FnTy->getNumParams() == 1 && "Invalid function signature");
+    ISD::ArgFlagsTy Flags;
+    if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
+      Flags.setInReg();
+    CallLowering::ArgInfo GuardArgInfo(
+        {GuardVal, FnTy->getParamType(0), {Flags}});
+
+    CallLowering::CallLoweringInfo Info;
+    Info.OrigArgs.push_back(GuardArgInfo);
+    Info.CallConv = GuardCheckFn->getCallingConv();
+    Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
+    Info.OrigRet = {Register(), FnTy->getReturnType()};
+    if (!CLI->lowerCall(MIRBuilder, Info)) {
+      LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
+      return false;
+    }
+    return true;
+#endif
+  }
+
+  // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
+  // Otherwise, emit a volatile load to retrieve the stack guard value.
+  if (TLI.useLoadStackGuardNode()) {
+    Guard =
+        MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
+    getStackGuard(Guard, *CurBuilder);
+  } else {
+    // TODO: test using android subtarget when we support @llvm.thread.pointer.
+    const Value *IRGuard = TLI.getSDagStackGuard(M);
+    Register GuardPtr = getOrCreateVReg(*IRGuard);
+
+    Guard = CurBuilder
+                ->buildLoad(PtrMemTy, GuardPtr,
+                            MachinePointerInfo::getFixedStack(*MF, FI), Align,
+                            MachineMemOperand::MOLoad |
+                                MachineMemOperand::MOVolatile)
+                .getReg(0);
+  }
+
+  // Perform the comparison.
+  auto Cmp =
+      CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
+  // If the guard/stackslot do not equal, branch to failure MBB.
+  CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());
+  // Otherwise branch to success MBB.
+  CurBuilder->buildBr(*SPD.getSuccessMBB());
+  return true;
+}
+
+bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
+                                           MachineBasicBlock *FailureBB) {
+  CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
+  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
+
+  const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
+  const char *Name = TLI.getLibcallName(Libcall);
+
+  CallLowering::CallLoweringInfo Info;
+  Info.CallConv = TLI.getLibcallCallingConv(Libcall);
+  Info.Callee = MachineOperand::CreateES(Name);
+  Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
+                  0};
+  if (!CLI->lowerCall(*CurBuilder, Info)) {
+    LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
+    return false;
+  }
+
+  // On PS4, the "return address" must still be within the calling function,
+  // even if it's at the very end, so emit an explicit TRAP here.
+  // Passing 'true' for doesNotReturn above won't generate the trap for us.
+  // WebAssembly needs an unreachable instruction after a non-returning call,
+  // because the function return type can be different from __stack_chk_fail's
+  // return type (void).
+  const TargetMachine &TM = MF->getTarget();
+  if (TM.getTargetTriple().isPS4CPU() || TM.getTargetTriple().isWasm()) {
+    LLVM_DEBUG(dbgs() << "Unhandled trap emission for stack protector fail\n");
+    return false;
+  }
+  return true;
 }
 
 void IRTranslator::finalizeFunction() {
@@ -3069,6 +3281,7 @@ void IRTranslator::finalizeFunction() {
   EntryBuilder.reset();
   CurBuilder.reset();
   FuncInfo.clear();
+  SPDescriptor.resetPerFunctionState();
 }
 
 /// Returns true if a BasicBlock \p BB within a variadic function contains a
@@ -3079,7 +3292,7 @@ static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
 
   // Walk the block backwards, because tail calls usually only appear at the
  // end of a block.
-  return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) {
+  return llvm::any_of(llvm::reverse(BB), [](const Instruction &I) {
     const auto *CI = dyn_cast<CallInst>(&I);
     return CI && CI->isMustTailCall();
  });
@@ -3088,8 +3301,6 @@ static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
   MF = &CurMF;
   const Function &F = MF->getFunction();
-  if (F.empty())
-    return false;
   GISelCSEAnalysisWrapper &Wrapper =
       getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
   // Set the CSEConfig and run the analysis.
@@ -3257,7 +3468,8 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
       return false;
     }
 
-    finalizeBasicBlock();
+    if (!finalizeBasicBlock(*BB, MBB))
+      return false;
   }
 #ifndef NDEBUG
   WrapperObserver.removeObserver(&Verifier);
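
Two behavioral changes above are easy to lose among the mechanical renames. The first is in emitBitTestHeader: the bit-test mask type is no longer hardcoded to s64 when a case mask overflows the switch operand. It is now derived from the target pointer width, and operands wider than a pointer or with a non-power-of-2 size are normalized to the pointer width up front. Below is a standalone C++ model of that selection rule using plain integers; chooseMaskBits and the two helpers are illustrative stand-ins for the LLVM utilities (LLT, isPowerOf2_32, isUIntN), not the LLVM API itself:

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative stand-ins for llvm::isPowerOf2_32 / llvm::isUIntN.
static bool isPowerOf2(unsigned Bits) { return Bits && !(Bits & (Bits - 1)); }

static bool isUIntN(unsigned Bits, uint64_t Value) {
  return Bits >= 64 || Value <= (uint64_t(1) << Bits) - 1;
}

// Pick the scalar width used for bit-test masks. SwitchOpBits is the width
// of the switch operand; PtrBits is the target pointer width.
unsigned chooseMaskBits(unsigned SwitchOpBits, unsigned PtrBits,
                        const std::vector<uint64_t> &CaseMasks) {
  // Over-wide or non-power-of-2 operand types are normalized to the pointer
  // width, which later legalization can always handle.
  if (SwitchOpBits > PtrBits || !isPowerOf2(SwitchOpBits))
    return PtrBits;
  // Otherwise widen to pointer width only if some case mask doesn't fit the
  // operand type.
  for (uint64_t Mask : CaseMasks)
    if (!isUIntN(SwitchOpBits, Mask))
      return PtrBits;
  return SwitchOpBits;
}

int main() {
  // A 16-bit operand whose masks all fit keeps its own width...
  assert(chooseMaskBits(16, 64, {0x5, 0x81}) == 16);
  // ...an i3 operand (not a power of two) is normalized to pointer width...
  assert(chooseMaskBits(3, 64, {0x5}) == 64);
  // ...and a mask that overflows the operand also forces pointer width.
  assert(chooseMaskBits(8, 32, {0x1FF}) == 32);
  return 0;
}

Using the pointer width instead of a hardcoded 64 presumably also avoids forcing a 64-bit mask operation onto 32-bit targets.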

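The second is the new translateUnreachable, which only emits a trap for an IR `unreachable` when the target requests it (TrapUnreachable), and can additionally skip the trap when the `unreachable` immediately follows a noreturn call (NoTrapAfterNoreturn). A minimal decision-function sketch of that policy follows; the struct and function names are illustrative, not the IRTranslator interface:

#include <cassert>

// Illustrative stand-in for the two TargetOptions flags consulted by the
// new IRTranslator::translateUnreachable.
struct TargetOpts {
  bool TrapUnreachable;
  bool NoTrapAfterNoreturn;
};

// PrevIsNoreturnCall: the instruction immediately before the `unreachable`
// is a call known not to return.
bool shouldEmitTrapForUnreachable(const TargetOpts &Opts,
                                  bool PrevIsNoreturnCall) {
  if (!Opts.TrapUnreachable)
    return false; // Target doesn't want traps on unreachable at all.
  if (Opts.NoTrapAfterNoreturn && PrevIsNoreturnCall)
    return false; // The noreturn call already guarantees no fall-through.
  return true;    // Emit a call to @llvm.trap.
}

int main() {
  assert(!shouldEmitTrapForUnreachable({false, false}, false));
  assert(shouldEmitTrapForUnreachable({true, false}, true));
  assert(!shouldEmitTrapForUnreachable({true, true}, true));
  assert(shouldEmitTrapForUnreachable({true, true}, false));
  return 0;
}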