Diffstat (limited to 'lib/CodeGen/ImplicitNullChecks.cpp')
-rw-r--r--  lib/CodeGen/ImplicitNullChecks.cpp  206
1 file changed, 146 insertions, 60 deletions
diff --git a/lib/CodeGen/ImplicitNullChecks.cpp b/lib/CodeGen/ImplicitNullChecks.cpp
index 9588dfb72058..920c2a372a9b 100644
--- a/lib/CodeGen/ImplicitNullChecks.cpp
+++ b/lib/CodeGen/ImplicitNullChecks.cpp
@@ -22,6 +22,7 @@
 // With the help of a runtime that understands the .fault_maps section,
 // faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
 // a page fault.
+// Store and LoadStore are also supported.
 //
 //===----------------------------------------------------------------------===//
 
@@ -29,6 +30,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/FaultMaps.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
@@ -151,25 +153,44 @@ class ImplicitNullChecks : public MachineFunctionPass {
   const TargetRegisterInfo *TRI = nullptr;
   AliasAnalysis *AA = nullptr;
   MachineModuleInfo *MMI = nullptr;
+  MachineFrameInfo *MFI = nullptr;
 
   bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
                                  SmallVectorImpl<NullCheck> &NullCheckList);
-  MachineInstr *insertFaultingLoad(MachineInstr *LoadMI, MachineBasicBlock *MBB,
-                                   MachineBasicBlock *HandlerMBB);
+  MachineInstr *insertFaultingInstr(MachineInstr *MI, MachineBasicBlock *MBB,
+                                    MachineBasicBlock *HandlerMBB);
   void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);
 
-  /// Is \p MI a memory operation that can be used to implicitly null check the
-  /// value in \p PointerReg? \p PrevInsts is the set of instruction seen since
+  enum AliasResult {
+    AR_NoAlias,
+    AR_MayAlias,
+    AR_WillAliasEverything
+  };
+  /// Returns AR_NoAlias if the memory operation in \p MI does not alias with
+  /// \p PrevMI, AR_MayAlias if they may alias, and AR_WillAliasEverything if
+  /// they may alias and any further memory operation may alias with \p PrevMI.
+  AliasResult areMemoryOpsAliased(MachineInstr &MI, MachineInstr *PrevMI);
+
+  enum SuitabilityResult {
+    SR_Suitable,
+    SR_Unsuitable,
+    SR_Impossible
+  };
+  /// Return SR_Suitable if \p MI is a memory operation that can be used to
+  /// implicitly null check the value in \p PointerReg, SR_Unsuitable if
+  /// \p MI cannot be used to null check, and SR_Impossible if there is no
+  /// point in continuing the search because no later instruction can be
+  /// used either. \p PrevInsts is the set of instructions seen since
   /// the explicit null check on \p PointerReg.
-  bool isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
-                          ArrayRef<MachineInstr *> PrevInsts);
+  SuitabilityResult isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
+                                       ArrayRef<MachineInstr *> PrevInsts);
 
   /// Return true if \p FaultingMI can be hoisted from after the
   /// instructions in \p InstsSeenSoFar to before them.  Set \p Dependence to a
   /// non-null value if we also need to (and legally can) hoist a dependency.
-  bool canHoistLoadInst(MachineInstr *FaultingMI, unsigned PointerReg,
-                        ArrayRef<MachineInstr *> InstsSeenSoFar,
-                        MachineBasicBlock *NullSucc, MachineInstr *&Dependence);
+  bool canHoistInst(MachineInstr *FaultingMI, unsigned PointerReg,
+                    ArrayRef<MachineInstr *> InstsSeenSoFar,
+                    MachineBasicBlock *NullSucc, MachineInstr *&Dependence);
 
 public:
   static char ID;
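The two enums introduced above form a small lattice that the block scan later in this diff consumes: SR_Unsuitable only disqualifies the current instruction, while SR_Impossible abandons the whole block. A minimal standalone sketch of that consumption pattern, using plain stand-in types rather than the pass's real API (Instr, findCandidate, and their fields are illustrative only):

// Minimal standalone model (stand-in types, not the pass's real API) of how a
// block scan consumes SuitabilityResult: SR_Unsuitable skips one instruction,
// SR_Impossible abandons the whole block.
#include <cstdio>
#include <vector>

enum SuitabilityResult { SR_Suitable, SR_Unsuitable, SR_Impossible };

struct Instr {
  const char *Name;
  SuitabilityResult Suitability; // pretend result of isSuitableMemoryOp()
  bool Hoistable;                // pretend result of canHoistInst()
};

// Returns the instruction chosen to carry the implicit null check, or nullptr.
const Instr *findCandidate(const std::vector<Instr> &Block) {
  for (const Instr &MI : Block) {
    if (MI.Suitability == SR_Impossible)
      return nullptr; // nothing later can be used either; give up on the block
    if (MI.Suitability == SR_Suitable && MI.Hoistable)
      return &MI;     // this instruction becomes the faulting operation
    // SR_Unsuitable: keep scanning later instructions.
  }
  return nullptr;
}

int main() {
  std::vector<Instr> Block = {{"add r11, 8", SR_Unsuitable, false},
                              {"movl (%r10), %esi", SR_Suitable, true}};
  const Instr *C = findCandidate(Block);
  std::printf("candidate: %s\n", C ? C->Name : "none");
  return 0;
}

analyzeBlockForNullChecks applies exactly this policy further down in the diff, where SR_Impossible makes it return false immediately.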
@@ -193,7 +214,7 @@ public:
 }
 
 bool ImplicitNullChecks::canHandle(const MachineInstr *MI) {
-  if (MI->isCall() || MI->mayStore() || MI->hasUnmodeledSideEffects())
+  if (MI->isCall() || MI->hasUnmodeledSideEffects())
     return false;
   auto IsRegMask = [](const MachineOperand &MO) { return MO.isRegMask(); };
   (void)IsRegMask;
@@ -248,7 +269,7 @@ bool ImplicitNullChecks::canReorder(const MachineInstr *A,
       unsigned RegB = MOB.getReg();
 
-      if (TRI->regsOverlap(RegA, RegB))
+      if (TRI->regsOverlap(RegA, RegB) && (MOA.isDef() || MOB.isDef()))
         return false;
     }
   }
@@ -260,6 +281,7 @@ bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) {
   TII = MF.getSubtarget().getInstrInfo();
   TRI = MF.getRegInfo().getTargetRegisterInfo();
   MMI = &MF.getMMI();
+  MFI = &MF.getFrameInfo();
   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
 
   SmallVector<NullCheck, 16> NullCheckList;
@@ -283,36 +305,91 @@ static bool AnyAliasLiveIn(const TargetRegisterInfo *TRI,
   return false;
 }
 
-bool ImplicitNullChecks::isSuitableMemoryOp(
-    MachineInstr &MI, unsigned PointerReg, ArrayRef<MachineInstr *> PrevInsts) {
+ImplicitNullChecks::AliasResult
+ImplicitNullChecks::areMemoryOpsAliased(MachineInstr &MI,
+                                        MachineInstr *PrevMI) {
+  // If it is not a memory access, skip the check.
+  if (!(PrevMI->mayStore() || PrevMI->mayLoad()))
+    return AR_NoAlias;
+  // If neither instruction is a store, any aliasing between them is harmless.
+  if (!(MI.mayStore() || PrevMI->mayStore()))
+    return AR_NoAlias;
+  // We lost the memory operand info, so conservatively assume aliasing. If the
+  // operation without info is a store, nothing later can be checked against it
+  // either, so give up entirely.
+  if (MI.memoperands_empty())
+    return MI.mayStore() ? AR_WillAliasEverything : AR_MayAlias;
+  if (PrevMI->memoperands_empty())
+    return PrevMI->mayStore() ? AR_WillAliasEverything : AR_MayAlias;
+
+  for (MachineMemOperand *MMO1 : MI.memoperands()) {
+    // MMO1 should have a value because it comes from the operation we'd like
+    // to use as the implicit null check.
+    assert(MMO1->getValue() && "MMO1 should have a Value!");
+    for (MachineMemOperand *MMO2 : PrevMI->memoperands()) {
+      if (const PseudoSourceValue *PSV = MMO2->getPseudoValue()) {
+        if (PSV->mayAlias(MFI))
+          return AR_MayAlias;
+        continue;
+      }
+      llvm::AliasResult AAResult = AA->alias(
+          MemoryLocation(MMO1->getValue(), MemoryLocation::UnknownSize,
+                         MMO1->getAAInfo()),
+          MemoryLocation(MMO2->getValue(), MemoryLocation::UnknownSize,
+                         MMO2->getAAInfo()));
+      if (AAResult != NoAlias)
+        return AR_MayAlias;
+    }
+  }
+  return AR_NoAlias;
+}
+
+ImplicitNullChecks::SuitabilityResult
+ImplicitNullChecks::isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
+                                       ArrayRef<MachineInstr *> PrevInsts) {
   int64_t Offset;
   unsigned BaseReg;
 
   if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI) ||
       BaseReg != PointerReg)
-    return false;
-
-  // We want the load to be issued at a sane offset from PointerReg, so that
-  // if PointerReg is null then the load reliably page faults.
-  if (!(MI.mayLoad() && !MI.isPredicable() && Offset < PageSize))
-    return false;
-
-  // Finally, we need to make sure that the load instruction actually is
-  // loading from PointerReg, and there isn't some re-definition of PointerReg
-  // between the compare and the load.
+    return SR_Unsuitable;
+
+  // We want the memory access to be issued at a sane offset from PointerReg,
+  // so that if PointerReg is null then the access reliably page faults.
+  if (!((MI.mayLoad() || MI.mayStore()) && !MI.isPredicable() &&
+        Offset < PageSize))
+    return SR_Unsuitable;
+
+  // Finally, we need to make sure that the access instruction actually is
+  // accessing from PointerReg, and there isn't some re-definition of
+  // PointerReg between the compare and the memory access.
+  // If PointerReg has already been redefined, there is no point in continuing
+  // the search: this condition will fail for any further instruction as well.
+  SuitabilityResult Suitable = SR_Suitable;
   for (auto *PrevMI : PrevInsts)
-    for (auto &PrevMO : PrevMI->operands())
-      if (PrevMO.isReg() && PrevMO.getReg() &&
+    for (auto &PrevMO : PrevMI->operands()) {
+      if (PrevMO.isReg() && PrevMO.getReg() && PrevMO.isDef() &&
           TRI->regsOverlap(PrevMO.getReg(), PointerReg))
-        return false;
-
-  return true;
+        return SR_Impossible;
+
+      // Check whether the current memory access aliases with a previous one.
+      // If we already found that it aliases, there is no need to check again,
+      // but we keep checking the base pointer: that can yield SR_Impossible.
+      if (Suitable == SR_Suitable) {
+        AliasResult AR = areMemoryOpsAliased(MI, PrevMI);
+        if (AR == AR_WillAliasEverything)
+          return SR_Impossible;
+        if (AR == AR_MayAlias)
+          Suitable = SR_Unsuitable;
+      }
+    }
+  return Suitable;
 }
 
-bool ImplicitNullChecks::canHoistLoadInst(
-    MachineInstr *FaultingMI, unsigned PointerReg,
-    ArrayRef<MachineInstr *> InstsSeenSoFar, MachineBasicBlock *NullSucc,
-    MachineInstr *&Dependence) {
+bool ImplicitNullChecks::canHoistInst(MachineInstr *FaultingMI,
+                                      unsigned PointerReg,
+                                      ArrayRef<MachineInstr *> InstsSeenSoFar,
+                                      MachineBasicBlock *NullSucc,
+                                      MachineInstr *&Dependence) {
   auto DepResult = computeDependence(FaultingMI, InstsSeenSoFar);
   if (!DepResult.CanReorder)
     return false;
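The new areMemoryOpsAliased boils down to a small decision table over mayLoad/mayStore and the presence of memory operand info. Below is a self-contained model of that table using plain structs; MemOp and classify are illustrative stand-ins, not LLVM API, and the model simply assumes that the AliasAnalysis/PseudoSourceValue query performed by the real code proves independence when operand info is present:

// Standalone model of the classification in areMemoryOpsAliased (plain flags,
// not LLVM API). When memory operand info is available the real code also
// queries AliasAnalysis and PseudoSourceValue; this model simply assumes that
// query proves independence.
#include <cassert>
#include <cstdio>

enum AliasResult { AR_NoAlias, AR_MayAlias, AR_WillAliasEverything };

struct MemOp {
  bool MayLoad;
  bool MayStore;
  bool HasMemOperandInfo; // models !memoperands_empty()
};

AliasResult classify(const MemOp &MI, const MemOp &Prev) {
  if (!Prev.MayLoad && !Prev.MayStore)
    return AR_NoAlias; // the earlier instruction is not a memory access
  if (!MI.MayStore && !Prev.MayStore)
    return AR_NoAlias; // two loads cannot conflict in a harmful way
  if (!MI.HasMemOperandInfo) // info lost on the candidate operation
    return MI.MayStore ? AR_WillAliasEverything : AR_MayAlias;
  if (!Prev.HasMemOperandInfo) // info lost on the earlier operation
    return Prev.MayStore ? AR_WillAliasEverything : AR_MayAlias;
  return AR_NoAlias; // modelled outcome of the AA / PSV query
}

int main() {
  MemOp CandidateStore{/*MayLoad=*/false, /*MayStore=*/true,
                       /*HasMemOperandInfo=*/false};
  MemOp EarlierLoad{/*MayLoad=*/true, /*MayStore=*/false,
                    /*HasMemOperandInfo=*/true};
  // A store whose memory operand info is gone poisons the rest of the scan.
  assert(classify(CandidateStore, EarlierLoad) == AR_WillAliasEverything);
  std::printf("store with lost info will alias everything\n");
  return 0;
}

The AR_WillAliasEverything case is what lets isSuitableMemoryOp return SR_Impossible: once a store with lost memory info is involved, no later candidate can ever be proven independent of it.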
@@ -359,7 +436,8 @@ bool ImplicitNullChecks::canHoistLoadInst(
     // The Dependency can't be re-defining the base register -- then we won't
     // get the memory operation on the address we want.  This is already
     // checked in \c IsSuitableMemoryOp.
-    assert(!TRI->regsOverlap(DependenceMO.getReg(), PointerReg) &&
+    assert(!(DependenceMO.isDef() &&
+             TRI->regsOverlap(DependenceMO.getReg(), PointerReg)) &&
           "Should have been checked before!");
   }
 
@@ -481,9 +559,11 @@ bool ImplicitNullChecks::analyzeBlockForNullChecks(
       return false;
 
     MachineInstr *Dependence;
-    if (isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar) &&
-        canHoistLoadInst(&MI, PointerReg, InstsSeenSoFar, NullSucc,
-                         Dependence)) {
+    SuitabilityResult SR = isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar);
+    if (SR == SR_Impossible)
+      return false;
+    if (SR == SR_Suitable &&
+        canHoistInst(&MI, PointerReg, InstsSeenSoFar, NullSucc, Dependence)) {
       NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
                                  NullSucc, Dependence);
       return true;
@@ -495,36 +575,42 @@ bool ImplicitNullChecks::analyzeBlockForNullChecks(
   return false;
 }
 
-/// Wrap a machine load instruction, LoadMI, into a FAULTING_LOAD_OP machine
-/// instruction.  The FAULTING_LOAD_OP instruction does the same load as LoadMI
-/// (defining the same register), and branches to HandlerMBB if the load
-/// faults.  The FAULTING_LOAD_OP instruction is inserted at the end of MBB.
-MachineInstr *
-ImplicitNullChecks::insertFaultingLoad(MachineInstr *LoadMI,
-                                       MachineBasicBlock *MBB,
-                                       MachineBasicBlock *HandlerMBB) {
+/// Wrap a machine instruction, MI, into a FAULTING machine instruction.
+/// The FAULTING instruction does the same load/store as MI
+/// (defining the same register), and branches to HandlerMBB if the memory
+/// access faults.  The FAULTING instruction is inserted at the end of MBB.
+MachineInstr *ImplicitNullChecks::insertFaultingInstr(
+    MachineInstr *MI, MachineBasicBlock *MBB, MachineBasicBlock *HandlerMBB) {
   const unsigned NoRegister = 0; // Guaranteed to be the NoRegister value for
                                  // all targets.
 
   DebugLoc DL;
-  unsigned NumDefs = LoadMI->getDesc().getNumDefs();
+  unsigned NumDefs = MI->getDesc().getNumDefs();
   assert(NumDefs <= 1 && "other cases unhandled!");
 
   unsigned DefReg = NoRegister;
   if (NumDefs != 0) {
-    DefReg = LoadMI->defs().begin()->getReg();
-    assert(std::distance(LoadMI->defs().begin(), LoadMI->defs().end()) == 1 &&
+    DefReg = MI->defs().begin()->getReg();
+    assert(std::distance(MI->defs().begin(), MI->defs().end()) == 1 &&
           "expected exactly one def!");
   }
 
-  auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_LOAD_OP), DefReg)
+  FaultMaps::FaultKind FK;
+  if (MI->mayLoad())
+    FK =
+        MI->mayStore() ? FaultMaps::FaultingLoadStore : FaultMaps::FaultingLoad;
+  else
+    FK = FaultMaps::FaultingStore;
+
+  auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_OP), DefReg)
+                 .addImm(FK)
                 .addMBB(HandlerMBB)
-                 .addImm(LoadMI->getOpcode());
+                 .addImm(MI->getOpcode());
 
-  for (auto &MO : LoadMI->uses())
-    MIB.addOperand(MO);
+  for (auto &MO : MI->uses())
+    MIB.add(MO);
 
-  MIB.setMemRefs(LoadMI->memoperands_begin(), LoadMI->memoperands_end());
+  MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
 
   return MIB;
 }
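The FaultMaps::FaultKind recorded on the new FAULTING_OP is derived purely from the mayLoad/mayStore flags of the wrapped instruction. A small sketch of that selection with a stand-in enum (the real values come from llvm/CodeGen/FaultMaps.h); the comment also spells out the operand order produced by the BuildMI call above:

// Sketch of the fault-kind selection above, with a stand-in enum mirroring the
// three kinds referenced from llvm/CodeGen/FaultMaps.h (not the real header).
#include <cassert>

enum FaultKind { FaultingLoad, FaultingStore, FaultingLoadStore };

// Only called for instructions that may load and/or store, as in the pass.
FaultKind selectFaultKind(bool MayLoad, bool MayStore) {
  if (MayLoad)
    return MayStore ? FaultingLoadStore : FaultingLoad;
  return FaultingStore;
}

// The FAULTING_OP built above then carries, in order: the optional def
// register, the fault-kind immediate, the handler basic block, the original
// opcode as an immediate, and finally the use and memory operands copied
// from the wrapped instruction.
int main() {
  assert(selectFaultKind(/*MayLoad=*/true, /*MayStore=*/false) == FaultingLoad);
  assert(selectFaultKind(/*MayLoad=*/false, /*MayStore=*/true) == FaultingStore);
  assert(selectFaultKind(/*MayLoad=*/true, /*MayStore=*/true) == FaultingLoadStore);
  return 0;
}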
@@ -545,18 +631,18 @@ void ImplicitNullChecks::rewriteNullChecks(
       NC.getCheckBlock()->insert(NC.getCheckBlock()->end(), DepMI);
     }
 
-    // Insert a faulting load where the conditional branch was originally. We
-    // check earlier ensures that this bit of code motion is legal.  We do not
-    // touch the successors list for any basic block since we haven't changed
-    // control flow, we've just made it implicit.
-    MachineInstr *FaultingLoad = insertFaultingLoad(
+    // Insert a faulting instruction where the conditional branch was
+    // originally. The check we did earlier ensures this bit of code motion
+    // is legal. We do not touch the successors list for any basic block
+    // since we haven't changed control flow, we've just made it implicit.
+    MachineInstr *FaultingInstr = insertFaultingInstr(
         NC.getMemOperation(), NC.getCheckBlock(), NC.getNullSucc());
     // Now the values defined by MemOperation, if any, are live-in of
     // the block of MemOperation.
-    // The original load operation may define implicit-defs alongside
-    // the loaded value.
+    // The original operation may define implicit-defs alongside
+    // the value.
     MachineBasicBlock *MBB = NC.getMemOperation()->getParent();
-    for (const MachineOperand &MO : FaultingLoad->operands()) {
+    for (const MachineOperand &MO : FaultingInstr->operands()) {
       if (!MO.isReg() || !MO.isDef())
         continue;
       unsigned Reg = MO.getReg();
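The comments in rewriteNullChecks describe a three-step rewrite: hoist the lone dependency (if any) into the check block, emit the faulting instruction where the conditional branch used to be, and make the registers it defines live-in to the block that originally contained the memory operation. A standalone model of that ordering, using stand-in types rather than LLVM API (the live-in step is only sketched here, since the diff is cut off mid-loop above):

// Standalone model (stand-in types, not LLVM API) of the rewrite order the
// comments above describe: hoist the dependency, emit the faulting operation
// where the branch was, then make its defs live-in to the original block.
#include <set>
#include <vector>

using Reg = int;

struct Instr {
  std::vector<Reg> Defs;
};

struct Block {
  std::vector<Instr> Instrs;
  std::set<Reg> LiveIns;
};

void rewriteOneNullCheck(Block &CheckBlock, Block &NotNullBlock,
                         const Instr *Dependency, const Instr &FaultingOp) {
  // 1) The lone dependency, if any, is hoisted ahead of the implicit check.
  if (Dependency)
    CheckBlock.Instrs.push_back(*Dependency);
  // 2) The faulting operation replaces the conditional branch; control flow
  //    and successor lists stay untouched, the check is just implicit now.
  CheckBlock.Instrs.push_back(FaultingOp);
  // 3) Anything the faulting operation defines is now produced in the check
  //    block, so it must be live-in to the block that held the memory op.
  for (Reg R : FaultingOp.Defs)
    NotNullBlock.LiveIns.insert(R);
}

int main() {
  Block Check, NotNull;
  Instr FaultingLoad{{/*defined reg id=*/4}};
  rewriteOneNullCheck(Check, NotNull, /*Dependency=*/nullptr, FaultingLoad);
  return NotNull.LiveIns.count(4) ? 0 : 1;
}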