Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/BranchFolding.cpp                     |  5
-rw-r--r--  lib/CodeGen/MachineFunction.cpp                   | 14
-rw-r--r--  lib/CodeGen/MachineInstr.cpp                      |  6
-rw-r--r--  lib/CodeGen/ScheduleDAGInstrs.cpp                 | 63
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp  | 39
-rw-r--r--  lib/CodeGen/StackColoring.cpp                     | 58
6 files changed, 96 insertions, 89 deletions
diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp
index 5309549762920..3c439e66944b2 100644
--- a/lib/CodeGen/BranchFolding.cpp
+++ b/lib/CodeGen/BranchFolding.cpp
@@ -1475,13 +1475,14 @@ ReoptimizeBlock:
       bool PredAnalyzable =
           !TII->analyzeBranch(*Pred, PredTBB, PredFBB, PredCond, true);
 
-      if (PredAnalyzable && !PredCond.empty() && PredTBB == MBB) {
+      if (PredAnalyzable && !PredCond.empty() && PredTBB == MBB &&
+          PredTBB != PredFBB) {
         // The predecessor has a conditional branch to this block which consists
         // of only a tail call. Try to fold the tail call into the conditional
         // branch.
         if (TII->canMakeTailCallConditional(PredCond, TailCall)) {
           // TODO: It would be nice if analyzeBranch() could provide a pointer
-          // to the branch insturction so replaceBranchWithTailCall() doesn't
+          // to the branch instruction so replaceBranchWithTailCall() doesn't
           // have to search for it.
           TII->replaceBranchWithTailCall(*Pred, PredCond, TailCall);
           ++NumTailCalls;
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index f88e175a97762..742b095d955e8 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -330,6 +330,20 @@ MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                MMO->getOrdering(), MMO->getFailureOrdering());
 }
 
+MachineMemOperand *
+MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
+                                      const AAMDNodes &AAInfo) {
+  MachinePointerInfo MPI = MMO->getValue() ?
+             MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
+             MachinePointerInfo(MMO->getPseudoValue(), MMO->getOffset());
+
+  return new (Allocator)
+             MachineMemOperand(MPI, MMO->getFlags(), MMO->getSize(),
+                               MMO->getBaseAlignment(), AAInfo,
+                               MMO->getRanges(), MMO->getSyncScopeID(),
+                               MMO->getOrdering(), MMO->getFailureOrdering());
+}
+
 MachineInstr::mmo_iterator
 MachineFunction::allocateMemRefsArray(unsigned long Num) {
   return Allocator.Allocate<MachineMemOperand *>(Num);
diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp
index afea5575a3ae5..535757ed87c1a 100644
--- a/lib/CodeGen/MachineInstr.cpp
+++ b/lib/CodeGen/MachineInstr.cpp
@@ -578,10 +578,8 @@ bool MachinePointerInfo::isDereferenceable(unsigned Size, LLVMContext &C,
   if (BasePtr == nullptr)
     return false;
 
-  return isDereferenceableAndAlignedPointer(BasePtr, 1,
-                                            APInt(DL.getPointerSize(),
-                                                  Offset + Size),
-                                            DL);
+  return isDereferenceableAndAlignedPointer(
+      BasePtr, 1, APInt(DL.getPointerSizeInBits(), Offset + Size), DL);
 }
 
 /// getConstantPool - Return a MachinePointerInfo record that refers to the
diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp
index ccd937950a743..99baa07390eb9 100644
--- a/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -121,63 +121,6 @@ ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
   SchedModel.init(ST.getSchedModel(), &ST, TII);
 }
 
-/// This is the function that does the work of looking through basic
-/// ptrtoint+arithmetic+inttoptr sequences.
-static const Value *getUnderlyingObjectFromInt(const Value *V) {
-  do {
-    if (const Operator *U = dyn_cast<Operator>(V)) {
-      // If we find a ptrtoint, we can transfer control back to the
-      // regular getUnderlyingObjectFromInt.
-      if (U->getOpcode() == Instruction::PtrToInt)
-        return U->getOperand(0);
-      // If we find an add of a constant, a multiplied value, or a phi, it's
-      // likely that the other operand will lead us to the base
-      // object. We don't have to worry about the case where the
-      // object address is somehow being computed by the multiply,
-      // because our callers only care when the result is an
-      // identifiable object.
-      if (U->getOpcode() != Instruction::Add ||
-          (!isa<ConstantInt>(U->getOperand(1)) &&
-           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
-           !isa<PHINode>(U->getOperand(1))))
-        return V;
-      V = U->getOperand(0);
-    } else {
-      return V;
-    }
-    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
-  } while (true);
-}
-
-/// This is a wrapper around GetUnderlyingObjects and adds support for basic
-/// ptrtoint+arithmetic+inttoptr sequences.
-static void getUnderlyingObjects(const Value *V,
-                                 SmallVectorImpl<Value *> &Objects,
-                                 const DataLayout &DL) {
-  SmallPtrSet<const Value *, 16> Visited;
-  SmallVector<const Value *, 4> Working(1, V);
-  do {
-    V = Working.pop_back_val();
-
-    SmallVector<Value *, 4> Objs;
-    GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
-
-    for (Value *V : Objs) {
-      if (!Visited.insert(V).second)
-        continue;
-      if (Operator::getOpcode(V) == Instruction::IntToPtr) {
-        const Value *O =
-          getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
-        if (O->getType()->isPointerTy()) {
-          Working.push_back(O);
-          continue;
-        }
-      }
-      Objects.push_back(const_cast<Value *>(V));
-    }
-  } while (!Working.empty());
-}
-
 /// If this machine instr has memory reference information and it can be tracked
 /// to a normal reference to a known object, return the Value for that object.
 static void getUnderlyingObjectsForInstr(const MachineInstr *MI,
@@ -208,12 +151,10 @@ static void getUnderlyingObjectsForInstr(const MachineInstr *MI,
         Objects.push_back(UnderlyingObjectsVector::value_type(PSV, MayAlias));
       } else if (const Value *V = MMO->getValue()) {
         SmallVector<Value *, 4> Objs;
-        getUnderlyingObjects(V, Objs, DL);
+        getUnderlyingObjectsForCodeGen(V, Objs, DL);
 
         for (Value *V : Objs) {
-          if (!isIdentifiedObject(V))
-            return false;
-
+          assert(isIdentifiedObject(V));
           Objects.push_back(UnderlyingObjectsVector::value_type(V, true));
         }
       } else
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 41c3f5f235eab..127312076207c 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -99,6 +99,27 @@ LimitFPPrecision("limit-float-precision",
 //       store [4096 x i8] %data, [4096 x i8]* %buffer
 static const unsigned MaxParallelChains = 64;
 
+// True if the Value passed requires ABI mangling as it is a parameter to a
+// function or a return value from a function which is not an intrinsic.
+static bool isABIRegCopy(const Value * V) {
+  const bool IsRetInst = V && isa<ReturnInst>(V);
+  const bool IsCallInst = V && isa<CallInst>(V);
+  const bool IsInLineAsm =
+      IsCallInst && static_cast<const CallInst *>(V)->isInlineAsm();
+  const bool IsIndirectFunctionCall =
+      IsCallInst && !IsInLineAsm &&
+      !static_cast<const CallInst *>(V)->getCalledFunction();
+  // It is possible that the call instruction is an inline asm statement or an
+  // indirect function call in which case the return value of
+  // getCalledFunction() would be nullptr.
+  const bool IsInstrinsicCall =
+      IsCallInst && !IsInLineAsm && !IsIndirectFunctionCall &&
+      static_cast<const CallInst *>(V)->getCalledFunction()->getIntrinsicID() !=
+          Intrinsic::not_intrinsic;
+
+  return IsRetInst || (IsCallInst && (!IsInLineAsm && !IsInstrinsicCall));
+}
+
 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                       const SDValue *Parts, unsigned NumParts,
                                       MVT PartVT, EVT ValueVT, const Value *V,
@@ -1026,13 +1047,9 @@ SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
 
   if (It != FuncInfo.ValueMap.end()) {
     unsigned InReg = It->second;
-    bool IsABIRegCopy =
-        V && ((isa<CallInst>(V) &&
-               !(static_cast<const CallInst *>(V))->isInlineAsm()) ||
-              isa<ReturnInst>(V));
 
     RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
-                     DAG.getDataLayout(), InReg, Ty, IsABIRegCopy);
+                     DAG.getDataLayout(), InReg, Ty, isABIRegCopy(V));
     SDValue Chain = DAG.getEntryNode();
     Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
                                  V);
@@ -1221,13 +1238,9 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
   // If this is an instruction which fast-isel has deferred, select it now.
   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
     unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
-    bool IsABIRegCopy =
-        V && ((isa<CallInst>(V) &&
-               !(static_cast<const CallInst *>(V))->isInlineAsm()) ||
-              isa<ReturnInst>(V));
 
     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
-                     Inst->getType(), IsABIRegCopy);
+                     Inst->getType(), isABIRegCopy(V));
     SDValue Chain = DAG.getEntryNode();
     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
   }
@@ -8281,13 +8294,9 @@ SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   // If this is an InlineAsm we have to match the registers required, not the
   // notional registers required by the type.
-  bool IsABIRegCopy =
-      V && ((isa<CallInst>(V) &&
-             !(static_cast<const CallInst *>(V))->isInlineAsm()) ||
-            isa<ReturnInst>(V));
 
   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
-                   V->getType(), IsABIRegCopy);
+                   V->getType(), isABIRegCopy(V));
   SDValue Chain = DAG.getEntryNode();
 
   ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
diff --git a/lib/CodeGen/StackColoring.cpp b/lib/CodeGen/StackColoring.cpp
index 6bac39c7ee77c..e5fc5402cb41b 100644
--- a/lib/CodeGen/StackColoring.cpp
+++ b/lib/CodeGen/StackColoring.cpp
@@ -37,6 +37,7 @@
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
 #include "llvm/CodeGen/SlotIndexes.h"
 #include "llvm/CodeGen/StackProtector.h"
 #include "llvm/CodeGen/WinEHFuncInfo.h"
@@ -889,6 +890,10 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
 
   // Keep a list of *allocas* which need to be remapped.
   DenseMap<const AllocaInst*, const AllocaInst*> Allocas;
+
+  // Keep a list of allocas which has been affected by the remap.
+  SmallPtrSet<const AllocaInst*, 32> MergedAllocas;
+
   for (const std::pair<int, int> &SI : SlotRemap) {
     const AllocaInst *From = MFI->getObjectAllocation(SI.first);
     const AllocaInst *To = MFI->getObjectAllocation(SI.second);
@@ -908,6 +913,10 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
       Inst = Cast;
     }
 
+    // We keep both slots to maintain AliasAnalysis metadata later.
+    MergedAllocas.insert(From);
+    MergedAllocas.insert(To);
+
     // Allow the stack protector to adjust its value map to account for the
     // upcoming replacement.
     SP->adjustForColoring(From, To);
@@ -939,13 +948,6 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
 
       // Update the MachineMemOperand to use the new alloca.
       for (MachineMemOperand *MMO : I.memoperands()) {
-        // FIXME: In order to enable the use of TBAA when using AA in CodeGen,
-        // we'll also need to update the TBAA nodes in MMOs with values
-        // derived from the merged allocas. When doing this, we'll need to use
-        // the same variant of GetUnderlyingObjects that is used by the
-        // instruction scheduler (that can look through ptrtoint/inttoptr
-        // pairs).
-
         // We've replaced IR-level uses of the remapped allocas, so we only
         // need to replace direct uses here.
         const AllocaInst *AI = dyn_cast_or_null<AllocaInst>(MMO->getValue());
@@ -997,6 +999,48 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
         MO.setIndex(ToSlot);
         FixedInstr++;
       }
+
+      // We adjust AliasAnalysis information for merged stack slots.
+      MachineSDNode::mmo_iterator NewMemOps =
+          MF->allocateMemRefsArray(I.getNumMemOperands());
+      unsigned MemOpIdx = 0;
+      bool ReplaceMemOps = false;
+      for (MachineMemOperand *MMO : I.memoperands()) {
+        // If this memory location can be a slot remapped here,
+        // we remove AA information.
+        bool MayHaveConflictingAAMD = false;
+        if (MMO->getAAInfo()) {
+          if (const Value *MMOV = MMO->getValue()) {
+            SmallVector<Value *, 4> Objs;
+            getUnderlyingObjectsForCodeGen(MMOV, Objs, MF->getDataLayout());
+
+            if (Objs.empty())
+              MayHaveConflictingAAMD = true;
+            else
+              for (Value *V : Objs) {
+                // If this memory location comes from a known stack slot
+                // that is not remapped, we continue checking.
+                // Otherwise, we need to invalidate AA infomation.
+                const AllocaInst *AI = dyn_cast_or_null<AllocaInst>(V);
+                if (AI && MergedAllocas.count(AI)) {
+                  MayHaveConflictingAAMD = true;
+                  break;
+                }
+              }
+          }
+        }
+        if (MayHaveConflictingAAMD) {
+          NewMemOps[MemOpIdx++] = MF->getMachineMemOperand(MMO, AAMDNodes());
+          ReplaceMemOps = true;
+        }
+        else
+          NewMemOps[MemOpIdx++] = MMO;
+      }
+
+      // If any memory operand is updated, set memory references of
+      // this instruction.
+      if (ReplaceMemOps)
+        I.setMemRefs(std::make_pair(NewMemOps, I.getNumMemOperands()));
     }
 
   // Update the location of C++ catch objects for the MSVC personality routine.