Diffstat (limited to 'contrib/llvm-project/llvm/lib/Analysis')
13 files changed, 255 insertions(+), 204 deletions(-)
diff --git a/contrib/llvm-project/llvm/lib/Analysis/AssumptionCache.cpp b/contrib/llvm-project/llvm/lib/Analysis/AssumptionCache.cpp
index fb3a6f8de2d6..1b7277df0e0c 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/AssumptionCache.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/AssumptionCache.cpp
@@ -17,6 +17,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/AssumeBundleQueries.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/InstrTypes.h"
@@ -77,9 +78,15 @@ findAffectedValues(CallBase *CI, TargetTransformInfo *TTI,
   };
 
   for (unsigned Idx = 0; Idx != CI->getNumOperandBundles(); Idx++) {
-    if (CI->getOperandBundleAt(Idx).Inputs.size() > ABA_WasOn &&
-        CI->getOperandBundleAt(Idx).getTagName() != IgnoreBundleTag)
-      AddAffected(CI->getOperandBundleAt(Idx).Inputs[ABA_WasOn], Idx);
+    OperandBundleUse Bundle = CI->getOperandBundleAt(Idx);
+    if (Bundle.getTagName() == "separate_storage") {
+      assert(Bundle.Inputs.size() == 2 &&
+             "separate_storage must have two args");
+      AddAffected(getUnderlyingObject(Bundle.Inputs[0]), Idx);
+      AddAffected(getUnderlyingObject(Bundle.Inputs[1]), Idx);
+    } else if (Bundle.Inputs.size() > ABA_WasOn &&
+               Bundle.getTagName() != IgnoreBundleTag)
+      AddAffected(Bundle.Inputs[ABA_WasOn], Idx);
   }
 
   Value *Cond = CI->getArgOperand(0), *A, *B;
diff --git a/contrib/llvm-project/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/contrib/llvm-project/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 3de147368f23..a4a0846df7af 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -69,7 +69,7 @@ static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                           cl::init(true));
 
 static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
-                                                   cl::Hidden, cl::init(false));
+                                                   cl::Hidden, cl::init(true));
 
 /// SearchLimitReached / SearchTimes shows how often the limit of
 /// to decompose GEPs is reached. It will affect the precision
@@ -639,7 +639,7 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
         continue;
 
       // Don't attempt to analyze GEPs if the scalable index is not zero.
-      TypeSize AllocTypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
+      TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
       if (AllocTypeSize.isScalable()) {
         Decomposed.Base = V;
         return Decomposed;
@@ -650,7 +650,7 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
       continue;
     }
 
-    TypeSize AllocTypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
+    TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
     if (AllocTypeSize.isScalable()) {
       Decomposed.Base = V;
       return Decomposed;
@@ -1543,28 +1543,45 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                 TLI, NullIsValidLocation)))
     return AliasResult::NoAlias;
 
-  if (CtxI && EnableSeparateStorageAnalysis) {
-    for (auto &AssumeVH : AC.assumptions()) {
-      if (!AssumeVH)
+  if (EnableSeparateStorageAnalysis) {
+    for (AssumptionCache::ResultElem &Elem : AC.assumptionsFor(O1)) {
+      if (!Elem || Elem.Index == AssumptionCache::ExprResultIdx)
         continue;
-      AssumeInst *Assume = cast<AssumeInst>(AssumeVH);
-
-      for (unsigned Idx = 0; Idx < Assume->getNumOperandBundles(); Idx++) {
-        OperandBundleUse OBU = Assume->getOperandBundleAt(Idx);
-        if (OBU.getTagName() == "separate_storage") {
-          assert(OBU.Inputs.size() == 2);
-          const Value *Hint1 = OBU.Inputs[0].get();
-          const Value *Hint2 = OBU.Inputs[1].get();
-          // This is often a no-op; instcombine rewrites this for us. No-op
-          // getUnderlyingObject calls are fast, though.
-          const Value *HintO1 = getUnderlyingObject(Hint1);
-          const Value *HintO2 = getUnderlyingObject(Hint2);
-
-          if (((O1 == HintO1 && O2 == HintO2) ||
-               (O1 == HintO2 && O2 == HintO1)) &&
-              isValidAssumeForContext(Assume, CtxI, DT))
+      AssumeInst *Assume = cast<AssumeInst>(Elem);
+      OperandBundleUse OBU = Assume->getOperandBundleAt(Elem.Index);
+      if (OBU.getTagName() == "separate_storage") {
+        assert(OBU.Inputs.size() == 2);
+        const Value *Hint1 = OBU.Inputs[0].get();
+        const Value *Hint2 = OBU.Inputs[1].get();
+        // This is often a no-op; instcombine rewrites this for us. No-op
+        // getUnderlyingObject calls are fast, though.
+        const Value *HintO1 = getUnderlyingObject(Hint1);
+        const Value *HintO2 = getUnderlyingObject(Hint2);
+
+        auto ValidAssumeForPtrContext = [&](const Value *Ptr) {
+          if (const Instruction *PtrI = dyn_cast<Instruction>(Ptr)) {
+            return isValidAssumeForContext(Assume, PtrI, DT,
+                                           /* AllowEphemerals */ true);
+          }
+          if (const Argument *PtrA = dyn_cast<Argument>(Ptr)) {
+            const Instruction *FirstI =
+                &*PtrA->getParent()->getEntryBlock().begin();
+            return isValidAssumeForContext(Assume, FirstI, DT,
+                                           /* AllowEphemerals */ true);
+          }
+          return false;
+        };
+
+        if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
+          // Note that we go back to V1 and V2 for the
+          // ValidAssumeForPtrContext checks; they're dominated by O1 and O2,
+          // so strictly more assumptions are valid for them.
+          if ((CtxI && isValidAssumeForContext(Assume, CtxI, DT,
+                                               /* AllowEphemerals */ true)) ||
+              ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2)) {
             return AliasResult::NoAlias;
+          }
         }
       }
     }
   }
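Taken together, the AssumptionCache and BasicAliasAnalysis changes index separate_storage assumptions by their underlying objects, making them cheap enough to query that the analysis is now on by default. For reference, a minimal IR sketch of the bundle these changes consume (the function and value names are illustrative, not from this diff):

    ; Clang's __builtin_assume_separate_storage(p, q) lowers to this bundle.
    define i32 @sum(ptr %p, ptr %q) {
    entry:
      ; Tells alias analysis that %p and %q point into distinct allocations.
      call void @llvm.assume(i1 true) ["separate_storage"(ptr %p, ptr %q)]
      store i32 1, ptr %p
      store i32 2, ptr %q          ; cannot clobber %p given the assumption
      %v = load i32, ptr %p        ; may now be forwarded as 1
      %w = load i32, ptr %q
      %s = add i32 %v, %w
      ret i32 %s
    }
    declare void @llvm.assume(i1)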
diff --git a/contrib/llvm-project/llvm/lib/Analysis/ConstraintSystem.cpp b/contrib/llvm-project/llvm/lib/Analysis/ConstraintSystem.cpp
index 35bdd869a88d..1a9c7c21e9ce 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/ConstraintSystem.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/ConstraintSystem.cpp
@@ -95,14 +95,14 @@ bool ConstraintSystem::eliminateUsingFM() {
         IdxUpper++;
       }
 
-      if (MulOverflow(UpperV, ((-1) * LowerLast), M1))
+      if (MulOverflow(UpperV, -1 * LowerLast, M1))
         return false;
       if (IdxLower < LowerRow.size() && LowerRow[IdxLower].Id == CurrentId) {
         LowerV = LowerRow[IdxLower].Coefficient;
         IdxLower++;
       }
 
-      if (MulOverflow(LowerV, (UpperLast), M2))
+      if (MulOverflow(LowerV, UpperLast, M2))
         return false;
       if (AddOverflow(M1, M2, N))
         return false;
diff --git a/contrib/llvm-project/llvm/lib/Analysis/InlineCost.cpp b/contrib/llvm-project/llvm/lib/Analysis/InlineCost.cpp
index 7096e06d925a..1fa7badaa4fa 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/InlineCost.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/InlineCost.cpp
@@ -1429,7 +1429,7 @@ bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
       continue;
     }
 
-    APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
+    APInt TypeSize(IntPtrWidth, GTI.getSequentialElementStride(DL));
     Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
   }
   return true;
diff --git a/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp b/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp
index 78a833476334..d0c27cae0dff 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -2204,6 +2204,13 @@ static Value *simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
       match(Op1, m_c_Xor(m_Specific(Or), m_Specific(Y))))
     return Constant::getNullValue(Op0->getType());
 
+  const APInt *C1;
+  Value *A;
+  // (A ^ C) & (A ^ ~C) -> 0
+  if (match(Op0, m_Xor(m_Value(A), m_APInt(C1))) &&
+      match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
+    return Constant::getNullValue(Op0->getType());
+
   if (Op0->getType()->isIntOrIntVectorTy(1)) {
     if (std::optional<bool> Implied = isImpliedCondition(Op0, Op1, Q.DL)) {
       // If Op0 is true implies Op1 is true, then Op0 is a subset of Op1.
@@ -2473,6 +2480,11 @@ static Value *simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
   if (Value *V = threadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
     return V;
 
+  // (A ^ C) | (A ^ ~C) -> -1, i.e. all bits set to one.
+  if (match(Op0, m_Xor(m_Value(A), m_APInt(C1))) &&
+      match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
+    return Constant::getAllOnesValue(Op0->getType());
+
   if (Op0->getType()->isIntOrIntVectorTy(1)) {
     if (std::optional<bool> Implied =
             isImpliedCondition(Op0, Op1, Q.DL, false)) {
@@ -4301,7 +4313,7 @@ static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
     // For vector types, the simplification must hold per-lane, so forbid
     // potentially cross-lane operations like shufflevector.
     if (!I->getType()->isVectorTy() || isa<ShuffleVectorInst>(I) ||
-        isa<CallBase>(I))
+        isa<CallBase>(I) || isa<BitCastInst>(I))
       return nullptr;
   }
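The two new InstSimplify folds rely on A ^ ~C being the bitwise complement of A ^ C, so the conjunction is x & ~x and the disjunction is x | ~x. A small sketch of IR that now folds (illustrative names; note that ~5 is -6 in i8):

    define i8 @and_of_xors(i8 %a) {
      %x = xor i8 %a, 5        ; A ^ C
      %y = xor i8 %a, -6       ; A ^ ~C == ~(A ^ C)
      %r = and i8 %x, %y       ; x & ~x simplifies to 0
      ret i8 %r
    }

The or form of the same pattern simplifies to -1 (all bits set) for the same reason.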
diff --git a/contrib/llvm-project/llvm/lib/Analysis/Local.cpp b/contrib/llvm-project/llvm/lib/Analysis/Local.cpp
index 30757abeb098..f5e080d2c78e 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/Local.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/Local.cpp
@@ -64,7 +64,7 @@ Value *llvm::emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL,
       // Convert to correct type.
       if (Op->getType() != IntIdxTy)
         Op = Builder->CreateIntCast(Op, IntIdxTy, true, Op->getName() + ".c");
-      TypeSize TSize = DL.getTypeAllocSize(GTI.getIndexedType());
+      TypeSize TSize = GTI.getSequentialElementStride(DL);
       if (TSize != TypeSize::getFixed(1)) {
         Value *Scale = Builder->CreateTypeSize(IntIdxTy->getScalarType(), TSize);
         if (IntIdxTy->isVectorTy())
diff --git a/contrib/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/contrib/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 89666018d925..aed60cc5a3f5 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2703,7 +2703,10 @@ static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
 
     // If it's a type with the same allocation size as the result of the GEP we
     // can peel off the zero index.
-    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
+    TypeSize ElemSize = GEPTI.isStruct()
+                            ? DL.getTypeAllocSize(GEPTI.getIndexedType())
+                            : GEPTI.getSequentialElementStride(DL);
+    if (ElemSize != GEPAllocSize)
       break;
     --LastOperand;
   }
diff --git a/contrib/llvm-project/llvm/lib/Analysis/LoopInfo.cpp b/contrib/llvm-project/llvm/lib/Analysis/LoopInfo.cpp
index 87ddfe3e92ae..59c96a3371e8 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/LoopInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/LoopInfo.cpp
@@ -969,7 +969,9 @@ LoopInfo LoopAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
 
 PreservedAnalyses LoopPrinterPass::run(Function &F,
                                        FunctionAnalysisManager &AM) {
-  AM.getResult<LoopAnalysis>(F).print(OS);
+  auto &LI = AM.getResult<LoopAnalysis>(F);
+  OS << "Loop info for function '" << F.getName() << "':\n";
+  LI.print(OS);
   return PreservedAnalyses::all();
 }
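With the LoopInfo change, the printer pass (presumably reached via opt -passes='print<loops>') now prefixes each dump with a "Loop info for function 'name':" line before printing the loop tree, so output from consecutive functions no longer runs together with no indication of which function each loop belongs to.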
diff --git a/contrib/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp b/contrib/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp
index 9e6811f3bf88..46a7a921d86d 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -577,10 +577,12 @@ Value *llvm::getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI) {
 //===----------------------------------------------------------------------===//
 //  Utility functions to compute size of objects.
 //
-static APInt getSizeWithOverflow(const SizeOffsetType &Data) {
-  if (Data.second.isNegative() || Data.first.ult(Data.second))
-    return APInt(Data.first.getBitWidth(), 0);
-  return Data.first - Data.second;
+static APInt getSizeWithOverflow(const SizeOffsetAPInt &Data) {
+  APInt Size = Data.Size;
+  APInt Offset = Data.Offset;
+  if (Offset.isNegative() || Size.ult(Offset))
+    return APInt(Size.getBitWidth(), 0);
+  return Size - Offset;
 }
 
 /// Compute the size of the object pointed by Ptr. Returns true and the
@@ -590,8 +592,8 @@ static APInt getSizeWithOverflow(const SizeOffsetType &Data) {
 bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
                          const TargetLibraryInfo *TLI, ObjectSizeOpts Opts) {
   ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), Opts);
-  SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr));
-  if (!Visitor.bothKnown(Data))
+  SizeOffsetAPInt Data = Visitor.compute(const_cast<Value *>(Ptr));
+  if (!Data.bothKnown())
     return false;
 
   Size = getSizeWithOverflow(Data).getZExtValue();
@@ -640,8 +642,7 @@ Value *llvm::lowerObjectSizeCall(
   } else {
     LLVMContext &Ctx = ObjectSize->getFunction()->getContext();
     ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, EvalOptions);
-    SizeOffsetEvalType SizeOffsetPair =
-        Eval.compute(ObjectSize->getArgOperand(0));
+    SizeOffsetValue SizeOffsetPair = Eval.compute(ObjectSize->getArgOperand(0));
 
     if (SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown()) {
       IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
@@ -651,19 +652,19 @@ Value *llvm::lowerObjectSizeCall(
           }));
       Builder.SetInsertPoint(ObjectSize);
 
+      Value *Size = SizeOffsetPair.Size;
+      Value *Offset = SizeOffsetPair.Offset;
+
       // If we've outside the end of the object, then we can always access
       // exactly 0 bytes.
-      Value *ResultSize =
-          Builder.CreateSub(SizeOffsetPair.first, SizeOffsetPair.second);
-      Value *UseZero =
-          Builder.CreateICmpULT(SizeOffsetPair.first, SizeOffsetPair.second);
+      Value *ResultSize = Builder.CreateSub(Size, Offset);
+      Value *UseZero = Builder.CreateICmpULT(Size, Offset);
       ResultSize = Builder.CreateZExtOrTrunc(ResultSize, ResultType);
       Value *Ret = Builder.CreateSelect(
           UseZero, ConstantInt::get(ResultType, 0), ResultSize);
 
       // The non-constant size expression cannot evaluate to -1.
-      if (!isa<Constant>(SizeOffsetPair.first) ||
-          !isa<Constant>(SizeOffsetPair.second))
+      if (!isa<Constant>(Size) || !isa<Constant>(Offset))
         Builder.CreateAssumption(
             Builder.CreateICmpNE(Ret, ConstantInt::get(ResultType, -1)));
 
@@ -697,12 +698,12 @@ ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
   // a different address space.
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
+SizeOffsetAPInt ObjectSizeOffsetVisitor::compute(Value *V) {
   InstructionsVisited = 0;
   return computeImpl(V);
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::computeImpl(Value *V) {
+SizeOffsetAPInt ObjectSizeOffsetVisitor::computeImpl(Value *V) {
   unsigned InitialIntTyBits = DL.getIndexTypeSizeInBits(V->getType());
 
   // Stripping pointer casts can strip address space casts which can change the
@@ -719,7 +720,7 @@ SizeOffsetType ObjectSizeOffsetVisitor::computeImpl(Value *V) {
   IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
   Zero = APInt::getZero(IntTyBits);
 
-  SizeOffsetType SOT = computeValue(V);
+  SizeOffsetAPInt SOT = computeValue(V);
 
   bool IndexTypeSizeChanged = InitialIntTyBits != IntTyBits;
   if (!IndexTypeSizeChanged && Offset.isZero())
@@ -729,27 +730,28 @@ SizeOffsetType ObjectSizeOffsetVisitor::computeImpl(Value *V) {
   // accumulated some constant offset (or both). Readjust the bit width to match
   // the argument index type size and apply the offset, as required.
   if (IndexTypeSizeChanged) {
-    if (knownSize(SOT) && !::CheckedZextOrTrunc(SOT.first, InitialIntTyBits))
-      SOT.first = APInt();
-    if (knownOffset(SOT) && !::CheckedZextOrTrunc(SOT.second, InitialIntTyBits))
-      SOT.second = APInt();
+    if (SOT.knownSize() && !::CheckedZextOrTrunc(SOT.Size, InitialIntTyBits))
+      SOT.Size = APInt();
+    if (SOT.knownOffset() &&
+        !::CheckedZextOrTrunc(SOT.Offset, InitialIntTyBits))
+      SOT.Offset = APInt();
   }
   // If the computed offset is "unknown" we cannot add the stripped offset.
-  return {SOT.first,
-          SOT.second.getBitWidth() > 1 ? SOT.second + Offset : SOT.second};
+  return {SOT.Size,
+          SOT.Offset.getBitWidth() > 1 ? SOT.Offset + Offset : SOT.Offset};
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::computeValue(Value *V) {
+SizeOffsetAPInt ObjectSizeOffsetVisitor::computeValue(Value *V) {
   if (Instruction *I = dyn_cast<Instruction>(V)) {
     // If we have already seen this instruction, bail out. Cycles can happen in
     // unreachable code after constant propagation.
-    auto P = SeenInsts.try_emplace(I, unknown());
+    auto P = SeenInsts.try_emplace(I, ObjectSizeOffsetVisitor::unknown());
     if (!P.second)
       return P.first->second;
     ++InstructionsVisited;
     if (InstructionsVisited > ObjectSizeOffsetVisitorMaxVisitInstructions)
-      return unknown();
-    SizeOffsetType Res = visit(*I);
+      return ObjectSizeOffsetVisitor::unknown();
+    SizeOffsetAPInt Res = visit(*I);
     // Cache the result for later visits. If we happened to visit this during
     // the above recursion, we would consider it unknown until now.
     SeenInsts[I] = Res;
@@ -768,55 +770,55 @@ SizeOffsetType ObjectSizeOffsetVisitor::computeValue(Value *V) {
   LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: "
                     << *V << '\n');
-  return unknown();
+  return ObjectSizeOffsetVisitor::unknown();
 }
 
 bool ObjectSizeOffsetVisitor::CheckedZextOrTrunc(APInt &I) {
   return ::CheckedZextOrTrunc(I, IntTyBits);
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
+SizeOffsetAPInt ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
   TypeSize ElemSize = DL.getTypeAllocSize(I.getAllocatedType());
   if (ElemSize.isScalable() && Options.EvalMode != ObjectSizeOpts::Mode::Min)
-    return unknown();
+    return ObjectSizeOffsetVisitor::unknown();
   APInt Size(IntTyBits, ElemSize.getKnownMinValue());
   if (!I.isArrayAllocation())
-    return std::make_pair(align(Size, I.getAlign()), Zero);
+    return SizeOffsetAPInt(align(Size, I.getAlign()), Zero);
 
   Value *ArraySize = I.getArraySize();
   if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
     APInt NumElems = C->getValue();
     if (!CheckedZextOrTrunc(NumElems))
-      return unknown();
+      return ObjectSizeOffsetVisitor::unknown();
 
     bool Overflow;
     Size = Size.umul_ov(NumElems, Overflow);
-    return Overflow ? unknown()
-                    : std::make_pair(align(Size, I.getAlign()), Zero);
+    return Overflow ? ObjectSizeOffsetVisitor::unknown()
+                    : SizeOffsetAPInt(align(Size, I.getAlign()), Zero);
   }
-  return unknown();
+  return ObjectSizeOffsetVisitor::unknown();
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
+SizeOffsetAPInt ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
   Type *MemoryTy = A.getPointeeInMemoryValueType();
   // No interprocedural analysis is done at the moment.
   if (!MemoryTy|| !MemoryTy->isSized()) {
     ++ObjectVisitorArgument;
-    return unknown();
+    return ObjectSizeOffsetVisitor::unknown();
   }
 
   APInt Size(IntTyBits, DL.getTypeAllocSize(MemoryTy));
-  return std::make_pair(align(Size, A.getParamAlign()), Zero);
+  return SizeOffsetAPInt(align(Size, A.getParamAlign()), Zero);
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
+SizeOffsetAPInt ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
   if (std::optional<APInt> Size = getAllocSize(&CB, TLI))
-    return std::make_pair(*Size, Zero);
-  return unknown();
+    return SizeOffsetAPInt(*Size, Zero);
+  return ObjectSizeOffsetVisitor::unknown();
 }
 
-SizeOffsetType
-ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull& CPN) {
+SizeOffsetAPInt
+ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull &CPN) {
   // If null is unknown, there's nothing we can do. Additionally, non-zero
   // address spaces can make use of null, so we don't presume to know anything
   // about that.
@@ -825,45 +827,46 @@ ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull& CPN) {
   // them on the floor, but it's unclear what we should do when a NULL from
   // addrspace(1) gets casted to addrspace(0) (or vice-versa).
   if (Options.NullIsUnknownSize || CPN.getType()->getAddressSpace())
-    return unknown();
-  return std::make_pair(Zero, Zero);
+    return ObjectSizeOffsetVisitor::unknown();
+  return SizeOffsetAPInt(Zero, Zero);
 }
 
-SizeOffsetType
-ObjectSizeOffsetVisitor::visitExtractElementInst(ExtractElementInst&) {
-  return unknown();
+SizeOffsetAPInt
+ObjectSizeOffsetVisitor::visitExtractElementInst(ExtractElementInst &) {
+  return ObjectSizeOffsetVisitor::unknown();
 }
 
-SizeOffsetType
-ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {
+SizeOffsetAPInt
+ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst &) {
   // Easy cases were already folded by previous passes.
-  return unknown();
+  return ObjectSizeOffsetVisitor::unknown();
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalAlias(GlobalAlias &GA) {
+SizeOffsetAPInt ObjectSizeOffsetVisitor::visitGlobalAlias(GlobalAlias &GA) {
   if (GA.isInterposable())
-    return unknown();
+    return ObjectSizeOffsetVisitor::unknown();
   return computeImpl(GA.getAliasee());
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV){
+SizeOffsetAPInt
+ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV) {
   if (!GV.getValueType()->isSized() || GV.hasExternalWeakLinkage() ||
       ((!GV.hasInitializer() || GV.isInterposable()) &&
       Options.EvalMode != ObjectSizeOpts::Mode::Min))
-    return unknown();
+    return ObjectSizeOffsetVisitor::unknown();
 
   APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getValueType()));
-  return std::make_pair(align(Size, GV.getAlign()), Zero);
+  return SizeOffsetAPInt(align(Size, GV.getAlign()), Zero);
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst&) {
+SizeOffsetAPInt ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst &) {
   // clueless
-  return unknown();
+  return ObjectSizeOffsetVisitor::unknown();
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::findLoadSizeOffset(
+SizeOffsetAPInt ObjectSizeOffsetVisitor::findLoadSizeOffset(
     LoadInst &Load, BasicBlock &BB, BasicBlock::iterator From,
-    SmallDenseMap<BasicBlock *, SizeOffsetType, 8> &VisitedBlocks,
+    SmallDenseMap<BasicBlock *, SizeOffsetAPInt, 8> &VisitedBlocks,
     unsigned &ScannedInstCount) {
   constexpr unsigned MaxInstsToScan = 128;
 
@@ -871,10 +874,10 @@ SizeOffsetType ObjectSizeOffsetVisitor::findLoadSizeOffset(
   if (Where != VisitedBlocks.end())
     return Where->second;
 
-  auto Unknown = [this, &BB, &VisitedBlocks]() {
-    return VisitedBlocks[&BB] = unknown();
+  auto Unknown = [&BB, &VisitedBlocks]() {
+    return VisitedBlocks[&BB] = ObjectSizeOffsetVisitor::unknown();
   };
-  auto Known = [&BB, &VisitedBlocks](SizeOffsetType SO) {
+  auto Known = [&BB, &VisitedBlocks](SizeOffsetAPInt SO) {
     return VisitedBlocks[&BB] = SO;
   };
 
@@ -951,46 +954,47 @@ SizeOffsetType ObjectSizeOffsetVisitor::findLoadSizeOffset(
     return Unknown();
   } while (From-- != BB.begin());
 
-  SmallVector<SizeOffsetType> PredecessorSizeOffsets;
+  SmallVector<SizeOffsetAPInt> PredecessorSizeOffsets;
   for (auto *PredBB : predecessors(&BB)) {
     PredecessorSizeOffsets.push_back(findLoadSizeOffset(
         Load, *PredBB, BasicBlock::iterator(PredBB->getTerminator()),
         VisitedBlocks, ScannedInstCount));
-    if (!bothKnown(PredecessorSizeOffsets.back()))
+    if (!PredecessorSizeOffsets.back().bothKnown())
       return Unknown();
   }
 
   if (PredecessorSizeOffsets.empty())
     return Unknown();
 
-  return Known(std::accumulate(PredecessorSizeOffsets.begin() + 1,
-                               PredecessorSizeOffsets.end(),
-                               PredecessorSizeOffsets.front(),
-                               [this](SizeOffsetType LHS, SizeOffsetType RHS) {
-                                 return combineSizeOffset(LHS, RHS);
-                               }));
+  return Known(std::accumulate(
+      PredecessorSizeOffsets.begin() + 1, PredecessorSizeOffsets.end(),
+      PredecessorSizeOffsets.front(),
+      [this](SizeOffsetAPInt LHS, SizeOffsetAPInt RHS) {
+        return combineSizeOffset(LHS, RHS);
+      }));
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst &LI) {
+SizeOffsetAPInt ObjectSizeOffsetVisitor::visitLoadInst(LoadInst &LI) {
   if (!Options.AA) {
     ++ObjectVisitorLoad;
-    return unknown();
+    return ObjectSizeOffsetVisitor::unknown();
   }
 
-  SmallDenseMap<BasicBlock *, SizeOffsetType, 8> VisitedBlocks;
+  SmallDenseMap<BasicBlock *, SizeOffsetAPInt, 8> VisitedBlocks;
   unsigned ScannedInstCount = 0;
-  SizeOffsetType SO =
+  SizeOffsetAPInt SO =
       findLoadSizeOffset(LI, *LI.getParent(), BasicBlock::iterator(LI),
                          VisitedBlocks, ScannedInstCount);
-  if (!bothKnown(SO))
+  if (!SO.bothKnown())
     ++ObjectVisitorLoad;
   return SO;
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::combineSizeOffset(SizeOffsetType LHS,
-                                                          SizeOffsetType RHS) {
-  if (!bothKnown(LHS) || !bothKnown(RHS))
-    return unknown();
+SizeOffsetAPInt
+ObjectSizeOffsetVisitor::combineSizeOffset(SizeOffsetAPInt LHS,
+                                           SizeOffsetAPInt RHS) {
+  if (!LHS.bothKnown() || !RHS.bothKnown())
+    return ObjectSizeOffsetVisitor::unknown();
 
   switch (Options.EvalMode) {
   case ObjectSizeOpts::Mode::Min:
@@ -998,40 +1002,45 @@ SizeOffsetType ObjectSizeOffsetVisitor::combineSizeOffset(SizeOffsetType LHS,
   case ObjectSizeOpts::Mode::Max:
     return (getSizeWithOverflow(LHS).sgt(getSizeWithOverflow(RHS))) ? LHS : RHS;
   case ObjectSizeOpts::Mode::ExactSizeFromOffset:
-    return (getSizeWithOverflow(LHS).eq(getSizeWithOverflow(RHS))) ? LHS
-                                                                   : unknown();
+    return (getSizeWithOverflow(LHS).eq(getSizeWithOverflow(RHS)))
+               ? LHS
+               : ObjectSizeOffsetVisitor::unknown();
   case ObjectSizeOpts::Mode::ExactUnderlyingSizeAndOffset:
-    return LHS == RHS ? LHS : unknown();
+    return LHS == RHS ? LHS : ObjectSizeOffsetVisitor::unknown();
   }
   llvm_unreachable("missing an eval mode");
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode &PN) {
+SizeOffsetAPInt ObjectSizeOffsetVisitor::visitPHINode(PHINode &PN) {
   if (PN.getNumIncomingValues() == 0)
-    return unknown();
+    return ObjectSizeOffsetVisitor::unknown();
   auto IncomingValues = PN.incoming_values();
   return std::accumulate(IncomingValues.begin() + 1, IncomingValues.end(),
                          computeImpl(*IncomingValues.begin()),
-                         [this](SizeOffsetType LHS, Value *VRHS) {
+                         [this](SizeOffsetAPInt LHS, Value *VRHS) {
                            return combineSizeOffset(LHS, computeImpl(VRHS));
                          });
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
+SizeOffsetAPInt ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
   return combineSizeOffset(computeImpl(I.getTrueValue()),
                            computeImpl(I.getFalseValue()));
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::visitUndefValue(UndefValue&) {
-  return std::make_pair(Zero, Zero);
+SizeOffsetAPInt ObjectSizeOffsetVisitor::visitUndefValue(UndefValue &) {
+  return SizeOffsetAPInt(Zero, Zero);
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
+SizeOffsetAPInt ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
   LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I
                     << '\n');
-  return unknown();
+  return ObjectSizeOffsetVisitor::unknown();
 }
 
+// Just set these right here...
+SizeOffsetValue::SizeOffsetValue(const SizeOffsetWeakTrackingVH &SOT)
+    : SizeOffsetType(SOT.Size, SOT.Offset) {}
+
 ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(
     const DataLayout &DL, const TargetLibraryInfo *TLI, LLVMContext &Context,
     ObjectSizeOpts EvalOpts)
@@ -1044,21 +1053,21 @@ ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(
   // be different for later objects.
 }
 
-SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
+SizeOffsetValue ObjectSizeOffsetEvaluator::compute(Value *V) {
   // XXX - Are vectors of pointers possible here?
   IntTy = cast<IntegerType>(DL.getIndexType(V->getType()));
   Zero = ConstantInt::get(IntTy, 0);
 
-  SizeOffsetEvalType Result = compute_(V);
+  SizeOffsetValue Result = compute_(V);
 
-  if (!bothKnown(Result)) {
+  if (!Result.bothKnown()) {
     // Erase everything that was computed in this iteration from the cache, so
     // that no dangling references are left behind. We could be a bit smarter if
     // we kept a dependency graph. It's probably not worth the complexity.
     for (const Value *SeenVal : SeenVals) {
       CacheMapTy::iterator CacheIt = CacheMap.find(SeenVal);
       // non-computable results can be safely cached
-      if (CacheIt != CacheMap.end() && anyKnown(CacheIt->second))
+      if (CacheIt != CacheMap.end() && CacheIt->second.anyKnown())
         CacheMap.erase(CacheIt);
     }
 
@@ -1074,12 +1083,12 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
   return Result;
 }
 
-SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
+SizeOffsetValue ObjectSizeOffsetEvaluator::compute_(Value *V) {
   ObjectSizeOffsetVisitor Visitor(DL, TLI, Context, EvalOpts);
-  SizeOffsetType Const = Visitor.compute(V);
-  if (Visitor.bothKnown(Const))
-    return std::make_pair(ConstantInt::get(Context, Const.first),
-                          ConstantInt::get(Context, Const.second));
+  SizeOffsetAPInt Const = Visitor.compute(V);
+  if (Const.bothKnown())
+    return SizeOffsetValue(ConstantInt::get(Context, Const.Size),
+                           ConstantInt::get(Context, Const.Offset));
 
   V = V->stripPointerCasts();
 
@@ -1095,13 +1104,13 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
     Builder.SetInsertPoint(I);
 
   // Now compute the size and offset.
-  SizeOffsetEvalType Result;
+  SizeOffsetValue Result;
 
   // Record the pointers that were handled in this run, so that they can be
   // cleaned later if something fails. We also use this set to break cycles that
   // can occur in dead code.
   if (!SeenVals.insert(V).second) {
-    Result = unknown();
+    Result = ObjectSizeOffsetEvaluator::unknown();
   } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
     Result = visitGEPOperator(*GEP);
   } else if (Instruction *I = dyn_cast<Instruction>(V)) {
@@ -1112,22 +1121,22 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
              isa<GlobalAlias>(V) ||
              isa<GlobalVariable>(V)) {
     // Ignore values where we cannot do more than ObjectSizeVisitor.
-    Result = unknown();
+    Result = ObjectSizeOffsetEvaluator::unknown();
   } else {
     LLVM_DEBUG(
         dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: " << *V
               << '\n');
-    Result = unknown();
+    Result = ObjectSizeOffsetEvaluator::unknown();
   }
 
   // Don't reuse CacheIt since it may be invalid at this point.
-  CacheMap[V] = Result;
+  CacheMap[V] = SizeOffsetWeakTrackingVH(Result);
   return Result;
 }
 
-SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
+SizeOffsetValue ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
   if (!I.getAllocatedType()->isSized())
-    return unknown();
+    return ObjectSizeOffsetEvaluator::unknown();
 
   // must be a VLA
   assert(I.isArrayAllocation());
@@ -1143,86 +1152,85 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
   Value *Size = ConstantInt::get(ArraySize->getType(),
                                  DL.getTypeAllocSize(I.getAllocatedType()));
   Size = Builder.CreateMul(Size, ArraySize);
-  return std::make_pair(Size, Zero);
+  return SizeOffsetValue(Size, Zero);
 }
 
-SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallBase(CallBase &CB) {
+SizeOffsetValue ObjectSizeOffsetEvaluator::visitCallBase(CallBase &CB) {
   std::optional<AllocFnsTy> FnData = getAllocationSize(&CB, TLI);
   if (!FnData)
-    return unknown();
+    return ObjectSizeOffsetEvaluator::unknown();
 
   // Handle strdup-like functions separately.
   if (FnData->AllocTy == StrDupLike) {
     // TODO: implement evaluation of strdup/strndup
-    return unknown();
+    return ObjectSizeOffsetEvaluator::unknown();
   }
 
   Value *FirstArg = CB.getArgOperand(FnData->FstParam);
   FirstArg = Builder.CreateZExtOrTrunc(FirstArg, IntTy);
   if (FnData->SndParam < 0)
-    return std::make_pair(FirstArg, Zero);
+    return SizeOffsetValue(FirstArg, Zero);
 
   Value *SecondArg = CB.getArgOperand(FnData->SndParam);
   SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy);
   Value *Size = Builder.CreateMul(FirstArg, SecondArg);
-  return std::make_pair(Size, Zero);
+  return SizeOffsetValue(Size, Zero);
 }
 
-SizeOffsetEvalType
-ObjectSizeOffsetEvaluator::visitExtractElementInst(ExtractElementInst&) {
-  return unknown();
+SizeOffsetValue
+ObjectSizeOffsetEvaluator::visitExtractElementInst(ExtractElementInst &) {
+  return ObjectSizeOffsetEvaluator::unknown();
 }
 
-SizeOffsetEvalType
-ObjectSizeOffsetEvaluator::visitExtractValueInst(ExtractValueInst&) {
-  return unknown();
+SizeOffsetValue
+ObjectSizeOffsetEvaluator::visitExtractValueInst(ExtractValueInst &) {
+  return ObjectSizeOffsetEvaluator::unknown();
 }
 
-SizeOffsetEvalType
-ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) {
-  SizeOffsetEvalType PtrData = compute_(GEP.getPointerOperand());
-  if (!bothKnown(PtrData))
-    return unknown();
+SizeOffsetValue ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) {
+  SizeOffsetValue PtrData = compute_(GEP.getPointerOperand());
+  if (!PtrData.bothKnown())
+    return ObjectSizeOffsetEvaluator::unknown();
 
   Value *Offset = emitGEPOffset(&Builder, DL, &GEP, /*NoAssumptions=*/true);
-  Offset = Builder.CreateAdd(PtrData.second, Offset);
-  return std::make_pair(PtrData.first, Offset);
+  Offset = Builder.CreateAdd(PtrData.Offset, Offset);
+  return SizeOffsetValue(PtrData.Size, Offset);
 }
 
-SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst&) {
+SizeOffsetValue ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst &) {
   // clueless
-  return unknown();
+  return ObjectSizeOffsetEvaluator::unknown();
 }
 
-SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst &LI) {
-  return unknown();
+SizeOffsetValue ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst &LI) {
+  return ObjectSizeOffsetEvaluator::unknown();
 }
 
-SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
+SizeOffsetValue ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
   // Create 2 PHIs: one for size and another for offset.
   PHINode *SizePHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
   PHINode *OffsetPHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
 
   // Insert right away in the cache to handle recursive PHIs.
-  CacheMap[&PHI] = std::make_pair(SizePHI, OffsetPHI);
+  CacheMap[&PHI] = SizeOffsetWeakTrackingVH(SizePHI, OffsetPHI);
 
   // Compute offset/size for each PHI incoming pointer.
   for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
     BasicBlock *IncomingBlock = PHI.getIncomingBlock(i);
     Builder.SetInsertPoint(IncomingBlock, IncomingBlock->getFirstInsertionPt());
-    SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i));
+    SizeOffsetValue EdgeData = compute_(PHI.getIncomingValue(i));
 
-    if (!bothKnown(EdgeData)) {
+    if (!EdgeData.bothKnown()) {
       OffsetPHI->replaceAllUsesWith(PoisonValue::get(IntTy));
       OffsetPHI->eraseFromParent();
       InsertedInstructions.erase(OffsetPHI);
       SizePHI->replaceAllUsesWith(PoisonValue::get(IntTy));
       SizePHI->eraseFromParent();
       InsertedInstructions.erase(SizePHI);
-      return unknown();
+      return ObjectSizeOffsetEvaluator::unknown();
    }
-    SizePHI->addIncoming(EdgeData.first, IncomingBlock);
-    OffsetPHI->addIncoming(EdgeData.second, IncomingBlock);
+    SizePHI->addIncoming(EdgeData.Size, IncomingBlock);
+    OffsetPHI->addIncoming(EdgeData.Offset, IncomingBlock);
   }
 
   Value *Size = SizePHI, *Offset = OffsetPHI;
@@ -1238,27 +1246,27 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
     OffsetPHI->eraseFromParent();
     InsertedInstructions.erase(OffsetPHI);
   }
-  return std::make_pair(Size, Offset);
+  return SizeOffsetValue(Size, Offset);
 }
 
-SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) {
-  SizeOffsetEvalType TrueSide = compute_(I.getTrueValue());
-  SizeOffsetEvalType FalseSide = compute_(I.getFalseValue());
+SizeOffsetValue ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) {
+  SizeOffsetValue TrueSide = compute_(I.getTrueValue());
+  SizeOffsetValue FalseSide = compute_(I.getFalseValue());
 
-  if (!bothKnown(TrueSide) || !bothKnown(FalseSide))
-    return unknown();
+  if (!TrueSide.bothKnown() || !FalseSide.bothKnown())
+    return ObjectSizeOffsetEvaluator::unknown();
   if (TrueSide == FalseSide)
     return TrueSide;
 
-  Value *Size = Builder.CreateSelect(I.getCondition(), TrueSide.first,
-                                     FalseSide.first);
-  Value *Offset = Builder.CreateSelect(I.getCondition(), TrueSide.second,
-                                       FalseSide.second);
-  return std::make_pair(Size, Offset);
+  Value *Size =
+      Builder.CreateSelect(I.getCondition(), TrueSide.Size, FalseSide.Size);
+  Value *Offset =
+      Builder.CreateSelect(I.getCondition(), TrueSide.Offset, FalseSide.Offset);
+  return SizeOffsetValue(Size, Offset);
}
 
-SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) {
+SizeOffsetValue ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) {
   LLVM_DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I
                     << '\n');
-  return unknown();
+  return ObjectSizeOffsetEvaluator::unknown();
 }
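Most of the MemoryBuiltins churn replaces the anonymous pair typedefs (SizeOffsetType, SizeOffsetEvalType) with structs carrying named Size and Offset members; the behavior of the lowering is intended to be unchanged. A minimal sketch of the dynamic case the rewritten ObjectSizeOffsetEvaluator handles (illustrative names):

    define i64 @dyn_size(i64 %n) {
      %p = alloca i8, i64 %n    ; runtime-sized allocation
      ; The last argument (dynamic) is true, so lowering may materialize a
      ; runtime size expression instead of a constant; here %sz becomes %n.
      %sz = call i64 @llvm.objectsize.i64.p0(ptr %p, i1 false, i1 true, i1 true)
      ret i64 %sz
    }
    declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1)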
nullptr."); - else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) { + if (!InsnDefining || isa<MemoryUse>(InsnDefining)) { // The clone was simplified, it's no longer a MemoryDef, look up. - auto DefIt = DefMUD->getDefsIterator(); - // Since simplified clones only occur in single block cloning, a - // previous definition must exist, otherwise NewDefMUDI would not - // have been found in VMap. - assert(DefIt != MSSA->getBlockDefs(DefMUD->getBlock())->begin() && - "Previous def must exist"); InsnDefining = getNewDefiningAccessForClone( - &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA); + DefMUD->getDefiningAccess(), VMap, MPhiMap, MSSA); } } } @@ -624,9 +615,9 @@ void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB, MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess( NewInsn, getNewDefiningAccessForClone(MUD->getDefiningAccess(), VMap, - MPhiMap, CloneWasSimplified, MSSA), + MPhiMap, MSSA), /*Template=*/CloneWasSimplified ? nullptr : MUD, - /*CreationMustSucceed=*/CloneWasSimplified ? false : true); + /*CreationMustSucceed=*/false); if (NewUseOrDef) MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End); } diff --git a/contrib/llvm-project/llvm/lib/Analysis/TargetTransformInfo.cpp b/contrib/llvm-project/llvm/lib/Analysis/TargetTransformInfo.cpp index 67246afa2314..a5a18a538d76 100644 --- a/contrib/llvm-project/llvm/lib/Analysis/TargetTransformInfo.cpp +++ b/contrib/llvm-project/llvm/lib/Analysis/TargetTransformInfo.cpp @@ -37,6 +37,10 @@ static cl::opt<unsigned> CacheLineSize( cl::desc("Use this to override the target cache line size when " "specified by the user.")); +static cl::opt<unsigned> MinPageSize( + "min-page-size", cl::init(0), cl::Hidden, + cl::desc("Use this to override the target's minimum page size.")); + static cl::opt<unsigned> PredictableBranchThreshold( "predictable-branch-threshold", cl::init(99), cl::Hidden, cl::desc( @@ -762,6 +766,11 @@ TargetTransformInfo::getCacheAssociativity(CacheLevel Level) const { return TTIImpl->getCacheAssociativity(Level); } +std::optional<unsigned> TargetTransformInfo::getMinPageSize() const { + return MinPageSize.getNumOccurrences() > 0 ? MinPageSize + : TTIImpl->getMinPageSize(); +} + unsigned TargetTransformInfo::getPrefetchDistance() const { return TTIImpl->getPrefetchDistance(); } diff --git a/contrib/llvm-project/llvm/lib/Analysis/VFABIDemangling.cpp b/contrib/llvm-project/llvm/lib/Analysis/VFABIDemangling.cpp index 426f98c0c628..8562d8fbfa1e 100644 --- a/contrib/llvm-project/llvm/lib/Analysis/VFABIDemangling.cpp +++ b/contrib/llvm-project/llvm/lib/Analysis/VFABIDemangling.cpp @@ -326,10 +326,6 @@ getScalableECFromSignature(const FunctionType *Signature, const VFISAKind ISA, // Only vector parameters are used when determining the VF; uniform or // linear are left as scalars, so do not affect VF. if (Param.ParamKind == VFParamKind::Vector) { - // If the scalar function doesn't actually have a corresponding argument, - // reject the mapping. - if (Param.ParamPos >= Signature->getNumParams()) - return std::nullopt; Type *PTy = Signature->getParamType(Param.ParamPos); std::optional<ElementCount> EC = getElementCountForTy(ISA, PTy); @@ -427,6 +423,11 @@ std::optional<VFInfo> VFABI::tryDemangleForVFABI(StringRef MangledName, if (Parameters.empty()) return std::nullopt; + // If the number of arguments of the scalar function does not match the + // vector variant we have just demangled then reject the mapping. 
diff --git a/contrib/llvm-project/llvm/lib/Analysis/VFABIDemangling.cpp b/contrib/llvm-project/llvm/lib/Analysis/VFABIDemangling.cpp
index 426f98c0c628..8562d8fbfa1e 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/VFABIDemangling.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/VFABIDemangling.cpp
@@ -326,10 +326,6 @@ getScalableECFromSignature(const FunctionType *Signature, const VFISAKind ISA,
     // Only vector parameters are used when determining the VF; uniform or
     // linear are left as scalars, so do not affect VF.
     if (Param.ParamKind == VFParamKind::Vector) {
-      // If the scalar function doesn't actually have a corresponding argument,
-      // reject the mapping.
-      if (Param.ParamPos >= Signature->getNumParams())
-        return std::nullopt;
       Type *PTy = Signature->getParamType(Param.ParamPos);
 
       std::optional<ElementCount> EC = getElementCountForTy(ISA, PTy);
@@ -427,6 +423,11 @@ std::optional<VFInfo> VFABI::tryDemangleForVFABI(StringRef MangledName,
   if (Parameters.empty())
     return std::nullopt;
 
+  // If the number of arguments of the scalar function does not match the
+  // vector variant we have just demangled then reject the mapping.
+  if (Parameters.size() != FTy->getNumParams())
+    return std::nullopt;
+
   // Figure out the number of lanes in vectors for this function variant. This
   // is easy for fixed length, as the vlen encoding just gives us the value
   // directly. However, if the vlen mangling indicated that this function
diff --git a/contrib/llvm-project/llvm/lib/Analysis/ValueTracking.cpp b/contrib/llvm-project/llvm/lib/Analysis/ValueTracking.cpp
index 16d78c1ded6d..940ae9eb7ee2 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/ValueTracking.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/ValueTracking.cpp
@@ -485,7 +485,8 @@ bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
 
 bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                    const Instruction *CxtI,
-                                   const DominatorTree *DT) {
+                                   const DominatorTree *DT,
+                                   bool AllowEphemerals) {
   // There are two restrictions on the use of an assume:
   //  1. The assume must dominate the context (or the control flow must
   //     reach the assume whenever it reaches the context).
@@ -503,7 +504,7 @@ bool llvm::isValidAssumeForContext(const Instruction *Inv,
     // Don't let an assume affect itself - this would cause the problems
     // `isEphemeralValueOf` is trying to prevent, and it would also make
     // the loop below go out of bounds.
-    if (Inv == CxtI)
+    if (!AllowEphemerals && Inv == CxtI)
       return false;
 
     // The context comes first, but they're both in the same block.
@@ -516,7 +517,7 @@ bool llvm::isValidAssumeForContext(const Instruction *Inv,
     if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
       return false;
 
-    return !isEphemeralValueOf(Inv, CxtI);
+    return AllowEphemerals || !isEphemeralValueOf(Inv, CxtI);
   }
 
   // Inv and CxtI are in different blocks.
@@ -1196,7 +1197,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
       unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
       KnownBits IndexBits(IndexBitWidth);
       computeKnownBits(Index, IndexBits, Depth + 1, Q);
-      TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
+      TypeSize IndexTypeSize = GTI.getSequentialElementStride(Q.DL);
       uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinValue();
       KnownBits ScalingFactor(IndexBitWidth);
       // Multiply by current sizeof type.
@@ -2128,7 +2129,7 @@ static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
     }
 
     // If we have a zero-sized type, the index doesn't matter. Keep looping.
-    if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).isZero())
+    if (GTI.getSequentialElementStride(Q.DL).isZero())
      continue;
 
    // Fast path the constant operand case both for efficiency and so we don't
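The VFABIDemangling change hoists the arity check out of the scalable-VF path so every demangled variant, fixed or scalable, is validated against the scalar function's parameter count. A sketch of the kind of mapping being validated, assuming the standard vector-function-abi-variant attribute (names are illustrative):

    declare double @foo(double, double)
    declare <2 x double> @foo_v2(<2 x double>, <2 x double>)

    ; "_ZGVnN2vv_foo" encodes two vector parameters, matching foo's two
    ; arguments; a variant such as "_ZGVnN2v_foo" would now be rejected
    ; because its parameter count differs from the scalar signature.
    define double @caller(double %x, double %y) {
      %r = call double @foo(double %x, double %y) #0
      ret double %r
    }
    attributes #0 = { "vector-function-abi-variant"="_ZGVnN2vv_foo(foo_v2)" }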
