Diffstat (limited to 'llvm/lib/IR')
| Mode | File | Lines changed |
| --- | --- | --- |
| -rw-r--r-- | llvm/lib/IR/AsmWriter.cpp | 46 |
| -rw-r--r-- | llvm/lib/IR/AttributeImpl.h | 3 |
| -rw-r--r-- | llvm/lib/IR/Attributes.cpp | 69 |
| -rw-r--r-- | llvm/lib/IR/AutoUpgrade.cpp | 134 |
| -rw-r--r-- | llvm/lib/IR/BasicBlock.cpp | 4 |
| -rw-r--r-- | llvm/lib/IR/ConstantFold.cpp | 51 |
| -rw-r--r-- | llvm/lib/IR/Constants.cpp | 57 |
| -rw-r--r-- | llvm/lib/IR/Core.cpp | 25 |
| -rw-r--r-- | llvm/lib/IR/DIBuilder.cpp | 8 |
| -rw-r--r-- | llvm/lib/IR/DataLayout.cpp | 129 |
| -rw-r--r-- | llvm/lib/IR/Function.cpp | 17 |
| -rw-r--r-- | llvm/lib/IR/Globals.cpp | 4 |
| -rw-r--r-- | llvm/lib/IR/InlineAsm.cpp | 6 |
| -rw-r--r-- | llvm/lib/IR/Instruction.cpp | 11 |
| -rw-r--r-- | llvm/lib/IR/Instructions.cpp | 48 |
| -rw-r--r-- | llvm/lib/IR/IntrinsicInst.cpp | 14 |
| -rw-r--r-- | llvm/lib/IR/LLVMContextImpl.h | 7 |
| -rw-r--r-- | llvm/lib/IR/LegacyPassManager.cpp | 12 |
| -rw-r--r-- | llvm/lib/IR/Module.cpp | 18 |
| -rw-r--r-- | llvm/lib/IR/ModuleSummaryIndex.cpp | 16 |
| -rw-r--r-- | llvm/lib/IR/Operator.cpp | 5 |
| -rw-r--r-- | llvm/lib/IR/SSAContext.cpp | 47 |
| -rw-r--r-- | llvm/lib/IR/Value.cpp | 2 |
| -rw-r--r-- | llvm/lib/IR/Verifier.cpp | 87 |
24 files changed, 555 insertions, 265 deletions
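Among the changes below, the Attributes.cpp and Verifier.cpp hunks split the old `getVScaleRangeArgs()` pair into `getVScaleRangeMin()` and an `Optional`-returning `getVScaleRangeMax()`, where `None` means no upper bound was given. A minimal sketch of reading the bounds through the new accessors, assuming a caller that already has the `Function`; the helper name `printVScaleBounds` is hypothetical and not part of the patch:

```cpp
#include "llvm/ADT/Optional.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Hypothetical helper: print the vscale_range bounds of a function, if any.
static void printVScaleBounds(const Function &F) {
  if (!F.hasFnAttribute(Attribute::VScaleRange))
    return;
  Attribute Attr = F.getFnAttribute(Attribute::VScaleRange);
  unsigned Min = Attr.getVScaleRangeMin();           // verifier now rejects 0
  Optional<unsigned> Max = Attr.getVScaleRangeMax(); // None: no upper bound
  errs() << "vscale_range(" << Min << ",";
  if (Max)
    errs() << *Max;
  else
    errs() << "unbounded";
  errs() << ")\n";
}
```

Note that the textual attribute form still prints an absent maximum as `0`, per the `getAsString` hunk in Attributes.cpp.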
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp index c9748e1387eb..bbe0c97e60a2 100644 --- a/llvm/lib/IR/AsmWriter.cpp +++ b/llvm/lib/IR/AsmWriter.cpp @@ -512,10 +512,8 @@ void TypePrinting::incorporateTypes() { // the unnamed ones out to a numbering and remove the anonymous structs. unsigned NextNumber = 0; - std::vector<StructType*>::iterator NextToUse = NamedTypes.begin(), I, E; - for (I = NamedTypes.begin(), E = NamedTypes.end(); I != E; ++I) { - StructType *STy = *I; - + std::vector<StructType *>::iterator NextToUse = NamedTypes.begin(); + for (StructType *STy : NamedTypes) { // Ignore anonymous types. if (STy->isLiteral()) continue; @@ -1450,6 +1448,12 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, return; } + if (const auto *NC = dyn_cast<NoCFIValue>(CV)) { + Out << "no_cfi "; + WriteAsOperandInternal(Out, NC->getGlobalValue(), WriterCtx); + return; + } + if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) { Type *ETy = CA->getType()->getElementType(); Out << '['; @@ -1583,11 +1587,9 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, Out << ", "; } - if (CE->hasIndices()) { - ArrayRef<unsigned> Indices = CE->getIndices(); - for (unsigned i = 0, e = Indices.size(); i != e; ++i) - Out << ", " << Indices[i]; - } + if (CE->hasIndices()) + for (unsigned I : CE->getIndices()) + Out << ", " << I; if (CE->isCast()) { Out << " to "; @@ -3528,8 +3530,8 @@ void AssemblyWriter::printGlobal(const GlobalVariable *GV) { } maybePrintComdat(Out, *GV); - if (GV->getAlignment()) - Out << ", align " << GV->getAlignment(); + if (MaybeAlign A = GV->getAlign()) + Out << ", align " << A->value(); SmallVector<std::pair<unsigned, MDNode *>, 4> MDs; GV->getAllMetadata(MDs); @@ -3637,13 +3639,13 @@ void AssemblyWriter::printTypeIdentities() { } auto &NamedTypes = TypePrinter.getNamedTypes(); - for (unsigned I = 0, E = NamedTypes.size(); I != E; ++I) { - PrintLLVMName(Out, NamedTypes[I]->getName(), LocalPrefix); + for (StructType *NamedType : NamedTypes) { + PrintLLVMName(Out, NamedType->getName(), LocalPrefix); Out << " = type "; // Make sure we print out at least one level of the type structure, so // that we do not get %FILE = type %FILE - TypePrinter.printStructBody(NamedTypes[I], Out); + TypePrinter.printStructBody(NamedType, Out); Out << '\n'; } } @@ -3757,8 +3759,8 @@ void AssemblyWriter::printFunction(const Function *F) { Out << '"'; } maybePrintComdat(Out, *F); - if (F->getAlignment()) - Out << " align " << F->getAlignment(); + if (MaybeAlign A = F->getAlign()) + Out << " align " << A->value(); if (F->hasGC()) Out << " gc \"" << F->getGC() << '"'; if (F->hasPrefixData()) { @@ -4239,8 +4241,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Out << ", "; writeOperand(AI->getArraySize(), true); } - if (AI->getAlignment()) { - Out << ", align " << AI->getAlignment(); + if (MaybeAlign A = AI->getAlign()) { + Out << ", align " << A->value(); } unsigned AddrSpace = AI->getType()->getAddressSpace(); @@ -4310,13 +4312,13 @@ void AssemblyWriter::printInstruction(const Instruction &I) { if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) { if (LI->isAtomic()) writeAtomic(LI->getContext(), LI->getOrdering(), LI->getSyncScopeID()); - if (LI->getAlignment()) - Out << ", align " << LI->getAlignment(); + if (MaybeAlign A = LI->getAlign()) + Out << ", align " << A->value(); } else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) { if (SI->isAtomic()) writeAtomic(SI->getContext(), SI->getOrdering(), SI->getSyncScopeID()); - if 
(SI->getAlignment()) - Out << ", align " << SI->getAlignment(); + if (MaybeAlign A = SI->getAlign()) + Out << ", align " << A->value(); } else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) { writeAtomicCmpXchg(CXI->getContext(), CXI->getSuccessOrdering(), CXI->getFailureOrdering(), CXI->getSyncScopeID()); diff --git a/llvm/lib/IR/AttributeImpl.h b/llvm/lib/IR/AttributeImpl.h index c5bbe6571096..1153fb827b56 100644 --- a/llvm/lib/IR/AttributeImpl.h +++ b/llvm/lib/IR/AttributeImpl.h @@ -253,7 +253,8 @@ public: uint64_t getDereferenceableBytes() const; uint64_t getDereferenceableOrNullBytes() const; std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const; - std::pair<unsigned, unsigned> getVScaleRangeArgs() const; + unsigned getVScaleRangeMin() const; + Optional<unsigned> getVScaleRangeMax() const; std::string getAsString(bool InAttrGrp) const; Type *getAttributeType(Attribute::AttrKind Kind) const; diff --git a/llvm/lib/IR/Attributes.cpp b/llvm/lib/IR/Attributes.cpp index f81a446d6e46..c899afae6cce 100644 --- a/llvm/lib/IR/Attributes.cpp +++ b/llvm/lib/IR/Attributes.cpp @@ -78,15 +78,18 @@ unpackAllocSizeArgs(uint64_t Num) { return std::make_pair(ElemSizeArg, NumElemsArg); } -static uint64_t packVScaleRangeArgs(unsigned MinValue, unsigned MaxValue) { - return uint64_t(MinValue) << 32 | MaxValue; +static uint64_t packVScaleRangeArgs(unsigned MinValue, + Optional<unsigned> MaxValue) { + return uint64_t(MinValue) << 32 | MaxValue.getValueOr(0); } -static std::pair<unsigned, unsigned> unpackVScaleRangeArgs(uint64_t Value) { +static std::pair<unsigned, Optional<unsigned>> +unpackVScaleRangeArgs(uint64_t Value) { unsigned MaxValue = Value & std::numeric_limits<unsigned>::max(); unsigned MinValue = Value >> 32; - return std::make_pair(MinValue, MaxValue); + return std::make_pair(MinValue, + MaxValue > 0 ? MaxValue : Optional<unsigned>()); } Attribute Attribute::get(LLVMContext &Context, Attribute::AttrKind Kind, @@ -354,10 +357,16 @@ std::pair<unsigned, Optional<unsigned>> Attribute::getAllocSizeArgs() const { return unpackAllocSizeArgs(pImpl->getValueAsInt()); } -std::pair<unsigned, unsigned> Attribute::getVScaleRangeArgs() const { +unsigned Attribute::getVScaleRangeMin() const { + assert(hasAttribute(Attribute::VScaleRange) && + "Trying to get vscale args from non-vscale attribute"); + return unpackVScaleRangeArgs(pImpl->getValueAsInt()).first; +} + +Optional<unsigned> Attribute::getVScaleRangeMax() const { assert(hasAttribute(Attribute::VScaleRange) && "Trying to get vscale args from non-vscale attribute"); - return unpackVScaleRangeArgs(pImpl->getValueAsInt()); + return unpackVScaleRangeArgs(pImpl->getValueAsInt()).second; } std::string Attribute::getAsString(bool InAttrGrp) const { @@ -428,13 +437,13 @@ std::string Attribute::getAsString(bool InAttrGrp) const { } if (hasAttribute(Attribute::VScaleRange)) { - unsigned MinValue, MaxValue; - std::tie(MinValue, MaxValue) = getVScaleRangeArgs(); + unsigned MinValue = getVScaleRangeMin(); + Optional<unsigned> MaxValue = getVScaleRangeMax(); std::string Result = "vscale_range("; Result += utostr(MinValue); Result += ','; - Result += utostr(MaxValue); + Result += utostr(MaxValue.getValueOr(0)); Result += ')'; return Result; } @@ -717,9 +726,12 @@ std::pair<unsigned, Optional<unsigned>> AttributeSet::getAllocSizeArgs() const { : std::pair<unsigned, Optional<unsigned>>(0, 0); } -std::pair<unsigned, unsigned> AttributeSet::getVScaleRangeArgs() const { - return SetNode ? 
SetNode->getVScaleRangeArgs() - : std::pair<unsigned, unsigned>(0, 0); +unsigned AttributeSet::getVScaleRangeMin() const { + return SetNode ? SetNode->getVScaleRangeMin() : 1; +} + +Optional<unsigned> AttributeSet::getVScaleRangeMax() const { + return SetNode ? SetNode->getVScaleRangeMax() : None; } std::string AttributeSet::getAsString(bool InAttrGrp) const { @@ -897,10 +909,16 @@ AttributeSetNode::getAllocSizeArgs() const { return std::make_pair(0, 0); } -std::pair<unsigned, unsigned> AttributeSetNode::getVScaleRangeArgs() const { +unsigned AttributeSetNode::getVScaleRangeMin() const { if (auto A = findEnumAttribute(Attribute::VScaleRange)) - return A->getVScaleRangeArgs(); - return std::make_pair(0, 0); + return A->getVScaleRangeMin(); + return 1; +} + +Optional<unsigned> AttributeSetNode::getVScaleRangeMax() const { + if (auto A = findEnumAttribute(Attribute::VScaleRange)) + return A->getVScaleRangeMax(); + return None; } std::string AttributeSetNode::getAsString(bool InAttrGrp) const { @@ -1118,16 +1136,21 @@ AttributeList AttributeList::get(LLVMContext &C, AttributeSet FnAttrs, } AttributeList AttributeList::get(LLVMContext &C, unsigned Index, - const AttrBuilder &B) { - if (!B.hasAttributes()) + AttributeSet Attrs) { + if (!Attrs.hasAttributes()) return {}; Index = attrIdxToArrayIdx(Index); SmallVector<AttributeSet, 8> AttrSets(Index + 1); - AttrSets[Index] = AttributeSet::get(C, B); + AttrSets[Index] = Attrs; return getImpl(C, AttrSets); } AttributeList AttributeList::get(LLVMContext &C, unsigned Index, + const AttrBuilder &B) { + return get(C, Index, AttributeSet::get(C, B)); +} + +AttributeList AttributeList::get(LLVMContext &C, unsigned Index, ArrayRef<Attribute::AttrKind> Kinds) { SmallVector<std::pair<unsigned, Attribute>, 8> Attrs; for (const auto K : Kinds) @@ -1623,8 +1646,12 @@ std::pair<unsigned, Optional<unsigned>> AttrBuilder::getAllocSizeArgs() const { return unpackAllocSizeArgs(getRawIntAttr(Attribute::AllocSize)); } -std::pair<unsigned, unsigned> AttrBuilder::getVScaleRangeArgs() const { - return unpackVScaleRangeArgs(getRawIntAttr(Attribute::VScaleRange)); +unsigned AttrBuilder::getVScaleRangeMin() const { + return unpackVScaleRangeArgs(getRawIntAttr(Attribute::VScaleRange)).first; +} + +Optional<unsigned> AttrBuilder::getVScaleRangeMax() const { + return unpackVScaleRangeArgs(getRawIntAttr(Attribute::VScaleRange)).second; } AttrBuilder &AttrBuilder::addAlignmentAttr(MaybeAlign Align) { @@ -1669,7 +1696,7 @@ AttrBuilder &AttrBuilder::addAllocSizeAttrFromRawRepr(uint64_t RawArgs) { } AttrBuilder &AttrBuilder::addVScaleRangeAttr(unsigned MinValue, - unsigned MaxValue) { + Optional<unsigned> MaxValue) { return addVScaleRangeAttrFromRawRepr(packVScaleRangeArgs(MinValue, MaxValue)); } diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp index d73d1e9c20b3..b8ad2b294b87 100644 --- a/llvm/lib/IR/AutoUpgrade.cpp +++ b/llvm/lib/IR/AutoUpgrade.cpp @@ -702,6 +702,31 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) { NewFn = Intrinsic::getDeclaration(F->getParent(), IID, Tys); return true; } + + if (Name == "arm.mve.vctp64" && + cast<FixedVectorType>(F->getReturnType())->getNumElements() == 4) { + // A vctp64 returning a v4i1 is converted to return a v2i1. Rename the + // function and deal with it below in UpgradeIntrinsicCall. + rename(F); + return true; + } + // These too are changed to accept a v2i1 insteead of the old v4i1. 
+ if (Name == "arm.mve.mull.int.predicated.v2i64.v4i32.v4i1" || + Name == "arm.mve.vqdmull.predicated.v2i64.v4i32.v4i1" || + Name == "arm.mve.vldr.gather.base.predicated.v2i64.v2i64.v4i1" || + Name == "arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1" || + Name == "arm.mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v4i1" || + Name == "arm.mve.vstr.scatter.base.predicated.v2i64.v2i64.v4i1" || + Name == "arm.mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v4i1" || + Name == "arm.mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v4i1" || + Name == "arm.cde.vcx1q.predicated.v2i64.v4i1" || + Name == "arm.cde.vcx1qa.predicated.v2i64.v4i1" || + Name == "arm.cde.vcx2q.predicated.v2i64.v4i1" || + Name == "arm.cde.vcx2qa.predicated.v2i64.v4i1" || + Name == "arm.cde.vcx3q.predicated.v2i64.v4i1" || + Name == "arm.cde.vcx3qa.predicated.v2i64.v4i1") + return true; + break; } @@ -1803,6 +1828,96 @@ void llvm::UpgradeInlineAsmString(std::string *AsmStr) { } } +static Value *UpgradeARMIntrinsicCall(StringRef Name, CallInst *CI, Function *F, + IRBuilder<> &Builder) { + if (Name == "mve.vctp64.old") { + // Replace the old v4i1 vctp64 with a v2i1 vctp and predicate-casts to the + // correct type. + Value *VCTP = Builder.CreateCall( + Intrinsic::getDeclaration(F->getParent(), Intrinsic::arm_mve_vctp64), + CI->getArgOperand(0), CI->getName()); + Value *C1 = Builder.CreateCall( + Intrinsic::getDeclaration( + F->getParent(), Intrinsic::arm_mve_pred_v2i, + {VectorType::get(Builder.getInt1Ty(), 2, false)}), + VCTP); + return Builder.CreateCall( + Intrinsic::getDeclaration( + F->getParent(), Intrinsic::arm_mve_pred_i2v, + {VectorType::get(Builder.getInt1Ty(), 4, false)}), + C1); + } else if (Name == "mve.mull.int.predicated.v2i64.v4i32.v4i1" || + Name == "mve.vqdmull.predicated.v2i64.v4i32.v4i1" || + Name == "mve.vldr.gather.base.predicated.v2i64.v2i64.v4i1" || + Name == "mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1" || + Name == "mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v4i1" || + Name == "mve.vstr.scatter.base.predicated.v2i64.v2i64.v4i1" || + Name == "mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v4i1" || + Name == "mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v4i1" || + Name == "cde.vcx1q.predicated.v2i64.v4i1" || + Name == "cde.vcx1qa.predicated.v2i64.v4i1" || + Name == "cde.vcx2q.predicated.v2i64.v4i1" || + Name == "cde.vcx2qa.predicated.v2i64.v4i1" || + Name == "cde.vcx3q.predicated.v2i64.v4i1" || + Name == "cde.vcx3qa.predicated.v2i64.v4i1") { + std::vector<Type *> Tys; + unsigned ID = CI->getIntrinsicID(); + Type *V2I1Ty = FixedVectorType::get(Builder.getInt1Ty(), 2); + switch (ID) { + case Intrinsic::arm_mve_mull_int_predicated: + case Intrinsic::arm_mve_vqdmull_predicated: + case Intrinsic::arm_mve_vldr_gather_base_predicated: + Tys = {CI->getType(), CI->getOperand(0)->getType(), V2I1Ty}; + break; + case Intrinsic::arm_mve_vldr_gather_base_wb_predicated: + case Intrinsic::arm_mve_vstr_scatter_base_predicated: + case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated: + Tys = {CI->getOperand(0)->getType(), CI->getOperand(0)->getType(), + V2I1Ty}; + break; + case Intrinsic::arm_mve_vldr_gather_offset_predicated: + Tys = {CI->getType(), CI->getOperand(0)->getType(), + CI->getOperand(1)->getType(), V2I1Ty}; + break; + case Intrinsic::arm_mve_vstr_scatter_offset_predicated: + Tys = {CI->getOperand(0)->getType(), CI->getOperand(1)->getType(), + CI->getOperand(2)->getType(), V2I1Ty}; + break; + case Intrinsic::arm_cde_vcx1q_predicated: + case 
Intrinsic::arm_cde_vcx1qa_predicated: + case Intrinsic::arm_cde_vcx2q_predicated: + case Intrinsic::arm_cde_vcx2qa_predicated: + case Intrinsic::arm_cde_vcx3q_predicated: + case Intrinsic::arm_cde_vcx3qa_predicated: + Tys = {CI->getOperand(1)->getType(), V2I1Ty}; + break; + default: + llvm_unreachable("Unhandled Intrinsic!"); + } + + std::vector<Value *> Ops; + for (Value *Op : CI->args()) { + Type *Ty = Op->getType(); + if (Ty->getScalarSizeInBits() == 1) { + Value *C1 = Builder.CreateCall( + Intrinsic::getDeclaration( + F->getParent(), Intrinsic::arm_mve_pred_v2i, + {VectorType::get(Builder.getInt1Ty(), 4, false)}), + Op); + Op = Builder.CreateCall( + Intrinsic::getDeclaration(F->getParent(), + Intrinsic::arm_mve_pred_i2v, {V2I1Ty}), + C1); + } + Ops.push_back(Op); + } + + Function *Fn = Intrinsic::getDeclaration(F->getParent(), ID, Tys); + return Builder.CreateCall(Fn, Ops, CI->getName()); + } + llvm_unreachable("Unknown function for ARM CallInst upgrade."); +} + /// Upgrade a call to an old intrinsic. All argument and return casting must be /// provided to seamlessly integrate with existing context. void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { @@ -1826,6 +1941,9 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { bool IsNVVM = Name.startswith("nvvm."); if (IsNVVM) Name = Name.substr(5); + bool IsARM = Name.startswith("arm."); + if (IsARM) + Name = Name.substr(4); if (IsX86 && Name.startswith("sse4a.movnt.")) { Module *M = F->getParent(); @@ -2289,14 +2407,12 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { if (CI->arg_size() >= 3) Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, CI->getArgOperand(1)); - } else if (IsX86 && (Name.startswith("avx512.mask.loadu."))) { - Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), - CI->getArgOperand(1), CI->getArgOperand(2), - /*Aligned*/false); - } else if (IsX86 && (Name.startswith("avx512.mask.load."))) { - Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), - CI->getArgOperand(1),CI->getArgOperand(2), - /*Aligned*/true); + } else if (IsX86 && Name.startswith("avx512.mask.load")) { + // "avx512.mask.loadu." or "avx512.mask.load." + bool Aligned = Name[16] != 'u'; // "avx512.mask.loadu". + Rep = + UpgradeMaskedLoad(Builder, CI->getArgOperand(0), CI->getArgOperand(1), + CI->getArgOperand(2), Aligned); } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) { auto *ResultTy = cast<FixedVectorType>(CI->getType()); Type *PtrTy = ResultTy->getElementType(); @@ -3649,6 +3765,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { F->getParent(), Intrinsic::convert_from_fp16, {Builder.getFloatTy()}), CI->getArgOperand(0), "h2f"); + } else if (IsARM) { + Rep = UpgradeARMIntrinsicCall(Name, CI, F, Builder); } else { llvm_unreachable("Unknown function for CallInst upgrade."); } diff --git a/llvm/lib/IR/BasicBlock.cpp b/llvm/lib/IR/BasicBlock.cpp index ed1956e0f7e9..7beafc485d09 100644 --- a/llvm/lib/IR/BasicBlock.cpp +++ b/llvm/lib/IR/BasicBlock.cpp @@ -450,8 +450,8 @@ BasicBlock *BasicBlock::splitBasicBlockBefore(iterator I, const Twine &BBName) { void BasicBlock::replacePhiUsesWith(BasicBlock *Old, BasicBlock *New) { // N.B. This might not be a complete BasicBlock, so don't assume // that it ends with a non-phi instruction. 
- for (iterator II = begin(), IE = end(); II != IE; ++II) { - PHINode *PN = dyn_cast<PHINode>(II); + for (Instruction &I : *this) { + PHINode *PN = dyn_cast<PHINode>(&I); if (!PN) break; PN->replaceIncomingBlockWith(Old, New); diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp index 437fd0558447..8668fe82601c 100644 --- a/llvm/lib/IR/ConstantFold.cpp +++ b/llvm/lib/IR/ConstantFold.cpp @@ -1801,46 +1801,8 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred, } else if (isa<ConstantFP>(C1) && isa<ConstantFP>(C2)) { const APFloat &C1V = cast<ConstantFP>(C1)->getValueAPF(); const APFloat &C2V = cast<ConstantFP>(C2)->getValueAPF(); - APFloat::cmpResult R = C1V.compare(C2V); - switch (pred) { - default: llvm_unreachable("Invalid FCmp Predicate"); - case FCmpInst::FCMP_FALSE: return Constant::getNullValue(ResultTy); - case FCmpInst::FCMP_TRUE: return Constant::getAllOnesValue(ResultTy); - case FCmpInst::FCMP_UNO: - return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered); - case FCmpInst::FCMP_ORD: - return ConstantInt::get(ResultTy, R!=APFloat::cmpUnordered); - case FCmpInst::FCMP_UEQ: - return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered || - R==APFloat::cmpEqual); - case FCmpInst::FCMP_OEQ: - return ConstantInt::get(ResultTy, R==APFloat::cmpEqual); - case FCmpInst::FCMP_UNE: - return ConstantInt::get(ResultTy, R!=APFloat::cmpEqual); - case FCmpInst::FCMP_ONE: - return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan || - R==APFloat::cmpGreaterThan); - case FCmpInst::FCMP_ULT: - return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered || - R==APFloat::cmpLessThan); - case FCmpInst::FCMP_OLT: - return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan); - case FCmpInst::FCMP_UGT: - return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered || - R==APFloat::cmpGreaterThan); - case FCmpInst::FCMP_OGT: - return ConstantInt::get(ResultTy, R==APFloat::cmpGreaterThan); - case FCmpInst::FCMP_ULE: - return ConstantInt::get(ResultTy, R!=APFloat::cmpGreaterThan); - case FCmpInst::FCMP_OLE: - return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan || - R==APFloat::cmpEqual); - case FCmpInst::FCMP_UGE: - return ConstantInt::get(ResultTy, R!=APFloat::cmpLessThan); - case FCmpInst::FCMP_OGE: - return ConstantInt::get(ResultTy, R==APFloat::cmpGreaterThan || - R==APFloat::cmpEqual); - } + CmpInst::Predicate Predicate = CmpInst::Predicate(pred); + return ConstantInt::get(ResultTy, FCmpInst::compare(C1V, C2V, Predicate)); } else if (auto *C1VTy = dyn_cast<VectorType>(C1->getType())) { // Fast path for splatted constants. @@ -2215,9 +2177,8 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C, if (C->isNullValue()) { bool isNull = true; - for (unsigned i = 0, e = Idxs.size(); i != e; ++i) - if (!isa<UndefValue>(Idxs[i]) && - !cast<Constant>(Idxs[i])->isNullValue()) { + for (Value *Idx : Idxs) + if (!isa<UndefValue>(Idx) && !cast<Constant>(Idx)->isNullValue()) { isNull = false; break; } @@ -2233,8 +2194,8 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C, // The GEP returns a vector of pointers when one of more of // its arguments is a vector. 
- for (unsigned i = 0, e = Idxs.size(); i != e; ++i) { - if (auto *VT = dyn_cast<VectorType>(Idxs[i]->getType())) { + for (Value *Idx : Idxs) { + if (auto *VT = dyn_cast<VectorType>(Idx->getType())) { assert((!isa<VectorType>(GEPTy) || isa<ScalableVectorType>(GEPTy) == isa<ScalableVectorType>(VT)) && "Mismatched GEPTy vector types"); diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp index c66cfb6e9ac1..837be910f6d8 100644 --- a/llvm/lib/IR/Constants.cpp +++ b/llvm/lib/IR/Constants.cpp @@ -535,6 +535,9 @@ void llvm::deleteConstant(Constant *C) { case Constant::DSOLocalEquivalentVal: delete static_cast<DSOLocalEquivalent *>(C); break; + case Constant::NoCFIValueVal: + delete static_cast<NoCFIValue *>(C); + break; case Constant::UndefValueVal: delete static_cast<UndefValue *>(C); break; @@ -1296,9 +1299,10 @@ Constant *ConstantArray::getImpl(ArrayType *Ty, ArrayRef<Constant*> V) { if (V.empty()) return ConstantAggregateZero::get(Ty); - for (unsigned i = 0, e = V.size(); i != e; ++i) { - assert(V[i]->getType() == Ty->getElementType() && + for (Constant *C : V) { + assert(C->getType() == Ty->getElementType() && "Wrong type in array element initializer"); + (void)C; } // If this is an all-zero array, return a ConstantAggregateZero object. If @@ -1364,12 +1368,12 @@ Constant *ConstantStruct::get(StructType *ST, ArrayRef<Constant*> V) { isZero = V[0]->isNullValue(); // PoisonValue inherits UndefValue, so its check is not necessary. if (isUndef || isZero) { - for (unsigned i = 0, e = V.size(); i != e; ++i) { - if (!V[i]->isNullValue()) + for (Constant *C : V) { + if (!C->isNullValue()) isZero = false; - if (!isa<PoisonValue>(V[i])) + if (!isa<PoisonValue>(C)) isPoison = false; - if (isa<PoisonValue>(V[i]) || !isa<UndefValue>(V[i])) + if (isa<PoisonValue>(C) || !isa<UndefValue>(C)) isUndef = false; } } @@ -1962,6 +1966,47 @@ Value *DSOLocalEquivalent::handleOperandChangeImpl(Value *From, Value *To) { return nullptr; } +NoCFIValue *NoCFIValue::get(GlobalValue *GV) { + NoCFIValue *&NC = GV->getContext().pImpl->NoCFIValues[GV]; + if (!NC) + NC = new NoCFIValue(GV); + + assert(NC->getGlobalValue() == GV && + "NoCFIValue does not match the expected global value"); + return NC; +} + +NoCFIValue::NoCFIValue(GlobalValue *GV) + : Constant(GV->getType(), Value::NoCFIValueVal, &Op<0>(), 1) { + setOperand(0, GV); +} + +/// Remove the constant from the constant table. +void NoCFIValue::destroyConstantImpl() { + const GlobalValue *GV = getGlobalValue(); + GV->getContext().pImpl->NoCFIValues.erase(GV); +} + +Value *NoCFIValue::handleOperandChangeImpl(Value *From, Value *To) { + assert(From == getGlobalValue() && "Changing value does not match operand."); + + GlobalValue *GV = dyn_cast<GlobalValue>(To->stripPointerCasts()); + assert(GV && "Can only replace the operands with a global value"); + + NoCFIValue *&NewNC = getContext().pImpl->NoCFIValues[GV]; + if (NewNC) + return llvm::ConstantExpr::getBitCast(NewNC, getType()); + + getContext().pImpl->NoCFIValues.erase(getGlobalValue()); + NewNC = this; + setOperand(0, GV); + + if (GV->getType() != getType()) + mutateType(GV->getType()); + + return nullptr; +} + //---- ConstantExpr::get() implementations. 
// diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp index 2c396ae97499..a263d2536541 100644 --- a/llvm/lib/IR/Core.cpp +++ b/llvm/lib/IR/Core.cpp @@ -1696,6 +1696,14 @@ LLVMValueRef LLVMConstGEP(LLVMValueRef ConstantVal, return wrap(ConstantExpr::getGetElementPtr(Ty, Val, IdxList)); } +LLVMValueRef LLVMConstGEP2(LLVMTypeRef Ty, LLVMValueRef ConstantVal, + LLVMValueRef *ConstantIndices, unsigned NumIndices) { + ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices), + NumIndices); + Constant *Val = unwrap<Constant>(ConstantVal); + return wrap(ConstantExpr::getGetElementPtr(unwrap(Ty), Val, IdxList)); +} + LLVMValueRef LLVMConstInBoundsGEP(LLVMValueRef ConstantVal, LLVMValueRef *ConstantIndices, unsigned NumIndices) { @@ -1707,6 +1715,15 @@ LLVMValueRef LLVMConstInBoundsGEP(LLVMValueRef ConstantVal, return wrap(ConstantExpr::getInBoundsGetElementPtr(Ty, Val, IdxList)); } +LLVMValueRef LLVMConstInBoundsGEP2(LLVMTypeRef Ty, LLVMValueRef ConstantVal, + LLVMValueRef *ConstantIndices, + unsigned NumIndices) { + ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices), + NumIndices); + Constant *Val = unwrap<Constant>(ConstantVal); + return wrap(ConstantExpr::getInBoundsGetElementPtr(unwrap(Ty), Val, IdxList)); +} + LLVMValueRef LLVMConstTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType) { return wrap(ConstantExpr::getTrunc(unwrap<Constant>(ConstantVal), unwrap(ToType))); @@ -3007,13 +3024,17 @@ LLVMTypeRef LLVMGetAllocatedType(LLVMValueRef Alloca) { /*--.. Operations on gep instructions (only) ...............................--*/ LLVMBool LLVMIsInBounds(LLVMValueRef GEP) { - return unwrap<GetElementPtrInst>(GEP)->isInBounds(); + return unwrap<GEPOperator>(GEP)->isInBounds(); } void LLVMSetIsInBounds(LLVMValueRef GEP, LLVMBool InBounds) { return unwrap<GetElementPtrInst>(GEP)->setIsInBounds(InBounds); } +LLVMTypeRef LLVMGetGEPSourceElementType(LLVMValueRef GEP) { + return wrap(unwrap<GEPOperator>(GEP)->getSourceElementType()); +} + /*--.. 
Operations on phi nodes .............................................--*/ void LLVMAddIncoming(LLVMValueRef PhiNode, LLVMValueRef *IncomingValues, @@ -3039,7 +3060,7 @@ LLVMBasicBlockRef LLVMGetIncomingBlock(LLVMValueRef PhiNode, unsigned Index) { unsigned LLVMGetNumIndices(LLVMValueRef Inst) { auto *I = unwrap(Inst); - if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) + if (auto *GEP = dyn_cast<GEPOperator>(I)) return GEP->getNumIndices(); if (auto *EV = dyn_cast<ExtractValueInst>(I)) return EV->getNumIndices(); diff --git a/llvm/lib/IR/DIBuilder.cpp b/llvm/lib/IR/DIBuilder.cpp index 548962bd6a98..35af22034a12 100644 --- a/llvm/lib/IR/DIBuilder.cpp +++ b/llvm/lib/IR/DIBuilder.cpp @@ -671,11 +671,11 @@ DIBuilder::getOrCreateMacroArray(ArrayRef<Metadata *> Elements) { DITypeRefArray DIBuilder::getOrCreateTypeArray(ArrayRef<Metadata *> Elements) { SmallVector<llvm::Metadata *, 16> Elts; - for (unsigned i = 0, e = Elements.size(); i != e; ++i) { - if (Elements[i] && isa<MDNode>(Elements[i])) - Elts.push_back(cast<DIType>(Elements[i])); + for (Metadata *E : Elements) { + if (isa_and_nonnull<MDNode>(E)) + Elts.push_back(cast<DIType>(E)); else - Elts.push_back(Elements[i]); + Elts.push_back(E); } return DITypeRefArray(MDNode::get(VMContext, Elts)); } diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp index 2ace18048262..61b2b13bfd03 100644 --- a/llvm/lib/IR/DataLayout.cpp +++ b/llvm/lib/IR/DataLayout.cpp @@ -124,26 +124,25 @@ LayoutAlignElem::operator==(const LayoutAlignElem &rhs) const { // PointerAlignElem, PointerAlign support //===----------------------------------------------------------------------===// -PointerAlignElem PointerAlignElem::get(uint32_t AddressSpace, Align ABIAlign, - Align PrefAlign, uint32_t TypeByteWidth, - uint32_t IndexWidth) { +PointerAlignElem PointerAlignElem::getInBits(uint32_t AddressSpace, + Align ABIAlign, Align PrefAlign, + uint32_t TypeBitWidth, + uint32_t IndexBitWidth) { assert(ABIAlign <= PrefAlign && "Preferred alignment worse than ABI!"); PointerAlignElem retval; retval.AddressSpace = AddressSpace; retval.ABIAlign = ABIAlign; retval.PrefAlign = PrefAlign; - retval.TypeByteWidth = TypeByteWidth; - retval.IndexWidth = IndexWidth; + retval.TypeBitWidth = TypeBitWidth; + retval.IndexBitWidth = IndexBitWidth; return retval; } bool PointerAlignElem::operator==(const PointerAlignElem &rhs) const { - return (ABIAlign == rhs.ABIAlign - && AddressSpace == rhs.AddressSpace - && PrefAlign == rhs.PrefAlign - && TypeByteWidth == rhs.TypeByteWidth - && IndexWidth == rhs.IndexWidth); + return (ABIAlign == rhs.ABIAlign && AddressSpace == rhs.AddressSpace && + PrefAlign == rhs.PrefAlign && TypeBitWidth == rhs.TypeBitWidth && + IndexBitWidth == rhs.IndexBitWidth); } //===----------------------------------------------------------------------===// @@ -197,7 +196,7 @@ void DataLayout::reset(StringRef Desc) { E.PrefAlign, E.TypeBitWidth)) return report_fatal_error(std::move(Err)); } - if (Error Err = setPointerAlignment(0, Align(8), Align(8), 8, 8)) + if (Error Err = setPointerAlignmentInBits(0, Align(8), Align(8), 64, 64)) return report_fatal_error(std::move(Err)); if (Error Err = parseSpecifier(Desc)) @@ -318,7 +317,7 @@ Error DataLayout::parseSpecifier(StringRef Desc) { if (Error Err = ::split(Rest, ':', Split)) return Err; unsigned PointerMemSize; - if (Error Err = getIntInBytes(Tok, PointerMemSize)) + if (Error Err = getInt(Tok, PointerMemSize)) return Err; if (!PointerMemSize) return reportError("Invalid pointer size of 0 bytes"); @@ -354,13 +353,13 @@ 
Error DataLayout::parseSpecifier(StringRef Desc) { if (!Rest.empty()) { if (Error Err = ::split(Rest, ':', Split)) return Err; - if (Error Err = getIntInBytes(Tok, IndexSize)) + if (Error Err = getInt(Tok, IndexSize)) return Err; if (!IndexSize) return reportError("Invalid index size of 0 bytes"); } } - if (Error Err = setPointerAlignment( + if (Error Err = setPointerAlignmentInBits( AddrSpace, assumeAligned(PointerABIAlign), assumeAligned(PointerPrefAlign), PointerMemSize, IndexSize)) return Err; @@ -603,9 +602,10 @@ DataLayout::getPointerAlignElem(uint32_t AddressSpace) const { return Pointers[0]; } -Error DataLayout::setPointerAlignment(uint32_t AddrSpace, Align ABIAlign, - Align PrefAlign, uint32_t TypeByteWidth, - uint32_t IndexWidth) { +Error DataLayout::setPointerAlignmentInBits(uint32_t AddrSpace, Align ABIAlign, + Align PrefAlign, + uint32_t TypeBitWidth, + uint32_t IndexBitWidth) { if (PrefAlign < ABIAlign) return reportError( "Preferred alignment cannot be less than the ABI alignment"); @@ -615,13 +615,14 @@ Error DataLayout::setPointerAlignment(uint32_t AddrSpace, Align ABIAlign, return A.AddressSpace < AddressSpace; }); if (I == Pointers.end() || I->AddressSpace != AddrSpace) { - Pointers.insert(I, PointerAlignElem::get(AddrSpace, ABIAlign, PrefAlign, - TypeByteWidth, IndexWidth)); + Pointers.insert(I, + PointerAlignElem::getInBits(AddrSpace, ABIAlign, PrefAlign, + TypeBitWidth, IndexBitWidth)); } else { I->ABIAlign = ABIAlign; I->PrefAlign = PrefAlign; - I->TypeByteWidth = TypeByteWidth; - I->IndexWidth = IndexWidth; + I->TypeBitWidth = TypeBitWidth; + I->IndexBitWidth = IndexBitWidth; } return Error::success(); } @@ -704,13 +705,14 @@ Align DataLayout::getPointerPrefAlignment(unsigned AS) const { } unsigned DataLayout::getPointerSize(unsigned AS) const { - return getPointerAlignElem(AS).TypeByteWidth; + return divideCeil(getPointerAlignElem(AS).TypeBitWidth, 8); } unsigned DataLayout::getMaxIndexSize() const { unsigned MaxIndexSize = 0; for (auto &P : Pointers) - MaxIndexSize = std::max(MaxIndexSize, P.IndexWidth); + MaxIndexSize = + std::max(MaxIndexSize, (unsigned)divideCeil(P.TypeBitWidth, 8)); return MaxIndexSize; } @@ -723,7 +725,7 @@ unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const { } unsigned DataLayout::getIndexSize(unsigned AS) const { - return getPointerAlignElem(AS).IndexWidth; + return divideCeil(getPointerAlignElem(AS).IndexBitWidth, 8); } unsigned DataLayout::getIndexTypeSizeInBits(Type *Ty) const { @@ -901,16 +903,14 @@ int64_t DataLayout::getIndexedOffsetInType(Type *ElemTy, return Result; } -static void addElementIndex(SmallVectorImpl<APInt> &Indices, TypeSize ElemSize, - APInt &Offset) { +static APInt getElementIndex(TypeSize ElemSize, APInt &Offset) { // Skip over scalable or zero size elements. Also skip element sizes larger // than the positive index space, because the arithmetic below may not be // correct in that case. 
unsigned BitWidth = Offset.getBitWidth(); if (ElemSize.isScalable() || ElemSize == 0 || !isUIntN(BitWidth - 1, ElemSize)) { - Indices.push_back(APInt::getZero(BitWidth)); - return; + return APInt::getZero(BitWidth); } APInt Index = Offset.sdiv(ElemSize); @@ -921,47 +921,52 @@ static void addElementIndex(SmallVectorImpl<APInt> &Indices, TypeSize ElemSize, Offset += ElemSize; assert(Offset.isNonNegative() && "Remaining offset shouldn't be negative"); } - Indices.push_back(Index); + return Index; } -SmallVector<APInt> DataLayout::getGEPIndicesForOffset(Type *&ElemTy, - APInt &Offset) const { - assert(ElemTy->isSized() && "Element type must be sized"); - SmallVector<APInt> Indices; - addElementIndex(Indices, getTypeAllocSize(ElemTy), Offset); - while (Offset != 0) { - if (auto *ArrTy = dyn_cast<ArrayType>(ElemTy)) { - ElemTy = ArrTy->getElementType(); - addElementIndex(Indices, getTypeAllocSize(ElemTy), Offset); - continue; - } +Optional<APInt> DataLayout::getGEPIndexForOffset(Type *&ElemTy, + APInt &Offset) const { + if (auto *ArrTy = dyn_cast<ArrayType>(ElemTy)) { + ElemTy = ArrTy->getElementType(); + return getElementIndex(getTypeAllocSize(ElemTy), Offset); + } - if (auto *VecTy = dyn_cast<VectorType>(ElemTy)) { - ElemTy = VecTy->getElementType(); - unsigned ElemSizeInBits = getTypeSizeInBits(ElemTy).getFixedSize(); - // GEPs over non-multiple of 8 size vector elements are invalid. - if (ElemSizeInBits % 8 != 0) - break; + if (auto *VecTy = dyn_cast<VectorType>(ElemTy)) { + ElemTy = VecTy->getElementType(); + unsigned ElemSizeInBits = getTypeSizeInBits(ElemTy).getFixedSize(); + // GEPs over non-multiple of 8 size vector elements are invalid. + if (ElemSizeInBits % 8 != 0) + return None; - addElementIndex(Indices, TypeSize::Fixed(ElemSizeInBits / 8), Offset); - continue; - } + return getElementIndex(TypeSize::Fixed(ElemSizeInBits / 8), Offset); + } - if (auto *STy = dyn_cast<StructType>(ElemTy)) { - const StructLayout *SL = getStructLayout(STy); - uint64_t IntOffset = Offset.getZExtValue(); - if (IntOffset >= SL->getSizeInBytes()) - break; + if (auto *STy = dyn_cast<StructType>(ElemTy)) { + const StructLayout *SL = getStructLayout(STy); + uint64_t IntOffset = Offset.getZExtValue(); + if (IntOffset >= SL->getSizeInBytes()) + return None; - unsigned Index = SL->getElementContainingOffset(IntOffset); - Offset -= SL->getElementOffset(Index); - ElemTy = STy->getElementType(Index); - Indices.push_back(APInt(32, Index)); - continue; - } + unsigned Index = SL->getElementContainingOffset(IntOffset); + Offset -= SL->getElementOffset(Index); + ElemTy = STy->getElementType(Index); + return APInt(32, Index); + } + + // Non-aggregate type. + return None; +} - // Can't index into non-aggregate type. 
- break; +SmallVector<APInt> DataLayout::getGEPIndicesForOffset(Type *&ElemTy, + APInt &Offset) const { + assert(ElemTy->isSized() && "Element type must be sized"); + SmallVector<APInt> Indices; + Indices.push_back(getElementIndex(getTypeAllocSize(ElemTy), Offset)); + while (Offset != 0) { + Optional<APInt> Index = getGEPIndexForOffset(ElemTy, Offset); + if (!Index) + break; + Indices.push_back(*Index); } return Indices; diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp index 82b20a8af91b..f1a6402fb11b 100644 --- a/llvm/lib/IR/Function.cpp +++ b/llvm/lib/IR/Function.cpp @@ -980,7 +980,10 @@ enum IIT_Info { IIT_STRUCT9 = 49, IIT_V256 = 50, IIT_AMX = 51, - IIT_PPCF128 = 52 + IIT_PPCF128 = 52, + IIT_V3 = 53, + IIT_EXTERNREF = 54, + IIT_FUNCREF = 55 }; static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos, @@ -1056,6 +1059,10 @@ static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos, OutputTable.push_back(IITDescriptor::getVector(2, IsScalableVector)); DecodeIITType(NextElt, Infos, Info, OutputTable); return; + case IIT_V3: + OutputTable.push_back(IITDescriptor::getVector(3, IsScalableVector)); + DecodeIITType(NextElt, Infos, Info, OutputTable); + return; case IIT_V4: OutputTable.push_back(IITDescriptor::getVector(4, IsScalableVector)); DecodeIITType(NextElt, Infos, Info, OutputTable); @@ -1092,6 +1099,14 @@ static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos, OutputTable.push_back(IITDescriptor::getVector(1024, IsScalableVector)); DecodeIITType(NextElt, Infos, Info, OutputTable); return; + case IIT_EXTERNREF: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 10)); + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0)); + return; + case IIT_FUNCREF: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 20)); + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 8)); + return; case IIT_PTR: OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 0)); DecodeIITType(NextElt, Infos, Info, OutputTable); diff --git a/llvm/lib/IR/Globals.cpp b/llvm/lib/IR/Globals.cpp index 9f38288095e3..b6bd25aa1234 100644 --- a/llvm/lib/IR/Globals.cpp +++ b/llvm/lib/IR/Globals.cpp @@ -126,7 +126,7 @@ void GlobalObject::setAlignment(MaybeAlign Align) { void GlobalObject::copyAttributesFrom(const GlobalObject *Src) { GlobalValue::copyAttributesFrom(Src); - setAlignment(MaybeAlign(Src->getAlignment())); + setAlignment(Src->getAlign()); setSection(Src->getSection()); } @@ -249,7 +249,7 @@ bool GlobalObject::canIncreaseAlignment() const { // alignment specified. (If it is assigned a section, the global // could be densely packed with other objects in the section, and // increasing the alignment could cause padding issues.) 
- if (hasSection() && getAlignment() > 0) + if (hasSection() && getAlign().hasValue()) return false; // On ELF platforms, we're further restricted in that we can't diff --git a/llvm/lib/IR/InlineAsm.cpp b/llvm/lib/IR/InlineAsm.cpp index 56932b457225..a0c48781ced5 100644 --- a/llvm/lib/IR/InlineAsm.cpp +++ b/llvm/lib/IR/InlineAsm.cpp @@ -262,12 +262,12 @@ bool InlineAsm::Verify(FunctionType *Ty, StringRef ConstStr) { unsigned NumOutputs = 0, NumInputs = 0, NumClobbers = 0; unsigned NumIndirect = 0; - for (unsigned i = 0, e = Constraints.size(); i != e; ++i) { - switch (Constraints[i].Type) { + for (const ConstraintInfo &Constraint : Constraints) { + switch (Constraint.Type) { case InlineAsm::isOutput: if ((NumInputs-NumIndirect) != 0 || NumClobbers != 0) return false; // outputs before inputs and clobbers. - if (!Constraints[i].isIndirect) { + if (!Constraint.isIndirect) { ++NumOutputs; break; } diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp index a4659da7e807..4480ec799c35 100644 --- a/llvm/lib/IR/Instruction.cpp +++ b/llvm/lib/IR/Instruction.cpp @@ -166,7 +166,10 @@ void Instruction::dropPoisonGeneratingFlags() { cast<GetElementPtrInst>(this)->setIsInBounds(false); break; } - // TODO: FastMathFlags! + if (isa<FPMathOperator>(this)) { + setHasNoNaNs(false); + setHasNoInfs(false); + } assert(!hasPoisonGeneratingFlags() && "must be kept in sync"); } @@ -436,17 +439,17 @@ static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2, if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1)) return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() && - (AI->getAlignment() == cast<AllocaInst>(I2)->getAlignment() || + (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() || IgnoreAlignment); if (const LoadInst *LI = dyn_cast<LoadInst>(I1)) return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() && - (LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() || + (LI->getAlign() == cast<LoadInst>(I2)->getAlign() || IgnoreAlignment) && LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() && LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID(); if (const StoreInst *SI = dyn_cast<StoreInst>(I1)) return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() && - (SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() || + (SI->getAlign() == cast<StoreInst>(I2)->getAlign() || IgnoreAlignment) && SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() && SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID(); diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp index ad27a6d8c08e..7798af3b19b9 100644 --- a/llvm/lib/IR/Instructions.cpp +++ b/llvm/lib/IR/Instructions.cpp @@ -1410,8 +1410,6 @@ bool AllocaInst::isStaticAlloca() const { void LoadInst::AssertOK() { assert(getOperand(0)->getType()->isPointerTy() && "Ptr must have pointer type."); - assert(!(isAtomic() && getAlignment() == 0) && - "Alignment required for atomic load"); } static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) { @@ -1490,8 +1488,6 @@ void StoreInst::AssertOK() { assert(cast<PointerType>(getOperand(1)->getType()) ->isOpaqueOrPointeeTypeMatches(getOperand(0)->getType()) && "Ptr must be a pointer to Val type!"); - assert(!(isAtomic() && getAlignment() == 0) && - "Alignment required for atomic store"); } StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore) @@ -2328,7 +2324,6 @@ bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask, } Src1Elts.setBit(i); Src1Identity &= (M == (i + NumSrcElts)); - 
continue; } assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() && "unknown shuffle elements"); @@ -4165,6 +4160,47 @@ bool ICmpInst::compare(const APInt &LHS, const APInt &RHS, }; } +bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS, + FCmpInst::Predicate Pred) { + APFloat::cmpResult R = LHS.compare(RHS); + switch (Pred) { + default: + llvm_unreachable("Invalid FCmp Predicate"); + case FCmpInst::FCMP_FALSE: + return false; + case FCmpInst::FCMP_TRUE: + return true; + case FCmpInst::FCMP_UNO: + return R == APFloat::cmpUnordered; + case FCmpInst::FCMP_ORD: + return R != APFloat::cmpUnordered; + case FCmpInst::FCMP_UEQ: + return R == APFloat::cmpUnordered || R == APFloat::cmpEqual; + case FCmpInst::FCMP_OEQ: + return R == APFloat::cmpEqual; + case FCmpInst::FCMP_UNE: + return R != APFloat::cmpEqual; + case FCmpInst::FCMP_ONE: + return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan; + case FCmpInst::FCMP_ULT: + return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan; + case FCmpInst::FCMP_OLT: + return R == APFloat::cmpLessThan; + case FCmpInst::FCMP_UGT: + return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan; + case FCmpInst::FCMP_OGT: + return R == APFloat::cmpGreaterThan; + case FCmpInst::FCMP_ULE: + return R != APFloat::cmpGreaterThan; + case FCmpInst::FCMP_OLE: + return R == APFloat::cmpLessThan || R == APFloat::cmpEqual; + case FCmpInst::FCMP_UGE: + return R != APFloat::cmpLessThan; + case FCmpInst::FCMP_OGE: + return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual; + } +} + CmpInst::Predicate CmpInst::getFlippedSignednessPredicate(Predicate pred) { assert(CmpInst::isRelational(pred) && "Call only with non-equality predicates!"); @@ -4411,7 +4447,7 @@ void SwitchInstProfUpdateWrapper::addCase( Weights.getValue()[SI.getNumSuccessors() - 1] = *W; } else if (Weights) { Changed = true; - Weights.getValue().push_back(W ? 
*W : 0); + Weights.getValue().push_back(W.getValueOr(0)); } if (Weights) assert(SI.getNumSuccessors() == Weights->size() && diff --git a/llvm/lib/IR/IntrinsicInst.cpp b/llvm/lib/IR/IntrinsicInst.cpp index 9206cd37a6d1..8f7318665cfb 100644 --- a/llvm/lib/IR/IntrinsicInst.cpp +++ b/llvm/lib/IR/IntrinsicInst.cpp @@ -468,6 +468,7 @@ bool VPIntrinsic::canIgnoreVectorLengthParam() const { } Function *VPIntrinsic::getDeclarationForParams(Module *M, Intrinsic::ID VPID, + Type *ReturnType, ArrayRef<Value *> Params) { assert(isVPIntrinsic(VPID) && "not a VP intrinsic"); Function *VPFunc; @@ -486,22 +487,15 @@ Function *VPIntrinsic::getDeclarationForParams(Module *M, Intrinsic::ID VPID, break; case Intrinsic::vp_load: VPFunc = Intrinsic::getDeclaration( - M, VPID, - {Params[0]->getType()->getPointerElementType(), Params[0]->getType()}); + M, VPID, {ReturnType, Params[0]->getType()}); break; case Intrinsic::vp_gather: VPFunc = Intrinsic::getDeclaration( - M, VPID, - {VectorType::get(cast<VectorType>(Params[0]->getType()) - ->getElementType() - ->getPointerElementType(), - cast<VectorType>(Params[0]->getType())), - Params[0]->getType()}); + M, VPID, {ReturnType, Params[0]->getType()}); break; case Intrinsic::vp_store: VPFunc = Intrinsic::getDeclaration( - M, VPID, - {Params[1]->getType()->getPointerElementType(), Params[1]->getType()}); + M, VPID, {Params[0]->getType(), Params[1]->getType()}); break; case Intrinsic::vp_scatter: VPFunc = Intrinsic::getDeclaration( diff --git a/llvm/lib/IR/LLVMContextImpl.h b/llvm/lib/IR/LLVMContextImpl.h index b2909c425846..24c4a348f4da 100644 --- a/llvm/lib/IR/LLVMContextImpl.h +++ b/llvm/lib/IR/LLVMContextImpl.h @@ -386,8 +386,9 @@ template <> struct MDNodeKeyImpl<DIEnumerator> { IsUnsigned(N->isUnsigned()) {} bool isKeyOf(const DIEnumerator *RHS) const { - return APInt::isSameValue(Value, RHS->getValue()) && - IsUnsigned == RHS->isUnsigned() && Name == RHS->getRawName(); + return Value.getBitWidth() == RHS->getValue().getBitWidth() && + Value == RHS->getValue() && IsUnsigned == RHS->isUnsigned() && + Name == RHS->getRawName(); } unsigned getHashValue() const { return hash_combine(Value, Name); } @@ -1424,6 +1425,8 @@ public: DenseMap<const GlobalValue *, DSOLocalEquivalent *> DSOLocalEquivalents; + DenseMap<const GlobalValue *, NoCFIValue *> NoCFIValues; + ConstantUniqueMap<ConstantExpr> ExprConstants; ConstantUniqueMap<InlineAsm> InlineAsms; diff --git a/llvm/lib/IR/LegacyPassManager.cpp b/llvm/lib/IR/LegacyPassManager.cpp index 7bccf09012ca..bb72bec93066 100644 --- a/llvm/lib/IR/LegacyPassManager.cpp +++ b/llvm/lib/IR/LegacyPassManager.cpp @@ -886,9 +886,8 @@ void PMDataManager::recordAvailableAnalysis(Pass *P) { // implements as well. const PassInfo *PInf = TPM->findAnalysisPassInfo(PI); if (!PInf) return; - const std::vector<const PassInfo*> &II = PInf->getInterfacesImplemented(); - for (unsigned i = 0, e = II.size(); i != e; ++i) - AvailableAnalysis[II[i]->getTypeInfo()] = P; + for (const PassInfo *PI : PInf->getInterfacesImplemented()) + AvailableAnalysis[PI->getTypeInfo()] = P; } // Return true if P preserves high level analysis used by other @@ -1013,10 +1012,9 @@ void PMDataManager::freePass(Pass *P, StringRef Msg, // Remove all interfaces this pass implements, for which it is also // listed as the available implementation. 
- const std::vector<const PassInfo*> &II = PInf->getInterfacesImplemented(); - for (unsigned i = 0, e = II.size(); i != e; ++i) { - DenseMap<AnalysisID, Pass*>::iterator Pos = - AvailableAnalysis.find(II[i]->getTypeInfo()); + for (const PassInfo *PI : PInf->getInterfacesImplemented()) { + DenseMap<AnalysisID, Pass *>::iterator Pos = + AvailableAnalysis.find(PI->getTypeInfo()); if (Pos != AvailableAnalysis.end() && Pos->second == P) AvailableAnalysis.erase(Pos); } diff --git a/llvm/lib/IR/Module.cpp b/llvm/lib/IR/Module.cpp index 63ea41fba89a..a0485a59d0e0 100644 --- a/llvm/lib/IR/Module.cpp +++ b/llvm/lib/IR/Module.cpp @@ -750,8 +750,8 @@ void Module::setSDKVersion(const VersionTuple &V) { ConstantDataArray::get(Context, Entries)); } -VersionTuple Module::getSDKVersion() const { - auto *CM = dyn_cast_or_null<ConstantAsMetadata>(getModuleFlag("SDK Version")); +static VersionTuple getSDKVersionMD(Metadata *MD) { + auto *CM = dyn_cast_or_null<ConstantAsMetadata>(MD); if (!CM) return {}; auto *Arr = dyn_cast_or_null<ConstantDataArray>(CM->getValue()); @@ -775,6 +775,10 @@ VersionTuple Module::getSDKVersion() const { return Result; } +VersionTuple Module::getSDKVersion() const { + return getSDKVersionMD(getModuleFlag("SDK Version")); +} + GlobalVariable *llvm::collectUsedGlobalVariables( const Module &M, SmallVectorImpl<GlobalValue *> &Vec, bool CompilerUsed) { const char *Name = CompilerUsed ? "llvm.compiler.used" : "llvm.used"; @@ -809,3 +813,13 @@ void Module::setPartialSampleProfileRatio(const ModuleSummaryIndex &Index) { } } } + +StringRef Module::getDarwinTargetVariantTriple() const { + if (const auto *MD = getModuleFlag("darwin.target_variant.triple")) + return cast<MDString>(MD)->getString(); + return ""; +} + +VersionTuple Module::getDarwinTargetVariantSDKVersion() const { + return getSDKVersionMD(getModuleFlag("darwin.target_variant.SDK Version")); +} diff --git a/llvm/lib/IR/ModuleSummaryIndex.cpp b/llvm/lib/IR/ModuleSummaryIndex.cpp index 31c5cd938d03..a0ac7d3ad7d3 100644 --- a/llvm/lib/IR/ModuleSummaryIndex.cpp +++ b/llvm/lib/IR/ModuleSummaryIndex.cpp @@ -447,11 +447,17 @@ static std::string linkageToString(GlobalValue::LinkageTypes LT) { static std::string fflagsToString(FunctionSummary::FFlags F) { auto FlagValue = [](unsigned V) { return V ? '1' : '0'; }; - char FlagRep[] = {FlagValue(F.ReadNone), FlagValue(F.ReadOnly), - FlagValue(F.NoRecurse), FlagValue(F.ReturnDoesNotAlias), - FlagValue(F.NoInline), FlagValue(F.AlwaysInline), - FlagValue(F.NoUnwind), FlagValue(F.MayThrow), - FlagValue(F.HasUnknownCall), 0}; + char FlagRep[] = {FlagValue(F.ReadNone), + FlagValue(F.ReadOnly), + FlagValue(F.NoRecurse), + FlagValue(F.ReturnDoesNotAlias), + FlagValue(F.NoInline), + FlagValue(F.AlwaysInline), + FlagValue(F.NoUnwind), + FlagValue(F.MayThrow), + FlagValue(F.HasUnknownCall), + FlagValue(F.MustBeUnreachable), + 0}; return FlagRep; } diff --git a/llvm/lib/IR/Operator.cpp b/llvm/lib/IR/Operator.cpp index d15fcfbc5b9f..08c1fc931e2e 100644 --- a/llvm/lib/IR/Operator.cpp +++ b/llvm/lib/IR/Operator.cpp @@ -39,9 +39,10 @@ bool Operator::hasPoisonGeneratingFlags() const { return GEP->isInBounds() || GEP->getInRangeIndex() != None; } default: + if (const auto *FP = dyn_cast<FPMathOperator>(this)) + return FP->hasNoNaNs() || FP->hasNoInfs(); return false; } - // TODO: FastMathFlags! 
(On instructions, but not constexpr) } Type *GEPOperator::getSourceElementType() const { @@ -89,7 +90,7 @@ bool GEPOperator::accumulateConstantOffset( assert(Offset.getBitWidth() == DL.getIndexSizeInBits(getPointerAddressSpace()) && "The offset bit width does not match DL specification."); - SmallVector<const Value *> Index(value_op_begin() + 1, value_op_end()); + SmallVector<const Value *> Index(llvm::drop_begin(operand_values())); return GEPOperator::accumulateConstantOffset(getSourceElementType(), Index, DL, Offset, ExternalAnalysis); } diff --git a/llvm/lib/IR/SSAContext.cpp b/llvm/lib/IR/SSAContext.cpp new file mode 100644 index 000000000000..a96e39f32882 --- /dev/null +++ b/llvm/lib/IR/SSAContext.cpp @@ -0,0 +1,47 @@ +//===- SSAContext.cpp -------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// +/// This file defines a specialization of the GenericSSAContext<X> +/// template class for LLVM IR. +/// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/SSAContext.h" +#include "llvm/IR/Argument.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/Instruction.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +BasicBlock *SSAContext::getEntryBlock(Function &F) { + return &F.getEntryBlock(); +} + +void SSAContext::setFunction(Function &Fn) { F = &Fn; } + +Printable SSAContext::print(Value *V) const { + return Printable([V](raw_ostream &Out) { V->print(Out); }); +} + +Printable SSAContext::print(Instruction *Inst) const { + return print(cast<Value>(Inst)); +} + +Printable SSAContext::print(BasicBlock *BB) const { + if (BB->hasName()) + return Printable([BB](raw_ostream &Out) { Out << BB->getName(); }); + + return Printable([BB](raw_ostream &Out) { + ModuleSlotTracker MST{BB->getParent()->getParent(), false}; + MST.incorporateFunction(*BB->getParent()); + Out << MST.getLocalSlot(BB); + }); +} diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp index b475c8327874..8741ed917f9f 100644 --- a/llvm/lib/IR/Value.cpp +++ b/llvm/lib/IR/Value.cpp @@ -928,7 +928,7 @@ Align Value::getPointerAlignment(const DataLayout &DL) const { } llvm_unreachable("Unhandled FunctionPtrAlignType"); } - const MaybeAlign Alignment(GO->getAlignment()); + const MaybeAlign Alignment(GO->getAlign()); if (!Alignment) { if (auto *GVar = dyn_cast<GlobalVariable>(GO)) { Type *ObjectType = GVar->getValueType(); diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp index 154b59835b01..fb7c423e54e2 100644 --- a/llvm/lib/IR/Verifier.cpp +++ b/llvm/lib/IR/Verifier.cpp @@ -543,7 +543,7 @@ private: void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal); void verifySwiftErrorValue(const Value *SwiftErrorVal); - void verifyTailCCMustTailAttrs(AttrBuilder Attrs, StringRef Context); + void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context); void verifyMustTailCall(CallInst &CI); bool verifyAttributeCount(AttributeList Attrs, unsigned Params); void verifyAttributeTypes(AttributeSet Attrs, const Value *V); @@ -553,8 +553,6 @@ private: void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs, const Value *V, bool IsIntrinsic); void 
verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs); - template <typename T> - void verifyODRTypeAsScopeOperand(const MDNode &MD, T * = nullptr); void visitConstantExprsRecursively(const Constant *EntryC); void visitConstantExpr(const ConstantExpr *CE); @@ -604,26 +602,35 @@ void Verifier::visit(Instruction &I) { InstVisitor<Verifier>::visit(I); } -// Helper to recursively iterate over indirect users. By -// returning false, the callback can ask to stop recursing -// further. +// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further. static void forEachUser(const Value *User, SmallPtrSet<const Value *, 32> &Visited, llvm::function_ref<bool(const Value *)> Callback) { if (!Visited.insert(User).second) return; - for (const Value *TheNextUser : User->materialized_users()) - if (Callback(TheNextUser)) - forEachUser(TheNextUser, Visited, Callback); + + SmallVector<const Value *> WorkList; + append_range(WorkList, User->materialized_users()); + while (!WorkList.empty()) { + const Value *Cur = WorkList.pop_back_val(); + if (!Visited.insert(Cur).second) + continue; + if (Callback(Cur)) + append_range(WorkList, Cur->materialized_users()); + } } void Verifier::visitGlobalValue(const GlobalValue &GV) { Assert(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(), "Global is external, but doesn't have external or weak linkage!", &GV); - if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) - Assert(GO->getAlignment() <= Value::MaximumAlignment, - "huge alignment values are unsupported", GO); + if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) { + + if (MaybeAlign A = GO->getAlign()) { + Assert(A->value() <= Value::MaximumAlignment, + "huge alignment values are unsupported", GO); + } + } Assert(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV), "Only global variables can have appending linkage!", &GV); @@ -733,8 +740,9 @@ void Verifier::visitGlobalVariable(const GlobalVariable &GV) { Value *V = Op->stripPointerCasts(); Assert(isa<GlobalVariable>(V) || isa<Function>(V) || isa<GlobalAlias>(V), - "invalid llvm.used member", V); - Assert(V->hasName(), "members of llvm.used must be named", V); + Twine("invalid ") + GV.getName() + " member", V); + Assert(V->hasName(), + Twine("members of ") + GV.getName() + " must be named", V); } } } @@ -860,19 +868,6 @@ void Verifier::visitNamedMDNode(const NamedMDNode &NMD) { } } -template <typename T> -void Verifier::verifyODRTypeAsScopeOperand(const MDNode &MD, T *) { - if (isa<T>(MD)) { - if (auto *N = dyn_cast_or_null<DICompositeType>(cast<T>(MD).getScope())) - // Of all the supported tags for DICompositeType(see visitDICompositeType) - // we know that enum type cannot be a scope. - AssertDI(N->getTag() != dwarf::DW_TAG_enumeration_type, - "enum type is not a scope; check enum type ODR " - "violation", - N, &MD); - } -} - void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) { // Only visit each node once. Metadata can be mutually recursive, so this // avoids infinite recursion here, as well as being an optimization. @@ -882,12 +877,6 @@ void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) { Assert(&MD.getContext() == &Context, "MDNode context does not match Module context!", &MD); - // Makes sure when a scope operand is a ODR type, the ODR type uniquing does - // not create invalid debug metadata. - // TODO: check that the non-ODR-type scope operand is valid. 
- verifyODRTypeAsScopeOperand<DIType>(MD); - verifyODRTypeAsScopeOperand<DILocalScope>(MD); - switch (MD.getMetadataID()) { default: llvm_unreachable("Invalid MDNode subclass"); @@ -2055,10 +2044,12 @@ void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs, } if (Attrs.hasFnAttr(Attribute::VScaleRange)) { - std::pair<unsigned, unsigned> Args = - Attrs.getFnAttrs().getVScaleRangeArgs(); + unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin(); + if (VScaleMin == 0) + CheckFailed("'vscale_range' minimum must be greater than 0", V); - if (Args.first > Args.second && Args.second != 0) + Optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax(); + if (VScaleMax && VScaleMin > VScaleMax) CheckFailed("'vscale_range' minimum cannot be greater than maximum", V); } @@ -3328,7 +3319,7 @@ void Verifier::visitCallBase(CallBase &Call) { visitInstruction(Call); } -void Verifier::verifyTailCCMustTailAttrs(AttrBuilder Attrs, +void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context) { Assert(!Attrs.contains(Attribute::InAlloca), Twine("inalloca attribute not allowed in ") + Context); @@ -3733,15 +3724,15 @@ void Verifier::visitLoadInst(LoadInst &LI) { PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType()); Assert(PTy, "Load operand must be a pointer.", &LI); Type *ElTy = LI.getType(); - Assert(LI.getAlignment() <= Value::MaximumAlignment, - "huge alignment values are unsupported", &LI); + if (MaybeAlign A = LI.getAlign()) { + Assert(A->value() <= Value::MaximumAlignment, + "huge alignment values are unsupported", &LI); + } Assert(ElTy->isSized(), "loading unsized types is not allowed", &LI); if (LI.isAtomic()) { Assert(LI.getOrdering() != AtomicOrdering::Release && LI.getOrdering() != AtomicOrdering::AcquireRelease, "Load cannot have Release ordering", &LI); - Assert(LI.getAlignment() != 0, - "Atomic load must specify explicit alignment", &LI); Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(), "atomic load operand must have integer, pointer, or floating point " "type!", @@ -3761,15 +3752,15 @@ void Verifier::visitStoreInst(StoreInst &SI) { Type *ElTy = SI.getOperand(0)->getType(); Assert(PTy->isOpaqueOrPointeeTypeMatches(ElTy), "Stored value type does not match pointer operand type!", &SI, ElTy); - Assert(SI.getAlignment() <= Value::MaximumAlignment, - "huge alignment values are unsupported", &SI); + if (MaybeAlign A = SI.getAlign()) { + Assert(A->value() <= Value::MaximumAlignment, + "huge alignment values are unsupported", &SI); + } Assert(ElTy->isSized(), "storing unsized types is not allowed", &SI); if (SI.isAtomic()) { Assert(SI.getOrdering() != AtomicOrdering::Acquire && SI.getOrdering() != AtomicOrdering::AcquireRelease, "Store cannot have Acquire ordering", &SI); - Assert(SI.getAlignment() != 0, - "Atomic store must specify explicit alignment", &SI); Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(), "atomic store operand must have integer, pointer, or floating point " "type!", @@ -3820,8 +3811,10 @@ void Verifier::visitAllocaInst(AllocaInst &AI) { "Cannot allocate unsized type", &AI); Assert(AI.getArraySize()->getType()->isIntegerTy(), "Alloca array size must have integer type", &AI); - Assert(AI.getAlignment() <= Value::MaximumAlignment, - "huge alignment values are unsupported", &AI); + if (MaybeAlign A = AI.getAlign()) { + Assert(A->value() <= Value::MaximumAlignment, + "huge alignment values are unsupported", &AI); + } if (AI.isSwiftError()) { verifySwiftErrorValue(&AI); |
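The ConstantFold.cpp hunk above now delegates floating-point compare folding to the new static `FCmpInst::compare` helper added in Instructions.cpp. A minimal sketch of calling it directly on two constant values; the wrapper name `foldConstantFCmp` is an assumption for illustration, not part of the patch:

```cpp
#include "llvm/ADT/APFloat.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical wrapper: evaluate an fcmp predicate on two constant APFloats.
static bool foldConstantFCmp(const APFloat &LHS, const APFloat &RHS,
                             FCmpInst::Predicate Pred) {
  // FCmpInst::compare returns the i1 result of "fcmp Pred LHS, RHS";
  // unordered predicates (FCMP_UEQ, FCMP_ULT, ...) are true when either
  // operand is a NaN, while ordered predicates are false in that case.
  return FCmpInst::compare(LHS, RHS, Pred);
}

// e.g. foldConstantFCmp(APFloat(1.0), APFloat(2.0), FCmpInst::FCMP_OLT) is true
```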

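Constants.cpp, LLVMContextImpl.h, and AsmWriter.cpp above also introduce the `NoCFIValue` constant, which the writer prints as `no_cfi @<name>`. A small sketch of creating one for a function, assuming the surrounding module setup already exists; the helper name `getNoCFIPointer` is hypothetical:

```cpp
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
using namespace llvm;

// Hypothetical helper: wrap a function in the new NoCFIValue constant.
// NoCFIValue::get returns a uniqued constant with the same pointer type as F;
// in textual IR it appears as "no_cfi @F" (see the AsmWriter hunk above).
static Constant *getNoCFIPointer(Function &F) {
  return NoCFIValue::get(&F);
}
```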