Diffstat (limited to 'lib/IR/Verifier.cpp')
-rw-r--r-- | lib/IR/Verifier.cpp | 451
1 files changed, 310 insertions, 141 deletions
diff --git a/lib/IR/Verifier.cpp b/lib/IR/Verifier.cpp index 30e77b92009f..9346c8bda75d 100644 --- a/lib/IR/Verifier.cpp +++ b/lib/IR/Verifier.cpp @@ -1,9 +1,8 @@ //===-- Verifier.cpp - Implement the Module Verifier -----------------------==// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // @@ -467,6 +466,7 @@ private: void visitReturnInst(ReturnInst &RI); void visitSwitchInst(SwitchInst &SI); void visitIndirectBrInst(IndirectBrInst &BI); + void visitCallBrInst(CallBrInst &CBI); void visitSelectInst(SelectInst &SI); void visitUserOp1(Instruction &I); void visitUserOp2(Instruction &I) { visitUserOp1(I); } @@ -500,7 +500,7 @@ private: const Value *V); void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V); void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs, - const Value *V); + const Value *V, bool IsIntrinsic); void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs); void visitConstantExprsRecursively(const Constant *EntryC); @@ -641,18 +641,18 @@ void Verifier::visitGlobalVariable(const GlobalVariable &GV) { PointerType *FuncPtrTy = FunctionType::get(Type::getVoidTy(Context), false)-> getPointerTo(DL.getProgramAddressSpace()); - // FIXME: Reject the 2-field form in LLVM 4.0. Assert(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) && STy->getTypeAtIndex(0u)->isIntegerTy(32) && STy->getTypeAtIndex(1) == FuncPtrTy, "wrong type for intrinsic global variable", &GV); - if (STy->getNumElements() == 3) { - Type *ETy = STy->getTypeAtIndex(2); - Assert(ETy->isPointerTy() && - cast<PointerType>(ETy)->getElementType()->isIntegerTy(8), - "wrong type for intrinsic global variable", &GV); - } + Assert(STy->getNumElements() == 3, + "the third field of the element type is mandatory, " + "specify i8* null to migrate from the obsoleted 2-field form"); + Type *ETy = STy->getTypeAtIndex(2); + Assert(ETy->isPointerTy() && + cast<PointerType>(ETy)->getElementType()->isIntegerTy(8), + "wrong type for intrinsic global variable", &GV); } } @@ -691,6 +691,13 @@ void Verifier::visitGlobalVariable(const GlobalVariable &GV) { "DIGlobalVariableExpression"); } + // Scalable vectors cannot be global variables, since we don't know + // the runtime size. If the global is a struct or an array containing + // scalable vectors, that will be caught by the isValidElementType methods + // in StructType or ArrayType instead. 
+ if (auto *VTy = dyn_cast<VectorType>(GV.getValueType())) + Assert(!VTy->isScalable(), "Globals cannot contain scalable vectors", &GV); + if (!GV.hasInitializer()) { visitGlobalValue(GV); return; @@ -926,7 +933,8 @@ void Verifier::visitDIDerivedType(const DIDerivedType &N) { if (N.getDWARFAddressSpace()) { AssertDI(N.getTag() == dwarf::DW_TAG_pointer_type || - N.getTag() == dwarf::DW_TAG_reference_type, + N.getTag() == dwarf::DW_TAG_reference_type || + N.getTag() == dwarf::DW_TAG_rvalue_reference_type, "DWARF address space only applies to pointer or reference types", &N); } @@ -1156,6 +1164,14 @@ void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) { visitDILexicalBlockBase(N); } +void Verifier::visitDICommonBlock(const DICommonBlock &N) { + AssertDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N); + if (auto *S = N.getRawScope()) + AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S); + if (auto *S = N.getRawDecl()) + AssertDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S); +} + void Verifier::visitDINamespace(const DINamespace &N) { AssertDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N); if (auto *S = N.getRawScope()) @@ -1224,7 +1240,6 @@ void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) { visitDIVariable(N); AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N); - AssertDI(!N.getName().empty(), "missing global variable name", &N); AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType()); AssertDI(N.getType(), "missing global variable type", &N); if (auto *Member = N.getRawStaticDataMemberDeclaration()) { @@ -1478,9 +1493,12 @@ void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) { static bool isFuncOnlyAttr(Attribute::AttrKind Kind) { switch (Kind) { case Attribute::NoReturn: + case Attribute::NoSync: + case Attribute::WillReturn: case Attribute::NoCfCheck: case Attribute::NoUnwind: case Attribute::NoInline: + case Attribute::NoFree: case Attribute::AlwaysInline: case Attribute::OptimizeForSize: case Attribute::StackProtect: @@ -1498,6 +1516,7 @@ static bool isFuncOnlyAttr(Attribute::AttrKind Kind) { case Attribute::ReturnsTwice: case Attribute::SanitizeAddress: case Attribute::SanitizeHWAddress: + case Attribute::SanitizeMemTag: case Attribute::SanitizeThread: case Attribute::SanitizeMemory: case Attribute::MinSize: @@ -1562,6 +1581,11 @@ void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty, verifyAttributeTypes(Attrs, /*IsFunction=*/false, V); + if (Attrs.hasAttribute(Attribute::ImmArg)) { + Assert(Attrs.getNumAttributes() == 1, + "Attribute 'immarg' is incompatible with other attributes", V); + } + // Check for mutually incompatible attributes. Only inreg is compatible with // sret. unsigned AttrCount = 0; @@ -1616,6 +1640,11 @@ void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty, "'noinline and alwaysinline' are incompatible!", V); + if (Attrs.hasAttribute(Attribute::ByVal) && Attrs.getByValType()) { + Assert(Attrs.getByValType() == cast<PointerType>(Ty)->getElementType(), + "Attribute 'byval' type does not match parameter!", V); + } + AttrBuilder IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty); Assert(!AttrBuilder(Attrs).overlaps(IncompatibleAttrs), "Wrong types for attribute: " + @@ -1649,7 +1678,7 @@ void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty, // Check parameter attributes against a function type. // The value V is printed in error messages. 
void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs, - const Value *V) { + const Value *V, bool IsIntrinsic) { if (Attrs.isEmpty()) return; @@ -1686,6 +1715,11 @@ void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs, Type *Ty = FT->getParamType(i); AttributeSet ArgAttrs = Attrs.getParamAttributes(i); + if (!IsIntrinsic) { + Assert(!ArgAttrs.hasAttribute(Attribute::ImmArg), + "immarg attribute only applies to intrinsics",V); + } + verifyParameterAttrs(ArgAttrs, Ty, V); if (ArgAttrs.hasAttribute(Attribute::Nest)) { @@ -1904,16 +1938,8 @@ void Verifier::verifyStatepoint(const CallBase &Call) { "reordering restrictions required by safepoint semantics", Call); - const Value *IDV = Call.getArgOperand(0); - Assert(isa<ConstantInt>(IDV), "gc.statepoint ID must be a constant integer", - Call); - - const Value *NumPatchBytesV = Call.getArgOperand(1); - Assert(isa<ConstantInt>(NumPatchBytesV), - "gc.statepoint number of patchable bytes must be a constant integer", - Call); const int64_t NumPatchBytes = - cast<ConstantInt>(NumPatchBytesV)->getSExtValue(); + cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue(); assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!"); Assert(NumPatchBytes >= 0, "gc.statepoint number of patchable bytes must be " @@ -1926,12 +1952,7 @@ void Verifier::verifyStatepoint(const CallBase &Call) { "gc.statepoint callee must be of function pointer type", Call, Target); FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType()); - const Value *NumCallArgsV = Call.getArgOperand(3); - Assert(isa<ConstantInt>(NumCallArgsV), - "gc.statepoint number of arguments to underlying call " - "must be constant integer", - Call); - const int NumCallArgs = cast<ConstantInt>(NumCallArgsV)->getZExtValue(); + const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue(); Assert(NumCallArgs >= 0, "gc.statepoint number of arguments to underlying call " "must be positive", @@ -1950,10 +1971,8 @@ void Verifier::verifyStatepoint(const CallBase &Call) { Assert(NumCallArgs == NumParams, "gc.statepoint mismatch in number of call args", Call); - const Value *FlagsV = Call.getArgOperand(4); - Assert(isa<ConstantInt>(FlagsV), - "gc.statepoint flags must be constant integer", Call); - const uint64_t Flags = cast<ConstantInt>(FlagsV)->getZExtValue(); + const uint64_t Flags + = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue(); Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0, "unknown flag used in gc.statepoint flags argument", Call); @@ -2043,7 +2062,7 @@ void Verifier::verifyFrameRecoverIndices() { unsigned MaxRecoveredIndex = Counts.second.second; Assert(MaxRecoveredIndex <= EscapedObjectCount, "all indices passed to llvm.localrecover must be less than the " - "number of arguments passed ot llvm.localescape in the parent " + "number of arguments passed to llvm.localescape in the parent " "function", F); } @@ -2130,8 +2149,11 @@ void Verifier::visitFunction(const Function &F) { Assert(verifyAttributeCount(Attrs, FT->getNumParams()), "Attribute after last parameter!", &F); + bool isLLVMdotName = F.getName().size() >= 5 && + F.getName().substr(0, 5) == "llvm."; + // Check function attributes. - verifyFunctionAttrs(FT, Attrs, &F); + verifyFunctionAttrs(FT, Attrs, &F, isLLVMdotName); // On function declarations/definitions, we do not support the builtin // attribute. 
We do not check this in VerifyFunctionAttrs since that is @@ -2170,9 +2192,6 @@ void Verifier::visitFunction(const Function &F) { break; } - bool isLLVMdotName = F.getName().size() >= 5 && - F.getName().substr(0, 5) == "llvm."; - // Check that the argument values match the function type for this function... unsigned i = 0; for (const Argument &Arg : F.args()) { @@ -2220,8 +2239,11 @@ void Verifier::visitFunction(const Function &F) { MDs.empty() ? nullptr : MDs.front().second); } else if (F.isDeclaration()) { for (const auto &I : MDs) { - AssertDI(I.first != LLVMContext::MD_dbg, - "function declaration may not have a !dbg attachment", &F); + // This is used for call site debug information. + AssertDI(I.first != LLVMContext::MD_dbg || + !cast<DISubprogram>(I.second)->isDistinct(), + "function declaration may only have a unique !dbg attachment", + &F); Assert(I.first != LLVMContext::MD_prof, "function declaration may not have a !prof attachment", &F); @@ -2299,36 +2321,44 @@ void Verifier::visitFunction(const Function &F) { // FIXME: Check this incrementally while visiting !dbg attachments. // FIXME: Only check when N is the canonical subprogram for F. SmallPtrSet<const MDNode *, 32> Seen; - for (auto &BB : F) - for (auto &I : BB) { - // Be careful about using DILocation here since we might be dealing with - // broken code (this is the Verifier after all). - DILocation *DL = - dyn_cast_or_null<DILocation>(I.getDebugLoc().getAsMDNode()); - if (!DL) - continue; - if (!Seen.insert(DL).second) - continue; + auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) { + // Be careful about using DILocation here since we might be dealing with + // broken code (this is the Verifier after all). + const DILocation *DL = dyn_cast_or_null<DILocation>(Node); + if (!DL) + return; + if (!Seen.insert(DL).second) + return; - Metadata *Parent = DL->getRawScope(); - AssertDI(Parent && isa<DILocalScope>(Parent), - "DILocation's scope must be a DILocalScope", N, &F, &I, DL, - Parent); - DILocalScope *Scope = DL->getInlinedAtScope(); - if (Scope && !Seen.insert(Scope).second) - continue; + Metadata *Parent = DL->getRawScope(); + AssertDI(Parent && isa<DILocalScope>(Parent), + "DILocation's scope must be a DILocalScope", N, &F, &I, DL, + Parent); + DILocalScope *Scope = DL->getInlinedAtScope(); + if (Scope && !Seen.insert(Scope).second) + return; - DISubprogram *SP = Scope ? Scope->getSubprogram() : nullptr; + DISubprogram *SP = Scope ? Scope->getSubprogram() : nullptr; - // Scope and SP could be the same MDNode and we don't want to skip - // validation in that case - if (SP && ((Scope != SP) && !Seen.insert(SP).second)) - continue; + // Scope and SP could be the same MDNode and we don't want to skip + // validation in that case + if (SP && ((Scope != SP) && !Seen.insert(SP).second)) + return; - // FIXME: Once N is canonical, check "SP == &N". - AssertDI(SP->describes(&F), - "!dbg attachment points at wrong subprogram for function", N, &F, - &I, DL, Scope, SP); + // FIXME: Once N is canonical, check "SP == &N". + AssertDI(SP->describes(&F), + "!dbg attachment points at wrong subprogram for function", N, &F, + &I, DL, Scope, SP); + }; + for (auto &BB : F) + for (auto &I : BB) { + VisitDebugLoc(I, I.getDebugLoc().getAsMDNode()); + // The llvm.loop annotations also contain two DILocations. 
+ if (auto MD = I.getMetadata(LLVMContext::MD_loop)) + for (unsigned i = 1; i < MD->getNumOperands(); ++i) + VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i))); + if (BrokenDebugInfo) + return; } } @@ -2451,6 +2481,26 @@ void Verifier::visitIndirectBrInst(IndirectBrInst &BI) { visitTerminator(BI); } +void Verifier::visitCallBrInst(CallBrInst &CBI) { + Assert(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", + &CBI); + Assert(CBI.getType()->isVoidTy(), "Callbr return value is not supported!", + &CBI); + for (unsigned i = 0, e = CBI.getNumSuccessors(); i != e; ++i) + Assert(CBI.getSuccessor(i)->getType()->isLabelTy(), + "Callbr successors must all have pointer type!", &CBI); + for (unsigned i = 0, e = CBI.getNumOperands(); i != e; ++i) { + Assert(i >= CBI.getNumArgOperands() || !isa<BasicBlock>(CBI.getOperand(i)), + "Using an unescaped label as a callbr argument!", &CBI); + if (isa<BasicBlock>(CBI.getOperand(i))) + for (unsigned j = i + 1; j != e; ++j) + Assert(CBI.getOperand(i) != CBI.getOperand(j), + "Duplicate callbr destination!", &CBI); + } + + visitTerminator(CBI); +} + void Verifier::visitSelectInst(SelectInst &SI) { Assert(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1), SI.getOperand(2)), @@ -2780,17 +2830,21 @@ void Verifier::visitCallBase(CallBase &Call) { Assert(verifyAttributeCount(Attrs, Call.arg_size()), "Attribute after last parameter!", Call); + bool IsIntrinsic = Call.getCalledFunction() && + Call.getCalledFunction()->getName().startswith("llvm."); + + Function *Callee + = dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts()); + if (Attrs.hasAttribute(AttributeList::FunctionIndex, Attribute::Speculatable)) { // Don't allow speculatable on call sites, unless the underlying function // declaration is also speculatable. - Function *Callee = - dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts()); Assert(Callee && Callee->isSpeculatable(), "speculatable attribute may not apply to call sites", Call); } // Verify call attributes. - verifyFunctionAttrs(FTy, Attrs, &Call); + verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic); // Conservatively check the inalloca argument. // We have a bug if we can find that there is an underlying alloca without @@ -2805,7 +2859,7 @@ void Verifier::visitCallBase(CallBase &Call) { // For each argument of the callsite, if it has the swifterror argument, // make sure the underlying alloca/parameter it comes from has a swifterror as // well. - for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) + for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) { if (Call.paramHasAttr(i, Attribute::SwiftError)) { Value *SwiftErrorArg = Call.getArgOperand(i); if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) { @@ -2822,6 +2876,21 @@ void Verifier::visitCallBase(CallBase &Call) { Call); } + if (Attrs.hasParamAttribute(i, Attribute::ImmArg)) { + // Don't allow immarg on call sites, unless the underlying declaration + // also has the matching immarg. + Assert(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg), + "immarg may not apply only to call sites", + Call.getArgOperand(i), Call); + } + + if (Call.paramHasAttr(i, Attribute::ImmArg)) { + Value *ArgVal = Call.getArgOperand(i); + Assert(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal), + "immarg operand has non-immediate parameter", ArgVal, Call); + } + } + if (FTy->isVarArg()) { // FIXME? is 'nest' even legal here? 
bool SawNest = false; @@ -2871,8 +2940,7 @@ void Verifier::visitCallBase(CallBase &Call) { } // Verify that there's no metadata unless it's a direct call to an intrinsic. - if (!Call.getCalledFunction() || - !Call.getCalledFunction()->getName().startswith("llvm.")) { + if (!IsIntrinsic) { for (Type *ParamTy : FTy->params()) { Assert(!ParamTy->isMetadataTy(), "Function has metadata parameter but isn't an intrinsic", Call); @@ -3236,7 +3304,7 @@ void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) { unsigned NumRanges = NumOperands / 2; Assert(NumRanges >= 1, "It should have at least one range!", Range); - ConstantRange LastRange(1); // Dummy initial value + ConstantRange LastRange(1, true); // Dummy initial value for (unsigned i = 0; i < NumRanges; ++i) { ConstantInt *Low = mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i)); @@ -3431,10 +3499,22 @@ void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) { PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType()); Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI); Type *ElTy = PTy->getElementType(); - Assert(ElTy->isIntegerTy(), "atomicrmw " + - AtomicRMWInst::getOperationName(Op) + - " operand must have integer type!", - &RMWI, ElTy); + if (Op == AtomicRMWInst::Xchg) { + Assert(ElTy->isIntegerTy() || ElTy->isFloatingPointTy(), "atomicrmw " + + AtomicRMWInst::getOperationName(Op) + + " operand must have integer or floating point type!", + &RMWI, ElTy); + } else if (AtomicRMWInst::isFPOperation(Op)) { + Assert(ElTy->isFloatingPointTy(), "atomicrmw " + + AtomicRMWInst::getOperationName(Op) + + " operand must have floating point type!", + &RMWI, ElTy); + } else { + Assert(ElTy->isIntegerTy(), "atomicrmw " + + AtomicRMWInst::getOperationName(Op) + + " operand must have integer type!", + &RMWI, ElTy); + } checkAtomicMemAccessSize(ElTy, &RMWI); Assert(ElTy == RMWI.getOperand(1)->getType(), "Argument value type does not match pointer operand type!", &RMWI, @@ -3886,7 +3966,7 @@ void Verifier::verifyDominatesUse(Instruction &I, unsigned i) { } // Quick check whether the def has already been encountered in the same block. - // PHI nodes are not checked to prevent accepting preceeding PHIs, because PHI + // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI // uses are defined to happen on the incoming edge, not at the instruction. // // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata) @@ -3981,7 +4061,8 @@ void Verifier::visitInstruction(Instruction &I) { F->getIntrinsicID() == Intrinsic::coro_destroy || F->getIntrinsicID() == Intrinsic::experimental_patchpoint_void || F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 || - F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint, + F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint || + F->getIntrinsicID() == Intrinsic::wasm_rethrow_in_catch, "Cannot invoke an intrinsic other than donothing, patchpoint, " "statepoint, coro_resume or coro_destroy", &I); @@ -4095,14 +4176,14 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) { getIntrinsicInfoTableEntries(ID, Table); ArrayRef<Intrinsic::IITDescriptor> TableRef = Table; + // Walk the descriptors to extract overloaded types. 
SmallVector<Type *, 4> ArgTys; - Assert(!Intrinsic::matchIntrinsicType(IFTy->getReturnType(), - TableRef, ArgTys), + Intrinsic::MatchIntrinsicTypesResult Res = + Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys); + Assert(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet, "Intrinsic has incorrect return type!", IF); - for (unsigned i = 0, e = IFTy->getNumParams(); i != e; ++i) - Assert(!Intrinsic::matchIntrinsicType(IFTy->getParamType(i), - TableRef, ArgTys), - "Intrinsic has incorrect argument type!", IF); + Assert(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg, + "Intrinsic has incorrect argument type!", IF); // Verify if the intrinsic call matches the vararg property. if (IsVarArg) @@ -4149,19 +4230,14 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) { "an array"); break; } - case Intrinsic::ctlz: // llvm.ctlz - case Intrinsic::cttz: // llvm.cttz - Assert(isa<ConstantInt>(Call.getArgOperand(1)), - "is_zero_undef argument of bit counting intrinsics must be a " - "constant int", - Call); - break; case Intrinsic::experimental_constrained_fadd: case Intrinsic::experimental_constrained_fsub: case Intrinsic::experimental_constrained_fmul: case Intrinsic::experimental_constrained_fdiv: case Intrinsic::experimental_constrained_frem: case Intrinsic::experimental_constrained_fma: + case Intrinsic::experimental_constrained_fptrunc: + case Intrinsic::experimental_constrained_fpext: case Intrinsic::experimental_constrained_sqrt: case Intrinsic::experimental_constrained_pow: case Intrinsic::experimental_constrained_powi: @@ -4211,9 +4287,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) { "alignment of arg 1 of memory intrinsic must be 0 or a power of 2", Call); } - Assert(isa<ConstantInt>(Call.getArgOperand(3)), - "isvolatile argument of memory intrinsics must be a constant int", - Call); + break; } case Intrinsic::memcpy_element_unordered_atomic: @@ -4222,11 +4296,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) { const auto *AMI = cast<AtomicMemIntrinsic>(&Call); ConstantInt *ElementSizeCI = - dyn_cast<ConstantInt>(AMI->getRawElementSizeInBytes()); - Assert(ElementSizeCI, - "element size of the element-wise unordered atomic memory " - "intrinsic must be a constant int", - Call); + cast<ConstantInt>(AMI->getRawElementSizeInBytes()); const APInt &ElementSizeVal = ElementSizeCI->getValue(); Assert(ElementSizeVal.isPowerOf2(), "element size of the element-wise atomic memory intrinsic " @@ -4281,28 +4351,14 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) { Call); break; case Intrinsic::prefetch: - Assert(isa<ConstantInt>(Call.getArgOperand(1)) && - isa<ConstantInt>(Call.getArgOperand(2)) && - cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2 && - cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4, + Assert(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2 && + cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4, "invalid arguments to llvm.prefetch", Call); break; case Intrinsic::stackprotector: Assert(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()), "llvm.stackprotector parameter #2 must resolve to an alloca.", Call); break; - case Intrinsic::lifetime_start: - case Intrinsic::lifetime_end: - case Intrinsic::invariant_start: - Assert(isa<ConstantInt>(Call.getArgOperand(0)), - "size argument of memory use markers must be a constant integer", - Call); - break; - case Intrinsic::invariant_end: - Assert(isa<ConstantInt>(Call.getArgOperand(1)), - 
"llvm.invariant.end parameter #2 must be a constant integer", Call); - break; - case Intrinsic::localescape: { BasicBlock *BB = Call.getParent(); Assert(BB == &BB->getParent()->front(), @@ -4327,9 +4383,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) { "llvm.localrecover first " "argument must be function defined in this module", Call); - auto *IdxArg = dyn_cast<ConstantInt>(Call.getArgOperand(2)); - Assert(IdxArg, "idx argument of llvm.localrecover must be a constant int", - Call); + auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2)); auto &Entry = FrameEscapeInfo[Fn]; Entry.second = unsigned( std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1)); @@ -4484,11 +4538,13 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) { Call); Value *Ptr = Call.getArgOperand(0); - // Value *Alignment = Call.getArgOperand(1); + ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1)); Value *Mask = Call.getArgOperand(2); Value *PassThru = Call.getArgOperand(3); Assert(Mask->getType()->isVectorTy(), "masked_load: mask must be vector", Call); + Assert(Alignment->getValue().isPowerOf2(), + "masked_load: alignment must be a power of 2", Call); // DataTy is the overloaded type Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType(); @@ -4504,10 +4560,12 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) { case Intrinsic::masked_store: { Value *Val = Call.getArgOperand(0); Value *Ptr = Call.getArgOperand(1); - // Value *Alignment = Call.getArgOperand(2); + ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2)); Value *Mask = Call.getArgOperand(3); Assert(Mask->getType()->isVectorTy(), "masked_store: mask must be vector", Call); + Assert(Alignment->getValue().isPowerOf2(), + "masked_store: alignment must be a power of 2", Call); // DataTy is the overloaded type Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType(); @@ -4563,22 +4621,41 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) { "of ints"); break; } - case Intrinsic::smul_fix: { + case Intrinsic::smul_fix: + case Intrinsic::smul_fix_sat: + case Intrinsic::umul_fix: { Value *Op1 = Call.getArgOperand(0); Value *Op2 = Call.getArgOperand(1); Assert(Op1->getType()->isIntOrIntVectorTy(), - "first operand of smul_fix must be an int type or vector " + "first operand of [us]mul_fix[_sat] must be an int type or vector " "of ints"); Assert(Op2->getType()->isIntOrIntVectorTy(), - "second operand of smul_fix must be an int type or vector " + "second operand of [us]mul_fix_[sat] must be an int type or vector " "of ints"); - auto *Op3 = dyn_cast<ConstantInt>(Call.getArgOperand(2)); - Assert(Op3, "third argument of smul_fix must be a constant integer"); + auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2)); Assert(Op3->getType()->getBitWidth() <= 32, - "third argument of smul_fix must fit within 32 bits"); - Assert(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(), - "the scale of smul_fix must be less than the width of the operands"); + "third argument of [us]mul_fix[_sat] must fit within 32 bits"); + + if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat) { + Assert( + Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(), + "the scale of smul_fix[_sat] must be less than the width of the operands"); + } else { + Assert(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(), + "the scale of umul_fix[_sat] must be less than or equal to the width of " + "the operands"); + } + break; + } + case 
Intrinsic::lround: + case Intrinsic::llround: + case Intrinsic::lrint: + case Intrinsic::llrint: { + Type *ValTy = Call.getArgOperand(0)->getType(); + Type *ResultTy = Call.getType(); + Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(), + "Intrinsic does not support vectors", &Call); break; } }; @@ -4605,17 +4682,109 @@ static DISubprogram *getSubprogram(Metadata *LocalScope) { void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) { unsigned NumOperands = FPI.getNumArgOperands(); - Assert(((NumOperands == 5 && FPI.isTernaryOp()) || - (NumOperands == 3 && FPI.isUnaryOp()) || (NumOperands == 4)), - "invalid arguments for constrained FP intrinsic", &FPI); - Assert(isa<MetadataAsValue>(FPI.getArgOperand(NumOperands-1)), - "invalid exception behavior argument", &FPI); - Assert(isa<MetadataAsValue>(FPI.getArgOperand(NumOperands-2)), - "invalid rounding mode argument", &FPI); - Assert(FPI.getRoundingMode() != ConstrainedFPIntrinsic::rmInvalid, - "invalid rounding mode argument", &FPI); - Assert(FPI.getExceptionBehavior() != ConstrainedFPIntrinsic::ebInvalid, - "invalid exception behavior argument", &FPI); + bool HasExceptionMD = false; + bool HasRoundingMD = false; + switch (FPI.getIntrinsicID()) { + case Intrinsic::experimental_constrained_sqrt: + case Intrinsic::experimental_constrained_sin: + case Intrinsic::experimental_constrained_cos: + case Intrinsic::experimental_constrained_exp: + case Intrinsic::experimental_constrained_exp2: + case Intrinsic::experimental_constrained_log: + case Intrinsic::experimental_constrained_log10: + case Intrinsic::experimental_constrained_log2: + case Intrinsic::experimental_constrained_rint: + case Intrinsic::experimental_constrained_nearbyint: + case Intrinsic::experimental_constrained_ceil: + case Intrinsic::experimental_constrained_floor: + case Intrinsic::experimental_constrained_round: + case Intrinsic::experimental_constrained_trunc: + Assert((NumOperands == 3), "invalid arguments for constrained FP intrinsic", + &FPI); + HasExceptionMD = true; + HasRoundingMD = true; + break; + + case Intrinsic::experimental_constrained_fma: + Assert((NumOperands == 5), "invalid arguments for constrained FP intrinsic", + &FPI); + HasExceptionMD = true; + HasRoundingMD = true; + break; + + case Intrinsic::experimental_constrained_fadd: + case Intrinsic::experimental_constrained_fsub: + case Intrinsic::experimental_constrained_fmul: + case Intrinsic::experimental_constrained_fdiv: + case Intrinsic::experimental_constrained_frem: + case Intrinsic::experimental_constrained_pow: + case Intrinsic::experimental_constrained_powi: + case Intrinsic::experimental_constrained_maxnum: + case Intrinsic::experimental_constrained_minnum: + Assert((NumOperands == 4), "invalid arguments for constrained FP intrinsic", + &FPI); + HasExceptionMD = true; + HasRoundingMD = true; + break; + + case Intrinsic::experimental_constrained_fptrunc: + case Intrinsic::experimental_constrained_fpext: { + if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) { + Assert((NumOperands == 3), + "invalid arguments for constrained FP intrinsic", &FPI); + HasRoundingMD = true; + } else { + Assert((NumOperands == 2), + "invalid arguments for constrained FP intrinsic", &FPI); + } + HasExceptionMD = true; + + Value *Operand = FPI.getArgOperand(0); + Type *OperandTy = Operand->getType(); + Value *Result = &FPI; + Type *ResultTy = Result->getType(); + Assert(OperandTy->isFPOrFPVectorTy(), + "Intrinsic first argument must be FP or FP vector", &FPI); + 
Assert(ResultTy->isFPOrFPVectorTy(), + "Intrinsic result must be FP or FP vector", &FPI); + Assert(OperandTy->isVectorTy() == ResultTy->isVectorTy(), + "Intrinsic first argument and result disagree on vector use", &FPI); + if (OperandTy->isVectorTy()) { + auto *OperandVecTy = cast<VectorType>(OperandTy); + auto *ResultVecTy = cast<VectorType>(ResultTy); + Assert(OperandVecTy->getNumElements() == ResultVecTy->getNumElements(), + "Intrinsic first argument and result vector lengths must be equal", + &FPI); + } + if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) { + Assert(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(), + "Intrinsic first argument's type must be larger than result type", + &FPI); + } else { + Assert(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(), + "Intrinsic first argument's type must be smaller than result type", + &FPI); + } + } + break; + + default: + llvm_unreachable("Invalid constrained FP intrinsic!"); + } + + // If a non-metadata argument is passed in a metadata slot then the + // error will be caught earlier when the incorrect argument doesn't + // match the specification in the intrinsic call table. Thus, no + // argument type check is needed here. + + if (HasExceptionMD) { + Assert(FPI.getExceptionBehavior().hasValue(), + "invalid exception behavior argument", &FPI); + } + if (HasRoundingMD) { + Assert(FPI.getRoundingMode().hasValue(), + "invalid rounding mode argument", &FPI); + } } void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) { |
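
Illustrative sketch, not part of the patch: the hunk in visitGlobalVariable above rejects globals of scalable vector type because their size is unknown at compile time. One way to observe the new "Globals cannot contain scalable vectors" diagnostic is to hand the verifier a module containing such a global, as below. This assumes an LLVM build of roughly the vintage this patch targets (one whose IR parser already accepts vscale vector types); the program name verify-scalable is invented for the example.

// Illustrative only: parse a module with a scalable-vector global and run
// the verifier, printing the diagnostic added by this patch.
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>

int main() {
  llvm::LLVMContext Ctx;
  llvm::SMDiagnostic Err;
  // A global whose size depends on vscale, which is unknown at compile time.
  std::unique_ptr<llvm::Module> M = llvm::parseAssemblyString(
      "@g = global <vscale x 4 x i32> zeroinitializer\n", Err, Ctx);
  if (!M) {
    Err.print("verify-scalable", llvm::errs());
    return 1;
  }
  // verifyModule returns true when the module is broken and, given a stream,
  // writes the verifier's messages to it.
  if (llvm::verifyModule(*M, &llvm::errs())) {
    llvm::errs() << "module failed verification\n";
    return 1;
  }
  return 0;
}

Something like clang++ verify-scalable.cpp $(llvm-config --cxxflags --ldflags --libs core asmparser support) should build it; the exact component list is a guess and may vary by installation.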
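
The visitAtomicRMWInst hunk above relaxes the old integer-only rule: xchg now accepts integer or floating-point operands, and the floating-point operations require a floating-point element type. The sketch below, again illustrative rather than taken from the patch, builds an atomicrmw fadd with IRBuilder and re-verifies the function. It uses the CreateAtomicRMW signature of this era (later LLVM releases add an alignment parameter), and the names atomic-example and atomic_add are invented for the example.

// Illustrative only: construct "atomicrmw fadd double* %p, double 1.0 seq_cst",
// which the relaxed check in this patch accepts, then verify the function.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::LLVMContext Ctx;
  llvm::Module M("atomic-example", Ctx);

  llvm::Type *DoubleTy = llvm::Type::getDoubleTy(Ctx);
  llvm::Type *DoublePtrTy = llvm::PointerType::getUnqual(DoubleTy);
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      llvm::Type::getVoidTy(Ctx), {DoublePtrTy}, /*isVarArg=*/false);
  llvm::Function *F = llvm::Function::Create(
      FTy, llvm::Function::ExternalLinkage, "atomic_add", &M);

  llvm::IRBuilder<> B(llvm::BasicBlock::Create(Ctx, "entry", F));
  // Pre-LLVM-13 signature: no explicit alignment operand.
  B.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, &*F->arg_begin(),
                    llvm::ConstantFP::get(DoubleTy, 1.0),
                    llvm::AtomicOrdering::SequentiallyConsistent);
  B.CreateRetVoid();

  // Returns true (and prints why) if the function is malformed.
  return llvm::verifyFunction(*F, &llvm::errs()) ? 1 : 0;
}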
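
The [us]mul_fix handling above allows a scale equal to the operand width only for umul_fix; the signed forms keep the scale strictly below the width. These intrinsics form the product in twice the operand width and then shift right by the scale, so with scale == width an unsigned operand is a pure fraction in [0, 1), while a signed operand still needs its sign bit. The plain C++ below is a reference model written for this note (not code from the patch), specialised to 8-bit operands.

// Reference model of the fixed-point multiply semantics the new verifier
// checks apply to, specialised to 8-bit operands for illustration.
#include <cassert>
#include <cstdint>
#include <cstdio>

// umul_fix on i8 with scale S: multiply in 16 bits, shift right by S.
// S may equal 8, in which case both operands are fractions in [0, 1).
uint8_t umul_fix8(uint8_t A, uint8_t B, unsigned Scale) {
  assert(Scale <= 8 && "verifier: scale <= width for umul_fix");
  return static_cast<uint8_t>((uint16_t(A) * uint16_t(B)) >> Scale);
}

// smul_fix on i8 with scale S: same idea in signed arithmetic. S must stay
// below 8 because one bit is the sign bit, matching the stricter check.
int8_t smul_fix8(int8_t A, int8_t B, unsigned Scale) {
  assert(Scale < 8 && "verifier: scale < width for smul_fix");
  return static_cast<int8_t>((int16_t(A) * int16_t(B)) >> Scale);
}

int main() {
  // 0.5 * 0.5 == 0.25 with scale 8 (Q0.8): 128/256 * 128/256 -> 64/256.
  std::printf("%d\n", umul_fix8(128, 128, 8)); // prints 64
  // 1.5 * 2.5 == 3.75 with scale 4 (Q3.4): 24/16 * 40/16 -> 60/16.
  std::printf("%d\n", smul_fix8(24, 40, 4));   // prints 60
  return 0;
}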