Diffstat (limited to 'contrib/llvm-project/clang/lib/AST/ASTContext.cpp')
-rw-r--r-- | contrib/llvm-project/clang/lib/AST/ASTContext.cpp | 962
1 file changed, 803 insertions(+), 159 deletions(-)
diff --git a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp index cc5de9a6295e..1064507f3461 100644 --- a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp +++ b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp @@ -41,6 +41,7 @@ #include "clang/AST/RawCommentList.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/Stmt.h" +#include "clang/AST/StmtOpenACC.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" @@ -85,7 +86,9 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MD5.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Support/SipHash.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/TargetParser/AArch64TargetParser.h" #include "llvm/TargetParser/Triple.h" #include <algorithm> #include <cassert> @@ -798,7 +801,7 @@ ASTContext::getCanonicalTemplateTemplateParmDecl( TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create( *this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(), - TTP->getPosition(), TTP->isParameterPack(), nullptr, + TTP->getPosition(), TTP->isParameterPack(), nullptr, /*Typename=*/false, TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(), CanonParams, SourceLocation(), /*RequiresClause=*/nullptr)); @@ -878,7 +881,8 @@ ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM, TemplateSpecializationTypes(this_()), DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()), DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()), - CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts), + ArrayParameterTypes(this_()), CanonTemplateTemplateParms(this_()), + SourceMgr(SM), LangOpts(LOpts), NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)), XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles, LangOpts.XRayNeverInstrumentFiles, @@ -1081,7 +1085,8 @@ void ASTContext::addModuleInitializer(Module *M, Decl *D) { Inits->Initializers.push_back(D); } -void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) { +void ASTContext::addLazyModuleInitializers(Module *M, + ArrayRef<GlobalDeclID> IDs) { auto *&Inits = ModuleInitializers[M]; if (!Inits) Inits = new (*this) PerModuleInitializers; @@ -1106,6 +1111,31 @@ void ASTContext::setCurrentNamedModule(Module *M) { CurrentCXXNamedModule = M; } +bool ASTContext::isInSameModule(const Module *M1, const Module *M2) { + if (!M1 != !M2) + return false; + + /// Get the representative module for M. The representative module is the + /// first module unit for a specific primary module name. So that the module + /// units have the same representative module belongs to the same module. + /// + /// The process is helpful to reduce the expensive string operations. 
+ auto GetRepresentativeModule = [this](const Module *M) { + auto Iter = SameModuleLookupSet.find(M); + if (Iter != SameModuleLookupSet.end()) + return Iter->second; + + const Module *RepresentativeModule = + PrimaryModuleNameMap.try_emplace(M->getPrimaryModuleInterfaceName(), M) + .first->second; + SameModuleLookupSet[M] = RepresentativeModule; + return RepresentativeModule; + }; + + assert(M1 && "Shouldn't call `isInSameModule` if both M1 and M2 are none."); + return GetRepresentativeModule(M1) == GetRepresentativeModule(M2); +} + ExternCContextDecl *ASTContext::getExternCContextDecl() const { if (!ExternCContext) ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl()); @@ -1304,6 +1334,9 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target, // Placeholder type for bound members. InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember); + // Placeholder type for unresolved templates. + InitBuiltinType(UnresolvedTemplateTy, BuiltinType::UnresolvedTemplate); + // Placeholder type for pseudo-objects. InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject); @@ -1318,16 +1351,14 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target, // Placeholder type for OMP array sections. if (LangOpts.OpenMP) { - InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection); + InitBuiltinType(ArraySectionTy, BuiltinType::ArraySection); InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping); InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator); } - // Placeholder type for OpenACC array sections. - if (LangOpts.OpenACC) { - // FIXME: Once we implement OpenACC array sections in Sema, this will either - // be combined with the OpenMP type, or given its own type. In the meantime, - // just use the OpenMP type so that parsing can work. - InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection); + // Placeholder type for OpenACC array sections, if we are ALSO in OMP mode, + // don't bother, as we're just using the same type as OMP. + if (LangOpts.OpenACC && !LangOpts.OpenMP) { + InitBuiltinType(ArraySectionTy, BuiltinType::ArraySection); } if (LangOpts.MatrixTypes) InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx); @@ -1353,7 +1384,8 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target, #include "clang/Basic/OpenCLExtensionTypes.def" } - if (Target.hasAArch64SVETypes()) { + if (Target.hasAArch64SVETypes() || + (AuxTarget && AuxTarget->hasAArch64SVETypes())) { #define SVE_TYPE(Name, Id, SingletonId) \ InitBuiltinType(SingletonId, BuiltinType::Id); #include "clang/Basic/AArch64SVEACLETypes.def" @@ -1380,6 +1412,13 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target, #include "clang/Basic/WebAssemblyReferenceTypes.def" } + if (Target.getTriple().isAMDGPU() || + (AuxTarget && AuxTarget->getTriple().isAMDGPU())) { +#define AMDGPU_TYPE(Name, Id, SingletonId) \ + InitBuiltinType(SingletonId, BuiltinType::Id); +#include "clang/Basic/AMDGPUTypes.def" + } + // Builtin type for __objc_yes and __objc_no ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ? SignedCharTy : BoolTy); @@ -1611,15 +1650,7 @@ const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const { case BuiltinType::Float16: return Target->getHalfFormat(); case BuiltinType::Half: - // For HLSL, when the native half type is disabled, half will be treat as - // float. 
- if (getLangOpts().HLSL) - if (getLangOpts().NativeHalfType) - return Target->getHalfFormat(); - else - return Target->getFloatFormat(); - else - return Target->getHalfFormat(); + return Target->getHalfFormat(); case BuiltinType::Float: return Target->getFloatFormat(); case BuiltinType::Double: return Target->getDoubleFormat(); case BuiltinType::Ibm128: @@ -1692,7 +1723,7 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const { if (VD->hasGlobalStorage() && !ForAlignof) { uint64_t TypeSize = !BaseT->isIncompleteType() ? getTypeSize(T.getTypePtr()) : 0; - Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize)); + Align = std::max(Align, getMinGlobalAlignOfVar(TypeSize, VD)); } // Fields can be subject to extra alignment constraints, like if @@ -1749,7 +1780,8 @@ TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const { // of a base-class subobject. We decide whether that's possible // during class layout, so here we can just trust the layout results. if (getLangOpts().CPlusPlus) { - if (const auto *RT = T->getAs<RecordType>()) { + if (const auto *RT = T->getAs<RecordType>(); + RT && !RT->getDecl()->isInvalidDecl()) { const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); Info.Width = layout.getDataSize(); } @@ -1764,7 +1796,7 @@ TypeInfoChars static getConstantArrayInfoInChars(const ASTContext &Context, const ConstantArrayType *CAT) { TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); - uint64_t Size = CAT->getSize().getZExtValue(); + uint64_t Size = CAT->getZExtSize(); assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= (uint64_t)(-1)/Size) && "Overflow in array type char size evaluation"); @@ -1904,11 +1936,12 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { case Type::IncompleteArray: case Type::VariableArray: - case Type::ConstantArray: { + case Type::ConstantArray: + case Type::ArrayParameter: { // Model non-constant sized arrays as size zero, but track the alignment. 
uint64_t Size = 0; if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) - Size = CAT->getSize().getZExtValue(); + Size = CAT->getZExtSize(); TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType()); assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && @@ -2202,6 +2235,13 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { Align = 8; \ break; #include "clang/Basic/WebAssemblyReferenceTypes.def" +#define AMDGPU_OPAQUE_PTR_TYPE(NAME, MANGLEDNAME, AS, WIDTH, ALIGN, ID, \ + SINGLETONID) \ + case BuiltinType::ID: \ + Width = WIDTH; \ + Align = ALIGN; \ + break; +#include "clang/Basic/AMDGPUTypes.def" } break; case Type::ObjCObjectPointer: @@ -2260,9 +2300,8 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { } case Type::BitInt: { const auto *EIT = cast<BitIntType>(T); - Align = std::clamp<unsigned>(llvm::PowerOf2Ceil(EIT->getNumBits()), - getCharWidth(), Target->getLongLongAlign()); - Width = llvm::alignTo(EIT->getNumBits(), Align); + Align = Target->getBitIntAlign(EIT->getNumBits()); + Width = Target->getBitIntWidth(EIT->getNumBits()); break; } case Type::Record: @@ -2346,6 +2385,9 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { return getTypeInfo( cast<AttributedType>(T)->getEquivalentType().getTypePtr()); + case Type::CountAttributed: + return getTypeInfo(cast<CountAttributedType>(T)->desugar().getTypePtr()); + case Type::BTFTagAttributed: return getTypeInfo( cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr()); @@ -2515,16 +2557,25 @@ unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const { /// getAlignOfGlobalVar - Return the alignment in bits that should be given /// to a global variable of the specified type. -unsigned ASTContext::getAlignOfGlobalVar(QualType T) const { +unsigned ASTContext::getAlignOfGlobalVar(QualType T, const VarDecl *VD) const { uint64_t TypeSize = getTypeSize(T.getTypePtr()); return std::max(getPreferredTypeAlign(T), - getTargetInfo().getMinGlobalAlign(TypeSize)); + getMinGlobalAlignOfVar(TypeSize, VD)); } /// getAlignOfGlobalVarInChars - Return the alignment in characters that /// should be given to a global variable of the specified type. -CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { - return toCharUnitsFromBits(getAlignOfGlobalVar(T)); +CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T, + const VarDecl *VD) const { + return toCharUnitsFromBits(getAlignOfGlobalVar(T, VD)); +} + +unsigned ASTContext::getMinGlobalAlignOfVar(uint64_t Size, + const VarDecl *VD) const { + // Make the default handling as that of a non-weak definition in the + // current translation unit. + bool HasNonWeakDef = !VD || (VD->hasDefinition() && !VD->isWeak()); + return getTargetInfo().getMinGlobalAlign(Size, HasNonWeakDef); } CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const { @@ -2668,7 +2719,7 @@ getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context, if (Field->isBitField()) { // If we have explicit padding bits, they don't contribute bits // to the actual object representation, so return 0. 
- if (Field->isUnnamedBitfield()) + if (Field->isUnnamedBitField()) return 0; int64_t BitfieldSize = Field->getBitWidthValue(Context); @@ -2781,6 +2832,10 @@ bool ASTContext::hasUniqueObjectRepresentations( return hasUniqueObjectRepresentations(getBaseElementType(Ty), CheckIfTriviallyCopyable); + assert((Ty->isVoidType() || !Ty->isIncompleteType()) && + "hasUniqueObjectRepresentations should not be called with an " + "incomplete type"); + // (9.1) - T is trivially copyable... if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(*this)) return false; @@ -3045,21 +3100,27 @@ QualType ASTContext::removeAddrSpaceQualType(QualType T) const { if (!T.hasAddressSpace()) return T; - // If we are composing extended qualifiers together, merge together - // into one ExtQuals node. QualifierCollector Quals; const Type *TypeNode; + // For arrays, strip the qualifier off the element type, then reconstruct the + // array type + if (T.getTypePtr()->isArrayType()) { + T = getUnqualifiedArrayType(T, Quals); + TypeNode = T.getTypePtr(); + } else { + // If we are composing extended qualifiers together, merge together + // into one ExtQuals node. + while (T.hasAddressSpace()) { + TypeNode = Quals.strip(T); + + // If the type no longer has an address space after stripping qualifiers, + // jump out. + if (!QualType(TypeNode, 0).hasAddressSpace()) + break; - while (T.hasAddressSpace()) { - TypeNode = Quals.strip(T); - - // If the type no longer has an address space after stripping qualifiers, - // jump out. - if (!QualType(TypeNode, 0).hasAddressSpace()) - break; - - // There might be sugar in the way. Strip it and try again. - T = T.getSingleStepDesugaredType(*this); + // There might be sugar in the way. Strip it and try again. + T = T.getSingleStepDesugaredType(*this); + } } Quals.removeAddressSpace(); @@ -3073,6 +3134,300 @@ QualType ASTContext::removeAddrSpaceQualType(QualType T) const { return QualType(TypeNode, Quals.getFastQualifiers()); } +uint16_t +ASTContext::getPointerAuthVTablePointerDiscriminator(const CXXRecordDecl *RD) { + assert(RD->isPolymorphic() && + "Attempted to get vtable pointer discriminator on a monomorphic type"); + std::unique_ptr<MangleContext> MC(createMangleContext()); + SmallString<256> Str; + llvm::raw_svector_ostream Out(Str); + MC->mangleCXXVTable(RD, Out); + return llvm::getPointerAuthStableSipHash(Str); +} + +/// Encode a function type for use in the discriminator of a function pointer +/// type. We can't use the itanium scheme for this since C has quite permissive +/// rules for type compatibility that we need to be compatible with. +/// +/// Formally, this function associates every function pointer type T with an +/// encoded string E(T). Let the equivalence relation T1 ~ T2 be defined as +/// E(T1) == E(T2). E(T) is part of the ABI of values of type T. C type +/// compatibility requires equivalent treatment under the ABI, so +/// CCompatible(T1, T2) must imply E(T1) == E(T2), that is, CCompatible must be +/// a subset of ~. Crucially, however, it must be a proper subset because +/// CCompatible is not an equivalence relation: for example, int[] is compatible +/// with both int[1] and int[2], but the latter are not compatible with each +/// other. Therefore this encoding function must be careful to only distinguish +/// types if there is no third type with which they are both required to be +/// compatible. +static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx, + raw_ostream &OS, QualType QT) { + // FIXME: Consider address space qualifiers. 
+ const Type *T = QT.getCanonicalType().getTypePtr(); + + // FIXME: Consider using the C++ type mangling when we encounter a construct + // that is incompatible with C. + + switch (T->getTypeClass()) { + case Type::Atomic: + return encodeTypeForFunctionPointerAuth( + Ctx, OS, cast<AtomicType>(T)->getValueType()); + + case Type::LValueReference: + OS << "R"; + encodeTypeForFunctionPointerAuth(Ctx, OS, + cast<ReferenceType>(T)->getPointeeType()); + return; + case Type::RValueReference: + OS << "O"; + encodeTypeForFunctionPointerAuth(Ctx, OS, + cast<ReferenceType>(T)->getPointeeType()); + return; + + case Type::Pointer: + // C11 6.7.6.1p2: + // For two pointer types to be compatible, both shall be identically + // qualified and both shall be pointers to compatible types. + // FIXME: we should also consider pointee types. + OS << "P"; + return; + + case Type::ObjCObjectPointer: + case Type::BlockPointer: + OS << "P"; + return; + + case Type::Complex: + OS << "C"; + return encodeTypeForFunctionPointerAuth( + Ctx, OS, cast<ComplexType>(T)->getElementType()); + + case Type::VariableArray: + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::ArrayParameter: + // C11 6.7.6.2p6: + // For two array types to be compatible, both shall have compatible + // element types, and if both size specifiers are present, and are integer + // constant expressions, then both size specifiers shall have the same + // constant value [...] + // + // So since ElemType[N] has to be compatible ElemType[], we can't encode the + // width of the array. + OS << "A"; + return encodeTypeForFunctionPointerAuth( + Ctx, OS, cast<ArrayType>(T)->getElementType()); + + case Type::ObjCInterface: + case Type::ObjCObject: + OS << "<objc_object>"; + return; + + case Type::Enum: { + // C11 6.7.2.2p4: + // Each enumerated type shall be compatible with char, a signed integer + // type, or an unsigned integer type. + // + // So we have to treat enum types as integers. + QualType UnderlyingType = cast<EnumType>(T)->getDecl()->getIntegerType(); + return encodeTypeForFunctionPointerAuth( + Ctx, OS, UnderlyingType.isNull() ? Ctx.IntTy : UnderlyingType); + } + + case Type::FunctionNoProto: + case Type::FunctionProto: { + // C11 6.7.6.3p15: + // For two function types to be compatible, both shall specify compatible + // return types. Moreover, the parameter type lists, if both are present, + // shall agree in the number of parameters and in the use of the ellipsis + // terminator; corresponding parameters shall have compatible types. + // + // That paragraph goes on to describe how unprototyped functions are to be + // handled, which we ignore here. Unprototyped function pointers are hashed + // as though they were prototyped nullary functions since thats probably + // what the user meant. This behavior is non-conforming. + // FIXME: If we add a "custom discriminator" function type attribute we + // should encode functions as their discriminators. 
+ OS << "F"; + const auto *FuncType = cast<FunctionType>(T); + encodeTypeForFunctionPointerAuth(Ctx, OS, FuncType->getReturnType()); + if (const auto *FPT = dyn_cast<FunctionProtoType>(FuncType)) { + for (QualType Param : FPT->param_types()) { + Param = Ctx.getSignatureParameterType(Param); + encodeTypeForFunctionPointerAuth(Ctx, OS, Param); + } + if (FPT->isVariadic()) + OS << "z"; + } + OS << "E"; + return; + } + + case Type::MemberPointer: { + OS << "M"; + const auto *MPT = T->getAs<MemberPointerType>(); + encodeTypeForFunctionPointerAuth(Ctx, OS, QualType(MPT->getClass(), 0)); + encodeTypeForFunctionPointerAuth(Ctx, OS, MPT->getPointeeType()); + return; + } + case Type::ExtVector: + case Type::Vector: + OS << "Dv" << Ctx.getTypeSizeInChars(T).getQuantity(); + break; + + // Don't bother discriminating based on these types. + case Type::Pipe: + case Type::BitInt: + case Type::ConstantMatrix: + OS << "?"; + return; + + case Type::Builtin: { + const auto *BTy = T->getAs<BuiltinType>(); + switch (BTy->getKind()) { +#define SIGNED_TYPE(Id, SingletonId) \ + case BuiltinType::Id: \ + OS << "i"; \ + return; +#define UNSIGNED_TYPE(Id, SingletonId) \ + case BuiltinType::Id: \ + OS << "i"; \ + return; +#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id: +#define BUILTIN_TYPE(Id, SingletonId) +#include "clang/AST/BuiltinTypes.def" + llvm_unreachable("placeholder types should not appear here."); + + case BuiltinType::Half: + OS << "Dh"; + return; + case BuiltinType::Float: + OS << "f"; + return; + case BuiltinType::Double: + OS << "d"; + return; + case BuiltinType::LongDouble: + OS << "e"; + return; + case BuiltinType::Float16: + OS << "DF16_"; + return; + case BuiltinType::Float128: + OS << "g"; + return; + + case BuiltinType::Void: + OS << "v"; + return; + + case BuiltinType::ObjCId: + case BuiltinType::ObjCClass: + case BuiltinType::ObjCSel: + case BuiltinType::NullPtr: + OS << "P"; + return; + + // Don't bother discriminating based on OpenCL types. + case BuiltinType::OCLSampler: + case BuiltinType::OCLEvent: + case BuiltinType::OCLClkEvent: + case BuiltinType::OCLQueue: + case BuiltinType::OCLReserveID: + case BuiltinType::BFloat16: + case BuiltinType::VectorQuad: + case BuiltinType::VectorPair: + OS << "?"; + return; + + // Don't bother discriminating based on these seldom-used types. + case BuiltinType::Ibm128: + return; +#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ + case BuiltinType::Id: \ + return; +#include "clang/Basic/OpenCLImageTypes.def" +#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ + case BuiltinType::Id: \ + return; +#include "clang/Basic/OpenCLExtensionTypes.def" +#define SVE_TYPE(Name, Id, SingletonId) \ + case BuiltinType::Id: \ + return; +#include "clang/Basic/AArch64SVEACLETypes.def" + case BuiltinType::Dependent: + llvm_unreachable("should never get here"); + case BuiltinType::AMDGPUBufferRsrc: + case BuiltinType::WasmExternRef: +#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/RISCVVTypes.def" + llvm_unreachable("not yet implemented"); + } + } + case Type::Record: { + const RecordDecl *RD = T->getAs<RecordType>()->getDecl(); + const IdentifierInfo *II = RD->getIdentifier(); + + // In C++, an immediate typedef of an anonymous struct or union + // is considered to name it for ODR purposes, but C's specification + // of type compatibility does not have a similar rule. 
Using the typedef + // name in function type discriminators anyway, as we do here, + // therefore technically violates the C standard: two function pointer + // types defined in terms of two typedef'd anonymous structs with + // different names are formally still compatible, but we are assigning + // them different discriminators and therefore incompatible ABIs. + // + // This is a relatively minor violation that significantly improves + // discrimination in some cases and has not caused problems in + // practice. Regardless, it is now part of the ABI in places where + // function type discrimination is used, and it can no longer be + // changed except on new platforms. + + if (!II) + if (const TypedefNameDecl *Typedef = RD->getTypedefNameForAnonDecl()) + II = Typedef->getDeclName().getAsIdentifierInfo(); + + if (!II) { + OS << "<anonymous_record>"; + return; + } + OS << II->getLength() << II->getName(); + return; + } + case Type::DeducedTemplateSpecialization: + case Type::Auto: +#define NON_CANONICAL_TYPE(Class, Base) case Type::Class: +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: +#define ABSTRACT_TYPE(Class, Base) +#define TYPE(Class, Base) +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("unexpected non-canonical or dependent type!"); + return; + } +} + +uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) { + assert(!T->isDependentType() && + "cannot compute type discriminator of a dependent type"); + + SmallString<256> Str; + llvm::raw_svector_ostream Out(Str); + + if (T->isFunctionPointerType() || T->isFunctionReferenceType()) + T = T->getPointeeType(); + + if (T->isFunctionType()) { + encodeTypeForFunctionPointerAuth(*this, Out, T); + } else { + T = T.getUnqualifiedType(); + std::unique_ptr<MangleContext> MC(createMangleContext()); + MC->mangleCanonicalTypeName(T, Out); + } + + return llvm::getPointerAuthStableSipHash(Str); +} + QualType ASTContext::getObjCGCQualType(QualType T, Qualifiers::GC GCAttr) const { QualType CanT = getCanonicalType(T); @@ -3111,6 +3466,32 @@ QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { return T; } +QualType ASTContext::getCountAttributedType( + QualType WrappedTy, Expr *CountExpr, bool CountInBytes, bool OrNull, + ArrayRef<TypeCoupledDeclRefInfo> DependentDecls) const { + assert(WrappedTy->isPointerType() || WrappedTy->isArrayType()); + + llvm::FoldingSetNodeID ID; + CountAttributedType::Profile(ID, WrappedTy, CountExpr, CountInBytes, OrNull); + + void *InsertPos = nullptr; + CountAttributedType *CATy = + CountAttributedTypes.FindNodeOrInsertPos(ID, InsertPos); + if (CATy) + return QualType(CATy, 0); + + QualType CanonTy = getCanonicalType(WrappedTy); + size_t Size = CountAttributedType::totalSizeToAlloc<TypeCoupledDeclRefInfo>( + DependentDecls.size()); + CATy = (CountAttributedType *)Allocate(Size, TypeAlignment); + new (CATy) CountAttributedType(WrappedTy, CanonTy, CountExpr, CountInBytes, + OrNull, DependentDecls); + Types.push_back(CATy); + CountAttributedTypes.InsertNode(CATy, InsertPos); + + return QualType(CATy, 0); +} + const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, FunctionType::ExtInfo Info) { if (T->getExtInfo() == Info) @@ -3356,6 +3737,37 @@ QualType ASTContext::getDecayedType(QualType T) const { return getDecayedType(T, Decayed); } +QualType ASTContext::getArrayParameterType(QualType Ty) const { + if (Ty->isArrayParameterType()) + return Ty; + assert(Ty->isConstantArrayType() && "Ty must be an 
array type."); + const auto *ATy = cast<ConstantArrayType>(Ty); + llvm::FoldingSetNodeID ID; + ATy->Profile(ID, *this, ATy->getElementType(), ATy->getZExtSize(), + ATy->getSizeExpr(), ATy->getSizeModifier(), + ATy->getIndexTypeQualifiers().getAsOpaqueValue()); + void *InsertPos = nullptr; + ArrayParameterType *AT = + ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos); + if (AT) + return QualType(AT, 0); + + QualType Canonical; + if (!Ty.isCanonical()) { + Canonical = getArrayParameterType(getCanonicalType(Ty)); + + // Get the new insert position for the node we care about. + AT = ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos); + assert(!AT && "Shouldn't be in the map!"); + } + + AT = new (*this, alignof(ArrayParameterType)) + ArrayParameterType(ATy, Canonical); + Types.push_back(AT); + ArrayParameterTypes.InsertNode(AT, InsertPos); + return QualType(AT, 0); +} + /// getBlockPointerType - Return the uniqued reference to the type for /// a pointer to the specified block. QualType ASTContext::getBlockPointerType(QualType T) const { @@ -3520,8 +3932,8 @@ QualType ASTContext::getConstantArrayType(QualType EltTy, ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth()); llvm::FoldingSetNodeID ID; - ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM, - IndexTypeQuals); + ConstantArrayType::Profile(ID, *this, EltTy, ArySize.getZExtValue(), SizeExpr, + ASM, IndexTypeQuals); void *InsertPos = nullptr; if (ConstantArrayType *ATP = @@ -3545,11 +3957,8 @@ QualType ASTContext::getConstantArrayType(QualType EltTy, assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } - void *Mem = Allocate( - ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0), - alignof(ConstantArrayType)); - auto *New = new (Mem) - ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals); + auto *New = ConstantArrayType::Create(*this, EltTy, Canon, ArySize, SizeExpr, + ASM, IndexTypeQuals); ConstantArrayTypes.InsertNode(New, InsertPos); Types.push_back(New); return QualType(New, 0); @@ -3602,8 +4011,10 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const { case Type::Auto: case Type::DeducedTemplateSpecialization: case Type::PackExpansion: + case Type::PackIndexing: case Type::BitInt: case Type::DependentBitInt: + case Type::ArrayParameter: llvm_unreachable("type should never be variably-modified"); // These types can be variably-modified but should never need to @@ -3731,33 +4142,33 @@ QualType ASTContext::getDependentSizedArrayType(QualType elementType, numElements->isValueDependent()) && "Size must be type- or value-dependent!"); + SplitQualType canonElementType = getCanonicalType(elementType).split(); + + void *insertPos = nullptr; + llvm::FoldingSetNodeID ID; + DependentSizedArrayType::Profile( + ID, *this, numElements ? QualType(canonElementType.Ty, 0) : elementType, + ASM, elementTypeQuals, numElements); + + // Look for an existing type with these properties. + DependentSizedArrayType *canonTy = + DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); + // Dependently-sized array types that do not have a specified number // of elements will have their sizes deduced from a dependent - // initializer. We do no canonicalization here at all, which is okay - // because they can't be used in most locations. + // initializer. 
if (!numElements) { + if (canonTy) + return QualType(canonTy, 0); + auto *newType = new (*this, alignof(DependentSizedArrayType)) DependentSizedArrayType(elementType, QualType(), numElements, ASM, elementTypeQuals, brackets); + DependentSizedArrayTypes.InsertNode(newType, insertPos); Types.push_back(newType); return QualType(newType, 0); } - // Otherwise, we actually build a new type every time, but we - // also build a canonical type. - - SplitQualType canonElementType = getCanonicalType(elementType).split(); - - void *insertPos = nullptr; - llvm::FoldingSetNodeID ID; - DependentSizedArrayType::Profile(ID, *this, - QualType(canonElementType.Ty, 0), - ASM, elementTypeQuals, numElements); - - // Look for an existing type with these properties. - DependentSizedArrayType *canonTy = - DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); - // If we don't have one, build one. if (!canonTy) { canonTy = new (*this, alignof(DependentSizedArrayType)) @@ -4490,12 +4901,14 @@ QualType ASTContext::getFunctionTypeInternal( size_t Size = FunctionProtoType::totalSizeToAlloc< QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType, - Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers>( + Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers, + FunctionEffect, EffectConditionExpr>( NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(), EPI.requiresFunctionProtoTypeArmAttributes(), ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr, EPI.ExtParameterInfos ? NumArgs : 0, - EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0); + EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0, EPI.FunctionEffects.size(), + EPI.FunctionEffects.conditions().size()); auto *FTP = (FunctionProtoType *)Allocate(Size, alignof(FunctionProtoType)); FunctionProtoType::ExtProtoInfo newEPI = EPI; @@ -4503,6 +4916,8 @@ QualType ASTContext::getFunctionTypeInternal( Types.push_back(FTP); if (!Unique) FunctionProtoTypes.InsertNode(FTP, InsertPos); + if (!EPI.FunctionEffects.empty()) + AnyFunctionEffects = true; return QualType(FTP, 0); } @@ -4935,9 +5350,6 @@ ASTContext::getTemplateSpecializationType(TemplateName Template, QualType Underlying) const { assert(!Template.getAsDependentTemplateName() && "No dependent template names here!"); - // Look through qualified template names. - if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) - Template = QTN->getUnderlyingTemplate(); const auto *TD = Template.getAsTemplateDecl(); bool IsTypeAlias = TD && TD->isTypeAlias(); @@ -4973,10 +5385,6 @@ QualType ASTContext::getCanonicalTemplateSpecializationType( assert(!Template.getAsDependentTemplateName() && "No dependent template names here!"); - // Look through qualified template names. - if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) - Template = TemplateName(QTN->getUnderlyingTemplate()); - // Build the canonical template specialization type. 
TemplateName CanonTemplate = getCanonicalTemplateName(Template); bool AnyNonCanonArgs = false; @@ -5191,10 +5599,12 @@ TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { Arg = TemplateArgument(E); } else { auto *TTP = cast<TemplateTemplateParmDecl>(Param); + TemplateName Name = getQualifiedTemplateName( + nullptr, /*TemplateKeyword=*/false, TemplateName(TTP)); if (TTP->isParameterPack()) - Arg = TemplateArgument(TemplateName(TTP), std::optional<unsigned>()); + Arg = TemplateArgument(Name, std::optional<unsigned>()); else - Arg = TemplateArgument(TemplateName(TTP)); + Arg = TemplateArgument(Name); } if (Param->isTemplateParameterPack()) @@ -5617,19 +6027,19 @@ QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const { if (Canon) { // We already have a "canonical" version of an identical, dependent // typeof(expr) type. Use that as our canonical type. - toe = new (*this, alignof(TypeOfExprType)) - TypeOfExprType(tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0)); + toe = new (*this, alignof(TypeOfExprType)) TypeOfExprType( + *this, tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0)); } else { // Build a new, canonical typeof(expr) type. Canon = new (*this, alignof(DependentTypeOfExprType)) - DependentTypeOfExprType(tofExpr, Kind); + DependentTypeOfExprType(*this, tofExpr, Kind); DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); toe = Canon; } } else { QualType Canonical = getCanonicalType(tofExpr->getType()); toe = new (*this, alignof(TypeOfExprType)) - TypeOfExprType(tofExpr, Kind, Canonical); + TypeOfExprType(*this, tofExpr, Kind, Canonical); } Types.push_back(toe); return QualType(toe, 0); @@ -5642,8 +6052,8 @@ QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const { /// on canonical types (which are always unique). QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const { QualType Canonical = getCanonicalType(tofType); - auto *tot = - new (*this, alignof(TypeOfType)) TypeOfType(tofType, Canonical, Kind); + auto *tot = new (*this, alignof(TypeOfType)) + TypeOfType(*this, tofType, Canonical, Kind); Types.push_back(tot); return QualType(tot, 0); } @@ -5705,6 +6115,39 @@ QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { return QualType(dt, 0); } +QualType ASTContext::getPackIndexingType(QualType Pattern, Expr *IndexExpr, + bool FullySubstituted, + ArrayRef<QualType> Expansions, + int Index) const { + QualType Canonical; + if (FullySubstituted && Index != -1) { + Canonical = getCanonicalType(Expansions[Index]); + } else { + llvm::FoldingSetNodeID ID; + PackIndexingType::Profile(ID, *this, Pattern, IndexExpr); + void *InsertPos = nullptr; + PackIndexingType *Canon = + DependentPackIndexingTypes.FindNodeOrInsertPos(ID, InsertPos); + if (!Canon) { + void *Mem = Allocate( + PackIndexingType::totalSizeToAlloc<QualType>(Expansions.size()), + TypeAlignment); + Canon = new (Mem) + PackIndexingType(*this, QualType(), Pattern, IndexExpr, Expansions); + DependentPackIndexingTypes.InsertNode(Canon, InsertPos); + } + Canonical = QualType(Canon, 0); + } + + void *Mem = + Allocate(PackIndexingType::totalSizeToAlloc<QualType>(Expansions.size()), + TypeAlignment); + auto *T = new (Mem) + PackIndexingType(*this, Canonical, Pattern, IndexExpr, Expansions); + Types.push_back(T); + return QualType(T, 0); +} + /// getUnaryTransformationType - We don't unique these, since the memory /// savings are minimal and these are rare. 
QualType ASTContext::getUnaryTransformType(QualType BaseType, @@ -5812,7 +6255,8 @@ QualType ASTContext::getUnconstrainedType(QualType T) const { if (auto *AT = CanonT->getAs<AutoType>()) { if (!AT->isConstrained()) return T; - return getQualifiedType(getAutoType(QualType(), AT->getKeyword(), false, + return getQualifiedType(getAutoType(QualType(), AT->getKeyword(), + AT->isDependentType(), AT->containsUnexpandedParameterPack()), T.getQualifiers()); } @@ -5980,7 +6424,9 @@ CanQualType ASTContext::getCanonicalParamType(QualType T) const { T = getVariableArrayDecayedType(T); const Type *Ty = T.getTypePtr(); QualType Result; - if (isa<ArrayType>(Ty)) { + if (getLangOpts().HLSL && isa<ConstantArrayType>(Ty)) { + Result = getArrayParameterType(QualType(Ty, 0)); + } else if (isa<ArrayType>(Ty)) { Result = getArrayDecayedType(QualType(Ty,0)); } else if (isa<FunctionType>(Ty)) { Result = getPointerType(QualType(Ty, 0)); @@ -5992,7 +6438,7 @@ CanQualType ASTContext::getCanonicalParamType(QualType T) const { } QualType ASTContext::getUnqualifiedArrayType(QualType type, - Qualifiers &quals) { + Qualifiers &quals) const { SplitQualType splitType = type.getSplitUnqualifiedType(); // FIXME: getSplitUnqualifiedType() actually walks all the way to @@ -6387,7 +6833,8 @@ bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X, if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) return false; - return hasSameType(TTPX->getDefaultArgument(), TTPY->getDefaultArgument()); + return hasSameType(TTPX->getDefaultArgument().getArgument().getAsType(), + TTPY->getDefaultArgument().getArgument().getAsType()); } if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) { @@ -6395,8 +6842,10 @@ bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X, if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument()) return false; - Expr *DefaultArgumentX = NTTPX->getDefaultArgument()->IgnoreImpCasts(); - Expr *DefaultArgumentY = NTTPY->getDefaultArgument()->IgnoreImpCasts(); + Expr *DefaultArgumentX = + NTTPX->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts(); + Expr *DefaultArgumentY = + NTTPY->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts(); llvm::FoldingSetNodeID XID, YID; DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true); DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true); @@ -6689,7 +7138,7 @@ bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const { // Using shadow declarations with the same target match. if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) { const auto *USY = cast<UsingShadowDecl>(Y); - return USX->getTargetDecl() == USY->getTargetDecl(); + return declaresSameEntity(USX->getTargetDecl(), USY->getTargetDecl()); } // Using declarations with the same qualifier match. (We already know that @@ -6799,14 +7248,14 @@ ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { // A namespace is canonical; build a nested-name-specifier with // this namespace and no prefix. return NestedNameSpecifier::Create(*this, nullptr, - NNS->getAsNamespace()->getOriginalNamespace()); + NNS->getAsNamespace()->getFirstDecl()); case NestedNameSpecifier::NamespaceAlias: // A namespace is canonical; build a nested-name-specifier with // this namespace and no prefix. 
- return NestedNameSpecifier::Create(*this, nullptr, - NNS->getAsNamespaceAlias()->getNamespace() - ->getOriginalNamespace()); + return NestedNameSpecifier::Create( + *this, nullptr, + NNS->getAsNamespaceAlias()->getNamespace()->getFirstDecl()); // The difference between TypeSpec and TypeSpecWithTemplate is that the // latter will have the 'template' keyword when printed. @@ -6822,16 +7271,13 @@ ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { // typedef typename T::type T1; // typedef typename T1::type T2; if (const auto *DNT = T->getAs<DependentNameType>()) - return NestedNameSpecifier::Create( - *this, DNT->getQualifier(), - const_cast<IdentifierInfo *>(DNT->getIdentifier())); + return NestedNameSpecifier::Create(*this, DNT->getQualifier(), + DNT->getIdentifier()); if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>()) - return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true, - const_cast<Type *>(T)); + return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true, T); // TODO: Set 'Template' parameter to true for other template types. - return NestedNameSpecifier::Create(*this, nullptr, false, - const_cast<Type *>(T)); + return NestedNameSpecifier::Create(*this, nullptr, false, T); } case NestedNameSpecifier::Global: @@ -6902,6 +7348,8 @@ const ArrayType *ASTContext::getAsArrayType(QualType T) const { } QualType ASTContext::getAdjustedParameterType(QualType T) const { + if (getLangOpts().HLSL && T->isConstantArrayType()) + return getArrayParameterType(T); if (T->isArrayType() || T->isFunctionType()) return getDecayedType(T); return T; @@ -6977,7 +7425,7 @@ uint64_t ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { uint64_t ElementCount = 1; do { - ElementCount *= CA->getSize().getZExtValue(); + ElementCount *= CA->getZExtSize(); CA = dyn_cast_or_null<ConstantArrayType>( CA->getElementType()->getAsArrayTypeUnsafe()); } while (CA); @@ -7135,6 +7583,14 @@ QualType ASTContext::isPromotableBitField(Expr *E) const { // We perform that promotion here to match GCC and C++. // FIXME: C does not permit promotion of an enum bit-field whose rank is // greater than that of 'int'. We perform that promotion to match GCC. + // + // C23 6.3.1.1p2: + // The value from a bit-field of a bit-precise integer type is converted to + // the corresponding bit-precise integer type. (The rest is the same as in + // C11.) + if (QualType QT = Field->getType(); QT->isBitIntType()) + return QT; + if (BitWidth < IntSize) return IntTy; @@ -8043,6 +8499,8 @@ static char getObjCEncodingForPrimitiveType(const ASTContext *C, #include "clang/Basic/RISCVVTypes.def" #define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: #include "clang/Basic/WebAssemblyReferenceTypes.def" +#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/AMDGPUTypes.def" { DiagnosticsEngine &Diags = C->getDiagnostics(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, @@ -8300,7 +8758,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, S += '['; if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) - S += llvm::utostr(CAT->getSize().getZExtValue()); + S += llvm::utostr(CAT->getZExtSize()); else { //Variable length arrays are encoded as a regular array with 0 elements. 
assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && @@ -8512,6 +8970,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, case Type::DeducedTemplateSpecialization: return; + case Type::ArrayParameter: case Type::Pipe: #define ABSTRACT_TYPE(KIND, BASE) #define TYPE(KIND, BASE) @@ -9186,7 +9645,8 @@ TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, bool TemplateKeyword, TemplateName Template) const { - assert(NNS && "Missing nested-name-specifier in qualified template name"); + assert(Template.getKind() == TemplateName::Template || + Template.getKind() == TemplateName::UsingTemplate); // FIXME: Canonicalization? llvm::FoldingSetNodeID ID; @@ -9440,11 +9900,6 @@ static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) { bool ASTContext::areCompatibleSveTypes(QualType FirstType, QualType SecondType) { - assert( - ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) || - (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) && - "Expected SVE builtin type and vector type!"); - auto IsValidCast = [this](QualType FirstType, QualType SecondType) { if (const auto *BT = FirstType->getAs<BuiltinType>()) { if (const auto *VT = SecondType->getAs<VectorType>()) { @@ -9470,11 +9925,6 @@ bool ASTContext::areCompatibleSveTypes(QualType FirstType, bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, QualType SecondType) { - assert( - ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) || - (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) && - "Expected SVE builtin type and vector type!"); - auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { const auto *BT = FirstType->getAs<BuiltinType>(); if (!BT) @@ -9529,11 +9979,11 @@ static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) { ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty); - unsigned EltSize = Context.getTypeSize(Info.ElementType); + uint64_t EltSize = Context.getTypeSize(Info.ElementType); if (Info.ElementType == Context.BoolTy) EltSize = 1; - unsigned MinElts = Info.EC.getKnownMinValue(); + uint64_t MinElts = Info.EC.getKnownMinValue(); return VScale->first * MinElts * EltSize; } @@ -10391,6 +10841,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); + std::optional<FunctionEffectSet> MergedFX; + if (lproto && rproto) { // two C99 style function prototypes assert((AllowCXX || (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && @@ -10406,6 +10858,25 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, if (lproto->getMethodQuals() != rproto->getMethodQuals()) return {}; + // Function effects are handled similarly to noreturn, see above. + FunctionEffectsRef LHSFX = lproto->getFunctionEffects(); + FunctionEffectsRef RHSFX = rproto->getFunctionEffects(); + if (LHSFX != RHSFX) { + if (IsConditionalOperator) + MergedFX = FunctionEffectSet::getIntersection(LHSFX, RHSFX); + else { + FunctionEffectSet::Conflicts Errs; + MergedFX = FunctionEffectSet::getUnion(LHSFX, RHSFX, Errs); + // Here we're discarding a possible error due to conflicts in the effect + // sets. But we're not in a context where we can report it. The + // operation does however guarantee maintenance of invariants. 
+ } + if (*MergedFX != LHSFX) + allLTypes = false; + if (*MergedFX != RHSFX) + allRTypes = false; + } + SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos; bool canUseLeft, canUseRight; if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, @@ -10449,6 +10920,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, EPI.ExtInfo = einfo; EPI.ExtParameterInfos = newParamInfos.empty() ? nullptr : newParamInfos.data(); + if (MergedFX) + EPI.FunctionEffects = *MergedFX; return getFunctionType(retType, types, EPI); } @@ -10486,6 +10959,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); EPI.ExtInfo = einfo; + if (MergedFX) + EPI.FunctionEffects = *MergedFX; return getFunctionType(retType, proto->getParamTypes(), EPI); } @@ -10734,7 +11209,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer, { const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); - if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) + if (LCAT && RCAT && RCAT->getZExtSize() != LCAT->getZExtSize()) return {}; QualType LHSElem = getAsArrayType(LHS)->getElementType(); @@ -10855,6 +11330,10 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer, assert(LHS != RHS && "Equivalent pipe types should have already been handled!"); return {}; + case Type::ArrayParameter: + assert(LHS != RHS && + "Equivalent ArrayParameter types should have already been handled!"); + return {}; case Type::BitInt: { // Merge two bit-precise int types, while trying to preserve typedef info. bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned(); @@ -11408,6 +11887,10 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, Type = Context.SveCountTy; break; } + case 'b': { + Type = Context.AMDGPUBufferRsrcTy; + break; + } default: llvm_unreachable("Unexpected target builtin type"); } @@ -11909,8 +12392,7 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) { !isMSStaticDataMemberInlineDefinition(VD)) return false; - // Variables in other module units shouldn't be forced to be emitted. - if (VD->isInAnotherModuleUnit()) + if (VD->shouldEmitInExternalSource()) return false; // Variables that can be needed in other TUs are required. 
@@ -12134,8 +12616,13 @@ QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth, } void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) { - if (Number > 1) - MangleNumbers[ND] = Number; + if (Number <= 1) + return; + + MangleNumbers[ND] = Number; + + if (Listener) + Listener->AddedManglingNumber(ND, Number); } unsigned ASTContext::getManglingNumber(const NamedDecl *ND, @@ -12154,8 +12641,13 @@ unsigned ASTContext::getManglingNumber(const NamedDecl *ND, } void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) { - if (Number > 1) - StaticLocalNumbers[VD] = Number; + if (Number <= 1) + return; + + StaticLocalNumbers[VD] = Number; + + if (Listener) + Listener->AddedStaticLocalNumbers(VD, Number); } unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const { @@ -12746,6 +13238,18 @@ static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X, getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr, getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); } + case Type::ArrayParameter: { + const auto *AX = cast<ArrayParameterType>(X), + *AY = cast<ArrayParameterType>(Y); + assert(AX->getSize() == AY->getSize()); + const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr()) + ? AX->getSizeExpr() + : nullptr; + auto ArrayTy = Ctx.getConstantArrayType( + getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr, + getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); + return Ctx.getArrayParameterType(ArrayTy); + } case Type::Atomic: { const auto *AX = cast<AtomicType>(X), *AY = cast<AtomicType>(Y); return Ctx.getAtomicType( @@ -12921,6 +13425,14 @@ static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X, // As Decltype is not uniqued, building a common type would be wasteful. 
return QualType(DX, 0); } + case Type::PackIndexing: { + const auto *DX = cast<PackIndexingType>(X); + [[maybe_unused]] const auto *DY = cast<PackIndexingType>(Y); + assert(DX->isDependentType()); + assert(DY->isDependentType()); + assert(Ctx.hasSameExpr(DX->getIndexExpr(), DY->getIndexExpr())); + return QualType(DX, 0); + } case Type::DependentName: { const auto *NX = cast<DependentNameType>(X), *NY = cast<DependentNameType>(Y); @@ -12999,6 +13511,7 @@ static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X, CANONICAL_TYPE(Builtin) CANONICAL_TYPE(Complex) CANONICAL_TYPE(ConstantArray) + CANONICAL_TYPE(ArrayParameter) CANONICAL_TYPE(ConstantMatrix) CANONICAL_TYPE(Enum) CANONICAL_TYPE(ExtVector) @@ -13081,6 +13594,7 @@ static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X, return Ctx.getAutoType(Ctx.getQualifiedType(Underlying), AX->getKeyword(), /*IsDependent=*/false, /*IsPack=*/false, CD, As); } + case Type::PackIndexing: case Type::Decltype: return QualType(); case Type::DeducedTemplateSpecialization: @@ -13180,6 +13694,32 @@ static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X, return QualType(); return Ctx.getUsingType(CD, Ctx.getQualifiedType(Underlying)); } + case Type::CountAttributed: { + const auto *DX = cast<CountAttributedType>(X), + *DY = cast<CountAttributedType>(Y); + if (DX->isCountInBytes() != DY->isCountInBytes()) + return QualType(); + if (DX->isOrNull() != DY->isOrNull()) + return QualType(); + Expr *CEX = DX->getCountExpr(); + Expr *CEY = DY->getCountExpr(); + llvm::ArrayRef<clang::TypeCoupledDeclRefInfo> CDX = DX->getCoupledDecls(); + if (Ctx.hasSameExpr(CEX, CEY)) + return Ctx.getCountAttributedType(Ctx.getQualifiedType(Underlying), CEX, + DX->isCountInBytes(), DX->isOrNull(), + CDX); + if (!CEX->isIntegerConstantExpr(Ctx) || !CEY->isIntegerConstantExpr(Ctx)) + return QualType(); + // Two declarations with the same integer constant may still differ in their + // expression pointers, so we need to evaluate them. 
+ llvm::APSInt VX = *CEX->getIntegerConstantExpr(Ctx); + llvm::APSInt VY = *CEY->getIntegerConstantExpr(Ctx); + if (VX != VY) + return QualType(); + return Ctx.getCountAttributedType(Ctx.getQualifiedType(Underlying), CEX, + DX->isCountInBytes(), DX->isOrNull(), + CDX); + } } llvm_unreachable("Unhandled Type Class"); } @@ -13260,6 +13800,42 @@ QualType ASTContext::getCommonSugaredType(QualType X, QualType Y, return R; } +QualType ASTContext::getCorrespondingUnsaturatedType(QualType Ty) const { + assert(Ty->isFixedPointType()); + + if (Ty->isUnsaturatedFixedPointType()) + return Ty; + + switch (Ty->castAs<BuiltinType>()->getKind()) { + default: + llvm_unreachable("Not a saturated fixed point type!"); + case BuiltinType::SatShortAccum: + return ShortAccumTy; + case BuiltinType::SatAccum: + return AccumTy; + case BuiltinType::SatLongAccum: + return LongAccumTy; + case BuiltinType::SatUShortAccum: + return UnsignedShortAccumTy; + case BuiltinType::SatUAccum: + return UnsignedAccumTy; + case BuiltinType::SatULongAccum: + return UnsignedLongAccumTy; + case BuiltinType::SatShortFract: + return ShortFractTy; + case BuiltinType::SatFract: + return FractTy; + case BuiltinType::SatLongFract: + return LongFractTy; + case BuiltinType::SatUShortFract: + return UnsignedShortFractTy; + case BuiltinType::SatUFract: + return UnsignedFractTy; + case BuiltinType::SatULongFract: + return UnsignedLongFractTy; + } +} + QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const { assert(Ty->isFixedPointType()); @@ -13461,17 +14037,16 @@ QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const { } } -std::vector<std::string> ASTContext::filterFunctionTargetVersionAttrs( - const TargetVersionAttr *TV) const { - assert(TV != nullptr); - llvm::SmallVector<StringRef, 8> Feats; - std::vector<std::string> ResFeats; - TV->getFeatures(Feats); - for (auto &Feature : Feats) - if (Target->validateCpuSupports(Feature.str())) - // Use '?' to mark features that came from TargetVersion. - ResFeats.push_back("?" + Feature.str()); - return ResFeats; +// Given a list of FMV features, return a concatenated list of the +// corresponding backend features (which may contain duplicates). +static std::vector<std::string> getFMVBackendFeaturesFor( + const llvm::SmallVectorImpl<StringRef> &FMVFeatStrings) { + std::vector<std::string> BackendFeats; + for (StringRef F : FMVFeatStrings) + if (auto FMVExt = llvm::AArch64::parseFMVExtension(F)) + for (StringRef F : FMVExt->getImpliedFeatures()) + BackendFeats.push_back(F.str()); + return BackendFeats; } ParsedTargetAttr @@ -13506,10 +14081,12 @@ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, // Make a copy of the features as passed on the command line into the // beginning of the additional features from the function to override. - ParsedAttr.Features.insert( - ParsedAttr.Features.begin(), - Target->getTargetOpts().FeaturesAsWritten.begin(), - Target->getTargetOpts().FeaturesAsWritten.end()); + // AArch64 handles command line option features in parseTargetAttr(). 
+ if (!Target->getTriple().isAArch64()) + ParsedAttr.Features.insert( + ParsedAttr.Features.begin(), + Target->getTargetOpts().FeaturesAsWritten.begin(), + Target->getTargetOpts().FeaturesAsWritten.end()); if (ParsedAttr.CPU != "" && Target->isValidCPUName(ParsedAttr.CPU)) TargetCPU = ParsedAttr.CPU; @@ -13530,35 +14107,31 @@ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, Target->getTargetOpts().FeaturesAsWritten.end()); Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) { - std::vector<std::string> Features; - StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex()); if (Target->getTriple().isAArch64()) { - // TargetClones for AArch64 - if (VersionStr != "default") { - SmallVector<StringRef, 1> VersionFeatures; - VersionStr.split(VersionFeatures, "+"); - for (auto &VFeature : VersionFeatures) { - VFeature = VFeature.trim(); - // Use '?' to mark features that came from AArch64 TargetClones. - Features.push_back((StringRef{"?"} + VFeature).str()); - } - } + llvm::SmallVector<StringRef, 8> Feats; + TC->getFeatures(Feats, GD.getMultiVersionIndex()); + std::vector<std::string> Features = getFMVBackendFeaturesFor(Feats); Features.insert(Features.begin(), Target->getTargetOpts().FeaturesAsWritten.begin(), Target->getTargetOpts().FeaturesAsWritten.end()); + Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); } else { + std::vector<std::string> Features; + StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex()); if (VersionStr.starts_with("arch=")) TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1); else if (VersionStr != "default") Features.push_back((StringRef{"+"} + VersionStr).str()); + Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); } - Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) { - std::vector<std::string> Feats = filterFunctionTargetVersionAttrs(TV); - Feats.insert(Feats.begin(), - Target->getTargetOpts().FeaturesAsWritten.begin(), - Target->getTargetOpts().FeaturesAsWritten.end()); - Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Feats); + llvm::SmallVector<StringRef, 8> Feats; + TV->getFeatures(Feats); + std::vector<std::string> Features = getFMVBackendFeaturesFor(Feats); + Features.insert(Features.begin(), + Target->getTargetOpts().FeaturesAsWritten.begin(), + Target->getTargetOpts().FeaturesAsWritten.end()); + Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); } else { FeatureMap = Target->getTargetOpts().FeatureMap; } @@ -13609,3 +14182,74 @@ StringRef ASTContext::getCUIDHash() const { CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true); return CUIDHash; } + +const CXXRecordDecl * +ASTContext::baseForVTableAuthentication(const CXXRecordDecl *ThisClass) { + assert(ThisClass); + assert(ThisClass->isPolymorphic()); + const CXXRecordDecl *PrimaryBase = ThisClass; + while (1) { + assert(PrimaryBase); + assert(PrimaryBase->isPolymorphic()); + auto &Layout = getASTRecordLayout(PrimaryBase); + auto Base = Layout.getPrimaryBase(); + if (!Base || Base == PrimaryBase || !Base->isPolymorphic()) + break; + PrimaryBase = Base; + } + return PrimaryBase; +} + +bool ASTContext::useAbbreviatedThunkName(GlobalDecl VirtualMethodDecl, + StringRef MangledName) { + auto *Method = cast<CXXMethodDecl>(VirtualMethodDecl.getDecl()); + assert(Method->isVirtual()); + bool 
DefaultIncludesPointerAuth =
+      LangOpts.PointerAuthCalls || LangOpts.PointerAuthIntrinsics;
+
+  if (!DefaultIncludesPointerAuth)
+    return true;
+
+  auto Existing = ThunksToBeAbbreviated.find(VirtualMethodDecl);
+  if (Existing != ThunksToBeAbbreviated.end())
+    return Existing->second.contains(MangledName.str());
+
+  std::unique_ptr<MangleContext> Mangler(createMangleContext());
+  llvm::StringMap<llvm::SmallVector<std::string, 2>> Thunks;
+  auto VtableContext = getVTableContext();
+  if (const auto *ThunkInfos = VtableContext->getThunkInfo(VirtualMethodDecl)) {
+    auto *Destructor = dyn_cast<CXXDestructorDecl>(Method);
+    for (const auto &Thunk : *ThunkInfos) {
+      SmallString<256> ElidedName;
+      llvm::raw_svector_ostream ElidedNameStream(ElidedName);
+      if (Destructor)
+        Mangler->mangleCXXDtorThunk(Destructor, VirtualMethodDecl.getDtorType(),
+                                    Thunk, /* elideOverrideInfo */ true,
+                                    ElidedNameStream);
+      else
+        Mangler->mangleThunk(Method, Thunk, /* elideOverrideInfo */ true,
+                             ElidedNameStream);
+      SmallString<256> MangledName;
+      llvm::raw_svector_ostream mangledNameStream(MangledName);
+      if (Destructor)
+        Mangler->mangleCXXDtorThunk(Destructor, VirtualMethodDecl.getDtorType(),
+                                    Thunk, /* elideOverrideInfo */ false,
+                                    mangledNameStream);
+      else
+        Mangler->mangleThunk(Method, Thunk, /* elideOverrideInfo */ false,
+                             mangledNameStream);
+
+      if (Thunks.find(ElidedName) == Thunks.end())
+        Thunks[ElidedName] = {};
+      Thunks[ElidedName].push_back(std::string(MangledName));
+    }
+  }
+  llvm::StringSet<> SimplifiedThunkNames;
+  for (auto &ThunkList : Thunks) {
+    llvm::sort(ThunkList.second);
+    SimplifiedThunkNames.insert(ThunkList.second[0]);
+  }
+  bool Result = SimplifiedThunkNames.contains(MangledName);
+  ThunksToBeAbbreviated[VirtualMethodDecl] = std::move(SimplifiedThunkNames);
+  return Result;
+}
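The encodeTypeForFunctionPointerAuth hunk above is easiest to see with a toy model. The sketch below is a hypothetical, self-contained C++ rendering (ToyType and encode are invented names, not Clang API). It mirrors two rules from that hunk: array bounds are dropped, since C requires int[2] and int[] to remain compatible, and every data pointer currently encodes as plain "P" regardless of pointee (see the FIXME in the hunk), so distinct pointer parameter types deliberately collide in one bucket. The real code feeds the resulting string to llvm::getPointerAuthStableSipHash to produce the 16-bit discriminator.

```cpp
#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for a canonical type; not Clang's AST.
struct ToyType {
  enum Kind { Int, Float, Pointer, Array, Function } K;
  std::vector<ToyType> Sub; // pointee/element type, or return type then params
};

// Coarse encoding in the spirit of encodeTypeForFunctionPointerAuth: it may
// only distinguish types that C never requires to be treated as compatible.
static void encode(const ToyType &T, std::string &Out) {
  switch (T.K) {
  case ToyType::Int:     Out += 'i'; return;
  case ToyType::Float:   Out += 'f'; return;
  case ToyType::Pointer: Out += 'P'; return; // pointee ignored (cf. the FIXME)
  case ToyType::Array:   // bound ignored: int[2] must match int[]
    Out += 'A';
    encode(T.Sub[0], Out);
    return;
  case ToyType::Function:
    Out += 'F';
    for (const ToyType &S : T.Sub) // return type first, then parameters
      encode(S, Out);
    Out += 'E';
    return;
  }
}

int main() {
  ToyType IntTy{ToyType::Int, {}};
  ToyType FloatTy{ToyType::Float, {}};
  ToyType F{ToyType::Function, {IntTy, {ToyType::Pointer, {IntTy}}}};   // int(int*)
  ToyType G{ToyType::Function, {IntTy, {ToyType::Pointer, {FloatTy}}}}; // int(float*)
  std::string EF, EG;
  encode(F, EF);
  encode(G, EG);
  std::cout << EF << ' ' << EG << '\n'; // both print "FiPE"
}
```

Both function types encode to FiPE, i.e. they would hash to the same discriminator by construction.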
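Similarly, the new isInSameModule avoids repeated primary-module-name string work by memoizing a representative module per name, namely the first module unit seen for that name. A minimal standalone sketch of that memoization shape, with invented types (Module, Context) standing in for Clang's:

```cpp
#include <cassert>
#include <map>
#include <string>

struct Module { std::string PrimaryModuleName; }; // stand-in, not clang::Module

struct Context {
  // First module unit seen for each primary module name.
  std::map<std::string, const Module *> PrimaryModuleNameMap;
  // Memoized representative for each module unit already queried.
  std::map<const Module *, const Module *> SameModuleLookupSet;

  const Module *representative(const Module *M) {
    if (auto It = SameModuleLookupSet.find(M); It != SameModuleLookupSet.end())
      return It->second;
    const Module *Rep =
        PrimaryModuleNameMap.try_emplace(M->PrimaryModuleName, M).first->second;
    SameModuleLookupSet[M] = Rep;
    return Rep;
  }

  bool isInSameModule(const Module *M1, const Module *M2) {
    if (!M1 != !M2) // exactly one side is null: different modules
      return false;
    assert(M1 && "querying two null modules is not supported");
    return representative(M1) == representative(M2);
  }
};

int main() {
  Module A{"M"}, B{"M"}, C{"N"}; // A and B are units of primary module M
  Context Ctx;
  assert(Ctx.isInSameModule(&A, &B));
  assert(!Ctx.isInSameModule(&A, &C));
}
```

The string comparison thus happens at most once per module unit; every later query for the same unit is a pointer-keyed map hit followed by a pointer comparison.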