Diffstat (limited to 'clang/lib/AST/ASTContext.cpp')
-rw-r--r--  clang/lib/AST/ASTContext.cpp | 940
1 file changed, 573 insertions(+), 367 deletions(-)
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index a51429264dbe..2ba643f12a82 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -29,15 +29,17 @@
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/RecordLayout.h"
-#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
@@ -54,6 +56,7 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
+#include "clang/Basic/Module.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/SanitizerBlacklist.h"
#include "clang/Basic/SourceLocation.h"
@@ -97,34 +100,8 @@
using namespace clang;
enum FloatingRank {
- Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank
+ BFloat16Rank, Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank
};
-const Expr *ASTContext::traverseIgnored(const Expr *E) const {
- return traverseIgnored(const_cast<Expr *>(E));
-}
-
-Expr *ASTContext::traverseIgnored(Expr *E) const {
- if (!E)
- return nullptr;
-
- switch (Traversal) {
- case ast_type_traits::TK_AsIs:
- return E;
- case ast_type_traits::TK_IgnoreImplicitCastsAndParentheses:
- return E->IgnoreParenImpCasts();
- case ast_type_traits::TK_IgnoreUnlessSpelledInSource:
- return E->IgnoreUnlessSpelledInSource();
- }
- llvm_unreachable("Invalid Traversal type!");
-}
-
-ast_type_traits::DynTypedNode
-ASTContext::traverseIgnored(const ast_type_traits::DynTypedNode &N) const {
- if (const auto *E = N.get<Expr>()) {
- return ast_type_traits::DynTypedNode::create(*traverseIgnored(E));
- }
- return N;
-}
/// \returns location that is relevant when searching for Doc comments related
/// to \p D.
@@ -321,6 +298,12 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile);
}
+void ASTContext::addComment(const RawComment &RC) {
+ assert(LangOpts.RetainCommentsFromSystemHeaders ||
+ !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
+ Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
+}
+
/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to template.
@@ -493,10 +476,20 @@ void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
if (Comments.empty() || Decls.empty())
return;
- // See if there are any new comments that are not attached to a decl.
- // The location doesn't have to be precise - we care only about the file.
- const FileID File =
- SourceMgr.getDecomposedLoc((*Decls.begin())->getLocation()).first;
+ FileID File;
+ for (Decl *D : Decls) {
+ SourceLocation Loc = D->getLocation();
+ if (Loc.isValid()) {
+ // See if there are any new comments that are not attached to a decl.
+ // The location doesn't have to be precise - we care only about the file.
+ File = SourceMgr.getDecomposedLoc(Loc).first;
+ break;
+ }
+ }
+
+ if (File.isInvalid())
+ return;
+
auto CommentsInThisFile = Comments.getCommentsInFile(File);
if (!CommentsInThisFile || CommentsInThisFile->empty() ||
CommentsInThisFile->rbegin()->second->isAttached())
@@ -661,7 +654,7 @@ comments::FullComment *ASTContext::getCommentForDecl(
return FC;
}
-void
+void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &C,
TemplateTemplateParmDecl *Parm) {
@@ -716,6 +709,57 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
RequiresClause->Profile(ID, C, /*Canonical=*/true);
}
+static Expr *
+canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC,
+ QualType ConstrainedType) {
+ // This is a bit ugly - we need to form a new immediately-declared
+ // constraint that references the new parameter; this would ideally
+ // require semantic analysis (e.g. template<C T> struct S {}; - the
+ // converted arguments of C<T> could be an argument pack if C is
+ // declared as template<typename... T> concept C = ...).
+ // We don't have semantic analysis here so we dig deep into the
+ // ready-made constraint expr and change the thing manually.
+ ConceptSpecializationExpr *CSE;
+ if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC))
+ CSE = cast<ConceptSpecializationExpr>(Fold->getLHS());
+ else
+ CSE = cast<ConceptSpecializationExpr>(IDC);
+ ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments();
+ SmallVector<TemplateArgument, 3> NewConverted;
+ NewConverted.reserve(OldConverted.size());
+ if (OldConverted.front().getKind() == TemplateArgument::Pack) {
+ // The case:
+ // template<typename... T> concept C = true;
+ // template<C<int> T> struct S; -> constraint is C<{T, int}>
+ NewConverted.push_back(ConstrainedType);
+ for (auto &Arg : OldConverted.front().pack_elements().drop_front(1))
+ NewConverted.push_back(Arg);
+ TemplateArgument NewPack(NewConverted);
+
+ NewConverted.clear();
+ NewConverted.push_back(NewPack);
+ assert(OldConverted.size() == 1 &&
+ "Template parameter pack should be the last parameter");
+ } else {
+ assert(OldConverted.front().getKind() == TemplateArgument::Type &&
+ "Unexpected first argument kind for immediately-declared "
+ "constraint");
+ NewConverted.push_back(ConstrainedType);
+ for (auto &Arg : OldConverted.drop_front(1))
+ NewConverted.push_back(Arg);
+ }
+ Expr *NewIDC = ConceptSpecializationExpr::Create(
+ C, CSE->getNamedConcept(), NewConverted, nullptr,
+ CSE->isInstantiationDependent(), CSE->containsUnexpandedParameterPack());
+
+ if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC))
+ NewIDC = new (C) CXXFoldExpr(OrigFold->getType(), SourceLocation(), NewIDC,
+ BinaryOperatorKind::BO_LAnd,
+ SourceLocation(), /*RHS=*/nullptr,
+ SourceLocation(), /*NumExpansions=*/None);
+ return NewIDC;
+}
+
TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
TemplateTemplateParmDecl *TTP) const {
@@ -743,68 +787,23 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
TTP->isExpandedParameterPack() ?
llvm::Optional<unsigned>(TTP->getNumExpansionParameters()) : None);
if (const auto *TC = TTP->getTypeConstraint()) {
- // This is a bit ugly - we need to form a new immediately-declared
- // constraint that references the new parameter; this would ideally
- // require semantic analysis (e.g. template<C T> struct S {}; - the
- // converted arguments of C<T> could be an argument pack if C is
- // declared as template<typename... T> concept C = ...).
- // We don't have semantic analysis here so we dig deep into the
- // ready-made constraint expr and change the thing manually.
- Expr *IDC = TC->getImmediatelyDeclaredConstraint();
- ConceptSpecializationExpr *CSE;
- if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC))
- CSE = cast<ConceptSpecializationExpr>(Fold->getLHS());
- else
- CSE = cast<ConceptSpecializationExpr>(IDC);
- ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments();
- SmallVector<TemplateArgument, 3> NewConverted;
- NewConverted.reserve(OldConverted.size());
-
QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0);
- if (OldConverted.front().getKind() == TemplateArgument::Pack) {
- // The case:
- // template<typename... T> concept C = true;
- // template<C<int> T> struct S; -> constraint is C<{T, int}>
- NewConverted.push_back(ParamAsArgument);
- for (auto &Arg : OldConverted.front().pack_elements().drop_front(1))
- NewConverted.push_back(Arg);
- TemplateArgument NewPack(NewConverted);
-
- NewConverted.clear();
- NewConverted.push_back(NewPack);
- assert(OldConverted.size() == 1 &&
- "Template parameter pack should be the last parameter");
- } else {
- assert(OldConverted.front().getKind() == TemplateArgument::Type &&
- "Unexpected first argument kind for immediately-declared "
- "constraint");
- NewConverted.push_back(ParamAsArgument);
- for (auto &Arg : OldConverted.drop_front(1))
- NewConverted.push_back(Arg);
- }
- Expr *NewIDC = ConceptSpecializationExpr::Create(*this,
- NestedNameSpecifierLoc(), /*TemplateKWLoc=*/SourceLocation(),
- CSE->getConceptNameInfo(), /*FoundDecl=*/CSE->getNamedConcept(),
- CSE->getNamedConcept(),
- // Actually canonicalizing a TemplateArgumentLoc is difficult so we
- // simply omit the ArgsAsWritten
- /*ArgsAsWritten=*/nullptr, NewConverted, nullptr);
-
- if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC))
- NewIDC = new (*this) CXXFoldExpr(OrigFold->getType(),
- SourceLocation(), NewIDC,
- BinaryOperatorKind::BO_LAnd,
- SourceLocation(), /*RHS=*/nullptr,
- SourceLocation(),
- /*NumExpansions=*/None);
-
+ Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint(
+ *this, TC->getImmediatelyDeclaredConstraint(),
+ ParamAsArgument);
+ TemplateArgumentListInfo CanonArgsAsWritten;
+ if (auto *Args = TC->getTemplateArgsAsWritten())
+ for (const auto &ArgLoc : Args->arguments())
+ CanonArgsAsWritten.addArgument(
+ TemplateArgumentLoc(ArgLoc.getArgument(),
+ TemplateArgumentLocInfo()));
NewTTP->setTypeConstraint(
NestedNameSpecifierLoc(),
DeclarationNameInfo(TC->getNamedConcept()->getDeclName(),
SourceLocation()), /*FoundDecl=*/nullptr,
// Actually canonicalizing a TemplateArgumentLoc is difficult so we
// simply omit the ArgsAsWritten
- CSE->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC);
+ TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC);
}
CanonParams.push_back(NewTTP);
} else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
@@ -839,6 +838,13 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
NTTP->isParameterPack(),
TInfo);
}
+ if (AutoType *AT = T->getContainedAutoType()) {
+ if (AT->isConstrained()) {
+ Param->setPlaceholderTypeConstraint(
+ canonicalizeImmediatelyDeclaredConstraint(
+ *this, NTTP->getPlaceholderTypeConstraint(), T));
+ }
+ }
CanonParams.push_back(Param);
} else
@@ -886,6 +892,7 @@ CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
case TargetCXXABI::GenericMIPS:
case TargetCXXABI::GenericItanium:
case TargetCXXABI::WebAssembly:
+ case TargetCXXABI::XL:
return CreateItaniumCXXABI(*this);
case TargetCXXABI::Microsoft:
return CreateMicrosoftCXXABI(*this);
@@ -900,6 +907,12 @@ interp::Context &ASTContext::getInterpContext() {
return *InterpContext.get();
}
+ParentMapContext &ASTContext::getParentMapContext() {
+ if (!ParentMapCtx)
+ ParentMapCtx.reset(new ParentMapContext(*this));
+ return *ParentMapCtx.get();
+}
+
static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
const LangOptions &LOpts) {
if (LOpts.FakeAddressSpaceMap) {
@@ -943,7 +956,7 @@ ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
Builtin::Context &builtins)
: ConstantArrayTypes(this_()), FunctionProtoTypes(this_()),
TemplateSpecializationTypes(this_()),
- DependentTemplateSpecializationTypes(this_()),
+ DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
SubstTemplateTemplateParmPacks(this_()),
CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)),
@@ -995,80 +1008,9 @@ ASTContext::~ASTContext() {
Value->~APValue();
}
-class ASTContext::ParentMap {
- /// Contains parents of a node.
- using ParentVector = llvm::SmallVector<ast_type_traits::DynTypedNode, 2>;
-
- /// Maps from a node to its parents. This is used for nodes that have
- /// pointer identity only, which are more common and we can save space by
- /// only storing a unique pointer to them.
- using ParentMapPointers = llvm::DenseMap<
- const void *,
- llvm::PointerUnion<const Decl *, const Stmt *,
- ast_type_traits::DynTypedNode *, ParentVector *>>;
-
- /// Parent map for nodes without pointer identity. We store a full
- /// DynTypedNode for all keys.
- using ParentMapOtherNodes = llvm::DenseMap<
- ast_type_traits::DynTypedNode,
- llvm::PointerUnion<const Decl *, const Stmt *,
- ast_type_traits::DynTypedNode *, ParentVector *>>;
-
- ParentMapPointers PointerParents;
- ParentMapOtherNodes OtherParents;
- class ASTVisitor;
-
- static ast_type_traits::DynTypedNode
- getSingleDynTypedNodeFromParentMap(ParentMapPointers::mapped_type U) {
- if (const auto *D = U.dyn_cast<const Decl *>())
- return ast_type_traits::DynTypedNode::create(*D);
- if (const auto *S = U.dyn_cast<const Stmt *>())
- return ast_type_traits::DynTypedNode::create(*S);
- return *U.get<ast_type_traits::DynTypedNode *>();
- }
-
- template <typename NodeTy, typename MapTy>
- static ASTContext::DynTypedNodeList getDynNodeFromMap(const NodeTy &Node,
- const MapTy &Map) {
- auto I = Map.find(Node);
- if (I == Map.end()) {
- return llvm::ArrayRef<ast_type_traits::DynTypedNode>();
- }
- if (const auto *V = I->second.template dyn_cast<ParentVector *>()) {
- return llvm::makeArrayRef(*V);
- }
- return getSingleDynTypedNodeFromParentMap(I->second);
- }
-
-public:
- ParentMap(ASTContext &Ctx);
- ~ParentMap() {
- for (const auto &Entry : PointerParents) {
- if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
- delete Entry.second.get<ast_type_traits::DynTypedNode *>();
- } else if (Entry.second.is<ParentVector *>()) {
- delete Entry.second.get<ParentVector *>();
- }
- }
- for (const auto &Entry : OtherParents) {
- if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
- delete Entry.second.get<ast_type_traits::DynTypedNode *>();
- } else if (Entry.second.is<ParentVector *>()) {
- delete Entry.second.get<ParentVector *>();
- }
- }
- }
-
- DynTypedNodeList getParents(const ast_type_traits::DynTypedNode &Node) {
- if (Node.getNodeKind().hasPointerIdentity())
- return getDynNodeFromMap(Node.getMemoizationData(), PointerParents);
- return getDynNodeFromMap(Node, OtherParents);
- }
-};
-
void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
TraversalScope = TopLevelDecls;
- Parents.clear();
+ getParentMapContext().clear();
}
void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
@@ -1163,6 +1105,15 @@ void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end());
}
+ArrayRef<Module *>
+ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
+ auto MergedIt =
+ MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
+ if (MergedIt == MergedDefModules.end())
+ return None;
+ return MergedIt->second;
+}
+
void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
if (LazyInitializers.empty())
return;
@@ -1432,8 +1383,13 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
// Placeholder type for OMP array sections.
- if (LangOpts.OpenMP)
+ if (LangOpts.OpenMP) {
InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
+ InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
+ InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
+ }
+ if (LangOpts.MatrixTypes)
+ InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);
// C99 6.2.5p11.
FloatComplexTy = getComplexType(FloatTy);
@@ -1492,8 +1448,16 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
// half type (OpenCL 6.1.1.1) / ARM NEON __fp16
InitBuiltinType(HalfTy, BuiltinType::Half);
+ InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);
+
// Builtin type used to help define __builtin_va_list.
VaListTagDecl = nullptr;
+
+ // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
+ if (LangOpts.MicrosoftExt || LangOpts.Borland) {
+ MSGuidTagDecl = buildImplicitRecord("_GUID");
+ TUDecl->addDecl(MSGuidTagDecl);
+ }
}
DiagnosticsEngine &ASTContext::getDiagnostics() const {
@@ -1666,7 +1630,8 @@ void ASTContext::getOverriddenMethods(
}
void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
- assert(!Import->NextLocalImport && "Import declaration already in the chain");
+ assert(!Import->getNextLocalImport() &&
+ "Import declaration already in the chain");
assert(!Import->isFromASTFile() && "Non-local import declaration");
if (!FirstLocalImport) {
FirstLocalImport = Import;
@@ -1674,7 +1639,7 @@ void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
return;
}
- LastLocalImport->NextLocalImport = Import;
+ LastLocalImport->setNextLocalImport(Import);
LastLocalImport = Import;
}
@@ -1688,6 +1653,8 @@ const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
switch (T->castAs<BuiltinType>()->getKind()) {
default:
llvm_unreachable("Not a floating point type!");
+ case BuiltinType::BFloat16:
+ return Target->getBFloat16Format();
case BuiltinType::Float16:
case BuiltinType::Half:
return Target->getHalfFormat();
@@ -1800,6 +1767,10 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
return toCharUnitsFromBits(Align);
}
+CharUnits ASTContext::getExnObjectAlignment() const {
+ return toCharUnitsFromBits(Target->getExnObjectAlignment());
+}
+
// getTypeInfoDataSizeInChars - Return the size of a type, in
// chars. If the type is a record, its data size is returned. This is
// the size of the memcpy that's performed when assigning this type
@@ -1930,24 +1901,24 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::IncompleteArray:
case Type::VariableArray:
- Width = 0;
- Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
- break;
-
case Type::ConstantArray: {
- const auto *CAT = cast<ConstantArrayType>(T);
+ // Model non-constant sized arrays as size zero, but track the alignment.
+ uint64_t Size = 0;
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
+ Size = CAT->getSize().getZExtValue();
- TypeInfo EltInfo = getTypeInfo(CAT->getElementType());
- uint64_t Size = CAT->getSize().getZExtValue();
+ TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType());
assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
"Overflow in array type bit size evaluation");
Width = EltInfo.Width * Size;
Align = EltInfo.Align;
+ AlignIsRequired = EltInfo.AlignIsRequired;
if (!getTargetInfo().getCXXABI().isMicrosoft() ||
getTargetInfo().getPointerWidth(0) == 64)
Width = llvm::alignTo(Width, Align);
break;
}
+
case Type::ExtVector:
case Type::Vector: {
const auto *VT = cast<VectorType>(T);
@@ -1967,6 +1938,17 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
break;
}
+ case Type::ConstantMatrix: {
+ const auto *MT = cast<ConstantMatrixType>(T);
+ TypeInfo ElementInfo = getTypeInfo(MT->getElementType());
+ // The internal layout of a matrix value is implementation defined.
+ // Initially be ABI compatible with arrays with respect to alignment and
+ // size.
+ Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
+ Align = ElementInfo.Align;
+ break;
+ }
+
case Type::Builtin:
switch (cast<BuiltinType>(T)->getKind()) {
default: llvm_unreachable("Unknown builtin type!");
@@ -2067,6 +2049,10 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Width = Target->getLongFractWidth();
Align = Target->getLongFractAlign();
break;
+ case BuiltinType::BFloat16:
+ Width = Target->getBFloat16Width();
+ Align = Target->getBFloat16Align();
+ break;
case BuiltinType::Float16:
case BuiltinType::Half:
if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
@@ -2145,16 +2131,17 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
// Because the length is only known at runtime, we use a dummy value
// of 0 for the static length. The alignment values are those defined
// by the Procedure Call Standard for the Arm Architecture.
-#define SVE_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, IsSigned, IsFP)\
- case BuiltinType::Id: \
- Width = 0; \
- Align = 128; \
- break;
-#define SVE_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
- case BuiltinType::Id: \
- Width = 0; \
- Align = 16; \
- break;
+#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
+ IsSigned, IsFP, IsBF) \
+ case BuiltinType::Id: \
+ Width = 0; \
+ Align = 128; \
+ break;
+#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
+ case BuiltinType::Id: \
+ Width = 0; \
+ Align = 16; \
+ break;
#include "clang/Basic/AArch64SVEACLETypes.def"
}
break;
@@ -2202,11 +2189,25 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
case Type::ObjCInterface: {
const auto *ObjCI = cast<ObjCInterfaceType>(T);
+ if (ObjCI->getDecl()->isInvalidDecl()) {
+ Width = 8;
+ Align = 8;
+ break;
+ }
const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
Width = toBits(Layout.getSize());
Align = toBits(Layout.getAlignment());
break;
}
+ case Type::ExtInt: {
+ const auto *EIT = cast<ExtIntType>(T);
+ Align =
+ std::min(static_cast<unsigned>(std::max(
+ getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))),
+ Target->getLongLongAlign());
+ Width = llvm::alignTo(EIT->getNumBits(), Align);
+ break;
+ }
case Type::Record:
case Type::Enum: {
const auto *TT = cast<TagType>(T);
@@ -3383,6 +3384,8 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
case Type::DependentVector:
case Type::ExtVector:
case Type::DependentSizedExtVector:
+ case Type::ConstantMatrix:
+ case Type::DependentSizedMatrix:
case Type::DependentAddressSpace:
case Type::ObjCObject:
case Type::ObjCInterface:
@@ -3403,6 +3406,8 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
case Type::Auto:
case Type::DeducedTemplateSpecialization:
case Type::PackExpansion:
+ case Type::ExtInt:
+ case Type::DependentExtInt:
llvm_unreachable("type should never be variably-modified");
// These types can be variably-modified but should never need to
@@ -3629,6 +3634,33 @@ QualType ASTContext::getIncompleteArrayType(QualType elementType,
return QualType(newType, 0);
}
+/// getScalableVectorType - Return the unique reference to a scalable vector
+/// type of the specified element type and size. VectorType must be a built-in
+/// type.
+QualType ASTContext::getScalableVectorType(QualType EltTy,
+ unsigned NumElts) const {
+ if (Target->hasAArch64SVETypes()) {
+ uint64_t EltTySize = getTypeSize(EltTy);
+#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
+ IsSigned, IsFP, IsBF) \
+ if (!EltTy->isBooleanType() && \
+ ((EltTy->hasIntegerRepresentation() && \
+ EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
+ (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
+ IsFP && !IsBF) || \
+ (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
+ IsBF && !IsFP)) && \
+ EltTySize == ElBits && NumElts == NumEls) { \
+ return SingletonId; \
+ }
+#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
+ if (EltTy->isBooleanType() && NumElts == NumEls) \
+ return SingletonId;
+#include "clang/Basic/AArch64SVEACLETypes.def"
+ }
+ return QualType();
+}
+
/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
@@ -3688,10 +3720,10 @@ ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
(void)CanonCheck;
DependentVectorTypes.InsertNode(New, InsertPos);
} else {
- QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
- SourceLocation());
+ QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr,
+ SourceLocation(), VecKind);
New = new (*this, TypeAlignment) DependentVectorType(
- *this, VecType, CanonExtTy, SizeExpr, AttrLoc, VecKind);
+ *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
}
}
@@ -3772,6 +3804,78 @@ ASTContext::getDependentSizedExtVectorType(QualType vecType,
return QualType(New, 0);
}
+QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
+ unsigned NumColumns) const {
+ llvm::FoldingSetNodeID ID;
+ ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns,
+ Type::ConstantMatrix);
+
+ assert(MatrixType::isValidElementType(ElementTy) &&
+ "need a valid element type");
+ assert(ConstantMatrixType::isDimensionValid(NumRows) &&
+ ConstantMatrixType::isDimensionValid(NumColumns) &&
+ "need valid matrix dimensions");
+ void *InsertPos = nullptr;
+ if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(MTP, 0);
+
+ QualType Canonical;
+ if (!ElementTy.isCanonical()) {
+ Canonical =
+ getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns);
+
+ ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!NewIP && "Matrix type shouldn't already exist in the map");
+ (void)NewIP;
+ }
+
+ auto *New = new (*this, TypeAlignment)
+ ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
+ MatrixTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
+ Expr *RowExpr,
+ Expr *ColumnExpr,
+ SourceLocation AttrLoc) const {
+ QualType CanonElementTy = getCanonicalType(ElementTy);
+ llvm::FoldingSetNodeID ID;
+ DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr,
+ ColumnExpr);
+
+ void *InsertPos = nullptr;
+ DependentSizedMatrixType *Canon =
+ DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!Canon) {
+ Canon = new (*this, TypeAlignment) DependentSizedMatrixType(
+ *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc);
+#ifndef NDEBUG
+ DependentSizedMatrixType *CanonCheck =
+ DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
+#endif
+ DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
+ Types.push_back(Canon);
+ }
+
+ // Already have a canonical version of the matrix type
+ //
+ // If it exactly matches the requested type, use it directly.
+ if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
+ Canon->getColumnExpr() == ColumnExpr)
+ return QualType(Canon, 0);
+
+ // Use Canon as the canonical type for newly-built type.
+ DependentSizedMatrixType *New = new (*this, TypeAlignment)
+ DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr,
+ ColumnExpr, AttrLoc);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
Expr *AddrSpaceExpr,
SourceLocation AttrLoc) const {
@@ -4075,6 +4179,39 @@ QualType ASTContext::getWritePipeType(QualType T) const {
return getPipeType(T, false);
}
+QualType ASTContext::getExtIntType(bool IsUnsigned, unsigned NumBits) const {
+ llvm::FoldingSetNodeID ID;
+ ExtIntType::Profile(ID, IsUnsigned, NumBits);
+
+ void *InsertPos = nullptr;
+ if (ExtIntType *EIT = ExtIntTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(EIT, 0);
+
+ auto *New = new (*this, TypeAlignment) ExtIntType(IsUnsigned, NumBits);
+ ExtIntTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+QualType ASTContext::getDependentExtIntType(bool IsUnsigned,
+ Expr *NumBitsExpr) const {
+ assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
+ llvm::FoldingSetNodeID ID;
+ DependentExtIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);
+
+ void *InsertPos = nullptr;
+ if (DependentExtIntType *Existing =
+ DependentExtIntTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(Existing, 0);
+
+ auto *New = new (*this, TypeAlignment)
+ DependentExtIntType(*this, IsUnsigned, NumBitsExpr);
+ DependentExtIntTypes.InsertNode(New, InsertPos);
+
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
#ifndef NDEBUG
static bool NeedsInjectedClassNameType(const RecordDecl *D) {
if (!isa<CXXRecordDecl>(D)) return false;
@@ -4587,7 +4724,7 @@ TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
} else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
Expr *E = new (*this) DeclRefExpr(
*this, NTTP, /*enclosing*/ false,
- NTTP->getType().getNonLValueExprType(*this),
+ NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this),
Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
if (NTTP->isParameterPack())
@@ -4859,7 +4996,7 @@ ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
ArrayRef<ObjCProtocolDecl *> protocols) const {
// Look in the folding set for an existing type.
llvm::FoldingSetNodeID ID;
- ObjCTypeParamType::Profile(ID, Decl, protocols);
+ ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols);
void *InsertPos = nullptr;
if (ObjCTypeParamType *TypeParam =
ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
@@ -4885,6 +5022,17 @@ ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
return QualType(newType, 0);
}
+void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
+ ObjCTypeParamDecl *New) const {
+ New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType()));
+ // Update TypeForDecl after updating TypeSourceInfo.
+ auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl());
+ SmallVector<ObjCProtocolDecl *, 8> protocols;
+ protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end());
+ QualType UpdatedTy = getObjCTypeParamType(New, protocols);
+ New->setTypeForDecl(UpdatedTy.getTypePtr());
+}
+
/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
/// protocol list adopt all protocols in QT's qualified-id protocol
/// list.
@@ -5124,21 +5272,33 @@ QualType ASTContext::getUnaryTransformType(QualType BaseType,
/// getAutoType - Return the uniqued reference to the 'auto' type which has been
/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
/// canonical deduced-but-dependent 'auto' type.
-QualType ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
- bool IsDependent, bool IsPack) const {
+QualType
+ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
+ bool IsDependent, bool IsPack,
+ ConceptDecl *TypeConstraintConcept,
+ ArrayRef<TemplateArgument> TypeConstraintArgs) const {
assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
- if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && !IsDependent)
+ if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto &&
+ !TypeConstraintConcept && !IsDependent)
return getAutoDeductType();
// Look in the folding set for an existing type.
void *InsertPos = nullptr;
llvm::FoldingSetNodeID ID;
- AutoType::Profile(ID, DeducedType, Keyword, IsDependent, IsPack);
+ AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent,
+ TypeConstraintConcept, TypeConstraintArgs);
if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(AT, 0);
- auto *AT = new (*this, TypeAlignment)
- AutoType(DeducedType, Keyword, IsDependent, IsPack);
+ void *Mem = Allocate(sizeof(AutoType) +
+ sizeof(TemplateArgument) * TypeConstraintArgs.size(),
+ TypeAlignment);
+ auto *AT = new (Mem) AutoType(
+ DeducedType, Keyword,
+ (IsDependent ? TypeDependence::DependentInstantiation
+ : TypeDependence::None) |
+ (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None),
+ TypeConstraintConcept, TypeConstraintArgs);
Types.push_back(AT);
if (InsertPos)
AutoTypes.InsertNode(AT, InsertPos);
@@ -5198,10 +5358,11 @@ QualType ASTContext::getAtomicType(QualType T) const {
/// getAutoDeductType - Get type pattern for deducing against 'auto'.
QualType ASTContext::getAutoDeductType() const {
if (AutoDeductTy.isNull())
- AutoDeductTy = QualType(
- new (*this, TypeAlignment) AutoType(QualType(), AutoTypeKeyword::Auto,
- /*dependent*/false, /*pack*/false),
- 0);
+ AutoDeductTy = QualType(new (*this, TypeAlignment)
+ AutoType(QualType(), AutoTypeKeyword::Auto,
+ TypeDependence::None,
+ /*concept*/ nullptr, /*args*/ {}),
+ 0);
return AutoDeductTy;
}
@@ -5837,6 +5998,7 @@ static FloatingRank getFloatingRank(QualType T) {
case BuiltinType::Double: return DoubleRank;
case BuiltinType::LongDouble: return LongDoubleRank;
case BuiltinType::Float128: return Float128Rank;
+ case BuiltinType::BFloat16: return BFloat16Rank;
}
}
@@ -5849,6 +6011,7 @@ QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
FloatingRank EltRank = getFloatingRank(Size);
if (Domain->isComplexType()) {
switch (EltRank) {
+ case BFloat16Rank: llvm_unreachable("Complex bfloat16 is not supported");
case Float16Rank:
case HalfRank: llvm_unreachable("Complex half is not supported");
case FloatRank: return FloatComplexTy;
@@ -5861,6 +6024,7 @@ QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
assert(Domain->isRealFloatingType() && "Unknown domain!");
switch (EltRank) {
case Float16Rank: return HalfTy;
+ case BFloat16Rank: return BFloat16Ty;
case HalfRank: return HalfTy;
case FloatRank: return FloatTy;
case DoubleRank: return DoubleTy;
@@ -5897,6 +6061,11 @@ int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const {
unsigned ASTContext::getIntegerRank(const Type *T) const {
assert(T->isCanonicalUnqualified() && "T should be canonicalized");
+ // Results in this 'losing' to any type of the same size, but winning if
+ // larger.
+ if (const auto *EIT = dyn_cast<ExtIntType>(T))
+ return 0 + (EIT->getNumBits() << 3);
+
switch (cast<BuiltinType>(T)->getKind()) {
default: llvm_unreachable("getIntegerRank(): not a built-in integer");
case BuiltinType::Bool:
@@ -6287,39 +6456,39 @@ QualType ASTContext::getBlockDescriptorExtendedType() const {
return getTagDeclType(BlockDescriptorExtendedType);
}
-TargetInfo::OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
+OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
const auto *BT = dyn_cast<BuiltinType>(T);
if (!BT) {
if (isa<PipeType>(T))
- return TargetInfo::OCLTK_Pipe;
+ return OCLTK_Pipe;
- return TargetInfo::OCLTK_Default;
+ return OCLTK_Default;
}
switch (BT->getKind()) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id: \
- return TargetInfo::OCLTK_Image;
+ return OCLTK_Image;
#include "clang/Basic/OpenCLImageTypes.def"
case BuiltinType::OCLClkEvent:
- return TargetInfo::OCLTK_ClkEvent;
+ return OCLTK_ClkEvent;
case BuiltinType::OCLEvent:
- return TargetInfo::OCLTK_Event;
+ return OCLTK_Event;
case BuiltinType::OCLQueue:
- return TargetInfo::OCLTK_Queue;
+ return OCLTK_Queue;
case BuiltinType::OCLReserveID:
- return TargetInfo::OCLTK_ReserveID;
+ return OCLTK_ReserveID;
case BuiltinType::OCLSampler:
- return TargetInfo::OCLTK_Sampler;
+ return OCLTK_Sampler;
default:
- return TargetInfo::OCLTK_Default;
+ return OCLTK_Default;
}
}
@@ -6392,6 +6561,24 @@ bool ASTContext::getByrefLifetime(QualType Ty,
return true;
}
+CanQualType ASTContext::getNSUIntegerType() const {
+ assert(Target && "Expected target to be initialized");
+ const llvm::Triple &T = Target->getTriple();
+ // Windows is LLP64 rather than LP64
+ if (T.isOSWindows() && T.isArch64Bit())
+ return UnsignedLongLongTy;
+ return UnsignedLongTy;
+}
+
+CanQualType ASTContext::getNSIntegerType() const {
+ assert(Target && "Expected target to be initialized");
+ const llvm::Triple &T = Target->getTriple();
+ // Windows is LLP64 rather than LP64
+ if (T.isOSWindows() && T.isArch64Bit())
+ return LongLongTy;
+ return LongTy;
+}
+
TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
if (!ObjCInstanceTypeDecl)
ObjCInstanceTypeDecl =
@@ -6695,11 +6882,11 @@ ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
if (PD->isReadOnly()) {
S += ",R";
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_copy)
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy)
S += ",C";
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain)
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain)
S += ",&";
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
S += ",W";
} else {
switch (PD->getSetterKind()) {
@@ -6715,15 +6902,15 @@ ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
if (Dynamic)
S += ",D";
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic)
S += ",N";
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) {
S += ",G";
S += PD->getGetterName().getAsString();
}
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) {
S += ",S";
S += PD->getSetterName().getAsString();
}
@@ -6815,6 +7002,7 @@ static char getObjCEncodingForPrimitiveType(const ASTContext *C,
case BuiltinType::LongDouble: return 'D';
case BuiltinType::NullPtr: return '*'; // like char*
+ case BuiltinType::BFloat16:
case BuiltinType::Float16:
case BuiltinType::Float128:
case BuiltinType::Half:
@@ -7255,6 +7443,11 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
*NotEncodedT = T;
return;
+ case Type::ConstantMatrix:
+ if (NotEncodedT)
+ *NotEncodedT = T;
+ return;
+
// We could see an undeduced auto type here during error recovery.
// Just ignore it.
case Type::Auto:
@@ -7262,6 +7455,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
return;
case Type::Pipe:
+ case Type::ExtInt:
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
#define DEPENDENT_TYPE(KIND, BASE) \
@@ -7783,6 +7977,57 @@ CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}
+static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
+ // typedef struct __va_list_tag {
+ RecordDecl *VaListTagDecl;
+ VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
+ VaListTagDecl->startDefinition();
+
+ const size_t NumFields = 3;
+ QualType FieldTypes[NumFields];
+ const char *FieldNames[NumFields];
+
+ // void *CurrentSavedRegisterArea;
+ FieldTypes[0] = Context->getPointerType(Context->VoidTy);
+ FieldNames[0] = "__current_saved_reg_area_pointer";
+
+ // void *SavedRegAreaEnd;
+ FieldTypes[1] = Context->getPointerType(Context->VoidTy);
+ FieldNames[1] = "__saved_reg_area_end_pointer";
+
+ // void *OverflowArea;
+ FieldTypes[2] = Context->getPointerType(Context->VoidTy);
+ FieldNames[2] = "__overflow_area_pointer";
+
+ // Create fields
+ for (unsigned i = 0; i < NumFields; ++i) {
+ FieldDecl *Field = FieldDecl::Create(
+ const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(),
+ SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i],
+ /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false, ICIS_NoInit);
+ Field->setAccess(AS_public);
+ VaListTagDecl->addDecl(Field);
+ }
+ VaListTagDecl->completeDefinition();
+ Context->VaListTagDecl = VaListTagDecl;
+ QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+
+ // } __va_list_tag;
+ TypedefDecl *VaListTagTypedefDecl =
+ Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
+
+ QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl);
+
+ // typedef __va_list_tag __builtin_va_list[1];
+ llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
+ QualType VaListTagArrayType = Context->getConstantArrayType(
+ VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0);
+
+ return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
+}
+
static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
TargetInfo::BuiltinVaListKind Kind) {
switch (Kind) {
@@ -7802,6 +8047,8 @@ static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
return CreateAAPCSABIBuiltinVaListDecl(Context);
case TargetInfo::SystemZBuiltinVaList:
return CreateSystemZBuiltinVaListDecl(Context);
+ case TargetInfo::HexagonBuiltinVaList:
+ return CreateHexagonBuiltinVaListDecl(Context);
}
llvm_unreachable("Unhandled __builtin_va_list type kind");
@@ -8080,6 +8327,16 @@ static bool areCompatVectorTypes(const VectorType *LHS,
LHS->getNumElements() == RHS->getNumElements();
}
+/// areCompatMatrixTypes - Return true if the two specified matrix types are
+/// compatible.
+static bool areCompatMatrixTypes(const ConstantMatrixType *LHS,
+ const ConstantMatrixType *RHS) {
+ assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
+ return LHS->getElementType() == RHS->getElementType() &&
+ LHS->getNumRows() == RHS->getNumRows() &&
+ LHS->getNumColumns() == RHS->getNumColumns();
+}
+
bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
QualType SecondVec) {
assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
@@ -8362,10 +8619,18 @@ bool ASTContext::canAssignObjCInterfacesInBlockPointer(
RHSOPT->isObjCQualifiedIdType());
}
- if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType())
- return finish(ObjCQualifiedIdTypesAreCompatible(
- (BlockReturnType ? LHSOPT : RHSOPT),
- (BlockReturnType ? RHSOPT : LHSOPT), false));
+ if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
+ if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
+ // For block parameters, use the previous type checking for compatibility.
+ return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) ||
+ // Or corrected type checking as in non-compat mode.
+ (!BlockReturnType &&
+ ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false)));
+ else
+ return finish(ObjCQualifiedIdTypesAreCompatible(
+ (BlockReturnType ? LHSOPT : RHSOPT),
+ (BlockReturnType ? RHSOPT : LHSOPT), false));
+ }
const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
@@ -8716,8 +8981,8 @@ bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
return canAssignObjCInterfaces(
- getObjCObjectPointerType(To)->getAs<ObjCObjectPointerType>(),
- getObjCObjectPointerType(From)->getAs<ObjCObjectPointerType>());
+ getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(),
+ getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>());
}
/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
@@ -8783,8 +9048,8 @@ QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
}
QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
- bool OfBlockPointer,
- bool Unqualified) {
+ bool OfBlockPointer, bool Unqualified,
+ bool AllowCXX) {
const auto *lbase = lhs->castAs<FunctionType>();
const auto *rbase = rhs->castAs<FunctionType>();
const auto *lproto = dyn_cast<FunctionProtoType>(lbase);
@@ -8858,7 +9123,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn);
if (lproto && rproto) { // two C99 style function prototypes
- assert(!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec() &&
+ assert((AllowCXX ||
+ (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) &&
"C++ shouldn't be here");
// Compatible functions must have the same number of parameters
if (lproto->getNumParams() != rproto->getNumParams())
@@ -8922,7 +9188,7 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
const FunctionProtoType *proto = lproto ? lproto : rproto;
if (proto) {
- assert(!proto->hasExceptionSpec() && "C++ shouldn't be here");
+ assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here");
if (proto->isVariadic())
return {};
// Check that the types are compatible with the types that
@@ -9276,6 +9542,11 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
RHSCan->castAs<VectorType>()))
return LHS;
return {};
+ case Type::ConstantMatrix:
+ if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(),
+ RHSCan->castAs<ConstantMatrixType>()))
+ return LHS;
+ return {};
case Type::ObjCObject: {
// Check if the types are assignment compatible.
// FIXME: This should be type compatibility, e.g. whether
@@ -9301,6 +9572,21 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
assert(LHS != RHS &&
"Equivalent pipe types should have already been handled!");
return {};
+ case Type::ExtInt: {
+ // Merge two ext-int types, while trying to preserve typedef info.
+ bool LHSUnsigned = LHS->castAs<ExtIntType>()->isUnsigned();
+ bool RHSUnsigned = RHS->castAs<ExtIntType>()->isUnsigned();
+ unsigned LHSBits = LHS->castAs<ExtIntType>()->getNumBits();
+ unsigned RHSBits = RHS->castAs<ExtIntType>()->getNumBits();
+
+ // Like unsigned/int, shouldn't have a type if they don't match.
+ if (LHSUnsigned != RHSUnsigned)
+ return {};
+
+ if (LHSBits != RHSBits)
+ return {};
+ return LHS;
+ }
}
llvm_unreachable("Invalid Type::Class!");
@@ -9441,6 +9727,8 @@ unsigned ASTContext::getIntWidth(QualType T) const {
T = ET->getDecl()->getIntegerType();
if (T->isBooleanType())
return 1;
+ if (const auto *EIT = T->getAs<ExtIntType>())
+ return EIT->getNumBits();
// For builtin types, just use the standard type sizing method
return (unsigned)getTypeSize(T);
}
@@ -9622,6 +9910,11 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
// Read the base type.
switch (*Str++) {
default: llvm_unreachable("Unknown builtin type letter!");
+ case 'y':
+ assert(HowLong == 0 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'y'!");
+ Type = Context.BFloat16Ty;
+ break;
case 'v':
assert(HowLong == 0 && !Signed && !Unsigned &&
"Bad modifiers used with 'v'!");
@@ -9717,6 +10010,19 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
else
Type = Context.getLValueReferenceType(Type);
break;
+ case 'q': {
+ char *End;
+ unsigned NumElements = strtoul(Str, &End, 10);
+ assert(End != Str && "Missing vector size");
+ Str = End;
+
+ QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
+ RequiresICE, false);
+ assert(!RequiresICE && "Can't require vector ICE");
+
+ Type = Context.getScalableVectorType(ElementType, NumElements);
+ break;
+ }
case 'V': {
char *End;
unsigned NumElements = strtoul(Str, &End, 10);
@@ -10109,6 +10415,8 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
return true;
else if (isa<PragmaDetectMismatchDecl>(D))
return true;
+ else if (isa<OMPRequiresDecl>(D))
+ return true;
else if (isa<OMPThreadPrivateDecl>(D))
return !D->getDeclContext()->isDependentContext();
else if (isa<OMPAllocateDecl>(D))
@@ -10298,10 +10606,15 @@ bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
VTableContextBase *ASTContext::getVTableContext() {
if (!VTContext.get()) {
- if (Target->getCXXABI().isMicrosoft())
+ auto ABI = Target->getCXXABI();
+ if (ABI.isMicrosoft())
VTContext.reset(new MicrosoftVTableContext(*this));
- else
- VTContext.reset(new ItaniumVTableContext(*this));
+ else {
+ auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
+ ? ItaniumVTableContext::Relative
+ : ItaniumVTableContext::Pointer;
+ VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout));
+ }
}
return VTContext.get();
}
@@ -10319,6 +10632,7 @@ MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
case TargetCXXABI::iOS64:
case TargetCXXABI::WebAssembly:
case TargetCXXABI::WatchOS:
+ case TargetCXXABI::XL:
return ItaniumMangleContext::create(*this, getDiagnostics());
case TargetCXXABI::Microsoft:
return MicrosoftMangleContext::create(*this, getDiagnostics());
@@ -10360,8 +10674,10 @@ QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
/// getRealTypeForBitwidth -
/// sets floating point QualTy according to specified bitwidth.
/// Returns an empty type if there is no appropriate target type.
-QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth) const {
- TargetInfo::RealType Ty = getTargetInfo().getRealTypeByWidth(DestWidth);
+QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
+ bool ExplicitIEEE) const {
+ TargetInfo::RealType Ty =
+ getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitIEEE);
switch (Ty) {
case TargetInfo::Float:
return FloatTy;
@@ -10490,6 +10806,23 @@ ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
return Result;
}
+MSGuidDecl *
+ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
+ assert(MSGuidTagDecl && "building MS GUID without MS extensions?");
+
+ llvm::FoldingSetNodeID ID;
+ MSGuidDecl::Profile(ID, Parts);
+
+ void *InsertPos;
+ if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
+ return Existing;
+
+ QualType GUIDType = getMSGuidType().withConst();
+ MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts);
+ MSGuidDecls.InsertNode(New, InsertPos);
+ return New;
+}
+
bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
const llvm::Triple &T = getTargetInfo().getTriple();
if (!T.isOSDarwin())
@@ -10508,146 +10841,6 @@ bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
}
-/// Template specializations to abstract away from pointers and TypeLocs.
-/// @{
-template <typename T>
-static ast_type_traits::DynTypedNode createDynTypedNode(const T &Node) {
- return ast_type_traits::DynTypedNode::create(*Node);
-}
-template <>
-ast_type_traits::DynTypedNode createDynTypedNode(const TypeLoc &Node) {
- return ast_type_traits::DynTypedNode::create(Node);
-}
-template <>
-ast_type_traits::DynTypedNode
-createDynTypedNode(const NestedNameSpecifierLoc &Node) {
- return ast_type_traits::DynTypedNode::create(Node);
-}
-/// @}
-
-/// A \c RecursiveASTVisitor that builds a map from nodes to their
-/// parents as defined by the \c RecursiveASTVisitor.
-///
-/// Note that the relationship described here is purely in terms of AST
-/// traversal - there are other relationships (for example declaration context)
-/// in the AST that are better modeled by special matchers.
-///
-/// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes.
-class ASTContext::ParentMap::ASTVisitor
- : public RecursiveASTVisitor<ASTVisitor> {
-public:
- ASTVisitor(ParentMap &Map, ASTContext &Context)
- : Map(Map), Context(Context) {}
-
-private:
- friend class RecursiveASTVisitor<ASTVisitor>;
-
- using VisitorBase = RecursiveASTVisitor<ASTVisitor>;
-
- bool shouldVisitTemplateInstantiations() const { return true; }
-
- bool shouldVisitImplicitCode() const { return true; }
-
- template <typename T, typename MapNodeTy, typename BaseTraverseFn,
- typename MapTy>
- bool TraverseNode(T Node, MapNodeTy MapNode, BaseTraverseFn BaseTraverse,
- MapTy *Parents) {
- if (!Node)
- return true;
- if (ParentStack.size() > 0) {
- // FIXME: Currently we add the same parent multiple times, but only
- // when no memoization data is available for the type.
- // For example when we visit all subexpressions of template
- // instantiations; this is suboptimal, but benign: the only way to
- // visit those is with hasAncestor / hasParent, and those do not create
- // new matches.
- // The plan is to enable DynTypedNode to be storable in a map or hash
- // map. The main problem there is to implement hash functions /
- // comparison operators for all types that DynTypedNode supports that
- // do not have pointer identity.
- auto &NodeOrVector = (*Parents)[MapNode];
- if (NodeOrVector.isNull()) {
- if (const auto *D = ParentStack.back().get<Decl>())
- NodeOrVector = D;
- else if (const auto *S = ParentStack.back().get<Stmt>())
- NodeOrVector = S;
- else
- NodeOrVector = new ast_type_traits::DynTypedNode(ParentStack.back());
- } else {
- if (!NodeOrVector.template is<ParentVector *>()) {
- auto *Vector = new ParentVector(
- 1, getSingleDynTypedNodeFromParentMap(NodeOrVector));
- delete NodeOrVector
- .template dyn_cast<ast_type_traits::DynTypedNode *>();
- NodeOrVector = Vector;
- }
-
- auto *Vector = NodeOrVector.template get<ParentVector *>();
- // Skip duplicates for types that have memoization data.
- // We must check that the type has memoization data before calling
- // std::find() because DynTypedNode::operator== can't compare all
- // types.
- bool Found = ParentStack.back().getMemoizationData() &&
- std::find(Vector->begin(), Vector->end(),
- ParentStack.back()) != Vector->end();
- if (!Found)
- Vector->push_back(ParentStack.back());
- }
- }
- ParentStack.push_back(createDynTypedNode(Node));
- bool Result = BaseTraverse();
- ParentStack.pop_back();
- return Result;
- }
-
- bool TraverseDecl(Decl *DeclNode) {
- return TraverseNode(
- DeclNode, DeclNode, [&] { return VisitorBase::TraverseDecl(DeclNode); },
- &Map.PointerParents);
- }
-
- bool TraverseStmt(Stmt *StmtNode) {
- Stmt *FilteredNode = StmtNode;
- if (auto *ExprNode = dyn_cast_or_null<Expr>(FilteredNode))
- FilteredNode = Context.traverseIgnored(ExprNode);
- return TraverseNode(FilteredNode, FilteredNode,
- [&] { return VisitorBase::TraverseStmt(FilteredNode); },
- &Map.PointerParents);
- }
-
- bool TraverseTypeLoc(TypeLoc TypeLocNode) {
- return TraverseNode(
- TypeLocNode, ast_type_traits::DynTypedNode::create(TypeLocNode),
- [&] { return VisitorBase::TraverseTypeLoc(TypeLocNode); },
- &Map.OtherParents);
- }
-
- bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSLocNode) {
- return TraverseNode(
- NNSLocNode, ast_type_traits::DynTypedNode::create(NNSLocNode),
- [&] { return VisitorBase::TraverseNestedNameSpecifierLoc(NNSLocNode); },
- &Map.OtherParents);
- }
-
- ParentMap &Map;
- ASTContext &Context;
- llvm::SmallVector<ast_type_traits::DynTypedNode, 16> ParentStack;
-};
-
-ASTContext::ParentMap::ParentMap(ASTContext &Ctx) {
- ASTVisitor(*this, Ctx).TraverseAST(Ctx);
-}
-
-ASTContext::DynTypedNodeList
-ASTContext::getParents(const ast_type_traits::DynTypedNode &Node) {
- std::unique_ptr<ParentMap> &P = Parents[Traversal];
- if (!P)
- // We build the parent map for the traversal scope (usually whole TU), as
- // hasAncestor can escape any subtree.
- P = std::make_unique<ParentMap>(*this);
- return P->getParents(Node);
-}
-
bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
const ObjCMethodDecl *MethodImpl) {
@@ -10958,3 +11151,16 @@ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
Target->getTargetOpts().Features);
}
}
+
+OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
+ OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
+ return *OMPTraitInfoVector.back();
+}
+
+const DiagnosticBuilder &
+clang::operator<<(const DiagnosticBuilder &DB,
+ const ASTContext::SectionInfo &Section) {
+ if (Section.Decl)
+ return DB << Section.Decl;
+ return DB << "a prior #pragma section";
+}