author    Dimitry Andric <dim@FreeBSD.org>    2020-07-26 19:36:28 +0000
committer Dimitry Andric <dim@FreeBSD.org>    2020-07-26 19:36:28 +0000
commit    cfca06d7963fa0909f90483b42a6d7d194d01e08 (patch)
tree      209fb2a2d68f8f277793fc8df46c753d31bc853b /clang/lib/CodeGen/CodeGenModule.cpp
parent    706b4fc47bbc608932d3b491ae19a3b9cde9497b (diff)
Diffstat (limited to 'clang/lib/CodeGen/CodeGenModule.cpp')
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.cpp  589
1 file changed, 365 insertions, 224 deletions
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 57beda26677c..4ae8ce7e5ccf 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -38,6 +38,7 @@
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -83,6 +84,7 @@ static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
case TargetCXXABI::GenericMIPS:
case TargetCXXABI::GenericItanium:
case TargetCXXABI::WebAssembly:
+ case TargetCXXABI::XL:
return CreateItaniumCXXABI(CGM);
case TargetCXXABI::Microsoft:
return CreateMicrosoftCXXABI(CGM);
@@ -110,6 +112,7 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
HalfTy = llvm::Type::getHalfTy(LLVMContext);
+ BFloatTy = llvm::Type::getBFloatTy(LLVMContext);
FloatTy = llvm::Type::getFloatTy(LLVMContext);
DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
@@ -219,14 +222,6 @@ void CodeGenModule::createOpenMPRuntime() {
OpenMPRuntime.reset(new CGOpenMPRuntime(*this));
break;
}
-
- // The OpenMP-IR-Builder should eventually replace the above runtime codegens
- // but we are not there yet so they both reside in CGModule for now and the
- // OpenMP-IR-Builder is opt-in only.
- if (LangOpts.OpenMPIRBuilder) {
- OMPBuilder.reset(new llvm::OpenMPIRBuilder(TheModule));
- OMPBuilder->initialize();
- }
}
void CodeGenModule::createCUDARuntime() {
@@ -408,7 +403,7 @@ void CodeGenModule::Release() {
checkAliases();
emitMultiVersionFunctions();
EmitCXXGlobalInitFunc();
- EmitCXXGlobalDtorFunc();
+ EmitCXXGlobalCleanUpFunc();
registerGlobalDtorsWithAtExit();
EmitCXXThreadLocalInitFunc();
if (ObjCRuntime)
@@ -447,6 +442,10 @@ void CodeGenModule::Release() {
CodeGenFunction(*this).EmitCfiCheckStub();
}
emitAtAvailableLinkGuard();
+ if (Context.getTargetInfo().getTriple().isWasm() &&
+ !Context.getTargetInfo().getTriple().isOSEmscripten()) {
+ EmitMainVoidAlias();
+ }
emitLLVMUsed();
if (SanStats)
SanStats->finish();
@@ -483,6 +482,14 @@ void CodeGenModule::Release() {
getModule().addModuleFlag(llvm::Module::Max, "Dwarf Version",
CodeGenOpts.DwarfVersion);
}
+
+ if (Context.getLangOpts().SemanticInterposition)
+ // Require various optimizations to respect semantic interposition.
+ getModule().setSemanticInterposition(1);
+ else if (Context.getLangOpts().ExplicitNoSemanticInterposition)
+ // Allow dso_local on applicable targets.
+ getModule().setSemanticInterposition(0);
+
if (CodeGenOpts.EmitCodeView) {
// Indicate that we want CodeView in the metadata.
getModule().addModuleFlag(llvm::Module::Warning, "CodeView", 1);
@@ -513,7 +520,7 @@ void CodeGenModule::Release() {
"StrictVTablePointersRequirement",
llvm::MDNode::get(VMContext, Ops));
}
- if (DebugInfo)
+ if (getModuleDebugInfo())
// We support a single version in the linked module. The LLVM
// parser will drop debug info with a different version number
// (and warn about it, too).
@@ -537,11 +544,26 @@ void CodeGenModule::Release() {
getModule().addModuleFlag(llvm::Module::Error, "min_enum_size", EnumWidth);
}
+ if (Arch == llvm::Triple::riscv32 || Arch == llvm::Triple::riscv64) {
+ StringRef ABIStr = Target.getABI();
+ llvm::LLVMContext &Ctx = TheModule.getContext();
+ getModule().addModuleFlag(llvm::Module::Error, "target-abi",
+ llvm::MDString::get(Ctx, ABIStr));
+ }
+
if (CodeGenOpts.SanitizeCfiCrossDso) {
// Indicate that we want cross-DSO control flow integrity checks.
getModule().addModuleFlag(llvm::Module::Override, "Cross-DSO CFI", 1);
}
+ if (CodeGenOpts.WholeProgramVTables) {
+ // Indicate whether VFE was enabled for this module, so that the
+ // vcall_visibility metadata added under whole program vtables is handled
+ // appropriately in the optimizer.
+ getModule().addModuleFlag(llvm::Module::Error, "Virtual Function Elim",
+ CodeGenOpts.VirtualFunctionElimination);
+ }
+
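All of the flags recorded above go through llvm::Module::addModuleFlag, which appends a (behavior, key, value) triple to the module's !llvm.module.flags metadata, and the semantic-interposition switch added earlier is likewise a module-level property. A minimal standalone sketch; the "lp64d" string and the flag values are illustrative, not taken from this commit:

    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Metadata.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      llvm::LLVMContext Ctx;
      llvm::Module M("flags-demo", Ctx);

      // String-valued flag, as with "target-abi" above ("lp64d" is just an
      // illustrative ABI name):
      M.addModuleFlag(llvm::Module::Error, "target-abi",
                      llvm::MDString::get(Ctx, "lp64d"));

      // Integer-valued flag, as with "Virtual Function Elim":
      M.addModuleFlag(llvm::Module::Error, "Virtual Function Elim", 1);

      // The interposition bit is likewise recorded at module scope:
      M.setSemanticInterposition(true);

      M.print(llvm::outs(), /*AAW=*/nullptr);  // shows the !llvm.module.flags node
      return 0;
    }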
if (LangOpts.Sanitize.has(SanitizerKind::CFIICall)) {
getModule().addModuleFlag(llvm::Module::Override,
"CFI Canonical Jump Tables",
@@ -567,7 +589,8 @@ void CodeGenModule::Release() {
// floating point values to 0. (This corresponds to its "__CUDA_FTZ"
// property.)
getModule().addModuleFlag(llvm::Module::Override, "nvvm-reflect-ftz",
- CodeGenOpts.FlushDenorm ? 1 : 0);
+ CodeGenOpts.FP32DenormalMode.Output !=
+ llvm::DenormalMode::IEEE);
}
// Emit OpenCL specific module metadata: OpenCL/SPIR version.
@@ -623,8 +646,8 @@ void CodeGenModule::Release() {
if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
EmitCoverageFile();
- if (DebugInfo)
- DebugInfo->finalize();
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->finalize();
if (getCodeGenOpts().EmitVersionIdentMetadata)
EmitVersionIdentMetadata();
@@ -632,7 +655,9 @@ void CodeGenModule::Release() {
if (!getCodeGenOpts().RecordCommandLine.empty())
EmitCommandLineMetadata();
- EmitTargetMetadata();
+ getTargetCodeGenInfo().emitTargetMetadata(*this, MangledDeclNames);
+
+ EmitBackendOptionsMetadata(getCodeGenOpts());
}
void CodeGenModule::EmitOpenCLMetadata() {
@@ -652,6 +677,19 @@ void CodeGenModule::EmitOpenCLMetadata() {
OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
}
+void CodeGenModule::EmitBackendOptionsMetadata(
+ const CodeGenOptions CodeGenOpts) {
+ switch (getTriple().getArch()) {
+ default:
+ break;
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ getModule().addModuleFlag(llvm::Module::Error, "SmallDataLimit",
+ CodeGenOpts.SmallDataLimit);
+ break;
+ }
+}
+
void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
// Make sure that this type is translated.
Types.UpdateCompletedType(TD);
@@ -671,6 +709,19 @@ llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) {
TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) {
if (!TBAA)
return TBAAAccessInfo();
+ if (getLangOpts().CUDAIsDevice) {
+ // As CUDA builtin surface/texture types are replaced, skip generating TBAA
+ // access info.
+ if (AccessType->isCUDADeviceBuiltinSurfaceType()) {
+ if (getTargetCodeGenInfo().getCUDADeviceBuiltinSurfaceDeviceType() !=
+ nullptr)
+ return TBAAAccessInfo();
+ } else if (AccessType->isCUDADeviceBuiltinTextureType()) {
+ if (getTargetCodeGenInfo().getCUDADeviceBuiltinTextureDeviceType() !=
+ nullptr)
+ return TBAAAccessInfo();
+ }
+ }
return TBAA->getAccessInfo(AccessType);
}
@@ -856,7 +907,7 @@ static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
if (isa<llvm::Function>(GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static)
return true;
- // Otherwise don't assue it is local.
+ // Otherwise don't assume it is local.
return false;
}
@@ -912,9 +963,9 @@ static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
.Case("local-exec", llvm::GlobalVariable::LocalExecTLSModel);
}
-static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(
- CodeGenOptions::TLSModel M) {
- switch (M) {
+llvm::GlobalVariable::ThreadLocalMode
+CodeGenModule::GetDefaultLLVMTLSModel() const {
+ switch (CodeGenOpts.getDefaultTLSModel()) {
case CodeGenOptions::GeneralDynamicTLSModel:
return llvm::GlobalVariable::GeneralDynamicTLSModel;
case CodeGenOptions::LocalDynamicTLSModel:
@@ -931,7 +982,7 @@ void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
llvm::GlobalValue::ThreadLocalMode TLM;
- TLM = GetLLVMTLSModel(CodeGenOpts.getDefaultTLSModel());
+ TLM = GetDefaultLLVMTLSModel();
// Override the TLS model if it is explicitly specified.
if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
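The refactor above only exposes the existing default-model mapping as CodeGenModule::GetDefaultLLVMTLSModel(); the per-variable override it feeds into is the tls_model attribute. Illustrative declarations (hypothetical variable names):

    // Uses the module-wide default from -ftls-model (general-dynamic when the
    // flag is absent), i.e. the value GetDefaultLLVMTLSModel() returns:
    thread_local int tls_default_model;

    // The TLSModelAttr branch above overrides that default for this variable:
    thread_local int tls_initial_exec __attribute__((tls_model("initial-exec")));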
@@ -997,23 +1048,19 @@ static std::string getMangledNameImpl(const CodeGenModule &CGM, GlobalDecl GD,
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
MangleContext &MC = CGM.getCXXABI().getMangleContext();
- if (MC.shouldMangleDeclName(ND)) {
- llvm::raw_svector_ostream Out(Buffer);
- if (const auto *D = dyn_cast<CXXConstructorDecl>(ND))
- MC.mangleCXXCtor(D, GD.getCtorType(), Out);
- else if (const auto *D = dyn_cast<CXXDestructorDecl>(ND))
- MC.mangleCXXDtor(D, GD.getDtorType(), Out);
- else
- MC.mangleName(ND, Out);
- } else {
+ if (MC.shouldMangleDeclName(ND))
+ MC.mangleName(GD.getWithDecl(ND), Out);
+ else {
IdentifierInfo *II = ND->getIdentifier();
assert(II && "Attempt to mangle unnamed decl.");
const auto *FD = dyn_cast<FunctionDecl>(ND);
if (FD &&
FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
- llvm::raw_svector_ostream Out(Buffer);
Out << "__regcall3__" << II->getName();
+ } else if (FD && FD->hasAttr<CUDAGlobalAttr>() &&
+ GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
+ Out << "__device_stub__" << II->getName();
} else {
Out << II->getName();
}
@@ -1036,7 +1083,7 @@ static std::string getMangledNameImpl(const CodeGenModule &CGM, GlobalDecl GD,
}
}
- return Out.str();
+ return std::string(Out.str());
}
void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
@@ -1101,11 +1148,25 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
const auto *ND = cast<NamedDecl>(GD.getDecl());
std::string MangledName = getMangledNameImpl(*this, GD, ND);
- // Adjust kernel stub mangling as we may need to be able to differentiate
- // them from the kernel itself (e.g., for HIP).
- if (auto *FD = dyn_cast<FunctionDecl>(GD.getDecl()))
- if (!getLangOpts().CUDAIsDevice && FD->hasAttr<CUDAGlobalAttr>())
- MangledName = getCUDARuntime().getDeviceStubName(MangledName);
+ // Ensure that either the host and device compilations use different C++
+ // ABIs (say, the host compilation follows the MSVC ABI while the device
+ // compilation follows the Itanium C++ ABI), or, if they follow the same
+ // ABI, that mangled kernel names are identical after name stubbing. The
+ // latter check is important because the device kernel name mangled during
+ // host compilation is used to resolve which device binary to execute;
+ // inconsistent naming results in undefined behavior. Although we cannot
+ // compare the names between the host and device compilations directly,
+ // doing both the host and device mangling in the host compilation helps
+ // catch some mismatches.
+ assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() ||
+ getLangOpts().CUDAIsDevice ||
+ (getContext().getAuxTargetInfo() &&
+ (getContext().getAuxTargetInfo()->getCXXABI() !=
+ getContext().getTargetInfo().getCXXABI())) ||
+ getCUDARuntime().getDeviceSideName(ND) ==
+ getMangledNameImpl(
+ *this,
+ GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel),
+ ND));
auto Result = Manglings.insert(std::make_pair(MangledName, GD));
return MangledDeclNames[CanonicalGD] = Result.first->first();
@@ -1357,7 +1418,7 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
std::string typeName;
if (isPipe)
typeName = ty.getCanonicalType()
- ->getAs<PipeType>()
+ ->castAs<PipeType>()
->getElementType()
.getAsString(Policy);
else
@@ -1371,7 +1432,7 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
std::string baseTypeName;
if (isPipe)
baseTypeName = ty.getCanonicalType()
- ->getAs<PipeType>()
+ ->castAs<PipeType>()
->getElementType()
.getCanonicalType()
.getAsString(Policy);
@@ -1493,6 +1554,9 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
if (CodeGenOpts.UnwindTables)
B.addAttribute(llvm::Attribute::UWTable);
+ if (CodeGenOpts.StackClashProtector)
+ B.addAttribute("probe-stack", "inline-asm");
+
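A rough picture of what the new function attribute buys, assuming the driver flag is -fstack-clash-protection (hypothetical function and file names):

    // clang++ -fstack-clash-protection -S probe.cpp
    // With "probe-stack"="inline-asm" on the function, the backend touches the
    // frame page by page instead of moving the stack pointer past the guard
    // page in one large adjustment.
    void fill(char *dst) {
      char buf[1 << 16];                     // 64 KiB frame, bigger than a guard page
      __builtin_memset(buf, 0, sizeof buf);
      __builtin_memcpy(dst, buf, sizeof buf);
    }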
if (!hasUnwindExceptions(LangOpts))
B.addAttribute(llvm::Attribute::NoUnwind);
@@ -1840,9 +1904,16 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
else if (const auto *SA = FD->getAttr<SectionAttr>())
F->setSection(SA->getName());
+ // If we plan on emitting this inline builtin, we can't treat it as a builtin.
if (FD->isInlineBuiltinDeclaration()) {
- F->addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoBuiltin);
+ const FunctionDecl *FDBody;
+ bool HasBody = FD->hasBody(FDBody);
+ (void)HasBody;
+ assert(HasBody && "Inline builtin declarations should always have an "
+ "available body!");
+ if (shouldEmitFunction(FDBody))
+ F->addAttribute(llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::NoBuiltin);
}
if (FD->isReplaceableGlobalAllocationFunction()) {
@@ -1850,15 +1921,6 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
// default, only if it is invoked by a new-expression or delete-expression.
F->addAttribute(llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoBuiltin);
-
- // A sane operator new returns a non-aliasing pointer.
- // FIXME: Also add NonNull attribute to the return value
- // for the non-nothrow forms?
- auto Kind = FD->getDeclName().getCXXOverloadedOperator();
- if (getCodeGenOpts().AssumeSaneOperatorNew &&
- (Kind == OO_New || Kind == OO_Array_New))
- F->addAttribute(llvm::AttributeList::ReturnIndex,
- llvm::Attribute::NoAlias);
}
if (isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD))
@@ -2375,13 +2437,8 @@ bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
return true;
}
-ConstantAddress CodeGenModule::GetAddrOfUuidDescriptor(
- const CXXUuidofExpr* E) {
- // Sema has verified that IIDSource has a __declspec(uuid()), and that its
- // well-formed.
- StringRef Uuid = E->getUuidStr();
- std::string Name = "_GUID_" + Uuid.lower();
- std::replace(Name.begin(), Name.end(), '-', '_');
+ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
+ StringRef Name = getMangledName(GD);
// The UUID descriptor should be pointer aligned.
CharUnits Alignment = CharUnits::fromQuantity(PointerAlignInBytes);
@@ -2390,8 +2447,30 @@ ConstantAddress CodeGenModule::GetAddrOfUuidDescriptor(
if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
return ConstantAddress(GV, Alignment);
- llvm::Constant *Init = EmitUuidofInitializer(Uuid);
- assert(Init && "failed to initialize as constant");
+ ConstantEmitter Emitter(*this);
+ llvm::Constant *Init;
+
+ APValue &V = GD->getAsAPValue();
+ if (!V.isAbsent()) {
+ // If possible, emit the APValue version of the initializer. In particular,
+ // this gets the type of the constant right.
+ Init = Emitter.emitForInitializer(
+ GD->getAsAPValue(), GD->getType().getAddressSpace(), GD->getType());
+ } else {
+ // As a fallback, directly construct the constant.
+ // FIXME: This may get padding wrong under esoteric struct layout rules.
+ // MSVC appears to create a complete type 'struct __s_GUID' that it
+ // presumably uses to represent these constants.
+ MSGuidDecl::Parts Parts = GD->getParts();
+ llvm::Constant *Fields[4] = {
+ llvm::ConstantInt::get(Int32Ty, Parts.Part1),
+ llvm::ConstantInt::get(Int16Ty, Parts.Part2),
+ llvm::ConstantInt::get(Int16Ty, Parts.Part3),
+ llvm::ConstantDataArray::getRaw(
+ StringRef(reinterpret_cast<char *>(Parts.Part4And5), 8), 8,
+ Int8Ty)};
+ Init = llvm::ConstantStruct::getAnon(Fields);
+ }
auto *GV = new llvm::GlobalVariable(
getModule(), Init->getType(),
@@ -2399,7 +2478,16 @@ ConstantAddress CodeGenModule::GetAddrOfUuidDescriptor(
if (supportsCOMDAT())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
setDSOLocal(GV);
- return ConstantAddress(GV, Alignment);
+
+ llvm::Constant *Addr = GV;
+ if (!V.isAbsent()) {
+ Emitter.finalize(GV);
+ } else {
+ llvm::Type *Ty = getTypes().ConvertTypeForMem(GD->getType());
+ Addr = llvm::ConstantExpr::getBitCast(
+ GV, Ty->getPointerTo(GV->getAddressSpace()));
+ }
+ return ConstantAddress(Addr, Alignment);
}
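At the source level nothing changes for users of __uuidof; the difference is that each uuid string now has an MSGuidDecl behind it, so the initializer can usually be emitted from its APValue. A small sketch, assuming -fms-extensions; the _GUID definition is a simplified stand-in for the one normally supplied by <guiddef.h>:

    // Simplified stand-in for the Windows _GUID record:
    struct _GUID {
      unsigned int Data1;
      unsigned short Data2;
      unsigned short Data3;
      unsigned char Data4[8];
    };

    struct __declspec(uuid("12345678-1234-1234-1234-1234567890ab")) Widget {};

    // Binds to the pointer-aligned {i32, i16, i16, [8 x i8]} global produced
    // by GetAddrOfMSGuidDecl above.
    const _GUID &WidgetId = __uuidof(Widget);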
ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
@@ -2461,7 +2549,8 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
!Global->hasAttr<CUDAGlobalAttr>() &&
!Global->hasAttr<CUDAConstantAttr>() &&
!Global->hasAttr<CUDASharedAttr>() &&
- !(LangOpts.HIP && Global->hasAttr<HIPPinnedShadowAttr>()))
+ !Global->getType()->isCUDADeviceBuiltinSurfaceType() &&
+ !Global->getType()->isCUDADeviceBuiltinTextureType())
return;
} else {
// We need to emit host-side 'shadows' for all global
@@ -2554,11 +2643,6 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
return;
}
- // Check if this must be emitted as declare variant.
- if (LangOpts.OpenMP && isa<FunctionDecl>(Global) && OpenMPRuntime &&
- OpenMPRuntime->emitDeclareVariant(GD, /*IsForDefinition=*/false))
- return;
-
// If we're deferring emission of a C++ variable with an
// initializer, remember the order in which it appeared in the file.
if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
@@ -2741,8 +2825,8 @@ bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
// PR9614. Avoid cases where the source code is lying to us. An available
// externally function should have an equivalent function somewhere else,
- // but a function that calls itself is clearly not equivalent to the real
- // implementation.
+ // but a function that calls itself through asm label/`__builtin_` trickery is
+ // clearly not equivalent to the real implementation.
// This happens in glibc's btowc and in some configure checks.
return !isTriviallyRecursive(F);
}
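A simplified sketch of the glibc-style pattern the comment alludes to (names and the asm label are hypothetical): a wrapper whose fallback call is redirected back to the wrapper's own symbol, which makes an available_externally emission look self-recursive:

    extern "C" int btowc_redirect(int c) __asm__("btowc");  // asm-label redirect

    extern "C" inline __attribute__((gnu_inline, always_inline)) int btowc(int c) {
      // If CodeGen emitted this body available_externally, the call below would
      // lower to a call to the symbol "btowc" -- i.e. to itself -- which is
      // exactly what isTriviallyRecursive() is used to detect and skip.
      return btowc_redirect(c);
    }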
@@ -2764,50 +2848,6 @@ void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
EmitGlobalFunctionDefinition(GD, GV);
}
-void CodeGenModule::emitOpenMPDeviceFunctionRedefinition(
- GlobalDecl OldGD, GlobalDecl NewGD, llvm::GlobalValue *GV) {
- assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
- OpenMPRuntime && "Expected OpenMP device mode.");
- const auto *D = cast<FunctionDecl>(OldGD.getDecl());
-
- // Compute the function info and LLVM type.
- const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(OldGD);
- llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
-
- // Get or create the prototype for the function.
- if (!GV || (GV->getType()->getElementType() != Ty)) {
- GV = cast<llvm::GlobalValue>(GetOrCreateLLVMFunction(
- getMangledName(OldGD), Ty, GlobalDecl(), /*ForVTable=*/false,
- /*DontDefer=*/true, /*IsThunk=*/false, llvm::AttributeList(),
- ForDefinition));
- SetFunctionAttributes(OldGD, cast<llvm::Function>(GV),
- /*IsIncompleteFunction=*/false,
- /*IsThunk=*/false);
- }
- // We need to set linkage and visibility on the function before
- // generating code for it because various parts of IR generation
- // want to propagate this information down (e.g. to local static
- // declarations).
- auto *Fn = cast<llvm::Function>(GV);
- setFunctionLinkage(OldGD, Fn);
-
- // FIXME: this is redundant with part of
- // setFunctionDefinitionAttributes
- setGVProperties(Fn, OldGD);
-
- MaybeHandleStaticInExternC(D, Fn);
-
- maybeSetTrivialComdat(*D, *Fn);
-
- CodeGenFunction(*this).GenerateCode(NewGD, Fn, FI);
-
- setNonAliasAttributes(OldGD, Fn);
- SetLLVMFunctionAttributesForDefinition(D, Fn);
-
- if (D->hasAttr<AnnotateAttr>())
- AddGlobalAnnotations(D, Fn);
-}
-
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
const auto *D = cast<ValueDecl>(GD.getDecl());
@@ -3122,14 +3162,9 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
EmitGlobal(GDDef);
}
}
- // Check if this must be emitted as declare variant and emit reference to
- // the the declare variant function.
- if (LangOpts.OpenMP && OpenMPRuntime)
- (void)OpenMPRuntime->emitDeclareVariant(GD, /*IsForDefinition=*/true);
if (FD->isMultiVersion()) {
- const auto *TA = FD->getAttr<TargetAttr>();
- if (TA && TA->isDefaultVersion())
+ if (FD->hasAttr<TargetAttr>())
UpdateMultiVersionNames(GD, FD);
if (!IsForDefinition)
return GetOrCreateMultiVersionResolver(GD, Ty, FD);
@@ -3169,7 +3204,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
}
if ((isa<llvm::Function>(Entry) || isa<llvm::GlobalAlias>(Entry)) &&
- (Entry->getType()->getElementType() == Ty)) {
+ (Entry->getValueType() == Ty)) {
return Entry;
}
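The switch from getType()->getElementType() to getValueType(), repeated throughout this diff, is a mechanical step toward opaque pointers: the value type of a global is stored on the global itself, so it stays available once pointer types stop carrying a pointee. A standalone sketch (hypothetical module and function names):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include <cassert>

    int main() {
      llvm::LLVMContext Ctx;
      llvm::Module M("value-type-demo", Ctx);
      llvm::FunctionType *FTy =
          llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), /*isVarArg=*/false);
      llvm::Function *F = llvm::Function::Create(
          FTy, llvm::Function::ExternalLinkage, "f", &M);

      // Both expressions name the same type today; only the first survives
      // the move to opaque pointers.
      assert(F->getValueType() == FTy);
      assert(F->getType()->getElementType() == FTy);
      return 0;
    }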
@@ -3218,7 +3253,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
}
llvm::Constant *BC = llvm::ConstantExpr::getBitCast(
- F, Entry->getType()->getElementType()->getPointerTo());
+ F, Entry->getValueType()->getPointerTo());
addGlobalValReplacement(Entry, BC);
}
@@ -3277,7 +3312,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
// Make sure the result is of the requested type.
if (!IsIncompleteFunction) {
- assert(F->getType()->getElementType() == Ty);
+ assert(F->getFunctionType() == Ty);
return F;
}
@@ -3293,6 +3328,8 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
bool ForVTable,
bool DontDefer,
ForDefinition_t IsForDefinition) {
+ assert(!cast<FunctionDecl>(GD.getDecl())->isConsteval() &&
+ "consteval function should never be emitted");
// If there was no specific requested type, just convert it now.
if (!Ty) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
@@ -3568,7 +3605,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
llvm::Constant *Init = emitter.tryEmitForInitializer(*InitDecl);
if (Init) {
auto *InitType = Init->getType();
- if (GV->getType()->getElementType() != InitType) {
+ if (GV->getValueType() != InitType) {
// The type of the initializer does not match the definition.
// This happens when an initializer has a different type from
// the type of the global (because of padding at the end of a
@@ -3611,26 +3648,29 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
}
llvm::Constant *
-CodeGenModule::GetAddrOfGlobal(GlobalDecl GD,
- ForDefinition_t IsForDefinition) {
+CodeGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) {
const Decl *D = GD.getDecl();
+
if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr,
/*DontDefer=*/false, IsForDefinition);
- else if (isa<CXXMethodDecl>(D)) {
- auto FInfo = &getTypes().arrangeCXXMethodDeclaration(
- cast<CXXMethodDecl>(D));
+
+ if (isa<CXXMethodDecl>(D)) {
+ auto FInfo =
+ &getTypes().arrangeCXXMethodDeclaration(cast<CXXMethodDecl>(D));
auto Ty = getTypes().GetFunctionType(*FInfo);
return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
IsForDefinition);
- } else if (isa<FunctionDecl>(D)) {
+ }
+
+ if (isa<FunctionDecl>(D)) {
const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
IsForDefinition);
- } else
- return GetAddrOfGlobalVar(cast<VarDecl>(D), /*Ty=*/nullptr,
- IsForDefinition);
+ }
+
+ return GetAddrOfGlobalVar(cast<VarDecl>(D), /*Ty=*/nullptr, IsForDefinition);
}
llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
@@ -3641,7 +3681,7 @@ llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
if (GV) {
// Check if the variable has the right type.
- if (GV->getType()->getElementType() == Ty)
+ if (GV->getValueType() == Ty)
return GV;
// Because C++ name mangling, the only way we can end up with an already
@@ -3915,12 +3955,16 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
!getLangOpts().CUDAIsDevice &&
(D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
D->hasAttr<CUDASharedAttr>());
+ bool IsCUDADeviceShadowVar =
+ getLangOpts().CUDAIsDevice &&
+ (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
+ D->getType()->isCUDADeviceBuiltinTextureType());
// HIP pinned shadow of initialized host-side global variables are also
// left undefined.
- bool IsHIPPinnedShadowVar =
- getLangOpts().CUDAIsDevice && D->hasAttr<HIPPinnedShadowAttr>();
if (getLangOpts().CUDA &&
- (IsCUDASharedVar || IsCUDAShadowVar || IsHIPPinnedShadowVar))
+ (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar))
+ Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
+ else if (D->hasAttr<LoaderUninitializedAttr>())
Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
else if (!InitExpr) {
// This is a tentative definition; tentative definitions are
@@ -3979,7 +4023,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// "extern int x[];") and then a definition of a different type (e.g.
// "int x[10];"). This also happens when an initializer has a different type
// from the type of the global (this happens with unions).
- if (!GV || GV->getType()->getElementType() != InitType ||
+ if (!GV || GV->getValueType() != InitType ||
GV->getType()->getAddressSpace() !=
getContext().getTargetAddressSpace(GetGlobalVarAddressSpace(D))) {
@@ -4026,34 +4070,56 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// global variables become internal definitions. These have to
// be internal in order to prevent name conflicts with global
// host variables with the same name in a different TUs.
- if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
- D->hasAttr<HIPPinnedShadowAttr>()) {
+ if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>()) {
Linkage = llvm::GlobalValue::InternalLinkage;
-
- // Shadow variables and their properties must be registered
- // with CUDA runtime.
- unsigned Flags = 0;
- if (!D->hasDefinition())
- Flags |= CGCUDARuntime::ExternDeviceVar;
- if (D->hasAttr<CUDAConstantAttr>())
- Flags |= CGCUDARuntime::ConstantDeviceVar;
- // Extern global variables will be registered in the TU where they are
- // defined.
+ // Shadow variables and their properties must be registered with CUDA
+ // runtime. Skip Extern global variables, which will be registered in
+ // the TU where they are defined.
if (!D->hasExternalStorage())
- getCUDARuntime().registerDeviceVar(D, *GV, Flags);
- } else if (D->hasAttr<CUDASharedAttr>())
+ getCUDARuntime().registerDeviceVar(D, *GV, !D->hasDefinition(),
+ D->hasAttr<CUDAConstantAttr>());
+ } else if (D->hasAttr<CUDASharedAttr>()) {
// __shared__ variables are odd. Shadows do get created, but
// they are not registered with the CUDA runtime, so they
// can't really be used to access their device-side
// counterparts. It's not clear yet whether it's nvcc's bug or
// a feature, but we've got to do the same for compatibility.
Linkage = llvm::GlobalValue::InternalLinkage;
+ } else if (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
+ D->getType()->isCUDADeviceBuiltinTextureType()) {
+ // Builtin surfaces and textures and their template arguments are
+ // also registered with CUDA runtime.
+ Linkage = llvm::GlobalValue::InternalLinkage;
+ const ClassTemplateSpecializationDecl *TD =
+ cast<ClassTemplateSpecializationDecl>(
+ D->getType()->getAs<RecordType>()->getDecl());
+ const TemplateArgumentList &Args = TD->getTemplateArgs();
+ if (TD->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>()) {
+ assert(Args.size() == 2 &&
+ "Unexpected number of template arguments of CUDA device "
+ "builtin surface type.");
+ auto SurfType = Args[1].getAsIntegral();
+ if (!D->hasExternalStorage())
+ getCUDARuntime().registerDeviceSurf(D, *GV, !D->hasDefinition(),
+ SurfType.getSExtValue());
+ } else {
+ assert(Args.size() == 3 &&
+ "Unexpected number of template arguments of CUDA device "
+ "builtin texture type.");
+ auto TexType = Args[1].getAsIntegral();
+ auto Normalized = Args[2].getAsIntegral();
+ if (!D->hasExternalStorage())
+ getCUDARuntime().registerDeviceTex(D, *GV, !D->hasDefinition(),
+ TexType.getSExtValue(),
+ Normalized.getZExtValue());
+ }
+ }
}
}
- if (!IsHIPPinnedShadowVar)
- GV->setInitializer(Init);
- if (emitter) emitter->finalize(GV);
+ GV->setInitializer(Init);
+ if (emitter)
+ emitter->finalize(GV);
// If it is safe to mark the global 'constant', do so now.
GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
@@ -4068,17 +4134,24 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());
- // On Darwin, if the normal linkage of a C++ thread_local variable is
- // LinkOnce or Weak, we keep the normal linkage to prevent multiple
- // copies within a linkage unit; otherwise, the backing variable has
- // internal linkage and all accesses should just be calls to the
- // Itanium-specified entry point, which has the normal linkage of the
- // variable. This is to preserve the ability to change the implementation
- // behind the scenes.
- if (!D->isStaticLocal() && D->getTLSKind() == VarDecl::TLS_Dynamic &&
+ // On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper
+ // function is only defined alongside the variable, not also alongside
+ // callers. Normally, all accesses to a thread_local go through the
+ // thread-wrapper in order to ensure initialization has occurred, so the
+ // underlying variable will never be used by anything other than the
+ // thread-wrapper and can be converted to internal linkage.
+ //
+ // However, if the variable has the 'constinit' attribute, it _can_ be
+ // referenced directly, without calling the thread-wrapper, so the linkage
+ // must not be changed.
+ //
+ // Additionally, if the variable isn't plain external linkage, e.g. if it's
+ // weak or linkonce, the de-duplication semantics are important to preserve,
+ // so we don't change the linkage.
+ if (D->getTLSKind() == VarDecl::TLS_Dynamic &&
+ Linkage == llvm::GlobalValue::ExternalLinkage &&
Context.getTargetInfo().getTriple().isOSDarwin() &&
- !llvm::GlobalVariable::isLinkOnceLinkage(Linkage) &&
- !llvm::GlobalVariable::isWeakLinkage(Linkage))
+ !D->hasAttr<ConstInitAttr>())
Linkage = llvm::GlobalValue::InternalLinkage;
GV->setLinkage(Linkage);
@@ -4421,11 +4494,6 @@ void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
llvm::GlobalValue *GV) {
- // Check if this must be emitted as declare variant.
- if (LangOpts.OpenMP && OpenMPRuntime &&
- OpenMPRuntime->emitDeclareVariant(GD, /*IsForDefinition=*/true))
- return;
-
const auto *D = cast<FunctionDecl>(GD.getDecl());
// Compute the function info and LLVM type.
@@ -4433,7 +4501,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
// Get or create the prototype for the function.
- if (!GV || (GV->getType()->getElementType() != Ty))
+ if (!GV || (GV->getValueType() != Ty))
GV = cast<llvm::GlobalValue>(GetAddrOfFunction(GD, Ty, /*ForVTable=*/false,
/*DontDefer=*/true,
ForDefinition));
@@ -4457,7 +4525,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
maybeSetTrivialComdat(*D, *Fn);
- CodeGenFunction(*this).GenerateCode(D, Fn, FI);
+ CodeGenFunction(*this).GenerateCode(GD, Fn, FI);
setNonAliasAttributes(GD, Fn);
SetLLVMFunctionAttributesForDefinition(D, Fn);
@@ -4509,8 +4577,9 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
}
// Create the new alias itself, but don't set a name yet.
+ unsigned AS = Aliasee->getType()->getPointerAddressSpace();
auto *GA =
- llvm::GlobalAlias::create(DeclTy, 0, LT, "", Aliasee, &getModule());
+ llvm::GlobalAlias::create(DeclTy, AS, LT, "", Aliasee, &getModule());
if (Entry) {
if (GA->getAliasee() == Entry) {
@@ -5258,6 +5327,11 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
if (D->isTemplated())
return;
+ // Consteval function shouldn't be emitted.
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isConsteval())
+ return;
+
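An immediate function never needs a symbol: every call to it must be a constant expression and is folded before CodeGen runs, which is what the early return above makes explicit. A C++20 illustration:

    // No definition of 'square' is ever emitted; only the folded value is used.
    consteval int square(int n) { return n * n; }
    int sixteen = square(4);   // initializer becomes the constant 16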
switch (D->getKind()) {
case Decl::CXXConversion:
case Decl::CXXMethod:
@@ -5293,17 +5367,17 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
break;
case Decl::ClassTemplateSpecialization: {
const auto *Spec = cast<ClassTemplateSpecializationDecl>(D);
- if (DebugInfo &&
- Spec->getSpecializationKind() == TSK_ExplicitInstantiationDefinition &&
- Spec->hasDefinition())
- DebugInfo->completeTemplateDefinition(*Spec);
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ if (Spec->getSpecializationKind() ==
+ TSK_ExplicitInstantiationDefinition &&
+ Spec->hasDefinition())
+ DI->completeTemplateDefinition(*Spec);
} LLVM_FALLTHROUGH;
case Decl::CXXRecord:
- if (DebugInfo) {
+ if (CGDebugInfo *DI = getModuleDebugInfo())
if (auto *ES = D->getASTContext().getExternalSource())
if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never)
- DebugInfo->completeUnusedClass(cast<CXXRecordDecl>(*D));
- }
+ DI->completeUnusedClass(cast<CXXRecordDecl>(*D));
// Emit any static data members, they may be definitions.
for (auto *I : cast<CXXRecordDecl>(D)->decls())
if (isa<VarDecl>(I) || isa<CXXRecordDecl>(I))
@@ -5324,15 +5398,15 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::Using: // using X; [C++]
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitUsingDecl(cast<UsingDecl>(*D));
- return;
+ break;
case Decl::NamespaceAlias:
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(*D));
- return;
+ break;
case Decl::UsingDirective: // using namespace X; [C++]
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitUsingDirective(cast<UsingDirectiveDecl>(*D));
- return;
+ break;
case Decl::CXXConstructor:
getCXXABI().EmitCXXConstructors(cast<CXXConstructorDecl>(D));
break;
@@ -5515,10 +5589,10 @@ void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) {
case Decl::CXXConstructor:
case Decl::CXXDestructor: {
if (!cast<FunctionDecl>(D)->doesThisDeclarationHaveABody())
- return;
+ break;
SourceManager &SM = getContext().getSourceManager();
if (LimitedCoverage && SM.getMainFileID() != SM.getFileID(D->getBeginLoc()))
- return;
+ break;
auto I = DeferredEmptyCoverageMappingDecls.find(D);
if (I == DeferredEmptyCoverageMappingDecls.end())
DeferredEmptyCoverageMappingDecls[D] = true;
@@ -5584,6 +5658,17 @@ void CodeGenModule::EmitDeferredUnusedCoverageMappings() {
}
}
+void CodeGenModule::EmitMainVoidAlias() {
+ // In order to transition away from "__original_main" gracefully, emit an
+ // alias for "main" in the no-argument case so that libc can detect when
+ // new-style no-argument main is in use.
+ if (llvm::Function *F = getModule().getFunction("main")) {
+ if (!F->isDeclaration() && F->arg_size() == 0 && !F->isVarArg() &&
+ F->getReturnType()->isIntegerTy(Context.getTargetInfo().getIntWidth()))
+ addUsedGlobal(llvm::GlobalAlias::create("__main_void", F));
+ }
+}
+
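The alias lets a WebAssembly libc distinguish the zero-argument form of main from the two-argument one without the older __original_main hand-off. Roughly, for a wasm, non-Emscripten target (assumed target, illustrative source):

    // int-returning, zero-argument main: the check above creates an alias
    // named "__main_void" for it and records the alias in llvm.used, so the
    // startup code can call it without synthesizing argc/argv.
    int main() {
      return 0;
    }
    // A main(int argc, char **argv) definition would fail the arg_size() == 0
    // test, so no alias is emitted and libc falls back to its argv setup.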
/// Turns the given pointer into a constant.
static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
const void *Ptr) {
@@ -5698,21 +5783,6 @@ void CodeGenModule::EmitCommandLineMetadata() {
CommandLineMetadata->addOperand(llvm::MDNode::get(Ctx, CommandLineNode));
}
-void CodeGenModule::EmitTargetMetadata() {
- // Warning, new MangledDeclNames may be appended within this loop.
- // We rely on MapVector insertions adding new elements to the end
- // of the container.
- // FIXME: Move this loop into the one target that needs it, and only
- // loop over those declarations for which we couldn't emit the target
- // metadata when we emitted the declaration.
- for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
- auto Val = *(MangledDeclNames.begin() + I);
- const Decl *D = Val.first.getDecl()->getMostRecentDecl();
- llvm::GlobalValue *GV = GetGlobalValue(Val.second);
- getTargetCodeGenInfo().emitTargetMD(D, GV, *this);
- }
-}
-
void CodeGenModule::EmitCoverageFile() {
if (getCodeGenOpts().CoverageDataFile.empty() &&
getCodeGenOpts().CoverageNotesFile.empty())
@@ -5735,39 +5805,14 @@ void CodeGenModule::EmitCoverageFile() {
}
}
-llvm::Constant *CodeGenModule::EmitUuidofInitializer(StringRef Uuid) {
- // Sema has checked that all uuid strings are of the form
- // "12345678-1234-1234-1234-1234567890ab".
- assert(Uuid.size() == 36);
- for (unsigned i = 0; i < 36; ++i) {
- if (i == 8 || i == 13 || i == 18 || i == 23) assert(Uuid[i] == '-');
- else assert(isHexDigit(Uuid[i]));
- }
-
- // The starts of all bytes of Field3 in Uuid. Field 3 is "1234-1234567890ab".
- const unsigned Field3ValueOffsets[8] = { 19, 21, 24, 26, 28, 30, 32, 34 };
-
- llvm::Constant *Field3[8];
- for (unsigned Idx = 0; Idx < 8; ++Idx)
- Field3[Idx] = llvm::ConstantInt::get(
- Int8Ty, Uuid.substr(Field3ValueOffsets[Idx], 2), 16);
-
- llvm::Constant *Fields[4] = {
- llvm::ConstantInt::get(Int32Ty, Uuid.substr(0, 8), 16),
- llvm::ConstantInt::get(Int16Ty, Uuid.substr(9, 4), 16),
- llvm::ConstantInt::get(Int16Ty, Uuid.substr(14, 4), 16),
- llvm::ConstantArray::get(llvm::ArrayType::get(Int8Ty, 8), Field3)
- };
-
- return llvm::ConstantStruct::getAnon(Fields);
-}
-
llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
bool ForEH) {
// Return a bogus pointer if RTTI is disabled, unless it's for EH.
// FIXME: should we even be calling this method if RTTI is disabled
// and it's not for EH?
- if ((!ForEH && !getLangOpts().RTTI) || getLangOpts().CUDAIsDevice)
+ if ((!ForEH && !getLangOpts().RTTI) || getLangOpts().CUDAIsDevice ||
+ (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
+ getTriple().isNVPTX()))
return llvm::Constant::getNullValue(Int8PtrTy);
if (ForEH && Ty->isObjCObjectPointerType() &&
@@ -5911,3 +5956,99 @@ CodeGenModule::createOpenCLIntToSamplerConversion(const Expr *E,
"__translate_sampler_initializer"),
{C});
}
+
+CharUnits CodeGenModule::getNaturalPointeeTypeAlignment(
+ QualType T, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) {
+ return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
+ /* forPointeeType= */ true);
+}
+
+CharUnits CodeGenModule::getNaturalTypeAlignment(QualType T,
+ LValueBaseInfo *BaseInfo,
+ TBAAAccessInfo *TBAAInfo,
+ bool forPointeeType) {
+ if (TBAAInfo)
+ *TBAAInfo = getTBAAAccessInfo(T);
+
+ // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But
+ // that doesn't return the information we need to compute BaseInfo.
+
+ // Honor alignment typedef attributes even on incomplete types.
+ // We also honor them straight for C++ class types, even as pointees;
+ // there's an expressivity gap here.
+ if (auto TT = T->getAs<TypedefType>()) {
+ if (auto Align = TT->getDecl()->getMaxAlignment()) {
+ if (BaseInfo)
+ *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
+ return getContext().toCharUnitsFromBits(Align);
+ }
+ }
+
+ bool AlignForArray = T->isArrayType();
+
+ // Analyze the base element type, so we don't get confused by incomplete
+ // array types.
+ T = getContext().getBaseElementType(T);
+
+ if (T->isIncompleteType()) {
+ // We could try to replicate the logic from
+ // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the
+ // type is incomplete, so it's impossible to test. We could try to reuse
+ // getTypeAlignIfKnown, but that doesn't return the information we need
+ // to set BaseInfo. So just ignore the possibility that the alignment is
+ // greater than one.
+ if (BaseInfo)
+ *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
+ return CharUnits::One();
+ }
+
+ if (BaseInfo)
+ *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
+
+ CharUnits Alignment;
+ // For C++ class pointees, we don't know whether we're pointing at a
+ // base or a complete object, so we generally need to use the
+ // non-virtual alignment.
+ const CXXRecordDecl *RD;
+ if (forPointeeType && !AlignForArray && (RD = T->getAsCXXRecordDecl())) {
+ Alignment = getClassPointerAlignment(RD);
+ } else {
+ Alignment = getContext().getTypeAlignInChars(T);
+ if (T.getQualifiers().hasUnaligned())
+ Alignment = CharUnits::One();
+ }
+
+ // Cap to the global maximum type alignment unless the alignment
+ // was somehow explicit on the type.
+ if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
+ if (Alignment.getQuantity() > MaxAlign &&
+ !getContext().isAlignmentRequired(T))
+ Alignment = CharUnits::fromQuantity(MaxAlign);
+ }
+ return Alignment;
+}
+
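The typedef case is the one subtlety here: an alignment attribute attached to a typedef is honored even when the underlying type is incomplete, which ASTContext::getTypeAlignIfKnown cannot report. Illustrative declarations (hypothetical names):

    struct Opaque;   // incomplete: no size or natural alignment known here

    // The attribute rides on the TypedefType, so getNaturalTypeAlignment can
    // still report 64-byte alignment for a dereference of an AlignedOpaque*.
    typedef struct Opaque AlignedOpaque __attribute__((aligned(64)));

    void consume(AlignedOpaque *p);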
+bool CodeGenModule::stopAutoInit() {
+ unsigned StopAfter = getContext().getLangOpts().TrivialAutoVarInitStopAfter;
+ if (StopAfter) {
+ // This number is positive only when -ftrivial-auto-var-init-stop-after=* is
+ // used
+ if (NumAutoVarInit >= StopAfter) {
+ return true;
+ }
+ if (!NumAutoVarInit) {
+ unsigned DiagID = getDiags().getCustomDiagID(
+ DiagnosticsEngine::Warning,
+ "-ftrivial-auto-var-init-stop-after=%0 has been enabled to limit the "
+ "number of times ftrivial-auto-var-init=%1 gets applied.");
+ getDiags().Report(DiagID)
+ << StopAfter
+ << (getContext().getLangOpts().getTrivialAutoVarInit() ==
+ LangOptions::TrivialAutoVarInitKind::Zero
+ ? "zero"
+ : "pattern");
+ }
+ ++NumAutoVarInit;
+ }
+ return false;
+}
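A small illustration of the knob this implements, assuming the driver spellings -ftrivial-auto-var-init=pattern and -ftrivial-auto-var-init-stop-after=1 (hypothetical file and function names):

    // clang++ -ftrivial-auto-var-init=pattern \
    //         -ftrivial-auto-var-init-stop-after=1 -c demo.cpp
    int demo() {
      int first;    // auto-init #1: pattern-initialized; the warning above is
                    // emitted once, when the counter is first consulted
      int second;   // past the stop-after limit: not auto-initialized
      return first + second;   // values are indeterminate either way -- the
                               // option is a mitigation, not a language change
    }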