author    Dimitry Andric <dim@FreeBSD.org>  2019-08-20 20:50:49 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2019-08-20 20:50:49 +0000
commit    2298981669bf3bd63335a4be179bc0f96823a8f4 (patch)
tree      1cbe2eb27f030d2d70b80ee5ca3c86bee7326a9f /lib/CodeGen/CodeGenModule.cpp
parent    9a83721404652cea39e9f02ae3e3b5c964602a5c (diff)
Diffstat (limited to 'lib/CodeGen/CodeGenModule.cpp')
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp | 577
1 file changed, 444 insertions(+), 133 deletions(-)
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 244738042cef0..6ff72ec045e62 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -1,9 +1,8 @@
//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -34,6 +33,7 @@
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/CodeGenOptions.h"
@@ -47,17 +47,18 @@
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/ProfileSummary.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
+#include "llvm/Support/TimeProfiler.h"
using namespace clang;
using namespace CodeGen;
@@ -409,6 +410,10 @@ void CodeGenModule::Release() {
AddGlobalCtor(CudaCtorFunction);
}
if (OpenMPRuntime) {
+ if (llvm::Function *OpenMPRequiresDirectiveRegFun =
+ OpenMPRuntime->emitRequiresDirectiveRegFun()) {
+ AddGlobalCtor(OpenMPRequiresDirectiveRegFun, 0);
+ }
if (llvm::Function *OpenMPRegistrationFunction =
OpenMPRuntime->emitRegistrationFunction()) {
auto ComdatKey = OpenMPRegistrationFunction->hasComdat() ?
@@ -418,7 +423,9 @@ void CodeGenModule::Release() {
OpenMPRuntime->clear();
}
if (PGOReader) {
- getModule().setProfileSummary(PGOReader->getSummary().getMD(VMContext));
+ getModule().setProfileSummary(
+ PGOReader->getSummary(/* UseCS */ false).getMD(VMContext),
+ llvm::ProfileSummary::PSK_Instr);
if (PGOStats.hasDiagnostics())
PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
}
@@ -443,6 +450,24 @@ void CodeGenModule::Release() {
EmitModuleLinkOptions();
}
+ // On ELF we pass the dependent library specifiers directly to the linker
+ // without manipulating them. This is in contrast to other platforms where
+ // they are mapped to a specific linker option by the compiler. This
+ // difference is a result of the greater variety of ELF linkers and the fact
+ // that ELF linkers tend to handle libraries in a more complicated fashion
+ // than on other platforms. This forces us to defer handling the dependent
+ // libs to the linker.
+ //
+ // CUDA/HIP device and host libraries are different. Currently there is no
+ // way to differentiate dependent libraries for host or device. Existing
+ // usage of #pragma comment(lib, *) is intended for host libraries on
+ // Windows. Therefore emit llvm.dependent-libraries only for host.
+ if (!ELFDependentLibraries.empty() && !Context.getLangOpts().CUDAIsDevice) {
+ auto *NMD = getModule().getOrInsertNamedMetadata("llvm.dependent-libraries");
+ for (auto *MD : ELFDependentLibraries)
+ NMD->addOperand(MD);
+ }
+
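For illustration (not part of this change): on an ELF target, with the pragma enabled (e.g. via -fms-extensions), a host translation unit like the sketch below has its library name recorded as an operand of the llvm.dependent-libraries named metadata instead of being rewritten into a "lib" linker option. The library name "m" is only an example.

    // Hypothetical input exercising the path above.
    #pragma comment(lib, "m")

    #include <cmath>

    double root(double x) { return std::sqrt(x); }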
// Record mregparm value now so it is visible through rest of codegen.
if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
getModule().addModuleFlag(llvm::Module::Error, "NumRegisterParameters",
@@ -536,15 +561,16 @@ void CodeGenModule::Release() {
if (LangOpts.OpenCL) {
EmitOpenCLMetadata();
// Emit SPIR version.
- if (getTriple().getArch() == llvm::Triple::spir ||
- getTriple().getArch() == llvm::Triple::spir64) {
+ if (getTriple().isSPIR()) {
// SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
// opencl.spir.version named metadata.
+ // C++ is backwards compatible with OpenCL v2.0.
+ auto Version = LangOpts.OpenCLCPlusPlus ? 200 : LangOpts.OpenCLVersion;
llvm::Metadata *SPIRVerElts[] = {
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- Int32Ty, LangOpts.OpenCLVersion / 100)),
+ Int32Ty, Version / 100)),
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- Int32Ty, (LangOpts.OpenCLVersion / 100 > 1) ? 0 : 2))};
+ Int32Ty, (Version / 100 > 1) ? 0 : 2))};
llvm::NamedMDNode *SPIRVerMD =
TheModule.getOrInsertNamedMetadata("opencl.spir.version");
llvm::LLVMContext &Ctx = TheModule.getContext();
@@ -599,11 +625,14 @@ void CodeGenModule::Release() {
void CodeGenModule::EmitOpenCLMetadata() {
// SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
// opencl.ocl.version named metadata node.
+ // C++ is backwards compatible with OpenCL v2.0.
+ // FIXME: We might need to add CXX version at some point too?
+ auto Version = LangOpts.OpenCLCPlusPlus ? 200 : LangOpts.OpenCLVersion;
llvm::Metadata *OCLVerElts[] = {
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- Int32Ty, LangOpts.OpenCLVersion / 100)),
+ Int32Ty, Version / 100)),
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- Int32Ty, (LangOpts.OpenCLVersion % 100) / 10))};
+ Int32Ty, (Version % 100) / 10))};
llvm::NamedMDNode *OCLVerMD =
TheModule.getOrInsertNamedMetadata("opencl.ocl.version");
llvm::LLVMContext &Ctx = TheModule.getContext();
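The version arithmetic in the two OpenCL metadata hunks above is easy to misread, so here is a standalone sketch (not clang code) of how a LangOpts.OpenCLVersion-style encoding such as 120 or 200 is split into the opencl.spir.version and opencl.ocl.version operands, with C++ for OpenCL treated as version 200:

    #include <cstdio>

    // 120 = OpenCL 1.2, 200 = OpenCL 2.0 (the encoding LangOpts.OpenCLVersion uses).
    static void printVersionMetadata(int Version) {
      int SpirMajor = Version / 100;
      int SpirMinor = (Version / 100 > 1) ? 0 : 2; // SPIR 1.2 for 1.x, 2.0 for 2.x
      int OclMajor = Version / 100;
      int OclMinor = (Version % 100) / 10;         // language minor version
      std::printf("opencl.spir.version = %d.%d  opencl.ocl.version = %d.%d\n",
                  SpirMajor, SpirMinor, OclMajor, OclMinor);
    }

    int main() {
      printVersionMetadata(120); // 1.2 / 1.2
      printVersionMetadata(200); // 2.0 / 2.0
    }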
@@ -731,9 +760,11 @@ void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
}
if (!D)
return;
- // Set visibility for definitions.
+ // Set visibility for definitions, and for declarations if requested globally
+ // or set explicitly.
LinkageInfo LV = D->getLinkageAndVisibility();
- if (LV.isVisibilityExplicit() || !GV->isDeclarationForLinker())
+ if (LV.isVisibilityExplicit() || getLangOpts().SetVisibilityForExternDecls ||
+ !GV->isDeclarationForLinker())
GV->setVisibility(GetLLVMVisibility(LV.getVisibility()));
}
@@ -758,6 +789,13 @@ static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
!GV->isThreadLocal())
return false;
}
+
+ // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols
+ // remain unresolved in the link, they can be resolved to zero, which is
+ // outside the current DSO.
+ if (TT.isOSBinFormatCOFF() && GV->hasExternalWeakLinkage())
+ return false;
+
// Every other GV is local on COFF.
// Make an exception for windows OS in the triple: Some firmware builds use
// *-win32-macho triples. This (accidentally?) produced windows relocations
@@ -774,7 +812,7 @@ static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
const auto &CGOpts = CGM.getCodeGenOpts();
llvm::Reloc::Model RM = CGOpts.RelocationModel;
const auto &LOpts = CGM.getLangOpts();
- if (RM != llvm::Reloc::Static && !LOpts.PIE)
+ if (RM != llvm::Reloc::Static && !LOpts.PIE && !LOpts.OpenMPIsDevice)
return false;
// A definition cannot be preempted from an executable.
@@ -837,19 +875,20 @@ void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
GlobalDecl GD) const {
setDLLImportDLLExport(GV, GD);
- setGlobalVisibilityAndLocal(GV, dyn_cast<NamedDecl>(GD.getDecl()));
+ setGVPropertiesAux(GV, dyn_cast<NamedDecl>(GD.getDecl()));
}
void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
const NamedDecl *D) const {
setDLLImportDLLExport(GV, D);
- setGlobalVisibilityAndLocal(GV, D);
+ setGVPropertiesAux(GV, D);
}
-void CodeGenModule::setGlobalVisibilityAndLocal(llvm::GlobalValue *GV,
- const NamedDecl *D) const {
+void CodeGenModule::setGVPropertiesAux(llvm::GlobalValue *GV,
+ const NamedDecl *D) const {
setGlobalVisibility(GV, D);
setDSOLocal(GV);
+ GV->setPartition(CodeGenOpts.SymbolPartition);
}
static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
@@ -1047,8 +1086,15 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
// Keep the first result in the case of a mangling collision.
const auto *ND = cast<NamedDecl>(GD.getDecl());
- auto Result =
- Manglings.insert(std::make_pair(getMangledNameImpl(*this, GD, ND), GD));
+ std::string MangledName = getMangledNameImpl(*this, GD, ND);
+
+ // Adjust kernel stub mangling as we may need to be able to differentiate
+ // them from the kernel itself (e.g., for HIP).
+ if (auto *FD = dyn_cast<FunctionDecl>(GD.getDecl()))
+ if (!getLangOpts().CUDAIsDevice && FD->hasAttr<CUDAGlobalAttr>())
+ MangledName = getCUDARuntime().getDeviceStubName(MangledName);
+
+ auto Result = Manglings.insert(std::make_pair(MangledName, GD));
return MangledDeclNames[CanonicalGD] = Result.first->first();
}
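To illustrate why the host-side mangling is adjusted (hypothetical snippet; the exact stub name produced by getDeviceStubName() is intentionally not shown here): during host compilation a __global__ function produces both a host-callable stub and a device kernel symbol, and the adjustment above keeps their mangled names distinguishable.

    // Hypothetical CUDA/HIP translation unit.
    __global__ void kernel(int *out) { *out = 42; }

    void launch(int *out) {
      // On the host this resolves to the kernel's stub, whose mangled name is
      // routed through getCUDARuntime().getDeviceStubName() above.
      kernel<<<1, 1>>>(out);
    }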
@@ -1153,7 +1199,7 @@ CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
return llvm::GlobalValue::InternalLinkage;
}
- return getLLVMLinkageForDeclarator(D, Linkage, /*isConstantVariable=*/false);
+ return getLLVMLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false);
}
llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
@@ -1173,6 +1219,212 @@ void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
}
+static void removeImageAccessQualifier(std::string& TyName) {
+ std::string ReadOnlyQual("__read_only");
+ std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
+ if (ReadOnlyPos != std::string::npos)
+ // "+ 1" for the space after access qualifier.
+ TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
+ else {
+ std::string WriteOnlyQual("__write_only");
+ std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
+ if (WriteOnlyPos != std::string::npos)
+ TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
+ else {
+ std::string ReadWriteQual("__read_write");
+ std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
+ if (ReadWritePos != std::string::npos)
+ TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
+ }
+ }
+}
+
+// Returns the address space id that should be emitted in the
+// kernel_arg_addr_space metadata. These ids are fixed to the ones specified
+// in the SPIR 2.0 specification so that, for example, a clGetKernelArgInfo()
+// implementation can still distinguish the address spaces on targets that
+// have no unique mapping to the OpenCL address spaces (basically all
+// single-AS CPUs).
+static unsigned ArgInfoAddressSpace(LangAS AS) {
+ switch (AS) {
+ case LangAS::opencl_global: return 1;
+ case LangAS::opencl_constant: return 2;
+ case LangAS::opencl_local: return 3;
+ case LangAS::opencl_generic: return 4; // Not in SPIR 2.0 specs.
+ default:
+ return 0; // Assume private.
+ }
+}
+
+void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
+ const FunctionDecl *FD,
+ CodeGenFunction *CGF) {
+ assert(((FD && CGF) || (!FD && !CGF)) &&
+ "Incorrect use - FD and CGF should either be both null or not!");
+ // Create MDNodes that represent the kernel arg metadata.
+ // Each MDNode is a list of the form "key" followed by N values, where N is
+ // the number of kernel arguments.
+
+ const PrintingPolicy &Policy = Context.getPrintingPolicy();
+
+ // MDNode for the kernel argument address space qualifiers.
+ SmallVector<llvm::Metadata *, 8> addressQuals;
+
+ // MDNode for the kernel argument access qualifiers (images only).
+ SmallVector<llvm::Metadata *, 8> accessQuals;
+
+ // MDNode for the kernel argument type names.
+ SmallVector<llvm::Metadata *, 8> argTypeNames;
+
+ // MDNode for the kernel argument base type names.
+ SmallVector<llvm::Metadata *, 8> argBaseTypeNames;
+
+ // MDNode for the kernel argument type qualifiers.
+ SmallVector<llvm::Metadata *, 8> argTypeQuals;
+
+ // MDNode for the kernel argument names.
+ SmallVector<llvm::Metadata *, 8> argNames;
+
+ if (FD && CGF)
+ for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
+ const ParmVarDecl *parm = FD->getParamDecl(i);
+ QualType ty = parm->getType();
+ std::string typeQuals;
+
+ if (ty->isPointerType()) {
+ QualType pointeeTy = ty->getPointeeType();
+
+ // Get address qualifier.
+ addressQuals.push_back(
+ llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(
+ ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));
+
+ // Get argument type name.
+ std::string typeName =
+ pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";
+
+ // Turn "unsigned type" to "utype"
+ std::string::size_type pos = typeName.find("unsigned");
+ if (pointeeTy.isCanonical() && pos != std::string::npos)
+ typeName.erase(pos + 1, 8);
+
+ argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
+
+ std::string baseTypeName =
+ pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
+ Policy) +
+ "*";
+
+ // Turn "unsigned type" to "utype"
+ pos = baseTypeName.find("unsigned");
+ if (pos != std::string::npos)
+ baseTypeName.erase(pos + 1, 8);
+
+ argBaseTypeNames.push_back(
+ llvm::MDString::get(VMContext, baseTypeName));
+
+ // Get argument type qualifiers:
+ if (ty.isRestrictQualified())
+ typeQuals = "restrict";
+ if (pointeeTy.isConstQualified() ||
+ (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
+ typeQuals += typeQuals.empty() ? "const" : " const";
+ if (pointeeTy.isVolatileQualified())
+ typeQuals += typeQuals.empty() ? "volatile" : " volatile";
+ } else {
+ uint32_t AddrSpc = 0;
+ bool isPipe = ty->isPipeType();
+ if (ty->isImageType() || isPipe)
+ AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);
+
+ addressQuals.push_back(
+ llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(AddrSpc)));
+
+ // Get argument type name.
+ std::string typeName;
+ if (isPipe)
+ typeName = ty.getCanonicalType()
+ ->getAs<PipeType>()
+ ->getElementType()
+ .getAsString(Policy);
+ else
+ typeName = ty.getUnqualifiedType().getAsString(Policy);
+
+ // Turn "unsigned type" to "utype"
+ std::string::size_type pos = typeName.find("unsigned");
+ if (ty.isCanonical() && pos != std::string::npos)
+ typeName.erase(pos + 1, 8);
+
+ std::string baseTypeName;
+ if (isPipe)
+ baseTypeName = ty.getCanonicalType()
+ ->getAs<PipeType>()
+ ->getElementType()
+ .getCanonicalType()
+ .getAsString(Policy);
+ else
+ baseTypeName =
+ ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);
+
+ // Remove access qualifiers on images
+ // (as they are inseparable from type in clang implementation,
+ // but OpenCL spec provides a special query to get access qualifier
+ // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
+ if (ty->isImageType()) {
+ removeImageAccessQualifier(typeName);
+ removeImageAccessQualifier(baseTypeName);
+ }
+
+ argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
+
+ // Turn "unsigned type" to "utype"
+ pos = baseTypeName.find("unsigned");
+ if (pos != std::string::npos)
+ baseTypeName.erase(pos + 1, 8);
+
+ argBaseTypeNames.push_back(
+ llvm::MDString::get(VMContext, baseTypeName));
+
+ if (isPipe)
+ typeQuals = "pipe";
+ }
+
+ argTypeQuals.push_back(llvm::MDString::get(VMContext, typeQuals));
+
+ // Get image and pipe access qualifier:
+ if (ty->isImageType() || ty->isPipeType()) {
+ const Decl *PDecl = parm;
+ if (auto *TD = dyn_cast<TypedefType>(ty))
+ PDecl = TD->getDecl();
+ const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
+ if (A && A->isWriteOnly())
+ accessQuals.push_back(llvm::MDString::get(VMContext, "write_only"));
+ else if (A && A->isReadWrite())
+ accessQuals.push_back(llvm::MDString::get(VMContext, "read_write"));
+ else
+ accessQuals.push_back(llvm::MDString::get(VMContext, "read_only"));
+ } else
+ accessQuals.push_back(llvm::MDString::get(VMContext, "none"));
+
+ // Get argument name.
+ argNames.push_back(llvm::MDString::get(VMContext, parm->getName()));
+ }
+
+ Fn->setMetadata("kernel_arg_addr_space",
+ llvm::MDNode::get(VMContext, addressQuals));
+ Fn->setMetadata("kernel_arg_access_qual",
+ llvm::MDNode::get(VMContext, accessQuals));
+ Fn->setMetadata("kernel_arg_type",
+ llvm::MDNode::get(VMContext, argTypeNames));
+ Fn->setMetadata("kernel_arg_base_type",
+ llvm::MDNode::get(VMContext, argBaseTypeNames));
+ Fn->setMetadata("kernel_arg_type_qual",
+ llvm::MDNode::get(VMContext, argTypeQuals));
+ if (getCodeGenOpts().EmitOpenCLArgMetadata)
+ Fn->setMetadata("kernel_arg_name",
+ llvm::MDNode::get(VMContext, argNames));
+}
+
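The repeated erase(pos + 1, 8) calls above implement a small string trick: erasing the eight characters after the leading 'u' of "unsigned" removes "nsigned " and fuses the 'u' onto the following type name. A standalone sketch (not clang code) of that rewrite:

    #include <cassert>
    #include <string>

    // "unsigned int*" -> "uint*", "unsigned char" -> "uchar", others untouched.
    static std::string shortenUnsigned(std::string TypeName) {
      std::string::size_type Pos = TypeName.find("unsigned");
      if (Pos != std::string::npos)
        TypeName.erase(Pos + 1, 8); // drop "nsigned " (8 characters)
      return TypeName;
    }

    int main() {
      assert(shortenUnsigned("unsigned int*") == "uint*");
      assert(shortenUnsigned("unsigned char") == "uchar");
      assert(shortenUnsigned("float4") == "float4");
    }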
/// Determines whether the language options require us to model
/// unwind exceptions. We treat -fexceptions as mandating this
/// except under the fragile ObjC ABI with only ObjC exceptions
@@ -1544,12 +1796,8 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
const auto *FD = cast<FunctionDecl>(GD.getDecl());
- if (!IsIncompleteFunction) {
+ if (!IsIncompleteFunction)
SetLLVMFunctionAttributes(GD, getTypes().arrangeGlobalDeclaration(GD), F);
- // Setup target-specific attributes.
- if (F->isDeclaration())
- getTargetCodeGenInfo().setTargetAttributes(FD, F, *this);
- }
// Add the Returned attribute for "this", except for iOS 5 and earlier
// where substantial code, including the libstdc++ dylib, was compiled with
@@ -1569,6 +1817,10 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
setLinkageForGV(F, FD);
setGVProperties(F, FD);
+ // Setup target-specific attributes.
+ if (!IsIncompleteFunction && F->isDeclaration())
+ getTargetCodeGenInfo().setTargetAttributes(FD, F, *this);
+
if (const auto *CSA = FD->getAttr<CodeSegAttr>())
F->setSection(CSA->getName());
else if (const auto *SA = FD->getAttr<SectionAttr>())
@@ -1603,6 +1855,23 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
if (getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
getOpenMPRuntime().emitDeclareSimdFunction(FD, F);
+
+ if (const auto *CB = FD->getAttr<CallbackAttr>()) {
+ // Annotate the callback behavior as metadata:
+ // - The callback callee (as argument number).
+ // - The callback payloads (as argument numbers).
+ llvm::LLVMContext &Ctx = F->getContext();
+ llvm::MDBuilder MDB(Ctx);
+
+ // The payload indices are all but the first one in the encoding. The first
+ // identifies the callback callee.
+ int CalleeIdx = *CB->encoding_begin();
+ ArrayRef<int> PayloadIndices(CB->encoding_begin() + 1, CB->encoding_end());
+ F->addMetadata(llvm::LLVMContext::MD_callback,
+ *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
+ CalleeIdx, PayloadIndices,
+ /* VarArgsArePassed */ false)}));
+ }
}
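For context, a hypothetical function that would carry this metadata is sketched below; the attribute's 1-based indices name the callee parameter first and the payload parameters after it, matching the encoding consumed above.

    #include <cstdio>

    // Hypothetical broker: parameter 1 ('work') is eventually called with
    // parameter 2 ('payload') as its argument; CodeGen lowers the attribute
    // into !callback metadata on the declaration.
    __attribute__((callback(1, 2)))
    void broker(void (*work)(void *), void *payload) {
      work(payload);
    }

    static void hello(void *p) { std::printf("%s\n", static_cast<char *>(p)); }

    int main() {
      char msg[] = "called back";
      broker(hello, msg);
    }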
void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
@@ -1660,17 +1929,18 @@ void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) {
LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
}
-void CodeGenModule::AddELFLibDirective(StringRef Lib) {
+void CodeGenModule::AddDependentLib(StringRef Lib) {
auto &C = getLLVMContext();
- LinkerOptionsMetadata.push_back(llvm::MDNode::get(
- C, {llvm::MDString::get(C, "lib"), llvm::MDString::get(C, Lib)}));
-}
+ if (getTarget().getTriple().isOSBinFormatELF()) {
+ ELFDependentLibraries.push_back(
+ llvm::MDNode::get(C, llvm::MDString::get(C, Lib)));
+ return;
+ }
-void CodeGenModule::AddDependentLib(StringRef Lib) {
llvm::SmallString<24> Opt;
getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt);
auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
- LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
+ LinkerOptionsMetadata.push_back(llvm::MDNode::get(C, MDOpts));
}
/// Add link options implied by the given module, including modules
@@ -1693,7 +1963,6 @@ static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
// described by this module.
llvm::LLVMContext &Context = CGM.getLLVMContext();
bool IsELF = CGM.getTarget().getTriple().isOSBinFormatELF();
- bool IsPS4 = CGM.getTarget().getTriple().isPS4();
// For modules that use export_as for linking, use that module
// name instead.
@@ -1713,7 +1982,7 @@ static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
}
// Link against a library.
- if (IsELF && !IsPS4) {
+ if (IsELF) {
llvm::Metadata *Args[2] = {
llvm::MDString::get(Context, "lib"),
llvm::MDString::get(Context, Mod->LinkLibraries[I - 1].Library),
@@ -1970,9 +2239,11 @@ bool CodeGenModule::isInSanitizerBlacklist(llvm::GlobalVariable *GV,
SourceLocation Loc, QualType Ty,
StringRef Category) const {
// For now globals can be blacklisted only in ASan and KASan.
- const SanitizerMask EnabledAsanMask = LangOpts.Sanitize.Mask &
+ const SanitizerMask EnabledAsanMask =
+ LangOpts.Sanitize.Mask &
(SanitizerKind::Address | SanitizerKind::KernelAddress |
- SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress);
+ SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress |
+ SanitizerKind::MemTag);
if (!EnabledAsanMask)
return false;
const auto &SanitizerBL = getContext().getSanitizerBlacklist();
@@ -2146,7 +2417,8 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
if (!Global->hasAttr<CUDADeviceAttr>() &&
!Global->hasAttr<CUDAGlobalAttr>() &&
!Global->hasAttr<CUDAConstantAttr>() &&
- !Global->hasAttr<CUDASharedAttr>())
+ !Global->hasAttr<CUDASharedAttr>() &&
+ !(LangOpts.HIP && Global->hasAttr<HIPPinnedShadowAttr>()))
return;
} else {
// We need to emit host-side 'shadows' for all global
@@ -2173,6 +2445,10 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
if (MustBeEmitted(Global))
EmitOMPDeclareReduction(DRD);
return;
+ } else if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Global)) {
+ if (MustBeEmitted(Global))
+ EmitOMPDeclareMapper(DMD);
+ return;
}
}
@@ -2202,13 +2478,19 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// Emit declaration of the must-be-emitted declare target variable.
if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
- if (*Res == OMPDeclareTargetDeclAttr::MT_To) {
+ bool UnifiedMemoryEnabled =
+ getOpenMPRuntime().hasRequiresUnifiedSharedMemory();
+ if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ !UnifiedMemoryEnabled) {
(void)GetAddrOfGlobalVar(VD);
} else {
- assert(*Res == OMPDeclareTargetDeclAttr::MT_Link &&
- "link claue expected.");
- (void)getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
+ assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
+ (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ UnifiedMemoryEnabled)) &&
+ "Link clause or to clause with unified memory expected.");
+ (void)getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
}
+
return;
}
}
@@ -2265,35 +2547,36 @@ static bool HasNonDllImportDtor(QualType T) {
}
namespace {
- struct FunctionIsDirectlyRecursive :
- public RecursiveASTVisitor<FunctionIsDirectlyRecursive> {
+ struct FunctionIsDirectlyRecursive
+ : public ConstStmtVisitor<FunctionIsDirectlyRecursive, bool> {
const StringRef Name;
const Builtin::Context &BI;
- bool Result;
- FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C) :
- Name(N), BI(C), Result(false) {
- }
- typedef RecursiveASTVisitor<FunctionIsDirectlyRecursive> Base;
+ FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C)
+ : Name(N), BI(C) {}
- bool TraverseCallExpr(CallExpr *E) {
+ bool VisitCallExpr(const CallExpr *E) {
const FunctionDecl *FD = E->getDirectCallee();
if (!FD)
- return true;
- AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
- if (Attr && Name == Attr->getLabel()) {
- Result = true;
return false;
- }
+ AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
+ if (Attr && Name == Attr->getLabel())
+ return true;
unsigned BuiltinID = FD->getBuiltinID();
if (!BuiltinID || !BI.isLibFunction(BuiltinID))
- return true;
+ return false;
StringRef BuiltinName = BI.getName(BuiltinID);
if (BuiltinName.startswith("__builtin_") &&
Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
- Result = true;
- return false;
+ return true;
}
- return true;
+ return false;
+ }
+
+ bool VisitStmt(const Stmt *S) {
+ for (const Stmt *Child : S->children())
+ if (Child && this->Visit(Child))
+ return true;
+ return false;
}
};
@@ -2378,8 +2661,8 @@ CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
}
FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo);
- Walker.TraverseFunctionDecl(const_cast<FunctionDecl*>(FD));
- return Walker.Result;
+ const Stmt *Body = FD->getBody();
+ return Body ? Walker.Visit(Body) : false;
}
bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
@@ -2447,13 +2730,19 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
if (!shouldEmitFunction(GD))
return;
+ llvm::TimeTraceScope TimeScope("CodeGen Function", [&]() {
+ std::string Name;
+ llvm::raw_string_ostream OS(Name);
+ FD->getNameForDiagnostic(OS, getContext().getPrintingPolicy(),
+ /*Qualified=*/true);
+ return Name;
+ });
+
if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) {
// Make sure to emit the definition(s) before we emit the thunks.
// This is necessary for the generation of certain thunks.
- if (const auto *CD = dyn_cast<CXXConstructorDecl>(Method))
- ABI->emitCXXStructor(CD, getFromCtorType(GD.getCtorType()));
- else if (const auto *DD = dyn_cast<CXXDestructorDecl>(Method))
- ABI->emitCXXStructor(DD, getFromDtorType(GD.getDtorType()));
+ if (isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method))
+ ABI->emitCXXStructor(GD);
else if (FD->isMultiVersion())
EmitMultiVersionFunctionDefinition(GD, GV);
else
@@ -2537,10 +2826,9 @@ void CodeGenModule::emitMultiVersionFunctions() {
ResolverFunc->setComdat(
getModule().getOrInsertComdat(ResolverFunc->getName()));
- std::stable_sort(
- Options.begin(), Options.end(),
- [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
- const CodeGenFunction::MultiVersionResolverOption &RHS) {
+ llvm::stable_sort(
+ Options, [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
+ const CodeGenFunction::MultiVersionResolverOption &RHS) {
return TargetMVPriority(TI, LHS) > TargetMVPriority(TI, RHS);
});
CodeGenFunction CGF(*this);
@@ -2553,8 +2841,7 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
assert(FD && "Not a FunctionDecl?");
const auto *DD = FD->getAttr<CPUDispatchAttr>();
assert(DD && "Not a cpu_dispatch Function?");
- QualType CanonTy = Context.getCanonicalType(FD->getType());
- llvm::Type *DeclTy = getTypes().ConvertFunctionType(CanonTy, FD);
+ llvm::Type *DeclTy = getTypes().ConvertType(FD->getType());
if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
const CGFunctionInfo &FInfo = getTypes().arrangeCXXMethodDeclaration(CXXFD);
@@ -2893,8 +3180,7 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
// If there was no specific requested type, just convert it now.
if (!Ty) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
- auto CanonTy = Context.getCanonicalType(FD->getType());
- Ty = getTypes().ConvertFunctionType(CanonTy, FD);
+ Ty = getTypes().ConvertType(FD->getType());
}
// Devirtualized destructor calls may come through here instead of via
@@ -2953,7 +3239,7 @@ GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
-llvm::Constant *
+llvm::FunctionCallee
CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
llvm::AttributeList ExtraAttrs,
bool Local) {
@@ -2966,9 +3252,13 @@ CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
if (F->empty()) {
F->setCallingConv(getRuntimeCC());
- if (!Local && getTriple().isOSBinFormatCOFF() &&
- !getCodeGenOpts().LTOVisibilityPublicStd &&
- !getTriple().isWindowsGNUEnvironment()) {
+ // In Windows Itanium environments, try to mark runtime functions
+ // dllimport. For Mingw and MSVC, don't. We don't really know if the user
+ // will link their standard library statically or dynamically. Marking
+ // functions imported when they are not imported can cause linker errors
+ // and warnings.
+ if (!Local && getTriple().isWindowsItaniumEnvironment() &&
+ !getCodeGenOpts().LTOVisibilityPublicStd) {
const FunctionDecl *FD = GetRuntimeFunctionDecl(Context, Name);
if (!FD || FD->hasAttr<DLLImportAttr>()) {
F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
@@ -2979,15 +3269,7 @@ CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
}
}
- return C;
-}
-
-/// CreateBuiltinFunction - Create a new builtin function with the specified
-/// type and name.
-llvm::Constant *
-CodeGenModule::CreateBuiltinFunction(llvm::FunctionType *FTy, StringRef Name,
- llvm::AttributeList ExtraAttrs) {
- return CreateRuntimeFunction(FTy, Name, ExtraAttrs, true);
+ return {FTy, C};
}
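A standalone caller-side sketch of the new return type (the helper name "runtime_helper" and the wrapper function are invented; this mirrors the LLVM 9 API, where Module::getOrInsertFunction also returns a FunctionCallee): the FunctionCallee pairs the FunctionType with the callee, and IRBuilder::CreateCall consumes both pieces explicitly.

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"

    int main() {
      llvm::LLVMContext Ctx;
      llvm::Module M("demo", Ctx);
      llvm::IRBuilder<> B(Ctx);

      auto *FTy = llvm::FunctionType::get(B.getInt32Ty(), {B.getInt32Ty()},
                                          /*isVarArg=*/false);
      llvm::FunctionCallee Callee = M.getOrInsertFunction("runtime_helper", FTy);

      auto *Wrapper = llvm::Function::Create(
          FTy, llvm::Function::ExternalLinkage, "wrapper", &M);
      B.SetInsertPoint(llvm::BasicBlock::Create(Ctx, "entry", Wrapper));
      llvm::Value *Arg = &*Wrapper->arg_begin();
      llvm::Value *Ret = B.CreateCall(Callee, {Arg});
      B.CreateRet(Ret);
    }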
/// isTypeConstant - Determine whether an object of this type can be emitted
@@ -3199,6 +3481,9 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
return getTargetCodeGenInfo().performAddrSpaceCast(*this, GV, AddrSpace,
ExpectedAS, Ty);
+ if (GV->isDeclaration())
+ getTargetCodeGenInfo().setTargetAttributes(D, GV, *this);
+
return GV;
}
@@ -3206,15 +3491,8 @@ llvm::Constant *
CodeGenModule::GetAddrOfGlobal(GlobalDecl GD,
ForDefinition_t IsForDefinition) {
const Decl *D = GD.getDecl();
- if (isa<CXXConstructorDecl>(D))
- return getAddrOfCXXStructor(cast<CXXConstructorDecl>(D),
- getFromCtorType(GD.getCtorType()),
- /*FnInfo=*/nullptr, /*FnType=*/nullptr,
- /*DontDefer=*/false, IsForDefinition);
- else if (isa<CXXDestructorDecl>(D))
- return getAddrOfCXXStructor(cast<CXXDestructorDecl>(D),
- getFromDtorType(GD.getDtorType()),
- /*FnInfo=*/nullptr, /*FnType=*/nullptr,
+ if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
+ return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr,
/*DontDefer=*/false, IsForDefinition);
else if (isa<CXXMethodDecl>(D)) {
auto FInfo = &getTypes().arrangeCXXMethodDeclaration(
@@ -3301,8 +3579,12 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
llvm::Constant *
CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
StringRef Name) {
- auto *Ret =
- GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), nullptr);
+ auto PtrTy =
+ getContext().getLangOpts().OpenCL
+ ? llvm::PointerType::get(
+ Ty, getContext().getTargetAddressSpace(LangAS::opencl_global))
+ : llvm::PointerType::getUnqual(Ty);
+ auto *Ret = GetOrCreateLLVMGlobal(Name, PtrTy, nullptr);
setDSOLocal(cast<llvm::GlobalValue>(Ret->stripPointerCasts()));
return Ret;
}
@@ -3359,6 +3641,11 @@ LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
return LangAS::cuda_device;
}
+ if (LangOpts.OpenMP) {
+ LangAS AS;
+ if (OpenMPRuntime->hasAllocateAttributeForGlobalVar(D, AS))
+ return AS;
+ }
return getTargetCodeGenInfo().getGlobalVarAddressSpace(*this, D);
}
@@ -3432,6 +3719,11 @@ static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
if (!CGM.supportsCOMDAT())
return false;
+ // Do not set the COMDAT attribute for CUDA/HIP stub functions, to prevent
+ // them from being "merged" by the COMDAT Folding linker optimization.
+ if (D.hasAttr<CUDAGlobalAttr>())
+ return false;
+
if (D.hasAttr<SelectAnyAttr>())
return true;
@@ -3496,7 +3788,12 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
!getLangOpts().CUDAIsDevice &&
(D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
D->hasAttr<CUDASharedAttr>());
- if (getLangOpts().CUDA && (IsCUDASharedVar || IsCUDAShadowVar))
+ // HIP pinned shadows of initialized host-side global variables are also
+ // left undefined.
+ bool IsHIPPinnedShadowVar =
+ getLangOpts().CUDAIsDevice && D->hasAttr<HIPPinnedShadowAttr>();
+ if (getLangOpts().CUDA &&
+ (IsCUDASharedVar || IsCUDAShadowVar || IsHIPPinnedShadowVar))
Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
else if (!InitExpr) {
// This is a tentative definition; tentative definitions are
@@ -3599,14 +3896,16 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())."
if (GV && LangOpts.CUDA) {
if (LangOpts.CUDAIsDevice) {
- if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>())
+ if (Linkage != llvm::GlobalValue::InternalLinkage &&
+ (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>()))
GV->setExternallyInitialized(true);
} else {
// Host-side shadows of external declarations of device-side
// global variables become internal definitions. These have to
// be internal in order to prevent name conflicts with global
// host variables with the same name in a different TUs.
- if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>()) {
+ if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
+ D->hasAttr<HIPPinnedShadowAttr>()) {
Linkage = llvm::GlobalValue::InternalLinkage;
// Shadow variables and their properties must be registered
@@ -3619,7 +3918,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// Extern global variables will be registered in the TU where they are
// defined.
if (!D->hasExternalStorage())
- getCUDARuntime().registerDeviceVar(*GV, Flags);
+ getCUDARuntime().registerDeviceVar(D, *GV, Flags);
} else if (D->hasAttr<CUDASharedAttr>())
// __shared__ variables are odd. Shadows do get created, but
// they are not registered with the CUDA runtime, so they
@@ -3630,7 +3929,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
}
}
- GV->setInitializer(Init);
+ if (!IsHIPPinnedShadowVar)
+ GV->setInitializer(Init);
if (emitter) emitter->finalize(GV);
// If it is safe to mark the global 'constant', do so now.
@@ -3762,13 +4062,15 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context,
}
}
- // Microsoft's link.exe doesn't support alignments greater than 32 for common
- // symbols, so symbols with greater alignment requirements cannot be common.
+ // Microsoft's link.exe doesn't support alignments greater than 32 bytes for
+ // common symbols, so symbols with greater alignment requirements cannot be
+ // common.
// Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two
// alignments for common symbols via the aligncomm directive, so this
// restriction only applies to MSVC environments.
if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() &&
- Context.getTypeAlignIfKnown(D->getType()) > 32)
+ Context.getTypeAlignIfKnown(D->getType()) >
+ Context.toBits(CharUnits::fromQuantity(32)))
return true;
return false;
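A note on the threshold above (my reading, not stated in the patch): ASTContext::getTypeAlignIfKnown() reports alignment in bits, so the previous comparison against a bare 32 fired for anything over 4-byte alignment, while the intended limit is 32 bytes. toBits(CharUnits::fromQuantity(32)) expresses that limit in bits; a trivial check of the arithmetic, assuming 8-bit chars:

    #include <cassert>

    int main() {
      const unsigned BitsPerByte = 8;     // what toBits() uses on 8-bit-char targets
      const unsigned ThresholdBytes = 32; // link.exe limit for common symbols
      assert(ThresholdBytes * BitsPerByte == 256);
    }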
@@ -3877,9 +4179,10 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
}
// Recognize calls to the function.
- llvm::CallSite callSite(user);
+ llvm::CallBase *callSite = dyn_cast<llvm::CallBase>(user);
if (!callSite) continue;
- if (!callSite.isCallee(&*use)) continue;
+ if (!callSite->isCallee(&*use))
+ continue;
// If the return types don't match exactly, then we can't
// transform this call unless it's dead.
@@ -3888,18 +4191,19 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
// Get the call site's attribute list.
SmallVector<llvm::AttributeSet, 8> newArgAttrs;
- llvm::AttributeList oldAttrs = callSite.getAttributes();
+ llvm::AttributeList oldAttrs = callSite->getAttributes();
// If the function was passed too few arguments, don't transform.
unsigned newNumArgs = newFn->arg_size();
- if (callSite.arg_size() < newNumArgs) continue;
+ if (callSite->arg_size() < newNumArgs)
+ continue;
// If extra arguments were passed, we silently drop them.
// If any of the types mismatch, we don't transform.
unsigned argNo = 0;
bool dontTransform = false;
for (llvm::Argument &A : newFn->args()) {
- if (callSite.getArgument(argNo)->getType() != A.getType()) {
+ if (callSite->getArgOperand(argNo)->getType() != A.getType()) {
dontTransform = true;
break;
}
@@ -3913,35 +4217,33 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
// Okay, we can transform this. Create the new call instruction and copy
// over the required information.
- newArgs.append(callSite.arg_begin(), callSite.arg_begin() + argNo);
+ newArgs.append(callSite->arg_begin(), callSite->arg_begin() + argNo);
// Copy over any operand bundles.
- callSite.getOperandBundlesAsDefs(newBundles);
+ callSite->getOperandBundlesAsDefs(newBundles);
- llvm::CallSite newCall;
- if (callSite.isCall()) {
- newCall = llvm::CallInst::Create(newFn, newArgs, newBundles, "",
- callSite.getInstruction());
+ llvm::CallBase *newCall;
+ if (dyn_cast<llvm::CallInst>(callSite)) {
+ newCall =
+ llvm::CallInst::Create(newFn, newArgs, newBundles, "", callSite);
} else {
- auto *oldInvoke = cast<llvm::InvokeInst>(callSite.getInstruction());
- newCall = llvm::InvokeInst::Create(newFn,
- oldInvoke->getNormalDest(),
- oldInvoke->getUnwindDest(),
- newArgs, newBundles, "",
- callSite.getInstruction());
+ auto *oldInvoke = cast<llvm::InvokeInst>(callSite);
+ newCall = llvm::InvokeInst::Create(newFn, oldInvoke->getNormalDest(),
+ oldInvoke->getUnwindDest(), newArgs,
+ newBundles, "", callSite);
}
newArgs.clear(); // for the next iteration
if (!newCall->getType()->isVoidTy())
- newCall->takeName(callSite.getInstruction());
- newCall.setAttributes(llvm::AttributeList::get(
+ newCall->takeName(callSite);
+ newCall->setAttributes(llvm::AttributeList::get(
newFn->getContext(), oldAttrs.getFnAttributes(),
oldAttrs.getRetAttributes(), newArgAttrs));
- newCall.setCallingConv(callSite.getCallingConv());
+ newCall->setCallingConv(callSite->getCallingConv());
// Finally, remove the old call, replacing any uses with the new one.
if (!callSite->use_empty())
- callSite->replaceAllUsesWith(newCall.getInstruction());
+ callSite->replaceAllUsesWith(newCall);
// Copy debug location attached to CI.
if (callSite->getDebugLoc())
@@ -4373,9 +4675,12 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
GV = Fields.finishAndCreateGlobal("_unnamed_cfstring_", Alignment,
/*isConstant=*/false,
llvm::GlobalVariable::PrivateLinkage);
+ GV->addAttribute("objc_arc_inert");
switch (Triple.getObjectFormat()) {
case llvm::Triple::UnknownObjectFormat:
llvm_unreachable("unknown file format");
+ case llvm::Triple::XCOFF:
+ llvm_unreachable("XCOFF is not yet implemented");
case llvm::Triple::COFF:
case llvm::Triple::ELF:
case llvm::Triple::Wasm:
@@ -4504,7 +4809,8 @@ CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
if (auto GV = *Entry) {
if (Alignment.getQuantity() > GV->getAlignment())
GV->setAlignment(Alignment.getQuantity());
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
+ Alignment);
}
}
@@ -4566,7 +4872,8 @@ ConstantAddress CodeGenModule::GetAddrOfConstantCString(
if (auto GV = *Entry) {
if (Alignment.getQuantity() > GV->getAlignment())
GV->setAlignment(Alignment.getQuantity());
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
+ Alignment);
}
}
@@ -4615,7 +4922,7 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
// evaluating the initializer if the surrounding constant expression
// modifies the temporary.
Value = getContext().getMaterializedTemporaryValue(E, false);
- if (Value && Value->isUninit())
+ if (Value && Value->isAbsent())
Value = nullptr;
}
@@ -4861,6 +5168,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::UsingShadow:
case Decl::ClassTemplate:
case Decl::VarTemplate:
+ case Decl::Concept:
case Decl::VarTemplatePartialSpecialization:
case Decl::FunctionTemplate:
case Decl::TypeAliasTemplate:
@@ -4943,10 +5251,6 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
AppendLinkerOptions(PCD->getArg());
break;
case PCK_Lib:
- if (getTarget().getTriple().isOSBinFormatELF() &&
- !getTarget().getTriple().isPS4())
- AddELFLibDirective(PCD->getArg());
- else
AddDependentLib(PCD->getArg());
break;
case PCK_Compiler:
@@ -5030,10 +5334,17 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
EmitOMPThreadPrivateDecl(cast<OMPThreadPrivateDecl>(D));
break;
+ case Decl::OMPAllocate:
+ break;
+
case Decl::OMPDeclareReduction:
EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(D));
break;
+ case Decl::OMPDeclareMapper:
+ EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(D));
+ break;
+
case Decl::OMPRequires:
EmitOMPRequiresDecl(cast<OMPRequiresDecl>(D));
break;