aboutsummaryrefslogtreecommitdiff
path: root/contrib/llvm-project/clang/lib/CodeGen
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/llvm-project/clang/lib/CodeGen')
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Address.h81
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp335
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h69
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp273
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp45
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h13
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp51
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCall.h11
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h1
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h3
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp37
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp122
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h4
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp132
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp74
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp46
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h31
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp36
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h17
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h1
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h5
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp60
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h1
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp94
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp46
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h1
45 files changed, 997 insertions, 785 deletions
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Address.h b/contrib/llvm-project/clang/lib/CodeGen/Address.h
index 37c20291c0e8..3ac0f4f0d7e5 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Address.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/Address.h
@@ -14,30 +14,77 @@
#ifndef LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
#define LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
-#include "llvm/IR/Constants.h"
#include "clang/AST/CharUnits.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/Support/MathExtras.h"
namespace clang {
namespace CodeGen {
-/// An aligned address.
-class Address {
+// We try to save some space by using 6 bits over two PointerIntPairs to store
+// the alignment. However, some arches don't support 3 bits in a PointerIntPair
+// so we fallback to storing the alignment separately.
+template <typename T, bool = alignof(llvm::Value *) >= 8> class AddressImpl {};
+
+template <typename T> class AddressImpl<T, false> {
llvm::Value *Pointer;
llvm::Type *ElementType;
CharUnits Alignment;
+public:
+ AddressImpl(llvm::Value *Pointer, llvm::Type *ElementType,
+ CharUnits Alignment)
+ : Pointer(Pointer), ElementType(ElementType), Alignment(Alignment) {}
+ llvm::Value *getPointer() const { return Pointer; }
+ llvm::Type *getElementType() const { return ElementType; }
+ CharUnits getAlignment() const { return Alignment; }
+};
+
+template <typename T> class AddressImpl<T, true> {
+ // Int portion stores upper 3 bits of the log of the alignment.
+ llvm::PointerIntPair<llvm::Value *, 3, unsigned> Pointer;
+ // Int portion stores lower 3 bits of the log of the alignment.
+ llvm::PointerIntPair<llvm::Type *, 3, unsigned> ElementType;
+
+public:
+ AddressImpl(llvm::Value *Pointer, llvm::Type *ElementType,
+ CharUnits Alignment)
+ : Pointer(Pointer), ElementType(ElementType) {
+ if (Alignment.isZero())
+ return;
+ // Currently the max supported alignment is much less than 1 << 63 and is
+ // guaranteed to be a power of 2, so we can store the log of the alignment
+ // into 6 bits.
+ assert(Alignment.isPowerOfTwo() && "Alignment cannot be zero");
+ auto AlignLog = llvm::Log2_64(Alignment.getQuantity());
+ assert(AlignLog < (1 << 6) && "cannot fit alignment into 6 bits");
+ this->Pointer.setInt(AlignLog >> 3);
+ this->ElementType.setInt(AlignLog & 7);
+ }
+ llvm::Value *getPointer() const { return Pointer.getPointer(); }
+ llvm::Type *getElementType() const { return ElementType.getPointer(); }
+ CharUnits getAlignment() const {
+ unsigned AlignLog = (Pointer.getInt() << 3) | ElementType.getInt();
+ return CharUnits::fromQuantity(CharUnits::QuantityType(1) << AlignLog);
+ }
+};
+
+/// An aligned address.
+class Address {
+ AddressImpl<void> A;
+
protected:
- Address(std::nullptr_t) : Pointer(nullptr), ElementType(nullptr) {}
+ Address(std::nullptr_t) : A(nullptr, nullptr, CharUnits::Zero()) {}
public:
- Address(llvm::Value *pointer, llvm::Type *elementType, CharUnits alignment)
- : Pointer(pointer), ElementType(elementType), Alignment(alignment) {
- assert(pointer != nullptr && "Pointer cannot be null");
- assert(elementType != nullptr && "Element type cannot be null");
- assert(llvm::cast<llvm::PointerType>(pointer->getType())
- ->isOpaqueOrPointeeTypeMatches(elementType) &&
+ Address(llvm::Value *Pointer, llvm::Type *ElementType, CharUnits Alignment)
+ : A(Pointer, ElementType, Alignment) {
+ assert(Pointer != nullptr && "Pointer cannot be null");
+ assert(ElementType != nullptr && "Element type cannot be null");
+ assert(llvm::cast<llvm::PointerType>(Pointer->getType())
+ ->isOpaqueOrPointeeTypeMatches(ElementType) &&
"Incorrect pointer element type");
- assert(!alignment.isZero() && "Alignment cannot be zero");
}
// Deprecated: Use constructor with explicit element type instead.
@@ -46,11 +93,11 @@ public:
Alignment) {}
static Address invalid() { return Address(nullptr); }
- bool isValid() const { return Pointer != nullptr; }
+ bool isValid() const { return A.getPointer() != nullptr; }
llvm::Value *getPointer() const {
assert(isValid());
- return Pointer;
+ return A.getPointer();
}
/// Return the type of the pointer value.
@@ -61,7 +108,7 @@ public:
/// Return the type of the values stored in this address.
llvm::Type *getElementType() const {
assert(isValid());
- return ElementType;
+ return A.getElementType();
}
/// Return the address space that this address resides in.
@@ -77,19 +124,19 @@ public:
/// Return the alignment of this pointer.
CharUnits getAlignment() const {
assert(isValid());
- return Alignment;
+ return A.getAlignment();
}
/// Return address with different pointer, but same element type and
/// alignment.
Address withPointer(llvm::Value *NewPointer) const {
- return Address(NewPointer, ElementType, Alignment);
+ return Address(NewPointer, getElementType(), getAlignment());
}
/// Return address with different alignment, but same pointer and element
/// type.
Address withAlignment(CharUnits NewAlignment) const {
- return Address(Pointer, ElementType, NewAlignment);
+ return Address(getPointer(), getElementType(), NewAlignment);
}
};
diff --git a/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
index bacac0a20d4d..9ae5c870afc8 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
@@ -197,8 +197,7 @@ public:
PassManagerBuilderWrapper(const Triple &TargetTriple,
const CodeGenOptions &CGOpts,
const LangOptions &LangOpts)
- : PassManagerBuilder(), TargetTriple(TargetTriple), CGOpts(CGOpts),
- LangOpts(LangOpts) {}
+ : TargetTriple(TargetTriple), CGOpts(CGOpts), LangOpts(LangOpts) {}
const Triple &getTargetTriple() const { return TargetTriple; }
const CodeGenOptions &getCGOpts() const { return CGOpts; }
const LangOptions &getLangOpts() const { return LangOpts; }
@@ -359,7 +358,8 @@ static void addGeneralOptsForMemorySanitizer(const PassManagerBuilder &Builder,
int TrackOrigins = CGOpts.SanitizeMemoryTrackOrigins;
bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::Memory);
PM.add(createMemorySanitizerLegacyPassPass(
- MemorySanitizerOptions{TrackOrigins, Recover, CompileKernel}));
+ MemorySanitizerOptions{TrackOrigins, Recover, CompileKernel,
+ CGOpts.SanitizeMemoryParamRetval != 0}));
// MemorySanitizer inserts complex instrumentation that mostly follows
// the logic of the original code, but operates on "shadow" values.
@@ -645,6 +645,7 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.MCOptions.CommandLineArgs = CodeGenOpts.CommandLineArgs;
Options.DebugStrictDwarf = CodeGenOpts.DebugStrictDwarf;
Options.ObjectFilenameForDebug = CodeGenOpts.ObjectFilenameForDebug;
+ Options.Hotpatch = CodeGenOpts.HotPatch;
return true;
}
@@ -1164,11 +1165,11 @@ static void addSanitizers(const Triple &TargetTriple,
int TrackOrigins = CodeGenOpts.SanitizeMemoryTrackOrigins;
bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
- MPM.addPass(
- ModuleMemorySanitizerPass({TrackOrigins, Recover, CompileKernel}));
+ MemorySanitizerOptions options(TrackOrigins, Recover, CompileKernel,
+ CodeGenOpts.SanitizeMemoryParamRetval);
+ MPM.addPass(ModuleMemorySanitizerPass(options));
FunctionPassManager FPM;
- FPM.addPass(
- MemorySanitizerPass({TrackOrigins, Recover, CompileKernel}));
+ FPM.addPass(MemorySanitizerPass(options));
if (Level != OptimizationLevel::O0) {
// MemorySanitizer inserts complex instrumentation that mostly
// follows the logic of the original code, but operates on
@@ -1491,8 +1492,11 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
}
// Now that we have all of the passes ready, run them.
- PrettyStackTraceString CrashInfo("Optimizer");
- MPM.run(*TheModule, MAM);
+ {
+ PrettyStackTraceString CrashInfo("Optimizer");
+ llvm::TimeTraceScope TimeScope("Optimizer");
+ MPM.run(*TheModule, MAM);
+ }
}
void EmitAssemblyHelper::RunCodegenPipeline(
@@ -1524,8 +1528,11 @@ void EmitAssemblyHelper::RunCodegenPipeline(
return;
}
- PrettyStackTraceString CrashInfo("Code generation");
- CodeGenPasses.run(*TheModule);
+ {
+ PrettyStackTraceString CrashInfo("Code generation");
+ llvm::TimeTraceScope TimeScope("CodeGenPasses");
+ CodeGenPasses.run(*TheModule);
+ }
}
/// A clean version of `EmitAssembly` that uses the new pass manager.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
index e81c5ba5055c..10569ae2c3f9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
@@ -307,7 +307,7 @@ static RValue emitAtomicLibcall(CodeGenFunction &CGF,
const CGFunctionInfo &fnInfo =
CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
- llvm::AttrBuilder fnAttrB;
+ llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
fnAttrB.addAttribute(llvm::Attribute::WillReturn);
llvm::AttributeList fnAttrs = llvm::AttributeList::get(
@@ -351,12 +351,12 @@ bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
assert(LVal.isSimple());
- llvm::Value *addr = LVal.getPointer(CGF);
- if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
+ Address addr = LVal.getAddress(CGF);
+ if (!requiresMemSetZero(addr.getElementType()))
return false;
CGF.Builder.CreateMemSet(
- addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
+ addr.getPointer(), llvm::ConstantInt::get(CGF.Int8Ty, 0),
CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
LVal.getAlignment().getAsAlign());
return true;
@@ -1522,7 +1522,7 @@ RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
!AsValue)) {
auto *ValTy = AsValue
? CGF.ConvertTypeForMem(ValueTy)
- : getAtomicAddress().getType()->getPointerElementType();
+ : getAtomicAddress().getElementType();
if (ValTy->isIntegerTy()) {
assert(IntVal->getType() == ValTy && "Different integer types.");
return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
index 7bb6dbb8a8ac..1f1de3df857c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
@@ -33,10 +33,10 @@ using namespace clang;
using namespace CodeGen;
CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
- : Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
- HasCXXObject(false), UsesStret(false), HasCapturedVariableLayout(false),
- CapturesNonExternalType(false), LocalAddress(Address::invalid()),
- StructureType(nullptr), Block(block) {
+ : Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
+ NoEscape(false), HasCXXObject(false), UsesStret(false),
+ HasCapturedVariableLayout(false), CapturesNonExternalType(false),
+ LocalAddress(Address::invalid()), StructureType(nullptr), Block(block) {
// Skip asm prefix, if any. 'name' is usually taken directly from
// the mangled name of the enclosing function.
@@ -66,17 +66,6 @@ static llvm::Constant *buildDisposeHelper(CodeGenModule &CGM,
namespace {
-/// Represents a type of copy/destroy operation that should be performed for an
-/// entity that's captured by a block.
-enum class BlockCaptureEntityKind {
- CXXRecord, // Copy or destroy
- ARCWeak,
- ARCStrong,
- NonTrivialCStruct,
- BlockObject, // Assign or release
- None
-};
-
/// Represents a captured entity that requires extra operations in order for
/// this entity to be copied or destroyed correctly.
struct BlockCaptureManagedEntity {
@@ -110,11 +99,7 @@ enum class CaptureStrKind {
} // end anonymous namespace
-static void findBlockCapturedManagedEntities(
- const CGBlockInfo &BlockInfo, const LangOptions &LangOpts,
- SmallVectorImpl<BlockCaptureManagedEntity> &ManagedCaptures);
-
-static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
+static std::string getBlockCaptureStr(const CGBlockInfo::Capture &Cap,
CaptureStrKind StrKind,
CharUnits BlockAlignment,
CodeGenModule &CGM);
@@ -124,34 +109,33 @@ static std::string getBlockDescriptorName(const CGBlockInfo &BlockInfo,
std::string Name = "__block_descriptor_";
Name += llvm::to_string(BlockInfo.BlockSize.getQuantity()) + "_";
- if (BlockInfo.needsCopyDisposeHelpers()) {
+ if (BlockInfo.NeedsCopyDispose) {
if (CGM.getLangOpts().Exceptions)
Name += "e";
if (CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
Name += "a";
Name += llvm::to_string(BlockInfo.BlockAlign.getQuantity()) + "_";
- SmallVector<BlockCaptureManagedEntity, 4> ManagedCaptures;
- findBlockCapturedManagedEntities(BlockInfo, CGM.getContext().getLangOpts(),
- ManagedCaptures);
+ for (auto &Cap : BlockInfo.SortedCaptures) {
+ if (Cap.isConstantOrTrivial())
+ continue;
- for (const BlockCaptureManagedEntity &E : ManagedCaptures) {
- Name += llvm::to_string(E.Capture->getOffset().getQuantity());
+ Name += llvm::to_string(Cap.getOffset().getQuantity());
- if (E.CopyKind == E.DisposeKind) {
+ if (Cap.CopyKind == Cap.DisposeKind) {
// If CopyKind and DisposeKind are the same, merge the capture
// information.
- assert(E.CopyKind != BlockCaptureEntityKind::None &&
+ assert(Cap.CopyKind != BlockCaptureEntityKind::None &&
"shouldn't see BlockCaptureManagedEntity that is None");
- Name += getBlockCaptureStr(E, CaptureStrKind::Merged,
+ Name += getBlockCaptureStr(Cap, CaptureStrKind::Merged,
BlockInfo.BlockAlign, CGM);
} else {
// If CopyKind and DisposeKind are not the same, which can happen when
// either Kind is None or the captured object is a __strong block,
// concatenate the copy and dispose strings.
- Name += getBlockCaptureStr(E, CaptureStrKind::CopyHelper,
+ Name += getBlockCaptureStr(Cap, CaptureStrKind::CopyHelper,
BlockInfo.BlockAlign, CGM);
- Name += getBlockCaptureStr(E, CaptureStrKind::DisposeHelper,
+ Name += getBlockCaptureStr(Cap, CaptureStrKind::DisposeHelper,
BlockInfo.BlockAlign, CGM);
}
}
@@ -223,7 +207,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
// Optional copy/dispose helpers.
bool hasInternalHelper = false;
- if (blockInfo.needsCopyDisposeHelpers()) {
+ if (blockInfo.NeedsCopyDispose) {
// copy_func_helper_decl
llvm::Constant *copyHelper = buildCopyHelper(CGM, blockInfo);
elements.add(copyHelper);
@@ -340,17 +324,21 @@ namespace {
struct BlockLayoutChunk {
CharUnits Alignment;
CharUnits Size;
- Qualifiers::ObjCLifetime Lifetime;
const BlockDecl::Capture *Capture; // null for 'this'
llvm::Type *Type;
QualType FieldType;
+ BlockCaptureEntityKind CopyKind, DisposeKind;
+ BlockFieldFlags CopyFlags, DisposeFlags;
BlockLayoutChunk(CharUnits align, CharUnits size,
- Qualifiers::ObjCLifetime lifetime,
- const BlockDecl::Capture *capture,
- llvm::Type *type, QualType fieldType)
- : Alignment(align), Size(size), Lifetime(lifetime),
- Capture(capture), Type(type), FieldType(fieldType) {}
+ const BlockDecl::Capture *capture, llvm::Type *type,
+ QualType fieldType, BlockCaptureEntityKind CopyKind,
+ BlockFieldFlags CopyFlags,
+ BlockCaptureEntityKind DisposeKind,
+ BlockFieldFlags DisposeFlags)
+ : Alignment(align), Size(size), Capture(capture), Type(type),
+ FieldType(fieldType), CopyKind(CopyKind), DisposeKind(DisposeKind),
+ CopyFlags(CopyFlags), DisposeFlags(DisposeFlags) {}
/// Tell the block info that this chunk has the given field index.
void setIndex(CGBlockInfo &info, unsigned index, CharUnits offset) {
@@ -358,32 +346,93 @@ namespace {
info.CXXThisIndex = index;
info.CXXThisOffset = offset;
} else {
- auto C = CGBlockInfo::Capture::makeIndex(index, offset, FieldType);
- info.Captures.insert({Capture->getVariable(), C});
+ info.SortedCaptures.push_back(CGBlockInfo::Capture::makeIndex(
+ index, offset, FieldType, CopyKind, CopyFlags, DisposeKind,
+ DisposeFlags, Capture));
}
}
+
+ bool isTrivial() const {
+ return CopyKind == BlockCaptureEntityKind::None &&
+ DisposeKind == BlockCaptureEntityKind::None;
+ }
};
- /// Order by 1) all __strong together 2) next, all byfref together 3) next,
- /// all __weak together. Preserve descending alignment in all situations.
+ /// Order by 1) all __strong together 2) next, all block together 3) next,
+ /// all byref together 4) next, all __weak together. Preserve descending
+ /// alignment in all situations.
bool operator<(const BlockLayoutChunk &left, const BlockLayoutChunk &right) {
if (left.Alignment != right.Alignment)
return left.Alignment > right.Alignment;
auto getPrefOrder = [](const BlockLayoutChunk &chunk) {
- if (chunk.Capture && chunk.Capture->isByRef())
- return 1;
- if (chunk.Lifetime == Qualifiers::OCL_Strong)
+ switch (chunk.CopyKind) {
+ case BlockCaptureEntityKind::ARCStrong:
return 0;
- if (chunk.Lifetime == Qualifiers::OCL_Weak)
- return 2;
- return 3;
+ case BlockCaptureEntityKind::BlockObject:
+ switch (chunk.CopyFlags.getBitMask()) {
+ case BLOCK_FIELD_IS_OBJECT:
+ return 0;
+ case BLOCK_FIELD_IS_BLOCK:
+ return 1;
+ case BLOCK_FIELD_IS_BYREF:
+ return 2;
+ default:
+ break;
+ }
+ break;
+ case BlockCaptureEntityKind::ARCWeak:
+ return 3;
+ default:
+ break;
+ }
+ return 4;
};
return getPrefOrder(left) < getPrefOrder(right);
}
} // end anonymous namespace
+static std::pair<BlockCaptureEntityKind, BlockFieldFlags>
+computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
+ const LangOptions &LangOpts);
+
+static std::pair<BlockCaptureEntityKind, BlockFieldFlags>
+computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
+ const LangOptions &LangOpts);
+
+static void addBlockLayout(CharUnits align, CharUnits size,
+ const BlockDecl::Capture *capture, llvm::Type *type,
+ QualType fieldType,
+ SmallVectorImpl<BlockLayoutChunk> &Layout,
+ CGBlockInfo &Info, CodeGenModule &CGM) {
+ if (!capture) {
+ // 'this' capture.
+ Layout.push_back(BlockLayoutChunk(
+ align, size, capture, type, fieldType, BlockCaptureEntityKind::None,
+ BlockFieldFlags(), BlockCaptureEntityKind::None, BlockFieldFlags()));
+ return;
+ }
+
+ const LangOptions &LangOpts = CGM.getLangOpts();
+ BlockCaptureEntityKind CopyKind, DisposeKind;
+ BlockFieldFlags CopyFlags, DisposeFlags;
+
+ std::tie(CopyKind, CopyFlags) =
+ computeCopyInfoForBlockCapture(*capture, fieldType, LangOpts);
+ std::tie(DisposeKind, DisposeFlags) =
+ computeDestroyInfoForBlockCapture(*capture, fieldType, LangOpts);
+ Layout.push_back(BlockLayoutChunk(align, size, capture, type, fieldType,
+ CopyKind, CopyFlags, DisposeKind,
+ DisposeFlags));
+
+ if (Info.NoEscape)
+ return;
+
+ if (!Layout.back().isTrivial())
+ Info.NeedsCopyDispose = true;
+}
+
/// Determines if the given type is safe for constant capture in C++.
static bool isSafeForCXXConstantCapture(QualType type) {
const RecordType *recordType =
@@ -541,6 +590,9 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
CGM.getLangOpts().getGC() == LangOptions::NonGC)
info.HasCapturedVariableLayout = true;
+ if (block->doesNotEscape())
+ info.NoEscape = true;
+
// Collect the layout chunks.
SmallVector<BlockLayoutChunk, 16> layout;
layout.reserve(block->capturesCXXThis() +
@@ -560,9 +612,8 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
auto TInfo = CGM.getContext().getTypeInfoInChars(thisType);
maxFieldAlign = std::max(maxFieldAlign, TInfo.Align);
- layout.push_back(BlockLayoutChunk(TInfo.Align, TInfo.Width,
- Qualifiers::OCL_None,
- nullptr, llvmType, thisType));
+ addBlockLayout(TInfo.Align, TInfo.Width, nullptr, llvmType, thisType,
+ layout, info, CGM);
}
// Next, all the block captures.
@@ -570,9 +621,6 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
const VarDecl *variable = CI.getVariable();
if (CI.isEscapingByref()) {
- // We have to copy/dispose of the __block reference.
- info.NeedsCopyDispose = true;
-
// Just use void* instead of a pointer to the byref type.
CharUnits align = CGM.getPointerAlign();
maxFieldAlign = std::max(maxFieldAlign, align);
@@ -581,72 +629,28 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
// the capture field type should always match.
assert(CGF && getCaptureFieldType(*CGF, CI) == variable->getType() &&
"capture type differs from the variable type");
- layout.push_back(BlockLayoutChunk(align, CGM.getPointerSize(),
- Qualifiers::OCL_None, &CI,
- CGM.VoidPtrTy, variable->getType()));
+ addBlockLayout(align, CGM.getPointerSize(), &CI, CGM.VoidPtrTy,
+ variable->getType(), layout, info, CGM);
continue;
}
// Otherwise, build a layout chunk with the size and alignment of
// the declaration.
if (llvm::Constant *constant = tryCaptureAsConstant(CGM, CGF, variable)) {
- info.Captures[variable] = CGBlockInfo::Capture::makeConstant(constant);
+ info.SortedCaptures.push_back(
+ CGBlockInfo::Capture::makeConstant(constant, &CI));
continue;
}
QualType VT = getCaptureFieldType(*CGF, CI);
- // If we have a lifetime qualifier, honor it for capture purposes.
- // That includes *not* copying it if it's __unsafe_unretained.
- Qualifiers::ObjCLifetime lifetime = VT.getObjCLifetime();
- if (lifetime) {
- switch (lifetime) {
- case Qualifiers::OCL_None: llvm_unreachable("impossible");
- case Qualifiers::OCL_ExplicitNone:
- case Qualifiers::OCL_Autoreleasing:
- break;
-
- case Qualifiers::OCL_Strong:
- case Qualifiers::OCL_Weak:
- info.NeedsCopyDispose = true;
- }
-
- // Block pointers require copy/dispose. So do Objective-C pointers.
- } else if (VT->isObjCRetainableType()) {
- // But honor the inert __unsafe_unretained qualifier, which doesn't
- // actually make it into the type system.
- if (VT->isObjCInertUnsafeUnretainedType()) {
- lifetime = Qualifiers::OCL_ExplicitNone;
- } else {
- info.NeedsCopyDispose = true;
- // used for mrr below.
- lifetime = Qualifiers::OCL_Strong;
- }
-
- // So do types that require non-trivial copy construction.
- } else if (CI.hasCopyExpr()) {
- info.NeedsCopyDispose = true;
- info.HasCXXObject = true;
- if (!VT->getAsCXXRecordDecl()->isExternallyVisible())
- info.CapturesNonExternalType = true;
-
- // So do C structs that require non-trivial copy construction or
- // destruction.
- } else if (VT.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct ||
- VT.isDestructedType() == QualType::DK_nontrivial_c_struct) {
- info.NeedsCopyDispose = true;
-
- // And so do types with destructors.
- } else if (CGM.getLangOpts().CPlusPlus) {
- if (const CXXRecordDecl *record = VT->getAsCXXRecordDecl()) {
- if (!record->hasTrivialDestructor()) {
+ if (CGM.getLangOpts().CPlusPlus)
+ if (const CXXRecordDecl *record = VT->getAsCXXRecordDecl())
+ if (CI.hasCopyExpr() || !record->hasTrivialDestructor()) {
info.HasCXXObject = true;
- info.NeedsCopyDispose = true;
if (!record->isExternallyVisible())
info.CapturesNonExternalType = true;
}
- }
- }
CharUnits size = C.getTypeSizeInChars(VT);
CharUnits align = C.getDeclAlign(variable);
@@ -656,8 +660,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
llvm::Type *llvmType =
CGM.getTypes().ConvertTypeForMem(VT);
- layout.push_back(
- BlockLayoutChunk(align, size, lifetime, &CI, llvmType, VT));
+ addBlockLayout(align, size, &CI, llvmType, VT, layout, info, CGM);
}
// If that was everything, we're done here.
@@ -665,6 +668,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
info.StructureType =
llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
info.CanBeGlobal = true;
+ info.buildCaptureMap();
return;
}
@@ -718,6 +722,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
// ...until we get to the alignment of the maximum field.
if (endAlign >= maxFieldAlign) {
+ ++li;
break;
}
}
@@ -770,6 +775,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
endAlign = getLowBit(blockSize);
}
+ info.buildCaptureMap();
info.StructureType =
llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
}
@@ -826,7 +832,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// If the block is non-escaping, set field 'isa 'to NSConcreteGlobalBlock
// and set the BLOCK_IS_GLOBAL bit of field 'flags'. Copying a non-escaping
// block just returns the original block and releasing it is a no-op.
- llvm::Constant *blockISA = blockInfo.getBlockDecl()->doesNotEscape()
+ llvm::Constant *blockISA = blockInfo.NoEscape
? CGM.getNSConcreteGlobalBlock()
: CGM.getNSConcreteStackBlock();
isa = llvm::ConstantExpr::getBitCast(blockISA, VoidPtrTy);
@@ -838,13 +844,13 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
flags = BLOCK_HAS_SIGNATURE;
if (blockInfo.HasCapturedVariableLayout)
flags |= BLOCK_HAS_EXTENDED_LAYOUT;
- if (blockInfo.needsCopyDisposeHelpers())
+ if (blockInfo.NeedsCopyDispose)
flags |= BLOCK_HAS_COPY_DISPOSE;
if (blockInfo.HasCXXObject)
flags |= BLOCK_HAS_CXX_OBJ;
if (blockInfo.UsesStret)
flags |= BLOCK_USE_STRET;
- if (blockInfo.getBlockDecl()->doesNotEscape())
+ if (blockInfo.NoEscape)
flags |= BLOCK_IS_NOESCAPE | BLOCK_IS_GLOBAL;
}
@@ -1033,7 +1039,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
}
// Push a cleanup for the capture if necessary.
- if (!blockInfo.NeedsCopyDispose)
+ if (!blockInfo.NoEscape && !blockInfo.NeedsCopyDispose)
continue;
// Ignore __block captures; there's nothing special in the on-stack block
@@ -1654,6 +1660,11 @@ computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
// For all other types, the memcpy is fine.
return std::make_pair(BlockCaptureEntityKind::None, BlockFieldFlags());
+ // Honor the inert __unsafe_unretained qualifier, which doesn't actually
+ // make it into the type system.
+ if (T->isObjCInertUnsafeUnretainedType())
+ return std::make_pair(BlockCaptureEntityKind::None, BlockFieldFlags());
+
// Special rules for ARC captures:
Qualifiers QS = T.getQualifiers();
@@ -1669,34 +1680,6 @@ computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
llvm_unreachable("after exhaustive PrimitiveCopyKind switch");
}
-static std::pair<BlockCaptureEntityKind, BlockFieldFlags>
-computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
- const LangOptions &LangOpts);
-
-/// Find the set of block captures that need to be explicitly copied or destroy.
-static void findBlockCapturedManagedEntities(
- const CGBlockInfo &BlockInfo, const LangOptions &LangOpts,
- SmallVectorImpl<BlockCaptureManagedEntity> &ManagedCaptures) {
- for (const auto &CI : BlockInfo.getBlockDecl()->captures()) {
- const VarDecl *Variable = CI.getVariable();
- const CGBlockInfo::Capture &Capture = BlockInfo.getCapture(Variable);
- if (Capture.isConstant())
- continue;
-
- QualType VT = Capture.fieldType();
- auto CopyInfo = computeCopyInfoForBlockCapture(CI, VT, LangOpts);
- auto DisposeInfo = computeDestroyInfoForBlockCapture(CI, VT, LangOpts);
- if (CopyInfo.first != BlockCaptureEntityKind::None ||
- DisposeInfo.first != BlockCaptureEntityKind::None)
- ManagedCaptures.emplace_back(CopyInfo.first, DisposeInfo.first,
- CopyInfo.second, DisposeInfo.second, CI,
- Capture);
- }
-
- // Sort the captures by offset.
- llvm::sort(ManagedCaptures);
-}
-
namespace {
/// Release a __block variable.
struct CallBlockRelease final : EHScopeStack::Cleanup {
@@ -1732,13 +1715,13 @@ bool CodeGenFunction::cxxDestructorCanThrow(QualType T) {
}
// Return a string that has the information about a capture.
-static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
+static std::string getBlockCaptureStr(const CGBlockInfo::Capture &Cap,
CaptureStrKind StrKind,
CharUnits BlockAlignment,
CodeGenModule &CGM) {
std::string Str;
ASTContext &Ctx = CGM.getContext();
- const BlockDecl::Capture &CI = *E.CI;
+ const BlockDecl::Capture &CI = *Cap.Cap;
QualType CaptureTy = CI.getVariable()->getType();
BlockCaptureEntityKind Kind;
@@ -1747,15 +1730,16 @@ static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
// CaptureStrKind::Merged should be passed only when the operations and the
// flags are the same for copy and dispose.
assert((StrKind != CaptureStrKind::Merged ||
- (E.CopyKind == E.DisposeKind && E.CopyFlags == E.DisposeFlags)) &&
+ (Cap.CopyKind == Cap.DisposeKind &&
+ Cap.CopyFlags == Cap.DisposeFlags)) &&
"different operations and flags");
if (StrKind == CaptureStrKind::DisposeHelper) {
- Kind = E.DisposeKind;
- Flags = E.DisposeFlags;
+ Kind = Cap.DisposeKind;
+ Flags = Cap.DisposeFlags;
} else {
- Kind = E.CopyKind;
- Flags = E.CopyFlags;
+ Kind = Cap.CopyKind;
+ Flags = Cap.CopyFlags;
}
switch (Kind) {
@@ -1803,8 +1787,7 @@ static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
}
case BlockCaptureEntityKind::NonTrivialCStruct: {
bool IsVolatile = CaptureTy.isVolatileQualified();
- CharUnits Alignment =
- BlockAlignment.alignmentAtOffset(E.Capture->getOffset());
+ CharUnits Alignment = BlockAlignment.alignmentAtOffset(Cap.getOffset());
Str += "n";
std::string FuncStr;
@@ -1829,7 +1812,7 @@ static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
}
static std::string getCopyDestroyHelperFuncName(
- const SmallVectorImpl<BlockCaptureManagedEntity> &Captures,
+ const SmallVectorImpl<CGBlockInfo::Capture> &Captures,
CharUnits BlockAlignment, CaptureStrKind StrKind, CodeGenModule &CGM) {
assert((StrKind == CaptureStrKind::CopyHelper ||
StrKind == CaptureStrKind::DisposeHelper) &&
@@ -1843,9 +1826,11 @@ static std::string getCopyDestroyHelperFuncName(
Name += "a";
Name += llvm::to_string(BlockAlignment.getQuantity()) + "_";
- for (const BlockCaptureManagedEntity &E : Captures) {
- Name += llvm::to_string(E.Capture->getOffset().getQuantity());
- Name += getBlockCaptureStr(E, StrKind, BlockAlignment, CGM);
+ for (auto &Cap : Captures) {
+ if (Cap.isConstantOrTrivial())
+ continue;
+ Name += llvm::to_string(Cap.getOffset().getQuantity());
+ Name += getBlockCaptureStr(Cap, StrKind, BlockAlignment, CGM);
}
return Name;
@@ -1916,11 +1901,9 @@ static void setBlockHelperAttributesVisibility(bool CapturesNonExternalType,
/// the contents of an individual __block variable to the heap.
llvm::Constant *
CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
- SmallVector<BlockCaptureManagedEntity, 4> CopiedCaptures;
- findBlockCapturedManagedEntities(blockInfo, getLangOpts(), CopiedCaptures);
- std::string FuncName =
- getCopyDestroyHelperFuncName(CopiedCaptures, blockInfo.BlockAlign,
- CaptureStrKind::CopyHelper, CGM);
+ std::string FuncName = getCopyDestroyHelperFuncName(
+ blockInfo.SortedCaptures, blockInfo.BlockAlign,
+ CaptureStrKind::CopyHelper, CGM);
if (llvm::GlobalValue *Func = CGM.getModule().getNamedValue(FuncName))
return llvm::ConstantExpr::getBitCast(Func, VoidPtrTy);
@@ -1967,17 +1950,19 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
dst = Address(Builder.CreateLoad(dst), blockInfo.BlockAlign);
dst = Builder.CreateBitCast(dst, structPtrTy, "block.dest");
- for (const auto &CopiedCapture : CopiedCaptures) {
- const BlockDecl::Capture &CI = *CopiedCapture.CI;
- const CGBlockInfo::Capture &capture = *CopiedCapture.Capture;
+ for (auto &capture : blockInfo.SortedCaptures) {
+ if (capture.isConstantOrTrivial())
+ continue;
+
+ const BlockDecl::Capture &CI = *capture.Cap;
QualType captureType = CI.getVariable()->getType();
- BlockFieldFlags flags = CopiedCapture.CopyFlags;
+ BlockFieldFlags flags = capture.CopyFlags;
unsigned index = capture.getIndex();
Address srcField = Builder.CreateStructGEP(src, index);
Address dstField = Builder.CreateStructGEP(dst, index);
- switch (CopiedCapture.CopyKind) {
+ switch (capture.CopyKind) {
case BlockCaptureEntityKind::CXXRecord:
// If there's an explicit copy expression, we do that.
assert(CI.getCopyExpr() && "copy expression for variable is missing");
@@ -2040,7 +2025,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
// Ensure that we destroy the copied object if an exception is thrown later
// in the helper function.
- pushCaptureCleanup(CopiedCapture.CopyKind, dstField, captureType, flags,
+ pushCaptureCleanup(capture.CopyKind, dstField, captureType, flags,
/*ForCopyHelper*/ true, CI.getVariable(), *this);
}
@@ -2085,8 +2070,10 @@ computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
BlockFieldFlags());
case QualType::DK_none: {
// Non-ARC captures are strong, and we need to use _Block_object_dispose.
+ // But honor the inert __unsafe_unretained qualifier, which doesn't actually
+ // make it into the type system.
if (T->isObjCRetainableType() && !T.getQualifiers().hasObjCLifetime() &&
- !LangOpts.ObjCAutoRefCount)
+ !LangOpts.ObjCAutoRefCount && !T->isObjCInertUnsafeUnretainedType())
return std::make_pair(BlockCaptureEntityKind::BlockObject,
getBlockFieldFlagsForObjCObjectPointer(CI, T));
// Otherwise, we have nothing to do.
@@ -2105,11 +2092,9 @@ computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
/// variable.
llvm::Constant *
CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
- SmallVector<BlockCaptureManagedEntity, 4> DestroyedCaptures;
- findBlockCapturedManagedEntities(blockInfo, getLangOpts(), DestroyedCaptures);
- std::string FuncName =
- getCopyDestroyHelperFuncName(DestroyedCaptures, blockInfo.BlockAlign,
- CaptureStrKind::DisposeHelper, CGM);
+ std::string FuncName = getCopyDestroyHelperFuncName(
+ blockInfo.SortedCaptures, blockInfo.BlockAlign,
+ CaptureStrKind::DisposeHelper, CGM);
if (llvm::GlobalValue *Func = CGM.getModule().getNamedValue(FuncName))
return llvm::ConstantExpr::getBitCast(Func, VoidPtrTy);
@@ -2153,14 +2138,16 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
CodeGenFunction::RunCleanupsScope cleanups(*this);
- for (const auto &DestroyedCapture : DestroyedCaptures) {
- const BlockDecl::Capture &CI = *DestroyedCapture.CI;
- const CGBlockInfo::Capture &capture = *DestroyedCapture.Capture;
- BlockFieldFlags flags = DestroyedCapture.DisposeFlags;
+ for (auto &capture : blockInfo.SortedCaptures) {
+ if (capture.isConstantOrTrivial())
+ continue;
+
+ const BlockDecl::Capture &CI = *capture.Cap;
+ BlockFieldFlags flags = capture.DisposeFlags;
Address srcField = Builder.CreateStructGEP(src, capture.getIndex());
- pushCaptureCleanup(DestroyedCapture.DisposeKind, srcField,
+ pushCaptureCleanup(capture.DisposeKind, srcField,
CI.getVariable()->getType(), flags,
/*ForCopyHelper*/ false, CI.getVariable(), *this);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
index 698ecd3d926a..e8857d98894f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
@@ -26,14 +26,7 @@
#include "clang/Basic/TargetInfo.h"
namespace llvm {
-class Constant;
-class Function;
-class GlobalValue;
-class DataLayout;
-class FunctionType;
-class PointerType;
class Value;
-class LLVMContext;
}
namespace clang {
@@ -148,6 +141,17 @@ public:
CharUnits FieldOffset;
};
+/// Represents a type of copy/destroy operation that should be performed for an
+/// entity that's captured by a block.
+enum class BlockCaptureEntityKind {
+ None,
+ CXXRecord, // Copy or destroy
+ ARCWeak,
+ ARCStrong,
+ NonTrivialCStruct,
+ BlockObject, // Assign or release
+};
+
/// CGBlockInfo - Information to generate a block literal.
class CGBlockInfo {
public:
@@ -197,20 +201,40 @@ public:
return FieldType;
}
- static Capture makeIndex(unsigned index, CharUnits offset,
- QualType FieldType) {
+ static Capture
+ makeIndex(unsigned index, CharUnits offset, QualType FieldType,
+ BlockCaptureEntityKind CopyKind, BlockFieldFlags CopyFlags,
+ BlockCaptureEntityKind DisposeKind, BlockFieldFlags DisposeFlags,
+ const BlockDecl::Capture *Cap) {
Capture v;
v.Data = (index << 1) | 1;
v.Offset = offset.getQuantity();
v.FieldType = FieldType;
+ v.CopyKind = CopyKind;
+ v.CopyFlags = CopyFlags;
+ v.DisposeKind = DisposeKind;
+ v.DisposeFlags = DisposeFlags;
+ v.Cap = Cap;
return v;
}
- static Capture makeConstant(llvm::Value *value) {
+ static Capture makeConstant(llvm::Value *value,
+ const BlockDecl::Capture *Cap) {
Capture v;
v.Data = reinterpret_cast<uintptr_t>(value);
+ v.Cap = Cap;
return v;
}
+
+ bool isConstantOrTrivial() const {
+ return CopyKind == BlockCaptureEntityKind::None &&
+ DisposeKind == BlockCaptureEntityKind::None;
+ }
+
+ BlockCaptureEntityKind CopyKind = BlockCaptureEntityKind::None,
+ DisposeKind = BlockCaptureEntityKind::None;
+ BlockFieldFlags CopyFlags, DisposeFlags;
+ const BlockDecl::Capture *Cap;
};
/// CanBeGlobal - True if the block can be global, i.e. it has
@@ -221,6 +245,9 @@ public:
/// dispose helper functions if the block were escaping.
bool NeedsCopyDispose : 1;
+ /// Indicates whether the block is non-escaping.
+ bool NoEscape : 1;
+
/// HasCXXObject - True if the block's custom copy/dispose functions
/// need to be run even in GC mode.
bool HasCXXObject : 1;
@@ -238,8 +265,11 @@ public:
/// functions.
bool CapturesNonExternalType : 1;
- /// The mapping of allocated indexes within the block.
- llvm::DenseMap<const VarDecl*, Capture> Captures;
+ /// Mapping from variables to pointers to captures in SortedCaptures.
+ llvm::DenseMap<const VarDecl *, Capture *> Captures;
+
+ /// The block's captures. Non-constant captures are sorted by their offsets.
+ llvm::SmallVector<Capture, 4> SortedCaptures;
Address LocalAddress;
llvm::StructType *StructureType;
@@ -263,14 +293,18 @@ public:
/// has been encountered.
CGBlockInfo *NextBlockInfo;
+ void buildCaptureMap() {
+ for (auto &C : SortedCaptures)
+ Captures[C.Cap->getVariable()] = &C;
+ }
+
const Capture &getCapture(const VarDecl *var) const {
return const_cast<CGBlockInfo*>(this)->getCapture(var);
}
Capture &getCapture(const VarDecl *var) {
- llvm::DenseMap<const VarDecl*, Capture>::iterator
- it = Captures.find(var);
+ auto it = Captures.find(var);
assert(it != Captures.end() && "no entry for variable!");
- return it->second;
+ return *it->second;
}
const BlockDecl *getBlockDecl() const { return Block; }
@@ -281,11 +315,6 @@ public:
}
CGBlockInfo(const BlockDecl *blockDecl, StringRef Name);
-
- // Indicates whether the block needs a custom copy or dispose function.
- bool needsCopyDisposeHelpers() const {
- return NeedsCopyDispose && !Block->doesNotEscape();
- }
};
} // end namespace CodeGen
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
index 1982b40ff667..2b7862e618bd 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
@@ -159,6 +159,7 @@ static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
static Value *MakeBinaryAtomicValue(
CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
+
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(T,
@@ -532,13 +533,13 @@ static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
-static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
- const CallExpr *E,
- unsigned IntrinsicID) {
+static Value *emitUnaryBuiltin(CodeGenFunction &CGF, const CallExpr *E,
+ unsigned IntrinsicID,
+ llvm::StringRef Name = "") {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
- return CGF.Builder.CreateCall(F, Src0);
+ return CGF.Builder.CreateCall(F, Src0, Name);
}
// Emit an intrinsic that has 2 operands of the same type as its result.
@@ -1060,7 +1061,10 @@ static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
- return CGF.Builder.CreateCall(IA, {Addr});
+ llvm::CallInst *CI = CGF.Builder.CreateCall(IA, {Addr});
+ CI->addParamAttr(
+ 0, Attribute::get(CGF.getLLVMContext(), Attribute::ElementType, RetType));
+ return CI;
}
namespace {
@@ -3122,24 +3126,34 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
case Builtin::BI__builtin_elementwise_abs: {
- Value *Op0 = EmitScalarExpr(E->getArg(0));
Value *Result;
- if (Op0->getType()->isIntOrIntVectorTy())
+ QualType QT = E->getArg(0)->getType();
+
+ if (auto *VecTy = QT->getAs<VectorType>())
+ QT = VecTy->getElementType();
+ if (QT->isIntegerType())
Result = Builder.CreateBinaryIntrinsic(
- llvm::Intrinsic::abs, Op0, Builder.getFalse(), nullptr, "elt.abs");
+ llvm::Intrinsic::abs, EmitScalarExpr(E->getArg(0)),
+ Builder.getFalse(), nullptr, "elt.abs");
else
- Result = Builder.CreateUnaryIntrinsic(llvm::Intrinsic::fabs, Op0, nullptr,
- "elt.abs");
- return RValue::get(Result);
- }
+ Result = emitUnaryBuiltin(*this, E, llvm::Intrinsic::fabs, "elt.abs");
- case Builtin::BI__builtin_elementwise_ceil: {
- Value *Op0 = EmitScalarExpr(E->getArg(0));
- Value *Result = Builder.CreateUnaryIntrinsic(llvm::Intrinsic::ceil, Op0,
- nullptr, "elt.ceil");
return RValue::get(Result);
}
+ case Builtin::BI__builtin_elementwise_ceil:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::ceil, "elt.ceil"));
+ case Builtin::BI__builtin_elementwise_floor:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::floor, "elt.floor"));
+ case Builtin::BI__builtin_elementwise_roundeven:
+ return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::roundeven,
+ "elt.roundeven"));
+ case Builtin::BI__builtin_elementwise_trunc:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::trunc, "elt.trunc"));
+
case Builtin::BI__builtin_elementwise_max: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
Value *Op1 = EmitScalarExpr(E->getArg(1));
@@ -3174,52 +3188,48 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
case Builtin::BI__builtin_reduce_max: {
- auto GetIntrinsicID = [](QualType QT, llvm::Type *IrTy) {
- if (IrTy->isIntOrIntVectorTy()) {
- if (auto *VecTy = QT->getAs<VectorType>())
- QT = VecTy->getElementType();
- if (QT->isSignedIntegerType())
- return llvm::Intrinsic::vector_reduce_smax;
- else
- return llvm::Intrinsic::vector_reduce_umax;
- }
+ auto GetIntrinsicID = [](QualType QT) {
+ if (auto *VecTy = QT->getAs<VectorType>())
+ QT = VecTy->getElementType();
+ if (QT->isSignedIntegerType())
+ return llvm::Intrinsic::vector_reduce_smax;
+ if (QT->isUnsignedIntegerType())
+ return llvm::Intrinsic::vector_reduce_umax;
+ assert(QT->isFloatingType() && "must have a float here");
return llvm::Intrinsic::vector_reduce_fmax;
};
- Value *Op0 = EmitScalarExpr(E->getArg(0));
- Value *Result = Builder.CreateUnaryIntrinsic(
- GetIntrinsicID(E->getArg(0)->getType(), Op0->getType()), Op0, nullptr,
- "rdx.min");
- return RValue::get(Result);
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
}
case Builtin::BI__builtin_reduce_min: {
- auto GetIntrinsicID = [](QualType QT, llvm::Type *IrTy) {
- if (IrTy->isIntOrIntVectorTy()) {
- if (auto *VecTy = QT->getAs<VectorType>())
- QT = VecTy->getElementType();
- if (QT->isSignedIntegerType())
- return llvm::Intrinsic::vector_reduce_smin;
- else
- return llvm::Intrinsic::vector_reduce_umin;
- }
+ auto GetIntrinsicID = [](QualType QT) {
+ if (auto *VecTy = QT->getAs<VectorType>())
+ QT = VecTy->getElementType();
+ if (QT->isSignedIntegerType())
+ return llvm::Intrinsic::vector_reduce_smin;
+ if (QT->isUnsignedIntegerType())
+ return llvm::Intrinsic::vector_reduce_umin;
+ assert(QT->isFloatingType() && "must have a float here");
return llvm::Intrinsic::vector_reduce_fmin;
};
- Value *Op0 = EmitScalarExpr(E->getArg(0));
- Value *Result = Builder.CreateUnaryIntrinsic(
- GetIntrinsicID(E->getArg(0)->getType(), Op0->getType()), Op0, nullptr,
- "rdx.min");
- return RValue::get(Result);
- }
- case Builtin::BI__builtin_reduce_xor: {
- Value *Op0 = EmitScalarExpr(E->getArg(0));
- Value *Result = Builder.CreateUnaryIntrinsic(
- llvm::Intrinsic::vector_reduce_xor, Op0, nullptr, "rdx.xor");
- return RValue::get(Result);
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
}
+ case Builtin::BI__builtin_reduce_xor:
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, llvm::Intrinsic::vector_reduce_xor, "rdx.xor"));
+ case Builtin::BI__builtin_reduce_or:
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, llvm::Intrinsic::vector_reduce_or, "rdx.or"));
+ case Builtin::BI__builtin_reduce_and:
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, llvm::Intrinsic::vector_reduce_and, "rdx.and"));
+
case Builtin::BI__builtin_matrix_transpose: {
- const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
+ auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
Value *MatValue = EmitScalarExpr(E->getArg(0));
MatrixBuilder<CGBuilderTy> MB(Builder);
Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
@@ -3423,6 +3433,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BIalloca:
case Builtin::BI_alloca:
+ case Builtin::BI__builtin_alloca_uninitialized:
case Builtin::BI__builtin_alloca: {
Value *Size = EmitScalarExpr(E->getArg(0));
const TargetInfo &TI = getContext().getTargetInfo();
@@ -3433,10 +3444,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
.getAsAlign();
AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
AI->setAlignment(SuitableAlignmentInBytes);
- initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
+ if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized)
+ initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
return RValue::get(AI);
}
+ case Builtin::BI__builtin_alloca_with_align_uninitialized:
case Builtin::BI__builtin_alloca_with_align: {
Value *Size = EmitScalarExpr(E->getArg(0));
Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
@@ -3446,7 +3459,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
AI->setAlignment(AlignmentInBytes);
- initializeAlloca(*this, AI, Size, AlignmentInBytes);
+ if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized)
+ initializeAlloca(*this, AI, Size, AlignmentInBytes);
return RValue::get(AI);
}
@@ -4921,7 +4935,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Value *Block =
Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
- AttrBuilder B;
+ AttrBuilder B(Builder.getContext());
B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
llvm::AttributeList ByValAttrSet =
llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
@@ -5860,6 +5874,10 @@ static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
+ NEONMAP1(vqrdmlah_v, arm_neon_vqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlahq_v, arm_neon_vqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlsh_v, arm_neon_vqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmlshq_v, arm_neon_vqrdmlsh, Add1ArgType),
NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
@@ -6085,6 +6103,10 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
+ NEONMAP1(vqrdmlah_v, aarch64_neon_sqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlahq_v, aarch64_neon_sqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlsh_v, aarch64_neon_sqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmlshq_v, aarch64_neon_sqrdmlsh, Add1ArgType),
NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
@@ -6287,6 +6309,10 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
+ NEONMAP1(vqrdmlahh_s16, aarch64_neon_sqrdmlah, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrdmlahs_s32, aarch64_neon_sqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlshh_s16, aarch64_neon_sqrdmlsh, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrdmlshs_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
@@ -14271,73 +14297,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, Ops[0]);
}
}
- case X86::BI__builtin_ia32_pabsb128:
- case X86::BI__builtin_ia32_pabsw128:
- case X86::BI__builtin_ia32_pabsd128:
- case X86::BI__builtin_ia32_pabsb256:
- case X86::BI__builtin_ia32_pabsw256:
- case X86::BI__builtin_ia32_pabsd256:
- case X86::BI__builtin_ia32_pabsq128:
- case X86::BI__builtin_ia32_pabsq256:
- case X86::BI__builtin_ia32_pabsb512:
- case X86::BI__builtin_ia32_pabsw512:
- case X86::BI__builtin_ia32_pabsd512:
- case X86::BI__builtin_ia32_pabsq512: {
- Function *F = CGM.getIntrinsic(Intrinsic::abs, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
- }
- case X86::BI__builtin_ia32_pmaxsb128:
- case X86::BI__builtin_ia32_pmaxsw128:
- case X86::BI__builtin_ia32_pmaxsd128:
- case X86::BI__builtin_ia32_pmaxsq128:
- case X86::BI__builtin_ia32_pmaxsb256:
- case X86::BI__builtin_ia32_pmaxsw256:
- case X86::BI__builtin_ia32_pmaxsd256:
- case X86::BI__builtin_ia32_pmaxsq256:
- case X86::BI__builtin_ia32_pmaxsb512:
- case X86::BI__builtin_ia32_pmaxsw512:
- case X86::BI__builtin_ia32_pmaxsd512:
- case X86::BI__builtin_ia32_pmaxsq512:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smax);
- case X86::BI__builtin_ia32_pmaxub128:
- case X86::BI__builtin_ia32_pmaxuw128:
- case X86::BI__builtin_ia32_pmaxud128:
- case X86::BI__builtin_ia32_pmaxuq128:
- case X86::BI__builtin_ia32_pmaxub256:
- case X86::BI__builtin_ia32_pmaxuw256:
- case X86::BI__builtin_ia32_pmaxud256:
- case X86::BI__builtin_ia32_pmaxuq256:
- case X86::BI__builtin_ia32_pmaxub512:
- case X86::BI__builtin_ia32_pmaxuw512:
- case X86::BI__builtin_ia32_pmaxud512:
- case X86::BI__builtin_ia32_pmaxuq512:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umax);
- case X86::BI__builtin_ia32_pminsb128:
- case X86::BI__builtin_ia32_pminsw128:
- case X86::BI__builtin_ia32_pminsd128:
- case X86::BI__builtin_ia32_pminsq128:
- case X86::BI__builtin_ia32_pminsb256:
- case X86::BI__builtin_ia32_pminsw256:
- case X86::BI__builtin_ia32_pminsd256:
- case X86::BI__builtin_ia32_pminsq256:
- case X86::BI__builtin_ia32_pminsb512:
- case X86::BI__builtin_ia32_pminsw512:
- case X86::BI__builtin_ia32_pminsd512:
- case X86::BI__builtin_ia32_pminsq512:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smin);
- case X86::BI__builtin_ia32_pminub128:
- case X86::BI__builtin_ia32_pminuw128:
- case X86::BI__builtin_ia32_pminud128:
- case X86::BI__builtin_ia32_pminuq128:
- case X86::BI__builtin_ia32_pminub256:
- case X86::BI__builtin_ia32_pminuw256:
- case X86::BI__builtin_ia32_pminud256:
- case X86::BI__builtin_ia32_pminuq256:
- case X86::BI__builtin_ia32_pminub512:
- case X86::BI__builtin_ia32_pminuw512:
- case X86::BI__builtin_ia32_pminud512:
- case X86::BI__builtin_ia32_pminuq512:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umin);
case X86::BI__builtin_ia32_pmuludq128:
case X86::BI__builtin_ia32_pmuludq256:
@@ -14418,12 +14377,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::vector_reduce_add, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0]});
}
- case X86::BI__builtin_ia32_reduce_and_d512:
- case X86::BI__builtin_ia32_reduce_and_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_and, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
case X86::BI__builtin_ia32_reduce_fadd_pd512:
case X86::BI__builtin_ia32_reduce_fadd_ps512:
case X86::BI__builtin_ia32_reduce_fadd_ph512:
@@ -14470,36 +14423,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::vector_reduce_mul, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0]});
}
- case X86::BI__builtin_ia32_reduce_or_d512:
- case X86::BI__builtin_ia32_reduce_or_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_or, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
- case X86::BI__builtin_ia32_reduce_smax_d512:
- case X86::BI__builtin_ia32_reduce_smax_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_smax, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
- case X86::BI__builtin_ia32_reduce_smin_d512:
- case X86::BI__builtin_ia32_reduce_smin_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_smin, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
- case X86::BI__builtin_ia32_reduce_umax_d512:
- case X86::BI__builtin_ia32_reduce_umax_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_umax, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
- case X86::BI__builtin_ia32_reduce_umin_d512:
- case X86::BI__builtin_ia32_reduce_umin_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_umin, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
// 3DNow!
case X86::BI__builtin_ia32_pswapdsf:
@@ -17279,7 +17202,7 @@ static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
return MMA_LDST(4, m16n16k8_load_a_tf32);
case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
- return MMA_LDST(2, m16n16k8_load_b_tf32);
+ return MMA_LDST(4, m16n16k8_load_b_tf32);
case NVPTX::BI__mma_tf32_m16n16k8_ld_c:
return MMA_LDST(8, m16n16k8_load_c_f32);
@@ -18448,15 +18371,11 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
llvm_unreachable("unexpected builtin ID");
}
llvm::Type *SrcT = Vec->getType();
- llvm::Type *TruncT =
- SrcT->getWithNewType(llvm::IntegerType::get(getLLVMContext(), 32));
+ llvm::Type *TruncT = SrcT->getWithNewType(Builder.getInt32Ty());
Function *Callee = CGM.getIntrinsic(IntNo, {TruncT, SrcT});
Value *Trunc = Builder.CreateCall(Callee, Vec);
- Value *Splat = Builder.CreateVectorSplat(2, Builder.getInt32(0));
- Value *ConcatMask =
- llvm::ConstantVector::get({Builder.getInt32(0), Builder.getInt32(1),
- Builder.getInt32(2), Builder.getInt32(3)});
- return Builder.CreateShuffleVector(Trunc, Splat, ConcatMask);
+ Value *Splat = Constant::getNullValue(TruncT);
+ return Builder.CreateShuffleVector(Trunc, Splat, ArrayRef<int>{0, 1, 2, 3});
}
case WebAssembly::BI__builtin_wasm_shuffle_i8x16: {
Value *Ops[18];
@@ -18822,6 +18741,8 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
case RISCV::BI__builtin_riscv_bcompress_64:
case RISCV::BI__builtin_riscv_bdecompress_32:
case RISCV::BI__builtin_riscv_bdecompress_64:
+ case RISCV::BI__builtin_riscv_bfp_32:
+ case RISCV::BI__builtin_riscv_bfp_64:
case RISCV::BI__builtin_riscv_grev_32:
case RISCV::BI__builtin_riscv_grev_64:
case RISCV::BI__builtin_riscv_gorc_32:
@@ -18841,7 +18762,11 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
case RISCV::BI__builtin_riscv_crc32c_b:
case RISCV::BI__builtin_riscv_crc32c_h:
case RISCV::BI__builtin_riscv_crc32c_w:
- case RISCV::BI__builtin_riscv_crc32c_d: {
+ case RISCV::BI__builtin_riscv_crc32c_d:
+ case RISCV::BI__builtin_riscv_fsl_32:
+ case RISCV::BI__builtin_riscv_fsr_32:
+ case RISCV::BI__builtin_riscv_fsl_64:
+ case RISCV::BI__builtin_riscv_fsr_64: {
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin ID");
// Zbb
@@ -18871,6 +18796,12 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
ID = Intrinsic::riscv_bdecompress;
break;
+ // Zbf
+ case RISCV::BI__builtin_riscv_bfp_32:
+ case RISCV::BI__builtin_riscv_bfp_64:
+ ID = Intrinsic::riscv_bfp;
+ break;
+
// Zbp
case RISCV::BI__builtin_riscv_grev_32:
case RISCV::BI__builtin_riscv_grev_64:
@@ -18926,6 +18857,16 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
case RISCV::BI__builtin_riscv_crc32c_d:
ID = Intrinsic::riscv_crc32c_d;
break;
+
+ // Zbt
+ case RISCV::BI__builtin_riscv_fsl_32:
+ case RISCV::BI__builtin_riscv_fsl_64:
+ ID = Intrinsic::riscv_fsl;
+ break;
+ case RISCV::BI__builtin_riscv_fsr_32:
+ case RISCV::BI__builtin_riscv_fsr_64:
+ ID = Intrinsic::riscv_fsr;
+ break;
}
IntrinsicTypes = {ResultType};
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
index 9714730e3c4b..0b441e382f11 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
@@ -154,6 +154,51 @@ void CGCXXABI::setCXXABIThisValue(CodeGenFunction &CGF, llvm::Value *ThisPtr) {
CGF.CXXABIThisValue = ThisPtr;
}
+bool CGCXXABI::mayNeedDestruction(const VarDecl *VD) const {
+ if (VD->needsDestruction(getContext()))
+ return true;
+
+ // If the variable has an incomplete class type (or array thereof), it
+ // might need destruction.
+ const Type *T = VD->getType()->getBaseElementTypeUnsafe();
+ if (T->getAs<RecordType>() && T->isIncompleteType())
+ return true;
+
+ return false;
+}
+
+bool CGCXXABI::isEmittedWithConstantInitializer(
+ const VarDecl *VD, bool InspectInitForWeakDef) const {
+ VD = VD->getMostRecentDecl();
+ if (VD->hasAttr<ConstInitAttr>())
+ return true;
+
+ // All later checks examine the initializer specified on the variable. If
+ // the variable is weak, such examination would not be correct.
+ if (!InspectInitForWeakDef && (VD->isWeak() || VD->hasAttr<SelectAnyAttr>()))
+ return false;
+
+ const VarDecl *InitDecl = VD->getInitializingDeclaration();
+ if (!InitDecl)
+ return false;
+
+ // If there's no initializer to run, this is constant initialization.
+ if (!InitDecl->hasInit())
+ return true;
+
+ // If we have the only definition, we don't need a thread wrapper if we
+ // will emit the value as a constant.
+ if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
+ return !mayNeedDestruction(VD) && InitDecl->evaluateValue();
+
+ // Otherwise, we need a thread wrapper unless we know that every
+ // translation unit will emit the value as a constant. We rely on the
+ // variable being constant-initialized in every translation unit if it's
+ // constant-initialized in any translation unit, which isn't actually
+ // guaranteed by the standard but is necessary for sanity.
+ return InitDecl->hasConstantInitialization();
+}
+
void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
RValue RV, QualType ResultType) {
assert(!CGF.hasAggregateEvaluationKind(ResultType) &&
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
index ea839db7528e..b96222b3ce28 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
@@ -31,7 +31,6 @@ class CXXConstructorDecl;
class CXXDestructorDecl;
class CXXMethodDecl;
class CXXRecordDecl;
-class FieldDecl;
class MangleContext;
namespace CodeGen {
@@ -80,6 +79,18 @@ protected:
ASTContext &getContext() const { return CGM.getContext(); }
+ bool mayNeedDestruction(const VarDecl *VD) const;
+
+ /// Determine whether we will definitely emit this variable with a constant
+ /// initializer, either because the language semantics demand it or because
+ /// we know that the initializer is a constant.
+ // For weak definitions, any initializer available in the current translation
+ // is not necessarily reflective of the initializer used; such initializers
+ // are ignored unless if InspectInitForWeakDef is true.
+ bool
+ isEmittedWithConstantInitializer(const VarDecl *VD,
+ bool InspectInitForWeakDef = false) const;
+
virtual bool requiresArrayCookie(const CXXDeleteExpr *E, QualType eltType);
virtual bool requiresArrayCookie(const CXXNewExpr *E);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
index d70f78fea6b4..a37ff8844e88 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
@@ -1892,7 +1892,7 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
}
void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
- llvm::AttrBuilder FuncAttrs;
+ llvm::AttrBuilder FuncAttrs(F.getContext());
getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
/* AttrOnCallSite = */ false, FuncAttrs);
// TODO: call GetCPUAndFeaturesAttributes?
@@ -2014,8 +2014,8 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
llvm::AttributeList &AttrList,
unsigned &CallingConv,
bool AttrOnCallSite, bool IsThunk) {
- llvm::AttrBuilder FuncAttrs;
- llvm::AttrBuilder RetAttrs;
+ llvm::AttrBuilder FuncAttrs(getLLVMContext());
+ llvm::AttrBuilder RetAttrs(getLLVMContext());
// Collect function IR attributes from the CC lowering.
// We'll collect the paramete and result attributes later.
@@ -2243,7 +2243,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
getLangOpts().Sanitize.has(SanitizerKind::Return);
// Determine if the return type could be partially undef
- if (CodeGenOpts.EnableNoundefAttrs && HasStrictReturn) {
+ if (!CodeGenOpts.DisableNoundefAttrs && HasStrictReturn) {
if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
RetAttrs.addAttribute(llvm::Attribute::NoUndef);
@@ -2302,7 +2302,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
// Attach attributes to sret.
if (IRFunctionArgs.hasSRetArg()) {
- llvm::AttrBuilder SRETAttrs;
+ llvm::AttrBuilder SRETAttrs(getLLVMContext());
SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
hasUsedSRet = true;
if (RetAI.getInReg())
@@ -2314,7 +2314,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
// Attach attributes to inalloca argument.
if (IRFunctionArgs.hasInallocaArg()) {
- llvm::AttrBuilder Attrs;
+ llvm::AttrBuilder Attrs(getLLVMContext());
Attrs.addInAllocaAttr(FI.getArgStruct());
ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
llvm::AttributeSet::get(getLLVMContext(), Attrs);
@@ -2329,7 +2329,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
- llvm::AttrBuilder Attrs;
+ llvm::AttrBuilder Attrs(getLLVMContext());
QualType ThisTy =
FI.arg_begin()->type.castAs<PointerType>()->getPointeeType();
@@ -2364,7 +2364,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
I != E; ++I, ++ArgNo) {
QualType ParamType = I->type;
const ABIArgInfo &AI = I->info;
- llvm::AttrBuilder Attrs;
+ llvm::AttrBuilder Attrs(getLLVMContext());
// Add attribute for padding argument, if necessary.
if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
@@ -2372,14 +2372,15 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
llvm::AttributeSet::get(
getLLVMContext(),
- llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
+ llvm::AttrBuilder(getLLVMContext()).addAttribute(llvm::Attribute::InReg));
}
}
// Decide whether the argument we're handling could be partially undef
- bool ArgNoUndef = DetermineNoUndef(ParamType, getTypes(), DL, AI);
- if (CodeGenOpts.EnableNoundefAttrs && ArgNoUndef)
+ if (!CodeGenOpts.DisableNoundefAttrs &&
+ DetermineNoUndef(ParamType, getTypes(), DL, AI)) {
Attrs.addAttribute(llvm::Attribute::NoUndef);
+ }
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
// have the corresponding parameter variable. It doesn't make
@@ -2519,8 +2520,8 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
unsigned FirstIRArg, NumIRArgs;
std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
for (unsigned i = 0; i < NumIRArgs; i++)
- ArgAttrs[FirstIRArg + i] =
- llvm::AttributeSet::get(getLLVMContext(), Attrs);
+ ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
+ getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), Attrs));
}
}
assert(ArgNo == FI.arg_size());
@@ -2747,11 +2748,11 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
QualType ETy = ArrTy->getElementType();
llvm::Align Alignment =
CGM.getNaturalTypeAlignment(ETy).getAsAlign();
- AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
+ AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
uint64_t ArrSize = ArrTy->getSize().getZExtValue();
if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
ArrSize) {
- llvm::AttrBuilder Attrs;
+ llvm::AttrBuilder Attrs(getLLVMContext());
Attrs.addDereferenceableAttr(
getContext().getTypeSizeInChars(ETy).getQuantity() *
ArrSize);
@@ -2771,7 +2772,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
QualType ETy = ArrTy->getElementType();
llvm::Align Alignment =
CGM.getNaturalTypeAlignment(ETy).getAsAlign();
- AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
+ AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
if (!getContext().getTargetAddressSpace(ETy) &&
!CGM.getCodeGenOpts().NullPointerIsValid)
AI->addAttr(llvm::Attribute::NonNull);
@@ -2793,7 +2794,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
- AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(
+ AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(
llvm::Align(AlignmentInt)));
}
}
@@ -3879,9 +3880,8 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
}
// Create the temporary.
- Address temp = CGF.CreateTempAlloca(destType->getElementType(),
- CGF.getPointerAlign(),
- "icr.temp");
+ Address temp = CGF.CreateTempAlloca(destType->getPointerElementType(),
+ CGF.getPointerAlign(), "icr.temp");
// Loading an l-value can introduce a cleanup if the l-value is __weak,
// and that cleanup will be conditional if we can't prove that the l-value
// isn't null, so we need to register a dominating point so that the cleanups
@@ -3891,9 +3891,8 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// Zero-initialize it if we're not doing a copy-initialization.
bool shouldCopy = CRE->shouldCopy();
if (!shouldCopy) {
- llvm::Value *null =
- llvm::ConstantPointerNull::get(
- cast<llvm::PointerType>(destType->getElementType()));
+ llvm::Value *null = llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(destType->getPointerElementType()));
CGF.Builder.CreateStore(null, temp);
}
@@ -3935,7 +3934,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
assert(srcRV.isScalar());
llvm::Value *src = srcRV.getScalarVal();
- src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
+ src = CGF.Builder.CreateBitCast(src, destType->getPointerElementType(),
"icr.cast");
// Use an ordinary store, not a store-to-lvalue.
@@ -5074,8 +5073,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
#ifndef NDEBUG
// Assert that these structs have equivalent element types.
llvm::StructType *FullTy = CallInfo.getArgStruct();
- llvm::StructType *DeclaredTy = cast<llvm::StructType>(
- cast<llvm::PointerType>(LastParamTy)->getElementType());
+ llvm::StructType *DeclaredTy =
+ cast<llvm::StructType>(LastParamTy->getPointerElementType());
assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
DE = DeclaredTy->element_end(),
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.h b/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
index c8594068c3fc..af63e1bddd2d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
@@ -26,17 +26,13 @@
#include "ABIInfo.h"
namespace llvm {
-class AttributeList;
-class Function;
class Type;
class Value;
} // namespace llvm
namespace clang {
-class ASTContext;
class Decl;
class FunctionDecl;
-class ObjCMethodDecl;
class VarDecl;
namespace CodeGen {
@@ -49,11 +45,11 @@ class CGCalleeInfo {
GlobalDecl CalleeDecl;
public:
- explicit CGCalleeInfo() : CalleeProtoTy(nullptr), CalleeDecl() {}
+ explicit CGCalleeInfo() : CalleeProtoTy(nullptr) {}
CGCalleeInfo(const FunctionProtoType *calleeProtoTy, GlobalDecl calleeDecl)
: CalleeProtoTy(calleeProtoTy), CalleeDecl(calleeDecl) {}
CGCalleeInfo(const FunctionProtoType *calleeProtoTy)
- : CalleeProtoTy(calleeProtoTy), CalleeDecl() {}
+ : CalleeProtoTy(calleeProtoTy) {}
CGCalleeInfo(GlobalDecl calleeDecl)
: CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {}
@@ -116,7 +112,8 @@ public:
assert(functionPtr && "configuring callee without function pointer");
assert(functionPtr->getType()->isPointerTy());
assert(functionPtr->getType()->isOpaquePointerTy() ||
- functionPtr->getType()->getPointerElementType()->isFunctionTy());
+ functionPtr->getType()->getNonOpaquePointerElementType()
+ ->isFunctionTy());
}
static CGCallee forBuiltin(unsigned builtinID,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
index 8f99ff0d50ff..520e119ada26 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
@@ -390,7 +390,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
PHI->addIncoming(Value.getPointer(), notNullBB);
PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
- Value = Address(PHI, Value.getAlignment());
+ Value = Value.withPointer(PHI);
}
return Value;
@@ -1983,7 +1983,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
CharUnits eltAlignment =
arrayBase.getAlignment()
.alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
- Address curAddr = Address(cur, eltAlignment);
+ Address curAddr = Address(cur, elementType, eltAlignment);
// Zero initialize the storage, if requested.
if (zeroInitialize)
@@ -2852,9 +2852,8 @@ llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
SanitizerHandler::CFICheckFail, {}, {});
}
- return Builder.CreateBitCast(
- Builder.CreateExtractValue(CheckedLoad, 0),
- cast<llvm::PointerType>(VTable->getType())->getElementType());
+ return Builder.CreateBitCast(Builder.CreateExtractValue(CheckedLoad, 0),
+ VTable->getType()->getPointerElementType());
}
void CodeGenFunction::EmitForwardingCallToLambda(
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
index 76f3a48f32f3..079a3e25d6dc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
@@ -23,7 +23,6 @@ namespace llvm {
class BasicBlock;
class Value;
class ConstantInt;
-class AllocaInst;
}
namespace clang {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
index 2041d2a5b4c9..c1763cbbc5a0 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
@@ -707,6 +707,10 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
if (Stmt *Ret = S.getReturnStmt())
EmitStmt(Ret);
+
+ // LLVM requires the frontend to add the function attribute. See
+ // Coroutines.rst.
+ CurFn->addFnAttr("coroutine.presplit", "0");
}
// Emit coroutine intrinsic and patch up arguments of the token type.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
index 6e189a61dd20..1a9080604a79 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -354,13 +354,9 @@ CGDebugInfo::computeChecksum(FileID FID, SmallString<32> &Checksum) const {
if (!MemBuffer)
return None;
- llvm::MD5 Hash;
- llvm::MD5::MD5Result Result;
-
- Hash.update(MemBuffer->getBuffer());
- Hash.final(Result);
-
- Hash.stringifyResult(Result, Checksum);
+ llvm::toHex(
+ llvm::MD5::hash(llvm::arrayRefFromStringRef(MemBuffer->getBuffer())),
+ /*LowerCase*/ true, Checksum);
return llvm::DIFile::CSK_MD5;
}
@@ -722,7 +718,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
auto *LowerBound =
llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
llvm::Type::getInt64Ty(CGM.getLLVMContext()), 0));
- SmallVector<int64_t, 9> Expr(
+ SmallVector<uint64_t, 9> Expr(
{llvm::dwarf::DW_OP_constu, NumElemsPerVG, llvm::dwarf::DW_OP_bregx,
/* AArch64::VG */ 46, 0, llvm::dwarf::DW_OP_mul,
llvm::dwarf::DW_OP_constu, 1, llvm::dwarf::DW_OP_minus});
@@ -768,7 +764,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
}
// Element count = (VLENB / SEW) x LMUL
- SmallVector<int64_t, 12> Expr(
+ SmallVector<uint64_t, 12> Expr(
// The DW_OP_bregx operation has two operands: a register which is
// specified by an unsigned LEB128 number, followed by a signed LEB128
// offset.
@@ -3690,7 +3686,7 @@ void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD,
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
// Seek non-virtual primary base root.
- while (1) {
+ while (true) {
const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase);
const CXXRecordDecl *PBT = BRL.getPrimaryBase();
if (PBT && !BRL.isPrimaryBaseVirtual())
@@ -4325,7 +4321,7 @@ void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
}
void CGDebugInfo::AppendAddressSpaceXDeref(
- unsigned AddressSpace, SmallVectorImpl<int64_t> &Expr) const {
+ unsigned AddressSpace, SmallVectorImpl<uint64_t> &Expr) const {
Optional<unsigned> DWARFAddressSpace =
CGM.getTarget().getDWARFAddressSpace(AddressSpace);
if (!DWARFAddressSpace)
@@ -4494,7 +4490,7 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
Line = getLineNumber(VD->getLocation());
Column = getColumnNumber(VD->getLocation());
}
- SmallVector<int64_t, 13> Expr;
+ SmallVector<uint64_t, 13> Expr;
llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
if (VD->isImplicit())
Flags |= llvm::DINode::FlagArtificial;
@@ -4720,7 +4716,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
target.getStructLayout(blockInfo.StructureType)
->getElementOffset(blockInfo.getCapture(VD).getIndex()));
- SmallVector<int64_t, 9> addr;
+ SmallVector<uint64_t, 9> addr;
addr.push_back(llvm::dwarf::DW_OP_deref);
addr.push_back(llvm::dwarf::DW_OP_plus_uconst);
addr.push_back(offset.getQuantity());
@@ -5191,7 +5187,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
} else {
auto Align = getDeclAlignIfRequired(D, CGM.getContext());
- SmallVector<int64_t, 4> Expr;
+ SmallVector<uint64_t, 4> Expr;
unsigned AddressSpace =
CGM.getContext().getTargetAddressSpace(D->getType());
if (CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
index 14ff0eeabd21..a76426e585c8 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
@@ -40,7 +40,6 @@ class ClassTemplateSpecializationDecl;
class GlobalDecl;
class ModuleMap;
class ObjCInterfaceDecl;
-class ObjCIvarDecl;
class UsingDecl;
class VarDecl;
enum class DynamicInitKind : unsigned;
@@ -363,7 +362,7 @@ class CGDebugInfo {
/// Extended dereferencing mechanism is has the following format:
/// DW_OP_constu <DWARF Address Space> DW_OP_swap DW_OP_xderef
void AppendAddressSpaceXDeref(unsigned AddressSpace,
- SmallVectorImpl<int64_t> &Expr) const;
+ SmallVectorImpl<uint64_t> &Expr) const;
/// A helper function to collect debug info for the default elements of a
/// block.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
index e09279c1d455..18d658436086 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
@@ -1392,9 +1392,11 @@ void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
else {
// Create an artificial VarDecl to generate debug info for.
IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
- auto VlaExprTy = VlaSize.NumElts->getType()->getPointerElementType();
+ assert(cast<llvm::PointerType>(VlaSize.NumElts->getType())
+ ->isOpaqueOrPointeeTypeMatches(SizeTy) &&
+ "Number of VLA elements must be SizeTy");
auto QT = getContext().getIntTypeForBitwidth(
- VlaExprTy->getScalarSizeInBits(), false);
+ SizeTy->getScalarSizeInBits(), false);
auto *ArtificialDecl = VarDecl::Create(
getContext(), const_cast<DeclContext *>(D.getDeclContext()),
D.getLocation(), D.getLocation(), NameIdent, QT,
@@ -2250,16 +2252,17 @@ void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
// Shift the address back by one element.
llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
+ llvm::Type *llvmElementType = ConvertTypeForMem(elementType);
llvm::Value *element = Builder.CreateInBoundsGEP(
- elementPast->getType()->getPointerElementType(), elementPast, negativeOne,
- "arraydestroy.element");
+ llvmElementType, elementPast, negativeOne, "arraydestroy.element");
if (useEHCleanup)
pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
destroyer);
// Perform the actual destruction there.
- destroyer(*this, Address(element, elementAlign), elementType);
+ destroyer(*this, Address(element, llvmElementType, elementAlign),
+ elementType);
if (useEHCleanup)
PopCleanupBlock();
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
index 3579761f1429..7b880c1354e1 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -136,6 +136,7 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
}
// Otherwise, the standard logic requires a helper function.
} else {
+ Addr = Addr.getElementBitCast(CGF.ConvertTypeForMem(Type));
Func = CodeGenFunction(CGM)
.generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
CGF.needsEHCleanup(DtorKind), &D);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
index 34b4951a7f72..0fb7ec26a85e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
@@ -1931,7 +1931,7 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
if (LV.isMatrixElt()) {
llvm::Value *Idx = LV.getMatrixIdx();
if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
- const auto *const MatTy = LV.getType()->getAs<ConstantMatrixType>();
+ const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
}
@@ -2077,7 +2077,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isMatrixElt()) {
llvm::Value *Idx = Dst.getMatrixIdx();
if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
- const auto *const MatTy = Dst.getType()->getAs<ConstantMatrixType>();
+ const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
}
@@ -3178,7 +3178,7 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF,
bool MayReturn =
!IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
- llvm::AttrBuilder B;
+ llvm::AttrBuilder B(CGF.getLLVMContext());
if (!MayReturn) {
B.addAttribute(llvm::Attribute::NoReturn)
.addAttribute(llvm::Attribute::NoUnwind);
@@ -4699,12 +4699,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
if (LV.isSimple()) {
Address V = LV.getAddress(*this);
if (V.isValid()) {
- llvm::Type *T =
- ConvertTypeForMem(E->getType())
- ->getPointerTo(
- cast<llvm::PointerType>(V.getType())->getAddressSpace());
- if (V.getType() != T)
- LV.setAddress(Builder.CreateBitCast(V, T));
+ llvm::Type *T = ConvertTypeForMem(E->getType());
+ if (V.getElementType() != T)
+ LV.setAddress(Builder.CreateElementBitCast(V, T));
}
}
return LV;
@@ -4763,8 +4760,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
CGM.EmitExplicitCastExprType(CE, this);
LValue LV = EmitLValue(E->getSubExpr());
- Address V = Builder.CreateBitCast(LV.getAddress(*this),
- ConvertType(CE->getTypeAsWritten()));
+ Address V = Builder.CreateElementBitCast(
+ LV.getAddress(*this),
+ ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
EmitVTablePtrCheckForCast(E->getType(), V.getPointer(),
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
index 3b996b89a1d7..0968afd82064 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
@@ -614,8 +614,8 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
// every temporary created in a default argument is sequenced before
// the construction of the next array element, if any
CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
- LValue elementLV =
- CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
+ LValue elementLV = CGF.MakeAddrLValue(
+ Address(currentElement, llvmElementType, elementAlign), elementType);
if (filler)
EmitInitializationToLValue(filler, elementLV);
else
@@ -1801,6 +1801,7 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
CharUnits elementAlign =
destPtr.getAlignment().alignmentOfArrayElement(elementSize);
+ llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
@@ -1810,8 +1811,8 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
llvm::PHINode *index =
Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
index->addIncoming(zero, entryBB);
- llvm::Value *element = Builder.CreateInBoundsGEP(
- begin->getType()->getPointerElementType(), begin, index);
+ llvm::Value *element =
+ Builder.CreateInBoundsGEP(llvmElementType, begin, index);
// Prepare for a cleanup.
QualType::DestructionKind dtorKind = elementType.isDestructedType();
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
index e32462eb635c..4e8933fffe03 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
@@ -1613,8 +1613,9 @@ ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
if (GlobalConstStr->getType()->getPointerAddressSpace() == ExprAS)
return GlobalConstStr;
- llvm::Type *EltTy = GlobalConstStr->getType()->getPointerElementType();
- llvm::PointerType *NewPtrTy = llvm::PointerType::get(EltTy, ExprAS);
+ llvm::PointerType *PtrTy = cast<llvm::PointerType>(GlobalConstStr->getType());
+ llvm::PointerType *NewPtrTy =
+ llvm::PointerType::getWithSamePointeeType(PtrTy, ExprAS);
return Builder.CreateAddrSpaceCast(GlobalConstStr, NewPtrTy, "usn_addr_cast");
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
index b5bcf157036d..8cc609186f9e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
@@ -847,7 +847,7 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
// FIXME: Allow unaligned atomic load/store on x86. (It is not
// currently supported by the backend.)
- return 0;
+ return false;
}
/// Return the maximum size that permits atomic accesses for the given
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
index b2bf60d2c0fc..52b449090868 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -2347,9 +2347,10 @@ llvm::Value *CGObjCGNU::GetTypedSelector(CodeGenFunction &CGF, Selector Sel,
}
}
if (!SelValue) {
- SelValue = llvm::GlobalAlias::create(
- SelectorTy->getElementType(), 0, llvm::GlobalValue::PrivateLinkage,
- ".objc_selector_" + Sel.getAsString(), &TheModule);
+ SelValue = llvm::GlobalAlias::create(SelectorTy->getPointerElementType(), 0,
+ llvm::GlobalValue::PrivateLinkage,
+ ".objc_selector_" + Sel.getAsString(),
+ &TheModule);
Types.emplace_back(TypeEncoding, SelValue);
}
@@ -2576,14 +2577,16 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
if (IsClassMessage) {
if (!MetaClassPtrAlias) {
MetaClassPtrAlias = llvm::GlobalAlias::create(
- IdTy->getElementType(), 0, llvm::GlobalValue::InternalLinkage,
+ IdTy->getPointerElementType(), 0,
+ llvm::GlobalValue::InternalLinkage,
".objc_metaclass_ref" + Class->getNameAsString(), &TheModule);
}
ReceiverClass = MetaClassPtrAlias;
} else {
if (!ClassPtrAlias) {
ClassPtrAlias = llvm::GlobalAlias::create(
- IdTy->getElementType(), 0, llvm::GlobalValue::InternalLinkage,
+ IdTy->getPointerElementType(), 0,
+ llvm::GlobalValue::InternalLinkage,
".objc_class_ref" + Class->getNameAsString(), &TheModule);
}
ReceiverClass = ClassPtrAlias;
@@ -3706,7 +3709,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
GenerateProtocolHolderCategory();
llvm::StructType *selStructTy =
- dyn_cast<llvm::StructType>(SelectorTy->getElementType());
+ dyn_cast<llvm::StructType>(SelectorTy->getPointerElementType());
llvm::Type *selStructPtrTy = SelectorTy;
if (!selStructTy) {
selStructTy = llvm::StructType::get(CGM.getLLVMContext(),
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
index 425d1a793439..e7dba4c8feab 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
@@ -2138,16 +2138,7 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
const ObjCCommonTypesHelper &ObjCTypes) {
CodeGenTypes &Types = CGM.getTypes();
auto selTy = CGF.getContext().getObjCSelType();
- llvm::Value *SelValue;
-
- if (Method && Method->isDirectMethod()) {
- // Direct methods will synthesize the proper `_cmd` internally,
- // so just don't bother with setting the `_cmd` argument.
- assert(!IsSuper);
- SelValue = llvm::UndefValue::get(Types.ConvertType(selTy));
- } else {
- SelValue = GetSelector(CGF, Sel);
- }
+ llvm::Value *SelValue = llvm::UndefValue::get(Types.ConvertType(selTy));
CallArgList ActualArgs;
if (!IsSuper)
@@ -2168,10 +2159,15 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
canMessageReceiverBeNull(CGF, Method, IsSuper, ClassReceiver, Arg0);
bool RequiresNullCheck = false;
+ bool RequiresSelValue = true;
llvm::FunctionCallee Fn = nullptr;
if (Method && Method->isDirectMethod()) {
+ assert(!IsSuper);
Fn = GenerateDirectMethod(Method, Method->getClassInterface());
+ // Direct methods will synthesize the proper `_cmd` internally,
+ // so just don't bother with setting the `_cmd` argument.
+ RequiresSelValue = false;
} else if (CGM.ReturnSlotInterferesWithArgs(MSI.CallInfo)) {
if (ReceiverCanBeNull) RequiresNullCheck = true;
Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
@@ -2209,6 +2205,12 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
nullReturn.init(CGF, Arg0);
}
+ // If a selector value needs to be passed, emit the load before the call.
+ if (RequiresSelValue) {
+ SelValue = GetSelector(CGF, Sel);
+ ActualArgs[1] = CallArg(RValue::get(SelValue), selTy);
+ }
+
llvm::CallBase *CallSite;
CGCallee Callee = CGCallee::forDirect(BitcastFn);
RValue rvalue = CGF.EmitCall(MSI.CallInfo, Callee, Return, ActualArgs,
@@ -2487,7 +2489,7 @@ void CGObjCCommonMac::BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
if (FQT->isUnionType())
HasUnion = true;
- BuildRCBlockVarRecordLayout(FQT->getAs<RecordType>(),
+ BuildRCBlockVarRecordLayout(FQT->castAs<RecordType>(),
BytePos + FieldOffset, HasUnion);
continue;
}
@@ -2935,8 +2937,7 @@ CGObjCCommonMac::BuildRCBlockLayout(CodeGenModule &CGM,
std::string CGObjCCommonMac::getRCBlockLayoutStr(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
fillRunSkipBlockVars(CGM, blockInfo);
- return getBlockLayoutInfoString(RunSkipBlockVars,
- blockInfo.needsCopyDisposeHelpers());
+ return getBlockLayoutInfoString(RunSkipBlockVars, blockInfo.NeedsCopyDispose);
}
llvm::Constant *CGObjCCommonMac::BuildByrefLayout(CodeGen::CodeGenModule &CGM,
@@ -4370,7 +4371,11 @@ FragileHazards::FragileHazards(CodeGenFunction &CGF) : CGF(CGF) {
void FragileHazards::emitWriteHazard() {
if (Locals.empty()) return;
- CGF.EmitNounwindRuntimeCall(WriteHazard, Locals);
+ llvm::CallInst *Call = CGF.EmitNounwindRuntimeCall(WriteHazard, Locals);
+ for (auto Pair : llvm::enumerate(Locals))
+ Call->addParamAttr(Pair.index(), llvm::Attribute::get(
+ CGF.getLLVMContext(), llvm::Attribute::ElementType,
+ cast<llvm::AllocaInst>(Pair.value())->getAllocatedType()));
}
void FragileHazards::emitReadHazard(CGBuilderTy &Builder) {
@@ -4378,6 +4383,10 @@ void FragileHazards::emitReadHazard(CGBuilderTy &Builder) {
llvm::CallInst *call = Builder.CreateCall(ReadHazard, Locals);
call->setDoesNotThrow();
call->setCallingConv(CGF.getRuntimeCC());
+ for (auto Pair : llvm::enumerate(Locals))
+ call->addParamAttr(Pair.index(), llvm::Attribute::get(
+ Builder.getContext(), llvm::Attribute::ElementType,
+ cast<llvm::AllocaInst>(Pair.value())->getAllocatedType()));
}
/// Emit read hazards in all the protected blocks, i.e. all the blocks
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index e35c15421520..db1c3ca191ca 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -837,12 +837,11 @@ void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
}
llvm::Value *Size;
llvm::Value *SizeInChars;
- auto *ElemType =
- cast<llvm::PointerType>(OrigAddresses[N].first.getPointer(CGF)->getType())
- ->getElementType();
+ auto *ElemType = OrigAddresses[N].first.getAddress(CGF).getElementType();
auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
if (AsArraySection) {
- Size = CGF.Builder.CreatePtrDiff(OrigAddresses[N].second.getPointer(CGF),
+ Size = CGF.Builder.CreatePtrDiff(ElemType,
+ OrigAddresses[N].second.getPointer(CGF),
OrigAddresses[N].first.getPointer(CGF));
Size = CGF.Builder.CreateNUWAdd(
Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
@@ -1008,7 +1007,8 @@ Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
OriginalBaseLValue);
Address SharedAddr = SharedAddresses[N].first.getAddress(CGF);
llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
- BaseLValue.getPointer(CGF), SharedAddr.getPointer());
+ SharedAddr.getElementType(), BaseLValue.getPointer(CGF),
+ SharedAddr.getPointer());
llvm::Value *PrivatePointer =
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
PrivateAddr.getPointer(), SharedAddr.getType());
@@ -1429,24 +1429,25 @@ static StringRef getIdentStringFromSourceLocation(CodeGenFunction &CGF,
llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
SourceLocation Loc,
unsigned Flags) {
+ uint32_t SrcLocStrSize;
llvm::Constant *SrcLocStr;
if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
Loc.isInvalid()) {
- SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr();
+ SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
} else {
- std::string FunctionName = "";
+ std::string FunctionName;
if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
FunctionName = FD->getQualifiedNameAsString();
PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
const char *FileName = PLoc.getFilename();
unsigned Line = PLoc.getLine();
unsigned Column = PLoc.getColumn();
- SrcLocStr =
- OMPBuilder.getOrCreateSrcLocStr(FunctionName, FileName, Line, Column);
+ SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FunctionName, FileName, Line,
+ Column, SrcLocStrSize);
}
unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
- return OMPBuilder.getOrCreateIdent(SrcLocStr, llvm::omp::IdentFlag(Flags),
- Reserved2Flags);
+ return OMPBuilder.getOrCreateIdent(
+ SrcLocStr, SrcLocStrSize, llvm::omp::IdentFlag(Flags), Reserved2Flags);
}
llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
@@ -1457,10 +1458,11 @@ llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
if (CGM.getLangOpts().OpenMPIRBuilder) {
SmallString<128> Buffer;
OMPBuilder.updateToLocation(CGF.Builder.saveIP());
+ uint32_t SrcLocStrSize;
auto *SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(
- getIdentStringFromSourceLocation(CGF, Loc, Buffer));
+ getIdentStringFromSourceLocation(CGF, Loc, Buffer), SrcLocStrSize);
return OMPBuilder.getOrCreateThreadID(
- OMPBuilder.getOrCreateIdent(SrcLocStr));
+ OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize));
}
llvm::Value *ThreadID = nullptr;
@@ -3464,8 +3466,7 @@ static bool isAllocatableDecl(const VarDecl *VD) {
return false;
const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
// Use the default allocation.
- return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
- AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
+ return !(AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc &&
!AA->getAllocator());
}
@@ -8120,7 +8121,7 @@ private:
.getAddress(CGF);
}
Size = CGF.Builder.CreatePtrDiff(
- CGF.EmitCastToVoidPtr(ComponentLB.getPointer()),
+ CGF.Int8Ty, CGF.EmitCastToVoidPtr(ComponentLB.getPointer()),
CGF.EmitCastToVoidPtr(LB.getPointer()));
break;
}
@@ -8141,7 +8142,7 @@ private:
CombinedInfo.BasePointers.push_back(BP.getPointer());
CombinedInfo.Pointers.push_back(LB.getPointer());
Size = CGF.Builder.CreatePtrDiff(
- CGF.Builder.CreateConstGEP(HB, 1).getPointer(),
+ CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).getPointer(),
CGF.EmitCastToVoidPtr(LB.getPointer()));
CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
@@ -8967,7 +8968,7 @@ public:
CGF.Builder.CreateConstGEP1_32(HBAddr.getElementType(), HB, /*Idx0=*/1);
llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
- llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CHAddr, CLAddr);
+ llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CGF.Int8Ty, CHAddr, CLAddr);
llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.Int64Ty,
/*isSigned=*/false);
CombinedInfo.Sizes.push_back(Size);
@@ -9527,8 +9528,9 @@ llvm::Constant *
emitMappingInformation(CodeGenFunction &CGF, llvm::OpenMPIRBuilder &OMPBuilder,
MappableExprsHandler::MappingExprInfo &MapExprs) {
+ uint32_t SrcLocStrSize;
if (!MapExprs.getMapDecl() && !MapExprs.getMapExpr())
- return OMPBuilder.getOrCreateDefaultSrcLocStr();
+ return OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
SourceLocation Loc;
if (!MapExprs.getMapDecl() && MapExprs.getMapExpr()) {
@@ -9540,7 +9542,7 @@ emitMappingInformation(CodeGenFunction &CGF, llvm::OpenMPIRBuilder &OMPBuilder,
Loc = MapExprs.getMapDecl()->getLocation();
}
- std::string ExprName = "";
+ std::string ExprName;
if (MapExprs.getMapExpr()) {
PrintingPolicy P(CGF.getContext().getLangOpts());
llvm::raw_string_ostream OS(ExprName);
@@ -9551,8 +9553,9 @@ emitMappingInformation(CodeGenFunction &CGF, llvm::OpenMPIRBuilder &OMPBuilder,
}
PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
- return OMPBuilder.getOrCreateSrcLocStr(PLoc.getFilename(), ExprName.c_str(),
- PLoc.getLine(), PLoc.getColumn());
+ return OMPBuilder.getOrCreateSrcLocStr(PLoc.getFilename(), ExprName,
+ PLoc.getLine(), PLoc.getColumn(),
+ SrcLocStrSize);
}
/// Emit the arrays used to pass the captures and map information to the
@@ -10216,8 +10219,7 @@ void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
llvm::Value *Cond;
if (IsInit) {
// base != begin?
- llvm::Value *BaseIsBegin = MapperCGF.Builder.CreateIsNotNull(
- MapperCGF.Builder.CreatePtrDiff(Base, Begin));
+ llvm::Value *BaseIsBegin = MapperCGF.Builder.CreateICmpNE(Base, Begin);
// IsPtrAndObj?
llvm::Value *PtrAndObjBit = MapperCGF.Builder.CreateAnd(
MapType,
@@ -10581,7 +10583,7 @@ void CGOpenMPRuntime::emitTargetCall(
emitOffloadingArraysArgument(
CGF, Info.BasePointersArray, Info.PointersArray, Info.SizesArray,
Info.MapTypesArray, Info.MapNamesArray, Info.MappersArray, Info,
- {/*ForEndTask=*/false});
+ {/*ForEndCall=*/false});
InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
InputInfo.BasePointersArray =
@@ -11463,7 +11465,7 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
emitOffloadingArraysArgument(
CGF, Info.BasePointersArray, Info.PointersArray, Info.SizesArray,
Info.MapTypesArray, Info.MapNamesArray, Info.MappersArray, Info,
- {/*ForEndTask=*/false});
+ {/*ForEndCall=*/false});
InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
InputInfo.BasePointersArray =
Address(Info.BasePointersArray, CGM.getPointerAlign());
@@ -12213,6 +12215,26 @@ Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
return CGF.GetAddrOfLocalVar(NativeParam);
}
+/// Return allocator value from expression, or return a null allocator (default
+/// when no allocator specified).
+static llvm::Value *getAllocatorVal(CodeGenFunction &CGF,
+ const Expr *Allocator) {
+ llvm::Value *AllocVal;
+ if (Allocator) {
+ AllocVal = CGF.EmitScalarExpr(Allocator);
+ // According to the standard, the original allocator type is a enum
+ // (integer). Convert to pointer type, if required.
+ AllocVal = CGF.EmitScalarConversion(AllocVal, Allocator->getType(),
+ CGF.getContext().VoidPtrTy,
+ Allocator->getExprLoc());
+ } else {
+ // If no allocator specified, it defaults to the null allocator.
+ AllocVal = llvm::Constant::getNullValue(
+ CGF.CGM.getTypes().ConvertType(CGF.getContext().VoidPtrTy));
+ }
+ return AllocVal;
+}
+
Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) {
if (!VD)
@@ -12249,20 +12271,24 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
}
llvm::Value *ThreadID = getThreadID(CGF, CVD->getBeginLoc());
const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
- assert(AA->getAllocator() &&
- "Expected allocator expression for non-default allocator.");
- llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
- // According to the standard, the original allocator type is a enum
- // (integer). Convert to pointer type, if required.
- Allocator = CGF.EmitScalarConversion(
- Allocator, AA->getAllocator()->getType(), CGF.getContext().VoidPtrTy,
- AA->getAllocator()->getExprLoc());
- llvm::Value *Args[] = {ThreadID, Size, Allocator};
-
- llvm::Value *Addr =
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_alloc),
- Args, getName({CVD->getName(), ".void.addr"}));
+ const Expr *Allocator = AA->getAllocator();
+ llvm::Value *AllocVal = getAllocatorVal(CGF, Allocator);
+ llvm::Value *Alignment =
+ AA->getAlignment()
+ ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(AA->getAlignment()),
+ CGM.SizeTy, /*isSigned=*/false)
+ : nullptr;
+ SmallVector<llvm::Value *, 4> Args;
+ Args.push_back(ThreadID);
+ if (Alignment)
+ Args.push_back(Alignment);
+ Args.push_back(Size);
+ Args.push_back(AllocVal);
+ llvm::omp::RuntimeFunction FnID =
+ Alignment ? OMPRTL___kmpc_aligned_alloc : OMPRTL___kmpc_alloc;
+ llvm::Value *Addr = CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), FnID), Args,
+ getName({CVD->getName(), ".void.addr"}));
llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_free);
QualType Ty = CGM.getContext().getPointerType(CVD->getType());
@@ -12276,14 +12302,14 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
llvm::FunctionCallee RTLFn;
SourceLocation::UIntTy LocEncoding;
Address Addr;
- const Expr *Allocator;
+ const Expr *AllocExpr;
public:
OMPAllocateCleanupTy(llvm::FunctionCallee RTLFn,
SourceLocation::UIntTy LocEncoding, Address Addr,
- const Expr *Allocator)
+ const Expr *AllocExpr)
: RTLFn(RTLFn), LocEncoding(LocEncoding), Addr(Addr),
- Allocator(Allocator) {}
+ AllocExpr(AllocExpr) {}
void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
if (!CGF.HaveInsertPoint())
return;
@@ -12292,14 +12318,8 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
CGF, SourceLocation::getFromRawEncoding(LocEncoding));
Args[1] = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
Addr.getPointer(), CGF.VoidPtrTy);
- llvm::Value *AllocVal = CGF.EmitScalarExpr(Allocator);
- // According to the standard, the original allocator type is a enum
- // (integer). Convert to pointer type, if required.
- AllocVal = CGF.EmitScalarConversion(AllocVal, Allocator->getType(),
- CGF.getContext().VoidPtrTy,
- Allocator->getExprLoc());
+ llvm::Value *AllocVal = getAllocatorVal(CGF, AllocExpr);
Args[2] = AllocVal;
-
CGF.EmitRuntimeCall(RTLFn, Args);
}
};
@@ -12307,7 +12327,7 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
UntiedRealAddr.isValid() ? UntiedRealAddr : Address(Addr, Align);
CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(
NormalAndEHCleanup, FiniRTLFn, CVD->getLocation().getRawEncoding(),
- VDAddr, AA->getAllocator());
+ VDAddr, Allocator);
if (UntiedRealAddr.isValid())
if (auto *Region =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
index b83ec78696d1..19754b0cfacc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -35,7 +35,6 @@ class ArrayType;
class Constant;
class FunctionType;
class GlobalVariable;
-class StructType;
class Type;
class Value;
class OpenMPIRBuilder;
@@ -48,7 +47,6 @@ class OMPExecutableDirective;
class OMPLoopDirective;
class VarDecl;
class OMPDeclareReductionDecl;
-class IdentifierInfo;
namespace CodeGen {
class Address;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 866454ddeaed..e09ea5e01b1a 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -1402,10 +1402,14 @@ void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
// Allocate space for the variable to be globalized
llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
- llvm::Instruction *VoidPtr =
+ llvm::CallBase *VoidPtr =
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_alloc_shared),
AllocArgs, VD->getName());
+ // FIXME: We should use the variables actual alignment as an argument.
+ VoidPtr->addRetAttr(llvm::Attribute::get(
+ CGM.getLLVMContext(), llvm::Attribute::Alignment,
+ CGM.getContext().getTargetInfo().getNewAlign() / 8));
// Cast the void pointer and get the address of the globalized variable.
llvm::PointerType *VarPtrTy = CGF.ConvertTypeForMem(VarTy)->getPointerTo();
@@ -1438,10 +1442,13 @@ void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
// Allocate space for this VLA object to be globalized.
llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
- llvm::Instruction *VoidPtr =
+ llvm::CallBase *VoidPtr =
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_alloc_shared),
AllocArgs, VD->getName());
+ VoidPtr->addRetAttr(
+ llvm::Attribute::get(CGM.getLLVMContext(), llvm::Attribute::Alignment,
+ CGM.getContext().getTargetInfo().getNewAlign()));
I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(
std::pair<llvm::Value *, llvm::Value *>(
@@ -1791,8 +1798,9 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
Ptr = Address(PhiSrc, Ptr.getAlignment());
ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
llvm::Value *PtrDiff = Bld.CreatePtrDiff(
- PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
- Ptr.getPointer(), CGF.VoidPtrTy));
+ CGF.Int8Ty, PtrEnd.getPointer(),
+ Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr.getPointer(),
+ CGF.VoidPtrTy));
Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
ThenBB, ExitBB);
CGF.EmitBlock(ThenBB);
@@ -3394,12 +3402,13 @@ CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF,
LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
// First cast to generic.
TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
- /*AddrSpace=*/0));
+ TargetAddr, llvm::PointerType::getWithSamePointeeType(
+ cast<llvm::PointerType>(TargetAddr->getType()), /*AddrSpace=*/0));
// Cast from generic to native address space.
TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
- NativePointeeAddrSpace));
+ TargetAddr, llvm::PointerType::getWithSamePointeeType(
+ cast<llvm::PointerType>(TargetAddr->getType()),
+ NativePointeeAddrSpace));
Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
NativeParamType);
@@ -3424,8 +3433,8 @@ void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
continue;
}
llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- NativeArg,
- NativeArg->getType()->getPointerElementType()->getPointerTo());
+ NativeArg, llvm::PointerType::getWithSamePointeeType(
+ cast<llvm::PointerType>(NativeArg->getType()), /*AddrSpace*/ 0));
TargetArgs.emplace_back(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h
index e6665b72bcba..5a3bcdf72f7b 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h
@@ -93,8 +93,8 @@ struct CGBitFieldInfo {
CharUnits VolatileStorageOffset;
CGBitFieldInfo()
- : Offset(), Size(), IsSigned(), StorageSize(), StorageOffset(),
- VolatileOffset(), VolatileStorageSize(), VolatileStorageOffset() {}
+ : Offset(), Size(), IsSigned(), StorageSize(), VolatileOffset(),
+ VolatileStorageSize() {}
CGBitFieldInfo(unsigned Offset, unsigned Size, bool IsSigned,
unsigned StorageSize, CharUnits StorageOffset)
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index cf8313f92587..6f85bca8a201 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -411,7 +411,7 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
continue;
}
llvm::Type *Type =
- Types.ConvertTypeForMem(Field->getType(), /*ForBitFields=*/true);
+ Types.ConvertTypeForMem(Field->getType(), /*ForBitField=*/true);
// If we don't have a run yet, or don't live within the previous run's
// allocated storage then we allocate some storage and start a new run.
if (Run == FieldEnd || BitOffset >= Tail) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
index ef0068cd3b0c..520483bc08b6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
@@ -2109,42 +2109,35 @@ AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
}
-llvm::Value*
-CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
- LValue InputValue, QualType InputType,
- std::string &ConstraintStr,
- SourceLocation Loc) {
- llvm::Value *Arg;
+std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
+ const TargetInfo::ConstraintInfo &Info, LValue InputValue,
+ QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
if (Info.allowsRegister() || !Info.allowsMemory()) {
- if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
- Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
- } else {
- llvm::Type *Ty = ConvertType(InputType);
- uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
- if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
- getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
- Ty = llvm::IntegerType::get(getLLVMContext(), Size);
- Ty = llvm::PointerType::getUnqual(Ty);
-
- Arg = Builder.CreateLoad(
- Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
- } else {
- Arg = InputValue.getPointer(*this);
- ConstraintStr += '*';
- }
+ if (CodeGenFunction::hasScalarEvaluationKind(InputType))
+ return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
+
+ llvm::Type *Ty = ConvertType(InputType);
+ uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
+ if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
+ getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
+ Ty = llvm::IntegerType::get(getLLVMContext(), Size);
+ Ty = llvm::PointerType::getUnqual(Ty);
+
+ return {Builder.CreateLoad(
+ Builder.CreateBitCast(InputValue.getAddress(*this), Ty)),
+ nullptr};
}
- } else {
- Arg = InputValue.getPointer(*this);
- ConstraintStr += '*';
}
- return Arg;
+ Address Addr = InputValue.getAddress(*this);
+ ConstraintStr += '*';
+ return {Addr.getPointer(), Addr.getElementType()};
}
-llvm::Value* CodeGenFunction::EmitAsmInput(
- const TargetInfo::ConstraintInfo &Info,
- const Expr *InputExpr,
- std::string &ConstraintStr) {
+std::pair<llvm::Value *, llvm::Type *>
+CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr,
+ std::string &ConstraintStr) {
// If this can't be a register or memory, i.e., has to be a constant
// (immediate or symbolic), try to emit it as such.
if (!Info.allowsRegister() && !Info.allowsMemory()) {
@@ -2155,19 +2148,20 @@ llvm::Value* CodeGenFunction::EmitAsmInput(
llvm::APSInt IntResult;
if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
getContext()))
- return llvm::ConstantInt::get(getLLVMContext(), IntResult);
+ return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
}
Expr::EvalResult Result;
if (InputExpr->EvaluateAsInt(Result, getContext()))
- return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
+ return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
+ nullptr};
}
if (Info.allowsRegister() || !Info.allowsMemory())
if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
- return EmitScalarExpr(InputExpr);
+ return {EmitScalarExpr(InputExpr), nullptr};
if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
- return EmitScalarExpr(InputExpr);
+ return {EmitScalarExpr(InputExpr), nullptr};
InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
LValue Dest = EmitLValue(InputExpr);
return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
@@ -2209,6 +2203,7 @@ static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
bool HasUnwindClobber, bool ReadOnly,
bool ReadNone, bool NoMerge, const AsmStmt &S,
const std::vector<llvm::Type *> &ResultRegTypes,
+ const std::vector<llvm::Type *> &ArgElemTypes,
CodeGenFunction &CGF,
std::vector<llvm::Value *> &RegResults) {
if (!HasUnwindClobber)
@@ -2224,6 +2219,15 @@ static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
Result.addFnAttr(llvm::Attribute::ReadOnly);
}
+ // Add elementtype attribute for indirect constraints.
+ for (auto Pair : llvm::enumerate(ArgElemTypes)) {
+ if (Pair.value()) {
+ auto Attr = llvm::Attribute::get(
+ CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
+ Result.addParamAttr(Pair.index(), Attr);
+ }
+ }
+
// Slap the source location of the inline asm into a !srcloc metadata on the
// call.
if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
@@ -2291,6 +2295,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::vector<llvm::Type *> ResultRegTypes;
std::vector<llvm::Type *> ResultTruncRegTypes;
std::vector<llvm::Type *> ArgTypes;
+ std::vector<llvm::Type *> ArgElemTypes;
std::vector<llvm::Value*> Args;
llvm::BitVector ResultTypeRequiresCast;
@@ -2298,6 +2303,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::string InOutConstraints;
std::vector<llvm::Value*> InOutArgs;
std::vector<llvm::Type*> InOutArgTypes;
+ std::vector<llvm::Type*> InOutArgElemTypes;
// Keep track of out constraints for tied input operand.
std::vector<std::string> OutputConstraints;
@@ -2399,21 +2405,19 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::max((uint64_t)LargestVectorWidth,
VT->getPrimitiveSizeInBits().getKnownMinSize());
} else {
- llvm::Type *DestAddrTy = Dest.getAddress(*this).getType();
- llvm::Value *DestPtr = Dest.getPointer(*this);
+ Address DestAddr = Dest.getAddress(*this);
// Matrix types in memory are represented by arrays, but accessed through
// vector pointers, with the alignment specified on the access operation.
// For inline assembly, update pointer arguments to use vector pointers.
// Otherwise there will be a mis-match if the matrix is also an
// input-argument which is represented as vector.
- if (isa<MatrixType>(OutExpr->getType().getCanonicalType())) {
- DestAddrTy = llvm::PointerType::get(
- ConvertType(OutExpr->getType()),
- cast<llvm::PointerType>(DestAddrTy)->getAddressSpace());
- DestPtr = Builder.CreateBitCast(DestPtr, DestAddrTy);
- }
- ArgTypes.push_back(DestAddrTy);
- Args.push_back(DestPtr);
+ if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
+ DestAddr = Builder.CreateElementBitCast(
+ DestAddr, ConvertType(OutExpr->getType()));
+
+ ArgTypes.push_back(DestAddr.getType());
+ ArgElemTypes.push_back(DestAddr.getElementType());
+ Args.push_back(DestAddr.getPointer());
Constraints += "=*";
Constraints += OutputConstraint;
ReadOnly = ReadNone = false;
@@ -2423,9 +2427,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
InOutConstraints += ',';
const Expr *InputExpr = S.getOutputExpr(i);
- llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
- InOutConstraints,
- InputExpr->getExprLoc());
+ llvm::Value *Arg;
+ llvm::Type *ArgElemType;
+ std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
+ Info, Dest, InputExpr->getType(), InOutConstraints,
+ InputExpr->getExprLoc());
if (llvm::Type* AdjTy =
getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
@@ -2444,6 +2450,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
InOutConstraints += OutputConstraint;
InOutArgTypes.push_back(Arg->getType());
+ InOutArgElemTypes.push_back(ArgElemType);
InOutArgs.push_back(Arg);
}
}
@@ -2483,7 +2490,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
getTarget(), CGM, S, false /* No EarlyClobber */);
std::string ReplaceConstraint (InputConstraint);
- llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
+ llvm::Value *Arg;
+ llvm::Type *ArgElemType;
+ std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
// If this input argument is tied to a larger output result, extend the
// input to be the same size as the output. The LLVM backend wants to see
@@ -2528,10 +2537,19 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
VT->getPrimitiveSizeInBits().getKnownMinSize());
ArgTypes.push_back(Arg->getType());
+ ArgElemTypes.push_back(ArgElemType);
Args.push_back(Arg);
Constraints += InputConstraint;
}
+ // Append the "input" part of inout constraints.
+ for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
+ ArgTypes.push_back(InOutArgTypes[i]);
+ ArgElemTypes.push_back(InOutArgElemTypes[i]);
+ Args.push_back(InOutArgs[i]);
+ }
+ Constraints += InOutConstraints;
+
// Labels
SmallVector<llvm::BasicBlock *, 16> Transfer;
llvm::BasicBlock *Fallthrough = nullptr;
@@ -2546,21 +2564,15 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::BlockAddress::get(CurFn, Dest.getBlock());
Args.push_back(BA);
ArgTypes.push_back(BA->getType());
+ ArgElemTypes.push_back(nullptr);
if (!Constraints.empty())
Constraints += ',';
- Constraints += 'X';
+ Constraints += 'i';
}
Fallthrough = createBasicBlock("asm.fallthrough");
}
}
- // Append the "input" part of inout constraints last.
- for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
- ArgTypes.push_back(InOutArgTypes[i]);
- Args.push_back(InOutArgs[i]);
- }
- Constraints += InOutConstraints;
-
bool HasUnwindClobber = false;
// Clobbers
@@ -2647,18 +2659,18 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
EmitBlock(Fallthrough);
UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
- ResultRegTypes, *this, RegResults);
+ ResultRegTypes, ArgElemTypes, *this, RegResults);
} else if (HasUnwindClobber) {
llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
- InNoMergeAttributedStmt, S, ResultRegTypes, *this,
- RegResults);
+ InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
+ *this, RegResults);
} else {
llvm::CallInst *Result =
Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
- ResultRegTypes, *this, RegResults);
+ ResultRegTypes, ArgElemTypes, *this, RegResults);
}
assert(RegResults.size() == ResultRegTypes.size());
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
index 4c11f7d67534..0db59dd2624c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -2584,7 +2584,67 @@ static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
}
}
+static bool isSupportedByOpenMPIRBuilder(const OMPExecutableDirective &S) {
+ // Check for unsupported clauses
+ if (!S.clauses().empty()) {
+ // Currently no clause is supported
+ return false;
+ }
+
+ // Check if we have a statement with the ordered directive.
+ // Visit the statement hierarchy to find a compound statement
+ // with a ordered directive in it.
+ if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(S.getRawStmt())) {
+ if (const Stmt *SyntacticalLoop = CanonLoop->getLoopStmt()) {
+ for (const Stmt *SubStmt : SyntacticalLoop->children()) {
+ if (!SubStmt)
+ continue;
+ if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(SubStmt)) {
+ for (const Stmt *CSSubStmt : CS->children()) {
+ if (!CSSubStmt)
+ continue;
+ if (isa<OMPOrderedDirective>(CSSubStmt)) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+ }
+ return true;
+}
+
void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
+ bool UseOMPIRBuilder =
+ CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S);
+ if (UseOMPIRBuilder) {
+ auto &&CodeGenIRBuilder = [this, &S, UseOMPIRBuilder](CodeGenFunction &CGF,
+ PrePostActionTy &) {
+ // Use the OpenMPIRBuilder if enabled.
+ if (UseOMPIRBuilder) {
+ // Emit the associated statement and get its loop representation.
+ llvm::DebugLoc DL = SourceLocToDebugLoc(S.getBeginLoc());
+ const Stmt *Inner = S.getRawStmt();
+ llvm::CanonicalLoopInfo *CLI =
+ EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
+
+ llvm::OpenMPIRBuilder &OMPBuilder =
+ CGM.getOpenMPRuntime().getOMPBuilder();
+ // Add SIMD specific metadata
+ OMPBuilder.applySimd(DL, CLI);
+ return;
+ }
+ };
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
+ CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd,
+ CodeGenIRBuilder);
+ }
+ return;
+ }
+
ParentLoopDirectiveForScanRegion ScanRegion(*this, S);
OMPFirstScanLoop = true;
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
@@ -4460,8 +4520,9 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
CGF.getContext().getASTRecordLayout(CaptureRecord);
unsigned Offset =
Layout.getFieldOffset(It->second->getFieldIndex()) / CharWidth;
- (void)DI->EmitDeclareOfAutoVariable(SharedVar, ContextValue,
- CGF.Builder, false);
+ if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
+ (void)DI->EmitDeclareOfAutoVariable(SharedVar, ContextValue,
+ CGF.Builder, false);
llvm::Instruction &Last = CGF.Builder.GetInsertBlock()->back();
// Get the call dbg.declare instruction we just created and update
// its DIExpression to add offset to base address.
@@ -4560,8 +4621,10 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
CGF.getContext().getDeclAlign(Pair.first));
Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
if (auto *DI = CGF.getDebugInfo())
- DI->EmitDeclareOfAutoVariable(Pair.first, Pair.second.getPointer(),
- CGF.Builder, /*UsePointerValue*/ true);
+ if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
+ (void)DI->EmitDeclareOfAutoVariable(
+ Pair.first, Pair.second.getPointer(), CGF.Builder,
+ /*UsePointerValue*/ true);
}
// Adjust mapping for internal locals by mapping actual memory instead of
// a pointer to this memory.
@@ -6046,6 +6109,7 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_inbranch:
case OMPC_notinbranch:
case OMPC_link:
+ case OMPC_indirect:
case OMPC_use:
case OMPC_novariants:
case OMPC_nocontext:
@@ -6789,7 +6853,7 @@ void CodeGenFunction::EmitOMPTargetDataDirective(
public:
explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
- : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
+ : PrivatizeDevicePointers(PrivatizeDevicePointers) {}
void Enter(CodeGenFunction &CGF) override {
PrivatizeDevicePointers = true;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
index 482499da1b0f..c839376880c4 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
@@ -1178,7 +1178,7 @@ bool CodeGenModule::HasLTOVisibilityPublicStd(const CXXRecordDecl *RD) {
return false;
const DeclContext *DC = RD;
- while (1) {
+ while (true) {
auto *D = cast<Decl>(DC);
DC = DC->getParent();
if (isa<TranslationUnitDecl>(DC->getRedeclContext())) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
index e6adec6948af..50e1638924d1 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -740,7 +740,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
- } while (0);
+ } while (false);
if (D) {
bool NoSanitizeCoverage = false;
@@ -882,6 +882,13 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (Offset)
Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
}
+ // Instruct that functions for COFF/CodeView targets should start with a
+ // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
+ // backends as they don't need it -- instructions on these architectures are
+ // always atomically patchable at runtime.
+ if (CGM.getCodeGenOpts().HotPatch &&
+ getContext().getTargetInfo().getTriple().isX86())
+ Fn->addFnAttr("patchable-function", "prologue-short-redirect");
// Add no-jump-tables value.
if (CGM.getCodeGenOpts().NoUseJumpTables)
@@ -1595,9 +1602,9 @@ void CodeGenFunction::EmitBranchToCounterBlock(
if (!InstrumentRegions || !isInstrumentedCondition(Cond))
return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
- llvm::BasicBlock *ThenBlock = NULL;
- llvm::BasicBlock *ElseBlock = NULL;
- llvm::BasicBlock *NextBlock = NULL;
+ llvm::BasicBlock *ThenBlock = nullptr;
+ llvm::BasicBlock *ElseBlock = nullptr;
+ llvm::BasicBlock *NextBlock = nullptr;
// Create the block we'll use to increment the appropriate counter.
llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");
@@ -2109,6 +2116,7 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
// Create the actual GEP.
addr = Address(Builder.CreateInBoundsGEP(
addr.getElementType(), addr.getPointer(), gepIndices, "array.begin"),
+ ConvertTypeForMem(eltType),
addr.getAlignment());
}
@@ -2246,32 +2254,36 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
// Unknown size indication requires no size computation.
// Otherwise, evaluate and record it.
- if (const Expr *size = vat->getSizeExpr()) {
+ if (const Expr *sizeExpr = vat->getSizeExpr()) {
// It's possible that we might have emitted this already,
// e.g. with a typedef and a pointer to it.
- llvm::Value *&entry = VLASizeMap[size];
+ llvm::Value *&entry = VLASizeMap[sizeExpr];
if (!entry) {
- llvm::Value *Size = EmitScalarExpr(size);
+ llvm::Value *size = EmitScalarExpr(sizeExpr);
// C11 6.7.6.2p5:
// If the size is an expression that is not an integer constant
// expression [...] each time it is evaluated it shall have a value
// greater than zero.
- if (SanOpts.has(SanitizerKind::VLABound) &&
- size->getType()->isSignedIntegerType()) {
+ if (SanOpts.has(SanitizerKind::VLABound)) {
SanitizerScope SanScope(this);
- llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
+ llvm::Value *Zero = llvm::Constant::getNullValue(size->getType());
+ clang::QualType SEType = sizeExpr->getType();
+ llvm::Value *CheckCondition =
+ SEType->isSignedIntegerType()
+ ? Builder.CreateICmpSGT(size, Zero)
+ : Builder.CreateICmpUGT(size, Zero);
llvm::Constant *StaticArgs[] = {
- EmitCheckSourceLocation(size->getBeginLoc()),
- EmitCheckTypeDescriptor(size->getType())};
- EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
- SanitizerKind::VLABound),
- SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
+ EmitCheckSourceLocation(sizeExpr->getBeginLoc()),
+ EmitCheckTypeDescriptor(SEType)};
+ EmitCheck(std::make_pair(CheckCondition, SanitizerKind::VLABound),
+ SanitizerHandler::VLABoundNotPositive, StaticArgs, size);
}
// Always zexting here would be wrong if it weren't
// undefined behavior to have a negative bound.
- entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
+ // FIXME: What about when size's type is larger than size_t?
+ entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false);
}
}
type = vat->getElementType();
@@ -2694,7 +2706,7 @@ void CodeGenFunction::emitAlignmentAssumptionCheck(
SanitizerScope SanScope(this);
if (!OffsetValue)
- OffsetValue = Builder.getInt1(0); // no offset.
+ OffsetValue = Builder.getInt1(false); // no offset.
llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
EmitCheckSourceLocation(SecondaryLoc),
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
index f76ce8a6400d..6db888dcec08 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
@@ -46,7 +46,6 @@ namespace llvm {
class BasicBlock;
class LLVMContext;
class MDNode;
-class Module;
class SwitchInst;
class Twine;
class Value;
@@ -55,13 +54,11 @@ class CanonicalLoopInfo;
namespace clang {
class ASTContext;
-class BlockDecl;
class CXXDestructorDecl;
class CXXForRangeStmt;
class CXXTryStmt;
class Decl;
class LabelDecl;
-class EnumConstantDecl;
class FunctionDecl;
class FunctionProtoType;
class LabelStmt;
@@ -80,7 +77,6 @@ class ObjCAtSynchronizedStmt;
class ObjCAutoreleasePoolStmt;
class OMPUseDevicePtrClause;
class OMPUseDeviceAddrClause;
-class ReturnsNonNullAttr;
class SVETypeFlags;
class OMPExecutableDirective;
@@ -92,12 +88,10 @@ namespace CodeGen {
class CodeGenTypes;
class CGCallee;
class CGFunctionInfo;
-class CGRecordLayout;
class CGBlockInfo;
class CGCXXABI;
class BlockByrefHelpers;
class BlockByrefInfo;
-class BlockFlags;
class BlockFieldFlags;
class RegionCodeGenTy;
class TargetCodeGenInfo;
@@ -182,6 +176,7 @@ template <> struct DominatingValue<Address> {
struct saved_type {
DominatingLLVMValue::saved_type SavedValue;
+ llvm::Type *ElementType;
CharUnits Alignment;
};
@@ -190,11 +185,11 @@ template <> struct DominatingValue<Address> {
}
static saved_type save(CodeGenFunction &CGF, type value) {
return { DominatingLLVMValue::save(CGF, value.getPointer()),
- value.getAlignment() };
+ value.getElementType(), value.getAlignment() };
}
static type restore(CodeGenFunction &CGF, saved_type value) {
return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
- value.Alignment);
+ value.ElementType, value.Alignment);
}
};
@@ -241,11 +236,10 @@ public:
/// A jump destination is an abstract label, branching to which may
/// require a jump out through normal cleanups.
struct JumpDest {
- JumpDest() : Block(nullptr), ScopeDepth(), Index(0) {}
- JumpDest(llvm::BasicBlock *Block,
- EHScopeStack::stable_iterator Depth,
+ JumpDest() : Block(nullptr), Index(0) {}
+ JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth,
unsigned Index)
- : Block(Block), ScopeDepth(Depth), Index(Index) {}
+ : Block(Block), ScopeDepth(Depth), Index(Index) {}
bool isValid() const { return Block != nullptr; }
llvm::BasicBlock *getBlock() const { return Block; }
@@ -4677,13 +4671,14 @@ private:
SmallVectorImpl<llvm::Value *> &IRCallArgs,
unsigned &IRCallArgPos);
- llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
- const Expr *InputExpr, std::string &ConstraintStr);
+ std::pair<llvm::Value *, llvm::Type *>
+ EmitAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
+ std::string &ConstraintStr);
- llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
- LValue InputValue, QualType InputType,
- std::string &ConstraintStr,
- SourceLocation Loc);
+ std::pair<llvm::Value *, llvm::Type *>
+ EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue,
+ QualType InputType, std::string &ConstraintStr,
+ SourceLocation Loc);
/// Attempts to statically evaluate the object size of E. If that
/// fails, emits code to figure the size of E out for us. This is
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
index 36b7ce87336c..d534cf182f5a 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
@@ -565,7 +565,9 @@ void CodeGenModule::Release() {
"__amdgpu_device_library_preserve_asan_functions_ptr", nullptr,
llvm::GlobalVariable::NotThreadLocal);
addCompilerUsedGlobal(Var);
- getModule().addModuleFlag(llvm::Module::Override, "amdgpu_hostcall", 1);
+ if (!getModule().getModuleFlag("amdgpu_hostcall")) {
+ getModule().addModuleFlag(llvm::Module::Override, "amdgpu_hostcall", 1);
+ }
}
emitLLVMUsed();
@@ -610,7 +612,7 @@ void CodeGenModule::Release() {
if (Context.getLangOpts().SemanticInterposition)
// Require various optimization to respect semantic interposition.
- getModule().setSemanticInterposition(1);
+ getModule().setSemanticInterposition(true);
if (CodeGenOpts.EmitCodeView) {
// Indicate that we want CodeView in the metadata.
@@ -710,6 +712,9 @@ void CodeGenModule::Release() {
1);
}
+ if (CodeGenOpts.IBTSeal)
+ getModule().addModuleFlag(llvm::Module::Override, "ibt-seal", 1);
+
// Add module metadata for return address signing (ignoring
// non-leaf/all) and stack tagging. These are actually turned on by function
// attributes, but we use module metadata to emit build attributes. This is
@@ -1368,7 +1373,8 @@ static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
}
void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
- const FunctionDecl *FD) {
+ const FunctionDecl *FD,
+ StringRef &CurName) {
if (!FD->isMultiVersion())
return;
@@ -1400,7 +1406,11 @@ void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
if (ExistingRecord != std::end(Manglings))
Manglings.remove(&(*ExistingRecord));
auto Result = Manglings.insert(std::make_pair(OtherName, OtherGD));
- MangledDeclNames[OtherGD.getCanonicalDecl()] = Result.first->first();
+ StringRef OtherNameRef = MangledDeclNames[OtherGD.getCanonicalDecl()] =
+ Result.first->first();
+ // If the current decl is being created, make sure we update the name.
+ if (GD.getCanonicalDecl() == OtherGD.getCanonicalDecl())
+ CurName = OtherNameRef;
if (llvm::GlobalValue *Entry = GetGlobalValue(NonTargetName))
Entry->setName(OtherName);
}
@@ -1819,7 +1829,7 @@ CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
llvm::Function *F) {
- llvm::AttrBuilder B;
+ llvm::AttrBuilder B(F->getContext());
if (CodeGenOpts.UnwindTables)
B.addAttribute(llvm::Attribute::UWTable);
@@ -1982,7 +1992,7 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
void CodeGenModule::setLLVMFunctionFEnvAttributes(const FunctionDecl *D,
llvm::Function *F) {
if (D->hasAttr<StrictFPAttr>()) {
- llvm::AttrBuilder FuncAttrs;
+ llvm::AttrBuilder FuncAttrs(F->getContext());
FuncAttrs.addAttribute("strictfp");
F->addFnAttrs(FuncAttrs);
}
@@ -2092,12 +2102,12 @@ void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
if (!D->getAttr<SectionAttr>())
F->addFnAttr("implicit-section-name", SA->getName());
- llvm::AttrBuilder Attrs;
+ llvm::AttrBuilder Attrs(F->getContext());
if (GetCPUAndFeaturesAttributes(GD, Attrs)) {
// We know that GetCPUAndFeaturesAttributes will always have the
// newest set, since it has the newest possible FunctionDecl, so the
// new ones should replace the old.
- llvm::AttrBuilder RemoveAttrs;
+ llvm::AttributeMask RemoveAttrs;
RemoveAttrs.addAttribute("target-cpu");
RemoveAttrs.addAttribute("target-features");
RemoveAttrs.addAttribute("tune-cpu");
@@ -3479,6 +3489,7 @@ void CodeGenModule::emitMultiVersionFunctions() {
void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
assert(FD && "Not a FunctionDecl?");
+ assert(FD->isCPUDispatchMultiVersion() && "Not a multiversion function?");
const auto *DD = FD->getAttr<CPUDispatchAttr>();
assert(DD && "Not a cpu_dispatch Function?");
llvm::Type *DeclTy = getTypes().ConvertType(FD->getType());
@@ -3489,14 +3500,16 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
}
StringRef ResolverName = getMangledName(GD);
+ UpdateMultiVersionNames(GD, FD, ResolverName);
llvm::Type *ResolverType;
GlobalDecl ResolverGD;
- if (getTarget().supportsIFunc())
+ if (getTarget().supportsIFunc()) {
ResolverType = llvm::FunctionType::get(
llvm::PointerType::get(DeclTy,
Context.getTargetAddressSpace(FD->getType())),
false);
+ }
else {
ResolverType = DeclTy;
ResolverGD = GD;
@@ -3688,8 +3701,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
}
if (FD->isMultiVersion()) {
- if (FD->hasAttr<TargetAttr>())
- UpdateMultiVersionNames(GD, FD);
+ UpdateMultiVersionNames(GD, FD, MangledName);
if (!IsForDefinition)
return GetOrCreateMultiVersionResolver(GD, Ty, FD);
}
@@ -3785,7 +3797,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
if (D)
SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
if (ExtraAttrs.hasFnAttrs()) {
- llvm::AttrBuilder B(ExtraAttrs, llvm::AttributeList::FunctionIndex);
+ llvm::AttrBuilder B(F->getContext(), ExtraAttrs.getFnAttrs());
F->addFnAttrs(B);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
index f1565511f98a..e803022508a4 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
@@ -46,7 +46,6 @@ class GlobalValue;
class DataLayout;
class FunctionType;
class LLVMContext;
-class OpenMPIRBuilder;
class IndexedInstrProfReader;
}
@@ -55,17 +54,13 @@ class ASTContext;
class AtomicType;
class FunctionDecl;
class IdentifierInfo;
-class ObjCMethodDecl;
class ObjCImplementationDecl;
-class ObjCCategoryImplDecl;
-class ObjCProtocolDecl;
class ObjCEncodeExpr;
class BlockExpr;
class CharUnits;
class Decl;
class Expr;
class Stmt;
-class InitListExpr;
class StringLiteral;
class NamedDecl;
class ValueDecl;
@@ -78,13 +73,10 @@ class AnnotateAttr;
class CXXDestructorDecl;
class Module;
class CoverageSourceInfo;
-class TargetAttr;
class InitSegAttr;
-struct ParsedTargetAttr;
namespace CodeGen {
-class CallArgList;
class CodeGenFunction;
class CodeGenTBAA;
class CGCXXABI;
@@ -93,8 +85,6 @@ class CGObjCRuntime;
class CGOpenCLRuntime;
class CGOpenMPRuntime;
class CGCUDARuntime;
-class BlockFieldFlags;
-class FunctionArgList;
class CoverageMappingModuleGen;
class TargetCodeGenInfo;
@@ -311,7 +301,7 @@ private:
const TargetInfo &Target;
std::unique_ptr<CGCXXABI> ABI;
llvm::LLVMContext &VMContext;
- std::string ModuleNameHash = "";
+ std::string ModuleNameHash;
std::unique_ptr<CodeGenTBAA> TBAA;
@@ -345,7 +335,7 @@ private:
/// for emission and therefore should only be output if they are actually
/// used. If a decl is in this, then it is known to have not been referenced
/// yet.
- std::map<StringRef, GlobalDecl> DeferredDecls;
+ llvm::DenseMap<StringRef, GlobalDecl> DeferredDecls;
/// This is a list of deferred decls which we have seen that *are* actually
/// referenced. These get code generated when the module is done.
@@ -1478,7 +1468,8 @@ private:
llvm::Constant *GetOrCreateMultiVersionResolver(GlobalDecl GD,
llvm::Type *DeclTy,
const FunctionDecl *FD);
- void UpdateMultiVersionNames(GlobalDecl GD, const FunctionDecl *FD);
+ void UpdateMultiVersionNames(GlobalDecl GD, const FunctionDecl *FD,
+ StringRef &CurName);
llvm::Constant *
GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty, LangAS AddrSpace,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
index ab953c2c7d52..6657f2a91e3d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
@@ -131,7 +131,7 @@ public:
static_assert(LastHashType <= TooBig, "Too many types in HashType");
PGOHash(PGOHashVersion HashVersion)
- : Working(0), Count(0), HashVersion(HashVersion), MD5() {}
+ : Working(0), Count(0), HashVersion(HashVersion) {}
void combine(HashType Type);
uint64_t finalize();
PGOHashVersion getHashVersion() const { return HashVersion; }
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h
index e8e006f41616..a65963596fe9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h
@@ -29,7 +29,6 @@ namespace clang {
class Type;
namespace CodeGen {
-class CGRecordLayout;
// TBAAAccessKind - A kind of TBAA memory access descriptor.
enum class TBAAAccessKind : unsigned {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
index 77721510dfd0..4839e22c4b14 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -643,11 +643,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
llvm::Type *PointeeType = ConvertTypeForMem(ETy);
if (PointeeType->isVoidTy())
PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
-
- unsigned AS = PointeeType->isFunctionTy()
- ? getDataLayout().getProgramAddressSpace()
- : Context.getTargetAddressSpace(ETy);
-
+ unsigned AS = Context.getTargetAddressSpace(ETy);
ResultType = llvm::PointerType::get(PointeeType, AS);
break;
}
@@ -748,7 +744,13 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
llvm::Type *PointeeType = CGM.getLangOpts().OpenCL
? CGM.getGenericBlockLiteralType()
: ConvertTypeForMem(FTy);
- unsigned AS = Context.getTargetAddressSpace(FTy);
+ // Block pointers lower to function type. For function type,
+ // getTargetAddressSpace() returns default address space for
+ // function pointer i.e. program address space. Therefore, for block
+ // pointers, it is important to pass qualifiers when calling
+ // getTargetAddressSpace(), to ensure that we get the address space
+ // for data pointers and not function pointers.
+ unsigned AS = Context.getTargetAddressSpace(FTy.getQualifiers());
ResultType = llvm::PointerType::get(PointeeType, AS);
break;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
index f8f7542e4c83..28b831222943 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
@@ -31,14 +31,9 @@ namespace clang {
class ASTContext;
template <typename> class CanQual;
class CXXConstructorDecl;
-class CXXDestructorDecl;
class CXXMethodDecl;
class CodeGenOptions;
-class FieldDecl;
class FunctionProtoType;
-class ObjCInterfaceDecl;
-class ObjCIvarDecl;
-class PointerType;
class QualType;
class RecordDecl;
class TagDecl;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 1a15b09c7b2b..2979d92c8417 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -334,59 +334,6 @@ public:
ArrayRef<llvm::Function *> CXXThreadLocalInits,
ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
- bool mayNeedDestruction(const VarDecl *VD) const {
- if (VD->needsDestruction(getContext()))
- return true;
-
- // If the variable has an incomplete class type (or array thereof), it
- // might need destruction.
- const Type *T = VD->getType()->getBaseElementTypeUnsafe();
- if (T->getAs<RecordType>() && T->isIncompleteType())
- return true;
-
- return false;
- }
-
- /// Determine whether we will definitely emit this variable with a constant
- /// initializer, either because the language semantics demand it or because
- /// we know that the initializer is a constant.
- // For weak definitions, any initializer available in the current translation
- // is not necessarily reflective of the initializer used; such initializers
- // are ignored unless if InspectInitForWeakDef is true.
- bool
- isEmittedWithConstantInitializer(const VarDecl *VD,
- bool InspectInitForWeakDef = false) const {
- VD = VD->getMostRecentDecl();
- if (VD->hasAttr<ConstInitAttr>())
- return true;
-
- // All later checks examine the initializer specified on the variable. If
- // the variable is weak, such examination would not be correct.
- if (!InspectInitForWeakDef &&
- (VD->isWeak() || VD->hasAttr<SelectAnyAttr>()))
- return false;
-
- const VarDecl *InitDecl = VD->getInitializingDeclaration();
- if (!InitDecl)
- return false;
-
- // If there's no initializer to run, this is constant initialization.
- if (!InitDecl->hasInit())
- return true;
-
- // If we have the only definition, we don't need a thread wrapper if we
- // will emit the value as a constant.
- if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
- return !mayNeedDestruction(VD) && InitDecl->evaluateValue();
-
- // Otherwise, we need a thread wrapper unless we know that every
- // translation unit will emit the value as a constant. We rely on the
- // variable being constant-initialized in every translation unit if it's
- // constant-initialized in any translation unit, which isn't actually
- // guaranteed by the standard but is necessary for sanity.
- return InitDecl->hasConstantInitialization();
- }
-
bool usesThreadWrapperFunction(const VarDecl *VD) const override {
return !isEmittedWithConstantInitializer(VD) ||
mayNeedDestruction(VD);
@@ -697,8 +644,8 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
CharUnits VTablePtrAlign =
CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
CGF.getPointerAlign());
- llvm::Value *VTable =
- CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
+ llvm::Value *VTable = CGF.GetVTablePtr(
+ Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);
// Apply the offset.
// On ARM64, to reserve extra space in virtual member function pointers,
@@ -4525,8 +4472,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
// pad. The best solution is to fix the personality function.
} else {
// Pull the pointer for the reference type off.
- llvm::Type *PtrTy =
- cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
+ llvm::Type *PtrTy = LLVMCatchTy->getPointerElementType();
// Create the temporary and write the adjusted pointer into it.
Address ExnPtrTmp =
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h b/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h
index 32906a000269..d249b5b0eb88 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h
@@ -17,7 +17,6 @@
namespace llvm {
class DIMacroFile;
-class DIMacroNode;
}
namespace clang {
class Preprocessor;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 5971a7709304..e00ff2b68719 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -401,7 +401,9 @@ public:
ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
bool usesThreadWrapperFunction(const VarDecl *VD) const override {
- return false;
+ return getContext().getLangOpts().isCompatibleWithMSVC(
+ LangOptions::MSVC2019_5) &&
+ (!isEmittedWithConstantInitializer(VD) || mayNeedDestruction(VD));
}
LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
QualType LValType) override;
@@ -2397,11 +2399,97 @@ void MicrosoftCXXABI::EmitThreadLocalInitFuncs(
}
}
+static llvm::GlobalValue *getTlsGuardVar(CodeGenModule &CGM) {
+ // __tls_guard comes from the MSVC runtime and reflects
+ // whether TLS has been initialized for a particular thread.
+ // It is set from within __dyn_tls_init by the runtime.
+ // Every library and executable has its own variable.
+ llvm::Type *VTy = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ llvm::Constant *TlsGuardConstant =
+ CGM.CreateRuntimeVariable(VTy, "__tls_guard");
+ llvm::GlobalValue *TlsGuard = cast<llvm::GlobalValue>(TlsGuardConstant);
+
+ TlsGuard->setThreadLocal(true);
+
+ return TlsGuard;
+}
+
+static llvm::FunctionCallee getDynTlsOnDemandInitFn(CodeGenModule &CGM) {
+ // __dyn_tls_on_demand_init comes from the MSVC runtime and triggers
+ // dynamic TLS initialization by calling __dyn_tls_init internally.
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()), {},
+ /*isVarArg=*/false);
+ return CGM.CreateRuntimeFunction(
+ FTy, "__dyn_tls_on_demand_init",
+ llvm::AttributeList::get(CGM.getLLVMContext(),
+ llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::NoUnwind),
+ /*Local=*/true);
+}
+
+static void emitTlsGuardCheck(CodeGenFunction &CGF, llvm::GlobalValue *TlsGuard,
+ llvm::BasicBlock *DynInitBB,
+ llvm::BasicBlock *ContinueBB) {
+ llvm::LoadInst *TlsGuardValue =
+ CGF.Builder.CreateLoad(Address(TlsGuard, CharUnits::One()));
+ llvm::Value *CmpResult =
+ CGF.Builder.CreateICmpEQ(TlsGuardValue, CGF.Builder.getInt8(0));
+ CGF.Builder.CreateCondBr(CmpResult, DynInitBB, ContinueBB);
+}
+
+static void emitDynamicTlsInitializationCall(CodeGenFunction &CGF,
+ llvm::GlobalValue *TlsGuard,
+ llvm::BasicBlock *ContinueBB) {
+ llvm::FunctionCallee Initializer = getDynTlsOnDemandInitFn(CGF.CGM);
+ llvm::Function *InitializerFunction =
+ cast<llvm::Function>(Initializer.getCallee());
+ llvm::CallInst *CallVal = CGF.Builder.CreateCall(InitializerFunction);
+ CallVal->setCallingConv(InitializerFunction->getCallingConv());
+
+ CGF.Builder.CreateBr(ContinueBB);
+}
+
+static void emitDynamicTlsInitialization(CodeGenFunction &CGF) {
+ llvm::BasicBlock *DynInitBB =
+ CGF.createBasicBlock("dyntls.dyn_init", CGF.CurFn);
+ llvm::BasicBlock *ContinueBB =
+ CGF.createBasicBlock("dyntls.continue", CGF.CurFn);
+
+ llvm::GlobalValue *TlsGuard = getTlsGuardVar(CGF.CGM);
+
+ emitTlsGuardCheck(CGF, TlsGuard, DynInitBB, ContinueBB);
+ CGF.Builder.SetInsertPoint(DynInitBB);
+ emitDynamicTlsInitializationCall(CGF, TlsGuard, ContinueBB);
+ CGF.Builder.SetInsertPoint(ContinueBB);
+}
+
LValue MicrosoftCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
const VarDecl *VD,
QualType LValType) {
- CGF.CGM.ErrorUnsupported(VD, "thread wrappers");
- return LValue();
+ // Dynamic TLS initialization works by checking the state of a
+ // guard variable (__tls_guard) to see whether TLS initialization
+ // for a thread has happened yet.
+ // If not, the initialization is triggered on-demand
+ // by calling __dyn_tls_on_demand_init.
+ emitDynamicTlsInitialization(CGF);
+
+ // Emit the variable just like any regular global variable.
+
+ llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
+ llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
+
+ unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
+ V = CGF.Builder.CreateBitCast(V, RealVarTy->getPointerTo(AS));
+
+ CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
+ Address Addr(V, Alignment);
+
+ LValue LV = VD->getType()->isReferenceType()
+ ? CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
+ AlignmentSource::Decl)
+ : CGF.MakeAddrLValue(Addr, LValType, AlignmentSource::Decl);
+ return LV;
}
static ConstantAddress getInitThreadEpochPtr(CodeGenModule &CGM) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index f7b83c45022d..9fe7e5d1f5c3 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -156,6 +156,7 @@ public:
CodeGenOpts.setDebuggerTuning(CI.getCodeGenOpts().getDebuggerTuning());
CodeGenOpts.DebugPrefixMap =
CI.getInvocation().getCodeGenOpts().DebugPrefixMap;
+ CodeGenOpts.DebugStrictDwarf = CI.getCodeGenOpts().DebugStrictDwarf;
}
~PCHContainerGenerator() override = default;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
index 85089cdb2200..fb81169003fc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
@@ -855,19 +855,19 @@ public:
if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
llvm::Function *Fn = cast<llvm::Function>(GV);
- llvm::AttrBuilder B;
+ llvm::AttrBuilder B(GV->getContext());
B.addAttribute("wasm-import-module", Attr->getImportModule());
Fn->addFnAttrs(B);
}
if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
llvm::Function *Fn = cast<llvm::Function>(GV);
- llvm::AttrBuilder B;
+ llvm::AttrBuilder B(GV->getContext());
B.addAttribute("wasm-import-name", Attr->getImportName());
Fn->addFnAttrs(B);
}
if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
llvm::Function *Fn = cast<llvm::Function>(GV);
- llvm::AttrBuilder B;
+ llvm::AttrBuilder B(GV->getContext());
B.addAttribute("wasm-export-name", Attr->getExportName());
Fn->addFnAttrs(B);
}
@@ -1606,7 +1606,7 @@ static bool isSIMDVectorType(ASTContext &Context, QualType Ty) {
static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT)
- return 0;
+ return false;
const RecordDecl *RD = RT->getDecl();
// If this is a C++ record, check the bases first.
@@ -6414,7 +6414,7 @@ public:
// AAPCS guarantees that sp will be 8-byte aligned on any public interface,
// however this is not necessarily true on taking any interrupt. Instruct
// the backend to perform a realignment as part of the function prologue.
- llvm::AttrBuilder B;
+ llvm::AttrBuilder B(Fn->getContext());
B.addStackAlignmentAttr(8);
Fn->addFnAttrs(B);
}
@@ -8282,14 +8282,15 @@ public:
LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
const VarDecl *D) const override {
- // Check if a global/static variable is defined within address space 1
+ // Check if global/static variable is defined in address space
+ // 1~6 (__flash, __flash1, __flash2, __flash3, __flash4, __flash5)
// but not constant.
LangAS AS = D->getType().getAddressSpace();
- if (isTargetAddressSpace(AS) && toTargetAddressSpace(AS) == 1 &&
- !D->getType().isConstQualified())
+ if (isTargetAddressSpace(AS) && 1 <= toTargetAddressSpace(AS) &&
+ toTargetAddressSpace(AS) <= 6 && !D->getType().isConstQualified())
CGM.getDiags().Report(D->getLocation(),
diag::err_verify_nonconst_addrspace)
- << "__flash";
+ << "__flash*";
return TargetCodeGenInfo::getGlobalVarAddressSpace(CGM, D);
}
@@ -8693,7 +8694,7 @@ Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
"__new_saved_reg_area_pointer");
- llvm::Value *UsingStack = 0;
+ llvm::Value *UsingStack = nullptr;
UsingStack = CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
__saved_reg_area_end_pointer);
@@ -8935,9 +8936,9 @@ private:
llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
unsigned ToAS) const {
// Single value types.
- if (Ty->isPointerTy() && Ty->getPointerAddressSpace() == FromAS)
- return llvm::PointerType::get(
- cast<llvm::PointerType>(Ty)->getElementType(), ToAS);
+ auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty);
+ if (PtrTy && PtrTy->getAddressSpace() == FromAS)
+ return llvm::PointerType::getWithSamePointeeType(PtrTy, ToAS);
return Ty;
}
@@ -9304,16 +9305,9 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
if (FD)
setFunctionDeclAttributes(FD, F, M);
- const bool IsOpenCLKernel =
- M.getLangOpts().OpenCL && FD && FD->hasAttr<OpenCLKernelAttr>();
const bool IsHIPKernel =
M.getLangOpts().HIP && FD && FD->hasAttr<CUDAGlobalAttr>();
- const bool IsOpenMP = M.getLangOpts().OpenMP && !FD;
- if ((IsOpenCLKernel || IsHIPKernel || IsOpenMP) &&
- (M.getTriple().getOS() == llvm::Triple::AMDHSA))
- F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");
-
if (IsHIPKernel)
F->addFnAttr("uniform-work-group-size", "true");
@@ -9340,8 +9334,8 @@ llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
return llvm::ConstantPointerNull::get(PT);
auto &Ctx = CGM.getContext();
- auto NPT = llvm::PointerType::get(PT->getElementType(),
- Ctx.getTargetAddressSpace(LangAS::opencl_generic));
+ auto NPT = llvm::PointerType::getWithSamePointeeType(
+ PT, Ctx.getTargetAddressSpace(LangAS::opencl_generic));
return llvm::ConstantExpr::getAddrSpaceCast(
llvm::ConstantPointerNull::get(NPT), PT);
}
@@ -10276,9 +10270,9 @@ ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
llvm::Type *LTy = CGT.ConvertType(Ty);
auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
auto GlobalAS = getContext().getTargetAddressSpace(LangAS::cuda_device);
- if (LTy->isPointerTy() && LTy->getPointerAddressSpace() == DefaultAS) {
- LTy = llvm::PointerType::get(
- cast<llvm::PointerType>(LTy)->getElementType(), GlobalAS);
+ auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(LTy);
+ if (PtrTy && PtrTy->getAddressSpace() == DefaultAS) {
+ LTy = llvm::PointerType::getWithSamePointeeType(PtrTy, GlobalAS);
return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
}
}
@@ -11417,7 +11411,7 @@ TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
auto &C = CGF.getLLVMContext();
std::string Name = Invoke->getName().str() + "_kernel";
auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
- auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
+ auto *F = llvm::Function::Create(FT, llvm::GlobalValue::ExternalLinkage, Name,
&CGF.CGM.getModule());
auto IP = CGF.Builder.saveIP();
auto *BB = llvm::BasicBlock::Create(C, "entry", F);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
index aa8bbb60a75f..dfdb2f5f55bb 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
@@ -38,7 +38,6 @@ class ABIInfo;
class CallArgList;
class CodeGenFunction;
class CGBlockInfo;
-class CGFunctionInfo;
/// TargetCodeGenInfo - This class organizes various target-specific
/// codegeneration issues, like target-specific attributes, builtins and so