path: root/clang/lib/CodeGen
Diffstat (limited to 'clang/lib/CodeGen')
-rw-r--r--  clang/lib/CodeGen/ABIInfo.h              |   2
-rw-r--r--  clang/lib/CodeGen/Address.h              |  64
-rw-r--r--  clang/lib/CodeGen/BackendUtil.cpp        |  22
-rw-r--r--  clang/lib/CodeGen/CGAtomic.cpp           |  19
-rw-r--r--  clang/lib/CodeGen/CGBlocks.cpp           |   3
-rw-r--r--  clang/lib/CodeGen/CGBuilder.h            |  46
-rw-r--r--  clang/lib/CodeGen/CGBuiltin.cpp          | 199
-rw-r--r--  clang/lib/CodeGen/CGCUDANV.cpp           |   3
-rw-r--r--  clang/lib/CodeGen/CGCall.cpp             |  57
-rw-r--r--  clang/lib/CodeGen/CGCall.h               |   3
-rw-r--r--  clang/lib/CodeGen/CGClass.cpp            |  50
-rw-r--r--  clang/lib/CodeGen/CGCleanup.h            |  11
-rw-r--r--  clang/lib/CodeGen/CGCoroutine.cpp        |   4
-rw-r--r--  clang/lib/CodeGen/CGDebugInfo.cpp        |  19
-rw-r--r--  clang/lib/CodeGen/CGDebugInfo.h          |   2
-rw-r--r--  clang/lib/CodeGen/CGDecl.cpp             |  10
-rw-r--r--  clang/lib/CodeGen/CGDeclCXX.cpp          |  12
-rw-r--r--  clang/lib/CodeGen/CGException.cpp        |   8
-rw-r--r--  clang/lib/CodeGen/CGExpr.cpp             |  80
-rw-r--r--  clang/lib/CodeGen/CGExprAgg.cpp          |  11
-rw-r--r--  clang/lib/CodeGen/CGExprCXX.cpp          |  18
-rw-r--r--  clang/lib/CodeGen/CGExprConstant.cpp     |   7
-rw-r--r--  clang/lib/CodeGen/CGExprScalar.cpp       |  64
-rw-r--r--  clang/lib/CodeGen/CGNonTrivialStruct.cpp |  33
-rw-r--r--  clang/lib/CodeGen/CGObjC.cpp             |   8
-rw-r--r--  clang/lib/CodeGen/CGObjCGNU.cpp          |  12
-rw-r--r--  clang/lib/CodeGen/CGObjCMac.cpp          |   5
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.cpp    | 106
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.h      |  26
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp |  39
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntimeGPU.h   |   5
-rw-r--r--  clang/lib/CodeGen/CGStmt.cpp             |   2
-rw-r--r--  clang/lib/CodeGen/CGStmtOpenMP.cpp       |  76
-rw-r--r--  clang/lib/CodeGen/CGValue.h              |  76
-rw-r--r--  clang/lib/CodeGen/CodeGenAction.cpp      |   1
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.cpp    |  71
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.h      |  34
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.cpp      |  54
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.h        |   3
-rw-r--r--  clang/lib/CodeGen/CodeGenTBAA.cpp        |   4
-rw-r--r--  clang/lib/CodeGen/CodeGenTypes.cpp       |  12
-rw-r--r--  clang/lib/CodeGen/ItaniumCXXABI.cpp      |  11
-rw-r--r--  clang/lib/CodeGen/MicrosoftCXXABI.cpp    |   8
-rw-r--r--  clang/lib/CodeGen/TargetInfo.cpp         | 241
44 files changed, 933 insertions, 608 deletions
diff --git a/clang/lib/CodeGen/ABIInfo.h b/clang/lib/CodeGen/ABIInfo.h
index 56f0dd4322d2..0d12183055e1 100644
--- a/clang/lib/CodeGen/ABIInfo.h
+++ b/clang/lib/CodeGen/ABIInfo.h
@@ -105,7 +105,7 @@ namespace swiftcall {
uint64_t &Members) const;
// Implement the Type::IsPromotableIntegerType for ABI specific needs. The
- // only difference is that this considers _ExtInt as well.
+ // only difference is that this considers bit-precise integer types as well.
bool isPromotableIntegerTypeForABI(QualType Ty) const;
/// A convenience method to return an indirect ABIArgInfo with an
diff --git a/clang/lib/CodeGen/Address.h b/clang/lib/CodeGen/Address.h
index 6a8e57f8db33..37c20291c0e8 100644
--- a/clang/lib/CodeGen/Address.h
+++ b/clang/lib/CodeGen/Address.h
@@ -23,15 +23,29 @@ namespace CodeGen {
/// An aligned address.
class Address {
llvm::Value *Pointer;
+ llvm::Type *ElementType;
CharUnits Alignment;
+
+protected:
+ Address(std::nullptr_t) : Pointer(nullptr), ElementType(nullptr) {}
+
public:
- Address(llvm::Value *pointer, CharUnits alignment)
- : Pointer(pointer), Alignment(alignment) {
- assert((!alignment.isZero() || pointer == nullptr) &&
- "creating valid address with invalid alignment");
+ Address(llvm::Value *pointer, llvm::Type *elementType, CharUnits alignment)
+ : Pointer(pointer), ElementType(elementType), Alignment(alignment) {
+ assert(pointer != nullptr && "Pointer cannot be null");
+ assert(elementType != nullptr && "Element type cannot be null");
+ assert(llvm::cast<llvm::PointerType>(pointer->getType())
+ ->isOpaqueOrPointeeTypeMatches(elementType) &&
+ "Incorrect pointer element type");
+ assert(!alignment.isZero() && "Alignment cannot be zero");
}
- static Address invalid() { return Address(nullptr, CharUnits()); }
+ // Deprecated: Use constructor with explicit element type instead.
+ Address(llvm::Value *Pointer, CharUnits Alignment)
+ : Address(Pointer, Pointer->getType()->getPointerElementType(),
+ Alignment) {}
+
+ static Address invalid() { return Address(nullptr); }
bool isValid() const { return Pointer != nullptr; }
llvm::Value *getPointer() const {
@@ -45,11 +59,9 @@ public:
}
/// Return the type of the values stored in this address.
- ///
- /// When IR pointer types lose their element type, we should simply
- /// store it in Address instead for the convenience of writing code.
llvm::Type *getElementType() const {
- return getType()->getElementType();
+ assert(isValid());
+ return ElementType;
}
/// Return the address space that this address resides in.
@@ -67,30 +79,42 @@ public:
assert(isValid());
return Alignment;
}
+
+ /// Return address with different pointer, but same element type and
+ /// alignment.
+ Address withPointer(llvm::Value *NewPointer) const {
+ return Address(NewPointer, ElementType, Alignment);
+ }
+
+ /// Return address with different alignment, but same pointer and element
+ /// type.
+ Address withAlignment(CharUnits NewAlignment) const {
+ return Address(Pointer, ElementType, NewAlignment);
+ }
};
/// A specialization of Address that requires the address to be an
/// LLVM Constant.
class ConstantAddress : public Address {
+ ConstantAddress(std::nullptr_t) : Address(nullptr) {}
+
public:
- ConstantAddress(llvm::Constant *pointer, CharUnits alignment)
- : Address(pointer, alignment) {}
+ ConstantAddress(llvm::Constant *pointer, llvm::Type *elementType,
+ CharUnits alignment)
+ : Address(pointer, elementType, alignment) {}
static ConstantAddress invalid() {
- return ConstantAddress(nullptr, CharUnits());
+ return ConstantAddress(nullptr);
}
llvm::Constant *getPointer() const {
return llvm::cast<llvm::Constant>(Address::getPointer());
}
- ConstantAddress getBitCast(llvm::Type *ty) const {
- return ConstantAddress(llvm::ConstantExpr::getBitCast(getPointer(), ty),
- getAlignment());
- }
-
- ConstantAddress getElementBitCast(llvm::Type *ty) const {
- return getBitCast(ty->getPointerTo(getAddressSpace()));
+ ConstantAddress getElementBitCast(llvm::Type *ElemTy) const {
+ llvm::Constant *BitCast = llvm::ConstantExpr::getBitCast(
+ getPointer(), ElemTy->getPointerTo(getAddressSpace()));
+ return ConstantAddress(BitCast, ElemTy, getAlignment());
}
static bool isaImpl(Address addr) {
@@ -98,7 +122,7 @@ public:
}
static ConstantAddress castImpl(Address addr) {
return ConstantAddress(llvm::cast<llvm::Constant>(addr.getPointer()),
- addr.getAlignment());
+ addr.getElementType(), addr.getAlignment());
}
};
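
The Address change above is the heart of this patch: the pointee type now travels with the pointer value instead of being recovered via getPointerElementType(). A minimal caller-side sketch of the new API (inside some CodeGenFunction method; the local names AllocaPtr, LaunderedPtr and the variable D are illustrative, not taken from the patch):

  // Sketch only: constructing an Address with an explicit element type and
  // using the new withPointer/withAlignment helpers defined above.
  llvm::Type *ElemTy = ConvertTypeForMem(D.getType());
  Address Addr(AllocaPtr, ElemTy, getContext().getDeclAlign(&D));

  llvm::Type *Ty = Addr.getElementType();          // no getPointerElementType() needed
  Address Moved = Addr.withPointer(LaunderedPtr);  // same element type, same alignment
  Address Realigned = Addr.withAlignment(CharUnits::fromQuantity(16));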
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 510f3911939c..bacac0a20d4d 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -94,10 +94,16 @@ using namespace llvm;
llvm::PassPluginLibraryInfo get##Ext##PluginInfo();
#include "llvm/Support/Extension.def"
+namespace llvm {
+extern cl::opt<bool> DebugInfoCorrelate;
+}
+
namespace {
// Default filename used for profile generation.
-static constexpr StringLiteral DefaultProfileGenName = "default_%m.profraw";
+std::string getDefaultProfileGenName() {
+ return DebugInfoCorrelate ? "default_%p.proflite" : "default_%m.profraw";
+}
class EmitAssemblyHelper {
DiagnosticsEngine &Diags;
@@ -597,8 +603,6 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.ForceDwarfFrameSection = CodeGenOpts.ForceDwarfFrameSection;
Options.EmitCallSiteInfo = CodeGenOpts.EmitCallSiteInfo;
Options.EnableAIXExtendedAltivecABI = CodeGenOpts.EnableAIXExtendedAltivecABI;
- Options.ValueTrackingVariableLocations =
- CodeGenOpts.ValueTrackingVariableLocations;
Options.XRayOmitFunctionIndex = CodeGenOpts.XRayOmitFunctionIndex;
Options.LoopAlignment = CodeGenOpts.LoopAlignment;
@@ -640,6 +644,7 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.MCOptions.Argv0 = CodeGenOpts.Argv0;
Options.MCOptions.CommandLineArgs = CodeGenOpts.CommandLineArgs;
Options.DebugStrictDwarf = CodeGenOpts.DebugStrictDwarf;
+ Options.ObjectFilenameForDebug = CodeGenOpts.ObjectFilenameForDebug;
return true;
}
@@ -886,7 +891,7 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
if (!CodeGenOpts.InstrProfileOutput.empty())
PMBuilder.PGOInstrGen = CodeGenOpts.InstrProfileOutput;
else
- PMBuilder.PGOInstrGen = std::string(DefaultProfileGenName);
+ PMBuilder.PGOInstrGen = getDefaultProfileGenName();
}
if (CodeGenOpts.hasProfileIRUse()) {
PMBuilder.PGOInstrUse = CodeGenOpts.ProfileInstrumentUsePath;
@@ -1231,7 +1236,7 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
if (CodeGenOpts.hasProfileIRInstr())
// -fprofile-generate.
PGOOpt = PGOOptions(CodeGenOpts.InstrProfileOutput.empty()
- ? std::string(DefaultProfileGenName)
+ ? getDefaultProfileGenName()
: CodeGenOpts.InstrProfileOutput,
"", "", PGOOptions::IRInstr, PGOOptions::NoCSAction,
CodeGenOpts.DebugInfoForProfiling);
@@ -1269,13 +1274,13 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
"Cannot run CSProfileGen pass with ProfileGen or SampleUse "
" pass");
PGOOpt->CSProfileGenFile = CodeGenOpts.InstrProfileOutput.empty()
- ? std::string(DefaultProfileGenName)
+ ? getDefaultProfileGenName()
: CodeGenOpts.InstrProfileOutput;
PGOOpt->CSAction = PGOOptions::CSIRInstr;
} else
PGOOpt = PGOOptions("",
CodeGenOpts.InstrProfileOutput.empty()
- ? std::string(DefaultProfileGenName)
+ ? getDefaultProfileGenName()
: CodeGenOpts.InstrProfileOutput,
"", PGOOptions::NoAction, PGOOptions::CSIRInstr,
CodeGenOpts.DebugInfoForProfiling);
@@ -1577,7 +1582,8 @@ static void runThinLTOBackend(
return;
auto AddStream = [&](size_t Task) {
- return std::make_unique<CachedFileStream>(std::move(OS));
+ return std::make_unique<CachedFileStream>(std::move(OS),
+ CGOpts.ObjectFilenameForDebug);
};
lto::Config Conf;
if (CGOpts.SaveTempsFilePrefix != "") {
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index b68e6328acdf..e81c5ba5055c 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -1079,8 +1079,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
if (AS == LangAS::opencl_generic)
return V;
auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
- auto T = V->getType();
- auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);
+ auto T = llvm::cast<llvm::PointerType>(V->getType());
+ auto *DestType = llvm::PointerType::getWithSamePointeeType(T, DestAS);
return getTargetHooks().performAddrSpaceCast(
*this, V, AS, LangAS::opencl_generic, DestType, false);
@@ -1321,15 +1321,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
ResVal = Builder.CreateNot(ResVal);
Builder.CreateStore(
- ResVal,
- Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
+ ResVal, Builder.CreateElementBitCast(Dest, ResVal->getType()));
}
if (RValTy->isVoidType())
return RValue::get(nullptr);
return convertTempToRValue(
- Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
+ Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
RValTy, E->getExprLoc());
}
@@ -1382,8 +1381,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
return RValue::get(nullptr);
return convertTempToRValue(
- Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
- Dest.getAddressSpace())),
+ Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
RValTy, E->getExprLoc());
}
@@ -1455,17 +1453,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
return convertTempToRValue(
- Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
- Dest.getAddressSpace())),
+ Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
RValTy, E->getExprLoc());
}
Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
- unsigned addrspace =
- cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
llvm::IntegerType *ty =
llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
- return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
+ return CGF.Builder.CreateElementBitCast(addr, ty);
}
Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
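
The hunk above (and matching ones in CGCall.cpp, CGClass.cpp and CGDeclCXX.cpp) replaces getPointerElementType()->getPointerTo(AS) with llvm::PointerType::getWithSamePointeeType, which keeps working once pointers become opaque. A hedged sketch of the idiom, with illustrative names V and TargetAS:

  // Build a pointer type in a different address space without ever asking
  // for the pointee type; valid for both typed and opaque pointers.
  auto *SrcPtrTy = llvm::cast<llvm::PointerType>(V->getType());
  llvm::PointerType *DstPtrTy =
      llvm::PointerType::getWithSamePointeeType(SrcPtrTy, TargetAS);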
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 2da2014345d8..7bb6dbb8a8ac 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -2721,8 +2721,7 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
Address addr = emission.Addr;
// That's an alloca of the byref structure type.
- llvm::StructType *byrefType = cast<llvm::StructType>(
- cast<llvm::PointerType>(addr.getPointer()->getType())->getElementType());
+ llvm::StructType *byrefType = cast<llvm::StructType>(addr.getElementType());
unsigned nextHeaderIndex = 0;
CharUnits nextHeaderOffset;
diff --git a/clang/lib/CodeGen/CGBuilder.h b/clang/lib/CodeGen/CGBuilder.h
index 4fad44a105cd..7c9f41e84eaf 100644
--- a/clang/lib/CodeGen/CGBuilder.h
+++ b/clang/lib/CodeGen/CGBuilder.h
@@ -86,7 +86,8 @@ public:
llvm::LoadInst *CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr,
CharUnits Align,
const llvm::Twine &Name = "") {
- assert(Addr->getType()->getPointerElementType() == Ty);
+ assert(llvm::cast<llvm::PointerType>(Addr->getType())
+ ->isOpaqueOrPointeeTypeMatches(Ty));
return CreateAlignedLoad(Ty, Addr, Align.getAsAlign(), Name);
}
@@ -115,13 +116,15 @@ public:
/// Emit a load from an i1 flag variable.
llvm::LoadInst *CreateFlagLoad(llvm::Value *Addr,
const llvm::Twine &Name = "") {
- assert(Addr->getType()->getPointerElementType() == getInt1Ty());
+ assert(llvm::cast<llvm::PointerType>(Addr->getType())
+ ->isOpaqueOrPointeeTypeMatches(getInt1Ty()));
return CreateAlignedLoad(getInt1Ty(), Addr, CharUnits::One(), Name);
}
/// Emit a store to an i1 flag variable.
llvm::StoreInst *CreateFlagStore(bool Value, llvm::Value *Addr) {
- assert(Addr->getType()->getPointerElementType() == getInt1Ty());
+ assert(llvm::cast<llvm::PointerType>(Addr->getType())
+ ->isOpaqueOrPointeeTypeMatches(getInt1Ty()));
return CreateAlignedStore(getInt1(Value), Addr, CharUnits::One());
}
@@ -165,8 +168,9 @@ public:
/// preserving information like the alignment and address space.
Address CreateElementBitCast(Address Addr, llvm::Type *Ty,
const llvm::Twine &Name = "") {
- auto PtrTy = Ty->getPointerTo(Addr.getAddressSpace());
- return CreateBitCast(Addr, PtrTy, Name);
+ auto *PtrTy = Ty->getPointerTo(Addr.getAddressSpace());
+ return Address(CreateBitCast(Addr.getPointer(), PtrTy, Name),
+ Ty, Addr.getAlignment());
}
using CGBuilderBaseTy::CreatePointerBitCastOrAddrSpaceCast;
@@ -194,6 +198,7 @@ public:
return Address(CreateStructGEP(Addr.getElementType(),
Addr.getPointer(), Index, Name),
+ ElTy->getElementType(Index),
Addr.getAlignment().alignmentAtOffset(Offset));
}
@@ -215,6 +220,7 @@ public:
return Address(
CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
{getSize(CharUnits::Zero()), getSize(Index)}, Name),
+ ElTy->getElementType(),
Addr.getAlignment().alignmentAtOffset(Index * EltSize));
}
@@ -231,6 +237,7 @@ public:
return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Index), Name),
+ ElTy,
Addr.getAlignment().alignmentAtOffset(Index * EltSize));
}
@@ -247,15 +254,32 @@ public:
return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Index), Name),
+ Addr.getElementType(),
Addr.getAlignment().alignmentAtOffset(Index * EltSize));
}
+ /// Create GEP with single dynamic index. The address alignment is reduced
+ /// according to the element size.
+ using CGBuilderBaseTy::CreateGEP;
+ Address CreateGEP(Address Addr, llvm::Value *Index,
+ const llvm::Twine &Name = "") {
+ const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+ CharUnits EltSize =
+ CharUnits::fromQuantity(DL.getTypeAllocSize(Addr.getElementType()));
+
+ return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(), Index,
+ Name),
+ Addr.getElementType(),
+ Addr.getAlignment().alignmentOfArrayElement(EltSize));
+ }
+
/// Given a pointer to i8, adjust it by a given constant offset.
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset,
const llvm::Twine &Name = "") {
assert(Addr.getElementType() == TypeCache.Int8Ty);
return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Offset), Name),
+ Addr.getElementType(),
Addr.getAlignment().alignmentAtOffset(Offset));
}
Address CreateConstByteGEP(Address Addr, CharUnits Offset,
@@ -263,6 +287,7 @@ public:
assert(Addr.getElementType() == TypeCache.Int8Ty);
return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Offset), Name),
+ Addr.getElementType(),
Addr.getAlignment().alignmentAtOffset(Offset));
}
@@ -278,8 +303,9 @@ public:
/*isSigned=*/true);
if (!GEP->accumulateConstantOffset(DL, Offset))
llvm_unreachable("offset of GEP with constants is always computable");
- return Address(GEP, Addr.getAlignment().alignmentAtOffset(
- CharUnits::fromQuantity(Offset.getSExtValue())));
+ return Address(GEP, GEP->getResultElementType(),
+ Addr.getAlignment().alignmentAtOffset(
+ CharUnits::fromQuantity(Offset.getSExtValue())));
}
using CGBuilderBaseTy::CreateMemCpy;
@@ -330,8 +356,14 @@ public:
return Address(CreatePreserveStructAccessIndex(ElTy, Addr.getPointer(),
Index, FieldIndex, DbgInfo),
+ ElTy->getElementType(Index),
Addr.getAlignment().alignmentAtOffset(Offset));
}
+
+ using CGBuilderBaseTy::CreateLaunderInvariantGroup;
+ Address CreateLaunderInvariantGroup(Address Addr) {
+ return Addr.withPointer(CreateLaunderInvariantGroup(Addr.getPointer()));
+ }
};
} // end namespace CodeGen
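
With the CGBuilder helpers above returning fully-typed Address values, callers no longer spell out Ty->getPointerTo(AddrSpace) casts by hand; the element type, alignment and address space are carried along. A short sketch of the resulting call pattern (inside a CodeGenFunction; Addr, ArrayAddr and IndexValue are illustrative):

  // CreateElementBitCast changes only the element type, preserving the
  // alignment and address space of Addr.
  Address Bytes = Builder.CreateElementBitCast(Addr, Int8Ty);
  // Byte GEPs require an i8 element type and fold the offset into the
  // alignment, as asserted above.
  Address Slot = Builder.CreateConstInBoundsByteGEP(Bytes, CharUnits::fromQuantity(8));
  // The new dynamic-index CreateGEP conservatively reduces the alignment to
  // what the element size guarantees.
  Address Elt = Builder.CreateGEP(ArrayAddr, IndexValue);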
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 5d6df59cc405..1982b40ff667 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -96,13 +96,33 @@ llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
StringRef Name;
GlobalDecl D(FD);
+ // TODO: This list should be expanded or refactored after all GCC-compatible
+ // std libcall builtins are implemented.
+ static SmallDenseMap<unsigned, StringRef, 8> F128Builtins{
+ {Builtin::BI__builtin_printf, "__printfieee128"},
+ {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
+ {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
+ {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
+ {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
+ {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
+ {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
+ };
+
// If the builtin has been declared explicitly with an assembler label,
// use the mangled name. This differs from the plain label on platforms
// that prefix labels.
if (FD->hasAttr<AsmLabelAttr>())
Name = getMangledName(D);
- else
- Name = Context.BuiltinInfo.getName(BuiltinID) + 10;
+ else {
+ // TODO: This mutation should also be applied to other targets other than
+ // PPC, after backend supports IEEE 128-bit style libcalls.
+ if (getTriple().isPPC64() &&
+ &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
+ F128Builtins.find(BuiltinID) != F128Builtins.end())
+ Name = F128Builtins[BuiltinID];
+ else
+ Name = Context.BuiltinInfo.getName(BuiltinID) + 10;
+ }
llvm::FunctionType *Ty =
cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
@@ -667,7 +687,7 @@ getIntegerWidthAndSignedness(const clang::ASTContext &context,
const clang::QualType Type) {
assert(Type->isIntegerType() && "Given type is not an integer.");
unsigned Width = Type->isBooleanType() ? 1
- : Type->isExtIntType() ? context.getIntWidth(Type)
+ : Type->isBitIntType() ? context.getIntWidth(Type)
: context.getTypeInfo(Type).Width;
bool Signed = Type->isSignedIntegerType();
return {Width, Signed};
@@ -1482,8 +1502,7 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
Value *ArgValue = EmitScalarExpr(E->getArg(1));
llvm::Type *ArgType = ArgValue->getType();
- llvm::Type *IndexType =
- IndexAddress.getPointer()->getType()->getPointerElementType();
+ llvm::Type *IndexType = IndexAddress.getElementType();
llvm::Type *ResultType = ConvertType(E->getType());
Value *ArgZero = llvm::Constant::getNullValue(ArgType);
@@ -3113,6 +3132,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
"elt.abs");
return RValue::get(Result);
}
+
+ case Builtin::BI__builtin_elementwise_ceil: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Result = Builder.CreateUnaryIntrinsic(llvm::Intrinsic::ceil, Op0,
+ nullptr, "elt.ceil");
+ return RValue::get(Result);
+ }
+
case Builtin::BI__builtin_elementwise_max: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
Value *Op1 = EmitScalarExpr(E->getArg(1));
@@ -3184,6 +3211,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Result);
}
+ case Builtin::BI__builtin_reduce_xor: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Result = Builder.CreateUnaryIntrinsic(
+ llvm::Intrinsic::vector_reduce_xor, Op0, nullptr, "rdx.xor");
+ return RValue::get(Result);
+ }
+
case Builtin::BI__builtin_matrix_transpose: {
const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
Value *MatValue = EmitScalarExpr(E->getArg(0));
@@ -4478,6 +4512,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
case Builtin::BI__builtin_addressof:
return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
+ case Builtin::BI__builtin_function_start:
+ return RValue::get(CGM.GetFunctionStart(
+ E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext())));
case Builtin::BI__builtin_operator_new:
return EmitBuiltinNewDeleteCall(
E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
@@ -4674,8 +4711,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
case Builtin::BI__builtin_coro_suspend:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
- case Builtin::BI__builtin_coro_param:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
// OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
case Builtin::BIread_pipe:
@@ -5221,9 +5256,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Type *BPP = Int8PtrPtrTy;
DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
- DestAddr.getAlignment());
+ Int8PtrTy, DestAddr.getAlignment());
SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
- SrcAddr.getAlignment());
+ Int8PtrTy, SrcAddr.getAlignment());
Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
@@ -6385,6 +6420,7 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
#define GET_SVE_LLVM_INTRINSIC_MAP
#include "clang/Basic/arm_sve_builtin_cg.inc"
+#include "clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def"
#undef GET_SVE_LLVM_INTRINSIC_MAP
};
@@ -9308,6 +9344,54 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
return Builder.CreateCall(F, {V0, V1, Ops[1]});
}
+
+ case SVE::BI__builtin_sve_svset_neonq_s8:
+ case SVE::BI__builtin_sve_svset_neonq_s16:
+ case SVE::BI__builtin_sve_svset_neonq_s32:
+ case SVE::BI__builtin_sve_svset_neonq_s64:
+ case SVE::BI__builtin_sve_svset_neonq_u8:
+ case SVE::BI__builtin_sve_svset_neonq_u16:
+ case SVE::BI__builtin_sve_svset_neonq_u32:
+ case SVE::BI__builtin_sve_svset_neonq_u64:
+ case SVE::BI__builtin_sve_svset_neonq_f16:
+ case SVE::BI__builtin_sve_svset_neonq_f32:
+ case SVE::BI__builtin_sve_svset_neonq_f64:
+ case SVE::BI__builtin_sve_svset_neonq_bf16: {
+ return Builder.CreateInsertVector(Ty, Ops[0], Ops[1], Builder.getInt64(0));
+ }
+
+ case SVE::BI__builtin_sve_svget_neonq_s8:
+ case SVE::BI__builtin_sve_svget_neonq_s16:
+ case SVE::BI__builtin_sve_svget_neonq_s32:
+ case SVE::BI__builtin_sve_svget_neonq_s64:
+ case SVE::BI__builtin_sve_svget_neonq_u8:
+ case SVE::BI__builtin_sve_svget_neonq_u16:
+ case SVE::BI__builtin_sve_svget_neonq_u32:
+ case SVE::BI__builtin_sve_svget_neonq_u64:
+ case SVE::BI__builtin_sve_svget_neonq_f16:
+ case SVE::BI__builtin_sve_svget_neonq_f32:
+ case SVE::BI__builtin_sve_svget_neonq_f64:
+ case SVE::BI__builtin_sve_svget_neonq_bf16: {
+ return Builder.CreateExtractVector(Ty, Ops[0], Builder.getInt64(0));
+ }
+
+ case SVE::BI__builtin_sve_svdup_neonq_s8:
+ case SVE::BI__builtin_sve_svdup_neonq_s16:
+ case SVE::BI__builtin_sve_svdup_neonq_s32:
+ case SVE::BI__builtin_sve_svdup_neonq_s64:
+ case SVE::BI__builtin_sve_svdup_neonq_u8:
+ case SVE::BI__builtin_sve_svdup_neonq_u16:
+ case SVE::BI__builtin_sve_svdup_neonq_u32:
+ case SVE::BI__builtin_sve_svdup_neonq_u64:
+ case SVE::BI__builtin_sve_svdup_neonq_f16:
+ case SVE::BI__builtin_sve_svdup_neonq_f32:
+ case SVE::BI__builtin_sve_svdup_neonq_f64:
+ case SVE::BI__builtin_sve_svdup_neonq_bf16: {
+ Value *Insert = Builder.CreateInsertVector(Ty, UndefValue::get(Ty), Ops[0],
+ Builder.getInt64(0));
+ return Builder.CreateIntrinsic(Intrinsic::aarch64_sve_dupq_lane, {Ty},
+ {Insert, Builder.getInt64(0)});
+ }
}
/// Should not happen
@@ -15331,7 +15415,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// If the user wants the entire vector, just load the entire vector.
if (NumBytes == 16) {
Value *BC = Builder.CreateBitCast(Ops[0], ResTy->getPointerTo());
- Value *LD = Builder.CreateLoad(Address(BC, CharUnits::fromQuantity(1)));
+ Value *LD =
+ Builder.CreateLoad(Address(BC, ResTy, CharUnits::fromQuantity(1)));
if (!IsLE)
return LD;
@@ -15392,8 +15477,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
RevMask.push_back(15 - Idx);
StVec = Builder.CreateShuffleVector(Ops[2], Ops[2], RevMask);
}
- return Builder.CreateStore(StVec,
- Address(BC, CharUnits::fromQuantity(1)));
+ return Builder.CreateStore(
+ StVec, Address(BC, Ops[2]->getType(), CharUnits::fromQuantity(1)));
}
auto *ConvTy = Int64Ty;
unsigned NumElts = 0;
@@ -15427,8 +15512,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::bswap, ConvTy);
Elt = Builder.CreateCall(F, Elt);
}
- return Builder.CreateStore(Elt,
- Address(PtrBC, CharUnits::fromQuantity(1)));
+ return Builder.CreateStore(
+ Elt, Address(PtrBC, ConvTy, CharUnits::fromQuantity(1)));
};
unsigned Stored = 0;
unsigned RemainingBytes = NumBytes;
@@ -16222,7 +16307,8 @@ Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
auto *DstTy =
CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
- auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(2)));
+ auto *LD = CGF.Builder.CreateLoad(
+ Address(Cast, CGF.Int16Ty, CharUnits::fromQuantity(2)));
llvm::MDBuilder MDHelper(CGF.getLLVMContext());
llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
@@ -16242,7 +16328,8 @@ Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
auto *DstTy =
CGF.Int32Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
- auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(4)));
+ auto *LD = CGF.Builder.CreateLoad(
+ Address(Cast, CGF.Int32Ty, CharUnits::fromQuantity(4)));
LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
llvm::MDNode::get(CGF.getLLVMContext(), None));
return LD;
@@ -16314,8 +16401,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
- llvm::Type *RealFlagType
- = FlagOutPtr.getPointer()->getType()->getPointerElementType();
+ llvm::Type *RealFlagType = FlagOutPtr.getElementType();
llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
Builder.CreateStore(FlagExt, FlagOutPtr);
@@ -16572,6 +16658,15 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *RayInverseDir = EmitScalarExpr(E->getArg(4));
llvm::Value *TextureDescr = EmitScalarExpr(E->getArg(5));
+ // The builtins take these arguments as vec4 where the last element is
+ // ignored. The intrinsic takes them as vec3.
+ RayOrigin = Builder.CreateShuffleVector(RayOrigin, RayOrigin,
+ ArrayRef<int>{0, 1, 2});
+ RayDir =
+ Builder.CreateShuffleVector(RayDir, RayDir, ArrayRef<int>{0, 1, 2});
+ RayInverseDir = Builder.CreateShuffleVector(RayInverseDir, RayInverseDir,
+ ArrayRef<int>{0, 1, 2});
+
Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_image_bvh_intersect_ray,
{NodePtr->getType(), RayDir->getType()});
return Builder.CreateCall(F, {NodePtr, RayExtent, RayOrigin, RayDir,
@@ -17938,7 +18033,7 @@ RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
if (getLangOpts().isSignedOverflowDefined())
Result = Builder.CreateGEP(Int8Ty, Base, Difference, "aligned_result");
else
- Result = EmitCheckedInBoundsGEP(Base, Difference,
+ Result = EmitCheckedInBoundsGEP(Int8Ty, Base, Difference,
/*SignedIndices=*/true,
/*isSubtraction=*/!AlignUp,
E->getExprLoc(), "aligned_result");
@@ -18501,6 +18596,7 @@ getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
+ // Legacy builtins that take a vector in place of a vector predicate.
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
@@ -18534,8 +18630,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
// The base pointer is passed by address, so it needs to be loaded.
Address A = EmitPointerWithAlignment(E->getArg(0));
- Address BP = Address(
- Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), A.getAlignment());
+ Address BP = Address(Builder.CreateBitCast(
+ A.getPointer(), Int8PtrPtrTy), Int8PtrTy, A.getAlignment());
llvm::Value *Base = Builder.CreateLoad(BP);
// The treatment of both loads and stores is the same: the arguments for
// the builtin are the same as the arguments for the intrinsic.
@@ -18579,7 +18675,7 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
// per call.
Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
- DestAddr.getAlignment());
+ Int8Ty, DestAddr.getAlignment());
llvm::Value *DestAddress = DestAddr.getPointer();
// Operands are Base, Dest, Modifier.
@@ -18626,8 +18722,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
// Get the type from the 0-th argument.
llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
- Address PredAddr = Builder.CreateBitCast(
- EmitPointerWithAlignment(E->getArg(2)), VecType->getPointerTo(0));
+ Address PredAddr = Builder.CreateElementBitCast(
+ EmitPointerWithAlignment(E->getArg(2)), VecType);
llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
{EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
@@ -18638,6 +18734,27 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
return Builder.CreateExtractValue(Result, 0);
}
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq_128B:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq_128B:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq_128B:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq_128B: {
+ SmallVector<llvm::Value*,4> Ops;
+ const Expr *PredOp = E->getArg(0);
+ // There will be an implicit cast to a boolean vector. Strip it.
+ if (auto *Cast = dyn_cast<ImplicitCastExpr>(PredOp)) {
+ if (Cast->getCastKind() == CK_BitCast)
+ PredOp = Cast->getSubExpr();
+ Ops.push_back(V2Q(EmitScalarExpr(PredOp)));
+ }
+ for (int i = 1, e = E->getNumArgs(); i != e; ++i)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
+ }
+
case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
@@ -18674,40 +18791,6 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
case Hexagon::BI__builtin_brev_ldd:
return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
-
- default: {
- if (ID == Intrinsic::not_intrinsic)
- return nullptr;
-
- auto IsVectorPredTy = [](llvm::Type *T) {
- return T->isVectorTy() &&
- cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1);
- };
-
- llvm::Function *IntrFn = CGM.getIntrinsic(ID);
- llvm::FunctionType *IntrTy = IntrFn->getFunctionType();
- SmallVector<llvm::Value*,4> Ops;
- for (unsigned i = 0, e = IntrTy->getNumParams(); i != e; ++i) {
- llvm::Type *T = IntrTy->getParamType(i);
- const Expr *A = E->getArg(i);
- if (IsVectorPredTy(T)) {
- // There will be an implicit cast to a boolean vector. Strip it.
- if (auto *Cast = dyn_cast<ImplicitCastExpr>(A)) {
- if (Cast->getCastKind() == CK_BitCast)
- A = Cast->getSubExpr();
- }
- Ops.push_back(V2Q(EmitScalarExpr(A)));
- } else {
- Ops.push_back(EmitScalarExpr(A));
- }
- }
-
- llvm::Value *Call = Builder.CreateCall(IntrFn, Ops);
- if (IsVectorPredTy(IntrTy->getReturnType()))
- Call = Q2V(Call);
-
- return Call;
- } // default
} // switch
return nullptr;
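
Beyond the Address plumbing, this file adds codegen for two new vector builtins, __builtin_elementwise_ceil and __builtin_reduce_xor, lowering them to llvm.ceil and llvm.vector.reduce.xor (the "elt.ceil" / "rdx.xor" values above). A small source-level example of the calls being handled, compilable with a clang that carries this patch:

  typedef float float4 __attribute__((ext_vector_type(4)));
  typedef int   int4   __attribute__((ext_vector_type(4)));

  // Lowers to llvm.ceil applied lane-wise.
  float4 round_up(float4 v) { return __builtin_elementwise_ceil(v); }
  // Lowers to llvm.vector.reduce.xor, yielding a scalar int.
  int xor_lanes(int4 v) { return __builtin_reduce_xor(v); }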
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index a1b4431ca8c4..c4e3f7f54f4f 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -814,6 +814,9 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
Linkage,
/*Initializer=*/llvm::ConstantPointerNull::get(VoidPtrPtrTy),
"__hip_gpubin_handle");
+ if (Linkage == llvm::GlobalValue::LinkOnceAnyLinkage)
+ GpuBinaryHandle->setComdat(
+ CGM.getModule().getOrInsertComdat(GpuBinaryHandle->getName()));
GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getAsAlign());
// Prevent the weak symbol in different shared libraries being merged.
if (Linkage != llvm::GlobalValue::InternalLinkage)
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index d830a7e01709..d70f78fea6b4 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -1261,8 +1261,7 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
//
// FIXME: Assert that we aren't truncating non-padding bits when have access
// to that information.
- Src = CGF.Builder.CreateBitCast(Src,
- Ty->getPointerTo(Src.getAddressSpace()));
+ Src = CGF.Builder.CreateElementBitCast(Src, Ty);
return CGF.Builder.CreateLoad(Src);
}
@@ -1832,11 +1831,6 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
if (LangOpts.getFPExceptionMode() == LangOptions::FPE_Ignore)
FuncAttrs.addAttribute("no-trapping-math", "true");
- // Strict (compliant) code is the default, so only add this attribute to
- // indicate that we are trying to workaround a problem case.
- if (!CodeGenOpts.StrictFloatCastOverflow)
- FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
-
// TODO: Are these all needed?
// unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
if (LangOpts.NoHonorInfs)
@@ -1971,7 +1965,7 @@ static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
// there's no internal padding (typeSizeEqualsStoreSize).
return false;
}
- if (QTy->isExtIntType())
+ if (QTy->isBitIntType())
return true;
if (QTy->isReferenceType())
return true;
@@ -2686,8 +2680,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Indirect:
case ABIArgInfo::IndirectAliased: {
assert(NumIRArgs == 1);
- Address ParamAddr =
- Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());
+ Address ParamAddr = Address(Fn->getArg(FirstIRArg), ConvertTypeForMem(Ty),
+ ArgI.getIndirectAlign());
if (!hasScalarEvaluationKind(Ty)) {
// Aggregates and complex variables are accessed by reference. All we
@@ -3475,12 +3469,19 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
case TEK_Aggregate:
// Do nothing; aggregrates get evaluated directly into the destination.
break;
- case TEK_Scalar:
- EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
- MakeNaturalAlignAddrLValue(&*AI, RetTy),
- /*isInit*/ true);
+ case TEK_Scalar: {
+ LValueBaseInfo BaseInfo;
+ TBAAAccessInfo TBAAInfo;
+ CharUnits Alignment =
+ CGM.getNaturalTypeAlignment(RetTy, &BaseInfo, &TBAAInfo);
+ Address ArgAddr(&*AI, ConvertType(RetTy), Alignment);
+ LValue ArgVal =
+ LValue::MakeAddr(ArgAddr, RetTy, getContext(), BaseInfo, TBAAInfo);
+ EmitStoreOfScalar(
+ Builder.CreateLoad(ReturnValue), ArgVal, /*isInit*/ true);
break;
}
+ }
break;
}
@@ -4134,8 +4135,7 @@ void CodeGenFunction::EmitCallArgs(
}
// If we still have any arguments, emit them using the type of the argument.
- for (auto *A : llvm::make_range(std::next(ArgRange.begin(), ArgTypes.size()),
- ArgRange.end()))
+ for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
@@ -4308,11 +4308,8 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
// If we're using inalloca, use the argument memory. Otherwise, use a
// temporary.
- AggValueSlot Slot;
- if (args.isUsingInAlloca())
- Slot = createPlaceholderSlot(*this, type);
- else
- Slot = CreateAggTemp(type, "agg.tmp");
+ AggValueSlot Slot = args.isUsingInAlloca()
+ ? createPlaceholderSlot(*this, type) : CreateAggTemp(type, "agg.tmp");
bool DestroyedInCallee = true, NeedsEHCleanup = true;
if (const auto *RD = type->getAsCXXRecordDecl())
@@ -4651,13 +4648,13 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
//
// In other cases, we assert that the types match up (until pointers stop
// having pointee types).
- llvm::Type *TypeFromVal;
if (Callee.isVirtual())
- TypeFromVal = Callee.getVirtualFunctionType();
- else
- TypeFromVal =
- Callee.getFunctionPointer()->getType()->getPointerElementType();
- assert(IRFuncTy == TypeFromVal);
+ assert(IRFuncTy == Callee.getVirtualFunctionType());
+ else {
+ llvm::PointerType *PtrTy =
+ llvm::cast<llvm::PointerType>(Callee.getFunctionPointer()->getType());
+ assert(PtrTy->isOpaqueOrPointeeTypeMatches(IRFuncTy));
+ }
}
#endif
@@ -4872,7 +4869,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
I->copyInto(*this, AI);
} else {
// Skip the extra memcpy call.
- auto *T = V->getType()->getPointerElementType()->getPointerTo(
+ auto *T = llvm::PointerType::getWithSamePointeeType(
+ cast<llvm::PointerType>(V->getType()),
CGM.getDataLayout().getAllocaAddrSpace());
IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
*this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
@@ -4967,8 +4965,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
Src = TempAlloca;
} else {
- Src = Builder.CreateBitCast(Src,
- STy->getPointerTo(Src.getAddressSpace()));
+ Src = Builder.CreateElementBitCast(Src, STy);
}
assert(NumIRArgs == STy->getNumElements());
diff --git a/clang/lib/CodeGen/CGCall.h b/clang/lib/CodeGen/CGCall.h
index e3d9fec6d363..c8594068c3fc 100644
--- a/clang/lib/CodeGen/CGCall.h
+++ b/clang/lib/CodeGen/CGCall.h
@@ -115,7 +115,8 @@ public:
AbstractInfo = abstractInfo;
assert(functionPtr && "configuring callee without function pointer");
assert(functionPtr->getType()->isPointerTy());
- assert(functionPtr->getType()->getPointerElementType()->isFunctionTy());
+ assert(functionPtr->getType()->isOpaquePointerTy() ||
+ functionPtr->getType()->getPointerElementType()->isFunctionTy());
}
static CGCallee forBuiltin(unsigned builtinID,

diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp
index 0df64d4d5d26..8f99ff0d50ff 100644
--- a/clang/lib/CodeGen/CGClass.cpp
+++ b/clang/lib/CodeGen/CGClass.cpp
@@ -127,18 +127,18 @@ CodeGenModule::getDynamicOffsetAlignment(CharUnits actualBaseAlign,
Address CodeGenFunction::LoadCXXThisAddress() {
assert(CurFuncDecl && "loading 'this' without a func declaration?");
- assert(isa<CXXMethodDecl>(CurFuncDecl));
+ auto *MD = cast<CXXMethodDecl>(CurFuncDecl);
// Lazily compute CXXThisAlignment.
if (CXXThisAlignment.isZero()) {
// Just use the best known alignment for the parent.
// TODO: if we're currently emitting a complete-object ctor/dtor,
// we can always use the complete-object alignment.
- auto RD = cast<CXXMethodDecl>(CurFuncDecl)->getParent();
- CXXThisAlignment = CGM.getClassPointerAlignment(RD);
+ CXXThisAlignment = CGM.getClassPointerAlignment(MD->getParent());
}
- return Address(LoadCXXThis(), CXXThisAlignment);
+ llvm::Type *Ty = ConvertType(MD->getThisType()->getPointeeType());
+ return Address(LoadCXXThis(), Ty, CXXThisAlignment);
}
/// Emit the address of a field using a member data pointer.
@@ -286,7 +286,7 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr,
}
alignment = alignment.alignmentAtOffset(nonVirtualOffset);
- return Address(ptr, alignment);
+ return Address(ptr, CGF.Int8Ty, alignment);
}
Address CodeGenFunction::GetAddressOfBaseClass(
@@ -326,9 +326,9 @@ Address CodeGenFunction::GetAddressOfBaseClass(
}
// Get the base pointer type.
+ llvm::Type *BaseValueTy = ConvertType((PathEnd[-1])->getType());
llvm::Type *BasePtrTy =
- ConvertType((PathEnd[-1])->getType())
- ->getPointerTo(Value.getType()->getPointerAddressSpace());
+ BaseValueTy->getPointerTo(Value.getType()->getPointerAddressSpace());
QualType DerivedTy = getContext().getRecordType(Derived);
CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived);
@@ -342,7 +342,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
EmitTypeCheck(TCK_Upcast, Loc, Value.getPointer(),
DerivedTy, DerivedAlign, SkippedChecks);
}
- return Builder.CreateBitCast(Value, BasePtrTy);
+ return Builder.CreateElementBitCast(Value, BaseValueTy);
}
llvm::BasicBlock *origBB = nullptr;
@@ -379,7 +379,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
VirtualOffset, Derived, VBase);
// Cast to the destination type.
- Value = Builder.CreateBitCast(Value, BasePtrTy);
+ Value = Builder.CreateElementBitCast(Value, BaseValueTy);
// Build a phi if we needed a null check.
if (NullCheckValue) {
@@ -406,16 +406,16 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
QualType DerivedTy =
getContext().getCanonicalType(getContext().getTagDeclType(Derived));
- unsigned AddrSpace =
- BaseAddr.getPointer()->getType()->getPointerAddressSpace();
- llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo(AddrSpace);
+ unsigned AddrSpace = BaseAddr.getAddressSpace();
+ llvm::Type *DerivedValueTy = ConvertType(DerivedTy);
+ llvm::Type *DerivedPtrTy = DerivedValueTy->getPointerTo(AddrSpace);
llvm::Value *NonVirtualOffset =
CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);
if (!NonVirtualOffset) {
// No offset, we can just cast back.
- return Builder.CreateBitCast(BaseAddr, DerivedPtrTy);
+ return Builder.CreateElementBitCast(BaseAddr, DerivedValueTy);
}
llvm::BasicBlock *CastNull = nullptr;
@@ -453,7 +453,7 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
Value = PHI;
}
- return Address(Value, CGM.getClassPointerAlignment(Derived));
+ return Address(Value, DerivedValueTy, CGM.getClassPointerAlignment(Derived));
}
llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
@@ -996,16 +996,8 @@ namespace {
private:
void emitMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) {
- llvm::PointerType *DPT = DestPtr.getType();
- llvm::Type *DBP =
- llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
- DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);
-
- llvm::PointerType *SPT = SrcPtr.getType();
- llvm::Type *SBP =
- llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
- SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);
-
+ DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);
+ SrcPtr = CGF.Builder.CreateElementBitCast(SrcPtr, CGF.Int8Ty);
CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity());
}
@@ -2068,8 +2060,8 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
if (SlotAS != ThisAS) {
unsigned TargetThisAS = getContext().getTargetAddressSpace(ThisAS);
- llvm::Type *NewType =
- ThisPtr->getType()->getPointerElementType()->getPointerTo(TargetThisAS);
+ llvm::Type *NewType = llvm::PointerType::getWithSamePointeeType(
+ This.getType(), TargetThisAS);
ThisPtr = getTargetHooks().performAddrSpaceCast(*this, This.getPointer(),
ThisAS, SlotAS, NewType);
}
@@ -2507,9 +2499,6 @@ void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) {
// Apply the offsets.
Address VTableField = LoadCXXThisAddress();
- unsigned ThisAddrSpace =
- VTableField.getPointer()->getType()->getPointerAddressSpace();
-
if (!NonVirtualOffset.isZero() || VirtualOffset)
VTableField = ApplyNonVirtualAndVirtualOffset(
*this, VTableField, NonVirtualOffset, VirtualOffset, Vptr.VTableClass,
@@ -2525,8 +2514,7 @@ void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) {
->getPointerTo(GlobalsAS);
// vtable field is derived from `this` pointer, therefore they should be in
// the same addr space. Note that this might not be LLVM address space 0.
- VTableField = Builder.CreateBitCast(VTableField,
- VTablePtrTy->getPointerTo(ThisAddrSpace));
+ VTableField = Builder.CreateElementBitCast(VTableField, VTablePtrTy);
VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy);
llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
diff --git a/clang/lib/CodeGen/CGCleanup.h b/clang/lib/CodeGen/CGCleanup.h
index 1b54c0018d27..76f3a48f32f3 100644
--- a/clang/lib/CodeGen/CGCleanup.h
+++ b/clang/lib/CodeGen/CGCleanup.h
@@ -242,7 +242,7 @@ class alignas(8) EHCleanupScope : public EHScope {
/// An optional i1 variable indicating whether this cleanup has been
/// activated yet.
- llvm::AllocaInst *ActiveFlag;
+ Address ActiveFlag;
/// Extra information required for cleanups that have resolved
/// branches through them. This has to be allocated on the side
@@ -290,7 +290,8 @@ public:
EHScopeStack::stable_iterator enclosingEH)
: EHScope(EHScope::Cleanup, enclosingEH),
EnclosingNormal(enclosingNormal), NormalBlock(nullptr),
- ActiveFlag(nullptr), ExtInfo(nullptr), FixupDepth(fixupDepth) {
+ ActiveFlag(Address::invalid()), ExtInfo(nullptr),
+ FixupDepth(fixupDepth) {
CleanupBits.IsNormalCleanup = isNormal;
CleanupBits.IsEHCleanup = isEH;
CleanupBits.IsActive = true;
@@ -320,13 +321,13 @@ public:
bool isLifetimeMarker() const { return CleanupBits.IsLifetimeMarker; }
void setLifetimeMarker() { CleanupBits.IsLifetimeMarker = true; }
- bool hasActiveFlag() const { return ActiveFlag != nullptr; }
+ bool hasActiveFlag() const { return ActiveFlag.isValid(); }
Address getActiveFlag() const {
- return Address(ActiveFlag, CharUnits::One());
+ return ActiveFlag;
}
void setActiveFlag(Address Var) {
assert(Var.getAlignment().isOne());
- ActiveFlag = cast<llvm::AllocaInst>(Var.getPointer());
+ ActiveFlag = Var;
}
void setTestFlagInNormalCleanup() {
diff --git a/clang/lib/CodeGen/CGCoroutine.cpp b/clang/lib/CodeGen/CGCoroutine.cpp
index ca071d3d2e80..2041d2a5b4c9 100644
--- a/clang/lib/CodeGen/CGCoroutine.cpp
+++ b/clang/lib/CodeGen/CGCoroutine.cpp
@@ -597,6 +597,10 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
CGM.getIntrinsic(llvm::Intrinsic::coro_begin), {CoroId, Phi});
CurCoro.Data->CoroBegin = CoroBegin;
+ // We need to emit `get_­return_­object` first. According to:
+ // [dcl.fct.def.coroutine]p7
+ // The call to get_­return_­object is sequenced before the call to
+ // initial_­suspend and is invoked at most once.
GetReturnObjectManager GroManager(*this, S);
GroManager.EmitGroAlloca();
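
The comment added above records the rule from [dcl.fct.def.coroutine]p7 that the emission order follows: get_return_object is called at most once and before initial_suspend. A minimal user-level coroutine, only to make that ordering concrete (C++20, illustrative):

  #include <coroutine>

  struct Task {
    struct promise_type {
      // Invoked at most once and sequenced before initial_suspend(),
      // matching the order EmitCoroutineBody now emits.
      Task get_return_object() { return {}; }
      std::suspend_never initial_suspend() noexcept { return {}; }
      std::suspend_never final_suspend() noexcept { return {}; }
      void return_void() {}
      void unhandled_exception() {}
    };
  };

  Task demo() { co_return; }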
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index af651e6f44b7..6e189a61dd20 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -768,7 +768,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
}
// Element count = (VLENB / SEW) x LMUL
- SmallVector<int64_t, 9> Expr(
+ SmallVector<int64_t, 12> Expr(
// The DW_OP_bregx operation has two operands: a register which is
// specified by an unsigned LEB128 number, followed by a signed LEB128
// offset.
@@ -782,6 +782,8 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
Expr.push_back(llvm::dwarf::DW_OP_div);
else
Expr.push_back(llvm::dwarf::DW_OP_mul);
+ // Element max index = count - 1
+ Expr.append({llvm::dwarf::DW_OP_constu, 1, llvm::dwarf::DW_OP_minus});
auto *LowerBound =
llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
@@ -884,9 +886,9 @@ llvm::DIType *CGDebugInfo::CreateType(const AutoType *Ty) {
return DBuilder.createUnspecifiedType("auto");
}
-llvm::DIType *CGDebugInfo::CreateType(const ExtIntType *Ty) {
+llvm::DIType *CGDebugInfo::CreateType(const BitIntType *Ty) {
- StringRef Name = Ty->isUnsigned() ? "unsigned _ExtInt" : "_ExtInt";
+ StringRef Name = Ty->isUnsigned() ? "unsigned _BitInt" : "_BitInt";
llvm::dwarf::TypeKind Encoding = Ty->isUnsigned()
? llvm::dwarf::DW_ATE_unsigned
: llvm::dwarf::DW_ATE_signed;
@@ -3353,6 +3355,9 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
case Type::Elaborated:
T = cast<ElaboratedType>(T)->getNamedType();
break;
+ case Type::Using:
+ T = cast<UsingType>(T)->getUnderlyingType();
+ break;
case Type::Paren:
T = cast<ParenType>(T)->getInnerType();
break;
@@ -3531,8 +3536,8 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit,
case Type::Atomic:
return CreateType(cast<AtomicType>(Ty), Unit);
- case Type::ExtInt:
- return CreateType(cast<ExtIntType>(Ty));
+ case Type::BitInt:
+ return CreateType(cast<BitIntType>(Ty));
case Type::Pipe:
return CreateType(cast<PipeType>(Ty), Unit);
@@ -3545,6 +3550,7 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit,
case Type::Decayed:
case Type::DeducedTemplateSpecialization:
case Type::Elaborated:
+ case Type::Using:
case Type::Paren:
case Type::MacroQualified:
case Type::SubstTemplateTypeParm:
@@ -3633,6 +3639,9 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
// Record exports it symbols to the containing structure.
if (CXXRD->isAnonymousStructOrUnion())
Flags |= llvm::DINode::FlagExportSymbols;
+
+ Flags |= getAccessFlag(CXXRD->getAccess(),
+ dyn_cast<CXXRecordDecl>(CXXRD->getDeclContext()));
}
llvm::DINodeArray Annotations = CollectBTFDeclTagAnnotations(D);
diff --git a/clang/lib/CodeGen/CGDebugInfo.h b/clang/lib/CodeGen/CGDebugInfo.h
index a7b72fa5f5a6..14ff0eeabd21 100644
--- a/clang/lib/CodeGen/CGDebugInfo.h
+++ b/clang/lib/CodeGen/CGDebugInfo.h
@@ -177,7 +177,7 @@ class CGDebugInfo {
llvm::DIType *CreateType(const BuiltinType *Ty);
llvm::DIType *CreateType(const ComplexType *Ty);
llvm::DIType *CreateType(const AutoType *Ty);
- llvm::DIType *CreateType(const ExtIntType *Ty);
+ llvm::DIType *CreateType(const BitIntType *Ty);
llvm::DIType *CreateQualifiedType(QualType Ty, llvm::DIFile *Fg,
TypeLoc TL = TypeLoc());
llvm::DIType *CreateQualifiedType(const FunctionProtoType *Ty,
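
The ExtIntType-to-BitIntType renames here and in CGDebugInfo.cpp track the language-side rename of _ExtInt to C23 _BitInt; the emitted DWARF base-type names change accordingly. For reference, the source-level spelling this debug info describes (accepted by clang in C and, as an extension, in C++):

  // Debug info for these now uses DW_ATE_signed / DW_ATE_unsigned with the
  // type names "_BitInt" and "unsigned _BitInt", as set up above.
  _BitInt(24) counter;          // 24-bit signed bit-precise integer
  unsigned _BitInt(7) flags;    // 7-bit unsigned bit-precise integer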
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index 941671c61482..e09279c1d455 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -405,7 +405,8 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// Store into LocalDeclMap before generating initializer to handle
// circular references.
- setAddrOfLocalVar(&D, Address(addr, alignment));
+ llvm::Type *elemTy = ConvertTypeForMem(D.getType());
+ setAddrOfLocalVar(&D, Address(addr, elemTy, alignment));
// We can't have a VLA here, but we can have a pointer to a VLA,
// even though that doesn't really make any sense.
@@ -458,8 +459,7 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// RAUW's the GV uses of this constant will be invalid.
llvm::Constant *castedAddr =
llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
- if (var != castedAddr)
- LocalDeclMap.find(&D)->second = Address(castedAddr, alignment);
+ LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment);
CGM.setStaticLocalDeclAddress(&D, castedAddr);
CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);
@@ -1146,7 +1146,7 @@ Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
CacheEntry->setAlignment(Align.getAsAlign());
}
- return Address(CacheEntry, Align);
+ return Address(CacheEntry, CacheEntry->getValueType(), Align);
}
static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
@@ -1193,7 +1193,7 @@ static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
bool valueAlreadyCorrect =
constant->isNullValue() || isa<llvm::UndefValue>(constant);
if (!valueAlreadyCorrect) {
- Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo(Loc.getAddressSpace()));
+ Loc = Builder.CreateElementBitCast(Loc, Ty);
emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder,
IsAutoInit);
}
diff --git a/clang/lib/CodeGen/CGDeclCXX.cpp b/clang/lib/CodeGen/CGDeclCXX.cpp
index d22f9dc3b68c..3579761f1429 100644
--- a/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -172,7 +172,7 @@ void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
}
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
- llvm::Constant *DeclPtr,
+ llvm::GlobalVariable *GV,
bool PerformInit) {
const Expr *Init = D.getInit();
@@ -194,14 +194,16 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
// "shared" address space qualifier, but the constructor of StructWithCtor
// expects "this" in the "generic" address space.
unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(T);
- unsigned ActualAddrSpace = DeclPtr->getType()->getPointerAddressSpace();
+ unsigned ActualAddrSpace = GV->getAddressSpace();
+ llvm::Constant *DeclPtr = GV;
if (ActualAddrSpace != ExpectedAddrSpace) {
- llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(T);
- llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
+ llvm::PointerType *PTy = llvm::PointerType::getWithSamePointeeType(
+ GV->getType(), ExpectedAddrSpace);
DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
}
- ConstantAddress DeclAddr(DeclPtr, getContext().getDeclAlign(&D));
+ ConstantAddress DeclAddr(
+ DeclPtr, GV->getValueType(), getContext().getDeclAlign(&D));
if (!T->isReferenceType()) {
if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
diff --git a/clang/lib/CodeGen/CGException.cpp b/clang/lib/CodeGen/CGException.cpp
index aff9c77d53c7..91ecbecc843f 100644
--- a/clang/lib/CodeGen/CGException.cpp
+++ b/clang/lib/CodeGen/CGException.cpp
@@ -400,8 +400,8 @@ void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) {
// __cxa_allocate_exception returns a void*; we need to cast this
// to the appropriate type for the object.
- llvm::Type *ty = ConvertTypeForMem(e->getType())->getPointerTo();
- Address typedAddr = Builder.CreateBitCast(addr, ty);
+ llvm::Type *ty = ConvertTypeForMem(e->getType());
+ Address typedAddr = Builder.CreateElementBitCast(addr, ty);
// FIXME: this isn't quite right! If there's a final unelided call
// to a copy constructor, then according to [except.terminate]p1 we
@@ -421,13 +421,13 @@ void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) {
Address CodeGenFunction::getExceptionSlot() {
if (!ExceptionSlot)
ExceptionSlot = CreateTempAlloca(Int8PtrTy, "exn.slot");
- return Address(ExceptionSlot, getPointerAlign());
+ return Address(ExceptionSlot, Int8PtrTy, getPointerAlign());
}
Address CodeGenFunction::getEHSelectorSlot() {
if (!EHSelectorSlot)
EHSelectorSlot = CreateTempAlloca(Int32Ty, "ehselector.slot");
- return Address(EHSelectorSlot, CharUnits::fromQuantity(4));
+ return Address(EHSelectorSlot, Int32Ty, CharUnits::fromQuantity(4));
}
llvm::Value *CodeGenFunction::getExceptionFromSlot() {
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 4332e74dbb24..34b4951a7f72 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -71,7 +71,7 @@ Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
llvm::Value *ArraySize) {
auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
Alloca->setAlignment(Align.getAsAlign());
- return Address(Alloca, Align);
+ return Address(Alloca, Ty, Align);
}
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
@@ -101,7 +101,7 @@ Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
}
- return Address(V, Align);
+ return Address(V, Ty, Align);
}
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
@@ -144,7 +144,7 @@ Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
/*ArraySize=*/nullptr, Alloca);
if (Ty->isConstantMatrixType()) {
- auto *ArrayTy = cast<llvm::ArrayType>(Result.getType()->getElementType());
+ auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
ArrayTy->getNumElements());
@@ -1099,7 +1099,7 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
if (BaseInfo)
BaseInfo->mergeForCast(TargetTypeBaseInfo);
- Addr = Address(Addr.getPointer(), Align);
+ Addr = Address(Addr.getPointer(), Addr.getElementType(), Align);
}
}
@@ -1111,10 +1111,12 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
CodeGenFunction::CFITCK_UnrelatedCast,
CE->getBeginLoc());
}
- return CE->getCastKind() != CK_AddressSpaceConversion
- ? Builder.CreateBitCast(Addr, ConvertType(E->getType()))
- : Builder.CreateAddrSpaceCast(Addr,
- ConvertType(E->getType()));
+
+ if (CE->getCastKind() == CK_AddressSpaceConversion)
+ return Builder.CreateAddrSpaceCast(Addr, ConvertType(E->getType()));
+
+ llvm::Type *ElemTy = ConvertTypeForMem(E->getType()->getPointeeType());
+ return Builder.CreateElementBitCast(Addr, ElemTy);
}
break;
@@ -1160,7 +1162,8 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
// Otherwise, use the alignment of the type.
CharUnits Align =
CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
- return Address(EmitScalarExpr(E), Align);
+ llvm::Type *ElemTy = ConvertTypeForMem(E->getType()->getPointeeType());
+ return Address(EmitScalarExpr(E), ElemTy, Align);
}
llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
@@ -1306,7 +1309,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
const ConstantExpr *CE = cast<ConstantExpr>(E);
if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit())
- ->getCallReturnType(getContext());
+ ->getCallReturnType(getContext())
+ ->getPointeeType();
return MakeNaturalAlignAddrLValue(Result, RetType);
}
return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
@@ -1342,10 +1346,11 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
if (LV.isSimple()) {
// Defend against branches out of gnu statement expressions surrounded by
// cleanups.
- llvm::Value *V = LV.getPointer(*this);
+ Address Addr = LV.getAddress(*this);
+ llvm::Value *V = Addr.getPointer();
Scope.ForceCleanup({&V});
- return LValue::MakeAddr(Address(V, LV.getAlignment()), LV.getType(),
- getContext(), LV.getBaseInfo(), LV.getTBAAInfo());
+ return LValue::MakeAddr(Addr.withPointer(V), LV.getType(), getContext(),
+ LV.getBaseInfo(), LV.getTBAAInfo());
}
// FIXME: Is it possible to create an ExprWithCleanups that produces a
// bitfield lvalue or some other non-simple lvalue?
@@ -1777,16 +1782,14 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
// MatrixType), if it points to an array (the memory type of MatrixType).
static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
bool IsVector = true) {
- auto *ArrayTy = dyn_cast<llvm::ArrayType>(
- cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
+ auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
if (ArrayTy && IsVector) {
auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
ArrayTy->getNumElements());
return Address(CGF.Builder.CreateElementBitCast(Addr, VectorTy));
}
- auto *VectorTy = dyn_cast<llvm::VectorType>(
- cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
+ auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
if (VectorTy && !IsVector) {
auto *ArrayTy = llvm::ArrayType::get(
VectorTy->getElementType(),
@@ -2475,10 +2478,11 @@ CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
+ QualType PointeeType = RefLVal.getType()->getPointeeType();
CharUnits Align = CGM.getNaturalTypeAlignment(
- RefLVal.getType()->getPointeeType(), PointeeBaseInfo, PointeeTBAAInfo,
+ PointeeType, PointeeBaseInfo, PointeeTBAAInfo,
/* forPointeeType= */ true);
- return Address(Load, Align);
+ return Address(Load, ConvertTypeForMem(PointeeType), Align);
}
LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
@@ -2528,7 +2532,7 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
- Address Addr(V, Alignment);
+ Address Addr(V, RealVarTy, Alignment);
// Emit reference to the private copy of the variable if it is an OpenMP
// threadprivate variable.
if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
@@ -2610,7 +2614,7 @@ static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
llvm::Value *Ptr =
llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
- return LValue::MakeGlobalReg(Address(Ptr, Alignment), VD->getType());
+ return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
}
/// Determine whether we can emit a reference to \p VD from the current
@@ -2706,7 +2710,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
/* BaseInfo= */ nullptr,
/* TBAAInfo= */ nullptr,
/* forPointeeType= */ true);
- Addr = Address(Val, Alignment);
+ Addr = Address(Val, ConvertTypeForMem(E->getType()), Alignment);
}
return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
@@ -2783,9 +2787,10 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
// Otherwise, it might be static local we haven't emitted yet for
// some reason; most likely, because it's in an outer function.
} else if (VD->isStaticLocal()) {
- addr = Address(CGM.getOrCreateStaticVarDecl(
- *VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false)),
- getContext().getDeclAlign(VD));
+ llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
+ *VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false));
+ addr = Address(
+ var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
// No other cases for now.
} else {
@@ -3586,7 +3591,7 @@ static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
SourceLocation loc,
const llvm::Twine &name = "arrayidx") {
if (inbounds) {
- return CGF.EmitCheckedInBoundsGEP(ptr, indices, signedIndices,
+ return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
CodeGenFunction::NotSubtraction, loc,
name);
} else {
@@ -3698,7 +3703,7 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
idx, DbgInfo);
}
- return Address(eltPtr, eltAlign);
+ return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
}
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
@@ -4380,8 +4385,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
hasAnyVptr(FieldType, getContext()))
// Because unions can easily skip invariant.barriers, we need to add
// a barrier every time CXXRecord field with vptr is referenced.
- addr = Address(Builder.CreateLaunderInvariantGroup(addr.getPointer()),
- addr.getAlignment());
+ addr = Builder.CreateLaunderInvariantGroup(addr);
if (IsInPreservedAIRegion ||
(getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
@@ -4539,10 +4543,10 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
// because it can't be used.
if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
EmitCXXThrowExpr(ThrowExpr);
- llvm::Type *Ty =
- llvm::PointerType::getUnqual(ConvertType(dead->getType()));
+ llvm::Type *ElemTy = ConvertType(dead->getType());
+ llvm::Type *Ty = llvm::PointerType::getUnqual(ElemTy);
return MakeAddrLValue(
- Address(llvm::UndefValue::get(Ty), CharUnits::One()),
+ Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
dead->getType());
}
return EmitLValue(live);
@@ -4584,11 +4588,13 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
EmitBlock(contBlock);
if (lhs && rhs) {
- llvm::PHINode *phi =
- Builder.CreatePHI(lhs->getPointer(*this)->getType(), 2, "cond-lvalue");
- phi->addIncoming(lhs->getPointer(*this), lhsBlock);
- phi->addIncoming(rhs->getPointer(*this), rhsBlock);
- Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment()));
+ Address lhsAddr = lhs->getAddress(*this);
+ Address rhsAddr = rhs->getAddress(*this);
+ llvm::PHINode *phi = Builder.CreatePHI(lhsAddr.getType(), 2, "cond-lvalue");
+ phi->addIncoming(lhsAddr.getPointer(), lhsBlock);
+ phi->addIncoming(rhsAddr.getPointer(), rhsBlock);
+ Address result(phi, lhsAddr.getElementType(),
+ std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
AlignmentSource alignSource =
std::max(lhs->getBaseInfo().getAlignmentSource(),
rhs->getBaseInfo().getAlignmentSource());
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index 5b56a587fa5f..3b996b89a1d7 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -301,7 +301,7 @@ void AggExprEmitter::withReturnValueSlot(
if (!UseTemp)
return;
- assert(Dest.getPointer() != Src.getAggregatePointer());
+ assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer());
EmitFinalDestCopy(E->getType(), Src);
if (!RequiresDestruction && LifetimeStartInst) {
@@ -493,7 +493,7 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
CharUnits elementAlign =
DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
- llvm::Type *llvmElementType = begin->getType()->getPointerElementType();
+ llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
// Consider initializing the array by copying from a global. For this to be
// more efficient than per-element initialization, the size of the elements
@@ -513,7 +513,8 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
Emitter.finalize(GV);
CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
GV->setAlignment(Align.getAsAlign());
- EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GV, ArrayQTy, Align));
+ Address GVAddr(GV, GV->getValueType(), Align);
+ EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, ArrayQTy));
return;
}
}
@@ -565,8 +566,8 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
}
- LValue elementLV =
- CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
+ LValue elementLV = CGF.MakeAddrLValue(
+ Address(element, llvmElementType, elementAlign), elementType);
EmitInitializationToLValue(E->getInit(i), elementLV);
}
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index cc838bf38c6c..0571c498c377 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -1052,13 +1052,8 @@ void CodeGenFunction::EmitNewArrayInitializer(
InitListElements =
cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
->getSize().getZExtValue();
- CurPtr =
- Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(),
- CurPtr.getPointer(),
- Builder.getSize(InitListElements),
- "string.init.end"),
- CurPtr.getAlignment().alignmentAtOffset(InitListElements *
- ElementSize));
+ CurPtr = Builder.CreateConstInBoundsGEP(
+ CurPtr, InitListElements, "string.init.end");
// Zero out the rest, if any remain.
llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
@@ -1135,7 +1130,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
}
// Switch back to initializing one base element at a time.
- CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
+ CurPtr = Builder.CreateElementBitCast(CurPtr, BeginPtr.getElementType());
}
// If all elements have already been initialized, skip any further
@@ -1594,7 +1589,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// In these cases, discard the computed alignment and use the
// formal alignment of the allocated type.
if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
- allocation = Address(allocation.getPointer(), allocAlign);
+ allocation = allocation.withAlignment(allocAlign);
// Set up allocatorArgs for the call to operator delete if it's not
// the reserved global operator.
@@ -1664,7 +1659,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
}
- allocation = Address(RV.getScalarVal(), allocationAlign);
+ allocation = Address(RV.getScalarVal(), Int8Ty, allocationAlign);
}
// Emit a null check on the allocation result if the allocation
@@ -1725,8 +1720,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// of optimization level.
if (CGM.getCodeGenOpts().StrictVTablePointers &&
allocator->isReservedGlobalPlacementOperator())
- result = Address(Builder.CreateLaunderInvariantGroup(result.getPointer()),
- result.getAlignment());
+ result = Builder.CreateLaunderInvariantGroup(result);
// Emit sanitizer checks for pointer value now, so that in the case of an
// array it was checked only once and not at each constructor call. We may
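Illustrative sketch, not part of the patch: the new-expression hunks above use the Address::withAlignment helper (and a withPointer counterpart elsewhere in the patch) to copy an Address with one field replaced instead of rebuilding it and losing the element type. A small example, again assuming clang::CodeGen context:

    #include "Address.h"

    // Returns a copy of A with a different alignment; pointer and element type
    // are preserved.
    static clang::CodeGen::Address realign(clang::CodeGen::Address A,
                                           clang::CharUnits NewAlign) {
      return A.withAlignment(NewAlign);
    }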
diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp
index ff900ed077e6..cf1f2e0eab92 100644
--- a/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/clang/lib/CodeGen/CGExprConstant.cpp
@@ -899,7 +899,7 @@ static ConstantAddress tryEmitGlobalCompoundLiteral(CodeGenModule &CGM,
CharUnits Align = CGM.getContext().getTypeAlignInChars(E->getType());
if (llvm::GlobalVariable *Addr =
CGM.getAddrOfConstantCompoundLiteralIfEmitted(E))
- return ConstantAddress(Addr, Align);
+ return ConstantAddress(Addr, Addr->getValueType(), Align);
LangAS addressSpace = E->getType().getAddressSpace();
@@ -921,7 +921,7 @@ static ConstantAddress tryEmitGlobalCompoundLiteral(CodeGenModule &CGM,
emitter.finalize(GV);
GV->setAlignment(Align.getAsAlign());
CGM.setAddrOfConstantCompoundLiteral(E, GV);
- return ConstantAddress(GV, Align);
+ return ConstantAddress(GV, GV->getValueType(), Align);
}
static llvm::Constant *
@@ -1988,6 +1988,9 @@ ConstantLValueEmitter::VisitAddrLabelExpr(const AddrLabelExpr *E) {
ConstantLValue
ConstantLValueEmitter::VisitCallExpr(const CallExpr *E) {
unsigned builtin = E->getBuiltinCallee();
+ if (builtin == Builtin::BI__builtin_function_start)
+ return CGM.GetFunctionStart(
+ E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext()));
if (builtin != Builtin::BI__builtin___CFStringMakeConstantString &&
builtin != Builtin::BI__builtin___NSStringMakeConstantString)
return nullptr;
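Illustrative sketch, not part of the patch: the new branch above lets the constant l-value emitter resolve __builtin_function_start, so the start address of a function can appear in a constant initializer. A usage example, assuming a Clang new enough to provide the builtin:

    extern "C" void callee() {}

    // Constant-folded by the VisitCallExpr case added above; the result is the
    // address where callee's code begins.
    void *const CalleeStart = __builtin_function_start(callee);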
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index ae9434f96529..e32462eb635c 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -1240,7 +1240,18 @@ Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
if (isa<llvm::IntegerType>(DstElementTy)) {
assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
- if (DstElementType->isSignedIntegerOrEnumerationType())
+ bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();
+
+ // If we can't recognize overflow as undefined behavior, assume that
+ // overflow saturates. This protects against normal optimizations if we are
+ // compiling with non-standard FP semantics.
+ if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
+ llvm::Intrinsic::ID IID =
+ IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
+ return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
+ }
+
+ if (IsSigned)
return Builder.CreateFPToSI(Src, DstTy, "conv");
return Builder.CreateFPToUI(Src, DstTy, "conv");
}
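Illustrative sketch, not part of the patch: with StrictFloatCastOverflow disabled (the -fno-strict-float-cast-overflow driver flag), the hunk above emits llvm.fptosi.sat / llvm.fptoui.sat, so an out-of-range conversion clamps instead of being undefined. A small program showing the observable difference under that flag:

    #include <cstdio>
    #include <limits>

    int main() {
      float Huge = 1.0e30f;
      // Compiled with -fno-strict-float-cast-overflow, the cast lowers to the
      // saturating intrinsic and I becomes INT_MAX; under the default strict
      // mode this out-of-range cast is undefined behavior.
      int I = static_cast<int>(Huge);
      std::printf("%d (INT_MAX = %d)\n", I, std::numeric_limits<int>::max());
      return 0;
    }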
@@ -2631,12 +2642,12 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
= CGF.getContext().getAsVariableArrayType(type)) {
llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
+ llvm::Type *elemTy = value->getType()->getPointerElementType();
if (CGF.getLangOpts().isSignedOverflowDefined())
- value = Builder.CreateGEP(value->getType()->getPointerElementType(),
- value, numElts, "vla.inc");
+ value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
else
value = CGF.EmitCheckedInBoundsGEP(
- value, numElts, /*SignedIndices=*/false, isSubtraction,
+ elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
E->getExprLoc(), "vla.inc");
// Arithmetic on function pointers (!) is just +-1.
@@ -2647,7 +2658,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
else
- value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
+ value = CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
+ /*SignedIndices=*/false,
isSubtraction, E->getExprLoc(),
"incdec.funcptr");
value = Builder.CreateBitCast(value, input->getType());
@@ -2655,13 +2667,13 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// For everything else, we can just do a simple increment.
} else {
llvm::Value *amt = Builder.getInt32(amount);
+ llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
if (CGF.getLangOpts().isSignedOverflowDefined())
- value = Builder.CreateGEP(value->getType()->getPointerElementType(),
- value, amt, "incdec.ptr");
+ value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
else
- value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
- isSubtraction, E->getExprLoc(),
- "incdec.ptr");
+ value = CGF.EmitCheckedInBoundsGEP(
+ elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
+ E->getExprLoc(), "incdec.ptr");
}
// Vector increment/decrement.
@@ -2771,9 +2783,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
else
- value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
- /*SignedIndices=*/false, isSubtraction,
- E->getExprLoc(), "incdec.objptr");
+ value = CGF.EmitCheckedInBoundsGEP(
+ CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
+ E->getExprLoc(), "incdec.objptr");
value = Builder.CreateBitCast(value, input->getType());
}
@@ -3508,16 +3520,15 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// GEP indexes are signed, and scaling an index isn't permitted to
// signed-overflow, so we use the same semantics for our explicit
// multiply. We suppress this if overflow is not undefined behavior.
+ llvm::Type *elemTy = pointer->getType()->getPointerElementType();
if (CGF.getLangOpts().isSignedOverflowDefined()) {
index = CGF.Builder.CreateMul(index, numElements, "vla.index");
- pointer = CGF.Builder.CreateGEP(
- pointer->getType()->getPointerElementType(), pointer, index,
- "add.ptr");
+ pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
} else {
index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
- pointer =
- CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
- op.E->getExprLoc(), "add.ptr");
+ pointer = CGF.EmitCheckedInBoundsGEP(
+ elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
+ "add.ptr");
}
return pointer;
}
@@ -3531,12 +3542,13 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
return CGF.Builder.CreateBitCast(result, pointer->getType());
}
+ llvm::Type *elemTy = CGF.ConvertTypeForMem(elementType);
if (CGF.getLangOpts().isSignedOverflowDefined())
- return CGF.Builder.CreateGEP(
- pointer->getType()->getPointerElementType(), pointer, index, "add.ptr");
+ return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
- return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
- op.E->getExprLoc(), "add.ptr");
+ return CGF.EmitCheckedInBoundsGEP(
+ elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
+ "add.ptr");
}
// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
@@ -5057,12 +5069,12 @@ static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
}
Value *
-CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
+CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
+ ArrayRef<Value *> IdxList,
bool SignedIndices, bool IsSubtraction,
SourceLocation Loc, const Twine &Name) {
llvm::Type *PtrTy = Ptr->getType();
- Value *GEPVal = Builder.CreateInBoundsGEP(
- PtrTy->getPointerElementType(), Ptr, IdxList, Name);
+ Value *GEPVal = Builder.CreateInBoundsGEP(ElemTy, Ptr, IdxList, Name);
// If the pointer overflow sanitizer isn't enabled, do nothing.
if (!SanOpts.has(SanitizerKind::PointerOverflow))
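Illustrative sketch, not part of the patch: EmitCheckedInBoundsGEP now takes the GEP source element type as its first argument rather than recovering it from the pointer's pointee type. A hypothetical call site updated to the new signature, assuming clang::CodeGen context:

    #include "CodeGenFunction.h"

    using namespace clang;
    using namespace clang::CodeGen;

    // Advance Ptr by Index elements of ElemTy, with the pointer-overflow
    // sanitizer check when that sanitizer is enabled.
    static llvm::Value *emitNextElement(CodeGenFunction &CGF, llvm::Type *ElemTy,
                                        llvm::Value *Ptr, llvm::Value *Index,
                                        SourceLocation Loc) {
      return CGF.EmitCheckedInBoundsGEP(ElemTy, Ptr, Index,
                                        /*SignedIndices=*/false,
                                        CodeGenFunction::NotSubtraction, Loc,
                                        "next.elem");
    }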
diff --git a/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
index ad505fc5a0d4..e3b0e069b830 100644
--- a/clang/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -366,11 +366,11 @@ template <class Derived> struct GenFuncBase {
llvm::ConstantInt::get(NumElts->getType(), BaseEltSize);
llvm::Value *SizeInBytes =
CGF.Builder.CreateNUWMul(BaseEltSizeVal, NumElts);
- Address BC = CGF.Builder.CreateBitCast(DstAddr, CGF.CGM.Int8PtrTy);
+ Address BC = CGF.Builder.CreateElementBitCast(DstAddr, CGF.CGM.Int8Ty);
llvm::Value *DstArrayEnd =
CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BC.getPointer(), SizeInBytes);
- DstArrayEnd = CGF.Builder.CreateBitCast(DstArrayEnd, CGF.CGM.Int8PtrPtrTy,
- "dstarray.end");
+ DstArrayEnd = CGF.Builder.CreateBitCast(
+ DstArrayEnd, CGF.CGM.Int8PtrPtrTy, "dstarray.end");
llvm::BasicBlock *PreheaderBB = CGF.Builder.GetInsertBlock();
// Create the header block and insert the phi instructions.
@@ -426,9 +426,9 @@ template <class Derived> struct GenFuncBase {
assert(Addr.isValid() && "invalid address");
if (Offset.getQuantity() == 0)
return Addr;
- Addr = CGF->Builder.CreateBitCast(Addr, CGF->CGM.Int8PtrTy);
+ Addr = CGF->Builder.CreateElementBitCast(Addr, CGF->CGM.Int8Ty);
Addr = CGF->Builder.CreateConstInBoundsGEP(Addr, Offset.getQuantity());
- return CGF->Builder.CreateBitCast(Addr, CGF->CGM.Int8PtrPtrTy);
+ return CGF->Builder.CreateElementBitCast(Addr, CGF->CGM.Int8PtrTy);
}
Address getAddrWithOffset(Address Addr, CharUnits StructFieldOffset,
@@ -491,9 +491,8 @@ template <class Derived> struct GenFuncBase {
for (unsigned I = 0; I < N; ++I) {
Alignments[I] = Addrs[I].getAlignment();
- Ptrs[I] =
- CallerCGF.Builder.CreateBitCast(Addrs[I], CallerCGF.CGM.Int8PtrPtrTy)
- .getPointer();
+ Ptrs[I] = CallerCGF.Builder.CreateElementBitCast(
+ Addrs[I], CallerCGF.CGM.Int8PtrTy).getPointer();
}
if (llvm::Function *F =
@@ -554,19 +553,21 @@ struct GenBinaryFunc : CopyStructVisitor<Derived, IsMove>,
return;
QualType RT = QualType(FD->getParent()->getTypeForDecl(), 0);
- llvm::PointerType *PtrTy = this->CGF->ConvertType(RT)->getPointerTo();
+ llvm::Type *Ty = this->CGF->ConvertType(RT);
Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], Offset);
LValue DstBase = this->CGF->MakeAddrLValue(
- this->CGF->Builder.CreateBitCast(DstAddr, PtrTy), FT);
+ this->CGF->Builder.CreateElementBitCast(DstAddr, Ty), FT);
DstLV = this->CGF->EmitLValueForField(DstBase, FD);
Address SrcAddr = this->getAddrWithOffset(Addrs[SrcIdx], Offset);
LValue SrcBase = this->CGF->MakeAddrLValue(
- this->CGF->Builder.CreateBitCast(SrcAddr, PtrTy), FT);
+ this->CGF->Builder.CreateElementBitCast(SrcAddr, Ty), FT);
SrcLV = this->CGF->EmitLValueForField(SrcBase, FD);
} else {
- llvm::PointerType *Ty = this->CGF->ConvertTypeForMem(FT)->getPointerTo();
- Address DstAddr = this->CGF->Builder.CreateBitCast(Addrs[DstIdx], Ty);
- Address SrcAddr = this->CGF->Builder.CreateBitCast(Addrs[SrcIdx], Ty);
+ llvm::Type *Ty = this->CGF->ConvertTypeForMem(FT);
+ Address DstAddr =
+ this->CGF->Builder.CreateElementBitCast(Addrs[DstIdx], Ty);
+ Address SrcAddr =
+ this->CGF->Builder.CreateElementBitCast(Addrs[SrcIdx], Ty);
DstLV = this->CGF->MakeAddrLValue(DstAddr, FT);
SrcLV = this->CGF->MakeAddrLValue(SrcAddr, FT);
}
@@ -817,7 +818,7 @@ void CodeGenFunction::destroyNonTrivialCStruct(CodeGenFunction &CGF,
void CodeGenFunction::defaultInitNonTrivialCStructVar(LValue Dst) {
GenDefaultInitialize Gen(getContext());
Address DstPtr =
- Builder.CreateBitCast(Dst.getAddress(*this), CGM.Int8PtrPtrTy);
+ Builder.CreateElementBitCast(Dst.getAddress(*this), CGM.Int8PtrTy);
Gen.setCGF(this);
QualType QT = Dst.getType();
QT = Dst.isVolatile() ? QT.withVolatile() : QT;
@@ -830,7 +831,7 @@ static void callSpecialFunction(G &&Gen, StringRef FuncName, QualType QT,
std::array<Address, N> Addrs) {
auto SetArtificialLoc = ApplyDebugLocation::CreateArtificial(CGF);
for (unsigned I = 0; I < N; ++I)
- Addrs[I] = CGF.Builder.CreateBitCast(Addrs[I], CGF.CGM.Int8PtrPtrTy);
+ Addrs[I] = CGF.Builder.CreateElementBitCast(Addrs[I], CGF.CGM.Int8PtrTy);
QT = IsVolatile ? QT.withVolatile() : QT;
Gen.callFunc(FuncName, QT, Addrs, CGF);
}
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
index ac26f0d4232c..b5bcf157036d 100644
--- a/clang/lib/CodeGen/CGObjC.cpp
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -3915,8 +3915,8 @@ static llvm::Value *emitIsPlatformVersionAtLeast(CodeGenFunction &CGF,
Args.push_back(
llvm::ConstantInt::get(CGM.Int32Ty, getBaseMachOPlatformID(TT)));
Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()));
- Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Min ? *Min : 0));
- Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, SMin ? *SMin : 0));
+ Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Min.getValueOr(0)));
+ Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, SMin.getValueOr(0)));
};
assert(!Version.empty() && "unexpected empty version");
@@ -3952,8 +3952,8 @@ CodeGenFunction::EmitBuiltinAvailable(const VersionTuple &Version) {
Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
llvm::Value *Args[] = {
llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()),
- llvm::ConstantInt::get(CGM.Int32Ty, Min ? *Min : 0),
- llvm::ConstantInt::get(CGM.Int32Ty, SMin ? *SMin : 0),
+ llvm::ConstantInt::get(CGM.Int32Ty, Min.getValueOr(0)),
+ llvm::ConstantInt::get(CGM.Int32Ty, SMin.getValueOr(0))
};
llvm::Value *CallRes =
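Illustrative sketch, not part of the patch: the two ObjC hunks above replace the manual Min ? *Min : 0 selection with llvm::Optional::getValueOr. A tiny example of the accessor (later LLVM releases rename it to value_or):

    #include "llvm/ADT/Optional.h"

    // Yields the minor version if present, otherwise 0 -- the same default the
    // ternary expressed before.
    static unsigned minorOrZero(llvm::Optional<unsigned> Min) {
      return Min.getValueOr(0u);
    }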
diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp
index e016644150b4..b2bf60d2c0fc 100644
--- a/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -978,7 +978,9 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
// Look for an existing one
llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
if (old != ObjCStrings.end())
- return ConstantAddress(old->getValue(), Align);
+ return ConstantAddress(
+ old->getValue(), old->getValue()->getType()->getPointerElementType(),
+ Align);
bool isNonASCII = SL->containsNonAscii();
@@ -1000,7 +1002,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
auto *ObjCStr = llvm::ConstantExpr::getIntToPtr(
llvm::ConstantInt::get(Int64Ty, str), IdTy);
ObjCStrings[Str] = ObjCStr;
- return ConstantAddress(ObjCStr, Align);
+ return ConstantAddress(ObjCStr, IdTy->getPointerElementType(), Align);
}
StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
@@ -1114,7 +1116,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
llvm::Constant *ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStrGV, IdTy);
ObjCStrings[Str] = ObjCStr;
ConstantStrings.push_back(ObjCStr);
- return ConstantAddress(ObjCStr, Align);
+ return ConstantAddress(ObjCStr, IdTy->getPointerElementType(), Align);
}
void PushProperty(ConstantArrayBuilder &PropertiesArray,
@@ -2476,7 +2478,7 @@ ConstantAddress CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
// Look for an existing one
llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
if (old != ObjCStrings.end())
- return ConstantAddress(old->getValue(), Align);
+ return ConstantAddress(old->getValue(), Int8Ty, Align);
StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
@@ -2503,7 +2505,7 @@ ConstantAddress CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
ObjCStrings[Str] = ObjCStr;
ConstantStrings.push_back(ObjCStr);
- return ConstantAddress(ObjCStr, Align);
+ return ConstantAddress(ObjCStr, Int8Ty, Align);
}
///Generates a message send where the super is the receiver. This is a message
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index 5b925359ac25..425d1a793439 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -1983,7 +1983,8 @@ CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
GetConstantStringEntry(NSConstantStringMap, Literal, StringLength);
if (auto *C = Entry.second)
- return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));
+ return ConstantAddress(
+ C, C->getValueType(), CharUnits::fromQuantity(C->getAlignment()));
// If we don't already have it, get _NSConstantStringClassReference.
llvm::Constant *Class = getNSConstantStringClassRef();
@@ -2036,7 +2037,7 @@ CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
: NSStringSection);
Entry.second = GV;
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
}
enum {
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 75709b3c7e78..e35c15421520 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -15,6 +15,7 @@
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
+#include "TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
@@ -687,8 +688,6 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
// Drill down to the base element type on both arrays.
const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
- DestAddr =
- CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
if (DRD)
SrcAddr =
CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
@@ -775,7 +774,7 @@ LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
}
void ReductionCodeGen::emitAggregateInitialization(
- CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
+ CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr,
const OMPDeclareReductionDecl *DRD) {
// Emit VarDecl with copy init for arrays.
// Get the address of the original variable captured in current
@@ -788,7 +787,7 @@ void ReductionCodeGen::emitAggregateInitialization(
EmitDeclareReductionInit,
EmitDeclareReductionInit ? ClausesData[N].ReductionOp
: PrivateVD->getInit(),
- DRD, SharedLVal.getAddress(CGF));
+ DRD, SharedAddr);
}
ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
@@ -882,7 +881,7 @@ void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
}
void ReductionCodeGen::emitInitialization(
- CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
+ CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr,
llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
assert(SharedAddresses.size() > N && "No variable was generated");
const auto *PrivateVD =
@@ -892,21 +891,15 @@ void ReductionCodeGen::emitInitialization(
QualType PrivateType = PrivateVD->getType();
PrivateAddr = CGF.Builder.CreateElementBitCast(
PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
- QualType SharedType = SharedAddresses[N].first.getType();
- SharedLVal = CGF.MakeAddrLValue(
- CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(CGF),
- CGF.ConvertTypeForMem(SharedType)),
- SharedType, SharedAddresses[N].first.getBaseInfo(),
- CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
if (DRD && DRD->getInitializer())
(void)DefaultInit(CGF);
- emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
+ emitAggregateInitialization(CGF, N, PrivateAddr, SharedAddr, DRD);
} else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
(void)DefaultInit(CGF);
+ QualType SharedType = SharedAddresses[N].first.getType();
emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
- PrivateAddr, SharedLVal.getAddress(CGF),
- SharedLVal.getType());
+ PrivateAddr, SharedAddr, SharedType);
} else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
!CGF.isTrivialInitializer(PrivateVD->getInit())) {
CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
@@ -2016,12 +2009,13 @@ Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
StringRef Name) {
std::string Suffix = getName({"artificial", ""});
llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
- llvm::Value *GAddr =
+ llvm::GlobalVariable *GAddr =
getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPUseTLS &&
CGM.getTarget().isTLSSupported()) {
- cast<llvm::GlobalVariable>(GAddr)->setThreadLocal(/*Val=*/true);
- return Address(GAddr, CGM.getContext().getTypeAlignInChars(VarType));
+ GAddr->setThreadLocal(/*Val=*/true);
+ return Address(GAddr, GAddr->getValueType(),
+ CGM.getContext().getTypeAlignInChars(VarType));
}
std::string CacheSuffix = getName({"cache", ""});
llvm::Value *Args[] = {
@@ -2084,7 +2078,8 @@ void CGOpenMPRuntime::emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond) {
+ const Expr *IfCond,
+ llvm::Value *NumThreads) {
if (!CGF.HaveInsertPoint())
return;
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
@@ -2175,7 +2170,7 @@ Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
return ThreadIDTemp;
}
-llvm::Constant *CGOpenMPRuntime::getOrCreateInternalVariable(
+llvm::GlobalVariable *CGOpenMPRuntime::getOrCreateInternalVariable(
llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
@@ -2183,7 +2178,7 @@ llvm::Constant *CGOpenMPRuntime::getOrCreateInternalVariable(
StringRef RuntimeName = Out.str();
auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
if (Elem.second) {
- assert(Elem.second->getType()->getPointerElementType() == Ty &&
+ assert(Elem.second->getType()->isOpaqueOrPointeeTypeMatches(Ty) &&
"OMP internal variable has different type than requested");
return &*Elem.second;
}
@@ -4498,10 +4493,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
std::tie(Addr, Size) = getPointerAndSize(CGF, E);
llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
LValue Base = CGF.MakeAddrLValue(
- Address(CGF.Builder.CreateGEP(AffinitiesArray.getElementType(),
- AffinitiesArray.getPointer(), Idx),
- AffinitiesArray.getAlignment()),
- KmpTaskAffinityInfoTy);
+ CGF.Builder.CreateGEP(AffinitiesArray, Idx), KmpTaskAffinityInfoTy);
// affs[i].base_addr = &<Affinities[i].second>;
LValue BaseAddrLVal = CGF.EmitLValueForField(
Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
@@ -4665,12 +4657,10 @@ CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
Base.getTBAAInfo());
- llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
- Addr.getElementType(), Addr.getPointer(),
- llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
+ Address DepObjAddr = CGF.Builder.CreateGEP(
+ Addr, llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
LValue NumDepsBase = CGF.MakeAddrLValue(
- Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
- Base.getBaseInfo(), Base.getTBAAInfo());
+ DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
// NumDeps = deps[i].base_addr;
LValue BaseAddrLVal = CGF.EmitLValueForField(
NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
@@ -4706,10 +4696,7 @@ static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
LValue &PosLVal = *Pos.get<LValue *>();
llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
Base = CGF.MakeAddrLValue(
- Address(CGF.Builder.CreateGEP(DependenciesArray.getElementType(),
- DependenciesArray.getPointer(), Idx),
- DependenciesArray.getAlignment()),
- KmpDependInfoTy);
+ CGF.Builder.CreateGEP(DependenciesArray, Idx), KmpDependInfoTy);
}
// deps[i].base_addr = &<Dependencies[i].second>;
LValue BaseAddrLVal = CGF.EmitLValueForField(
@@ -4766,12 +4753,10 @@ emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
Base.getAddress(CGF), KmpDependInfoPtrT);
Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
Base.getTBAAInfo());
- llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
- Addr.getElementType(), Addr.getPointer(),
- llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
+ Address DepObjAddr = CGF.Builder.CreateGEP(
+ Addr, llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
LValue NumDepsBase = CGF.MakeAddrLValue(
- Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
- Base.getBaseInfo(), Base.getTBAAInfo());
+ DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
// NumDeps = deps[i].base_addr;
LValue BaseAddrLVal = CGF.EmitLValueForField(
NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
@@ -4827,12 +4812,10 @@ static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
Base.getTBAAInfo());
// Get number of elements in a single depobj.
- llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
- Addr.getElementType(), Addr.getPointer(),
- llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
+ Address DepObjAddr = CGF.Builder.CreateGEP(
+ Addr, llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
LValue NumDepsBase = CGF.MakeAddrLValue(
- Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
- Base.getBaseInfo(), Base.getTBAAInfo());
+ DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
// NumDeps = deps[i].base_addr;
LValue BaseAddrLVal = CGF.EmitLValueForField(
NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
@@ -4844,10 +4827,7 @@ static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
ElSize,
CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false));
llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
- Address DepAddr =
- Address(CGF.Builder.CreateGEP(DependenciesArray.getElementType(),
- DependenciesArray.getPointer(), Pos),
- DependenciesArray.getAlignment());
+ Address DepAddr = CGF.Builder.CreateGEP(DependenciesArray, Pos);
CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size);
// Increase pos.
@@ -5929,25 +5909,20 @@ static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
CGM.getContext().getSizeType(), Loc);
}
RCG.emitAggregateType(CGF, N, Size);
- LValue OrigLVal;
+ Address OrigAddr = Address::invalid();
// If initializer uses initializer from declare reduction construct, emit a
// pointer to the address of the original reduction item (required by reduction
// initializer)
if (RCG.usesReductionInitializer(N)) {
Address SharedAddr = CGF.GetAddrOfLocalVar(&ParamOrig);
- SharedAddr = CGF.EmitLoadOfPointer(
+ OrigAddr = CGF.EmitLoadOfPointer(
SharedAddr,
CGM.getContext().VoidPtrTy.castAs<PointerType>()->getTypePtr());
- OrigLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
- } else {
- OrigLVal = CGF.MakeNaturalAlignAddrLValue(
- llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
- CGM.getContext().VoidPtrTy);
}
// Emit the initializer:
// %0 = bitcast void* %arg to <type>*
// store <type> <init>, <type>* %0
- RCG.emitInitialization(CGF, N, PrivateAddr, OrigLVal,
+ RCG.emitInitialization(CGF, N, PrivateAddr, OrigAddr,
[](CodeGenFunction &) { return false; });
CGF.FinishFunction();
return Fn;
@@ -6122,7 +6097,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
llvm::ConstantInt::get(CGM.SizeTy, Cnt)};
llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP(
- TaskRedInput.getPointer(), Idxs,
+ TaskRedInput.getElementType(), TaskRedInput.getPointer(), Idxs,
/*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
".rd_input.gep.");
LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
@@ -6620,6 +6595,8 @@ void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
OutlinedFn->addFnAttr("omp_target_thread_limit",
std::to_string(DefaultValThreads));
}
+
+ CGM.getTargetCodeGenInfo().setTargetAttributes(nullptr, OutlinedFn, CGM);
}
/// Checks if the expression is constant or does not have non-trivial function
@@ -12680,12 +12657,11 @@ void CGOpenMPRuntime::emitLastprivateConditionalUpdate(CodeGenFunction &CGF,
// Last value of the lastprivate conditional.
// decltype(priv_a) last_a;
- llvm::Constant *Last = getOrCreateInternalVariable(
+ llvm::GlobalVariable *Last = getOrCreateInternalVariable(
CGF.ConvertTypeForMem(LVal.getType()), UniqueDeclName);
- cast<llvm::GlobalVariable>(Last)->setAlignment(
- LVal.getAlignment().getAsAlign());
- LValue LastLVal =
- CGF.MakeAddrLValue(Last, LVal.getType(), LVal.getAlignment());
+ Last->setAlignment(LVal.getAlignment().getAsAlign());
+ LValue LastLVal = CGF.MakeAddrLValue(
+ Address(Last, Last->getValueType(), LVal.getAlignment()), LVal.getType());
// Global loop counter. Required to handle inner parallel-for regions.
// iv
@@ -12812,7 +12788,7 @@ void CGOpenMPRuntime::checkAndEmitSharedLastprivateConditional(
const CapturedStmt *CS = D.getCapturedStmt(CaptureRegions.back());
for (const auto &Pair : It->DeclToUniqueName) {
const auto *VD = cast<VarDecl>(Pair.first->getCanonicalDecl());
- if (!CS->capturesVariable(VD) || IgnoredDecls.count(VD) > 0)
+ if (!CS->capturesVariable(VD) || IgnoredDecls.contains(VD))
continue;
auto I = LPCI->getSecond().find(Pair.first);
assert(I != LPCI->getSecond().end() &&
@@ -12858,7 +12834,8 @@ void CGOpenMPRuntime::emitLastprivateConditionalFinalUpdate(
if (!GV)
return;
LValue LPLVal = CGF.MakeAddrLValue(
- GV, PrivLVal.getType().getNonReferenceType(), PrivLVal.getAlignment());
+ Address(GV, GV->getValueType(), PrivLVal.getAlignment()),
+ PrivLVal.getType().getNonReferenceType());
llvm::Value *Res = CGF.EmitLoadOfScalar(LPLVal, Loc);
CGF.EmitStoreOfScalar(Res, PrivLVal);
}
@@ -12887,7 +12864,8 @@ void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond) {
+ const Expr *IfCond,
+ llvm::Value *NumThreads) {
llvm_unreachable("Not supported in SIMD-only mode");
}
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.h b/clang/lib/CodeGen/CGOpenMPRuntime.h
index 527a23a8af6a..b83ec78696d1 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -162,10 +162,10 @@ private:
/// Performs aggregate initialization.
/// \param N Number of reduction item in the common list.
/// \param PrivateAddr Address of the corresponding private item.
- /// \param SharedLVal Address of the original shared variable.
+ /// \param SharedAddr Address of the original shared variable.
/// \param DRD Declare reduction construct used for reduction item.
void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
- Address PrivateAddr, LValue SharedLVal,
+ Address PrivateAddr, Address SharedAddr,
const OMPDeclareReductionDecl *DRD);
public:
@@ -187,10 +187,10 @@ public:
/// \param PrivateAddr Address of the corresponding private item.
/// \param DefaultInit Default initialization sequence that should be
/// performed if no reduction specific initialization is found.
- /// \param SharedLVal Address of the original shared variable.
+ /// \param SharedAddr Address of the original shared variable.
void
emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
- LValue SharedLVal,
+ Address SharedAddr,
llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
/// Returns true if the private copy requires cleanups.
bool needCleanups(unsigned N);
@@ -471,8 +471,8 @@ private:
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
- llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
- InternalVars;
+ llvm::StringMap<llvm::AssertingVH<llvm::GlobalVariable>,
+ llvm::BumpPtrAllocator> InternalVars;
/// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;
@@ -829,9 +829,9 @@ private:
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
- llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
- const llvm::Twine &Name,
- unsigned AddressSpace = 0);
+ llvm::GlobalVariable *getOrCreateInternalVariable(llvm::Type *Ty,
+ const llvm::Twine &Name,
+ unsigned AddressSpace = 0);
/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;
@@ -1015,11 +1015,13 @@ public:
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
+ /// \param NumThreads The value corresponding to the num_threads clause, if
+ /// any, or nullptr.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond);
+ const Expr *IfCond, llvm::Value *NumThreads);
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
@@ -1991,11 +1993,13 @@ public:
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
+ /// \param NumThreads The value corresponding to the num_threads clause, if
+ /// any, or nullptr.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond) override;
+ const Expr *IfCond, llvm::Value *NumThreads) override;
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
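Illustrative sketch, not part of the patch: the new NumThreads parameter carries the evaluated num_threads clause expression down to emitParallelCall (nullptr when the clause is absent). The OpenMP source construct it originates from, compiled with -fopenmp:

    #include <cstdio>
    #include <omp.h>

    void demo(int n) {
      // The value of `n` is what reaches emitParallelCall as NumThreads.
      #pragma omp parallel num_threads(n)
      {
        std::printf("thread %d of %d\n", omp_get_thread_num(),
                    omp_get_num_threads());
      }
    }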
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index dcb224f33156..866454ddeaed 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -1221,11 +1221,7 @@ void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc) {
- // Do nothing in case of SPMD mode and L0 parallel.
- if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
- return;
-
- CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
+ // Nothing to do.
}
void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
@@ -1510,13 +1506,16 @@ void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond) {
+ const Expr *IfCond,
+ llvm::Value *NumThreads) {
if (!CGF.HaveInsertPoint())
return;
- auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars,
- IfCond](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars, IfCond,
+ NumThreads](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
CGBuilderTy &Bld = CGF.Builder;
+ llvm::Value *NumThreadsVal = NumThreads;
llvm::Function *WFn = WrapperFunctionsMap[OutlinedFn];
llvm::Value *ID = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
if (WFn)
@@ -1556,13 +1555,18 @@ void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
else
IfCondVal = llvm::ConstantInt::get(CGF.Int32Ty, 1);
- assert(IfCondVal && "Expected a value");
+ if (!NumThreadsVal)
+ NumThreadsVal = llvm::ConstantInt::get(CGF.Int32Ty, -1);
+ else
+ NumThreadsVal = Bld.CreateZExtOrTrunc(NumThreadsVal, CGF.Int32Ty);
+
+ assert(IfCondVal && "Expected a value");
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
llvm::Value *Args[] = {
RTLoc,
getThreadID(CGF, Loc),
IfCondVal,
- llvm::ConstantInt::get(CGF.Int32Ty, -1),
+ NumThreadsVal,
llvm::ConstantInt::get(CGF.Int32Ty, -1),
FnPtr,
ID,
@@ -2186,11 +2190,8 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
// elemptr = ((CopyType*)(elemptrptr)) + I
Address ElemPtr = Address(ElemPtrPtr, Align);
ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
- if (NumIters > 1) {
- ElemPtr = Address(Bld.CreateGEP(ElemPtr.getElementType(),
- ElemPtr.getPointer(), Cnt),
- ElemPtr.getAlignment());
- }
+ if (NumIters > 1)
+ ElemPtr = Bld.CreateGEP(ElemPtr, Cnt);
// Get pointer to location in transfer medium.
// MediumPtr = &medium[warp_id]
@@ -2256,11 +2257,8 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
Address TargetElemPtr = Address(TargetElemPtrVal, Align);
TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
- if (NumIters > 1) {
- TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getElementType(),
- TargetElemPtr.getPointer(), Cnt),
- TargetElemPtr.getAlignment());
- }
+ if (NumIters > 1)
+ TargetElemPtr = Bld.CreateGEP(TargetElemPtr, Cnt);
// *TargetElemPtr = SrcMediumVal;
llvm::Value *SrcMediumValue =
@@ -3899,6 +3897,7 @@ void CGOpenMPRuntimeGPU::processRequiresDirective(
case CudaArch::GFX1033:
case CudaArch::GFX1034:
case CudaArch::GFX1035:
+ case CudaArch::Generic:
case CudaArch::UNUSED:
case CudaArch::UNKNOWN:
break;
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
index ac51264d7685..1d30c5061743 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
@@ -257,10 +257,13 @@ public:
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
+ /// \param NumThreads The value corresponding to the num_threads clause, if
+ /// any, or nullptr.
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond) override;
+ const Expr *IfCond, llvm::Value *NumThreads) override;
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index d399ff919cc3..ef0068cd3b0c 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -2454,7 +2454,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
if (RetAI.isDirect() || RetAI.isExtend()) {
// Make a fake lvalue for the return value slot.
- LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
+ LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
*this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
ResultRegDests, AsmString, S.getNumOutputs());
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index f6853a22cd36..4c11f7d67534 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -24,10 +24,13 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
@@ -375,8 +378,7 @@ static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
Ctx.getPointerType(DstType), Loc);
Address TmpAddr =
- CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
- .getAddress(CGF);
+ CGF.MakeNaturalAlignAddrLValue(CastedPtr, DstType).getAddress(CGF);
return TmpAddr;
}
@@ -1245,7 +1247,7 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
RedCG.emitAggregateType(*this, Count);
AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
- RedCG.getSharedLValue(Count),
+ RedCG.getSharedLValue(Count).getAddress(*this),
[&Emission](CodeGenFunction &CGF) {
CGF.EmitAutoVarInit(Emission);
return true;
@@ -1557,14 +1559,14 @@ static void emitCommonOMPParallelDirective(
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
const CodeGenBoundParametersTy &CodeGenBoundParameters) {
const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
+ llvm::Value *NumThreads = nullptr;
llvm::Function *OutlinedFn =
CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
- llvm::Value *NumThreads =
- CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
- /*IgnoreResultAssign=*/true);
+ NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
+ /*IgnoreResultAssign=*/true);
CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
CGF, NumThreads, NumThreadsClause->getBeginLoc());
}
@@ -1591,7 +1593,7 @@ static void emitCommonOMPParallelDirective(
CodeGenBoundParameters(CGF, S, CapturedVars);
CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
- CapturedVars, IfCond);
+ CapturedVars, IfCond, NumThreads);
}
static bool isAllocatableDecl(const VarDecl *VD) {
@@ -1972,7 +1974,7 @@ CodeGenFunction::EmitOMPCollapsedCanonicalLoopNest(const Stmt *S, int Depth) {
// Pop the \p Depth loops requested by the call from that stack and restore
// the previous context.
- OMPLoopNestStack.set_size(OMPLoopNestStack.size() - Depth);
+ OMPLoopNestStack.pop_back_n(Depth);
ExpectedOMPLoopDepth = ParentExpectedOMPLoopDepth;
return Result;
@@ -4299,10 +4301,10 @@ public:
PrivateDecls.push_back(VD);
}
}
- void VisitOMPExecutableDirective(const OMPExecutableDirective *) { return; }
- void VisitCapturedStmt(const CapturedStmt *) { return; }
- void VisitLambdaExpr(const LambdaExpr *) { return; }
- void VisitBlockExpr(const BlockExpr *) { return; }
+ void VisitOMPExecutableDirective(const OMPExecutableDirective *) {}
+ void VisitCapturedStmt(const CapturedStmt *) {}
+ void VisitLambdaExpr(const LambdaExpr *) {}
+ void VisitBlockExpr(const BlockExpr *) {}
void VisitStmt(const Stmt *S) {
if (!S)
return;
@@ -4431,6 +4433,53 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
UntiedLocalVars;
// Set proper addresses for generated private copies.
OMPPrivateScope Scope(CGF);
+ // Generate debug info for variables present in shared clause.
+ if (auto *DI = CGF.getDebugInfo()) {
+ llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields =
+ CGF.CapturedStmtInfo->getCaptureFields();
+ llvm::Value *ContextValue = CGF.CapturedStmtInfo->getContextValue();
+ if (CaptureFields.size() && ContextValue) {
+ unsigned CharWidth = CGF.getContext().getCharWidth();
+ // The shared variables are packed together as members of structure.
+ // So the address of each shared variable can be computed by adding
+ // offset of it (within record) to the base address of record. For each
+ // shared variable, debug intrinsic llvm.dbg.declare is generated with
+ // appropriate expressions (DIExpression).
+ // Ex:
+ // %12 = load %struct.anon*, %struct.anon** %__context.addr.i
+ // call void @llvm.dbg.declare(metadata %struct.anon* %12,
+ // metadata !svar1,
+ // metadata !DIExpression(DW_OP_deref))
+ // call void @llvm.dbg.declare(metadata %struct.anon* %12,
+ // metadata !svar2,
+ // metadata !DIExpression(DW_OP_plus_uconst, 8, DW_OP_deref))
+ for (auto It = CaptureFields.begin(); It != CaptureFields.end(); ++It) {
+ const VarDecl *SharedVar = It->first;
+ RecordDecl *CaptureRecord = It->second->getParent();
+ const ASTRecordLayout &Layout =
+ CGF.getContext().getASTRecordLayout(CaptureRecord);
+ unsigned Offset =
+ Layout.getFieldOffset(It->second->getFieldIndex()) / CharWidth;
+ (void)DI->EmitDeclareOfAutoVariable(SharedVar, ContextValue,
+ CGF.Builder, false);
+ llvm::Instruction &Last = CGF.Builder.GetInsertBlock()->back();
+ // Get the dbg.declare call we just created and update its DIExpression
+ // to add the offset to the base address.
+ if (auto DDI = dyn_cast<llvm::DbgVariableIntrinsic>(&Last)) {
+ SmallVector<uint64_t, 8> Ops;
+ // Add the offset to the base address if it is non-zero.
+ if (Offset) {
+ Ops.push_back(llvm::dwarf::DW_OP_plus_uconst);
+ Ops.push_back(Offset);
+ }
+ Ops.push_back(llvm::dwarf::DW_OP_deref);
+ auto &Ctx = DDI->getContext();
+ llvm::DIExpression *DIExpr = llvm::DIExpression::get(Ctx, Ops);
+ Last.setOperand(2, llvm::MetadataAsValue::get(Ctx, DIExpr));
+ }
+ }
+ }
+ }
llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs;
if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
!Data.LastprivateVars.empty() || !Data.PrivateLocals.empty()) {
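A small, hypothetical example of the source pattern the added block serves: each variable in a task's shared clause becomes a field of the capture record, and the new code emits one llvm.dbg.declare per field with a DW_OP_plus_uconst/DW_OP_deref expression so a debugger can locate the variable through the __context pointer.

    #include <cstdio>

    int main() {
      int svar1 = 1, svar2 = 2;
    #pragma omp task shared(svar1, svar2)
      {
        // With the change, a debugger stopped here can resolve svar1 and svar2
        // through the capture record plus their recorded field offsets.
        std::printf("%d %d\n", svar1, svar2);
      }
    #pragma omp taskwait
      return 0;
    }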
@@ -5918,6 +5967,9 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE,
IsXLHSInRHSPart, Loc);
break;
+ case OMPC_compare:
+ // Do nothing here; the error has already been emitted.
+ break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
diff --git a/clang/lib/CodeGen/CGValue.h b/clang/lib/CodeGen/CGValue.h
index 4b39a0520833..f01eece042f8 100644
--- a/clang/lib/CodeGen/CGValue.h
+++ b/clang/lib/CodeGen/CGValue.h
@@ -47,6 +47,8 @@ class RValue {
llvm::PointerIntPair<llvm::Value *, 2, Flavor> V1;
// Stores second value and volatility.
llvm::PointerIntPair<llvm::Value *, 1, bool> V2;
+ // Stores element type for aggregate values.
+ llvm::Type *ElementType;
public:
bool isScalar() const { return V1.getInt() == Scalar; }
@@ -71,7 +73,8 @@ public:
Address getAggregateAddress() const {
assert(isAggregate() && "Not an aggregate!");
auto align = reinterpret_cast<uintptr_t>(V2.getPointer()) >> AggAlignShift;
- return Address(V1.getPointer(), CharUnits::fromQuantity(align));
+ return Address(
+ V1.getPointer(), ElementType, CharUnits::fromQuantity(align));
}
llvm::Value *getAggregatePointer() const {
assert(isAggregate() && "Not an aggregate!");
@@ -108,6 +111,7 @@ public:
RValue ER;
ER.V1.setPointer(addr.getPointer());
ER.V1.setInt(Aggregate);
+ ER.ElementType = addr.getElementType();
auto align = static_cast<uintptr_t>(addr.getAlignment().getQuantity());
ER.V2.setPointer(reinterpret_cast<llvm::Value*>(align << AggAlignShift));
@@ -175,6 +179,7 @@ class LValue {
} LVType;
llvm::Value *V;
+ llvm::Type *ElementType;
union {
// Index into a vector subscript: V[i]
@@ -230,6 +235,13 @@ private:
LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
assert((!Alignment.isZero() || Type->isIncompleteType()) &&
"initializing l-value with zero alignment!");
+ if (isGlobalReg())
+ assert(ElementType == nullptr && "Global reg does not store elem type");
+ else
+ assert(llvm::cast<llvm::PointerType>(V->getType())
+ ->isOpaqueOrPointeeTypeMatches(ElementType) &&
+ "Pointer element type mismatch");
+
this->Type = Type;
this->Quals = Quals;
const unsigned MaxAlign = 1U << 31;
@@ -327,17 +339,18 @@ public:
return V;
}
Address getAddress(CodeGenFunction &CGF) const {
- return Address(getPointer(CGF), getAlignment());
+ return Address(getPointer(CGF), ElementType, getAlignment());
}
void setAddress(Address address) {
assert(isSimple());
V = address.getPointer();
+ ElementType = address.getElementType();
Alignment = address.getAlignment().getQuantity();
}
// vector elt lvalue
Address getVectorAddress() const {
- return Address(getVectorPointer(), getAlignment());
+ return Address(getVectorPointer(), ElementType, getAlignment());
}
llvm::Value *getVectorPointer() const {
assert(isVectorElt());
@@ -349,7 +362,7 @@ public:
}
Address getMatrixAddress() const {
- return Address(getMatrixPointer(), getAlignment());
+ return Address(getMatrixPointer(), ElementType, getAlignment());
}
llvm::Value *getMatrixPointer() const {
assert(isMatrixElt());
@@ -362,7 +375,7 @@ public:
// extended vector elements.
Address getExtVectorAddress() const {
- return Address(getExtVectorPointer(), getAlignment());
+ return Address(getExtVectorPointer(), ElementType, getAlignment());
}
llvm::Value *getExtVectorPointer() const {
assert(isExtVectorElt());
@@ -375,7 +388,7 @@ public:
// bitfield lvalue
Address getBitFieldAddress() const {
- return Address(getBitFieldPointer(), getAlignment());
+ return Address(getBitFieldPointer(), ElementType, getAlignment());
}
llvm::Value *getBitFieldPointer() const { assert(isBitField()); return V; }
const CGBitFieldInfo &getBitFieldInfo() const {
@@ -395,6 +408,7 @@ public:
R.LVType = Simple;
assert(address.getPointer()->getType()->isPointerTy());
R.V = address.getPointer();
+ R.ElementType = address.getElementType();
R.Initialize(type, qs, address.getAlignment(), BaseInfo, TBAAInfo);
return R;
}
@@ -405,6 +419,7 @@ public:
LValue R;
R.LVType = VectorElt;
R.V = vecAddress.getPointer();
+ R.ElementType = vecAddress.getElementType();
R.VectorIdx = Idx;
R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
BaseInfo, TBAAInfo);
@@ -417,6 +432,7 @@ public:
LValue R;
R.LVType = ExtVectorElt;
R.V = vecAddress.getPointer();
+ R.ElementType = vecAddress.getElementType();
R.VectorElts = Elts;
R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
BaseInfo, TBAAInfo);
@@ -435,17 +451,20 @@ public:
LValue R;
R.LVType = BitField;
R.V = Addr.getPointer();
+ R.ElementType = Addr.getElementType();
R.BitFieldInfo = &Info;
R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), BaseInfo,
TBAAInfo);
return R;
}
- static LValue MakeGlobalReg(Address Reg, QualType type) {
+ static LValue MakeGlobalReg(llvm::Value *V, CharUnits alignment,
+ QualType type) {
LValue R;
R.LVType = GlobalReg;
- R.V = Reg.getPointer();
- R.Initialize(type, type.getQualifiers(), Reg.getAlignment(),
+ R.V = V;
+ R.ElementType = nullptr;
+ R.Initialize(type, type.getQualifiers(), alignment,
LValueBaseInfo(AlignmentSource::Decl), TBAAAccessInfo());
return R;
}
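MakeGlobalReg now takes the register metadata value and an alignment directly, since a global-register lvalue has no pointee in memory and therefore no element type to record. The extension it models is the GNU global named register variable; a sketch, assuming an AArch64-style target where the stack pointer is a supported register and a language mode that still accepts the register storage class (C, or pre-C++17 C++):

    // Reads and writes of a global register variable lower to
    // llvm.read_register / llvm.write_register, not loads and stores,
    // which is why the lvalue carries no element type.
    register unsigned long current_sp asm("sp");

    unsigned long stack_pointer() { return current_sp; }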
@@ -456,6 +475,7 @@ public:
LValue R;
R.LVType = MatrixElt;
R.V = matAddress.getPointer();
+ R.ElementType = matAddress.getElementType();
R.VectorIdx = Idx;
R.Initialize(type, type.getQualifiers(), matAddress.getAlignment(),
BaseInfo, TBAAInfo);
@@ -470,13 +490,11 @@ public:
/// An aggregate value slot.
class AggValueSlot {
/// The address.
- llvm::Value *Addr;
+ Address Addr;
// Qualifiers
Qualifiers Quals;
- unsigned Alignment;
-
/// DestructedFlag - This is set to true if some external code is
/// responsible for setting up a destructor for the slot. Otherwise
/// the code which constructs it should push the appropriate cleanup.
@@ -520,6 +538,14 @@ class AggValueSlot {
/// them.
bool SanitizerCheckedFlag : 1;
+ AggValueSlot(Address Addr, Qualifiers Quals, bool DestructedFlag,
+ bool ObjCGCFlag, bool ZeroedFlag, bool AliasedFlag,
+ bool OverlapFlag, bool SanitizerCheckedFlag)
+ : Addr(Addr), Quals(Quals), DestructedFlag(DestructedFlag),
+ ObjCGCFlag(ObjCGCFlag), ZeroedFlag(ZeroedFlag),
+ AliasedFlag(AliasedFlag), OverlapFlag(OverlapFlag),
+ SanitizerCheckedFlag(SanitizerCheckedFlag) {}
+
public:
enum IsAliased_t { IsNotAliased, IsAliased };
enum IsDestructed_t { IsNotDestructed, IsDestructed };
@@ -553,22 +579,8 @@ public:
Overlap_t mayOverlap,
IsZeroed_t isZeroed = IsNotZeroed,
IsSanitizerChecked_t isChecked = IsNotSanitizerChecked) {
- AggValueSlot AV;
- if (addr.isValid()) {
- AV.Addr = addr.getPointer();
- AV.Alignment = addr.getAlignment().getQuantity();
- } else {
- AV.Addr = nullptr;
- AV.Alignment = 0;
- }
- AV.Quals = quals;
- AV.DestructedFlag = isDestructed;
- AV.ObjCGCFlag = needsGC;
- AV.ZeroedFlag = isZeroed;
- AV.AliasedFlag = isAliased;
- AV.OverlapFlag = mayOverlap;
- AV.SanitizerCheckedFlag = isChecked;
- return AV;
+ return AggValueSlot(addr, quals, isDestructed, needsGC, isZeroed, isAliased,
+ mayOverlap, isChecked);
}
static AggValueSlot
@@ -609,19 +621,19 @@ public:
}
llvm::Value *getPointer() const {
- return Addr;
+ return Addr.getPointer();
}
Address getAddress() const {
- return Address(Addr, getAlignment());
+ return Addr;
}
bool isIgnored() const {
- return Addr == nullptr;
+ return !Addr.isValid();
}
CharUnits getAlignment() const {
- return CharUnits::fromQuantity(Alignment);
+ return Addr.getAlignment();
}
IsAliased_t isPotentiallyAliased() const {
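The net effect of the AggValueSlot changes is that pointer, element type, and alignment now travel together in one Address instead of being split across fields. A simplified, self-contained model of that shape (deliberately not the real clang classes):

    #include <cassert>
    #include <cstdint>

    // Stand-in for CodeGen::Address: pointer + element-type tag + alignment.
    struct MiniAddress {
      void *Ptr = nullptr;
      const char *ElemTy = nullptr; // models llvm::Type*
      uint64_t Align = 0;
      bool isValid() const { return Ptr != nullptr; }
    };

    // The slot stores the address wholesale, so the accessors just forward.
    struct MiniAggSlot {
      MiniAddress Addr;
      bool isIgnored() const { return !Addr.isValid(); }
      uint64_t alignment() const { return Addr.Align; }
    };

    int main() {
      alignas(16) static int storage = 0;
      MiniAggSlot slot{{&storage, "i32", 16}};
      assert(!slot.isIgnored() && slot.alignment() == 16);
      return 0;
    }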
diff --git a/clang/lib/CodeGen/CodeGenAction.cpp b/clang/lib/CodeGen/CodeGenAction.cpp
index 52c54d3c7a72..b72b16cf2b5f 100644
--- a/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/clang/lib/CodeGen/CodeGenAction.cpp
@@ -571,7 +571,6 @@ void BackendConsumer::SrcMgrDiagHandler(const llvm::DiagnosticInfoSrcMgr &DI) {
// If Loc is invalid, we still need to report the issue, it just gets no
// location info.
Diags.Report(Loc, DiagID).AddString(Message);
- return;
}
bool
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index d87cf2d49720..e6adec6948af 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -188,8 +188,8 @@ LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
- return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
- TBAAInfo);
+ Address Addr(V, ConvertTypeForMem(T), Alignment);
+ return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
}
/// Given a value of type T* that may not be to a complete object,
@@ -200,7 +200,8 @@ CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
TBAAAccessInfo TBAAInfo;
CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
/* forPointeeType= */ true);
- return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
+ Address Addr(V, ConvertTypeForMem(T), Align);
+ return MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
}
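These hunks are part of the broader move to carry an explicit element type alongside every pointer, which is what opaque pointers require: the pointee type can no longer be read off the pointer value itself. A small raw-LLVM sketch of the same invariant (plain LLVM API, not clang's Address wrapper):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      llvm::LLVMContext Ctx;
      llvm::Module M("demo", Ctx);
      auto *FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), false);
      auto *Fn = llvm::Function::Create(
          FnTy, llvm::GlobalValue::ExternalLinkage, "f", M);
      llvm::IRBuilder<> B(llvm::BasicBlock::Create(Ctx, "entry", Fn));
      llvm::Type *I32 = B.getInt32Ty();
      llvm::Value *Slot = B.CreateAlloca(I32);  // just a pointer value
      B.CreateStore(B.getInt32(42), Slot);
      B.CreateLoad(I32, Slot);  // the element type must be supplied explicitly
      B.CreateRetVoid();
      M.print(llvm::outs(), nullptr);
      return 0;
    }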
@@ -243,7 +244,7 @@ TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
case Type::Enum:
case Type::ObjCObjectPointer:
case Type::Pipe:
- case Type::ExtInt:
+ case Type::BitInt:
return TEK_Scalar;
// Complexes.
@@ -1070,7 +1071,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
auto AI = CurFn->arg_begin();
if (CurFnInfo->getReturnInfo().isSRetAfterThis())
++AI;
- ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
+ ReturnValue = Address(&*AI, ConvertType(RetTy),
+ CurFnInfo->getReturnInfo().getIndirectAlign());
if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
ReturnValuePointer =
CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
@@ -1298,47 +1300,44 @@ QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const CGFunctionInfo &FnInfo) {
+ assert(Fn && "generating code for null Function");
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
CurGD = GD;
FunctionArgList Args;
QualType ResTy = BuildFunctionArgList(GD, Args);
- // When generating code for a builtin with an inline declaration, use a
- // mangled name to hold the actual body, while keeping an external definition
- // in case the function pointer is referenced somewhere.
- if (Fn) {
- if (FD->isInlineBuiltinDeclaration()) {
- std::string FDInlineName = (Fn->getName() + ".inline").str();
- llvm::Module *M = Fn->getParent();
- llvm::Function *Clone = M->getFunction(FDInlineName);
- if (!Clone) {
- Clone = llvm::Function::Create(Fn->getFunctionType(),
- llvm::GlobalValue::InternalLinkage,
- Fn->getAddressSpace(), FDInlineName, M);
- Clone->addFnAttr(llvm::Attribute::AlwaysInline);
- }
- Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
- Fn = Clone;
+ if (FD->isInlineBuiltinDeclaration()) {
+ // When generating code for a builtin with an inline declaration, use a
+ // mangled name to hold the actual body, while keeping an external
+ // definition in case the function pointer is referenced somewhere.
+ std::string FDInlineName = (Fn->getName() + ".inline").str();
+ llvm::Module *M = Fn->getParent();
+ llvm::Function *Clone = M->getFunction(FDInlineName);
+ if (!Clone) {
+ Clone = llvm::Function::Create(Fn->getFunctionType(),
+ llvm::GlobalValue::InternalLinkage,
+ Fn->getAddressSpace(), FDInlineName, M);
+ Clone->addFnAttr(llvm::Attribute::AlwaysInline);
}
-
+ Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ Fn = Clone;
+ } else {
// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
// to detect that situation before we reach codegen, so do some late
// replacement.
- else {
- for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
- PD = PD->getPreviousDecl()) {
- if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
- std::string FDInlineName = (Fn->getName() + ".inline").str();
- llvm::Module *M = Fn->getParent();
- if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
- Clone->replaceAllUsesWith(Fn);
- Clone->eraseFromParent();
- }
- break;
+ for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
+ PD = PD->getPreviousDecl()) {
+ if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
+ std::string FDInlineName = (Fn->getName() + ".inline").str();
+ llvm::Module *M = Fn->getParent();
+ if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
+ Clone->replaceAllUsesWith(Fn);
+ Clone->eraseFromParent();
}
+ break;
}
}
}
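The pattern this restructuring serves is the fortify-style "inline builtin": a builtin such as memcpy declared again with a body so direct callers can inline it, while an external definition remains reachable through function pointers. A hedged sketch of that declaration style (the exact attribute spelling and prototype depend on the library headers and target):

    extern "C" {
    // Clang emits this body under the mangled name "memcpy.inline" and
    // always-inlines it into direct callers; the plain "memcpy" symbol keeps
    // external linkage for indirect uses.
    extern inline __attribute__((gnu_inline, always_inline)) void *
    memcpy(void *dst, const void *src, unsigned long n) {
      char *d = static_cast<char *>(dst);
      const char *s = static_cast<const char *>(src);
      for (unsigned long i = 0; i != n; ++i)
        d[i] = s[i];
      return dst;
    }
    }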
@@ -1347,8 +1346,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
if (FD->hasAttr<NoDebugAttr>()) {
// Clear non-distinct debug info that was possibly attached to the function
// due to an earlier declaration without the nodebug attribute
- if (Fn)
- Fn->setSubprogram(nullptr);
+ Fn->setSubprogram(nullptr);
// Disable debug info indefinitely for this function
DebugInfo = nullptr;
}
@@ -2202,12 +2200,13 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::Record:
case Type::Enum:
case Type::Elaborated:
+ case Type::Using:
case Type::TemplateSpecialization:
case Type::ObjCTypeParam:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
- case Type::ExtInt:
+ case Type::BitInt:
llvm_unreachable("type class is never variably-modified!");
case Type::Adjusted:
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index ff5b6634da1c..f76ce8a6400d 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -459,6 +459,11 @@ public:
/// Get the name of the capture helper.
virtual StringRef getHelperName() const { return "__captured_stmt"; }
+ /// Get the map from captured variables to their fields in the capture record.
+ llvm::SmallDenseMap<const VarDecl *, FieldDecl *> getCaptureFields() {
+ return CaptureFields;
+ }
+
private:
/// The kind of captured statement being generated.
CapturedRegionKind Kind;
@@ -2494,14 +2499,16 @@ public:
LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
AlignmentSource Source = AlignmentSource::Type) {
- return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
- LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T));
+ Address Addr(V, ConvertTypeForMem(T), Alignment);
+ return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
+ CGM.getTBAAAccessInfo(T));
}
- LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
- LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
- return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
- BaseInfo, TBAAInfo);
+ LValue
+ MakeAddrLValueWithoutTBAA(Address Addr, QualType T,
+ AlignmentSource Source = AlignmentSource::Type) {
+ return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
+ TBAAAccessInfo());
}
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
@@ -3128,15 +3135,18 @@ public:
class ParamValue {
llvm::Value *Value;
+ llvm::Type *ElementType;
unsigned Alignment;
- ParamValue(llvm::Value *V, unsigned A) : Value(V), Alignment(A) {}
+ ParamValue(llvm::Value *V, llvm::Type *T, unsigned A)
+ : Value(V), ElementType(T), Alignment(A) {}
public:
static ParamValue forDirect(llvm::Value *value) {
- return ParamValue(value, 0);
+ return ParamValue(value, nullptr, 0);
}
static ParamValue forIndirect(Address addr) {
assert(!addr.getAlignment().isZero());
- return ParamValue(addr.getPointer(), addr.getAlignment().getQuantity());
+ return ParamValue(addr.getPointer(), addr.getElementType(),
+ addr.getAlignment().getQuantity());
}
bool isIndirect() const { return Alignment != 0; }
@@ -3149,7 +3159,7 @@ public:
Address getIndirectAddress() const {
assert(isIndirect());
- return Address(Value, CharUnits::fromQuantity(Alignment));
+ return Address(Value, ElementType, CharUnits::fromQuantity(Alignment));
}
};
@@ -4405,7 +4415,7 @@ public:
/// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
/// variable with global storage.
- void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
+ void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV,
bool PerformInit);
llvm::Function *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
@@ -4556,7 +4566,7 @@ public:
/// \p SignedIndices indicates whether any of the GEP indices are signed.
/// \p IsSubtraction indicates whether the expression used to form the GEP
/// is a subtraction.
- llvm::Value *EmitCheckedInBoundsGEP(llvm::Value *Ptr,
+ llvm::Value *EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr,
ArrayRef<llvm::Value *> IdxList,
bool SignedIndices,
bool IsSubtraction,
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 9ba1a5c25e81..36b7ce87336c 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -2832,7 +2832,7 @@ ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
// Look for an existing global.
if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
ConstantEmitter Emitter(*this);
llvm::Constant *Init;
@@ -2866,15 +2866,15 @@ ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
setDSOLocal(GV);
- llvm::Constant *Addr = GV;
if (!V.isAbsent()) {
Emitter.finalize(GV);
- } else {
- llvm::Type *Ty = getTypes().ConvertTypeForMem(GD->getType());
- Addr = llvm::ConstantExpr::getBitCast(
- GV, Ty->getPointerTo(GV->getAddressSpace()));
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
}
- return ConstantAddress(Addr, Alignment);
+
+ llvm::Type *Ty = getTypes().ConvertTypeForMem(GD->getType());
+ llvm::Constant *Addr = llvm::ConstantExpr::getBitCast(
+ GV, Ty->getPointerTo(GV->getAddressSpace()));
+ return ConstantAddress(Addr, Ty, Alignment);
}
ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
@@ -2883,7 +2883,7 @@ ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
CharUnits Alignment = getNaturalTypeAlignment(TPO->getType());
if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
ConstantEmitter Emitter(*this);
llvm::Constant *Init = Emitter.emitForInitializer(
@@ -2901,7 +2901,7 @@ ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
Emitter.finalize(GV);
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
}
ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
@@ -2916,7 +2916,7 @@ ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
if (Entry) {
unsigned AS = getContext().getTargetAddressSpace(VD->getType());
auto Ptr = llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
- return ConstantAddress(Ptr, Alignment);
+ return ConstantAddress(Ptr, DeclTy, Alignment);
}
llvm::Constant *Aliasee;
@@ -2932,7 +2932,7 @@ ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
F->setLinkage(llvm::Function::ExternalWeakLinkage);
WeakRefReferences.insert(F);
- return ConstantAddress(Aliasee, Alignment);
+ return ConstantAddress(Aliasee, DeclTy, Alignment);
}
void CodeGenModule::EmitGlobal(GlobalDecl GD) {
@@ -3886,6 +3886,14 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
return F;
}
+llvm::Constant *CodeGenModule::GetFunctionStart(const ValueDecl *Decl) {
+ llvm::GlobalValue *F =
+ cast<llvm::GlobalValue>(GetAddrOfFunction(Decl)->stripPointerCasts());
+
+ return llvm::ConstantExpr::getBitCast(llvm::NoCFIValue::get(F),
+ llvm::Type::getInt8PtrTy(VMContext));
+}
+
static const FunctionDecl *
GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
TranslationUnitDecl *TUDecl = C.getTranslationUnitDecl();
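GetFunctionStart wraps the function in llvm::NoCFIValue, so the returned constant is the genuine entry point rather than a CFI jump-table slot; it is the codegen hook behind builtins such as __builtin_function_start. A user-level sketch (assumes a clang that provides that builtin):

    extern "C" void handler() {}

    // Under -fsanitize=cfi-icall, &handler may point at a jump-table entry;
    // __builtin_function_start(handler) yields the address of the body itself.
    void *real_entry() { return __builtin_function_start(handler); }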
@@ -5228,7 +5236,8 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
StringLength);
if (auto *C = Entry.second)
- return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));
+ return ConstantAddress(
+ C, C->getValueType(), CharUnits::fromQuantity(C->getAlignment()));
llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
@@ -5409,7 +5418,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
}
Entry.second = GV;
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
}
bool CodeGenModule::getExpressionLocationsEnabled() const {
@@ -5527,7 +5536,7 @@ CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
if (uint64_t(Alignment.getQuantity()) > GV->getAlignment())
GV->setAlignment(Alignment.getAsAlign());
return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
- Alignment);
+ GV->getValueType(), Alignment);
}
}
@@ -5557,7 +5566,7 @@ CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
QualType());
return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
- Alignment);
+ GV->getValueType(), Alignment);
}
/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
@@ -5590,7 +5599,7 @@ ConstantAddress CodeGenModule::GetAddrOfConstantCString(
if (uint64_t(Alignment.getQuantity()) > GV->getAlignment())
GV->setAlignment(Alignment.getAsAlign());
return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
- Alignment);
+ GV->getValueType(), Alignment);
}
}
@@ -5604,7 +5613,7 @@ ConstantAddress CodeGenModule::GetAddrOfConstantCString(
*Entry = GV;
return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
- Alignment);
+ GV->getValueType(), Alignment);
}
ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
@@ -5634,7 +5643,9 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
getModule(), Type, false, llvm::GlobalVariable::InternalLinkage,
nullptr);
}
- return ConstantAddress(InsertResult.first->second, Align);
+ return ConstantAddress(
+ InsertResult.first->second,
+ InsertResult.first->second->getType()->getPointerElementType(), Align);
}
// FIXME: If an externally-visible declaration extends multiple temporaries,
@@ -5725,7 +5736,7 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
}
Entry = CV;
- return ConstantAddress(CV, Align);
+ return ConstantAddress(CV, Type, Align);
}
/// EmitObjCPropertyImplementations - Emit information for synthesized
@@ -6398,6 +6409,11 @@ void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
llvm::Metadata *
CodeGenModule::CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
StringRef Suffix) {
+ if (auto *FnType = T->getAs<FunctionProtoType>())
+ T = getContext().getFunctionType(
+ FnType->getReturnType(), FnType->getParamTypes(),
+ FnType->getExtProtoInfo().withExceptionSpec(EST_None));
+
llvm::Metadata *&InternalId = Map[T.getCanonicalType()];
if (InternalId)
return InternalId;
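The added canonicalization folds away the exception specification before the CFI type-metadata identifier is formed, so function types that differ only in noexcept share one id. An illustrative consequence:

    void callee() noexcept {}

    int main() {
      // Dropping noexcept through the standard conversion is allowed; with the
      // change, -fsanitize=cfi-icall gives "void ()" and "void () noexcept"
      // the same type id, so this indirect call is not flagged.
      void (*p)() = callee;
      p();
      return 0;
    }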
diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h
index e1c7f486d334..f1565511f98a 100644
--- a/clang/lib/CodeGen/CodeGenModule.h
+++ b/clang/lib/CodeGen/CodeGenModule.h
@@ -881,6 +881,9 @@ public:
ForDefinition_t IsForDefinition
= NotForDefinition);
+ // Return the address of the given function's body (its real entry point),
+ // bypassing any CFI indirection.
+ llvm::Constant *GetFunctionStart(const ValueDecl *Decl);
+
/// Get the address of the RTTI descriptor for the given type.
llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);
diff --git a/clang/lib/CodeGen/CodeGenTBAA.cpp b/clang/lib/CodeGen/CodeGenTBAA.cpp
index f4ebe6885675..95763d8e18b7 100644
--- a/clang/lib/CodeGen/CodeGenTBAA.cpp
+++ b/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -209,12 +209,12 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
return createScalarTypeNode(OutName, getChar(), Size);
}
- if (const auto *EIT = dyn_cast<ExtIntType>(Ty)) {
+ if (const auto *EIT = dyn_cast<BitIntType>(Ty)) {
SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
// Don't specify signed/unsigned since integer types can alias despite sign
// differences.
- Out << "_ExtInt(" << EIT->getNumBits() << ')';
+ Out << "_BitInt(" << EIT->getNumBits() << ')';
return createScalarTypeNode(OutName, getChar(), Size);
}
diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp
index fb05475a4e8c..77721510dfd0 100644
--- a/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -97,10 +97,10 @@ llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) {
llvm::Type *R = ConvertType(T);
- // If this is a bool type, or an ExtIntType in a bitfield representation,
- // map this integer to the target-specified size.
- if ((ForBitField && T->isExtIntType()) ||
- (!T->isExtIntType() && R->isIntegerTy(1)))
+ // If this is a bool type, or a bit-precise integer type in a bitfield
+ // representation, map this integer to the target-specified size.
+ if ((ForBitField && T->isBitIntType()) ||
+ (!T->isBitIntType() && R->isIntegerTy(1)))
return llvm::IntegerType::get(getLLVMContext(),
(unsigned)Context.getTypeSize(T));
@@ -786,8 +786,8 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
break;
}
- case Type::ExtInt: {
- const auto &EIT = cast<ExtIntType>(Ty);
+ case Type::BitInt: {
+ const auto &EIT = cast<BitIntType>(Ty);
ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
break;
}
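With the rename, ConvertType maps BitIntType straight to an arbitrary-width LLVM integer: _BitInt(N) becomes iN. For example (C23 syntax, also accepted by clang as an extension in other modes):

    // Lowers to i37 arithmetic in LLVM IR; in-memory storage is padded per the ABI.
    _BitInt(37) mul(_BitInt(37) a, _BitInt(37) b) { return a * b; }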
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 04163aeaddc5..1a15b09c7b2b 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -1345,7 +1345,8 @@ void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
- CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
+ CGF.EmitAnyExprToExn(
+ E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));
// Now throw the exception.
llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
@@ -2465,7 +2466,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
CGM.setStaticLocalDeclGuardAddress(&D, guard);
}
- Address guardAddr = Address(guard, guardAlignment);
+ Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);
// Test whether the variable has completed initialization.
//
@@ -2880,7 +2881,7 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
Guard->setAlignment(GuardAlign.getAsAlign());
CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
- InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
+ InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
// On Darwin platforms, use CXX_FAST_TLS calling convention.
if (CGM.getTarget().getTriple().isOSDarwin()) {
InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
@@ -3529,7 +3530,7 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
llvm_unreachable("Pipe types shouldn't get here");
case Type::Builtin:
- case Type::ExtInt:
+ case Type::BitInt:
// GCC treats vector and complex types as fundamental types.
case Type::Vector:
case Type::ExtVector:
@@ -3802,7 +3803,7 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
case Type::Pipe:
break;
- case Type::ExtInt:
+ case Type::BitInt:
break;
case Type::ConstantArray:
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 0fd5a0ffe06c..5971a7709304 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -917,7 +917,7 @@ void MicrosoftCXXABI::emitBeginCatch(CodeGenFunction &CGF,
std::tuple<Address, llvm::Value *, const CXXRecordDecl *>
MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy) {
- Value = CGF.Builder.CreateBitCast(Value, CGF.Int8PtrTy);
+ Value = CGF.Builder.CreateElementBitCast(Value, CGF.Int8Ty);
const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
const ASTContext &Context = getContext();
@@ -2408,14 +2408,14 @@ static ConstantAddress getInitThreadEpochPtr(CodeGenModule &CGM) {
StringRef VarName("_Init_thread_epoch");
CharUnits Align = CGM.getIntAlign();
if (auto *GV = CGM.getModule().getNamedGlobal(VarName))
- return ConstantAddress(GV, Align);
+ return ConstantAddress(GV, GV->getValueType(), Align);
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), CGM.IntTy,
/*isConstant=*/false, llvm::GlobalVariable::ExternalLinkage,
/*Initializer=*/nullptr, VarName,
/*InsertBefore=*/nullptr, llvm::GlobalVariable::GeneralDynamicTLSModel);
GV->setAlignment(Align.getAsAlign());
- return ConstantAddress(GV, Align);
+ return ConstantAddress(GV, GV->getValueType(), Align);
}
static llvm::FunctionCallee getInitThreadHeaderFn(CodeGenModule &CGM) {
@@ -2567,7 +2567,7 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
GI->Guard = GuardVar;
}
- ConstantAddress GuardAddr(GuardVar, GuardAlign);
+ ConstantAddress GuardAddr(GuardVar, GuardTy, GuardAlign);
assert(GuardVar->getLinkage() == GV->getLinkage() &&
"static local from the same function had different linkage");
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index 36e0319c8ab9..85089cdb2200 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -104,7 +104,7 @@ bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
if (Ty->isPromotableIntegerType())
return true;
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
return true;
@@ -431,7 +431,7 @@ static Address emitMergePHI(CodeGenFunction &CGF,
PHI->addIncoming(Addr1.getPointer(), Block1);
PHI->addIncoming(Addr2.getPointer(), Block2);
CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
- return Address(PHI, Align);
+ return Address(PHI, Addr1.getElementType(), Align);
}
TargetCodeGenInfo::~TargetCodeGenInfo() = default;
@@ -762,7 +762,7 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
Ty = EnumTy->getDecl()->getIntegerType();
ASTContext &Context = getContext();
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() >
Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
? Context.Int128Ty
@@ -784,7 +784,7 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() >
getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
? getContext().Int128Ty
@@ -1008,8 +1008,9 @@ ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
} else if (Ty->isFloatingType()) {
// Floating-point types don't go inreg.
return ABIArgInfo::getDirect();
- } else if (const auto *EIT = Ty->getAs<ExtIntType>()) {
- // Treat extended integers as integers if <=64, otherwise pass indirectly.
+ } else if (const auto *EIT = Ty->getAs<BitIntType>()) {
+ // Treat bit-precise integers as integers if <= 64, otherwise pass
+ // indirectly.
if (EIT->getNumBits() > 64)
return getNaturalAlignIndirect(Ty);
return ABIArgInfo::getDirect();
@@ -1027,8 +1028,8 @@ ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
if (isAggregateTypeForABI(RetTy))
return getNaturalAlignIndirect(RetTy);
- // Treat extended integers as integers if <=64, otherwise pass indirectly.
- if (const auto *EIT = RetTy->getAs<ExtIntType>()) {
+ // Treat bit-precise integers as integers if <= 64, otherwise pass indirectly.
+ if (const auto *EIT = RetTy->getAs<BitIntType>()) {
if (EIT->getNumBits() > 64)
return getNaturalAlignIndirect(RetTy);
return ABIArgInfo::getDirect();
@@ -1590,7 +1591,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 64)
return getIndirectReturnResult(RetTy, State);
@@ -1926,7 +1927,7 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
return ABIArgInfo::getExtend(Ty);
}
- if (const auto * EIT = Ty->getAs<ExtIntType>()) {
+ if (const auto *EIT = Ty->getAs<BitIntType>()) {
if (EIT->getNumBits() <= 64) {
if (InReg)
return ABIArgInfo::getDirectInReg();
@@ -3009,7 +3010,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
return;
}
- if (const auto *EITy = Ty->getAs<ExtIntType>()) {
+ if (const auto *EITy = Ty->getAs<BitIntType>()) {
if (EITy->getNumBits() <= 64)
Current = Integer;
else if (EITy->getNumBits() <= 128)
@@ -3200,7 +3201,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- if (Ty->isExtIntType())
+ if (Ty->isBitIntType())
return getNaturalAlignIndirect(Ty);
return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
@@ -3237,7 +3238,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
// but this code would be much safer if we could mark the argument with
// 'onstack'. See PR12193.
if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
- !Ty->isExtIntType()) {
+ !Ty->isBitIntType()) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
@@ -4033,7 +4034,7 @@ static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
// AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
- return Address(Res, Align);
+ return Address(Res, LTy, Align);
}
Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
@@ -4146,7 +4147,7 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
} else if (neededInt) {
RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset),
- CharUnits::fromQuantity(8));
+ CGF.Int8Ty, CharUnits::fromQuantity(8));
RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
// Copy to a temporary if necessary to ensure the appropriate alignment.
@@ -4164,7 +4165,7 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
} else if (neededSSE == 1) {
RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset),
- CharUnits::fromQuantity(16));
+ CGF.Int8Ty, CharUnits::fromQuantity(16));
RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
} else {
assert(neededSSE == 2 && "Invalid number of needed registers!");
@@ -4176,7 +4177,7 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// all the SSE registers to the RSA.
Address RegAddrLo = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea,
fp_offset),
- CharUnits::fromQuantity(16));
+ CGF.Int8Ty, CharUnits::fromQuantity(16));
Address RegAddrHi =
CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
CharUnits::fromQuantity(16));
@@ -4357,12 +4358,12 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
}
}
- if (Ty->isExtIntType()) {
+ if (Ty->isBitIntType()) {
// MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
// not 1, 2, 4, or 8 bytes, must be passed by reference."
- // However, non-power-of-two _ExtInts will be passed as 1,2,4 or 8 bytes
- // anyway as long is it fits in them, so we don't have to check the power of
- // 2.
+ // However, non-power-of-two bit-precise integers will be passed as 1, 2, 4,
+ // or 8 bytes anyway as long as they fit, so we don't have to check the
+ // power of 2.
if (Width <= 64)
return ABIArgInfo::getDirect();
return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
@@ -5069,7 +5070,7 @@ PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
break;
}
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() < 64)
return true;
@@ -5083,13 +5084,16 @@ CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
if (const ComplexType *CTy = Ty->getAs<ComplexType>())
Ty = CTy->getElementType();
+ auto FloatUsesVector = [this](QualType Ty) {
+ return Ty->isRealFloatingType() &&
+ &getContext().getFloatTypeSemantics(Ty) == &llvm::APFloat::IEEEquad();
+ };
+
// Only vector types of size 16 bytes need alignment (larger types are
// passed via reference, smaller types are not aligned).
if (Ty->isVectorType()) {
return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
- } else if (Ty->isRealFloatingType() &&
- &getContext().getFloatTypeSemantics(Ty) ==
- &llvm::APFloat::IEEEquad()) {
+ } else if (FloatUsesVector(Ty)) {
// According to ABI document section 'Optional Save Areas': If extended
// precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION
// format are supported, map them to a single quadword, quadword aligned.
@@ -5116,7 +5120,9 @@ CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
// With special case aggregates, only vector base types need alignment.
if (AlignAsType) {
- return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
+ bool UsesVector = AlignAsType->isVectorType() ||
+ FloatUsesVector(QualType(AlignAsType, 0));
+ return CharUnits::fromQuantity(UsesVector ? 16 : 8);
}
// Otherwise, we only need alignment for any aggregate type that
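With the FloatUsesVector helper, aggregates whose alignment-determining member is an IEEE binary128 floating-point type now get the same 16-byte parameter alignment as vector aggregates. A sketch, assuming a PPC64 target where long double uses the IEEE quad format (e.g. -mabi=ieeelongdouble):

    struct Quad2 { long double a, b; };            // IEEE binary128 members
    long double sum(Quad2 q) { return q.a + q.b; } // parameter area quadword aligned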
@@ -5289,7 +5295,7 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
}
}
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() > 128)
return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
@@ -5365,7 +5371,7 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
}
}
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 128)
return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
@@ -5717,7 +5723,7 @@ AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() > 128)
return getNaturalAlignIndirect(Ty);
@@ -5819,7 +5825,7 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 128)
return getNaturalAlignIndirect(RetTy);
@@ -6561,7 +6567,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
Ty = EnumTy->getDecl()->getIntegerType();
}
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() > 64)
return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
@@ -6763,7 +6769,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 64)
return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
@@ -7100,7 +7106,7 @@ bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
(T->isFloat128Type() ||
(T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
return true;
- if (const auto *EIT = T->getAs<ExtIntType>())
+ if (const auto *EIT = T->getAs<BitIntType>())
return EIT->getNumBits() >
(Context.getTargetInfo().hasInt128Type() ? 128U : 64U);
if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
@@ -7177,7 +7183,7 @@ ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
return getNaturalAlignIndirect(Ty, /* byval */ true);
}
- if (const auto *EIT = Ty->getAs<ExtIntType>()) {
+ if (const auto *EIT = Ty->getAs<BitIntType>()) {
if ((EIT->getNumBits() > 128) ||
(!getContext().getTargetInfo().hasInt128Type() &&
EIT->getNumBits() > 64))
@@ -7391,7 +7397,7 @@ bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
if (ABIInfo::isPromotableIntegerTypeForABI(Ty))
return true;
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() < 64)
return true;
@@ -7994,7 +8000,7 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
Ty = EnumTy->getDecl()->getIntegerType();
// Make sure we pass indirectly things that are too large.
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() > 128 ||
(EIT->getNumBits() > 64 &&
!getContext().getTargetInfo().hasInt128Type()))
@@ -8085,7 +8091,7 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
RetTy = EnumTy->getDecl()->getIntegerType();
// Make sure we pass indirectly things that are too large.
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 128 ||
(EIT->getNumBits() > 64 &&
!getContext().getTargetInfo().hasInt128Type()))
@@ -8460,7 +8466,7 @@ ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
if (Size <= 64)
HexagonAdjustRegsLeft(Size, RegsLeft);
- if (Size > 64 && Ty->isExtIntType())
+ if (Size > 64 && Ty->isBitIntType())
return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
@@ -8516,7 +8522,7 @@ ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- if (Size > 64 && RetTy->isExtIntType())
+ if (Size > 64 && RetTy->isBitIntType())
return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
@@ -8887,7 +8893,7 @@ ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
bool InReg = shouldUseInReg(Ty, State);
// Don't pass >64 bit integers in registers.
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() > 64)
return getIndirectResult(Ty, /*ByVal=*/true, State);
@@ -9161,6 +9167,10 @@ class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
: TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}
+
+ void setFunctionDeclAttributes(const FunctionDecl *FD, llvm::Function *F,
+ CodeGenModule &CGM) const;
+
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const override;
unsigned getOpenCLKernelCallingConv() const override;
@@ -9200,36 +9210,13 @@ static bool requiresAMDGPUProtectedVisibility(const Decl *D,
cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType()));
}
-void AMDGPUTargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
- if (requiresAMDGPUProtectedVisibility(D, GV)) {
- GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
- GV->setDSOLocal(true);
- }
-
- if (GV->isDeclaration())
- return;
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD)
- return;
-
- llvm::Function *F = cast<llvm::Function>(GV);
-
- const auto *ReqdWGS = M.getLangOpts().OpenCL ?
- FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
-
-
- const bool IsOpenCLKernel = M.getLangOpts().OpenCL &&
- FD->hasAttr<OpenCLKernelAttr>();
- const bool IsHIPKernel = M.getLangOpts().HIP &&
- FD->hasAttr<CUDAGlobalAttr>();
- if ((IsOpenCLKernel || IsHIPKernel) &&
- (M.getTriple().getOS() == llvm::Triple::AMDHSA))
- F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");
-
- if (IsHIPKernel)
- F->addFnAttr("uniform-work-group-size", "true");
-
+void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes(
+ const FunctionDecl *FD, llvm::Function *F, CodeGenModule &M) const {
+ const auto *ReqdWGS =
+ M.getLangOpts().OpenCL ? FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
+ const bool IsOpenCLKernel =
+ M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>();
+ const bool IsHIPKernel = M.getLangOpts().HIP && FD->hasAttr<CUDAGlobalAttr>();
const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
if (ReqdWGS || FlatWGS) {
@@ -9297,6 +9284,38 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
if (NumVGPR != 0)
F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
}
+}
+
+void AMDGPUTargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (requiresAMDGPUProtectedVisibility(D, GV)) {
+ GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
+ GV->setDSOLocal(true);
+ }
+
+ if (GV->isDeclaration())
+ return;
+
+ llvm::Function *F = dyn_cast<llvm::Function>(GV);
+ if (!F)
+ return;
+
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (FD)
+ setFunctionDeclAttributes(FD, F, M);
+
+ const bool IsOpenCLKernel =
+ M.getLangOpts().OpenCL && FD && FD->hasAttr<OpenCLKernelAttr>();
+ const bool IsHIPKernel =
+ M.getLangOpts().HIP && FD && FD->hasAttr<CUDAGlobalAttr>();
+
+ const bool IsOpenMP = M.getLangOpts().OpenMP && !FD;
+ if ((IsOpenCLKernel || IsHIPKernel || IsOpenMP) &&
+ (M.getTriple().getOS() == llvm::Triple::AMDHSA))
+ F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");
+
+ if (IsHIPKernel)
+ F->addFnAttr("uniform-work-group-size", "true");
if (M.getContext().getTargetInfo().allowAMDGPUUnsafeFPAtomics())
F->addFnAttr("amdgpu-unsafe-fp-atomics", "true");
@@ -9343,7 +9362,9 @@ AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
if (AddrSpace != LangAS::Default)
return AddrSpace;
- if (CGM.isTypeConstant(D->getType(), false)) {
+ // Only promote to address space 4 if the VarDecl has constant initialization.
+ if (CGM.isTypeConstant(D->getType(), false) &&
+ D->hasConstantInitialization()) {
if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
return ConstAS.getValue();
}
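The extra hasConstantInitialization() check keeps dynamically initialized constants out of the read-only constant address space (AS 4), since their storage must still be writable when the initializer runs. Sketch for code compiled to an AMDGPU target (for instance via OpenMP offloading); seed() is a hypothetical external function:

    int seed();                  // defined elsewhere, value unknown at compile time

    const int table[2] = {1, 2}; // constant initialization: may be promoted to AS 4
    const int late = seed();     // dynamic initialization: stays in the default AS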
@@ -9606,7 +9627,7 @@ SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
if (Size < 64 && Ty->isIntegerType())
return ABIArgInfo::getExtend(Ty);
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() < 64)
return ABIArgInfo::getExtend(Ty);
@@ -9860,7 +9881,7 @@ ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
ABIArgInfo::getDirect(Result, 0, nullptr, false);
}
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() > 64)
return getIndirectByValue(Ty);
@@ -10209,12 +10230,23 @@ public:
private:
void setCCs();
};
+
+class SPIRVABIInfo : public CommonSPIRABIInfo {
+public:
+ SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {}
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+private:
+ ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
+};
} // end anonymous namespace
namespace {
class CommonSPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
CommonSPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
: TargetCodeGenInfo(std::make_unique<CommonSPIRABIInfo>(CGT)) {}
+ CommonSPIRTargetCodeGenInfo(std::unique_ptr<ABIInfo> ABIInfo)
+ : TargetCodeGenInfo(std::move(ABIInfo)) {}
LangAS getASTAllocaAddressSpace() const override {
return getLangASFromTargetAS(
@@ -10223,18 +10255,60 @@ public:
unsigned getOpenCLKernelCallingConv() const override;
};
-
+class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
+public:
+ SPIRVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : CommonSPIRTargetCodeGenInfo(std::make_unique<SPIRVABIInfo>(CGT)) {}
+ void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
+};
} // End anonymous namespace.
+
void CommonSPIRABIInfo::setCCs() {
assert(getRuntimeCC() == llvm::CallingConv::C);
RuntimeCC = llvm::CallingConv::SPIR_FUNC;
}
+ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
+ if (getContext().getLangOpts().HIP) {
+ // Coerce pointer arguments with default address space to CrossWorkGroup
+ // pointers for HIPSPV. When the language mode is HIP, the SPIRTargetInfo
+ // maps cuda_device to SPIR-V's CrossWorkGroup address space.
+ llvm::Type *LTy = CGT.ConvertType(Ty);
+ auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
+ auto GlobalAS = getContext().getTargetAddressSpace(LangAS::cuda_device);
+ if (LTy->isPointerTy() && LTy->getPointerAddressSpace() == DefaultAS) {
+ LTy = llvm::PointerType::get(
+ cast<llvm::PointerType>(LTy)->getElementType(), GlobalAS);
+ return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
+ }
+ }
+ return classifyArgumentType(Ty);
+}
+
+void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ // The logic is the same as in DefaultABIInfo, except for the handling of
+ // kernel arguments.
+ llvm::CallingConv::ID CC = FI.getCallingConvention();
+
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ for (auto &I : FI.arguments()) {
+ if (CC == llvm::CallingConv::SPIR_KERNEL) {
+ I.info = classifyKernelArgumentType(I.type);
+ } else {
+ I.info = classifyArgumentType(I.type);
+ }
+ }
+}
+
namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
- DefaultABIInfo SPIRABI(CGM.getTypes());
- SPIRABI.computeInfo(FI);
+ if (CGM.getTarget().getTriple().isSPIRV())
+ SPIRVABIInfo(CGM.getTypes()).computeInfo(FI);
+ else
+ CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI);
}
}
}
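Taken together, the SPIR-V additions route SPIR-V triples through SPIRVABIInfo, whose kernel-argument path coerces default-address-space pointers to CrossWorkGroup pointers in HIP mode. A HIPSPV-flavoured sketch of the affected pattern (assumes the usual HIP runtime headers are available):

    #include <hip/hip_runtime.h>

    // Compiled for a SPIR-V target in HIP mode, the int* parameter is lowered
    // as a CrossWorkGroup (global address space) pointer in the SPIR_KERNEL
    // signature.
    __global__ void scale(int *data, int factor) {
      unsigned i = threadIdx.x + blockIdx.x * blockDim.x;
      data[i] *= factor;
    }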
@@ -10243,6 +10317,16 @@ unsigned CommonSPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
return llvm::CallingConv::SPIR_KERNEL;
}
+void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention(
+ const FunctionType *&FT) const {
+ // Convert HIP kernels to SPIR-V kernels.
+ if (getABIInfo().getContext().getLangOpts().HIP) {
+ FT = getABIInfo().getContext().adjustFunctionType(
+ FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
+ return;
+ }
+}
+
static bool appendType(SmallStringEnc &Enc, QualType QType,
const CodeGen::CodeGenModule &CGM,
TypeStringCache &TSC);
@@ -10943,7 +11027,7 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
return extendType(Ty);
}
- if (const auto *EIT = Ty->getAs<ExtIntType>()) {
+ if (const auto *EIT = Ty->getAs<BitIntType>()) {
if (EIT->getNumBits() < XLen && !MustUseStack)
return extendType(Ty);
if (EIT->getNumBits() > 128 ||
@@ -11308,9 +11392,10 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
return SetCGInfo(new ARCTargetCodeGenInfo(Types));
case llvm::Triple::spir:
case llvm::Triple::spir64:
+ return SetCGInfo(new CommonSPIRTargetCodeGenInfo(Types));
case llvm::Triple::spirv32:
case llvm::Triple::spirv64:
- return SetCGInfo(new CommonSPIRTargetCodeGenInfo(Types));
+ return SetCGInfo(new SPIRVTargetCodeGenInfo(Types));
case llvm::Triple::ve:
return SetCGInfo(new VETargetCodeGenInfo(Types));
}