Diffstat (limited to 'lib/CodeGen/TargetInfo.cpp')
 -rw-r--r--  lib/CodeGen/TargetInfo.cpp | 838
 1 file changed, 413 insertions(+), 425 deletions(-)
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index c05b23a32e86..53154b513eb7 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -37,7 +37,8 @@ static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
unsigned LastIndex) {
// Alternatively, we could emit this as a loop in the source.
for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
- llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
+ llvm::Value *Cell =
+ Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
Builder.CreateStore(Value, Cell);
}
}
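
Note: the hunk above is part of the LLVM-wide migration to explicit GEP source element types. A minimal standalone sketch of the new builder call (helper name and context are illustrative, not from the patch):

    #include "llvm/IR/IRBuilder.h"

    static void assignBytes(llvm::IRBuilder<> &Builder, llvm::Value *Array,
                            llvm::Value *Byte, unsigned First, unsigned Last) {
      for (unsigned I = First; I <= Last; ++I) {
        // The element type (i8) is now passed explicitly instead of being
        // recovered from the pointer operand's pointee type.
        llvm::Value *Cell =
            Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
        Builder.CreateStore(Byte, Cell);
      }
    }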
@@ -107,6 +108,10 @@ bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
return false;
}
+bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
+ return false;
+}
+
void ABIArgInfo::dump() const {
raw_ostream &OS = llvm::errs();
OS << "(ABIArgInfo Kind=";
@@ -238,7 +243,7 @@ static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
- const RecordType *RT = T->getAsStructureType();
+ const RecordType *RT = T->getAs<RecordType>();
if (!RT)
return nullptr;
@@ -339,9 +344,15 @@ static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
//
// FIXME: This needs to be generalized to handle classes as well.
const RecordDecl *RD = RT->getDecl();
- if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
+ if (!RD->isStruct())
return false;
+ // We try to expand C-like CXXRecordDecls.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (!CXXRD->isCLike())
+ return false;
+ }
+
uint64_t Size = 0;
for (const auto *FD : RD->fields()) {
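
Note: a source-level illustration of the new isCLike() check (example types are hypothetical). A C++ record with no bases, virtual members, or other C++-only machinery can now be expanded; anything else still bails out:

    struct CLike { int a; float b; };     // plain C-style struct: may be expanded
    struct Derived : CLike {};            // has a base class: not C-like
    struct Virtual { virtual void f(); }; // has a vtable: not C-like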
@@ -399,8 +410,16 @@ llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
}
ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
- if (isAggregateTypeForABI(Ty))
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/copy-constructors should not be
+ // passed by value.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+
return ABIArgInfo::getIndirect(0);
+ }
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
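
Note: a source-level illustration of the two new checks (hypothetical types; transparent_union is a GNU C attribute, honored in C mode):

    // Classified as its first field, i.e. as an int *:
    union IntOrLongPtr {
      int *ip;
      long *lp;
    } __attribute__((transparent_union));

    // Non-trivial destructor: getRecordArgABI() reports RAA_Indirect or
    // RAA_DirectInMemory, so the argument is passed indirectly.
    struct NonTrivial {
      ~NonTrivial();
      int x;
    };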
@@ -1352,7 +1371,8 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
} else {
// 9 is %eflags, which doesn't get a size on Darwin for some
// reason.
- Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));
+ Builder.CreateStore(
+ Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9));
// 11-16 are st(0..5). Not sure why we stop at 5.
// These have size 12, which is sizeof(long double) on
@@ -1475,14 +1495,13 @@ class X86_64ABIInfo : public ABIInfo {
return !getTarget().getTriple().isOSDarwin();
}
- bool HasAVX;
// Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
// 64-bit hardware.
bool Has64BitPointers;
public:
- X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
- ABIInfo(CGT), HasAVX(hasavx),
+ X86_64ABIInfo(CodeGen::CodeGenTypes &CGT) :
+ ABIInfo(CGT),
Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
}
@@ -1503,6 +1522,14 @@ public:
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override;
+
+ bool has64BitPointers() const {
+ return Has64BitPointers;
+ }
+
+ bool hasAVX() const {
+ return getTarget().getABI() == "avx";
+ }
};
/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
@@ -1532,10 +1559,9 @@ public:
};
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
- bool HasAVX;
public:
- X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
- : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) {}
+ X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new X86_64ABIInfo(CGT)) {}
const X86_64ABIInfo &getABIInfo() const {
return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
@@ -1588,24 +1614,47 @@ public:
llvm::Constant *
getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
- unsigned Sig = (0xeb << 0) | // jmp rel8
- (0x0a << 8) | // .+0x0c
- ('F' << 16) |
- ('T' << 24);
+ unsigned Sig;
+ if (getABIInfo().has64BitPointers())
+ Sig = (0xeb << 0) | // jmp rel8
+ (0x0a << 8) | // .+0x0c
+ ('F' << 16) |
+ ('T' << 24);
+ else
+ Sig = (0xeb << 0) | // jmp rel8
+ (0x06 << 8) | // .+0x08
+ ('F' << 16) |
+ ('T' << 24);
return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
}
unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
- return HasAVX ? 32 : 16;
+ return getABIInfo().hasAVX() ? 32 : 16;
+ }
+};
+
+class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
+public:
+ PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : X86_64TargetCodeGenInfo(CGT) {}
+
+ void getDependentLibraryOption(llvm::StringRef Lib,
+ llvm::SmallString<24> &Opt) const override {
+ Opt = "\01";
+ Opt += Lib;
}
};
static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
- // If the argument does not end in .lib, automatically add the suffix. This
- // matches the behavior of MSVC.
- std::string ArgStr = Lib;
+ // If the argument does not end in .lib, automatically add the suffix.
+ // If the argument contains a space, enclose it in quotes.
+ // This matches the behavior of MSVC.
+ bool Quote = (Lib.find(" ") != StringRef::npos);
+ std::string ArgStr = Quote ? "\"" : "";
+ ArgStr += Lib;
if (!Lib.endswith_lower(".lib"))
ArgStr += ".lib";
+ ArgStr += Quote ? "\"" : "";
return ArgStr;
}
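
Note: both constants encode a 4-byte prologue signature; in little-endian memory order they read eb 0a 46 54 (jmp rel8 over 10 bytes of data, landing at .+0x0c) and eb 06 46 54 (jmp to .+0x08) respectively. A standalone check, purely illustrative:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t Sig64 = 0xebu | (0x0au << 8) | ('F' << 16) | ('T' << 24);
      uint32_t Sig32 = 0xebu | (0x06u << 8) | ('F' << 16) | ('T' << 24);
      std::printf("%08x %08x\n", Sig64, Sig32); // prints 54460aeb 544606eb
      return 0;
    }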
@@ -1615,6 +1664,9 @@ public:
bool d, bool p, bool w, unsigned RegParms)
: X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override;
+
void getDependentLibraryOption(llvm::StringRef Lib,
llvm::SmallString<24> &Opt) const override {
Opt = "/DEFAULTLIB:";
@@ -1628,11 +1680,35 @@ public:
}
};
+static void addStackProbeSizeTargetAttribute(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) {
+ if (isa<FunctionDecl>(D)) {
+ if (CGM.getCodeGenOpts().StackProbeSize != 4096) {
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+
+ Fn->addFnAttr("stack-probe-size", llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
+ }
+ }
+}
+
+void WinX86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const {
+ X86_32TargetCodeGenInfo::SetTargetAttributes(D, GV, CGM);
+
+ addStackProbeSizeTargetAttribute(D, GV, CGM);
+}
+
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
- bool HasAVX;
+ bool hasAVX() const { return getABIInfo().getTarget().getABI() == "avx"; }
+
public:
- WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
- : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)), HasAVX(HasAVX) {}
+ WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
+
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override;
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
return 7;
@@ -1661,10 +1737,17 @@ public:
}
unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
- return HasAVX ? 32 : 16;
+ return hasAVX() ? 32 : 16;
}
};
+void WinX86_64TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const {
+ TargetCodeGenInfo::SetTargetAttributes(D, GV, CGM);
+
+ addStackProbeSizeTargetAttribute(D, GV, CGM);
+}
}
void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
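
Note: the "stack-probe-size" string attribute is only attached when the value differs from the 4096-byte default; the backend can read it back later. A sketch of that consumer side (assuming LLVM's Function/Attribute API of this era):

    #include "llvm/IR/Function.h"

    static unsigned getStackProbeSize(const llvm::Function &F) {
      unsigned Size = 4096; // default when no attribute is present
      if (F.hasFnAttribute("stack-probe-size"))
        F.getFnAttribute("stack-probe-size")
            .getValueAsString()
            .getAsInteger(10, Size); // attribute value is a decimal string
      return Size;
    }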
@@ -1843,7 +1926,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// split.
if (OffsetBase && OffsetBase != 64)
Hi = Lo;
- } else if (Size == 128 || (HasAVX && isNamedArg && Size == 256)) {
+ } else if (Size == 128 || (hasAVX() && isNamedArg && Size == 256)) {
// Arguments of 256 bits are split into four eightbyte chunks. The
// least significant one belongs to class SSE and all the others to class
// SSEUP. The original Lo and Hi design considers that types can't be
@@ -2065,7 +2148,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
uint64_t Size = getContext().getTypeSize(VecTy);
- unsigned LargestVector = HasAVX ? 256 : 128;
+ unsigned LargestVector = hasAVX() ? 256 : 128;
if (Size <= 64 || Size > LargestVector)
return true;
}
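
Note: a source-level view of the cutoff (typedefs are hypothetical). Legal vector arguments are those in the (64, LargestVector] bit range, so the "avx" ABI string raises the ceiling from 128 to 256 bits:

    typedef int v2si __attribute__((vector_size(8)));  // 64 bits: illegal (too small)
    typedef int v4si __attribute__((vector_size(16))); // 128 bits: always legal
    typedef int v8si __attribute__((vector_size(32))); // 256 bits: legal only with hasAVX()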
@@ -2142,20 +2225,9 @@ llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
Ty = QualType(InnerTy, 0);
llvm::Type *IRType = CGT.ConvertType(Ty);
-
- // If the preferred type is a 16-byte vector, prefer to pass it.
- if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
- llvm::Type *EltTy = VT->getElementType();
- unsigned BitWidth = VT->getBitWidth();
- if ((BitWidth >= 128 && BitWidth <= 256) &&
- (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
- EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
- EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
- EltTy->isIntegerTy(128)))
- return VT;
- }
-
- return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
+ assert(isa<llvm::VectorType>(IRType) &&
+ "Trying to return a non-vector type in a vector register!");
+ return IRType;
}
/// BitsContainNoUserData - Return true if the specified [start,end) bit range
@@ -2717,8 +2789,8 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
QualType Ty,
CodeGenFunction &CGF) {
- llvm::Value *overflow_arg_area_p =
- CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
+ llvm::Value *overflow_arg_area_p = CGF.Builder.CreateStructGEP(
+ nullptr, VAListAddr, 2, "overflow_arg_area_p");
llvm::Value *overflow_arg_area =
CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
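
Note: this and the following hunks apply the parallel IRBuilder::CreateStructGEP migration, where the struct type is passed explicitly (nullptr preserves the old derive-it-from-the-pointer behavior during the transition). A minimal sketch:

    #include "llvm/IR/IRBuilder.h"

    static llvm::Value *fieldAddr(llvm::IRBuilder<> &Builder,
                                  llvm::StructType *STy, llvm::Value *Ptr,
                                  unsigned Field) {
      // Old form: Builder.CreateStructGEP(Ptr, Field, "addr");
      return Builder.CreateStructGEP(STy, Ptr, Field, "addr");
    }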
@@ -2798,14 +2870,16 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
llvm::Value *gp_offset_p = nullptr, *gp_offset = nullptr;
llvm::Value *fp_offset_p = nullptr, *fp_offset = nullptr;
if (neededInt) {
- gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
+ gp_offset_p =
+ CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 0, "gp_offset_p");
gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
}
if (neededSSE) {
- fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
+ fp_offset_p =
+ CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 1, "fp_offset_p");
fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
llvm::Value *FitsInFP =
llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
@@ -2833,9 +2907,8 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// simple assembling of a structure from scattered addresses has many more
// loads than necessary. Can we clean this up?
llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
- llvm::Value *RegAddr =
- CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
- "reg_save_area");
+ llvm::Value *RegAddr = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3), "reg_save_area");
if (neededInt && neededSSE) {
// FIXME: Cleanup.
assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
@@ -2855,9 +2928,9 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
llvm::Value *V =
CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 0));
V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 1));
RegAddr = CGF.Builder.CreateBitCast(Tmp,
llvm::PointerType::getUnqual(LTy));
@@ -2894,10 +2967,10 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
DblPtrTy));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 0));
V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
DblPtrTy));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 1));
RegAddr = CGF.Builder.CreateBitCast(Tmp,
llvm::PointerType::getUnqual(LTy));
}
@@ -2984,7 +3057,7 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
return ABIArgInfo::getDirect();
}
- if (RT || Ty->isMemberPointerType()) {
+ if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
// MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
// not 1, 2, 4, or 8 bytes, must be passed by reference."
if (Width > 64 || !llvm::isPowerOf2_64(Width))
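
Note: with this change _Complex types follow the same MS x64 rule as records and member pointers. Source-level examples (hypothetical types; _Complex is a Clang extension in C++):

    struct Eight { long long v; };  // 8 bytes, power of two: passed directly
    struct Twelve { int a, b, c; }; // 12 bytes, not a power of two: by reference
    typedef _Complex double CD;     // 16 bytes: now also passed by reference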
@@ -3040,48 +3113,6 @@ llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return AddrTyped;
}
-namespace {
-
-class NaClX86_64ABIInfo : public ABIInfo {
- public:
- NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
- : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
- void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
- private:
- PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
- X86_64ABIInfo NInfo; // Used for everything else.
-};
-
-class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
- bool HasAVX;
- public:
- NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
- : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) {
- }
- unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
- return HasAVX ? 32 : 16;
- }
-};
-
-}
-
-void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
- if (FI.getASTCallingConvention() == CC_PnaclCall)
- PInfo.computeInfo(FI);
- else
- NInfo.computeInfo(FI);
-}
-
-llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- // Always use the native convention; calling pnacl-style varargs functions
- // is unsupported.
- return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
-}
-
-
// PowerPC-32
namespace {
/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
@@ -3257,13 +3288,42 @@ public:
private:
static const unsigned GPRBits = 64;
ABIKind Kind;
+ bool HasQPX;
+
+ // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
+ // will be passed in a QPX register.
+ bool IsQPXVectorTy(const Type *Ty) const {
+ if (!HasQPX)
+ return false;
+
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ unsigned NumElements = VT->getNumElements();
+ if (NumElements == 1)
+ return false;
+
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
+ if (getContext().getTypeSize(Ty) <= 256)
+ return true;
+ } else if (VT->getElementType()->
+ isSpecificBuiltinType(BuiltinType::Float)) {
+ if (getContext().getTypeSize(Ty) <= 128)
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ bool IsQPXVectorTy(QualType Ty) const {
+ return IsQPXVectorTy(Ty.getTypePtr());
+ }
public:
- PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
- : DefaultABIInfo(CGT), Kind(Kind) {}
+ PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX)
+ : DefaultABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {}
bool isPromotableTypeForABI(QualType Ty) const;
- bool isAlignedParamType(QualType Ty) const;
+ bool isAlignedParamType(QualType Ty, bool &Align32) const;
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType Ty) const;
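
Note: source-level examples (hypothetical typedefs) of what IsQPXVectorTy() accepts once HasQPX is set:

    typedef double v4df __attribute__((vector_size(32))); // 4 x double, 256 bits: QPX
    typedef float  v4sf __attribute__((vector_size(16))); // 4 x float, 128 bits: QPX
    typedef double v1df __attribute__((vector_size(8)));  // one element: not QPX
    typedef double v8df __attribute__((vector_size(64))); // 512 bits: too wide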
@@ -3288,7 +3348,8 @@ public:
const Type *T = isSingleElementStruct(I.type, getContext());
if (T) {
const BuiltinType *BT = T->getAs<BuiltinType>();
- if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
+ if (IsQPXVectorTy(T) ||
+ (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
(BT && BT->isFloatingPoint())) {
QualType QT(T, 0);
I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
@@ -3304,10 +3365,13 @@ public:
};
class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
+ bool HasQPX;
+
public:
PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
- PPC64_SVR4_ABIInfo::ABIKind Kind)
- : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind)) {}
+ PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX)
+ : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX)),
+ HasQPX(HasQPX) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
// This is recovered from gcc output.
@@ -3317,7 +3381,12 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const override;
- unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
+ unsigned getOpenMPSimdDefaultAlignment(QualType QT) const override {
+ if (HasQPX)
+ if (const PointerType *PT = QT->getAs<PointerType>())
+ if (PT->getPointeeType()->isSpecificBuiltinType(BuiltinType::Double))
+ return 32; // Natural alignment for QPX doubles.
+
return 16; // Natural alignment for Altivec and VSX vectors.
}
};
@@ -3370,15 +3439,23 @@ PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
/// isAlignedParamType - Determine whether a type requires 16-byte
/// alignment in the parameter area.
bool
-PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty) const {
+PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty, bool &Align32) const {
+ Align32 = false;
+
// Complex types are passed just like their elements.
if (const ComplexType *CTy = Ty->getAs<ComplexType>())
Ty = CTy->getElementType();
// Only vector types of size 16 bytes need alignment (larger types are
// passed via reference, smaller types are not aligned).
- if (Ty->isVectorType())
+ if (IsQPXVectorTy(Ty)) {
+ if (getContext().getTypeSize(Ty) > 128)
+ Align32 = true;
+
+ return true;
+ } else if (Ty->isVectorType()) {
return getContext().getTypeSize(Ty) == 128;
+ }
// For single-element float/vector structs, we consider the whole type
// to have the same alignment requirements as its single element.
@@ -3386,7 +3463,7 @@ PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty) const {
const Type *EltType = isSingleElementStruct(Ty, getContext());
if (EltType) {
const BuiltinType *BT = EltType->getAs<BuiltinType>();
- if ((EltType->isVectorType() &&
+ if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
getContext().getTypeSize(EltType) == 128) ||
(BT && BT->isFloatingPoint()))
AlignAsType = EltType;
@@ -3400,13 +3477,22 @@ PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty) const {
AlignAsType = Base;
// With special case aggregates, only vector base types need alignment.
- if (AlignAsType)
+ if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
+ if (getContext().getTypeSize(AlignAsType) > 128)
+ Align32 = true;
+
+ return true;
+ } else if (AlignAsType) {
return AlignAsType->isVectorType();
+ }
// Otherwise, we only need alignment for any aggregate type that
// has an alignment requirement of >= 16 bytes.
- if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128)
+ if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
+ if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
+ Align32 = true;
return true;
+ }
return false;
}
@@ -3512,7 +3598,7 @@ bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
return true;
}
if (const VectorType *VT = Ty->getAs<VectorType>()) {
- if (getContext().getTypeSize(VT) == 128)
+ if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
return true;
}
return false;
@@ -3538,7 +3624,7 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
// Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
// or via reference (larger than 16 bytes).
- if (Ty->isVectorType()) {
+ if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
uint64_t Size = getContext().getTypeSize(Ty);
if (Size > 128)
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
@@ -3552,7 +3638,9 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
- uint64_t ABIAlign = isAlignedParamType(Ty)? 16 : 8;
+ bool Align32;
+ uint64_t ABIAlign = isAlignedParamType(Ty, Align32) ?
+ (Align32 ? 32 : 16) : 8;
uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
// ELFv2 homogeneous aggregates are passed as array types.
@@ -3609,7 +3697,7 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
// Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
// or via reference (larger than 16 bytes).
- if (RetTy->isVectorType()) {
+ if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
uint64_t Size = getContext().getTypeSize(RetTy);
if (Size > 128)
return ABIArgInfo::getIndirect(0);
@@ -3666,10 +3754,13 @@ llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
// Handle types that require 16-byte alignment in the parameter save area.
- if (isAlignedParamType(Ty)) {
+ bool Align32;
+ if (isAlignedParamType(Ty, Align32)) {
llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
- AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(15));
- AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt64(-16));
+ AddrAsInt = Builder.CreateAdd(AddrAsInt,
+ Builder.getInt64(Align32 ? 31 : 15));
+ AddrAsInt = Builder.CreateAnd(AddrAsInt,
+ Builder.getInt64(Align32 ? -32 : -16));
Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
}
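
Note: the add-and-mask sequence above is the standard align-up idiom, now parameterized on a 16- or 32-byte boundary. In plain C++ (illustrative):

    #include <cstdint>

    inline uint64_t alignUp(uint64_t Addr, uint64_t Align) {
      // Align must be a power of two, e.g. alignUp(A, 32) == (A + 31) & ~31.
      return (Addr + Align - 1) & ~(Align - 1);
    }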
@@ -3714,10 +3805,12 @@ llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
- llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty),
- "vacplx");
- llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real");
- llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag");
+ llvm::AllocaInst *Ptr =
+ CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty), "vacplx");
+ llvm::Value *RealPtr =
+ Builder.CreateStructGEP(Ptr->getAllocatedType(), Ptr, 0, ".real");
+ llvm::Value *ImagPtr =
+ Builder.CreateStructGEP(Ptr->getAllocatedType(), Ptr, 1, ".imag");
Builder.CreateStore(Real, RealPtr, false);
Builder.CreateStore(Imag, ImagPtr, false);
return Ptr;
@@ -3837,8 +3930,8 @@ private:
llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override {
+ llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const override {
return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
: EmitAAPCSVAArg(VAListAddr, Ty, CGF);
}
@@ -3849,13 +3942,15 @@ public:
AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
: TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
- StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+ StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
}
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { return 31; }
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ return 31;
+ }
- virtual bool doesReturnSlotInterfereWithArgs() const { return false; }
+ bool doesReturnSlotInterfereWithArgs() const override { return false; }
};
}
@@ -3964,7 +4059,15 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
// Aggregates <= 16 bytes are returned directly in registers or on the stack.
uint64_t Size = getContext().getTypeSize(RetTy);
if (Size <= 128) {
+ unsigned Alignment = getContext().getTypeAlign(RetTy);
Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
+
+ // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
+ // For aggregates with 16-byte alignment, we use i128.
+ if (Alignment < 128 && Size == 128) {
+ llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
+ }
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
}
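
Note: source-level illustration (hypothetical types). Both records are 16 bytes, but only the over-aligned one is returned as a single i128; the other is coerced to [2 x i64]:

    struct Pair { long long a, b; };           // align 8:  returned as [2 x i64]
    struct __attribute__((aligned(16))) Wide { // align 16: returned as i128
      long long a, b;
    };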
@@ -4047,13 +4150,15 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
int RegSize = IsIndirect ? 8 : getContext().getTypeSize(Ty) / 8;
if (!IsFPR) {
// 3 is the field number of __gr_offs
- reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
+ reg_offs_p =
+ CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3, "gr_offs_p");
reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
reg_top_index = 1; // field number for __gr_top
RegSize = llvm::RoundUpToAlignment(RegSize, 8);
} else {
// 4 is the field number of __vr_offs.
- reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
+ reg_offs_p =
+ CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 4, "vr_offs_p");
reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
reg_top_index = 2; // field number for __vr_top
RegSize = 16 * NumRegs;
@@ -4114,8 +4219,8 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
CGF.EmitBlock(InRegBlock);
llvm::Value *reg_top_p = nullptr, *reg_top = nullptr;
- reg_top_p =
- CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
+ reg_top_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, reg_top_index,
+ "reg_top_p");
reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
llvm::Value *RegAddr = nullptr;
@@ -4138,7 +4243,7 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
- llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
+ llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(HFATy);
int Offset = 0;
if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128)
@@ -4149,7 +4254,8 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
LoadAddr = CGF.Builder.CreateBitCast(
LoadAddr, llvm::PointerType::getUnqual(BaseTy));
- llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);
+ llvm::Value *StoreAddr =
+ CGF.Builder.CreateStructGEP(Tmp->getAllocatedType(), Tmp, i);
llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
CGF.Builder.CreateStore(Elem, StoreAddr);
@@ -4182,7 +4288,7 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
CGF.EmitBlock(OnStackBlock);
llvm::Value *stack_p = nullptr, *OnStackAddr = nullptr;
- stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
+ stack_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 0, "stack_p");
OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
// Again, stack arguments may need realignment. In this case both integer and
@@ -4324,17 +4430,10 @@ public:
private:
ABIKind Kind;
- mutable int VFPRegs[16];
- const unsigned NumVFPs;
- const unsigned NumGPRs;
- mutable unsigned AllocatedGPRs;
- mutable unsigned AllocatedVFPs;
public:
- ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind),
- NumVFPs(16), NumGPRs(4) {
+ ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {
setCCs();
- resetAllocatedRegs();
}
bool isEABI() const {
@@ -4364,8 +4463,7 @@ public:
private:
ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
- bool &IsCPRC) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const;
bool isIllegalVectorType(QualType Ty) const;
bool isHomogeneousAggregateBaseType(QualType Ty) const override;
@@ -4380,10 +4478,6 @@ private:
llvm::CallingConv::ID getLLVMDefaultCC() const;
llvm::CallingConv::ID getABIDefaultCC() const;
void setCCs();
-
- void markAllocatedGPRs(unsigned Alignment, unsigned NumRequired) const;
- void markAllocatedVFPs(unsigned Alignment, unsigned NumRequired) const;
- void resetAllocatedRegs(void) const;
};
class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -4456,55 +4550,43 @@ public:
}
};
+class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
+ void addStackProbeSizeTargetAttribute(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const;
+
+public:
+ WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
+ : ARMTargetCodeGenInfo(CGT, K) {}
+
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override;
+};
+
+void WindowsARMTargetCodeGenInfo::addStackProbeSizeTargetAttribute(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ if (!isa<FunctionDecl>(D))
+ return;
+ if (CGM.getCodeGenOpts().StackProbeSize == 4096)
+ return;
+
+ llvm::Function *F = cast<llvm::Function>(GV);
+ F->addFnAttr("stack-probe-size",
+ llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
+}
+
+void WindowsARMTargetCodeGenInfo::SetTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ ARMTargetCodeGenInfo::SetTargetAttributes(D, GV, CGM);
+ addStackProbeSizeTargetAttribute(D, GV, CGM);
+}
}
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
- // To correctly handle Homogeneous Aggregate, we need to keep track of the
- // VFP registers allocated so far.
- // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
- // VFP registers of the appropriate type unallocated then the argument is
- // allocated to the lowest-numbered sequence of such registers.
- // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
- // unallocated are marked as unavailable.
- resetAllocatedRegs();
-
- if (getCXXABI().classifyReturnType(FI)) {
- if (FI.getReturnInfo().isIndirect())
- markAllocatedGPRs(1, 1);
- } else {
+ if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic());
- }
- for (auto &I : FI.arguments()) {
- unsigned PreAllocationVFPs = AllocatedVFPs;
- unsigned PreAllocationGPRs = AllocatedGPRs;
- bool IsCPRC = false;
- // 6.1.2.3 There is one VFP co-processor register class using registers
- // s0-s15 (d0-d7) for passing arguments.
- I.info = classifyArgumentType(I.type, FI.isVariadic(), IsCPRC);
-
- // If we have allocated some arguments onto the stack (due to running
- // out of VFP registers), we cannot split an argument between GPRs and
- // the stack. If this situation occurs, we add padding to prevent the
- // GPRs from being used. In this situation, the current argument could
- // only be allocated by rule C.8, so rule C.6 would mark these GPRs as
- // unusable anyway.
- // We do not have to do this if the argument is being passed ByVal, as the
- // backend can handle that situation correctly.
- const bool StackUsed = PreAllocationGPRs > NumGPRs || PreAllocationVFPs > NumVFPs;
- const bool IsByVal = I.info.isIndirect() && I.info.getIndirectByVal();
- if (!IsCPRC && PreAllocationGPRs < NumGPRs && AllocatedGPRs > NumGPRs &&
- StackUsed && !IsByVal) {
- llvm::Type *PaddingTy = llvm::ArrayType::get(
- llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreAllocationGPRs);
- if (I.info.canHaveCoerceToType()) {
- I.info = ABIArgInfo::getDirect(I.info.getCoerceToType() /* type */,
- 0 /* offset */, PaddingTy, true);
- } else {
- I.info = ABIArgInfo::getDirect(nullptr /* type */, 0 /* offset */,
- PaddingTy, true);
- }
- }
- }
+
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type, FI.isVariadic());
// Always honor user-specified calling convention.
if (FI.getCallingConvention() != llvm::CallingConv::C)
@@ -4512,7 +4594,7 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
llvm::CallingConv::ID cc = getRuntimeCC();
if (cc != llvm::CallingConv::C)
- FI.setEffectiveCallingConvention(cc);
+ FI.setEffectiveCallingConvention(cc);
}
/// Return the default calling convention that LLVM will use.
@@ -4550,64 +4632,8 @@ void ARMABIInfo::setCCs() {
llvm::CallingConv::ARM_APCS : llvm::CallingConv::ARM_AAPCS);
}
-/// markAllocatedVFPs - update VFPRegs according to the alignment and
-/// number of VFP registers (unit is S register) requested.
-void ARMABIInfo::markAllocatedVFPs(unsigned Alignment,
- unsigned NumRequired) const {
- // Early Exit.
- if (AllocatedVFPs >= 16) {
- // We use AllocatedVFP > 16 to signal that some CPRCs were allocated on
- // the stack.
- AllocatedVFPs = 17;
- return;
- }
- // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
- // VFP registers of the appropriate type unallocated then the argument is
- // allocated to the lowest-numbered sequence of such registers.
- for (unsigned I = 0; I < 16; I += Alignment) {
- bool FoundSlot = true;
- for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
- if (J >= 16 || VFPRegs[J]) {
- FoundSlot = false;
- break;
- }
- if (FoundSlot) {
- for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
- VFPRegs[J] = 1;
- AllocatedVFPs += NumRequired;
- return;
- }
- }
- // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
- // unallocated are marked as unavailable.
- for (unsigned I = 0; I < 16; I++)
- VFPRegs[I] = 1;
- AllocatedVFPs = 17; // We do not have enough VFP registers.
-}
-
-/// Update AllocatedGPRs to record the number of general purpose registers
-/// which have been allocated. It is valid for AllocatedGPRs to go above 4,
-/// this represents arguments being stored on the stack.
-void ARMABIInfo::markAllocatedGPRs(unsigned Alignment,
- unsigned NumRequired) const {
- assert((Alignment == 1 || Alignment == 2) && "Alignment must be 4 or 8 bytes");
-
- if (Alignment == 2 && AllocatedGPRs & 0x1)
- AllocatedGPRs += 1;
-
- AllocatedGPRs += NumRequired;
-}
-
-void ARMABIInfo::resetAllocatedRegs(void) const {
- AllocatedGPRs = 0;
- AllocatedVFPs = 0;
- for (unsigned i = 0; i < NumVFPs; ++i)
- VFPRegs[i] = 0;
-}
-
-ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
- bool &IsCPRC) const {
- // We update number of allocated VFPs according to
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
+ bool isVariadic) const {
// 6.1.2.1 The following argument types are VFP CPRCs:
// A single-precision floating-point type (including promoted
// half-precision types); A double-precision floating-point type;
@@ -4625,58 +4651,20 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
if (Size <= 32) {
llvm::Type *ResType =
llvm::Type::getInt32Ty(getVMContext());
- markAllocatedGPRs(1, 1);
return ABIArgInfo::getDirect(ResType);
}
if (Size == 64) {
llvm::Type *ResType = llvm::VectorType::get(
llvm::Type::getInt32Ty(getVMContext()), 2);
- if (getABIKind() == ARMABIInfo::AAPCS || isVariadic){
- markAllocatedGPRs(2, 2);
- } else {
- markAllocatedVFPs(2, 2);
- IsCPRC = true;
- }
return ABIArgInfo::getDirect(ResType);
}
if (Size == 128) {
llvm::Type *ResType = llvm::VectorType::get(
llvm::Type::getInt32Ty(getVMContext()), 4);
- if (getABIKind() == ARMABIInfo::AAPCS || isVariadic) {
- markAllocatedGPRs(2, 4);
- } else {
- markAllocatedVFPs(4, 4);
- IsCPRC = true;
- }
return ABIArgInfo::getDirect(ResType);
}
- markAllocatedGPRs(1, 1);
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
- // Update VFPRegs for legal vector types.
- if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
- if (const VectorType *VT = Ty->getAs<VectorType>()) {
- uint64_t Size = getContext().getTypeSize(VT);
- // Size of a legal vector should be power of 2 and above 64.
- markAllocatedVFPs(Size >= 128 ? 4 : 2, Size / 32);
- IsCPRC = true;
- }
- }
- // Update VFPRegs for floating point types.
- if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- if (BT->getKind() == BuiltinType::Half ||
- BT->getKind() == BuiltinType::Float) {
- markAllocatedVFPs(1, 1);
- IsCPRC = true;
- }
- if (BT->getKind() == BuiltinType::Double ||
- BT->getKind() == BuiltinType::LongDouble) {
- markAllocatedVFPs(2, 2);
- IsCPRC = true;
- }
- }
- }
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
@@ -4684,15 +4672,11 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
Ty = EnumTy->getDecl()->getIntegerType();
}
- unsigned Size = getContext().getTypeSize(Ty);
- if (!IsCPRC)
- markAllocatedGPRs(Size > 32 ? 2 : 1, (Size + 31) / 32);
return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
: ABIArgInfo::getDirect());
}
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
- markAllocatedGPRs(1, 1);
return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
}
@@ -4708,19 +4692,6 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
if (isHomogeneousAggregate(Ty, Base, Members)) {
assert(Base && "Base class should be set for homogeneous aggregate");
// Base can be a floating-point or a vector.
- if (Base->isVectorType()) {
- // ElementSize is in number of floats.
- unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
- markAllocatedVFPs(ElementSize,
- Members * ElementSize);
- } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
- markAllocatedVFPs(1, Members);
- else {
- assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
- Base->isSpecificBuiltinType(BuiltinType::LongDouble));
- markAllocatedVFPs(2, Members * 2);
- }
- IsCPRC = true;
return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
}
}
@@ -4732,15 +4703,11 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
uint64_t ABIAlign = 4;
uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
- getABIKind() == ARMABIInfo::AAPCS)
+ getABIKind() == ARMABIInfo::AAPCS)
ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
+
if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
- // Update Allocated GPRs. Since this is only used when the size of the
- // argument is greater than 64 bytes, this will always use up any available
- // registers (of which there are 4). We also don't care about getting the
- // alignment right, because general-purpose registers cannot be back-filled.
- markAllocatedGPRs(1, 4);
- return ABIArgInfo::getIndirect(TyAlign, /*ByVal=*/true,
+ return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true,
/*Realign=*/TyAlign > ABIAlign);
}
@@ -4752,11 +4719,9 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
if (getContext().getTypeAlign(Ty) <= 32) {
ElemTy = llvm::Type::getInt32Ty(getVMContext());
SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
- markAllocatedGPRs(1, SizeRegs);
} else {
ElemTy = llvm::Type::getInt64Ty(getVMContext());
SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
- markAllocatedGPRs(2, SizeRegs * 2);
}
return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
@@ -4856,7 +4821,6 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
// Large vector types should be returned via memory.
if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
- markAllocatedGPRs(1, 1);
return ABIArgInfo::getIndirect(0);
}
@@ -4894,7 +4858,6 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
}
// Otherwise return in memory.
- markAllocatedGPRs(1, 1);
return ABIArgInfo::getIndirect(0);
}
@@ -4930,7 +4893,6 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
}
- markAllocatedGPRs(1, 1);
return ABIArgInfo::getIndirect(0);
}
@@ -5046,42 +5008,6 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return AddrTyped;
}
-namespace {
-
-class NaClARMABIInfo : public ABIInfo {
- public:
- NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
- : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
- void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
- private:
- PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
- ARMABIInfo NInfo; // Used for everything else.
-};
-
-class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo {
- public:
- NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
- : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
-};
-
-}
-
-void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
- if (FI.getASTCallingConvention() == CC_PnaclCall)
- PInfo.computeInfo(FI);
- else
- static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
-}
-
-llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- // Always use the native convention; calling pnacl-style varargs functions
- // is unsupported.
- return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
-}
-
//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//
@@ -5190,18 +5116,22 @@ SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
// Create !{<func-ref>, metadata !"kernel", i32 1} node
addNVVMMetadata(F, "kernel", 1);
}
- if (FD->hasAttr<CUDALaunchBoundsAttr>()) {
+ if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
// Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
- addNVVMMetadata(F, "maxntidx",
- FD->getAttr<CUDALaunchBoundsAttr>()->getMaxThreads());
- // min blocks is a default argument for CUDALaunchBoundsAttr, so getting a
- // zero value from getMinBlocks either means it was not specified in
- // __launch_bounds__ or the user specified a 0 value. In both cases, we
- // don't have to add a PTX directive.
- int MinCTASM = FD->getAttr<CUDALaunchBoundsAttr>()->getMinBlocks();
- if (MinCTASM > 0) {
- // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
- addNVVMMetadata(F, "minctasm", MinCTASM);
+ llvm::APSInt MaxThreads(32);
+ MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
+ if (MaxThreads > 0)
+ addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
+
+ // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
+ // not specified in __launch_bounds__ or if the user specified a 0 value,
+ // we don't have to add a PTX directive.
+ if (Attr->getMinBlocks()) {
+ llvm::APSInt MinBlocks(32);
+ MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
+ if (MinBlocks > 0)
+ // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
+ addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
}
}
}
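
Note: for reference, the attribute whose arguments are evaluated above; they may now be constant expressions rather than bare integer literals. A hypothetical CUDA-mode example that would emit maxntidx=256 and minctasm=4 metadata:

    #define WARP_SIZE 32
    __global__ void __launch_bounds__(8 * WARP_SIZE, 4) kernel(float *p) {
      p[0] = 1.0f;
    }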
@@ -5231,12 +5161,17 @@ void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
namespace {
class SystemZABIInfo : public ABIInfo {
+ bool HasVector;
+
public:
- SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+ SystemZABIInfo(CodeGenTypes &CGT, bool HV)
+ : ABIInfo(CGT), HasVector(HV) {}
bool isPromotableIntegerType(QualType Ty) const;
bool isCompoundType(QualType Ty) const;
+ bool isVectorArgumentType(QualType Ty) const;
bool isFPArgumentType(QualType Ty) const;
+ QualType GetSingleElementType(QualType Ty) const;
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType ArgTy) const;
@@ -5254,8 +5189,8 @@ public:
class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
- SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
+ SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
+ : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
};
}
@@ -5282,7 +5217,15 @@ bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
}
bool SystemZABIInfo::isCompoundType(QualType Ty) const {
- return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty);
+ return (Ty->isAnyComplexType() ||
+ Ty->isVectorType() ||
+ isAggregateTypeForABI(Ty));
+}
+
+bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
+ return (HasVector &&
+ Ty->isVectorType() &&
+ getContext().getTypeSize(Ty) <= 128);
}
bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
@@ -5295,9 +5238,13 @@ bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
return false;
}
+ return false;
+}
+
+QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
if (const RecordType *RT = Ty->getAsStructureType()) {
const RecordDecl *RD = RT->getDecl();
- bool Found = false;
+ QualType Found;
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
@@ -5308,36 +5255,34 @@ bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
if (isEmptyRecord(getContext(), Base, true))
continue;
- if (Found)
- return false;
- Found = isFPArgumentType(Base);
- if (!Found)
- return false;
+ if (!Found.isNull())
+ return Ty;
+ Found = GetSingleElementType(Base);
}
// Check the fields.
for (const auto *FD : RD->fields()) {
- // Empty bitfields don't affect things either way.
+ // For compatibility with GCC, ignore empty bitfields in C++ mode.
// Unlike isSingleElementStruct(), empty structure and array fields
// do count. So do anonymous bitfields that aren't zero-sized.
- if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
- return true;
+ if (getContext().getLangOpts().CPlusPlus &&
+ FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
+ continue;
// Unlike isSingleElementStruct(), arrays do not count.
- // Nested isFPArgumentType structures still do though.
- if (Found)
- return false;
- Found = isFPArgumentType(FD->getType());
- if (!Found)
- return false;
+ // Nested structures still do though.
+ if (!Found.isNull())
+ return Ty;
+ Found = GetSingleElementType(FD->getType());
}
// Unlike isSingleElementStruct(), trailing padding is allowed.
// An 8-byte aligned struct s { float f; } is passed as a double.
- return Found;
+ if (!Found.isNull())
+ return Found;
}
- return false;
+ return Ty;
}
llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
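
Note: source-level illustration (hypothetical types) of GetSingleElementType(), which drills through nested single-member records and returns the type unchanged once a second element is seen:

    struct Inner { float f; };
    struct Outer { Inner i; };        // single element: treated like a bare float
    struct Mixed { float f; int g; }; // two elements: returned as-is (compound)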
@@ -5350,26 +5295,56 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// i8 *__reg_save_area;
// };
- // Every argument occupies 8 bytes and is passed by preference in either
- // GPRs or FPRs.
+ // Every non-vector argument occupies 8 bytes and is passed by preference
+ // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
+ // always passed on the stack.
Ty = CGF.getContext().getCanonicalType(Ty);
+ llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Type *APTy = llvm::PointerType::getUnqual(ArgTy);
ABIArgInfo AI = classifyArgumentType(Ty);
- bool InFPRs = isFPArgumentType(Ty);
-
- llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
bool IsIndirect = AI.isIndirect();
+ bool InFPRs = false;
+ bool IsVector = false;
unsigned UnpaddedBitSize;
if (IsIndirect) {
APTy = llvm::PointerType::getUnqual(APTy);
UnpaddedBitSize = 64;
- } else
+ } else {
+ if (AI.getCoerceToType())
+ ArgTy = AI.getCoerceToType();
+ InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
+ IsVector = ArgTy->isVectorTy();
UnpaddedBitSize = getContext().getTypeSize(Ty);
- unsigned PaddedBitSize = 64;
+ }
+ unsigned PaddedBitSize = (IsVector && UnpaddedBitSize > 64) ? 128 : 64;
assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");
unsigned PaddedSize = PaddedBitSize / 8;
unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;
+ llvm::Type *IndexTy = CGF.Int64Ty;
+ llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
+
+ if (IsVector) {
+ // Work out the address of a vector argument on the stack.
+ // Vector arguments are always passed in the high bits of a
+ // single (8 byte) or double (16 byte) stack slot.
+ llvm::Value *OverflowArgAreaPtr =
+ CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 2,
+ "overflow_arg_area_ptr");
+ llvm::Value *OverflowArgArea =
+ CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
+ llvm::Value *MemAddr =
+ CGF.Builder.CreateBitCast(OverflowArgArea, APTy, "mem_addr");
+
+ // Update overflow_arg_area_ptr pointer
+ llvm::Value *NewOverflowArgArea =
+ CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
+ CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
+
+ return MemAddr;
+ }
+
unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
if (InFPRs) {
MaxRegs = 4; // Maximum of 4 FPR arguments
@@ -5383,10 +5358,9 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
RegPadding = Padding; // values are passed in the low bits of a GPR
}
- llvm::Value *RegCountPtr =
- CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
+ llvm::Value *RegCountPtr = CGF.Builder.CreateStructGEP(
+ nullptr, VAListAddr, RegCountField, "reg_count_ptr");
llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
- llvm::Type *IndexTy = RegCount->getType();
llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
"fits_in_regs");
@@ -5400,7 +5374,6 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CGF.EmitBlock(InRegBlock);
// Work out the address of an argument register.
- llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
llvm::Value *ScaledRegCount =
CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
llvm::Value *RegBase =
@@ -5408,7 +5381,7 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
llvm::Value *RegOffset =
CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
llvm::Value *RegSaveAreaPtr =
- CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
+ CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3, "reg_save_area_ptr");
llvm::Value *RegSaveArea =
CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
llvm::Value *RawRegAddr =
@@ -5427,8 +5400,8 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CGF.EmitBlock(InMemBlock);
// Work out the address of a stack argument.
- llvm::Value *OverflowArgAreaPtr =
- CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
+ llvm::Value *OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
+ nullptr, VAListAddr, 2, "overflow_arg_area_ptr");
llvm::Value *OverflowArgArea =
CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
@@ -5458,6 +5431,8 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
+ if (isVectorArgumentType(RetTy))
+ return ABIArgInfo::getDirect();
if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
return ABIArgInfo::getIndirect(0);
return (isPromotableIntegerType(RetTy) ?
@@ -5473,8 +5448,16 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
if (isPromotableIntegerType(Ty))
return ABIArgInfo::getExtend();
- // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
+ // Handle vector types and vector-like structure types. Note that
+ // as opposed to float-like structure types, we do not allow any
+ // padding for vector-like structures, so verify the sizes match.
uint64_t Size = getContext().getTypeSize(Ty);
+ QualType SingleElementTy = GetSingleElementType(Ty);
+ if (isVectorArgumentType(SingleElementTy) &&
+ getContext().getTypeSize(SingleElementTy) == Size)
+ return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
+
+ // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
@@ -5488,7 +5471,7 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
// The structure is passed as an unextended integer, a float, or a double.
llvm::Type *PassTy;
- if (isFPArgumentType(Ty)) {
+ if (isFPArgumentType(SingleElementTy)) {
assert(Size == 32 || Size == 64);
if (Size == 32)
PassTy = llvm::Type::getFloatTy(getVMContext());
@@ -5568,6 +5551,7 @@ public:
void computeInfo(CGFunctionInfo &FI) const override;
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override;
+ bool shouldSignExtUnsignedType(QualType Ty) const override;
};
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -5870,6 +5854,16 @@ llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return AddrTyped;
}
+bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
+ int TySize = getContext().getTypeSize(Ty);
+
+ // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
+ if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
+ return true;
+
+ return false;
+}
+
bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
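
Note: a source-level example of the MIPS64 behavior (hypothetical function). The N32/N64 conventions keep 32-bit values in registers in sign-extended form even when the C type is unsigned, so this parameter is now tagged signext rather than zeroext in the IR:

    unsigned addOne(unsigned x) { // i32 argument: signext on mips64 after this change
      return x + 1;
    }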
@@ -6264,12 +6258,7 @@ private:
// Check if Ty is a usable substitute for the coercion type.
bool isUsableType(llvm::StructType *Ty) const {
- if (Ty->getNumElements() != Elems.size())
- return false;
- for (unsigned i = 0, e = Elems.size(); i != e; ++i)
- if (Elems[i] != Ty->getElementType(i))
- return false;
- return true;
+ return llvm::makeArrayRef(Elems) == Ty->elements();
}
// Get the coercion type as a literal struct type.
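
Note: the rewritten isUsableType() folds the removed size check and loop into ArrayRef's element-wise operator==. A standalone sketch:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/IR/DerivedTypes.h"
    #include <vector>

    static bool sameElements(const std::vector<llvm::Type *> &Elems,
                             llvm::StructType *Ty) {
      // Compares lengths first, then each element type pointer in turn.
      return llvm::makeArrayRef(Elems) == Ty->elements();
    }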
@@ -7102,6 +7091,12 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
{
+ if (Triple.getOS() == llvm::Triple::Win32) {
+ TheTargetCodeGenInfo =
+ new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP);
+ return *TheTargetCodeGenInfo;
+ }
+
ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
if (getTarget().getABI() == "apcs-gnu")
Kind = ARMABIInfo::APCS;
@@ -7110,14 +7105,7 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
Kind = ARMABIInfo::AAPCS_VFP;
- switch (Triple.getOS()) {
- case llvm::Triple::NaCl:
- return *(TheTargetCodeGenInfo =
- new NaClARMTargetCodeGenInfo(Types, Kind));
- default:
- return *(TheTargetCodeGenInfo =
- new ARMTargetCodeGenInfo(Types, Kind));
- }
+ return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
}
case llvm::Triple::ppc:
@@ -7127,19 +7115,21 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
if (getTarget().getABI() == "elfv2")
Kind = PPC64_SVR4_ABIInfo::ELFv2;
+ bool HasQPX = getTarget().getABI() == "elfv1-qpx";
return *(TheTargetCodeGenInfo =
- new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
+ new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
} else
return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
case llvm::Triple::ppc64le: {
assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
- if (getTarget().getABI() == "elfv1")
+ if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
Kind = PPC64_SVR4_ABIInfo::ELFv1;
+ bool HasQPX = getTarget().getABI() == "elfv1-qpx";
return *(TheTargetCodeGenInfo =
- new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
+ new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
}
case llvm::Triple::nvptx:
@@ -7149,8 +7139,11 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::msp430:
return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
- case llvm::Triple::systemz:
- return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
+ case llvm::Triple::systemz: {
+ bool HasVector = getTarget().getABI() == "vector";
+ return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types,
+ HasVector));
+ }
case llvm::Triple::tce:
return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));
@@ -7177,18 +7170,13 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
}
case llvm::Triple::x86_64: {
- bool HasAVX = getTarget().getABI() == "avx";
-
switch (Triple.getOS()) {
case llvm::Triple::Win32:
- return *(TheTargetCodeGenInfo =
- new WinX86_64TargetCodeGenInfo(Types, HasAVX));
- case llvm::Triple::NaCl:
- return *(TheTargetCodeGenInfo =
- new NaClX86_64TargetCodeGenInfo(Types, HasAVX));
+ return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
+ case llvm::Triple::PS4:
+ return *(TheTargetCodeGenInfo = new PS4TargetCodeGenInfo(Types));
default:
- return *(TheTargetCodeGenInfo =
- new X86_64TargetCodeGenInfo(Types, HasAVX));
+ return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types));
}
}
case llvm::Triple::hexagon: