| author | Dimitry Andric <dim@FreeBSD.org> | 2012-12-02 13:20:44 +0000 | 
|---|---|---|
| committer | Dimitry Andric <dim@FreeBSD.org> | 2012-12-02 13:20:44 +0000 | 
| commit | 13cc256e404620c1de0cbcc4e43ce1e2dbbc4898 (patch) | |
| tree | 2732d02d7d51218d6eed98ac7fcfc5b8794896b5 /lib/CodeGen/TargetInfo.cpp | |
| parent | 657bc3d9848e3be92029b2416031340988cd0111 (diff) | |
Diffstat (limited to 'lib/CodeGen/TargetInfo.cpp')
| -rw-r--r-- | lib/CodeGen/TargetInfo.cpp | 752 |

1 file changed, 663 insertions(+), 89 deletions(-)
```diff
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 9c23ed9871da..ffff0d0a1bc4 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -18,7 +18,7 @@
 #include "clang/AST/RecordLayout.h"
 #include "clang/Frontend/CodeGenOptions.h"
 #include "llvm/Type.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
 #include "llvm/ADT/Triple.h"
 #include "llvm/Support/raw_ostream.h"
 using namespace clang;
@@ -51,8 +51,8 @@ llvm::LLVMContext &ABIInfo::getVMContext() const {
   return CGT.getLLVMContext();
 }
 
-const llvm::TargetData &ABIInfo::getTargetData() const {
-  return CGT.getTargetData();
+const llvm::DataLayout &ABIInfo::getDataLayout() const {
+  return CGT.getDataLayout();
 }
@@ -389,6 +389,90 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
 }
 
+//===----------------------------------------------------------------------===//
+// le32/PNaCl bitcode ABI Implementation
+//===----------------------------------------------------------------------===//
+
+class PNaClABIInfo : public ABIInfo {
+ public:
+  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+  ABIArgInfo classifyReturnType(QualType RetTy) const;
+  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs) const;
+
+  virtual void computeInfo(CGFunctionInfo &FI) const;
+  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                 CodeGenFunction &CGF) const;
+};
+
+class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
+ public:
+  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
+};
+
+void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
+    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+    unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() : 0;
+
+    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+         it != ie; ++it)
+      it->info = classifyArgumentType(it->type, FreeRegs);
+  }
+
+llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                       CodeGenFunction &CGF) const {
+  return 0;
+}
+
+ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty,
+                                              unsigned &FreeRegs) const {
+  if (isAggregateTypeForABI(Ty)) {
+    // Records with non trivial destructors/constructors should not be passed
+    // by value.
+    FreeRegs = 0;
+    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+    return ABIArgInfo::getIndirect(0);
+  }
+
+  // Treat an enum type as its underlying type.
+  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+    Ty = EnumTy->getDecl()->getIntegerType();
+
+  ABIArgInfo BaseInfo = (Ty->isPromotableIntegerType() ?
+          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+
+  // Regparm regs hold 32 bits.
+  unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+  if (SizeInRegs == 0) return BaseInfo;
+  if (SizeInRegs > FreeRegs) {
+    FreeRegs = 0;
+    return BaseInfo;
+  }
+  FreeRegs -= SizeInRegs;
+  return BaseInfo.isDirect() ?
+      ABIArgInfo::getDirectInReg(BaseInfo.getCoerceToType()) :
+      ABIArgInfo::getExtendInReg(BaseInfo.getCoerceToType());
+}
+
+ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
+  if (RetTy->isVoidType())
+    return ABIArgInfo::getIgnore();
+
+  if (isAggregateTypeForABI(RetTy))
+    return ABIArgInfo::getIndirect(0);
+
+  // Treat an enum type as its underlying type.
+  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+    RetTy = EnumTy->getDecl()->getIntegerType();
+
+  return (RetTy->isPromotableIntegerType() ?
+          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
 /// UseX86_MMXType - Return true if this is an MMX type that should use the
 /// special x86_mmx type.
 bool UseX86_MMXType(llvm::Type *IRType) {
```
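The regparm accounting in the new PNaCl classifier rounds a scalar up to 32-bit register units and zeroes the budget once an argument spills. A minimal standalone sketch of that bookkeeping (the helper and driver are hypothetical, not part of the patch):

```cpp
#include <cstdio>

// Standalone model of the PNaCl regparm bookkeeping above: a scalar occupies
// ceil(bits/32) registers, and once an argument does not fit, the remaining
// register budget is zeroed so later arguments also go to the stack.
static bool useInReg(unsigned TypeBits, unsigned &FreeRegs) {
  unsigned SizeInRegs = (TypeBits + 31) / 32; // round up to 32-bit units
  if (SizeInRegs == 0)
    return false;
  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0; // spill: later arguments stay on the stack
    return false;
  }
  FreeRegs -= SizeInRegs;
  return true;
}

int main() {
  unsigned FreeRegs = 3; // e.g. __attribute__((regparm(3)))
  std::printf("i32 -> %s\n", useInReg(32, FreeRegs) ? "reg" : "stack"); // reg, 2 left
  std::printf("i64 -> %s\n", useInReg(64, FreeRegs) ? "reg" : "stack"); // reg, 0 left
  std::printf("i32 -> %s\n", useInReg(32, FreeRegs) ? "reg" : "stack"); // stack
  return 0;
}
```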
```diff
@@ -435,7 +519,8 @@ class X86_32ABIInfo : public ABIInfo {
   /// getIndirectResult - Given a source type \arg Ty, return a suitable result
   /// such that the argument will be passed in memory.
-  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;
+  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal,
+                               unsigned &FreeRegs) const;
 
   /// \brief Return the alignment to use for the given type on the stack.
   unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
@@ -443,9 +528,10 @@ class X86_32ABIInfo : public ABIInfo {
   Class classify(QualType Ty) const;
   ABIArgInfo classifyReturnType(QualType RetTy,
                                 unsigned callingConvention) const;
-  ABIArgInfo classifyArgumentTypeWithReg(QualType RetTy,
-                                         unsigned &FreeRegs) const;
-  ABIArgInfo classifyArgumentType(QualType RetTy) const;
+  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs,
+                                  bool IsFastCall) const;
+  bool shouldUseInReg(QualType Ty, unsigned &FreeRegs,
+                      bool IsFastCall, bool &NeedsPadding) const;
 
 public:
@@ -682,9 +768,15 @@ unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
   return MinABIStackAlignInBytes;
 }
 
-ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
-  if (!ByVal)
+ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
+                                            unsigned &FreeRegs) const {
+  if (!ByVal) {
+    if (FreeRegs) {
+      --FreeRegs; // Non-byval indirects just use one pointer.
+      return ABIArgInfo::getIndirectInReg(0, false);
+    }
     return ABIArgInfo::getIndirect(0, false);
+  }
 
   // Compute the byval alignment.
   unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
```
```diff
@@ -714,45 +806,51 @@ X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
   return Integer;
 }
 
-ABIArgInfo
-X86_32ABIInfo::classifyArgumentTypeWithReg(QualType Ty,
-                                           unsigned &FreeRegs) const {
-  // Common case first.
-  if (FreeRegs == 0)
-    return classifyArgumentType(Ty);
-
+bool X86_32ABIInfo::shouldUseInReg(QualType Ty, unsigned &FreeRegs,
+                                   bool IsFastCall, bool &NeedsPadding) const {
+  NeedsPadding = false;
   Class C = classify(Ty);
   if (C == Float)
-    return classifyArgumentType(Ty);
+    return false;
+
+  unsigned Size = getContext().getTypeSize(Ty);
+  unsigned SizeInRegs = (Size + 31) / 32;
 
-  unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
   if (SizeInRegs == 0)
-    return classifyArgumentType(Ty);
+    return false;
 
   if (SizeInRegs > FreeRegs) {
     FreeRegs = 0;
-    return classifyArgumentType(Ty);
+    return false;
   }
 
-  assert(SizeInRegs >= 1 && SizeInRegs <= 3);
+
   FreeRegs -= SizeInRegs;
 
-  // If it is a simple scalar, keep the type so that we produce a cleaner IR.
-  ABIArgInfo Foo = classifyArgumentType(Ty);
-  if (Foo.isDirect() && !Foo.getDirectOffset() && !Foo.getPaddingType())
-    return ABIArgInfo::getDirectInReg(Foo.getCoerceToType());
-  if (Foo.isExtend())
-    return ABIArgInfo::getExtendInReg(Foo.getCoerceToType());
+  if (IsFastCall) {
+    if (Size > 32)
+      return false;
+
+    if (Ty->isIntegralOrEnumerationType())
+      return true;
+
+    if (Ty->isPointerType())
+      return true;
+
+    if (Ty->isReferenceType())
+      return true;
+
+    if (FreeRegs)
+      NeedsPadding = true;
 
-  llvm::LLVMContext &LLVMContext = getVMContext();
-  llvm::Type *Int32 = llvm::Type::getInt32Ty(LLVMContext);
-  SmallVector<llvm::Type*, 3> Elements;
-  for (unsigned I = 0; I < SizeInRegs; ++I)
-    Elements.push_back(Int32);
-  llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
-  return ABIArgInfo::getDirectInReg(Result);
+    return false;
+  }
+
+  return true;
 }
 
-ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
+ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
+                                               unsigned &FreeRegs,
+                                               bool IsFastCall) const {
   // FIXME: Set alignment on indirect arguments.
   if (isAggregateTypeForABI(Ty)) {
     // Structures with flexible arrays are always indirect.
```
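The fastcall branch above is a set of type predicates plus a register budget. A standalone sketch with mock type descriptions (the MockType fields are illustrative stand-ins for clang's type queries, not part of the patch):

```cpp
#include <cstdio>

// Illustrative stand-ins for the clang type predicates consulted by
// X86_32ABIInfo::shouldUseInReg; just enough state to show the fastcall rule.
struct MockType {
  unsigned Bits;
  bool IsIntegral, IsPointer, IsReference;
};

// Mirrors the IsFastCall branch: registers are reserved first, but only a
// <=32-bit integral, pointer, or reference type actually rides in ECX/EDX.
static bool fastcallInReg(const MockType &T, unsigned &FreeRegs) {
  unsigned SizeInRegs = (T.Bits + 31) / 32;
  if (SizeInRegs == 0 || SizeInRegs > FreeRegs) {
    if (SizeInRegs > FreeRegs)
      FreeRegs = 0;
    return false;
  }
  FreeRegs -= SizeInRegs;
  if (T.Bits > 32)
    return false;
  return T.IsIntegral || T.IsPointer || T.IsReference;
}

int main() {
  unsigned FreeRegs = 2; // fastcall has two integer registers: ECX, EDX
  MockType Int = {32, true, false, false};
  MockType LongLong = {64, true, false, false};
  MockType Ptr = {32, false, true, false};
  std::printf("int       -> %s\n", fastcallInReg(Int, FreeRegs) ? "reg" : "stack");
  std::printf("long long -> %s\n", fastcallInReg(LongLong, FreeRegs) ? "reg" : "stack");
  std::printf("void*     -> %s\n", fastcallInReg(Ptr, FreeRegs) ? "reg" : "stack");
  return 0;
}
```

Note how the 64-bit argument consumes the remaining budget before being rejected, starving the pointer that follows; that matches the FreeRegs handling in the diff.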
```diff
@@ -760,25 +858,38 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
       // Structures with either a non-trivial destructor or a non-trivial
       // copy constructor are always indirect.
       if (hasNonTrivialDestructorOrCopyConstructor(RT))
-        return getIndirectResult(Ty, /*ByVal=*/false);
+        return getIndirectResult(Ty, false, FreeRegs);
 
       if (RT->getDecl()->hasFlexibleArrayMember())
-        return getIndirectResult(Ty);
+        return getIndirectResult(Ty, true, FreeRegs);
     }
 
     // Ignore empty structs/unions.
     if (isEmptyRecord(getContext(), Ty, true))
       return ABIArgInfo::getIgnore();
 
+    llvm::LLVMContext &LLVMContext = getVMContext();
+    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
+    bool NeedsPadding;
+    if (shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding)) {
+      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+      SmallVector<llvm::Type*, 3> Elements;
+      for (unsigned I = 0; I < SizeInRegs; ++I)
+        Elements.push_back(Int32);
+      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
+      return ABIArgInfo::getDirectInReg(Result);
+    }
+    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : 0;
+
     // Expand small (<= 128-bit) record types when we know that the stack layout
     // of those arguments will match the struct. This is important because the
     // LLVM backend isn't smart enough to remove byval, which inhibits many
     // optimizations.
     if (getContext().getTypeSize(Ty) <= 4*32 &&
         canExpandIndirectArgument(Ty, getContext()))
-      return ABIArgInfo::getExpand();
+      return ABIArgInfo::getExpandWithPadding(IsFastCall, PaddingType);
 
-    return getIndirectResult(Ty);
+    return getIndirectResult(Ty, true, FreeRegs);
   }
 
   if (const VectorType *VT = Ty->getAs<VectorType>()) {
@@ -809,16 +920,32 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
     Ty = EnumTy->getDecl()->getIntegerType();
 
-  return (Ty->isPromotableIntegerType() ?
-          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+  bool NeedsPadding;
+  bool InReg = shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding);
+
+  if (Ty->isPromotableIntegerType()) {
+    if (InReg)
+      return ABIArgInfo::getExtendInReg();
+    return ABIArgInfo::getExtend();
+  }
+  if (InReg)
+    return ABIArgInfo::getDirectInReg();
+  return ABIArgInfo::getDirect();
 }
 
 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
   FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
                                           FI.getCallingConvention());
 
-  unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() :
-    DefaultNumRegisterParameters;
+  unsigned CC = FI.getCallingConvention();
+  bool IsFastCall = CC == llvm::CallingConv::X86_FastCall;
+  unsigned FreeRegs;
+  if (IsFastCall)
+    FreeRegs = 2;
+  else if (FI.getHasRegParm())
+    FreeRegs = FI.getRegParm();
+  else
+    FreeRegs = DefaultNumRegisterParameters;
 
   // If the return value is indirect, then the hidden argument is consuming one
   // integer register.
@@ -832,7 +959,7 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
 
   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
        it != ie; ++it)
-    it->info = classifyArgumentTypeWithReg(it->type, FreeRegs);
+    it->info = classifyArgumentType(it->type, FreeRegs, IsFastCall);
 }
 
 llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -884,7 +1011,10 @@ void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
       llvm::Function *Fn = cast<llvm::Function>(GV);
 
       // Now add the 'alignstack' attribute with a value of 16.
-      Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
+      llvm::AttrBuilder B;
+      B.addStackAlignmentAttr(16);
+      Fn->addAttribute(llvm::AttrListPtr::FunctionIndex,
+                       llvm::Attributes::get(CGM.getLLVMContext(), B));
     }
   }
 }
```
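Both x86-32 register-parameter paths above are reachable from ordinary source. A minimal translation unit (assumes an x86-32 target, e.g. compiled with -m32; the register assignments are the usual GCC/Clang conventions, not something this patch defines):

```cpp
// regparm(3) passes the first three integer words in EAX, EDX, ECX;
// fastcall passes the first two eligible arguments in ECX, EDX.
__attribute__((regparm(3))) int sum3(int a, int b, int c) { return a + b + c; }
__attribute__((fastcall)) int sum2(int a, int b) { return a + b; }

int main() { return sum3(1, 2, 3) + sum2(4, 5); }
```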
```diff
@@ -1030,10 +1160,15 @@ class X86_64ABIInfo : public ABIInfo {
   }
 
   bool HasAVX;
+  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
+  // 64-bit hardware.
+  bool Has64BitPointers;
 
 public:
   X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
-      ABIInfo(CGT), HasAVX(hasavx) {}
+      ABIInfo(CGT), HasAVX(hasavx),
+      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
+  }
 
   bool isPassedUsingAVXType(QualType type) const {
     unsigned neededInt, neededSSE;
@@ -1070,7 +1205,7 @@ public:
 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
 public:
   X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
-    : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
+      : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
 
   const X86_64ABIInfo &getABIInfo() const {
     return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
@@ -1243,7 +1378,10 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
       Hi = Integer;
     } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
       Current = Integer;
-    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
+    } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
+               (k == BuiltinType::LongDouble &&
+                getContext().getTargetInfo().getTriple().getOS() ==
+                llvm::Triple::NativeClient)) {
       Current = SSE;
     } else if (k == BuiltinType::LongDouble) {
       Lo = X87;
@@ -1266,7 +1404,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
   }
 
   if (Ty->isMemberPointerType()) {
-    if (Ty->isMemberFunctionPointerType())
+    if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
       Lo = Hi = Integer;
     else
       Current = Integer;
@@ -1329,7 +1467,10 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
         Lo = Hi = Integer;
     } else if (ET == getContext().FloatTy)
       Current = SSE;
-    else if (ET == getContext().DoubleTy)
+    else if (ET == getContext().DoubleTy ||
+             (ET == getContext().LongDoubleTy &&
+              getContext().getTargetInfo().getTriple().getOS() ==
+              llvm::Triple::NativeClient))
       Lo = Hi = SSE;
     else if (ET == getContext().LongDoubleTy)
       Current = ComplexX87;
@@ -1708,7 +1849,7 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
 /// float at offset 4.  It is conservatively correct for this routine to return
 /// false.
 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
-                                  const llvm::TargetData &TD) {
+                                  const llvm::DataLayout &TD) {
   // Base case if we find a float.
   if (IROffset == 0 && IRType->isFloatTy())
     return true;
@@ -1748,8 +1889,8 @@ GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
   // We want to pass as <2 x float> if the LLVM IR type contains a float at
   // offset+0 and offset+4.  Walk the LLVM IR type to find out if this is the
   // case.
-  if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) &&
-      ContainsFloatAtOffset(IRType, IROffset+4, getTargetData()))
+  if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
+      ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
     return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
 
   return llvm::Type::getDoubleTy(getVMContext());
@@ -1777,7 +1918,8 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
   // returning an 8-byte unit starting with it.  See if we can safely use it.
   if (IROffset == 0) {
     // Pointers and int64's always fill the 8-byte unit.
```
```diff
-    if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64))
+    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
+        IRType->isIntegerTy(64))
       return IRType;
 
     // If we have a 1/2/4-byte integer, we can use it only if the rest of the
@@ -1787,8 +1929,10 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
     // have to do this analysis on the source type because we can't depend on
     // unions being lowered a specific way etc.
     if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
-        IRType->isIntegerTy(32)) {
-      unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth();
+        IRType->isIntegerTy(32) ||
+        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
+      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
+          cast<llvm::IntegerType>(IRType)->getBitWidth();
 
       if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                 SourceOffset*8+64, getContext()))
@@ -1798,7 +1942,7 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
 
   if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
     // If this is a struct, recurse into the field at the specified offset.
-    const llvm::StructLayout *SL = getTargetData().getStructLayout(STy);
+    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
     if (IROffset < SL->getSizeInBytes()) {
       unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
       IROffset -= SL->getElementOffset(FieldIdx);
@@ -1810,7 +1954,7 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
 
   if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
     llvm::Type *EltTy = ATy->getElementType();
-    unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
+    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
     unsigned EltOffset = IROffset/EltSize*EltSize;
     return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                   SourceOffset);
@@ -1837,14 +1981,14 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
 /// return {i32*, float}.
 static llvm::Type *
 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
-                           const llvm::TargetData &TD) {
+                           const llvm::DataLayout &TD) {
   // In order to correctly satisfy the ABI, we need the high part to start
   // at offset 8.  If the high and low parts we inferred are both 4-byte types
   // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
   // the second element at offset 8.  Check for this:
   unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
   unsigned HiAlign = TD.getABITypeAlignment(Hi);
-  unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
+  unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
   assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
 
   // To handle this, we have to increase the size of the low part so that the
```
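The HiStart check above is pure arithmetic and worth seeing in isolation. A standalone model (roundUp is a hypothetical stand-in for llvm::DataLayout::RoundUpAlignment):

```cpp
#include <cassert>
#include <cstdio>

// Standalone model of the offset check in GetX86_64ByValArgumentPair: the
// high eightbyte must start at offset 8, so if rounding the low part's size
// up to the high part's alignment lands short of 8, the low part must be
// widened (conceptually to i64) to push the high part out to offset 8.
static unsigned roundUp(unsigned Value, unsigned Align) {
  return (Value + Align - 1) / Align * Align;
}

int main() {
  // Lo = i32 (size 4), Hi = i32 (align 4): HiStart = 4, not 8 -> widen Lo.
  unsigned HiStart = roundUp(4, 4);
  std::printf("{i32,i32}: high part would start at %u -> %s\n", HiStart,
              HiStart == 8 ? "ok" : "pad low part out to 8 bytes");
  // Lo = double (size 8), Hi = i32: HiStart = 8 -> layout already correct.
  HiStart = roundUp(8, 4);
  std::printf("{double,i32}: high part starts at %u -> ok\n", HiStart);
  assert(HiStart == 8);
  return 0;
}
```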
```diff
@@ -1996,7 +2140,7 @@ classifyReturnType(QualType RetTy) const {
   // known to pass in the high eightbyte of the result.  We do this by forming a
   // first class struct aggregate with the high and low part: {low, high}
   if (HighPart)
-    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
+    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
 
   return ABIArgInfo::getDirect(ResType);
 }
@@ -2122,7 +2266,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(
   // known to pass in the high eightbyte of the result.  We do this by forming a
   // first class struct aggregate with the high and low part: {low, high}
   if (HighPart)
-    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
+    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
 
   return ABIArgInfo::getDirect(ResType);
 }
@@ -2435,6 +2579,43 @@ llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
   return AddrTyped;
 }
 
+namespace {
+
+class NaClX86_64ABIInfo : public ABIInfo {
+ public:
+  NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
+      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
+  virtual void computeInfo(CGFunctionInfo &FI) const;
+  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                 CodeGenFunction &CGF) const;
+ private:
+  PNaClABIInfo PInfo;  // Used for generating calls with pnaclcall callingconv.
+  X86_64ABIInfo NInfo; // Used for everything else.
+};
+
+class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+ public:
+  NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
+      : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
+};
+
+}
+
+void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+  if (FI.getASTCallingConvention() == CC_PnaclCall)
+    PInfo.computeInfo(FI);
+  else
+    NInfo.computeInfo(FI);
+}
+
+llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                          CodeGenFunction &CGF) const {
+  // Always use the native convention; calling pnacl-style varargs functions
+  // is unsupported.
+  return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
+}
+
+
 // PowerPC-32
 namespace {
@@ -2497,6 +2678,62 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
 
 // PowerPC-64
 namespace {
 
+/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
+class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
+
+public:
+  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+  bool isPromotableTypeForABI(QualType Ty) const;
+
+  ABIArgInfo classifyReturnType(QualType RetTy) const;
+  ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+  // TODO: We can add more logic to computeInfo to improve performance.
+  // Example: For aggregate arguments that fit in a register, we could
+  // use getDirectInReg (as is done below for structs containing a single
+  // floating-point value) to avoid pushing them to memory on function
+  // entry.  This would require changing the logic in PPCISelLowering
+  // when lowering the parameters in the caller and args in the callee.
+  virtual void computeInfo(CGFunctionInfo &FI) const {
+    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+         it != ie; ++it) {
+      // We rely on the default argument classification for the most part.
```
+      // One exception:  An aggregate containing a single floating-point
+      // item must be passed in a register if one is available.
+      const Type *T = isSingleElementStruct(it->type, getContext());
+      if (T) {
+        const BuiltinType *BT = T->getAs<BuiltinType>();
+        if (BT && BT->isFloatingPoint()) {
+          QualType QT(T, 0);
+          it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
+          continue;
+        }
+      }
+      it->info = classifyArgumentType(it->type);
+    }
+  }
+
+  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr,
+                                 QualType Ty,
+                                 CodeGenFunction &CGF) const;
+};
+
+class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT)
+    : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {}
+
+  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+    // This is recovered from gcc output.
+    return 1; // r1 is the dedicated stack pointer
+  }
+
+  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+                               llvm::Value *Address) const;
+};
+
 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
 public:
   PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
@@ -2512,9 +2749,94 @@ public:
 
 }
 
+// Return true if the ABI requires Ty to be passed sign- or zero-
+// extended to 64 bits.
 bool
-PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
-                                                llvm::Value *Address) const {
+PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
+  // Treat an enum type as its underlying type.
+  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+    Ty = EnumTy->getDecl()->getIntegerType();
+
+  // Promotable integer types are required to be promoted by the ABI.
+  if (Ty->isPromotableIntegerType())
+    return true;
+
+  // In addition to the usual promotable integer types, we also need to
+  // extend all 32-bit types, since the ABI requires promotion to 64 bits.
+  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+    switch (BT->getKind()) {
+    case BuiltinType::Int:
+    case BuiltinType::UInt:
+      return true;
+    default:
+      break;
+    }
+
+  return false;
+}
+
+ABIArgInfo
+PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
+  if (isAggregateTypeForABI(Ty)) {
+    // Records with non trivial destructors/constructors should not be passed
+    // by value.
+    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+    return ABIArgInfo::getIndirect(0);
+  }
+
+  return (isPromotableTypeForABI(Ty) ?
+          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo
+PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
+  if (RetTy->isVoidType())
+    return ABIArgInfo::getIgnore();
+
+  if (isAggregateTypeForABI(RetTy))
+    return ABIArgInfo::getIndirect(0);
+
+  return (isPromotableTypeForABI(RetTy) ?
+          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
```
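The promotion rule the new SVR4 code enforces is small enough to model directly. A sketch with a hypothetical kind enum standing in for clang's BuiltinType:

```cpp
#include <cstdio>

// Standalone model of PPC64_SVR4_ABIInfo::isPromotableTypeForABI: besides the
// types C already promotes (bool, char, short), 32-bit int/unsigned must also
// be sign-/zero-extended to a full 64-bit register by the caller.
enum Kind { Bool, Char, Short, Int, UInt, Long };

static bool promotableOnPPC64(Kind K) {
  switch (K) {
  case Bool: case Char: case Short: // promotable in C already
  case Int: case UInt:              // extended to 64 bits by the SVR4 ABI
    return true;
  default:
    return false;                   // long is already 64 bits wide
  }
}

int main() {
  std::printf("int extended: %d, long extended: %d\n",
              promotableOnPPC64(Int), promotableOnPPC64(Long));
  return 0;
}
```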
+
+// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
+llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
+                                           QualType Ty,
+                                           CodeGenFunction &CGF) const {
+  llvm::Type *BP = CGF.Int8PtrTy;
+  llvm::Type *BPP = CGF.Int8PtrPtrTy;
+
+  CGBuilderTy &Builder = CGF.Builder;
+  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
+  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+
+  // Update the va_list pointer.
+  unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
+  unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
+  llvm::Value *NextAddr =
+    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
+                      "ap.next");
+  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+  // If the argument is smaller than 8 bytes, it is right-adjusted in
+  // its doubleword slot.  Adjust the pointer to pick it up from the
+  // correct offset.
+  if (SizeInBytes < 8) {
+    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
+    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
+    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
+  }
+
+  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+  return Builder.CreateBitCast(Addr, PTy);
+}
```
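The right-adjustment is easiest to see with concrete numbers. A standalone model of the doubleword-slot arithmetic (addresses are hypothetical):

```cpp
#include <cstdio>

// Standalone model of the big-endian adjustment in the PPC64 EmitVAArg above:
// each va_arg slot is an 8-byte doubleword, and a smaller argument sits at the
// slot's high-address (right) end, so the load address is bumped forward.
int main() {
  const unsigned long long SlotAddr = 0x1000; // hypothetical ap.cur
  const unsigned Sizes[] = {1, 2, 4, 8};
  for (unsigned i = 0; i < 4; ++i) {
    unsigned SizeInBytes = Sizes[i];
    unsigned long long Addr = SlotAddr;
    if (SizeInBytes < 8)
      Addr += 8 - SizeInBytes; // right-adjust within the doubleword
    std::printf("%u-byte arg read at 0x%llx, next slot at 0x%llx\n",
                SizeInBytes, Addr, SlotAddr + 8);
  }
  return 0;
}
```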
+
+static bool
+PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+                              llvm::Value *Address) {
   // This is calculated from the LLVM and GCC tables and verified
   // against gcc output.  AFAIK all ABIs use the same encoding.
@@ -2553,6 +2875,21 @@ PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
   return false;
 }
 
+bool
+PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
+  CodeGen::CodeGenFunction &CGF,
+  llvm::Value *Address) const {
+
+  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
+}
+
+bool
+PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+                                                llvm::Value *Address) const {
+
+  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
+}
+
 //===----------------------------------------------------------------------===//
 // ARM ABI Implementation
 //===----------------------------------------------------------------------===//
@@ -2576,14 +2913,18 @@ public:
   bool isEABI() const {
     StringRef Env =
       getContext().getTargetInfo().getTriple().getEnvironmentName();
-    return (Env == "gnueabi" || Env == "eabi" || Env == "androideabi");
+    return (Env == "gnueabi" || Env == "eabi" ||
+            Env == "android" || Env == "androideabi");
   }
 
 private:
   ABIKind getABIKind() const { return Kind; }
 
   ABIArgInfo classifyReturnType(QualType RetTy) const;
-  ABIArgInfo classifyArgumentType(QualType RetTy) const;
+  ABIArgInfo classifyArgumentType(QualType RetTy, int *VFPRegs,
+                                  unsigned &AllocatedVFP,
+                                  bool &IsHA) const;
+  bool isIllegalVectorType(QualType Ty) const;
 
   virtual void computeInfo(CGFunctionInfo &FI) const;
 
@@ -2626,10 +2967,33 @@ public:
 }
 
 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
+  // To correctly handle Homogeneous Aggregate, we need to keep track of the
+  // VFP registers allocated so far.
+  // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
+  // VFP registers of the appropriate type unallocated then the argument is
+  // allocated to the lowest-numbered sequence of such registers.
+  // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
+  // unallocated are marked as unavailable.
+  unsigned AllocatedVFP = 0;
+  int VFPRegs[16] = { 0 };
   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
-       it != ie; ++it)
-    it->info = classifyArgumentType(it->type);
+       it != ie; ++it) {
+    unsigned PreAllocation = AllocatedVFP;
+    bool IsHA = false;
+    // 6.1.2.3 There is one VFP co-processor register class using registers
+    // s0-s15 (d0-d7) for passing arguments.
+    const unsigned NumVFPs = 16;
+    it->info = classifyArgumentType(it->type, VFPRegs, AllocatedVFP, IsHA);
+    // If we do not have enough VFP registers for the HA, any VFP registers
+    // that are unallocated are marked as unavailable. To achieve this, we add
+    // padding of (NumVFPs - PreAllocation) floats.
+    if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
+      llvm::Type *PaddingTy = llvm::ArrayType::get(
+          llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
+      it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
+    }
+  }
 
   // Always honor user-specified calling convention.
   if (FI.getCallingConvention() != llvm::CallingConv::C)
@@ -2637,7 +3001,9 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
 
   // Calling convention as default by an ABI.
   llvm::CallingConv::ID DefaultCC;
-  if (isEABI())
+  if (getContext().getTargetInfo().getTriple().getEnvironmentName()=="gnueabihf")
+    DefaultCC = llvm::CallingConv::ARM_AAPCS_VFP;
+  else if (isEABI())
    DefaultCC = llvm::CallingConv::ARM_AAPCS;
   else
     DefaultCC = llvm::CallingConv::ARM_APCS;
@@ -2729,7 +3095,88 @@ static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
   return (Members > 0 && Members <= 4);
 }
 
-ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
+/// markAllocatedVFPs - update VFPRegs according to the alignment and
+/// number of VFP registers (unit is S register) requested.
+static void markAllocatedVFPs(int *VFPRegs, unsigned &AllocatedVFP,
+                              unsigned Alignment,
+                              unsigned NumRequired) {
+  // Early Exit.
+  if (AllocatedVFP >= 16)
+    return;
+  // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
+  // VFP registers of the appropriate type unallocated then the argument is
+  // allocated to the lowest-numbered sequence of such registers.
+  for (unsigned I = 0; I < 16; I += Alignment) {
+    bool FoundSlot = true;
+    for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
+      if (J >= 16 || VFPRegs[J]) {
+         FoundSlot = false;
+         break;
+      }
+    if (FoundSlot) {
+      for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
+        VFPRegs[J] = 1;
+      AllocatedVFP += NumRequired;
+      return;
+    }
+  }
+  // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
+  // unallocated are marked as unavailable.
+  for (unsigned I = 0; I < 16; I++)
+    VFPRegs[I] = 1;
+  AllocatedVFP = 17; // We do not have enough VFP registers.
+}
```
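Because markAllocatedVFPs is self-contained, it can be exercised outside clang. The harness below lifts the algorithm as-is (only the driver is new) and shows the lowest-numbered-sequence rule back-filling s1 after a double skipped over it:

```cpp
#include <cstdio>

// The allocator from the patch, lifted verbatim into a standalone harness so
// the AAPCS C.1.vfp / C.2.vfp behavior can be observed directly.
static void markAllocatedVFPs(int *VFPRegs, unsigned &AllocatedVFP,
                              unsigned Alignment, unsigned NumRequired) {
  if (AllocatedVFP >= 16)
    return;
  for (unsigned I = 0; I < 16; I += Alignment) {
    bool FoundSlot = true;
    for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
      if (J >= 16 || VFPRegs[J]) { FoundSlot = false; break; }
    if (FoundSlot) {
      for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
        VFPRegs[J] = 1;
      AllocatedVFP += NumRequired;
      return;
    }
  }
  for (unsigned I = 0; I < 16; I++)
    VFPRegs[I] = 1;
  AllocatedVFP = 17; // out of registers: everything else goes to the stack
}

int main() {
  int VFPRegs[16] = {0};
  unsigned Allocated = 0;
  markAllocatedVFPs(VFPRegs, Allocated, 1, 1); // float  -> s0
  markAllocatedVFPs(VFPRegs, Allocated, 2, 2); // double -> d1 (s2,s3); s1 skipped
  markAllocatedVFPs(VFPRegs, Allocated, 1, 1); // float  -> back-fills s1
  std::printf("allocated S registers: %u\n", Allocated); // prints 4
  return 0;
}
```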
+
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, int *VFPRegs,
+                                            unsigned &AllocatedVFP,
+                                            bool &IsHA) const {
+  // We update number of allocated VFPs according to
+  // 6.1.2.1 The following argument types are VFP CPRCs:
+  //   A single-precision floating-point type (including promoted
+  //   half-precision types); A double-precision floating-point type;
+  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
+  //   with a Base Type of a single- or double-precision floating-point type,
+  //   64-bit containerized vectors or 128-bit containerized vectors with one
+  //   to four Elements.
+
+  // Handle illegal vector types here.
+  if (isIllegalVectorType(Ty)) {
+    uint64_t Size = getContext().getTypeSize(Ty);
+    if (Size <= 32) {
+      llvm::Type *ResType =
+          llvm::Type::getInt32Ty(getVMContext());
+      return ABIArgInfo::getDirect(ResType);
+    }
+    if (Size == 64) {
+      llvm::Type *ResType = llvm::VectorType::get(
+          llvm::Type::getInt32Ty(getVMContext()), 2);
+      markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2);
+      return ABIArgInfo::getDirect(ResType);
+    }
+    if (Size == 128) {
+      llvm::Type *ResType = llvm::VectorType::get(
+          llvm::Type::getInt32Ty(getVMContext()), 4);
+      markAllocatedVFPs(VFPRegs, AllocatedVFP, 4, 4);
+      return ABIArgInfo::getDirect(ResType);
+    }
+    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+  }
+  // Update VFPRegs for legal vector types.
+  if (const VectorType *VT = Ty->getAs<VectorType>()) {
+    uint64_t Size = getContext().getTypeSize(VT);
+    // Size of a legal vector should be power of 2 and above 64.
+    markAllocatedVFPs(VFPRegs, AllocatedVFP, Size >= 128 ? 4 : 2, Size / 32);
+  }
+  // Update VFPRegs for floating point types.
+  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+    if (BT->getKind() == BuiltinType::Half ||
+        BT->getKind() == BuiltinType::Float)
+      markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, 1);
+    if (BT->getKind() == BuiltinType::Double ||
+        BT->getKind() == BuiltinType::LongDouble)
+      markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2);
+  }
+
   if (!isAggregateTypeForABI(Ty)) {
     // Treat an enum type as its underlying type.
     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
@@ -2749,18 +3196,42 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
 
   if (getABIKind() == ARMABIInfo::AAPCS_VFP) {
-    // Homogeneous Aggregates need to be expanded.
+    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
+    // into VFP registers.
     const Type *Base = 0;
-    if (isHomogeneousAggregate(Ty, Base, getContext())) {
+    uint64_t Members = 0;
+    if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
       assert(Base && "Base class should be set for homogeneous aggregate");
+      // Base can be a floating-point or a vector.
+      if (Base->isVectorType()) {
+        // ElementSize is in number of floats.
+        unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
+        markAllocatedVFPs(VFPRegs, AllocatedVFP, ElementSize,
+                          Members * ElementSize);
+      } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
+        markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, Members);
+      else {
+        assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
+               Base->isSpecificBuiltinType(BuiltinType::LongDouble));
+        markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, Members * 2);
+      }
+      IsHA = true;
       return ABIArgInfo::getExpand();
     }
   }
 
   // Support byval for ARM.
-  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64) ||
-      getContext().getTypeAlign(Ty) > 64) {
-    return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+  // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
+  // most 8-byte. We realign the indirect argument if type alignment is bigger
+  // than ABI alignment.
+  uint64_t ABIAlign = 4;
+  uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
+  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
+      getABIKind() == ARMABIInfo::AAPCS)
+    ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
+  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
+    return ABIArgInfo::getIndirect(0, /*ByVal=*/true,
+           /*Realign=*/TyAlign > ABIAlign);
   }
 
   // Otherwise, pass by coercing to a structure of the appropriate size.
```
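What qualifies as a homogeneous aggregate is easiest to show from the C++ side. A small translation unit (target-specific: assumes a hard-float ARM target such as armv7-unknown-linux-gnueabihf):

```cpp
// A homogeneous aggregate: up to four members of a single floating-point
// base type. Under AAPCS-VFP, a Quad argument is expanded into s0-s3 rather
// than passed in core registers; a fifth float member would disqualify it
// and send the struct down the byval path handled above.
struct Quad { float x, y, z, w; };

float first(Quad q) { return q.x; } // q.x arrives in s0 under AAPCS-VFP

int main() {
  Quad q = {1.0f, 2.0f, 3.0f, 4.0f};
  return static_cast<int>(first(q));
}
```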
```diff
@@ -2946,6 +3417,21 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
   return ABIArgInfo::getIndirect(0);
 }
 
+/// isIllegalVector - check whether Ty is an illegal vector type.
+bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
+  if (const VectorType *VT = Ty->getAs<VectorType>()) {
+    // Check whether VT is legal.
+    unsigned NumElements = VT->getNumElements();
+    uint64_t Size = getContext().getTypeSize(VT);
+    // NumElements should be power of 2.
+    if ((NumElements & (NumElements - 1)) != 0)
+      return true;
+    // Size should be greater than 32 bits.
+    return Size <= 32;
+  }
+  return false;
+}
+
 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                    CodeGenFunction &CGF) const {
   llvm::Type *BP = CGF.Int8PtrTy;
@@ -2954,30 +3440,104 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
   CGBuilderTy &Builder = CGF.Builder;
   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
-  // Handle address alignment for type alignment > 32 bits
+
+  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
   uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
+  bool IsIndirect = false;
+
+  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
+  // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
+  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
+      getABIKind() == ARMABIInfo::AAPCS)
+    TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
+  else
+    TyAlign = 4;
+  // Use indirect if size of the illegal vector is bigger than 16 bytes.
+  if (isIllegalVectorType(Ty) && Size > 16) {
+    IsIndirect = true;
+    Size = 4;
+    TyAlign = 4;
+  }
+
+  // Handle address alignment for ABI alignment > 4 bytes.
   if (TyAlign > 4) {
     assert((TyAlign & (TyAlign - 1)) == 0 &&
            "Alignment is not power of 2!");
     llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
     AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
     AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
-    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
+    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
   }
 
-  llvm::Type *PTy =
-    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
-  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
   uint64_t Offset =
-    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+    llvm::RoundUpToAlignment(Size, 4);
   llvm::Value *NextAddr =
     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                       "ap.next");
   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
 
+  if (IsIndirect)
+    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
+  else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
+    // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
+    // may not be correctly aligned for the vector type. We create an aligned
+    // temporary space and copy the content over from ap.cur to the temporary
+    // space. This is necessary if the natural alignment of the type is greater
+    // than the ABI alignment.
+    llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
+    CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
+    llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
+                                                    "var.align");
+    llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
+    llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
+    Builder.CreateMemCpy(Dst, Src,
+        llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
+        TyAlign, false);
+    Addr = AlignedTemp; // The content is in aligned location.
+  }
+  llvm::Type *PTy =
+    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
   return AddrTyped;
 }
```
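The alignment round-up emitted above is the usual mask trick. A standalone model (the address is hypothetical):

```cpp
#include <cstdio>

// Standalone model of the pointer round-up the va_arg code emits when the
// ABI alignment exceeds 4 bytes: (addr + align - 1) & ~(align - 1).
int main() {
  const unsigned Aligns[] = {4, 8};
  unsigned Addr = 0x1006; // hypothetical misaligned ap.cur
  for (unsigned i = 0; i < 2; ++i) {
    unsigned Align = Aligns[i];
    unsigned Aligned = (Addr + Align - 1) & ~(Align - 1);
    std::printf("align %u: 0x%x -> 0x%x\n", Align, Addr, Aligned);
  }
  return 0;
}
```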
```diff
+
+namespace {
+
+class NaClARMABIInfo : public ABIInfo {
+ public:
+  NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
+      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
+  virtual void computeInfo(CGFunctionInfo &FI) const;
+  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                 CodeGenFunction &CGF) const;
+ private:
+  PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
+  ARMABIInfo NInfo;   // Used for everything else.
+};
+
+class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo {
+ public:
+  NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
+      : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
+};
+
+}
+
+void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
+  if (FI.getASTCallingConvention() == CC_PnaclCall)
+    PInfo.computeInfo(FI);
+  else
+    static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
+}
+
+llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                       CodeGenFunction &CGF) const {
+  // Always use the native convention; calling pnacl-style varargs functions
+  // is unsupported.
+  return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
+}
+
 //===----------------------------------------------------------------------===//
 // NVPTX ABI Implementation
 //===----------------------------------------------------------------------===//
@@ -3072,7 +3632,7 @@ SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
       // OpenCL __kernel functions get a kernel calling convention
       F->setCallingConv(llvm::CallingConv::PTX_Kernel);
       // And kernel functions are not subject to inlining
-      F->addFnAttr(llvm::Attribute::NoInline);
+      F->addFnAttr(llvm::Attributes::NoInline);
     }
   }
 
@@ -3188,7 +3748,7 @@ void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
       F->setCallingConv(CC);
 
       // Step 2: Add attributes goodness.
-      F->addFnAttr(llvm::Attribute::NoInline);
+      F->addFnAttr(llvm::Attributes::NoInline);
   }
 
   // Step 3: Emit _interrupt_handler alias.
@@ -3226,7 +3786,7 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
       F->setCallingConv(llvm::CallingConv::MSP430_INTR);
 
      // Step 2: Add attributes goodness.
-      F->addFnAttr(llvm::Attribute::NoInline);
+      F->addFnAttr(llvm::Attributes::NoInline);
 
       // Step 3: Emit ISR vector alias.
       unsigned Num = attr->getNumber() + 0xffe0;
@@ -3583,7 +4143,7 @@ void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
   if (M.getLangOpts().OpenCL) {
     if (FD->hasAttr<OpenCLKernelAttr>()) {
       // OpenCL C Kernel functions are not subject to inlining
-      F->addFnAttr(llvm::Attribute::NoInline);
+      F->addFnAttr(llvm::Attributes::NoInline);
 
       if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
@@ -3767,6 +4327,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
   default:
     return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
 
+  case llvm::Triple::le32:
+    return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
   case llvm::Triple::mips:
   case llvm::Triple::mipsel:
     return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
@@ -3779,19 +4341,29 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
   case llvm::Triple::thumb:
     {
       ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
-
       if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0)
         Kind = ARMABIInfo::APCS;
-      else if (CodeGenOpts.FloatABI == "hard")
+      else if (CodeGenOpts.FloatABI == "hard" ||
+               (CodeGenOpts.FloatABI != "soft" && Triple.getEnvironment()==llvm::Triple::GNUEABIHF))
         Kind = ARMABIInfo::AAPCS_VFP;
 
-      return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
+      switch (Triple.getOS()) {
+        case llvm::Triple::NativeClient:
+          return *(TheTargetCodeGenInfo =
+                   new NaClARMTargetCodeGenInfo(Types, Kind));
+        default:
+          return *(TheTargetCodeGenInfo =
+                   new ARMTargetCodeGenInfo(Types, Kind));
+      }
     }
 
   case llvm::Triple::ppc:
     return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
   case llvm::Triple::ppc64:
-    return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
+    if (Triple.isOSBinFormatELF())
+      return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
+    else
+      return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
 
   case llvm::Triple::nvptx:
   case llvm::Triple::nvptx64:
@@ -3848,6 +4420,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
     case llvm::Triple::MinGW32:
     case llvm::Triple::Cygwin:
       return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
+    case llvm::Triple::NativeClient:
+      return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types, HasAVX));
     default:
       return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
                                                                   HasAVX));
```
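Read end to end, the dispatch is: the architecture picks the ABI family, then the OS (NativeClient) or object format (ELF for ppc64) picks a specialized variant. A rough standalone model of that decision (the strings and return values are illustrative, not clang's API):

```cpp
#include <cstdio>
#include <string>

// Hypothetical model of the switch at the end of getTargetCodeGenInfo():
// architecture selects the ABI family, OS/object-format selects a variant.
static std::string pickCodeGenInfo(const std::string &Arch,
                                   const std::string &OS) {
  if (Arch == "le32")
    return "PNaClTargetCodeGenInfo";
  if (Arch == "arm")
    return OS == "nacl" ? "NaClARMTargetCodeGenInfo" : "ARMTargetCodeGenInfo";
  if (Arch == "x86_64")
    return OS == "nacl" ? "NaClX86_64TargetCodeGenInfo"
                        : "X86_64TargetCodeGenInfo";
  if (Arch == "ppc64")
    return "PPC64_SVR4_TargetCodeGenInfo"; // assuming an ELF binary format
  return "DefaultTargetCodeGenInfo";
}

int main() {
  std::printf("%s\n", pickCodeGenInfo("arm", "nacl").c_str());
  std::printf("%s\n", pickCodeGenInfo("ppc64", "linux").c_str());
  return 0;
}
```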
