Diffstat (limited to 'lib/CodeGen/CGCall.cpp')
 -rw-r--r--  lib/CodeGen/CGCall.cpp | 662
 1 file changed, 558 insertions(+), 104 deletions(-)
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 935985049c01d..242b5962070a3 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -14,6 +14,7 @@
 
 #include "CGCall.h"
 #include "ABIInfo.h"
+#include "CGBlocks.h"
 #include "CGCXXABI.h"
 #include "CGCleanup.h"
 #include "CodeGenFunction.h"
@@ -25,9 +26,11 @@
 #include "clang/Basic/TargetBuiltins.h"
 #include "clang/Basic/TargetInfo.h"
 #include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/CodeGen/SwiftCallingConv.h"
 #include "clang/Frontend/CodeGenOptions.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallingConv.h"
 #include "llvm/IR/CallSite.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/InlineAsm.h"
@@ -39,7 +42,7 @@ using namespace CodeGen;
 
 /***/
 
-static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
+unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
   switch (CC) {
   default: return llvm::CallingConv::C;
   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
@@ -55,7 +58,10 @@ static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
   // TODO: Add support for __vectorcall to LLVM.
   case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
   case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
-  case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
+  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
+  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
+  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
+  case CC_Swift: return llvm::CallingConv::Swift;
   }
 }
 
@@ -90,15 +96,25 @@ CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
   return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                  /*instanceMethod=*/false,
                                  /*chainCall=*/false, None,
-                                 FTNP->getExtInfo(), RequiredArgs(0));
+                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
 }
 
 /// Adds the formal paramaters in FPT to the given prefix. If any parameter in
 /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
 static void appendParameterTypes(const CodeGenTypes &CGT,
                                  SmallVectorImpl<CanQualType> &prefix,
-                                 const CanQual<FunctionProtoType> &FPT,
+              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
+                                 CanQual<FunctionProtoType> FPT,
                                  const FunctionDecl *FD) {
+  // Fill out paramInfos.
+  if (FPT->hasExtParameterInfos() || !paramInfos.empty()) {
+    assert(paramInfos.size() <= prefix.size());
+    auto protoParamInfos = FPT->getExtParameterInfos();
+    paramInfos.reserve(prefix.size() + protoParamInfos.size());
+    paramInfos.resize(prefix.size());
+    paramInfos.append(protoParamInfos.begin(), protoParamInfos.end());
+  }
+
   // Fast path: unknown target.
   if (FD == nullptr) {
     prefix.append(FPT->param_type_begin(), FPT->param_type_end());
@@ -125,13 +141,17 @@ arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                         SmallVectorImpl<CanQualType> &prefix,
                         CanQual<FunctionProtoType> FTP,
                         const FunctionDecl *FD) {
-  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
+  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
+  RequiredArgs Required =
+      RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
   // FIXME: Kill copy.
-  appendParameterTypes(CGT, prefix, FTP, FD);
+  appendParameterTypes(CGT, prefix, paramInfos, FTP, FD);
   CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
+
   return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                      /*chainCall=*/false, prefix,
-                                     FTP->getExtInfo(), required);
+                                     FTP->getExtInfo(), paramInfos,
+                                     Required);
 }
 
 /// Arrange the argument and result information for a value of the
@@ -173,6 +193,12 @@ static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
   if (D->hasAttr<SysVABIAttr>())
     return IsWindows ? CC_X86_64SysV : CC_C;
 
+  if (D->hasAttr<PreserveMostAttr>())
+    return CC_PreserveMost;
+
+  if (D->hasAttr<PreserveAllAttr>())
+    return CC_PreserveAll;
+
   return CC_C;
 }
 
@@ -219,16 +245,33 @@ CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
   return arrangeFreeFunctionType(prototype, MD);
 }
 
+bool CodeGenTypes::inheritingCtorHasParams(
+    const InheritedConstructor &Inherited, CXXCtorType Type) {
+  // Parameters are unnecessary if we're constructing a base class subobject
+  // and the inherited constructor lives in a virtual base.
+  return Type == Ctor_Complete ||
+         !Inherited.getShadowDecl()->constructsVirtualBase() ||
+         !Target.getCXXABI().hasConstructorVariants();
+  }
+
 const CGFunctionInfo &
 CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                             StructorType Type) {
   SmallVector<CanQualType, 16> argTypes;
+  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
   argTypes.push_back(GetThisType(Context, MD->getParent()));
 
+  bool PassParams = true;
+
   GlobalDecl GD;
   if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
     GD = GlobalDecl(CD, toCXXCtorType(Type));
+
+    // A base class inheriting constructor doesn't get forwarded arguments
+    // needed to construct a virtual base (or base class thereof).
+    if (auto Inherited = CD->getInheritedConstructor())
+      PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
   } else {
     auto *DD = dyn_cast<CXXDestructorDecl>(MD);
     GD = GlobalDecl(DD, toCXXDtorType(Type));
@@ -237,12 +280,14 @@ CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
 
   // Add the formal parameters.
-  appendParameterTypes(*this, argTypes, FTP, MD);
+  if (PassParams)
+    appendParameterTypes(*this, argTypes, paramInfos, FTP, MD);
 
   TheCXXABI.buildStructorSignature(MD, Type, argTypes);
 
   RequiredArgs required =
-      (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);
+      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
+                                      : RequiredArgs::All);
 
   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
   CanQualType resultType = TheCXXABI.HasThisReturn(GD)
@@ -252,7 +297,53 @@ CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                      : Context.VoidTy;
   return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                  /*chainCall=*/false, argTypes, extInfo,
-                                 required);
+                                 paramInfos, required);
+}
+
+static SmallVector<CanQualType, 16>
+getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
+  SmallVector<CanQualType, 16> argTypes;
+  for (auto &arg : args)
+    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
+  return argTypes;
+}
+
+static SmallVector<CanQualType, 16>
+getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
+  SmallVector<CanQualType, 16> argTypes;
+  for (auto &arg : args)
+    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
+  return argTypes;
+}
+
+static void addExtParameterInfosForCall(
+         llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
+                                        const FunctionProtoType *proto,
+                                        unsigned prefixArgs,
+                                        unsigned totalArgs) {
+  assert(proto->hasExtParameterInfos());
+  assert(paramInfos.size() <= prefixArgs);
+  assert(proto->getNumParams() + prefixArgs <= totalArgs);
+
+  // Add default infos for any prefix args that don't already have infos.
+  paramInfos.resize(prefixArgs);
+
+  // Add infos for the prototype.
+  auto protoInfos = proto->getExtParameterInfos();
+  paramInfos.append(protoInfos.begin(), protoInfos.end());
+
+  // Add default infos for the variadic arguments.
+  paramInfos.resize(totalArgs);
+}
+
+static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
+getExtParameterInfosForCall(const FunctionProtoType *proto,
+                            unsigned prefixArgs, unsigned totalArgs) {
+  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
+  if (proto->hasExtParameterInfos()) {
+    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
+  }
+  return result;
 }
 
 /// Arrange a call to a C++ method, passing the given arguments.
@@ -267,7 +358,7 @@ CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
     ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
 
   CanQual<FunctionProtoType> FPT = GetFormalType(D);
-  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
+  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs, D);
   GlobalDecl GD(D, CtorKind);
   CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                                ? ArgTypes.front()
@@ -276,9 +367,11 @@ CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                      : Context.VoidTy;
 
   FunctionType::ExtInfo Info = FPT->getExtInfo();
+  auto ParamInfos = getExtParameterInfosForCall(FPT.getTypePtr(), 1 + ExtraArgs,
+                                                ArgTypes.size());
   return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                  /*chainCall=*/false, ArgTypes, Info,
-                                 Required);
+                                 ParamInfos, Required);
 }
 
 /// Arrange the argument and result information for the declaration or
@@ -299,7 +392,7 @@ CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
     CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
     return arrangeLLVMFunctionInfo(
         noProto->getReturnType(), /*instanceMethod=*/false,
-        /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All);
+        /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All);
   }
 
   assert(isa<FunctionProtoType>(FTy));
@@ -328,7 +421,7 @@ CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
   argTys.push_back(Context.getCanonicalParamType(receiverType));
   argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
   // FIXME: Kill copy?
-  for (const auto *I : MD->params()) {
+  for (const auto *I : MD->parameters()) {
     argTys.push_back(Context.getCanonicalParamType(I->getType()));
   }
 
@@ -345,7 +438,18 @@ CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
 
   return arrangeLLVMFunctionInfo(
       GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
-      /*chainCall=*/false, argTys, einfo, required);
+      /*chainCall=*/false, argTys, einfo, {}, required);
+}
+
+const CGFunctionInfo &
+CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
+                                                 const CallArgList &args) {
+  auto argTypes = getArgTypesForCall(Context, args);
+  FunctionType::ExtInfo einfo;
+
+  return arrangeLLVMFunctionInfo(
+      GetReturnType(returnType), /*instanceMethod=*/false,
+      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
 }
 
 const CGFunctionInfo &
@@ -374,7 +478,7 @@ CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
   CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                  /*chainCall=*/false, ArgTys,
-                                 FTP->getExtInfo(), RequiredArgs(1));
+                                 FTP->getExtInfo(), {}, RequiredArgs(1));
 }
 
 const CGFunctionInfo &
@@ -394,7 +498,8 @@ CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
       /*IsVariadic=*/false, /*IsCXXMethod=*/true);
   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                  /*chainCall=*/false, ArgTys,
-                                 FunctionType::ExtInfo(CC), RequiredArgs::All);
+                                 FunctionType::ExtInfo(CC), {},
+                                 RequiredArgs::All);
 }
 
 /// Arrange a call as unto a free function, except possibly with an
@@ -408,6 +513,8 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                             bool chainCall) {
   assert(args.size() >= numExtraRequiredArgs);
 
+  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
+
   // In most cases, there are no optional arguments.
   RequiredArgs required = RequiredArgs::All;
 
@@ -417,6 +524,10 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
     if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
 
+    if (proto->hasExtParameterInfos())
+      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
+                                  args.size());
+
    // If we don't have a prototype at all, but we're supposed to
    // explicitly use the variadic convention for unprototyped calls,
    // treat all of the arguments as required but preserve the nominal
@@ -433,7 +544,8 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
     argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
   return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                      /*instanceMethod=*/false, chainCall,
-                                     argTypes, fnType->getExtInfo(), required);
+                                     argTypes, fnType->getExtInfo(), paramInfos,
+                                     required);
 }
 
 /// Figure out the rules for calling a function with the given formal
@@ -448,7 +560,7 @@ CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      chainCall ? 1 : 0, chainCall);
 }
 
-/// A block function call is essentially a free-function call with an
+/// A block function is essentially a free function with an
 /// extra implicit argument.
 const CGFunctionInfo &
 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
@@ -458,54 +570,99 @@ CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
 }
 
 const CGFunctionInfo &
-CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
-                                      const CallArgList &args,
-                                      FunctionType::ExtInfo info,
-                                      RequiredArgs required) {
+CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
+                                              const FunctionArgList &params) {
+  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
+  auto argTypes = getArgTypesForDeclaration(Context, params);
+
+  return arrangeLLVMFunctionInfo(
+      GetReturnType(proto->getReturnType()),
+      /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
+      proto->getExtInfo(), paramInfos,
+      RequiredArgs::forPrototypePlus(proto, 1, nullptr));
+}
+
+const CGFunctionInfo &
+CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
+                                         const CallArgList &args) {
   // FIXME: Kill copy.
   SmallVector<CanQualType, 16> argTypes;
   for (const auto &Arg : args)
     argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
   return arrangeLLVMFunctionInfo(
       GetReturnType(resultType), /*instanceMethod=*/false,
-      /*chainCall=*/false, argTypes, info, required);
+      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
+      /*paramInfos=*/ {}, RequiredArgs::All);
 }
 
-/// Arrange a call to a C++ method, passing the given arguments.
 const CGFunctionInfo &
-CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
-                                   const FunctionProtoType *FPT,
-                                   RequiredArgs required) {
-  // FIXME: Kill copy.
-  SmallVector<CanQualType, 16> argTypes; -  for (const auto &Arg : args) -    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); +CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType, +                                                const FunctionArgList &args) { +  auto argTypes = getArgTypesForDeclaration(Context, args); + +  return arrangeLLVMFunctionInfo( +      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false, +      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All); +} -  FunctionType::ExtInfo info = FPT->getExtInfo(); +const CGFunctionInfo & +CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType, +                                              ArrayRef<CanQualType> argTypes) {    return arrangeLLVMFunctionInfo( -      GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true, -      /*chainCall=*/false, argTypes, info, required); +      resultType, /*instanceMethod=*/false, /*chainCall=*/false, +      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);  } -const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration( -    QualType resultType, const FunctionArgList &args, -    const FunctionType::ExtInfo &info, bool isVariadic) { +/// Arrange a call to a C++ method, passing the given arguments. +const CGFunctionInfo & +CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args, +                                   const FunctionProtoType *proto, +                                   RequiredArgs required) { +  unsigned numRequiredArgs = +    (proto->isVariadic() ? required.getNumRequiredArgs() : args.size()); +  unsigned numPrefixArgs = numRequiredArgs - proto->getNumParams(); +  auto paramInfos = +    getExtParameterInfosForCall(proto, numPrefixArgs, args.size()); +    // FIXME: Kill copy. -  SmallVector<CanQualType, 16> argTypes; -  for (auto Arg : args) -    argTypes.push_back(Context.getCanonicalParamType(Arg->getType())); +  auto argTypes = getArgTypesForCall(Context, args); -  RequiredArgs required = -    (isVariadic ? 
RequiredArgs(args.size()) : RequiredArgs::All); +  FunctionType::ExtInfo info = proto->getExtInfo();    return arrangeLLVMFunctionInfo( -      GetReturnType(resultType), /*instanceMethod=*/false, -      /*chainCall=*/false, argTypes, info, required); +      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true, +      /*chainCall=*/false, argTypes, info, paramInfos, required);  }  const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {    return arrangeLLVMFunctionInfo(        getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false, -      None, FunctionType::ExtInfo(), RequiredArgs::All); +      None, FunctionType::ExtInfo(), {}, RequiredArgs::All); +} + +const CGFunctionInfo & +CodeGenTypes::arrangeCall(const CGFunctionInfo &signature, +                          const CallArgList &args) { +  assert(signature.arg_size() <= args.size()); +  if (signature.arg_size() == args.size()) +    return signature; + +  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; +  auto sigParamInfos = signature.getExtParameterInfos(); +  if (!sigParamInfos.empty()) { +    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end()); +    paramInfos.resize(args.size()); +  } + +  auto argTypes = getArgTypesForCall(Context, args); + +  assert(signature.getRequiredArgs().allowsOptionalArgs()); +  return arrangeLLVMFunctionInfo(signature.getReturnType(), +                                 signature.isInstanceMethod(), +                                 signature.isChainCall(), +                                 argTypes, +                                 signature.getExtInfo(), +                                 paramInfos, +                                 signature.getRequiredArgs());  }  /// Arrange the argument and result information for an abstract value @@ -517,25 +674,26 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,                                        bool chainCall,                                        ArrayRef<CanQualType> argTypes,                                        FunctionType::ExtInfo info, +                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,                                        RequiredArgs required) {    assert(std::all_of(argTypes.begin(), argTypes.end(),                       std::mem_fun_ref(&CanQualType::isCanonicalAsParam))); -  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC()); -    // Lookup or create unique function info.    llvm::FoldingSetNodeID ID; -  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required, -                          resultType, argTypes); +  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos, +                          required, resultType, argTypes);    void *insertPos = nullptr;    CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);    if (FI)      return *FI; +  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC()); +    // Construct the function info.  We co-allocate the ArgInfos.    FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info, -                              resultType, argTypes, required); +                              paramInfos, resultType, argTypes, required);    FunctionInfos.InsertNode(FI, insertPos);    bool inserted = FunctionsBeingProcessed.insert(FI).second; @@ -543,7 +701,11 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,    assert(inserted && "Recursively being processed?");    // Compute ABI information. 
-  getABIInfo().computeInfo(*FI); +  if (info.getCC() != CC_Swift) { +    getABIInfo().computeInfo(*FI); +  } else { +    swiftcall::computeABIInfo(CGM, *FI); +  }    // Loop over all of the computed argument and return value info.  If any of    // them are direct or extend without a specified coerce type, specify the @@ -566,11 +728,16 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,                                         bool instanceMethod,                                         bool chainCall,                                         const FunctionType::ExtInfo &info, +                                       ArrayRef<ExtParameterInfo> paramInfos,                                         CanQualType resultType,                                         ArrayRef<CanQualType> argTypes,                                         RequiredArgs required) { -  void *buffer = operator new(sizeof(CGFunctionInfo) + -                              sizeof(ArgInfo) * (argTypes.size() + 1)); +  assert(paramInfos.empty() || paramInfos.size() == argTypes.size()); + +  void *buffer = +    operator new(totalSizeToAlloc<ArgInfo,             ExtParameterInfo>( +                                  argTypes.size() + 1, paramInfos.size())); +    CGFunctionInfo *FI = new(buffer) CGFunctionInfo();    FI->CallingConvention = llvmCC;    FI->EffectiveCallingConvention = llvmCC; @@ -585,9 +752,12 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,    FI->ArgStruct = nullptr;    FI->ArgStructAlign = 0;    FI->NumArgs = argTypes.size(); +  FI->HasExtParameterInfos = !paramInfos.empty();    FI->getArgsBuffer()[0].type = resultType;    for (unsigned i = 0, e = argTypes.size(); i != e; ++i)      FI->getArgsBuffer()[i + 1].type = argTypes[i]; +  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i) +    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];    return FI;  } @@ -634,7 +804,8 @@ struct RecordExpansion : TypeExpansion {    RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,                    SmallVector<const FieldDecl *, 1> &&Fields) -      : TypeExpansion(TEK_Record), Bases(Bases), Fields(Fields) {} +      : TypeExpansion(TEK_Record), Bases(std::move(Bases)), +        Fields(std::move(Fields)) {}    static bool classof(const TypeExpansion *TE) {      return TE->Kind == TEK_Record;    } @@ -773,7 +944,7 @@ static void forConstantArrayExpansion(CodeGenFunction &CGF,  }  void CodeGenFunction::ExpandTypeFromArgs( -    QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) { +    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {    assert(LV.isSimple() &&           "Unexpected non-simple lvalue during struct expansion."); @@ -798,7 +969,7 @@ void CodeGenFunction::ExpandTypeFromArgs(      }      for (auto FD : RExp->Fields) {        // FIXME: What are the right qualifiers here? -      LValue SubLV = EmitLValueForField(LV, FD); +      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);        ExpandTypeFromArgs(FD->getType(), SubLV, AI);      }    } else if (isa<ComplexExpansion>(Exp.get())) { @@ -1220,11 +1391,13 @@ void ClangToLLVMArgMapping::construct(const ASTContext &Context,        // ignore and inalloca doesn't have matching LLVM parameters.        
IRArgs.NumberOfArgs = 0;        break; -    case ABIArgInfo::Expand: { +    case ABIArgInfo::CoerceAndExpand: +      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size(); +      break; +    case ABIArgInfo::Expand:        IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);        break;      } -    }      if (IRArgs.NumberOfArgs > 0) {        IRArgs.FirstArgIndex = IRArgNo; @@ -1323,6 +1496,10 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {    case ABIArgInfo::Ignore:      resultType = llvm::Type::getVoidTy(getLLVMContext());      break; + +  case ABIArgInfo::CoerceAndExpand: +    resultType = retAI.getUnpaddedCoerceAndExpandType(); +    break;    }    ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); @@ -1390,6 +1567,15 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {        break;      } +    case ABIArgInfo::CoerceAndExpand: { +      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; +      for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { +        *ArgTypesIter++ = EltTy; +      } +      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); +      break; +    } +      case ABIArgInfo::Expand:        auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;        getExpandedTypes(it->type, ArgTypesIter); @@ -1450,6 +1636,7 @@ void CodeGenModule::ConstructAttributeList(    const Decl *TargetDecl = CalleeInfo.getCalleeDecl(); +  bool HasAnyX86InterruptAttr = false;    // FIXME: handle sseregparm someday...    if (TargetDecl) {      if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) @@ -1487,6 +1674,7 @@ void CodeGenModule::ConstructAttributeList(      if (TargetDecl->hasAttr<ReturnsNonNullAttr>())        RetAttrs.addAttribute(llvm::Attribute::NonNull); +    HasAnyX86InterruptAttr = TargetDecl->hasAttr<AnyX86InterruptAttr>();      HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();    } @@ -1526,10 +1714,11 @@ void CodeGenModule::ConstructAttributeList(      }      bool DisableTailCalls = -        CodeGenOpts.DisableTailCalls || +        CodeGenOpts.DisableTailCalls || HasAnyX86InterruptAttr ||          (TargetDecl && TargetDecl->hasAttr<DisableTailCallsAttr>()); -    FuncAttrs.addAttribute("disable-tail-calls", -                           llvm::toStringRef(DisableTailCalls)); +    FuncAttrs.addAttribute( +        "disable-tail-calls", +        llvm::toStringRef(DisableTailCalls));      FuncAttrs.addAttribute("less-precise-fpmad",                             llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD)); @@ -1543,9 +1732,13 @@ void CodeGenModule::ConstructAttributeList(                             llvm::toStringRef(CodeGenOpts.SoftFloat));      FuncAttrs.addAttribute("stack-protector-buffer-size",                             llvm::utostr(CodeGenOpts.SSPBufferSize)); +    FuncAttrs.addAttribute("no-signed-zeros-fp-math", +                           llvm::toStringRef(CodeGenOpts.NoSignedZeros));      if (CodeGenOpts.StackRealignment)        FuncAttrs.addAttribute("stackrealign"); +    if (CodeGenOpts.Backchain) +      FuncAttrs.addAttribute("backchain");      // Add target-cpu and target-features attributes to functions. 
If      // we have a decl for the function and it has a target attribute then @@ -1594,6 +1787,18 @@ void CodeGenModule::ConstructAttributeList(      }    } +  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { +    // Conservatively, mark all functions and calls in CUDA as convergent +    // (meaning, they may call an intrinsically convergent op, such as +    // __syncthreads(), and so can't have certain optimizations applied around +    // them).  LLVM will remove this attribute where it safely can. +    FuncAttrs.addAttribute(llvm::Attribute::Convergent); + +    // Respect -fcuda-flush-denormals-to-zero. +    if (getLangOpts().CUDADeviceFlushDenormalsToZero) +      FuncAttrs.addAttribute("nvptx-f32ftz", "true"); +  } +    ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);    QualType RetTy = FI.getReturnType(); @@ -1620,6 +1825,9 @@ void CodeGenModule::ConstructAttributeList(      break;    } +  case ABIArgInfo::CoerceAndExpand: +    break; +    case ABIArgInfo::Expand:      llvm_unreachable("Invalid ABI kind for return argument");    } @@ -1639,10 +1847,13 @@ void CodeGenModule::ConstructAttributeList(          getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));    } +  bool hasUsedSRet = false; +    // Attach attributes to sret.    if (IRFunctionArgs.hasSRetArg()) {      llvm::AttrBuilder SRETAttrs;      SRETAttrs.addAttribute(llvm::Attribute::StructRet); +    hasUsedSRet = true;      if (RetAI.getInReg())        SRETAttrs.addAttribute(llvm::Attribute::InReg);      PAL.push_back(llvm::AttributeSet::get( @@ -1727,7 +1938,8 @@ void CodeGenModule::ConstructAttributeList(      }      case ABIArgInfo::Ignore:      case ABIArgInfo::Expand: -      continue; +    case ABIArgInfo::CoerceAndExpand: +      break;      case ABIArgInfo::InAlloca:        // inalloca disables readnone and readonly. @@ -1745,6 +1957,41 @@ void CodeGenModule::ConstructAttributeList(          Attrs.addAttribute(llvm::Attribute::NonNull);      } +    switch (FI.getExtParameterInfo(ArgNo).getABI()) { +    case ParameterABI::Ordinary: +      break; + +    case ParameterABI::SwiftIndirectResult: { +      // Add 'sret' if we haven't already used it for something, but +      // only if the result is void. +      if (!hasUsedSRet && RetTy->isVoidType()) { +        Attrs.addAttribute(llvm::Attribute::StructRet); +        hasUsedSRet = true; +      } + +      // Add 'noalias' in either case. +      Attrs.addAttribute(llvm::Attribute::NoAlias); + +      // Add 'dereferenceable' and 'alignment'. 
+      auto PTy = ParamType->getPointeeType(); +      if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { +        auto info = getContext().getTypeInfoInChars(PTy); +        Attrs.addDereferenceableAttr(info.first.getQuantity()); +        Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(), +                                                 info.second.getQuantity())); +      } +      break; +    } + +    case ParameterABI::SwiftErrorResult: +      Attrs.addAttribute(llvm::Attribute::SwiftError); +      break; + +    case ParameterABI::SwiftContext: +      Attrs.addAttribute(llvm::Attribute::SwiftSelf); +      break; +    } +      if (Attrs.hasAttributes()) {        unsigned FirstIRArg, NumIRArgs;        std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); @@ -1810,6 +2057,18 @@ static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,    return nullptr;  } +namespace { +  struct CopyBackSwiftError final : EHScopeStack::Cleanup { +    Address Temp; +    Address Arg; +    CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {} +    void Emit(CodeGenFunction &CGF, Flags flags) override { +      llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp); +      CGF.Builder.CreateStore(errorValue, Arg); +    } +  }; +} +  void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,                                           llvm::Function *Fn,                                           const FunctionArgList &Args) { @@ -1835,7 +2094,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,    ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);    // Flattened function arguments. -  SmallVector<llvm::Argument *, 16> FnArgs; +  SmallVector<llvm::Value *, 16> FnArgs;    FnArgs.reserve(IRFunctionArgs.totalIRArgs());    for (auto &Arg : Fn->args()) {      FnArgs.push_back(&Arg); @@ -1856,7 +2115,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,    // Name the struct return parameter.    if (IRFunctionArgs.hasSRetArg()) { -    auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()]; +    auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);      AI->setName("agg.result");      AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,                                          llvm::Attribute::NoAlias)); @@ -1944,8 +2203,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,            ArgI.getCoerceToType() == ConvertType(Ty) &&            ArgI.getDirectOffset() == 0) {          assert(NumIRArgs == 1); -        auto AI = FnArgs[FirstIRArg]; -        llvm::Value *V = AI; +        llvm::Value *V = FnArgs[FirstIRArg]; +        auto AI = cast<llvm::Argument>(V);          if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {            if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), @@ -2014,6 +2273,25 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,                                                AI->getArgNo() + 1,                                                llvm::Attribute::NoAlias)); +        // LLVM expects swifterror parameters to be used in very restricted +        // ways.  Copy the value into a less-restricted temporary. 
+        if (FI.getExtParameterInfo(ArgNo).getABI() +              == ParameterABI::SwiftErrorResult) { +          QualType pointeeTy = Ty->getPointeeType(); +          assert(pointeeTy->isPointerType()); +          Address temp = +            CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); +          Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy)); +          llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); +          Builder.CreateStore(incomingErrorValue, temp); +          V = temp.getPointer(); + +          // Push a cleanup to copy the value back at the end of the function. +          // The convention does not guarantee that the value will be written +          // back if the function exits with an unwind exception. +          EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg); +        } +          // Ensure the argument is the correct type.          if (V->getType() != ArgI.getCoerceToType())            V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); @@ -2100,6 +2378,29 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,        break;      } +    case ABIArgInfo::CoerceAndExpand: { +      // Reconstruct into a temporary. +      Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); +      ArgVals.push_back(ParamValue::forIndirect(alloca)); + +      auto coercionType = ArgI.getCoerceAndExpandType(); +      alloca = Builder.CreateElementBitCast(alloca, coercionType); +      auto layout = CGM.getDataLayout().getStructLayout(coercionType); + +      unsigned argIndex = FirstIRArg; +      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { +        llvm::Type *eltType = coercionType->getElementType(i); +        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) +          continue; + +        auto eltAddr = Builder.CreateStructGEP(alloca, i, layout); +        auto elt = FnArgs[argIndex++]; +        Builder.CreateStore(elt, eltAddr); +      } +      assert(argIndex == FirstIRArg + NumIRArgs); +      break; +    } +      case ABIArgInfo::Expand: {        // If this structure was expanded into multiple arguments then        // we need to create a temporary and reconstruct it from the @@ -2462,9 +2763,26 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,      // In ARC, end functions that return a retainable type with a call      // to objc_autoreleaseReturnValue.      if (AutoreleaseResult) { +#ifndef NDEBUG +      // Type::isObjCRetainabletype has to be called on a QualType that hasn't +      // been stripped of the typedefs, so we cannot use RetTy here. Get the +      // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from +      // CurCodeDecl or BlockInfo. 
+      QualType RT; + +      if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl)) +        RT = FD->getReturnType(); +      else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl)) +        RT = MD->getReturnType(); +      else if (isa<BlockDecl>(CurCodeDecl)) +        RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType(); +      else +        llvm_unreachable("Unexpected function/method type"); +        assert(getLangOpts().ObjCAutoRefCount &&               !FI.isReturnsRetained() && -             RetTy->isObjCRetainableType()); +             RT->isObjCRetainableType()); +#endif        RV = emitAutoreleaseOfResult(*this, RV);      } @@ -2473,6 +2791,40 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,    case ABIArgInfo::Ignore:      break; +  case ABIArgInfo::CoerceAndExpand: { +    auto coercionType = RetAI.getCoerceAndExpandType(); +    auto layout = CGM.getDataLayout().getStructLayout(coercionType); + +    // Load all of the coerced elements out into results. +    llvm::SmallVector<llvm::Value*, 4> results; +    Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType); +    for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { +      auto coercedEltType = coercionType->getElementType(i); +      if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType)) +        continue; + +      auto eltAddr = Builder.CreateStructGEP(addr, i, layout); +      auto elt = Builder.CreateLoad(eltAddr); +      results.push_back(elt); +    } + +    // If we have one result, it's the single direct result type. +    if (results.size() == 1) { +      RV = results[0]; + +    // Otherwise, we need to make a first-class aggregate. +    } else { +      // Construct a return type that lacks padding elements. +      llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType(); + +      RV = llvm::UndefValue::get(returnType); +      for (unsigned i = 0, e = results.size(); i != e; ++i) { +        RV = Builder.CreateInsertValue(RV, results[i], i); +      } +    } +    break; +  } +    case ABIArgInfo::Expand:      llvm_unreachable("Invalid ABI kind for return argument");    } @@ -2536,23 +2888,15 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,    QualType type = param->getType(); -  // For the most part, we just need to load the alloca, except: -  // 1) aggregate r-values are actually pointers to temporaries, and -  // 2) references to non-scalars are pointers directly to the aggregate. -  // I don't know why references to scalars are different here. -  if (const ReferenceType *ref = type->getAs<ReferenceType>()) { -    if (!hasScalarEvaluationKind(ref->getPointeeType())) -      return args.add(RValue::getAggregate(local), type); - -    // Locals which are references to scalars are represented -    // with allocas holding the pointer. -    return args.add(RValue::get(Builder.CreateLoad(local)), type); -  } -    assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&           "cannot emit delegate call arguments for inalloca arguments!"); -  args.add(convertTempToRValue(local, type, loc), type); +  // For the most part, we just need to load the alloca, except that +  // aggregate r-values are actually pointers to temporaries. 
+  if (type->isReferenceType()) +    args.add(RValue::get(Builder.CreateLoad(local)), type); +  else +    args.add(convertTempToRValue(local, type, loc), type);  }  static bool isProvablyNull(llvm::Value *addr) { @@ -2863,10 +3207,10 @@ void CodeGenFunction::EmitCallArgs(      size_t CallArgsStart = Args.size();      for (int I = ArgTypes.size() - 1; I >= 0; --I) {        CallExpr::const_arg_iterator Arg = ArgRange.begin() + I; +      MaybeEmitImplicitObjectSize(I, *Arg);        EmitCallArg(Args, *Arg, ArgTypes[I]);        EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),                            CalleeDecl, ParamsToSkip + I); -      MaybeEmitImplicitObjectSize(I, *Arg);      }      // Un-reverse the arguments we just evaluated so they match up with the LLVM @@ -3046,24 +3390,13 @@ CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,    return EmitRuntimeCall(callee, None, name);  } -/// Emits a simple call (never an invoke) to the given runtime -/// function. -llvm::CallInst * -CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, -                                 ArrayRef<llvm::Value*> args, -                                 const llvm::Twine &name) { -  llvm::CallInst *call = Builder.CreateCall(callee, args, name); -  call->setCallingConv(getRuntimeCC()); -  return call; -} -  // Calls which may throw must have operand bundles indicating which funclet  // they are nested within.  static void -getBundlesForFunclet(llvm::Value *Callee, -                     llvm::Instruction *CurrentFuncletPad, +getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad,                       SmallVectorImpl<llvm::OperandBundleDef> &BundleList) { -  // There is no need for a funclet operand bundle if we aren't inside a funclet. +  // There is no need for a funclet operand bundle if we aren't inside a +  // funclet.    if (!CurrentFuncletPad)      return; @@ -3075,6 +3408,19 @@ getBundlesForFunclet(llvm::Value *Callee,    BundleList.emplace_back("funclet", CurrentFuncletPad);  } +/// Emits a simple call (never an invoke) to the given runtime function. +llvm::CallInst * +CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, +                                 ArrayRef<llvm::Value*> args, +                                 const llvm::Twine &name) { +  SmallVector<llvm::OperandBundleDef, 1> BundleList; +  getBundlesForFunclet(callee, CurrentFuncletPad, BundleList); + +  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList, name); +  call->setCallingConv(getRuntimeCC()); +  return call; +} +  /// Emits a call or invoke to the given noreturn runtime function.  void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,                                                 ArrayRef<llvm::Value*> args) { @@ -3098,8 +3444,7 @@ void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,    }  } -/// Emits a call or invoke instruction to the given nullary runtime -/// function. +/// Emits a call or invoke instruction to the given nullary runtime function.  
llvm::CallSite  CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,                                           const Twine &name) { @@ -3123,13 +3468,16 @@ CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,                                    ArrayRef<llvm::Value *> Args,                                    const Twine &Name) {    llvm::BasicBlock *InvokeDest = getInvokeDest(); +  SmallVector<llvm::OperandBundleDef, 1> BundleList; +  getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);    llvm::Instruction *Inst;    if (!InvokeDest) -    Inst = Builder.CreateCall(Callee, Args, Name); +    Inst = Builder.CreateCall(Callee, Args, BundleList, Name);    else {      llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); -    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name); +    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList, +                                Name);      EmitBlock(ContBB);    } @@ -3208,7 +3556,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,    // alloca to hold the result, unless one is given to us.    Address SRetPtr = Address::invalid();    size_t UnusedReturnSize = 0; -  if (RetAI.isIndirect() || RetAI.isInAlloca()) { +  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {      if (!ReturnValue.isNull()) {        SRetPtr = ReturnValue.getValue();      } else { @@ -3222,12 +3570,15 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,      }      if (IRFunctionArgs.hasSRetArg()) {        IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer(); -    } else { +    } else if (RetAI.isInAlloca()) {        Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());        Builder.CreateStore(SRetPtr.getPointer(), Addr);      }    } +  Address swiftErrorTemp = Address::invalid(); +  Address swiftErrorArg = Address::invalid(); +    assert(CallInfo.arg_size() == CallArgs.size() &&           "Mismatch between function signature & arguments.");    unsigned ArgNo = 0; @@ -3334,6 +3685,25 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,          else            V = Builder.CreateLoad(RV.getAggregateAddress()); +        // Implement swifterror by copying into a new swifterror argument. +        // We'll write back in the normal path out of the call. +        if (CallInfo.getExtParameterInfo(ArgNo).getABI() +              == ParameterABI::SwiftErrorResult) { +          assert(!swiftErrorTemp.isValid() && "multiple swifterror args"); + +          QualType pointeeTy = I->Ty->getPointeeType(); +          swiftErrorArg = +            Address(V, getContext().getTypeAlignInChars(pointeeTy)); + +          swiftErrorTemp = +            CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); +          V = swiftErrorTemp.getPointer(); +          cast<llvm::AllocaInst>(V)->setSwiftError(true); + +          llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg); +          Builder.CreateStore(errorValue, swiftErrorTemp); +        } +          // We might have to widen integers, but we should never truncate.          
if (ArgInfo.getCoerceToType() != V->getType() &&              V->getType()->isIntegerTy()) @@ -3344,6 +3714,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,          if (FirstIRArg < IRFuncTy->getNumParams() &&              V->getType() != IRFuncTy->getParamType(FirstIRArg))            V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); +          IRCallArgs[FirstIRArg] = V;          break;        } @@ -3402,6 +3773,51 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,        break;      } +    case ABIArgInfo::CoerceAndExpand: { +      auto coercionType = ArgInfo.getCoerceAndExpandType(); +      auto layout = CGM.getDataLayout().getStructLayout(coercionType); + +      llvm::Value *tempSize = nullptr; +      Address addr = Address::invalid(); +      if (RV.isAggregate()) { +        addr = RV.getAggregateAddress(); +      } else { +        assert(RV.isScalar()); // complex should always just be direct + +        llvm::Type *scalarType = RV.getScalarVal()->getType(); +        auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType); +        auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType); + +        tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize); + +        // Materialize to a temporary. +        addr = CreateTempAlloca(RV.getScalarVal()->getType(), +                 CharUnits::fromQuantity(std::max(layout->getAlignment(), +                                                  scalarAlign))); +        EmitLifetimeStart(scalarSize, addr.getPointer()); + +        Builder.CreateStore(RV.getScalarVal(), addr); +      } + +      addr = Builder.CreateElementBitCast(addr, coercionType); + +      unsigned IRArgPos = FirstIRArg; +      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { +        llvm::Type *eltType = coercionType->getElementType(i); +        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; +        Address eltAddr = Builder.CreateStructGEP(addr, i, layout); +        llvm::Value *elt = Builder.CreateLoad(eltAddr); +        IRCallArgs[IRArgPos++] = elt; +      } +      assert(IRArgPos == FirstIRArg + NumIRArgs); + +      if (tempSize) { +        EmitLifetimeEnd(tempSize, addr.getPointer()); +      } + +      break; +    } +      case ABIArgInfo::Expand:        unsigned IRArgPos = FirstIRArg;        ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos); @@ -3541,6 +3957,13 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,    CS.setAttributes(Attrs);    CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); +  // Insert instrumentation or attach profile metadata at indirect call sites. +  // For more details, see the comment before the definition of +  // IPVK_IndirectCallTarget in InstrProfData.inc. +  if (!CS.getCalledFunction()) +    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget, +                     CS.getInstruction(), Callee); +    // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC    // optimizer it can aggressively ignore unwind edges.    if (CGM.getLangOpts().ObjCAutoRefCount) @@ -3567,9 +3990,15 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,    }    llvm::Instruction *CI = CS.getInstruction(); -  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy()) +  if (!CI->getType()->isVoidTy())      CI->setName("call"); +  // Perform the swifterror writeback. 
+  if (swiftErrorTemp.isValid()) { +    llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp); +    Builder.CreateStore(errorResult, swiftErrorArg); +  } +    // Emit any writebacks immediately.  Arguably this should happen    // after any return-value munging.    if (CallArgs.hasWritebacks()) @@ -3587,6 +4016,31 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,    RValue Ret = [&] {      switch (RetAI.getKind()) { +    case ABIArgInfo::CoerceAndExpand: { +      auto coercionType = RetAI.getCoerceAndExpandType(); +      auto layout = CGM.getDataLayout().getStructLayout(coercionType); + +      Address addr = SRetPtr; +      addr = Builder.CreateElementBitCast(addr, coercionType); + +      assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType()); +      bool requiresExtract = isa<llvm::StructType>(CI->getType()); + +      unsigned unpaddedIndex = 0; +      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { +        llvm::Type *eltType = coercionType->getElementType(i); +        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; +        Address eltAddr = Builder.CreateStructGEP(addr, i, layout); +        llvm::Value *elt = CI; +        if (requiresExtract) +          elt = Builder.CreateExtractValue(elt, unpaddedIndex++); +        else +          assert(unpaddedIndex == 0); +        Builder.CreateStore(elt, eltAddr); +      } +      // FALLTHROUGH +    } +      case ABIArgInfo::InAlloca:      case ABIArgInfo::Indirect: {        RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());  | 
