Diffstat (limited to 'contrib/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp')
-rw-r--r-- contrib/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp | 154
1 file changed, 127 insertions(+), 27 deletions(-)
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 724ecedf3b3f..a5d8205a34a8 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -1,9 +1,8 @@
//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -13,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
+#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -21,13 +21,17 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
+#define DEBUG_TYPE "call-lowering"
+
using namespace llvm;
void CallLowering::anchor() {}
-bool CallLowering::lowerCall(
- MachineIRBuilder &MIRBuilder, ImmutableCallSite CS, unsigned ResReg,
- ArrayRef<unsigned> ArgRegs, std::function<unsigned()> GetCalleeReg) const {
+bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
+ ArrayRef<Register> ResRegs,
+ ArrayRef<ArrayRef<Register>> ArgRegs,
+ Register SwiftErrorVReg,
+ std::function<unsigned()> GetCalleeReg) const {
auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();
// First step is to marshall all the function's parameters into the correct
@@ -40,8 +44,8 @@ bool CallLowering::lowerCall(
ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
i < NumFixedArgs};
setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CS);
- // We don't currently support swifterror or swiftself args.
- if (OrigArg.Flags.isSwiftError() || OrigArg.Flags.isSwiftSelf())
+ // We don't currently support swiftself args.
+ if (OrigArg.Flags.isSwiftSelf())
return false;
OrigArgs.push_back(OrigArg);
++i;
@@ -53,11 +57,12 @@ bool CallLowering::lowerCall(
else
Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
- ArgInfo OrigRet{ResReg, CS.getType(), ISD::ArgFlagsTy{}};
+ ArgInfo OrigRet{ResRegs, CS.getType(), ISD::ArgFlagsTy{}};
if (!OrigRet.Ty->isVoidTy())
setArgFlags(OrigRet, AttributeList::ReturnIndex, DL, CS);
- return lowerCall(MIRBuilder, CS.getCallingConv(), Callee, OrigRet, OrigArgs);
+ return lowerCall(MIRBuilder, CS.getCallingConv(), Callee, OrigRet, OrigArgs,
+ SwiftErrorVReg);
}
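
The rewritten wrapper now takes one register per split value of the result, a register list per IR argument, and the swifterror vreg, and forwards them to the target's lowerCall. A minimal caller-side sketch, assuming hypothetical IRTranslator-style helpers (getOrCreateVRegs/getOrCreateVReg); not part of this patch:

    // One vreg per split value of the call result; one list per IR argument.
    ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
    SmallVector<ArrayRef<Register>, 8> ArgRegs;
    for (auto &Arg : CI.arg_operands())
      ArgRegs.push_back(getOrCreateVRegs(*Arg));
    Register SwiftErrorVReg = 0; // left at 0 unless the call uses swifterror
    CLI->lowerCall(MIRBuilder, &CI, ResRegs, ArgRegs, SwiftErrorVReg,
                   [&]() { return getOrCreateVReg(*CI.getCalledValue()); });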
template <typename FuncInfoTy>
@@ -84,7 +89,10 @@ void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
if (Arg.Flags.isByVal() || Arg.Flags.isInAlloca()) {
Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();
- Arg.Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
+
+ auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
+ Arg.Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));
+
// For ByVal, alignment should be passed from FE. BE will guess if
// this info is not there but there are cases it cannot get right.
unsigned FrameAlign;
@@ -109,21 +117,78 @@ CallLowering::setArgFlags<CallInst>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
const DataLayout &DL,
const CallInst &FuncInfo) const;
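
The byval handling above now prefers the type carried by the byval attribute and only falls back to the pointer's element type. A hedged restatement of the new query, reusing the ElementTy from that hunk:

    // IR such as:  define void @f(%struct.S* byval(%struct.S) align 4 %p)
    // carries %struct.S on the attribute itself, so the size no longer
    // has to come from the pointee type:
    Type *ByValTy =
        Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    uint64_t Size = DL.getTypeAllocSize(ByValTy ? ByValTy : ElementTy);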
+Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
+ MachineIRBuilder &MIRBuilder) const {
+ assert(SrcRegs.size() > 1 && "Nothing to pack");
+
+ const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
+ MachineRegisterInfo *MRI = MIRBuilder.getMRI();
+
+ LLT PackedLLT = getLLTForType(*PackedTy, DL);
+
+ SmallVector<LLT, 8> LLTs;
+ SmallVector<uint64_t, 8> Offsets;
+ computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
+ assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch");
+
+ Register Dst = MRI->createGenericVirtualRegister(PackedLLT);
+ MIRBuilder.buildUndef(Dst);
+ for (unsigned i = 0; i < SrcRegs.size(); ++i) {
+ Register NewDst = MRI->createGenericVirtualRegister(PackedLLT);
+ MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);
+ Dst = NewDst;
+ }
+
+ return Dst;
+}
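
packRegs builds the packed value by chaining one G_INSERT per source register onto a G_IMPLICIT_DEF; aggregates lower to a single scalar LLT, so {i64, i64} packs into an s128. A usage sketch with hypothetical Lo/Hi registers:

    // Emits approximately:
    //   %p0:_(s128) = G_IMPLICIT_DEF
    //   %p1:_(s128) = G_INSERT %p0, %Lo(s64), 0
    //   %p2:_(s128) = G_INSERT %p1, %Hi(s64), 64
    Register Packed = packRegs({Lo, Hi}, StructTy, MIRBuilder);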
+
+void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
+ Type *PackedTy,
+ MachineIRBuilder &MIRBuilder) const {
+ assert(DstRegs.size() > 1 && "Nothing to unpack");
+
+ const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
+
+ SmallVector<LLT, 8> LLTs;
+ SmallVector<uint64_t, 8> Offsets;
+ computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
+ assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");
+
+ for (unsigned i = 0; i < DstRegs.size(); ++i)
+ MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
+}
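
unpackRegs is the inverse: one G_EXTRACT per destination register, at the offsets computeValueLLTs reports. The same hypothetical {i64, i64} layout:

    // Emits approximately:
    //   %Lo:_(s64) = G_EXTRACT %Packed(s128), 0
    //   %Hi:_(s64) = G_EXTRACT %Packed(s128), 64
    unpackRegs({Lo, Hi}, Packed, StructTy, MIRBuilder);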
+
bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
ArrayRef<ArgInfo> Args,
ValueHandler &Handler) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = MF.getFunction();
- const DataLayout &DL = F.getParent()->getDataLayout();
-
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
+ return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
+}
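
Keeping this thin wrapper while exposing an overload that takes the CCState lets a target seed the state before the shared assignment loop runs. A speculative sketch (ReservedBytes is a made-up target parameter):

    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                   F.getContext());
    CCInfo.AllocateStack(ReservedBytes, 8); // e.g. a reserved incoming area
    if (!handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler))
      return false;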
+
+bool CallLowering::handleAssignments(CCState &CCInfo,
+ SmallVectorImpl<CCValAssign> &ArgLocs,
+ MachineIRBuilder &MIRBuilder,
+ ArrayRef<ArgInfo> Args,
+ ValueHandler &Handler) const {
+ MachineFunction &MF = MIRBuilder.getMF();
+ const Function &F = MF.getFunction();
+ const DataLayout &DL = F.getParent()->getDataLayout();
unsigned NumArgs = Args.size();
for (unsigned i = 0; i != NumArgs; ++i) {
MVT CurVT = MVT::getVT(Args[i].Ty);
- if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo))
- return false;
+ if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo)) {
+ // Try to use the register type if we couldn't assign the VT.
+ if (!Handler.isArgumentHandler() || !CurVT.isValid())
+ return false;
+ CurVT = TLI->getRegisterTypeForCallingConv(
+ F.getContext(), F.getCallingConv(), EVT(CurVT));
+ if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo))
+ return false;
+ }
}
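
The loop above now retries a VT the calling-convention function rejects with the register type the target uses for that convention. An illustrative query; the concrete promotion is target-dependent:

    // If assignArg cannot place v2i32 directly, ask the target what it
    // passes such values as under this convention:
    MVT RegVT = TLI->getRegisterTypeForCallingConv(
        F.getContext(), F.getCallingConv(), EVT(MVT::v2i32));
    // A target that widens small vectors might return v4i32 here, and the
    // loop re-runs assignArg with that type.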
for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
@@ -137,16 +202,49 @@ bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
continue;
}
- if (VA.isRegLoc())
- Handler.assignValueToReg(Args[i].Reg, VA.getLocReg(), VA);
- else if (VA.isMemLoc()) {
- unsigned Size = VA.getValVT() == MVT::iPTR
- ? DL.getPointerSize()
- : alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
+ assert(Args[i].Regs.size() == 1 &&
+ "Can't handle multiple virtual regs yet");
+
+ // FIXME: Pack registers if we have more than one.
+ Register ArgReg = Args[i].Regs[0];
+
+ if (VA.isRegLoc()) {
+ MVT OrigVT = MVT::getVT(Args[i].Ty);
+ MVT VAVT = VA.getValVT();
+ if (Handler.isArgumentHandler() && VAVT != OrigVT) {
+ if (VAVT.getSizeInBits() < OrigVT.getSizeInBits())
+ return false; // Can't handle this type of arg yet.
+ const LLT VATy(VAVT);
+ Register NewReg =
+ MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
+ Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
+ // If it's a vector type, we either need to truncate the elements
+ // or do an unmerge to get the lower block of elements.
+ if (VATy.isVector() &&
+ VATy.getNumElements() > OrigVT.getVectorNumElements()) {
+ const LLT OrigTy(OrigVT);
+ // Just handle the case where the VA type is 2 * original type.
+ if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
+ LLVM_DEBUG(dbgs()
+ << "Incoming promoted vector arg has too many elts\n");
+ return false;
+ }
+ auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
+ MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
+ } else {
+ MIRBuilder.buildTrunc(ArgReg, {NewReg});
+ }
+ } else {
+ Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
+ }
+ } else if (VA.isMemLoc()) {
+ MVT VT = MVT::getVT(Args[i].Ty);
+ unsigned Size = VT == MVT::iPTR ? DL.getPointerSize()
+ : alignTo(VT.getSizeInBits(), 8) / 8;
unsigned Offset = VA.getLocMemOffset();
MachinePointerInfo MPO;
- unsigned StackAddr = Handler.getStackAddress(Size, Offset, MPO);
- Handler.assignValueToAddress(Args[i].Reg, StackAddr, Size, MPO, VA);
+ Register StackAddr = Handler.getStackAddress(Size, Offset, MPO);
+ Handler.assignValueToAddress(ArgReg, StackAddr, Size, MPO, VA);
} else {
// FIXME: Support byvals and other weirdness
return false;
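
When an incoming argument's CC-assigned vector type has twice the elements of the IR type, the register path above copies the full location into a fresh vreg, unmerges it, and keeps the low half. A minimal sketch of that case, assuming a <2 x s32> argument assigned to a <4 x s32> location:

    const LLT OrigTy = LLT::vector(2, 32); // type of the IR argument
    const LLT VATy = LLT::vector(4, 32);   // type the CC actually assigned
    Register Full = MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
    Handler.assignValueToReg(Full, VA.getLocReg(), VA); // e.g. COPY from $q0
    auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, Full);
    MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0)); // keep low <2 x s32>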
@@ -155,9 +253,11 @@ bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
return true;
}
-unsigned CallLowering::ValueHandler::extendRegister(unsigned ValReg,
+Register CallLowering::ValueHandler::extendRegister(Register ValReg,
CCValAssign &VA) {
LLT LocTy{VA.getLocVT()};
+ if (LocTy.getSizeInBits() == MRI.getType(ValReg).getSizeInBits())
+ return ValReg;
switch (VA.getLocInfo()) {
default: break;
case CCValAssign::Full:
@@ -170,12 +270,12 @@ unsigned CallLowering::ValueHandler::extendRegister(unsigned ValReg,
return MIB->getOperand(0).getReg();
}
case CCValAssign::SExt: {
- unsigned NewReg = MRI.createGenericVirtualRegister(LocTy);
+ Register NewReg = MRI.createGenericVirtualRegister(LocTy);
MIRBuilder.buildSExt(NewReg, ValReg);
return NewReg;
}
case CCValAssign::ZExt: {
- unsigned NewReg = MRI.createGenericVirtualRegister(LocTy);
+ Register NewReg = MRI.createGenericVirtualRegister(LocTy);
MIRBuilder.buildZExt(NewReg, ValReg);
return NewReg;
}
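
extendRegister now short-circuits when the value already has the location's width instead of emitting a no-op extend. A worked example of the ZExt case, inside a handler, with a hypothetical s8 value bound for an i32 location:

    // MRI.getType(ValReg) == s8, VA.getLocVT() == i32, LocInfo == ZExt:
    //   %ext:_(s32) = G_ZEXT %val(s8)   ; NewReg is what gets returned
    // With an s32 ValReg the new size check returns ValReg unchanged.
    Register Ext = extendRegister(ValReg, VA);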