Diffstat (limited to 'llvm/lib/Target/AVR/AVRISelLowering.cpp')
-rw-r--r--  llvm/lib/Target/AVR/AVRISelLowering.cpp  |  411
1 file changed, 187 insertions(+), 224 deletions(-)
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index 880688807702d..bf9b32e1278e3 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -14,6 +14,7 @@
#include "AVRISelLowering.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -151,10 +152,12 @@ AVRTargetLowering::AVRTargetLowering(const AVRTargetMachine &TM,
setOperationAction(ISD::SREM, MVT::i16, Expand);
// Make division and modulus custom
- for (MVT VT : MVT::integer_valuetypes()) {
- setOperationAction(ISD::UDIVREM, VT, Custom);
- setOperationAction(ISD::SDIVREM, VT, Custom);
- }
+ setOperationAction(ISD::UDIVREM, MVT::i8, Custom);
+ setOperationAction(ISD::UDIVREM, MVT::i16, Custom);
+ setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
+ setOperationAction(ISD::SDIVREM, MVT::i8, Custom);
+ setOperationAction(ISD::SDIVREM, MVT::i16, Custom);
+ setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
// Do not use MUL. The AVR instructions are closer to SMUL_LOHI &co.
setOperationAction(ISD::MUL, MVT::i8, Expand);
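Marking UDIVREM/SDIVREM as Custom (rather than Expand) lets the DAG combiner fold a division and a remainder of the same operands into one node, which LowerDivRem then turns into a single divmod libcall. A minimal C-level sketch of the pattern that benefits (illustrative only, not part of the patch):

    #include <stdint.h>
    /* Both operations below can feed one UDIVREM node after lowering,
       so in principle a single __udivmodhi4 call covers the pair. */
    uint16_t quot(uint16_t x, uint16_t y, uint16_t *rem) {
      *rem = x % y;
      return x / y;
    }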
@@ -190,41 +193,29 @@ AVRTargetLowering::AVRTargetLowering(const AVRTargetMachine &TM,
// improvements in how we treat 16-bit "registers" to be feasible.
}
- // Division rtlib functions (not supported)
+ // Division rtlib functions (not supported); use the divmod functions instead
setLibcallName(RTLIB::SDIV_I8, nullptr);
setLibcallName(RTLIB::SDIV_I16, nullptr);
setLibcallName(RTLIB::SDIV_I32, nullptr);
- setLibcallName(RTLIB::SDIV_I64, nullptr);
- setLibcallName(RTLIB::SDIV_I128, nullptr);
setLibcallName(RTLIB::UDIV_I8, nullptr);
setLibcallName(RTLIB::UDIV_I16, nullptr);
setLibcallName(RTLIB::UDIV_I32, nullptr);
- setLibcallName(RTLIB::UDIV_I64, nullptr);
- setLibcallName(RTLIB::UDIV_I128, nullptr);
- // Modulus rtlib functions (not supported)
+ // Modulus rtlib functions (not supported); use the divmod functions instead
setLibcallName(RTLIB::SREM_I8, nullptr);
setLibcallName(RTLIB::SREM_I16, nullptr);
setLibcallName(RTLIB::SREM_I32, nullptr);
- setLibcallName(RTLIB::SREM_I64, nullptr);
- setLibcallName(RTLIB::SREM_I128, nullptr);
setLibcallName(RTLIB::UREM_I8, nullptr);
setLibcallName(RTLIB::UREM_I16, nullptr);
setLibcallName(RTLIB::UREM_I32, nullptr);
- setLibcallName(RTLIB::UREM_I64, nullptr);
- setLibcallName(RTLIB::UREM_I128, nullptr);
// Division and modulus rtlib functions
setLibcallName(RTLIB::SDIVREM_I8, "__divmodqi4");
setLibcallName(RTLIB::SDIVREM_I16, "__divmodhi4");
setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
- setLibcallName(RTLIB::SDIVREM_I64, "__divmoddi4");
- setLibcallName(RTLIB::SDIVREM_I128, "__divmodti4");
setLibcallName(RTLIB::UDIVREM_I8, "__udivmodqi4");
setLibcallName(RTLIB::UDIVREM_I16, "__udivmodhi4");
setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
- setLibcallName(RTLIB::UDIVREM_I64, "__udivmoddi4");
- setLibcallName(RTLIB::UDIVREM_I128, "__udivmodti4");
// Several of the runtime library functions use a special calling conv
setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::AVR_BUILTIN);
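The divmod helpers return the quotient and the remainder together, which is why the plain div/rem entries above are cleared. A hypothetical C model of the 16-bit helper (the real __divmodhi4 is libgcc assembly that returns both values in fixed register pairs under the AVR_BUILTIN convention configured here):

    #include <stdint.h>
    typedef struct { int16_t quot; int16_t rem; } divmod16_t; /* model only */
    static divmod16_t divmodhi4_model(int16_t num, int16_t den) {
      /* den != 0 assumed; a sketch of the semantics, not the real routine */
      divmod16_t r = { (int16_t)(num / den), (int16_t)(num % den) };
      return r;
    }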
@@ -259,6 +250,8 @@ const char *AVRTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE(ASR);
NODE(LSLLOOP);
NODE(LSRLOOP);
+ NODE(ROLLOOP);
+ NODE(RORLOOP);
NODE(ASRLOOP);
NODE(BRCOND);
NODE(CMP);
@@ -282,6 +275,8 @@ SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
const SDNode *N = Op.getNode();
EVT VT = Op.getValueType();
SDLoc dl(N);
+ assert(isPowerOf2_32(VT.getSizeInBits()) &&
+ "Expected the type size to be a power of 2");
// Expand non-constant shifts to loops.
if (!isa<ConstantSDNode>(N->getOperand(1))) {
@@ -294,12 +289,20 @@ SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
case ISD::SRL:
return DAG.getNode(AVRISD::LSRLOOP, dl, VT, N->getOperand(0),
N->getOperand(1));
- case ISD::ROTL:
- return DAG.getNode(AVRISD::ROLLOOP, dl, VT, N->getOperand(0),
- N->getOperand(1));
- case ISD::ROTR:
- return DAG.getNode(AVRISD::RORLOOP, dl, VT, N->getOperand(0),
- N->getOperand(1));
+ case ISD::ROTL: {
+ SDValue Amt = N->getOperand(1);
+ EVT AmtVT = Amt.getValueType();
+ Amt = DAG.getNode(ISD::AND, dl, AmtVT, Amt,
+ DAG.getConstant(VT.getSizeInBits() - 1, dl, AmtVT));
+ return DAG.getNode(AVRISD::ROLLOOP, dl, VT, N->getOperand(0), Amt);
+ }
+ case ISD::ROTR: {
+ SDValue Amt = N->getOperand(1);
+ EVT AmtVT = Amt.getValueType();
+ Amt = DAG.getNode(ISD::AND, dl, AmtVT, Amt,
+ DAG.getConstant(VT.getSizeInBits() - 1, dl, AmtVT));
+ return DAG.getNode(AVRISD::RORLOOP, dl, VT, N->getOperand(0), Amt);
+ }
case ISD::SRA:
return DAG.getNode(AVRISD::ASRLOOP, dl, VT, N->getOperand(0),
N->getOperand(1));
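The AND with SizeInBits - 1 makes a dynamic rotate amount wrap modulo the bit width (this is why the power-of-2 assertion above must hold), keeping the ROLLOOP/RORLOOP iteration count in range. A reference sketch of the semantics being enforced, assuming an 8-bit value (illustrative, not part of the patch):

    #include <cstdint>
    // Rotate left with the amount reduced mod 8, mirroring the
    // DAG-level AND with VT.getSizeInBits() - 1.
    uint8_t rotl8(uint8_t x, unsigned amt) {
      amt &= 7;
      return (uint8_t)((x << amt) | (x >> ((8 - amt) & 7)));
    }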
@@ -315,9 +318,11 @@ SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
break;
case ISD::ROTL:
Opc8 = AVRISD::ROL;
+ ShiftAmount = ShiftAmount % VT.getSizeInBits();
break;
case ISD::ROTR:
Opc8 = AVRISD::ROR;
+ ShiftAmount = ShiftAmount % VT.getSizeInBits();
break;
case ISD::SRL:
Opc8 = AVRISD::LSR;
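For constant amounts the same wrap-around happens at compile time, so e.g. an i8 ROTL by 9 lowers as a single-bit ROL. After this switch, LowerShifts expands the operation into one 8-bit node per bit of the (now reduced) amount, roughly:

    // One Opc8 node per bit of the reduced shift/rotate amount:
    while (ShiftAmount--)
      Victim = DAG.getNode(Opc8, dl, VT, Victim);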
@@ -357,12 +362,6 @@ SDValue AVRTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
case MVT::i32:
LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
break;
- case MVT::i64:
- LC = IsSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64;
- break;
- case MVT::i128:
- LC = IsSigned ? RTLIB::SDIVREM_I128 : RTLIB::UDIVREM_I128;
- break;
}
SDValue InChain = DAG.getEntryNode();
@@ -883,173 +882,145 @@ bool AVRTargetLowering::isOffsetFoldingLegal(
#include "AVRGenCallingConv.inc"
-/// For each argument in a function store the number of pieces it is composed
-/// of.
-static void parseFunctionArgs(const SmallVectorImpl<ISD::InputArg> &Ins,
- SmallVectorImpl<unsigned> &Out) {
- for (const ISD::InputArg &Arg : Ins) {
- if(Arg.PartOffset > 0) continue;
- unsigned Bytes = ((Arg.ArgVT.getSizeInBits()) + 7) / 8;
-
- Out.push_back((Bytes + 1) / 2);
- }
-}
-
-/// For external symbols there is no function prototype information so we
-/// have to rely directly on argument sizes.
-static void parseExternFuncCallArgs(const SmallVectorImpl<ISD::OutputArg> &In,
- SmallVectorImpl<unsigned> &Out) {
- for (unsigned i = 0, e = In.size(); i != e;) {
- unsigned Size = 0;
- unsigned Offset = 0;
- while ((i != e) && (In[i].PartOffset == Offset)) {
- Offset += In[i].VT.getStoreSize();
- ++i;
- ++Size;
- }
- Out.push_back(Size);
- }
-}
-
-static StringRef getFunctionName(TargetLowering::CallLoweringInfo &CLI) {
- SDValue Callee = CLI.Callee;
-
- if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- return G->getSymbol();
- }
-
- if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- return G->getGlobal()->getName();
- }
-
- llvm_unreachable("don't know how to get the name for this callee");
-}
+/// Registers for calling conventions, ordered in reverse as required by the
+/// ABI. Both arrays must be of the same length.
+static const MCPhysReg RegList8[] = {
+ AVR::R25, AVR::R24, AVR::R23, AVR::R22, AVR::R21, AVR::R20,
+ AVR::R19, AVR::R18, AVR::R17, AVR::R16, AVR::R15, AVR::R14,
+ AVR::R13, AVR::R12, AVR::R11, AVR::R10, AVR::R9, AVR::R8};
+static const MCPhysReg RegList16[] = {
+ AVR::R26R25, AVR::R25R24, AVR::R24R23, AVR::R23R22,
+ AVR::R22R21, AVR::R21R20, AVR::R20R19, AVR::R19R18,
+ AVR::R18R17, AVR::R17R16, AVR::R16R15, AVR::R15R14,
+ AVR::R14R13, AVR::R13R12, AVR::R12R11, AVR::R11R10,
+ AVR::R10R9, AVR::R9R8};
+
+static_assert(array_lengthof(RegList8) == array_lengthof(RegList16),
+ "8-bit and 16-bit register arrays must be of equal length");
/// Analyze incoming and outgoing function arguments. We need custom C++ code
-/// to handle special constraints in the ABI like reversing the order of the
-/// pieces of splitted arguments. In addition, all pieces of a certain argument
-/// have to be passed either using registers or the stack but never mixing both.
-static void analyzeStandardArguments(TargetLowering::CallLoweringInfo *CLI,
- const Function *F, const DataLayout *TD,
- const SmallVectorImpl<ISD::OutputArg> *Outs,
- const SmallVectorImpl<ISD::InputArg> *Ins,
- CallingConv::ID CallConv,
- SmallVectorImpl<CCValAssign> &ArgLocs,
- CCState &CCInfo, bool IsCall, bool IsVarArg) {
- static const MCPhysReg RegList8[] = {AVR::R24, AVR::R22, AVR::R20,
- AVR::R18, AVR::R16, AVR::R14,
- AVR::R12, AVR::R10, AVR::R8};
- static const MCPhysReg RegList16[] = {AVR::R25R24, AVR::R23R22, AVR::R21R20,
- AVR::R19R18, AVR::R17R16, AVR::R15R14,
- AVR::R13R12, AVR::R11R10, AVR::R9R8};
- if (IsVarArg) {
- // Variadic functions do not need all the analysis below.
- if (IsCall) {
- CCInfo.AnalyzeCallOperands(*Outs, ArgCC_AVR_Vararg);
- } else {
- CCInfo.AnalyzeFormalArguments(*Ins, ArgCC_AVR_Vararg);
+/// to handle special constraints in the ABI.
+/// In addition, every piece of a given argument has to be passed entirely in
+/// registers or entirely on the stack, never a mix of both.
+template <typename ArgT>
+static void
+analyzeArguments(TargetLowering::CallLoweringInfo *CLI, const Function *F,
+ const DataLayout *TD, const SmallVectorImpl<ArgT> &Args,
+ SmallVectorImpl<CCValAssign> &ArgLocs, CCState &CCInfo) {
+ unsigned NumArgs = Args.size();
+ // This is the index of the last used register, in RegList*.
+ // -1 means R26 (R26 is never actually used in CC).
+ int RegLastIdx = -1;
+ // Once an argument is passed on the stack, all remaining arguments are too.
+ bool UseStack = false;
+ for (unsigned i = 0; i != NumArgs;) {
+ MVT VT = Args[i].VT;
+ // We have to count the number of bytes for each function argument, that is
+ // all Args entries sharing the same OrigArgIndex. This matters when the
+ // function takes an aggregate type, which is split into several pieces.
+ // The current argument occupies the half-open range [i..j).
+ unsigned ArgIndex = Args[i].OrigArgIndex;
+ unsigned TotalBytes = VT.getStoreSize();
+ unsigned j = i + 1;
+ for (; j != NumArgs; ++j) {
+ if (Args[j].OrigArgIndex != ArgIndex)
+ break;
+ TotalBytes += Args[j].VT.getStoreSize();
}
- return;
- }
-
- // Fill in the Args array which will contain original argument sizes.
- SmallVector<unsigned, 8> Args;
- if (IsCall) {
- parseExternFuncCallArgs(*Outs, Args);
- } else {
- assert(F != nullptr && "function should not be null");
- parseFunctionArgs(*Ins, Args);
- }
-
- unsigned RegsLeft = array_lengthof(RegList8), ValNo = 0;
- // Variadic functions always use the stack.
- bool UsesStack = false;
- for (unsigned i = 0, pos = 0, e = Args.size(); i != e; ++i) {
- unsigned Size = Args[i];
-
- // If we have a zero-sized argument, don't attempt to lower it.
- // AVR-GCC does not support zero-sized arguments and so we need not
- // worry about ABI compatibility.
- if (Size == 0) continue;
-
- MVT LocVT = (IsCall) ? (*Outs)[pos].VT : (*Ins)[pos].VT;
-
- // If we have plenty of regs to pass the whole argument do it.
- if (!UsesStack && (Size <= RegsLeft)) {
- const MCPhysReg *RegList = (LocVT == MVT::i16) ? RegList16 : RegList8;
+ // Round up to an even number of bytes.
+ TotalBytes = alignTo(TotalBytes, 2);
+ // Skip zero-sized arguments.
+ if (TotalBytes == 0)
+ continue;
+ // The index of the first register to be used
+ unsigned RegIdx = RegLastIdx + TotalBytes;
+ RegLastIdx = RegIdx;
+ // If there are not enough registers, use the stack
+ if (RegIdx >= array_lengthof(RegList8)) {
+ UseStack = true;
+ }
+ for (; i != j; ++i) {
+ MVT VT = Args[i].VT;
- for (unsigned j = 0; j != Size; ++j) {
- unsigned Reg = CCInfo.AllocateReg(
- ArrayRef<MCPhysReg>(RegList, array_lengthof(RegList8)));
+ if (UseStack) {
+ auto evt = EVT(VT).getTypeForEVT(CCInfo.getContext());
+ unsigned Offset = CCInfo.AllocateStack(TD->getTypeAllocSize(evt),
+ TD->getABITypeAlign(evt));
CCInfo.addLoc(
- CCValAssign::getReg(ValNo++, LocVT, Reg, LocVT, CCValAssign::Full));
- --RegsLeft;
- }
-
- // Reverse the order of the pieces to agree with the "big endian" format
- // required in the calling convention ABI.
- std::reverse(ArgLocs.begin() + pos, ArgLocs.begin() + pos + Size);
- } else {
- // Pass the rest of arguments using the stack.
- UsesStack = true;
- for (unsigned j = 0; j != Size; ++j) {
- unsigned Offset = CCInfo.AllocateStack(
- TD->getTypeAllocSize(EVT(LocVT).getTypeForEVT(CCInfo.getContext())),
- TD->getABITypeAlignment(
- EVT(LocVT).getTypeForEVT(CCInfo.getContext())));
- CCInfo.addLoc(CCValAssign::getMem(ValNo++, LocVT, Offset, LocVT,
- CCValAssign::Full));
+ CCValAssign::getMem(i, VT, Offset, VT, CCValAssign::Full));
+ } else {
+ unsigned Reg;
+ if (VT == MVT::i8) {
+ Reg = CCInfo.AllocateReg(RegList8[RegIdx]);
+ } else if (VT == MVT::i16) {
+ Reg = CCInfo.AllocateReg(RegList16[RegIdx]);
+ } else {
+ llvm_unreachable(
+ "calling convention can only manage i8 and i16 types");
+ }
+ assert(Reg && "register not available in calling convention");
+ CCInfo.addLoc(CCValAssign::getReg(i, VT, Reg, VT, CCValAssign::Full));
+ // Registers inside a particular argument are sorted in increasing order
+ // (remember the array is reversed).
+ RegIdx -= VT.getStoreSize();
}
}
- pos += Size;
}
}
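A worked example of the indexing above (not part of the patch), for a call such as int16_t f(int8_t a, int32_t b):

    a: TotalBytes = 1, rounded up to 2; RegIdx = -1 + 2 = 1 -> RegList8[1] = R24
    b: TotalBytes = 4;                  RegIdx = 1 + 4 = 5
       low i16 piece  -> RegList16[5] = R21R20
       high i16 piece -> RegList16[3] = R23R22  (RegIdx -= 2 between pieces)

so a lands in R24 and b occupies R23..R20 with its low byte in R20, matching the avr-gcc convention encoded by the reversed register arrays.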
-static void analyzeBuiltinArguments(TargetLowering::CallLoweringInfo &CLI,
- const Function *F, const DataLayout *TD,
- const SmallVectorImpl<ISD::OutputArg> *Outs,
- const SmallVectorImpl<ISD::InputArg> *Ins,
- CallingConv::ID CallConv,
- SmallVectorImpl<CCValAssign> &ArgLocs,
- CCState &CCInfo, bool IsCall, bool IsVarArg) {
- StringRef FuncName = getFunctionName(CLI);
-
- if (FuncName.startswith("__udivmod") || FuncName.startswith("__divmod")) {
- CCInfo.AnalyzeCallOperands(*Outs, ArgCC_AVR_BUILTIN_DIV);
- } else {
- analyzeStandardArguments(&CLI, F, TD, Outs, Ins,
- CallConv, ArgLocs, CCInfo,
- IsCall, IsVarArg);
+/// Count the total number of bytes needed to pass or return these arguments.
+template <typename ArgT>
+static unsigned getTotalArgumentsSizeInBytes(const SmallVectorImpl<ArgT> &Args) {
+ unsigned TotalBytes = 0;
+
+ for (const ArgT& Arg : Args) {
+ TotalBytes += Arg.VT.getStoreSize();
}
+ return TotalBytes;
}
-static void analyzeArguments(TargetLowering::CallLoweringInfo *CLI,
- const Function *F, const DataLayout *TD,
- const SmallVectorImpl<ISD::OutputArg> *Outs,
- const SmallVectorImpl<ISD::InputArg> *Ins,
- CallingConv::ID CallConv,
- SmallVectorImpl<CCValAssign> &ArgLocs,
- CCState &CCInfo, bool IsCall, bool IsVarArg) {
- switch (CallConv) {
- case CallingConv::AVR_BUILTIN: {
- analyzeBuiltinArguments(*CLI, F, TD, Outs, Ins,
- CallConv, ArgLocs, CCInfo,
- IsCall, IsVarArg);
- return;
- }
- default: {
- analyzeStandardArguments(CLI, F, TD, Outs, Ins,
- CallConv, ArgLocs, CCInfo,
- IsCall, IsVarArg);
- return;
+/// Analyze the incoming and outgoing return values of a function.
+/// The algorithm is similar to analyzeArguments, but there can only be
+/// one value, possibly an aggregate, and it is limited to 8 bytes.
+template <typename ArgT>
+static void analyzeReturnValues(const SmallVectorImpl<ArgT> &Args,
+ CCState &CCInfo) {
+ unsigned NumArgs = Args.size();
+ unsigned TotalBytes = getTotalArgumentsSizeInBytes(Args);
+ // CanLowerReturn() guarantees this assertion.
+ assert(TotalBytes <= 8 && "return values greater than 8 bytes cannot be lowered");
+
+ // The GCC ABI rounds the return size up to the next even number of bytes,
+ // but once it exceeds 4 bytes it is always rounded up to 8.
+ if (TotalBytes > 4) {
+ TotalBytes = 8;
+ } else {
+ TotalBytes = alignTo(TotalBytes, 2);
+ }
+
+ // The index of the first register to use.
+ int RegIdx = TotalBytes - 1;
+ for (unsigned i = 0; i != NumArgs; ++i) {
+ MVT VT = Args[i].VT;
+ unsigned Reg;
+ if (VT == MVT::i8) {
+ Reg = CCInfo.AllocateReg(RegList8[RegIdx]);
+ } else if (VT == MVT::i16) {
+ Reg = CCInfo.AllocateReg(RegList16[RegIdx]);
+ } else {
+ llvm_unreachable("calling convention can only manage i8 and i16 types");
}
+ assert(Reg && "register not available in calling convention");
+ CCInfo.addLoc(CCValAssign::getReg(i, VT, Reg, VT, CCValAssign::Full));
+ // Registers are assigned in increasing order (remember the array is reversed).
+ RegIdx -= VT.getStoreSize();
}
}
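A worked example for return values (not part of the patch): returning an int32_t gives TotalBytes = 4, which is already even, so RegIdx starts at 3:

    low i16 piece  -> RegList16[3] = R23R22
    high i16 piece -> RegList16[1] = R25R24

i.e. the value comes back in R25..R22 with the low byte in R22, while a 5- to 8-byte value is padded to 8 bytes and returned in R25..R18, as avr-gcc does.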
SDValue AVRTargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
auto DL = DAG.getDataLayout();
@@ -1059,8 +1030,12 @@ SDValue AVRTargetLowering::LowerFormalArguments(
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
*DAG.getContext());
- analyzeArguments(nullptr, &MF.getFunction(), &DL, 0, &Ins, CallConv, ArgLocs, CCInfo,
- false, isVarArg);
+ // Variadic functions do not need all the analysis below.
+ if (isVarArg) {
+ CCInfo.AnalyzeFormalArguments(Ins, ArgCC_AVR_Vararg);
+ } else {
+ analyzeArguments(nullptr, &MF.getFunction(), &DL, Ins, ArgLocs, CCInfo);
+ }
SDValue ArgValue;
for (CCValAssign &VA : ArgLocs) {
@@ -1181,8 +1156,12 @@ SDValue AVRTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
getPointerTy(DAG.getDataLayout()));
}
- analyzeArguments(&CLI, F, &DAG.getDataLayout(), &Outs, 0, CallConv, ArgLocs, CCInfo,
- true, isVarArg);
+ // Variadic functions do not need all the analysis below.
+ if (isVarArg) {
+ CCInfo.AnalyzeCallOperands(Outs, ArgCC_AVR_Vararg);
+ } else {
+ analyzeArguments(&CLI, F, &DAG.getDataLayout(), Outs, ArgLocs, CCInfo);
+ }
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
@@ -1319,13 +1298,10 @@ SDValue AVRTargetLowering::LowerCallResult(
*DAG.getContext());
// Handle runtime calling convs.
- auto CCFunction = CCAssignFnForReturn(CallConv);
- CCInfo.AnalyzeCallResult(Ins, CCFunction);
-
- if (CallConv != CallingConv::AVR_BUILTIN && RVLocs.size() > 1) {
- // Reverse splitted return values to get the "big endian" format required
- // to agree with the calling convention ABI.
- std::reverse(RVLocs.begin(), RVLocs.end());
+ if (CallConv == CallingConv::AVR_BUILTIN) {
+ CCInfo.AnalyzeCallResult(Ins, RetCC_AVR_BUILTIN);
+ } else {
+ analyzeReturnValues(Ins, CCInfo);
}
// Copy all of the result registers out of their specified physreg.
@@ -1344,26 +1320,17 @@ SDValue AVRTargetLowering::LowerCallResult(
// Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//
-CCAssignFn *AVRTargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const {
- switch (CC) {
- case CallingConv::AVR_BUILTIN:
- return RetCC_AVR_BUILTIN;
- default:
- return RetCC_AVR;
+bool AVRTargetLowering::CanLowerReturn(
+ CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
+ if (CallConv == CallingConv::AVR_BUILTIN) {
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
+ return CCInfo.CheckReturn(Outs, RetCC_AVR_BUILTIN);
}
-}
-bool
-AVRTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
- MachineFunction &MF, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- LLVMContext &Context) const
-{
- SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
-
- auto CCFunction = CCAssignFnForReturn(CallConv);
- return CCInfo.CheckReturn(Outs, CCFunction);
+ unsigned TotalBytes = getTotalArgumentsSizeInBytes(Outs);
+ return TotalBytes <= 8;
}
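When CanLowerReturn returns false, the return value is demoted to a hidden sret pointer argument instead of registers. Illustrative C (assumed typical usage, not from the patch):

    struct big { char data[16]; };  /* 16 bytes > 8: no register return */
    struct big make_big(void);
    /* lowered roughly as: void make_big(struct big *sret_result); */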
SDValue
@@ -1379,25 +1346,19 @@ AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
*DAG.getContext());
- // Analyze return values.
- auto CCFunction = CCAssignFnForReturn(CallConv);
- CCInfo.AnalyzeReturn(Outs, CCFunction);
-
- // If this is the first return lowered for this function, add the regs to
- // the liveout set for the function.
MachineFunction &MF = DAG.getMachineFunction();
- unsigned e = RVLocs.size();
- // Reverse splitted return values to get the "big endian" format required
- // to agree with the calling convention ABI.
- if (e > 1) {
- std::reverse(RVLocs.begin(), RVLocs.end());
+ // Analyze return values.
+ if (CallConv == CallingConv::AVR_BUILTIN) {
+ CCInfo.AnalyzeReturn(Outs, RetCC_AVR_BUILTIN);
+ } else {
+ analyzeReturnValues(Outs, CCInfo);
}
SDValue Flag;
SmallVector<SDValue, 4> RetOps(1, Chain);
// Copy the result values into the output registers.
- for (unsigned i = 0; i != e; ++i) {
+ for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
@@ -1415,10 +1376,12 @@ AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
return Chain;
}
+ const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
+
unsigned RetOpc =
- (CallConv == CallingConv::AVR_INTR || CallConv == CallingConv::AVR_SIGNAL)
- ? AVRISD::RETI_FLAG
- : AVRISD::RET_FLAG;
+ AFI->isInterruptOrSignalHandler()
+ ? AVRISD::RETI_FLAG
+ : AVRISD::RET_FLAG;
RetOps[0] = Chain; // Update chain.
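Keying on AVRMachineFunctionInfo means any function carrying the interrupt or signal attribute returns with RETI, even when it uses the default calling convention. An avr-gcc-style handler that would now take the RETI_FLAG path (assumed typical usage and a hypothetical vector name, not from the patch):

    __attribute__((signal)) void __vector_10(void) {
      /* handler body; LowerReturn emits AVRISD::RETI_FLAG here */
    }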
@@ -1514,8 +1477,8 @@ MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI,
LoopBB->addSuccessor(RemBB);
LoopBB->addSuccessor(LoopBB);
- unsigned ShiftAmtReg = RI.createVirtualRegister(&AVR::LD8RegClass);
- unsigned ShiftAmtReg2 = RI.createVirtualRegister(&AVR::LD8RegClass);
+ Register ShiftAmtReg = RI.createVirtualRegister(&AVR::LD8RegClass);
+ Register ShiftAmtReg2 = RI.createVirtualRegister(&AVR::LD8RegClass);
Register ShiftReg = RI.createVirtualRegister(RC);
Register ShiftReg2 = RI.createVirtualRegister(RC);
Register ShiftAmtSrcReg = MI.getOperand(2).getReg();