Diffstat (limited to 'contrib/llvm-project/llvm/lib/Target/AVR')
55 files changed, 13909 insertions, 0 deletions
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVR.h b/contrib/llvm-project/llvm/lib/Target/AVR/AVR.h new file mode 100644 index 000000000000..f0746d73c95f --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVR.h @@ -0,0 +1,57 @@ +//===-- AVR.h - Top-level interface for AVR representation ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the entry points for global functions defined in the LLVM +// AVR back-end. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_H +#define LLVM_AVR_H + +#include "llvm/CodeGen/SelectionDAGNodes.h" +#include "llvm/Target/TargetMachine.h" + +namespace llvm { + +class AVRTargetMachine; +class FunctionPass; + +FunctionPass *createAVRISelDag(AVRTargetMachine &TM, + CodeGenOpt::Level OptLevel); +FunctionPass *createAVRExpandPseudoPass(); +FunctionPass *createAVRFrameAnalyzerPass(); +FunctionPass *createAVRRelaxMemPass(); +FunctionPass *createAVRDynAllocaSRPass(); +FunctionPass *createAVRBranchSelectionPass(); + +void initializeAVRExpandPseudoPass(PassRegistry&); +void initializeAVRRelaxMemPass(PassRegistry&); + +/// Contains the AVR backend. +namespace AVR { + +/// An integer that identifies all of the supported AVR address spaces. +enum AddressSpace { DataMemory, ProgramMemory }; + +/// Checks if a given type is a pointer to program memory. +template <typename T> bool isProgramMemoryAddress(T *V) { + return cast<PointerType>(V->getType())->getAddressSpace() == ProgramMemory; +} + +inline bool isProgramMemoryAccess(MemSDNode const *N) { + auto V = N->getMemOperand()->getValue(); + + return (V != nullptr) ? isProgramMemoryAddress(V) : false; +} + +} // end of namespace AVR + +} // end namespace llvm + +#endif // LLVM_AVR_H diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVR.td b/contrib/llvm-project/llvm/lib/Target/AVR/AVR.td new file mode 100644 index 000000000000..53768f99df3b --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVR.td @@ -0,0 +1,80 @@ +//===-- AVR.td - Describe the AVR Target Machine ----------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// +// This is the top level entry point for the AVR target. 
+//===---------------------------------------------------------------------===// + +//===---------------------------------------------------------------------===// +// Target-independent interfaces which we are implementing +//===---------------------------------------------------------------------===// + +include "llvm/Target/Target.td" + +//===---------------------------------------------------------------------===// +// AVR Device Definitions +//===---------------------------------------------------------------------===// + +include "AVRDevices.td" + +//===---------------------------------------------------------------------===// +// Register File Description +//===---------------------------------------------------------------------===// + +include "AVRRegisterInfo.td" + +//===---------------------------------------------------------------------===// +// Instruction Descriptions +//===---------------------------------------------------------------------===// + +include "AVRInstrInfo.td" + +def AVRInstrInfo : InstrInfo; + +//===---------------------------------------------------------------------===// +// Calling Conventions +//===---------------------------------------------------------------------===// + +include "AVRCallingConv.td" + +//===---------------------------------------------------------------------===// +// Assembly Printers +//===---------------------------------------------------------------------===// + +def AVRAsmWriter : AsmWriter { + string AsmWriterClassName = "InstPrinter"; + bit isMCAsmWriter = 1; +} + +//===---------------------------------------------------------------------===// +// Assembly Parsers +//===---------------------------------------------------------------------===// + +def AVRAsmParser : AsmParser { + let ShouldEmitMatchRegisterName = 1; + let ShouldEmitMatchRegisterAltName = 1; +} + +def AVRAsmParserVariant : AsmParserVariant { + int Variant = 0; + + // Recognize hard coded registers. + string RegisterPrefix = "$"; + string TokenizingCharacters = "+"; +} + +//===---------------------------------------------------------------------===// +// Target Declaration +//===---------------------------------------------------------------------===// + +def AVR : Target { + let InstructionSet = AVRInstrInfo; + let AssemblyWriters = [AVRAsmWriter]; + + let AssemblyParsers = [AVRAsmParser]; + let AssemblyParserVariants = [AVRAsmParserVariant]; +} + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRAsmPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRAsmPrinter.cpp new file mode 100644 index 000000000000..7586bd7b78fc --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRAsmPrinter.cpp @@ -0,0 +1,184 @@ +//===-- AVRAsmPrinter.cpp - AVR LLVM assembly writer ----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains a printer that converts from our internal representation +// of machine-dependent LLVM code to GAS-format AVR assembly language. 
+// +//===----------------------------------------------------------------------===// + +#include "AVR.h" +#include "AVRMCInstLower.h" +#include "AVRSubtarget.h" +#include "MCTargetDesc/AVRInstPrinter.h" +#include "TargetInfo/AVRTargetInfo.h" + +#include "llvm/CodeGen/AsmPrinter.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/TargetRegisterInfo.h" +#include "llvm/CodeGen/TargetSubtargetInfo.h" +#include "llvm/IR/Mangler.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/TargetRegistry.h" +#include "llvm/Support/raw_ostream.h" + +#define DEBUG_TYPE "avr-asm-printer" + +namespace llvm { + +/// An AVR assembly code printer. +class AVRAsmPrinter : public AsmPrinter { +public: + AVRAsmPrinter(TargetMachine &TM, + std::unique_ptr<MCStreamer> Streamer) + : AsmPrinter(TM, std::move(Streamer)), MRI(*TM.getMCRegisterInfo()) { } + + StringRef getPassName() const override { return "AVR Assembly Printer"; } + + void printOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O); + + bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, + const char *ExtraCode, raw_ostream &O) override; + + bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum, + const char *ExtraCode, raw_ostream &O) override; + + void EmitInstruction(const MachineInstr *MI) override; + +private: + const MCRegisterInfo &MRI; +}; + +void AVRAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo, + raw_ostream &O) { + const MachineOperand &MO = MI->getOperand(OpNo); + + switch (MO.getType()) { + case MachineOperand::MO_Register: + O << AVRInstPrinter::getPrettyRegisterName(MO.getReg(), MRI); + break; + case MachineOperand::MO_Immediate: + O << MO.getImm(); + break; + case MachineOperand::MO_GlobalAddress: + O << getSymbol(MO.getGlobal()); + break; + case MachineOperand::MO_ExternalSymbol: + O << *GetExternalSymbolSymbol(MO.getSymbolName()); + break; + case MachineOperand::MO_MachineBasicBlock: + O << *MO.getMBB()->getSymbol(); + break; + default: + llvm_unreachable("Not implemented yet!"); + } +} + +bool AVRAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, + const char *ExtraCode, raw_ostream &O) { + // Default asm printer can only deal with some extra codes, + // so try it first. + bool Error = AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O); + + if (Error && ExtraCode && ExtraCode[0]) { + if (ExtraCode[1] != 0) + return true; // Unknown modifier. 
+ + if (ExtraCode[0] >= 'A' && ExtraCode[0] <= 'Z') { + const MachineOperand &RegOp = MI->getOperand(OpNum); + + assert(RegOp.isReg() && "Operand must be a register when you're" + "using 'A'..'Z' operand extracodes."); + unsigned Reg = RegOp.getReg(); + + unsigned ByteNumber = ExtraCode[0] - 'A'; + + unsigned OpFlags = MI->getOperand(OpNum - 1).getImm(); + unsigned NumOpRegs = InlineAsm::getNumOperandRegisters(OpFlags); + (void)NumOpRegs; + + const AVRSubtarget &STI = MF->getSubtarget<AVRSubtarget>(); + const TargetRegisterInfo &TRI = *STI.getRegisterInfo(); + + const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(Reg); + unsigned BytesPerReg = TRI.getRegSizeInBits(*RC) / 8; + assert(BytesPerReg <= 2 && "Only 8 and 16 bit regs are supported."); + + unsigned RegIdx = ByteNumber / BytesPerReg; + assert(RegIdx < NumOpRegs && "Multibyte index out of range."); + + Reg = MI->getOperand(OpNum + RegIdx).getReg(); + + if (BytesPerReg == 2) { + Reg = TRI.getSubReg(Reg, ByteNumber % BytesPerReg ? AVR::sub_hi + : AVR::sub_lo); + } + + O << AVRInstPrinter::getPrettyRegisterName(Reg, MRI); + return false; + } + } + + if (Error) + printOperand(MI, OpNum, O); + + return false; +} + +bool AVRAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, + unsigned OpNum, const char *ExtraCode, + raw_ostream &O) { + if (ExtraCode && ExtraCode[0]) { + llvm_unreachable("This branch is not implemented yet"); + } + + const MachineOperand &MO = MI->getOperand(OpNum); + (void)MO; + assert(MO.isReg() && "Unexpected inline asm memory operand"); + + // TODO: We should be able to look up the alternative name for + // the register if it's given. + // TableGen doesn't expose a way of getting retrieving names + // for registers. + if (MI->getOperand(OpNum).getReg() == AVR::R31R30) { + O << "Z"; + } else { + assert(MI->getOperand(OpNum).getReg() == AVR::R29R28 && + "Wrong register class for memory operand."); + O << "Y"; + } + + // If NumOpRegs == 2, then we assume it is product of a FrameIndex expansion + // and the second operand is an Imm. + unsigned OpFlags = MI->getOperand(OpNum - 1).getImm(); + unsigned NumOpRegs = InlineAsm::getNumOperandRegisters(OpFlags); + + if (NumOpRegs == 2) { + O << '+' << MI->getOperand(OpNum + 1).getImm(); + } + + return false; +} + +void AVRAsmPrinter::EmitInstruction(const MachineInstr *MI) { + AVRMCInstLower MCInstLowering(OutContext, *this); + + MCInst I; + MCInstLowering.lowerInstruction(*MI, I); + EmitToStreamer(*OutStreamer, I); +} + +} // end of namespace llvm + +extern "C" void LLVMInitializeAVRAsmPrinter() { + llvm::RegisterAsmPrinter<llvm::AVRAsmPrinter> X(llvm::getTheAVRTarget()); +} + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRCallingConv.td b/contrib/llvm-project/llvm/lib/Target/AVR/AVRCallingConv.td new file mode 100644 index 000000000000..213e35fca66d --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRCallingConv.td @@ -0,0 +1,57 @@ +//===-- AVRCallingConv.td - Calling Conventions for AVR ----*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// This describes the calling conventions for AVR architecture. 
+//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// AVR Return Value Calling Convention +//===----------------------------------------------------------------------===// + +def RetCC_AVR : CallingConv +<[ + // i8 is returned in R24. + CCIfType<[i8], CCAssignToReg<[R24]>>, + + // i16 are returned in R25:R24, R23:R22, R21:R20 and R19:R18. + CCIfType<[i16], CCAssignToReg<[R25R24, R23R22, R21R20, R19R18]>> +]>; + +// Special return value calling convention for runtime functions. +def RetCC_AVR_BUILTIN : CallingConv +<[ + CCIfType<[i8], CCAssignToReg<[R24,R25]>>, + CCIfType<[i16], CCAssignToReg<[R23R22, R25R24]>> +]>; + +//===----------------------------------------------------------------------===// +// AVR Argument Calling Conventions +//===----------------------------------------------------------------------===// + +// The calling conventions are implemented in custom C++ code + +// Calling convention for variadic functions. +def ArgCC_AVR_Vararg : CallingConv +<[ + // i16 are always passed through the stack with an alignment of 1. + CCAssignToStack<2, 1> +]>; + +// Special argument calling convention for +// division runtime functions. +def ArgCC_AVR_BUILTIN_DIV : CallingConv +<[ + CCIfType<[i8], CCAssignToReg<[R24,R22]>>, + CCIfType<[i16], CCAssignToReg<[R25R24, R23R22]>> +]>; + +//===----------------------------------------------------------------------===// +// Callee-saved register lists. +//===----------------------------------------------------------------------===// + +def CSR_Normal : CalleeSavedRegs<(add R29, R28, (sequence "R%u", 17, 2))>; +def CSR_Interrupts : CalleeSavedRegs<(add (sequence "R%u", 31, 0))>; diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRDevices.td b/contrib/llvm-project/llvm/lib/Target/AVR/AVRDevices.td new file mode 100644 index 000000000000..62def4574437 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRDevices.td @@ -0,0 +1,490 @@ +//===---------------------------------------------------------------------===// +// AVR Device Definitions +//===---------------------------------------------------------------------===// + +// :TODO: Implement the skip errata, see `gcc/config/avr/avr-arch.h` for details +// :TODO: We define all devices with SRAM to have all variants of LD/ST/LDD/STD. +// In reality, avr1 (no SRAM) has one variant each of `LD` and `ST`. +// avr2 (with SRAM) adds the rest of the variants. + + +// A feature set aggregates features, grouping them. We don't want to create a +// new member in AVRSubtarget (to store a value) for each set because we do not +// care if the set is supported, only the subfeatures inside the set. We fix +// this by simply setting the same dummy member for all feature sets, which is +// then ignored. +class FeatureSet<string name, string desc, list<SubtargetFeature> i> + : SubtargetFeature<name, "m_FeatureSetDummy", "true", desc, i>; + +// A family of microcontrollers, defining a set of supported features. +class Family<string name, list<SubtargetFeature> i> + : FeatureSet<name, !strconcat("The device is a part of the ", + name, " family"), i>; + +// The device has SRAM, and supports the bare minimum of +// SRAM-relevant instructions. 
+// +// These are: +// LD - all 9 variants +// ST - all 9 variants +// LDD - two variants for Y and Z +// STD - two variants for Y and Z +// `LDS Rd, K` +// `STS k, Rr` +// `PUSH`/`POP` +def FeatureSRAM : SubtargetFeature<"sram", "m_hasSRAM", "true", + "The device has random access memory">; + +// The device supports the `JMP k` and `CALL k` instructions. +def FeatureJMPCALL : SubtargetFeature<"jmpcall", "m_hasJMPCALL", "true", + "The device supports the `JMP` and " + "`CALL` instructions">; + + +// The device supports the indirect branches `IJMP` and `ICALL`. +def FeatureIJMPCALL : SubtargetFeature<"ijmpcall", "m_hasIJMPCALL", + "true", + "The device supports `IJMP`/`ICALL`" + "instructions">; + +// The device supports the extended indirect branches `EIJMP` and `EICALL`. +def FeatureEIJMPCALL : SubtargetFeature<"eijmpcall", "m_hasEIJMPCALL", + "true", "The device supports the " + "`EIJMP`/`EICALL` instructions">; + +// The device supports `ADDI Rd, K`, `SUBI Rd, K`. +def FeatureADDSUBIW : SubtargetFeature<"addsubiw", "m_hasADDSUBIW", + "true", "Enable 16-bit register-immediate " + "addition and subtraction instructions">; + +// The device has an 8-bit stack pointer (SP) register. +def FeatureSmallStack : SubtargetFeature<"smallstack", "m_hasSmallStack", + "true", "The device has an 8-bit " + "stack pointer">; + +// The device supports the 16-bit GPR pair MOVW instruction. +def FeatureMOVW : SubtargetFeature<"movw", "m_hasMOVW", "true", + "The device supports the 16-bit MOVW " + "instruction">; + +// The device supports the `LPM` instruction, with implied destination being r0. +def FeatureLPM : SubtargetFeature<"lpm", "m_hasLPM", "true", + "The device supports the `LPM` instruction">; + +// The device supports the `LPM Rd, Z[+] instruction. +def FeatureLPMX : SubtargetFeature<"lpmx", "m_hasLPMX", "true", + "The device supports the `LPM Rd, Z[+]` " + "instruction">; + +// The device supports the `ELPM` instruction. +def FeatureELPM : SubtargetFeature<"elpm", "m_hasELPM", "true", + "The device supports the ELPM instruction">; + +// The device supports the `ELPM Rd, Z[+]` instructions. +def FeatureELPMX : SubtargetFeature<"elpmx", "m_hasELPMX", "true", + "The device supports the `ELPM Rd, Z[+]` " + "instructions">; + +// The device supports the `SPM` instruction. +def FeatureSPM : SubtargetFeature<"spm", "m_hasSPM", "true", + "The device supports the `SPM` instruction">; + +// The device supports the `SPM Z+` instruction. +def FeatureSPMX : SubtargetFeature<"spmx", "m_hasSPMX", "true", + "The device supports the `SPM Z+` " + "instruction">; + +// The device supports the `DES k` instruction. +def FeatureDES : SubtargetFeature<"des", "m_hasDES", "true", + "The device supports the `DES k` encryption " + "instruction">; + +// The device supports the Read-Write-Modify instructions +// XCH, LAS, LAC, and LAT. +def FeatureRMW : SubtargetFeature<"rmw", "m_supportsRMW", "true", + "The device supports the read-write-modify " + "instructions: XCH, LAS, LAC, LAT">; + +// The device supports the `[F]MUL[S][U]` family of instructions. +def FeatureMultiplication : SubtargetFeature<"mul", "m_supportsMultiplication", + "true", "The device supports the " + "multiplication instructions">; + +// The device supports the `BREAK` instruction. +def FeatureBREAK : SubtargetFeature<"break", "m_hasBREAK", "true", + "The device supports the `BREAK` debugging " + "instruction">; + +// The device has instruction encodings specific to the Tiny core. 
+def FeatureTinyEncoding : SubtargetFeature<"tinyencoding", + "m_hasTinyEncoding", "true", + "The device has Tiny core specific " + "instruction encodings">; + +class ELFArch<string name> : SubtargetFeature<"", "ELFArch", + !strconcat("ELF::",name), "">; + +// ELF e_flags architecture values +def ELFArchAVR1 : ELFArch<"EF_AVR_ARCH_AVR1">; +def ELFArchAVR2 : ELFArch<"EF_AVR_ARCH_AVR2">; +def ELFArchAVR25 : ELFArch<"EF_AVR_ARCH_AVR25">; +def ELFArchAVR3 : ELFArch<"EF_AVR_ARCH_AVR3">; +def ELFArchAVR31 : ELFArch<"EF_AVR_ARCH_AVR31">; +def ELFArchAVR35 : ELFArch<"EF_AVR_ARCH_AVR35">; +def ELFArchAVR4 : ELFArch<"EF_AVR_ARCH_AVR4">; +def ELFArchAVR5 : ELFArch<"EF_AVR_ARCH_AVR5">; +def ELFArchAVR51 : ELFArch<"EF_AVR_ARCH_AVR51">; +def ELFArchAVR6 : ELFArch<"EF_AVR_ARCH_AVR6">; +def ELFArchTiny : ELFArch<"EF_AVR_ARCH_AVRTINY">; +def ELFArchXMEGA1 : ELFArch<"EF_AVR_ARCH_XMEGA1">; +def ELFArchXMEGA2 : ELFArch<"EF_AVR_ARCH_XMEGA2">; +def ELFArchXMEGA3 : ELFArch<"EF_AVR_ARCH_XMEGA3">; +def ELFArchXMEGA4 : ELFArch<"EF_AVR_ARCH_XMEGA4">; +def ELFArchXMEGA5 : ELFArch<"EF_AVR_ARCH_XMEGA5">; +def ELFArchXMEGA6 : ELFArch<"EF_AVR_ARCH_XMEGA6">; +def ELFArchXMEGA7 : ELFArch<"EF_AVR_ARCH_XMEGA7">; + +//===---------------------------------------------------------------------===// +// AVR Families +//===---------------------------------------------------------------------===// + +// The device has at least the bare minimum that **every** single AVR +// device should have. +def FamilyAVR0 : Family<"avr0", []>; + +def FamilyAVR1 : Family<"avr1", [FamilyAVR0, FeatureLPM]>; + +def FamilyAVR2 : Family<"avr2", + [FamilyAVR1, FeatureIJMPCALL, FeatureADDSUBIW, + FeatureSRAM]>; + +def FamilyAVR25 : Family<"avr25", + [FamilyAVR2, FeatureMOVW, FeatureLPMX, + FeatureSPM, FeatureBREAK]>; + +def FamilyAVR3 : Family<"avr3", + [FamilyAVR2, FeatureJMPCALL]>; + +def FamilyAVR31 : Family<"avr31", + [FamilyAVR3, FeatureELPM]>; + +def FamilyAVR35 : Family<"avr35", + [FamilyAVR3, FeatureMOVW, FeatureLPMX, + FeatureSPM, FeatureBREAK]>; + +def FamilyAVR4 : Family<"avr4", + [FamilyAVR2, FeatureMultiplication, + FeatureMOVW, FeatureLPMX, FeatureSPM, + FeatureBREAK]>; + +def FamilyAVR5 : Family<"avr5", + [FamilyAVR3, FeatureMultiplication, + FeatureMOVW, FeatureLPMX, FeatureSPM, + FeatureBREAK]>; + +def FamilyAVR51 : Family<"avr51", + [FamilyAVR5, FeatureELPM, FeatureELPMX]>; + +def FamilyAVR6 : Family<"avr6", + [FamilyAVR51]>; + +def FamilyTiny : Family<"avrtiny", + [FamilyAVR0, FeatureBREAK, FeatureSRAM, + FeatureTinyEncoding]>; + +def FamilyXMEGA : Family<"xmega", + [FamilyAVR51, FeatureEIJMPCALL, FeatureSPMX, + FeatureDES]>; + +def FamilyXMEGAU : Family<"xmegau", + [FamilyXMEGA, FeatureRMW]>; + +def FeatureSetSpecial : FeatureSet<"special", + "Enable use of the entire instruction " + "set - used for debugging", + [FeatureSRAM, FeatureJMPCALL, + FeatureIJMPCALL, FeatureEIJMPCALL, + FeatureADDSUBIW, FeatureMOVW, + FeatureLPM, FeatureLPMX, FeatureELPM, + FeatureELPMX, FeatureSPM, FeatureSPMX, + FeatureDES, FeatureRMW, + FeatureMultiplication, FeatureBREAK]>; + +//===---------------------------------------------------------------------===// +// AVR microcontrollers supported. 
+//===---------------------------------------------------------------------===// + +class Device<string Name, Family Fam, ELFArch Arch, + list<SubtargetFeature> ExtraFeatures = []> + : Processor<Name, NoItineraries, !listconcat([Fam,Arch],ExtraFeatures)>; + +// Generic MCUs +// Note that several versions of GCC has strange ELF architecture +// settings for backwards compatibility - see `gas/config/tc-avr.c` +// in AVR binutils. We do not replicate this. +def : Device<"avr1", FamilyAVR1, ELFArchAVR1>; +def : Device<"avr2", FamilyAVR2, ELFArchAVR2>; +def : Device<"avr25", FamilyAVR25, ELFArchAVR25>; +def : Device<"avr3", FamilyAVR3, ELFArchAVR3>; +def : Device<"avr31", FamilyAVR31, ELFArchAVR31>; +def : Device<"avr35", FamilyAVR35, ELFArchAVR35>; +def : Device<"avr4", FamilyAVR4, ELFArchAVR4>; +def : Device<"avr5", FamilyAVR5, ELFArchAVR5>; +def : Device<"avr51", FamilyAVR51, ELFArchAVR51>; +def : Device<"avr6", FamilyAVR6, ELFArchAVR6>; +def : Device<"avrxmega1", FamilyXMEGA, ELFArchXMEGA1>; +def : Device<"avrxmega2", FamilyXMEGA, ELFArchXMEGA2>; +def : Device<"avrxmega3", FamilyXMEGA, ELFArchXMEGA3>; +def : Device<"avrxmega4", FamilyXMEGA, ELFArchXMEGA4>; +def : Device<"avrxmega5", FamilyXMEGA, ELFArchXMEGA5>; +def : Device<"avrxmega6", FamilyXMEGA, ELFArchXMEGA6>; +def : Device<"avrxmega7", FamilyXMEGA, ELFArchXMEGA7>; +def : Device<"avrtiny", FamilyTiny, ELFArchTiny>; + +// Specific MCUs +def : Device<"at90s1200", FamilyAVR0, ELFArchAVR1>; +def : Device<"attiny11", FamilyAVR1, ELFArchAVR1>; +def : Device<"attiny12", FamilyAVR1, ELFArchAVR1>; +def : Device<"attiny15", FamilyAVR1, ELFArchAVR1>; +def : Device<"attiny28", FamilyAVR1, ELFArchAVR1>; +def : Device<"at90s2313", FamilyAVR2, ELFArchAVR2>; +def : Device<"at90s2323", FamilyAVR2, ELFArchAVR2>; +def : Device<"at90s2333", FamilyAVR2, ELFArchAVR2>; +def : Device<"at90s2343", FamilyAVR2, ELFArchAVR2>; +def : Device<"attiny22", FamilyAVR2, ELFArchAVR2>; +def : Device<"attiny26", FamilyAVR2, ELFArchAVR2, [FeatureLPMX]>; +def : Device<"at86rf401", FamilyAVR2, ELFArchAVR25, + [FeatureMOVW, FeatureLPMX]>; +def : Device<"at90s4414", FamilyAVR2, ELFArchAVR2>; +def : Device<"at90s4433", FamilyAVR2, ELFArchAVR2>; +def : Device<"at90s4434", FamilyAVR2, ELFArchAVR2>; +def : Device<"at90s8515", FamilyAVR2, ELFArchAVR2>; +def : Device<"at90c8534", FamilyAVR2, ELFArchAVR2>; +def : Device<"at90s8535", FamilyAVR2, ELFArchAVR2>; +def : Device<"ata5272", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny13", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny13a", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny2313", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny2313a", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny24", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny24a", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny4313", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny44", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny44a", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny84", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny84a", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny25", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny45", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny85", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny261", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny261a", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny461", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny461a", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny861", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny861a", FamilyAVR25, 
ELFArchAVR25>; +def : Device<"attiny87", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny43u", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny48", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny88", FamilyAVR25, ELFArchAVR25>; +def : Device<"attiny828", FamilyAVR25, ELFArchAVR25>; +def : Device<"at43usb355", FamilyAVR3, ELFArchAVR3>; +def : Device<"at76c711", FamilyAVR3, ELFArchAVR3>; +def : Device<"atmega103", FamilyAVR31, ELFArchAVR31>; +def : Device<"at43usb320", FamilyAVR31, ELFArchAVR31>; +def : Device<"attiny167", FamilyAVR35, ELFArchAVR35>; +def : Device<"at90usb82", FamilyAVR35, ELFArchAVR35>; +def : Device<"at90usb162", FamilyAVR35, ELFArchAVR35>; +def : Device<"ata5505", FamilyAVR35, ELFArchAVR35>; +def : Device<"atmega8u2", FamilyAVR35, ELFArchAVR35>; +def : Device<"atmega16u2", FamilyAVR35, ELFArchAVR35>; +def : Device<"atmega32u2", FamilyAVR35, ELFArchAVR35>; +def : Device<"attiny1634", FamilyAVR35, ELFArchAVR35>; +def : Device<"atmega8", FamilyAVR4, ELFArchAVR4>; // FIXME: family may be wrong +def : Device<"ata6289", FamilyAVR4, ELFArchAVR4>; +def : Device<"atmega8a", FamilyAVR4, ELFArchAVR4>; +def : Device<"ata6285", FamilyAVR4, ELFArchAVR4>; +def : Device<"ata6286", FamilyAVR4, ELFArchAVR4>; +def : Device<"atmega48", FamilyAVR4, ELFArchAVR4>; +def : Device<"atmega48a", FamilyAVR4, ELFArchAVR4>; +def : Device<"atmega48pa", FamilyAVR4, ELFArchAVR4>; +def : Device<"atmega48p", FamilyAVR4, ELFArchAVR4>; +def : Device<"atmega88", FamilyAVR4, ELFArchAVR4>; +def : Device<"atmega88a", FamilyAVR4, ELFArchAVR4>; +def : Device<"atmega88p", FamilyAVR4, ELFArchAVR4>; +def : Device<"atmega88pa", FamilyAVR4, ELFArchAVR4>; +def : Device<"atmega8515", FamilyAVR2, ELFArchAVR4, + [FeatureMultiplication, FeatureMOVW, FeatureLPMX, FeatureSPM]>; +def : Device<"atmega8535", FamilyAVR2, ELFArchAVR4, + [FeatureMultiplication, FeatureMOVW, FeatureLPMX, FeatureSPM]>; +def : Device<"atmega8hva", FamilyAVR4, ELFArchAVR4>; +def : Device<"at90pwm1", FamilyAVR4, ELFArchAVR4>; +def : Device<"at90pwm2", FamilyAVR4, ELFArchAVR4>; +def : Device<"at90pwm2b", FamilyAVR4, ELFArchAVR4>; +def : Device<"at90pwm3", FamilyAVR4, ELFArchAVR4>; +def : Device<"at90pwm3b", FamilyAVR4, ELFArchAVR4>; +def : Device<"at90pwm81", FamilyAVR4, ELFArchAVR4>; +def : Device<"ata5790", FamilyAVR5, ELFArchAVR5>; +def : Device<"ata5795", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega16", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega16a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega161", FamilyAVR3, ELFArchAVR5, + [FeatureMultiplication, FeatureMOVW, FeatureLPMX, FeatureSPM]>; +def : Device<"atmega162", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega163", FamilyAVR3, ELFArchAVR5, + [FeatureMultiplication, FeatureMOVW, FeatureLPMX, FeatureSPM]>; +def : Device<"atmega164a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega164p", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega164pa", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega165", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega165a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega165p", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega165pa", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega168", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega168a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega168p", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega168pa", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega169", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega169a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega169p", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega169pa", 
FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega32", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega32a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega323", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega324a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega324p", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega324pa", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega325", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega325a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega325p", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega325pa", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega3250", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega3250a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega3250p", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega3250pa", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega328", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega328p", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega329", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega329a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega329p", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega329pa", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega3290", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega3290a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega3290p", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega3290pa", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega406", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega64", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega64a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega640", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega644", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega644a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega644p", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega644pa", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega645", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega645a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega645p", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega649", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega649a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega649p", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega6450", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega6450a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega6450p", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega6490", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega6490a", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega6490p", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega64rfr2", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega644rfr2", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega16hva", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega16hva2", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega16hvb", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega16hvbrevb", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega32hvb", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega32hvbrevb", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega64hve", FamilyAVR5, ELFArchAVR5>; +def : Device<"at90can32", FamilyAVR5, ELFArchAVR5>; +def : Device<"at90can64", FamilyAVR5, ELFArchAVR5>; +def : Device<"at90pwm161", FamilyAVR5, ELFArchAVR5>; +def : Device<"at90pwm216", FamilyAVR5, ELFArchAVR5>; +def : Device<"at90pwm316", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega32c1", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega64c1", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega16m1", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega32m1", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega64m1", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega16u4", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega32u4", FamilyAVR5, 
ELFArchAVR5>; +def : Device<"atmega32u6", FamilyAVR5, ELFArchAVR5>; +def : Device<"at90usb646", FamilyAVR5, ELFArchAVR5>; +def : Device<"at90usb647", FamilyAVR5, ELFArchAVR5>; +def : Device<"at90scr100", FamilyAVR5, ELFArchAVR5>; +def : Device<"at94k", FamilyAVR3, ELFArchAVR5, + [FeatureMultiplication, FeatureMOVW, FeatureLPMX]>; +def : Device<"m3000", FamilyAVR5, ELFArchAVR5>; +def : Device<"atmega128", FamilyAVR51, ELFArchAVR51>; +def : Device<"atmega128a", FamilyAVR51, ELFArchAVR51>; +def : Device<"atmega1280", FamilyAVR51, ELFArchAVR51>; +def : Device<"atmega1281", FamilyAVR51, ELFArchAVR51>; +def : Device<"atmega1284", FamilyAVR51, ELFArchAVR51>; +def : Device<"atmega1284p", FamilyAVR51, ELFArchAVR51>; +def : Device<"atmega128rfa1", FamilyAVR51, ELFArchAVR51>; +def : Device<"atmega128rfr2", FamilyAVR51, ELFArchAVR51>; +def : Device<"atmega1284rfr2", FamilyAVR51, ELFArchAVR51>; +def : Device<"at90can128", FamilyAVR51, ELFArchAVR51>; +def : Device<"at90usb1286", FamilyAVR51, ELFArchAVR51>; +def : Device<"at90usb1287", FamilyAVR51, ELFArchAVR51>; +def : Device<"atmega2560", FamilyAVR6, ELFArchAVR6>; +def : Device<"atmega2561", FamilyAVR6, ELFArchAVR6>; +def : Device<"atmega256rfr2", FamilyAVR6, ELFArchAVR6>; +def : Device<"atmega2564rfr2", FamilyAVR6, ELFArchAVR6>; +def : Device<"atxmega16a4", FamilyXMEGA, ELFArchXMEGA2>; +def : Device<"atxmega16a4u", FamilyXMEGAU, ELFArchXMEGA2>; +def : Device<"atxmega16c4", FamilyXMEGAU, ELFArchXMEGA2>; +def : Device<"atxmega16d4", FamilyXMEGA, ELFArchXMEGA2>; +def : Device<"atxmega32a4", FamilyXMEGA, ELFArchXMEGA2>; +def : Device<"atxmega32a4u", FamilyXMEGAU, ELFArchXMEGA2>; +def : Device<"atxmega32c4", FamilyXMEGAU, ELFArchXMEGA2>; +def : Device<"atxmega32d4", FamilyXMEGA, ELFArchXMEGA2>; +def : Device<"atxmega32e5", FamilyXMEGA, ELFArchXMEGA2>; +def : Device<"atxmega16e5", FamilyXMEGA, ELFArchXMEGA2>; +def : Device<"atxmega8e5", FamilyXMEGA, ELFArchXMEGA2>; +def : Device<"atxmega32x1", FamilyXMEGA, ELFArchXMEGA2>; +def : Device<"atxmega64a3", FamilyXMEGA, ELFArchXMEGA4>; +def : Device<"atxmega64a3u", FamilyXMEGAU, ELFArchXMEGA4>; +def : Device<"atxmega64a4u", FamilyXMEGAU, ELFArchXMEGA4>; +def : Device<"atxmega64b1", FamilyXMEGAU, ELFArchXMEGA4>; +def : Device<"atxmega64b3", FamilyXMEGAU, ELFArchXMEGA4>; +def : Device<"atxmega64c3", FamilyXMEGAU, ELFArchXMEGA4>; +def : Device<"atxmega64d3", FamilyXMEGA, ELFArchXMEGA4>; +def : Device<"atxmega64d4", FamilyXMEGA, ELFArchXMEGA4>; +def : Device<"atxmega64a1", FamilyXMEGA, ELFArchXMEGA5>; +def : Device<"atxmega64a1u", FamilyXMEGAU, ELFArchXMEGA5>; +def : Device<"atxmega128a3", FamilyXMEGA, ELFArchXMEGA6>; +def : Device<"atxmega128a3u", FamilyXMEGAU, ELFArchXMEGA6>; +def : Device<"atxmega128b1", FamilyXMEGAU, ELFArchXMEGA6>; +def : Device<"atxmega128b3", FamilyXMEGAU, ELFArchXMEGA6>; +def : Device<"atxmega128c3", FamilyXMEGAU, ELFArchXMEGA6>; +def : Device<"atxmega128d3", FamilyXMEGA, ELFArchXMEGA6>; +def : Device<"atxmega128d4", FamilyXMEGA, ELFArchXMEGA6>; +def : Device<"atxmega192a3", FamilyXMEGA, ELFArchXMEGA6>; +def : Device<"atxmega192a3u", FamilyXMEGAU, ELFArchXMEGA6>; +def : Device<"atxmega192c3", FamilyXMEGAU, ELFArchXMEGA6>; +def : Device<"atxmega192d3", FamilyXMEGA, ELFArchXMEGA6>; +def : Device<"atxmega256a3", FamilyXMEGA, ELFArchXMEGA6>; +def : Device<"atxmega256a3u", FamilyXMEGAU, ELFArchXMEGA6>; +def : Device<"atxmega256a3b", FamilyXMEGA, ELFArchXMEGA6>; +def : Device<"atxmega256a3bu", FamilyXMEGAU, ELFArchXMEGA6>; +def : Device<"atxmega256c3", FamilyXMEGAU, ELFArchXMEGA6>; +def : 
Device<"atxmega256d3", FamilyXMEGA, ELFArchXMEGA6>; +def : Device<"atxmega384c3", FamilyXMEGAU, ELFArchXMEGA6>; +def : Device<"atxmega384d3", FamilyXMEGA, ELFArchXMEGA6>; +def : Device<"atxmega128a1", FamilyXMEGA, ELFArchXMEGA7>; +def : Device<"atxmega128a1u", FamilyXMEGAU, ELFArchXMEGA7>; +def : Device<"atxmega128a4u", FamilyXMEGAU, ELFArchXMEGA7>; +def : Device<"attiny4", FamilyTiny, ELFArchTiny>; +def : Device<"attiny5", FamilyTiny, ELFArchTiny>; +def : Device<"attiny9", FamilyTiny, ELFArchTiny>; +def : Device<"attiny10", FamilyTiny, ELFArchTiny>; +def : Device<"attiny20", FamilyTiny, ELFArchTiny>; +def : Device<"attiny40", FamilyTiny, ELFArchTiny>; +def : Device<"attiny102", FamilyTiny, ELFArchTiny>; +def : Device<"attiny104", FamilyTiny, ELFArchTiny>; + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp new file mode 100644 index 000000000000..c45b2d0e39c1 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp @@ -0,0 +1,1587 @@ +//===-- AVRExpandPseudoInsts.cpp - Expand pseudo instructions -------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains a pass that expands pseudo instructions into target +// instructions. This pass should be run after register allocation but before +// the post-regalloc scheduling pass. +// +//===----------------------------------------------------------------------===// + +#include "AVR.h" +#include "AVRInstrInfo.h" +#include "AVRTargetMachine.h" +#include "MCTargetDesc/AVRMCTargetDesc.h" + +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/RegisterScavenging.h" +#include "llvm/CodeGen/TargetRegisterInfo.h" + +using namespace llvm; + +#define AVR_EXPAND_PSEUDO_NAME "AVR pseudo instruction expansion pass" + +namespace { + +/// Expands "placeholder" instructions marked as pseudo into +/// actual AVR instructions. +class AVRExpandPseudo : public MachineFunctionPass { +public: + static char ID; + + AVRExpandPseudo() : MachineFunctionPass(ID) { + initializeAVRExpandPseudoPass(*PassRegistry::getPassRegistry()); + } + + bool runOnMachineFunction(MachineFunction &MF) override; + + StringRef getPassName() const override { return AVR_EXPAND_PSEUDO_NAME; } + +private: + typedef MachineBasicBlock Block; + typedef Block::iterator BlockIt; + + const AVRRegisterInfo *TRI; + const TargetInstrInfo *TII; + + /// The register to be used for temporary storage. + const unsigned SCRATCH_REGISTER = AVR::R0; + /// The IO address of the status register. 
+ const unsigned SREG_ADDR = 0x3f; + + bool expandMBB(Block &MBB); + bool expandMI(Block &MBB, BlockIt MBBI); + template <unsigned OP> bool expand(Block &MBB, BlockIt MBBI); + + MachineInstrBuilder buildMI(Block &MBB, BlockIt MBBI, unsigned Opcode) { + return BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(Opcode)); + } + + MachineInstrBuilder buildMI(Block &MBB, BlockIt MBBI, unsigned Opcode, + unsigned DstReg) { + return BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(Opcode), DstReg); + } + + MachineRegisterInfo &getRegInfo(Block &MBB) { return MBB.getParent()->getRegInfo(); } + + bool expandArith(unsigned OpLo, unsigned OpHi, Block &MBB, BlockIt MBBI); + bool expandLogic(unsigned Op, Block &MBB, BlockIt MBBI); + bool expandLogicImm(unsigned Op, Block &MBB, BlockIt MBBI); + bool isLogicImmOpRedundant(unsigned Op, unsigned ImmVal) const; + + template<typename Func> + bool expandAtomic(Block &MBB, BlockIt MBBI, Func f); + + template<typename Func> + bool expandAtomicBinaryOp(unsigned Opcode, Block &MBB, BlockIt MBBI, Func f); + + bool expandAtomicBinaryOp(unsigned Opcode, Block &MBB, BlockIt MBBI); + + bool expandAtomicArithmeticOp(unsigned MemOpcode, + unsigned ArithOpcode, + Block &MBB, + BlockIt MBBI); + + /// Scavenges a free GPR8 register for use. + unsigned scavengeGPR8(MachineInstr &MI); +}; + +char AVRExpandPseudo::ID = 0; + +bool AVRExpandPseudo::expandMBB(MachineBasicBlock &MBB) { + bool Modified = false; + + BlockIt MBBI = MBB.begin(), E = MBB.end(); + while (MBBI != E) { + BlockIt NMBBI = std::next(MBBI); + Modified |= expandMI(MBB, MBBI); + MBBI = NMBBI; + } + + return Modified; +} + +bool AVRExpandPseudo::runOnMachineFunction(MachineFunction &MF) { + bool Modified = false; + + const AVRSubtarget &STI = MF.getSubtarget<AVRSubtarget>(); + TRI = STI.getRegisterInfo(); + TII = STI.getInstrInfo(); + + // We need to track liveness in order to use register scavenging. + MF.getProperties().set(MachineFunctionProperties::Property::TracksLiveness); + + for (Block &MBB : MF) { + bool ContinueExpanding = true; + unsigned ExpandCount = 0; + + // Continue expanding the block until all pseudos are expanded. 
+ do { + assert(ExpandCount < 10 && "pseudo expand limit reached"); + + bool BlockModified = expandMBB(MBB); + Modified |= BlockModified; + ExpandCount++; + + ContinueExpanding = BlockModified; + } while (ContinueExpanding); + } + + return Modified; +} + +bool AVRExpandPseudo:: +expandArith(unsigned OpLo, unsigned OpHi, Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned SrcLoReg, SrcHiReg, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned SrcReg = MI.getOperand(2).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + bool DstIsKill = MI.getOperand(1).isKill(); + bool SrcIsKill = MI.getOperand(2).isKill(); + bool ImpIsDead = MI.getOperand(3).isDead(); + TRI->splitReg(SrcReg, SrcLoReg, SrcHiReg); + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + buildMI(MBB, MBBI, OpLo) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstLoReg, getKillRegState(DstIsKill)) + .addReg(SrcLoReg, getKillRegState(SrcIsKill)); + + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstHiReg, getKillRegState(DstIsKill)) + .addReg(SrcHiReg, getKillRegState(SrcIsKill)); + + if (ImpIsDead) + MIBHI->getOperand(3).setIsDead(); + + // SREG is always implicitly killed + MIBHI->getOperand(4).setIsKill(); + + MI.eraseFromParent(); + return true; +} + +bool AVRExpandPseudo:: +expandLogic(unsigned Op, Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned SrcLoReg, SrcHiReg, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned SrcReg = MI.getOperand(2).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + bool DstIsKill = MI.getOperand(1).isKill(); + bool SrcIsKill = MI.getOperand(2).isKill(); + bool ImpIsDead = MI.getOperand(3).isDead(); + TRI->splitReg(SrcReg, SrcLoReg, SrcHiReg); + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + auto MIBLO = buildMI(MBB, MBBI, Op) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstLoReg, getKillRegState(DstIsKill)) + .addReg(SrcLoReg, getKillRegState(SrcIsKill)); + + // SREG is always implicitly dead + MIBLO->getOperand(3).setIsDead(); + + auto MIBHI = buildMI(MBB, MBBI, Op) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstHiReg, getKillRegState(DstIsKill)) + .addReg(SrcHiReg, getKillRegState(SrcIsKill)); + + if (ImpIsDead) + MIBHI->getOperand(3).setIsDead(); + + MI.eraseFromParent(); + return true; +} + +bool AVRExpandPseudo:: + isLogicImmOpRedundant(unsigned Op, unsigned ImmVal) const { + + // ANDI Rd, 0xff is redundant. + if (Op == AVR::ANDIRdK && ImmVal == 0xff) + return true; + + // ORI Rd, 0x0 is redundant. 
+ if (Op == AVR::ORIRdK && ImmVal == 0x0) + return true; + + return false; +} + +bool AVRExpandPseudo:: +expandLogicImm(unsigned Op, Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + bool SrcIsKill = MI.getOperand(1).isKill(); + bool ImpIsDead = MI.getOperand(3).isDead(); + unsigned Imm = MI.getOperand(2).getImm(); + unsigned Lo8 = Imm & 0xff; + unsigned Hi8 = (Imm >> 8) & 0xff; + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + if (!isLogicImmOpRedundant(Op, Lo8)) { + auto MIBLO = buildMI(MBB, MBBI, Op) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstLoReg, getKillRegState(SrcIsKill)) + .addImm(Lo8); + + // SREG is always implicitly dead + MIBLO->getOperand(3).setIsDead(); + } + + if (!isLogicImmOpRedundant(Op, Hi8)) { + auto MIBHI = buildMI(MBB, MBBI, Op) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstHiReg, getKillRegState(SrcIsKill)) + .addImm(Hi8); + + if (ImpIsDead) + MIBHI->getOperand(3).setIsDead(); + } + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::ADDWRdRr>(Block &MBB, BlockIt MBBI) { + return expandArith(AVR::ADDRdRr, AVR::ADCRdRr, MBB, MBBI); +} + +template <> +bool AVRExpandPseudo::expand<AVR::ADCWRdRr>(Block &MBB, BlockIt MBBI) { + return expandArith(AVR::ADCRdRr, AVR::ADCRdRr, MBB, MBBI); +} + +template <> +bool AVRExpandPseudo::expand<AVR::SUBWRdRr>(Block &MBB, BlockIt MBBI) { + return expandArith(AVR::SUBRdRr, AVR::SBCRdRr, MBB, MBBI); +} + +template <> +bool AVRExpandPseudo::expand<AVR::SUBIWRdK>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + bool SrcIsKill = MI.getOperand(1).isKill(); + bool ImpIsDead = MI.getOperand(3).isDead(); + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + auto MIBLO = buildMI(MBB, MBBI, AVR::SUBIRdK) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstLoReg, getKillRegState(SrcIsKill)); + + auto MIBHI = buildMI(MBB, MBBI, AVR::SBCIRdK) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstHiReg, getKillRegState(SrcIsKill)); + + switch (MI.getOperand(2).getType()) { + case MachineOperand::MO_GlobalAddress: { + const GlobalValue *GV = MI.getOperand(2).getGlobal(); + int64_t Offs = MI.getOperand(2).getOffset(); + unsigned TF = MI.getOperand(2).getTargetFlags(); + MIBLO.addGlobalAddress(GV, Offs, TF | AVRII::MO_NEG | AVRII::MO_LO); + MIBHI.addGlobalAddress(GV, Offs, TF | AVRII::MO_NEG | AVRII::MO_HI); + break; + } + case MachineOperand::MO_Immediate: { + unsigned Imm = MI.getOperand(2).getImm(); + MIBLO.addImm(Imm & 0xff); + MIBHI.addImm((Imm >> 8) & 0xff); + break; + } + default: + llvm_unreachable("Unknown operand type!"); + } + + if (ImpIsDead) + MIBHI->getOperand(3).setIsDead(); + + // SREG is always implicitly killed + MIBHI->getOperand(4).setIsKill(); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::SBCWRdRr>(Block &MBB, BlockIt MBBI) { + return expandArith(AVR::SBCRdRr, AVR::SBCRdRr, MBB, MBBI); +} + +template <> +bool AVRExpandPseudo::expand<AVR::SBCIWRdK>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + bool SrcIsKill = 
MI.getOperand(1).isKill(); + bool ImpIsDead = MI.getOperand(3).isDead(); + unsigned Imm = MI.getOperand(2).getImm(); + unsigned Lo8 = Imm & 0xff; + unsigned Hi8 = (Imm >> 8) & 0xff; + OpLo = AVR::SBCIRdK; + OpHi = AVR::SBCIRdK; + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstLoReg, getKillRegState(SrcIsKill)) + .addImm(Lo8); + + // SREG is always implicitly killed + MIBLO->getOperand(4).setIsKill(); + + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstHiReg, getKillRegState(SrcIsKill)) + .addImm(Hi8); + + if (ImpIsDead) + MIBHI->getOperand(3).setIsDead(); + + // SREG is always implicitly killed + MIBHI->getOperand(4).setIsKill(); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::ANDWRdRr>(Block &MBB, BlockIt MBBI) { + return expandLogic(AVR::ANDRdRr, MBB, MBBI); +} + +template <> +bool AVRExpandPseudo::expand<AVR::ANDIWRdK>(Block &MBB, BlockIt MBBI) { + return expandLogicImm(AVR::ANDIRdK, MBB, MBBI); +} + +template <> +bool AVRExpandPseudo::expand<AVR::ORWRdRr>(Block &MBB, BlockIt MBBI) { + return expandLogic(AVR::ORRdRr, MBB, MBBI); +} + +template <> +bool AVRExpandPseudo::expand<AVR::ORIWRdK>(Block &MBB, BlockIt MBBI) { + return expandLogicImm(AVR::ORIRdK, MBB, MBBI); +} + +template <> +bool AVRExpandPseudo::expand<AVR::EORWRdRr>(Block &MBB, BlockIt MBBI) { + return expandLogic(AVR::EORRdRr, MBB, MBBI); +} + +template <> +bool AVRExpandPseudo::expand<AVR::COMWRd>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + bool DstIsKill = MI.getOperand(1).isKill(); + bool ImpIsDead = MI.getOperand(2).isDead(); + OpLo = AVR::COMRd; + OpHi = AVR::COMRd; + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstLoReg, getKillRegState(DstIsKill)); + + // SREG is always implicitly dead + MIBLO->getOperand(2).setIsDead(); + + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstHiReg, getKillRegState(DstIsKill)); + + if (ImpIsDead) + MIBHI->getOperand(2).setIsDead(); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::CPWRdRr>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, SrcLoReg, SrcHiReg, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned SrcReg = MI.getOperand(1).getReg(); + bool DstIsKill = MI.getOperand(0).isKill(); + bool SrcIsKill = MI.getOperand(1).isKill(); + bool ImpIsDead = MI.getOperand(2).isDead(); + OpLo = AVR::CPRdRr; + OpHi = AVR::CPCRdRr; + TRI->splitReg(SrcReg, SrcLoReg, SrcHiReg); + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + // Low part + buildMI(MBB, MBBI, OpLo) + .addReg(DstLoReg, getKillRegState(DstIsKill)) + .addReg(SrcLoReg, getKillRegState(SrcIsKill)); + + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(DstHiReg, getKillRegState(DstIsKill)) + .addReg(SrcHiReg, getKillRegState(SrcIsKill)); + + if (ImpIsDead) + MIBHI->getOperand(2).setIsDead(); + + // SREG is always implicitly killed + MIBHI->getOperand(3).setIsKill(); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::CPCWRdRr>(Block &MBB, 
BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, SrcLoReg, SrcHiReg, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned SrcReg = MI.getOperand(1).getReg(); + bool DstIsKill = MI.getOperand(0).isKill(); + bool SrcIsKill = MI.getOperand(1).isKill(); + bool ImpIsDead = MI.getOperand(2).isDead(); + OpLo = AVR::CPCRdRr; + OpHi = AVR::CPCRdRr; + TRI->splitReg(SrcReg, SrcLoReg, SrcHiReg); + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(DstLoReg, getKillRegState(DstIsKill)) + .addReg(SrcLoReg, getKillRegState(SrcIsKill)); + + // SREG is always implicitly killed + MIBLO->getOperand(3).setIsKill(); + + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(DstHiReg, getKillRegState(DstIsKill)) + .addReg(SrcHiReg, getKillRegState(SrcIsKill)); + + if (ImpIsDead) + MIBHI->getOperand(2).setIsDead(); + + // SREG is always implicitly killed + MIBHI->getOperand(3).setIsKill(); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::LDIWRdK>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + OpLo = AVR::LDIRdK; + OpHi = AVR::LDIRdK; + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)); + + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)); + + switch (MI.getOperand(1).getType()) { + case MachineOperand::MO_GlobalAddress: { + const GlobalValue *GV = MI.getOperand(1).getGlobal(); + int64_t Offs = MI.getOperand(1).getOffset(); + unsigned TF = MI.getOperand(1).getTargetFlags(); + + MIBLO.addGlobalAddress(GV, Offs, TF | AVRII::MO_LO); + MIBHI.addGlobalAddress(GV, Offs, TF | AVRII::MO_HI); + break; + } + case MachineOperand::MO_BlockAddress: { + const BlockAddress *BA = MI.getOperand(1).getBlockAddress(); + unsigned TF = MI.getOperand(1).getTargetFlags(); + + MIBLO.add(MachineOperand::CreateBA(BA, TF | AVRII::MO_LO)); + MIBHI.add(MachineOperand::CreateBA(BA, TF | AVRII::MO_HI)); + break; + } + case MachineOperand::MO_Immediate: { + unsigned Imm = MI.getOperand(1).getImm(); + + MIBLO.addImm(Imm & 0xff); + MIBHI.addImm((Imm >> 8) & 0xff); + break; + } + default: + llvm_unreachable("Unknown operand type!"); + } + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::LDSWRdK>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + OpLo = AVR::LDSRdK; + OpHi = AVR::LDSRdK; + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)); + + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)); + + switch (MI.getOperand(1).getType()) { + case MachineOperand::MO_GlobalAddress: { + const GlobalValue *GV = MI.getOperand(1).getGlobal(); + int64_t Offs = MI.getOperand(1).getOffset(); + unsigned TF = MI.getOperand(1).getTargetFlags(); + + MIBLO.addGlobalAddress(GV, Offs, TF); + MIBHI.addGlobalAddress(GV, Offs + 1, TF); + break; + } + case MachineOperand::MO_Immediate: { + unsigned Imm = MI.getOperand(1).getImm(); + + MIBLO.addImm(Imm); + MIBHI.addImm(Imm + 1); + break; + } + default: + 
llvm_unreachable("Unknown operand type!"); + } + + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::LDWRdPtr>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned TmpReg = 0; // 0 for no temporary register + unsigned SrcReg = MI.getOperand(1).getReg(); + bool SrcIsKill = MI.getOperand(1).isKill(); + OpLo = AVR::LDRdPtr; + OpHi = AVR::LDDRdPtrQ; + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + // Use a temporary register if src and dst registers are the same. + if (DstReg == SrcReg) + TmpReg = scavengeGPR8(MI); + + unsigned CurDstLoReg = (DstReg == SrcReg) ? TmpReg : DstLoReg; + unsigned CurDstHiReg = (DstReg == SrcReg) ? TmpReg : DstHiReg; + + // Load low byte. + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(CurDstLoReg, RegState::Define) + .addReg(SrcReg, RegState::Define); + + // Push low byte onto stack if necessary. + if (TmpReg) + buildMI(MBB, MBBI, AVR::PUSHRr).addReg(TmpReg); + + // Load high byte. + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(CurDstHiReg, RegState::Define) + .addReg(SrcReg, getKillRegState(SrcIsKill)) + .addImm(1); + + if (TmpReg) { + // Move the high byte into the final destination. + buildMI(MBB, MBBI, AVR::MOVRdRr).addReg(DstHiReg).addReg(TmpReg); + + // Move the low byte from the scratch space into the final destination. + buildMI(MBB, MBBI, AVR::POPRd).addReg(DstLoReg); + } + + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::LDWRdPtrPi>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned SrcReg = MI.getOperand(1).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + bool SrcIsDead = MI.getOperand(1).isKill(); + OpLo = AVR::LDRdPtrPi; + OpHi = AVR::LDRdPtrPi; + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + assert(DstReg != SrcReg && "SrcReg and DstReg cannot be the same"); + + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(SrcReg, RegState::Define) + .addReg(SrcReg, RegState::Kill); + + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(SrcReg, RegState::Define | getDeadRegState(SrcIsDead)) + .addReg(SrcReg, RegState::Kill); + + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::LDWRdPtrPd>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned SrcReg = MI.getOperand(1).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + bool SrcIsDead = MI.getOperand(1).isKill(); + OpLo = AVR::LDRdPtrPd; + OpHi = AVR::LDRdPtrPd; + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + assert(DstReg != SrcReg && "SrcReg and DstReg cannot be the same"); + + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(SrcReg, RegState::Define) + .addReg(SrcReg, RegState::Kill); + + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(SrcReg, RegState::Define | 
getDeadRegState(SrcIsDead)) + .addReg(SrcReg, RegState::Kill); + + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::LDDWRdPtrQ>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned TmpReg = 0; // 0 for no temporary register + unsigned SrcReg = MI.getOperand(1).getReg(); + unsigned Imm = MI.getOperand(2).getImm(); + bool SrcIsKill = MI.getOperand(1).isKill(); + OpLo = AVR::LDDRdPtrQ; + OpHi = AVR::LDDRdPtrQ; + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + // Since we add 1 to the Imm value for the high byte below, and 63 is the highest Imm value + // allowed for the instruction, 62 is the limit here. + assert(Imm <= 62 && "Offset is out of range"); + + // Use a temporary register if src and dst registers are the same. + if (DstReg == SrcReg) + TmpReg = scavengeGPR8(MI); + + unsigned CurDstLoReg = (DstReg == SrcReg) ? TmpReg : DstLoReg; + unsigned CurDstHiReg = (DstReg == SrcReg) ? TmpReg : DstHiReg; + + // Load low byte. + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(CurDstLoReg, RegState::Define) + .addReg(SrcReg) + .addImm(Imm); + + // Push low byte onto stack if necessary. + if (TmpReg) + buildMI(MBB, MBBI, AVR::PUSHRr).addReg(TmpReg); + + // Load high byte. + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(CurDstHiReg, RegState::Define) + .addReg(SrcReg, getKillRegState(SrcIsKill)) + .addImm(Imm + 1); + + if (TmpReg) { + // Move the high byte into the final destination. + buildMI(MBB, MBBI, AVR::MOVRdRr).addReg(DstHiReg).addReg(TmpReg); + + // Move the low byte from the scratch space into the final destination. + buildMI(MBB, MBBI, AVR::POPRd).addReg(DstLoReg); + } + + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::LPMWRdZ>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned TmpReg = 0; // 0 for no temporary register + unsigned SrcReg = MI.getOperand(1).getReg(); + bool SrcIsKill = MI.getOperand(1).isKill(); + OpLo = AVR::LPMRdZPi; + OpHi = AVR::LPMRdZ; + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + // Use a temporary register if src and dst registers are the same. + if (DstReg == SrcReg) + TmpReg = scavengeGPR8(MI); + + unsigned CurDstLoReg = (DstReg == SrcReg) ? TmpReg : DstLoReg; + unsigned CurDstHiReg = (DstReg == SrcReg) ? TmpReg : DstHiReg; + + // Load low byte. + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(CurDstLoReg, RegState::Define) + .addReg(SrcReg); + + // Push low byte onto stack if necessary. + if (TmpReg) + buildMI(MBB, MBBI, AVR::PUSHRr).addReg(TmpReg); + + // Load high byte. + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(CurDstHiReg, RegState::Define) + .addReg(SrcReg, getKillRegState(SrcIsKill)); + + if (TmpReg) { + // Move the high byte into the final destination. + buildMI(MBB, MBBI, AVR::MOVRdRr).addReg(DstHiReg).addReg(TmpReg); + + // Move the low byte from the scratch space into the final destination. 
+ buildMI(MBB, MBBI, AVR::POPRd).addReg(DstLoReg); + } + + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::LPMWRdZPi>(Block &MBB, BlockIt MBBI) { + llvm_unreachable("wide LPMPi is unimplemented"); +} + +template<typename Func> +bool AVRExpandPseudo::expandAtomic(Block &MBB, BlockIt MBBI, Func f) { + // Remove the pseudo instruction. + MachineInstr &MI = *MBBI; + + // Store the SREG. + buildMI(MBB, MBBI, AVR::INRdA) + .addReg(SCRATCH_REGISTER, RegState::Define) + .addImm(SREG_ADDR); + + // Disable exceptions. + buildMI(MBB, MBBI, AVR::BCLRs).addImm(7); // CLI + + f(MI); + + // Restore the status reg. + buildMI(MBB, MBBI, AVR::OUTARr) + .addImm(SREG_ADDR) + .addReg(SCRATCH_REGISTER); + + MI.eraseFromParent(); + return true; +} + +template<typename Func> +bool AVRExpandPseudo::expandAtomicBinaryOp(unsigned Opcode, + Block &MBB, + BlockIt MBBI, + Func f) { + return expandAtomic(MBB, MBBI, [&](MachineInstr &MI) { + auto Op1 = MI.getOperand(0); + auto Op2 = MI.getOperand(1); + + MachineInstr &NewInst = + *buildMI(MBB, MBBI, Opcode).add(Op1).add(Op2).getInstr(); + f(NewInst); + }); +} + +bool AVRExpandPseudo::expandAtomicBinaryOp(unsigned Opcode, + Block &MBB, + BlockIt MBBI) { + return expandAtomicBinaryOp(Opcode, MBB, MBBI, [](MachineInstr &MI) {}); +} + +bool AVRExpandPseudo::expandAtomicArithmeticOp(unsigned Width, + unsigned ArithOpcode, + Block &MBB, + BlockIt MBBI) { + return expandAtomic(MBB, MBBI, [&](MachineInstr &MI) { + auto Op1 = MI.getOperand(0); + auto Op2 = MI.getOperand(1); + + unsigned LoadOpcode = (Width == 8) ? AVR::LDRdPtr : AVR::LDWRdPtr; + unsigned StoreOpcode = (Width == 8) ? AVR::STPtrRr : AVR::STWPtrRr; + + // Create the load + buildMI(MBB, MBBI, LoadOpcode).add(Op1).add(Op2); + + // Create the arithmetic op + buildMI(MBB, MBBI, ArithOpcode).add(Op1).add(Op1).add(Op2); + + // Create the store + buildMI(MBB, MBBI, StoreOpcode).add(Op2).add(Op1); + }); +} + +unsigned AVRExpandPseudo::scavengeGPR8(MachineInstr &MI) { + MachineBasicBlock &MBB = *MI.getParent(); + RegScavenger RS; + + RS.enterBasicBlock(MBB); + RS.forward(MI); + + BitVector Candidates = + TRI->getAllocatableSet + (*MBB.getParent(), &AVR::GPR8RegClass); + + // Exclude all the registers being used by the instruction. 
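+ // Any register the pseudo itself reads must be dropped from the candidate
+ // set; handing it out as the scratch register would clobber a source operand
+ // before its final use.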
+ for (MachineOperand &MO : MI.operands()) { + if (MO.isReg() && MO.getReg() != 0 && !MO.isDef() && + !TargetRegisterInfo::isVirtualRegister(MO.getReg())) + Candidates.reset(MO.getReg()); + } + + BitVector Available = RS.getRegsAvailable(&AVR::GPR8RegClass); + Available &= Candidates; + + signed Reg = Available.find_first(); + assert(Reg != -1 && "ran out of registers"); + return Reg; +} + +template<> +bool AVRExpandPseudo::expand<AVR::AtomicLoad8>(Block &MBB, BlockIt MBBI) { + return expandAtomicBinaryOp(AVR::LDRdPtr, MBB, MBBI); +} + +template<> +bool AVRExpandPseudo::expand<AVR::AtomicLoad16>(Block &MBB, BlockIt MBBI) { + return expandAtomicBinaryOp(AVR::LDWRdPtr, MBB, MBBI); +} + +template<> +bool AVRExpandPseudo::expand<AVR::AtomicStore8>(Block &MBB, BlockIt MBBI) { + return expandAtomicBinaryOp(AVR::STPtrRr, MBB, MBBI); +} + +template<> +bool AVRExpandPseudo::expand<AVR::AtomicStore16>(Block &MBB, BlockIt MBBI) { + return expandAtomicBinaryOp(AVR::STWPtrRr, MBB, MBBI); +} + +template<> +bool AVRExpandPseudo::expand<AVR::AtomicLoadAdd8>(Block &MBB, BlockIt MBBI) { + return expandAtomicArithmeticOp(8, AVR::ADDRdRr, MBB, MBBI); +} + +template<> +bool AVRExpandPseudo::expand<AVR::AtomicLoadAdd16>(Block &MBB, BlockIt MBBI) { + return expandAtomicArithmeticOp(16, AVR::ADDWRdRr, MBB, MBBI); +} + +template<> +bool AVRExpandPseudo::expand<AVR::AtomicLoadSub8>(Block &MBB, BlockIt MBBI) { + return expandAtomicArithmeticOp(8, AVR::SUBRdRr, MBB, MBBI); +} + +template<> +bool AVRExpandPseudo::expand<AVR::AtomicLoadSub16>(Block &MBB, BlockIt MBBI) { + return expandAtomicArithmeticOp(16, AVR::SUBWRdRr, MBB, MBBI); +} + +template<> +bool AVRExpandPseudo::expand<AVR::AtomicLoadAnd8>(Block &MBB, BlockIt MBBI) { + return expandAtomicArithmeticOp(8, AVR::ANDRdRr, MBB, MBBI); +} + +template<> +bool AVRExpandPseudo::expand<AVR::AtomicLoadAnd16>(Block &MBB, BlockIt MBBI) { + return expandAtomicArithmeticOp(16, AVR::ANDWRdRr, MBB, MBBI); +} + +template<> +bool AVRExpandPseudo::expand<AVR::AtomicLoadOr8>(Block &MBB, BlockIt MBBI) { + return expandAtomicArithmeticOp(8, AVR::ORRdRr, MBB, MBBI); +} + +template<> +bool AVRExpandPseudo::expand<AVR::AtomicLoadOr16>(Block &MBB, BlockIt MBBI) { + return expandAtomicArithmeticOp(16, AVR::ORWRdRr, MBB, MBBI); +} + +template<> +bool AVRExpandPseudo::expand<AVR::AtomicLoadXor8>(Block &MBB, BlockIt MBBI) { + return expandAtomicArithmeticOp(8, AVR::EORRdRr, MBB, MBBI); +} + +template<> +bool AVRExpandPseudo::expand<AVR::AtomicLoadXor16>(Block &MBB, BlockIt MBBI) { + return expandAtomicArithmeticOp(16, AVR::EORWRdRr, MBB, MBBI); +} + +template<> +bool AVRExpandPseudo::expand<AVR::AtomicFence>(Block &MBB, BlockIt MBBI) { + // On AVR, there is only one core and so atomic fences do nothing. + MBBI->eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::STSWKRr>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, SrcLoReg, SrcHiReg; + unsigned SrcReg = MI.getOperand(1).getReg(); + bool SrcIsKill = MI.getOperand(1).isKill(); + OpLo = AVR::STSKRr; + OpHi = AVR::STSKRr; + TRI->splitReg(SrcReg, SrcLoReg, SrcHiReg); + + // Write the high byte first in case this address belongs to a special + // I/O address with a special temporary register. 
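+ // Sixteen-bit peripheral registers (the 16-bit timer/counter registers, for
+ // example) latch the high byte in a hidden temporary register and commit both
+ // bytes when the low byte is written, so the high byte must be emitted first.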
+ auto MIBHI = buildMI(MBB, MBBI, OpHi); + auto MIBLO = buildMI(MBB, MBBI, OpLo); + + switch (MI.getOperand(0).getType()) { + case MachineOperand::MO_GlobalAddress: { + const GlobalValue *GV = MI.getOperand(0).getGlobal(); + int64_t Offs = MI.getOperand(0).getOffset(); + unsigned TF = MI.getOperand(0).getTargetFlags(); + + MIBLO.addGlobalAddress(GV, Offs, TF); + MIBHI.addGlobalAddress(GV, Offs + 1, TF); + break; + } + case MachineOperand::MO_Immediate: { + unsigned Imm = MI.getOperand(0).getImm(); + + MIBLO.addImm(Imm); + MIBHI.addImm(Imm + 1); + break; + } + default: + llvm_unreachable("Unknown operand type!"); + } + + MIBLO.addReg(SrcLoReg, getKillRegState(SrcIsKill)); + MIBHI.addReg(SrcHiReg, getKillRegState(SrcIsKill)); + + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::STWPtrRr>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, SrcLoReg, SrcHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned SrcReg = MI.getOperand(1).getReg(); + bool SrcIsKill = MI.getOperand(1).isKill(); + OpLo = AVR::STPtrRr; + OpHi = AVR::STDPtrQRr; + TRI->splitReg(SrcReg, SrcLoReg, SrcHiReg); + + //:TODO: need to reverse this order like inw and stsw? + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(DstReg) + .addReg(SrcLoReg, getKillRegState(SrcIsKill)); + + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(DstReg) + .addImm(1) + .addReg(SrcHiReg, getKillRegState(SrcIsKill)); + + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::STWPtrPiRr>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, SrcLoReg, SrcHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned SrcReg = MI.getOperand(2).getReg(); + unsigned Imm = MI.getOperand(3).getImm(); + bool DstIsDead = MI.getOperand(0).isDead(); + bool SrcIsKill = MI.getOperand(2).isKill(); + OpLo = AVR::STPtrPiRr; + OpHi = AVR::STPtrPiRr; + TRI->splitReg(SrcReg, SrcLoReg, SrcHiReg); + + assert(DstReg != SrcReg && "SrcReg and DstReg cannot be the same"); + + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(DstReg, RegState::Define) + .addReg(DstReg, RegState::Kill) + .addReg(SrcLoReg, getKillRegState(SrcIsKill)) + .addImm(Imm); + + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstReg, RegState::Kill) + .addReg(SrcHiReg, getKillRegState(SrcIsKill)) + .addImm(Imm); + + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::STWPtrPdRr>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, SrcLoReg, SrcHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned SrcReg = MI.getOperand(2).getReg(); + unsigned Imm = MI.getOperand(3).getImm(); + bool DstIsDead = MI.getOperand(0).isDead(); + bool SrcIsKill = MI.getOperand(2).isKill(); + OpLo = AVR::STPtrPdRr; + OpHi = AVR::STPtrPdRr; + TRI->splitReg(SrcReg, SrcLoReg, SrcHiReg); + + assert(DstReg != SrcReg && "SrcReg and DstReg cannot be the same"); + + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(DstReg, RegState::Define) + .addReg(DstReg, RegState::Kill) + .addReg(SrcHiReg, getKillRegState(SrcIsKill)) + .addImm(Imm); + + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(DstReg, RegState::Define | 
getDeadRegState(DstIsDead)) + .addReg(DstReg, RegState::Kill) + .addReg(SrcLoReg, getKillRegState(SrcIsKill)) + .addImm(Imm); + + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::STDWPtrQRr>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, SrcLoReg, SrcHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned SrcReg = MI.getOperand(2).getReg(); + unsigned Imm = MI.getOperand(1).getImm(); + bool DstIsKill = MI.getOperand(0).isKill(); + bool SrcIsKill = MI.getOperand(2).isKill(); + OpLo = AVR::STDPtrQRr; + OpHi = AVR::STDPtrQRr; + TRI->splitReg(SrcReg, SrcLoReg, SrcHiReg); + + // Since we add 1 to the Imm value for the high byte below, and 63 is the highest Imm value + // allowed for the instruction, 62 is the limit here. + assert(Imm <= 62 && "Offset is out of range"); + + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(DstReg) + .addImm(Imm) + .addReg(SrcLoReg, getKillRegState(SrcIsKill)); + + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(DstReg, getKillRegState(DstIsKill)) + .addImm(Imm + 1) + .addReg(SrcHiReg, getKillRegState(SrcIsKill)); + + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::INWRdA>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, DstLoReg, DstHiReg; + unsigned Imm = MI.getOperand(1).getImm(); + unsigned DstReg = MI.getOperand(0).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + OpLo = AVR::INRdA; + OpHi = AVR::INRdA; + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + // Since we add 1 to the Imm value for the high byte below, and 63 is the highest Imm value + // allowed for the instruction, 62 is the limit here. + assert(Imm <= 62 && "Address is out of range"); + + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)) + .addImm(Imm); + + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)) + .addImm(Imm + 1); + + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::OUTWARr>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, SrcLoReg, SrcHiReg; + unsigned Imm = MI.getOperand(0).getImm(); + unsigned SrcReg = MI.getOperand(1).getReg(); + bool SrcIsKill = MI.getOperand(1).isKill(); + OpLo = AVR::OUTARr; + OpHi = AVR::OUTARr; + TRI->splitReg(SrcReg, SrcLoReg, SrcHiReg); + + // Since we add 1 to the Imm value for the high byte below, and 63 is the highest Imm value + // allowed for the instruction, 62 is the limit here. 
+ assert(Imm <= 62 && "Address is out of range"); + + // 16 bit I/O writes need the high byte first + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addImm(Imm + 1) + .addReg(SrcHiReg, getKillRegState(SrcIsKill)); + + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addImm(Imm) + .addReg(SrcLoReg, getKillRegState(SrcIsKill)); + + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::PUSHWRr>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, SrcLoReg, SrcHiReg; + unsigned SrcReg = MI.getOperand(0).getReg(); + bool SrcIsKill = MI.getOperand(0).isKill(); + unsigned Flags = MI.getFlags(); + OpLo = AVR::PUSHRr; + OpHi = AVR::PUSHRr; + TRI->splitReg(SrcReg, SrcLoReg, SrcHiReg); + + // Low part + buildMI(MBB, MBBI, OpLo) + .addReg(SrcLoReg, getKillRegState(SrcIsKill)) + .setMIFlags(Flags); + + // High part + buildMI(MBB, MBBI, OpHi) + .addReg(SrcHiReg, getKillRegState(SrcIsKill)) + .setMIFlags(Flags); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::POPWRd>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned Flags = MI.getFlags(); + OpLo = AVR::POPRd; + OpHi = AVR::POPRd; + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + buildMI(MBB, MBBI, OpHi, DstHiReg).setMIFlags(Flags); // High + buildMI(MBB, MBBI, OpLo, DstLoReg).setMIFlags(Flags); // Low + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::LSLWRd>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + bool DstIsKill = MI.getOperand(1).isKill(); + bool ImpIsDead = MI.getOperand(2).isDead(); + OpLo = AVR::ADDRdRr; // ADD Rd, Rd <==> LSL Rd + OpHi = AVR::ADCRdRr; // ADC Rd, Rd <==> ROL Rd + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + // Low part + buildMI(MBB, MBBI, OpLo) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstLoReg) + .addReg(DstLoReg, getKillRegState(DstIsKill)); + + auto MIBHI = buildMI(MBB, MBBI, OpHi) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstHiReg) + .addReg(DstHiReg, getKillRegState(DstIsKill)); + + if (ImpIsDead) + MIBHI->getOperand(3).setIsDead(); + + // SREG is always implicitly killed + MIBHI->getOperand(4).setIsKill(); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::LSRWRd>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + bool DstIsKill = MI.getOperand(1).isKill(); + bool ImpIsDead = MI.getOperand(2).isDead(); + OpLo = AVR::RORRd; + OpHi = AVR::LSRRd; + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + // High part + buildMI(MBB, MBBI, OpHi) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstHiReg, getKillRegState(DstIsKill)); + + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstLoReg, getKillRegState(DstIsKill)); + + if (ImpIsDead) + MIBLO->getOperand(2).setIsDead(); + + // SREG is always implicitly killed + MIBLO->getOperand(3).setIsKill(); + + MI.eraseFromParent(); + return true; +} + +template <> +bool 
AVRExpandPseudo::expand<AVR::RORWRd>(Block &MBB, BlockIt MBBI) { + llvm_unreachable("RORW unimplemented"); + return false; +} + +template <> +bool AVRExpandPseudo::expand<AVR::ROLWRd>(Block &MBB, BlockIt MBBI) { + llvm_unreachable("ROLW unimplemented"); + return false; +} + +template <> +bool AVRExpandPseudo::expand<AVR::ASRWRd>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + bool DstIsKill = MI.getOperand(1).isKill(); + bool ImpIsDead = MI.getOperand(2).isDead(); + OpLo = AVR::RORRd; + OpHi = AVR::ASRRd; + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + // High part + buildMI(MBB, MBBI, OpHi) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstHiReg, getKillRegState(DstIsKill)); + + auto MIBLO = buildMI(MBB, MBBI, OpLo) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstLoReg, getKillRegState(DstIsKill)); + + if (ImpIsDead) + MIBLO->getOperand(2).setIsDead(); + + // SREG is always implicitly killed + MIBLO->getOperand(3).setIsKill(); + + MI.eraseFromParent(); + return true; +} + +template <> bool AVRExpandPseudo::expand<AVR::SEXT>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned DstLoReg, DstHiReg; + // sext R17:R16, R17 + // mov r16, r17 + // lsl r17 + // sbc r17, r17 + // sext R17:R16, R13 + // mov r16, r13 + // mov r17, r13 + // lsl r17 + // sbc r17, r17 + // sext R17:R16, R16 + // mov r17, r16 + // lsl r17 + // sbc r17, r17 + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned SrcReg = MI.getOperand(1).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + bool SrcIsKill = MI.getOperand(1).isKill(); + bool ImpIsDead = MI.getOperand(2).isDead(); + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + if (SrcReg != DstLoReg) { + auto MOV = buildMI(MBB, MBBI, AVR::MOVRdRr) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(SrcReg); + + if (SrcReg == DstHiReg) { + MOV->getOperand(1).setIsKill(); + } + } + + if (SrcReg != DstHiReg) { + buildMI(MBB, MBBI, AVR::MOVRdRr) + .addReg(DstHiReg, RegState::Define) + .addReg(SrcReg, getKillRegState(SrcIsKill)); + } + + buildMI(MBB, MBBI, AVR::ADDRdRr) // LSL Rd <==> ADD Rd, Rr + .addReg(DstHiReg, RegState::Define) + .addReg(DstHiReg) + .addReg(DstHiReg, RegState::Kill); + + auto SBC = buildMI(MBB, MBBI, AVR::SBCRdRr) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstHiReg, RegState::Kill) + .addReg(DstHiReg, RegState::Kill); + + if (ImpIsDead) + SBC->getOperand(3).setIsDead(); + + // SREG is always implicitly killed + SBC->getOperand(4).setIsKill(); + + MI.eraseFromParent(); + return true; +} + +template <> bool AVRExpandPseudo::expand<AVR::ZEXT>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned DstLoReg, DstHiReg; + // zext R25:R24, R20 + // mov R24, R20 + // eor R25, R25 + // zext R25:R24, R24 + // eor R25, R25 + // zext R25:R24, R25 + // mov R24, R25 + // eor R25, R25 + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned SrcReg = MI.getOperand(1).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + bool SrcIsKill = MI.getOperand(1).isKill(); + bool ImpIsDead = MI.getOperand(2).isDead(); + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + if (SrcReg != DstLoReg) { + buildMI(MBB, MBBI, AVR::MOVRdRr) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(SrcReg, getKillRegState(SrcIsKill)); + } + + auto EOR = buildMI(MBB, MBBI, 
AVR::EORRdRr) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)) + .addReg(DstHiReg, RegState::Kill) + .addReg(DstHiReg, RegState::Kill); + + if (ImpIsDead) + EOR->getOperand(3).setIsDead(); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::SPREAD>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned OpLo, OpHi, DstLoReg, DstHiReg; + unsigned DstReg = MI.getOperand(0).getReg(); + bool DstIsDead = MI.getOperand(0).isDead(); + unsigned Flags = MI.getFlags(); + OpLo = AVR::INRdA; + OpHi = AVR::INRdA; + TRI->splitReg(DstReg, DstLoReg, DstHiReg); + + // Low part + buildMI(MBB, MBBI, OpLo) + .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead)) + .addImm(0x3d) + .setMIFlags(Flags); + + // High part + buildMI(MBB, MBBI, OpHi) + .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead)) + .addImm(0x3e) + .setMIFlags(Flags); + + MI.eraseFromParent(); + return true; +} + +template <> +bool AVRExpandPseudo::expand<AVR::SPWRITE>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + unsigned SrcLoReg, SrcHiReg; + unsigned SrcReg = MI.getOperand(1).getReg(); + bool SrcIsKill = MI.getOperand(1).isKill(); + unsigned Flags = MI.getFlags(); + TRI->splitReg(SrcReg, SrcLoReg, SrcHiReg); + + buildMI(MBB, MBBI, AVR::INRdA) + .addReg(AVR::R0, RegState::Define) + .addImm(SREG_ADDR) + .setMIFlags(Flags); + + buildMI(MBB, MBBI, AVR::BCLRs).addImm(0x07).setMIFlags(Flags); + + buildMI(MBB, MBBI, AVR::OUTARr) + .addImm(0x3e) + .addReg(SrcHiReg, getKillRegState(SrcIsKill)) + .setMIFlags(Flags); + + buildMI(MBB, MBBI, AVR::OUTARr) + .addImm(SREG_ADDR) + .addReg(AVR::R0, RegState::Kill) + .setMIFlags(Flags); + + buildMI(MBB, MBBI, AVR::OUTARr) + .addImm(0x3d) + .addReg(SrcLoReg, getKillRegState(SrcIsKill)) + .setMIFlags(Flags); + + MI.eraseFromParent(); + return true; +} + +bool AVRExpandPseudo::expandMI(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + int Opcode = MBBI->getOpcode(); + +#define EXPAND(Op) \ + case Op: \ + return expand<Op>(MBB, MI) + + switch (Opcode) { + EXPAND(AVR::ADDWRdRr); + EXPAND(AVR::ADCWRdRr); + EXPAND(AVR::SUBWRdRr); + EXPAND(AVR::SUBIWRdK); + EXPAND(AVR::SBCWRdRr); + EXPAND(AVR::SBCIWRdK); + EXPAND(AVR::ANDWRdRr); + EXPAND(AVR::ANDIWRdK); + EXPAND(AVR::ORWRdRr); + EXPAND(AVR::ORIWRdK); + EXPAND(AVR::EORWRdRr); + EXPAND(AVR::COMWRd); + EXPAND(AVR::CPWRdRr); + EXPAND(AVR::CPCWRdRr); + EXPAND(AVR::LDIWRdK); + EXPAND(AVR::LDSWRdK); + EXPAND(AVR::LDWRdPtr); + EXPAND(AVR::LDWRdPtrPi); + EXPAND(AVR::LDWRdPtrPd); + case AVR::LDDWRdYQ: //:FIXME: remove this once PR13375 gets fixed + EXPAND(AVR::LDDWRdPtrQ); + EXPAND(AVR::LPMWRdZ); + EXPAND(AVR::LPMWRdZPi); + EXPAND(AVR::AtomicLoad8); + EXPAND(AVR::AtomicLoad16); + EXPAND(AVR::AtomicStore8); + EXPAND(AVR::AtomicStore16); + EXPAND(AVR::AtomicLoadAdd8); + EXPAND(AVR::AtomicLoadAdd16); + EXPAND(AVR::AtomicLoadSub8); + EXPAND(AVR::AtomicLoadSub16); + EXPAND(AVR::AtomicLoadAnd8); + EXPAND(AVR::AtomicLoadAnd16); + EXPAND(AVR::AtomicLoadOr8); + EXPAND(AVR::AtomicLoadOr16); + EXPAND(AVR::AtomicLoadXor8); + EXPAND(AVR::AtomicLoadXor16); + EXPAND(AVR::AtomicFence); + EXPAND(AVR::STSWKRr); + EXPAND(AVR::STWPtrRr); + EXPAND(AVR::STWPtrPiRr); + EXPAND(AVR::STWPtrPdRr); + EXPAND(AVR::STDWPtrQRr); + EXPAND(AVR::INWRdA); + EXPAND(AVR::OUTWARr); + EXPAND(AVR::PUSHWRr); + EXPAND(AVR::POPWRd); + EXPAND(AVR::LSLWRd); + EXPAND(AVR::LSRWRd); + EXPAND(AVR::RORWRd); + EXPAND(AVR::ROLWRd); + EXPAND(AVR::ASRWRd); + EXPAND(AVR::SEXT); + EXPAND(AVR::ZEXT); + EXPAND(AVR::SPREAD); 
+ EXPAND(AVR::SPWRITE); + } +#undef EXPAND + return false; +} + +} // end of anonymous namespace + +INITIALIZE_PASS(AVRExpandPseudo, "avr-expand-pseudo", + AVR_EXPAND_PSEUDO_NAME, false, false) +namespace llvm { + +FunctionPass *createAVRExpandPseudoPass() { return new AVRExpandPseudo(); } + +} // end of namespace llvm diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRFrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRFrameLowering.cpp new file mode 100644 index 000000000000..5e91bb8632c1 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRFrameLowering.cpp @@ -0,0 +1,545 @@ +//===-- AVRFrameLowering.cpp - AVR Frame Information ----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the AVR implementation of TargetFrameLowering class. +// +//===----------------------------------------------------------------------===// + +#include "AVRFrameLowering.h" + +#include "AVR.h" +#include "AVRInstrInfo.h" +#include "AVRMachineFunctionInfo.h" +#include "AVRTargetMachine.h" +#include "MCTargetDesc/AVRMCTargetDesc.h" + +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/IR/Function.h" + +#include <vector> + +namespace llvm { + +AVRFrameLowering::AVRFrameLowering() + : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 1, -2) {} + +bool AVRFrameLowering::canSimplifyCallFramePseudos( + const MachineFunction &MF) const { + // Always simplify call frame pseudo instructions, even when + // hasReservedCallFrame is false. + return true; +} + +bool AVRFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const { + // Reserve call frame memory in function prologue under the following + // conditions: + // - Y pointer is reserved to be the frame pointer. + // - The function does not contain variable sized objects. + + const MachineFrameInfo &MFI = MF.getFrameInfo(); + return hasFP(MF) && !MFI.hasVarSizedObjects(); +} + +void AVRFrameLowering::emitPrologue(MachineFunction &MF, + MachineBasicBlock &MBB) const { + MachineBasicBlock::iterator MBBI = MBB.begin(); + CallingConv::ID CallConv = MF.getFunction().getCallingConv(); + DebugLoc DL = (MBBI != MBB.end()) ? MBBI->getDebugLoc() : DebugLoc(); + const AVRSubtarget &STI = MF.getSubtarget<AVRSubtarget>(); + const AVRInstrInfo &TII = *STI.getInstrInfo(); + bool HasFP = hasFP(MF); + + // Interrupt handlers re-enable interrupts in function entry. + if (CallConv == CallingConv::AVR_INTR) { + BuildMI(MBB, MBBI, DL, TII.get(AVR::BSETs)) + .addImm(0x07) + .setMIFlag(MachineInstr::FrameSetup); + } + + // Save the frame pointer if we have one. + if (HasFP) { + BuildMI(MBB, MBBI, DL, TII.get(AVR::PUSHWRr)) + .addReg(AVR::R29R28, RegState::Kill) + .setMIFlag(MachineInstr::FrameSetup); + } + + // Emit special prologue code to save R1, R0 and SREG in interrupt/signal + // handlers before saving any other registers. 
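+ // R0 and R1 are the fixed temporary/zero registers of the AVR calling
+ // convention and SREG is clobbered by almost every instruction, so a handler
+ // must preserve all three before running any other code.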
+ if (CallConv == CallingConv::AVR_INTR || + CallConv == CallingConv::AVR_SIGNAL) { + BuildMI(MBB, MBBI, DL, TII.get(AVR::PUSHWRr)) + .addReg(AVR::R1R0, RegState::Kill) + .setMIFlag(MachineInstr::FrameSetup); + + BuildMI(MBB, MBBI, DL, TII.get(AVR::INRdA), AVR::R0) + .addImm(0x3f) + .setMIFlag(MachineInstr::FrameSetup); + BuildMI(MBB, MBBI, DL, TII.get(AVR::PUSHRr)) + .addReg(AVR::R0, RegState::Kill) + .setMIFlag(MachineInstr::FrameSetup); + BuildMI(MBB, MBBI, DL, TII.get(AVR::EORRdRr)) + .addReg(AVR::R0, RegState::Define) + .addReg(AVR::R0, RegState::Kill) + .addReg(AVR::R0, RegState::Kill) + .setMIFlag(MachineInstr::FrameSetup); + } + + // Early exit if the frame pointer is not needed in this function. + if (!HasFP) { + return; + } + + const MachineFrameInfo &MFI = MF.getFrameInfo(); + const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>(); + unsigned FrameSize = MFI.getStackSize() - AFI->getCalleeSavedFrameSize(); + + // Skip the callee-saved push instructions. + while ( + (MBBI != MBB.end()) && MBBI->getFlag(MachineInstr::FrameSetup) && + (MBBI->getOpcode() == AVR::PUSHRr || MBBI->getOpcode() == AVR::PUSHWRr)) { + ++MBBI; + } + + // Update Y with the new base value. + BuildMI(MBB, MBBI, DL, TII.get(AVR::SPREAD), AVR::R29R28) + .addReg(AVR::SP) + .setMIFlag(MachineInstr::FrameSetup); + + // Mark the FramePtr as live-in in every block except the entry. + for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end(); + I != E; ++I) { + I->addLiveIn(AVR::R29R28); + } + + if (!FrameSize) { + return; + } + + // Reserve the necessary frame memory by doing FP -= <size>. + unsigned Opcode = (isUInt<6>(FrameSize)) ? AVR::SBIWRdK : AVR::SUBIWRdK; + + MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opcode), AVR::R29R28) + .addReg(AVR::R29R28, RegState::Kill) + .addImm(FrameSize) + .setMIFlag(MachineInstr::FrameSetup); + // The SREG implicit def is dead. + MI->getOperand(3).setIsDead(); + + // Write back R29R28 to SP and temporarily disable interrupts. + BuildMI(MBB, MBBI, DL, TII.get(AVR::SPWRITE), AVR::SP) + .addReg(AVR::R29R28) + .setMIFlag(MachineInstr::FrameSetup); +} + +void AVRFrameLowering::emitEpilogue(MachineFunction &MF, + MachineBasicBlock &MBB) const { + CallingConv::ID CallConv = MF.getFunction().getCallingConv(); + bool isHandler = (CallConv == CallingConv::AVR_INTR || + CallConv == CallingConv::AVR_SIGNAL); + + // Early exit if the frame pointer is not needed in this function except for + // signal/interrupt handlers where special code generation is required. + if (!hasFP(MF) && !isHandler) { + return; + } + + MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr(); + assert(MBBI->getDesc().isReturn() && + "Can only insert epilog into returning blocks"); + + DebugLoc DL = MBBI->getDebugLoc(); + const MachineFrameInfo &MFI = MF.getFrameInfo(); + const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>(); + unsigned FrameSize = MFI.getStackSize() - AFI->getCalleeSavedFrameSize(); + const AVRSubtarget &STI = MF.getSubtarget<AVRSubtarget>(); + const AVRInstrInfo &TII = *STI.getInstrInfo(); + + // Emit special epilogue code to restore R1, R0 and SREG in interrupt/signal + // handlers at the very end of the function, just before reti. 
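+ // The restores mirror the prologue in reverse: pop the saved SREG value
+ // through R0, write it back to SREG, then pop the R1:R0 pair.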
+ if (isHandler) { + BuildMI(MBB, MBBI, DL, TII.get(AVR::POPRd), AVR::R0); + BuildMI(MBB, MBBI, DL, TII.get(AVR::OUTARr)) + .addImm(0x3f) + .addReg(AVR::R0, RegState::Kill); + BuildMI(MBB, MBBI, DL, TII.get(AVR::POPWRd), AVR::R1R0); + } + + if (hasFP(MF)) + BuildMI(MBB, MBBI, DL, TII.get(AVR::POPWRd), AVR::R29R28); + + // Early exit if there is no need to restore the frame pointer. + if (!FrameSize) { + return; + } + + // Skip the callee-saved pop instructions. + while (MBBI != MBB.begin()) { + MachineBasicBlock::iterator PI = std::prev(MBBI); + int Opc = PI->getOpcode(); + + if (Opc != AVR::POPRd && Opc != AVR::POPWRd && !PI->isTerminator()) { + break; + } + + --MBBI; + } + + unsigned Opcode; + + // Select the optimal opcode depending on how big it is. + if (isUInt<6>(FrameSize)) { + Opcode = AVR::ADIWRdK; + } else { + Opcode = AVR::SUBIWRdK; + FrameSize = -FrameSize; + } + + // Restore the frame pointer by doing FP += <size>. + MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opcode), AVR::R29R28) + .addReg(AVR::R29R28, RegState::Kill) + .addImm(FrameSize); + // The SREG implicit def is dead. + MI->getOperand(3).setIsDead(); + + // Write back R29R28 to SP and temporarily disable interrupts. + BuildMI(MBB, MBBI, DL, TII.get(AVR::SPWRITE), AVR::SP) + .addReg(AVR::R29R28, RegState::Kill); +} + +// Return true if the specified function should have a dedicated frame +// pointer register. This is true if the function meets any of the following +// conditions: +// - a register has been spilled +// - has allocas +// - input arguments are passed using the stack +// +// Notice that strictly this is not a frame pointer because it contains SP after +// frame allocation instead of having the original SP in function entry. +bool AVRFrameLowering::hasFP(const MachineFunction &MF) const { + const AVRMachineFunctionInfo *FuncInfo = MF.getInfo<AVRMachineFunctionInfo>(); + + return (FuncInfo->getHasSpills() || FuncInfo->getHasAllocas() || + FuncInfo->getHasStackArgs()); +} + +bool AVRFrameLowering::spillCalleeSavedRegisters( + MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, + const std::vector<CalleeSavedInfo> &CSI, + const TargetRegisterInfo *TRI) const { + if (CSI.empty()) { + return false; + } + + unsigned CalleeFrameSize = 0; + DebugLoc DL = MBB.findDebugLoc(MI); + MachineFunction &MF = *MBB.getParent(); + const AVRSubtarget &STI = MF.getSubtarget<AVRSubtarget>(); + const TargetInstrInfo &TII = *STI.getInstrInfo(); + AVRMachineFunctionInfo *AVRFI = MF.getInfo<AVRMachineFunctionInfo>(); + + for (unsigned i = CSI.size(); i != 0; --i) { + unsigned Reg = CSI[i - 1].getReg(); + bool IsNotLiveIn = !MBB.isLiveIn(Reg); + + assert(TRI->getRegSizeInBits(*TRI->getMinimalPhysRegClass(Reg)) == 8 && + "Invalid register size"); + + // Add the callee-saved register as live-in only if it is not already a + // live-in register, this usually happens with arguments that are passed + // through callee-saved registers. + if (IsNotLiveIn) { + MBB.addLiveIn(Reg); + } + + // Do not kill the register when it is an input argument. 
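+ // A live-in register still carries an incoming argument that may be read
+ // after this point, so only registers we had to add as live-in ourselves are
+ // safe to mark as killed by the push.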
+ BuildMI(MBB, MI, DL, TII.get(AVR::PUSHRr)) + .addReg(Reg, getKillRegState(IsNotLiveIn)) + .setMIFlag(MachineInstr::FrameSetup); + ++CalleeFrameSize; + } + + AVRFI->setCalleeSavedFrameSize(CalleeFrameSize); + + return true; +} + +bool AVRFrameLowering::restoreCalleeSavedRegisters( + MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, + std::vector<CalleeSavedInfo> &CSI, + const TargetRegisterInfo *TRI) const { + if (CSI.empty()) { + return false; + } + + DebugLoc DL = MBB.findDebugLoc(MI); + const MachineFunction &MF = *MBB.getParent(); + const AVRSubtarget &STI = MF.getSubtarget<AVRSubtarget>(); + const TargetInstrInfo &TII = *STI.getInstrInfo(); + + for (const CalleeSavedInfo &CCSI : CSI) { + unsigned Reg = CCSI.getReg(); + + assert(TRI->getRegSizeInBits(*TRI->getMinimalPhysRegClass(Reg)) == 8 && + "Invalid register size"); + + BuildMI(MBB, MI, DL, TII.get(AVR::POPRd), Reg); + } + + return true; +} + +/// Replace pseudo store instructions that pass arguments through the stack with +/// real instructions. If insertPushes is true then all instructions are +/// replaced with push instructions, otherwise regular std instructions are +/// inserted. +static void fixStackStores(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const TargetInstrInfo &TII, bool insertPushes) { + const AVRSubtarget &STI = MBB.getParent()->getSubtarget<AVRSubtarget>(); + const TargetRegisterInfo &TRI = *STI.getRegisterInfo(); + + // Iterate through the BB until we hit a call instruction or we reach the end. + for (auto I = MI, E = MBB.end(); I != E && !I->isCall();) { + MachineBasicBlock::iterator NextMI = std::next(I); + MachineInstr &MI = *I; + unsigned Opcode = I->getOpcode(); + + // Only care of pseudo store instructions where SP is the base pointer. + if (Opcode != AVR::STDSPQRr && Opcode != AVR::STDWSPQRr) { + I = NextMI; + continue; + } + + assert(MI.getOperand(0).getReg() == AVR::SP && + "Invalid register, should be SP!"); + if (insertPushes) { + // Replace this instruction with a push. + unsigned SrcReg = MI.getOperand(2).getReg(); + bool SrcIsKill = MI.getOperand(2).isKill(); + + // We can't use PUSHWRr here because when expanded the order of the new + // instructions are reversed from what we need. Perform the expansion now. + if (Opcode == AVR::STDWSPQRr) { + BuildMI(MBB, I, MI.getDebugLoc(), TII.get(AVR::PUSHRr)) + .addReg(TRI.getSubReg(SrcReg, AVR::sub_hi), + getKillRegState(SrcIsKill)); + BuildMI(MBB, I, MI.getDebugLoc(), TII.get(AVR::PUSHRr)) + .addReg(TRI.getSubReg(SrcReg, AVR::sub_lo), + getKillRegState(SrcIsKill)); + } else { + BuildMI(MBB, I, MI.getDebugLoc(), TII.get(AVR::PUSHRr)) + .addReg(SrcReg, getKillRegState(SrcIsKill)); + } + + MI.eraseFromParent(); + I = NextMI; + continue; + } + + // Replace this instruction with a regular store. Use Y as the base + // pointer since it is guaranteed to contain a copy of SP. + unsigned STOpc = + (Opcode == AVR::STDWSPQRr) ? AVR::STDWPtrQRr : AVR::STDPtrQRr; + + MI.setDesc(TII.get(STOpc)); + MI.getOperand(0).setReg(AVR::R29R28); + + I = NextMI; + } +} + +MachineBasicBlock::iterator AVRFrameLowering::eliminateCallFramePseudoInstr( + MachineFunction &MF, MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI) const { + const AVRSubtarget &STI = MF.getSubtarget<AVRSubtarget>(); + const AVRInstrInfo &TII = *STI.getInstrInfo(); + + // There is nothing to insert when the call frame memory is allocated during + // function entry. Delete the call frame pseudo and replace all pseudo stores + // with real store instructions. 
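+ // In that case Y is guaranteed to hold a copy of SP, so the pseudo stores can
+ // simply be rewritten into Y-relative STD instructions.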
+ if (hasReservedCallFrame(MF)) { + fixStackStores(MBB, MI, TII, false); + return MBB.erase(MI); + } + + DebugLoc DL = MI->getDebugLoc(); + unsigned int Opcode = MI->getOpcode(); + int Amount = TII.getFrameSize(*MI); + + // Adjcallstackup does not need to allocate stack space for the call, instead + // we insert push instructions that will allocate the necessary stack. + // For adjcallstackdown we convert it into an 'adiw reg, <amt>' handling + // the read and write of SP in I/O space. + if (Amount != 0) { + assert(getStackAlignment() == 1 && "Unsupported stack alignment"); + + if (Opcode == TII.getCallFrameSetupOpcode()) { + fixStackStores(MBB, MI, TII, true); + } else { + assert(Opcode == TII.getCallFrameDestroyOpcode()); + + // Select the best opcode to adjust SP based on the offset size. + unsigned addOpcode; + if (isUInt<6>(Amount)) { + addOpcode = AVR::ADIWRdK; + } else { + addOpcode = AVR::SUBIWRdK; + Amount = -Amount; + } + + // Build the instruction sequence. + BuildMI(MBB, MI, DL, TII.get(AVR::SPREAD), AVR::R31R30).addReg(AVR::SP); + + MachineInstr *New = BuildMI(MBB, MI, DL, TII.get(addOpcode), AVR::R31R30) + .addReg(AVR::R31R30, RegState::Kill) + .addImm(Amount); + New->getOperand(3).setIsDead(); + + BuildMI(MBB, MI, DL, TII.get(AVR::SPWRITE), AVR::SP) + .addReg(AVR::R31R30, RegState::Kill); + } + } + + return MBB.erase(MI); +} + +void AVRFrameLowering::determineCalleeSaves(MachineFunction &MF, + BitVector &SavedRegs, + RegScavenger *RS) const { + TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); + + // If we have a frame pointer, the Y register needs to be saved as well. + // We don't do that here however - the prologue and epilogue generation + // code will handle it specially. +} +/// The frame analyzer pass. +/// +/// Scans the function for allocas and used arguments +/// that are passed through the stack. +struct AVRFrameAnalyzer : public MachineFunctionPass { + static char ID; + AVRFrameAnalyzer() : MachineFunctionPass(ID) {} + + bool runOnMachineFunction(MachineFunction &MF) { + const MachineFrameInfo &MFI = MF.getFrameInfo(); + AVRMachineFunctionInfo *FuncInfo = MF.getInfo<AVRMachineFunctionInfo>(); + + // If there are no fixed frame indexes during this stage it means there + // are allocas present in the function. + if (MFI.getNumObjects() != MFI.getNumFixedObjects()) { + // Check for the type of allocas present in the function. We only care + // about fixed size allocas so do not give false positives if only + // variable sized allocas are present. + for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) { + // Variable sized objects have size 0. + if (MFI.getObjectSize(i)) { + FuncInfo->setHasAllocas(true); + break; + } + } + } + + // If there are fixed frame indexes present, scan the function to see if + // they are really being used. + if (MFI.getNumFixedObjects() == 0) { + return false; + } + + // Ok fixed frame indexes present, now scan the function to see if they + // are really being used, otherwise we can ignore them. 
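+ // At this stage stack arguments are still addressed through the LDD/STD
+ // displacement pseudos, so it is enough to look for those opcodes.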
+ for (const MachineBasicBlock &BB : MF) { + for (const MachineInstr &MI : BB) { + int Opcode = MI.getOpcode(); + + if ((Opcode != AVR::LDDRdPtrQ) && (Opcode != AVR::LDDWRdPtrQ) && + (Opcode != AVR::STDPtrQRr) && (Opcode != AVR::STDWPtrQRr)) { + continue; + } + + for (const MachineOperand &MO : MI.operands()) { + if (!MO.isFI()) { + continue; + } + + if (MFI.isFixedObjectIndex(MO.getIndex())) { + FuncInfo->setHasStackArgs(true); + return false; + } + } + } + } + + return false; + } + + StringRef getPassName() const { return "AVR Frame Analyzer"; } +}; + +char AVRFrameAnalyzer::ID = 0; + +/// Creates instance of the frame analyzer pass. +FunctionPass *createAVRFrameAnalyzerPass() { return new AVRFrameAnalyzer(); } + +/// Create the Dynalloca Stack Pointer Save/Restore pass. +/// Insert a copy of SP before allocating the dynamic stack memory and restore +/// it in function exit to restore the original SP state. This avoids the need +/// of reserving a register pair for a frame pointer. +struct AVRDynAllocaSR : public MachineFunctionPass { + static char ID; + AVRDynAllocaSR() : MachineFunctionPass(ID) {} + + bool runOnMachineFunction(MachineFunction &MF) { + // Early exit when there are no variable sized objects in the function. + if (!MF.getFrameInfo().hasVarSizedObjects()) { + return false; + } + + const AVRSubtarget &STI = MF.getSubtarget<AVRSubtarget>(); + const TargetInstrInfo &TII = *STI.getInstrInfo(); + MachineBasicBlock &EntryMBB = MF.front(); + MachineBasicBlock::iterator MBBI = EntryMBB.begin(); + DebugLoc DL = EntryMBB.findDebugLoc(MBBI); + + unsigned SPCopy = + MF.getRegInfo().createVirtualRegister(&AVR::DREGSRegClass); + + // Create a copy of SP in function entry before any dynallocas are + // inserted. + BuildMI(EntryMBB, MBBI, DL, TII.get(AVR::COPY), SPCopy).addReg(AVR::SP); + + // Restore SP in all exit basic blocks. + for (MachineBasicBlock &MBB : MF) { + // If last instruction is a return instruction, add a restore copy. + if (!MBB.empty() && MBB.back().isReturn()) { + MBBI = MBB.getLastNonDebugInstr(); + DL = MBBI->getDebugLoc(); + BuildMI(MBB, MBBI, DL, TII.get(AVR::COPY), AVR::SP) + .addReg(SPCopy, RegState::Kill); + } + } + + return true; + } + + StringRef getPassName() const { + return "AVR dynalloca stack pointer save/restore"; + } +}; + +char AVRDynAllocaSR::ID = 0; + +/// createAVRDynAllocaSRPass - returns an instance of the dynalloca stack +/// pointer save/restore pass. +FunctionPass *createAVRDynAllocaSRPass() { return new AVRDynAllocaSR(); } + +} // end of namespace llvm + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRFrameLowering.h b/contrib/llvm-project/llvm/lib/Target/AVR/AVRFrameLowering.h new file mode 100644 index 000000000000..a7658438232a --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRFrameLowering.h @@ -0,0 +1,45 @@ +//===-- AVRFrameLowering.h - Define frame lowering for AVR ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_FRAME_LOWERING_H +#define LLVM_AVR_FRAME_LOWERING_H + +#include "llvm/CodeGen/TargetFrameLowering.h" + +namespace llvm { + +/// Utilities for creating function call frames. 
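+/// The AVR stack grows downwards with byte alignment; see the
+/// TargetFrameLowering constructor arguments in AVRFrameLowering.cpp.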
+class AVRFrameLowering : public TargetFrameLowering { +public: + explicit AVRFrameLowering(); + +public: + void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override; + void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override; + bool hasFP(const MachineFunction &MF) const override; + bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector<CalleeSavedInfo> &CSI, + const TargetRegisterInfo *TRI) const override; + bool + restoreCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + std::vector<CalleeSavedInfo> &CSI, + const TargetRegisterInfo *TRI) const override; + bool hasReservedCallFrame(const MachineFunction &MF) const override; + bool canSimplifyCallFramePseudos(const MachineFunction &MF) const override; + void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, + RegScavenger *RS = nullptr) const override; + MachineBasicBlock::iterator + eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI) const override; +}; + +} // end namespace llvm + +#endif // LLVM_AVR_FRAME_LOWERING_H diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp new file mode 100644 index 000000000000..5cb4441c4380 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp @@ -0,0 +1,557 @@ +//===-- AVRISelDAGToDAG.cpp - A dag to dag inst selector for AVR ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines an instruction selector for the AVR target. +// +//===----------------------------------------------------------------------===// + +#include "AVR.h" +#include "AVRTargetMachine.h" +#include "MCTargetDesc/AVRMCTargetDesc.h" + +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" + +#define DEBUG_TYPE "avr-isel" + +namespace llvm { + +/// Lowers LLVM IR (in DAG form) to AVR MC instructions (in DAG form). +class AVRDAGToDAGISel : public SelectionDAGISel { +public: + AVRDAGToDAGISel(AVRTargetMachine &TM, CodeGenOpt::Level OptLevel) + : SelectionDAGISel(TM, OptLevel), Subtarget(nullptr) {} + + StringRef getPassName() const override { + return "AVR DAG->DAG Instruction Selection"; + } + + bool runOnMachineFunction(MachineFunction &MF) override; + + bool SelectAddr(SDNode *Op, SDValue N, SDValue &Base, SDValue &Disp); + + bool selectIndexedLoad(SDNode *N); + unsigned selectIndexedProgMemLoad(const LoadSDNode *LD, MVT VT); + + bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintCode, + std::vector<SDValue> &OutOps) override; + +// Include the pieces autogenerated from the target description. 
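+// AVRGenDAGISel.inc is produced by TableGen from the AVR .td files at build
+// time.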
+#include "AVRGenDAGISel.inc" + +private: + void Select(SDNode *N) override; + bool trySelect(SDNode *N); + + template <unsigned NodeType> bool select(SDNode *N); + bool selectMultiplication(SDNode *N); + + const AVRSubtarget *Subtarget; +}; + +bool AVRDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) { + Subtarget = &MF.getSubtarget<AVRSubtarget>(); + return SelectionDAGISel::runOnMachineFunction(MF); +} + +bool AVRDAGToDAGISel::SelectAddr(SDNode *Op, SDValue N, SDValue &Base, + SDValue &Disp) { + SDLoc dl(Op); + auto DL = CurDAG->getDataLayout(); + MVT PtrVT = getTargetLowering()->getPointerTy(DL); + + // if the address is a frame index get the TargetFrameIndex. + if (const FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(N)) { + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), PtrVT); + Disp = CurDAG->getTargetConstant(0, dl, MVT::i8); + + return true; + } + + // Match simple Reg + uimm6 operands. + if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB && + !CurDAG->isBaseWithConstantOffset(N)) { + return false; + } + + if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { + int RHSC = (int)RHS->getZExtValue(); + + // Convert negative offsets into positives ones. + if (N.getOpcode() == ISD::SUB) { + RHSC = -RHSC; + } + + // <#Frame index + const> + // Allow folding offsets bigger than 63 so the frame pointer can be used + // directly instead of copying it around by adjusting and restoring it for + // each access. + if (N.getOperand(0).getOpcode() == ISD::FrameIndex) { + int FI = cast<FrameIndexSDNode>(N.getOperand(0))->getIndex(); + + Base = CurDAG->getTargetFrameIndex(FI, PtrVT); + Disp = CurDAG->getTargetConstant(RHSC, dl, MVT::i16); + + return true; + } + + // The value type of the memory instruction determines what is the maximum + // offset allowed. + MVT VT = cast<MemSDNode>(Op)->getMemoryVT().getSimpleVT(); + + // We only accept offsets that fit in 6 bits (unsigned). + if (isUInt<6>(RHSC) && (VT == MVT::i8 || VT == MVT::i16)) { + Base = N.getOperand(0); + Disp = CurDAG->getTargetConstant(RHSC, dl, MVT::i8); + + return true; + } + } + + return false; +} + +bool AVRDAGToDAGISel::selectIndexedLoad(SDNode *N) { + const LoadSDNode *LD = cast<LoadSDNode>(N); + ISD::MemIndexedMode AM = LD->getAddressingMode(); + MVT VT = LD->getMemoryVT().getSimpleVT(); + auto PtrVT = getTargetLowering()->getPointerTy(CurDAG->getDataLayout()); + + // We only care if this load uses a POSTINC or PREDEC mode. + if ((LD->getExtensionType() != ISD::NON_EXTLOAD) || + (AM != ISD::POST_INC && AM != ISD::PRE_DEC)) { + + return false; + } + + unsigned Opcode = 0; + bool isPre = (AM == ISD::PRE_DEC); + int Offs = cast<ConstantSDNode>(LD->getOffset())->getSExtValue(); + + switch (VT.SimpleTy) { + case MVT::i8: { + if ((!isPre && Offs != 1) || (isPre && Offs != -1)) { + return false; + } + + Opcode = (isPre) ? AVR::LDRdPtrPd : AVR::LDRdPtrPi; + break; + } + case MVT::i16: { + if ((!isPre && Offs != 2) || (isPre && Offs != -2)) { + return false; + } + + Opcode = (isPre) ? AVR::LDWRdPtrPd : AVR::LDWRdPtrPi; + break; + } + default: + return false; + } + + SDNode *ResNode = CurDAG->getMachineNode(Opcode, SDLoc(N), VT, + PtrVT, MVT::Other, + LD->getBasePtr(), LD->getChain()); + ReplaceUses(N, ResNode); + CurDAG->RemoveDeadNode(N); + + return true; +} + +unsigned AVRDAGToDAGISel::selectIndexedProgMemLoad(const LoadSDNode *LD, + MVT VT) { + ISD::MemIndexedMode AM = LD->getAddressingMode(); + + // Progmem indexed loads only work in POSTINC mode. 
+ if (LD->getExtensionType() != ISD::NON_EXTLOAD || AM != ISD::POST_INC) { + return 0; + } + + unsigned Opcode = 0; + int Offs = cast<ConstantSDNode>(LD->getOffset())->getSExtValue(); + + switch (VT.SimpleTy) { + case MVT::i8: { + if (Offs != 1) { + return 0; + } + Opcode = AVR::LPMRdZPi; + break; + } + case MVT::i16: { + if (Offs != 2) { + return 0; + } + Opcode = AVR::LPMWRdZPi; + break; + } + default: + return 0; + } + + return Opcode; +} + +bool AVRDAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op, + unsigned ConstraintCode, + std::vector<SDValue> &OutOps) { + assert((ConstraintCode == InlineAsm::Constraint_m || + ConstraintCode == InlineAsm::Constraint_Q) && + "Unexpected asm memory constraint"); + + MachineRegisterInfo &RI = MF->getRegInfo(); + const AVRSubtarget &STI = MF->getSubtarget<AVRSubtarget>(); + const TargetLowering &TL = *STI.getTargetLowering(); + SDLoc dl(Op); + auto DL = CurDAG->getDataLayout(); + + const RegisterSDNode *RegNode = dyn_cast<RegisterSDNode>(Op); + + // If address operand is of PTRDISPREGS class, all is OK, then. + if (RegNode && + RI.getRegClass(RegNode->getReg()) == &AVR::PTRDISPREGSRegClass) { + OutOps.push_back(Op); + return false; + } + + if (Op->getOpcode() == ISD::FrameIndex) { + SDValue Base, Disp; + + if (SelectAddr(Op.getNode(), Op, Base, Disp)) { + OutOps.push_back(Base); + OutOps.push_back(Disp); + + return false; + } + + return true; + } + + // If Op is add 'register, immediate' and + // register is either virtual register or register of PTRDISPREGSRegClass + if (Op->getOpcode() == ISD::ADD || Op->getOpcode() == ISD::SUB) { + SDValue CopyFromRegOp = Op->getOperand(0); + SDValue ImmOp = Op->getOperand(1); + ConstantSDNode *ImmNode = dyn_cast<ConstantSDNode>(ImmOp); + + unsigned Reg; + bool CanHandleRegImmOpt = true; + + CanHandleRegImmOpt &= ImmNode != 0; + CanHandleRegImmOpt &= ImmNode->getAPIntValue().getZExtValue() < 64; + + if (CopyFromRegOp->getOpcode() == ISD::CopyFromReg) { + RegisterSDNode *RegNode = + cast<RegisterSDNode>(CopyFromRegOp->getOperand(1)); + Reg = RegNode->getReg(); + CanHandleRegImmOpt &= (TargetRegisterInfo::isVirtualRegister(Reg) || + AVR::PTRDISPREGSRegClass.contains(Reg)); + } else { + CanHandleRegImmOpt = false; + } + + // If we detect proper case - correct virtual register class + // if needed and go to another inlineasm operand. + if (CanHandleRegImmOpt) { + SDValue Base, Disp; + + if (RI.getRegClass(Reg) != &AVR::PTRDISPREGSRegClass) { + SDLoc dl(CopyFromRegOp); + + unsigned VReg = RI.createVirtualRegister(&AVR::PTRDISPREGSRegClass); + + SDValue CopyToReg = + CurDAG->getCopyToReg(CopyFromRegOp, dl, VReg, CopyFromRegOp); + + SDValue NewCopyFromRegOp = + CurDAG->getCopyFromReg(CopyToReg, dl, VReg, TL.getPointerTy(DL)); + + Base = NewCopyFromRegOp; + } else { + Base = CopyFromRegOp; + } + + if (ImmNode->getValueType(0) != MVT::i8) { + Disp = CurDAG->getTargetConstant(ImmNode->getAPIntValue().getZExtValue(), dl, MVT::i8); + } else { + Disp = ImmOp; + } + + OutOps.push_back(Base); + OutOps.push_back(Disp); + + return false; + } + } + + // More generic case. + // Create chain that puts Op into pointer register + // and return that register. 
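+ // Copy the computed address into a fresh PTRDISPREGS virtual register so the
+ // memory constraint always receives a valid pointer register.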
+ unsigned VReg = RI.createVirtualRegister(&AVR::PTRDISPREGSRegClass); + + SDValue CopyToReg = CurDAG->getCopyToReg(Op, dl, VReg, Op); + SDValue CopyFromReg = + CurDAG->getCopyFromReg(CopyToReg, dl, VReg, TL.getPointerTy(DL)); + + OutOps.push_back(CopyFromReg); + + return false; +} + +template <> bool AVRDAGToDAGISel::select<ISD::FrameIndex>(SDNode *N) { + auto DL = CurDAG->getDataLayout(); + + // Convert the frameindex into a temp instruction that will hold the + // effective address of the final stack slot. + int FI = cast<FrameIndexSDNode>(N)->getIndex(); + SDValue TFI = + CurDAG->getTargetFrameIndex(FI, getTargetLowering()->getPointerTy(DL)); + + CurDAG->SelectNodeTo(N, AVR::FRMIDX, + getTargetLowering()->getPointerTy(DL), TFI, + CurDAG->getTargetConstant(0, SDLoc(N), MVT::i16)); + return true; +} + +template <> bool AVRDAGToDAGISel::select<ISD::STORE>(SDNode *N) { + // Use the STD{W}SPQRr pseudo instruction when passing arguments through + // the stack on function calls for further expansion during the PEI phase. + const StoreSDNode *ST = cast<StoreSDNode>(N); + SDValue BasePtr = ST->getBasePtr(); + + // Early exit when the base pointer is a frame index node or a constant. + if (isa<FrameIndexSDNode>(BasePtr) || isa<ConstantSDNode>(BasePtr) || + BasePtr.isUndef()) { + return false; + } + + const RegisterSDNode *RN = dyn_cast<RegisterSDNode>(BasePtr.getOperand(0)); + // Only stores where SP is the base pointer are valid. + if (!RN || (RN->getReg() != AVR::SP)) { + return false; + } + + int CST = (int)cast<ConstantSDNode>(BasePtr.getOperand(1))->getZExtValue(); + SDValue Chain = ST->getChain(); + EVT VT = ST->getValue().getValueType(); + SDLoc DL(N); + SDValue Offset = CurDAG->getTargetConstant(CST, DL, MVT::i16); + SDValue Ops[] = {BasePtr.getOperand(0), Offset, ST->getValue(), Chain}; + unsigned Opc = (VT == MVT::i16) ? AVR::STDWSPQRr : AVR::STDSPQRr; + + SDNode *ResNode = CurDAG->getMachineNode(Opc, DL, MVT::Other, Ops); + + // Transfer memory operands. + CurDAG->setNodeMemRefs(cast<MachineSDNode>(ResNode), {ST->getMemOperand()}); + + ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0)); + CurDAG->RemoveDeadNode(N); + + return true; +} + +template <> bool AVRDAGToDAGISel::select<ISD::LOAD>(SDNode *N) { + const LoadSDNode *LD = cast<LoadSDNode>(N); + if (!AVR::isProgramMemoryAccess(LD)) { + // Check if the opcode can be converted into an indexed load. + return selectIndexedLoad(N); + } + + assert(Subtarget->hasLPM() && "cannot load from program memory on this mcu"); + + // This is a flash memory load, move the pointer into R31R30 and emit + // the lpm instruction. + MVT VT = LD->getMemoryVT().getSimpleVT(); + SDValue Chain = LD->getChain(); + SDValue Ptr = LD->getBasePtr(); + SDNode *ResNode; + SDLoc DL(N); + + Chain = CurDAG->getCopyToReg(Chain, DL, AVR::R31R30, Ptr, SDValue()); + Ptr = CurDAG->getCopyFromReg(Chain, DL, AVR::R31R30, MVT::i16, + Chain.getValue(1)); + + SDValue RegZ = CurDAG->getRegister(AVR::R31R30, MVT::i16); + + // Check if the opcode can be converted into an indexed load. + if (unsigned LPMOpc = selectIndexedProgMemLoad(LD, VT)) { + // It is legal to fold the load into an indexed load. + ResNode = CurDAG->getMachineNode(LPMOpc, DL, VT, MVT::i16, MVT::Other, Ptr, + RegZ); + ReplaceUses(SDValue(N, 1), SDValue(ResNode, 1)); + } else { + // Selecting an indexed load is not legal, fallback to a normal load. 
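+ // Emit a plain LPM (or the 16-bit LPMW pseudo) that reads through Z without
+ // post-incrementing it.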
+ switch (VT.SimpleTy) { + case MVT::i8: + ResNode = CurDAG->getMachineNode(AVR::LPMRdZ, DL, MVT::i8, MVT::Other, + Ptr, RegZ); + break; + case MVT::i16: + ResNode = CurDAG->getMachineNode(AVR::LPMWRdZ, DL, MVT::i16, + MVT::Other, Ptr, RegZ); + ReplaceUses(SDValue(N, 1), SDValue(ResNode, 1)); + break; + default: + llvm_unreachable("Unsupported VT!"); + } + } + + // Transfer memory operands. + CurDAG->setNodeMemRefs(cast<MachineSDNode>(ResNode), {LD->getMemOperand()}); + + ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0)); + ReplaceUses(SDValue(N, 1), SDValue(ResNode, 1)); + CurDAG->RemoveDeadNode(N); + + return true; +} + +template <> bool AVRDAGToDAGISel::select<AVRISD::CALL>(SDNode *N) { + SDValue InFlag; + SDValue Chain = N->getOperand(0); + SDValue Callee = N->getOperand(1); + unsigned LastOpNum = N->getNumOperands() - 1; + + // Direct calls are autogenerated. + unsigned Op = Callee.getOpcode(); + if (Op == ISD::TargetGlobalAddress || Op == ISD::TargetExternalSymbol) { + return false; + } + + // Skip the incoming flag if present + if (N->getOperand(LastOpNum).getValueType() == MVT::Glue) { + --LastOpNum; + } + + SDLoc DL(N); + Chain = CurDAG->getCopyToReg(Chain, DL, AVR::R31R30, Callee, InFlag); + SmallVector<SDValue, 8> Ops; + Ops.push_back(CurDAG->getRegister(AVR::R31R30, MVT::i16)); + + // Map all operands into the new node. + for (unsigned i = 2, e = LastOpNum + 1; i != e; ++i) { + Ops.push_back(N->getOperand(i)); + } + + Ops.push_back(Chain); + Ops.push_back(Chain.getValue(1)); + + SDNode *ResNode = + CurDAG->getMachineNode(AVR::ICALL, DL, MVT::Other, MVT::Glue, Ops); + + ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0)); + ReplaceUses(SDValue(N, 1), SDValue(ResNode, 1)); + CurDAG->RemoveDeadNode(N); + + return true; +} + +template <> bool AVRDAGToDAGISel::select<ISD::BRIND>(SDNode *N) { + SDValue Chain = N->getOperand(0); + SDValue JmpAddr = N->getOperand(1); + + SDLoc DL(N); + // Move the destination address of the indirect branch into R31R30. + Chain = CurDAG->getCopyToReg(Chain, DL, AVR::R31R30, JmpAddr); + SDNode *ResNode = CurDAG->getMachineNode(AVR::IJMP, DL, MVT::Other, Chain); + + ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0)); + CurDAG->RemoveDeadNode(N); + + return true; +} + +bool AVRDAGToDAGISel::selectMultiplication(llvm::SDNode *N) { + SDLoc DL(N); + MVT Type = N->getSimpleValueType(0); + + assert(Type == MVT::i8 && "unexpected value type"); + + bool isSigned = N->getOpcode() == ISD::SMUL_LOHI; + unsigned MachineOp = isSigned ? AVR::MULSRdRr : AVR::MULRdRr; + + SDValue Lhs = N->getOperand(0); + SDValue Rhs = N->getOperand(1); + SDNode *Mul = CurDAG->getMachineNode(MachineOp, DL, MVT::Glue, Lhs, Rhs); + SDValue InChain = CurDAG->getEntryNode(); + SDValue InGlue = SDValue(Mul, 0); + + // Copy the low half of the result, if it is needed. + if (N->hasAnyUseOfValue(0)) { + SDValue CopyFromLo = + CurDAG->getCopyFromReg(InChain, DL, AVR::R0, Type, InGlue); + + ReplaceUses(SDValue(N, 0), CopyFromLo); + + InChain = CopyFromLo.getValue(1); + InGlue = CopyFromLo.getValue(2); + } + + // Copy the high half of the result, if it is needed. + if (N->hasAnyUseOfValue(1)) { + SDValue CopyFromHi = + CurDAG->getCopyFromReg(InChain, DL, AVR::R1, Type, InGlue); + + ReplaceUses(SDValue(N, 1), CopyFromHi); + + InChain = CopyFromHi.getValue(1); + InGlue = CopyFromHi.getValue(2); + } + + CurDAG->RemoveDeadNode(N); + + // We need to clear R1. This is currently done (dirtily) + // using a custom inserter. 
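+  // (R1 doubles as the ABI zero register; the custom inserter, insertMul() in
+  // AVRISelLowering.cpp, emits an 'eor r1, r1' once the result has been
+  // copied out.)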
+ + return true; +} + +void AVRDAGToDAGISel::Select(SDNode *N) { + // If we have a custom node, we already have selected! + if (N->isMachineOpcode()) { + LLVM_DEBUG(errs() << "== "; N->dump(CurDAG); errs() << "\n"); + N->setNodeId(-1); + return; + } + + // See if subclasses can handle this node. + if (trySelect(N)) + return; + + // Select the default instruction + SelectCode(N); +} + +bool AVRDAGToDAGISel::trySelect(SDNode *N) { + unsigned Opcode = N->getOpcode(); + SDLoc DL(N); + + switch (Opcode) { + // Nodes we fully handle. + case ISD::FrameIndex: return select<ISD::FrameIndex>(N); + case ISD::BRIND: return select<ISD::BRIND>(N); + case ISD::UMUL_LOHI: + case ISD::SMUL_LOHI: return selectMultiplication(N); + + // Nodes we handle partially. Other cases are autogenerated + case ISD::STORE: return select<ISD::STORE>(N); + case ISD::LOAD: return select<ISD::LOAD>(N); + case AVRISD::CALL: return select<AVRISD::CALL>(N); + default: return false; + } +} + +FunctionPass *createAVRISelDag(AVRTargetMachine &TM, + CodeGenOpt::Level OptLevel) { + return new AVRDAGToDAGISel(TM, OptLevel); +} + +} // end of namespace llvm + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelLowering.cpp new file mode 100644 index 000000000000..f159beee9730 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelLowering.cpp @@ -0,0 +1,2049 @@ +//===-- AVRISelLowering.cpp - AVR DAG Lowering Implementation -------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the interfaces that AVR uses to lower LLVM code into a +// selection DAG. +// +//===----------------------------------------------------------------------===// + +#include "AVRISelLowering.h" + +#include "llvm/ADT/StringSwitch.h" +#include "llvm/CodeGen/CallingConvLower.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" +#include "llvm/IR/Function.h" +#include "llvm/Support/ErrorHandling.h" + +#include "AVR.h" +#include "AVRMachineFunctionInfo.h" +#include "AVRSubtarget.h" +#include "AVRTargetMachine.h" +#include "MCTargetDesc/AVRMCTargetDesc.h" + +namespace llvm { + +AVRTargetLowering::AVRTargetLowering(const AVRTargetMachine &TM, + const AVRSubtarget &STI) + : TargetLowering(TM), Subtarget(STI) { + // Set up the register classes. + addRegisterClass(MVT::i8, &AVR::GPR8RegClass); + addRegisterClass(MVT::i16, &AVR::DREGSRegClass); + + // Compute derived properties from the register classes. 
+ computeRegisterProperties(Subtarget.getRegisterInfo()); + + setBooleanContents(ZeroOrOneBooleanContent); + setBooleanVectorContents(ZeroOrOneBooleanContent); + setSchedulingPreference(Sched::RegPressure); + setStackPointerRegisterToSaveRestore(AVR::SP); + setSupportsUnalignedAtomics(true); + + setOperationAction(ISD::GlobalAddress, MVT::i16, Custom); + setOperationAction(ISD::BlockAddress, MVT::i16, Custom); + + setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); + setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i8, Expand); + setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i16, Expand); + + for (MVT VT : MVT::integer_valuetypes()) { + for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) { + setLoadExtAction(N, VT, MVT::i1, Promote); + setLoadExtAction(N, VT, MVT::i8, Expand); + } + } + + setTruncStoreAction(MVT::i16, MVT::i8, Expand); + + for (MVT VT : MVT::integer_valuetypes()) { + setOperationAction(ISD::ADDC, VT, Legal); + setOperationAction(ISD::SUBC, VT, Legal); + setOperationAction(ISD::ADDE, VT, Legal); + setOperationAction(ISD::SUBE, VT, Legal); + } + + // sub (x, imm) gets canonicalized to add (x, -imm), so for illegal types + // revert into a sub since we don't have an add with immediate instruction. + setOperationAction(ISD::ADD, MVT::i32, Custom); + setOperationAction(ISD::ADD, MVT::i64, Custom); + + // our shift instructions are only able to shift 1 bit at a time, so handle + // this in a custom way. + setOperationAction(ISD::SRA, MVT::i8, Custom); + setOperationAction(ISD::SHL, MVT::i8, Custom); + setOperationAction(ISD::SRL, MVT::i8, Custom); + setOperationAction(ISD::SRA, MVT::i16, Custom); + setOperationAction(ISD::SHL, MVT::i16, Custom); + setOperationAction(ISD::SRL, MVT::i16, Custom); + setOperationAction(ISD::SHL_PARTS, MVT::i16, Expand); + setOperationAction(ISD::SRA_PARTS, MVT::i16, Expand); + setOperationAction(ISD::SRL_PARTS, MVT::i16, Expand); + + setOperationAction(ISD::ROTL, MVT::i8, Custom); + setOperationAction(ISD::ROTL, MVT::i16, Expand); + setOperationAction(ISD::ROTR, MVT::i8, Custom); + setOperationAction(ISD::ROTR, MVT::i16, Expand); + + setOperationAction(ISD::BR_CC, MVT::i8, Custom); + setOperationAction(ISD::BR_CC, MVT::i16, Custom); + setOperationAction(ISD::BR_CC, MVT::i32, Custom); + setOperationAction(ISD::BR_CC, MVT::i64, Custom); + setOperationAction(ISD::BRCOND, MVT::Other, Expand); + + setOperationAction(ISD::SELECT_CC, MVT::i8, Custom); + setOperationAction(ISD::SELECT_CC, MVT::i16, Custom); + setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); + setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); + setOperationAction(ISD::SETCC, MVT::i8, Custom); + setOperationAction(ISD::SETCC, MVT::i16, Custom); + setOperationAction(ISD::SETCC, MVT::i32, Custom); + setOperationAction(ISD::SETCC, MVT::i64, Custom); + setOperationAction(ISD::SELECT, MVT::i8, Expand); + setOperationAction(ISD::SELECT, MVT::i16, Expand); + + setOperationAction(ISD::BSWAP, MVT::i16, Expand); + + // Add support for postincrement and predecrement load/stores. 
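+  // (These correspond to AVR's pointer addressing modes, e.g. 'ld r24, X+'
+  // for a post-increment load and 'st -X, r24' for a pre-decrement store.)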
+ setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal); + setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal); + setIndexedLoadAction(ISD::PRE_DEC, MVT::i8, Legal); + setIndexedLoadAction(ISD::PRE_DEC, MVT::i16, Legal); + setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal); + setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal); + setIndexedStoreAction(ISD::PRE_DEC, MVT::i8, Legal); + setIndexedStoreAction(ISD::PRE_DEC, MVT::i16, Legal); + + setOperationAction(ISD::BR_JT, MVT::Other, Expand); + + setOperationAction(ISD::VASTART, MVT::Other, Custom); + setOperationAction(ISD::VAEND, MVT::Other, Expand); + setOperationAction(ISD::VAARG, MVT::Other, Expand); + setOperationAction(ISD::VACOPY, MVT::Other, Expand); + + // Atomic operations which must be lowered to rtlib calls + for (MVT VT : MVT::integer_valuetypes()) { + setOperationAction(ISD::ATOMIC_SWAP, VT, Expand); + setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand); + } + + // Division/remainder + setOperationAction(ISD::UDIV, MVT::i8, Expand); + setOperationAction(ISD::UDIV, MVT::i16, Expand); + setOperationAction(ISD::UREM, MVT::i8, Expand); + setOperationAction(ISD::UREM, MVT::i16, Expand); + setOperationAction(ISD::SDIV, MVT::i8, Expand); + setOperationAction(ISD::SDIV, MVT::i16, Expand); + setOperationAction(ISD::SREM, MVT::i8, Expand); + setOperationAction(ISD::SREM, MVT::i16, Expand); + + // Make division and modulus custom + for (MVT VT : MVT::integer_valuetypes()) { + setOperationAction(ISD::UDIVREM, VT, Custom); + setOperationAction(ISD::SDIVREM, VT, Custom); + } + + // Do not use MUL. The AVR instructions are closer to SMUL_LOHI &co. + setOperationAction(ISD::MUL, MVT::i8, Expand); + setOperationAction(ISD::MUL, MVT::i16, Expand); + + // Expand 16 bit multiplications. + setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand); + setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand); + + // Expand multiplications to libcalls when there is + // no hardware MUL. + if (!Subtarget.supportsMultiplication()) { + setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand); + setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand); + } + + for (MVT VT : MVT::integer_valuetypes()) { + setOperationAction(ISD::MULHS, VT, Expand); + setOperationAction(ISD::MULHU, VT, Expand); + } + + for (MVT VT : MVT::integer_valuetypes()) { + setOperationAction(ISD::CTPOP, VT, Expand); + setOperationAction(ISD::CTLZ, VT, Expand); + setOperationAction(ISD::CTTZ, VT, Expand); + } + + for (MVT VT : MVT::integer_valuetypes()) { + setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); + // TODO: The generated code is pretty poor. Investigate using the + // same "shift and subtract with carry" trick that we do for + // extending 8-bit to 16-bit. This may require infrastructure + // improvements in how we treat 16-bit "registers" to be feasible. 
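+    // (With Expand, the legalizer typically rewrites SIGN_EXTEND_INREG into
+    // a shift-left / arithmetic-shift-right pair.)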
+ } + + // Division rtlib functions (not supported) + setLibcallName(RTLIB::SDIV_I8, nullptr); + setLibcallName(RTLIB::SDIV_I16, nullptr); + setLibcallName(RTLIB::SDIV_I32, nullptr); + setLibcallName(RTLIB::SDIV_I64, nullptr); + setLibcallName(RTLIB::SDIV_I128, nullptr); + setLibcallName(RTLIB::UDIV_I8, nullptr); + setLibcallName(RTLIB::UDIV_I16, nullptr); + setLibcallName(RTLIB::UDIV_I32, nullptr); + setLibcallName(RTLIB::UDIV_I64, nullptr); + setLibcallName(RTLIB::UDIV_I128, nullptr); + + // Modulus rtlib functions (not supported) + setLibcallName(RTLIB::SREM_I8, nullptr); + setLibcallName(RTLIB::SREM_I16, nullptr); + setLibcallName(RTLIB::SREM_I32, nullptr); + setLibcallName(RTLIB::SREM_I64, nullptr); + setLibcallName(RTLIB::SREM_I128, nullptr); + setLibcallName(RTLIB::UREM_I8, nullptr); + setLibcallName(RTLIB::UREM_I16, nullptr); + setLibcallName(RTLIB::UREM_I32, nullptr); + setLibcallName(RTLIB::UREM_I64, nullptr); + setLibcallName(RTLIB::UREM_I128, nullptr); + + // Division and modulus rtlib functions + setLibcallName(RTLIB::SDIVREM_I8, "__divmodqi4"); + setLibcallName(RTLIB::SDIVREM_I16, "__divmodhi4"); + setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4"); + setLibcallName(RTLIB::SDIVREM_I64, "__divmoddi4"); + setLibcallName(RTLIB::SDIVREM_I128, "__divmodti4"); + setLibcallName(RTLIB::UDIVREM_I8, "__udivmodqi4"); + setLibcallName(RTLIB::UDIVREM_I16, "__udivmodhi4"); + setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4"); + setLibcallName(RTLIB::UDIVREM_I64, "__udivmoddi4"); + setLibcallName(RTLIB::UDIVREM_I128, "__udivmodti4"); + + // Several of the runtime library functions use a special calling conv + setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::AVR_BUILTIN); + setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::AVR_BUILTIN); + setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::AVR_BUILTIN); + setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::AVR_BUILTIN); + + // Trigonometric rtlib functions + setLibcallName(RTLIB::SIN_F32, "sin"); + setLibcallName(RTLIB::COS_F32, "cos"); + + setMinFunctionAlignment(1); + setMinimumJumpTableEntries(UINT_MAX); +} + +const char *AVRTargetLowering::getTargetNodeName(unsigned Opcode) const { +#define NODE(name) \ + case AVRISD::name: \ + return #name + + switch (Opcode) { + default: + return nullptr; + NODE(RET_FLAG); + NODE(RETI_FLAG); + NODE(CALL); + NODE(WRAPPER); + NODE(LSL); + NODE(LSR); + NODE(ROL); + NODE(ROR); + NODE(ASR); + NODE(LSLLOOP); + NODE(LSRLOOP); + NODE(ASRLOOP); + NODE(BRCOND); + NODE(CMP); + NODE(CMPC); + NODE(TST); + NODE(SELECT_CC); +#undef NODE + } +} + +EVT AVRTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &, + EVT VT) const { + assert(!VT.isVector() && "No AVR SetCC type for vectors!"); + return MVT::i8; +} + +SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const { + //:TODO: this function has to be completely rewritten to produce optimal + // code, for now it's producing very long but correct code. + unsigned Opc8; + const SDNode *N = Op.getNode(); + EVT VT = Op.getValueType(); + SDLoc dl(N); + + // Expand non-constant shifts to loops. 
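+  // (e.g. a SHL by a non-constant amount becomes an AVRISD::LSLLOOP node,
+  // which eventually reaches the decrement-and-branch loop emitted by
+  // insertShift() further down.)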
+ if (!isa<ConstantSDNode>(N->getOperand(1))) { + switch (Op.getOpcode()) { + default: + llvm_unreachable("Invalid shift opcode!"); + case ISD::SHL: + return DAG.getNode(AVRISD::LSLLOOP, dl, VT, N->getOperand(0), + N->getOperand(1)); + case ISD::SRL: + return DAG.getNode(AVRISD::LSRLOOP, dl, VT, N->getOperand(0), + N->getOperand(1)); + case ISD::ROTL: + return DAG.getNode(AVRISD::ROLLOOP, dl, VT, N->getOperand(0), + N->getOperand(1)); + case ISD::ROTR: + return DAG.getNode(AVRISD::RORLOOP, dl, VT, N->getOperand(0), + N->getOperand(1)); + case ISD::SRA: + return DAG.getNode(AVRISD::ASRLOOP, dl, VT, N->getOperand(0), + N->getOperand(1)); + } + } + + uint64_t ShiftAmount = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); + SDValue Victim = N->getOperand(0); + + switch (Op.getOpcode()) { + case ISD::SRA: + Opc8 = AVRISD::ASR; + break; + case ISD::ROTL: + Opc8 = AVRISD::ROL; + break; + case ISD::ROTR: + Opc8 = AVRISD::ROR; + break; + case ISD::SRL: + Opc8 = AVRISD::LSR; + break; + case ISD::SHL: + Opc8 = AVRISD::LSL; + break; + default: + llvm_unreachable("Invalid shift opcode"); + } + + while (ShiftAmount--) { + Victim = DAG.getNode(Opc8, dl, VT, Victim); + } + + return Victim; +} + +SDValue AVRTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const { + unsigned Opcode = Op->getOpcode(); + assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) && + "Invalid opcode for Div/Rem lowering"); + bool IsSigned = (Opcode == ISD::SDIVREM); + EVT VT = Op->getValueType(0); + Type *Ty = VT.getTypeForEVT(*DAG.getContext()); + + RTLIB::Libcall LC; + switch (VT.getSimpleVT().SimpleTy) { + default: + llvm_unreachable("Unexpected request for libcall!"); + case MVT::i8: + LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; + break; + case MVT::i16: + LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; + break; + case MVT::i32: + LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; + break; + case MVT::i64: + LC = IsSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; + break; + case MVT::i128: + LC = IsSigned ? RTLIB::SDIVREM_I128 : RTLIB::UDIVREM_I128; + break; + } + + SDValue InChain = DAG.getEntryNode(); + + TargetLowering::ArgListTy Args; + TargetLowering::ArgListEntry Entry; + for (SDValue const &Value : Op->op_values()) { + Entry.Node = Value; + Entry.Ty = Value.getValueType().getTypeForEVT(*DAG.getContext()); + Entry.IsSExt = IsSigned; + Entry.IsZExt = !IsSigned; + Args.push_back(Entry); + } + + SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), + getPointerTy(DAG.getDataLayout())); + + Type *RetTy = (Type *)StructType::get(Ty, Ty); + + SDLoc dl(Op); + TargetLowering::CallLoweringInfo CLI(DAG); + CLI.setDebugLoc(dl) + .setChain(InChain) + .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args)) + .setInRegister() + .setSExtResult(IsSigned) + .setZExtResult(!IsSigned); + + std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); + return CallInfo.first; +} + +SDValue AVRTargetLowering::LowerGlobalAddress(SDValue Op, + SelectionDAG &DAG) const { + auto DL = DAG.getDataLayout(); + + const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); + int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset(); + + // Create the TargetGlobalAddress node, folding in the constant offset. 
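+  // (The WRAPPER node marks the symbolic address so that the 16-bit
+  // address-materialization patterns, typically ldi lo8()/hi8() pairs, can
+  // match it.)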
+ SDValue Result = + DAG.getTargetGlobalAddress(GV, SDLoc(Op), getPointerTy(DL), Offset); + return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result); +} + +SDValue AVRTargetLowering::LowerBlockAddress(SDValue Op, + SelectionDAG &DAG) const { + auto DL = DAG.getDataLayout(); + const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); + + SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(DL)); + + return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result); +} + +/// IntCCToAVRCC - Convert a DAG integer condition code to an AVR CC. +static AVRCC::CondCodes intCCToAVRCC(ISD::CondCode CC) { + switch (CC) { + default: + llvm_unreachable("Unknown condition code!"); + case ISD::SETEQ: + return AVRCC::COND_EQ; + case ISD::SETNE: + return AVRCC::COND_NE; + case ISD::SETGE: + return AVRCC::COND_GE; + case ISD::SETLT: + return AVRCC::COND_LT; + case ISD::SETUGE: + return AVRCC::COND_SH; + case ISD::SETULT: + return AVRCC::COND_LO; + } +} + +/// Returns appropriate AVR CMP/CMPC nodes and corresponding condition code for +/// the given operands. +SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, + SDValue &AVRcc, SelectionDAG &DAG, + SDLoc DL) const { + SDValue Cmp; + EVT VT = LHS.getValueType(); + bool UseTest = false; + + switch (CC) { + default: + break; + case ISD::SETLE: { + // Swap operands and reverse the branching condition. + std::swap(LHS, RHS); + CC = ISD::SETGE; + break; + } + case ISD::SETGT: { + if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) { + switch (C->getSExtValue()) { + case -1: { + // When doing lhs > -1 use a tst instruction on the top part of lhs + // and use brpl instead of using a chain of cp/cpc. + UseTest = true; + AVRcc = DAG.getConstant(AVRCC::COND_PL, DL, MVT::i8); + break; + } + case 0: { + // Turn lhs > 0 into 0 < lhs since 0 can be materialized with + // __zero_reg__ in lhs. + RHS = LHS; + LHS = DAG.getConstant(0, DL, VT); + CC = ISD::SETLT; + break; + } + default: { + // Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows + // us to fold the constant into the cmp instruction. + RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT); + CC = ISD::SETGE; + break; + } + } + break; + } + // Swap operands and reverse the branching condition. + std::swap(LHS, RHS); + CC = ISD::SETLT; + break; + } + case ISD::SETLT: { + if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) { + switch (C->getSExtValue()) { + case 1: { + // Turn lhs < 1 into 0 >= lhs since 0 can be materialized with + // __zero_reg__ in lhs. + RHS = LHS; + LHS = DAG.getConstant(0, DL, VT); + CC = ISD::SETGE; + break; + } + case 0: { + // When doing lhs < 0 use a tst instruction on the top part of lhs + // and use brmi instead of using a chain of cp/cpc. + UseTest = true; + AVRcc = DAG.getConstant(AVRCC::COND_MI, DL, MVT::i8); + break; + } + } + } + break; + } + case ISD::SETULE: { + // Swap operands and reverse the branching condition. + std::swap(LHS, RHS); + CC = ISD::SETUGE; + break; + } + case ISD::SETUGT: { + // Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows us to + // fold the constant into the cmp instruction. + if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) { + RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT); + CC = ISD::SETUGE; + break; + } + // Swap operands and reverse the branching condition. 
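+    // (lhs >u rhs is equivalent to rhs <u lhs, and SETULT maps directly onto
+    // AVRCC::COND_LO above.)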
+ std::swap(LHS, RHS); + CC = ISD::SETULT; + break; + } + } + + // Expand 32 and 64 bit comparisons with custom CMP and CMPC nodes instead of + // using the default and/or/xor expansion code which is much longer. + if (VT == MVT::i32) { + SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS, + DAG.getIntPtrConstant(0, DL)); + SDValue LHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS, + DAG.getIntPtrConstant(1, DL)); + SDValue RHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS, + DAG.getIntPtrConstant(0, DL)); + SDValue RHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS, + DAG.getIntPtrConstant(1, DL)); + + if (UseTest) { + // When using tst we only care about the highest part. + SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHShi, + DAG.getIntPtrConstant(1, DL)); + Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top); + } else { + Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo); + Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp); + } + } else if (VT == MVT::i64) { + SDValue LHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS, + DAG.getIntPtrConstant(0, DL)); + SDValue LHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS, + DAG.getIntPtrConstant(1, DL)); + + SDValue LHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0, + DAG.getIntPtrConstant(0, DL)); + SDValue LHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0, + DAG.getIntPtrConstant(1, DL)); + SDValue LHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1, + DAG.getIntPtrConstant(0, DL)); + SDValue LHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1, + DAG.getIntPtrConstant(1, DL)); + + SDValue RHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS, + DAG.getIntPtrConstant(0, DL)); + SDValue RHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS, + DAG.getIntPtrConstant(1, DL)); + + SDValue RHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0, + DAG.getIntPtrConstant(0, DL)); + SDValue RHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0, + DAG.getIntPtrConstant(1, DL)); + SDValue RHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1, + DAG.getIntPtrConstant(0, DL)); + SDValue RHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1, + DAG.getIntPtrConstant(1, DL)); + + if (UseTest) { + // When using tst we only care about the highest part. + SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS3, + DAG.getIntPtrConstant(1, DL)); + Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top); + } else { + Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS0, RHS0); + Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS1, RHS1, Cmp); + Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS2, RHS2, Cmp); + Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS3, RHS3, Cmp); + } + } else if (VT == MVT::i8 || VT == MVT::i16) { + if (UseTest) { + // When using tst we only care about the highest part. + Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, + (VT == MVT::i8) + ? LHS + : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, + LHS, DAG.getIntPtrConstant(1, DL))); + } else { + Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS, RHS); + } + } else { + llvm_unreachable("Invalid comparison size"); + } + + // When using a test instruction AVRcc is already set. 
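+  // Otherwise derive it here from the (possibly rewritten) ISD condition code.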
+ if (!UseTest) { + AVRcc = DAG.getConstant(intCCToAVRCC(CC), DL, MVT::i8); + } + + return Cmp; +} + +SDValue AVRTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { + SDValue Chain = Op.getOperand(0); + ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); + SDValue LHS = Op.getOperand(2); + SDValue RHS = Op.getOperand(3); + SDValue Dest = Op.getOperand(4); + SDLoc dl(Op); + + SDValue TargetCC; + SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl); + + return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC, + Cmp); +} + +SDValue AVRTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { + SDValue LHS = Op.getOperand(0); + SDValue RHS = Op.getOperand(1); + SDValue TrueV = Op.getOperand(2); + SDValue FalseV = Op.getOperand(3); + ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); + SDLoc dl(Op); + + SDValue TargetCC; + SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl); + + SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); + SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp}; + + return DAG.getNode(AVRISD::SELECT_CC, dl, VTs, Ops); +} + +SDValue AVRTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { + SDValue LHS = Op.getOperand(0); + SDValue RHS = Op.getOperand(1); + ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); + SDLoc DL(Op); + + SDValue TargetCC; + SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, DL); + + SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType()); + SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType()); + SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); + SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp}; + + return DAG.getNode(AVRISD::SELECT_CC, DL, VTs, Ops); +} + +SDValue AVRTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { + const MachineFunction &MF = DAG.getMachineFunction(); + const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>(); + const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); + auto DL = DAG.getDataLayout(); + SDLoc dl(Op); + + // Vastart just stores the address of the VarArgsFrameIndex slot into the + // memory location argument. + SDValue FI = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), getPointerTy(DL)); + + return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1), + MachinePointerInfo(SV), 0); +} + +SDValue AVRTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { + switch (Op.getOpcode()) { + default: + llvm_unreachable("Don't know how to custom lower this!"); + case ISD::SHL: + case ISD::SRA: + case ISD::SRL: + case ISD::ROTL: + case ISD::ROTR: + return LowerShifts(Op, DAG); + case ISD::GlobalAddress: + return LowerGlobalAddress(Op, DAG); + case ISD::BlockAddress: + return LowerBlockAddress(Op, DAG); + case ISD::BR_CC: + return LowerBR_CC(Op, DAG); + case ISD::SELECT_CC: + return LowerSELECT_CC(Op, DAG); + case ISD::SETCC: + return LowerSETCC(Op, DAG); + case ISD::VASTART: + return LowerVASTART(Op, DAG); + case ISD::SDIVREM: + case ISD::UDIVREM: + return LowerDivRem(Op, DAG); + } + + return SDValue(); +} + +/// Replace a node with an illegal result type +/// with a new node built out of custom code. +void AVRTargetLowering::ReplaceNodeResults(SDNode *N, + SmallVectorImpl<SDValue> &Results, + SelectionDAG &DAG) const { + SDLoc DL(N); + + switch (N->getOpcode()) { + case ISD::ADD: { + // Convert add (x, imm) into sub (x, -imm). 
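+    // (e.g. 'add i32 %x, 16' becomes 'sub i32 %x, -16', which the multi-word
+    // expansion can then handle with subtract-with-immediate instructions.)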
+ if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) { + SDValue Sub = DAG.getNode( + ISD::SUB, DL, N->getValueType(0), N->getOperand(0), + DAG.getConstant(-C->getAPIntValue(), DL, C->getValueType(0))); + Results.push_back(Sub); + } + break; + } + default: { + SDValue Res = LowerOperation(SDValue(N, 0), DAG); + + for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I) + Results.push_back(Res.getValue(I)); + + break; + } + } +} + +/// Return true if the addressing mode represented +/// by AM is legal for this target, for a load/store of the specified type. +bool AVRTargetLowering::isLegalAddressingMode(const DataLayout &DL, + const AddrMode &AM, Type *Ty, + unsigned AS, Instruction *I) const { + int64_t Offs = AM.BaseOffs; + + // Allow absolute addresses. + if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && Offs == 0) { + return true; + } + + // Flash memory instructions only allow zero offsets. + if (isa<PointerType>(Ty) && AS == AVR::ProgramMemory) { + return false; + } + + // Allow reg+<6bit> offset. + if (Offs < 0) + Offs = -Offs; + if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 0 && isUInt<6>(Offs)) { + return true; + } + + return false; +} + +/// Returns true by value, base pointer and +/// offset pointer and addressing mode by reference if the node's address +/// can be legally represented as pre-indexed load / store address. +bool AVRTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, + SDValue &Offset, + ISD::MemIndexedMode &AM, + SelectionDAG &DAG) const { + EVT VT; + const SDNode *Op; + SDLoc DL(N); + + if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { + VT = LD->getMemoryVT(); + Op = LD->getBasePtr().getNode(); + if (LD->getExtensionType() != ISD::NON_EXTLOAD) + return false; + if (AVR::isProgramMemoryAccess(LD)) { + return false; + } + } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { + VT = ST->getMemoryVT(); + Op = ST->getBasePtr().getNode(); + if (AVR::isProgramMemoryAccess(ST)) { + return false; + } + } else { + return false; + } + + if (VT != MVT::i8 && VT != MVT::i16) { + return false; + } + + if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) { + return false; + } + + if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) { + int RHSC = RHS->getSExtValue(); + if (Op->getOpcode() == ISD::SUB) + RHSC = -RHSC; + + if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) { + return false; + } + + Base = Op->getOperand(0); + Offset = DAG.getConstant(RHSC, DL, MVT::i8); + AM = ISD::PRE_DEC; + + return true; + } + + return false; +} + +/// Returns true by value, base pointer and +/// offset pointer and addressing mode by reference if this node can be +/// combined with a load / store to form a post-indexed load / store. 
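+/// (e.g. an i16 load from P followed by 'P = P + 2' can be folded into a
+/// post-increment access through the pointer registers.)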
+bool AVRTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, + SDValue &Base, + SDValue &Offset, + ISD::MemIndexedMode &AM, + SelectionDAG &DAG) const { + EVT VT; + SDLoc DL(N); + + if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { + VT = LD->getMemoryVT(); + if (LD->getExtensionType() != ISD::NON_EXTLOAD) + return false; + } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { + VT = ST->getMemoryVT(); + if (AVR::isProgramMemoryAccess(ST)) { + return false; + } + } else { + return false; + } + + if (VT != MVT::i8 && VT != MVT::i16) { + return false; + } + + if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) { + return false; + } + + if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) { + int RHSC = RHS->getSExtValue(); + if (Op->getOpcode() == ISD::SUB) + RHSC = -RHSC; + if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) { + return false; + } + + Base = Op->getOperand(0); + Offset = DAG.getConstant(RHSC, DL, MVT::i8); + AM = ISD::POST_INC; + + return true; + } + + return false; +} + +bool AVRTargetLowering::isOffsetFoldingLegal( + const GlobalAddressSDNode *GA) const { + return true; +} + +//===----------------------------------------------------------------------===// +// Formal Arguments Calling Convention Implementation +//===----------------------------------------------------------------------===// + +#include "AVRGenCallingConv.inc" + +/// For each argument in a function store the number of pieces it is composed +/// of. +static void parseFunctionArgs(const SmallVectorImpl<ISD::InputArg> &Ins, + SmallVectorImpl<unsigned> &Out) { + for (const ISD::InputArg &Arg : Ins) { + if(Arg.PartOffset > 0) continue; + unsigned Bytes = ((Arg.ArgVT.getSizeInBits()) + 7) / 8; + + Out.push_back((Bytes + 1) / 2); + } +} + +/// For external symbols there is no function prototype information so we +/// have to rely directly on argument sizes. +static void parseExternFuncCallArgs(const SmallVectorImpl<ISD::OutputArg> &In, + SmallVectorImpl<unsigned> &Out) { + for (unsigned i = 0, e = In.size(); i != e;) { + unsigned Size = 0; + unsigned Offset = 0; + while ((i != e) && (In[i].PartOffset == Offset)) { + Offset += In[i].VT.getStoreSize(); + ++i; + ++Size; + } + Out.push_back(Size); + } +} + +static StringRef getFunctionName(TargetLowering::CallLoweringInfo &CLI) { + SDValue Callee = CLI.Callee; + + if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee)) { + return G->getSymbol(); + } + + if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { + return G->getGlobal()->getName(); + } + + llvm_unreachable("don't know how to get the name for this callee"); +} + +/// Analyze incoming and outgoing function arguments. We need custom C++ code +/// to handle special constraints in the ABI like reversing the order of the +/// pieces of splitted arguments. In addition, all pieces of a certain argument +/// have to be passed either using registers or the stack but never mixing both. 
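+/// (e.g. a single i32 argument is split into two i16 pieces which, after the
+/// reversal below, land in R23:R22 and R25:R24, keeping the low word in the
+/// lower-numbered registers as avr-gcc does.)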
+static void analyzeStandardArguments(TargetLowering::CallLoweringInfo *CLI, + const Function *F, const DataLayout *TD, + const SmallVectorImpl<ISD::OutputArg> *Outs, + const SmallVectorImpl<ISD::InputArg> *Ins, + CallingConv::ID CallConv, + SmallVectorImpl<CCValAssign> &ArgLocs, + CCState &CCInfo, bool IsCall, bool IsVarArg) { + static const MCPhysReg RegList8[] = {AVR::R24, AVR::R22, AVR::R20, + AVR::R18, AVR::R16, AVR::R14, + AVR::R12, AVR::R10, AVR::R8}; + static const MCPhysReg RegList16[] = {AVR::R25R24, AVR::R23R22, AVR::R21R20, + AVR::R19R18, AVR::R17R16, AVR::R15R14, + AVR::R13R12, AVR::R11R10, AVR::R9R8}; + if (IsVarArg) { + // Variadic functions do not need all the analysis below. + if (IsCall) { + CCInfo.AnalyzeCallOperands(*Outs, ArgCC_AVR_Vararg); + } else { + CCInfo.AnalyzeFormalArguments(*Ins, ArgCC_AVR_Vararg); + } + return; + } + + // Fill in the Args array which will contain original argument sizes. + SmallVector<unsigned, 8> Args; + if (IsCall) { + parseExternFuncCallArgs(*Outs, Args); + } else { + assert(F != nullptr && "function should not be null"); + parseFunctionArgs(*Ins, Args); + } + + unsigned RegsLeft = array_lengthof(RegList8), ValNo = 0; + // Variadic functions always use the stack. + bool UsesStack = false; + for (unsigned i = 0, pos = 0, e = Args.size(); i != e; ++i) { + unsigned Size = Args[i]; + + // If we have a zero-sized argument, don't attempt to lower it. + // AVR-GCC does not support zero-sized arguments and so we need not + // worry about ABI compatibility. + if (Size == 0) continue; + + MVT LocVT = (IsCall) ? (*Outs)[pos].VT : (*Ins)[pos].VT; + + // If we have plenty of regs to pass the whole argument do it. + if (!UsesStack && (Size <= RegsLeft)) { + const MCPhysReg *RegList = (LocVT == MVT::i16) ? RegList16 : RegList8; + + for (unsigned j = 0; j != Size; ++j) { + unsigned Reg = CCInfo.AllocateReg( + ArrayRef<MCPhysReg>(RegList, array_lengthof(RegList8))); + CCInfo.addLoc( + CCValAssign::getReg(ValNo++, LocVT, Reg, LocVT, CCValAssign::Full)); + --RegsLeft; + } + + // Reverse the order of the pieces to agree with the "big endian" format + // required in the calling convention ABI. + std::reverse(ArgLocs.begin() + pos, ArgLocs.begin() + pos + Size); + } else { + // Pass the rest of arguments using the stack. 
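+      // Once one argument has spilled to the stack, every following argument
+      // is passed on the stack as well, so the pieces of a single argument
+      // never end up split between registers and the stack.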
+ UsesStack = true; + for (unsigned j = 0; j != Size; ++j) { + unsigned Offset = CCInfo.AllocateStack( + TD->getTypeAllocSize(EVT(LocVT).getTypeForEVT(CCInfo.getContext())), + TD->getABITypeAlignment( + EVT(LocVT).getTypeForEVT(CCInfo.getContext()))); + CCInfo.addLoc(CCValAssign::getMem(ValNo++, LocVT, Offset, LocVT, + CCValAssign::Full)); + } + } + pos += Size; + } +} + +static void analyzeBuiltinArguments(TargetLowering::CallLoweringInfo &CLI, + const Function *F, const DataLayout *TD, + const SmallVectorImpl<ISD::OutputArg> *Outs, + const SmallVectorImpl<ISD::InputArg> *Ins, + CallingConv::ID CallConv, + SmallVectorImpl<CCValAssign> &ArgLocs, + CCState &CCInfo, bool IsCall, bool IsVarArg) { + StringRef FuncName = getFunctionName(CLI); + + if (FuncName.startswith("__udivmod") || FuncName.startswith("__divmod")) { + CCInfo.AnalyzeCallOperands(*Outs, ArgCC_AVR_BUILTIN_DIV); + } else { + analyzeStandardArguments(&CLI, F, TD, Outs, Ins, + CallConv, ArgLocs, CCInfo, + IsCall, IsVarArg); + } +} + +static void analyzeArguments(TargetLowering::CallLoweringInfo *CLI, + const Function *F, const DataLayout *TD, + const SmallVectorImpl<ISD::OutputArg> *Outs, + const SmallVectorImpl<ISD::InputArg> *Ins, + CallingConv::ID CallConv, + SmallVectorImpl<CCValAssign> &ArgLocs, + CCState &CCInfo, bool IsCall, bool IsVarArg) { + switch (CallConv) { + case CallingConv::AVR_BUILTIN: { + analyzeBuiltinArguments(*CLI, F, TD, Outs, Ins, + CallConv, ArgLocs, CCInfo, + IsCall, IsVarArg); + return; + } + default: { + analyzeStandardArguments(CLI, F, TD, Outs, Ins, + CallConv, ArgLocs, CCInfo, + IsCall, IsVarArg); + return; + } + } +} + +SDValue AVRTargetLowering::LowerFormalArguments( + SDValue Chain, CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) const { + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo &MFI = MF.getFrameInfo(); + auto DL = DAG.getDataLayout(); + + // Assign locations to all of the incoming arguments. + SmallVector<CCValAssign, 16> ArgLocs; + CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, + *DAG.getContext()); + + analyzeArguments(nullptr, &MF.getFunction(), &DL, 0, &Ins, CallConv, ArgLocs, CCInfo, + false, isVarArg); + + SDValue ArgValue; + for (CCValAssign &VA : ArgLocs) { + + // Arguments stored on registers. + if (VA.isRegLoc()) { + EVT RegVT = VA.getLocVT(); + const TargetRegisterClass *RC; + if (RegVT == MVT::i8) { + RC = &AVR::GPR8RegClass; + } else if (RegVT == MVT::i16) { + RC = &AVR::DREGSRegClass; + } else { + llvm_unreachable("Unknown argument type!"); + } + + unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); + ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); + + // :NOTE: Clang should not promote any i8 into i16 but for safety the + // following code will handle zexts or sexts generated by other + // front ends. Otherwise: + // If this is an 8 bit value, it is really passed promoted + // to 16 bits. Insert an assert[sz]ext to capture this, then + // truncate to the right size. 
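+      // (AssertSext/AssertZext only annotate the known sign/zero bits for
+      // later DAG combines; they do not emit any code.)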
+ switch (VA.getLocInfo()) { + default: + llvm_unreachable("Unknown loc info!"); + case CCValAssign::Full: + break; + case CCValAssign::BCvt: + ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); + break; + case CCValAssign::SExt: + ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, + DAG.getValueType(VA.getValVT())); + ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); + break; + case CCValAssign::ZExt: + ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, + DAG.getValueType(VA.getValVT())); + ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); + break; + } + + InVals.push_back(ArgValue); + } else { + // Sanity check. + assert(VA.isMemLoc()); + + EVT LocVT = VA.getLocVT(); + + // Create the frame index object for this incoming parameter. + int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8, + VA.getLocMemOffset(), true); + + // Create the SelectionDAG nodes corresponding to a load + // from this parameter. + SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DL)); + InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN, + MachinePointerInfo::getFixedStack(MF, FI), + 0)); + } + } + + // If the function takes variable number of arguments, make a frame index for + // the start of the first vararg value... for expansion of llvm.va_start. + if (isVarArg) { + unsigned StackSize = CCInfo.getNextStackOffset(); + AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>(); + + AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true)); + } + + return Chain; +} + +//===----------------------------------------------------------------------===// +// Call Calling Convention Implementation +//===----------------------------------------------------------------------===// + +SDValue AVRTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, + SmallVectorImpl<SDValue> &InVals) const { + SelectionDAG &DAG = CLI.DAG; + SDLoc &DL = CLI.DL; + SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; + SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; + SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; + SDValue Chain = CLI.Chain; + SDValue Callee = CLI.Callee; + bool &isTailCall = CLI.IsTailCall; + CallingConv::ID CallConv = CLI.CallConv; + bool isVarArg = CLI.IsVarArg; + + MachineFunction &MF = DAG.getMachineFunction(); + + // AVR does not yet support tail call optimization. + isTailCall = false; + + // Analyze operands of the call, assigning locations to each operand. + SmallVector<CCValAssign, 16> ArgLocs; + CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, + *DAG.getContext()); + + // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every + // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol + // node so that legalize doesn't hack it. + const Function *F = nullptr; + if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { + const GlobalValue *GV = G->getGlobal(); + + F = cast<Function>(GV); + Callee = + DAG.getTargetGlobalAddress(GV, DL, getPointerTy(DAG.getDataLayout())); + } else if (const ExternalSymbolSDNode *ES = + dyn_cast<ExternalSymbolSDNode>(Callee)) { + Callee = DAG.getTargetExternalSymbol(ES->getSymbol(), + getPointerTy(DAG.getDataLayout())); + } + + analyzeArguments(&CLI, F, &DAG.getDataLayout(), &Outs, 0, CallConv, ArgLocs, CCInfo, + true, isVarArg); + + // Get a count of how many bytes are to be pushed on the stack. 
+ unsigned NumBytes = CCInfo.getNextStackOffset(); + + Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL); + + SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; + + // First, walk the register assignments, inserting copies. + unsigned AI, AE; + bool HasStackArgs = false; + for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) { + CCValAssign &VA = ArgLocs[AI]; + EVT RegVT = VA.getLocVT(); + SDValue Arg = OutVals[AI]; + + // Promote the value if needed. With Clang this should not happen. + switch (VA.getLocInfo()) { + default: + llvm_unreachable("Unknown loc info!"); + case CCValAssign::Full: + break; + case CCValAssign::SExt: + Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg); + break; + case CCValAssign::ZExt: + Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg); + break; + case CCValAssign::AExt: + Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg); + break; + case CCValAssign::BCvt: + Arg = DAG.getNode(ISD::BITCAST, DL, RegVT, Arg); + break; + } + + // Stop when we encounter a stack argument, we need to process them + // in reverse order in the loop below. + if (VA.isMemLoc()) { + HasStackArgs = true; + break; + } + + // Arguments that can be passed on registers must be kept in the RegsToPass + // vector. + RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); + } + + // Second, stack arguments have to walked in reverse order by inserting + // chained stores, this ensures their order is not changed by the scheduler + // and that the push instruction sequence generated is correct, otherwise they + // can be freely intermixed. + if (HasStackArgs) { + for (AE = AI, AI = ArgLocs.size(); AI != AE; --AI) { + unsigned Loc = AI - 1; + CCValAssign &VA = ArgLocs[Loc]; + SDValue Arg = OutVals[Loc]; + + assert(VA.isMemLoc()); + + // SP points to one stack slot further so add one to adjust it. + SDValue PtrOff = DAG.getNode( + ISD::ADD, DL, getPointerTy(DAG.getDataLayout()), + DAG.getRegister(AVR::SP, getPointerTy(DAG.getDataLayout())), + DAG.getIntPtrConstant(VA.getLocMemOffset() + 1, DL)); + + Chain = + DAG.getStore(Chain, DL, Arg, PtrOff, + MachinePointerInfo::getStack(MF, VA.getLocMemOffset()), + 0); + } + } + + // Build a sequence of copy-to-reg nodes chained together with token chain and + // flag operands which copy the outgoing args into registers. The InFlag in + // necessary since all emited instructions must be stuck together. + SDValue InFlag; + for (auto Reg : RegsToPass) { + Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, InFlag); + InFlag = Chain.getValue(1); + } + + // Returns a chain & a flag for retval copy to use. + SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); + SmallVector<SDValue, 8> Ops; + Ops.push_back(Chain); + Ops.push_back(Callee); + + // Add argument registers to the end of the list so that they are known live + // into the call. + for (auto Reg : RegsToPass) { + Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType())); + } + + // Add a register mask operand representing the call-preserved registers. + const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); + const uint32_t *Mask = + TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv); + assert(Mask && "Missing call preserved mask for calling convention"); + Ops.push_back(DAG.getRegisterMask(Mask)); + + if (InFlag.getNode()) { + Ops.push_back(InFlag); + } + + Chain = DAG.getNode(AVRISD::CALL, DL, NodeTys, Ops); + InFlag = Chain.getValue(1); + + // Create the CALLSEQ_END node. 
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true), + DAG.getIntPtrConstant(0, DL, true), InFlag, DL); + + if (!Ins.empty()) { + InFlag = Chain.getValue(1); + } + + // Handle result values, copying them out of physregs into vregs that we + // return. + return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, DL, DAG, + InVals); +} + +/// Lower the result values of a call into the +/// appropriate copies out of appropriate physical registers. +/// +SDValue AVRTargetLowering::LowerCallResult( + SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) const { + + // Assign locations to each value returned by this call. + SmallVector<CCValAssign, 16> RVLocs; + CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, + *DAG.getContext()); + + // Handle runtime calling convs. + auto CCFunction = CCAssignFnForReturn(CallConv); + CCInfo.AnalyzeCallResult(Ins, CCFunction); + + if (CallConv != CallingConv::AVR_BUILTIN && RVLocs.size() > 1) { + // Reverse splitted return values to get the "big endian" format required + // to agree with the calling convention ABI. + std::reverse(RVLocs.begin(), RVLocs.end()); + } + + // Copy all of the result registers out of their specified physreg. + for (CCValAssign const &RVLoc : RVLocs) { + Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(), + InFlag) + .getValue(1); + InFlag = Chain.getValue(2); + InVals.push_back(Chain.getValue(0)); + } + + return Chain; +} + +//===----------------------------------------------------------------------===// +// Return Value Calling Convention Implementation +//===----------------------------------------------------------------------===// + +CCAssignFn *AVRTargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const { + switch (CC) { + case CallingConv::AVR_BUILTIN: + return RetCC_AVR_BUILTIN; + default: + return RetCC_AVR; + } +} + +bool +AVRTargetLowering::CanLowerReturn(CallingConv::ID CallConv, + MachineFunction &MF, bool isVarArg, + const SmallVectorImpl<ISD::OutputArg> &Outs, + LLVMContext &Context) const +{ + SmallVector<CCValAssign, 16> RVLocs; + CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); + + auto CCFunction = CCAssignFnForReturn(CallConv); + return CCInfo.CheckReturn(Outs, CCFunction); +} + +SDValue +AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, + bool isVarArg, + const SmallVectorImpl<ISD::OutputArg> &Outs, + const SmallVectorImpl<SDValue> &OutVals, + const SDLoc &dl, SelectionDAG &DAG) const { + // CCValAssign - represent the assignment of the return value to locations. + SmallVector<CCValAssign, 16> RVLocs; + + // CCState - Info about the registers and stack slot. + CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, + *DAG.getContext()); + + // Analyze return values. + auto CCFunction = CCAssignFnForReturn(CallConv); + CCInfo.AnalyzeReturn(Outs, CCFunction); + + // If this is the first return lowered for this function, add the regs to + // the liveout set for the function. + MachineFunction &MF = DAG.getMachineFunction(); + unsigned e = RVLocs.size(); + + // Reverse splitted return values to get the "big endian" format required + // to agree with the calling convention ABI. + if (e > 1) { + std::reverse(RVLocs.begin(), RVLocs.end()); + } + + SDValue Flag; + SmallVector<SDValue, 4> RetOps(1, Chain); + // Copy the result values into the output registers. 
+ for (unsigned i = 0; i != e; ++i) { + CCValAssign &VA = RVLocs[i]; + assert(VA.isRegLoc() && "Can only return in registers!"); + + Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag); + + // Guarantee that all emitted copies are stuck together with flags. + Flag = Chain.getValue(1); + RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); + } + + // Don't emit the ret/reti instruction when the naked attribute is present in + // the function being compiled. + if (MF.getFunction().getAttributes().hasAttribute( + AttributeList::FunctionIndex, Attribute::Naked)) { + return Chain; + } + + unsigned RetOpc = + (CallConv == CallingConv::AVR_INTR || CallConv == CallingConv::AVR_SIGNAL) + ? AVRISD::RETI_FLAG + : AVRISD::RET_FLAG; + + RetOps[0] = Chain; // Update chain. + + if (Flag.getNode()) { + RetOps.push_back(Flag); + } + + return DAG.getNode(RetOpc, dl, MVT::Other, RetOps); +} + +//===----------------------------------------------------------------------===// +// Custom Inserters +//===----------------------------------------------------------------------===// + +MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI, + MachineBasicBlock *BB) const { + unsigned Opc; + const TargetRegisterClass *RC; + bool HasRepeatedOperand = false; + MachineFunction *F = BB->getParent(); + MachineRegisterInfo &RI = F->getRegInfo(); + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + DebugLoc dl = MI.getDebugLoc(); + + switch (MI.getOpcode()) { + default: + llvm_unreachable("Invalid shift opcode!"); + case AVR::Lsl8: + Opc = AVR::ADDRdRr; // LSL is an alias of ADD Rd, Rd + RC = &AVR::GPR8RegClass; + HasRepeatedOperand = true; + break; + case AVR::Lsl16: + Opc = AVR::LSLWRd; + RC = &AVR::DREGSRegClass; + break; + case AVR::Asr8: + Opc = AVR::ASRRd; + RC = &AVR::GPR8RegClass; + break; + case AVR::Asr16: + Opc = AVR::ASRWRd; + RC = &AVR::DREGSRegClass; + break; + case AVR::Lsr8: + Opc = AVR::LSRRd; + RC = &AVR::GPR8RegClass; + break; + case AVR::Lsr16: + Opc = AVR::LSRWRd; + RC = &AVR::DREGSRegClass; + break; + case AVR::Rol8: + Opc = AVR::ADCRdRr; // ROL is an alias of ADC Rd, Rd + RC = &AVR::GPR8RegClass; + HasRepeatedOperand = true; + break; + case AVR::Rol16: + Opc = AVR::ROLWRd; + RC = &AVR::DREGSRegClass; + break; + case AVR::Ror8: + Opc = AVR::RORRd; + RC = &AVR::GPR8RegClass; + break; + case AVR::Ror16: + Opc = AVR::RORWRd; + RC = &AVR::DREGSRegClass; + break; + } + + const BasicBlock *LLVM_BB = BB->getBasicBlock(); + + MachineFunction::iterator I; + for (I = BB->getIterator(); I != F->end() && &(*I) != BB; ++I); + if (I != F->end()) ++I; + + // Create loop block. + MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(LLVM_BB); + + F->insert(I, LoopBB); + F->insert(I, RemBB); + + // Update machine-CFG edges by transferring all successors of the current + // block to the block containing instructions after shift. + RemBB->splice(RemBB->begin(), BB, std::next(MachineBasicBlock::iterator(MI)), + BB->end()); + RemBB->transferSuccessorsAndUpdatePHIs(BB); + + // Add adges BB => LoopBB => RemBB, BB => RemBB, LoopBB => LoopBB. 
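+  // (AVR shifts move a single bit, so LoopBB runs once per bit of the runtime
+  // shift amount, while a zero amount branches straight to RemBB.)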
+ BB->addSuccessor(LoopBB); + BB->addSuccessor(RemBB); + LoopBB->addSuccessor(RemBB); + LoopBB->addSuccessor(LoopBB); + + unsigned ShiftAmtReg = RI.createVirtualRegister(&AVR::LD8RegClass); + unsigned ShiftAmtReg2 = RI.createVirtualRegister(&AVR::LD8RegClass); + unsigned ShiftReg = RI.createVirtualRegister(RC); + unsigned ShiftReg2 = RI.createVirtualRegister(RC); + unsigned ShiftAmtSrcReg = MI.getOperand(2).getReg(); + unsigned SrcReg = MI.getOperand(1).getReg(); + unsigned DstReg = MI.getOperand(0).getReg(); + + // BB: + // cpi N, 0 + // breq RemBB + BuildMI(BB, dl, TII.get(AVR::CPIRdK)).addReg(ShiftAmtSrcReg).addImm(0); + BuildMI(BB, dl, TII.get(AVR::BREQk)).addMBB(RemBB); + + // LoopBB: + // ShiftReg = phi [%SrcReg, BB], [%ShiftReg2, LoopBB] + // ShiftAmt = phi [%N, BB], [%ShiftAmt2, LoopBB] + // ShiftReg2 = shift ShiftReg + // ShiftAmt2 = ShiftAmt - 1; + BuildMI(LoopBB, dl, TII.get(AVR::PHI), ShiftReg) + .addReg(SrcReg) + .addMBB(BB) + .addReg(ShiftReg2) + .addMBB(LoopBB); + BuildMI(LoopBB, dl, TII.get(AVR::PHI), ShiftAmtReg) + .addReg(ShiftAmtSrcReg) + .addMBB(BB) + .addReg(ShiftAmtReg2) + .addMBB(LoopBB); + + auto ShiftMI = BuildMI(LoopBB, dl, TII.get(Opc), ShiftReg2).addReg(ShiftReg); + if (HasRepeatedOperand) + ShiftMI.addReg(ShiftReg); + + BuildMI(LoopBB, dl, TII.get(AVR::SUBIRdK), ShiftAmtReg2) + .addReg(ShiftAmtReg) + .addImm(1); + BuildMI(LoopBB, dl, TII.get(AVR::BRNEk)).addMBB(LoopBB); + + // RemBB: + // DestReg = phi [%SrcReg, BB], [%ShiftReg, LoopBB] + BuildMI(*RemBB, RemBB->begin(), dl, TII.get(AVR::PHI), DstReg) + .addReg(SrcReg) + .addMBB(BB) + .addReg(ShiftReg2) + .addMBB(LoopBB); + + MI.eraseFromParent(); // The pseudo instruction is gone now. + return RemBB; +} + +static bool isCopyMulResult(MachineBasicBlock::iterator const &I) { + if (I->getOpcode() == AVR::COPY) { + unsigned SrcReg = I->getOperand(1).getReg(); + return (SrcReg == AVR::R0 || SrcReg == AVR::R1); + } + + return false; +} + +// The mul instructions wreak havock on our zero_reg R1. We need to clear it +// after the result has been evacuated. This is probably not the best way to do +// it, but it works for now. +MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI, + MachineBasicBlock *BB) const { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + MachineBasicBlock::iterator I(MI); + ++I; // in any case insert *after* the mul instruction + if (isCopyMulResult(I)) + ++I; + if (isCopyMulResult(I)) + ++I; + BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::EORRdRr), AVR::R1) + .addReg(AVR::R1) + .addReg(AVR::R1); + return BB; +} + +MachineBasicBlock * +AVRTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, + MachineBasicBlock *MBB) const { + int Opc = MI.getOpcode(); + + // Pseudo shift instructions with a non constant shift amount are expanded + // into a loop. + switch (Opc) { + case AVR::Lsl8: + case AVR::Lsl16: + case AVR::Lsr8: + case AVR::Lsr16: + case AVR::Rol8: + case AVR::Rol16: + case AVR::Ror8: + case AVR::Ror16: + case AVR::Asr8: + case AVR::Asr16: + return insertShift(MI, MBB); + case AVR::MULRdRr: + case AVR::MULSRdRr: + return insertMul(MI, MBB); + } + + assert((Opc == AVR::Select16 || Opc == AVR::Select8) && + "Unexpected instr type to insert"); + + const AVRInstrInfo &TII = (const AVRInstrInfo &)*MI.getParent() + ->getParent() + ->getSubtarget() + .getInstrInfo(); + DebugLoc dl = MI.getDebugLoc(); + + // To "insert" a SELECT instruction, we insert the diamond + // control-flow pattern. 
The incoming instruction knows the + // destination vreg to set, the condition code register to branch + // on, the true/false values to select between, and a branch opcode + // to use. + + MachineFunction *MF = MBB->getParent(); + const BasicBlock *LLVM_BB = MBB->getBasicBlock(); + MachineBasicBlock *FallThrough = MBB->getFallThrough(); + + // If the current basic block falls through to another basic block, + // we must insert an unconditional branch to the fallthrough destination + // if we are to insert basic blocks at the prior fallthrough point. + if (FallThrough != nullptr) { + BuildMI(MBB, dl, TII.get(AVR::RJMPk)).addMBB(FallThrough); + } + + MachineBasicBlock *trueMBB = MF->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *falseMBB = MF->CreateMachineBasicBlock(LLVM_BB); + + MachineFunction::iterator I; + for (I = MF->begin(); I != MF->end() && &(*I) != MBB; ++I); + if (I != MF->end()) ++I; + MF->insert(I, trueMBB); + MF->insert(I, falseMBB); + + // Transfer remaining instructions and all successors of the current + // block to the block which will contain the Phi node for the + // select. + trueMBB->splice(trueMBB->begin(), MBB, + std::next(MachineBasicBlock::iterator(MI)), MBB->end()); + trueMBB->transferSuccessorsAndUpdatePHIs(MBB); + + AVRCC::CondCodes CC = (AVRCC::CondCodes)MI.getOperand(3).getImm(); + BuildMI(MBB, dl, TII.getBrCond(CC)).addMBB(trueMBB); + BuildMI(MBB, dl, TII.get(AVR::RJMPk)).addMBB(falseMBB); + MBB->addSuccessor(falseMBB); + MBB->addSuccessor(trueMBB); + + // Unconditionally flow back to the true block + BuildMI(falseMBB, dl, TII.get(AVR::RJMPk)).addMBB(trueMBB); + falseMBB->addSuccessor(trueMBB); + + // Set up the Phi node to determine where we came from + BuildMI(*trueMBB, trueMBB->begin(), dl, TII.get(AVR::PHI), MI.getOperand(0).getReg()) + .addReg(MI.getOperand(1).getReg()) + .addMBB(MBB) + .addReg(MI.getOperand(2).getReg()) + .addMBB(falseMBB) ; + + MI.eraseFromParent(); // The pseudo instruction is gone now. + return trueMBB; +} + +//===----------------------------------------------------------------------===// +// Inline Asm Support +//===----------------------------------------------------------------------===// + +AVRTargetLowering::ConstraintType +AVRTargetLowering::getConstraintType(StringRef Constraint) const { + if (Constraint.size() == 1) { + // See http://www.nongnu.org/avr-libc/user-manual/inline_asm.html + switch (Constraint[0]) { + default: + break; + case 'a': // Simple upper registers + case 'b': // Base pointer registers pairs + case 'd': // Upper register + case 'l': // Lower registers + case 'e': // Pointer register pairs + case 'q': // Stack pointer register + case 'r': // Any register + case 'w': // Special upper register pairs + return C_RegisterClass; + case 't': // Temporary register + case 'x': case 'X': // Pointer register pair X + case 'y': case 'Y': // Pointer register pair Y + case 'z': case 'Z': // Pointer register pair Z + return C_Register; + case 'Q': // A memory address based on Y or Z pointer with displacement. 
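+      // A typical use from C, assuming avr-gcc style inline assembly, looks
+      // something like: asm("ldd %0, %1" : "=r"(v) : "Q"(s->field));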
+ return C_Memory; + case 'G': // Floating point constant + case 'I': // 6-bit positive integer constant + case 'J': // 6-bit negative integer constant + case 'K': // Integer constant (Range: 2) + case 'L': // Integer constant (Range: 0) + case 'M': // 8-bit integer constant + case 'N': // Integer constant (Range: -1) + case 'O': // Integer constant (Range: 8, 16, 24) + case 'P': // Integer constant (Range: 1) + case 'R': // Integer constant (Range: -6 to 5)x + return C_Immediate; + } + } + + return TargetLowering::getConstraintType(Constraint); +} + +unsigned +AVRTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const { + // Not sure if this is actually the right thing to do, but we got to do + // *something* [agnat] + switch (ConstraintCode[0]) { + case 'Q': + return InlineAsm::Constraint_Q; + } + return TargetLowering::getInlineAsmMemConstraint(ConstraintCode); +} + +AVRTargetLowering::ConstraintWeight +AVRTargetLowering::getSingleConstraintMatchWeight( + AsmOperandInfo &info, const char *constraint) const { + ConstraintWeight weight = CW_Invalid; + Value *CallOperandVal = info.CallOperandVal; + + // If we don't have a value, we can't do a match, + // but allow it at the lowest weight. + // (this behaviour has been copied from the ARM backend) + if (!CallOperandVal) { + return CW_Default; + } + + // Look at the constraint type. + switch (*constraint) { + default: + weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); + break; + case 'd': + case 'r': + case 'l': + weight = CW_Register; + break; + case 'a': + case 'b': + case 'e': + case 'q': + case 't': + case 'w': + case 'x': case 'X': + case 'y': case 'Y': + case 'z': case 'Z': + weight = CW_SpecificReg; + break; + case 'G': + if (const ConstantFP *C = dyn_cast<ConstantFP>(CallOperandVal)) { + if (C->isZero()) { + weight = CW_Constant; + } + } + break; + case 'I': + if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { + if (isUInt<6>(C->getZExtValue())) { + weight = CW_Constant; + } + } + break; + case 'J': + if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { + if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) { + weight = CW_Constant; + } + } + break; + case 'K': + if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { + if (C->getZExtValue() == 2) { + weight = CW_Constant; + } + } + break; + case 'L': + if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { + if (C->getZExtValue() == 0) { + weight = CW_Constant; + } + } + break; + case 'M': + if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { + if (isUInt<8>(C->getZExtValue())) { + weight = CW_Constant; + } + } + break; + case 'N': + if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { + if (C->getSExtValue() == -1) { + weight = CW_Constant; + } + } + break; + case 'O': + if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { + if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) || + (C->getZExtValue() == 24)) { + weight = CW_Constant; + } + } + break; + case 'P': + if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { + if (C->getZExtValue() == 1) { + weight = CW_Constant; + } + } + break; + case 'R': + if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { + if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) { + weight = CW_Constant; + } + } + break; + case 'Q': + weight = CW_Memory; + break; + } + + return weight; +} + +std::pair<unsigned, const TargetRegisterClass *> 
+AVRTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, + StringRef Constraint, + MVT VT) const { + // We only support i8 and i16. + // + //:FIXME: remove this assert for now since it gets sometimes executed + // assert((VT == MVT::i16 || VT == MVT::i8) && "Wrong operand type."); + + if (Constraint.size() == 1) { + switch (Constraint[0]) { + case 'a': // Simple upper registers r16..r23. + return std::make_pair(0U, &AVR::LD8loRegClass); + case 'b': // Base pointer registers: y, z. + return std::make_pair(0U, &AVR::PTRDISPREGSRegClass); + case 'd': // Upper registers r16..r31. + return std::make_pair(0U, &AVR::LD8RegClass); + case 'l': // Lower registers r0..r15. + return std::make_pair(0U, &AVR::GPR8loRegClass); + case 'e': // Pointer register pairs: x, y, z. + return std::make_pair(0U, &AVR::PTRREGSRegClass); + case 'q': // Stack pointer register: SPH:SPL. + return std::make_pair(0U, &AVR::GPRSPRegClass); + case 'r': // Any register: r0..r31. + if (VT == MVT::i8) + return std::make_pair(0U, &AVR::GPR8RegClass); + + assert(VT == MVT::i16 && "inline asm constraint too large"); + return std::make_pair(0U, &AVR::DREGSRegClass); + case 't': // Temporary register: r0. + return std::make_pair(unsigned(AVR::R0), &AVR::GPR8RegClass); + case 'w': // Special upper register pairs: r24, r26, r28, r30. + return std::make_pair(0U, &AVR::IWREGSRegClass); + case 'x': // Pointer register pair X: r27:r26. + case 'X': + return std::make_pair(unsigned(AVR::R27R26), &AVR::PTRREGSRegClass); + case 'y': // Pointer register pair Y: r29:r28. + case 'Y': + return std::make_pair(unsigned(AVR::R29R28), &AVR::PTRREGSRegClass); + case 'z': // Pointer register pair Z: r31:r30. + case 'Z': + return std::make_pair(unsigned(AVR::R31R30), &AVR::PTRREGSRegClass); + default: + break; + } + } + + return TargetLowering::getRegForInlineAsmConstraint( + Subtarget.getRegisterInfo(), Constraint, VT); +} + +void AVRTargetLowering::LowerAsmOperandForConstraint(SDValue Op, + std::string &Constraint, + std::vector<SDValue> &Ops, + SelectionDAG &DAG) const { + SDValue Result(0, 0); + SDLoc DL(Op); + EVT Ty = Op.getValueType(); + + // Currently only support length 1 constraints. + if (Constraint.length() != 1) { + return; + } + + char ConstraintLetter = Constraint[0]; + switch (ConstraintLetter) { + default: + break; + // Deal with integers first: + case 'I': + case 'J': + case 'K': + case 'L': + case 'M': + case 'N': + case 'O': + case 'P': + case 'R': { + const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); + if (!C) { + return; + } + + int64_t CVal64 = C->getSExtValue(); + uint64_t CUVal64 = C->getZExtValue(); + switch (ConstraintLetter) { + case 'I': // 0..63 + if (!isUInt<6>(CUVal64)) + return; + Result = DAG.getTargetConstant(CUVal64, DL, Ty); + break; + case 'J': // -63..0 + if (CVal64 < -63 || CVal64 > 0) + return; + Result = DAG.getTargetConstant(CVal64, DL, Ty); + break; + case 'K': // 2 + if (CUVal64 != 2) + return; + Result = DAG.getTargetConstant(CUVal64, DL, Ty); + break; + case 'L': // 0 + if (CUVal64 != 0) + return; + Result = DAG.getTargetConstant(CUVal64, DL, Ty); + break; + case 'M': // 0..255 + if (!isUInt<8>(CUVal64)) + return; + // i8 type may be printed as a negative number, + // e.g. 254 would be printed as -2, + // so we force it to i16 at least. 
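+      // For example, a hypothetical asm("ldi %0, %1" : "=d"(x) : "M"(254))
+      // should have its operand printed as 254 rather than -2.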
+ if (Ty.getSimpleVT() == MVT::i8) { + Ty = MVT::i16; + } + Result = DAG.getTargetConstant(CUVal64, DL, Ty); + break; + case 'N': // -1 + if (CVal64 != -1) + return; + Result = DAG.getTargetConstant(CVal64, DL, Ty); + break; + case 'O': // 8, 16, 24 + if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24) + return; + Result = DAG.getTargetConstant(CUVal64, DL, Ty); + break; + case 'P': // 1 + if (CUVal64 != 1) + return; + Result = DAG.getTargetConstant(CUVal64, DL, Ty); + break; + case 'R': // -6..5 + if (CVal64 < -6 || CVal64 > 5) + return; + Result = DAG.getTargetConstant(CVal64, DL, Ty); + break; + } + + break; + } + case 'G': + const ConstantFPSDNode *FC = dyn_cast<ConstantFPSDNode>(Op); + if (!FC || !FC->isZero()) + return; + // Soften float to i8 0 + Result = DAG.getTargetConstant(0, DL, MVT::i8); + break; + } + + if (Result.getNode()) { + Ops.push_back(Result); + return; + } + + return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); +} + +unsigned AVRTargetLowering::getRegisterByName(const char *RegName, + EVT VT, + SelectionDAG &DAG) const { + unsigned Reg; + + if (VT == MVT::i8) { + Reg = StringSwitch<unsigned>(RegName) + .Case("r0", AVR::R0).Case("r1", AVR::R1).Case("r2", AVR::R2) + .Case("r3", AVR::R3).Case("r4", AVR::R4).Case("r5", AVR::R5) + .Case("r6", AVR::R6).Case("r7", AVR::R7).Case("r8", AVR::R8) + .Case("r9", AVR::R9).Case("r10", AVR::R10).Case("r11", AVR::R11) + .Case("r12", AVR::R12).Case("r13", AVR::R13).Case("r14", AVR::R14) + .Case("r15", AVR::R15).Case("r16", AVR::R16).Case("r17", AVR::R17) + .Case("r18", AVR::R18).Case("r19", AVR::R19).Case("r20", AVR::R20) + .Case("r21", AVR::R21).Case("r22", AVR::R22).Case("r23", AVR::R23) + .Case("r24", AVR::R24).Case("r25", AVR::R25).Case("r26", AVR::R26) + .Case("r27", AVR::R27).Case("r28", AVR::R28).Case("r29", AVR::R29) + .Case("r30", AVR::R30).Case("r31", AVR::R31) + .Case("X", AVR::R27R26).Case("Y", AVR::R29R28).Case("Z", AVR::R31R30) + .Default(0); + } else { + Reg = StringSwitch<unsigned>(RegName) + .Case("r0", AVR::R1R0).Case("r2", AVR::R3R2) + .Case("r4", AVR::R5R4).Case("r6", AVR::R7R6) + .Case("r8", AVR::R9R8).Case("r10", AVR::R11R10) + .Case("r12", AVR::R13R12).Case("r14", AVR::R15R14) + .Case("r16", AVR::R17R16).Case("r18", AVR::R19R18) + .Case("r20", AVR::R21R20).Case("r22", AVR::R23R22) + .Case("r24", AVR::R25R24).Case("r26", AVR::R27R26) + .Case("r28", AVR::R29R28).Case("r30", AVR::R31R30) + .Case("X", AVR::R27R26).Case("Y", AVR::R29R28).Case("Z", AVR::R31R30) + .Default(0); + } + + if (Reg) + return Reg; + + report_fatal_error("Invalid register name global variable"); +} + +} // end of namespace llvm diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelLowering.h b/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelLowering.h new file mode 100644 index 000000000000..ed2d0835903c --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelLowering.h @@ -0,0 +1,184 @@ +//===-- AVRISelLowering.h - AVR DAG Lowering Interface ----------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the interfaces that AVR uses to lower LLVM code into a +// selection DAG. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_ISEL_LOWERING_H +#define LLVM_AVR_ISEL_LOWERING_H + +#include "llvm/CodeGen/CallingConvLower.h" +#include "llvm/CodeGen/TargetLowering.h" + +namespace llvm { + +namespace AVRISD { + +/// AVR Specific DAG Nodes +enum NodeType { + /// Start the numbering where the builtin ops leave off. + FIRST_NUMBER = ISD::BUILTIN_OP_END, + /// Return from subroutine. + RET_FLAG, + /// Return from ISR. + RETI_FLAG, + /// Represents an abstract call instruction, + /// which includes a bunch of information. + CALL, + /// A wrapper node for TargetConstantPool, + /// TargetExternalSymbol, and TargetGlobalAddress. + WRAPPER, + LSL, ///< Logical shift left. + LSR, ///< Logical shift right. + ASR, ///< Arithmetic shift right. + ROR, ///< Bit rotate right. + ROL, ///< Bit rotate left. + LSLLOOP, ///< A loop of single logical shift left instructions. + LSRLOOP, ///< A loop of single logical shift right instructions. + ROLLOOP, ///< A loop of single left bit rotate instructions. + RORLOOP, ///< A loop of single right bit rotate instructions. + ASRLOOP, ///< A loop of single arithmetic shift right instructions. + /// AVR conditional branches. Operand 0 is the chain operand, operand 1 + /// is the block to branch if condition is true, operand 2 is the + /// condition code, and operand 3 is the flag operand produced by a CMP + /// or TEST instruction. + BRCOND, + /// Compare instruction. + CMP, + /// Compare with carry instruction. + CMPC, + /// Test for zero or minus instruction. + TST, + /// Operand 0 and operand 1 are selection variable, operand 2 + /// is condition code and operand 3 is flag operand. + SELECT_CC +}; + +} // end of namespace AVRISD + +class AVRSubtarget; +class AVRTargetMachine; + +/// Performs target lowering for the AVR. 
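+/// The Select8/Select16 and non-constant shift pseudo instructions emitted
+/// during lowering are expanded by the custom inserters in
+/// AVRISelLowering.cpp (see EmitInstrWithCustomInserter).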
+class AVRTargetLowering : public TargetLowering { +public: + explicit AVRTargetLowering(const AVRTargetMachine &TM, + const AVRSubtarget &STI); + +public: + MVT getScalarShiftAmountTy(const DataLayout &, EVT LHSTy) const override { + return MVT::i8; + } + + MVT::SimpleValueType getCmpLibcallReturnType() const override { + return MVT::i8; + } + + const char *getTargetNodeName(unsigned Opcode) const override; + + SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override; + + void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results, + SelectionDAG &DAG) const override; + + bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, + unsigned AS, + Instruction *I = nullptr) const override; + + bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, + ISD::MemIndexedMode &AM, + SelectionDAG &DAG) const override; + + bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, + SDValue &Offset, ISD::MemIndexedMode &AM, + SelectionDAG &DAG) const override; + + bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override; + + EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, + EVT VT) const override; + + MachineBasicBlock * + EmitInstrWithCustomInserter(MachineInstr &MI, + MachineBasicBlock *MBB) const override; + + ConstraintType getConstraintType(StringRef Constraint) const override; + + ConstraintWeight + getSingleConstraintMatchWeight(AsmOperandInfo &info, + const char *constraint) const override; + + std::pair<unsigned, const TargetRegisterClass *> + getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, + StringRef Constraint, MVT VT) const override; + + unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override; + + void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, + std::vector<SDValue> &Ops, + SelectionDAG &DAG) const override; + + unsigned getRegisterByName(const char* RegName, EVT VT, + SelectionDAG &DAG) const override; + + bool shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) + const override { + return false; + } + +private: + SDValue getAVRCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue &AVRcc, + SelectionDAG &DAG, SDLoc dl) const; + SDValue LowerShifts(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; + + CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const; + + bool CanLowerReturn(CallingConv::ID CallConv, + MachineFunction &MF, bool isVarArg, + const SmallVectorImpl<ISD::OutputArg> &Outs, + LLVMContext &Context) const override; + + SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::OutputArg> &Outs, + const SmallVectorImpl<SDValue> &OutVals, const SDLoc &dl, + SelectionDAG &DAG) const override; + SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, + bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + const SDLoc &dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) const override; + SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, + 
SmallVectorImpl<SDValue> &InVals) const override; + SDValue LowerCallResult(SDValue Chain, SDValue InFlag, + CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + const SDLoc &dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) const; + +protected: + + const AVRSubtarget &Subtarget; + +private: + MachineBasicBlock *insertShift(MachineInstr &MI, MachineBasicBlock *BB) const; + MachineBasicBlock *insertMul(MachineInstr &MI, MachineBasicBlock *BB) const; +}; + +} // end namespace llvm + +#endif // LLVM_AVR_ISEL_LOWERING_H diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrFormats.td b/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrFormats.td new file mode 100644 index 000000000000..347e683cd47f --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrFormats.td @@ -0,0 +1,578 @@ +//===-- AVRInstrInfo.td - AVR Instruction Formats ----------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// AVR Instruction Format Definitions. +// +//===----------------------------------------------------------------------===// + +// A generic AVR instruction. +class AVRInst<dag outs, dag ins, string asmstr, list<dag> pattern> : Instruction +{ + let Namespace = "AVR"; + + dag OutOperandList = outs; + dag InOperandList = ins; + let AsmString = asmstr; + let Pattern = pattern; + + field bits<32> SoftFail = 0; +} + +/// A 16-bit AVR instruction. +class AVRInst16<dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst<outs, ins, asmstr, pattern> +{ + field bits<16> Inst; + + let Size = 2; +} + +/// a 32-bit AVR instruction. +class AVRInst32<dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst<outs, ins, asmstr, pattern> +{ + field bits<32> Inst; + + let Size = 4; +} + +// A class for pseudo instructions. +// Psuedo instructions are not real AVR instructions. The DAG stores +// psuedo instructions which are replaced by real AVR instructions by +// AVRExpandPseudoInsts.cpp. +// +// For example, the ADDW (add wide, as in add 16 bit values) instruction +// is defined as a pseudo instruction. In AVRExpandPseudoInsts.cpp, +// the instruction is then replaced by two add instructions - one for each byte. +class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + let Pattern = pattern; + + let isPseudo = 1; + let isCodeGenOnly = 1; +} + +//===----------------------------------------------------------------------===// +// Register / register instruction: <|opcode|ffrd|dddd|rrrr|> +// opcode = 4 bits. 
+// f = secondary opcode = 2 bits +// d = destination = 5 bits +// r = source = 5 bits +// (Accepts all registers) +//===----------------------------------------------------------------------===// +class FRdRr<bits<4> opcode, bits<2> f, dag outs, dag ins, string asmstr, + list<dag> pattern> : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<5> rd; + bits<5> rr; + + let Inst{15-12} = opcode; + let Inst{11-10} = f; + let Inst{9} = rr{4}; + let Inst{8-4} = rd; + let Inst{3-0} = rr{3-0}; +} + +class FTST<bits<4> opcode, bits<2> f, dag outs, dag ins, string asmstr, + list<dag> pattern> : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<5> rd; + + let Inst{15-12} = opcode; + let Inst{11-10} = f; + let Inst{9} = rd{4}; + let Inst{8-4} = rd; + let Inst{3-0} = rd{3-0}; +} + +//===----------------------------------------------------------------------===// +// Instruction of the format `<mnemonic> Z, Rd` +// <|1001|001r|rrrr|0ttt> +//===----------------------------------------------------------------------===// +class FZRd<bits<3> t, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<5> rd; + + let Inst{15-12} = 0b1001; + + let Inst{11-9} = 0b001; + let Inst{8} = rd{4}; + + let Inst{7-4} = rd{3-0}; + + let Inst{3} = 0; + let Inst{2-0} = t; +} + +//===----------------------------------------------------------------------===// +// Register / immediate8 instruction: <|opcode|KKKK|dddd|KKKK|> +// opcode = 4 bits. +// K = constant data = 8 bits +// d = destination = 4 bits +// (Only accepts r16-r31) +//===----------------------------------------------------------------------===// +class FRdK<bits<4> opcode, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<4> rd; + bits<8> k; + + let Inst{15-12} = opcode; + let Inst{11-8} = k{7-4}; + let Inst{7-4} = rd{3-0}; + let Inst{3-0} = k{3-0}; + + let isAsCheapAsAMove = 1; +} + +//===----------------------------------------------------------------------===// +// Register instruction: <|opcode|fffd|dddd|ffff|> +// opcode = 4 bits. +// f = secondary opcode = 7 bits +// d = destination = 5 bits +// (Accepts all registers) +//===----------------------------------------------------------------------===// +class FRd<bits<4> opcode, bits<7> f, dag outs, dag ins, string asmstr, + list<dag> pattern> : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<5> d; + + let Inst{15-12} = opcode; + let Inst{11-9} = f{6-4}; + let Inst{8-4} = d; + let Inst{3-0} = f{3-0}; +} + +//===----------------------------------------------------------------------===// +// [STD/LDD] P+q, Rr special encoding: <|10q0|qqtr|rrrr|pqqq> +// t = type (1 for STD, 0 for LDD) +// q = displacement (6 bits) +// r = register (5 bits) +// p = pointer register (1 bit) [1 for Y, 0 for Z] +//===----------------------------------------------------------------------===// +class FSTDLDD<bit type, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<7> memri; + bits<5> reg; // the GP register + + let Inst{15-14} = 0b10; + let Inst{13} = memri{5}; + let Inst{12} = 0; + + let Inst{11-10} = memri{4-3}; + let Inst{9} = type; + let Inst{8} = reg{4}; + + let Inst{7-4} = reg{3-0}; + + let Inst{3} = memri{6}; + let Inst{2-0} = memri{2-0}; +} + +//===---------------------------------------------------------------------===// +// An ST/LD instruction. 
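+// Covers, for example, `ld r24, X+` and `st -Y, r16`.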
+// <|100i|00tr|rrrr|ppaa|> +// t = type (1 for store, 0 for load) +// a = regular/postinc/predec (reg = 0b00, postinc = 0b01, predec = 0b10) +// p = pointer register +// r = src/dst register +// +// Note that the bit labelled 'i' above does not follow a simple pattern, +// so there exists a post encoder method to set it manually. +//===---------------------------------------------------------------------===// +class FSTLD<bit type, bits<2> mode, dag outs, dag ins, + string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<2> ptrreg; + bits<5> reg; + + let Inst{15-13} = 0b100; + // This bit varies depending on the arguments and the mode. + // We have a post encoder method to set this bit manually. + let Inst{12} = 0; + + let Inst{11-10} = 0b00; + let Inst{9} = type; + let Inst{8} = reg{4}; + + let Inst{7-4} = reg{3-0}; + + let Inst{3-2} = ptrreg{1-0}; + let Inst{1-0} = mode{1-0}; + + let PostEncoderMethod = "loadStorePostEncoder"; +} + +//===---------------------------------------------------------------------===// +// Special format for the LPM/ELPM instructions +// [E]LPM Rd, Z[+] +// <|1001|000d|dddd|01ep> +// d = destination register +// e = is elpm +// p = is postincrement +//===---------------------------------------------------------------------===// +class FLPMX<bit e, bit p, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<5> reg; + + let Inst{15-12} = 0b1001; + + let Inst{11-9} = 0b000; + let Inst{8} = reg{4}; + + let Inst{7-4} = reg{3-0}; + + let Inst{3-2} = 0b01; + let Inst{1} = e; + let Inst{0} = p; +} + +//===----------------------------------------------------------------------===// +// MOVWRdRr special encoding: <|0000|0001|dddd|rrrr|> +// d = destination = 4 bits +// r = source = 4 bits +// (Only accepts even registers) +//===----------------------------------------------------------------------===// +class FMOVWRdRr<dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<5> d; + bits<5> r; + + let Inst{15-8} = 0b00000001; + let Inst{7-4} = d{4-1}; + let Inst{3-0} = r{4-1}; +} + +//===----------------------------------------------------------------------===// +// MULSrr special encoding: <|0000|0010|dddd|rrrr|> +// d = multiplicand = 4 bits +// r = multiplier = 4 bits +// (Only accepts r16-r31) +//===----------------------------------------------------------------------===// +class FMUL2RdRr<bit f, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<5> rd; // accept 5 bits but only encode the lower 4 + bits<5> rr; // accept 5 bits but only encode the lower 4 + + let Inst{15-9} = 0b0000001; + let Inst{8} = f; + let Inst{7-4} = rd{3-0}; + let Inst{3-0} = rr{3-0}; +} + +// Special encoding for the FMUL family of instructions. 
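+// (FMUL, FMULS and FMULSU operate on r16..r23 and place their 16-bit result
+// in R1:R0.)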
+// +// <0000|0011|fddd|frrr|> +// +// ff = 0b01 for FMUL +// 0b10 for FMULS +// 0b11 for FMULSU +// +// ddd = destination register +// rrr = source register +class FFMULRdRr<bits<2> f, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<3> rd; + bits<3> rr; + + let Inst{15-8} = 0b00000011; + let Inst{7} = f{1}; + let Inst{6-4} = rd; + let Inst{3} = f{0}; + let Inst{2-0} = rr; +} + + +//===----------------------------------------------------------------------===// +// Arithmetic word instructions (ADIW / SBIW): <|1001|011f|kkdd|kkkk|> +// f = secondary opcode = 1 bit +// k = constant data = 6 bits +// d = destination = 4 bits +// (Only accepts r25:24 r27:26 r29:28 r31:30) +//===----------------------------------------------------------------------===// +class FWRdK<bit f, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<5> dst; // accept 5 bits but only encode bits 1 and 2 + bits<6> k; + + let Inst{15-9} = 0b1001011; + let Inst{8} = f; + let Inst{7-6} = k{5-4}; + let Inst{5-4} = dst{2-1}; + let Inst{3-0} = k{3-0}; +} + +//===----------------------------------------------------------------------===// +// In I/O instruction: <|1011|0AAd|dddd|AAAA|> +// A = I/O location address = 6 bits +// d = destination = 5 bits +// (Accepts all registers) +//===----------------------------------------------------------------------===// +class FIORdA<dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<5> d; + bits<6> A; + + let Inst{15-11} = 0b10110; + let Inst{10-9} = A{5-4}; + let Inst{8-4} = d; + let Inst{3-0} = A{3-0}; +} + +//===----------------------------------------------------------------------===// +// Out I/O instruction: <|1011|1AAr|rrrr|AAAA|> +// A = I/O location address = 6 bits +// d = destination = 5 bits +// (Accepts all registers) +//===----------------------------------------------------------------------===// +class FIOARr<dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<6> A; + bits<5> r; + + let Inst{15-11} = 0b10111; + let Inst{10-9} = A{5-4}; + let Inst{8-4} = r; + let Inst{3-0} = A{3-0}; +} + +//===----------------------------------------------------------------------===// +// I/O bit instruction. +// <|1001|10tt|AAAA|Abbb> +// t = type (1 for SBI, 0 for CBI) +// A = I/O location address (5 bits) +// b = bit number +//===----------------------------------------------------------------------===// +class FIOBIT<bits<2> t, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<5> A; + bits<3> b; + + let Inst{15-12} = 0b1001; + + let Inst{11-10} = 0b10; + let Inst{9-8} = t; + + let Inst{7-4} = A{4-1}; + + let Inst{3} = A{0}; + let Inst{2-0} = b{2-0}; +} + +//===----------------------------------------------------------------------===// +// BST/BLD instruction. +// <|1111|1ttd|dddd|0bbb> +// t = type (1 for BST, 0 for BLD) +// d = destination register +// b = bit +//===----------------------------------------------------------------------===// +class FRdB<bits<2> t, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<5> rd; + bits<3> b; + + let Inst{15-12} = 0b1111; + + let Inst{11} = 0b1; + let Inst{10-9} = t; + let Inst{8} = rd{4}; + + let Inst{7-4} = rd{3-0}; + + let Inst{3} = 0; + let Inst{2-0} = b; +} + +// Special encoding for the `DES K` instruction. 
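+// (K is the DES round number, 0..15; the instruction only exists on XMEGA
+// cores.)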
+// +// <|1001|0100|KKKK|1011> +// +// KKKK = 4 bit immediate +class FDES<dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<4> k; + + let Inst{15-12} = 0b1001; + + let Inst{11-8} = 0b0100; + + let Inst{7-4} = k; + + let Inst{3-0} = 0b1011; +} + +//===----------------------------------------------------------------------===// +// Conditional Branching instructions: <|1111|0fkk|kkkk|ksss|> +// f = secondary opcode = 1 bit +// k = constant address = 7 bits +// s = bit in status register = 3 bits +//===----------------------------------------------------------------------===// +class FBRsk<bit f, bits<3> s, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<7> k; + + let Inst{15-11} = 0b11110; + let Inst{10} = f; + let Inst{9-3} = k; + let Inst{2-0} = s; +} + +//===----------------------------------------------------------------------===// +// Special, opcode only instructions: <|opcode|> +//===----------------------------------------------------------------------===// + +class F16<bits<16> opcode, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + let Inst = opcode; +} + +class F32<bits<32> opcode, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst32<outs, ins, asmstr, pattern> +{ + let Inst = opcode; +} + +//===----------------------------------------------------------------------===// +// Branching instructions with immediate12: <|110f|kkkk|kkkk|kkkk|> +// f = secondary opcode = 1 bit +// k = constant address = 12 bits +//===----------------------------------------------------------------------===// +class FBRk<bit f, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<12> k; + + let Inst{15-13} = 0b110; + let Inst{12} = f; + let Inst{11-0} = k; +} + +//===----------------------------------------------------------------------===// +// 32 bits branching instructions: <|1001|010k|kkkk|fffk|kkkk|kkkk|kkkk|kkkk|> +// f = secondary opcode = 3 bits +// k = constant address = 22 bits +//===----------------------------------------------------------------------===// +class F32BRk<bits<3> f, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst32<outs, ins, asmstr, pattern> +{ + bits<22> k; + + let Inst{31-25} = 0b1001010; + let Inst{24-20} = k{21-17}; + let Inst{19-17} = f; + let Inst{16-0} = k{16-0}; +} + +//===----------------------------------------------------------------------===// +// 32 bits direct mem instructions: <|1001|00fd|dddd|0000|kkkk|kkkk|kkkk|kkkk|> +// f = secondary opcode = 1 bit +// d = destination = 5 bits +// k = constant address = 16 bits +// (Accepts all registers) +//===----------------------------------------------------------------------===// +class F32DM<bit f, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst32<outs, ins, asmstr, pattern> +{ + bits<5> rd; + bits<16> k; + + let Inst{31-28} = 0b1001; + + let Inst{27-26} = 0b00; + let Inst{25} = f; + let Inst{24} = rd{4}; + + let Inst{23-20} = rd{3-0}; + + let Inst{19-16} = 0b0000; + + let Inst{15-0} = k; +} + +// <|1001|0100|bfff|1000> +class FS<bit b, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<3> s; + + let Inst{15-12} = 0b1001; + + let Inst{11-8} = 0b0100; + + let Inst{7} = b; + let Inst{6-4} = s; + + let Inst{3-0} = 0b1000; +} + +// Set/clr bit in status flag instructions/ +// <BRBS|BRBC> s, k +// 
--------------------- +// <|1111|0fkk|kkkk|ksss> +class FSK<bit f, dag outs, dag ins, string asmstr, list<dag> pattern> + : AVRInst16<outs, ins, asmstr, pattern> +{ + bits<7> k; + bits<3> s; + + let Inst{15-12} = 0b1111; + + let Inst{11} = 0; + let Inst{10} = f; + let Inst{9-8} = k{6-5}; + + let Inst{7-4} = k{4-1}; + + let Inst{3} = k{0}; + let Inst{2-0} = s; +} + +class ExtensionPseudo<dag outs, dag ins, string asmstr, list<dag> pattern> + : Pseudo<outs, ins, asmstr, pattern> +{ + let Defs = [SREG]; +} + +class StorePseudo<dag outs, dag ins, string asmstr, list<dag> pattern> + : Pseudo<outs, ins, asmstr, pattern> +{ + let Defs = [SP]; +} + +class SelectPseudo<dag outs, dag ins, string asmstr, list<dag> pattern> + : Pseudo<outs, ins, asmstr, pattern> +{ + let usesCustomInserter = 1; + + let Uses = [SREG]; +} + +class ShiftPseudo<dag outs, dag ins, string asmstr, list<dag> pattern> + : Pseudo<outs, ins, asmstr, pattern> +{ + let usesCustomInserter = 1; + + let Defs = [SREG]; +} + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.cpp new file mode 100644 index 000000000000..ba7a95e92c5c --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.cpp @@ -0,0 +1,574 @@ +//===-- AVRInstrInfo.cpp - AVR Instruction Information --------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the AVR implementation of the TargetInstrInfo class. +// +//===----------------------------------------------------------------------===// + +#include "AVRInstrInfo.h" + +#include "llvm/ADT/STLExtras.h" +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineMemOperand.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/Function.h" +#include "llvm/MC/MCContext.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/TargetRegistry.h" + +#include "AVR.h" +#include "AVRMachineFunctionInfo.h" +#include "AVRRegisterInfo.h" +#include "AVRTargetMachine.h" +#include "MCTargetDesc/AVRMCTargetDesc.h" + +#define GET_INSTRINFO_CTOR_DTOR +#include "AVRGenInstrInfo.inc" + +namespace llvm { + +AVRInstrInfo::AVRInstrInfo() + : AVRGenInstrInfo(AVR::ADJCALLSTACKDOWN, AVR::ADJCALLSTACKUP), RI() {} + +void AVRInstrInfo::copyPhysReg(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const DebugLoc &DL, unsigned DestReg, + unsigned SrcReg, bool KillSrc) const { + const AVRSubtarget &STI = MBB.getParent()->getSubtarget<AVRSubtarget>(); + const AVRRegisterInfo &TRI = *STI.getRegisterInfo(); + unsigned Opc; + + // Not all AVR devices support the 16-bit `MOVW` instruction. + if (AVR::DREGSRegClass.contains(DestReg, SrcReg)) { + if (STI.hasMOVW()) { + BuildMI(MBB, MI, DL, get(AVR::MOVWRdRr), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc)); + } else { + unsigned DestLo, DestHi, SrcLo, SrcHi; + + TRI.splitReg(DestReg, DestLo, DestHi); + TRI.splitReg(SrcReg, SrcLo, SrcHi); + + // Copy each individual register with the `MOV` instruction. 
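+      // For example, copying r25:r24 into Z (r31:r30) on a device without
+      // MOVW becomes:
+      //   mov r30, r24
+      //   mov r31, r25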
+ BuildMI(MBB, MI, DL, get(AVR::MOVRdRr), DestLo) + .addReg(SrcLo, getKillRegState(KillSrc)); + BuildMI(MBB, MI, DL, get(AVR::MOVRdRr), DestHi) + .addReg(SrcHi, getKillRegState(KillSrc)); + } + } else { + if (AVR::GPR8RegClass.contains(DestReg, SrcReg)) { + Opc = AVR::MOVRdRr; + } else if (SrcReg == AVR::SP && AVR::DREGSRegClass.contains(DestReg)) { + Opc = AVR::SPREAD; + } else if (DestReg == AVR::SP && AVR::DREGSRegClass.contains(SrcReg)) { + Opc = AVR::SPWRITE; + } else { + llvm_unreachable("Impossible reg-to-reg copy"); + } + + BuildMI(MBB, MI, DL, get(Opc), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc)); + } +} + +unsigned AVRInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, + int &FrameIndex) const { + switch (MI.getOpcode()) { + case AVR::LDDRdPtrQ: + case AVR::LDDWRdYQ: { //:FIXME: remove this once PR13375 gets fixed + if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() && + MI.getOperand(2).getImm() == 0) { + FrameIndex = MI.getOperand(1).getIndex(); + return MI.getOperand(0).getReg(); + } + break; + } + default: + break; + } + + return 0; +} + +unsigned AVRInstrInfo::isStoreToStackSlot(const MachineInstr &MI, + int &FrameIndex) const { + switch (MI.getOpcode()) { + case AVR::STDPtrQRr: + case AVR::STDWPtrQRr: { + if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() && + MI.getOperand(1).getImm() == 0) { + FrameIndex = MI.getOperand(0).getIndex(); + return MI.getOperand(2).getReg(); + } + break; + } + default: + break; + } + + return 0; +} + +void AVRInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned SrcReg, bool isKill, + int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const { + MachineFunction &MF = *MBB.getParent(); + AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>(); + + AFI->setHasSpills(true); + + DebugLoc DL; + if (MI != MBB.end()) { + DL = MI->getDebugLoc(); + } + + const MachineFrameInfo &MFI = MF.getFrameInfo(); + + MachineMemOperand *MMO = MF.getMachineMemOperand( + MachinePointerInfo::getFixedStack(MF, FrameIndex), + MachineMemOperand::MOStore, MFI.getObjectSize(FrameIndex), + MFI.getObjectAlignment(FrameIndex)); + + unsigned Opcode = 0; + if (TRI->isTypeLegalForClass(*RC, MVT::i8)) { + Opcode = AVR::STDPtrQRr; + } else if (TRI->isTypeLegalForClass(*RC, MVT::i16)) { + Opcode = AVR::STDWPtrQRr; + } else { + llvm_unreachable("Cannot store this register into a stack slot!"); + } + + BuildMI(MBB, MI, DL, get(Opcode)) + .addFrameIndex(FrameIndex) + .addImm(0) + .addReg(SrcReg, getKillRegState(isKill)) + .addMemOperand(MMO); +} + +void AVRInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned DestReg, int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const { + DebugLoc DL; + if (MI != MBB.end()) { + DL = MI->getDebugLoc(); + } + + MachineFunction &MF = *MBB.getParent(); + const MachineFrameInfo &MFI = MF.getFrameInfo(); + + MachineMemOperand *MMO = MF.getMachineMemOperand( + MachinePointerInfo::getFixedStack(MF, FrameIndex), + MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex), + MFI.getObjectAlignment(FrameIndex)); + + unsigned Opcode = 0; + if (TRI->isTypeLegalForClass(*RC, MVT::i8)) { + Opcode = AVR::LDDRdPtrQ; + } else if (TRI->isTypeLegalForClass(*RC, MVT::i16)) { + // Opcode = AVR::LDDWRdPtrQ; + //:FIXME: remove this once PR13375 gets fixed + Opcode = AVR::LDDWRdYQ; + } else { + llvm_unreachable("Cannot load this register from a stack slot!"); + } + + BuildMI(MBB, MI, 
DL, get(Opcode), DestReg) + .addFrameIndex(FrameIndex) + .addImm(0) + .addMemOperand(MMO); +} + +const MCInstrDesc &AVRInstrInfo::getBrCond(AVRCC::CondCodes CC) const { + switch (CC) { + default: + llvm_unreachable("Unknown condition code!"); + case AVRCC::COND_EQ: + return get(AVR::BREQk); + case AVRCC::COND_NE: + return get(AVR::BRNEk); + case AVRCC::COND_GE: + return get(AVR::BRGEk); + case AVRCC::COND_LT: + return get(AVR::BRLTk); + case AVRCC::COND_SH: + return get(AVR::BRSHk); + case AVRCC::COND_LO: + return get(AVR::BRLOk); + case AVRCC::COND_MI: + return get(AVR::BRMIk); + case AVRCC::COND_PL: + return get(AVR::BRPLk); + } +} + +AVRCC::CondCodes AVRInstrInfo::getCondFromBranchOpc(unsigned Opc) const { + switch (Opc) { + default: + return AVRCC::COND_INVALID; + case AVR::BREQk: + return AVRCC::COND_EQ; + case AVR::BRNEk: + return AVRCC::COND_NE; + case AVR::BRSHk: + return AVRCC::COND_SH; + case AVR::BRLOk: + return AVRCC::COND_LO; + case AVR::BRMIk: + return AVRCC::COND_MI; + case AVR::BRPLk: + return AVRCC::COND_PL; + case AVR::BRGEk: + return AVRCC::COND_GE; + case AVR::BRLTk: + return AVRCC::COND_LT; + } +} + +AVRCC::CondCodes AVRInstrInfo::getOppositeCondition(AVRCC::CondCodes CC) const { + switch (CC) { + default: + llvm_unreachable("Invalid condition!"); + case AVRCC::COND_EQ: + return AVRCC::COND_NE; + case AVRCC::COND_NE: + return AVRCC::COND_EQ; + case AVRCC::COND_SH: + return AVRCC::COND_LO; + case AVRCC::COND_LO: + return AVRCC::COND_SH; + case AVRCC::COND_GE: + return AVRCC::COND_LT; + case AVRCC::COND_LT: + return AVRCC::COND_GE; + case AVRCC::COND_MI: + return AVRCC::COND_PL; + case AVRCC::COND_PL: + return AVRCC::COND_MI; + } +} + +bool AVRInstrInfo::analyzeBranch(MachineBasicBlock &MBB, + MachineBasicBlock *&TBB, + MachineBasicBlock *&FBB, + SmallVectorImpl<MachineOperand> &Cond, + bool AllowModify) const { + // Start from the bottom of the block and work up, examining the + // terminator instructions. + MachineBasicBlock::iterator I = MBB.end(); + MachineBasicBlock::iterator UnCondBrIter = MBB.end(); + + while (I != MBB.begin()) { + --I; + if (I->isDebugInstr()) { + continue; + } + + // Working from the bottom, when we see a non-terminator + // instruction, we're done. + if (!isUnpredicatedTerminator(*I)) { + break; + } + + // A terminator that isn't a branch can't easily be handled + // by this analysis. + if (!I->getDesc().isBranch()) { + return true; + } + + // Handle unconditional branches. + //:TODO: add here jmp + if (I->getOpcode() == AVR::RJMPk) { + UnCondBrIter = I; + + if (!AllowModify) { + TBB = I->getOperand(0).getMBB(); + continue; + } + + // If the block has any instructions after a JMP, delete them. + while (std::next(I) != MBB.end()) { + std::next(I)->eraseFromParent(); + } + + Cond.clear(); + FBB = 0; + + // Delete the JMP if it's equivalent to a fall-through. + if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) { + TBB = 0; + I->eraseFromParent(); + I = MBB.end(); + UnCondBrIter = MBB.end(); + continue; + } + + // TBB is used to indicate the unconditinal destination. + TBB = I->getOperand(0).getMBB(); + continue; + } + + // Handle conditional branches. + AVRCC::CondCodes BranchCode = getCondFromBranchOpc(I->getOpcode()); + if (BranchCode == AVRCC::COND_INVALID) { + return true; // Can't handle indirect branch. + } + + // Working from the bottom, handle the first conditional branch. 
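+    // A branch this analysis understands is reported as a single condition
+    // code immediate plus the target block, e.g. `breq .LBB0_2` becomes
+    // Cond = {COND_EQ} with TBB pointing at .LBB0_2.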
+ if (Cond.empty()) { + MachineBasicBlock *TargetBB = I->getOperand(0).getMBB(); + if (AllowModify && UnCondBrIter != MBB.end() && + MBB.isLayoutSuccessor(TargetBB)) { + // If we can modify the code and it ends in something like: + // + // jCC L1 + // jmp L2 + // L1: + // ... + // L2: + // + // Then we can change this to: + // + // jnCC L2 + // L1: + // ... + // L2: + // + // Which is a bit more efficient. + // We conditionally jump to the fall-through block. + BranchCode = getOppositeCondition(BranchCode); + unsigned JNCC = getBrCond(BranchCode).getOpcode(); + MachineBasicBlock::iterator OldInst = I; + + BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(JNCC)) + .addMBB(UnCondBrIter->getOperand(0).getMBB()); + BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(AVR::RJMPk)) + .addMBB(TargetBB); + + OldInst->eraseFromParent(); + UnCondBrIter->eraseFromParent(); + + // Restart the analysis. + UnCondBrIter = MBB.end(); + I = MBB.end(); + continue; + } + + FBB = TBB; + TBB = I->getOperand(0).getMBB(); + Cond.push_back(MachineOperand::CreateImm(BranchCode)); + continue; + } + + // Handle subsequent conditional branches. Only handle the case where all + // conditional branches branch to the same destination. + assert(Cond.size() == 1); + assert(TBB); + + // Only handle the case where all conditional branches branch to + // the same destination. + if (TBB != I->getOperand(0).getMBB()) { + return true; + } + + AVRCC::CondCodes OldBranchCode = (AVRCC::CondCodes)Cond[0].getImm(); + // If the conditions are the same, we can leave them alone. + if (OldBranchCode == BranchCode) { + continue; + } + + return true; + } + + return false; +} + +unsigned AVRInstrInfo::insertBranch(MachineBasicBlock &MBB, + MachineBasicBlock *TBB, + MachineBasicBlock *FBB, + ArrayRef<MachineOperand> Cond, + const DebugLoc &DL, + int *BytesAdded) const { + if (BytesAdded) *BytesAdded = 0; + + // Shouldn't be a fall through. + assert(TBB && "insertBranch must not be told to insert a fallthrough"); + assert((Cond.size() == 1 || Cond.size() == 0) && + "AVR branch conditions have one component!"); + + if (Cond.empty()) { + assert(!FBB && "Unconditional branch with multiple successors!"); + auto &MI = *BuildMI(&MBB, DL, get(AVR::RJMPk)).addMBB(TBB); + if (BytesAdded) + *BytesAdded += getInstSizeInBytes(MI); + return 1; + } + + // Conditional branch. + unsigned Count = 0; + AVRCC::CondCodes CC = (AVRCC::CondCodes)Cond[0].getImm(); + auto &CondMI = *BuildMI(&MBB, DL, getBrCond(CC)).addMBB(TBB); + + if (BytesAdded) *BytesAdded += getInstSizeInBytes(CondMI); + ++Count; + + if (FBB) { + // Two-way Conditional branch. Insert the second branch. + auto &MI = *BuildMI(&MBB, DL, get(AVR::RJMPk)).addMBB(FBB); + if (BytesAdded) *BytesAdded += getInstSizeInBytes(MI); + ++Count; + } + + return Count; +} + +unsigned AVRInstrInfo::removeBranch(MachineBasicBlock &MBB, + int *BytesRemoved) const { + if (BytesRemoved) *BytesRemoved = 0; + + MachineBasicBlock::iterator I = MBB.end(); + unsigned Count = 0; + + while (I != MBB.begin()) { + --I; + if (I->isDebugInstr()) { + continue; + } + //:TODO: add here the missing jmp instructions once they are implemented + // like jmp, {e}ijmp, and other cond branches, ... + if (I->getOpcode() != AVR::RJMPk && + getCondFromBranchOpc(I->getOpcode()) == AVRCC::COND_INVALID) { + break; + } + + // Remove the branch. 
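+    // Also report its size, so that passes tracking block offsets (such as
+    // branch relaxation) stay accurate.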
+ if (BytesRemoved) *BytesRemoved += getInstSizeInBytes(*I); + I->eraseFromParent(); + I = MBB.end(); + ++Count; + } + + return Count; +} + +bool AVRInstrInfo::reverseBranchCondition( + SmallVectorImpl<MachineOperand> &Cond) const { + assert(Cond.size() == 1 && "Invalid AVR branch condition!"); + + AVRCC::CondCodes CC = static_cast<AVRCC::CondCodes>(Cond[0].getImm()); + Cond[0].setImm(getOppositeCondition(CC)); + + return false; +} + +unsigned AVRInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { + unsigned Opcode = MI.getOpcode(); + + switch (Opcode) { + // A regular instruction + default: { + const MCInstrDesc &Desc = get(Opcode); + return Desc.getSize(); + } + case TargetOpcode::EH_LABEL: + case TargetOpcode::IMPLICIT_DEF: + case TargetOpcode::KILL: + case TargetOpcode::DBG_VALUE: + return 0; + case TargetOpcode::INLINEASM: + case TargetOpcode::INLINEASM_BR: { + const MachineFunction &MF = *MI.getParent()->getParent(); + const AVRTargetMachine &TM = static_cast<const AVRTargetMachine&>(MF.getTarget()); + const AVRSubtarget &STI = MF.getSubtarget<AVRSubtarget>(); + const TargetInstrInfo &TII = *STI.getInstrInfo(); + + return TII.getInlineAsmLength(MI.getOperand(0).getSymbolName(), + *TM.getMCAsmInfo()); + } + } +} + +MachineBasicBlock * +AVRInstrInfo::getBranchDestBlock(const MachineInstr &MI) const { + switch (MI.getOpcode()) { + default: + llvm_unreachable("unexpected opcode!"); + case AVR::JMPk: + case AVR::CALLk: + case AVR::RCALLk: + case AVR::RJMPk: + case AVR::BREQk: + case AVR::BRNEk: + case AVR::BRSHk: + case AVR::BRLOk: + case AVR::BRMIk: + case AVR::BRPLk: + case AVR::BRGEk: + case AVR::BRLTk: + return MI.getOperand(0).getMBB(); + case AVR::BRBSsk: + case AVR::BRBCsk: + return MI.getOperand(1).getMBB(); + case AVR::SBRCRrB: + case AVR::SBRSRrB: + case AVR::SBICAb: + case AVR::SBISAb: + llvm_unreachable("unimplemented branch instructions"); + } +} + +bool AVRInstrInfo::isBranchOffsetInRange(unsigned BranchOp, + int64_t BrOffset) const { + + switch (BranchOp) { + default: + llvm_unreachable("unexpected opcode!"); + case AVR::JMPk: + case AVR::CALLk: + return true; + case AVR::RCALLk: + case AVR::RJMPk: + return isIntN(13, BrOffset); + case AVR::BRBSsk: + case AVR::BRBCsk: + case AVR::BREQk: + case AVR::BRNEk: + case AVR::BRSHk: + case AVR::BRLOk: + case AVR::BRMIk: + case AVR::BRPLk: + case AVR::BRGEk: + case AVR::BRLTk: + return isIntN(7, BrOffset); + } +} + +unsigned AVRInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, + MachineBasicBlock &NewDestBB, + const DebugLoc &DL, + int64_t BrOffset, + RegScavenger *RS) const { + // This method inserts a *direct* branch (JMP), despite its name. + // LLVM calls this method to fixup unconditional branches; it never calls + // insertBranch or some hypothetical "insertDirectBranch". + // See lib/CodeGen/RegisterRelaxation.cpp for details. + // We end up here when a jump is too long for a RJMP instruction. + auto &MI = *BuildMI(&MBB, DL, get(AVR::JMPk)).addMBB(&NewDestBB); + + return getInstSizeInBytes(MI); +} + +} // end of namespace llvm + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.h b/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.h new file mode 100644 index 000000000000..ba74af325474 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.h @@ -0,0 +1,121 @@ +//===-- AVRInstrInfo.h - AVR Instruction Information ------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the AVR implementation of the TargetInstrInfo class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_INSTR_INFO_H +#define LLVM_AVR_INSTR_INFO_H + +#include "llvm/CodeGen/TargetInstrInfo.h" + +#include "AVRRegisterInfo.h" + +#define GET_INSTRINFO_HEADER +#include "AVRGenInstrInfo.inc" +#undef GET_INSTRINFO_HEADER + +namespace llvm { + +namespace AVRCC { + +/// AVR specific condition codes. +/// These correspond to `AVR_*_COND` in `AVRInstrInfo.td`. +/// They must be kept in synch. +enum CondCodes { + COND_EQ, //!< Equal + COND_NE, //!< Not equal + COND_GE, //!< Greater than or equal + COND_LT, //!< Less than + COND_SH, //!< Unsigned same or higher + COND_LO, //!< Unsigned lower + COND_MI, //!< Minus + COND_PL, //!< Plus + COND_INVALID +}; + +} // end of namespace AVRCC + +namespace AVRII { + +/// Specifies a target operand flag. +enum TOF { + MO_NO_FLAG, + + /// On a symbol operand, this represents the lo part. + MO_LO = (1 << 1), + + /// On a symbol operand, this represents the hi part. + MO_HI = (1 << 2), + + /// On a symbol operand, this represents it has to be negated. + MO_NEG = (1 << 3) +}; + +} // end of namespace AVRII + +/// Utilities related to the AVR instruction set. +class AVRInstrInfo : public AVRGenInstrInfo { +public: + explicit AVRInstrInfo(); + + const AVRRegisterInfo &getRegisterInfo() const { return RI; } + const MCInstrDesc &getBrCond(AVRCC::CondCodes CC) const; + AVRCC::CondCodes getCondFromBranchOpc(unsigned Opc) const; + AVRCC::CondCodes getOppositeCondition(AVRCC::CondCodes CC) const; + unsigned getInstSizeInBytes(const MachineInstr &MI) const override; + + void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, + const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, + bool KillSrc) const override; + void storeRegToStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, unsigned SrcReg, + bool isKill, int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const override; + void loadRegFromStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, unsigned DestReg, + int FrameIndex, const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const override; + unsigned isLoadFromStackSlot(const MachineInstr &MI, + int &FrameIndex) const override; + unsigned isStoreToStackSlot(const MachineInstr &MI, + int &FrameIndex) const override; + + // Branch analysis. 
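+  // These hooks are what the generic branch folding and branch relaxation
+  // passes use to reason about and rewrite AVR branches.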
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, + MachineBasicBlock *&FBB, + SmallVectorImpl<MachineOperand> &Cond, + bool AllowModify = false) const override; + unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, + MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond, + const DebugLoc &DL, + int *BytesAdded = nullptr) const override; + unsigned removeBranch(MachineBasicBlock &MBB, + int *BytesRemoved = nullptr) const override; + bool + reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override; + + MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override; + + bool isBranchOffsetInRange(unsigned BranchOpc, + int64_t BrOffset) const override; + + unsigned insertIndirectBranch(MachineBasicBlock &MBB, + MachineBasicBlock &NewDestBB, + const DebugLoc &DL, + int64_t BrOffset, + RegScavenger *RS) const override; +private: + const AVRRegisterInfo RI; +}; + +} // end namespace llvm + +#endif // LLVM_AVR_INSTR_INFO_H diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.td new file mode 100644 index 000000000000..caca9b617609 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.td @@ -0,0 +1,2103 @@ +//===-- AVRInstrInfo.td - AVR Instruction defs -------------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file describes the AVR instructions in TableGen format. +// +//===----------------------------------------------------------------------===// + +include "AVRInstrFormats.td" + +//===----------------------------------------------------------------------===// +// AVR Type Profiles +//===----------------------------------------------------------------------===// + +def SDT_AVRCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i16>, SDTCisVT<1, i16>]>; +def SDT_AVRCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i16>, SDTCisVT<1, i16>]>; +def SDT_AVRCall : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>; +def SDT_AVRWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>; +def SDT_AVRBrcond : SDTypeProfile<0, 2, + [SDTCisVT<0, OtherVT>, SDTCisVT<1, i8>]>; +def SDT_AVRCmp : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>; +def SDT_AVRTst : SDTypeProfile<0, 1, [SDTCisInt<0>]>; +def SDT_AVRSelectCC : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, + SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>; + +//===----------------------------------------------------------------------===// +// AVR Specific Node Definitions +//===----------------------------------------------------------------------===// + +def AVRretflag : SDNode<"AVRISD::RET_FLAG", SDTNone, + [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; +def AVRretiflag : SDNode<"AVRISD::RETI_FLAG", SDTNone, + [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; + +def AVRcallseq_start : SDNode<"ISD::CALLSEQ_START", SDT_AVRCallSeqStart, + [SDNPHasChain, SDNPOutGlue]>; +def AVRcallseq_end : SDNode<"ISD::CALLSEQ_END", SDT_AVRCallSeqEnd, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; + +def AVRcall : SDNode<"AVRISD::CALL", SDT_AVRCall, + [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>; + +def AVRWrapper : SDNode<"AVRISD::WRAPPER", SDT_AVRWrapper>; + +def AVRbrcond : SDNode<"AVRISD::BRCOND", SDT_AVRBrcond, + [SDNPHasChain, SDNPInGlue]>; +def AVRcmp : 
SDNode<"AVRISD::CMP", SDT_AVRCmp, [SDNPOutGlue]>; +def AVRcmpc : SDNode<"AVRISD::CMPC", SDT_AVRCmp, [SDNPInGlue, SDNPOutGlue]>; +def AVRtst : SDNode<"AVRISD::TST", SDT_AVRTst, [SDNPOutGlue]>; +def AVRselectcc: SDNode<"AVRISD::SELECT_CC", SDT_AVRSelectCC, [SDNPInGlue]>; + +// Shift nodes. +def AVRlsl : SDNode<"AVRISD::LSL", SDTIntUnaryOp>; +def AVRlsr : SDNode<"AVRISD::LSR", SDTIntUnaryOp>; +def AVRrol : SDNode<"AVRISD::ROL", SDTIntUnaryOp>; +def AVRror : SDNode<"AVRISD::ROR", SDTIntUnaryOp>; +def AVRasr : SDNode<"AVRISD::ASR", SDTIntUnaryOp>; + +// Pseudo shift nodes for non-constant shift amounts. +def AVRlslLoop : SDNode<"AVRISD::LSLLOOP", SDTIntShiftOp>; +def AVRlsrLoop : SDNode<"AVRISD::LSRLOOP", SDTIntShiftOp>; +def AVRrolLoop : SDNode<"AVRISD::ROLLOOP", SDTIntShiftOp>; +def AVRrorLoop : SDNode<"AVRISD::RORLOOP", SDTIntShiftOp>; +def AVRasrLoop : SDNode<"AVRISD::ASRLOOP", SDTIntShiftOp>; + +//===----------------------------------------------------------------------===// +// AVR Operands, Complex Patterns and Transformations Definitions. +//===----------------------------------------------------------------------===// + +def imm8_neg_XFORM : SDNodeXForm<imm, +[{ + return CurDAG->getTargetConstant(-N->getAPIntValue(), SDLoc(N), MVT::i8); +}]>; + +def imm16_neg_XFORM : SDNodeXForm<imm, +[{ + return CurDAG->getTargetConstant(-N->getAPIntValue(), SDLoc(N), MVT::i16); +}]>; + +def imm0_63_neg : PatLeaf<(imm), +[{ + int64_t val = -N->getSExtValue(); + return val >= 0 && val < 64; +}], imm16_neg_XFORM>; + +def uimm6 : PatLeaf<(imm), [{ return isUInt<6>(N->getZExtValue()); }]>; + +// imm_com8_XFORM - Return the complement of a imm_com8 value +def imm_com8_XFORM : SDNodeXForm<imm, [{ + return CurDAG->getTargetConstant(~((uint8_t)N->getZExtValue()), SDLoc(N), + MVT::i8); +}]>; + +// imm_com8 - Match an immediate that is a complement +// of a 8-bit immediate. +// Note: this pattern doesn't require an encoder method and such, as it's +// only used on aliases (Pat<> and InstAlias<>). The actual encoding +// is handled by the destination instructions, which use imm_com8. +def imm_com8_asmoperand : AsmOperandClass { let Name = "ImmCom8"; } +def imm_com8 : Operand<i8> { + let ParserMatchClass = imm_com8_asmoperand; +} + +def ioaddr_XFORM : SDNodeXForm<imm, +[{ + return CurDAG->getTargetConstant(uint8_t(N->getZExtValue()) - 0x20, SDLoc(N), MVT::i8); +}]>; + +def iobitpos8_XFORM : SDNodeXForm<imm, +[{ + return CurDAG->getTargetConstant(Log2_32(uint8_t(N->getZExtValue())), + SDLoc(N), MVT::i8); +}]>; + +def iobitposn8_XFORM : SDNodeXForm<imm, +[{ + return CurDAG->getTargetConstant(Log2_32(uint8_t(~N->getZExtValue())), + SDLoc(N), MVT::i8); +}]>; + +def ioaddr8 : PatLeaf<(imm), +[{ + uint64_t val = N->getZExtValue(); + return val >= 0x20 && val < 0x60; +}], ioaddr_XFORM>; + +def lowioaddr8 : PatLeaf<(imm), +[{ + uint64_t val = N->getZExtValue(); + return val >= 0x20 && val < 0x40; +}], ioaddr_XFORM>; + +def ioaddr16 : PatLeaf<(imm), +[{ + uint64_t val = N->getZExtValue(); + return val >= 0x20 && val < 0x5f; +}], ioaddr_XFORM>; + +def iobitpos8 : PatLeaf<(imm), +[{ + return isPowerOf2_32(uint8_t(N->getZExtValue())); +}], iobitpos8_XFORM>; + +def iobitposn8 : PatLeaf<(imm), +[{ + return isPowerOf2_32(uint8_t(~N->getZExtValue())); +}], iobitposn8_XFORM>; + +def MemriAsmOperand : AsmOperandClass { + let Name = "Memri"; + let ParserMethod = "parseMemriOperand"; +} + +/// Address operand for `reg+imm` used by STD and LDD. 
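+/// For example, `ldd r24, Y+2` and `std Z+5, r17` use this operand; it is
+/// carried as a (pointer register, displacement) pair. The register numbers
+/// here are purely illustrative.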
+def memri : Operand<iPTR> +{ + let MIOperandInfo = (ops PTRDISPREGS, i16imm); + + let PrintMethod = "printMemri"; + let EncoderMethod = "encodeMemri"; + + let ParserMatchClass = MemriAsmOperand; +} + +// Address operand for `SP+imm` used by STD{W}SPQRr +def memspi : Operand<iPTR> +{ + let MIOperandInfo = (ops GPRSP, i16imm); +} + +def relbrtarget_7 : Operand<OtherVT> +{ + let PrintMethod = "printPCRelImm"; + let EncoderMethod = "encodeRelCondBrTarget<AVR::fixup_7_pcrel>"; +} + +def brtarget_13 : Operand<OtherVT> +{ + let PrintMethod = "printPCRelImm"; + let EncoderMethod = "encodeRelCondBrTarget<AVR::fixup_13_pcrel>"; +} + +// The target of a 22 or 16-bit call/jmp instruction. +def call_target : Operand<iPTR> +{ + let EncoderMethod = "encodeCallTarget"; +} + +// A 16-bit address (which can lead to an R_AVR_16 relocation). +def imm16 : Operand<i16> +{ + let EncoderMethod = "encodeImm<AVR::fixup_16, 2>"; +} + +/// A 6-bit immediate used in the ADIW/SBIW instructions. +def imm_arith6 : Operand<i16> +{ + let EncoderMethod = "encodeImm<AVR::fixup_6_adiw, 0>"; +} + +/// An 8-bit immediate inside an instruction with the same format +/// as the `LDI` instruction (the `FRdK` format). +def imm_ldi8 : Operand<i8> +{ + let EncoderMethod = "encodeImm<AVR::fixup_ldi, 0>"; +} + +/// A 5-bit port number used in SBIC and friends (the `FIOBIT` format). +def imm_port5 : Operand<i8> +{ + let EncoderMethod = "encodeImm<AVR::fixup_port5, 0>"; +} + +/// A 6-bit port number used in the `IN` instruction and friends (the +/// `FIORdA` format. +def imm_port6 : Operand<i8> +{ + let EncoderMethod = "encodeImm<AVR::fixup_port6, 0>"; +} + +// Addressing mode pattern reg+imm6 +def addr : ComplexPattern<iPTR, 2, "SelectAddr", [], [SDNPWantRoot]>; + +// AsmOperand class for a pointer register. +// Used with the LD/ST family of instructions. +// See FSTLD in AVRInstrFormats.td +def PtrRegAsmOperand : AsmOperandClass +{ + let Name = "Reg"; +} + +// A special operand type for the LD/ST instructions. +// It converts the pointer register number into a two-bit field used in the +// instruction. +def LDSTPtrReg : Operand<i16> +{ + let MIOperandInfo = (ops PTRREGS); + let EncoderMethod = "encodeLDSTPtrReg"; + + let ParserMatchClass = PtrRegAsmOperand; +} + +// A special operand type for the LDD/STD instructions. +// It behaves identically to the LD/ST version, except restricts +// the pointer registers to Y and Z. 
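+// (Only Y and Z have LDD/STD encodings; X cannot take a displacement.)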
+def LDDSTDPtrReg : Operand<i16> +{ + let MIOperandInfo = (ops PTRDISPREGS); + let EncoderMethod = "encodeLDSTPtrReg"; + + let ParserMatchClass = PtrRegAsmOperand; +} + +//===----------------------------------------------------------------------===// +// AVR predicates for subtarget features +//===----------------------------------------------------------------------===// + +def HasSRAM : Predicate<"Subtarget->hasSRAM()">, + AssemblerPredicate<"FeatureSRAM">; + +def HasJMPCALL : Predicate<"Subtarget->hasJMPCALL()">, + AssemblerPredicate<"FeatureJMPCALL">; + +def HasIJMPCALL : Predicate<"Subtarget->hasIJMPCALL()">, + AssemblerPredicate<"FeatureIJMPCALL">; + +def HasEIJMPCALL : Predicate<"Subtarget->hasEIJMPCALL()">, + AssemblerPredicate<"FeatureEIJMPCALL">; + +def HasADDSUBIW : Predicate<"Subtarget->hasADDSUBIW()">, + AssemblerPredicate<"FeatureADDSUBIW">; + +def HasSmallStack : Predicate<"Subtarget->HasSmallStack()">, + AssemblerPredicate<"FeatureSmallStack">; + +def HasMOVW : Predicate<"Subtarget->hasMOVW()">, + AssemblerPredicate<"FeatureMOVW">; + +def HasLPM : Predicate<"Subtarget->hasLPM()">, + AssemblerPredicate<"FeatureLPM">; + +def HasLPMX : Predicate<"Subtarget->hasLPMX()">, + AssemblerPredicate<"FeatureLPMX">; + +def HasELPM : Predicate<"Subtarget->hasELPM()">, + AssemblerPredicate<"FeatureELPM">; + +def HasELPMX : Predicate<"Subtarget->hasELPMX()">, + AssemblerPredicate<"FeatureELPMX">; + +def HasSPM : Predicate<"Subtarget->hasSPM()">, + AssemblerPredicate<"FeatureSPM">; + +def HasSPMX : Predicate<"Subtarget->hasSPMX()">, + AssemblerPredicate<"FeatureSPMX">; + +def HasDES : Predicate<"Subtarget->hasDES()">, + AssemblerPredicate<"FeatureDES">; + +def SupportsRMW : Predicate<"Subtarget->supportsRMW()">, + AssemblerPredicate<"FeatureRMW">; + +def SupportsMultiplication : Predicate<"Subtarget->supportsMultiplication()">, + AssemblerPredicate<"FeatureMultiplication">; + +def HasBREAK : Predicate<"Subtarget->hasBREAK()">, + AssemblerPredicate<"FeatureBREAK">; + +def HasTinyEncoding : Predicate<"Subtarget->hasTinyEncoding()">, + AssemblerPredicate<"FeatureTinyEncoding">; + + +// AVR specific condition code. These correspond to AVR_*_COND in +// AVRInstrInfo.td. They must be kept in synch. +def AVR_COND_EQ : PatLeaf<(i8 0)>; +def AVR_COND_NE : PatLeaf<(i8 1)>; +def AVR_COND_GE : PatLeaf<(i8 2)>; +def AVR_COND_LT : PatLeaf<(i8 3)>; +def AVR_COND_SH : PatLeaf<(i8 4)>; +def AVR_COND_LO : PatLeaf<(i8 5)>; +def AVR_COND_MI : PatLeaf<(i8 6)>; +def AVR_COND_PL : PatLeaf<(i8 7)>; + + +//===----------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// +// AVR Instruction list +//===----------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// + +// ADJCALLSTACKDOWN/UP implicitly use/def SP because they may be expanded into +// a stack adjustment and the codegen must know that they may modify the stack +// pointer before prolog-epilog rewriting occurs. +// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become +// sub / add which can clobber SREG. +let Defs = [SP, SREG], +Uses = [SP] in +{ + def ADJCALLSTACKDOWN : Pseudo<(outs), + (ins i16imm:$amt, i16imm:$amt2), + "#ADJCALLSTACKDOWN", + [(AVRcallseq_start timm:$amt, timm:$amt2)]>; + + // R31R30 is used to update SP, since it is a scratch reg and this instruction + // is placed after the function call then R31R30 should be always free. 
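+  // Roughly speaking, frame lowering turns such an adjustment into "read SP
+  // into R31R30, adiw/subi+sbci by the amount, write it back"; the exact
+  // sequence depends on the adjustment size (illustrative description only).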
+ //let Defs = [R31R30], + //Uses = [R31R30] in + //:TODO: if we enable this, the pseudo is killed because it looks dead + def ADJCALLSTACKUP : Pseudo<(outs), + (ins i16imm:$amt1, i16imm:$amt2), + "#ADJCALLSTACKUP", + [(AVRcallseq_end timm:$amt1, timm:$amt2)]>; +} + +//===----------------------------------------------------------------------===// +// Addition +//===----------------------------------------------------------------------===// +let isCommutable = 1, +Constraints = "$src = $rd", +Defs = [SREG] in +{ + // ADD Rd, Rr + // Adds two 8-bit registers. + def ADDRdRr : FRdRr<0b0000, + 0b11, + (outs GPR8:$rd), + (ins GPR8:$src, GPR8:$rr), + "add\t$rd, $rr", + [(set i8:$rd, (add i8:$src, i8:$rr)), + (implicit SREG)]>; + + // ADDW Rd+1:Rd, Rr+1:Rr + // Pseudo instruction to add four 8-bit registers as two 16-bit values. + // + // Expands to: + // add Rd, Rr + // adc Rd+1, Rr+1 + def ADDWRdRr : Pseudo<(outs DREGS:$rd), + (ins DREGS:$src, DREGS:$rr), + "addw\t$rd, $rr", + [(set i16:$rd, (add i16:$src, i16:$rr)), + (implicit SREG)]>; + + // ADC Rd, Rr + // Adds two 8-bit registers with carry. + let Uses = [SREG] in + def ADCRdRr : FRdRr<0b0001, + 0b11, + (outs GPR8:$rd), + (ins GPR8:$src, GPR8:$rr), + "adc\t$rd, $rr", + [(set i8:$rd, (adde i8:$src, i8:$rr)), + (implicit SREG)]>; + + // ADCW Rd+1:Rd, Rr+1:Rr + // Pseudo instruction to add four 8-bit registers as two 16-bit values with + // carry. + // + // Expands to: + // adc Rd, Rr + // adc Rd+1, Rr+1 + let Uses = [SREG] in + def ADCWRdRr : Pseudo<(outs DREGS:$rd), + (ins DREGS:$src, DREGS:$rr), + "adcw\t$rd, $rr", + [(set i16:$rd, (adde i16:$src, i16:$rr)), + (implicit SREG)]>; + + // AIDW Rd, k + // Adds an immediate 6-bit value K to Rd, placing the result in Rd. + def ADIWRdK : FWRdK<0b0, + (outs IWREGS:$rd), + (ins IWREGS:$src, imm_arith6:$k), + "adiw\t$rd, $k", + [(set i16:$rd, (add i16:$src, uimm6:$k)), + (implicit SREG)]>, + Requires<[HasADDSUBIW]>; +} + +//===----------------------------------------------------------------------===// +// Subtraction +//===----------------------------------------------------------------------===// +let Constraints = "$src = $rd", +Defs = [SREG] in +{ + // SUB Rd, Rr + // Subtracts the 8-bit value of Rr from Rd and places the value in Rd. + def SUBRdRr : FRdRr<0b0001, + 0b10, + (outs GPR8:$rd), + (ins GPR8:$src, GPR8:$rr), + "sub\t$rd, $rr", + [(set i8:$rd, (sub i8:$src, i8:$rr)), + (implicit SREG)]>; + + // SUBW Rd+1:Rd, Rr+1:Rr + // Subtracts two 16-bit values and places the result into Rd. + // + // Expands to: + // sub Rd, Rr + // sbc Rd+1, Rr+1 + def SUBWRdRr : Pseudo<(outs DREGS:$rd), + (ins DREGS:$src, DREGS:$rr), + "subw\t$rd, $rr", + [(set i16:$rd, (sub i16:$src, i16:$rr)), + (implicit SREG)]>; + + def SUBIRdK : FRdK<0b0101, + (outs LD8:$rd), + (ins LD8:$src, imm_ldi8:$k), + "subi\t$rd, $k", + [(set i8:$rd, (sub i8:$src, imm:$k)), + (implicit SREG)]>; + + // SUBIW Rd+1:Rd, K+1:K + // + // Expands to: + // subi Rd, K + // sbci Rd+1, K+1 + def SUBIWRdK : Pseudo<(outs DLDREGS:$rd), + (ins DLDREGS:$src, i16imm:$rr), + "subiw\t$rd, $rr", + [(set i16:$rd, (sub i16:$src, imm:$rr)), + (implicit SREG)]>; + + def SBIWRdK : FWRdK<0b1, + (outs IWREGS:$rd), + (ins IWREGS:$src, imm_arith6:$k), + "sbiw\t$rd, $k", + [(set i16:$rd, (sub i16:$src, uimm6:$k)), + (implicit SREG)]>, + Requires<[HasADDSUBIW]>; + + // Subtract with carry operations which must read the carry flag in SREG. 
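+  // The 16-bit subtraction above is therefore emitted as a subc/sube pair,
+  // which after pseudo expansion becomes, e.g., `sub r24, r22` followed by
+  // `sbc r25, r23` (register choice illustrative).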
+ let Uses = [SREG] in + { + def SBCRdRr : FRdRr<0b0000, + 0b10, + (outs GPR8:$rd), + (ins GPR8:$src, GPR8:$rr), + "sbc\t$rd, $rr", + [(set i8:$rd, (sube i8:$src, i8:$rr)), + (implicit SREG)]>; + + // SBCW Rd+1:Rd, Rr+1:Rr + // + // Expands to: + // sbc Rd, Rr + // sbc Rd+1, Rr+1 + def SBCWRdRr : Pseudo<(outs DREGS:$rd), + (ins DREGS:$src, DREGS:$rr), + "sbcw\t$rd, $rr", + [(set i16:$rd, (sube i16:$src, i16:$rr)), + (implicit SREG)]>; + + def SBCIRdK : FRdK<0b0100, + (outs LD8:$rd), + (ins LD8:$src, imm_ldi8:$k), + "sbci\t$rd, $k", + [(set i8:$rd, (sube i8:$src, imm:$k)), + (implicit SREG)]>; + + // SBCIW Rd+1:Rd, K+1:K + // sbci Rd, K + // sbci Rd+1, K+1 + def SBCIWRdK : Pseudo<(outs DLDREGS:$rd), + (ins DLDREGS:$src, i16imm:$rr), + "sbciw\t$rd, $rr", + [(set i16:$rd, (sube i16:$src, imm:$rr)), + (implicit SREG)]>; + } +} + +//===----------------------------------------------------------------------===// +// Increment and Decrement +//===----------------------------------------------------------------------===// +let Constraints = "$src = $rd", +Defs = [SREG] in +{ + def INCRd : FRd<0b1001, + 0b0100011, + (outs GPR8:$rd), + (ins GPR8:$src), + "inc\t$rd", + [(set i8:$rd, (add i8:$src, 1)), (implicit SREG)]>; + + def DECRd : FRd<0b1001, + 0b0101010, + (outs GPR8:$rd), + (ins GPR8:$src), + "dec\t$rd", + [(set i8:$rd, (add i8:$src, -1)), (implicit SREG)]>; +} + +//===----------------------------------------------------------------------===// +// Multiplication +//===----------------------------------------------------------------------===// + +let isCommutable = 1, +Defs = [R1, R0, SREG] in +{ + // MUL Rd, Rr + // Multiplies Rd by Rr and places the result into R1:R0. + let usesCustomInserter = 1 in { + def MULRdRr : FRdRr<0b1001, 0b11, + (outs), + (ins GPR8:$lhs, GPR8:$rhs), + "mul\t$lhs, $rhs", + [/*(set R1, R0, (smullohi i8:$lhs, i8:$rhs))*/]>, + Requires<[SupportsMultiplication]>; + + def MULSRdRr : FMUL2RdRr<0, + (outs), + (ins GPR8:$lhs, GPR8:$rhs), + "muls\t$lhs, $rhs", + []>, + Requires<[SupportsMultiplication]>; + } + + def MULSURdRr : FMUL2RdRr<1, + (outs), + (ins GPR8:$lhs, GPR8:$rhs), + "mulsu\t$lhs, $rhs", + []>, + Requires<[SupportsMultiplication]>; + + def FMUL : FFMULRdRr<0b01, + (outs), + (ins GPR8:$lhs, GPR8:$rhs), + "fmul\t$lhs, $rhs", + []>, + Requires<[SupportsMultiplication]>; + + def FMULS : FFMULRdRr<0b10, + (outs), + (ins GPR8:$lhs, GPR8:$rhs), + "fmuls\t$lhs, $rhs", + []>, + Requires<[SupportsMultiplication]>; + + def FMULSU : FFMULRdRr<0b11, + (outs), + (ins GPR8:$lhs, GPR8:$rhs), + "fmulsu\t$lhs, $rhs", + []>, + Requires<[SupportsMultiplication]>; +} + +let Defs = [R15, R14, R13, R12, R11, R10, R9, + R8, R7, R6, R5, R4, R3, R2, R1, R0] in +def DESK : FDES<(outs), + (ins i8imm:$k), + "des\t$k", + []>, + Requires<[HasDES]>; + +//===----------------------------------------------------------------------===// +// Logic +//===----------------------------------------------------------------------===// +let Constraints = "$src = $rd", +Defs = [SREG] in +{ + // Register-Register logic instructions (which have the + // property of commutativity). 
+ let isCommutable = 1 in + { + def ANDRdRr : FRdRr<0b0010, + 0b00, + (outs GPR8:$rd), + (ins GPR8:$src, GPR8:$rr), + "and\t$rd, $rr", + [(set i8:$rd, (and i8:$src, i8:$rr)), + (implicit SREG)]>; + + // ANDW Rd+1:Rd, Rr+1:Rr + // + // Expands to: + // and Rd, Rr + // and Rd+1, Rr+1 + def ANDWRdRr : Pseudo<(outs DREGS:$rd), + (ins DREGS:$src, DREGS:$rr), + "andw\t$rd, $rr", + [(set i16:$rd, (and i16:$src, i16:$rr)), + (implicit SREG)]>; + + def ORRdRr : FRdRr<0b0010, + 0b10, + (outs GPR8:$rd), + (ins GPR8:$src, GPR8:$rr), + "or\t$rd, $rr", + [(set i8:$rd, (or i8:$src, i8:$rr)), + (implicit SREG)]>; + + // ORW Rd+1:Rd, Rr+1:Rr + // + // Expands to: + // or Rd, Rr + // or Rd+1, Rr+1 + def ORWRdRr : Pseudo<(outs DREGS:$rd), + (ins DREGS:$src, DREGS:$rr), + "orw\t$rd, $rr", + [(set i16:$rd, (or i16:$src, i16:$rr)), + (implicit SREG)]>; + + def EORRdRr : FRdRr<0b0010, + 0b01, + (outs GPR8:$rd), + (ins GPR8:$src, GPR8:$rr), + "eor\t$rd, $rr", + [(set i8:$rd, (xor i8:$src, i8:$rr)), + (implicit SREG)]>; + + // EORW Rd+1:Rd, Rr+1:Rr + // + // Expands to: + // eor Rd, Rr + // eor Rd+1, Rr+1 + def EORWRdRr : Pseudo<(outs DREGS:$rd), + (ins DREGS:$src, DREGS:$rr), + "eorw\t$rd, $rr", + [(set i16:$rd, (xor i16:$src, i16:$rr)), + (implicit SREG)]>; + } + + def ANDIRdK : FRdK<0b0111, + (outs LD8:$rd), + (ins LD8:$src, imm_ldi8:$k), + "andi\t$rd, $k", + [(set i8:$rd, (and i8:$src, imm:$k)), + (implicit SREG)]>; + + // ANDI Rd+1:Rd, K+1:K + // + // Expands to: + // andi Rd, K + // andi Rd+1, K+1 + def ANDIWRdK : Pseudo<(outs DLDREGS:$rd), + (ins DLDREGS:$src, i16imm:$k), + "andiw\t$rd, $k", + [(set i16:$rd, (and i16:$src, imm:$k)), + (implicit SREG)]>; + + def ORIRdK : FRdK<0b0110, + (outs LD8:$rd), + (ins LD8:$src, imm_ldi8:$k), + "ori\t$rd, $k", + [(set i8:$rd, (or i8:$src, imm:$k)), + (implicit SREG)]>; + + // ORIW Rd+1:Rd, K+1,K + // + // Expands to: + // ori Rd, K + // ori Rd+1, K+1 + def ORIWRdK : Pseudo<(outs DLDREGS:$rd), + (ins DLDREGS:$src, i16imm:$rr), + "oriw\t$rd, $rr", + [(set i16:$rd, (or i16:$src, imm:$rr)), + (implicit SREG)]>; +} + +//===----------------------------------------------------------------------===// +// One's/Two's Complement +//===----------------------------------------------------------------------===// +let Constraints = "$src = $rd", +Defs = [SREG] in +{ + def COMRd : FRd<0b1001, + 0b0100000, + (outs GPR8:$rd), + (ins GPR8:$src), + "com\t$rd", + [(set i8:$rd, (not i8:$src)), (implicit SREG)]>; + + // COMW Rd+1:Rd + // + // Expands to: + // com Rd + // com Rd+1 + def COMWRd : Pseudo<(outs DREGS:$rd), + (ins DREGS:$src), + "comw\t$rd", + [(set i16:$rd, (not i16:$src)), (implicit SREG)]>; + + //:TODO: optimize NEG for wider types + def NEGRd : FRd<0b1001, + 0b0100001, + (outs GPR8:$rd), + (ins GPR8:$src), + "neg\t$rd", + [(set i8:$rd, (ineg i8:$src)), (implicit SREG)]>; +} + +// TST Rd +// Test for zero of minus. +// This operation is identical to a `Rd AND Rd`. +def : InstAlias<"tst\t$rd", (ANDRdRr GPR8:$rd, GPR8:$rd)>; + +// SBR Rd, K +// +// Mnemonic alias to 'ORI Rd, K'. Same bit pattern, same operands, +// same everything. 
+def : InstAlias<"sbr\t$rd, $k", + (ORIRdK LD8:$rd, imm_ldi8:$k), + /* Disable display, so we don't override ORI */ 0>; + +//===----------------------------------------------------------------------===// +// Jump instructions +//===----------------------------------------------------------------------===// +let isBarrier = 1, +isBranch = 1, +isTerminator = 1 in +{ + def RJMPk : FBRk<0, + (outs), + (ins brtarget_13:$target), + "rjmp\t$target", + [(br bb:$target)]>; + + let isIndirectBranch = 1, + Uses = [R31R30] in + def IJMP : F16<0b1001010000001001, + (outs), + (ins), + "ijmp", + []>, + Requires<[HasIJMPCALL]>; + + let isIndirectBranch = 1, + Uses = [R31R30] in + def EIJMP : F16<0b1001010000011001, + (outs), + (ins), + "eijmp", + []>, + Requires<[HasEIJMPCALL]>; + + def JMPk : F32BRk<0b110, + (outs), + (ins call_target:$k), + "jmp\t$k", + []>, + Requires<[HasJMPCALL]>; +} + +//===----------------------------------------------------------------------===// +// Call instructions +//===----------------------------------------------------------------------===// +let isCall = 1 in +{ + // SP is marked as a use to prevent stack-pointer assignments that appear + // immediately before calls from potentially appearing dead. + let Uses = [SP] in + def RCALLk : FBRk<1, + (outs), + (ins brtarget_13:$target), + "rcall\t$target", + []>; + + // SP is marked as a use to prevent stack-pointer assignments that appear + // immediately before calls from potentially appearing dead. + let Uses = [SP, R31R30] in + def ICALL : F16<0b1001010100001001, + (outs), + (ins variable_ops), + "icall", + []>, + Requires<[HasIJMPCALL]>; + + // SP is marked as a use to prevent stack-pointer assignments that appear + // immediately before calls from potentially appearing dead. + let Uses = [SP, R31R30] in + def EICALL : F16<0b1001010100011001, + (outs), + (ins variable_ops), + "eicall", + []>, + Requires<[HasEIJMPCALL]>; + + // SP is marked as a use to prevent stack-pointer assignments that appear + // immediately before calls from potentially appearing dead. + // + //:TODO: the imm field can be either 16 or 22 bits in devices with more + // than 64k of ROM, fix it once we support the largest devices. + let Uses = [SP] in + def CALLk : F32BRk<0b111, + (outs), + (ins call_target:$k), + "call\t$k", + [(AVRcall imm:$k)]>, + Requires<[HasJMPCALL]>; +} + +//===----------------------------------------------------------------------===// +// Return instructions. +//===----------------------------------------------------------------------===// +let isTerminator = 1, +isReturn = 1, +isBarrier = 1 in +{ + def RET : F16<0b1001010100001000, + (outs), + (ins), + "ret", + [(AVRretflag)]>; + + def RETI : F16<0b1001010100011000, + (outs), + (ins), + "reti", + [(AVRretiflag)]>; +} + +//===----------------------------------------------------------------------===// +// Compare operations. +//===----------------------------------------------------------------------===// +let Defs = [SREG] in +{ + // CPSE Rd, Rr + // Compare Rd and Rr, skipping the next instruction if they are equal. 
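+  // e.g. `cpse r2, r3` followed by `rjmp .Lskip` takes the rjmp only when
+  // r2 != r3; when the registers are equal the rjmp is skipped (registers
+  // and label are illustrative).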
+ let isBarrier = 1, + isBranch = 1, + isTerminator = 1 in + def CPSE : FRdRr<0b0001, + 0b00, + (outs), + (ins GPR8:$rd, GPR8:$rr), + "cpse\t$rd, $rr", + []>; + + def CPRdRr : FRdRr<0b0001, + 0b01, + (outs), + (ins GPR8:$rd, GPR8:$rr), + "cp\t$rd, $rr", + [(AVRcmp i8:$rd, i8:$rr), (implicit SREG)]>; + + // CPW Rd+1:Rd, Rr+1:Rr + // + // Expands to: + // cp Rd, Rr + // cpc Rd+1, Rr+1 + def CPWRdRr : Pseudo<(outs), + (ins DREGS:$src, DREGS:$src2), + "cpw\t$src, $src2", + [(AVRcmp i16:$src, i16:$src2), (implicit SREG)]>; + + let Uses = [SREG] in + def CPCRdRr : FRdRr<0b0000, + 0b01, + (outs), + (ins GPR8:$rd, GPR8:$rr), + "cpc\t$rd, $rr", + [(AVRcmpc i8:$rd, i8:$rr), (implicit SREG)]>; + + // CPCW Rd+1:Rd. Rr+1:Rr + // + // Expands to: + // cpc Rd, Rr + // cpc Rd+1, Rr+1 + let Uses = [SREG] in + def CPCWRdRr : Pseudo<(outs), + (ins DREGS:$src, DREGS:$src2), + "cpcw\t$src, $src2", + [(AVRcmpc i16:$src, i16:$src2), (implicit SREG)]>; + + // CPI Rd, K + // Compares a register with an 8 bit immediate. + def CPIRdK : FRdK<0b0011, + (outs), + (ins LD8:$rd, imm_ldi8:$k), + "cpi\t$rd, $k", + [(AVRcmp i8:$rd, imm:$k), (implicit SREG)]>; +} + +//===----------------------------------------------------------------------===// +// Register conditional skipping/branching operations. +//===----------------------------------------------------------------------===// +let isBranch = 1, +isTerminator = 1 in +{ + // Conditional skipping on GPR register bits, and + // conditional skipping on IO register bits. + let isBarrier = 1 in + { + def SBRCRrB : FRdB<0b10, + (outs), + (ins GPR8:$rr, i8imm:$b), + "sbrc\t$rr, $b", + []>; + + def SBRSRrB : FRdB<0b11, + (outs), + (ins GPR8:$rr, i8imm:$b), + "sbrs\t$rr, $b", + []>; + + def SBICAb : FIOBIT<0b01, + (outs), + (ins imm_port5:$a, i8imm:$b), + "sbic\t$a, $b", + []>; + + def SBISAb : FIOBIT<0b11, + (outs), + (ins imm_port5:$a, i8imm:$b), + "sbis\t$a, $b", + []>; + } + + // Relative branches on status flag bits. + let Uses = [SREG] in + { + // BRBS s, k + // Branch if `s` flag in status register is set. + def BRBSsk : FSK<0, + (outs), + (ins i8imm:$s, relbrtarget_7:$k), + "brbs\t$s, $k", + []>; + + // BRBC s, k + // Branch if `s` flag in status register is clear. 
+ def BRBCsk : FSK<1, + (outs), + (ins i8imm:$s, relbrtarget_7:$k), + "brbc\t$s, $k", + []>; + } +} + + +// BRCS k +// Branch if carry flag is set +def : InstAlias<"brcs\t$k", (BRBSsk 0, relbrtarget_7:$k)>; + +// BRCC k +// Branch if carry flag is clear +def : InstAlias<"brcc\t$k", (BRBCsk 0, relbrtarget_7:$k)>; + +// BRHS k +// Branch if half carry flag is set +def : InstAlias<"brhs\t$k", (BRBSsk 5, relbrtarget_7:$k)>; + +// BRHC k +// Branch if half carry flag is clear +def : InstAlias<"brhc\t$k", (BRBCsk 5, relbrtarget_7:$k)>; + +// BRTS k +// Branch if the T flag is set +def : InstAlias<"brts\t$k", (BRBSsk 6, relbrtarget_7:$k)>; + +// BRTC k +// Branch if the T flag is clear +def : InstAlias<"brtc\t$k", (BRBCsk 6, relbrtarget_7:$k)>; + +// BRVS k +// Branch if the overflow flag is set +def : InstAlias<"brvs\t$k", (BRBSsk 3, relbrtarget_7:$k)>; + +// BRVC k +// Branch if the overflow flag is clear +def : InstAlias<"brvc\t$k", (BRBCsk 3, relbrtarget_7:$k)>; + +// BRIE k +// Branch if the global interrupt flag is enabled +def : InstAlias<"brie\t$k", (BRBSsk 7, relbrtarget_7:$k)>; + +// BRID k +// Branch if the global interrupt flag is disabled +def : InstAlias<"brid\t$k", (BRBCsk 7, relbrtarget_7:$k)>; + +//===----------------------------------------------------------------------===// +// PC-relative conditional branches +//===----------------------------------------------------------------------===// +// Based on status register. We cannot simplify these into instruction aliases +// because we also need to be able to specify a pattern to match for ISel. +let isBranch = 1, +isTerminator = 1, +Uses = [SREG] in +{ + def BREQk : FBRsk<0, + 0b001, + (outs), + (ins relbrtarget_7:$target), + "breq\t$target", + [(AVRbrcond bb:$target, AVR_COND_EQ)]>; + + def BRNEk : FBRsk<1, + 0b001, + (outs), + (ins relbrtarget_7:$target), + "brne\t$target", + [(AVRbrcond bb:$target, AVR_COND_NE)]>; + + + def BRSHk : FBRsk<1, + 0b000, + (outs), + (ins relbrtarget_7:$target), + "brsh\t$target", + [(AVRbrcond bb:$target, AVR_COND_SH)]>; + + def BRLOk : FBRsk<0, + 0b000, + (outs), + (ins relbrtarget_7:$target), + "brlo\t$target", + [(AVRbrcond bb:$target, AVR_COND_LO)]>; + + def BRMIk : FBRsk<0, + 0b010, + (outs), + (ins relbrtarget_7:$target), + "brmi\t$target", + [(AVRbrcond bb:$target, AVR_COND_MI)]>; + + def BRPLk : FBRsk<1, + 0b010, + (outs), + (ins relbrtarget_7:$target), + "brpl\t$target", + [(AVRbrcond bb:$target, AVR_COND_PL)]>; + + def BRGEk : FBRsk<1, + 0b100, + (outs), + (ins relbrtarget_7:$target), + "brge\t$target", + [(AVRbrcond bb:$target, AVR_COND_GE)]>; + + def BRLTk : FBRsk<0, + 0b100, + (outs), + (ins relbrtarget_7:$target), + "brlt\t$target", + [(AVRbrcond bb:$target, AVR_COND_LT)]>; +} + +//===----------------------------------------------------------------------===// +// Data transfer instructions +//===----------------------------------------------------------------------===// +// 8 and 16-bit register move instructions. +let hasSideEffects = 0 in +{ + def MOVRdRr : FRdRr<0b0010, + 0b11, + (outs GPR8:$rd), + (ins GPR8:$rr), + "mov\t$rd, $rr", + []>; + + def MOVWRdRr : FMOVWRdRr<(outs DREGS:$dst), + (ins DREGS:$src), + "movw\t$dst, $src", + []>, + Requires<[HasMOVW]>; +} + +// Load immediate values into registers. 
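+// Note that LDI can only target the upper half of the register file
+// (r16-r31, the LD8 class); materializing an immediate in r0-r15 needs an
+// extra register move.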
+let isReMaterializable = 1 in +{ + def LDIRdK : FRdK<0b1110, + (outs LD8:$rd), + (ins imm_ldi8:$k), + "ldi\t$rd, $k", + [(set i8:$rd, imm:$k)]>; + + // LDIW Rd+1:Rd, K+1:K + // + // Expands to: + // ldi Rd, K + // ldi Rd+1, K+1 + def LDIWRdK : Pseudo<(outs DLDREGS:$dst), + (ins i16imm:$src), + "ldiw\t$dst, $src", + [(set i16:$dst, imm:$src)]>; +} + +// Load from data space into register. +let canFoldAsLoad = 1, +isReMaterializable = 1 in +{ + def LDSRdK : F32DM<0b0, + (outs GPR8:$rd), + (ins imm16:$k), + "lds\t$rd, $k", + [(set i8:$rd, (load imm:$k))]>, + Requires<[HasSRAM]>; + + // LDSW Rd+1:Rd, K+1:K + // + // Expands to: + // lds Rd, (K+1:K) + // lds Rd+1 (K+1:K) + 1 + def LDSWRdK : Pseudo<(outs DREGS:$dst), + (ins i16imm:$src), + "ldsw\t$dst, $src", + [(set i16:$dst, (load imm:$src))]>, + Requires<[HasSRAM]>; +} + +// Indirect loads. +let canFoldAsLoad = 1, +isReMaterializable = 1 in +{ + def LDRdPtr : FSTLD<0, + 0b00, + (outs GPR8:$reg), + (ins LDSTPtrReg:$ptrreg), + "ld\t$reg, $ptrreg", + [(set GPR8:$reg, (load i16:$ptrreg))]>, + Requires<[HasSRAM]>; + + // LDW Rd+1:Rd, P + // + // Expands to: + // ld Rd, P + // ldd Rd+1, P+1 + let Constraints = "@earlyclobber $reg" in + def LDWRdPtr : Pseudo<(outs DREGS:$reg), + (ins PTRDISPREGS:$ptrreg), + "ldw\t$reg, $ptrreg", + [(set i16:$reg, (load i16:$ptrreg))]>, + Requires<[HasSRAM]>; +} + +// Indirect loads (with postincrement or predecrement). +let mayLoad = 1, +hasSideEffects = 0, +Constraints = "$ptrreg = $base_wb,@earlyclobber $reg" in +{ + def LDRdPtrPi : FSTLD<0, + 0b01, + (outs GPR8:$reg, PTRREGS:$base_wb), + (ins LDSTPtrReg:$ptrreg), + "ld\t$reg, $ptrreg+", + []>, + Requires<[HasSRAM]>; + + // LDW Rd+1:Rd, P+ + // Expands to: + // ld Rd, P+ + // ld Rd+1, P+ + def LDWRdPtrPi : Pseudo<(outs DREGS:$reg, PTRREGS:$base_wb), + (ins PTRREGS:$ptrreg), + "ldw\t$reg, $ptrreg+", + []>, + Requires<[HasSRAM]>; + + def LDRdPtrPd : FSTLD<0, + 0b10, + (outs GPR8:$reg, PTRREGS:$base_wb), + (ins LDSTPtrReg:$ptrreg), + "ld\t$reg, -$ptrreg", + []>, + Requires<[HasSRAM]>; + + // LDW Rd+1:Rd, -P + // + // Expands to: + // ld Rd+1, -P + // ld Rd, -P + def LDWRdPtrPd : Pseudo<(outs DREGS:$reg, PTRREGS:$base_wb), + (ins PTRREGS:$ptrreg), + "ldw\t$reg, -$ptrreg", + []>, + Requires<[HasSRAM]>; +} + +// Load indirect with displacement operations. +let canFoldAsLoad = 1, +isReMaterializable = 1 in +{ + let Constraints = "@earlyclobber $reg" in + def LDDRdPtrQ : FSTDLDD<0, + (outs GPR8:$reg), + (ins memri:$memri), + "ldd\t$reg, $memri", + [(set i8:$reg, (load addr:$memri))]>, + Requires<[HasSRAM]>; + + // LDDW Rd+1:Rd, P+q + // + // Expands to: + // ldd Rd, P+q + // ldd Rd+1, P+q+1 + let Constraints = "@earlyclobber $dst" in + def LDDWRdPtrQ : Pseudo<(outs DREGS_WITHOUT_YZ_WORKAROUND:$dst), + (ins memri:$memri), + "lddw\t$dst, $memri", + [(set i16:$dst, (load addr:$memri))]>, + Requires<[HasSRAM]>; + + // An identical pseudo instruction to LDDWRdPtrQ, expect restricted to the Y + // register and without the @earlyclobber flag. + // + // Used to work around a bug caused by the register allocator not + // being able to handle the expansion of a COPY into an machine instruction + // that has an earlyclobber flag. This is because the register allocator will + // try expand a copy from a register slot into an earlyclobber instruction. + // Instructions that are earlyclobber need to be in a dedicated earlyclobber slot. 
+ // + // This pseudo instruction can be used pre-AVR pseudo expansion in order to + // get a frame index load without directly using earlyclobber instructions. + // + // The pseudo expansion pass trivially expands this into LDDWRdPtrQ. + // + // This instruction may be removed once PR13375 is fixed. + let mayLoad = 1, + hasSideEffects = 0 in + def LDDWRdYQ : Pseudo<(outs DREGS:$dst), + (ins memri:$memri), + "lddw\t$dst, $memri", + []>, + Requires<[HasSRAM]>; +} + +class AtomicLoad<PatFrag Op, RegisterClass DRC, + RegisterClass PTRRC> : + Pseudo<(outs DRC:$rd), (ins PTRRC:$rr), "atomic_op", + [(set DRC:$rd, (Op i16:$rr))]>; + +class AtomicStore<PatFrag Op, RegisterClass DRC, + RegisterClass PTRRC> : + Pseudo<(outs), (ins PTRRC:$rd, DRC:$rr), "atomic_op", + [(Op i16:$rd, DRC:$rr)]>; + +class AtomicLoadOp<PatFrag Op, RegisterClass DRC, + RegisterClass PTRRC> : + Pseudo<(outs DRC:$rd), (ins PTRRC:$rr, DRC:$operand), + "atomic_op", + [(set DRC:$rd, (Op i16:$rr, DRC:$operand))]>; + +// FIXME: I think 16-bit atomic binary ops need to mark +// r0 as clobbered. + +// Atomic instructions +// =================== +// +// These are all expanded by AVRExpandPseudoInsts +// +// 8-bit operations can use any pointer register because +// they are expanded directly into an LD/ST instruction. +// +// 16-bit operations use 16-bit load/store postincrement instructions, +// which require PTRDISPREGS. + +def AtomicLoad8 : AtomicLoad<atomic_load_8, GPR8, PTRREGS>; +def AtomicLoad16 : AtomicLoad<atomic_load_16, DREGS, PTRDISPREGS>; + +def AtomicStore8 : AtomicStore<atomic_store_8, GPR8, PTRREGS>; +def AtomicStore16 : AtomicStore<atomic_store_16, DREGS, PTRDISPREGS>; + +class AtomicLoadOp8<PatFrag Op> : AtomicLoadOp<Op, GPR8, PTRREGS>; +class AtomicLoadOp16<PatFrag Op> : AtomicLoadOp<Op, DREGS, PTRDISPREGS>; + +def AtomicLoadAdd8 : AtomicLoadOp8<atomic_load_add_8>; +def AtomicLoadAdd16 : AtomicLoadOp16<atomic_load_add_16>; +def AtomicLoadSub8 : AtomicLoadOp8<atomic_load_sub_8>; +def AtomicLoadSub16 : AtomicLoadOp16<atomic_load_sub_16>; +def AtomicLoadAnd8 : AtomicLoadOp8<atomic_load_and_8>; +def AtomicLoadAnd16 : AtomicLoadOp16<atomic_load_and_16>; +def AtomicLoadOr8 : AtomicLoadOp8<atomic_load_or_8>; +def AtomicLoadOr16 : AtomicLoadOp16<atomic_load_or_16>; +def AtomicLoadXor8 : AtomicLoadOp8<atomic_load_xor_8>; +def AtomicLoadXor16 : AtomicLoadOp16<atomic_load_xor_16>; +def AtomicFence : Pseudo<(outs), (ins), "atomic_fence", + [(atomic_fence imm, imm)]>; + +// Indirect store from register to data space. +def STSKRr : F32DM<0b1, + (outs), + (ins imm16:$k, GPR8:$rd), + "sts\t$k, $rd", + [(store i8:$rd, imm:$k)]>, + Requires<[HasSRAM]>; + +// STSW K+1:K, Rr+1:Rr +// +// Expands to: +// sts Rr+1, (K+1:K) + 1 +// sts Rr, (K+1:K) +def STSWKRr : Pseudo<(outs), + (ins i16imm:$dst, DREGS:$src), + "stsw\t$dst, $src", + [(store i16:$src, imm:$dst)]>, + Requires<[HasSRAM]>; + +// Indirect stores. +// ST P, Rr +// Stores the value of Rr into the location addressed by pointer P. +def STPtrRr : FSTLD<1, + 0b00, + (outs), + (ins LDSTPtrReg:$ptrreg, GPR8:$reg), + "st\t$ptrreg, $reg", + [(store GPR8:$reg, i16:$ptrreg)]>, + Requires<[HasSRAM]>; + +// STW P, Rr+1:Rr +// Stores the value of Rr into the location addressed by pointer P. +// +// Expands to: +// st P, Rr +// std P+1, Rr+1 +def STWPtrRr : Pseudo<(outs), + (ins PTRDISPREGS:$ptrreg, DREGS:$reg), + "stw\t$ptrreg, $reg", + [(store i16:$reg, i16:$ptrreg)]>, + Requires<[HasSRAM]>; + +// Indirect stores (with postincrement or predecrement). 
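+// e.g. `st X+, r16` stores r16 through X and then increments X, while
+// `st -Y, r17` first decrements Y and then stores r17 (registers
+// illustrative).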
+let Constraints = "$ptrreg = $base_wb,@earlyclobber $base_wb" in +{ + + // ST P+, Rr + // Stores the value of Rr into the location addressed by pointer P. + // Post increments P. + def STPtrPiRr : FSTLD<1, + 0b01, + (outs LDSTPtrReg:$base_wb), + (ins LDSTPtrReg:$ptrreg, GPR8:$reg, i8imm:$offs), + "st\t$ptrreg+, $reg", + [(set i16:$base_wb, + (post_store GPR8:$reg, i16:$ptrreg, imm:$offs))]>, + Requires<[HasSRAM]>; + + // STW P+, Rr+1:Rr + // Stores the value of Rr into the location addressed by pointer P. + // Post increments P. + // + // Expands to: + // st P+, Rr + // st P+, Rr+1 + def STWPtrPiRr : Pseudo<(outs PTRREGS:$base_wb), + (ins PTRREGS:$ptrreg, DREGS:$trh, i8imm:$offs), + "stw\t$ptrreg+, $trh", + [(set PTRREGS:$base_wb, + (post_store DREGS:$trh, PTRREGS:$ptrreg, imm:$offs))]>, + Requires<[HasSRAM]>; + + // ST -P, Rr + // Stores the value of Rr into the location addressed by pointer P. + // Pre decrements P. + def STPtrPdRr : FSTLD<1, + 0b10, + (outs LDSTPtrReg:$base_wb), + (ins LDSTPtrReg:$ptrreg, GPR8:$reg, i8imm:$offs), + "st\t-$ptrreg, $reg", + [(set i16:$base_wb, + (pre_store GPR8:$reg, i16:$ptrreg, imm:$offs))]>, + Requires<[HasSRAM]>; + + // STW -P, Rr+1:Rr + // Stores the value of Rr into the location addressed by pointer P. + // Pre decrements P. + // + // Expands to: + // st -P, Rr+1 + // st -P, Rr + def STWPtrPdRr : Pseudo<(outs PTRREGS:$base_wb), + (ins PTRREGS:$ptrreg, DREGS:$reg, i8imm:$offs), + "stw\t-$ptrreg, $reg", + [(set PTRREGS:$base_wb, + (pre_store i16:$reg, i16:$ptrreg, imm:$offs))]>, + Requires<[HasSRAM]>; +} + +// Store indirect with displacement operations. +// STD P+q, Rr +// Stores the value of Rr into the location addressed by pointer P with a +// displacement of q. Does not modify P. +def STDPtrQRr : FSTDLDD<1, + (outs), + (ins memri:$memri, GPR8:$reg), + "std\t$memri, $reg", + [(store i8:$reg, addr:$memri)]>, + Requires<[HasSRAM]>; + +// STDW P+q, Rr+1:Rr +// Stores the value of Rr into the location addressed by pointer P with a +// displacement of q. Does not modify P. +// +// Expands to: +// std P+q, Rr +// std P+q+1, Rr+1 +def STDWPtrQRr : Pseudo<(outs), + (ins memri:$memri, DREGS:$src), + "stdw\t$memri, $src", + [(store i16:$src, addr:$memri)]>, + Requires<[HasSRAM]>; + + +// Load program memory operations. +let canFoldAsLoad = 1, +isReMaterializable = 1, +mayLoad = 1, +hasSideEffects = 0 in +{ + let Defs = [R0], + Uses = [R31R30] in + def LPM : F16<0b1001010111001000, + (outs), + (ins), + "lpm", + []>, + Requires<[HasLPM]>; + + def LPMRdZ : FLPMX<0, + 0, + (outs GPR8:$dst), + (ins ZREG:$z), + "lpm\t$dst, $z", + []>, + Requires<[HasLPMX]>; + + // Load program memory, while postincrementing the Z register. + let Defs = [R31R30] in + { + def LPMRdZPi : FLPMX<0, + 1, + (outs GPR8:$dst), + (ins ZREG:$z), + "lpm\t$dst, $z+", + []>, + Requires<[HasLPMX]>; + + def LPMWRdZ : Pseudo<(outs DREGS:$dst), + (ins ZREG:$z), + "lpmw\t$dst, $z", + []>, + Requires<[HasLPMX]>; + + def LPMWRdZPi : Pseudo<(outs DREGS:$dst), + (ins ZREG:$z), + "lpmw\t$dst, $z+", + []>, + Requires<[HasLPMX]>; + } +} + +// Extended load program memory operations. 
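+// ELPM behaves like LPM but forms the program-memory address from RAMPZ:Z,
+// which is needed to reach flash beyond the first 64 KiB.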
+let mayLoad = 1, +hasSideEffects = 0 in +{ + let Defs = [R0], + Uses = [R31R30] in + def ELPM : F16<0b1001010111011000, + (outs), + (ins), + "elpm", + []>, + Requires<[HasELPM]>; + + def ELPMRdZ : FLPMX<1, + 0, + (outs GPR8:$dst), + (ins ZREG:$z), + "elpm\t$dst, $z", + []>, + Requires<[HasELPMX]>; + + let Defs = [R31R30] in + def ELPMRdZPi : FLPMX<1, + 1, + (outs GPR8:$dst), + (ins ZREG: $z), + "elpm\t$dst, $z+", + []>, + Requires<[HasELPMX]>; +} + +// Store program memory operations. +let Uses = [R1, R0] in +{ + let Uses = [R31R30, R1, R0] in + def SPM : F16<0b1001010111101000, + (outs), + (ins), + "spm", + []>, + Requires<[HasSPM]>; + + let Defs = [R31R30] in + def SPMZPi : F16<0b1001010111111000, + (outs), + (ins ZREG:$z), + "spm $z+", + []>, + Requires<[HasSPMX]>; +} + +// Read data from IO location operations. +let canFoldAsLoad = 1, +isReMaterializable = 1 in +{ + def INRdA : FIORdA<(outs GPR8:$dst), + (ins imm_port6:$src), + "in\t$dst, $src", + [(set i8:$dst, (load ioaddr8:$src))]>; + + def INWRdA : Pseudo<(outs DREGS:$dst), + (ins imm_port6:$src), + "inw\t$dst, $src", + [(set i16:$dst, (load ioaddr16:$src))]>; +} + +// Write data to IO location operations. +def OUTARr : FIOARr<(outs), + (ins imm_port6:$dst, GPR8:$src), + "out\t$dst, $src", + [(store i8:$src, ioaddr8:$dst)]>; + +def OUTWARr : Pseudo<(outs), + (ins imm_port6:$dst, DREGS:$src), + "outw\t$dst, $src", + [(store i16:$src, ioaddr16:$dst)]>; + +// Stack push/pop operations. +let Defs = [SP], +Uses = [SP], +hasSideEffects = 0 in +{ + // Stack push operations. + let mayStore = 1 in + { + def PUSHRr : FRd<0b1001, + 0b0011111, + (outs), + (ins GPR8:$reg), + "push\t$reg", + []>, + Requires<[HasSRAM]>; + + def PUSHWRr : Pseudo<(outs), + (ins DREGS:$reg), + "pushw\t$reg", + []>, + Requires<[HasSRAM]>; + } + + // Stack pop operations. + let mayLoad = 1 in + { + def POPRd : FRd<0b1001, + 0b0001111, + (outs GPR8:$reg), + (ins), + "pop\t$reg", + []>, + Requires<[HasSRAM]>; + + def POPWRd : Pseudo<(outs DREGS:$reg), + (ins), + "popw\t$reg", + []>, + Requires<[HasSRAM]>; + } +} + +// Read-Write-Modify (RMW) instructions. +def XCHZRd : FZRd<0b100, + (outs GPR8:$rd), + (ins ZREG:$z), + "xch\t$z, $rd", + []>, + Requires<[SupportsRMW]>; + +def LASZRd : FZRd<0b101, + (outs GPR8:$rd), + (ins ZREG:$z), + "las\t$z, $rd", + []>, + Requires<[SupportsRMW]>; + +def LACZRd : FZRd<0b110, + (outs GPR8:$rd), + (ins ZREG:$z), + "lac\t$z, $rd", + []>, + Requires<[SupportsRMW]>; + +def LATZRd : FZRd<0b111, + (outs GPR8:$rd), + (ins ZREG:$z), + "lat\t$z, $rd", + []>, + Requires<[SupportsRMW]>; + +//===----------------------------------------------------------------------===// +// Bit and bit-test instructions +//===----------------------------------------------------------------------===// + +// Bit shift/rotate operations. 
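+// The 16-bit forms below are pseudos that are later expanded into 8-bit
+// pairs, e.g. `lslw Rd` becomes a shift of the low byte followed by a
+// rotate-through-carry of the high byte (lsl + rol, i.e. add + adc).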
+let Constraints = "$src = $rd", +Defs = [SREG] in +{ + // 8-bit LSL is an alias of ADD Rd, Rd + + def LSLWRd : Pseudo<(outs DREGS:$rd), + (ins DREGS:$src), + "lslw\t$rd", + [(set i16:$rd, (AVRlsl i16:$src)), (implicit SREG)]>; + + def LSRRd : FRd<0b1001, + 0b0100110, + (outs GPR8:$rd), + (ins GPR8:$src), + "lsr\t$rd", + [(set i8:$rd, (AVRlsr i8:$src)), (implicit SREG)]>; + + def LSRWRd : Pseudo<(outs DREGS:$rd), + (ins DREGS:$src), + "lsrw\t$rd", + [(set i16:$rd, (AVRlsr i16:$src)), (implicit SREG)]>; + + def ASRRd : FRd<0b1001, + 0b0100101, + (outs GPR8:$rd), + (ins GPR8:$src), + "asr\t$rd", + [(set i8:$rd, (AVRasr i8:$src)), (implicit SREG)]>; + + def ASRWRd : Pseudo<(outs DREGS:$rd), + (ins DREGS:$src), + "asrw\t$rd", + [(set i16:$rd, (AVRasr i16:$src)), (implicit SREG)]>; + + // Bit rotate operations. + let Uses = [SREG] in + { + // 8-bit ROL is an alias of ADC Rd, Rd + + def ROLWRd : Pseudo<(outs DREGS:$rd), + (ins DREGS:$src), + "rolw\t$rd", + [(set i16:$rd, (AVRrol i16:$src)), (implicit SREG)]>; + + def RORRd : FRd<0b1001, + 0b0100111, + (outs GPR8:$rd), + (ins GPR8:$src), + "ror\t$rd", + [(set i8:$rd, (AVRror i8:$src)), (implicit SREG)]>; + + def RORWRd : Pseudo<(outs DREGS:$rd), + (ins DREGS:$src), + "rorw\t$rd", + [(set i16:$rd, (AVRror i16:$src)), (implicit SREG)]>; + } +} + +// SWAP Rd +// Swaps the high and low nibbles in a register. +let Constraints = "$src = $rd" in +def SWAPRd : FRd<0b1001, + 0b0100010, + (outs GPR8:$rd), + (ins GPR8:$src), + "swap\t$rd", + [(set i8:$rd, (bswap i8:$src))]>; + +// IO register bit set/clear operations. +//:TODO: add patterns when popcount(imm)==2 to be expanded with 2 sbi/cbi +// instead of in+ori+out which requires one more instr. +def SBIAb : FIOBIT<0b10, + (outs), + (ins imm_port5:$addr, i8imm:$bit), + "sbi\t$addr, $bit", + [(store (or (i8 (load lowioaddr8:$addr)), iobitpos8:$bit), + lowioaddr8:$addr)]>; + +def CBIAb : FIOBIT<0b00, + (outs), + (ins imm_port5:$addr, i8imm:$bit), + "cbi\t$addr, $bit", + [(store (and (i8 (load lowioaddr8:$addr)), iobitposn8:$bit), + lowioaddr8:$addr)]>; + +// Status register bit load/store operations. +let Defs = [SREG] in +def BST : FRdB<0b01, + (outs), + (ins GPR8:$rd, i8imm:$b), + "bst\t$rd, $b", + []>; + +let Uses = [SREG] in +def BLD : FRdB<0b00, + (outs), + (ins GPR8:$rd, i8imm:$b), + "bld\t$rd, $b", + []>; + +def CBR : InstAlias<"cbr\t$rd, $k", (ANDIRdK LD8:$rd, imm_com8:$k), 0>; + +// CLR Rd +// Alias for EOR Rd, Rd +// ------------- +// Clears all bits in a register. +def CLR : InstAlias<"clr\t$rd", (EORRdRr GPR8:$rd, GPR8:$rd)>; + +// LSL Rd +// Alias for ADD Rd, Rd +// -------------- +// Logical shift left one bit. +def LSL : InstAlias<"lsl\t$rd", (ADDRdRr GPR8:$rd, GPR8:$rd)>; + +def ROL : InstAlias<"rol\t$rd", (ADCRdRr GPR8:$rd, GPR8:$rd)>; + +// SER Rd +// Alias for LDI Rd, 0xff +// --------- +// Sets all bits in a register. +def : InstAlias<"ser\t$rd", (LDIRdK LD8:$rd, 0xff), 0>; + +let Defs = [SREG] in +def BSETs : FS<0, + (outs), + (ins i8imm:$s), + "bset\t$s", + []>; + +let Defs = [SREG] in +def BCLRs : FS<1, + (outs), + (ins i8imm:$s), + "bclr\t$s", + []>; + +// Set/clear aliases for the carry (C) status flag (bit 0). +def : InstAlias<"sec", (BSETs 0)>; +def : InstAlias<"clc", (BCLRs 0)>; + +// Set/clear aliases for the zero (Z) status flag (bit 1). +def : InstAlias<"sez", (BSETs 1)>; +def : InstAlias<"clz", (BCLRs 1)>; + +// Set/clear aliases for the negative (N) status flag (bit 2). 
+def : InstAlias<"sen", (BSETs 2)>; +def : InstAlias<"cln", (BCLRs 2)>; + +// Set/clear aliases for the overflow (V) status flag (bit 3). +def : InstAlias<"sev", (BSETs 3)>; +def : InstAlias<"clv", (BCLRs 3)>; + +// Set/clear aliases for the signed (S) status flag (bit 4). +def : InstAlias<"ses", (BSETs 4)>; +def : InstAlias<"cls", (BCLRs 4)>; + +// Set/clear aliases for the half-carry (H) status flag (bit 5). +def : InstAlias<"seh", (BSETs 5)>; +def : InstAlias<"clh", (BCLRs 5)>; + +// Set/clear aliases for the T status flag (bit 6). +def : InstAlias<"set", (BSETs 6)>; +def : InstAlias<"clt", (BCLRs 6)>; + +// Set/clear aliases for the interrupt (I) status flag (bit 7). +def : InstAlias<"sei", (BSETs 7)>; +def : InstAlias<"cli", (BCLRs 7)>; + +//===----------------------------------------------------------------------===// +// Special/Control instructions +//===----------------------------------------------------------------------===// + +// BREAK +// Breakpoint instruction +// --------- +// <|1001|0101|1001|1000> +def BREAK : F16<0b1001010110011000, + (outs), + (ins), + "break", + []>, + Requires<[HasBREAK]>; + +// NOP +// No-operation instruction +// --------- +// <|0000|0000|0000|0000> +def NOP : F16<0b0000000000000000, + (outs), + (ins), + "nop", + []>; + +// SLEEP +// Sleep instruction +// --------- +// <|1001|0101|1000|1000> +def SLEEP : F16<0b1001010110001000, + (outs), + (ins), + "sleep", + []>; + +// WDR +// Watchdog reset +// --------- +// <|1001|0101|1010|1000> +def WDR : F16<0b1001010110101000, + (outs), + (ins), + "wdr", + []>; + +//===----------------------------------------------------------------------===// +// Pseudo instructions for later expansion +//===----------------------------------------------------------------------===// + +//:TODO: Optimize this for wider types AND optimize the following code +// compile int foo(char a, char b, char c, char d) {return d+b;} +// looks like a missed sext_inreg opportunity. +def SEXT : ExtensionPseudo< + (outs DREGS:$dst), + (ins GPR8:$src), + "sext\t$dst, $src", + [(set i16:$dst, (sext i8:$src)), (implicit SREG)] +>; + +def ZEXT : ExtensionPseudo< + (outs DREGS:$dst), + (ins GPR8:$src), + "zext\t$dst, $src", + [(set i16:$dst, (zext i8:$src)), (implicit SREG)] +>; + +// This pseudo gets expanded into a movw+adiw thus it clobbers SREG. +let Defs = [SREG], + hasSideEffects = 0 in +def FRMIDX : Pseudo<(outs DLDREGS:$dst), + (ins DLDREGS:$src, i16imm:$src2), + "frmidx\t$dst, $src, $src2", + []>; + +// This pseudo is either converted to a regular store or a push which clobbers +// SP. +def STDSPQRr : StorePseudo< + (outs), + (ins memspi:$dst, GPR8:$src), + "stdstk\t$dst, $src", + [(store i8:$src, addr:$dst)] +>; + +// This pseudo is either converted to a regular store or a push which clobbers +// SP. +def STDWSPQRr : StorePseudo< + (outs), + (ins memspi:$dst, DREGS:$src), + "stdwstk\t$dst, $src", + [(store i16:$src, addr:$dst)] +>; + +// SP read/write pseudos. 
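+// Both expand to in/out accesses of SPL/SPH; the SPWRITE expansion normally
+// masks interrupts around the two-byte update so the stack pointer is never
+// observed half-written (a sketch of the intent, not of the exact sequence).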
+let hasSideEffects = 0 in +{ + let Uses = [SP] in + def SPREAD : Pseudo< + (outs DREGS:$dst), + (ins GPRSP:$src), + "spread\t$dst, $src", + [] + >; + + let Defs = [SP] in + def SPWRITE : Pseudo< + (outs GPRSP:$dst), + (ins DREGS:$src), + "spwrite\t$dst, $src", + []>; +} + +def Select8 : SelectPseudo< + (outs GPR8:$dst), + (ins GPR8:$src, GPR8:$src2, i8imm:$cc), + "# Select8 PSEUDO", + [(set i8:$dst, (AVRselectcc i8:$src, i8:$src2, imm:$cc))] +>; + +def Select16 : SelectPseudo< + (outs DREGS:$dst), + (ins DREGS:$src, DREGS:$src2, i8imm:$cc), + "# Select16 PSEUDO", + [(set i16:$dst, (AVRselectcc i16:$src, i16:$src2, imm:$cc))] +>; + +def Lsl8 : ShiftPseudo< + (outs GPR8:$dst), + (ins GPR8:$src, GPR8:$cnt), + "# Lsl8 PSEUDO", + [(set i8:$dst, (AVRlslLoop i8:$src, i8:$cnt))] +>; + +def Lsl16 : ShiftPseudo< + (outs DREGS:$dst), + (ins DREGS:$src, GPR8:$cnt), + "# Lsl16 PSEUDO", + [(set i16:$dst, (AVRlslLoop i16:$src, i8:$cnt))] +>; + +def Lsr8 : ShiftPseudo< + (outs GPR8:$dst), + (ins GPR8:$src, GPR8:$cnt), + "# Lsr8 PSEUDO", + [(set i8:$dst, (AVRlsrLoop i8:$src, i8:$cnt))] +>; + +def Lsr16 : ShiftPseudo< + (outs DREGS:$dst), + (ins DREGS:$src, GPR8:$cnt), + "# Lsr16 PSEUDO", + [(set i16:$dst, (AVRlsrLoop i16:$src, i8:$cnt))] +>; + +def Rol8 : ShiftPseudo< + (outs GPR8:$dst), + (ins GPR8:$src, GPR8:$cnt), + "# Rol8 PSEUDO", + [(set i8:$dst, (AVRrolLoop i8:$src, i8:$cnt))] +>; + +def Rol16 : ShiftPseudo< + (outs DREGS:$dst), + (ins DREGS:$src, GPR8:$cnt), + "# Rol16 PSEUDO", + [(set i16:$dst, (AVRrolLoop i16:$src, i8:$cnt))] +>; + +def Ror8 : ShiftPseudo< + (outs GPR8:$dst), + (ins GPR8:$src, GPR8:$cnt), + "# Ror8 PSEUDO", + [(set i8:$dst, (AVRrorLoop i8:$src, i8:$cnt))] +>; + +def Ror16 : ShiftPseudo< + (outs DREGS:$dst), + (ins DREGS:$src, GPR8:$cnt), + "# Ror16 PSEUDO", + [(set i16:$dst, (AVRrorLoop i16:$src, i8:$cnt))] +>; + +def Asr8 : ShiftPseudo< + (outs GPR8:$dst), + (ins GPR8:$src, GPR8:$cnt), + "# Asr8 PSEUDO", + [(set i8:$dst, (AVRasrLoop i8:$src, i8:$cnt))] +>; + +def Asr16 : ShiftPseudo< + (outs DREGS:$dst), + (ins DREGS:$src, GPR8:$cnt), + "# Asr16 PSEUDO", + [(set i16:$dst, (AVRasrLoop i16:$src, i8:$cnt))] +>; + + +//===----------------------------------------------------------------------===// +// Non-Instruction Patterns +//===----------------------------------------------------------------------===// + +//:TODO: look in x86InstrCompiler.td for odd encoding trick related to +// add x, 128 -> sub x, -128. Clang is emitting an eor for this (ldi+eor) + +// the add instruction always writes the carry flag +def : Pat<(addc i8:$src, i8:$src2), + (ADDRdRr i8:$src, i8:$src2)>; +def : Pat<(addc DREGS:$src, DREGS:$src2), + (ADDWRdRr DREGS:$src, DREGS:$src2)>; + +// all sub instruction variants always writes the carry flag +def : Pat<(subc i8:$src, i8:$src2), + (SUBRdRr i8:$src, i8:$src2)>; +def : Pat<(subc i16:$src, i16:$src2), + (SUBWRdRr i16:$src, i16:$src2)>; +def : Pat<(subc i8:$src, imm:$src2), + (SUBIRdK i8:$src, imm:$src2)>; +def : Pat<(subc i16:$src, imm:$src2), + (SUBIWRdK i16:$src, imm:$src2)>; + +// These patterns convert add (x, -imm) to sub (x, imm) since we dont have +// any add with imm instructions. Also take care of the adiw/sbiw instructions. 
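+// e.g. `x + 10` is selected as subi/sbci with the constant -10, while
+// `x + (-5)` can use `sbiw Rd, 5` on devices that provide ADIW/SBIW.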
+def : Pat<(add i16:$src1, imm0_63_neg:$src2), + (SBIWRdK i16:$src1, (imm0_63_neg:$src2))>; +def : Pat<(add i16:$src1, imm:$src2), + (SUBIWRdK i16:$src1, (imm16_neg_XFORM imm:$src2))>; +def : Pat<(addc i16:$src1, imm:$src2), + (SUBIWRdK i16:$src1, (imm16_neg_XFORM imm:$src2))>; +def : Pat<(adde i16:$src1, imm:$src2), + (SBCIWRdK i16:$src1, (imm16_neg_XFORM imm:$src2))>; + +def : Pat<(add i8:$src1, imm:$src2), + (SUBIRdK i8:$src1, (imm8_neg_XFORM imm:$src2))>; +def : Pat<(addc i8:$src1, imm:$src2), + (SUBIRdK i8:$src1, (imm8_neg_XFORM imm:$src2))>; +def : Pat<(adde i8:$src1, imm:$src2), + (SBCIRdK i8:$src1, (imm8_neg_XFORM imm:$src2))>; + +// Calls. +def : Pat<(AVRcall (i16 tglobaladdr:$dst)), + (CALLk tglobaladdr:$dst)>; +def : Pat<(AVRcall (i16 texternalsym:$dst)), + (CALLk texternalsym:$dst)>; + +// `anyext` +def : Pat<(i16 (anyext i8:$src)), + (INSERT_SUBREG (i16 (IMPLICIT_DEF)), i8:$src, sub_lo)>; + +// `trunc` +def : Pat<(i8 (trunc i16:$src)), + (EXTRACT_SUBREG i16:$src, sub_lo)>; + +// sext_inreg +def : Pat<(sext_inreg i16:$src, i8), + (SEXT (i8 (EXTRACT_SUBREG i16:$src, sub_lo)))>; + +// GlobalAddress +def : Pat<(i16 (AVRWrapper tglobaladdr:$dst)), + (LDIWRdK tglobaladdr:$dst)>; +def : Pat<(add i16:$src, (AVRWrapper tglobaladdr:$src2)), + (SUBIWRdK i16:$src, tglobaladdr:$src2)>; +def : Pat<(i8 (load (AVRWrapper tglobaladdr:$dst))), + (LDSRdK tglobaladdr:$dst)>; +def : Pat<(i16 (load (AVRWrapper tglobaladdr:$dst))), + (LDSWRdK tglobaladdr:$dst)>; +def : Pat<(store i8:$src, (i16 (AVRWrapper tglobaladdr:$dst))), + (STSKRr tglobaladdr:$dst, i8:$src)>; +def : Pat<(store i16:$src, (i16 (AVRWrapper tglobaladdr:$dst))), + (STSWKRr tglobaladdr:$dst, i16:$src)>; + +// BlockAddress +def : Pat<(i16 (AVRWrapper tblockaddress:$dst)), + (LDIWRdK tblockaddress:$dst)>; + +// hi-reg truncation : trunc(int16 >> 8) +//:FIXME: i think it's better to emit an extract subreg node in the DAG than +// all this mess once we get optimal shift code +// lol... I think so, too. [@agnat] +def : Pat<(i8 (trunc (AVRlsr (AVRlsr (AVRlsr (AVRlsr (AVRlsr (AVRlsr (AVRlsr + (AVRlsr DREGS:$src)))))))))), + (EXTRACT_SUBREG DREGS:$src, sub_hi)>; + +// :FIXME: DAGCombiner produces an shl node after legalization from these seq: +// BR_JT -> (mul x, 2) -> (shl x, 1) +def : Pat<(shl i16:$src1, (i8 1)), + (LSLWRd i16:$src1)>; + +// Lowering of 'tst' node to 'TST' instruction. +// TST is an alias of AND Rd, Rd. +def : Pat<(AVRtst i8:$rd), + (ANDRdRr GPR8:$rd, GPR8:$rd)>; + +// Lowering of 'lsl' node to 'LSL' instruction. +// LSL is an alias of 'ADD Rd, Rd' +def : Pat<(AVRlsl i8:$rd), + (ADDRdRr GPR8:$rd, GPR8:$rd)>; + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRMCInstLower.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRMCInstLower.cpp new file mode 100644 index 000000000000..49a318762b63 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRMCInstLower.cpp @@ -0,0 +1,111 @@ +//===-- AVRMCInstLower.cpp - Convert AVR MachineInstr to an MCInst --------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains code to lower AVR MachineInstrs to their corresponding +// MCInst records. 
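+// For example, a global-address operand carrying the MO_LO flag is lowered
+// to an expression such as lo8(symbol + offset), or pm_lo8(...) when the
+// symbol is a function in program memory (names here follow the code below).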
+// +//===----------------------------------------------------------------------===// + +#include "AVRMCInstLower.h" + +#include "AVRInstrInfo.h" +#include "MCTargetDesc/AVRMCExpr.h" + +#include "llvm/CodeGen/AsmPrinter.h" +#include "llvm/IR/Mangler.h" +#include "llvm/MC/MCInst.h" +#include "llvm/Support/ErrorHandling.h" + +namespace llvm { + +MCOperand AVRMCInstLower::lowerSymbolOperand(const MachineOperand &MO, + MCSymbol *Sym) const { + unsigned char TF = MO.getTargetFlags(); + const MCExpr *Expr = MCSymbolRefExpr::create(Sym, Ctx); + + bool IsNegated = false; + if (TF & AVRII::MO_NEG) { IsNegated = true; } + + if (!MO.isJTI() && MO.getOffset()) { + Expr = MCBinaryExpr::createAdd( + Expr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx); + } + + bool IsFunction = MO.isGlobal() && isa<Function>(MO.getGlobal()); + + if (TF & AVRII::MO_LO) { + if (IsFunction) { + // N.B. Should we use _GS fixups here to cope with >128k progmem? + Expr = AVRMCExpr::create(AVRMCExpr::VK_AVR_PM_LO8, Expr, IsNegated, Ctx); + } else { + Expr = AVRMCExpr::create(AVRMCExpr::VK_AVR_LO8, Expr, IsNegated, Ctx); + } + } else if (TF & AVRII::MO_HI) { + if (IsFunction) { + // N.B. Should we use _GS fixups here to cope with >128k progmem? + Expr = AVRMCExpr::create(AVRMCExpr::VK_AVR_PM_HI8, Expr, IsNegated, Ctx); + } else { + Expr = AVRMCExpr::create(AVRMCExpr::VK_AVR_HI8, Expr, IsNegated, Ctx); + } + } else if (TF != 0) { + llvm_unreachable("Unknown target flag on symbol operand"); + } + + return MCOperand::createExpr(Expr); +} + +void AVRMCInstLower::lowerInstruction(const MachineInstr &MI, MCInst &OutMI) const { + OutMI.setOpcode(MI.getOpcode()); + + for (MachineOperand const &MO : MI.operands()) { + MCOperand MCOp; + + switch (MO.getType()) { + default: + MI.print(errs()); + llvm_unreachable("unknown operand type"); + case MachineOperand::MO_Register: + // Ignore all implicit register operands. + if (MO.isImplicit()) + continue; + MCOp = MCOperand::createReg(MO.getReg()); + break; + case MachineOperand::MO_Immediate: + MCOp = MCOperand::createImm(MO.getImm()); + break; + case MachineOperand::MO_GlobalAddress: + MCOp = lowerSymbolOperand(MO, Printer.getSymbol(MO.getGlobal())); + break; + case MachineOperand::MO_ExternalSymbol: + MCOp = lowerSymbolOperand( + MO, Printer.GetExternalSymbolSymbol(MO.getSymbolName())); + break; + case MachineOperand::MO_MachineBasicBlock: + MCOp = MCOperand::createExpr( + MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), Ctx)); + break; + case MachineOperand::MO_RegisterMask: + continue; + case MachineOperand::MO_BlockAddress: + MCOp = lowerSymbolOperand( + MO, Printer.GetBlockAddressSymbol(MO.getBlockAddress())); + break; + case MachineOperand::MO_JumpTableIndex: + MCOp = lowerSymbolOperand(MO, Printer.GetJTISymbol(MO.getIndex())); + break; + case MachineOperand::MO_ConstantPoolIndex: + MCOp = lowerSymbolOperand(MO, Printer.GetCPISymbol(MO.getIndex())); + break; + } + + OutMI.addOperand(MCOp); + } +} + +} // end of namespace llvm + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRMCInstLower.h b/contrib/llvm-project/llvm/lib/Target/AVR/AVRMCInstLower.h new file mode 100644 index 000000000000..5e0f42ac16a7 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRMCInstLower.h @@ -0,0 +1,42 @@ +//===-- AVRMCInstLower.h - Lower MachineInstr to MCInst ---------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_MCINST_LOWER_H +#define LLVM_AVR_MCINST_LOWER_H + +#include "llvm/Support/Compiler.h" + +namespace llvm { + +class AsmPrinter; +class MachineInstr; +class MachineOperand; +class MCContext; +class MCInst; +class MCOperand; +class MCSymbol; + +/// Lowers `MachineInstr` objects into `MCInst` objects. +class AVRMCInstLower { +public: + AVRMCInstLower(MCContext &Ctx, AsmPrinter &Printer) + : Ctx(Ctx), Printer(Printer) {} + + /// Lowers a `MachineInstr` into a `MCInst`. + void lowerInstruction(const MachineInstr &MI, MCInst &OutMI) const; + MCOperand lowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const; + +private: + MCContext &Ctx; + AsmPrinter &Printer; +}; + +} // end namespace llvm + +#endif // LLVM_AVR_MCINST_LOWER_H + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRMachineFunctionInfo.h b/contrib/llvm-project/llvm/lib/Target/AVR/AVRMachineFunctionInfo.h new file mode 100644 index 000000000000..5226e30491c3 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRMachineFunctionInfo.h @@ -0,0 +1,68 @@ +//===-- AVRMachineFuctionInfo.h - AVR machine function info -----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares AVR-specific per-machine-function information. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_MACHINE_FUNCTION_INFO_H +#define LLVM_AVR_MACHINE_FUNCTION_INFO_H + +#include "llvm/CodeGen/MachineFunction.h" + +namespace llvm { + +/// Contains AVR-specific information for each MachineFunction. +class AVRMachineFunctionInfo : public MachineFunctionInfo { + /// Indicates if a register has been spilled by the register + /// allocator. + bool HasSpills; + + /// Indicates if there are any fixed size allocas present. + /// Note that if there are only variable sized allocas this is set to false. + bool HasAllocas; + + /// Indicates if arguments passed using the stack are being + /// used inside the function. + bool HasStackArgs; + + /// Size of the callee-saved register portion of the + /// stack frame in bytes. + unsigned CalleeSavedFrameSize; + + /// FrameIndex for start of varargs area. 
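+  /// Used by the `va_start` lowering to locate the first variadic argument
+  /// passed on the stack.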
+ int VarArgsFrameIndex; + +public: + AVRMachineFunctionInfo() + : HasSpills(false), HasAllocas(false), HasStackArgs(false), + CalleeSavedFrameSize(0), VarArgsFrameIndex(0) {} + + explicit AVRMachineFunctionInfo(MachineFunction &MF) + : HasSpills(false), HasAllocas(false), HasStackArgs(false), + CalleeSavedFrameSize(0), VarArgsFrameIndex(0) {} + + bool getHasSpills() const { return HasSpills; } + void setHasSpills(bool B) { HasSpills = B; } + + bool getHasAllocas() const { return HasAllocas; } + void setHasAllocas(bool B) { HasAllocas = B; } + + bool getHasStackArgs() const { return HasStackArgs; } + void setHasStackArgs(bool B) { HasStackArgs = B; } + + unsigned getCalleeSavedFrameSize() const { return CalleeSavedFrameSize; } + void setCalleeSavedFrameSize(unsigned Bytes) { CalleeSavedFrameSize = Bytes; } + + int getVarArgsFrameIndex() const { return VarArgsFrameIndex; } + void setVarArgsFrameIndex(int Idx) { VarArgsFrameIndex = Idx; } +}; + +} // end llvm namespace + +#endif // LLVM_AVR_MACHINE_FUNCTION_INFO_H diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.cpp new file mode 100644 index 000000000000..a6b36f80485d --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.cpp @@ -0,0 +1,290 @@ +//===-- AVRRegisterInfo.cpp - AVR Register Information --------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the AVR implementation of the TargetRegisterInfo class. +// +//===----------------------------------------------------------------------===// + +#include "AVRRegisterInfo.h" + +#include "llvm/ADT/BitVector.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/IR/Function.h" +#include "llvm/CodeGen/TargetFrameLowering.h" + +#include "AVR.h" +#include "AVRInstrInfo.h" +#include "AVRTargetMachine.h" +#include "MCTargetDesc/AVRMCTargetDesc.h" + +#define GET_REGINFO_TARGET_DESC +#include "AVRGenRegisterInfo.inc" + +namespace llvm { + +AVRRegisterInfo::AVRRegisterInfo() : AVRGenRegisterInfo(0) {} + +const uint16_t * +AVRRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { + CallingConv::ID CC = MF->getFunction().getCallingConv(); + + return ((CC == CallingConv::AVR_INTR || CC == CallingConv::AVR_SIGNAL) + ? CSR_Interrupts_SaveList + : CSR_Normal_SaveList); +} + +const uint32_t * +AVRRegisterInfo::getCallPreservedMask(const MachineFunction &MF, + CallingConv::ID CC) const { + return ((CC == CallingConv::AVR_INTR || CC == CallingConv::AVR_SIGNAL) + ? CSR_Interrupts_RegMask + : CSR_Normal_RegMask); +} + +BitVector AVRRegisterInfo::getReservedRegs(const MachineFunction &MF) const { + BitVector Reserved(getNumRegs()); + + // Reserve the intermediate result registers r1 and r2 + // The result of instructions like 'mul' is always stored here. + Reserved.set(AVR::R0); + Reserved.set(AVR::R1); + Reserved.set(AVR::R1R0); + + // Reserve the stack pointer. 
+ Reserved.set(AVR::SPL); + Reserved.set(AVR::SPH); + Reserved.set(AVR::SP); + + // We tenatively reserve the frame pointer register r29:r28 because the + // function may require one, but we cannot tell until register allocation + // is complete, which can be too late. + // + // Instead we just unconditionally reserve the Y register. + // + // TODO: Write a pass to enumerate functions which reserved the Y register + // but didn't end up needing a frame pointer. In these, we can + // convert one or two of the spills inside to use the Y register. + Reserved.set(AVR::R28); + Reserved.set(AVR::R29); + Reserved.set(AVR::R29R28); + + return Reserved; +} + +const TargetRegisterClass * +AVRRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC, + const MachineFunction &MF) const { + const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); + if (TRI->isTypeLegalForClass(*RC, MVT::i16)) { + return &AVR::DREGSRegClass; + } + + if (TRI->isTypeLegalForClass(*RC, MVT::i8)) { + return &AVR::GPR8RegClass; + } + + llvm_unreachable("Invalid register size"); +} + +/// Fold a frame offset shared between two add instructions into a single one. +static void foldFrameOffset(MachineBasicBlock::iterator &II, int &Offset, unsigned DstReg) { + MachineInstr &MI = *II; + int Opcode = MI.getOpcode(); + + // Don't bother trying if the next instruction is not an add or a sub. + if ((Opcode != AVR::SUBIWRdK) && (Opcode != AVR::ADIWRdK)) { + return; + } + + // Check that DstReg matches with next instruction, otherwise the instruction + // is not related to stack address manipulation. + if (DstReg != MI.getOperand(0).getReg()) { + return; + } + + // Add the offset in the next instruction to our offset. + switch (Opcode) { + case AVR::SUBIWRdK: + Offset += -MI.getOperand(2).getImm(); + break; + case AVR::ADIWRdK: + Offset += MI.getOperand(2).getImm(); + break; + } + + // Finally remove the instruction. + II++; + MI.eraseFromParent(); +} + +void AVRRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, + int SPAdj, unsigned FIOperandNum, + RegScavenger *RS) const { + assert(SPAdj == 0 && "Unexpected SPAdj value"); + + MachineInstr &MI = *II; + DebugLoc dl = MI.getDebugLoc(); + MachineBasicBlock &MBB = *MI.getParent(); + const MachineFunction &MF = *MBB.getParent(); + const AVRTargetMachine &TM = (const AVRTargetMachine &)MF.getTarget(); + const TargetInstrInfo &TII = *TM.getSubtargetImpl()->getInstrInfo(); + const MachineFrameInfo &MFI = MF.getFrameInfo(); + const TargetFrameLowering *TFI = TM.getSubtargetImpl()->getFrameLowering(); + int FrameIndex = MI.getOperand(FIOperandNum).getIndex(); + int Offset = MFI.getObjectOffset(FrameIndex); + + // Add one to the offset because SP points to an empty slot. + Offset += MFI.getStackSize() - TFI->getOffsetOfLocalArea() + 1; + // Fold incoming offset. + Offset += MI.getOperand(FIOperandNum + 1).getImm(); + + // This is actually "load effective address" of the stack slot + // instruction. We have only two-address instructions, thus we need to + // expand it into move + add. + if (MI.getOpcode() == AVR::FRMIDX) { + MI.setDesc(TII.get(AVR::MOVWRdRr)); + MI.getOperand(FIOperandNum).ChangeToRegister(AVR::R29R28, false); + MI.RemoveOperand(2); + + assert(Offset > 0 && "Invalid offset"); + + // We need to materialize the offset via an add instruction. + unsigned Opcode; + unsigned DstReg = MI.getOperand(0).getReg(); + assert(DstReg != AVR::R29R28 && "Dest reg cannot be the frame pointer"); + + II++; // Skip over the FRMIDX (and now MOVW) instruction. 
+ + // Generally, to load a frame address two add instructions are emitted that + // could get folded into a single one: + // movw r31:r30, r29:r28 + // adiw r31:r30, 29 + // adiw r31:r30, 16 + // to: + // movw r31:r30, r29:r28 + // adiw r31:r30, 45 + if (II != MBB.end()) + foldFrameOffset(II, Offset, DstReg); + + // Select the best opcode based on DstReg and the offset size. + switch (DstReg) { + case AVR::R25R24: + case AVR::R27R26: + case AVR::R31R30: { + if (isUInt<6>(Offset)) { + Opcode = AVR::ADIWRdK; + break; + } + LLVM_FALLTHROUGH; + } + default: { + // This opcode will get expanded into a pair of subi/sbci. + Opcode = AVR::SUBIWRdK; + Offset = -Offset; + break; + } + } + + MachineInstr *New = BuildMI(MBB, II, dl, TII.get(Opcode), DstReg) + .addReg(DstReg, RegState::Kill) + .addImm(Offset); + New->getOperand(3).setIsDead(); + + return; + } + + // If the offset is too big we have to adjust and restore the frame pointer + // to materialize a valid load/store with displacement. + //:TODO: consider using only one adiw/sbiw chain for more than one frame index + if (Offset > 62) { + unsigned AddOpc = AVR::ADIWRdK, SubOpc = AVR::SBIWRdK; + int AddOffset = Offset - 63 + 1; + + // For huge offsets where adiw/sbiw cannot be used use a pair of subi/sbci. + if ((Offset - 63 + 1) > 63) { + AddOpc = AVR::SUBIWRdK; + SubOpc = AVR::SUBIWRdK; + AddOffset = -AddOffset; + } + + // It is possible that the spiller places this frame instruction in between + // a compare and branch, invalidating the contents of SREG set by the + // compare instruction because of the add/sub pairs. Conservatively save and + // restore SREG before and after each add/sub pair. + BuildMI(MBB, II, dl, TII.get(AVR::INRdA), AVR::R0).addImm(0x3f); + + MachineInstr *New = BuildMI(MBB, II, dl, TII.get(AddOpc), AVR::R29R28) + .addReg(AVR::R29R28, RegState::Kill) + .addImm(AddOffset); + New->getOperand(3).setIsDead(); + + // Restore SREG. + BuildMI(MBB, std::next(II), dl, TII.get(AVR::OUTARr)) + .addImm(0x3f) + .addReg(AVR::R0, RegState::Kill); + + // No need to set SREG as dead here otherwise if the next instruction is a + // cond branch it will be using a dead register. + BuildMI(MBB, std::next(II), dl, TII.get(SubOpc), AVR::R29R28) + .addReg(AVR::R29R28, RegState::Kill) + .addImm(Offset - 63 + 1); + + Offset = 62; + } + + MI.getOperand(FIOperandNum).ChangeToRegister(AVR::R29R28, false); + assert(isUInt<6>(Offset) && "Offset is out of range"); + MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset); +} + +Register AVRRegisterInfo::getFrameRegister(const MachineFunction &MF) const { + const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); + if (TFI->hasFP(MF)) { + // The Y pointer register + return AVR::R28; + } + + return AVR::SP; +} + +const TargetRegisterClass * +AVRRegisterInfo::getPointerRegClass(const MachineFunction &MF, + unsigned Kind) const { + // FIXME: Currently we're using avr-gcc as reference, so we restrict + // ptrs to Y and Z regs. 
Though avr-gcc has buggy implementation + // of memory constraint, so we can fix it and bit avr-gcc here ;-) + return &AVR::PTRDISPREGSRegClass; +} + +void AVRRegisterInfo::splitReg(unsigned Reg, + unsigned &LoReg, + unsigned &HiReg) const { + assert(AVR::DREGSRegClass.contains(Reg) && "can only split 16-bit registers"); + + LoReg = getSubReg(Reg, AVR::sub_lo); + HiReg = getSubReg(Reg, AVR::sub_hi); +} + +bool AVRRegisterInfo::shouldCoalesce(MachineInstr *MI, + const TargetRegisterClass *SrcRC, + unsigned SubReg, + const TargetRegisterClass *DstRC, + unsigned DstSubReg, + const TargetRegisterClass *NewRC, + LiveIntervals &LIS) const { + if(this->getRegClass(AVR::PTRDISPREGSRegClassID)->hasSubClassEq(NewRC)) { + return false; + } + + return TargetRegisterInfo::shouldCoalesce(MI, SrcRC, SubReg, DstRC, DstSubReg, NewRC, LIS); +} + +} // end of namespace llvm diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.h b/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.h new file mode 100644 index 000000000000..8e6e63af3d57 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.h @@ -0,0 +1,69 @@ +//===-- AVRRegisterInfo.h - AVR Register Information Impl -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the AVR implementation of the TargetRegisterInfo class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_REGISTER_INFO_H +#define LLVM_AVR_REGISTER_INFO_H + +#include "llvm/CodeGen/TargetRegisterInfo.h" + +#define GET_REGINFO_HEADER +#include "AVRGenRegisterInfo.inc" + +namespace llvm { + +/// Utilities relating to AVR registers. +class AVRRegisterInfo : public AVRGenRegisterInfo { +public: + AVRRegisterInfo(); + +public: + const uint16_t * + getCalleeSavedRegs(const MachineFunction *MF = 0) const override; + const uint32_t *getCallPreservedMask(const MachineFunction &MF, + CallingConv::ID CC) const override; + BitVector getReservedRegs(const MachineFunction &MF) const override; + + const TargetRegisterClass * + getLargestLegalSuperClass(const TargetRegisterClass *RC, + const MachineFunction &MF) const override; + + /// Stack Frame Processing Methods + void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj, + unsigned FIOperandNum, + RegScavenger *RS = NULL) const override; + + Register getFrameRegister(const MachineFunction &MF) const override; + + const TargetRegisterClass * + getPointerRegClass(const MachineFunction &MF, + unsigned Kind = 0) const override; + + /// Splits a 16-bit `DREGS` register into the lo/hi register pair. + /// \param Reg A 16-bit register to split. 
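+  /// \param LoReg Receives the low byte, i.e. the `sub_lo` sub-register.
+  /// \param HiReg Receives the high byte, i.e. the `sub_hi` sub-register.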
+ void splitReg(unsigned Reg, unsigned &LoReg, unsigned &HiReg) const; + + bool trackLivenessAfterRegAlloc(const MachineFunction &) const override { + return true; + } + + bool shouldCoalesce(MachineInstr *MI, + const TargetRegisterClass *SrcRC, + unsigned SubReg, + const TargetRegisterClass *DstRC, + unsigned DstSubReg, + const TargetRegisterClass *NewRC, + LiveIntervals &LIS) const override; +}; + +} // end namespace llvm + +#endif // LLVM_AVR_REGISTER_INFO_H diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.td b/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.td new file mode 100644 index 000000000000..ea38fedd22ce --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.td @@ -0,0 +1,230 @@ +//===-- AVRRegisterInfo.td - AVR Register defs -------------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Declarations that describe the AVR register file +//===----------------------------------------------------------------------===// + +// 8-bit General purpose register definition. +class AVRReg<bits<16> num, + string name, + list<Register> subregs = [], + list<string> altNames = []> + : RegisterWithSubRegs<name, subregs> +{ + field bits<16> Num = num; + + let HWEncoding = num; + let Namespace = "AVR"; + let SubRegs = subregs; + let AltNames = altNames; +} + +// Subregister indices. +let Namespace = "AVR" in +{ + def sub_lo : SubRegIndex<8>; + def sub_hi : SubRegIndex<8, 8>; +} + +let Namespace = "AVR" in { + def ptr : RegAltNameIndex; +} + + +//===----------------------------------------------------------------------===// +// 8-bit general purpose registers +//===----------------------------------------------------------------------===// + +def R0 : AVRReg<0, "r0">, DwarfRegNum<[0]>; +def R1 : AVRReg<1, "r1">, DwarfRegNum<[1]>; +def R2 : AVRReg<2, "r2">, DwarfRegNum<[2]>; +def R3 : AVRReg<3, "r3">, DwarfRegNum<[3]>; +def R4 : AVRReg<4, "r4">, DwarfRegNum<[4]>; +def R5 : AVRReg<5, "r5">, DwarfRegNum<[5]>; +def R6 : AVRReg<6, "r6">, DwarfRegNum<[6]>; +def R7 : AVRReg<7, "r7">, DwarfRegNum<[7]>; +def R8 : AVRReg<8, "r8">, DwarfRegNum<[8]>; +def R9 : AVRReg<9, "r9">, DwarfRegNum<[9]>; +def R10 : AVRReg<10, "r10">, DwarfRegNum<[10]>; +def R11 : AVRReg<11, "r11">, DwarfRegNum<[11]>; +def R12 : AVRReg<12, "r12">, DwarfRegNum<[12]>; +def R13 : AVRReg<13, "r13">, DwarfRegNum<[13]>; +def R14 : AVRReg<14, "r14">, DwarfRegNum<[14]>; +def R15 : AVRReg<15, "r15">, DwarfRegNum<[15]>; +def R16 : AVRReg<16, "r16">, DwarfRegNum<[16]>; +def R17 : AVRReg<17, "r17">, DwarfRegNum<[17]>; +def R18 : AVRReg<18, "r18">, DwarfRegNum<[18]>; +def R19 : AVRReg<19, "r19">, DwarfRegNum<[19]>; +def R20 : AVRReg<20, "r20">, DwarfRegNum<[20]>; +def R21 : AVRReg<21, "r21">, DwarfRegNum<[21]>; +def R22 : AVRReg<22, "r22">, DwarfRegNum<[22]>; +def R23 : AVRReg<23, "r23">, DwarfRegNum<[23]>; +def R24 : AVRReg<24, "r24">, DwarfRegNum<[24]>; +def R25 : AVRReg<25, "r25">, DwarfRegNum<[25]>; +def R26 : AVRReg<26, "r26">, DwarfRegNum<[26]>; +def R27 : AVRReg<27, "r27">, DwarfRegNum<[27]>; +def R28 : AVRReg<28, "r28">, DwarfRegNum<[28]>; +def R29 : AVRReg<29, "r29">, DwarfRegNum<[29]>; +def R30 : AVRReg<30, "r30">, DwarfRegNum<[30]>; +def 
R31 : AVRReg<31, "r31">, DwarfRegNum<[31]>; +def SPL : AVRReg<32, "SPL">, DwarfRegNum<[32]>; +def SPH : AVRReg<33, "SPH">, DwarfRegNum<[33]>; + +let SubRegIndices = [sub_lo, sub_hi], +CoveredBySubRegs = 1 in +{ + // 16 bit GPR pairs. + def SP : AVRReg<32, "SP", [SPL, SPH]>, DwarfRegNum<[32]>; + + // The pointer registers (X,Y,Z) are a special case because they + // are printed as a `high:low` pair when a DREG is expected, + // but printed using `X`, `Y`, `Z` when a pointer register is expected. + let RegAltNameIndices = [ptr] in { + def R31R30 : AVRReg<30, "r31:r30", [R30, R31], ["Z"]>, DwarfRegNum<[30]>; + def R29R28 : AVRReg<28, "r29:r28", [R28, R29], ["Y"]>, DwarfRegNum<[28]>; + def R27R26 : AVRReg<26, "r27:r26", [R26, R27], ["X"]>, DwarfRegNum<[26]>; + } + def R25R24 : AVRReg<24, "r25:r24", [R24, R25]>, DwarfRegNum<[24]>; + def R23R22 : AVRReg<22, "r23:r22", [R22, R23]>, DwarfRegNum<[22]>; + def R21R20 : AVRReg<20, "r21:r20", [R20, R21]>, DwarfRegNum<[20]>; + def R19R18 : AVRReg<18, "r19:r18", [R18, R19]>, DwarfRegNum<[18]>; + def R17R16 : AVRReg<16, "r17:r16", [R16, R17]>, DwarfRegNum<[16]>; + def R15R14 : AVRReg<14, "r15:r14", [R14, R15]>, DwarfRegNum<[14]>; + def R13R12 : AVRReg<12, "r13:r12", [R12, R13]>, DwarfRegNum<[12]>; + def R11R10 : AVRReg<10, "r11:r10", [R10, R11]>, DwarfRegNum<[10]>; + def R9R8 : AVRReg<8, "r9:r8", [R8, R9]>, DwarfRegNum<[8]>; + def R7R6 : AVRReg<6, "r7:r6", [R6, R7]>, DwarfRegNum<[6]>; + def R5R4 : AVRReg<4, "r5:r4", [R4, R5]>, DwarfRegNum<[4]>; + def R3R2 : AVRReg<2, "r3:r2", [R2, R3]>, DwarfRegNum<[2]>; + def R1R0 : AVRReg<0, "r1:r0", [R0, R1]>, DwarfRegNum<[0]>; +} + +//===----------------------------------------------------------------------===// +// Register Classes +//===----------------------------------------------------------------------===// + +// Main 8-bit register class. +def GPR8 : RegisterClass<"AVR", [i8], 8, + ( + // Return value and argument registers. + add R24, R25, R18, R19, R20, R21, R22, R23, + // Scratch registers. + R30, R31, R26, R27, + // Callee saved registers. + R28, R29, R17, R16, R15, R14, R13, R12, R11, R10, + R9, R8, R7, R6, R5, R4, R3, R2, R0, R1 + )>; + +// Simple lower registers r0..r15 +def GPR8lo : RegisterClass<"AVR", [i8], 8, + ( + add R15, R14, R13, R12, R11, R10, R9, R8, R7, R6, R5, R4, R3, R2, R0, R1 + )>; + +// 8-bit register class for instructions which take immediates. +def LD8 : RegisterClass<"AVR", [i8], 8, + ( + // Return value and arguments. + add R24, R25, R18, R19, R20, R21, R22, R23, + // Scratch registers. + R30, R31, R26, R27, + // Callee saved registers. + R28, R29, R17, R16 + )>; + +// Simple lower registers r16..r23 +def LD8lo : RegisterClass<"AVR", [i8], 8, + ( + add R23, R22, R21, R20, R19, R18, R17, R16 + )>; + +// Main 16-bit pair register class. +def DREGS : RegisterClass<"AVR", [i16], 8, + ( + // Return value and arguments. + add R25R24, R19R18, R21R20, R23R22, + // Scratch registers. + R31R30, R27R26, + // Callee saved registers. + R29R28, R17R16, R15R14, R13R12, R11R10, + R9R8, R7R6, R5R4, R3R2, R1R0 + )>; + +// The 16-bit DREGS register class, excluding the Z pointer register. +// +// This is used by instructions which cause high pointer register +// contention which leads to an assertion in the register allocator. +// +// There is no technical reason why instructions that use this class +// cannot use Z; it's simply a workaround a regalloc bug. +// +// More information can be found in PR39553. 
+def DREGS_WITHOUT_YZ_WORKAROUND : RegisterClass<"AVR", [i16], 8, + ( + // Return value and arguments. + add R25R24, R19R18, R21R20, R23R22, + // Scratch registers. + R27R26, + // Callee saved registers. + R17R16, R15R14, R13R12, R11R10, + R9R8, R7R6, R5R4, R3R2, R1R0 + )>; + +// 16-bit register class for immediate instructions. +def DLDREGS : RegisterClass<"AVR", [i16], 8, + ( + // Return value and arguments. + add R25R24, R19R18, R21R20, R23R22, + // Scratch registers. + R31R30, R27R26, + // Callee saved registers. + R29R28, R17R16 + )>; + +// 16-bit register class for the adiw/sbiw instructions. +def IWREGS : RegisterClass<"AVR", [i16], 8, + ( + // Return value and arguments. + add R25R24, + // Scratch registers. + R31R30, R27R26, + // Callee saved registers. + R29R28 + )>; + +// 16-bit register class for the ld and st instructions. +// AKA X,Y, and Z +def PTRREGS : RegisterClass<"AVR", [i16], 8, + ( + add R27R26, // X + R29R28, // Y + R31R30 // Z + ), ptr>; + +// 16-bit register class for the ldd and std instructions. +// AKA Y and Z. +def PTRDISPREGS : RegisterClass<"AVR", [i16], 8, + ( + add R31R30, R29R28 + ), ptr>; + +// We have a bunch of instructions with an explicit Z register argument. We +// model this using a register class containing only the Z register. +def ZREG : RegisterClass<"AVR", [i16], 8, (add R31R30)>; + +// Register class used for the stack read pseudo instruction. +def GPRSP: RegisterClass<"AVR", [i16], 8, (add SP)>; + +// Status register. +def SREG : AVRReg<14, "FLAGS">, DwarfRegNum<[88]>; +def CCR : RegisterClass<"AVR", [i8], 8, (add SREG)> +{ + let CopyCost = -1; // Don't allow copying of status registers +} + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRRelaxMemOperations.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRRelaxMemOperations.cpp new file mode 100644 index 000000000000..6be901743e82 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRRelaxMemOperations.cpp @@ -0,0 +1,148 @@ +//===-- AVRRelaxMemOperations.cpp - Relax out of range loads/stores -------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains a pass which relaxes out of range memory operations into +// equivalent operations which handle bigger addresses. 
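+//
+// Currently the only relaxation is for STDWPtrQRr: a 16-bit store whose
+// displacement does not fit the 6-bit `std Y+q` field (q > 63) is rewritten
+// to push the pointer register, adjust it by the displacement, perform a
+// plain pointer-register store, and pop the original pointer value back.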
+// +//===----------------------------------------------------------------------===// + +#include "AVR.h" +#include "AVRInstrInfo.h" +#include "AVRTargetMachine.h" +#include "MCTargetDesc/AVRMCTargetDesc.h" + +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetRegisterInfo.h" + +using namespace llvm; + +#define AVR_RELAX_MEM_OPS_NAME "AVR memory operation relaxation pass" + +namespace { + +class AVRRelaxMem : public MachineFunctionPass { +public: + static char ID; + + AVRRelaxMem() : MachineFunctionPass(ID) { + initializeAVRRelaxMemPass(*PassRegistry::getPassRegistry()); + } + + bool runOnMachineFunction(MachineFunction &MF) override; + + StringRef getPassName() const override { return AVR_RELAX_MEM_OPS_NAME; } + +private: + typedef MachineBasicBlock Block; + typedef Block::iterator BlockIt; + + const TargetInstrInfo *TII; + + template <unsigned OP> bool relax(Block &MBB, BlockIt MBBI); + + bool runOnBasicBlock(Block &MBB); + bool runOnInstruction(Block &MBB, BlockIt MBBI); + + MachineInstrBuilder buildMI(Block &MBB, BlockIt MBBI, unsigned Opcode) { + return BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(Opcode)); + } +}; + +char AVRRelaxMem::ID = 0; + +bool AVRRelaxMem::runOnMachineFunction(MachineFunction &MF) { + bool Modified = false; + + const AVRSubtarget &STI = MF.getSubtarget<AVRSubtarget>(); + TII = STI.getInstrInfo(); + + for (Block &MBB : MF) { + bool BlockModified = runOnBasicBlock(MBB); + Modified |= BlockModified; + } + + return Modified; +} + +bool AVRRelaxMem::runOnBasicBlock(Block &MBB) { + bool Modified = false; + + BlockIt MBBI = MBB.begin(), E = MBB.end(); + while (MBBI != E) { + BlockIt NMBBI = std::next(MBBI); + Modified |= runOnInstruction(MBB, MBBI); + MBBI = NMBBI; + } + + return Modified; +} + +template <> +bool AVRRelaxMem::relax<AVR::STDWPtrQRr>(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + + MachineOperand &Ptr = MI.getOperand(0); + MachineOperand &Src = MI.getOperand(2); + int64_t Imm = MI.getOperand(1).getImm(); + + // We can definitely optimise this better. + if (Imm > 63) { + // Push the previous state of the pointer register. + // This instruction must preserve the value. + buildMI(MBB, MBBI, AVR::PUSHWRr) + .addReg(Ptr.getReg()); + + // Add the immediate to the pointer register. + buildMI(MBB, MBBI, AVR::SBCIWRdK) + .addReg(Ptr.getReg(), RegState::Define) + .addReg(Ptr.getReg()) + .addImm(-Imm); + + // Store the value in the source register to the address + // pointed to by the pointer register. + buildMI(MBB, MBBI, AVR::STWPtrRr) + .addReg(Ptr.getReg()) + .addReg(Src.getReg(), getKillRegState(Src.isKill())); + + // Pop the original state of the pointer register. 
+ buildMI(MBB, MBBI, AVR::POPWRd) + .addReg(Ptr.getReg(), getKillRegState(Ptr.isKill())); + + MI.removeFromParent(); + } + + return false; +} + +bool AVRRelaxMem::runOnInstruction(Block &MBB, BlockIt MBBI) { + MachineInstr &MI = *MBBI; + int Opcode = MBBI->getOpcode(); + +#define RELAX(Op) \ + case Op: \ + return relax<Op>(MBB, MI) + + switch (Opcode) { + RELAX(AVR::STDWPtrQRr); + } +#undef RELAX + return false; +} + +} // end of anonymous namespace + +INITIALIZE_PASS(AVRRelaxMem, "avr-relax-mem", + AVR_RELAX_MEM_OPS_NAME, false, false) + +namespace llvm { + +FunctionPass *createAVRRelaxMemPass() { return new AVRRelaxMem(); } + +} // end of namespace llvm diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRSelectionDAGInfo.h b/contrib/llvm-project/llvm/lib/Target/AVR/AVRSelectionDAGInfo.h new file mode 100644 index 000000000000..3e7bd57f10cf --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRSelectionDAGInfo.h @@ -0,0 +1,27 @@ +//===-- AVRSelectionDAGInfo.h - AVR SelectionDAG Info -----------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the AVR subclass for SelectionDAGTargetInfo. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_SELECTION_DAG_INFO_H +#define LLVM_AVR_SELECTION_DAG_INFO_H + +#include "llvm/CodeGen/SelectionDAGTargetInfo.h" + +namespace llvm { + +/// Holds information about the AVR instruction selection DAG. +class AVRSelectionDAGInfo : public SelectionDAGTargetInfo { +public: +}; + +} // end namespace llvm + +#endif // LLVM_AVR_SELECTION_DAG_INFO_H diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRSubtarget.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRSubtarget.cpp new file mode 100644 index 000000000000..6a41036fdd6c --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRSubtarget.cpp @@ -0,0 +1,54 @@ +//===-- AVRSubtarget.cpp - AVR Subtarget Information ----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements the AVR specific subclass of TargetSubtargetInfo. 
+// +//===----------------------------------------------------------------------===// + +#include "AVRSubtarget.h" + +#include "llvm/BinaryFormat/ELF.h" +#include "llvm/Support/TargetRegistry.h" + +#include "AVR.h" +#include "AVRTargetMachine.h" +#include "MCTargetDesc/AVRMCTargetDesc.h" + +#define DEBUG_TYPE "avr-subtarget" + +#define GET_SUBTARGETINFO_TARGET_DESC +#define GET_SUBTARGETINFO_CTOR +#include "AVRGenSubtargetInfo.inc" + +namespace llvm { + +AVRSubtarget::AVRSubtarget(const Triple &TT, const std::string &CPU, + const std::string &FS, const AVRTargetMachine &TM) + : AVRGenSubtargetInfo(TT, CPU, FS), InstrInfo(), FrameLowering(), + TLInfo(TM, initializeSubtargetDependencies(CPU, FS, TM)), TSInfo(), + + // Subtarget features + m_hasSRAM(false), m_hasJMPCALL(false), m_hasIJMPCALL(false), + m_hasEIJMPCALL(false), m_hasADDSUBIW(false), m_hasSmallStack(false), + m_hasMOVW(false), m_hasLPM(false), m_hasLPMX(false), m_hasELPM(false), + m_hasELPMX(false), m_hasSPM(false), m_hasSPMX(false), m_hasDES(false), + m_supportsRMW(false), m_supportsMultiplication(false), m_hasBREAK(false), + m_hasTinyEncoding(false), ELFArch(false), m_FeatureSetDummy(false) { + // Parse features string. + ParseSubtargetFeatures(CPU, FS); +} + +AVRSubtarget & +AVRSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS, + const TargetMachine &TM) { + // Parse features string. + ParseSubtargetFeatures(CPU, FS); + return *this; +} + +} // end of namespace llvm diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRSubtarget.h b/contrib/llvm-project/llvm/lib/Target/AVR/AVRSubtarget.h new file mode 100644 index 000000000000..da9289af7c8d --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRSubtarget.h @@ -0,0 +1,120 @@ +//===-- AVRSubtarget.h - Define Subtarget for the AVR -----------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the AVR specific subclass of TargetSubtargetInfo. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_SUBTARGET_H +#define LLVM_AVR_SUBTARGET_H + +#include "llvm/CodeGen/TargetSubtargetInfo.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/Target/TargetMachine.h" + +#include "AVRFrameLowering.h" +#include "AVRISelLowering.h" +#include "AVRInstrInfo.h" +#include "AVRSelectionDAGInfo.h" + +#define GET_SUBTARGETINFO_HEADER +#include "AVRGenSubtargetInfo.inc" + +namespace llvm { + +/// A specific AVR target MCU. +class AVRSubtarget : public AVRGenSubtargetInfo { +public: + //! Creates an AVR subtarget. + //! \param TT The target triple. + //! \param CPU The CPU to target. + //! \param FS The feature string. + //! \param TM The target machine. 
+ AVRSubtarget(const Triple &TT, const std::string &CPU, const std::string &FS, + const AVRTargetMachine &TM); + + const AVRInstrInfo *getInstrInfo() const override { return &InstrInfo; } + const TargetFrameLowering *getFrameLowering() const override { return &FrameLowering; } + const AVRTargetLowering *getTargetLowering() const override { return &TLInfo; } + const AVRSelectionDAGInfo *getSelectionDAGInfo() const override { return &TSInfo; } + const AVRRegisterInfo *getRegisterInfo() const override { return &InstrInfo.getRegisterInfo(); } + + /// Parses a subtarget feature string, setting appropriate options. + /// \note Definition of function is auto generated by `tblgen`. + void ParseSubtargetFeatures(StringRef CPU, StringRef FS); + + AVRSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS, + const TargetMachine &TM); + + // Subtarget feature getters. + // See AVR.td for details. + bool hasSRAM() const { return m_hasSRAM; } + bool hasJMPCALL() const { return m_hasJMPCALL; } + bool hasIJMPCALL() const { return m_hasIJMPCALL; } + bool hasEIJMPCALL() const { return m_hasEIJMPCALL; } + bool hasADDSUBIW() const { return m_hasADDSUBIW; } + bool hasSmallStack() const { return m_hasSmallStack; } + bool hasMOVW() const { return m_hasMOVW; } + bool hasLPM() const { return m_hasLPM; } + bool hasLPMX() const { return m_hasLPMX; } + bool hasELPM() const { return m_hasELPM; } + bool hasELPMX() const { return m_hasELPMX; } + bool hasSPM() const { return m_hasSPM; } + bool hasSPMX() const { return m_hasSPMX; } + bool hasDES() const { return m_hasDES; } + bool supportsRMW() const { return m_supportsRMW; } + bool supportsMultiplication() const { return m_supportsMultiplication; } + bool hasBREAK() const { return m_hasBREAK; } + bool hasTinyEncoding() const { return m_hasTinyEncoding; } + + /// Gets the ELF architecture for the e_flags field + /// of an ELF object file. + unsigned getELFArch() const { + assert(ELFArch != 0 && + "every device must have an associate ELF architecture"); + return ELFArch; + } + +private: + AVRInstrInfo InstrInfo; + AVRFrameLowering FrameLowering; + AVRTargetLowering TLInfo; + AVRSelectionDAGInfo TSInfo; + + // Subtarget feature settings + // See AVR.td for details. + bool m_hasSRAM; + bool m_hasJMPCALL; + bool m_hasIJMPCALL; + bool m_hasEIJMPCALL; + bool m_hasADDSUBIW; + bool m_hasSmallStack; + bool m_hasMOVW; + bool m_hasLPM; + bool m_hasLPMX; + bool m_hasELPM; + bool m_hasELPMX; + bool m_hasSPM; + bool m_hasSPMX; + bool m_hasDES; + bool m_supportsRMW; + bool m_supportsMultiplication; + bool m_hasBREAK; + bool m_hasTinyEncoding; + + /// The ELF e_flags architecture. + unsigned ELFArch; + + // Dummy member, used by FeatureSet's. We cannot have a SubtargetFeature with + // no variable, so we instead bind pseudo features to this variable. + bool m_FeatureSetDummy; +}; + +} // end namespace llvm + +#endif // LLVM_AVR_SUBTARGET_H diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetMachine.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetMachine.cpp new file mode 100644 index 000000000000..a36c8b0f9649 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetMachine.cpp @@ -0,0 +1,124 @@ +//===-- AVRTargetMachine.cpp - Define TargetMachine for AVR ---------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the AVR specific subclass of TargetMachine. +// +//===----------------------------------------------------------------------===// + +#include "AVRTargetMachine.h" + +#include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/TargetPassConfig.h" +#include "llvm/IR/LegacyPassManager.h" +#include "llvm/IR/Module.h" +#include "llvm/Support/TargetRegistry.h" + +#include "AVR.h" +#include "AVRTargetObjectFile.h" +#include "MCTargetDesc/AVRMCTargetDesc.h" +#include "TargetInfo/AVRTargetInfo.h" + +namespace llvm { + +static const char *AVRDataLayout = "e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-a:8"; + +/// Processes a CPU name. +static StringRef getCPU(StringRef CPU) { + if (CPU.empty() || CPU == "generic") { + return "avr2"; + } + + return CPU; +} + +static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) { + return RM.hasValue() ? *RM : Reloc::Static; +} + +AVRTargetMachine::AVRTargetMachine(const Target &T, const Triple &TT, + StringRef CPU, StringRef FS, + const TargetOptions &Options, + Optional<Reloc::Model> RM, + Optional<CodeModel::Model> CM, + CodeGenOpt::Level OL, bool JIT) + : LLVMTargetMachine(T, AVRDataLayout, TT, getCPU(CPU), FS, Options, + getEffectiveRelocModel(RM), + getEffectiveCodeModel(CM, CodeModel::Small), OL), + SubTarget(TT, getCPU(CPU), FS, *this) { + this->TLOF = make_unique<AVRTargetObjectFile>(); + initAsmInfo(); +} + +namespace { +/// AVR Code Generator Pass Configuration Options. +class AVRPassConfig : public TargetPassConfig { +public: + AVRPassConfig(AVRTargetMachine &TM, PassManagerBase &PM) + : TargetPassConfig(TM, PM) {} + + AVRTargetMachine &getAVRTargetMachine() const { + return getTM<AVRTargetMachine>(); + } + + bool addInstSelector() override; + void addPreSched2() override; + void addPreEmitPass() override; + void addPreRegAlloc() override; +}; +} // namespace + +TargetPassConfig *AVRTargetMachine::createPassConfig(PassManagerBase &PM) { + return new AVRPassConfig(*this, PM); +} + +extern "C" void LLVMInitializeAVRTarget() { + // Register the target. + RegisterTargetMachine<AVRTargetMachine> X(getTheAVRTarget()); + + auto &PR = *PassRegistry::getPassRegistry(); + initializeAVRExpandPseudoPass(PR); + initializeAVRRelaxMemPass(PR); +} + +const AVRSubtarget *AVRTargetMachine::getSubtargetImpl() const { + return &SubTarget; +} + +const AVRSubtarget *AVRTargetMachine::getSubtargetImpl(const Function &) const { + return &SubTarget; +} + +//===----------------------------------------------------------------------===// +// Pass Pipeline Configuration +//===----------------------------------------------------------------------===// + +bool AVRPassConfig::addInstSelector() { + // Install an instruction selector. + addPass(createAVRISelDag(getAVRTargetMachine(), getOptLevel())); + // Create the frame analyzer pass used by the PEI pass. + addPass(createAVRFrameAnalyzerPass()); + + return false; +} + +void AVRPassConfig::addPreRegAlloc() { + // Create the dynalloc SP save/restore pass to handle variable sized allocas. + addPass(createAVRDynAllocaSRPass()); +} + +void AVRPassConfig::addPreSched2() { + addPass(createAVRRelaxMemPass()); + addPass(createAVRExpandPseudoPass()); +} + +void AVRPassConfig::addPreEmitPass() { + // Must run branch selection immediately preceding the asm printer. 
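+  // AVR conditional branches encode only a 7-bit signed word offset, so the
+  // generic BranchRelaxation pass must rewrite any branch whose target ends
+  // up out of range into a longer sequence.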
+ addPass(&BranchRelaxationPassID); +} + +} // end of namespace llvm diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetMachine.h b/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetMachine.h new file mode 100644 index 000000000000..f9015c8741ea --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetMachine.h @@ -0,0 +1,56 @@ +//===-- AVRTargetMachine.h - Define TargetMachine for AVR -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the AVR specific subclass of TargetMachine. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_TARGET_MACHINE_H +#define LLVM_AVR_TARGET_MACHINE_H + +#include "llvm/IR/DataLayout.h" +#include "llvm/Target/TargetMachine.h" + +#include "AVRFrameLowering.h" +#include "AVRISelLowering.h" +#include "AVRInstrInfo.h" +#include "AVRSelectionDAGInfo.h" +#include "AVRSubtarget.h" + +namespace llvm { + +/// A generic AVR implementation. +class AVRTargetMachine : public LLVMTargetMachine { +public: + AVRTargetMachine(const Target &T, const Triple &TT, StringRef CPU, + StringRef FS, const TargetOptions &Options, + Optional<Reloc::Model> RM, + Optional<CodeModel::Model> CM, + CodeGenOpt::Level OL, bool JIT); + + const AVRSubtarget *getSubtargetImpl() const; + const AVRSubtarget *getSubtargetImpl(const Function &) const override; + + TargetLoweringObjectFile *getObjFileLowering() const override { + return this->TLOF.get(); + } + + TargetPassConfig *createPassConfig(PassManagerBase &PM) override; + + bool isMachineVerifierClean() const override { + return false; + } + +private: + std::unique_ptr<TargetLoweringObjectFile> TLOF; + AVRSubtarget SubTarget; +}; + +} // end namespace llvm + +#endif // LLVM_AVR_TARGET_MACHINE_H diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetObjectFile.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetObjectFile.cpp new file mode 100644 index 000000000000..980096a09835 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetObjectFile.cpp @@ -0,0 +1,40 @@ +//===-- AVRTargetObjectFile.cpp - AVR Object Files ------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "AVRTargetObjectFile.h" + +#include "llvm/BinaryFormat/ELF.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/Mangler.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCSectionELF.h" + +#include "AVR.h" + +namespace llvm { +void AVRTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM) { + Base::Initialize(Ctx, TM); + ProgmemDataSection = + Ctx.getELFSection(".progmem.data", ELF::SHT_PROGBITS, ELF::SHF_ALLOC); +} + +MCSection * +AVRTargetObjectFile::SelectSectionForGlobal(const GlobalObject *GO, + SectionKind Kind, + const TargetMachine &TM) const { + // Global values in flash memory are placed in the progmem.data section + // unless they already have a user assigned section. 
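+  // For example (illustrative), an IR global placed in address space 1, such
+  // as `@lut = addrspace(1) constant [4 x i8] c"\01\02\03\04"`, is assigned
+  // to `.progmem.data` rather than a normal data/rodata section.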
+ if (AVR::isProgramMemoryAddress(GO) && !GO->hasSection()) + return ProgmemDataSection; + + // Otherwise, we work the same way as ELF. + return Base::SelectSectionForGlobal(GO, Kind, TM); +} +} // end of namespace llvm + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetObjectFile.h b/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetObjectFile.h new file mode 100644 index 000000000000..53d8510d9a21 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetObjectFile.h @@ -0,0 +1,32 @@ +//===-- AVRTargetObjectFile.h - AVR Object Info -----------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_TARGET_OBJECT_FILE_H +#define LLVM_AVR_TARGET_OBJECT_FILE_H + +#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" + +namespace llvm { + +/// Lowering for an AVR ELF32 object file. +class AVRTargetObjectFile : public TargetLoweringObjectFileELF { + typedef TargetLoweringObjectFileELF Base; + +public: + void Initialize(MCContext &ctx, const TargetMachine &TM) override; + + MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind, + const TargetMachine &TM) const override; + +private: + MCSection *ProgmemDataSection; +}; + +} // end namespace llvm + +#endif // LLVM_AVR_TARGET_OBJECT_FILE_H diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp new file mode 100644 index 000000000000..aac5644711e2 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp @@ -0,0 +1,730 @@ +//===---- AVRAsmParser.cpp - Parse AVR assembly to MCInst instructions ----===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "AVR.h" +#include "AVRRegisterInfo.h" +#include "MCTargetDesc/AVRMCELFStreamer.h" +#include "MCTargetDesc/AVRMCExpr.h" +#include "MCTargetDesc/AVRMCTargetDesc.h" +#include "TargetInfo/AVRTargetInfo.h" + +#include "llvm/ADT/APInt.h" +#include "llvm/ADT/StringSwitch.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCInstBuilder.h" +#include "llvm/MC/MCParser/MCAsmLexer.h" +#include "llvm/MC/MCParser/MCParsedAsmOperand.h" +#include "llvm/MC/MCParser/MCTargetAsmParser.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/MC/MCValue.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/TargetRegistry.h" + +#include <sstream> + +#define DEBUG_TYPE "avr-asm-parser" + +using namespace llvm; + +namespace { +/// Parses AVR assembly from a stream. 
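+/// ParseInstruction builds a vector of AVROperand values for each statement,
+/// and MatchAndEmitInstruction feeds them to the TableGen-generated matcher.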
+class AVRAsmParser : public MCTargetAsmParser { + const MCSubtargetInfo &STI; + MCAsmParser &Parser; + const MCRegisterInfo *MRI; + const std::string GENERATE_STUBS = "gs"; + +#define GET_ASSEMBLER_HEADER +#include "AVRGenAsmMatcher.inc" + + bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, + OperandVector &Operands, MCStreamer &Out, + uint64_t &ErrorInfo, + bool MatchingInlineAsm) override; + + bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override; + + bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, + SMLoc NameLoc, OperandVector &Operands) override; + + bool ParseDirective(AsmToken DirectiveID) override; + + OperandMatchResultTy parseMemriOperand(OperandVector &Operands); + + bool parseOperand(OperandVector &Operands); + int parseRegisterName(unsigned (*matchFn)(StringRef)); + int parseRegisterName(); + int parseRegister(); + bool tryParseRegisterOperand(OperandVector &Operands); + bool tryParseExpression(OperandVector &Operands); + bool tryParseRelocExpression(OperandVector &Operands); + void eatComma(); + + unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, + unsigned Kind) override; + + unsigned toDREG(unsigned Reg, unsigned From = AVR::sub_lo) { + MCRegisterClass const *Class = &AVRMCRegisterClasses[AVR::DREGSRegClassID]; + return MRI->getMatchingSuperReg(Reg, From, Class); + } + + bool emit(MCInst &Instruction, SMLoc const &Loc, MCStreamer &Out) const; + bool invalidOperand(SMLoc const &Loc, OperandVector const &Operands, + uint64_t const &ErrorInfo); + bool missingFeature(SMLoc const &Loc, uint64_t const &ErrorInfo); + + bool parseLiteralValues(unsigned SizeInBytes, SMLoc L); + +public: + AVRAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser, + const MCInstrInfo &MII, const MCTargetOptions &Options) + : MCTargetAsmParser(Options, STI, MII), STI(STI), Parser(Parser) { + MCAsmParserExtension::Initialize(Parser); + MRI = getContext().getRegisterInfo(); + + setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); + } + + MCAsmParser &getParser() const { return Parser; } + MCAsmLexer &getLexer() const { return Parser.getLexer(); } +}; + +/// An parsed AVR assembly operand. 
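+/// An operand is one of: a token (e.g. the mnemonic), a register, an
+/// immediate expression, or a register+immediate memory reference (memri).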
+class AVROperand : public MCParsedAsmOperand { + typedef MCParsedAsmOperand Base; + enum KindTy { k_Immediate, k_Register, k_Token, k_Memri } Kind; + +public: + AVROperand(StringRef Tok, SMLoc const &S) + : Base(), Kind(k_Token), Tok(Tok), Start(S), End(S) {} + AVROperand(unsigned Reg, SMLoc const &S, SMLoc const &E) + : Base(), Kind(k_Register), RegImm({Reg, nullptr}), Start(S), End(E) {} + AVROperand(MCExpr const *Imm, SMLoc const &S, SMLoc const &E) + : Base(), Kind(k_Immediate), RegImm({0, Imm}), Start(S), End(E) {} + AVROperand(unsigned Reg, MCExpr const *Imm, SMLoc const &S, SMLoc const &E) + : Base(), Kind(k_Memri), RegImm({Reg, Imm}), Start(S), End(E) {} + + struct RegisterImmediate { + unsigned Reg; + MCExpr const *Imm; + }; + union { + StringRef Tok; + RegisterImmediate RegImm; + }; + + SMLoc Start, End; + +public: + void addRegOperands(MCInst &Inst, unsigned N) const { + assert(Kind == k_Register && "Unexpected operand kind"); + assert(N == 1 && "Invalid number of operands!"); + + Inst.addOperand(MCOperand::createReg(getReg())); + } + + void addExpr(MCInst &Inst, const MCExpr *Expr) const { + // Add as immediate when possible + if (!Expr) + Inst.addOperand(MCOperand::createImm(0)); + else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) + Inst.addOperand(MCOperand::createImm(CE->getValue())); + else + Inst.addOperand(MCOperand::createExpr(Expr)); + } + + void addImmOperands(MCInst &Inst, unsigned N) const { + assert(Kind == k_Immediate && "Unexpected operand kind"); + assert(N == 1 && "Invalid number of operands!"); + + const MCExpr *Expr = getImm(); + addExpr(Inst, Expr); + } + + /// Adds the contained reg+imm operand to an instruction. + void addMemriOperands(MCInst &Inst, unsigned N) const { + assert(Kind == k_Memri && "Unexpected operand kind"); + assert(N == 2 && "Invalid number of operands"); + + Inst.addOperand(MCOperand::createReg(getReg())); + addExpr(Inst, getImm()); + } + + void addImmCom8Operands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + // The operand is actually a imm8, but we have its bitwise + // negation in the assembly source, so twiddle it here. 
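+    // For example (illustrative), `cbr r16, 0x0F` carries K = 0x0F in the
+    // source but is emitted as `andi r16, 0xF0`, the complemented immediate.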
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); + Inst.addOperand(MCOperand::createImm(~(uint8_t)CE->getValue())); + } + + bool isImmCom8() const { + if (!isImm()) return false; + const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); + if (!CE) return false; + int64_t Value = CE->getValue(); + return isUInt<8>(Value); + } + + bool isReg() const { return Kind == k_Register; } + bool isImm() const { return Kind == k_Immediate; } + bool isToken() const { return Kind == k_Token; } + bool isMem() const { return Kind == k_Memri; } + bool isMemri() const { return Kind == k_Memri; } + + StringRef getToken() const { + assert(Kind == k_Token && "Invalid access!"); + return Tok; + } + + unsigned getReg() const { + assert((Kind == k_Register || Kind == k_Memri) && "Invalid access!"); + + return RegImm.Reg; + } + + const MCExpr *getImm() const { + assert((Kind == k_Immediate || Kind == k_Memri) && "Invalid access!"); + return RegImm.Imm; + } + + static std::unique_ptr<AVROperand> CreateToken(StringRef Str, SMLoc S) { + return make_unique<AVROperand>(Str, S); + } + + static std::unique_ptr<AVROperand> CreateReg(unsigned RegNum, SMLoc S, + SMLoc E) { + return make_unique<AVROperand>(RegNum, S, E); + } + + static std::unique_ptr<AVROperand> CreateImm(const MCExpr *Val, SMLoc S, + SMLoc E) { + return make_unique<AVROperand>(Val, S, E); + } + + static std::unique_ptr<AVROperand> + CreateMemri(unsigned RegNum, const MCExpr *Val, SMLoc S, SMLoc E) { + return make_unique<AVROperand>(RegNum, Val, S, E); + } + + void makeToken(StringRef Token) { + Kind = k_Token; + Tok = Token; + } + + void makeReg(unsigned RegNo) { + Kind = k_Register; + RegImm = {RegNo, nullptr}; + } + + void makeImm(MCExpr const *Ex) { + Kind = k_Immediate; + RegImm = {0, Ex}; + } + + void makeMemri(unsigned RegNo, MCExpr const *Imm) { + Kind = k_Memri; + RegImm = {RegNo, Imm}; + } + + SMLoc getStartLoc() const { return Start; } + SMLoc getEndLoc() const { return End; } + + virtual void print(raw_ostream &O) const { + switch (Kind) { + case k_Token: + O << "Token: \"" << getToken() << "\""; + break; + case k_Register: + O << "Register: " << getReg(); + break; + case k_Immediate: + O << "Immediate: \"" << *getImm() << "\""; + break; + case k_Memri: { + // only manually print the size for non-negative values, + // as the sign is inserted automatically. + O << "Memri: \"" << getReg() << '+' << *getImm() << "\""; + break; + } + } + O << "\n"; + } +}; + +} // end anonymous namespace. + +// Auto-generated Match Functions + +/// Maps from the set of all register names to a register number. +/// \note Generated by TableGen. +static unsigned MatchRegisterName(StringRef Name); + +/// Maps from the set of all alternative registernames to a register number. +/// \note Generated by TableGen. +static unsigned MatchRegisterAltName(StringRef Name); + +bool AVRAsmParser::invalidOperand(SMLoc const &Loc, + OperandVector const &Operands, + uint64_t const &ErrorInfo) { + SMLoc ErrorLoc = Loc; + char const *Diag = 0; + + if (ErrorInfo != ~0U) { + if (ErrorInfo >= Operands.size()) { + Diag = "too few operands for instruction."; + } else { + AVROperand const &Op = (AVROperand const &)*Operands[ErrorInfo]; + + // TODO: See if we can do a better error than just "invalid ...". 
+ if (Op.getStartLoc() != SMLoc()) { + ErrorLoc = Op.getStartLoc(); + } + } + } + + if (!Diag) { + Diag = "invalid operand for instruction"; + } + + return Error(ErrorLoc, Diag); +} + +bool AVRAsmParser::missingFeature(llvm::SMLoc const &Loc, + uint64_t const &ErrorInfo) { + return Error(Loc, "instruction requires a CPU feature not currently enabled"); +} + +bool AVRAsmParser::emit(MCInst &Inst, SMLoc const &Loc, MCStreamer &Out) const { + Inst.setLoc(Loc); + Out.EmitInstruction(Inst, STI); + + return false; +} + +bool AVRAsmParser::MatchAndEmitInstruction(SMLoc Loc, unsigned &Opcode, + OperandVector &Operands, + MCStreamer &Out, uint64_t &ErrorInfo, + bool MatchingInlineAsm) { + MCInst Inst; + unsigned MatchResult = + MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm); + + switch (MatchResult) { + case Match_Success: return emit(Inst, Loc, Out); + case Match_MissingFeature: return missingFeature(Loc, ErrorInfo); + case Match_InvalidOperand: return invalidOperand(Loc, Operands, ErrorInfo); + case Match_MnemonicFail: return Error(Loc, "invalid instruction"); + default: return true; + } +} + +/// Parses a register name using a given matching function. +/// Checks for lowercase or uppercase if necessary. +int AVRAsmParser::parseRegisterName(unsigned (*matchFn)(StringRef)) { + StringRef Name = Parser.getTok().getString(); + + int RegNum = matchFn(Name); + + // GCC supports case insensitive register names. Some of the AVR registers + // are all lower case, some are all upper case but non are mixed. We prefer + // to use the original names in the register definitions. That is why we + // have to test both upper and lower case here. + if (RegNum == AVR::NoRegister) { + RegNum = matchFn(Name.lower()); + } + if (RegNum == AVR::NoRegister) { + RegNum = matchFn(Name.upper()); + } + + return RegNum; +} + +int AVRAsmParser::parseRegisterName() { + int RegNum = parseRegisterName(&MatchRegisterName); + + if (RegNum == AVR::NoRegister) + RegNum = parseRegisterName(&MatchRegisterAltName); + + return RegNum; +} + +int AVRAsmParser::parseRegister() { + int RegNum = AVR::NoRegister; + + if (Parser.getTok().is(AsmToken::Identifier)) { + // Check for register pair syntax + if (Parser.getLexer().peekTok().is(AsmToken::Colon)) { + Parser.Lex(); + Parser.Lex(); // Eat high (odd) register and colon + + if (Parser.getTok().is(AsmToken::Identifier)) { + // Convert lower (even) register to DREG + RegNum = toDREG(parseRegisterName()); + } + } else { + RegNum = parseRegisterName(); + } + } + return RegNum; +} + +bool AVRAsmParser::tryParseRegisterOperand(OperandVector &Operands) { + int RegNo = parseRegister(); + + if (RegNo == AVR::NoRegister) + return true; + + AsmToken const &T = Parser.getTok(); + Operands.push_back(AVROperand::CreateReg(RegNo, T.getLoc(), T.getEndLoc())); + Parser.Lex(); // Eat register token. + + return false; +} + +bool AVRAsmParser::tryParseExpression(OperandVector &Operands) { + SMLoc S = Parser.getTok().getLoc(); + + if (!tryParseRelocExpression(Operands)) + return false; + + if ((Parser.getTok().getKind() == AsmToken::Plus || + Parser.getTok().getKind() == AsmToken::Minus) && + Parser.getLexer().peekTok().getKind() == AsmToken::Identifier) { + // Don't handle this case - it should be split into two + // separate tokens. 
+ return true; + } + + // Parse (potentially inner) expression + MCExpr const *Expression; + if (getParser().parseExpression(Expression)) + return true; + + SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); + Operands.push_back(AVROperand::CreateImm(Expression, S, E)); + return false; +} + +bool AVRAsmParser::tryParseRelocExpression(OperandVector &Operands) { + bool isNegated = false; + AVRMCExpr::VariantKind ModifierKind = AVRMCExpr::VK_AVR_None; + + SMLoc S = Parser.getTok().getLoc(); + + // Check for sign + AsmToken tokens[2]; + size_t ReadCount = Parser.getLexer().peekTokens(tokens); + + if (ReadCount == 2) { + if ((tokens[0].getKind() == AsmToken::Identifier && + tokens[1].getKind() == AsmToken::LParen) || + (tokens[0].getKind() == AsmToken::LParen && + tokens[1].getKind() == AsmToken::Minus)) { + + AsmToken::TokenKind CurTok = Parser.getLexer().getKind(); + if (CurTok == AsmToken::Minus || + tokens[1].getKind() == AsmToken::Minus) { + isNegated = true; + } else { + assert(CurTok == AsmToken::Plus); + isNegated = false; + } + + // Eat the sign + if (CurTok == AsmToken::Minus || CurTok == AsmToken::Plus) + Parser.Lex(); + } + } + + // Check if we have a target specific modifier (lo8, hi8, &c) + if (Parser.getTok().getKind() != AsmToken::Identifier || + Parser.getLexer().peekTok().getKind() != AsmToken::LParen) { + // Not a reloc expr + return true; + } + StringRef ModifierName = Parser.getTok().getString(); + ModifierKind = AVRMCExpr::getKindByName(ModifierName.str().c_str()); + + if (ModifierKind != AVRMCExpr::VK_AVR_None) { + Parser.Lex(); + Parser.Lex(); // Eat modifier name and parenthesis + if (Parser.getTok().getString() == GENERATE_STUBS && + Parser.getTok().getKind() == AsmToken::Identifier) { + std::string GSModName = ModifierName.str() + "_" + GENERATE_STUBS; + ModifierKind = AVRMCExpr::getKindByName(GSModName.c_str()); + if (ModifierKind != AVRMCExpr::VK_AVR_None) + Parser.Lex(); // Eat gs modifier name + } + } else { + return Error(Parser.getTok().getLoc(), "unknown modifier"); + } + + if (tokens[1].getKind() == AsmToken::Minus || + tokens[1].getKind() == AsmToken::Plus) { + Parser.Lex(); + assert(Parser.getTok().getKind() == AsmToken::LParen); + Parser.Lex(); // Eat the sign and parenthesis + } + + MCExpr const *InnerExpression; + if (getParser().parseExpression(InnerExpression)) + return true; + + if (tokens[1].getKind() == AsmToken::Minus || + tokens[1].getKind() == AsmToken::Plus) { + assert(Parser.getTok().getKind() == AsmToken::RParen); + Parser.Lex(); // Eat closing parenthesis + } + + // If we have a modifier wrap the inner expression + assert(Parser.getTok().getKind() == AsmToken::RParen); + Parser.Lex(); // Eat closing parenthesis + + MCExpr const *Expression = AVRMCExpr::create(ModifierKind, InnerExpression, + isNegated, getContext()); + + SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); + Operands.push_back(AVROperand::CreateImm(Expression, S, E)); + + return false; +} + +bool AVRAsmParser::parseOperand(OperandVector &Operands) { + LLVM_DEBUG(dbgs() << "parseOperand\n"); + + switch (getLexer().getKind()) { + default: + return Error(Parser.getTok().getLoc(), "unexpected token in operand"); + + case AsmToken::Identifier: + // Try to parse a register, if it fails, + // fall through to the next case. 
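+    // For example, in `lds r16, counter` the identifier `counter` is not a
+    // register name, so register parsing fails and the operand is parsed
+    // below as an expression instead.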
+ if (!tryParseRegisterOperand(Operands)) { + return false; + } + LLVM_FALLTHROUGH; + case AsmToken::LParen: + case AsmToken::Integer: + case AsmToken::Dot: + return tryParseExpression(Operands); + case AsmToken::Plus: + case AsmToken::Minus: { + // If the sign preceeds a number, parse the number, + // otherwise treat the sign a an independent token. + switch (getLexer().peekTok().getKind()) { + case AsmToken::Integer: + case AsmToken::BigNum: + case AsmToken::Identifier: + case AsmToken::Real: + if (!tryParseExpression(Operands)) + return false; + break; + default: + break; + } + // Treat the token as an independent token. + Operands.push_back(AVROperand::CreateToken(Parser.getTok().getString(), + Parser.getTok().getLoc())); + Parser.Lex(); // Eat the token. + return false; + } + } + + // Could not parse operand + return true; +} + +OperandMatchResultTy +AVRAsmParser::parseMemriOperand(OperandVector &Operands) { + LLVM_DEBUG(dbgs() << "parseMemriOperand()\n"); + + SMLoc E, S; + MCExpr const *Expression; + int RegNo; + + // Parse register. + { + RegNo = parseRegister(); + + if (RegNo == AVR::NoRegister) + return MatchOperand_ParseFail; + + S = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); + Parser.Lex(); // Eat register token. + } + + // Parse immediate; + { + if (getParser().parseExpression(Expression)) + return MatchOperand_ParseFail; + + E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); + } + + Operands.push_back(AVROperand::CreateMemri(RegNo, Expression, S, E)); + + return MatchOperand_Success; +} + +bool AVRAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, + SMLoc &EndLoc) { + StartLoc = Parser.getTok().getLoc(); + RegNo = parseRegister(); + EndLoc = Parser.getTok().getLoc(); + + return (RegNo == AVR::NoRegister); +} + +void AVRAsmParser::eatComma() { + if (getLexer().is(AsmToken::Comma)) { + Parser.Lex(); + } else { + // GCC allows commas to be omitted. 
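+    // For example, `out 0x3f, r0` and `out 0x3f r0` are both accepted,
+    // so a missing comma is simply not an error here.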
+ } +} + +bool AVRAsmParser::ParseInstruction(ParseInstructionInfo &Info, + StringRef Mnemonic, SMLoc NameLoc, + OperandVector &Operands) { + Operands.push_back(AVROperand::CreateToken(Mnemonic, NameLoc)); + + bool first = true; + while (getLexer().isNot(AsmToken::EndOfStatement)) { + if (!first) eatComma(); + + first = false; + + auto MatchResult = MatchOperandParserImpl(Operands, Mnemonic); + + if (MatchResult == MatchOperand_Success) { + continue; + } + + if (MatchResult == MatchOperand_ParseFail) { + SMLoc Loc = getLexer().getLoc(); + Parser.eatToEndOfStatement(); + + return Error(Loc, "failed to parse register and immediate pair"); + } + + if (parseOperand(Operands)) { + SMLoc Loc = getLexer().getLoc(); + Parser.eatToEndOfStatement(); + return Error(Loc, "unexpected token in argument list"); + } + } + Parser.Lex(); // Consume the EndOfStatement + return false; +} + +bool AVRAsmParser::ParseDirective(llvm::AsmToken DirectiveID) { + StringRef IDVal = DirectiveID.getIdentifier(); + if (IDVal.lower() == ".long") { + parseLiteralValues(SIZE_LONG, DirectiveID.getLoc()); + } else if (IDVal.lower() == ".word" || IDVal.lower() == ".short") { + parseLiteralValues(SIZE_WORD, DirectiveID.getLoc()); + } else if (IDVal.lower() == ".byte") { + parseLiteralValues(1, DirectiveID.getLoc()); + } + return true; +} + +bool AVRAsmParser::parseLiteralValues(unsigned SizeInBytes, SMLoc L) { + MCAsmParser &Parser = getParser(); + AVRMCELFStreamer &AVRStreamer = + static_cast<AVRMCELFStreamer &>(Parser.getStreamer()); + AsmToken Tokens[2]; + size_t ReadCount = Parser.getLexer().peekTokens(Tokens); + if (ReadCount == 2 && Parser.getTok().getKind() == AsmToken::Identifier && + Tokens[0].getKind() == AsmToken::Minus && + Tokens[1].getKind() == AsmToken::Identifier) { + MCSymbol *Symbol = getContext().getOrCreateSymbol(".text"); + AVRStreamer.EmitValueForModiferKind(Symbol, SizeInBytes, L, + AVRMCExpr::VK_AVR_None); + return false; + } + + if (Parser.getTok().getKind() == AsmToken::Identifier && + Parser.getLexer().peekTok().getKind() == AsmToken::LParen) { + StringRef ModifierName = Parser.getTok().getString(); + AVRMCExpr::VariantKind ModifierKind = + AVRMCExpr::getKindByName(ModifierName.str().c_str()); + if (ModifierKind != AVRMCExpr::VK_AVR_None) { + Parser.Lex(); + Parser.Lex(); // Eat the modifier and parenthesis + } else { + return Error(Parser.getTok().getLoc(), "unknown modifier"); + } + MCSymbol *Symbol = + getContext().getOrCreateSymbol(Parser.getTok().getString()); + AVRStreamer.EmitValueForModiferKind(Symbol, SizeInBytes, L, ModifierKind); + return false; + } + + auto parseOne = [&]() -> bool { + const MCExpr *Value; + if (Parser.parseExpression(Value)) + return true; + Parser.getStreamer().EmitValue(Value, SizeInBytes, L); + return false; + }; + return (parseMany(parseOne)); +} + +extern "C" void LLVMInitializeAVRAsmParser() { + RegisterMCAsmParser<AVRAsmParser> X(getTheAVRTarget()); +} + +#define GET_REGISTER_MATCHER +#define GET_MATCHER_IMPLEMENTATION +#include "AVRGenAsmMatcher.inc" + +// Uses enums defined in AVRGenAsmMatcher.inc +unsigned AVRAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp, + unsigned ExpectedKind) { + AVROperand &Op = static_cast<AVROperand &>(AsmOp); + MatchClassKind Expected = static_cast<MatchClassKind>(ExpectedKind); + + // If need be, GCC converts bare numbers to register names + // It's ugly, but GCC supports it. 
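+  // For example, `mov 24, 25` is accepted as if it were written
+  // `mov r24, r25`; the bare constants are re-matched as register names
+  // below.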
+ if (Op.isImm()) { + if (MCConstantExpr const *Const = dyn_cast<MCConstantExpr>(Op.getImm())) { + int64_t RegNum = Const->getValue(); + std::ostringstream RegName; + RegName << "r" << RegNum; + RegNum = MatchRegisterName(RegName.str().c_str()); + if (RegNum != AVR::NoRegister) { + Op.makeReg(RegNum); + if (validateOperandClass(Op, Expected) == Match_Success) { + return Match_Success; + } + } + // Let the other quirks try their magic. + } + } + + if (Op.isReg()) { + // If the instruction uses a register pair but we got a single, lower + // register we perform a "class cast". + if (isSubclass(Expected, MCK_DREGS)) { + unsigned correspondingDREG = toDREG(Op.getReg()); + + if (correspondingDREG != AVR::NoRegister) { + Op.makeReg(correspondingDREG); + return validateOperandClass(Op, Expected); + } + } + } + return Match_InvalidOperand; +} diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/Disassembler/AVRDisassembler.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/Disassembler/AVRDisassembler.cpp new file mode 100644 index 000000000000..e203a5069c85 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/Disassembler/AVRDisassembler.cpp @@ -0,0 +1,156 @@ +//===- AVRDisassembler.cpp - Disassembler for AVR ---------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is part of the AVR Disassembler. +// +//===----------------------------------------------------------------------===// + +#include "AVR.h" +#include "AVRRegisterInfo.h" +#include "AVRSubtarget.h" +#include "MCTargetDesc/AVRMCTargetDesc.h" +#include "TargetInfo/AVRTargetInfo.h" + +#include "llvm/MC/MCAsmInfo.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCDisassembler/MCDisassembler.h" +#include "llvm/MC/MCFixedLenDisassembler.h" +#include "llvm/MC/MCInst.h" +#include "llvm/Support/TargetRegistry.h" + +using namespace llvm; + +#define DEBUG_TYPE "avr-disassembler" + +typedef MCDisassembler::DecodeStatus DecodeStatus; + +namespace { + +/// A disassembler class for AVR. +class AVRDisassembler : public MCDisassembler { +public: + AVRDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx) + : MCDisassembler(STI, Ctx) {} + virtual ~AVRDisassembler() {} + + DecodeStatus getInstruction(MCInst &Instr, uint64_t &Size, + ArrayRef<uint8_t> Bytes, uint64_t Address, + raw_ostream &VStream, + raw_ostream &CStream) const override; +}; +} + +static MCDisassembler *createAVRDisassembler(const Target &T, + const MCSubtargetInfo &STI, + MCContext &Ctx) { + return new AVRDisassembler(STI, Ctx); +} + + +extern "C" void LLVMInitializeAVRDisassembler() { + // Register the disassembler. 
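+  // Once registered, a client can construct a disassembler through the
+  // generic target registry. A minimal sketch (error handling omitted;
+  // STI and Ctx are a previously created MCSubtargetInfo and MCContext):
+  //
+  //   std::string Err;
+  //   const Target *T = TargetRegistry::lookupTarget("avr", Err);
+  //   std::unique_ptr<MCDisassembler> DisAsm(
+  //       T->createMCDisassembler(STI, Ctx));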
+ TargetRegistry::RegisterMCDisassembler(getTheAVRTarget(), + createAVRDisassembler); +} + +static DecodeStatus DecodeGPR8RegisterClass(MCInst &Inst, unsigned RegNo, + uint64_t Address, const void *Decoder) { + return MCDisassembler::Success; +} + +static DecodeStatus DecodeLD8RegisterClass(MCInst &Inst, unsigned RegNo, + uint64_t Address, const void *Decoder) { + return MCDisassembler::Success; +} + +static DecodeStatus DecodePTRREGSRegisterClass(MCInst &Inst, unsigned RegNo, + uint64_t Address, const void *Decoder) { + return MCDisassembler::Success; +} + +#include "AVRGenDisassemblerTables.inc" + +static DecodeStatus readInstruction16(ArrayRef<uint8_t> Bytes, uint64_t Address, + uint64_t &Size, uint32_t &Insn) { + if (Bytes.size() < 2) { + Size = 0; + return MCDisassembler::Fail; + } + + Size = 2; + Insn = (Bytes[0] << 0) | (Bytes[1] << 8); + + return MCDisassembler::Success; +} + +static DecodeStatus readInstruction32(ArrayRef<uint8_t> Bytes, uint64_t Address, + uint64_t &Size, uint32_t &Insn) { + + if (Bytes.size() < 4) { + Size = 0; + return MCDisassembler::Fail; + } + + Size = 4; + Insn = (Bytes[0] << 0) | (Bytes[1] << 8) | (Bytes[2] << 16) | (Bytes[3] << 24); + + return MCDisassembler::Success; +} + +static const uint8_t *getDecoderTable(uint64_t Size) { + + switch (Size) { + case 2: return DecoderTable16; + case 4: return DecoderTable32; + default: llvm_unreachable("instructions must be 16 or 32-bits"); + } +} + +DecodeStatus AVRDisassembler::getInstruction(MCInst &Instr, uint64_t &Size, + ArrayRef<uint8_t> Bytes, + uint64_t Address, + raw_ostream &VStream, + raw_ostream &CStream) const { + uint32_t Insn; + + DecodeStatus Result; + + // Try decode a 16-bit instruction. + { + Result = readInstruction16(Bytes, Address, Size, Insn); + + if (Result == MCDisassembler::Fail) return MCDisassembler::Fail; + + // Try to auto-decode a 16-bit instruction. + Result = decodeInstruction(getDecoderTable(Size), Instr, + Insn, Address, this, STI); + + if (Result != MCDisassembler::Fail) + return Result; + } + + // Try decode a 32-bit instruction. + { + Result = readInstruction32(Bytes, Address, Size, Insn); + + if (Result == MCDisassembler::Fail) return MCDisassembler::Fail; + + Result = decodeInstruction(getDecoderTable(Size), Instr, Insn, + Address, this, STI); + + if (Result != MCDisassembler::Fail) { + return Result; + } + + return MCDisassembler::Fail; + } +} + +typedef DecodeStatus (*DecodeFunc)(MCInst &MI, unsigned insn, uint64_t Address, + const void *Decoder); + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp new file mode 100644 index 000000000000..e92b16c8ee9d --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp @@ -0,0 +1,486 @@ +//===-- AVRAsmBackend.cpp - AVR Asm Backend ------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements the AVRAsmBackend class. 
+// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/AVRAsmBackend.h" +#include "MCTargetDesc/AVRFixupKinds.h" +#include "MCTargetDesc/AVRMCTargetDesc.h" + +#include "llvm/MC/MCAsmBackend.h" +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCDirectives.h" +#include "llvm/MC/MCELFObjectWriter.h" +#include "llvm/MC/MCFixupKindInfo.h" +#include "llvm/MC/MCObjectWriter.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/MC/MCValue.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" + +// FIXME: we should be doing checks to make sure asm operands +// are not out of bounds. + +namespace adjust { + +using namespace llvm; + +void signed_width(unsigned Width, uint64_t Value, std::string Description, + const MCFixup &Fixup, MCContext *Ctx = nullptr) { + if (!isIntN(Width, Value)) { + std::string Diagnostic = "out of range " + Description; + + int64_t Min = minIntN(Width); + int64_t Max = maxIntN(Width); + + Diagnostic += " (expected an integer in the range " + std::to_string(Min) + + " to " + std::to_string(Max) + ")"; + + if (Ctx) { + Ctx->reportFatalError(Fixup.getLoc(), Diagnostic); + } else { + llvm_unreachable(Diagnostic.c_str()); + } + } +} + +void unsigned_width(unsigned Width, uint64_t Value, std::string Description, + const MCFixup &Fixup, MCContext *Ctx = nullptr) { + if (!isUIntN(Width, Value)) { + std::string Diagnostic = "out of range " + Description; + + int64_t Max = maxUIntN(Width); + + Diagnostic += " (expected an integer in the range 0 to " + + std::to_string(Max) + ")"; + + if (Ctx) { + Ctx->reportFatalError(Fixup.getLoc(), Diagnostic); + } else { + llvm_unreachable(Diagnostic.c_str()); + } + } +} + +/// Adjusts the value of a branch target before fixup application. +void adjustBranch(unsigned Size, const MCFixup &Fixup, uint64_t &Value, + MCContext *Ctx = nullptr) { + // We have one extra bit of precision because the value is rightshifted by + // one. + unsigned_width(Size + 1, Value, std::string("branch target"), Fixup, Ctx); + + // Rightshifts the value by one. + AVR::fixups::adjustBranchTarget(Value); +} + +/// Adjusts the value of a relative branch target before fixup application. +void adjustRelativeBranch(unsigned Size, const MCFixup &Fixup, uint64_t &Value, + MCContext *Ctx = nullptr) { + // We have one extra bit of precision because the value is rightshifted by + // one. + signed_width(Size + 1, Value, std::string("branch target"), Fixup, Ctx); + + Value -= 2; + + // Rightshifts the value by one. + AVR::fixups::adjustBranchTarget(Value); +} + +/// 22-bit absolute fixup. +/// +/// Resolves to: +/// 1001 kkkk 010k kkkk kkkk kkkk 111k kkkk +/// +/// Offset of 0 (so the result is left shifted by 3 bits before application). +void fixup_call(unsigned Size, const MCFixup &Fixup, uint64_t &Value, + MCContext *Ctx = nullptr) { + adjustBranch(Size, Fixup, Value, Ctx); + + auto top = Value & (0xf00000 << 6); // the top four bits + auto middle = Value & (0x1ffff << 5); // the middle 13 bits + auto bottom = Value & 0x1f; // end bottom 5 bits + + Value = (top << 6) | (middle << 3) | (bottom << 0); +} + +/// 7-bit PC-relative fixup. +/// +/// Resolves to: +/// 0000 00kk kkkk k000 +/// Offset of 0 (so the result is left shifted by 3 bits before application). 
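+/// Worked example (hypothetical value): a resolved offset of 8 becomes
+/// (8 - 2) >> 1 = 3 in adjustRelativeBranch, is masked to 7 bits below, and
+/// is later shifted into bits 3..9 by the fixup's target offset of 3,
+/// giving the instruction bits 0000 0000 0001 1000.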
+void fixup_7_pcrel(unsigned Size, const MCFixup &Fixup, uint64_t &Value, + MCContext *Ctx = nullptr) { + adjustRelativeBranch(Size, Fixup, Value, Ctx); + + // Because the value may be negative, we must mask out the sign bits + Value &= 0x7f; +} + +/// 12-bit PC-relative fixup. +/// Yes, the fixup is 12 bits even though the name says otherwise. +/// +/// Resolves to: +/// 0000 kkkk kkkk kkkk +/// Offset of 0 (so the result isn't left-shifted before application). +void fixup_13_pcrel(unsigned Size, const MCFixup &Fixup, uint64_t &Value, + MCContext *Ctx = nullptr) { + adjustRelativeBranch(Size, Fixup, Value, Ctx); + + // Because the value may be negative, we must mask out the sign bits + Value &= 0xfff; +} + +/// 6-bit fixup for the immediate operand of the ADIW family of +/// instructions. +/// +/// Resolves to: +/// 0000 0000 kk00 kkkk +void fixup_6_adiw(const MCFixup &Fixup, uint64_t &Value, + MCContext *Ctx = nullptr) { + unsigned_width(6, Value, std::string("immediate"), Fixup, Ctx); + + Value = ((Value & 0x30) << 2) | (Value & 0x0f); +} + +/// 5-bit port number fixup on the SBIC family of instructions. +/// +/// Resolves to: +/// 0000 0000 AAAA A000 +void fixup_port5(const MCFixup &Fixup, uint64_t &Value, + MCContext *Ctx = nullptr) { + unsigned_width(5, Value, std::string("port number"), Fixup, Ctx); + + Value &= 0x1f; + + Value <<= 3; +} + +/// 6-bit port number fixup on the `IN` family of instructions. +/// +/// Resolves to: +/// 1011 0AAd dddd AAAA +void fixup_port6(const MCFixup &Fixup, uint64_t &Value, + MCContext *Ctx = nullptr) { + unsigned_width(6, Value, std::string("port number"), Fixup, Ctx); + + Value = ((Value & 0x30) << 5) | (Value & 0x0f); +} + +/// Adjusts a program memory address. +/// This is a simple right-shift. +void pm(uint64_t &Value) { + Value >>= 1; +} + +/// Fixups relating to the LDI instruction. +namespace ldi { + +/// Adjusts a value to fix up the immediate of an `LDI Rd, K` instruction. +/// +/// Resolves to: +/// 0000 KKKK 0000 KKKK +/// Offset of 0 (so the result isn't left-shifted before application). +void fixup(unsigned Size, const MCFixup &Fixup, uint64_t &Value, + MCContext *Ctx = nullptr) { + uint64_t upper = Value & 0xf0; + uint64_t lower = Value & 0x0f; + + Value = (upper << 4) | lower; +} + +void neg(uint64_t &Value) { Value *= -1; } + +void lo8(unsigned Size, const MCFixup &Fixup, uint64_t &Value, + MCContext *Ctx = nullptr) { + Value &= 0xff; + ldi::fixup(Size, Fixup, Value, Ctx); +} + +void hi8(unsigned Size, const MCFixup &Fixup, uint64_t &Value, + MCContext *Ctx = nullptr) { + Value = (Value & 0xff00) >> 8; + ldi::fixup(Size, Fixup, Value, Ctx); +} + +void hh8(unsigned Size, const MCFixup &Fixup, uint64_t &Value, + MCContext *Ctx = nullptr) { + Value = (Value & 0xff0000) >> 16; + ldi::fixup(Size, Fixup, Value, Ctx); +} + +void ms8(unsigned Size, const MCFixup &Fixup, uint64_t &Value, + MCContext *Ctx = nullptr) { + Value = (Value & 0xff000000) >> 24; + ldi::fixup(Size, Fixup, Value, Ctx); +} + +} // end of ldi namespace +} // end of adjust namespace + +namespace llvm { + +// Prepare value for the target space for it +void AVRAsmBackend::adjustFixupValue(const MCFixup &Fixup, + const MCValue &Target, + uint64_t &Value, + MCContext *Ctx) const { + // The size of the fixup in bits. + uint64_t Size = AVRAsmBackend::getFixupKindInfo(Fixup.getKind()).TargetSize; + + unsigned Kind = Fixup.getKind(); + + // Parsed LLVM-generated temporary labels are already + // adjusted for instruction size, but normal labels aren't. 
+ // + // To handle both cases, we simply un-adjust the temporary label + // case so it acts like all other labels. + if (const MCSymbolRefExpr *A = Target.getSymA()) { + if (A->getSymbol().isTemporary()) + Value += 2; + } + + switch (Kind) { + default: + llvm_unreachable("unhandled fixup"); + case AVR::fixup_7_pcrel: + adjust::fixup_7_pcrel(Size, Fixup, Value, Ctx); + break; + case AVR::fixup_13_pcrel: + adjust::fixup_13_pcrel(Size, Fixup, Value, Ctx); + break; + case AVR::fixup_call: + adjust::fixup_call(Size, Fixup, Value, Ctx); + break; + case AVR::fixup_ldi: + adjust::ldi::fixup(Size, Fixup, Value, Ctx); + break; + case AVR::fixup_lo8_ldi: + adjust::ldi::lo8(Size, Fixup, Value, Ctx); + break; + case AVR::fixup_lo8_ldi_pm: + case AVR::fixup_lo8_ldi_gs: + adjust::pm(Value); + adjust::ldi::lo8(Size, Fixup, Value, Ctx); + break; + case AVR::fixup_hi8_ldi: + adjust::ldi::hi8(Size, Fixup, Value, Ctx); + break; + case AVR::fixup_hi8_ldi_pm: + case AVR::fixup_hi8_ldi_gs: + adjust::pm(Value); + adjust::ldi::hi8(Size, Fixup, Value, Ctx); + break; + case AVR::fixup_hh8_ldi: + case AVR::fixup_hh8_ldi_pm: + if (Kind == AVR::fixup_hh8_ldi_pm) adjust::pm(Value); + + adjust::ldi::hh8(Size, Fixup, Value, Ctx); + break; + case AVR::fixup_ms8_ldi: + adjust::ldi::ms8(Size, Fixup, Value, Ctx); + break; + + case AVR::fixup_lo8_ldi_neg: + case AVR::fixup_lo8_ldi_pm_neg: + if (Kind == AVR::fixup_lo8_ldi_pm_neg) adjust::pm(Value); + + adjust::ldi::neg(Value); + adjust::ldi::lo8(Size, Fixup, Value, Ctx); + break; + case AVR::fixup_hi8_ldi_neg: + case AVR::fixup_hi8_ldi_pm_neg: + if (Kind == AVR::fixup_hi8_ldi_pm_neg) adjust::pm(Value); + + adjust::ldi::neg(Value); + adjust::ldi::hi8(Size, Fixup, Value, Ctx); + break; + case AVR::fixup_hh8_ldi_neg: + case AVR::fixup_hh8_ldi_pm_neg: + if (Kind == AVR::fixup_hh8_ldi_pm_neg) adjust::pm(Value); + + adjust::ldi::neg(Value); + adjust::ldi::hh8(Size, Fixup, Value, Ctx); + break; + case AVR::fixup_ms8_ldi_neg: + adjust::ldi::neg(Value); + adjust::ldi::ms8(Size, Fixup, Value, Ctx); + break; + case AVR::fixup_16: + adjust::unsigned_width(16, Value, std::string("port number"), Fixup, Ctx); + + Value &= 0xffff; + break; + case AVR::fixup_16_pm: + Value >>= 1; // Flash addresses are always shifted. + adjust::unsigned_width(16, Value, std::string("port number"), Fixup, Ctx); + + Value &= 0xffff; + break; + + case AVR::fixup_6_adiw: + adjust::fixup_6_adiw(Fixup, Value, Ctx); + break; + + case AVR::fixup_port5: + adjust::fixup_port5(Fixup, Value, Ctx); + break; + + case AVR::fixup_port6: + adjust::fixup_port6(Fixup, Value, Ctx); + break; + + // Fixups which do not require adjustments. + case FK_Data_1: + case FK_Data_2: + case FK_Data_4: + case FK_Data_8: + break; + + case FK_GPRel_4: + llvm_unreachable("don't know how to adjust this fixup"); + break; + } +} + +std::unique_ptr<MCObjectTargetWriter> +AVRAsmBackend::createObjectTargetWriter() const { + return createAVRELFObjectWriter(MCELFObjectTargetWriter::getOSABI(OSType)); +} + +void AVRAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, + const MCValue &Target, + MutableArrayRef<char> Data, uint64_t Value, + bool IsResolved, + const MCSubtargetInfo *STI) const { + adjustFixupValue(Fixup, Target, Value, &Asm.getContext()); + if (Value == 0) + return; // Doesn't change encoding. + + MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind()); + + // The number of bits in the fixup mask + auto NumBits = Info.TargetSize + Info.TargetOffset; + auto NumBytes = (NumBits / 8) + ((NumBits % 8) == 0 ? 
0 : 1); + + // Shift the value into position. + Value <<= Info.TargetOffset; + + unsigned Offset = Fixup.getOffset(); + assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!"); + + // For each byte of the fragment that the fixup touches, mask in the + // bits from the fixup value. + for (unsigned i = 0; i < NumBytes; ++i) { + uint8_t mask = (((Value >> (i * 8)) & 0xff)); + Data[Offset + i] |= mask; + } +} + +MCFixupKindInfo const &AVRAsmBackend::getFixupKindInfo(MCFixupKind Kind) const { + // NOTE: Many AVR fixups work on sets of non-contignous bits. We work around + // this by saying that the fixup is the size of the entire instruction. + const static MCFixupKindInfo Infos[AVR::NumTargetFixupKinds] = { + // This table *must* be in same the order of fixup_* kinds in + // AVRFixupKinds.h. + // + // name offset bits flags + {"fixup_32", 0, 32, 0}, + + {"fixup_7_pcrel", 3, 7, MCFixupKindInfo::FKF_IsPCRel}, + {"fixup_13_pcrel", 0, 12, MCFixupKindInfo::FKF_IsPCRel}, + + {"fixup_16", 0, 16, 0}, + {"fixup_16_pm", 0, 16, 0}, + + {"fixup_ldi", 0, 8, 0}, + + {"fixup_lo8_ldi", 0, 8, 0}, + {"fixup_hi8_ldi", 0, 8, 0}, + {"fixup_hh8_ldi", 0, 8, 0}, + {"fixup_ms8_ldi", 0, 8, 0}, + + {"fixup_lo8_ldi_neg", 0, 8, 0}, + {"fixup_hi8_ldi_neg", 0, 8, 0}, + {"fixup_hh8_ldi_neg", 0, 8, 0}, + {"fixup_ms8_ldi_neg", 0, 8, 0}, + + {"fixup_lo8_ldi_pm", 0, 8, 0}, + {"fixup_hi8_ldi_pm", 0, 8, 0}, + {"fixup_hh8_ldi_pm", 0, 8, 0}, + + {"fixup_lo8_ldi_pm_neg", 0, 8, 0}, + {"fixup_hi8_ldi_pm_neg", 0, 8, 0}, + {"fixup_hh8_ldi_pm_neg", 0, 8, 0}, + + {"fixup_call", 0, 22, 0}, + + {"fixup_6", 0, 16, 0}, // non-contiguous + {"fixup_6_adiw", 0, 6, 0}, + + {"fixup_lo8_ldi_gs", 0, 8, 0}, + {"fixup_hi8_ldi_gs", 0, 8, 0}, + + {"fixup_8", 0, 8, 0}, + {"fixup_8_lo8", 0, 8, 0}, + {"fixup_8_hi8", 0, 8, 0}, + {"fixup_8_hlo8", 0, 8, 0}, + + {"fixup_diff8", 0, 8, 0}, + {"fixup_diff16", 0, 16, 0}, + {"fixup_diff32", 0, 32, 0}, + + {"fixup_lds_sts_16", 0, 16, 0}, + + {"fixup_port6", 0, 16, 0}, // non-contiguous + {"fixup_port5", 3, 5, 0}, + }; + + if (Kind < FirstTargetFixupKind) + return MCAsmBackend::getFixupKindInfo(Kind); + + assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() && + "Invalid kind!"); + + return Infos[Kind - FirstTargetFixupKind]; +} + +bool AVRAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const { + // If the count is not 2-byte aligned, we must be writing data into the text + // section (otherwise we have unaligned instructions, and thus have far + // bigger problems), so just write zeros instead. + assert((Count % 2) == 0 && "NOP instructions must be 2 bytes"); + + OS.write_zeros(Count); + return true; +} + +bool AVRAsmBackend::shouldForceRelocation(const MCAssembler &Asm, + const MCFixup &Fixup, + const MCValue &Target) { + switch ((unsigned) Fixup.getKind()) { + default: return false; + // Fixups which should always be recorded as relocations. 
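+  // Branch and call targets are left to the linker even when the assembler
+  // could resolve them locally, presumably so that linker relaxation (for
+  // example avr-ld's --relax rewriting CALL/JMP into the shorter
+  // RCALL/RJMP when the target is in range) still sees every branch site.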
+ case AVR::fixup_7_pcrel: + case AVR::fixup_13_pcrel: + case AVR::fixup_call: + return true; + } +} + +MCAsmBackend *createAVRAsmBackend(const Target &T, const MCSubtargetInfo &STI, + const MCRegisterInfo &MRI, + const llvm::MCTargetOptions &TO) { + return new AVRAsmBackend(STI.getTargetTriple().getOS()); +} + +} // end of namespace llvm + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.h b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.h new file mode 100644 index 000000000000..1e713db38145 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.h @@ -0,0 +1,80 @@ +//===-- AVRAsmBackend.h - AVR Asm Backend --------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// \file The AVR assembly backend implementation. +// +//===----------------------------------------------------------------------===// +// + +#ifndef LLVM_AVR_ASM_BACKEND_H +#define LLVM_AVR_ASM_BACKEND_H + +#include "MCTargetDesc/AVRFixupKinds.h" + +#include "llvm/ADT/Triple.h" +#include "llvm/MC/MCAsmBackend.h" + +namespace llvm { + +class MCAssembler; +class MCObjectWriter; +class Target; + +struct MCFixupKindInfo; + +/// Utilities for manipulating generated AVR machine code. +class AVRAsmBackend : public MCAsmBackend { +public: + AVRAsmBackend(Triple::OSType OSType) + : MCAsmBackend(support::little), OSType(OSType) {} + + void adjustFixupValue(const MCFixup &Fixup, const MCValue &Target, + uint64_t &Value, MCContext *Ctx = nullptr) const; + + std::unique_ptr<MCObjectTargetWriter> + createObjectTargetWriter() const override; + + void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, + const MCValue &Target, MutableArrayRef<char> Data, + uint64_t Value, bool IsResolved, + const MCSubtargetInfo *STI) const override; + + const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override; + + unsigned getNumFixupKinds() const override { + return AVR::NumTargetFixupKinds; + } + + bool mayNeedRelaxation(const MCInst &Inst, + const MCSubtargetInfo &STI) const override { + return false; + } + + bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value, + const MCRelaxableFragment *DF, + const MCAsmLayout &Layout) const override { + llvm_unreachable("RelaxInstruction() unimplemented"); + return false; + } + + void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI, + MCInst &Res) const override {} + + bool writeNopData(raw_ostream &OS, uint64_t Count) const override; + + bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup, + const MCValue &Target) override; + +private: + Triple::OSType OSType; +}; + +} // end namespace llvm + +#endif // LLVM_AVR_ASM_BACKEND_H + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRELFObjectWriter.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRELFObjectWriter.cpp new file mode 100644 index 000000000000..6025e4b2437c --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRELFObjectWriter.cpp @@ -0,0 +1,159 @@ +//===-- AVRELFObjectWriter.cpp - AVR ELF Writer ---------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/AVRFixupKinds.h" +#include "MCTargetDesc/AVRMCTargetDesc.h" + +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCELFObjectWriter.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCObjectWriter.h" +#include "llvm/MC/MCSection.h" +#include "llvm/MC/MCValue.h" +#include "llvm/Support/ErrorHandling.h" + +namespace llvm { + +/// Writes AVR machine code into an ELF32 object file. +class AVRELFObjectWriter : public MCELFObjectTargetWriter { +public: + AVRELFObjectWriter(uint8_t OSABI); + + virtual ~AVRELFObjectWriter() {} + + unsigned getRelocType(MCContext &Ctx, + const MCValue &Target, + const MCFixup &Fixup, + bool IsPCRel) const override; +}; + +AVRELFObjectWriter::AVRELFObjectWriter(uint8_t OSABI) + : MCELFObjectTargetWriter(false, OSABI, ELF::EM_AVR, true) {} + +unsigned AVRELFObjectWriter::getRelocType(MCContext &Ctx, + const MCValue &Target, + const MCFixup &Fixup, + bool IsPCRel) const { + MCSymbolRefExpr::VariantKind Modifier = Target.getAccessVariant(); + switch ((unsigned) Fixup.getKind()) { + case FK_Data_1: + switch (Modifier) { + default: + llvm_unreachable("Unsupported Modifier"); + case MCSymbolRefExpr::VK_None: + return ELF::R_AVR_8; + case MCSymbolRefExpr::VK_AVR_DIFF8: + return ELF::R_AVR_DIFF8; + case MCSymbolRefExpr::VK_AVR_LO8: + return ELF::R_AVR_8_LO8; + case MCSymbolRefExpr::VK_AVR_HI8: + return ELF::R_AVR_8_HI8; + case MCSymbolRefExpr::VK_AVR_HLO8: + return ELF::R_AVR_8_HLO8; + } + case FK_Data_4: + switch (Modifier) { + default: + llvm_unreachable("Unsupported Modifier"); + case MCSymbolRefExpr::VK_None: + return ELF::R_AVR_32; + case MCSymbolRefExpr::VK_AVR_DIFF32: + return ELF::R_AVR_DIFF32; + } + case FK_Data_2: + switch (Modifier) { + default: + llvm_unreachable("Unsupported Modifier"); + case MCSymbolRefExpr::VK_None: + return ELF::R_AVR_16; + case MCSymbolRefExpr::VK_AVR_NONE: + return ELF::R_AVR_16_PM; + case MCSymbolRefExpr::VK_AVR_DIFF16: + return ELF::R_AVR_DIFF16; + } + case AVR::fixup_32: + return ELF::R_AVR_32; + case AVR::fixup_7_pcrel: + return ELF::R_AVR_7_PCREL; + case AVR::fixup_13_pcrel: + return ELF::R_AVR_13_PCREL; + case AVR::fixup_16: + return ELF::R_AVR_16; + case AVR::fixup_16_pm: + return ELF::R_AVR_16_PM; + case AVR::fixup_lo8_ldi: + return ELF::R_AVR_LO8_LDI; + case AVR::fixup_hi8_ldi: + return ELF::R_AVR_HI8_LDI; + case AVR::fixup_hh8_ldi: + return ELF::R_AVR_HH8_LDI; + case AVR::fixup_lo8_ldi_neg: + return ELF::R_AVR_LO8_LDI_NEG; + case AVR::fixup_hi8_ldi_neg: + return ELF::R_AVR_HI8_LDI_NEG; + case AVR::fixup_hh8_ldi_neg: + return ELF::R_AVR_HH8_LDI_NEG; + case AVR::fixup_lo8_ldi_pm: + return ELF::R_AVR_LO8_LDI_PM; + case AVR::fixup_hi8_ldi_pm: + return ELF::R_AVR_HI8_LDI_PM; + case AVR::fixup_hh8_ldi_pm: + return ELF::R_AVR_HH8_LDI_PM; + case AVR::fixup_lo8_ldi_pm_neg: + return ELF::R_AVR_LO8_LDI_PM_NEG; + case AVR::fixup_hi8_ldi_pm_neg: + return ELF::R_AVR_HI8_LDI_PM_NEG; + case AVR::fixup_hh8_ldi_pm_neg: + return ELF::R_AVR_HH8_LDI_PM_NEG; + case AVR::fixup_call: + return ELF::R_AVR_CALL; + case AVR::fixup_ldi: + return ELF::R_AVR_LDI; + case AVR::fixup_6: + return ELF::R_AVR_6; + case AVR::fixup_6_adiw: + return ELF::R_AVR_6_ADIW; + case AVR::fixup_ms8_ldi: + return ELF::R_AVR_MS8_LDI; + case AVR::fixup_ms8_ldi_neg: + return ELF::R_AVR_MS8_LDI_NEG; + case AVR::fixup_lo8_ldi_gs: + return ELF::R_AVR_LO8_LDI_GS; + case AVR::fixup_hi8_ldi_gs: + return 
ELF::R_AVR_HI8_LDI_GS; + case AVR::fixup_8: + return ELF::R_AVR_8; + case AVR::fixup_8_lo8: + return ELF::R_AVR_8_LO8; + case AVR::fixup_8_hi8: + return ELF::R_AVR_8_HI8; + case AVR::fixup_8_hlo8: + return ELF::R_AVR_8_HLO8; + case AVR::fixup_diff8: + return ELF::R_AVR_DIFF8; + case AVR::fixup_diff16: + return ELF::R_AVR_DIFF16; + case AVR::fixup_diff32: + return ELF::R_AVR_DIFF32; + case AVR::fixup_lds_sts_16: + return ELF::R_AVR_LDS_STS_16; + case AVR::fixup_port6: + return ELF::R_AVR_PORT6; + case AVR::fixup_port5: + return ELF::R_AVR_PORT5; + default: + llvm_unreachable("invalid fixup kind!"); + } +} + +std::unique_ptr<MCObjectTargetWriter> createAVRELFObjectWriter(uint8_t OSABI) { + return make_unique<AVRELFObjectWriter>(OSABI); +} + +} // end of namespace llvm + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRELFStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRELFStreamer.cpp new file mode 100644 index 000000000000..6d126ed622aa --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRELFStreamer.cpp @@ -0,0 +1,68 @@ +#include "AVRELFStreamer.h" + +#include "llvm/BinaryFormat/ELF.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/MC/SubtargetFeature.h" +#include "llvm/Support/FormattedStream.h" + +#include "AVRMCTargetDesc.h" + +namespace llvm { + +static unsigned getEFlagsForFeatureSet(const FeatureBitset &Features) { + unsigned EFlags = 0; + + // Set architecture + if (Features[AVR::ELFArchAVR1]) + EFlags |= ELF::EF_AVR_ARCH_AVR1; + else if (Features[AVR::ELFArchAVR2]) + EFlags |= ELF::EF_AVR_ARCH_AVR2; + else if (Features[AVR::ELFArchAVR25]) + EFlags |= ELF::EF_AVR_ARCH_AVR25; + else if (Features[AVR::ELFArchAVR3]) + EFlags |= ELF::EF_AVR_ARCH_AVR3; + else if (Features[AVR::ELFArchAVR31]) + EFlags |= ELF::EF_AVR_ARCH_AVR31; + else if (Features[AVR::ELFArchAVR35]) + EFlags |= ELF::EF_AVR_ARCH_AVR35; + else if (Features[AVR::ELFArchAVR4]) + EFlags |= ELF::EF_AVR_ARCH_AVR4; + else if (Features[AVR::ELFArchAVR5]) + EFlags |= ELF::EF_AVR_ARCH_AVR5; + else if (Features[AVR::ELFArchAVR51]) + EFlags |= ELF::EF_AVR_ARCH_AVR51; + else if (Features[AVR::ELFArchAVR6]) + EFlags |= ELF::EF_AVR_ARCH_AVR6; + else if (Features[AVR::ELFArchTiny]) + EFlags |= ELF::EF_AVR_ARCH_AVRTINY; + else if (Features[AVR::ELFArchXMEGA1]) + EFlags |= ELF::EF_AVR_ARCH_XMEGA1; + else if (Features[AVR::ELFArchXMEGA2]) + EFlags |= ELF::EF_AVR_ARCH_XMEGA2; + else if (Features[AVR::ELFArchXMEGA3]) + EFlags |= ELF::EF_AVR_ARCH_XMEGA3; + else if (Features[AVR::ELFArchXMEGA4]) + EFlags |= ELF::EF_AVR_ARCH_XMEGA4; + else if (Features[AVR::ELFArchXMEGA5]) + EFlags |= ELF::EF_AVR_ARCH_XMEGA5; + else if (Features[AVR::ELFArchXMEGA6]) + EFlags |= ELF::EF_AVR_ARCH_XMEGA6; + else if (Features[AVR::ELFArchXMEGA7]) + EFlags |= ELF::EF_AVR_ARCH_XMEGA7; + + return EFlags; +} + +AVRELFStreamer::AVRELFStreamer(MCStreamer &S, + const MCSubtargetInfo &STI) + : AVRTargetStreamer(S) { + + MCAssembler &MCA = getStreamer().getAssembler(); + unsigned EFlags = MCA.getELFHeaderEFlags(); + + EFlags |= getEFlagsForFeatureSet(STI.getFeatureBits()); + + MCA.setELFHeaderEFlags(EFlags); +} + +} // end namespace llvm diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRELFStreamer.h b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRELFStreamer.h new file mode 100644 index 000000000000..461f1660c952 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRELFStreamer.h @@ -0,0 +1,28 @@ +//===----- AVRELFStreamer.h - AVR Target Streamer 
--------------*- C++ -*--===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_ELF_STREAMER_H +#define LLVM_AVR_ELF_STREAMER_H + +#include "AVRTargetStreamer.h" + +namespace llvm { + +/// A target streamer for an AVR ELF object file. +class AVRELFStreamer : public AVRTargetStreamer { +public: + AVRELFStreamer(MCStreamer &S, const MCSubtargetInfo &STI); + + MCELFStreamer &getStreamer() { + return static_cast<MCELFStreamer &>(Streamer); + } +}; + +} // end namespace llvm + +#endif diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRFixupKinds.h b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRFixupKinds.h new file mode 100644 index 000000000000..b3504b89e4d3 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRFixupKinds.h @@ -0,0 +1,147 @@ +//===-- AVRFixupKinds.h - AVR Specific Fixup Entries ------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_FIXUP_KINDS_H +#define LLVM_AVR_FIXUP_KINDS_H + +#include "llvm/MC/MCFixup.h" + +namespace llvm { +namespace AVR { + +/// The set of supported fixups. +/// +/// Although most of the current fixup types reflect a unique relocation +/// one can have multiple fixup types for a given relocation and thus need +/// to be uniquely named. +/// +/// \note This table *must* be in the same order of +/// MCFixupKindInfo Infos[AVR::NumTargetFixupKinds] +/// in `AVRAsmBackend.cpp`. +enum Fixups { + /// A 32-bit AVR fixup. + fixup_32 = FirstTargetFixupKind, + + /// A 7-bit PC-relative fixup for the family of conditional + /// branches which take 7-bit targets (BRNE,BRGT,etc). + fixup_7_pcrel, + /// A 12-bit PC-relative fixup for the family of branches + /// which take 12-bit targets (RJMP,RCALL,etc). + /// \note Although the fixup is labelled as 13 bits, it + /// is actually only encoded in 12. The reason for + /// The nonmenclature is that AVR branch targets are + /// rightshifted by 1, because instructions are always + /// aligned to 2 bytes, so the 0'th bit is always 0. + /// This way there is 13-bits of precision. + fixup_13_pcrel, + + /// A 16-bit address. + fixup_16, + /// A 16-bit program memory address. + fixup_16_pm, + + /// Replaces the 8-bit immediate with another value. + fixup_ldi, + + /// Replaces the immediate operand of a 16-bit `Rd, K` instruction + /// with the lower 8 bits of a 16-bit value (bits 0-7). + fixup_lo8_ldi, + /// Replaces the immediate operand of a 16-bit `Rd, K` instruction + /// with the upper 8 bits of a 16-bit value (bits 8-15). + fixup_hi8_ldi, + /// Replaces the immediate operand of a 16-bit `Rd, K` instruction + /// with the upper 8 bits of a 24-bit value (bits 16-23). + fixup_hh8_ldi, + /// Replaces the immediate operand of a 16-bit `Rd, K` instruction + /// with the upper 8 bits of a 32-bit value (bits 24-31). + fixup_ms8_ldi, + + /// Replaces the immediate operand of a 16-bit `Rd, K` instruction + /// with the lower 8 bits of a negated 16-bit value (bits 0-7). 
+ fixup_lo8_ldi_neg, + /// Replaces the immediate operand of a 16-bit `Rd, K` instruction + /// with the upper 8 bits of a negated 16-bit value (bits 8-15). + fixup_hi8_ldi_neg, + /// Replaces the immediate operand of a 16-bit `Rd, K` instruction + /// with the upper 8 bits of a negated negated 24-bit value (bits 16-23). + fixup_hh8_ldi_neg, + /// Replaces the immediate operand of a 16-bit `Rd, K` instruction + /// with the upper 8 bits of a negated negated 32-bit value (bits 24-31). + fixup_ms8_ldi_neg, + + /// Replaces the immediate operand of a 16-bit `Rd, K` instruction + /// with the lower 8 bits of a 16-bit program memory address value (bits 0-7). + fixup_lo8_ldi_pm, + /// Replaces the immediate operand of a 16-bit `Rd, K` instruction + /// with the upper 8 bits of a 16-bit program memory address value (bits + /// 8-15). + fixup_hi8_ldi_pm, + /// Replaces the immediate operand of a 16-bit `Rd, K` instruction + /// with the upper 8 bits of a 24-bit program memory address value (bits + /// 16-23). + fixup_hh8_ldi_pm, + + /// Replaces the immediate operand of a 16-bit `Rd, K` instruction + /// with the lower 8 bits of a negated 16-bit program memory address value + /// (bits 0-7). + fixup_lo8_ldi_pm_neg, + /// Replaces the immediate operand of a 16-bit `Rd, K` instruction + /// with the upper 8 bits of a negated 16-bit program memory address value + /// (bits 8-15). + fixup_hi8_ldi_pm_neg, + /// Replaces the immediate operand of a 16-bit `Rd, K` instruction + /// with the upper 8 bits of a negated 24-bit program memory address value + /// (bits 16-23). + fixup_hh8_ldi_pm_neg, + + /// A 22-bit fixup for the target of a `CALL k` or `JMP k` instruction. + fixup_call, + + fixup_6, + /// A symbol+addr fixup for the `LDD <x>+<n>, <r>" family of instructions. + fixup_6_adiw, + + fixup_lo8_ldi_gs, + fixup_hi8_ldi_gs, + + fixup_8, + fixup_8_lo8, + fixup_8_hi8, + fixup_8_hlo8, + + fixup_diff8, + fixup_diff16, + fixup_diff32, + + fixup_lds_sts_16, + + /// A 6-bit port address. + fixup_port6, + /// A 5-bit port address. + fixup_port5, + + // Marker + LastTargetFixupKind, + NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind +}; + +namespace fixups { + +/// Adjusts the value of a branch target. +/// All branch targets in AVR are rightshifted by 1 to take advantage +/// of the fact that all instructions are aligned to addresses of size +/// 2, so bit 0 of an address is always 0. This gives us another bit +/// of precision. +/// \param[in,out] The target to adjust. +template <typename T> inline void adjustBranchTarget(T &val) { val >>= 1; } + +} // end of namespace fixups +} +} // end of namespace llvm::AVR + +#endif // LLVM_AVR_FIXUP_KINDS_H diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRInstPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRInstPrinter.cpp new file mode 100644 index 000000000000..88ce9a25680e --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRInstPrinter.cpp @@ -0,0 +1,170 @@ +//===-- AVRInstPrinter.cpp - Convert AVR MCInst to assembly syntax --------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This class prints an AVR MCInst to a .s file. 
+// +//===----------------------------------------------------------------------===// + +#include "AVRInstPrinter.h" + +#include "MCTargetDesc/AVRMCTargetDesc.h" + +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCInstrDesc.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/FormattedStream.h" + +#include <cstring> + +#define DEBUG_TYPE "asm-printer" + +namespace llvm { + +// Include the auto-generated portion of the assembly writer. +#define PRINT_ALIAS_INSTR +#include "AVRGenAsmWriter.inc" + +void AVRInstPrinter::printInst(const MCInst *MI, raw_ostream &O, + StringRef Annot, const MCSubtargetInfo &STI) { + unsigned Opcode = MI->getOpcode(); + + // First handle load and store instructions with postinc or predec + // of the form "ld reg, X+". + // TODO: We should be able to rewrite this using TableGen data. + switch (Opcode) { + case AVR::LDRdPtr: + case AVR::LDRdPtrPi: + case AVR::LDRdPtrPd: + O << "\tld\t"; + printOperand(MI, 0, O); + O << ", "; + + if (Opcode == AVR::LDRdPtrPd) + O << '-'; + + printOperand(MI, 1, O); + + if (Opcode == AVR::LDRdPtrPi) + O << '+'; + break; + case AVR::STPtrRr: + O << "\tst\t"; + printOperand(MI, 0, O); + O << ", "; + printOperand(MI, 1, O); + break; + case AVR::STPtrPiRr: + case AVR::STPtrPdRr: + O << "\tst\t"; + + if (Opcode == AVR::STPtrPdRr) + O << '-'; + + printOperand(MI, 1, O); + + if (Opcode == AVR::STPtrPiRr) + O << '+'; + + O << ", "; + printOperand(MI, 2, O); + break; + default: + if (!printAliasInstr(MI, O)) + printInstruction(MI, O); + + printAnnotation(O, Annot); + break; + } +} + +const char *AVRInstPrinter::getPrettyRegisterName(unsigned RegNum, + MCRegisterInfo const &MRI) { + // GCC prints register pairs by just printing the lower register + // If the register contains a subregister, print it instead + if (MRI.getNumSubRegIndices() > 0) { + unsigned RegLoNum = MRI.getSubReg(RegNum, AVR::sub_lo); + RegNum = (RegLoNum != AVR::NoRegister) ? RegLoNum : RegNum; + } + + return getRegisterName(RegNum); +} + +void AVRInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, + raw_ostream &O) { + const MCOperand &Op = MI->getOperand(OpNo); + const MCOperandInfo &MOI = this->MII.get(MI->getOpcode()).OpInfo[OpNo]; + + if (Op.isReg()) { + bool isPtrReg = (MOI.RegClass == AVR::PTRREGSRegClassID) || + (MOI.RegClass == AVR::PTRDISPREGSRegClassID) || + (MOI.RegClass == AVR::ZREGRegClassID); + + if (isPtrReg) { + O << getRegisterName(Op.getReg(), AVR::ptr); + } else { + O << getPrettyRegisterName(Op.getReg(), MRI); + } + } else if (Op.isImm()) { + O << Op.getImm(); + } else { + assert(Op.isExpr() && "Unknown operand kind in printOperand"); + O << *Op.getExpr(); + } +} + +/// This is used to print an immediate value that ends up +/// being encoded as a pc-relative value. +void AVRInstPrinter::printPCRelImm(const MCInst *MI, unsigned OpNo, + raw_ostream &O) { + const MCOperand &Op = MI->getOperand(OpNo); + + if (Op.isImm()) { + int64_t Imm = Op.getImm(); + O << '.'; + + // Print a position sign if needed. + // Negative values have their sign printed automatically. 
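+    // For example, an immediate of 10 prints as `.+10` and -6 prints as
+    // `.-6`, matching the usual AVR relative-branch syntax (`rjmp .+10`).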
+ if (Imm >= 0) + O << '+'; + + O << Imm; + } else { + assert(Op.isExpr() && "Unknown pcrel immediate operand"); + O << *Op.getExpr(); + } +} + +void AVRInstPrinter::printMemri(const MCInst *MI, unsigned OpNo, + raw_ostream &O) { + assert(MI->getOperand(OpNo).isReg() && "Expected a register for the first operand"); + + const MCOperand &OffsetOp = MI->getOperand(OpNo + 1); + + // Print the register. + printOperand(MI, OpNo, O); + + // Print the {+,-}offset. + if (OffsetOp.isImm()) { + int64_t Offset = OffsetOp.getImm(); + + if (Offset >= 0) + O << '+'; + + O << Offset; + } else if (OffsetOp.isExpr()) { + O << *OffsetOp.getExpr(); + } else { + llvm_unreachable("unknown type for offset"); + } +} + +} // end of namespace llvm + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRInstPrinter.h b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRInstPrinter.h new file mode 100644 index 000000000000..5b758a7503c9 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRInstPrinter.h @@ -0,0 +1,53 @@ +//===- AVRInstPrinter.h - Convert AVR MCInst to assembly syntax -*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This class prints an AVR MCInst to a .s file. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_INST_PRINTER_H +#define LLVM_AVR_INST_PRINTER_H + +#include "llvm/MC/MCInstPrinter.h" + +#include "MCTargetDesc/AVRMCTargetDesc.h" + +namespace llvm { + +/// Prints AVR instructions to a textual stream. +class AVRInstPrinter : public MCInstPrinter { +public: + AVRInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII, + const MCRegisterInfo &MRI) + : MCInstPrinter(MAI, MII, MRI) {} + + static const char *getPrettyRegisterName(unsigned RegNo, + MCRegisterInfo const &MRI); + + void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot, + const MCSubtargetInfo &STI) override; + +private: + static const char *getRegisterName(unsigned RegNo, + unsigned AltIdx = AVR::NoRegAltName); + + void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); + void printPCRelImm(const MCInst *MI, unsigned OpNo, raw_ostream &O); + void printMemri(const MCInst *MI, unsigned OpNo, raw_ostream &O); + + // Autogenerated by TableGen. + void printInstruction(const MCInst *MI, raw_ostream &O); + bool printAliasInstr(const MCInst *MI, raw_ostream &O); + void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx, + unsigned PrintMethodIdx, raw_ostream &O); +}; + +} // end namespace llvm + +#endif // LLVM_AVR_INST_PRINTER_H + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp new file mode 100644 index 000000000000..99b2172c562f --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp @@ -0,0 +1,29 @@ +//===-- AVRMCAsmInfo.cpp - AVR asm properties -----------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the declarations of the AVRMCAsmInfo properties. +// +//===----------------------------------------------------------------------===// + +#include "AVRMCAsmInfo.h" + +#include "llvm/ADT/Triple.h" + +namespace llvm { + +AVRMCAsmInfo::AVRMCAsmInfo(const Triple &TT) { + CodePointerSize = 2; + CalleeSaveStackSlotSize = 2; + CommentString = ";"; + PrivateGlobalPrefix = ".L"; + UsesELFSectionDirectiveForBSS = true; + UseIntegratedAssembler = true; + SupportsDebugInformation = true; +} + +} // end of namespace llvm diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.h b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.h new file mode 100644 index 000000000000..b2fa18777bc0 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.h @@ -0,0 +1,30 @@ +//===-- AVRMCAsmInfo.h - AVR asm properties ---------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the AVRMCAsmInfo class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_ASM_INFO_H +#define LLVM_AVR_ASM_INFO_H + +#include "llvm/MC/MCAsmInfo.h" + +namespace llvm { + +class Triple; + +/// Specifies the format of AVR assembly files. +class AVRMCAsmInfo : public MCAsmInfo { +public: + explicit AVRMCAsmInfo(const Triple &TT); +}; + +} // end namespace llvm + +#endif // LLVM_AVR_ASM_INFO_H diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCCodeEmitter.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCCodeEmitter.cpp new file mode 100644 index 000000000000..bc0488778685 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCCodeEmitter.cpp @@ -0,0 +1,304 @@ +//===-- AVRMCCodeEmitter.cpp - Convert AVR Code to Machine Code -----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements the AVRMCCodeEmitter class. +// +//===----------------------------------------------------------------------===// + +#include "AVRMCCodeEmitter.h" + +#include "MCTargetDesc/AVRMCExpr.h" +#include "MCTargetDesc/AVRMCTargetDesc.h" + +#include "llvm/ADT/APFloat.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCFixup.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/raw_ostream.h" + +#define DEBUG_TYPE "mccodeemitter" + +#define GET_INSTRMAP_INFO +#include "AVRGenInstrInfo.inc" +#undef GET_INSTRMAP_INFO + +namespace llvm { + +/// Performs a post-encoding step on a `LD` or `ST` instruction. +/// +/// The encoding of the LD/ST family of instructions is inconsistent w.r.t +/// the pointer register and the addressing mode. 
+/// +/// The permutations of the format are as followed: +/// ld Rd, X `1001 000d dddd 1100` +/// ld Rd, X+ `1001 000d dddd 1101` +/// ld Rd, -X `1001 000d dddd 1110` +/// +/// ld Rd, Y `1000 000d dddd 1000` +/// ld Rd, Y+ `1001 000d dddd 1001` +/// ld Rd, -Y `1001 000d dddd 1010` +/// +/// ld Rd, Z `1000 000d dddd 0000` +/// ld Rd, Z+ `1001 000d dddd 0001` +/// ld Rd, -Z `1001 000d dddd 0010` +/// ^ +/// | +/// Note this one inconsistent bit - it is 1 sometimes and 0 at other times. +/// There is no logical pattern. Looking at a truth table, the following +/// formula can be derived to fit the pattern: +// +/// ``` +/// inconsistent_bit = is_predec OR is_postinc OR is_reg_x +/// ``` +// +/// We manually set this bit in this post encoder method. +unsigned +AVRMCCodeEmitter::loadStorePostEncoder(const MCInst &MI, unsigned EncodedValue, + const MCSubtargetInfo &STI) const { + + assert(MI.getOperand(0).isReg() && MI.getOperand(1).isReg() && + "the load/store operands must be registers"); + + unsigned Opcode = MI.getOpcode(); + + // check whether either of the registers are the X pointer register. + bool IsRegX = MI.getOperand(0).getReg() == AVR::R27R26 || + MI.getOperand(1).getReg() == AVR::R27R26; + + bool IsPredec = Opcode == AVR::LDRdPtrPd || Opcode == AVR::STPtrPdRr; + bool IsPostinc = Opcode == AVR::LDRdPtrPi || Opcode == AVR::STPtrPiRr; + + // Check if we need to set the inconsistent bit + if (IsRegX || IsPredec || IsPostinc) { + EncodedValue |= (1 << 12); + } + + return EncodedValue; +} + +template <AVR::Fixups Fixup> +unsigned +AVRMCCodeEmitter::encodeRelCondBrTarget(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + + if (MO.isExpr()) { + Fixups.push_back(MCFixup::create(0, MO.getExpr(), + MCFixupKind(Fixup), MI.getLoc())); + return 0; + } + + assert(MO.isImm()); + + // Take the size of the current instruction away. + // With labels, this is implicitly done. + auto target = MO.getImm(); + AVR::fixups::adjustBranchTarget(target); + return target; +} + +unsigned AVRMCCodeEmitter::encodeLDSTPtrReg(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + auto MO = MI.getOperand(OpNo); + + // The operand should be a pointer register. + assert(MO.isReg()); + + switch (MO.getReg()) { + case AVR::R27R26: return 0x03; // X: 0b11 + case AVR::R29R28: return 0x02; // Y: 0b10 + case AVR::R31R30: return 0x00; // Z: 0b00 + default: + llvm_unreachable("invalid pointer register"); + } +} + +/// Encodes a `memri` operand. +/// The operand is 7-bits. 
+/// * The lower 6 bits is the immediate +/// * The upper bit is the pointer register bit (Z=0,Y=1) +unsigned AVRMCCodeEmitter::encodeMemri(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + auto RegOp = MI.getOperand(OpNo); + auto OffsetOp = MI.getOperand(OpNo + 1); + + assert(RegOp.isReg() && "Expected register operand"); + + uint8_t RegBit = 0; + + switch (RegOp.getReg()) { + default: + llvm_unreachable("Expected either Y or Z register"); + case AVR::R31R30: + RegBit = 0; + break; // Z register + case AVR::R29R28: + RegBit = 1; + break; // Y register + } + + int8_t OffsetBits; + + if (OffsetOp.isImm()) { + OffsetBits = OffsetOp.getImm(); + } else if (OffsetOp.isExpr()) { + OffsetBits = 0; + Fixups.push_back(MCFixup::create(0, OffsetOp.getExpr(), + MCFixupKind(AVR::fixup_6), MI.getLoc())); + } else { + llvm_unreachable("invalid value for offset"); + } + + return (RegBit << 6) | OffsetBits; +} + +unsigned AVRMCCodeEmitter::encodeComplement(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + // The operand should be an immediate. + assert(MI.getOperand(OpNo).isImm()); + + auto Imm = MI.getOperand(OpNo).getImm(); + return (~0) - Imm; +} + +template <AVR::Fixups Fixup, unsigned Offset> +unsigned AVRMCCodeEmitter::encodeImm(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + auto MO = MI.getOperand(OpNo); + + if (MO.isExpr()) { + if (isa<AVRMCExpr>(MO.getExpr())) { + // If the expression is already an AVRMCExpr (i.e. a lo8(symbol), + // we shouldn't perform any more fixups. Without this check, we would + // instead create a fixup to the symbol named 'lo8(symbol)' which + // is not correct. 
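+      // For example, in `ldi r24, lo8(foo)` (an illustrative case, not a name
+      // from this file) the operand is already an AVRMCExpr with VK_AVR_LO8,
+      // so getExprOpValue() records the matching lo8 fixup itself instead of
+      // this function wrapping the whole expression in another fixup.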
+ return getExprOpValue(MO.getExpr(), Fixups, STI); + } + + MCFixupKind FixupKind = static_cast<MCFixupKind>(Fixup); + Fixups.push_back(MCFixup::create(Offset, MO.getExpr(), FixupKind, MI.getLoc())); + + return 0; + } + + assert(MO.isImm()); + return MO.getImm(); +} + +unsigned AVRMCCodeEmitter::encodeCallTarget(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + auto MO = MI.getOperand(OpNo); + + if (MO.isExpr()) { + MCFixupKind FixupKind = static_cast<MCFixupKind>(AVR::fixup_call); + Fixups.push_back(MCFixup::create(0, MO.getExpr(), FixupKind, MI.getLoc())); + return 0; + } + + assert(MO.isImm()); + + auto Target = MO.getImm(); + AVR::fixups::adjustBranchTarget(Target); + return Target; +} + +unsigned AVRMCCodeEmitter::getExprOpValue(const MCExpr *Expr, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + + MCExpr::ExprKind Kind = Expr->getKind(); + + if (Kind == MCExpr::Binary) { + Expr = static_cast<const MCBinaryExpr *>(Expr)->getLHS(); + Kind = Expr->getKind(); + } + + if (Kind == MCExpr::Target) { + AVRMCExpr const *AVRExpr = cast<AVRMCExpr>(Expr); + int64_t Result; + if (AVRExpr->evaluateAsConstant(Result)) { + return Result; + } + + MCFixupKind FixupKind = static_cast<MCFixupKind>(AVRExpr->getFixupKind()); + Fixups.push_back(MCFixup::create(0, AVRExpr, FixupKind)); + return 0; + } + + assert(Kind == MCExpr::SymbolRef); + return 0; +} + +unsigned AVRMCCodeEmitter::getMachineOpValue(const MCInst &MI, + const MCOperand &MO, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + if (MO.isReg()) return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()); + if (MO.isImm()) return static_cast<unsigned>(MO.getImm()); + + if (MO.isFPImm()) + return static_cast<unsigned>(APFloat(MO.getFPImm()) + .bitcastToAPInt() + .getHiBits(32) + .getLimitedValue()); + + // MO must be an Expr. 
+ assert(MO.isExpr()); + + return getExprOpValue(MO.getExpr(), Fixups, STI); +} + +void AVRMCCodeEmitter::emitInstruction(uint64_t Val, unsigned Size, + const MCSubtargetInfo &STI, + raw_ostream &OS) const { + const uint16_t *Words = reinterpret_cast<uint16_t const *>(&Val); + size_t WordCount = Size / 2; + + for (int64_t i = WordCount - 1; i >= 0; --i) { + uint16_t Word = Words[i]; + + OS << (uint8_t) ((Word & 0x00ff) >> 0); + OS << (uint8_t) ((Word & 0xff00) >> 8); + } +} + +void AVRMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); + + // Get byte count of instruction + unsigned Size = Desc.getSize(); + + assert(Size > 0 && "Instruction size cannot be zero"); + + uint64_t BinaryOpCode = getBinaryCodeForInstr(MI, Fixups, STI); + emitInstruction(BinaryOpCode, Size, STI, OS); +} + +MCCodeEmitter *createAVRMCCodeEmitter(const MCInstrInfo &MCII, + const MCRegisterInfo &MRI, + MCContext &Ctx) { + return new AVRMCCodeEmitter(MCII, Ctx); +} + +#include "AVRGenMCCodeEmitter.inc" + +} // end of namespace llvm diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCCodeEmitter.h b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCCodeEmitter.h new file mode 100644 index 000000000000..2e24d885c155 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCCodeEmitter.h @@ -0,0 +1,115 @@ +//===-- AVRMCCodeEmitter.h - Convert AVR Code to Machine Code -------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the AVRMCCodeEmitter class. +// +//===----------------------------------------------------------------------===// +// + +#ifndef LLVM_AVR_CODE_EMITTER_H +#define LLVM_AVR_CODE_EMITTER_H + +#include "AVRFixupKinds.h" + +#include "llvm/MC/MCCodeEmitter.h" +#include "llvm/Support/DataTypes.h" + +#define GET_INSTRINFO_OPERAND_TYPES_ENUM +#include "AVRGenInstrInfo.inc" + +namespace llvm { + +class MCContext; +class MCExpr; +class MCFixup; +class MCInst; +class MCInstrInfo; +class MCOperand; +class MCSubtargetInfo; +class raw_ostream; + +/// Writes AVR machine code to a stream. +class AVRMCCodeEmitter : public MCCodeEmitter { +public: + AVRMCCodeEmitter(const MCInstrInfo &MCII, MCContext &Ctx) + : MCII(MCII), Ctx(Ctx) {} + +private: + /// Finishes up encoding an LD/ST instruction. + /// The purpose of this function is to set an bit in the instruction + /// which follows no logical pattern. See the implementation for details. + unsigned loadStorePostEncoder(const MCInst &MI, unsigned EncodedValue, + const MCSubtargetInfo &STI) const; + + /// Gets the encoding for a conditional branch target. + template <AVR::Fixups Fixup> + unsigned encodeRelCondBrTarget(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// Encodes the `PTRREGS` operand to a load or store instruction. + unsigned encodeLDSTPtrReg(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// Encodes a `register+immediate` operand for `LDD`/`STD`. 
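+  /// The result packs the pointer-register selector (Z = 0, Y = 1) above a
+  /// 6-bit displacement; e.g. the memri operand of `ldd r24, Y+5` (an
+  /// illustrative case) encodes as `(1 << 6) | 5`.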
+ unsigned encodeMemri(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// Takes the complement of a number (~0 - val). + unsigned encodeComplement(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// Encodes an immediate value with a given fixup. + /// \tparam Offset The offset into the instruction for the fixup. + template <AVR::Fixups Fixup, unsigned Offset> + unsigned encodeImm(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// Gets the encoding of the target for the `CALL k` instruction. + unsigned encodeCallTarget(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// TableGen'ed function to get the binary encoding for an instruction. + uint64_t getBinaryCodeForInstr(const MCInst &MI, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getExprOpValue(const MCExpr *Expr, SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + /// Returns the binary encoding of operand. + /// + /// If the machine operand requires relocation, the relocation is recorded + /// and zero is returned. + unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; + + void emitInstruction(uint64_t Val, unsigned Size, const MCSubtargetInfo &STI, + raw_ostream &OS) const; + + void encodeInstruction(const MCInst &MI, raw_ostream &OS, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const override; + + AVRMCCodeEmitter(const AVRMCCodeEmitter &) = delete; + void operator=(const AVRMCCodeEmitter &) = delete; + + const MCInstrInfo &MCII; + MCContext &Ctx; +}; + +} // end namespace of llvm. + +#endif // LLVM_AVR_CODE_EMITTER_H + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCELFStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCELFStreamer.cpp new file mode 100644 index 000000000000..d9169f90a765 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCELFStreamer.cpp @@ -0,0 +1,52 @@ +//===--------- AVRMCELFStreamer.cpp - AVR subclass of MCELFStreamer -------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a stub that parses a MCInst bundle and passes the +// instructions on to the real streamer. 
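+// It also implements EmitValueForModiferKind(), which emits a symbol
+// reference whose variant is chosen from the requested modifier and size:
+// DIFF8/DIFF16/DIFF32 when no modifier is given, or LO8/HI8/HLO8 for the
+// lo8/hi8/hh8 modifiers.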
+// +//===----------------------------------------------------------------------===// +#define DEBUG_TYPE "avrmcelfstreamer" + +#include "MCTargetDesc/AVRMCELFStreamer.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/MC/MCObjectWriter.h" + +using namespace llvm; + +void AVRMCELFStreamer::EmitValueForModiferKind( + const MCSymbol *Sym, unsigned SizeInBytes, SMLoc Loc, + AVRMCExpr::VariantKind ModifierKind) { + MCSymbolRefExpr::VariantKind Kind = MCSymbolRefExpr::VK_AVR_NONE; + if (ModifierKind == AVRMCExpr::VK_AVR_None) { + Kind = MCSymbolRefExpr::VK_AVR_DIFF8; + if (SizeInBytes == SIZE_LONG) + Kind = MCSymbolRefExpr::VK_AVR_DIFF32; + else if (SizeInBytes == SIZE_WORD) + Kind = MCSymbolRefExpr::VK_AVR_DIFF16; + } else if (ModifierKind == AVRMCExpr::VK_AVR_LO8) + Kind = MCSymbolRefExpr::VK_AVR_LO8; + else if (ModifierKind == AVRMCExpr::VK_AVR_HI8) + Kind = MCSymbolRefExpr::VK_AVR_HI8; + else if (ModifierKind == AVRMCExpr::VK_AVR_HH8) + Kind = MCSymbolRefExpr::VK_AVR_HLO8; + MCELFStreamer::EmitValue(MCSymbolRefExpr::create(Sym, Kind, getContext()), + SizeInBytes, Loc); +} + +namespace llvm { +MCStreamer *createAVRELFStreamer(Triple const &TT, MCContext &Context, + std::unique_ptr<MCAsmBackend> MAB, + std::unique_ptr<MCObjectWriter> OW, + std::unique_ptr<MCCodeEmitter> CE) { + return new AVRMCELFStreamer(Context, std::move(MAB), std::move(OW), + std::move(CE)); +} + +} // end namespace llvm diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCELFStreamer.h b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCELFStreamer.h new file mode 100644 index 000000000000..37a610bc4248 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCELFStreamer.h @@ -0,0 +1,56 @@ +//===--------- AVRMCELFStreamer.h - AVR subclass of MCELFStreamer ---------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_AVR_MCTARGETDESC_AVRMCELFSTREAMER_H +#define LLVM_LIB_TARGET_AVR_MCTARGETDESC_AVRMCELFSTREAMER_H + +#include "MCTargetDesc/AVRMCExpr.h" +#include "MCTargetDesc/AVRMCTargetDesc.h" +#include "llvm/MC/MCAsmBackend.h" +#include "llvm/MC/MCCodeEmitter.h" +#include "llvm/MC/MCELFStreamer.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCObjectWriter.h" + +namespace llvm { + +const int SIZE_LONG = 4; +const int SIZE_WORD = 2; + +class AVRMCELFStreamer : public MCELFStreamer { + std::unique_ptr<MCInstrInfo> MCII; + +public: + AVRMCELFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB, + std::unique_ptr<MCObjectWriter> OW, + std::unique_ptr<MCCodeEmitter> Emitter) + : MCELFStreamer(Context, std::move(TAB), std::move(OW), + std::move(Emitter)), + MCII(createAVRMCInstrInfo()) {} + + AVRMCELFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB, + std::unique_ptr<MCObjectWriter> OW, + std::unique_ptr<MCCodeEmitter> Emitter, + MCAssembler *Assembler) + : MCELFStreamer(Context, std::move(TAB), std::move(OW), + std::move(Emitter)), + MCII(createAVRMCInstrInfo()) {} + + void EmitValueForModiferKind( + const MCSymbol *Sym, unsigned SizeInBytes, SMLoc Loc = SMLoc(), + AVRMCExpr::VariantKind ModifierKind = AVRMCExpr::VK_AVR_None); +}; + +MCStreamer *createAVRELFStreamer(Triple const &TT, MCContext &Context, + std::unique_ptr<MCAsmBackend> MAB, + std::unique_ptr<MCObjectWriter> OW, + std::unique_ptr<MCCodeEmitter> CE); + +} // end namespace llvm + +#endif // LLVM_LIB_TARGET_AVR_MCTARGETDESC_AVRMCELFSTREAMER_H diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCExpr.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCExpr.cpp new file mode 100644 index 000000000000..0a53e5346779 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCExpr.cpp @@ -0,0 +1,214 @@ +//===-- AVRMCExpr.cpp - AVR specific MC expression classes ----------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "AVRMCExpr.h" + +#include "llvm/MC/MCAsmLayout.h" +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCValue.h" + +namespace llvm { + +namespace { + +const struct ModifierEntry { + const char * const Spelling; + AVRMCExpr::VariantKind VariantKind; +} ModifierNames[] = { + {"lo8", AVRMCExpr::VK_AVR_LO8}, {"hi8", AVRMCExpr::VK_AVR_HI8}, + {"hh8", AVRMCExpr::VK_AVR_HH8}, // synonym with hlo8 + {"hlo8", AVRMCExpr::VK_AVR_HH8}, {"hhi8", AVRMCExpr::VK_AVR_HHI8}, + + {"pm_lo8", AVRMCExpr::VK_AVR_PM_LO8}, {"pm_hi8", AVRMCExpr::VK_AVR_PM_HI8}, + {"pm_hh8", AVRMCExpr::VK_AVR_PM_HH8}, + + {"lo8_gs", AVRMCExpr::VK_AVR_LO8_GS}, {"hi8_gs", AVRMCExpr::VK_AVR_HI8_GS}, + {"gs", AVRMCExpr::VK_AVR_GS}, +}; + +} // end of anonymous namespace + +const AVRMCExpr *AVRMCExpr::create(VariantKind Kind, const MCExpr *Expr, + bool Negated, MCContext &Ctx) { + return new (Ctx) AVRMCExpr(Kind, Expr, Negated); +} + +void AVRMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const { + assert(Kind != VK_AVR_None); + + if (isNegated()) + OS << '-'; + + OS << getName() << '('; + getSubExpr()->print(OS, MAI); + OS << ')'; +} + +bool AVRMCExpr::evaluateAsConstant(int64_t &Result) const { + MCValue Value; + + bool isRelocatable = + getSubExpr()->evaluateAsRelocatable(Value, nullptr, nullptr); + + if (!isRelocatable) + return false; + + if (Value.isAbsolute()) { + Result = evaluateAsInt64(Value.getConstant()); + return true; + } + + return false; +} + +bool AVRMCExpr::evaluateAsRelocatableImpl(MCValue &Result, + const MCAsmLayout *Layout, + const MCFixup *Fixup) const { + MCValue Value; + bool isRelocatable = SubExpr->evaluateAsRelocatable(Value, Layout, Fixup); + + if (!isRelocatable) + return false; + + if (Value.isAbsolute()) { + Result = MCValue::get(evaluateAsInt64(Value.getConstant())); + } else { + if (!Layout) return false; + + MCContext &Context = Layout->getAssembler().getContext(); + const MCSymbolRefExpr *Sym = Value.getSymA(); + MCSymbolRefExpr::VariantKind Modifier = Sym->getKind(); + if (Modifier != MCSymbolRefExpr::VK_None) + return false; + + Sym = MCSymbolRefExpr::create(&Sym->getSymbol(), Modifier, Context); + Result = MCValue::get(Sym, Value.getSymB(), Value.getConstant()); + } + + return true; +} + +int64_t AVRMCExpr::evaluateAsInt64(int64_t Value) const { + if (Negated) + Value *= -1; + + switch (Kind) { + case AVRMCExpr::VK_AVR_LO8: + Value &= 0xff; + break; + case AVRMCExpr::VK_AVR_HI8: + Value &= 0xff00; + Value >>= 8; + break; + case AVRMCExpr::VK_AVR_HH8: + Value &= 0xff0000; + Value >>= 16; + break; + case AVRMCExpr::VK_AVR_HHI8: + Value &= 0xff000000; + Value >>= 24; + break; + case AVRMCExpr::VK_AVR_PM_LO8: + case AVRMCExpr::VK_AVR_LO8_GS: + Value >>= 1; // Program memory addresses must always be shifted by one. + Value &= 0xff; + break; + case AVRMCExpr::VK_AVR_PM_HI8: + case AVRMCExpr::VK_AVR_HI8_GS: + Value >>= 1; // Program memory addresses must always be shifted by one. + Value &= 0xff00; + Value >>= 8; + break; + case AVRMCExpr::VK_AVR_PM_HH8: + Value >>= 1; // Program memory addresses must always be shifted by one. + Value &= 0xff0000; + Value >>= 16; + break; + case AVRMCExpr::VK_AVR_GS: + Value >>= 1; // Program memory addresses must always be shifted by one. 
+ break; + + case AVRMCExpr::VK_AVR_None: + llvm_unreachable("Uninitialized expression."); + } + return static_cast<uint64_t>(Value) & 0xff; +} + +AVR::Fixups AVRMCExpr::getFixupKind() const { + AVR::Fixups Kind = AVR::Fixups::LastTargetFixupKind; + + switch (getKind()) { + case VK_AVR_LO8: + Kind = isNegated() ? AVR::fixup_lo8_ldi_neg : AVR::fixup_lo8_ldi; + break; + case VK_AVR_HI8: + Kind = isNegated() ? AVR::fixup_hi8_ldi_neg : AVR::fixup_hi8_ldi; + break; + case VK_AVR_HH8: + Kind = isNegated() ? AVR::fixup_hh8_ldi_neg : AVR::fixup_hh8_ldi; + break; + case VK_AVR_HHI8: + Kind = isNegated() ? AVR::fixup_ms8_ldi_neg : AVR::fixup_ms8_ldi; + break; + + case VK_AVR_PM_LO8: + Kind = isNegated() ? AVR::fixup_lo8_ldi_pm_neg : AVR::fixup_lo8_ldi_pm; + break; + case VK_AVR_PM_HI8: + Kind = isNegated() ? AVR::fixup_hi8_ldi_pm_neg : AVR::fixup_hi8_ldi_pm; + break; + case VK_AVR_PM_HH8: + Kind = isNegated() ? AVR::fixup_hh8_ldi_pm_neg : AVR::fixup_hh8_ldi_pm; + break; + case VK_AVR_GS: + Kind = AVR::fixup_16_pm; + break; + case VK_AVR_LO8_GS: + Kind = AVR::fixup_lo8_ldi_gs; + break; + case VK_AVR_HI8_GS: + Kind = AVR::fixup_hi8_ldi_gs; + break; + + case VK_AVR_None: + llvm_unreachable("Uninitialized expression"); + } + + return Kind; +} + +void AVRMCExpr::visitUsedExpr(MCStreamer &Streamer) const { + Streamer.visitUsedExpr(*getSubExpr()); +} + +const char *AVRMCExpr::getName() const { + const auto &Modifier = std::find_if( + std::begin(ModifierNames), std::end(ModifierNames), + [this](ModifierEntry const &Mod) { return Mod.VariantKind == Kind; }); + + if (Modifier != std::end(ModifierNames)) { + return Modifier->Spelling; + } + return nullptr; +} + +AVRMCExpr::VariantKind AVRMCExpr::getKindByName(StringRef Name) { + const auto &Modifier = std::find_if( + std::begin(ModifierNames), std::end(ModifierNames), + [&Name](ModifierEntry const &Mod) { return Mod.Spelling == Name; }); + + if (Modifier != std::end(ModifierNames)) { + return Modifier->VariantKind; + } + return VK_AVR_None; +} + +} // end of namespace llvm + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCExpr.h b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCExpr.h new file mode 100644 index 000000000000..3b696bab1715 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCExpr.h @@ -0,0 +1,91 @@ +//===-- AVRMCExpr.h - AVR specific MC expression classes --------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_MCEXPR_H +#define LLVM_AVR_MCEXPR_H + +#include "llvm/MC/MCExpr.h" + +#include "MCTargetDesc/AVRFixupKinds.h" + +namespace llvm { + +/// A expression in AVR machine code. +class AVRMCExpr : public MCTargetExpr { +public: + /// Specifies the type of an expression. + enum VariantKind { + VK_AVR_None, + + VK_AVR_HI8, ///< Corresponds to `hi8()`. + VK_AVR_LO8, ///< Corresponds to `lo8()`. + VK_AVR_HH8, ///< Corresponds to `hlo8() and hh8()`. + VK_AVR_HHI8, ///< Corresponds to `hhi8()`. + + VK_AVR_PM_LO8, ///< Corresponds to `pm_lo8()`. + VK_AVR_PM_HI8, ///< Corresponds to `pm_hi8()`. + VK_AVR_PM_HH8, ///< Corresponds to `pm_hh8()`. + + VK_AVR_LO8_GS, ///< Corresponds to `lo8(gs())`. + VK_AVR_HI8_GS, ///< Corresponds to `hi8(gs())`. + VK_AVR_GS, ///< Corresponds to `gs()`. 
+ }; + +public: + /// Creates an AVR machine code expression. + static const AVRMCExpr *create(VariantKind Kind, const MCExpr *Expr, + bool isNegated, MCContext &Ctx); + + /// Gets the type of the expression. + VariantKind getKind() const { return Kind; } + /// Gets the name of the expression. + const char *getName() const; + const MCExpr *getSubExpr() const { return SubExpr; } + /// Gets the fixup which corresponds to the expression. + AVR::Fixups getFixupKind() const; + /// Evaluates the fixup as a constant value. + bool evaluateAsConstant(int64_t &Result) const; + + bool isNegated() const { return Negated; } + void setNegated(bool negated = true) { Negated = negated; } + + void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const override; + bool evaluateAsRelocatableImpl(MCValue &Res, const MCAsmLayout *Layout, + const MCFixup *Fixup) const override; + + void visitUsedExpr(MCStreamer &streamer) const override; + + MCFragment *findAssociatedFragment() const override { + return getSubExpr()->findAssociatedFragment(); + } + + void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override {} + + static bool classof(const MCExpr *E) { + return E->getKind() == MCExpr::Target; + } + +public: + static VariantKind getKindByName(StringRef Name); + +private: + int64_t evaluateAsInt64(int64_t Value) const; + + const VariantKind Kind; + const MCExpr *SubExpr; + bool Negated; + +private: + explicit AVRMCExpr(VariantKind Kind, const MCExpr *Expr, bool Negated) + : Kind(Kind), SubExpr(Expr), Negated(Negated) {} + ~AVRMCExpr() {} +}; + +} // end namespace llvm + +#endif // LLVM_AVR_MCEXPR_H diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCTargetDesc.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCTargetDesc.cpp new file mode 100644 index 000000000000..f6607b26a065 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCTargetDesc.cpp @@ -0,0 +1,127 @@ +//===-- AVRMCTargetDesc.cpp - AVR Target Descriptions ---------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides AVR specific target descriptions. 
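+// In particular it implements LLVMInitializeAVRTargetMC(), which registers
+// the MC-layer components with the TargetRegistry: asm info, instruction,
+// register and subtarget info, the instruction printer, the code emitter,
+// the ELF and asm target streamers, and the asm backend.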
+// +//===----------------------------------------------------------------------===// + +#include "AVRELFStreamer.h" +#include "AVRInstPrinter.h" +#include "AVRMCAsmInfo.h" +#include "AVRMCELFStreamer.h" +#include "AVRMCTargetDesc.h" +#include "AVRTargetStreamer.h" +#include "TargetInfo/AVRTargetInfo.h" + +#include "llvm/MC/MCAsmBackend.h" +#include "llvm/MC/MCELFStreamer.h" +#include "llvm/MC/MCCodeEmitter.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/TargetRegistry.h" + +#define GET_INSTRINFO_MC_DESC +#include "AVRGenInstrInfo.inc" + +#define GET_SUBTARGETINFO_MC_DESC +#include "AVRGenSubtargetInfo.inc" + +#define GET_REGINFO_MC_DESC +#include "AVRGenRegisterInfo.inc" + +using namespace llvm; + +MCInstrInfo *llvm::createAVRMCInstrInfo() { + MCInstrInfo *X = new MCInstrInfo(); + InitAVRMCInstrInfo(X); + + return X; +} + +static MCRegisterInfo *createAVRMCRegisterInfo(const Triple &TT) { + MCRegisterInfo *X = new MCRegisterInfo(); + InitAVRMCRegisterInfo(X, 0); + + return X; +} + +static MCSubtargetInfo *createAVRMCSubtargetInfo(const Triple &TT, + StringRef CPU, StringRef FS) { + return createAVRMCSubtargetInfoImpl(TT, CPU, FS); +} + +static MCInstPrinter *createAVRMCInstPrinter(const Triple &T, + unsigned SyntaxVariant, + const MCAsmInfo &MAI, + const MCInstrInfo &MII, + const MCRegisterInfo &MRI) { + if (SyntaxVariant == 0) { + return new AVRInstPrinter(MAI, MII, MRI); + } + + return nullptr; +} + +static MCStreamer *createMCStreamer(const Triple &T, MCContext &Context, + std::unique_ptr<MCAsmBackend> &&MAB, + std::unique_ptr<MCObjectWriter> &&OW, + std::unique_ptr<MCCodeEmitter> &&Emitter, + bool RelaxAll) { + return createELFStreamer(Context, std::move(MAB), std::move(OW), + std::move(Emitter), RelaxAll); +} + +static MCTargetStreamer * +createAVRObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI) { + return new AVRELFStreamer(S, STI); +} + +static MCTargetStreamer *createMCAsmTargetStreamer(MCStreamer &S, + formatted_raw_ostream &OS, + MCInstPrinter *InstPrint, + bool isVerboseAsm) { + return new AVRTargetAsmStreamer(S); +} + +extern "C" void LLVMInitializeAVRTargetMC() { + // Register the MC asm info. + RegisterMCAsmInfo<AVRMCAsmInfo> X(getTheAVRTarget()); + + // Register the MC instruction info. + TargetRegistry::RegisterMCInstrInfo(getTheAVRTarget(), createAVRMCInstrInfo); + + // Register the MC register info. + TargetRegistry::RegisterMCRegInfo(getTheAVRTarget(), createAVRMCRegisterInfo); + + // Register the MC subtarget info. + TargetRegistry::RegisterMCSubtargetInfo(getTheAVRTarget(), + createAVRMCSubtargetInfo); + + // Register the MCInstPrinter. + TargetRegistry::RegisterMCInstPrinter(getTheAVRTarget(), + createAVRMCInstPrinter); + + // Register the MC Code Emitter + TargetRegistry::RegisterMCCodeEmitter(getTheAVRTarget(), createAVRMCCodeEmitter); + + // Register the obj streamer + TargetRegistry::RegisterELFStreamer(getTheAVRTarget(), createMCStreamer); + + // Register the obj target streamer. + TargetRegistry::RegisterObjectTargetStreamer(getTheAVRTarget(), + createAVRObjectTargetStreamer); + + // Register the asm target streamer. + TargetRegistry::RegisterAsmTargetStreamer(getTheAVRTarget(), + createMCAsmTargetStreamer); + + // Register the asm backend (as little endian). 
+ TargetRegistry::RegisterMCAsmBackend(getTheAVRTarget(), createAVRAsmBackend); +} + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCTargetDesc.h b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCTargetDesc.h new file mode 100644 index 000000000000..470db01ff468 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRMCTargetDesc.h @@ -0,0 +1,61 @@ +//===-- AVRMCTargetDesc.h - AVR Target Descriptions -------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides AVR specific target descriptions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_MCTARGET_DESC_H +#define LLVM_AVR_MCTARGET_DESC_H + +#include "llvm/Support/DataTypes.h" + +#include <memory> + +namespace llvm { + +class MCAsmBackend; +class MCCodeEmitter; +class MCContext; +class MCInstrInfo; +class MCObjectTargetWriter; +class MCRegisterInfo; +class MCSubtargetInfo; +class MCTargetOptions; +class StringRef; +class Target; +class Triple; +class raw_pwrite_stream; + +MCInstrInfo *createAVRMCInstrInfo(); + +/// Creates a machine code emitter for AVR. +MCCodeEmitter *createAVRMCCodeEmitter(const MCInstrInfo &MCII, + const MCRegisterInfo &MRI, + MCContext &Ctx); + +/// Creates an assembly backend for AVR. +MCAsmBackend *createAVRAsmBackend(const Target &T, const MCSubtargetInfo &STI, + const MCRegisterInfo &MRI, + const llvm::MCTargetOptions &TO); + +/// Creates an ELF object writer for AVR. +std::unique_ptr<MCObjectTargetWriter> createAVRELFObjectWriter(uint8_t OSABI); + +} // end namespace llvm + +#define GET_REGINFO_ENUM +#include "AVRGenRegisterInfo.inc" + +#define GET_INSTRINFO_ENUM +#include "AVRGenInstrInfo.inc" + +#define GET_SUBTARGETINFO_ENUM +#include "AVRGenSubtargetInfo.inc" + +#endif // LLVM_AVR_MCTARGET_DESC_H diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRTargetStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRTargetStreamer.cpp new file mode 100644 index 000000000000..3487a2bbb864 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRTargetStreamer.cpp @@ -0,0 +1,43 @@ +//===-- AVRTargetStreamer.cpp - AVR Target Streamer Methods ---------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides AVR specific target streamer methods. 
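+// AVRTargetStreamer::finish() below declares the __do_copy_data and
+// __do_clear_bss symbols as global, which tells the CRT startup code to copy
+// initialized variables from program memory to RAM and to clear the zeroed
+// data section.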
+// +//===----------------------------------------------------------------------===// + +#include "AVRTargetStreamer.h" + +#include "llvm/MC/MCContext.h" + +namespace llvm { + +AVRTargetStreamer::AVRTargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {} + +AVRTargetAsmStreamer::AVRTargetAsmStreamer(MCStreamer &S) + : AVRTargetStreamer(S) {} + +void AVRTargetStreamer::finish() { + MCStreamer &OS = getStreamer(); + MCContext &Context = OS.getContext(); + + MCSymbol *DoCopyData = Context.getOrCreateSymbol("__do_copy_data"); + MCSymbol *DoClearBss = Context.getOrCreateSymbol("__do_clear_bss"); + + // FIXME: We can disable __do_copy_data if there are no static RAM variables. + + OS.emitRawComment(" Declaring this symbol tells the CRT that it should"); + OS.emitRawComment("copy all variables from program memory to RAM on startup"); + OS.EmitSymbolAttribute(DoCopyData, MCSA_Global); + + OS.emitRawComment(" Declaring this symbol tells the CRT that it should"); + OS.emitRawComment("clear the zeroed data section on startup"); + OS.EmitSymbolAttribute(DoClearBss, MCSA_Global); +} + +} // end namespace llvm + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRTargetStreamer.h b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRTargetStreamer.h new file mode 100644 index 000000000000..5c4d1a22f6c6 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRTargetStreamer.h @@ -0,0 +1,33 @@ +//===-- AVRTargetStreamer.h - AVR Target Streamer --------------*- C++ -*--===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_TARGET_STREAMER_H +#define LLVM_AVR_TARGET_STREAMER_H + +#include "llvm/MC/MCELFStreamer.h" + +namespace llvm { +class MCStreamer; + +/// A generic AVR target output stream. +class AVRTargetStreamer : public MCTargetStreamer { +public: + explicit AVRTargetStreamer(MCStreamer &S); + + void finish() override; +}; + +/// A target streamer for textual AVR assembly code. +class AVRTargetAsmStreamer : public AVRTargetStreamer { +public: + explicit AVRTargetAsmStreamer(MCStreamer &S); +}; + +} // end namespace llvm + +#endif // LLVM_AVR_TARGET_STREAMER_H diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/README.md b/contrib/llvm-project/llvm/lib/Target/AVR/README.md new file mode 100644 index 000000000000..bd8b453aa81e --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/README.md @@ -0,0 +1,8 @@ +# AVR backend + +This experimental backend is for the 8-bit Atmel [AVR](https://en.wikipedia.org/wiki/Atmel_AVR) microcontroller. 
+ +## Useful links + +* [Unresolved bugs](https://llvm.org/bugs/buglist.cgi?product=libraries&component=Backend%3A%20AVR&resolution=---&list_id=109466) +* [Architecture notes](https://github.com/avr-llvm/architecture) diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/TODO.md b/contrib/llvm-project/llvm/lib/Target/AVR/TODO.md new file mode 100644 index 000000000000..3a333355646d --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/TODO.md @@ -0,0 +1,7 @@ +# Write an XFAIL test for this `FIXME` in `AVRInstrInfo.td` + +``` +// :FIXME: DAGCombiner produces an shl node after legalization from these seq: +// BR_JT -> (mul x, 2) -> (shl x, 1) +``` + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/TargetInfo/AVRTargetInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/TargetInfo/AVRTargetInfo.cpp new file mode 100644 index 000000000000..c62d5cb85bc4 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/TargetInfo/AVRTargetInfo.cpp @@ -0,0 +1,22 @@ +//===-- AVRTargetInfo.cpp - AVR Target Implementation ---------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "TargetInfo/AVRTargetInfo.h" +#include "llvm/Support/TargetRegistry.h" +namespace llvm { +Target &getTheAVRTarget() { + static Target TheAVRTarget; + return TheAVRTarget; +} +} + +extern "C" void LLVMInitializeAVRTargetInfo() { + llvm::RegisterTarget<llvm::Triple::avr> X(llvm::getTheAVRTarget(), "avr", + "Atmel AVR Microcontroller", "AVR"); +} + diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/TargetInfo/AVRTargetInfo.h b/contrib/llvm-project/llvm/lib/Target/AVR/TargetInfo/AVRTargetInfo.h new file mode 100644 index 000000000000..7e0186bbdae1 --- /dev/null +++ b/contrib/llvm-project/llvm/lib/Target/AVR/TargetInfo/AVRTargetInfo.h @@ -0,0 +1,18 @@ +//===-- AVRTargetInfo.h - AVR Target Implementation -------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AVR_TARGET_INFO_H +#define LLVM_AVR_TARGET_INFO_H + +namespace llvm { +class Target; + +Target &getTheAVRTarget(); +} // namespace llvm + +#endif // LLVM_AVR_TARGET_INFO_H |
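For reference, a minimal sketch of how a tool might consume the registrations shown above, assuming the AVR target is enabled in the LLVM build; the initializer helpers come from `llvm/Support/TargetSelect.h` and error handling is elided:

```cpp
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"

#include <string>

int main() {
  // Register every configured target, including AVR when it is built in.
  llvm::InitializeAllTargetInfos();
  llvm::InitializeAllTargetMCs();

  // Look up the Target instance created by getTheAVRTarget().
  std::string Error;
  const llvm::Target *AVRTarget =
      llvm::TargetRegistry::lookupTarget("avr", Error);

  return AVRTarget ? 0 : 1;
}
```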