path: root/contrib/llvm-project/llvm/lib/Target/RISCV
Diffstat (limited to 'contrib/llvm-project/llvm/lib/Target/RISCV')
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp  1870
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp  330
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp  377
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h  149
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFObjectWriter.cpp  142
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp  68
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h  30
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVFixupKinds.h  92
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp  126
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h  56
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp  47
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.h  33
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp  370
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp  298
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h  94
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.cpp  108
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.h  58
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp  47
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h  44
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.h  44
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.td  111
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp  147
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVCallingConv.td  65
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp  716
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp  408
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVFrameLowering.h  58
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp  292
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp  2665
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h  214
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormats.td  314
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormatsC.td  159
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp  468
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.h  85
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.td  1086
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoA.td  371
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoC.td  763
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoD.td  339
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoF.td  389
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoM.td  84
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp  137
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h  52
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp  285
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp  157
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.h  58
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.td  229
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.cpp  50
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.h  92
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSystemOperands.td  349
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp  115
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.h  47
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetObjectFile.cpp  114
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetObjectFile.h  48
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp  92
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h  52
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/TargetInfo/RISCVTargetInfo.cpp  28
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/TargetInfo/RISCVTargetInfo.h  21
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp  80
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h  194
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/Utils/RISCVMatInt.cpp  93
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/Utils/RISCVMatInt.h  44
60 files changed, 15454 insertions, 0 deletions
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
new file mode 100644
index 000000000000..0df194d1e185
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -0,0 +1,1870 @@
+//===-- RISCVAsmParser.cpp - Parse RISCV assembly to MCInst instructions --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/RISCVAsmBackend.h"
+#include "MCTargetDesc/RISCVMCExpr.h"
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "MCTargetDesc/RISCVTargetStreamer.h"
+#include "TargetInfo/RISCVTargetInfo.h"
+#include "Utils/RISCVBaseInfo.h"
+#include "Utils/RISCVMatInt.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstBuilder.h"
+#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/TargetRegistry.h"
+
+#include <limits>
+
+using namespace llvm;
+
+// Include the auto-generated portion of the compress emitter.
+#define GEN_COMPRESS_INSTR
+#include "RISCVGenCompressInstEmitter.inc"
+
+namespace {
+struct RISCVOperand;
+
+class RISCVAsmParser : public MCTargetAsmParser {
+ SmallVector<FeatureBitset, 4> FeatureBitStack;
+
+ SMLoc getLoc() const { return getParser().getTok().getLoc(); }
+ bool isRV64() const { return getSTI().hasFeature(RISCV::Feature64Bit); }
+ bool isRV32E() const { return getSTI().hasFeature(RISCV::FeatureRV32E); }
+
+ RISCVTargetStreamer &getTargetStreamer() {
+ MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
+ return static_cast<RISCVTargetStreamer &>(TS);
+ }
+
+ unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
+ unsigned Kind) override;
+
+ bool generateImmOutOfRangeError(OperandVector &Operands, uint64_t ErrorInfo,
+ int64_t Lower, int64_t Upper, Twine Msg);
+
+ bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands, MCStreamer &Out,
+ uint64_t &ErrorInfo,
+ bool MatchingInlineAsm) override;
+
+ bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
+
+ bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc, OperandVector &Operands) override;
+
+ bool ParseDirective(AsmToken DirectiveID) override;
+
+ // Helper to actually emit an instruction to the MCStreamer. Also, when
+ // possible, compression of the instruction is performed.
+ void emitToStreamer(MCStreamer &S, const MCInst &Inst);
+
+ // Helper to emit a combination of LUI, ADDI(W), and SLLI instructions that
+  // synthesize the desired immediate value into the destination register.
+ void emitLoadImm(unsigned DestReg, int64_t Value, MCStreamer &Out);
+
+ // Helper to emit a combination of AUIPC and SecondOpcode. Used to implement
+ // helpers such as emitLoadLocalAddress and emitLoadAddress.
+ void emitAuipcInstPair(MCOperand DestReg, MCOperand TmpReg,
+ const MCExpr *Symbol, RISCVMCExpr::VariantKind VKHi,
+ unsigned SecondOpcode, SMLoc IDLoc, MCStreamer &Out);
+
+ // Helper to emit pseudo instruction "lla" used in PC-rel addressing.
+ void emitLoadLocalAddress(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out);
+
+ // Helper to emit pseudo instruction "la" used in GOT/PC-rel addressing.
+ void emitLoadAddress(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out);
+
+ // Helper to emit pseudo instruction "la.tls.ie" used in initial-exec TLS
+ // addressing.
+ void emitLoadTLSIEAddress(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out);
+
+ // Helper to emit pseudo instruction "la.tls.gd" used in global-dynamic TLS
+ // addressing.
+ void emitLoadTLSGDAddress(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out);
+
+ // Helper to emit pseudo load/store instruction with a symbol.
+ void emitLoadStoreSymbol(MCInst &Inst, unsigned Opcode, SMLoc IDLoc,
+ MCStreamer &Out, bool HasTmpReg);
+
+ // Checks that a PseudoAddTPRel is using x4/tp in its second input operand.
+ // Enforcing this using a restricted register class for the second input
+ // operand of PseudoAddTPRel results in a poor diagnostic due to the fact
+  // that 'add' is an overloaded mnemonic.
+ bool checkPseudoAddTPRel(MCInst &Inst, OperandVector &Operands);
+
+ /// Helper for processing MC instructions that have been successfully matched
+ /// by MatchAndEmitInstruction. Modifications to the emitted instructions,
+ /// like the expansion of pseudo instructions (e.g., "li"), can be performed
+ /// in this method.
+ bool processInstruction(MCInst &Inst, SMLoc IDLoc, OperandVector &Operands,
+ MCStreamer &Out);
+
+// Auto-generated instruction matching functions
+#define GET_ASSEMBLER_HEADER
+#include "RISCVGenAsmMatcher.inc"
+
+ OperandMatchResultTy parseCSRSystemRegister(OperandVector &Operands);
+ OperandMatchResultTy parseImmediate(OperandVector &Operands);
+ OperandMatchResultTy parseRegister(OperandVector &Operands,
+ bool AllowParens = false);
+ OperandMatchResultTy parseMemOpBaseReg(OperandVector &Operands);
+ OperandMatchResultTy parseAtomicMemOp(OperandVector &Operands);
+ OperandMatchResultTy parseOperandWithModifier(OperandVector &Operands);
+ OperandMatchResultTy parseBareSymbol(OperandVector &Operands);
+ OperandMatchResultTy parseCallSymbol(OperandVector &Operands);
+ OperandMatchResultTy parseJALOffset(OperandVector &Operands);
+
+ bool parseOperand(OperandVector &Operands, StringRef Mnemonic);
+
+ bool parseDirectiveOption();
+
+ void setFeatureBits(uint64_t Feature, StringRef FeatureString) {
+ if (!(getSTI().getFeatureBits()[Feature])) {
+ MCSubtargetInfo &STI = copySTI();
+ setAvailableFeatures(
+ ComputeAvailableFeatures(STI.ToggleFeature(FeatureString)));
+ }
+ }
+
+ void clearFeatureBits(uint64_t Feature, StringRef FeatureString) {
+ if (getSTI().getFeatureBits()[Feature]) {
+ MCSubtargetInfo &STI = copySTI();
+ setAvailableFeatures(
+ ComputeAvailableFeatures(STI.ToggleFeature(FeatureString)));
+ }
+ }
+
+ void pushFeatureBits() {
+ FeatureBitStack.push_back(getSTI().getFeatureBits());
+ }
+
+ bool popFeatureBits() {
+ if (FeatureBitStack.empty())
+ return true;
+
+ FeatureBitset FeatureBits = FeatureBitStack.pop_back_val();
+ copySTI().setFeatureBits(FeatureBits);
+ setAvailableFeatures(ComputeAvailableFeatures(FeatureBits));
+
+ return false;
+ }
+public:
+ enum RISCVMatchResultTy {
+ Match_Dummy = FIRST_TARGET_MATCH_RESULT_TY,
+#define GET_OPERAND_DIAGNOSTIC_TYPES
+#include "RISCVGenAsmMatcher.inc"
+#undef GET_OPERAND_DIAGNOSTIC_TYPES
+ };
+
+ static bool classifySymbolRef(const MCExpr *Expr,
+ RISCVMCExpr::VariantKind &Kind,
+ int64_t &Addend);
+
+ RISCVAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
+ const MCInstrInfo &MII, const MCTargetOptions &Options)
+ : MCTargetAsmParser(Options, STI, MII) {
+ Parser.addAliasForDirective(".half", ".2byte");
+ Parser.addAliasForDirective(".hword", ".2byte");
+ Parser.addAliasForDirective(".word", ".4byte");
+ Parser.addAliasForDirective(".dword", ".8byte");
+ setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
+ }
+};
+
+/// RISCVOperand - Instances of this class represent a parsed machine
+/// instruction.
+struct RISCVOperand : public MCParsedAsmOperand {
+
+ enum KindTy {
+ Token,
+ Register,
+ Immediate,
+ SystemRegister
+ } Kind;
+
+ bool IsRV64;
+
+ struct RegOp {
+ unsigned RegNum;
+ };
+
+ struct ImmOp {
+ const MCExpr *Val;
+ };
+
+ struct SysRegOp {
+ const char *Data;
+ unsigned Length;
+ unsigned Encoding;
+ // FIXME: Add the Encoding parsed fields as needed for checks,
+ // e.g.: read/write or user/supervisor/machine privileges.
+ };
+
+ SMLoc StartLoc, EndLoc;
+ union {
+ StringRef Tok;
+ RegOp Reg;
+ ImmOp Imm;
+ struct SysRegOp SysReg;
+ };
+
+ RISCVOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
+
+public:
+ RISCVOperand(const RISCVOperand &o) : MCParsedAsmOperand() {
+ Kind = o.Kind;
+ IsRV64 = o.IsRV64;
+ StartLoc = o.StartLoc;
+ EndLoc = o.EndLoc;
+ switch (Kind) {
+ case Register:
+ Reg = o.Reg;
+ break;
+ case Immediate:
+ Imm = o.Imm;
+ break;
+ case Token:
+ Tok = o.Tok;
+ break;
+ case SystemRegister:
+ SysReg = o.SysReg;
+ break;
+ }
+ }
+
+ bool isToken() const override { return Kind == Token; }
+ bool isReg() const override { return Kind == Register; }
+ bool isImm() const override { return Kind == Immediate; }
+ bool isMem() const override { return false; }
+ bool isSystemRegister() const { return Kind == SystemRegister; }
+
+ static bool evaluateConstantImm(const MCExpr *Expr, int64_t &Imm,
+ RISCVMCExpr::VariantKind &VK) {
+ if (auto *RE = dyn_cast<RISCVMCExpr>(Expr)) {
+ VK = RE->getKind();
+ return RE->evaluateAsConstant(Imm);
+ }
+
+ if (auto CE = dyn_cast<MCConstantExpr>(Expr)) {
+ VK = RISCVMCExpr::VK_RISCV_None;
+ Imm = CE->getValue();
+ return true;
+ }
+
+ return false;
+ }
+
+ // True if operand is a symbol with no modifiers, or a constant with no
+ // modifiers and isShiftedInt<N-1, 1>(Op).
+ template <int N> bool isBareSimmNLsb0() const {
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK;
+ if (!isImm())
+ return false;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ bool IsValid;
+ if (!IsConstantImm)
+ IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK, Imm);
+ else
+ IsValid = isShiftedInt<N - 1, 1>(Imm);
+ return IsValid && VK == RISCVMCExpr::VK_RISCV_None;
+ }
+
+ // Predicate methods for AsmOperands defined in RISCVInstrInfo.td
+
+ bool isBareSymbol() const {
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK;
+ // Must be of 'immediate' type but not a constant.
+ if (!isImm() || evaluateConstantImm(getImm(), Imm, VK))
+ return false;
+ return RISCVAsmParser::classifySymbolRef(getImm(), VK, Imm) &&
+ VK == RISCVMCExpr::VK_RISCV_None;
+ }
+
+ bool isCallSymbol() const {
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK;
+ // Must be of 'immediate' type but not a constant.
+ if (!isImm() || evaluateConstantImm(getImm(), Imm, VK))
+ return false;
+ return RISCVAsmParser::classifySymbolRef(getImm(), VK, Imm) &&
+ (VK == RISCVMCExpr::VK_RISCV_CALL ||
+ VK == RISCVMCExpr::VK_RISCV_CALL_PLT);
+ }
+
+ bool isTPRelAddSymbol() const {
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK;
+ // Must be of 'immediate' type but not a constant.
+ if (!isImm() || evaluateConstantImm(getImm(), Imm, VK))
+ return false;
+ return RISCVAsmParser::classifySymbolRef(getImm(), VK, Imm) &&
+ VK == RISCVMCExpr::VK_RISCV_TPREL_ADD;
+ }
+
+ bool isCSRSystemRegister() const { return isSystemRegister(); }
+
+  /// Return true if the operand is valid for the fence instruction, e.g.
+  /// 'iorw'.
+ bool isFenceArg() const {
+ if (!isImm())
+ return false;
+ const MCExpr *Val = getImm();
+ auto *SVal = dyn_cast<MCSymbolRefExpr>(Val);
+ if (!SVal || SVal->getKind() != MCSymbolRefExpr::VK_None)
+ return false;
+
+ StringRef Str = SVal->getSymbol().getName();
+ // Letters must be unique, taken from 'iorw', and in ascending order. This
+ // holds as long as each individual character is one of 'iorw' and is
+ // greater than the previous character.
+ char Prev = '\0';
+ for (char c : Str) {
+ if (c != 'i' && c != 'o' && c != 'r' && c != 'w')
+ return false;
+ if (c <= Prev)
+ return false;
+ Prev = c;
+ }
+ return true;
+ }
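+  // For example, "rw", "io", and "iorw" are accepted, while "wr" (letters
+  // out of order) and "iirw" (repeated letter) are rejected by the loop
+  // above.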
+
+ /// Return true if the operand is a valid floating point rounding mode.
+ bool isFRMArg() const {
+ if (!isImm())
+ return false;
+ const MCExpr *Val = getImm();
+ auto *SVal = dyn_cast<MCSymbolRefExpr>(Val);
+ if (!SVal || SVal->getKind() != MCSymbolRefExpr::VK_None)
+ return false;
+
+ StringRef Str = SVal->getSymbol().getName();
+
+ return RISCVFPRndMode::stringToRoundingMode(Str) != RISCVFPRndMode::Invalid;
+ }
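+  // stringToRoundingMode recognizes the standard RISC-V rounding-mode
+  // mnemonics: "rne", "rtz", "rdn", "rup", "rmm" and "dyn".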
+
+ bool isImmXLenLI() const {
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK;
+ if (!isImm())
+ return false;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ if (VK == RISCVMCExpr::VK_RISCV_LO || VK == RISCVMCExpr::VK_RISCV_PCREL_LO)
+ return true;
+ // Given only Imm, ensuring that the actually specified constant is either
+ // a signed or unsigned 64-bit number is unfortunately impossible.
+ bool IsInRange = isRV64() ? true : isInt<32>(Imm) || isUInt<32>(Imm);
+ return IsConstantImm && IsInRange && VK == RISCVMCExpr::VK_RISCV_None;
+ }
+
+ bool isUImmLog2XLen() const {
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK;
+ if (!isImm())
+ return false;
+ if (!evaluateConstantImm(getImm(), Imm, VK) ||
+ VK != RISCVMCExpr::VK_RISCV_None)
+ return false;
+ return (isRV64() && isUInt<6>(Imm)) || isUInt<5>(Imm);
+ }
+
+ bool isUImmLog2XLenNonZero() const {
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK;
+ if (!isImm())
+ return false;
+ if (!evaluateConstantImm(getImm(), Imm, VK) ||
+ VK != RISCVMCExpr::VK_RISCV_None)
+ return false;
+ if (Imm == 0)
+ return false;
+ return (isRV64() && isUInt<6>(Imm)) || isUInt<5>(Imm);
+ }
+
+ bool isUImm5() const {
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK;
+ if (!isImm())
+ return false;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ return IsConstantImm && isUInt<5>(Imm) && VK == RISCVMCExpr::VK_RISCV_None;
+ }
+
+ bool isUImm5NonZero() const {
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK;
+ if (!isImm())
+ return false;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ return IsConstantImm && isUInt<5>(Imm) && (Imm != 0) &&
+ VK == RISCVMCExpr::VK_RISCV_None;
+ }
+
+ bool isSImm6() const {
+ if (!isImm())
+ return false;
+ RISCVMCExpr::VariantKind VK;
+ int64_t Imm;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ return IsConstantImm && isInt<6>(Imm) &&
+ VK == RISCVMCExpr::VK_RISCV_None;
+ }
+
+ bool isSImm6NonZero() const {
+ if (!isImm())
+ return false;
+ RISCVMCExpr::VariantKind VK;
+ int64_t Imm;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ return IsConstantImm && isInt<6>(Imm) && (Imm != 0) &&
+ VK == RISCVMCExpr::VK_RISCV_None;
+ }
+
+ bool isCLUIImm() const {
+ if (!isImm())
+ return false;
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ return IsConstantImm && (Imm != 0) &&
+ (isUInt<5>(Imm) || (Imm >= 0xfffe0 && Imm <= 0xfffff)) &&
+ VK == RISCVMCExpr::VK_RISCV_None;
+ }
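+  // The range accepted above reflects c.lui's encoding: a non-zero 6-bit
+  // immediate for the 20-bit LUI field, i.e. 1-31 or, once a negative 6-bit
+  // value is sign-extended into 20 bits, 0xfffe0-0xfffff.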
+
+ bool isUImm7Lsb00() const {
+ if (!isImm())
+ return false;
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ return IsConstantImm && isShiftedUInt<5, 2>(Imm) &&
+ VK == RISCVMCExpr::VK_RISCV_None;
+ }
+
+ bool isUImm8Lsb00() const {
+ if (!isImm())
+ return false;
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ return IsConstantImm && isShiftedUInt<6, 2>(Imm) &&
+ VK == RISCVMCExpr::VK_RISCV_None;
+ }
+
+ bool isUImm8Lsb000() const {
+ if (!isImm())
+ return false;
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ return IsConstantImm && isShiftedUInt<5, 3>(Imm) &&
+ VK == RISCVMCExpr::VK_RISCV_None;
+ }
+
+ bool isSImm9Lsb0() const { return isBareSimmNLsb0<9>(); }
+
+ bool isUImm9Lsb000() const {
+ if (!isImm())
+ return false;
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ return IsConstantImm && isShiftedUInt<6, 3>(Imm) &&
+ VK == RISCVMCExpr::VK_RISCV_None;
+ }
+
+ bool isUImm10Lsb00NonZero() const {
+ if (!isImm())
+ return false;
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ return IsConstantImm && isShiftedUInt<8, 2>(Imm) && (Imm != 0) &&
+ VK == RISCVMCExpr::VK_RISCV_None;
+ }
+
+ bool isSImm12() const {
+ RISCVMCExpr::VariantKind VK;
+ int64_t Imm;
+ bool IsValid;
+ if (!isImm())
+ return false;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ if (!IsConstantImm)
+ IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK, Imm);
+ else
+ IsValid = isInt<12>(Imm);
+ return IsValid && ((IsConstantImm && VK == RISCVMCExpr::VK_RISCV_None) ||
+ VK == RISCVMCExpr::VK_RISCV_LO ||
+ VK == RISCVMCExpr::VK_RISCV_PCREL_LO ||
+ VK == RISCVMCExpr::VK_RISCV_TPREL_LO);
+ }
+
+ bool isSImm12Lsb0() const { return isBareSimmNLsb0<12>(); }
+
+ bool isSImm13Lsb0() const { return isBareSimmNLsb0<13>(); }
+
+ bool isSImm10Lsb0000NonZero() const {
+ if (!isImm())
+ return false;
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ return IsConstantImm && (Imm != 0) && isShiftedInt<6, 4>(Imm) &&
+ VK == RISCVMCExpr::VK_RISCV_None;
+ }
+
+ bool isUImm20LUI() const {
+ RISCVMCExpr::VariantKind VK;
+ int64_t Imm;
+ bool IsValid;
+ if (!isImm())
+ return false;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ if (!IsConstantImm) {
+ IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK, Imm);
+ return IsValid && (VK == RISCVMCExpr::VK_RISCV_HI ||
+ VK == RISCVMCExpr::VK_RISCV_TPREL_HI);
+ } else {
+ return isUInt<20>(Imm) && (VK == RISCVMCExpr::VK_RISCV_None ||
+ VK == RISCVMCExpr::VK_RISCV_HI ||
+ VK == RISCVMCExpr::VK_RISCV_TPREL_HI);
+ }
+ }
+
+ bool isUImm20AUIPC() const {
+ RISCVMCExpr::VariantKind VK;
+ int64_t Imm;
+ bool IsValid;
+ if (!isImm())
+ return false;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ if (!IsConstantImm) {
+ IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK, Imm);
+ return IsValid && (VK == RISCVMCExpr::VK_RISCV_PCREL_HI ||
+ VK == RISCVMCExpr::VK_RISCV_GOT_HI ||
+ VK == RISCVMCExpr::VK_RISCV_TLS_GOT_HI ||
+ VK == RISCVMCExpr::VK_RISCV_TLS_GD_HI);
+ } else {
+ return isUInt<20>(Imm) && (VK == RISCVMCExpr::VK_RISCV_None ||
+ VK == RISCVMCExpr::VK_RISCV_PCREL_HI ||
+ VK == RISCVMCExpr::VK_RISCV_GOT_HI ||
+ VK == RISCVMCExpr::VK_RISCV_TLS_GOT_HI ||
+ VK == RISCVMCExpr::VK_RISCV_TLS_GD_HI);
+ }
+ }
+
+ bool isSImm21Lsb0JAL() const { return isBareSimmNLsb0<21>(); }
+
+ bool isImmZero() const {
+ if (!isImm())
+ return false;
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ return IsConstantImm && (Imm == 0) && VK == RISCVMCExpr::VK_RISCV_None;
+ }
+
+ /// getStartLoc - Gets location of the first token of this operand
+ SMLoc getStartLoc() const override { return StartLoc; }
+ /// getEndLoc - Gets location of the last token of this operand
+ SMLoc getEndLoc() const override { return EndLoc; }
+ /// True if this operand is for an RV64 instruction
+ bool isRV64() const { return IsRV64; }
+
+ unsigned getReg() const override {
+ assert(Kind == Register && "Invalid type access!");
+ return Reg.RegNum;
+ }
+
+ StringRef getSysReg() const {
+ assert(Kind == SystemRegister && "Invalid access!");
+ return StringRef(SysReg.Data, SysReg.Length);
+ }
+
+ const MCExpr *getImm() const {
+ assert(Kind == Immediate && "Invalid type access!");
+ return Imm.Val;
+ }
+
+ StringRef getToken() const {
+ assert(Kind == Token && "Invalid type access!");
+ return Tok;
+ }
+
+ void print(raw_ostream &OS) const override {
+ switch (Kind) {
+ case Immediate:
+ OS << *getImm();
+ break;
+ case Register:
+ OS << "<register x";
+ OS << getReg() << ">";
+ break;
+ case Token:
+ OS << "'" << getToken() << "'";
+ break;
+ case SystemRegister:
+ OS << "<sysreg: " << getSysReg() << '>';
+ break;
+ }
+ }
+
+ static std::unique_ptr<RISCVOperand> createToken(StringRef Str, SMLoc S,
+ bool IsRV64) {
+ auto Op = make_unique<RISCVOperand>(Token);
+ Op->Tok = Str;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ Op->IsRV64 = IsRV64;
+ return Op;
+ }
+
+ static std::unique_ptr<RISCVOperand> createReg(unsigned RegNo, SMLoc S,
+ SMLoc E, bool IsRV64) {
+ auto Op = make_unique<RISCVOperand>(Register);
+ Op->Reg.RegNum = RegNo;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ Op->IsRV64 = IsRV64;
+ return Op;
+ }
+
+ static std::unique_ptr<RISCVOperand> createImm(const MCExpr *Val, SMLoc S,
+ SMLoc E, bool IsRV64) {
+ auto Op = make_unique<RISCVOperand>(Immediate);
+ Op->Imm.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ Op->IsRV64 = IsRV64;
+ return Op;
+ }
+
+ static std::unique_ptr<RISCVOperand>
+ createSysReg(StringRef Str, SMLoc S, unsigned Encoding, bool IsRV64) {
+ auto Op = make_unique<RISCVOperand>(SystemRegister);
+ Op->SysReg.Data = Str.data();
+ Op->SysReg.Length = Str.size();
+ Op->SysReg.Encoding = Encoding;
+ Op->StartLoc = S;
+ Op->IsRV64 = IsRV64;
+ return Op;
+ }
+
+ void addExpr(MCInst &Inst, const MCExpr *Expr) const {
+ assert(Expr && "Expr shouldn't be null!");
+ int64_t Imm = 0;
+ RISCVMCExpr::VariantKind VK;
+ bool IsConstant = evaluateConstantImm(Expr, Imm, VK);
+
+ if (IsConstant)
+ Inst.addOperand(MCOperand::createImm(Imm));
+ else
+ Inst.addOperand(MCOperand::createExpr(Expr));
+ }
+
+ // Used by the TableGen Code
+ void addRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createReg(getReg()));
+ }
+
+ void addImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ addExpr(Inst, getImm());
+ }
+
+ void addFenceArgOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ // isFenceArg has validated the operand, meaning this cast is safe
+ auto SE = cast<MCSymbolRefExpr>(getImm());
+
+ unsigned Imm = 0;
+ for (char c : SE->getSymbol().getName()) {
+ switch (c) {
+ default:
+ llvm_unreachable("FenceArg must contain only [iorw]");
+ case 'i': Imm |= RISCVFenceField::I; break;
+ case 'o': Imm |= RISCVFenceField::O; break;
+ case 'r': Imm |= RISCVFenceField::R; break;
+ case 'w': Imm |= RISCVFenceField::W; break;
+ }
+ }
+ Inst.addOperand(MCOperand::createImm(Imm));
+ }
+
+ void addCSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createImm(SysReg.Encoding));
+ }
+
+ // Returns the rounding mode represented by this RISCVOperand. Should only
+ // be called after checking isFRMArg.
+ RISCVFPRndMode::RoundingMode getRoundingMode() const {
+ // isFRMArg has validated the operand, meaning this cast is safe.
+ auto SE = cast<MCSymbolRefExpr>(getImm());
+ RISCVFPRndMode::RoundingMode FRM =
+ RISCVFPRndMode::stringToRoundingMode(SE->getSymbol().getName());
+ assert(FRM != RISCVFPRndMode::Invalid && "Invalid rounding mode");
+ return FRM;
+ }
+
+ void addFRMArgOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createImm(getRoundingMode()));
+ }
+};
+} // end anonymous namespace.
+
+#define GET_REGISTER_MATCHER
+#define GET_MATCHER_IMPLEMENTATION
+#include "RISCVGenAsmMatcher.inc"
+
+// Return the matching FPR64 register for the given FPR32.
+// FIXME: Ideally this function could be removed in favour of using
+// information from TableGen.
+unsigned convertFPR32ToFPR64(unsigned Reg) {
+ switch (Reg) {
+ default:
+ llvm_unreachable("Not a recognised FPR32 register");
+ case RISCV::F0_32: return RISCV::F0_64;
+ case RISCV::F1_32: return RISCV::F1_64;
+ case RISCV::F2_32: return RISCV::F2_64;
+ case RISCV::F3_32: return RISCV::F3_64;
+ case RISCV::F4_32: return RISCV::F4_64;
+ case RISCV::F5_32: return RISCV::F5_64;
+ case RISCV::F6_32: return RISCV::F6_64;
+ case RISCV::F7_32: return RISCV::F7_64;
+ case RISCV::F8_32: return RISCV::F8_64;
+ case RISCV::F9_32: return RISCV::F9_64;
+ case RISCV::F10_32: return RISCV::F10_64;
+ case RISCV::F11_32: return RISCV::F11_64;
+ case RISCV::F12_32: return RISCV::F12_64;
+ case RISCV::F13_32: return RISCV::F13_64;
+ case RISCV::F14_32: return RISCV::F14_64;
+ case RISCV::F15_32: return RISCV::F15_64;
+ case RISCV::F16_32: return RISCV::F16_64;
+ case RISCV::F17_32: return RISCV::F17_64;
+ case RISCV::F18_32: return RISCV::F18_64;
+ case RISCV::F19_32: return RISCV::F19_64;
+ case RISCV::F20_32: return RISCV::F20_64;
+ case RISCV::F21_32: return RISCV::F21_64;
+ case RISCV::F22_32: return RISCV::F22_64;
+ case RISCV::F23_32: return RISCV::F23_64;
+ case RISCV::F24_32: return RISCV::F24_64;
+ case RISCV::F25_32: return RISCV::F25_64;
+ case RISCV::F26_32: return RISCV::F26_64;
+ case RISCV::F27_32: return RISCV::F27_64;
+ case RISCV::F28_32: return RISCV::F28_64;
+ case RISCV::F29_32: return RISCV::F29_64;
+ case RISCV::F30_32: return RISCV::F30_64;
+ case RISCV::F31_32: return RISCV::F31_64;
+ }
+}
+
+unsigned RISCVAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
+ unsigned Kind) {
+ RISCVOperand &Op = static_cast<RISCVOperand &>(AsmOp);
+ if (!Op.isReg())
+ return Match_InvalidOperand;
+
+ unsigned Reg = Op.getReg();
+ bool IsRegFPR32 =
+ RISCVMCRegisterClasses[RISCV::FPR32RegClassID].contains(Reg);
+ bool IsRegFPR32C =
+ RISCVMCRegisterClasses[RISCV::FPR32CRegClassID].contains(Reg);
+
+ // As the parser couldn't differentiate an FPR32 from an FPR64, coerce the
+ // register from FPR32 to FPR64 or FPR32C to FPR64C if necessary.
+ if ((IsRegFPR32 && Kind == MCK_FPR64) ||
+ (IsRegFPR32C && Kind == MCK_FPR64C)) {
+ Op.Reg.RegNum = convertFPR32ToFPR64(Reg);
+ return Match_Success;
+ }
+ return Match_InvalidOperand;
+}
+
+bool RISCVAsmParser::generateImmOutOfRangeError(
+ OperandVector &Operands, uint64_t ErrorInfo, int64_t Lower, int64_t Upper,
+ Twine Msg = "immediate must be an integer in the range") {
+ SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
+ return Error(ErrorLoc, Msg + " [" + Twine(Lower) + ", " + Twine(Upper) + "]");
+}
+
+bool RISCVAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands,
+ MCStreamer &Out,
+ uint64_t &ErrorInfo,
+ bool MatchingInlineAsm) {
+ MCInst Inst;
+
+ auto Result =
+ MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm);
+ switch (Result) {
+ default:
+ break;
+ case Match_Success:
+ return processInstruction(Inst, IDLoc, Operands, Out);
+ case Match_MissingFeature:
+ return Error(IDLoc, "instruction use requires an option to be enabled");
+ case Match_MnemonicFail:
+ return Error(IDLoc, "unrecognized instruction mnemonic");
+ case Match_InvalidOperand: {
+ SMLoc ErrorLoc = IDLoc;
+ if (ErrorInfo != ~0U) {
+ if (ErrorInfo >= Operands.size())
+ return Error(ErrorLoc, "too few operands for instruction");
+
+ ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
+ if (ErrorLoc == SMLoc())
+ ErrorLoc = IDLoc;
+ }
+ return Error(ErrorLoc, "invalid operand for instruction");
+ }
+ }
+
+ // Handle the case when the error message is of specific type
+ // other than the generic Match_InvalidOperand, and the
+ // corresponding operand is missing.
+ if (Result > FIRST_TARGET_MATCH_RESULT_TY) {
+ SMLoc ErrorLoc = IDLoc;
+ if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
+ return Error(ErrorLoc, "too few operands for instruction");
+ }
+
+  switch (Result) {
+ default:
+ break;
+ case Match_InvalidImmXLenLI:
+ if (isRV64()) {
+ SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
+ return Error(ErrorLoc, "operand must be a constant 64-bit integer");
+ }
+ return generateImmOutOfRangeError(Operands, ErrorInfo,
+ std::numeric_limits<int32_t>::min(),
+ std::numeric_limits<uint32_t>::max());
+ case Match_InvalidUImmLog2XLen:
+ if (isRV64())
+ return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 6) - 1);
+ return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 5) - 1);
+ case Match_InvalidUImmLog2XLenNonZero:
+ if (isRV64())
+ return generateImmOutOfRangeError(Operands, ErrorInfo, 1, (1 << 6) - 1);
+ return generateImmOutOfRangeError(Operands, ErrorInfo, 1, (1 << 5) - 1);
+ case Match_InvalidUImm5:
+ return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 5) - 1);
+ case Match_InvalidSImm6:
+ return generateImmOutOfRangeError(Operands, ErrorInfo, -(1 << 5),
+ (1 << 5) - 1);
+ case Match_InvalidSImm6NonZero:
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, -(1 << 5), (1 << 5) - 1,
+ "immediate must be non-zero in the range");
+ case Match_InvalidCLUIImm:
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, 1, (1 << 5) - 1,
+ "immediate must be in [0xfffe0, 0xfffff] or");
+ case Match_InvalidUImm7Lsb00:
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, 0, (1 << 7) - 4,
+ "immediate must be a multiple of 4 bytes in the range");
+ case Match_InvalidUImm8Lsb00:
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, 0, (1 << 8) - 4,
+ "immediate must be a multiple of 4 bytes in the range");
+ case Match_InvalidUImm8Lsb000:
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, 0, (1 << 8) - 8,
+ "immediate must be a multiple of 8 bytes in the range");
+ case Match_InvalidSImm9Lsb0:
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, -(1 << 8), (1 << 8) - 2,
+ "immediate must be a multiple of 2 bytes in the range");
+ case Match_InvalidUImm9Lsb000:
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, 0, (1 << 9) - 8,
+ "immediate must be a multiple of 8 bytes in the range");
+ case Match_InvalidUImm10Lsb00NonZero:
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, 4, (1 << 10) - 4,
+ "immediate must be a multiple of 4 bytes in the range");
+ case Match_InvalidSImm10Lsb0000NonZero:
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, -(1 << 9), (1 << 9) - 16,
+ "immediate must be a multiple of 16 bytes and non-zero in the range");
+ case Match_InvalidSImm12:
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, -(1 << 11), (1 << 11) - 1,
+ "operand must be a symbol with %lo/%pcrel_lo/%tprel_lo modifier or an "
+ "integer in the range");
+ case Match_InvalidSImm12Lsb0:
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, -(1 << 11), (1 << 11) - 2,
+ "immediate must be a multiple of 2 bytes in the range");
+ case Match_InvalidSImm13Lsb0:
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, -(1 << 12), (1 << 12) - 2,
+ "immediate must be a multiple of 2 bytes in the range");
+ case Match_InvalidUImm20LUI:
+ return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 20) - 1,
+ "operand must be a symbol with "
+ "%hi/%tprel_hi modifier or an integer in "
+ "the range");
+ case Match_InvalidUImm20AUIPC:
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, 0, (1 << 20) - 1,
+ "operand must be a symbol with a "
+ "%pcrel_hi/%got_pcrel_hi/%tls_ie_pcrel_hi/%tls_gd_pcrel_hi modifier or "
+ "an integer in the range");
+ case Match_InvalidSImm21Lsb0JAL:
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, -(1 << 20), (1 << 20) - 2,
+ "immediate must be a multiple of 2 bytes in the range");
+ case Match_InvalidCSRSystemRegister: {
+ return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 12) - 1,
+ "operand must be a valid system register "
+ "name or an integer in the range");
+ }
+ case Match_InvalidFenceArg: {
+ SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
+ return Error(
+ ErrorLoc,
+ "operand must be formed of letters selected in-order from 'iorw'");
+ }
+ case Match_InvalidFRMArg: {
+ SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
+ return Error(
+ ErrorLoc,
+ "operand must be a valid floating point rounding mode mnemonic");
+ }
+ case Match_InvalidBareSymbol: {
+ SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
+ return Error(ErrorLoc, "operand must be a bare symbol name");
+ }
+ case Match_InvalidCallSymbol: {
+ SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
+ return Error(ErrorLoc, "operand must be a bare symbol name");
+ }
+ case Match_InvalidTPRelAddSymbol: {
+ SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
+ return Error(ErrorLoc, "operand must be a symbol with %tprel_add modifier");
+ }
+ }
+
+ llvm_unreachable("Unknown match type detected!");
+}
+
+// Attempts to match Name as a register (either using the default name or
+// alternative ABI names), setting RegNo to the matching register. Upon
+// failure, returns true and sets RegNo to 0. If IsRV32E then registers
+// x16-x31 will be rejected.
+static bool matchRegisterNameHelper(bool IsRV32E, unsigned &RegNo,
+ StringRef Name) {
+ RegNo = MatchRegisterName(Name);
+ if (RegNo == 0)
+ RegNo = MatchRegisterAltName(Name);
+ if (IsRV32E && RegNo >= RISCV::X16 && RegNo <= RISCV::X31)
+ RegNo = 0;
+ return RegNo == 0;
+}
+
+bool RISCVAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
+ SMLoc &EndLoc) {
+ const AsmToken &Tok = getParser().getTok();
+ StartLoc = Tok.getLoc();
+ EndLoc = Tok.getEndLoc();
+ RegNo = 0;
+ StringRef Name = getLexer().getTok().getIdentifier();
+
+ if (matchRegisterNameHelper(isRV32E(), RegNo, Name))
+ return Error(StartLoc, "invalid register name");
+
+ getParser().Lex(); // Eat identifier token.
+ return false;
+}
+
+OperandMatchResultTy RISCVAsmParser::parseRegister(OperandVector &Operands,
+ bool AllowParens) {
+ SMLoc FirstS = getLoc();
+ bool HadParens = false;
+ AsmToken LParen;
+
+ // If this is an LParen and a parenthesised register name is allowed, parse it
+ // atomically.
+ if (AllowParens && getLexer().is(AsmToken::LParen)) {
+ AsmToken Buf[2];
+ size_t ReadCount = getLexer().peekTokens(Buf);
+ if (ReadCount == 2 && Buf[1].getKind() == AsmToken::RParen) {
+ HadParens = true;
+ LParen = getParser().getTok();
+ getParser().Lex(); // Eat '('
+ }
+ }
+
+ switch (getLexer().getKind()) {
+ default:
+ if (HadParens)
+ getLexer().UnLex(LParen);
+ return MatchOperand_NoMatch;
+ case AsmToken::Identifier:
+ StringRef Name = getLexer().getTok().getIdentifier();
+ unsigned RegNo;
+ matchRegisterNameHelper(isRV32E(), RegNo, Name);
+
+ if (RegNo == 0) {
+ if (HadParens)
+ getLexer().UnLex(LParen);
+ return MatchOperand_NoMatch;
+ }
+ if (HadParens)
+ Operands.push_back(RISCVOperand::createToken("(", FirstS, isRV64()));
+ SMLoc S = getLoc();
+ SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1);
+ getLexer().Lex();
+ Operands.push_back(RISCVOperand::createReg(RegNo, S, E, isRV64()));
+ }
+
+ if (HadParens) {
+ getParser().Lex(); // Eat ')'
+ Operands.push_back(RISCVOperand::createToken(")", getLoc(), isRV64()));
+ }
+
+ return MatchOperand_Success;
+}
+
+OperandMatchResultTy
+RISCVAsmParser::parseCSRSystemRegister(OperandVector &Operands) {
+ SMLoc S = getLoc();
+ const MCExpr *Res;
+
+ switch (getLexer().getKind()) {
+ default:
+ return MatchOperand_NoMatch;
+ case AsmToken::LParen:
+ case AsmToken::Minus:
+ case AsmToken::Plus:
+ case AsmToken::Exclaim:
+ case AsmToken::Tilde:
+ case AsmToken::Integer:
+ case AsmToken::String: {
+ if (getParser().parseExpression(Res))
+ return MatchOperand_ParseFail;
+
+ auto *CE = dyn_cast<MCConstantExpr>(Res);
+ if (CE) {
+ int64_t Imm = CE->getValue();
+ if (isUInt<12>(Imm)) {
+ auto SysReg = RISCVSysReg::lookupSysRegByEncoding(Imm);
+ // Accept an immediate representing a named or un-named Sys Reg
+ // if the range is valid, regardless of the required features.
+ Operands.push_back(RISCVOperand::createSysReg(
+ SysReg ? SysReg->Name : "", S, Imm, isRV64()));
+ return MatchOperand_Success;
+ }
+ }
+
+ Twine Msg = "immediate must be an integer in the range";
+ Error(S, Msg + " [" + Twine(0) + ", " + Twine((1 << 12) - 1) + "]");
+ return MatchOperand_ParseFail;
+ }
+ case AsmToken::Identifier: {
+ StringRef Identifier;
+ if (getParser().parseIdentifier(Identifier))
+ return MatchOperand_ParseFail;
+
+ auto SysReg = RISCVSysReg::lookupSysRegByName(Identifier);
+ // Accept a named Sys Reg if the required features are present.
+ if (SysReg) {
+ if (!SysReg->haveRequiredFeatures(getSTI().getFeatureBits())) {
+ Error(S, "system register use requires an option to be enabled");
+ return MatchOperand_ParseFail;
+ }
+ Operands.push_back(RISCVOperand::createSysReg(
+ Identifier, S, SysReg->Encoding, isRV64()));
+ return MatchOperand_Success;
+ }
+
+ Twine Msg = "operand must be a valid system register name "
+ "or an integer in the range";
+ Error(S, Msg + " [" + Twine(0) + ", " + Twine((1 << 12) - 1) + "]");
+ return MatchOperand_ParseFail;
+ }
+ case AsmToken::Percent: {
+ // Discard operand with modifier.
+ Twine Msg = "immediate must be an integer in the range";
+ Error(S, Msg + " [" + Twine(0) + ", " + Twine((1 << 12) - 1) + "]");
+ return MatchOperand_ParseFail;
+ }
+ }
+
+ return MatchOperand_NoMatch;
+}
+
+OperandMatchResultTy RISCVAsmParser::parseImmediate(OperandVector &Operands) {
+ SMLoc S = getLoc();
+ SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1);
+ const MCExpr *Res;
+
+ switch (getLexer().getKind()) {
+ default:
+ return MatchOperand_NoMatch;
+ case AsmToken::LParen:
+ case AsmToken::Dot:
+ case AsmToken::Minus:
+ case AsmToken::Plus:
+ case AsmToken::Exclaim:
+ case AsmToken::Tilde:
+ case AsmToken::Integer:
+ case AsmToken::String:
+ case AsmToken::Identifier:
+ if (getParser().parseExpression(Res))
+ return MatchOperand_ParseFail;
+ break;
+ case AsmToken::Percent:
+ return parseOperandWithModifier(Operands);
+ }
+
+ Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
+ return MatchOperand_Success;
+}
+
+OperandMatchResultTy
+RISCVAsmParser::parseOperandWithModifier(OperandVector &Operands) {
+ SMLoc S = getLoc();
+ SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1);
+
+ if (getLexer().getKind() != AsmToken::Percent) {
+ Error(getLoc(), "expected '%' for operand modifier");
+ return MatchOperand_ParseFail;
+ }
+
+ getParser().Lex(); // Eat '%'
+
+ if (getLexer().getKind() != AsmToken::Identifier) {
+ Error(getLoc(), "expected valid identifier for operand modifier");
+ return MatchOperand_ParseFail;
+ }
+ StringRef Identifier = getParser().getTok().getIdentifier();
+ RISCVMCExpr::VariantKind VK = RISCVMCExpr::getVariantKindForName(Identifier);
+ if (VK == RISCVMCExpr::VK_RISCV_Invalid) {
+ Error(getLoc(), "unrecognized operand modifier");
+ return MatchOperand_ParseFail;
+ }
+
+ getParser().Lex(); // Eat the identifier
+ if (getLexer().getKind() != AsmToken::LParen) {
+ Error(getLoc(), "expected '('");
+ return MatchOperand_ParseFail;
+ }
+ getParser().Lex(); // Eat '('
+
+ const MCExpr *SubExpr;
+ if (getParser().parseParenExpression(SubExpr, E)) {
+ return MatchOperand_ParseFail;
+ }
+
+ const MCExpr *ModExpr = RISCVMCExpr::create(SubExpr, VK, getContext());
+ Operands.push_back(RISCVOperand::createImm(ModExpr, S, E, isRV64()));
+ return MatchOperand_Success;
+}
+
+OperandMatchResultTy RISCVAsmParser::parseBareSymbol(OperandVector &Operands) {
+ SMLoc S = getLoc();
+ SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1);
+ const MCExpr *Res;
+
+ if (getLexer().getKind() != AsmToken::Identifier)
+ return MatchOperand_NoMatch;
+
+ StringRef Identifier;
+ AsmToken Tok = getLexer().getTok();
+
+ if (getParser().parseIdentifier(Identifier))
+ return MatchOperand_ParseFail;
+
+ if (Identifier.consume_back("@plt")) {
+ Error(getLoc(), "'@plt' operand not valid for instruction");
+ return MatchOperand_ParseFail;
+ }
+
+ MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
+
+ if (Sym->isVariable()) {
+ const MCExpr *V = Sym->getVariableValue(/*SetUsed=*/false);
+ if (!isa<MCSymbolRefExpr>(V)) {
+ getLexer().UnLex(Tok); // Put back if it's not a bare symbol.
+ return MatchOperand_NoMatch;
+ }
+ Res = V;
+ } else
+ Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
+
+ MCBinaryExpr::Opcode Opcode;
+ switch (getLexer().getKind()) {
+ default:
+ Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
+ return MatchOperand_Success;
+ case AsmToken::Plus:
+ Opcode = MCBinaryExpr::Add;
+ break;
+ case AsmToken::Minus:
+ Opcode = MCBinaryExpr::Sub;
+ break;
+ }
+
+ const MCExpr *Expr;
+ if (getParser().parseExpression(Expr))
+ return MatchOperand_ParseFail;
+ Res = MCBinaryExpr::create(Opcode, Res, Expr, getContext());
+ Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
+ return MatchOperand_Success;
+}
+
+OperandMatchResultTy RISCVAsmParser::parseCallSymbol(OperandVector &Operands) {
+ SMLoc S = getLoc();
+ SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1);
+ const MCExpr *Res;
+
+ if (getLexer().getKind() != AsmToken::Identifier)
+ return MatchOperand_NoMatch;
+
+ // Avoid parsing the register in `call rd, foo` as a call symbol.
+ if (getLexer().peekTok().getKind() != AsmToken::EndOfStatement)
+ return MatchOperand_NoMatch;
+
+ StringRef Identifier;
+ if (getParser().parseIdentifier(Identifier))
+ return MatchOperand_ParseFail;
+
+ RISCVMCExpr::VariantKind Kind = RISCVMCExpr::VK_RISCV_CALL;
+ if (Identifier.consume_back("@plt"))
+ Kind = RISCVMCExpr::VK_RISCV_CALL_PLT;
+
+ MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
+ Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
+ Res = RISCVMCExpr::create(Res, Kind, getContext());
+ Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
+ return MatchOperand_Success;
+}
+
+OperandMatchResultTy RISCVAsmParser::parseJALOffset(OperandVector &Operands) {
+ // Parsing jal operands is fiddly due to the `jal foo` and `jal ra, foo`
+ // both being acceptable forms. When parsing `jal ra, foo` this function
+ // will be called for the `ra` register operand in an attempt to match the
+ // single-operand alias. parseJALOffset must fail for this case. It would
+ // seem logical to try parse the operand using parseImmediate and return
+ // NoMatch if the next token is a comma (meaning we must be parsing a jal in
+ // the second form rather than the first). We can't do this as there's no
+ // way of rewinding the lexer state. Instead, return NoMatch if this operand
+ // is an identifier and is followed by a comma.
+ if (getLexer().is(AsmToken::Identifier) &&
+ getLexer().peekTok().is(AsmToken::Comma))
+ return MatchOperand_NoMatch;
+
+ return parseImmediate(Operands);
+}
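+// Illustrative forms distinguished above (names arbitrary):
+//   jal foo        // bare offset, parsed by this function
+//   jal ra, foo    // identifier followed by a comma: NoMatch is returned so
+//                  // `ra` can match as the register operand instead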
+
+OperandMatchResultTy
+RISCVAsmParser::parseMemOpBaseReg(OperandVector &Operands) {
+ if (getLexer().isNot(AsmToken::LParen)) {
+ Error(getLoc(), "expected '('");
+ return MatchOperand_ParseFail;
+ }
+
+ getParser().Lex(); // Eat '('
+ Operands.push_back(RISCVOperand::createToken("(", getLoc(), isRV64()));
+
+ if (parseRegister(Operands) != MatchOperand_Success) {
+ Error(getLoc(), "expected register");
+ return MatchOperand_ParseFail;
+ }
+
+ if (getLexer().isNot(AsmToken::RParen)) {
+ Error(getLoc(), "expected ')'");
+ return MatchOperand_ParseFail;
+ }
+
+ getParser().Lex(); // Eat ')'
+ Operands.push_back(RISCVOperand::createToken(")", getLoc(), isRV64()));
+
+ return MatchOperand_Success;
+}
+
+OperandMatchResultTy RISCVAsmParser::parseAtomicMemOp(OperandVector &Operands) {
+ // Atomic operations such as lr.w, sc.w, and amo*.w accept a "memory operand"
+ // as one of their register operands, such as `(a0)`. This just denotes that
+ // the register (in this case `a0`) contains a memory address.
+ //
+ // Normally, we would be able to parse these by putting the parens into the
+ // instruction string. However, GNU as also accepts a zero-offset memory
+ // operand (such as `0(a0)`), and ignores the 0. Normally this would be parsed
+ // with parseImmediate followed by parseMemOpBaseReg, but these instructions
+ // do not accept an immediate operand, and we do not want to add a "dummy"
+ // operand that is silently dropped.
+ //
+ // Instead, we use this custom parser. This will: allow (and discard) an
+ // offset if it is zero; require (and discard) parentheses; and add only the
+ // parsed register operand to `Operands`.
+ //
+ // These operands are printed with RISCVInstPrinter::printAtomicMemOp, which
+ // will only print the register surrounded by parentheses (which GNU as also
+ // uses as its canonical representation for these operands).
+ std::unique_ptr<RISCVOperand> OptionalImmOp;
+
+ if (getLexer().isNot(AsmToken::LParen)) {
+    // Parse an integer token. We do not accept arbitrary constant expressions
+ // in the offset field (because they may include parens, which complicates
+ // parsing a lot).
+ int64_t ImmVal;
+ SMLoc ImmStart = getLoc();
+ if (getParser().parseIntToken(ImmVal,
+ "expected '(' or optional integer offset"))
+ return MatchOperand_ParseFail;
+
+ // Create a RISCVOperand for checking later (so the error messages are
+ // nicer), but we don't add it to Operands.
+ SMLoc ImmEnd = getLoc();
+ OptionalImmOp =
+ RISCVOperand::createImm(MCConstantExpr::create(ImmVal, getContext()),
+ ImmStart, ImmEnd, isRV64());
+ }
+
+ if (getLexer().isNot(AsmToken::LParen)) {
+ Error(getLoc(), OptionalImmOp ? "expected '(' after optional integer offset"
+ : "expected '(' or optional integer offset");
+ return MatchOperand_ParseFail;
+ }
+ getParser().Lex(); // Eat '('
+
+ if (parseRegister(Operands) != MatchOperand_Success) {
+ Error(getLoc(), "expected register");
+ return MatchOperand_ParseFail;
+ }
+
+ if (getLexer().isNot(AsmToken::RParen)) {
+ Error(getLoc(), "expected ')'");
+ return MatchOperand_ParseFail;
+ }
+ getParser().Lex(); // Eat ')'
+
+  // Deferred handling of non-zero offsets. This makes the error messages nicer.
+ if (OptionalImmOp && !OptionalImmOp->isImmZero()) {
+ Error(OptionalImmOp->getStartLoc(), "optional integer offset must be 0",
+ SMRange(OptionalImmOp->getStartLoc(), OptionalImmOp->getEndLoc()));
+ return MatchOperand_ParseFail;
+ }
+
+ return MatchOperand_Success;
+}
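+// For illustration (register names arbitrary): given `lr.w a1, 0(a0)`, this
+// parser accepts and discards the `0`, consumes the parentheses, and pushes
+// only the `a0` register operand; `lr.w a1, 4(a0)` is rejected with the
+// non-zero offset error above.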
+
+/// Looks at a token type and creates the relevant operand from this
+/// information, adding to Operands. If operand was parsed, returns false, else
+/// true.
+bool RISCVAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
+ // Check if the current operand has a custom associated parser, if so, try to
+ // custom parse the operand, or fallback to the general approach.
+ OperandMatchResultTy Result =
+ MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
+ if (Result == MatchOperand_Success)
+ return false;
+ if (Result == MatchOperand_ParseFail)
+ return true;
+
+ // Attempt to parse token as a register.
+ if (parseRegister(Operands, true) == MatchOperand_Success)
+ return false;
+
+ // Attempt to parse token as an immediate
+ if (parseImmediate(Operands) == MatchOperand_Success) {
+ // Parse memory base register if present
+ if (getLexer().is(AsmToken::LParen))
+ return parseMemOpBaseReg(Operands) != MatchOperand_Success;
+ return false;
+ }
+
+ // Finally we have exhausted all options and must declare defeat.
+ Error(getLoc(), "unknown operand");
+ return true;
+}
+
+bool RISCVAsmParser::ParseInstruction(ParseInstructionInfo &Info,
+ StringRef Name, SMLoc NameLoc,
+ OperandVector &Operands) {
+ // Ensure that if the instruction occurs when relaxation is enabled,
+ // relocations are forced for the file. Ideally this would be done when there
+ // is enough information to reliably determine if the instruction itself may
+  // cause relaxations. Unfortunately, the instruction processing stage occurs
+  // in the same pass as relocation emission, so it's too late to set a
+  // 'sticky bit' for the entire file.
+ if (getSTI().getFeatureBits()[RISCV::FeatureRelax]) {
+ auto *Assembler = getTargetStreamer().getStreamer().getAssemblerPtr();
+ if (Assembler != nullptr) {
+ RISCVAsmBackend &MAB =
+ static_cast<RISCVAsmBackend &>(Assembler->getBackend());
+ MAB.setForceRelocs();
+ }
+ }
+
+ // First operand is token for instruction
+ Operands.push_back(RISCVOperand::createToken(Name, NameLoc, isRV64()));
+
+ // If there are no more operands, then finish
+ if (getLexer().is(AsmToken::EndOfStatement))
+ return false;
+
+ // Parse first operand
+ if (parseOperand(Operands, Name))
+ return true;
+
+ // Parse until end of statement, consuming commas between operands
+ unsigned OperandIdx = 1;
+ while (getLexer().is(AsmToken::Comma)) {
+ // Consume comma token
+ getLexer().Lex();
+
+ // Parse next operand
+ if (parseOperand(Operands, Name))
+ return true;
+
+ ++OperandIdx;
+ }
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ SMLoc Loc = getLexer().getLoc();
+ getParser().eatToEndOfStatement();
+ return Error(Loc, "unexpected token");
+ }
+
+ getParser().Lex(); // Consume the EndOfStatement.
+ return false;
+}
+
+bool RISCVAsmParser::classifySymbolRef(const MCExpr *Expr,
+ RISCVMCExpr::VariantKind &Kind,
+ int64_t &Addend) {
+ Kind = RISCVMCExpr::VK_RISCV_None;
+ Addend = 0;
+
+ if (const RISCVMCExpr *RE = dyn_cast<RISCVMCExpr>(Expr)) {
+ Kind = RE->getKind();
+ Expr = RE->getSubExpr();
+ }
+
+ // It's a simple symbol reference or constant with no addend.
+ if (isa<MCConstantExpr>(Expr) || isa<MCSymbolRefExpr>(Expr))
+ return true;
+
+ const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
+ if (!BE)
+ return false;
+
+ if (!isa<MCSymbolRefExpr>(BE->getLHS()))
+ return false;
+
+ if (BE->getOpcode() != MCBinaryExpr::Add &&
+ BE->getOpcode() != MCBinaryExpr::Sub)
+ return false;
+
+ // We are able to support the subtraction of two symbol references
+ if (BE->getOpcode() == MCBinaryExpr::Sub &&
+ isa<MCSymbolRefExpr>(BE->getRHS()))
+ return true;
+
+ // See if the addend is a constant, otherwise there's more going
+ // on here than we can deal with.
+ auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
+ if (!AddendExpr)
+ return false;
+
+ Addend = AddendExpr->getValue();
+ if (BE->getOpcode() == MCBinaryExpr::Sub)
+ Addend = -Addend;
+
+ // It's some symbol reference + a constant addend
+ return Kind != RISCVMCExpr::VK_RISCV_Invalid;
+}
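+// For example (symbol names arbitrary): "foo", "foo + 8" and "foo - bar" all
+// classify successfully, while "foo + bar" is rejected because the addend is
+// not a constant.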
+
+bool RISCVAsmParser::ParseDirective(AsmToken DirectiveID) {
+ // This returns false if this function recognizes the directive
+  // regardless of whether it successfully handles it or reports an
+ // error. Otherwise it returns true to give the generic parser a
+ // chance at recognizing it.
+ StringRef IDVal = DirectiveID.getString();
+
+ if (IDVal == ".option")
+ return parseDirectiveOption();
+
+ return true;
+}
+
+bool RISCVAsmParser::parseDirectiveOption() {
+ MCAsmParser &Parser = getParser();
+ // Get the option token.
+ AsmToken Tok = Parser.getTok();
+ // At the moment only identifiers are supported.
+ if (Tok.isNot(AsmToken::Identifier))
+ return Error(Parser.getTok().getLoc(),
+ "unexpected token, expected identifier");
+
+ StringRef Option = Tok.getIdentifier();
+
+ if (Option == "push") {
+ getTargetStreamer().emitDirectiveOptionPush();
+
+ Parser.Lex();
+ if (Parser.getTok().isNot(AsmToken::EndOfStatement))
+ return Error(Parser.getTok().getLoc(),
+ "unexpected token, expected end of statement");
+
+ pushFeatureBits();
+ return false;
+ }
+
+ if (Option == "pop") {
+ SMLoc StartLoc = Parser.getTok().getLoc();
+ getTargetStreamer().emitDirectiveOptionPop();
+
+ Parser.Lex();
+ if (Parser.getTok().isNot(AsmToken::EndOfStatement))
+ return Error(Parser.getTok().getLoc(),
+ "unexpected token, expected end of statement");
+
+ if (popFeatureBits())
+ return Error(StartLoc, ".option pop with no .option push");
+
+ return false;
+ }
+
+ if (Option == "rvc") {
+ getTargetStreamer().emitDirectiveOptionRVC();
+
+ Parser.Lex();
+ if (Parser.getTok().isNot(AsmToken::EndOfStatement))
+ return Error(Parser.getTok().getLoc(),
+ "unexpected token, expected end of statement");
+
+ setFeatureBits(RISCV::FeatureStdExtC, "c");
+ return false;
+ }
+
+ if (Option == "norvc") {
+ getTargetStreamer().emitDirectiveOptionNoRVC();
+
+ Parser.Lex();
+ if (Parser.getTok().isNot(AsmToken::EndOfStatement))
+ return Error(Parser.getTok().getLoc(),
+ "unexpected token, expected end of statement");
+
+ clearFeatureBits(RISCV::FeatureStdExtC, "c");
+ return false;
+ }
+
+ if (Option == "relax") {
+ getTargetStreamer().emitDirectiveOptionRelax();
+
+ Parser.Lex();
+ if (Parser.getTok().isNot(AsmToken::EndOfStatement))
+ return Error(Parser.getTok().getLoc(),
+ "unexpected token, expected end of statement");
+
+ setFeatureBits(RISCV::FeatureRelax, "relax");
+ return false;
+ }
+
+ if (Option == "norelax") {
+ getTargetStreamer().emitDirectiveOptionNoRelax();
+
+ Parser.Lex();
+ if (Parser.getTok().isNot(AsmToken::EndOfStatement))
+ return Error(Parser.getTok().getLoc(),
+ "unexpected token, expected end of statement");
+
+ clearFeatureBits(RISCV::FeatureRelax, "relax");
+ return false;
+ }
+
+ // Unknown option.
+ Warning(Parser.getTok().getLoc(),
+ "unknown option, expected 'push', 'pop', 'rvc', 'norvc', 'relax' or "
+ "'norelax'");
+ Parser.eatToEndOfStatement();
+ return false;
+}
+
+void RISCVAsmParser::emitToStreamer(MCStreamer &S, const MCInst &Inst) {
+ MCInst CInst;
+ bool Res = compressInst(CInst, Inst, getSTI(), S.getContext());
+ CInst.setLoc(Inst.getLoc());
+ S.EmitInstruction((Res ? CInst : Inst), getSTI());
+}
+
+void RISCVAsmParser::emitLoadImm(unsigned DestReg, int64_t Value,
+ MCStreamer &Out) {
+ RISCVMatInt::InstSeq Seq;
+ RISCVMatInt::generateInstSeq(Value, isRV64(), Seq);
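+  // For example, on RV32 "li a0, 0x12345678" yields the two-entry sequence
+  // LUI (Imm = 0x12345) followed by ADDI (Imm = 0x678), i.e.
+  //   lui a0, 0x12345
+  //   addi a0, a0, 0x678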
+
+ unsigned SrcReg = RISCV::X0;
+ for (RISCVMatInt::Inst &Inst : Seq) {
+ if (Inst.Opc == RISCV::LUI) {
+ emitToStreamer(
+ Out, MCInstBuilder(RISCV::LUI).addReg(DestReg).addImm(Inst.Imm));
+ } else {
+ emitToStreamer(
+ Out, MCInstBuilder(Inst.Opc).addReg(DestReg).addReg(SrcReg).addImm(
+ Inst.Imm));
+ }
+
+ // Only the first instruction has X0 as its source.
+ SrcReg = DestReg;
+ }
+}
+
+void RISCVAsmParser::emitAuipcInstPair(MCOperand DestReg, MCOperand TmpReg,
+ const MCExpr *Symbol,
+ RISCVMCExpr::VariantKind VKHi,
+ unsigned SecondOpcode, SMLoc IDLoc,
+ MCStreamer &Out) {
+ // A pair of instructions for PC-relative addressing; expands to
+ // TmpLabel: AUIPC TmpReg, VKHi(symbol)
+ // OP DestReg, TmpReg, %pcrel_lo(TmpLabel)
+ MCContext &Ctx = getContext();
+
+ MCSymbol *TmpLabel = Ctx.createTempSymbol(
+ "pcrel_hi", /* AlwaysAddSuffix */ true, /* CanBeUnnamed */ false);
+ Out.EmitLabel(TmpLabel);
+
+ const RISCVMCExpr *SymbolHi = RISCVMCExpr::create(Symbol, VKHi, Ctx);
+ emitToStreamer(
+ Out, MCInstBuilder(RISCV::AUIPC).addOperand(TmpReg).addExpr(SymbolHi));
+
+ const MCExpr *RefToLinkTmpLabel =
+ RISCVMCExpr::create(MCSymbolRefExpr::create(TmpLabel, Ctx),
+ RISCVMCExpr::VK_RISCV_PCREL_LO, Ctx);
+
+ emitToStreamer(Out, MCInstBuilder(SecondOpcode)
+ .addOperand(DestReg)
+ .addOperand(TmpReg)
+ .addExpr(RefToLinkTmpLabel));
+}
+
+void RISCVAsmParser::emitLoadLocalAddress(MCInst &Inst, SMLoc IDLoc,
+ MCStreamer &Out) {
+ // The load local address pseudo-instruction "lla" is used in PC-relative
+ // addressing of local symbols:
+ // lla rdest, symbol
+ // expands to
+ // TmpLabel: AUIPC rdest, %pcrel_hi(symbol)
+ // ADDI rdest, rdest, %pcrel_lo(TmpLabel)
+ MCOperand DestReg = Inst.getOperand(0);
+ const MCExpr *Symbol = Inst.getOperand(1).getExpr();
+ emitAuipcInstPair(DestReg, DestReg, Symbol, RISCVMCExpr::VK_RISCV_PCREL_HI,
+ RISCV::ADDI, IDLoc, Out);
+}
+
+void RISCVAsmParser::emitLoadAddress(MCInst &Inst, SMLoc IDLoc,
+ MCStreamer &Out) {
+ // The load address pseudo-instruction "la" is used in PC-relative and
+ // GOT-indirect addressing of global symbols:
+ // la rdest, symbol
+ // expands to either (for non-PIC)
+ // TmpLabel: AUIPC rdest, %pcrel_hi(symbol)
+ // ADDI rdest, rdest, %pcrel_lo(TmpLabel)
+ // or (for PIC)
+ // TmpLabel: AUIPC rdest, %got_pcrel_hi(symbol)
+ // Lx rdest, %pcrel_lo(TmpLabel)(rdest)
+ MCOperand DestReg = Inst.getOperand(0);
+ const MCExpr *Symbol = Inst.getOperand(1).getExpr();
+ unsigned SecondOpcode;
+ RISCVMCExpr::VariantKind VKHi;
+ // FIXME: Should check .option (no)pic when implemented
+ if (getContext().getObjectFileInfo()->isPositionIndependent()) {
+ SecondOpcode = isRV64() ? RISCV::LD : RISCV::LW;
+ VKHi = RISCVMCExpr::VK_RISCV_GOT_HI;
+ } else {
+ SecondOpcode = RISCV::ADDI;
+ VKHi = RISCVMCExpr::VK_RISCV_PCREL_HI;
+ }
+ emitAuipcInstPair(DestReg, DestReg, Symbol, VKHi, SecondOpcode, IDLoc, Out);
+}
+
+void RISCVAsmParser::emitLoadTLSIEAddress(MCInst &Inst, SMLoc IDLoc,
+ MCStreamer &Out) {
+ // The load TLS IE address pseudo-instruction "la.tls.ie" is used in
+ // initial-exec TLS model addressing of global symbols:
+ // la.tls.ie rdest, symbol
+ // expands to
+ // TmpLabel: AUIPC rdest, %tls_ie_pcrel_hi(symbol)
+ // Lx rdest, %pcrel_lo(TmpLabel)(rdest)
+ MCOperand DestReg = Inst.getOperand(0);
+ const MCExpr *Symbol = Inst.getOperand(1).getExpr();
+ unsigned SecondOpcode = isRV64() ? RISCV::LD : RISCV::LW;
+ emitAuipcInstPair(DestReg, DestReg, Symbol, RISCVMCExpr::VK_RISCV_TLS_GOT_HI,
+ SecondOpcode, IDLoc, Out);
+}
+
+void RISCVAsmParser::emitLoadTLSGDAddress(MCInst &Inst, SMLoc IDLoc,
+ MCStreamer &Out) {
+ // The load TLS GD address pseudo-instruction "la.tls.gd" is used in
+ // global-dynamic TLS model addressing of global symbols:
+ // la.tls.gd rdest, symbol
+ // expands to
+ // TmpLabel: AUIPC rdest, %tls_gd_pcrel_hi(symbol)
+ // ADDI rdest, rdest, %pcrel_lo(TmpLabel)
+ MCOperand DestReg = Inst.getOperand(0);
+ const MCExpr *Symbol = Inst.getOperand(1).getExpr();
+ emitAuipcInstPair(DestReg, DestReg, Symbol, RISCVMCExpr::VK_RISCV_TLS_GD_HI,
+ RISCV::ADDI, IDLoc, Out);
+}
+
+void RISCVAsmParser::emitLoadStoreSymbol(MCInst &Inst, unsigned Opcode,
+ SMLoc IDLoc, MCStreamer &Out,
+ bool HasTmpReg) {
+  // The load/store pseudo-instruction does a PC-relative load or store
+  // against a symbol.
+ //
+ // The expansion looks like this
+ //
+ // TmpLabel: AUIPC tmp, %pcrel_hi(symbol)
+ // [S|L]X rd, %pcrel_lo(TmpLabel)(tmp)
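+  //
+  // For example, "lw a0, sym" (which has no separate tmp register) expands
+  // to
+  //  TmpLabel: AUIPC a0, %pcrel_hi(sym)
+  //  LW a0, %pcrel_lo(TmpLabel)(a0)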
+ MCOperand DestReg = Inst.getOperand(0);
+ unsigned SymbolOpIdx = HasTmpReg ? 2 : 1;
+ unsigned TmpRegOpIdx = HasTmpReg ? 1 : 0;
+ MCOperand TmpReg = Inst.getOperand(TmpRegOpIdx);
+ const MCExpr *Symbol = Inst.getOperand(SymbolOpIdx).getExpr();
+ emitAuipcInstPair(DestReg, TmpReg, Symbol, RISCVMCExpr::VK_RISCV_PCREL_HI,
+ Opcode, IDLoc, Out);
+}
+
+bool RISCVAsmParser::checkPseudoAddTPRel(MCInst &Inst,
+ OperandVector &Operands) {
+ assert(Inst.getOpcode() == RISCV::PseudoAddTPRel && "Invalid instruction");
+ assert(Inst.getOperand(2).isReg() && "Unexpected second operand kind");
+ if (Inst.getOperand(2).getReg() != RISCV::X4) {
+ SMLoc ErrorLoc = ((RISCVOperand &)*Operands[3]).getStartLoc();
+ return Error(ErrorLoc, "the second input operand must be tp/x4 when using "
+ "%tprel_add modifier");
+ }
+
+ return false;
+}
+
+bool RISCVAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
+ OperandVector &Operands,
+ MCStreamer &Out) {
+ Inst.setLoc(IDLoc);
+
+ switch (Inst.getOpcode()) {
+ default:
+ break;
+ case RISCV::PseudoLI: {
+ unsigned Reg = Inst.getOperand(0).getReg();
+ const MCOperand &Op1 = Inst.getOperand(1);
+ if (Op1.isExpr()) {
+ // We must have li reg, %lo(sym) or li reg, %pcrel_lo(sym) or similar.
+ // Just convert to an addi. This allows compatibility with gas.
+ emitToStreamer(Out, MCInstBuilder(RISCV::ADDI)
+ .addReg(Reg)
+ .addReg(RISCV::X0)
+ .addExpr(Op1.getExpr()));
+ return false;
+ }
+ int64_t Imm = Inst.getOperand(1).getImm();
+ // On RV32 the immediate here can either be a signed or an unsigned
+ // 32-bit number. Sign extension has to be performed to ensure that Imm
+ // represents the expected signed 64-bit number.
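+    // For example, "li a0, 0xffffffff" on RV32 becomes Imm = -1 and is
+    // emitted as "addi a0, zero, -1".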
+ if (!isRV64())
+ Imm = SignExtend64<32>(Imm);
+ emitLoadImm(Reg, Imm, Out);
+ return false;
+ }
+ case RISCV::PseudoLLA:
+ emitLoadLocalAddress(Inst, IDLoc, Out);
+ return false;
+ case RISCV::PseudoLA:
+ emitLoadAddress(Inst, IDLoc, Out);
+ return false;
+ case RISCV::PseudoLA_TLS_IE:
+ emitLoadTLSIEAddress(Inst, IDLoc, Out);
+ return false;
+ case RISCV::PseudoLA_TLS_GD:
+ emitLoadTLSGDAddress(Inst, IDLoc, Out);
+ return false;
+ case RISCV::PseudoLB:
+ emitLoadStoreSymbol(Inst, RISCV::LB, IDLoc, Out, /*HasTmpReg=*/false);
+ return false;
+ case RISCV::PseudoLBU:
+ emitLoadStoreSymbol(Inst, RISCV::LBU, IDLoc, Out, /*HasTmpReg=*/false);
+ return false;
+ case RISCV::PseudoLH:
+ emitLoadStoreSymbol(Inst, RISCV::LH, IDLoc, Out, /*HasTmpReg=*/false);
+ return false;
+ case RISCV::PseudoLHU:
+ emitLoadStoreSymbol(Inst, RISCV::LHU, IDLoc, Out, /*HasTmpReg=*/false);
+ return false;
+ case RISCV::PseudoLW:
+ emitLoadStoreSymbol(Inst, RISCV::LW, IDLoc, Out, /*HasTmpReg=*/false);
+ return false;
+ case RISCV::PseudoLWU:
+ emitLoadStoreSymbol(Inst, RISCV::LWU, IDLoc, Out, /*HasTmpReg=*/false);
+ return false;
+ case RISCV::PseudoLD:
+ emitLoadStoreSymbol(Inst, RISCV::LD, IDLoc, Out, /*HasTmpReg=*/false);
+ return false;
+ case RISCV::PseudoFLW:
+ emitLoadStoreSymbol(Inst, RISCV::FLW, IDLoc, Out, /*HasTmpReg=*/true);
+ return false;
+ case RISCV::PseudoFLD:
+ emitLoadStoreSymbol(Inst, RISCV::FLD, IDLoc, Out, /*HasTmpReg=*/true);
+ return false;
+ case RISCV::PseudoSB:
+ emitLoadStoreSymbol(Inst, RISCV::SB, IDLoc, Out, /*HasTmpReg=*/true);
+ return false;
+ case RISCV::PseudoSH:
+ emitLoadStoreSymbol(Inst, RISCV::SH, IDLoc, Out, /*HasTmpReg=*/true);
+ return false;
+ case RISCV::PseudoSW:
+ emitLoadStoreSymbol(Inst, RISCV::SW, IDLoc, Out, /*HasTmpReg=*/true);
+ return false;
+ case RISCV::PseudoSD:
+ emitLoadStoreSymbol(Inst, RISCV::SD, IDLoc, Out, /*HasTmpReg=*/true);
+ return false;
+ case RISCV::PseudoFSW:
+ emitLoadStoreSymbol(Inst, RISCV::FSW, IDLoc, Out, /*HasTmpReg=*/true);
+ return false;
+ case RISCV::PseudoFSD:
+ emitLoadStoreSymbol(Inst, RISCV::FSD, IDLoc, Out, /*HasTmpReg=*/true);
+ return false;
+ case RISCV::PseudoAddTPRel:
+ if (checkPseudoAddTPRel(Inst, Operands))
+ return true;
+ break;
+ }
+
+ emitToStreamer(Out, Inst);
+ return false;
+}
+
+extern "C" void LLVMInitializeRISCVAsmParser() {
+ RegisterMCAsmParser<RISCVAsmParser> X(getTheRISCV32Target());
+ RegisterMCAsmParser<RISCVAsmParser> Y(getTheRISCV64Target());
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
new file mode 100644
index 000000000000..36200c03f703
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -0,0 +1,330 @@
+//===-- RISCVDisassembler.cpp - Disassembler for RISCV --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the RISCVDisassembler class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "TargetInfo/RISCVTargetInfo.h"
+#include "Utils/RISCVBaseInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
+#include "llvm/MC/MCFixedLenDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-disassembler"
+
+typedef MCDisassembler::DecodeStatus DecodeStatus;
+
+namespace {
+class RISCVDisassembler : public MCDisassembler {
+
+public:
+ RISCVDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx)
+ : MCDisassembler(STI, Ctx) {}
+
+ DecodeStatus getInstruction(MCInst &Instr, uint64_t &Size,
+ ArrayRef<uint8_t> Bytes, uint64_t Address,
+ raw_ostream &VStream,
+ raw_ostream &CStream) const override;
+};
+} // end anonymous namespace
+
+static MCDisassembler *createRISCVDisassembler(const Target &T,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new RISCVDisassembler(STI, Ctx);
+}
+
+extern "C" void LLVMInitializeRISCVDisassembler() {
+ // Register the disassembler for each target.
+ TargetRegistry::RegisterMCDisassembler(getTheRISCV32Target(),
+ createRISCVDisassembler);
+ TargetRegistry::RegisterMCDisassembler(getTheRISCV64Target(),
+ createRISCVDisassembler);
+}
+
+static const unsigned GPRDecoderTable[] = {
+ RISCV::X0, RISCV::X1, RISCV::X2, RISCV::X3,
+ RISCV::X4, RISCV::X5, RISCV::X6, RISCV::X7,
+ RISCV::X8, RISCV::X9, RISCV::X10, RISCV::X11,
+ RISCV::X12, RISCV::X13, RISCV::X14, RISCV::X15,
+ RISCV::X16, RISCV::X17, RISCV::X18, RISCV::X19,
+ RISCV::X20, RISCV::X21, RISCV::X22, RISCV::X23,
+ RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27,
+ RISCV::X28, RISCV::X29, RISCV::X30, RISCV::X31
+};
+
+static DecodeStatus DecodeGPRRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ const FeatureBitset &FeatureBits =
+ static_cast<const MCDisassembler *>(Decoder)
+ ->getSubtargetInfo()
+ .getFeatureBits();
+ bool IsRV32E = FeatureBits[RISCV::FeatureRV32E];
+
+  if (RegNo >= array_lengthof(GPRDecoderTable) || (IsRV32E && RegNo > 15))
+ return MCDisassembler::Fail;
+
+ // We must define our own mapping from RegNo to register identifier.
+ // Accessing index RegNo in the register class will work in the case that
+ // registers were added in ascending order, but not in general.
+ unsigned Reg = GPRDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static const unsigned FPR32DecoderTable[] = {
+ RISCV::F0_32, RISCV::F1_32, RISCV::F2_32, RISCV::F3_32,
+ RISCV::F4_32, RISCV::F5_32, RISCV::F6_32, RISCV::F7_32,
+ RISCV::F8_32, RISCV::F9_32, RISCV::F10_32, RISCV::F11_32,
+ RISCV::F12_32, RISCV::F13_32, RISCV::F14_32, RISCV::F15_32,
+ RISCV::F16_32, RISCV::F17_32, RISCV::F18_32, RISCV::F19_32,
+ RISCV::F20_32, RISCV::F21_32, RISCV::F22_32, RISCV::F23_32,
+ RISCV::F24_32, RISCV::F25_32, RISCV::F26_32, RISCV::F27_32,
+ RISCV::F28_32, RISCV::F29_32, RISCV::F30_32, RISCV::F31_32
+};
+
+static DecodeStatus DecodeFPR32RegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+  if (RegNo >= array_lengthof(FPR32DecoderTable))
+ return MCDisassembler::Fail;
+
+ // We must define our own mapping from RegNo to register identifier.
+ // Accessing index RegNo in the register class will work in the case that
+ // registers were added in ascending order, but not in general.
+ unsigned Reg = FPR32DecoderTable[RegNo];
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeFPR32CRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+  if (RegNo >= 8) {
+ return MCDisassembler::Fail;
+ }
+ unsigned Reg = FPR32DecoderTable[RegNo + 8];
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static const unsigned FPR64DecoderTable[] = {
+ RISCV::F0_64, RISCV::F1_64, RISCV::F2_64, RISCV::F3_64,
+ RISCV::F4_64, RISCV::F5_64, RISCV::F6_64, RISCV::F7_64,
+ RISCV::F8_64, RISCV::F9_64, RISCV::F10_64, RISCV::F11_64,
+ RISCV::F12_64, RISCV::F13_64, RISCV::F14_64, RISCV::F15_64,
+ RISCV::F16_64, RISCV::F17_64, RISCV::F18_64, RISCV::F19_64,
+ RISCV::F20_64, RISCV::F21_64, RISCV::F22_64, RISCV::F23_64,
+ RISCV::F24_64, RISCV::F25_64, RISCV::F26_64, RISCV::F27_64,
+ RISCV::F28_64, RISCV::F29_64, RISCV::F30_64, RISCV::F31_64
+};
+
+static DecodeStatus DecodeFPR64RegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+  if (RegNo >= array_lengthof(FPR64DecoderTable))
+ return MCDisassembler::Fail;
+
+ // We must define our own mapping from RegNo to register identifier.
+ // Accessing index RegNo in the register class will work in the case that
+ // registers were added in ascending order, but not in general.
+ unsigned Reg = FPR64DecoderTable[RegNo];
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeFPR64CRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+  if (RegNo >= 8) {
+ return MCDisassembler::Fail;
+ }
+ unsigned Reg = FPR64DecoderTable[RegNo + 8];
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeGPRNoX0RegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo == 0) {
+ return MCDisassembler::Fail;
+ }
+
+ return DecodeGPRRegisterClass(Inst, RegNo, Address, Decoder);
+}
+
+static DecodeStatus DecodeGPRNoX0X2RegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo == 2) {
+ return MCDisassembler::Fail;
+ }
+
+ return DecodeGPRNoX0RegisterClass(Inst, RegNo, Address, Decoder);
+}
+
+static DecodeStatus DecodeGPRCRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+  if (RegNo >= 8)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = GPRDecoderTable[RegNo + 8];
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
+// Add the implied SP operand for C.*SP compressed instructions. The SP
+// operand isn't explicitly encoded in the instruction.
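+// For example, when decoding "c.lwsp a0, 12(sp)", the X2 (sp) base register
+// operand is synthesised here before the immediate operand is appended.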
+static void addImplySP(MCInst &Inst, int64_t Address, const void *Decoder) {
+ if (Inst.getOpcode() == RISCV::C_LWSP || Inst.getOpcode() == RISCV::C_SWSP ||
+ Inst.getOpcode() == RISCV::C_LDSP || Inst.getOpcode() == RISCV::C_SDSP ||
+ Inst.getOpcode() == RISCV::C_FLWSP ||
+ Inst.getOpcode() == RISCV::C_FSWSP ||
+ Inst.getOpcode() == RISCV::C_FLDSP ||
+ Inst.getOpcode() == RISCV::C_FSDSP ||
+ Inst.getOpcode() == RISCV::C_ADDI4SPN) {
+ DecodeGPRRegisterClass(Inst, 2, Address, Decoder);
+ }
+ if (Inst.getOpcode() == RISCV::C_ADDI16SP) {
+ DecodeGPRRegisterClass(Inst, 2, Address, Decoder);
+ DecodeGPRRegisterClass(Inst, 2, Address, Decoder);
+ }
+}
+
+template <unsigned N>
+static DecodeStatus decodeUImmOperand(MCInst &Inst, uint64_t Imm,
+ int64_t Address, const void *Decoder) {
+ assert(isUInt<N>(Imm) && "Invalid immediate");
+ addImplySP(Inst, Address, Decoder);
+ Inst.addOperand(MCOperand::createImm(Imm));
+ return MCDisassembler::Success;
+}
+
+template <unsigned N>
+static DecodeStatus decodeUImmNonZeroOperand(MCInst &Inst, uint64_t Imm,
+ int64_t Address,
+ const void *Decoder) {
+ if (Imm == 0)
+ return MCDisassembler::Fail;
+ return decodeUImmOperand<N>(Inst, Imm, Address, Decoder);
+}
+
+template <unsigned N>
+static DecodeStatus decodeSImmOperand(MCInst &Inst, uint64_t Imm,
+ int64_t Address, const void *Decoder) {
+ assert(isUInt<N>(Imm) && "Invalid immediate");
+ addImplySP(Inst, Address, Decoder);
+ // Sign-extend the number in the bottom N bits of Imm
+ Inst.addOperand(MCOperand::createImm(SignExtend64<N>(Imm)));
+ return MCDisassembler::Success;
+}
+
+template <unsigned N>
+static DecodeStatus decodeSImmNonZeroOperand(MCInst &Inst, uint64_t Imm,
+ int64_t Address,
+ const void *Decoder) {
+ if (Imm == 0)
+ return MCDisassembler::Fail;
+ return decodeSImmOperand<N>(Inst, Imm, Address, Decoder);
+}
+
+template <unsigned N>
+static DecodeStatus decodeSImmOperandAndLsl1(MCInst &Inst, uint64_t Imm,
+ int64_t Address,
+ const void *Decoder) {
+ assert(isUInt<N>(Imm) && "Invalid immediate");
+ // Sign-extend the number in the bottom N bits of Imm after accounting for
+ // the fact that the N bit immediate is stored in N-1 bits (the LSB is
+ // always zero)
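+  // For example, with N = 12 a stored value of 0x400 becomes 0x800 after
+  // the shift and sign-extends to an offset of -2048.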
+ Inst.addOperand(MCOperand::createImm(SignExtend64<N>(Imm << 1)));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeCLUIImmOperand(MCInst &Inst, uint64_t Imm,
+ int64_t Address,
+ const void *Decoder) {
+ assert(isUInt<6>(Imm) && "Invalid immediate");
+ if (Imm > 31) {
+ Imm = (SignExtend64<6>(Imm) & 0xfffff);
+ }
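+  // For example, an encoded value of 63 sign-extends to -1 and is masked to
+  // 0xfffff, the equivalent lui-style immediate.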
+ Inst.addOperand(MCOperand::createImm(Imm));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeFRMArg(MCInst &Inst, uint64_t Imm,
+ int64_t Address,
+ const void *Decoder) {
+ assert(isUInt<3>(Imm) && "Invalid immediate");
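+  // Reject the reserved rounding-mode encodings (5 and 6).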
+ if (!llvm::RISCVFPRndMode::isValidRoundingMode(Imm))
+ return MCDisassembler::Fail;
+
+ Inst.addOperand(MCOperand::createImm(Imm));
+ return MCDisassembler::Success;
+}
+
+#include "RISCVGenDisassemblerTables.inc"
+
+DecodeStatus RISCVDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
+ ArrayRef<uint8_t> Bytes,
+ uint64_t Address,
+ raw_ostream &OS,
+ raw_ostream &CS) const {
+  // TODO: This will need modification when supporting instruction set
+  // extensions with instructions wider than 32 bits (up to 176 bits wide).
+ uint32_t Insn;
+ DecodeStatus Result;
+
+  // It's a 32-bit instruction if bits 1:0 are 0b11; compressed encodings
+  // use 0b00, 0b01 or 0b10 in those bits.
+ if ((Bytes[0] & 0x3) == 0x3) {
+ if (Bytes.size() < 4) {
+ Size = 0;
+ return MCDisassembler::Fail;
+ }
+ Insn = support::endian::read32le(Bytes.data());
+    LLVM_DEBUG(dbgs() << "Trying RISCV32 table:\n");
+ Result = decodeInstruction(DecoderTable32, MI, Insn, Address, this, STI);
+ Size = 4;
+ } else {
+ if (Bytes.size() < 2) {
+ Size = 0;
+ return MCDisassembler::Fail;
+ }
+ Insn = support::endian::read16le(Bytes.data());
+
+ if (!STI.getFeatureBits()[RISCV::Feature64Bit]) {
+ LLVM_DEBUG(
+ dbgs() << "Trying RISCV32Only_16 table (16-bit Instruction):\n");
+ // Calling the auto-generated decoder function.
+ Result = decodeInstruction(DecoderTableRISCV32Only_16, MI, Insn, Address,
+ this, STI);
+ if (Result != MCDisassembler::Fail) {
+ Size = 2;
+ return Result;
+ }
+ }
+
+ LLVM_DEBUG(dbgs() << "Trying RISCV_C table (16-bit Instruction):\n");
+ // Calling the auto-generated decoder function.
+ Result = decodeInstruction(DecoderTable16, MI, Insn, Address, this, STI);
+ Size = 2;
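+  // For example, an operand value of 0b1010 (I | R) prints as "ir".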
+ }
+
+ return Result;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
new file mode 100644
index 000000000000..61c5845cc175
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
@@ -0,0 +1,377 @@
+//===-- RISCVAsmBackend.cpp - RISCV Assembler Backend ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVAsmBackend.h"
+#include "RISCVMCExpr.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+// If linker relaxation is enabled, or the relax option had previously been
+// enabled, always emit relocations even if the fixup can be resolved. This is
+// necessary for correctness as offsets may change during relaxation.
+bool RISCVAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
+ const MCFixup &Fixup,
+ const MCValue &Target) {
+ bool ShouldForce = false;
+
+ switch ((unsigned)Fixup.getKind()) {
+ default:
+ break;
+ case FK_Data_1:
+ case FK_Data_2:
+ case FK_Data_4:
+ case FK_Data_8:
+ if (Target.isAbsolute())
+ return false;
+ break;
+ case RISCV::fixup_riscv_got_hi20:
+ case RISCV::fixup_riscv_tls_got_hi20:
+ case RISCV::fixup_riscv_tls_gd_hi20:
+ return true;
+ case RISCV::fixup_riscv_pcrel_lo12_i:
+ case RISCV::fixup_riscv_pcrel_lo12_s:
+ // For pcrel_lo12, force a relocation if the target of the corresponding
+ // pcrel_hi20 is not in the same fragment.
+ const MCFixup *T = cast<RISCVMCExpr>(Fixup.getValue())->getPCRelHiFixup();
+ if (!T) {
+ Asm.getContext().reportError(Fixup.getLoc(),
+ "could not find corresponding %pcrel_hi");
+ return false;
+ }
+
+ switch ((unsigned)T->getKind()) {
+ default:
+ llvm_unreachable("Unexpected fixup kind for pcrel_lo12");
+ break;
+ case RISCV::fixup_riscv_got_hi20:
+ case RISCV::fixup_riscv_tls_got_hi20:
+ case RISCV::fixup_riscv_tls_gd_hi20:
+ ShouldForce = true;
+ break;
+ case RISCV::fixup_riscv_pcrel_hi20:
+ ShouldForce = T->getValue()->findAssociatedFragment() !=
+ Fixup.getValue()->findAssociatedFragment();
+ break;
+ }
+ break;
+ }
+
+ return ShouldForce || STI.getFeatureBits()[RISCV::FeatureRelax] ||
+ ForceRelocs;
+}
+
+bool RISCVAsmBackend::fixupNeedsRelaxationAdvanced(const MCFixup &Fixup,
+ bool Resolved,
+ uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout,
+ const bool WasForced) const {
+  // Return true if the symbol is actually unresolved.
+  // Resolved may still be false when shouldForceRelocation returns true.
+  // We use !WasForced to indicate that the symbol is unresolved and was not
+  // forced by shouldForceRelocation.
+ if (!Resolved && !WasForced)
+ return true;
+
+ int64_t Offset = int64_t(Value);
+ switch ((unsigned)Fixup.getKind()) {
+ default:
+ return false;
+ case RISCV::fixup_riscv_rvc_branch:
+ // For compressed branch instructions the immediate must be
+ // in the range [-256, 254].
+ return Offset > 254 || Offset < -256;
+ case RISCV::fixup_riscv_rvc_jump:
+ // For compressed jump instructions the immediate must be
+ // in the range [-2048, 2046].
+ return Offset > 2046 || Offset < -2048;
+ }
+}
+
+void RISCVAsmBackend::relaxInstruction(const MCInst &Inst,
+ const MCSubtargetInfo &STI,
+ MCInst &Res) const {
+  // TODO: replace this with a call to the auto-generated uncompressInst()
+  // function.
+ switch (Inst.getOpcode()) {
+ default:
+ llvm_unreachable("Opcode not expected!");
+ case RISCV::C_BEQZ:
+ // c.beqz $rs1, $imm -> beq $rs1, X0, $imm.
+ Res.setOpcode(RISCV::BEQ);
+ Res.addOperand(Inst.getOperand(0));
+ Res.addOperand(MCOperand::createReg(RISCV::X0));
+ Res.addOperand(Inst.getOperand(1));
+ break;
+ case RISCV::C_BNEZ:
+ // c.bnez $rs1, $imm -> bne $rs1, X0, $imm.
+ Res.setOpcode(RISCV::BNE);
+ Res.addOperand(Inst.getOperand(0));
+ Res.addOperand(MCOperand::createReg(RISCV::X0));
+ Res.addOperand(Inst.getOperand(1));
+ break;
+ case RISCV::C_J:
+ // c.j $imm -> jal X0, $imm.
+ Res.setOpcode(RISCV::JAL);
+ Res.addOperand(MCOperand::createReg(RISCV::X0));
+ Res.addOperand(Inst.getOperand(0));
+ break;
+ case RISCV::C_JAL:
+ // c.jal $imm -> jal X1, $imm.
+ Res.setOpcode(RISCV::JAL);
+ Res.addOperand(MCOperand::createReg(RISCV::X1));
+ Res.addOperand(Inst.getOperand(0));
+ break;
+ }
+}
+
+// Given a compressed control-flow instruction, this function returns the
+// expanded instruction.
+unsigned RISCVAsmBackend::getRelaxedOpcode(unsigned Op) const {
+ switch (Op) {
+ default:
+ return Op;
+ case RISCV::C_BEQZ:
+ return RISCV::BEQ;
+ case RISCV::C_BNEZ:
+ return RISCV::BNE;
+ case RISCV::C_J:
+ case RISCV::C_JAL: // fall through.
+ return RISCV::JAL;
+ }
+}
+
+bool RISCVAsmBackend::mayNeedRelaxation(const MCInst &Inst,
+ const MCSubtargetInfo &STI) const {
+ return getRelaxedOpcode(Inst.getOpcode()) != Inst.getOpcode();
+}
+
+bool RISCVAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
+ bool HasStdExtC = STI.getFeatureBits()[RISCV::FeatureStdExtC];
+ unsigned MinNopLen = HasStdExtC ? 2 : 4;
+
+ if ((Count % MinNopLen) != 0)
+ return false;
+
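+  // For example, Count = 6 with the C extension emits one 4-byte nop
+  // followed by a single 2-byte c.nop.
+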
+ // The canonical nop on RISC-V is addi x0, x0, 0.
+ for (; Count >= 4; Count -= 4)
+ OS.write("\x13\0\0\0", 4);
+
+ // The canonical nop on RVC is c.nop.
+ if (Count && HasStdExtC)
+ OS.write("\x01\0", 2);
+
+ return true;
+}
+
+static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
+ MCContext &Ctx) {
+ unsigned Kind = Fixup.getKind();
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown fixup kind!");
+ case RISCV::fixup_riscv_got_hi20:
+ case RISCV::fixup_riscv_tls_got_hi20:
+ case RISCV::fixup_riscv_tls_gd_hi20:
+ llvm_unreachable("Relocation should be unconditionally forced\n");
+ case FK_Data_1:
+ case FK_Data_2:
+ case FK_Data_4:
+ case FK_Data_8:
+ case FK_Data_6b:
+ return Value;
+ case RISCV::fixup_riscv_lo12_i:
+ case RISCV::fixup_riscv_pcrel_lo12_i:
+ case RISCV::fixup_riscv_tprel_lo12_i:
+ return Value & 0xfff;
+ case RISCV::fixup_riscv_lo12_s:
+ case RISCV::fixup_riscv_pcrel_lo12_s:
+ case RISCV::fixup_riscv_tprel_lo12_s:
+ return (((Value >> 5) & 0x7f) << 25) | ((Value & 0x1f) << 7);
+ case RISCV::fixup_riscv_hi20:
+ case RISCV::fixup_riscv_pcrel_hi20:
+ case RISCV::fixup_riscv_tprel_hi20:
+ // Add 1 if bit 11 is 1, to compensate for low 12 bits being negative.
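+    // For example, Value = 0x12345fff yields hi20 = 0x12346; adding the
+    // sign-extended lo12 of 0xfff (-1) reconstructs 0x12345fff.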
+ return ((Value + 0x800) >> 12) & 0xfffff;
+ case RISCV::fixup_riscv_jal: {
+ if (!isInt<21>(Value))
+ Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+ if (Value & 0x1)
+ Ctx.reportError(Fixup.getLoc(), "fixup value must be 2-byte aligned");
+    // Need to produce imm[20|10:1|11|19:12] from the 21-bit Value.
+ unsigned Sbit = (Value >> 20) & 0x1;
+ unsigned Hi8 = (Value >> 12) & 0xff;
+ unsigned Mid1 = (Value >> 11) & 0x1;
+ unsigned Lo10 = (Value >> 1) & 0x3ff;
+ // Inst{31} = Sbit;
+ // Inst{30-21} = Lo10;
+ // Inst{20} = Mid1;
+ // Inst{19-12} = Hi8;
+ Value = (Sbit << 19) | (Lo10 << 9) | (Mid1 << 8) | Hi8;
+ return Value;
+ }
+ case RISCV::fixup_riscv_branch: {
+ if (!isInt<13>(Value))
+ Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+ if (Value & 0x1)
+ Ctx.reportError(Fixup.getLoc(), "fixup value must be 2-byte aligned");
+ // Need to extract imm[12], imm[10:5], imm[4:1], imm[11] from the 13-bit
+ // Value.
+ unsigned Sbit = (Value >> 12) & 0x1;
+ unsigned Hi1 = (Value >> 11) & 0x1;
+ unsigned Mid6 = (Value >> 5) & 0x3f;
+ unsigned Lo4 = (Value >> 1) & 0xf;
+ // Inst{31} = Sbit;
+ // Inst{30-25} = Mid6;
+ // Inst{11-8} = Lo4;
+ // Inst{7} = Hi1;
+ Value = (Sbit << 31) | (Mid6 << 25) | (Lo4 << 8) | (Hi1 << 7);
+ return Value;
+ }
+ case RISCV::fixup_riscv_call:
+ case RISCV::fixup_riscv_call_plt: {
+    // Jalr adds the sign-extended 12-bit LowerImm to UpperImm, so we must
+    // add 0x800ULL before extracting the upper bits to compensate for the
+    // effect of the sign extension.
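+    // For example, Value = 0x12345678 produces UpperImm = 0x12345000 for
+    // the auipc and LowerImm = 0x678 for the jalr.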
+ uint64_t UpperImm = (Value + 0x800ULL) & 0xfffff000ULL;
+ uint64_t LowerImm = Value & 0xfffULL;
+ return UpperImm | ((LowerImm << 20) << 32);
+ }
+ case RISCV::fixup_riscv_rvc_jump: {
+ // Need to produce offset[11|4|9:8|10|6|7|3:1|5] from the 11-bit Value.
+ unsigned Bit11 = (Value >> 11) & 0x1;
+ unsigned Bit4 = (Value >> 4) & 0x1;
+ unsigned Bit9_8 = (Value >> 8) & 0x3;
+ unsigned Bit10 = (Value >> 10) & 0x1;
+ unsigned Bit6 = (Value >> 6) & 0x1;
+ unsigned Bit7 = (Value >> 7) & 0x1;
+ unsigned Bit3_1 = (Value >> 1) & 0x7;
+ unsigned Bit5 = (Value >> 5) & 0x1;
+ Value = (Bit11 << 10) | (Bit4 << 9) | (Bit9_8 << 7) | (Bit10 << 6) |
+ (Bit6 << 5) | (Bit7 << 4) | (Bit3_1 << 1) | Bit5;
+ return Value;
+ }
+ case RISCV::fixup_riscv_rvc_branch: {
+ // Need to produce offset[8|4:3], [reg 3 bit], offset[7:6|2:1|5]
+ unsigned Bit8 = (Value >> 8) & 0x1;
+ unsigned Bit7_6 = (Value >> 6) & 0x3;
+ unsigned Bit5 = (Value >> 5) & 0x1;
+ unsigned Bit4_3 = (Value >> 3) & 0x3;
+ unsigned Bit2_1 = (Value >> 1) & 0x3;
+ Value = (Bit8 << 12) | (Bit4_3 << 10) | (Bit7_6 << 5) | (Bit2_1 << 3) |
+ (Bit5 << 2);
+ return Value;
+ }
+ }
+}
+
+void RISCVAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
+ const MCValue &Target,
+ MutableArrayRef<char> Data, uint64_t Value,
+ bool IsResolved,
+ const MCSubtargetInfo *STI) const {
+ MCContext &Ctx = Asm.getContext();
+ MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
+ if (!Value)
+ return; // Doesn't change encoding.
+ // Apply any target-specific value adjustments.
+ Value = adjustFixupValue(Fixup, Value, Ctx);
+
+ // Shift the value into position.
+ Value <<= Info.TargetOffset;
+
+ unsigned Offset = Fixup.getOffset();
+ unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;
+
+ assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
+
+ // For each byte of the fragment that the fixup touches, mask in the
+ // bits from the fixup value.
+ for (unsigned i = 0; i != NumBytes; ++i) {
+ Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
+ }
+}
+
+// Linker relaxation may change code size. We have to insert Nops for the
+// .align directive when linker relaxation is enabled, so that the linker
+// can later satisfy the alignment by removing Nops.
+// This function returns the total size of the Nops we need to insert.
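+// For example, a request to align to 16 bytes with the C extension enabled
+// reserves 16 - 2 = 14 bytes of nops for the linker to trim.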
+bool RISCVAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
+ const MCAlignFragment &AF, unsigned &Size) {
+ // Calculate Nops Size only when linker relaxation enabled.
+ if (!STI.getFeatureBits()[RISCV::FeatureRelax])
+ return false;
+
+ bool HasStdExtC = STI.getFeatureBits()[RISCV::FeatureStdExtC];
+ unsigned MinNopLen = HasStdExtC ? 2 : 4;
+
+  if (AF.getAlignment() <= MinNopLen)
+    return false;
+
+  Size = AF.getAlignment() - MinNopLen;
+  return true;
+}
+
+// We need to insert an R_RISCV_ALIGN relocation to record the position of
+// the Nops and the total number of Nop bytes inserted when linker
+// relaxation is enabled.
+// This function inserts a fixup_riscv_align fixup, which eventually
+// translates into the R_RISCV_ALIGN relocation type.
+bool RISCVAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ MCAlignFragment &AF) {
+ // Insert the fixup only when linker relaxation enabled.
+ if (!STI.getFeatureBits()[RISCV::FeatureRelax])
+ return false;
+
+ // Calculate total Nops we need to insert. If there are none to insert
+ // then simply return.
+ unsigned Count;
+ if (!shouldInsertExtraNopBytesForCodeAlign(AF, Count) || (Count == 0))
+ return false;
+
+ MCContext &Ctx = Asm.getContext();
+ const MCExpr *Dummy = MCConstantExpr::create(0, Ctx);
+ // Create fixup_riscv_align fixup.
+ MCFixup Fixup =
+ MCFixup::create(0, Dummy, MCFixupKind(RISCV::fixup_riscv_align), SMLoc());
+
+ uint64_t FixedValue = 0;
+ MCValue NopBytes = MCValue::get(Count);
+
+ Asm.getWriter().recordRelocation(Asm, Layout, &AF, Fixup, NopBytes,
+ FixedValue);
+
+ return true;
+}
+
+std::unique_ptr<MCObjectTargetWriter>
+RISCVAsmBackend::createObjectTargetWriter() const {
+ return createRISCVELFObjectWriter(OSABI, Is64Bit);
+}
+
+MCAsmBackend *llvm::createRISCVAsmBackend(const Target &T,
+ const MCSubtargetInfo &STI,
+ const MCRegisterInfo &MRI,
+ const MCTargetOptions &Options) {
+ const Triple &TT = STI.getTargetTriple();
+ uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS());
+ return new RISCVAsmBackend(STI, OSABI, TT.isArch64Bit(), Options);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
new file mode 100644
index 000000000000..254249c87dc8
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
@@ -0,0 +1,149 @@
+//===-- RISCVAsmBackend.h - RISCV Assembler Backend -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVASMBACKEND_H
+#define LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVASMBACKEND_H
+
+#include "MCTargetDesc/RISCVFixupKinds.h"
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "Utils/RISCVBaseInfo.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+
+namespace llvm {
+class MCAssembler;
+class MCObjectTargetWriter;
+class raw_ostream;
+
+class RISCVAsmBackend : public MCAsmBackend {
+ const MCSubtargetInfo &STI;
+ uint8_t OSABI;
+ bool Is64Bit;
+ bool ForceRelocs = false;
+ const MCTargetOptions &TargetOptions;
+ RISCVABI::ABI TargetABI = RISCVABI::ABI_Unknown;
+
+public:
+ RISCVAsmBackend(const MCSubtargetInfo &STI, uint8_t OSABI, bool Is64Bit,
+ const MCTargetOptions &Options)
+ : MCAsmBackend(support::little), STI(STI), OSABI(OSABI), Is64Bit(Is64Bit),
+ TargetOptions(Options) {
+ TargetABI = RISCVABI::computeTargetABI(
+ STI.getTargetTriple(), STI.getFeatureBits(), Options.getABIName());
+ RISCVFeatures::validate(STI.getTargetTriple(), STI.getFeatureBits());
+ }
+ ~RISCVAsmBackend() override {}
+
+ void setForceRelocs() { ForceRelocs = true; }
+
+  // Return true if shouldForceRelocation will force relocations by default.
+  // This will be true if relaxation is enabled or had previously been
+  // enabled.
+ bool willForceRelocations() const {
+ return ForceRelocs || STI.getFeatureBits()[RISCV::FeatureRelax];
+ }
+
+  // Generate diff expression relocations if the relax feature is enabled or
+  // had previously been enabled; otherwise it is safe for the assembler to
+  // calculate these internally.
+ bool requiresDiffExpressionRelocations() const override {
+ return willForceRelocations();
+ }
+
+ // Return Size with extra Nop Bytes for alignment directive in code section.
+ bool shouldInsertExtraNopBytesForCodeAlign(const MCAlignFragment &AF,
+ unsigned &Size) override;
+
+ // Insert target specific fixup type for alignment directive in code section.
+ bool shouldInsertFixupForCodeAlign(MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ MCAlignFragment &AF) override;
+
+ void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
+ const MCValue &Target, MutableArrayRef<char> Data,
+ uint64_t Value, bool IsResolved,
+ const MCSubtargetInfo *STI) const override;
+
+ std::unique_ptr<MCObjectTargetWriter>
+ createObjectTargetWriter() const override;
+
+ bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
+ const MCValue &Target) override;
+
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const override {
+ llvm_unreachable("Handled by fixupNeedsRelaxationAdvanced");
+ }
+
+ bool fixupNeedsRelaxationAdvanced(const MCFixup &Fixup, bool Resolved,
+ uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout,
+ const bool WasForced) const override;
+
+ unsigned getNumFixupKinds() const override {
+ return RISCV::NumTargetFixupKinds;
+ }
+
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
+ const static MCFixupKindInfo Infos[] = {
+ // This table *must* be in the order that the fixup_* kinds are defined in
+ // RISCVFixupKinds.h.
+ //
+ // name offset bits flags
+ { "fixup_riscv_hi20", 12, 20, 0 },
+ { "fixup_riscv_lo12_i", 20, 12, 0 },
+ { "fixup_riscv_lo12_s", 0, 32, 0 },
+ { "fixup_riscv_pcrel_hi20", 12, 20, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_pcrel_lo12_i", 20, 12, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_pcrel_lo12_s", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_got_hi20", 12, 20, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_tprel_hi20", 12, 20, 0 },
+ { "fixup_riscv_tprel_lo12_i", 20, 12, 0 },
+ { "fixup_riscv_tprel_lo12_s", 0, 32, 0 },
+ { "fixup_riscv_tprel_add", 0, 0, 0 },
+ { "fixup_riscv_tls_got_hi20", 12, 20, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_tls_gd_hi20", 12, 20, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_jal", 12, 20, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_rvc_jump", 2, 11, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_rvc_branch", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_call", 0, 64, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_call_plt", 0, 64, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_relax", 0, 0, 0 },
+ { "fixup_riscv_align", 0, 0, 0 }
+ };
+ static_assert((array_lengthof(Infos)) == RISCV::NumTargetFixupKinds,
+ "Not all fixup kinds added to Infos array");
+
+ if (Kind < FirstTargetFixupKind)
+ return MCAsmBackend::getFixupKindInfo(Kind);
+
+ assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
+ "Invalid kind!");
+ return Infos[Kind - FirstTargetFixupKind];
+ }
+
+ bool mayNeedRelaxation(const MCInst &Inst,
+ const MCSubtargetInfo &STI) const override;
+ unsigned getRelaxedOpcode(unsigned Op) const;
+
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override;
+
+ bool writeNopData(raw_ostream &OS, uint64_t Count) const override;
+
+ const MCTargetOptions &getTargetOptions() const { return TargetOptions; }
+ RISCVABI::ABI getTargetABI() const { return TargetABI; }
+};
+}
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFObjectWriter.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFObjectWriter.cpp
new file mode 100644
index 000000000000..169876eb7719
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFObjectWriter.cpp
@@ -0,0 +1,142 @@
+//===-- RISCVELFObjectWriter.cpp - RISCV ELF Writer -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/RISCVFixupKinds.h"
+#include "MCTargetDesc/RISCVMCExpr.h"
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+namespace {
+class RISCVELFObjectWriter : public MCELFObjectTargetWriter {
+public:
+ RISCVELFObjectWriter(uint8_t OSABI, bool Is64Bit);
+
+ ~RISCVELFObjectWriter() override;
+
+ // Return true if the given relocation must be with a symbol rather than
+ // section plus offset.
+ bool needsRelocateWithSymbol(const MCSymbol &Sym,
+ unsigned Type) const override {
+ // TODO: this is very conservative, update once RISC-V psABI requirements
+ // are clarified.
+ return true;
+ }
+
+protected:
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
+};
+}
+
+RISCVELFObjectWriter::RISCVELFObjectWriter(uint8_t OSABI, bool Is64Bit)
+ : MCELFObjectTargetWriter(Is64Bit, OSABI, ELF::EM_RISCV,
+ /*HasRelocationAddend*/ true) {}
+
+RISCVELFObjectWriter::~RISCVELFObjectWriter() {}
+
+unsigned RISCVELFObjectWriter::getRelocType(MCContext &Ctx,
+ const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+ const MCExpr *Expr = Fixup.getValue();
+ // Determine the type of the relocation
+ unsigned Kind = Fixup.getKind();
+ if (IsPCRel) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("invalid fixup kind!");
+ case FK_Data_4:
+ case FK_PCRel_4:
+ return ELF::R_RISCV_32_PCREL;
+ case RISCV::fixup_riscv_pcrel_hi20:
+ return ELF::R_RISCV_PCREL_HI20;
+ case RISCV::fixup_riscv_pcrel_lo12_i:
+ return ELF::R_RISCV_PCREL_LO12_I;
+ case RISCV::fixup_riscv_pcrel_lo12_s:
+ return ELF::R_RISCV_PCREL_LO12_S;
+ case RISCV::fixup_riscv_got_hi20:
+ return ELF::R_RISCV_GOT_HI20;
+ case RISCV::fixup_riscv_tls_got_hi20:
+ return ELF::R_RISCV_TLS_GOT_HI20;
+ case RISCV::fixup_riscv_tls_gd_hi20:
+ return ELF::R_RISCV_TLS_GD_HI20;
+ case RISCV::fixup_riscv_jal:
+ return ELF::R_RISCV_JAL;
+ case RISCV::fixup_riscv_branch:
+ return ELF::R_RISCV_BRANCH;
+ case RISCV::fixup_riscv_rvc_jump:
+ return ELF::R_RISCV_RVC_JUMP;
+ case RISCV::fixup_riscv_rvc_branch:
+ return ELF::R_RISCV_RVC_BRANCH;
+ case RISCV::fixup_riscv_call:
+ return ELF::R_RISCV_CALL;
+ case RISCV::fixup_riscv_call_plt:
+ return ELF::R_RISCV_CALL_PLT;
+ }
+ }
+
+ switch (Kind) {
+ default:
+ llvm_unreachable("invalid fixup kind!");
+ case FK_Data_4:
+ if (Expr->getKind() == MCExpr::Target &&
+ cast<RISCVMCExpr>(Expr)->getKind() == RISCVMCExpr::VK_RISCV_32_PCREL)
+ return ELF::R_RISCV_32_PCREL;
+ return ELF::R_RISCV_32;
+ case FK_Data_8:
+ return ELF::R_RISCV_64;
+ case FK_Data_Add_1:
+ return ELF::R_RISCV_ADD8;
+ case FK_Data_Add_2:
+ return ELF::R_RISCV_ADD16;
+ case FK_Data_Add_4:
+ return ELF::R_RISCV_ADD32;
+ case FK_Data_Add_8:
+ return ELF::R_RISCV_ADD64;
+ case FK_Data_Add_6b:
+ return ELF::R_RISCV_SET6;
+ case FK_Data_Sub_1:
+ return ELF::R_RISCV_SUB8;
+ case FK_Data_Sub_2:
+ return ELF::R_RISCV_SUB16;
+ case FK_Data_Sub_4:
+ return ELF::R_RISCV_SUB32;
+ case FK_Data_Sub_8:
+ return ELF::R_RISCV_SUB64;
+ case FK_Data_Sub_6b:
+ return ELF::R_RISCV_SUB6;
+ case RISCV::fixup_riscv_hi20:
+ return ELF::R_RISCV_HI20;
+ case RISCV::fixup_riscv_lo12_i:
+ return ELF::R_RISCV_LO12_I;
+ case RISCV::fixup_riscv_lo12_s:
+ return ELF::R_RISCV_LO12_S;
+ case RISCV::fixup_riscv_tprel_hi20:
+ return ELF::R_RISCV_TPREL_HI20;
+ case RISCV::fixup_riscv_tprel_lo12_i:
+ return ELF::R_RISCV_TPREL_LO12_I;
+ case RISCV::fixup_riscv_tprel_lo12_s:
+ return ELF::R_RISCV_TPREL_LO12_S;
+ case RISCV::fixup_riscv_tprel_add:
+ return ELF::R_RISCV_TPREL_ADD;
+ case RISCV::fixup_riscv_relax:
+ return ELF::R_RISCV_RELAX;
+ case RISCV::fixup_riscv_align:
+ return ELF::R_RISCV_ALIGN;
+ }
+}
+
+std::unique_ptr<MCObjectTargetWriter>
+llvm::createRISCVELFObjectWriter(uint8_t OSABI, bool Is64Bit) {
+ return llvm::make_unique<RISCVELFObjectWriter>(OSABI, Is64Bit);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp
new file mode 100644
index 000000000000..40fa195f3790
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp
@@ -0,0 +1,68 @@
+//===-- RISCVELFStreamer.cpp - RISCV ELF Target Streamer Methods ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides RISCV specific target streamer methods.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVELFStreamer.h"
+#include "MCTargetDesc/RISCVAsmBackend.h"
+#include "RISCVMCTargetDesc.h"
+#include "Utils/RISCVBaseInfo.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+
+using namespace llvm;
+
+// This part is for ELF object output.
+RISCVTargetELFStreamer::RISCVTargetELFStreamer(MCStreamer &S,
+ const MCSubtargetInfo &STI)
+ : RISCVTargetStreamer(S) {
+ MCAssembler &MCA = getStreamer().getAssembler();
+ const FeatureBitset &Features = STI.getFeatureBits();
+ auto &MAB = static_cast<RISCVAsmBackend &>(MCA.getBackend());
+ RISCVABI::ABI ABI = MAB.getTargetABI();
+ assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
+
+ unsigned EFlags = MCA.getELFHeaderEFlags();
+
+ if (Features[RISCV::FeatureStdExtC])
+ EFlags |= ELF::EF_RISCV_RVC;
+
+ switch (ABI) {
+ case RISCVABI::ABI_ILP32:
+ case RISCVABI::ABI_LP64:
+ break;
+ case RISCVABI::ABI_ILP32F:
+ case RISCVABI::ABI_LP64F:
+ EFlags |= ELF::EF_RISCV_FLOAT_ABI_SINGLE;
+ break;
+ case RISCVABI::ABI_ILP32D:
+ case RISCVABI::ABI_LP64D:
+ EFlags |= ELF::EF_RISCV_FLOAT_ABI_DOUBLE;
+ break;
+ case RISCVABI::ABI_ILP32E:
+ EFlags |= ELF::EF_RISCV_RVE;
+ break;
+ case RISCVABI::ABI_Unknown:
+ llvm_unreachable("Improperly initialised target ABI");
+ }
+
+ MCA.setELFHeaderEFlags(EFlags);
+}
+
+MCELFStreamer &RISCVTargetELFStreamer::getStreamer() {
+ return static_cast<MCELFStreamer &>(Streamer);
+}
+
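+// The .option directives only affect assembler state, so the ELF target
+// streamer intentionally emits nothing for them.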
+void RISCVTargetELFStreamer::emitDirectiveOptionPush() {}
+void RISCVTargetELFStreamer::emitDirectiveOptionPop() {}
+void RISCVTargetELFStreamer::emitDirectiveOptionRVC() {}
+void RISCVTargetELFStreamer::emitDirectiveOptionNoRVC() {}
+void RISCVTargetELFStreamer::emitDirectiveOptionRelax() {}
+void RISCVTargetELFStreamer::emitDirectiveOptionNoRelax() {}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h
new file mode 100644
index 000000000000..138df786eaf3
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h
@@ -0,0 +1,30 @@
+//===-- RISCVELFStreamer.h - RISCV ELF Target Streamer ---------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVELFSTREAMER_H
+#define LLVM_LIB_TARGET_RISCV_RISCVELFSTREAMER_H
+
+#include "RISCVTargetStreamer.h"
+#include "llvm/MC/MCELFStreamer.h"
+
+namespace llvm {
+
+class RISCVTargetELFStreamer : public RISCVTargetStreamer {
+public:
+ MCELFStreamer &getStreamer();
+ RISCVTargetELFStreamer(MCStreamer &S, const MCSubtargetInfo &STI);
+
+ virtual void emitDirectiveOptionPush();
+ virtual void emitDirectiveOptionPop();
+ virtual void emitDirectiveOptionRVC();
+ virtual void emitDirectiveOptionNoRVC();
+ virtual void emitDirectiveOptionRelax();
+ virtual void emitDirectiveOptionNoRelax();
+};
+}
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVFixupKinds.h b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVFixupKinds.h
new file mode 100644
index 000000000000..6c7933340608
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVFixupKinds.h
@@ -0,0 +1,92 @@
+//===-- RISCVFixupKinds.h - RISCV Specific Fixup Entries --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVFIXUPKINDS_H
+#define LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVFIXUPKINDS_H
+
+#include "llvm/MC/MCFixup.h"
+
+#undef RISCV
+
+namespace llvm {
+namespace RISCV {
+enum Fixups {
+ // fixup_riscv_hi20 - 20-bit fixup corresponding to hi(foo) for
+ // instructions like lui
+ fixup_riscv_hi20 = FirstTargetFixupKind,
+ // fixup_riscv_lo12_i - 12-bit fixup corresponding to lo(foo) for
+ // instructions like addi
+ fixup_riscv_lo12_i,
+ // fixup_riscv_lo12_s - 12-bit fixup corresponding to lo(foo) for
+ // the S-type store instructions
+ fixup_riscv_lo12_s,
+ // fixup_riscv_pcrel_hi20 - 20-bit fixup corresponding to pcrel_hi(foo) for
+ // instructions like auipc
+ fixup_riscv_pcrel_hi20,
+ // fixup_riscv_pcrel_lo12_i - 12-bit fixup corresponding to pcrel_lo(foo) for
+ // instructions like addi
+ fixup_riscv_pcrel_lo12_i,
+ // fixup_riscv_pcrel_lo12_s - 12-bit fixup corresponding to pcrel_lo(foo) for
+ // the S-type store instructions
+ fixup_riscv_pcrel_lo12_s,
+ // fixup_riscv_got_hi20 - 20-bit fixup corresponding to got_pcrel_hi(foo) for
+ // instructions like auipc
+ fixup_riscv_got_hi20,
+ // fixup_riscv_tprel_hi20 - 20-bit fixup corresponding to tprel_hi(foo) for
+ // instructions like lui
+ fixup_riscv_tprel_hi20,
+ // fixup_riscv_tprel_lo12_i - 12-bit fixup corresponding to tprel_lo(foo) for
+ // instructions like addi
+ fixup_riscv_tprel_lo12_i,
+ // fixup_riscv_tprel_lo12_s - 12-bit fixup corresponding to tprel_lo(foo) for
+ // the S-type store instructions
+ fixup_riscv_tprel_lo12_s,
+ // fixup_riscv_tprel_add - A fixup corresponding to %tprel_add(foo) for the
+ // add_tls instruction. Used to provide a hint to the linker.
+ fixup_riscv_tprel_add,
+ // fixup_riscv_tls_got_hi20 - 20-bit fixup corresponding to
+ // tls_ie_pcrel_hi(foo) for instructions like auipc
+ fixup_riscv_tls_got_hi20,
+ // fixup_riscv_tls_gd_hi20 - 20-bit fixup corresponding to
+ // tls_gd_pcrel_hi(foo) for instructions like auipc
+ fixup_riscv_tls_gd_hi20,
+ // fixup_riscv_jal - 20-bit fixup for symbol references in the jal
+ // instruction
+ fixup_riscv_jal,
+ // fixup_riscv_branch - 12-bit fixup for symbol references in the branch
+ // instructions
+ fixup_riscv_branch,
+ // fixup_riscv_rvc_jump - 11-bit fixup for symbol references in the
+ // compressed jump instruction
+ fixup_riscv_rvc_jump,
+ // fixup_riscv_rvc_branch - 8-bit fixup for symbol references in the
+ // compressed branch instruction
+ fixup_riscv_rvc_branch,
+ // fixup_riscv_call - A fixup representing a call attached to the auipc
+ // instruction in a pair composed of adjacent auipc+jalr instructions.
+ fixup_riscv_call,
+ // fixup_riscv_call_plt - A fixup representing a procedure linkage table call
+ // attached to the auipc instruction in a pair composed of adjacent auipc+jalr
+ // instructions.
+ fixup_riscv_call_plt,
+ // fixup_riscv_relax - Used to generate an R_RISCV_RELAX relocation type,
+ // which indicates the linker may relax the instruction pair.
+ fixup_riscv_relax,
+ // fixup_riscv_align - Used to generate an R_RISCV_ALIGN relocation type,
+ // which indicates the linker should fixup the alignment after linker
+ // relaxation.
+ fixup_riscv_align,
+
+ // fixup_riscv_invalid - used as a sentinel and a marker, must be last fixup
+ fixup_riscv_invalid,
+ NumTargetFixupKinds = fixup_riscv_invalid - FirstTargetFixupKind
+};
+} // end namespace RISCV
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
new file mode 100644
index 000000000000..d7452200e05f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
@@ -0,0 +1,126 @@
+//===-- RISCVInstPrinter.cpp - Convert RISCV MCInst to asm syntax ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints a RISCV MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVInstPrinter.h"
+#include "MCTargetDesc/RISCVMCExpr.h"
+#include "Utils/RISCVBaseInfo.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "asm-printer"
+
+// Include the auto-generated portion of the assembly writer.
+#define PRINT_ALIAS_INSTR
+#include "RISCVGenAsmWriter.inc"
+
+// Include the auto-generated portion of the compress emitter.
+#define GEN_UNCOMPRESS_INSTR
+#include "RISCVGenCompressInstEmitter.inc"
+
+static cl::opt<bool>
+ NoAliases("riscv-no-aliases",
+ cl::desc("Disable the emission of assembler pseudo instructions"),
+ cl::init(false), cl::Hidden);
+
+void RISCVInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
+ StringRef Annot, const MCSubtargetInfo &STI) {
+ bool Res = false;
+ const MCInst *NewMI = MI;
+ MCInst UncompressedMI;
+ if (!NoAliases)
+ Res = uncompressInst(UncompressedMI, *MI, MRI, STI);
+ if (Res)
+ NewMI = const_cast<MCInst *>(&UncompressedMI);
+ if (NoAliases || !printAliasInstr(NewMI, STI, O))
+ printInstruction(NewMI, STI, O);
+ printAnnotation(O, Annot);
+}
+
+void RISCVInstPrinter::printRegName(raw_ostream &O, unsigned RegNo) const {
+ O << getRegisterName(RegNo);
+}
+
+void RISCVInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI, raw_ostream &O,
+ const char *Modifier) {
+  assert((Modifier == nullptr || Modifier[0] == 0) && "No modifiers supported");
+ const MCOperand &MO = MI->getOperand(OpNo);
+
+ if (MO.isReg()) {
+ printRegName(O, MO.getReg());
+ return;
+ }
+
+ if (MO.isImm()) {
+ O << MO.getImm();
+ return;
+ }
+
+ assert(MO.isExpr() && "Unknown operand kind in printOperand");
+ MO.getExpr()->print(O, &MAI);
+}
+
+void RISCVInstPrinter::printCSRSystemRegister(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned Imm = MI->getOperand(OpNo).getImm();
+ auto SysReg = RISCVSysReg::lookupSysRegByEncoding(Imm);
+ if (SysReg && SysReg->haveRequiredFeatures(STI.getFeatureBits()))
+ O << SysReg->Name;
+ else
+ O << Imm;
+}
+
+void RISCVInstPrinter::printFenceArg(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned FenceArg = MI->getOperand(OpNo).getImm();
+  assert(((FenceArg >> 4) == 0) && "Invalid immediate in printFenceArg");
+
+ if ((FenceArg & RISCVFenceField::I) != 0)
+ O << 'i';
+ if ((FenceArg & RISCVFenceField::O) != 0)
+ O << 'o';
+ if ((FenceArg & RISCVFenceField::R) != 0)
+ O << 'r';
+ if ((FenceArg & RISCVFenceField::W) != 0)
+ O << 'w';
+ if (FenceArg == 0)
+ O << "unknown";
+}
+
+void RISCVInstPrinter::printFRMArg(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI, raw_ostream &O) {
+ auto FRMArg =
+ static_cast<RISCVFPRndMode::RoundingMode>(MI->getOperand(OpNo).getImm());
+ O << RISCVFPRndMode::roundingModeToString(FRMArg);
+}
+
+void RISCVInstPrinter::printAtomicMemOp(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ const MCOperand &MO = MI->getOperand(OpNo);
+
+ assert(MO.isReg() && "printAtomicMemOp can only print register operands");
+ O << "(";
+ printRegName(O, MO.getReg());
+ O << ")";
+ return;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h
new file mode 100644
index 000000000000..908b2de2ad3c
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h
@@ -0,0 +1,56 @@
+//===-- RISCVInstPrinter.h - Convert RISCV MCInst to asm syntax ---*- C++ -*--//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints a RISCV MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVINSTPRINTER_H
+#define LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVINSTPRINTER_H
+
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "llvm/MC/MCInstPrinter.h"
+
+namespace llvm {
+class MCOperand;
+
+class RISCVInstPrinter : public MCInstPrinter {
+public:
+ RISCVInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI)
+ : MCInstPrinter(MAI, MII, MRI) {}
+
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot,
+ const MCSubtargetInfo &STI) override;
+ void printRegName(raw_ostream &O, unsigned RegNo) const override;
+
+ void printOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
+ raw_ostream &O, const char *Modifier = nullptr);
+ void printCSRSystemRegister(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printFenceArg(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ void printFRMArg(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
+ raw_ostream &O);
+ void printAtomicMemOp(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+
+ // Autogenerated by tblgen.
+ void printInstruction(const MCInst *MI, const MCSubtargetInfo &STI,
+ raw_ostream &O);
+ bool printAliasInstr(const MCInst *MI, const MCSubtargetInfo &STI,
+ raw_ostream &O);
+ void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx,
+ unsigned PrintMethodIdx,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ static const char *getRegisterName(unsigned RegNo,
+ unsigned AltIdx = RISCV::ABIRegAltName);
+};
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp
new file mode 100644
index 000000000000..089a2def4c21
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp
@@ -0,0 +1,47 @@
+//===-- RISCVMCAsmInfo.cpp - RISCV Asm properties -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the definitions of the RISCVMCAsmInfo properties.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVMCAsmInfo.h"
+#include "MCTargetDesc/RISCVMCExpr.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/MC/MCStreamer.h"
+using namespace llvm;
+
+void RISCVMCAsmInfo::anchor() {}
+
+RISCVMCAsmInfo::RISCVMCAsmInfo(const Triple &TT) {
+ CodePointerSize = CalleeSaveStackSlotSize = TT.isArch64Bit() ? 8 : 4;
+ CommentString = "#";
+ AlignmentIsInBytes = false;
+ SupportsDebugInformation = true;
+ ExceptionsType = ExceptionHandling::DwarfCFI;
+ Data16bitsDirective = "\t.half\t";
+ Data32bitsDirective = "\t.word\t";
+}
+
+const MCExpr *RISCVMCAsmInfo::getExprForFDESymbol(const MCSymbol *Sym,
+ unsigned Encoding,
+ MCStreamer &Streamer) const {
+ if (!(Encoding & dwarf::DW_EH_PE_pcrel))
+ return MCAsmInfo::getExprForFDESymbol(Sym, Encoding, Streamer);
+
+ // The default symbol subtraction results in an ADD/SUB relocation pair.
+ // Processing this relocation pair is problematic when linker relaxation is
+ // enabled, so we follow binutils in using the R_RISCV_32_PCREL relocation
+ // for the FDE initial location.
+ MCContext &Ctx = Streamer.getContext();
+ const MCExpr *ME =
+ MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, Ctx);
+ assert(Encoding & dwarf::DW_EH_PE_sdata4 && "Unexpected encoding");
+ return RISCVMCExpr::create(ME, RISCVMCExpr::VK_RISCV_32_PCREL, Ctx);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.h b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.h
new file mode 100644
index 000000000000..6824baf699aa
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.h
@@ -0,0 +1,33 @@
+//===-- RISCVMCAsmInfo.h - RISCV Asm Info ----------------------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the RISCVMCAsmInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVMCASMINFO_H
+#define LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVMCASMINFO_H
+
+#include "llvm/MC/MCAsmInfoELF.h"
+
+namespace llvm {
+class Triple;
+
+class RISCVMCAsmInfo : public MCAsmInfoELF {
+ void anchor() override;
+
+public:
+ explicit RISCVMCAsmInfo(const Triple &TargetTriple);
+
+ const MCExpr *getExprForFDESymbol(const MCSymbol *Sym, unsigned Encoding,
+ MCStreamer &Streamer) const override;
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
new file mode 100644
index 000000000000..6fd8552eeaaa
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
@@ -0,0 +1,370 @@
+//===-- RISCVMCCodeEmitter.cpp - Convert RISCV code to machine code -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the RISCVMCCodeEmitter class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/RISCVFixupKinds.h"
+#include "MCTargetDesc/RISCVMCExpr.h"
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "Utils/RISCVBaseInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstBuilder.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "mccodeemitter"
+
+STATISTIC(MCNumEmitted, "Number of MC instructions emitted");
+STATISTIC(MCNumFixups, "Number of MC fixups created");
+
+namespace {
+class RISCVMCCodeEmitter : public MCCodeEmitter {
+ RISCVMCCodeEmitter(const RISCVMCCodeEmitter &) = delete;
+ void operator=(const RISCVMCCodeEmitter &) = delete;
+ MCContext &Ctx;
+ MCInstrInfo const &MCII;
+
+public:
+ RISCVMCCodeEmitter(MCContext &ctx, MCInstrInfo const &MCII)
+ : Ctx(ctx), MCII(MCII) {}
+
+ ~RISCVMCCodeEmitter() override {}
+
+ void encodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
+
+ void expandFunctionCall(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ void expandAddTPRel(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// TableGen'erated function for getting the binary encoding for an
+ /// instruction.
+ uint64_t getBinaryCodeForInstr(const MCInst &MI,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// Return binary encoding of operand. If the machine operand requires
+ /// relocation, record the relocation and return zero.
+ unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ unsigned getImmOpValueAsr1(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ unsigned getImmOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+};
+} // end anonymous namespace
+
+MCCodeEmitter *llvm::createRISCVMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ MCContext &Ctx) {
+ return new RISCVMCCodeEmitter(Ctx, MCII);
+}
+
+// Expand PseudoCALL(Reg) and PseudoTAIL to AUIPC and JALR with relocation
+// types. Because the expansion happens at encoding time, the AUIPC and JALR
+// won't go through the RISCV MC-to-MC compressed instruction transformation.
+// This is acceptable because AUIPC has no 16-bit form and C_JALR has no
+// immediate operand field; we let linker relaxation deal with it. When linker
+// relaxation is enabled, AUIPC and JALR may relax to JAL, and if the C
+// extension is enabled, JAL may in turn relax to C_JAL.
+void RISCVMCCodeEmitter::expandFunctionCall(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ MCInst TmpInst;
+ MCOperand Func;
+ unsigned Ra;
+ if (MI.getOpcode() == RISCV::PseudoTAIL) {
+ Func = MI.getOperand(0);
+ Ra = RISCV::X6;
+ } else if (MI.getOpcode() == RISCV::PseudoCALLReg) {
+ Func = MI.getOperand(1);
+ Ra = MI.getOperand(0).getReg();
+ } else {
+ Func = MI.getOperand(0);
+ Ra = RISCV::X1;
+ }
+ uint32_t Binary;
+
+ assert(Func.isExpr() && "Expected expression");
+
+ const MCExpr *CallExpr = Func.getExpr();
+
+ // Emit AUIPC Ra, Func with R_RISCV_CALL relocation type.
+ TmpInst = MCInstBuilder(RISCV::AUIPC)
+ .addReg(Ra)
+ .addOperand(MCOperand::createExpr(CallExpr));
+ Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
+ support::endian::write(OS, Binary, support::little);
+
+ if (MI.getOpcode() == RISCV::PseudoTAIL)
+ // Emit JALR X0, X6, 0
+ TmpInst = MCInstBuilder(RISCV::JALR).addReg(RISCV::X0).addReg(Ra).addImm(0);
+ else
+ // Emit JALR Ra, Ra, 0
+ TmpInst = MCInstBuilder(RISCV::JALR).addReg(Ra).addReg(Ra).addImm(0);
+ Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
+ support::endian::write(OS, Binary, support::little);
+}
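+// Illustrative expansion, as it would appear in a disassembly before
+// linking: "call foo" becomes
+//   auipc ra, 0        # fixup_riscv_call against foo
+//   jalr  ra, ra, 0
+// while "tail foo" becomes
+//   auipc t1, 0        # fixup_riscv_call against foo
+//   jalr  zero, t1, 0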
+
+// Expand PseudoAddTPRel to a simple ADD with the correct relocation.
+void RISCVMCCodeEmitter::expandAddTPRel(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ MCOperand DestReg = MI.getOperand(0);
+ MCOperand SrcReg = MI.getOperand(1);
+ MCOperand TPReg = MI.getOperand(2);
+ assert(TPReg.isReg() && TPReg.getReg() == RISCV::X4 &&
+ "Expected thread pointer as second input to TP-relative add");
+
+ MCOperand SrcSymbol = MI.getOperand(3);
+ assert(SrcSymbol.isExpr() &&
+ "Expected expression as third input to TP-relative add");
+
+ const RISCVMCExpr *Expr = dyn_cast<RISCVMCExpr>(SrcSymbol.getExpr());
+ assert(Expr && Expr->getKind() == RISCVMCExpr::VK_RISCV_TPREL_ADD &&
+ "Expected tprel_add relocation on TP-relative symbol");
+
+ // Emit the correct tprel_add relocation for the symbol.
+ Fixups.push_back(MCFixup::create(
+ 0, Expr, MCFixupKind(RISCV::fixup_riscv_tprel_add), MI.getLoc()));
+
+  // Emit fixup_riscv_relax for tprel_add when the relax feature is enabled.
+ if (STI.getFeatureBits()[RISCV::FeatureRelax]) {
+ const MCConstantExpr *Dummy = MCConstantExpr::create(0, Ctx);
+ Fixups.push_back(MCFixup::create(
+ 0, Dummy, MCFixupKind(RISCV::fixup_riscv_relax), MI.getLoc()));
+ }
+
+ // Emit a normal ADD instruction with the given operands.
+ MCInst TmpInst = MCInstBuilder(RISCV::ADD)
+ .addOperand(DestReg)
+ .addOperand(SrcReg)
+ .addOperand(TPReg);
+ uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
+ support::endian::write(OS, Binary, support::little);
+}
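+// Illustrative context: this pseudo is the middle step of the canonical
+// local-exec TLS sequence for a thread-local symbol "sym":
+//   lui   a0, %tprel_hi(sym)
+//   add   a0, a0, tp, %tprel_add(sym)   # expanded here into a plain ADD
+//   addi  a0, a0, %tprel_lo(sym)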
+
+void RISCVMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
+ // Get byte count of instruction.
+ unsigned Size = Desc.getSize();
+
+ if (MI.getOpcode() == RISCV::PseudoCALLReg ||
+ MI.getOpcode() == RISCV::PseudoCALL ||
+ MI.getOpcode() == RISCV::PseudoTAIL) {
+ expandFunctionCall(MI, OS, Fixups, STI);
+ MCNumEmitted += 2;
+ return;
+ }
+
+ if (MI.getOpcode() == RISCV::PseudoAddTPRel) {
+ expandAddTPRel(MI, OS, Fixups, STI);
+ MCNumEmitted += 1;
+ return;
+ }
+
+ switch (Size) {
+ default:
+ llvm_unreachable("Unhandled encodeInstruction length!");
+ case 2: {
+ uint16_t Bits = getBinaryCodeForInstr(MI, Fixups, STI);
+ support::endian::write<uint16_t>(OS, Bits, support::little);
+ break;
+ }
+ case 4: {
+ uint32_t Bits = getBinaryCodeForInstr(MI, Fixups, STI);
+ support::endian::write(OS, Bits, support::little);
+ break;
+ }
+ }
+
+ ++MCNumEmitted; // Keep track of the # of mi's emitted.
+}
+
+unsigned
+RISCVMCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+
+ if (MO.isReg())
+ return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
+
+ if (MO.isImm())
+ return static_cast<unsigned>(MO.getImm());
+
+ llvm_unreachable("Unhandled expression!");
+ return 0;
+}
+
+unsigned
+RISCVMCCodeEmitter::getImmOpValueAsr1(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+
+ if (MO.isImm()) {
+ unsigned Res = MO.getImm();
+ assert((Res & 1) == 0 && "LSB is non-zero");
+ return Res >> 1;
+ }
+
+ return getImmOpValue(MI, OpNo, Fixups, STI);
+}
+
+unsigned RISCVMCCodeEmitter::getImmOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ bool EnableRelax = STI.getFeatureBits()[RISCV::FeatureRelax];
+ const MCOperand &MO = MI.getOperand(OpNo);
+
+ MCInstrDesc const &Desc = MCII.get(MI.getOpcode());
+ unsigned MIFrm = Desc.TSFlags & RISCVII::InstFormatMask;
+
+  // If the operand is an immediate, there is nothing to do.
+ if (MO.isImm())
+ return MO.getImm();
+
+ assert(MO.isExpr() &&
+ "getImmOpValue expects only expressions or immediates");
+ const MCExpr *Expr = MO.getExpr();
+ MCExpr::ExprKind Kind = Expr->getKind();
+ RISCV::Fixups FixupKind = RISCV::fixup_riscv_invalid;
+ bool RelaxCandidate = false;
+ if (Kind == MCExpr::Target) {
+ const RISCVMCExpr *RVExpr = cast<RISCVMCExpr>(Expr);
+
+ switch (RVExpr->getKind()) {
+ case RISCVMCExpr::VK_RISCV_None:
+ case RISCVMCExpr::VK_RISCV_Invalid:
+ case RISCVMCExpr::VK_RISCV_32_PCREL:
+ llvm_unreachable("Unhandled fixup kind!");
+ case RISCVMCExpr::VK_RISCV_TPREL_ADD:
+ // tprel_add is only used to indicate that a relocation should be emitted
+ // for an add instruction used in TP-relative addressing. It should not be
+ // expanded as if representing an actual instruction operand and so to
+ // encounter it here is an error.
+ llvm_unreachable(
+ "VK_RISCV_TPREL_ADD should not represent an instruction operand");
+ case RISCVMCExpr::VK_RISCV_LO:
+ if (MIFrm == RISCVII::InstFormatI)
+ FixupKind = RISCV::fixup_riscv_lo12_i;
+ else if (MIFrm == RISCVII::InstFormatS)
+ FixupKind = RISCV::fixup_riscv_lo12_s;
+ else
+ llvm_unreachable("VK_RISCV_LO used with unexpected instruction format");
+ RelaxCandidate = true;
+ break;
+ case RISCVMCExpr::VK_RISCV_HI:
+ FixupKind = RISCV::fixup_riscv_hi20;
+ RelaxCandidate = true;
+ break;
+ case RISCVMCExpr::VK_RISCV_PCREL_LO:
+ if (MIFrm == RISCVII::InstFormatI)
+ FixupKind = RISCV::fixup_riscv_pcrel_lo12_i;
+ else if (MIFrm == RISCVII::InstFormatS)
+ FixupKind = RISCV::fixup_riscv_pcrel_lo12_s;
+ else
+ llvm_unreachable(
+ "VK_RISCV_PCREL_LO used with unexpected instruction format");
+ RelaxCandidate = true;
+ break;
+ case RISCVMCExpr::VK_RISCV_PCREL_HI:
+ FixupKind = RISCV::fixup_riscv_pcrel_hi20;
+ RelaxCandidate = true;
+ break;
+ case RISCVMCExpr::VK_RISCV_GOT_HI:
+ FixupKind = RISCV::fixup_riscv_got_hi20;
+ break;
+ case RISCVMCExpr::VK_RISCV_TPREL_LO:
+ if (MIFrm == RISCVII::InstFormatI)
+ FixupKind = RISCV::fixup_riscv_tprel_lo12_i;
+ else if (MIFrm == RISCVII::InstFormatS)
+ FixupKind = RISCV::fixup_riscv_tprel_lo12_s;
+ else
+ llvm_unreachable(
+ "VK_RISCV_TPREL_LO used with unexpected instruction format");
+ RelaxCandidate = true;
+ break;
+ case RISCVMCExpr::VK_RISCV_TPREL_HI:
+ FixupKind = RISCV::fixup_riscv_tprel_hi20;
+ RelaxCandidate = true;
+ break;
+ case RISCVMCExpr::VK_RISCV_TLS_GOT_HI:
+ FixupKind = RISCV::fixup_riscv_tls_got_hi20;
+ break;
+ case RISCVMCExpr::VK_RISCV_TLS_GD_HI:
+ FixupKind = RISCV::fixup_riscv_tls_gd_hi20;
+ break;
+ case RISCVMCExpr::VK_RISCV_CALL:
+ FixupKind = RISCV::fixup_riscv_call;
+ RelaxCandidate = true;
+ break;
+ case RISCVMCExpr::VK_RISCV_CALL_PLT:
+ FixupKind = RISCV::fixup_riscv_call_plt;
+ RelaxCandidate = true;
+ break;
+ }
+ } else if (Kind == MCExpr::SymbolRef &&
+ cast<MCSymbolRefExpr>(Expr)->getKind() == MCSymbolRefExpr::VK_None) {
+ if (Desc.getOpcode() == RISCV::JAL) {
+ FixupKind = RISCV::fixup_riscv_jal;
+ } else if (MIFrm == RISCVII::InstFormatB) {
+ FixupKind = RISCV::fixup_riscv_branch;
+ } else if (MIFrm == RISCVII::InstFormatCJ) {
+ FixupKind = RISCV::fixup_riscv_rvc_jump;
+ } else if (MIFrm == RISCVII::InstFormatCB) {
+ FixupKind = RISCV::fixup_riscv_rvc_branch;
+ }
+ }
+
+ assert(FixupKind != RISCV::fixup_riscv_invalid && "Unhandled expression!");
+
+ Fixups.push_back(
+ MCFixup::create(0, Expr, MCFixupKind(FixupKind), MI.getLoc()));
+ ++MCNumFixups;
+
+ // Ensure an R_RISCV_RELAX relocation will be emitted if linker relaxation is
+ // enabled and the current fixup will result in a relocation that may be
+ // relaxed.
+ if (EnableRelax && RelaxCandidate) {
+ const MCConstantExpr *Dummy = MCConstantExpr::create(0, Ctx);
+ Fixups.push_back(
+ MCFixup::create(0, Dummy, MCFixupKind(RISCV::fixup_riscv_relax),
+ MI.getLoc()));
+ ++MCNumFixups;
+ }
+
+ return 0;
+}
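+// Illustrative effect: for "lui a0, %hi(sym)" with the relax feature
+// enabled, two fixups are recorded at the same offset, so the object file
+// carries an R_RISCV_HI20 relocation against sym paired with an
+// R_RISCV_RELAX relocation that permits later linker shrinking.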
+
+#include "RISCVGenMCCodeEmitter.inc"
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
new file mode 100644
index 000000000000..ae25ec818171
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
@@ -0,0 +1,298 @@
+//===-- RISCVMCExpr.cpp - RISCV specific MC expression classes ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of the assembly expression modifiers
+// accepted by the RISCV architecture (e.g. "%lo", "%pcrel_hi", ...).
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVMCExpr.h"
+#include "MCTargetDesc/RISCVAsmBackend.h"
+#include "RISCV.h"
+#include "RISCVFixupKinds.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscvmcexpr"
+
+const RISCVMCExpr *RISCVMCExpr::create(const MCExpr *Expr, VariantKind Kind,
+ MCContext &Ctx) {
+ return new (Ctx) RISCVMCExpr(Expr, Kind);
+}
+
+void RISCVMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
+ VariantKind Kind = getKind();
+ bool HasVariant = ((Kind != VK_RISCV_None) && (Kind != VK_RISCV_CALL) &&
+ (Kind != VK_RISCV_CALL_PLT));
+
+ if (HasVariant)
+ OS << '%' << getVariantKindName(getKind()) << '(';
+ Expr->print(OS, MAI);
+ if (Kind == VK_RISCV_CALL_PLT)
+ OS << "@plt";
+ if (HasVariant)
+ OS << ')';
+}
+
+const MCFixup *RISCVMCExpr::getPCRelHiFixup() const {
+ MCValue AUIPCLoc;
+ if (!getSubExpr()->evaluateAsRelocatable(AUIPCLoc, nullptr, nullptr))
+ return nullptr;
+
+ const MCSymbolRefExpr *AUIPCSRE = AUIPCLoc.getSymA();
+ if (!AUIPCSRE)
+ return nullptr;
+
+ const MCSymbol *AUIPCSymbol = &AUIPCSRE->getSymbol();
+ const auto *DF = dyn_cast_or_null<MCDataFragment>(AUIPCSymbol->getFragment());
+
+ if (!DF)
+ return nullptr;
+
+ uint64_t Offset = AUIPCSymbol->getOffset();
+ if (DF->getContents().size() == Offset) {
+ DF = dyn_cast_or_null<MCDataFragment>(DF->getNextNode());
+ if (!DF)
+ return nullptr;
+ Offset = 0;
+ }
+
+ for (const MCFixup &F : DF->getFixups()) {
+ if (F.getOffset() != Offset)
+ continue;
+
+ switch ((unsigned)F.getKind()) {
+ default:
+ continue;
+ case RISCV::fixup_riscv_got_hi20:
+ case RISCV::fixup_riscv_tls_got_hi20:
+ case RISCV::fixup_riscv_tls_gd_hi20:
+ case RISCV::fixup_riscv_pcrel_hi20:
+ return &F;
+ }
+ }
+
+ return nullptr;
+}
+
+bool RISCVMCExpr::evaluatePCRelLo(MCValue &Res, const MCAsmLayout *Layout,
+ const MCFixup *Fixup) const {
+  // VK_RISCV_PCREL_LO has to be handled specially. The MCExpr inside is
+  // actually the location of an auipc instruction with a VK_RISCV_PCREL_HI
+  // fixup pointing to the real target. We need to generate an MCValue in the
+  // form of (<real target> + <offset from this fixup to the auipc fixup>):
+  // because the fixup is PC-relative to the VK_RISCV_PCREL_LO location, the
+  // offset from it to the VK_RISCV_PCREL_HI fixup must be added to correct it.
+
+ // Don't try to evaluate if the fixup will be forced as a relocation (e.g.
+ // as linker relaxation is enabled). If we evaluated pcrel_lo in this case,
+ // the modified fixup will be converted into a relocation that no longer
+ // points to the pcrel_hi as the linker requires.
+ auto &RAB =
+ static_cast<RISCVAsmBackend &>(Layout->getAssembler().getBackend());
+ if (RAB.willForceRelocations())
+ return false;
+
+ MCValue AUIPCLoc;
+ if (!getSubExpr()->evaluateAsValue(AUIPCLoc, *Layout))
+ return false;
+
+ const MCSymbolRefExpr *AUIPCSRE = AUIPCLoc.getSymA();
+ // Don't try to evaluate %pcrel_hi/%pcrel_lo pairs that cross fragment
+  // boundaries.
+ if (!AUIPCSRE ||
+ findAssociatedFragment() != AUIPCSRE->findAssociatedFragment())
+ return false;
+
+ const MCSymbol *AUIPCSymbol = &AUIPCSRE->getSymbol();
+ if (!AUIPCSymbol)
+ return false;
+
+ const MCFixup *TargetFixup = getPCRelHiFixup();
+ if (!TargetFixup)
+ return false;
+
+ if ((unsigned)TargetFixup->getKind() != RISCV::fixup_riscv_pcrel_hi20)
+ return false;
+
+ MCValue Target;
+ if (!TargetFixup->getValue()->evaluateAsValue(Target, *Layout))
+ return false;
+
+ if (!Target.getSymA() || !Target.getSymA()->getSymbol().isInSection())
+ return false;
+
+ if (&Target.getSymA()->getSymbol().getSection() !=
+ findAssociatedFragment()->getParent())
+ return false;
+
+ uint64_t AUIPCOffset = AUIPCSymbol->getOffset();
+
+ Res = MCValue::get(Target.getSymA(), nullptr,
+ Target.getConstant() + (Fixup->getOffset() - AUIPCOffset));
+ return true;
+}
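+// The shape being matched is the canonical PC-relative pair, e.g.:
+//   .Lpcrel_hi0:
+//     auipc a0, %pcrel_hi(sym)
+//     addi  a0, a0, %pcrel_lo(.Lpcrel_hi0)
+// where the %pcrel_lo operand names the label on the auipc rather than the
+// real target.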
+
+bool RISCVMCExpr::evaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout,
+ const MCFixup *Fixup) const {
+ if (Kind == VK_RISCV_PCREL_LO && evaluatePCRelLo(Res, Layout, Fixup))
+ return true;
+
+ if (!getSubExpr()->evaluateAsRelocatable(Res, Layout, Fixup))
+ return false;
+
+  // Some custom fixup types are not valid with symbol difference expressions.
+ if (Res.getSymA() && Res.getSymB()) {
+ switch (getKind()) {
+ default:
+ return true;
+ case VK_RISCV_LO:
+ case VK_RISCV_HI:
+ case VK_RISCV_PCREL_LO:
+ case VK_RISCV_PCREL_HI:
+ case VK_RISCV_GOT_HI:
+ case VK_RISCV_TPREL_LO:
+ case VK_RISCV_TPREL_HI:
+ case VK_RISCV_TPREL_ADD:
+ case VK_RISCV_TLS_GOT_HI:
+ case VK_RISCV_TLS_GD_HI:
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void RISCVMCExpr::visitUsedExpr(MCStreamer &Streamer) const {
+ Streamer.visitUsedExpr(*getSubExpr());
+}
+
+RISCVMCExpr::VariantKind RISCVMCExpr::getVariantKindForName(StringRef name) {
+ return StringSwitch<RISCVMCExpr::VariantKind>(name)
+ .Case("lo", VK_RISCV_LO)
+ .Case("hi", VK_RISCV_HI)
+ .Case("pcrel_lo", VK_RISCV_PCREL_LO)
+ .Case("pcrel_hi", VK_RISCV_PCREL_HI)
+ .Case("got_pcrel_hi", VK_RISCV_GOT_HI)
+ .Case("tprel_lo", VK_RISCV_TPREL_LO)
+ .Case("tprel_hi", VK_RISCV_TPREL_HI)
+ .Case("tprel_add", VK_RISCV_TPREL_ADD)
+ .Case("tls_ie_pcrel_hi", VK_RISCV_TLS_GOT_HI)
+ .Case("tls_gd_pcrel_hi", VK_RISCV_TLS_GD_HI)
+ .Default(VK_RISCV_Invalid);
+}
+
+StringRef RISCVMCExpr::getVariantKindName(VariantKind Kind) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Invalid ELF symbol kind");
+ case VK_RISCV_LO:
+ return "lo";
+ case VK_RISCV_HI:
+ return "hi";
+ case VK_RISCV_PCREL_LO:
+ return "pcrel_lo";
+ case VK_RISCV_PCREL_HI:
+ return "pcrel_hi";
+ case VK_RISCV_GOT_HI:
+ return "got_pcrel_hi";
+ case VK_RISCV_TPREL_LO:
+ return "tprel_lo";
+ case VK_RISCV_TPREL_HI:
+ return "tprel_hi";
+ case VK_RISCV_TPREL_ADD:
+ return "tprel_add";
+ case VK_RISCV_TLS_GOT_HI:
+ return "tls_ie_pcrel_hi";
+ case VK_RISCV_TLS_GD_HI:
+ return "tls_gd_pcrel_hi";
+ }
+}
+
+static void fixELFSymbolsInTLSFixupsImpl(const MCExpr *Expr, MCAssembler &Asm) {
+ switch (Expr->getKind()) {
+ case MCExpr::Target:
+ llvm_unreachable("Can't handle nested target expression");
+ break;
+ case MCExpr::Constant:
+ break;
+
+ case MCExpr::Binary: {
+ const MCBinaryExpr *BE = cast<MCBinaryExpr>(Expr);
+ fixELFSymbolsInTLSFixupsImpl(BE->getLHS(), Asm);
+ fixELFSymbolsInTLSFixupsImpl(BE->getRHS(), Asm);
+ break;
+ }
+
+ case MCExpr::SymbolRef: {
+ // We're known to be under a TLS fixup, so any symbol should be
+ // modified. There should be only one.
+ const MCSymbolRefExpr &SymRef = *cast<MCSymbolRefExpr>(Expr);
+ cast<MCSymbolELF>(SymRef.getSymbol()).setType(ELF::STT_TLS);
+ break;
+ }
+
+ case MCExpr::Unary:
+ fixELFSymbolsInTLSFixupsImpl(cast<MCUnaryExpr>(Expr)->getSubExpr(), Asm);
+ break;
+ }
+}
+
+void RISCVMCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {
+ switch (getKind()) {
+ default:
+ return;
+ case VK_RISCV_TPREL_HI:
+ case VK_RISCV_TLS_GOT_HI:
+ case VK_RISCV_TLS_GD_HI:
+ break;
+ }
+
+ fixELFSymbolsInTLSFixupsImpl(getSubExpr(), Asm);
+}
+
+bool RISCVMCExpr::evaluateAsConstant(int64_t &Res) const {
+ MCValue Value;
+
+ if (Kind == VK_RISCV_PCREL_HI || Kind == VK_RISCV_PCREL_LO ||
+ Kind == VK_RISCV_GOT_HI || Kind == VK_RISCV_TPREL_HI ||
+ Kind == VK_RISCV_TPREL_LO || Kind == VK_RISCV_TPREL_ADD ||
+ Kind == VK_RISCV_TLS_GOT_HI || Kind == VK_RISCV_TLS_GD_HI ||
+ Kind == VK_RISCV_CALL || Kind == VK_RISCV_CALL_PLT)
+ return false;
+
+ if (!getSubExpr()->evaluateAsRelocatable(Value, nullptr, nullptr))
+ return false;
+
+ if (!Value.isAbsolute())
+ return false;
+
+ Res = evaluateAsInt64(Value.getConstant());
+ return true;
+}
+
+int64_t RISCVMCExpr::evaluateAsInt64(int64_t Value) const {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Invalid kind");
+ case VK_RISCV_LO:
+ return SignExtend64<12>(Value);
+ case VK_RISCV_HI:
+ // Add 1 if bit 11 is 1, to compensate for low 12 bits being negative.
+ return ((Value + 0x800) >> 12) & 0xfffff;
+ }
+}
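+// Worked example: for Value = 0x12345800, VK_RISCV_LO sign-extends the low
+// twelve bits (0x800) to -2048, and VK_RISCV_HI yields
+// (0x12345800 + 0x800) >> 12 = 0x12346; (0x12346 << 12) + (-2048)
+// reassembles to the original 0x12345800.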
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h
new file mode 100644
index 000000000000..921df376f3df
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h
@@ -0,0 +1,94 @@
+//===-- RISCVMCExpr.h - RISCV specific MC expression classes ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes RISCV-specific MCExprs, used for modifiers like
+// "%hi" or "%lo" etc.,
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVMCEXPR_H
+#define LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVMCEXPR_H
+
+#include "llvm/MC/MCExpr.h"
+
+namespace llvm {
+
+class StringRef;
+class MCOperand;
+class RISCVMCExpr : public MCTargetExpr {
+public:
+ enum VariantKind {
+ VK_RISCV_None,
+ VK_RISCV_LO,
+ VK_RISCV_HI,
+ VK_RISCV_PCREL_LO,
+ VK_RISCV_PCREL_HI,
+ VK_RISCV_GOT_HI,
+ VK_RISCV_TPREL_LO,
+ VK_RISCV_TPREL_HI,
+ VK_RISCV_TPREL_ADD,
+ VK_RISCV_TLS_GOT_HI,
+ VK_RISCV_TLS_GD_HI,
+ VK_RISCV_CALL,
+ VK_RISCV_CALL_PLT,
+ VK_RISCV_32_PCREL,
+ VK_RISCV_Invalid
+ };
+
+private:
+ const MCExpr *Expr;
+ const VariantKind Kind;
+
+ int64_t evaluateAsInt64(int64_t Value) const;
+
+ bool evaluatePCRelLo(MCValue &Res, const MCAsmLayout *Layout,
+ const MCFixup *Fixup) const;
+
+ explicit RISCVMCExpr(const MCExpr *Expr, VariantKind Kind)
+ : Expr(Expr), Kind(Kind) {}
+
+public:
+ static const RISCVMCExpr *create(const MCExpr *Expr, VariantKind Kind,
+ MCContext &Ctx);
+
+ VariantKind getKind() const { return Kind; }
+
+ const MCExpr *getSubExpr() const { return Expr; }
+
+ /// Get the corresponding PC-relative HI fixup that a VK_RISCV_PCREL_LO
+ /// points to.
+ ///
+ /// \returns nullptr if this isn't a VK_RISCV_PCREL_LO pointing to a
+ /// known PC-relative HI fixup.
+ const MCFixup *getPCRelHiFixup() const;
+
+ void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const override;
+ bool evaluateAsRelocatableImpl(MCValue &Res, const MCAsmLayout *Layout,
+ const MCFixup *Fixup) const override;
+ void visitUsedExpr(MCStreamer &Streamer) const override;
+ MCFragment *findAssociatedFragment() const override {
+ return getSubExpr()->findAssociatedFragment();
+ }
+
+ void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override;
+
+ bool evaluateAsConstant(int64_t &Res) const;
+
+ static bool classof(const MCExpr *E) {
+ return E->getKind() == MCExpr::Target;
+ }
+
+ static bool classof(const RISCVMCExpr *) { return true; }
+
+ static VariantKind getVariantKindForName(StringRef name);
+ static StringRef getVariantKindName(VariantKind Kind);
+};
+
+} // end namespace llvm.
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.cpp
new file mode 100644
index 000000000000..bc45262ab2de
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.cpp
@@ -0,0 +1,108 @@
+//===-- RISCVMCTargetDesc.cpp - RISCV Target Descriptions -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file provides RISCV-specific target descriptions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "RISCVMCTargetDesc.h"
+#include "RISCVELFStreamer.h"
+#include "RISCVInstPrinter.h"
+#include "RISCVMCAsmInfo.h"
+#include "RISCVTargetStreamer.h"
+#include "TargetInfo/RISCVTargetInfo.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+
+#define GET_INSTRINFO_MC_DESC
+#include "RISCVGenInstrInfo.inc"
+
+#define GET_REGINFO_MC_DESC
+#include "RISCVGenRegisterInfo.inc"
+
+#define GET_SUBTARGETINFO_MC_DESC
+#include "RISCVGenSubtargetInfo.inc"
+
+using namespace llvm;
+
+static MCInstrInfo *createRISCVMCInstrInfo() {
+ MCInstrInfo *X = new MCInstrInfo();
+ InitRISCVMCInstrInfo(X);
+ return X;
+}
+
+static MCRegisterInfo *createRISCVMCRegisterInfo(const Triple &TT) {
+ MCRegisterInfo *X = new MCRegisterInfo();
+ InitRISCVMCRegisterInfo(X, RISCV::X1);
+ return X;
+}
+
+static MCAsmInfo *createRISCVMCAsmInfo(const MCRegisterInfo &MRI,
+ const Triple &TT) {
+ MCAsmInfo *MAI = new RISCVMCAsmInfo(TT);
+
+ unsigned SP = MRI.getDwarfRegNum(RISCV::X2, true);
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, SP, 0);
+ MAI->addInitialFrameState(Inst);
+
+ return MAI;
+}
+
+static MCSubtargetInfo *createRISCVMCSubtargetInfo(const Triple &TT,
+ StringRef CPU, StringRef FS) {
+ std::string CPUName = CPU;
+ if (CPUName.empty())
+ CPUName = TT.isArch64Bit() ? "generic-rv64" : "generic-rv32";
+ return createRISCVMCSubtargetInfoImpl(TT, CPUName, FS);
+}
+
+static MCInstPrinter *createRISCVMCInstPrinter(const Triple &T,
+ unsigned SyntaxVariant,
+ const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI) {
+ return new RISCVInstPrinter(MAI, MII, MRI);
+}
+
+static MCTargetStreamer *
+createRISCVObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI) {
+ const Triple &TT = STI.getTargetTriple();
+ if (TT.isOSBinFormatELF())
+ return new RISCVTargetELFStreamer(S, STI);
+ return nullptr;
+}
+
+static MCTargetStreamer *createRISCVAsmTargetStreamer(MCStreamer &S,
+ formatted_raw_ostream &OS,
+ MCInstPrinter *InstPrint,
+ bool isVerboseAsm) {
+ return new RISCVTargetAsmStreamer(S, OS);
+}
+
+extern "C" void LLVMInitializeRISCVTargetMC() {
+ for (Target *T : {&getTheRISCV32Target(), &getTheRISCV64Target()}) {
+ TargetRegistry::RegisterMCAsmInfo(*T, createRISCVMCAsmInfo);
+ TargetRegistry::RegisterMCInstrInfo(*T, createRISCVMCInstrInfo);
+ TargetRegistry::RegisterMCRegInfo(*T, createRISCVMCRegisterInfo);
+ TargetRegistry::RegisterMCAsmBackend(*T, createRISCVAsmBackend);
+ TargetRegistry::RegisterMCCodeEmitter(*T, createRISCVMCCodeEmitter);
+ TargetRegistry::RegisterMCInstPrinter(*T, createRISCVMCInstPrinter);
+ TargetRegistry::RegisterMCSubtargetInfo(*T, createRISCVMCSubtargetInfo);
+ TargetRegistry::RegisterObjectTargetStreamer(
+ *T, createRISCVObjectTargetStreamer);
+
+ // Register the asm target streamer.
+ TargetRegistry::RegisterAsmTargetStreamer(*T, createRISCVAsmTargetStreamer);
+ }
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.h b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.h
new file mode 100644
index 000000000000..b30997533ddf
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.h
@@ -0,0 +1,58 @@
+//===-- RISCVMCTargetDesc.h - RISCV Target Descriptions ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides RISCV specific target descriptions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVMCTARGETDESC_H
+#define LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVMCTARGETDESC_H
+
+#include "llvm/Config/config.h"
+#include "llvm/MC/MCTargetOptions.h"
+#include "llvm/Support/DataTypes.h"
+#include <memory>
+
+namespace llvm {
+class MCAsmBackend;
+class MCCodeEmitter;
+class MCContext;
+class MCInstrInfo;
+class MCObjectTargetWriter;
+class MCRegisterInfo;
+class MCSubtargetInfo;
+class StringRef;
+class Target;
+class Triple;
+class raw_ostream;
+class raw_pwrite_stream;
+
+MCCodeEmitter *createRISCVMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ MCContext &Ctx);
+
+MCAsmBackend *createRISCVAsmBackend(const Target &T, const MCSubtargetInfo &STI,
+ const MCRegisterInfo &MRI,
+ const MCTargetOptions &Options);
+
+std::unique_ptr<MCObjectTargetWriter> createRISCVELFObjectWriter(uint8_t OSABI,
+ bool Is64Bit);
+} // namespace llvm
+
+// Defines symbolic names for RISC-V registers.
+#define GET_REGINFO_ENUM
+#include "RISCVGenRegisterInfo.inc"
+
+// Defines symbolic names for RISC-V instructions.
+#define GET_INSTRINFO_ENUM
+#include "RISCVGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_ENUM
+#include "RISCVGenSubtargetInfo.inc"
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
new file mode 100644
index 000000000000..913e1f744192
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
@@ -0,0 +1,47 @@
+//===-- RISCVTargetStreamer.cpp - RISCV Target Streamer Methods -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides RISCV specific target streamer methods.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVTargetStreamer.h"
+#include "llvm/Support/FormattedStream.h"
+
+using namespace llvm;
+
+RISCVTargetStreamer::RISCVTargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {}
+
+// This part is for ASCII assembly output.
+RISCVTargetAsmStreamer::RISCVTargetAsmStreamer(MCStreamer &S,
+ formatted_raw_ostream &OS)
+ : RISCVTargetStreamer(S), OS(OS) {}
+
+void RISCVTargetAsmStreamer::emitDirectiveOptionPush() {
+ OS << "\t.option\tpush\n";
+}
+
+void RISCVTargetAsmStreamer::emitDirectiveOptionPop() {
+ OS << "\t.option\tpop\n";
+}
+
+void RISCVTargetAsmStreamer::emitDirectiveOptionRVC() {
+ OS << "\t.option\trvc\n";
+}
+
+void RISCVTargetAsmStreamer::emitDirectiveOptionNoRVC() {
+ OS << "\t.option\tnorvc\n";
+}
+
+void RISCVTargetAsmStreamer::emitDirectiveOptionRelax() {
+ OS << "\t.option\trelax\n";
+}
+
+void RISCVTargetAsmStreamer::emitDirectiveOptionNoRelax() {
+ OS << "\t.option\tnorelax\n";
+}
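+// Typical emitted form, bracketing a region that must stay uncompressed:
+//   .option push
+//   .option norvc
+//   ...
+//   .option pop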
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h
new file mode 100644
index 000000000000..1becc134b2a2
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h
@@ -0,0 +1,44 @@
+//===-- RISCVTargetStreamer.h - RISCV Target Streamer ----------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETSTREAMER_H
+#define LLVM_LIB_TARGET_RISCV_RISCVTARGETSTREAMER_H
+
+#include "llvm/MC/MCStreamer.h"
+
+namespace llvm {
+
+class RISCVTargetStreamer : public MCTargetStreamer {
+public:
+ RISCVTargetStreamer(MCStreamer &S);
+
+ virtual void emitDirectiveOptionPush() = 0;
+ virtual void emitDirectiveOptionPop() = 0;
+ virtual void emitDirectiveOptionRVC() = 0;
+ virtual void emitDirectiveOptionNoRVC() = 0;
+ virtual void emitDirectiveOptionRelax() = 0;
+ virtual void emitDirectiveOptionNoRelax() = 0;
+};
+
+// This part is for ASCII assembly output.
+class RISCVTargetAsmStreamer : public RISCVTargetStreamer {
+ formatted_raw_ostream &OS;
+
+public:
+ RISCVTargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS);
+
+ void emitDirectiveOptionPush() override;
+ void emitDirectiveOptionPop() override;
+ void emitDirectiveOptionRVC() override;
+ void emitDirectiveOptionNoRVC() override;
+ void emitDirectiveOptionRelax() override;
+ void emitDirectiveOptionNoRelax() override;
+};
+
+} // namespace llvm
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.h
new file mode 100644
index 000000000000..834a1d171143
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.h
@@ -0,0 +1,44 @@
+//===-- RISCV.h - Top-level interface for RISCV -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the entry points for global functions defined in the LLVM
+// RISC-V back-end.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCV_H
+#define LLVM_LIB_TARGET_RISCV_RISCV_H
+
+#include "Utils/RISCVBaseInfo.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+class RISCVTargetMachine;
+class AsmPrinter;
+class FunctionPass;
+class MCInst;
+class MCOperand;
+class MachineInstr;
+class MachineOperand;
+class PassRegistry;
+
+void LowerRISCVMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
+ const AsmPrinter &AP);
+bool LowerRISCVMachineOperandToMCOperand(const MachineOperand &MO,
+ MCOperand &MCOp, const AsmPrinter &AP);
+
+FunctionPass *createRISCVISelDag(RISCVTargetMachine &TM);
+
+FunctionPass *createRISCVMergeBaseOffsetOptPass();
+void initializeRISCVMergeBaseOffsetOptPass(PassRegistry &);
+
+FunctionPass *createRISCVExpandPseudoPass();
+void initializeRISCVExpandPseudoPass(PassRegistry &);
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.td
new file mode 100644
index 000000000000..e19b70b8e709
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.td
@@ -0,0 +1,111 @@
+//===-- RISCV.td - Describe the RISCV Target Machine -------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+include "llvm/Target/Target.td"
+
+//===----------------------------------------------------------------------===//
+// RISC-V subtarget features and instruction predicates.
+//===----------------------------------------------------------------------===//
+
+def FeatureStdExtM
+ : SubtargetFeature<"m", "HasStdExtM", "true",
+ "'M' (Integer Multiplication and Division)">;
+def HasStdExtM : Predicate<"Subtarget->hasStdExtM()">,
+ AssemblerPredicate<"FeatureStdExtM">;
+
+def FeatureStdExtA
+ : SubtargetFeature<"a", "HasStdExtA", "true",
+ "'A' (Atomic Instructions)">;
+def HasStdExtA : Predicate<"Subtarget->hasStdExtA()">,
+ AssemblerPredicate<"FeatureStdExtA">;
+
+def FeatureStdExtF
+ : SubtargetFeature<"f", "HasStdExtF", "true",
+ "'F' (Single-Precision Floating-Point)">;
+def HasStdExtF : Predicate<"Subtarget->hasStdExtF()">,
+ AssemblerPredicate<"FeatureStdExtF">;
+
+def FeatureStdExtD
+ : SubtargetFeature<"d", "HasStdExtD", "true",
+ "'D' (Double-Precision Floating-Point)",
+ [FeatureStdExtF]>;
+def HasStdExtD : Predicate<"Subtarget->hasStdExtD()">,
+ AssemblerPredicate<"FeatureStdExtD">;
+
+def FeatureStdExtC
+ : SubtargetFeature<"c", "HasStdExtC", "true",
+ "'C' (Compressed Instructions)">;
+def HasStdExtC : Predicate<"Subtarget->hasStdExtC()">,
+ AssemblerPredicate<"FeatureStdExtC">;
+
+
+def Feature64Bit
+ : SubtargetFeature<"64bit", "HasRV64", "true", "Implements RV64">;
+def IsRV64 : Predicate<"Subtarget->is64Bit()">,
+ AssemblerPredicate<"Feature64Bit">;
+def IsRV32 : Predicate<"!Subtarget->is64Bit()">,
+ AssemblerPredicate<"!Feature64Bit">;
+
+def RV64 : HwMode<"+64bit">;
+def RV32 : HwMode<"-64bit">;
+
+def FeatureRV32E
+ : SubtargetFeature<"e", "IsRV32E", "true",
+ "Implements RV32E (provides 16 rather than 32 GPRs)">;
+def IsRV32E : Predicate<"Subtarget->isRV32E()">,
+ AssemblerPredicate<"FeatureRV32E">;
+
+def FeatureRelax
+ : SubtargetFeature<"relax", "EnableLinkerRelax", "true",
+                       "Enable linker relaxation">;
+
+//===----------------------------------------------------------------------===//
+// Named operands for CSR instructions.
+//===----------------------------------------------------------------------===//
+
+include "RISCVSystemOperands.td"
+
+//===----------------------------------------------------------------------===//
+// Registers, calling conventions, instruction descriptions.
+//===----------------------------------------------------------------------===//
+
+include "RISCVRegisterInfo.td"
+include "RISCVCallingConv.td"
+include "RISCVInstrInfo.td"
+
+//===----------------------------------------------------------------------===//
+// RISC-V processors supported.
+//===----------------------------------------------------------------------===//
+
+def : ProcessorModel<"generic-rv32", NoSchedModel, []>;
+
+def : ProcessorModel<"generic-rv64", NoSchedModel, [Feature64Bit]>;
+
+//===----------------------------------------------------------------------===//
+// Define the RISC-V target.
+//===----------------------------------------------------------------------===//
+
+def RISCVInstrInfo : InstrInfo {
+ let guessInstructionProperties = 0;
+}
+
+def RISCVAsmParser : AsmParser {
+ let ShouldEmitMatchRegisterAltName = 1;
+ let AllowDuplicateRegisterNames = 1;
+}
+
+def RISCVAsmWriter : AsmWriter {
+ int PassSubtarget = 1;
+}
+
+def RISCV : Target {
+ let InstructionSet = RISCVInstrInfo;
+ let AssemblyParsers = [RISCVAsmParser];
+ let AssemblyWriters = [RISCVAsmWriter];
+ let AllowRegisterRenaming = 1;
+}
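+// These feature strings surface directly on tool command lines, e.g.
+//   llc -mtriple=riscv64 -mattr=+m,+a,+c,+relax foo.ll
+// selects an RV64IMAC configuration with linker relaxation enabled.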
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
new file mode 100644
index 000000000000..57631dcb5115
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -0,0 +1,147 @@
+//===-- RISCVAsmPrinter.cpp - RISCV LLVM assembly writer ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a printer that converts from our internal representation
+// of machine-dependent LLVM code to the RISCV assembly language.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "MCTargetDesc/RISCVInstPrinter.h"
+#include "MCTargetDesc/RISCVMCExpr.h"
+#include "RISCVTargetMachine.h"
+#include "TargetInfo/RISCVTargetInfo.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "asm-printer"
+
+namespace {
+class RISCVAsmPrinter : public AsmPrinter {
+public:
+ explicit RISCVAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)) {}
+
+ StringRef getPassName() const override { return "RISCV Assembly Printer"; }
+
+ void EmitInstruction(const MachineInstr *MI) override;
+
+ bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ const char *ExtraCode, raw_ostream &OS) override;
+ bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ const char *ExtraCode, raw_ostream &OS) override;
+
+ void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
+ bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
+ const MachineInstr *MI);
+
+ // Wrapper needed for tblgenned pseudo lowering.
+ bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
+ return LowerRISCVMachineOperandToMCOperand(MO, MCOp, *this);
+ }
+};
+} // end anonymous namespace
+
+#define GEN_COMPRESS_INSTR
+#include "RISCVGenCompressInstEmitter.inc"
+void RISCVAsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
+ MCInst CInst;
+ bool Res = compressInst(CInst, Inst, *TM.getMCSubtargetInfo(),
+ OutStreamer->getContext());
+ AsmPrinter::EmitToStreamer(*OutStreamer, Res ? CInst : Inst);
+}
+
+// Simple pseudo-instructions have their lowering (with expansion to real
+// instructions) auto-generated.
+#include "RISCVGenMCPseudoLowering.inc"
+
+void RISCVAsmPrinter::EmitInstruction(const MachineInstr *MI) {
+ // Do any auto-generated pseudo lowerings.
+ if (emitPseudoExpansionLowering(*OutStreamer, MI))
+ return;
+
+ MCInst TmpInst;
+ LowerRISCVMachineInstrToMCInst(MI, TmpInst, *this);
+ EmitToStreamer(*OutStreamer, TmpInst);
+}
+
+bool RISCVAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ const char *ExtraCode, raw_ostream &OS) {
+ // First try the generic code, which knows about modifiers like 'c' and 'n'.
+ if (!AsmPrinter::PrintAsmOperand(MI, OpNo, ExtraCode, OS))
+ return false;
+
+ const MachineOperand &MO = MI->getOperand(OpNo);
+ if (ExtraCode && ExtraCode[0]) {
+ if (ExtraCode[1] != 0)
+ return true; // Unknown modifier.
+
+ switch (ExtraCode[0]) {
+ default:
+ return true; // Unknown modifier.
+ case 'z': // Print zero register if zero, regular printing otherwise.
+ if (MO.isImm() && MO.getImm() == 0) {
+ OS << RISCVInstPrinter::getRegisterName(RISCV::X0);
+ return false;
+ }
+ break;
+ case 'i': // Literal 'i' if operand is not a register.
+ if (!MO.isReg())
+ OS << 'i';
+ return false;
+ }
+ }
+
+ switch (MO.getType()) {
+ case MachineOperand::MO_Immediate:
+ OS << MO.getImm();
+ return false;
+ case MachineOperand::MO_Register:
+ OS << RISCVInstPrinter::getRegisterName(MO.getReg());
+ return false;
+ default:
+ break;
+ }
+
+ return true;
+}
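+// Illustrative C-level use of the 'z' modifier: with the "rJ" constraint
+// (register or the constant zero), %z0 prints the zero register when the
+// value folds to 0, e.g.
+//   asm volatile("sw %z0, 0(%1)" :: "rJ"(value), "r"(ptr) : "memory");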
+
+bool RISCVAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
+ unsigned OpNo,
+ const char *ExtraCode,
+ raw_ostream &OS) {
+ if (!ExtraCode) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+    // For now, we only support memory operands that are plain registers and
+    // assume there is no addend.
+ if (!MO.isReg())
+ return true;
+
+ OS << "0(" << RISCVInstPrinter::getRegisterName(MO.getReg()) << ")";
+ return false;
+ }
+
+ return AsmPrinter::PrintAsmMemoryOperand(MI, OpNo, ExtraCode, OS);
+}
+
+// Force static initialization.
+extern "C" void LLVMInitializeRISCVAsmPrinter() {
+ RegisterAsmPrinter<RISCVAsmPrinter> X(getTheRISCV32Target());
+ RegisterAsmPrinter<RISCVAsmPrinter> Y(getTheRISCV64Target());
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVCallingConv.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVCallingConv.td
new file mode 100644
index 000000000000..db13e6e8beca
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVCallingConv.td
@@ -0,0 +1,65 @@
+//===-- RISCVCallingConv.td - Calling Conventions RISCV ----*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This describes the calling conventions for the RISCV architecture.
+//
+//===----------------------------------------------------------------------===//
+
+// The RISC-V calling convention is handled with custom code in
+// RISCVISelLowering.cpp (CC_RISCV).
+
+def CSR_ILP32_LP64
+ : CalleeSavedRegs<(add X1, X3, X4, X8, X9, (sequence "X%u", 18, 27))>;
+
+def CSR_ILP32F_LP64F
+ : CalleeSavedRegs<(add CSR_ILP32_LP64,
+ F8_32, F9_32, (sequence "F%u_32", 18, 27))>;
+
+def CSR_ILP32D_LP64D
+ : CalleeSavedRegs<(add CSR_ILP32_LP64,
+ F8_64, F9_64, (sequence "F%u_64", 18, 27))>;
+
+// Needed for implementation of RISCVRegisterInfo::getNoPreservedMask()
+def CSR_NoRegs : CalleeSavedRegs<(add)>;
+
+// An interrupt handler needs to save/restore all registers that are used,
+// both caller- and callee-saved registers.
+def CSR_Interrupt : CalleeSavedRegs<(add X1,
+ (sequence "X%u", 3, 9),
+ (sequence "X%u", 10, 11),
+ (sequence "X%u", 12, 17),
+ (sequence "X%u", 18, 27),
+ (sequence "X%u", 28, 31))>;
+
+// Same as CSR_Interrupt, but including all 32-bit FP registers.
+def CSR_XLEN_F32_Interrupt : CalleeSavedRegs<(add X1,
+ (sequence "X%u", 3, 9),
+ (sequence "X%u", 10, 11),
+ (sequence "X%u", 12, 17),
+ (sequence "X%u", 18, 27),
+ (sequence "X%u", 28, 31),
+ (sequence "F%u_32", 0, 7),
+ (sequence "F%u_32", 10, 11),
+ (sequence "F%u_32", 12, 17),
+ (sequence "F%u_32", 28, 31),
+ (sequence "F%u_32", 8, 9),
+ (sequence "F%u_32", 18, 27))>;
+
+// Same as CSR_Interrupt, but including all 64-bit FP registers.
+def CSR_XLEN_F64_Interrupt : CalleeSavedRegs<(add X1,
+ (sequence "X%u", 3, 9),
+ (sequence "X%u", 10, 11),
+ (sequence "X%u", 12, 17),
+ (sequence "X%u", 18, 27),
+ (sequence "X%u", 28, 31),
+ (sequence "F%u_64", 0, 7),
+ (sequence "F%u_64", 10, 11),
+ (sequence "F%u_64", 12, 17),
+ (sequence "F%u_64", 28, 31),
+ (sequence "F%u_64", 8, 9),
+ (sequence "F%u_64", 18, 27))>;
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
new file mode 100644
index 000000000000..1c5171a7b7a4
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -0,0 +1,716 @@
+//===-- RISCVExpandPseudoInsts.cpp - Expand pseudo instructions -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a pass that expands pseudo instructions into target
+// instructions. This pass should be run after register allocation but before
+// the post-regalloc scheduling pass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVInstrInfo.h"
+#include "RISCVTargetMachine.h"
+
+#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+
+using namespace llvm;
+
+#define RISCV_EXPAND_PSEUDO_NAME "RISCV pseudo instruction expansion pass"
+
+namespace {
+
+class RISCVExpandPseudo : public MachineFunctionPass {
+public:
+ const RISCVInstrInfo *TII;
+ static char ID;
+
+ RISCVExpandPseudo() : MachineFunctionPass(ID) {
+ initializeRISCVExpandPseudoPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ StringRef getPassName() const override { return RISCV_EXPAND_PSEUDO_NAME; }
+
+private:
+ bool expandMBB(MachineBasicBlock &MBB);
+ bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI);
+ bool expandAtomicBinOp(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, AtomicRMWInst::BinOp,
+ bool IsMasked, int Width,
+ MachineBasicBlock::iterator &NextMBBI);
+ bool expandAtomicMinMaxOp(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ AtomicRMWInst::BinOp, bool IsMasked, int Width,
+ MachineBasicBlock::iterator &NextMBBI);
+ bool expandAtomicCmpXchg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, bool IsMasked,
+ int Width, MachineBasicBlock::iterator &NextMBBI);
+ bool expandAuipcInstPair(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI,
+ unsigned FlagsHi, unsigned SecondOpcode);
+ bool expandLoadLocalAddress(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI);
+ bool expandLoadAddress(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI);
+ bool expandLoadTLSIEAddress(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI);
+ bool expandLoadTLSGDAddress(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI);
+};
+
+char RISCVExpandPseudo::ID = 0;
+
+bool RISCVExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
+ TII = static_cast<const RISCVInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ bool Modified = false;
+ for (auto &MBB : MF)
+ Modified |= expandMBB(MBB);
+ return Modified;
+}
+
+bool RISCVExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
+ bool Modified = false;
+
+ MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
+ while (MBBI != E) {
+ MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+ Modified |= expandMI(MBB, MBBI, NMBBI);
+ MBBI = NMBBI;
+ }
+
+ return Modified;
+}
+
+bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI) {
+ switch (MBBI->getOpcode()) {
+ case RISCV::PseudoAtomicLoadNand32:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
+ NextMBBI);
+ case RISCV::PseudoAtomicLoadNand64:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 64,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicSwap32:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, true, 32,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadAdd32:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, true, 32, NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadSub32:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, true, 32, NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadNand32:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, true, 32,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadMax32:
+ return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, true, 32,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadMin32:
+ return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, true, 32,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadUMax32:
+ return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, true, 32,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadUMin32:
+ return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, true, 32,
+ NextMBBI);
+ case RISCV::PseudoCmpXchg32:
+ return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI);
+ case RISCV::PseudoCmpXchg64:
+ return expandAtomicCmpXchg(MBB, MBBI, false, 64, NextMBBI);
+ case RISCV::PseudoMaskedCmpXchg32:
+ return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
+ case RISCV::PseudoLLA:
+ return expandLoadLocalAddress(MBB, MBBI, NextMBBI);
+ case RISCV::PseudoLA:
+ return expandLoadAddress(MBB, MBBI, NextMBBI);
+ case RISCV::PseudoLA_TLS_IE:
+ return expandLoadTLSIEAddress(MBB, MBBI, NextMBBI);
+ case RISCV::PseudoLA_TLS_GD:
+ return expandLoadTLSGDAddress(MBB, MBBI, NextMBBI);
+ }
+
+ return false;
+}
+
+static unsigned getLRForRMW32(AtomicOrdering Ordering) {
+ switch (Ordering) {
+ default:
+ llvm_unreachable("Unexpected AtomicOrdering");
+ case AtomicOrdering::Monotonic:
+ return RISCV::LR_W;
+ case AtomicOrdering::Acquire:
+ return RISCV::LR_W_AQ;
+ case AtomicOrdering::Release:
+ return RISCV::LR_W;
+ case AtomicOrdering::AcquireRelease:
+ return RISCV::LR_W_AQ;
+ case AtomicOrdering::SequentiallyConsistent:
+ return RISCV::LR_W_AQ_RL;
+ }
+}
+
+static unsigned getSCForRMW32(AtomicOrdering Ordering) {
+ switch (Ordering) {
+ default:
+ llvm_unreachable("Unexpected AtomicOrdering");
+ case AtomicOrdering::Monotonic:
+ return RISCV::SC_W;
+ case AtomicOrdering::Acquire:
+ return RISCV::SC_W;
+ case AtomicOrdering::Release:
+ return RISCV::SC_W_RL;
+ case AtomicOrdering::AcquireRelease:
+ return RISCV::SC_W_RL;
+ case AtomicOrdering::SequentiallyConsistent:
+ return RISCV::SC_W_AQ_RL;
+ }
+}
+
+static unsigned getLRForRMW64(AtomicOrdering Ordering) {
+ switch (Ordering) {
+ default:
+ llvm_unreachable("Unexpected AtomicOrdering");
+ case AtomicOrdering::Monotonic:
+ return RISCV::LR_D;
+ case AtomicOrdering::Acquire:
+ return RISCV::LR_D_AQ;
+ case AtomicOrdering::Release:
+ return RISCV::LR_D;
+ case AtomicOrdering::AcquireRelease:
+ return RISCV::LR_D_AQ;
+ case AtomicOrdering::SequentiallyConsistent:
+ return RISCV::LR_D_AQ_RL;
+ }
+}
+
+static unsigned getSCForRMW64(AtomicOrdering Ordering) {
+ switch (Ordering) {
+ default:
+ llvm_unreachable("Unexpected AtomicOrdering");
+ case AtomicOrdering::Monotonic:
+ return RISCV::SC_D;
+ case AtomicOrdering::Acquire:
+ return RISCV::SC_D;
+ case AtomicOrdering::Release:
+ return RISCV::SC_D_RL;
+ case AtomicOrdering::AcquireRelease:
+ return RISCV::SC_D_RL;
+ case AtomicOrdering::SequentiallyConsistent:
+ return RISCV::SC_D_AQ_RL;
+ }
+}
+
+static unsigned getLRForRMW(AtomicOrdering Ordering, int Width) {
+ if (Width == 32)
+ return getLRForRMW32(Ordering);
+ if (Width == 64)
+ return getLRForRMW64(Ordering);
+ llvm_unreachable("Unexpected LR width\n");
+}
+
+static unsigned getSCForRMW(AtomicOrdering Ordering, int Width) {
+ if (Width == 32)
+ return getSCForRMW32(Ordering);
+ if (Width == 64)
+ return getSCForRMW64(Ordering);
+ llvm_unreachable("Unexpected SC width\n");
+}
+
+static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
+ DebugLoc DL, MachineBasicBlock *ThisMBB,
+ MachineBasicBlock *LoopMBB,
+ MachineBasicBlock *DoneMBB,
+ AtomicRMWInst::BinOp BinOp, int Width) {
+ unsigned DestReg = MI.getOperand(0).getReg();
+ unsigned ScratchReg = MI.getOperand(1).getReg();
+ unsigned AddrReg = MI.getOperand(2).getReg();
+ unsigned IncrReg = MI.getOperand(3).getReg();
+ AtomicOrdering Ordering =
+ static_cast<AtomicOrdering>(MI.getOperand(4).getImm());
+
+ // .loop:
+ // lr.[w|d] dest, (addr)
+ // binop scratch, dest, val
+ // sc.[w|d] scratch, scratch, (addr)
+ // bnez scratch, loop
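+  //
+  // For a sequentially consistent nand on RV32, for instance (an illustrative
+  // sketch), the emitted loop is:
+  //   .loop:
+  //     lr.w.aqrl dest, (addr)
+  //     and       scratch, dest, incr
+  //     xori      scratch, scratch, -1
+  //     sc.w.aqrl scratch, scratch, (addr)
+  //     bnez      scratch, .loop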
+ BuildMI(LoopMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
+ .addReg(AddrReg);
+ switch (BinOp) {
+ default:
+ llvm_unreachable("Unexpected AtomicRMW BinOp");
+ case AtomicRMWInst::Nand:
+ BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
+ .addReg(DestReg)
+ .addReg(IncrReg);
+ BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
+ .addReg(ScratchReg)
+ .addImm(-1);
+ break;
+ }
+ BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
+ .addReg(AddrReg)
+ .addReg(ScratchReg);
+ BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
+ .addReg(ScratchReg)
+ .addReg(RISCV::X0)
+ .addMBB(LoopMBB);
+}
+
+static void insertMaskedMerge(const RISCVInstrInfo *TII, DebugLoc DL,
+ MachineBasicBlock *MBB, unsigned DestReg,
+ unsigned OldValReg, unsigned NewValReg,
+ unsigned MaskReg, unsigned ScratchReg) {
+ assert(OldValReg != ScratchReg && "OldValReg and ScratchReg must be unique");
+ assert(OldValReg != MaskReg && "OldValReg and MaskReg must be unique");
+ assert(ScratchReg != MaskReg && "ScratchReg and MaskReg must be unique");
+
+ // We select bits from newval and oldval using:
+ // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge
+ // r = oldval ^ ((oldval ^ newval) & masktargetdata);
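+  //
+  // e.g. with oldval = 0b1100, newval = 0b1010 and mask = 0b0110:
+  //   (oldval ^ newval) & mask = 0b0110, and oldval ^ 0b0110 = 0b1010,
+  // so the bits selected by the mask come from newval and the remaining bits
+  // come from oldval.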
+ BuildMI(MBB, DL, TII->get(RISCV::XOR), ScratchReg)
+ .addReg(OldValReg)
+ .addReg(NewValReg);
+ BuildMI(MBB, DL, TII->get(RISCV::AND), ScratchReg)
+ .addReg(ScratchReg)
+ .addReg(MaskReg);
+ BuildMI(MBB, DL, TII->get(RISCV::XOR), DestReg)
+ .addReg(OldValReg)
+ .addReg(ScratchReg);
+}
+
+static void doMaskedAtomicBinOpExpansion(
+ const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL,
+ MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopMBB,
+ MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width) {
+ assert(Width == 32 && "Should never need to expand masked 64-bit operations");
+ unsigned DestReg = MI.getOperand(0).getReg();
+ unsigned ScratchReg = MI.getOperand(1).getReg();
+ unsigned AddrReg = MI.getOperand(2).getReg();
+ unsigned IncrReg = MI.getOperand(3).getReg();
+ unsigned MaskReg = MI.getOperand(4).getReg();
+ AtomicOrdering Ordering =
+ static_cast<AtomicOrdering>(MI.getOperand(5).getImm());
+
+ // .loop:
+ // lr.w destreg, (alignedaddr)
+ // binop scratch, destreg, incr
+ // xor scratch, destreg, scratch
+ // and scratch, scratch, masktargetdata
+ // xor scratch, destreg, scratch
+ // sc.w scratch, scratch, (alignedaddr)
+ // bnez scratch, loop
+ BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+ .addReg(AddrReg);
+ switch (BinOp) {
+ default:
+ llvm_unreachable("Unexpected AtomicRMW BinOp");
+ case AtomicRMWInst::Xchg:
+ BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
+ .addReg(RISCV::X0)
+ .addReg(IncrReg);
+ break;
+ case AtomicRMWInst::Add:
+ BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
+ .addReg(DestReg)
+ .addReg(IncrReg);
+ break;
+ case AtomicRMWInst::Sub:
+ BuildMI(LoopMBB, DL, TII->get(RISCV::SUB), ScratchReg)
+ .addReg(DestReg)
+ .addReg(IncrReg);
+ break;
+ case AtomicRMWInst::Nand:
+ BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
+ .addReg(DestReg)
+ .addReg(IncrReg);
+ BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
+ .addReg(ScratchReg)
+ .addImm(-1);
+ break;
+ }
+
+ insertMaskedMerge(TII, DL, LoopMBB, ScratchReg, DestReg, ScratchReg, MaskReg,
+ ScratchReg);
+
+ BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
+ .addReg(AddrReg)
+ .addReg(ScratchReg);
+ BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
+ .addReg(ScratchReg)
+ .addReg(RISCV::X0)
+ .addMBB(LoopMBB);
+}
+
+bool RISCVExpandPseudo::expandAtomicBinOp(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
+ MachineBasicBlock::iterator &NextMBBI) {
+ MachineInstr &MI = *MBBI;
+ DebugLoc DL = MI.getDebugLoc();
+
+ MachineFunction *MF = MBB.getParent();
+ auto LoopMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+ // Insert new MBBs.
+ MF->insert(++MBB.getIterator(), LoopMBB);
+ MF->insert(++LoopMBB->getIterator(), DoneMBB);
+
+ // Set up successors and transfer remaining instructions to DoneMBB.
+ LoopMBB->addSuccessor(LoopMBB);
+ LoopMBB->addSuccessor(DoneMBB);
+ DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
+ DoneMBB->transferSuccessors(&MBB);
+ MBB.addSuccessor(LoopMBB);
+
+ if (!IsMasked)
+ doAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp, Width);
+ else
+ doMaskedAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp,
+ Width);
+
+ NextMBBI = MBB.end();
+ MI.eraseFromParent();
+
+ LivePhysRegs LiveRegs;
+ computeAndAddLiveIns(LiveRegs, *LoopMBB);
+ computeAndAddLiveIns(LiveRegs, *DoneMBB);
+
+ return true;
+}
+
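+// Sign-extend the value in ValReg by shifting it left and then
+// arithmetic-right by ShamtReg. For instance (illustrative), with ShamtReg
+// holding 24 on RV32 this sign-extends the low 8 bits of ValReg.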
+static void insertSext(const RISCVInstrInfo *TII, DebugLoc DL,
+ MachineBasicBlock *MBB, unsigned ValReg,
+ unsigned ShamtReg) {
+ BuildMI(MBB, DL, TII->get(RISCV::SLL), ValReg)
+ .addReg(ValReg)
+ .addReg(ShamtReg);
+ BuildMI(MBB, DL, TII->get(RISCV::SRA), ValReg)
+ .addReg(ValReg)
+ .addReg(ShamtReg);
+}
+
+bool RISCVExpandPseudo::expandAtomicMinMaxOp(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
+ MachineBasicBlock::iterator &NextMBBI) {
+ assert(IsMasked == true &&
+ "Should only need to expand masked atomic max/min");
+ assert(Width == 32 && "Should never need to expand masked 64-bit operations");
+
+ MachineInstr &MI = *MBBI;
+ DebugLoc DL = MI.getDebugLoc();
+ MachineFunction *MF = MBB.getParent();
+ auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto LoopIfBodyMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+ // Insert new MBBs.
+ MF->insert(++MBB.getIterator(), LoopHeadMBB);
+ MF->insert(++LoopHeadMBB->getIterator(), LoopIfBodyMBB);
+ MF->insert(++LoopIfBodyMBB->getIterator(), LoopTailMBB);
+ MF->insert(++LoopTailMBB->getIterator(), DoneMBB);
+
+ // Set up successors and transfer remaining instructions to DoneMBB.
+ LoopHeadMBB->addSuccessor(LoopIfBodyMBB);
+ LoopHeadMBB->addSuccessor(LoopTailMBB);
+ LoopIfBodyMBB->addSuccessor(LoopTailMBB);
+ LoopTailMBB->addSuccessor(LoopHeadMBB);
+ LoopTailMBB->addSuccessor(DoneMBB);
+ DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
+ DoneMBB->transferSuccessors(&MBB);
+ MBB.addSuccessor(LoopHeadMBB);
+
+ unsigned DestReg = MI.getOperand(0).getReg();
+ unsigned Scratch1Reg = MI.getOperand(1).getReg();
+ unsigned Scratch2Reg = MI.getOperand(2).getReg();
+ unsigned AddrReg = MI.getOperand(3).getReg();
+ unsigned IncrReg = MI.getOperand(4).getReg();
+ unsigned MaskReg = MI.getOperand(5).getReg();
+ bool IsSigned = BinOp == AtomicRMWInst::Min || BinOp == AtomicRMWInst::Max;
+ AtomicOrdering Ordering =
+ static_cast<AtomicOrdering>(MI.getOperand(IsSigned ? 7 : 6).getImm());
+
+ //
+ // .loophead:
+ // lr.w destreg, (alignedaddr)
+ // and scratch2, destreg, mask
+ // mv scratch1, destreg
+ // [sext scratch2 if signed min/max]
+ // ifnochangeneeded scratch2, incr, .looptail
+ BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+ .addReg(AddrReg);
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), Scratch2Reg)
+ .addReg(DestReg)
+ .addReg(MaskReg);
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::ADDI), Scratch1Reg)
+ .addReg(DestReg)
+ .addImm(0);
+
+ switch (BinOp) {
+ default:
+ llvm_unreachable("Unexpected AtomicRMW BinOp");
+ case AtomicRMWInst::Max: {
+ insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
+ .addReg(Scratch2Reg)
+ .addReg(IncrReg)
+ .addMBB(LoopTailMBB);
+ break;
+ }
+ case AtomicRMWInst::Min: {
+ insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
+ .addReg(IncrReg)
+ .addReg(Scratch2Reg)
+ .addMBB(LoopTailMBB);
+ break;
+ }
+ case AtomicRMWInst::UMax:
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
+ .addReg(Scratch2Reg)
+ .addReg(IncrReg)
+ .addMBB(LoopTailMBB);
+ break;
+ case AtomicRMWInst::UMin:
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
+ .addReg(IncrReg)
+ .addReg(Scratch2Reg)
+ .addMBB(LoopTailMBB);
+ break;
+ }
+
+ // .loopifbody:
+ // xor scratch1, destreg, incr
+ // and scratch1, scratch1, mask
+ // xor scratch1, destreg, scratch1
+ insertMaskedMerge(TII, DL, LoopIfBodyMBB, Scratch1Reg, DestReg, IncrReg,
+ MaskReg, Scratch1Reg);
+
+ // .looptail:
+ // sc.w scratch1, scratch1, (addr)
+ // bnez scratch1, loop
+ BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), Scratch1Reg)
+ .addReg(AddrReg)
+ .addReg(Scratch1Reg);
+ BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
+ .addReg(Scratch1Reg)
+ .addReg(RISCV::X0)
+ .addMBB(LoopHeadMBB);
+
+ NextMBBI = MBB.end();
+ MI.eraseFromParent();
+
+ LivePhysRegs LiveRegs;
+ computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
+ computeAndAddLiveIns(LiveRegs, *LoopIfBodyMBB);
+ computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
+ computeAndAddLiveIns(LiveRegs, *DoneMBB);
+
+ return true;
+}
+
+bool RISCVExpandPseudo::expandAtomicCmpXchg(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool IsMasked,
+ int Width, MachineBasicBlock::iterator &NextMBBI) {
+ MachineInstr &MI = *MBBI;
+ DebugLoc DL = MI.getDebugLoc();
+ MachineFunction *MF = MBB.getParent();
+ auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+ // Insert new MBBs.
+ MF->insert(++MBB.getIterator(), LoopHeadMBB);
+ MF->insert(++LoopHeadMBB->getIterator(), LoopTailMBB);
+ MF->insert(++LoopTailMBB->getIterator(), DoneMBB);
+
+ // Set up successors and transfer remaining instructions to DoneMBB.
+ LoopHeadMBB->addSuccessor(LoopTailMBB);
+ LoopHeadMBB->addSuccessor(DoneMBB);
+ LoopTailMBB->addSuccessor(DoneMBB);
+ LoopTailMBB->addSuccessor(LoopHeadMBB);
+ DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
+ DoneMBB->transferSuccessors(&MBB);
+ MBB.addSuccessor(LoopHeadMBB);
+
+ unsigned DestReg = MI.getOperand(0).getReg();
+ unsigned ScratchReg = MI.getOperand(1).getReg();
+ unsigned AddrReg = MI.getOperand(2).getReg();
+ unsigned CmpValReg = MI.getOperand(3).getReg();
+ unsigned NewValReg = MI.getOperand(4).getReg();
+ AtomicOrdering Ordering =
+ static_cast<AtomicOrdering>(MI.getOperand(IsMasked ? 6 : 5).getImm());
+
+ if (!IsMasked) {
+ // .loophead:
+ // lr.[w|d] dest, (addr)
+ // bne dest, cmpval, done
+ BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
+ .addReg(AddrReg);
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
+ .addReg(DestReg)
+ .addReg(CmpValReg)
+ .addMBB(DoneMBB);
+ // .looptail:
+ // sc.[w|d] scratch, newval, (addr)
+ // bnez scratch, loophead
+ BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
+ .addReg(AddrReg)
+ .addReg(NewValReg);
+ BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
+ .addReg(ScratchReg)
+ .addReg(RISCV::X0)
+ .addMBB(LoopHeadMBB);
+ } else {
+ // .loophead:
+ // lr.w dest, (addr)
+ // and scratch, dest, mask
+ // bne scratch, cmpval, done
+ unsigned MaskReg = MI.getOperand(5).getReg();
+ BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
+ .addReg(AddrReg);
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg)
+ .addReg(DestReg)
+ .addReg(MaskReg);
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
+ .addReg(ScratchReg)
+ .addReg(CmpValReg)
+ .addMBB(DoneMBB);
+
+ // .looptail:
+ // xor scratch, dest, newval
+ // and scratch, scratch, mask
+ // xor scratch, dest, scratch
+    //    sc.w scratch, scratch, (addr)
+ // bnez scratch, loophead
+ insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg,
+ MaskReg, ScratchReg);
+ BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
+ .addReg(AddrReg)
+ .addReg(ScratchReg);
+ BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
+ .addReg(ScratchReg)
+ .addReg(RISCV::X0)
+ .addMBB(LoopHeadMBB);
+ }
+
+ NextMBBI = MBB.end();
+ MI.eraseFromParent();
+
+ LivePhysRegs LiveRegs;
+ computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
+ computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
+ computeAndAddLiveIns(LiveRegs, *DoneMBB);
+
+ return true;
+}
+
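+// Expand a pseudo into an AUIPC plus a second instruction, both relocated
+// against a fresh pc-relative label. For example (an illustrative sketch,
+// with a made-up label name), "PseudoLLA rd, sym" becomes:
+//   .Lpcrel_hi0:
+//     auipc rd, %pcrel_hi(sym)
+//     addi  rd, rd, %pcrel_lo(.Lpcrel_hi0)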
+bool RISCVExpandPseudo::expandAuipcInstPair(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI, unsigned FlagsHi,
+ unsigned SecondOpcode) {
+ MachineFunction *MF = MBB.getParent();
+ MachineInstr &MI = *MBBI;
+ DebugLoc DL = MI.getDebugLoc();
+
+ unsigned DestReg = MI.getOperand(0).getReg();
+ const MachineOperand &Symbol = MI.getOperand(1);
+
+ MachineBasicBlock *NewMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+ // Tell AsmPrinter that we unconditionally want the symbol of this label to be
+ // emitted.
+ NewMBB->setLabelMustBeEmitted();
+
+ MF->insert(++MBB.getIterator(), NewMBB);
+
+ BuildMI(NewMBB, DL, TII->get(RISCV::AUIPC), DestReg)
+ .addDisp(Symbol, 0, FlagsHi);
+ BuildMI(NewMBB, DL, TII->get(SecondOpcode), DestReg)
+ .addReg(DestReg)
+ .addMBB(NewMBB, RISCVII::MO_PCREL_LO);
+
+ // Move all the rest of the instructions to NewMBB.
+ NewMBB->splice(NewMBB->end(), &MBB, std::next(MBBI), MBB.end());
+ // Update machine-CFG edges.
+ NewMBB->transferSuccessorsAndUpdatePHIs(&MBB);
+  // Make the original basic block fall through to the new one.
+ MBB.addSuccessor(NewMBB);
+
+ // Make sure live-ins are correctly attached to this new basic block.
+ LivePhysRegs LiveRegs;
+ computeAndAddLiveIns(LiveRegs, *NewMBB);
+
+ NextMBBI = MBB.end();
+ MI.eraseFromParent();
+ return true;
+}
+
+bool RISCVExpandPseudo::expandLoadLocalAddress(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI) {
+ return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_PCREL_HI,
+ RISCV::ADDI);
+}
+
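+// PseudoLA loads an address either directly (auipc + addi against %pcrel_hi,
+// as for PseudoLLA) or, for position independent code, via the GOT. A sketch
+// of the PIC RV64 expansion: auipc rd, %got_pcrel_hi(sym) followed by
+// ld rd, %pcrel_lo(label)(rd).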
+bool RISCVExpandPseudo::expandLoadAddress(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI) {
+ MachineFunction *MF = MBB.getParent();
+
+ unsigned SecondOpcode;
+ unsigned FlagsHi;
+ if (MF->getTarget().isPositionIndependent()) {
+ const auto &STI = MF->getSubtarget<RISCVSubtarget>();
+ SecondOpcode = STI.is64Bit() ? RISCV::LD : RISCV::LW;
+ FlagsHi = RISCVII::MO_GOT_HI;
+ } else {
+ SecondOpcode = RISCV::ADDI;
+ FlagsHi = RISCVII::MO_PCREL_HI;
+ }
+ return expandAuipcInstPair(MBB, MBBI, NextMBBI, FlagsHi, SecondOpcode);
+}
+
+bool RISCVExpandPseudo::expandLoadTLSIEAddress(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI) {
+ MachineFunction *MF = MBB.getParent();
+
+ const auto &STI = MF->getSubtarget<RISCVSubtarget>();
+ unsigned SecondOpcode = STI.is64Bit() ? RISCV::LD : RISCV::LW;
+ return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_TLS_GOT_HI,
+ SecondOpcode);
+}
+
+bool RISCVExpandPseudo::expandLoadTLSGDAddress(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI) {
+ return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_TLS_GD_HI,
+ RISCV::ADDI);
+}
+
+} // end of anonymous namespace
+
+INITIALIZE_PASS(RISCVExpandPseudo, "riscv-expand-pseudo",
+ RISCV_EXPAND_PSEUDO_NAME, false, false)
+namespace llvm {
+
+FunctionPass *createRISCVExpandPseudoPass() { return new RISCVExpandPseudo(); }
+
+} // end of namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
new file mode 100644
index 000000000000..bbaa16c08634
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -0,0 +1,408 @@
+//===-- RISCVFrameLowering.cpp - RISCV Frame Information ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the RISCV implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVFrameLowering.h"
+#include "RISCVMachineFunctionInfo.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/MC/MCDwarf.h"
+
+using namespace llvm;
+
+bool RISCVFrameLowering::hasFP(const MachineFunction &MF) const {
+ const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
+
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ return MF.getTarget().Options.DisableFramePointerElim(MF) ||
+ RegInfo->needsStackRealignment(MF) || MFI.hasVarSizedObjects() ||
+ MFI.isFrameAddressTaken();
+}
+
+// Determines the size of the frame and maximum call frame size.
+void RISCVFrameLowering::determineFrameLayout(MachineFunction &MF) const {
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ const RISCVRegisterInfo *RI = STI.getRegisterInfo();
+
+ // Get the number of bytes to allocate from the FrameInfo.
+ uint64_t FrameSize = MFI.getStackSize();
+
+ // Get the alignment.
+ unsigned StackAlign = getStackAlignment();
+ if (RI->needsStackRealignment(MF)) {
+ unsigned MaxStackAlign = std::max(StackAlign, MFI.getMaxAlignment());
+ FrameSize += (MaxStackAlign - StackAlign);
+ StackAlign = MaxStackAlign;
+ }
+
+ // Set Max Call Frame Size
+ uint64_t MaxCallSize = alignTo(MFI.getMaxCallFrameSize(), StackAlign);
+ MFI.setMaxCallFrameSize(MaxCallSize);
+
+ // Make sure the frame is aligned.
+ FrameSize = alignTo(FrameSize, StackAlign);
+
+ // Update frame info.
+ MFI.setStackSize(FrameSize);
+}
+
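+// Adjust DestReg to SrcReg + Val. For example (illustrative), a delta of -32
+// becomes a single "addi DestReg, SrcReg, -32", while a delta of 4096 falls
+// outside the 12-bit immediate range and is first materialised into a
+// scratch register via movImm32 and then added or subtracted.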
+void RISCVFrameLowering::adjustReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, int64_t Val,
+ MachineInstr::MIFlag Flag) const {
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ const RISCVInstrInfo *TII = STI.getInstrInfo();
+
+ if (DestReg == SrcReg && Val == 0)
+ return;
+
+ if (isInt<12>(Val)) {
+ BuildMI(MBB, MBBI, DL, TII->get(RISCV::ADDI), DestReg)
+ .addReg(SrcReg)
+ .addImm(Val)
+ .setMIFlag(Flag);
+ } else if (isInt<32>(Val)) {
+ unsigned Opc = RISCV::ADD;
+ bool isSub = Val < 0;
+ if (isSub) {
+ Val = -Val;
+ Opc = RISCV::SUB;
+ }
+
+ unsigned ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+ TII->movImm32(MBB, MBBI, DL, ScratchReg, Val, Flag);
+ BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
+ .addReg(SrcReg)
+ .addReg(ScratchReg, RegState::Kill)
+ .setMIFlag(Flag);
+ } else {
+ report_fatal_error("adjustReg cannot yet handle adjustments >32 bits");
+ }
+}
+
+// Returns the register used to hold the frame pointer.
+static unsigned getFPReg(const RISCVSubtarget &STI) { return RISCV::X8; }
+
+// Returns the register used to hold the stack pointer.
+static unsigned getSPReg(const RISCVSubtarget &STI) { return RISCV::X2; }
+
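+// For a small RV32 function with a frame pointer and a 16-byte frame, the
+// prologue emitted below looks roughly like this (an illustrative sketch):
+//   addi sp, sp, -16
+//   .cfi_def_cfa_offset 16
+//   sw   ra, 12(sp)
+//   sw   s0, 8(sp)
+//   .cfi_offset ra, -4
+//   .cfi_offset s0, -8
+//   addi s0, sp, 16
+//   .cfi_def_cfa s0, 0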
+void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
+
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+ const RISCVRegisterInfo *RI = STI.getRegisterInfo();
+ const RISCVInstrInfo *TII = STI.getInstrInfo();
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+
+ if (RI->needsStackRealignment(MF) && MFI.hasVarSizedObjects()) {
+ report_fatal_error(
+ "RISC-V backend can't currently handle functions that need stack "
+ "realignment and have variable sized objects");
+ }
+
+ unsigned FPReg = getFPReg(STI);
+ unsigned SPReg = getSPReg(STI);
+
+ // Debug location must be unknown since the first debug location is used
+ // to determine the end of the prologue.
+ DebugLoc DL;
+
+ // Determine the correct frame layout
+ determineFrameLayout(MF);
+
+  // FIXME (note copied from Lanai): This appears to be overallocating. Needs
+  // investigation.
+  // Get the number of bytes to allocate from the FrameInfo.
+ uint64_t StackSize = MFI.getStackSize();
+
+ // Early exit if there is no need to allocate on the stack
+ if (StackSize == 0 && !MFI.adjustsStack())
+ return;
+
+ // Allocate space on the stack if necessary.
+ adjustReg(MBB, MBBI, DL, SPReg, SPReg, -StackSize, MachineInstr::FrameSetup);
+
+ // Emit ".cfi_def_cfa_offset StackSize"
+ unsigned CFIIndex = MF.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize));
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ // The frame pointer is callee-saved, and code has been generated for us to
+ // save it to the stack. We need to skip over the storing of callee-saved
+ // registers as the frame pointer must be modified after it has been saved
+ // to the stack, not before.
+ // FIXME: assumes exactly one instruction is used to save each callee-saved
+ // register.
+ const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
+ std::advance(MBBI, CSI.size());
+
+ // Iterate over list of callee-saved registers and emit .cfi_offset
+ // directives.
+ for (const auto &Entry : CSI) {
+ int64_t Offset = MFI.getObjectOffset(Entry.getFrameIdx());
+ unsigned Reg = Entry.getReg();
+ unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
+ nullptr, RI->getDwarfRegNum(Reg, true), Offset));
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+
+ // Generate new FP.
+ if (hasFP(MF)) {
+ adjustReg(MBB, MBBI, DL, FPReg, SPReg,
+ StackSize - RVFI->getVarArgsSaveSize(), MachineInstr::FrameSetup);
+
+ // Emit ".cfi_def_cfa $fp, 0"
+ unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa(
+ nullptr, RI->getDwarfRegNum(FPReg, true), 0));
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ // Realign Stack
+ const RISCVRegisterInfo *RI = STI.getRegisterInfo();
+ if (RI->needsStackRealignment(MF)) {
+ unsigned MaxAlignment = MFI.getMaxAlignment();
+
+ const RISCVInstrInfo *TII = STI.getInstrInfo();
+ if (isInt<12>(-(int)MaxAlignment)) {
+ BuildMI(MBB, MBBI, DL, TII->get(RISCV::ANDI), SPReg)
+ .addReg(SPReg)
+ .addImm(-(int)MaxAlignment);
+ } else {
+ unsigned ShiftAmount = countTrailingZeros(MaxAlignment);
+ unsigned VR =
+ MF.getRegInfo().createVirtualRegister(&RISCV::GPRRegClass);
+ BuildMI(MBB, MBBI, DL, TII->get(RISCV::SRLI), VR)
+ .addReg(SPReg)
+ .addImm(ShiftAmount);
+ BuildMI(MBB, MBBI, DL, TII->get(RISCV::SLLI), SPReg)
+ .addReg(VR)
+ .addImm(ShiftAmount);
+ }
+ }
+ }
+}
+
+void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ const RISCVRegisterInfo *RI = STI.getRegisterInfo();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+ DebugLoc DL = MBBI->getDebugLoc();
+ const RISCVInstrInfo *TII = STI.getInstrInfo();
+ unsigned FPReg = getFPReg(STI);
+ unsigned SPReg = getSPReg(STI);
+
+ // Skip to before the restores of callee-saved registers
+ // FIXME: assumes exactly one instruction is used to restore each
+ // callee-saved register.
+ auto LastFrameDestroy = std::prev(MBBI, MFI.getCalleeSavedInfo().size());
+
+ uint64_t StackSize = MFI.getStackSize();
+ uint64_t FPOffset = StackSize - RVFI->getVarArgsSaveSize();
+
+ // Restore the stack pointer using the value of the frame pointer. Only
+ // necessary if the stack pointer was modified, meaning the stack size is
+ // unknown.
+ if (RI->needsStackRealignment(MF) || MFI.hasVarSizedObjects()) {
+ assert(hasFP(MF) && "frame pointer should not have been eliminated");
+ adjustReg(MBB, LastFrameDestroy, DL, SPReg, FPReg, -FPOffset,
+ MachineInstr::FrameDestroy);
+ }
+
+ if (hasFP(MF)) {
+    // Find the instruction that restores FP from the stack.
+ for (auto &I = LastFrameDestroy; I != MBBI; ++I) {
+ if (I->mayLoad() && I->getOperand(0).isReg()) {
+ unsigned DestReg = I->getOperand(0).getReg();
+ if (DestReg == FPReg) {
+          // If there is a frame pointer, then after restoring $fp we need to
+          // adjust the CFA to ($sp - FPOffset).
+ // Emit ".cfi_def_cfa $sp, -FPOffset"
+ unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa(
+ nullptr, RI->getDwarfRegNum(SPReg, true), -FPOffset));
+ BuildMI(MBB, std::next(I), DL,
+ TII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ break;
+ }
+ }
+ }
+ }
+
+ // Add CFI directives for callee-saved registers.
+ const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
+ // Iterate over list of callee-saved registers and emit .cfi_restore
+ // directives.
+ for (const auto &Entry : CSI) {
+ unsigned Reg = Entry.getReg();
+ unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createRestore(
+ nullptr, RI->getDwarfRegNum(Reg, true)));
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+
+ // Deallocate stack
+ adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackSize, MachineInstr::FrameDestroy);
+
+ // After restoring $sp, we need to adjust CFA to $(sp + 0)
+ // Emit ".cfi_def_cfa_offset 0"
+ unsigned CFIIndex =
+ MF.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, 0));
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+}
+
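+// Resolve a frame index to a FrameReg + Offset pair. As an illustrative
+// sketch: with a 16-byte frame, no frame pointer and an object at offset -4,
+// the object is addressed as sp + 12.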
+int RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF,
+ int FI,
+ unsigned &FrameReg) const {
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
+ const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+
+ // Callee-saved registers should be referenced relative to the stack
+ // pointer (positive offset), otherwise use the frame pointer (negative
+ // offset).
+ const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
+ int MinCSFI = 0;
+ int MaxCSFI = -1;
+
+ int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea() +
+ MFI.getOffsetAdjustment();
+
+ if (CSI.size()) {
+ MinCSFI = CSI[0].getFrameIdx();
+ MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
+ }
+
+ if (FI >= MinCSFI && FI <= MaxCSFI) {
+ FrameReg = RISCV::X2;
+ Offset += MF.getFrameInfo().getStackSize();
+ } else if (RI->needsStackRealignment(MF)) {
+ assert(!MFI.hasVarSizedObjects() &&
+ "Unexpected combination of stack realignment and varsized objects");
+ // If the stack was realigned, the frame pointer is set in order to allow
+ // SP to be restored, but we still access stack objects using SP.
+ FrameReg = RISCV::X2;
+ Offset += MF.getFrameInfo().getStackSize();
+ } else {
+ FrameReg = RI->getFrameRegister(MF);
+ if (hasFP(MF))
+ Offset += RVFI->getVarArgsSaveSize();
+ else
+ Offset += MF.getFrameInfo().getStackSize();
+ }
+ return Offset;
+}
+
+void RISCVFrameLowering::determineCalleeSaves(MachineFunction &MF,
+ BitVector &SavedRegs,
+ RegScavenger *RS) const {
+ TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
+  // If the function uses a frame pointer, unconditionally spill both RA and
+  // FP, regardless of whether they would otherwise need to be saved.
+ if (hasFP(MF)) {
+ SavedRegs.set(RISCV::X1);
+ SavedRegs.set(RISCV::X8);
+ }
+
+  // If the interrupt attribute is set and the handler contains calls,
+  // unconditionally save all caller-saved registers and all FP registers,
+  // regardless of whether they are used.
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+
+ if (MF.getFunction().hasFnAttribute("interrupt") && MFI.hasCalls()) {
+
+    static const MCPhysReg CSRegs[] = {
+        RISCV::X1,                                      /* ra */
+        RISCV::X5, RISCV::X6, RISCV::X7,                /* t0-t2 */
+        RISCV::X10, RISCV::X11,                         /* a0-a1 */
+        RISCV::X12, RISCV::X13, RISCV::X14, RISCV::X15, /* a2-a5 */
+        RISCV::X16, RISCV::X17,                         /* a6-a7 */
+        RISCV::X28, RISCV::X29, RISCV::X30, RISCV::X31, /* t3-t6 */
+        0};
+
+ for (unsigned i = 0; CSRegs[i]; ++i)
+ SavedRegs.set(CSRegs[i]);
+
+ if (MF.getSubtarget<RISCVSubtarget>().hasStdExtD() ||
+ MF.getSubtarget<RISCVSubtarget>().hasStdExtF()) {
+
+      // For interrupt handlers, the callee-saved register list contains all
+      // FP registers.
+      const MCPhysReg *Regs = MF.getRegInfo().getCalleeSavedRegs();
+
+ for (unsigned i = 0; Regs[i]; ++i)
+ if (RISCV::FPR32RegClass.contains(Regs[i]) ||
+ RISCV::FPR64RegClass.contains(Regs[i]))
+ SavedRegs.set(Regs[i]);
+ }
+ }
+}
+
+void RISCVFrameLowering::processFunctionBeforeFrameFinalized(
+ MachineFunction &MF, RegScavenger *RS) const {
+ const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ const TargetRegisterClass *RC = &RISCV::GPRRegClass;
+  // estimateStackSize has been observed to under-estimate the final stack
+  // size, so give ourselves wiggle-room by checking for a stack size
+  // representable in an 11-bit signed field rather than a 12-bit one.
+ // FIXME: It may be possible to craft a function with a small stack that
+ // still needs an emergency spill slot for branch relaxation. This case
+ // would currently be missed.
+ if (!isInt<11>(MFI.estimateStackSize(MF))) {
+ int RegScavFI = MFI.CreateStackObject(
+ RegInfo->getSpillSize(*RC), RegInfo->getSpillAlignment(*RC), false);
+ RS->addScavengingFrameIndex(RegScavFI);
+ }
+}
+
+// Do not reserve stack space in the prologue for outgoing arguments when the
+// function contains variable-sized objects; instead, let
+// eliminateCallFramePseudoInstr adjust the stack around each call.
+bool RISCVFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+ return !MF.getFrameInfo().hasVarSizedObjects();
+}
+
+// Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions.
+MachineBasicBlock::iterator RISCVFrameLowering::eliminateCallFramePseudoInstr(
+ MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const {
+ unsigned SPReg = RISCV::X2;
+ DebugLoc DL = MI->getDebugLoc();
+
+ if (!hasReservedCallFrame(MF)) {
+ // If space has not been reserved for a call frame, ADJCALLSTACKDOWN and
+ // ADJCALLSTACKUP must be converted to instructions manipulating the stack
+ // pointer. This is necessary when there is a variable length stack
+ // allocation (e.g. alloca), which means it's not possible to allocate
+ // space for outgoing arguments from within the function prologue.
+ int64_t Amount = MI->getOperand(0).getImm();
+
+ if (Amount != 0) {
+ // Ensure the stack remains aligned after adjustment.
+ Amount = alignSPAdjust(Amount);
+
+ if (MI->getOpcode() == RISCV::ADJCALLSTACKDOWN)
+ Amount = -Amount;
+
+ adjustReg(MBB, MI, DL, SPReg, SPReg, Amount, MachineInstr::NoFlags);
+ }
+ }
+
+ return MBB.erase(MI);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVFrameLowering.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVFrameLowering.h
new file mode 100644
index 000000000000..0e045c3ff853
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVFrameLowering.h
@@ -0,0 +1,58 @@
+//===-- RISCVFrameLowering.h - Define frame lowering for RISCV -*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This class implements RISCV-specific bits of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVFRAMELOWERING_H
+#define LLVM_LIB_TARGET_RISCV_RISCVFRAMELOWERING_H
+
+#include "llvm/CodeGen/TargetFrameLowering.h"
+
+namespace llvm {
+class RISCVSubtarget;
+
+class RISCVFrameLowering : public TargetFrameLowering {
+public:
+ explicit RISCVFrameLowering(const RISCVSubtarget &STI)
+ : TargetFrameLowering(StackGrowsDown,
+ /*StackAlignment=*/16,
+ /*LocalAreaOffset=*/0),
+ STI(STI) {}
+
+ void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+
+ int getFrameIndexReference(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const override;
+
+ void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
+ RegScavenger *RS) const override;
+
+ void processFunctionBeforeFrameFinalized(MachineFunction &MF,
+ RegScavenger *RS) const override;
+
+ bool hasFP(const MachineFunction &MF) const override;
+
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
+ MachineBasicBlock::iterator
+ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const override;
+
+protected:
+ const RISCVSubtarget &STI;
+
+private:
+ void determineFrameLayout(MachineFunction &MF) const;
+ void adjustReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
+ int64_t Val, MachineInstr::MIFlag Flag) const;
+};
+}
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
new file mode 100644
index 000000000000..8439278b4ed5
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -0,0 +1,292 @@
+//===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an instruction selector for the RISCV target.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "RISCV.h"
+#include "RISCVTargetMachine.h"
+#include "Utils/RISCVMatInt.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-isel"
+
+// RISCV-specific code to select RISCV machine instructions for
+// SelectionDAG operations.
+namespace {
+class RISCVDAGToDAGISel final : public SelectionDAGISel {
+ const RISCVSubtarget *Subtarget;
+
+public:
+ explicit RISCVDAGToDAGISel(RISCVTargetMachine &TargetMachine)
+ : SelectionDAGISel(TargetMachine) {}
+
+ StringRef getPassName() const override {
+ return "RISCV DAG->DAG Pattern Instruction Selection";
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ Subtarget = &MF.getSubtarget<RISCVSubtarget>();
+ return SelectionDAGISel::runOnMachineFunction(MF);
+ }
+
+ void PostprocessISelDAG() override;
+
+ void Select(SDNode *Node) override;
+
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
+ std::vector<SDValue> &OutOps) override;
+
+ bool SelectAddrFI(SDValue Addr, SDValue &Base);
+
+// Include the pieces autogenerated from the target description.
+#include "RISCVGenDAGISel.inc"
+
+private:
+ void doPeepholeLoadStoreADDI();
+};
+}
+
+void RISCVDAGToDAGISel::PostprocessISelDAG() {
+ doPeepholeLoadStoreADDI();
+}
+
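+// Materialise an immediate using the instruction sequence computed by
+// RISCVMatInt. As an illustrative sketch, 0x12345678 on RV64 becomes:
+//   lui   rd, 0x12345
+//   addiw rd, rd, 0x678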
+static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
+ MVT XLenVT) {
+ RISCVMatInt::InstSeq Seq;
+ RISCVMatInt::generateInstSeq(Imm, XLenVT == MVT::i64, Seq);
+
+ SDNode *Result;
+ SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
+ for (RISCVMatInt::Inst &Inst : Seq) {
+ SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
+ if (Inst.Opc == RISCV::LUI)
+ Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
+ else
+ Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);
+
+ // Only the first instruction has X0 as its source.
+ SrcReg = SDValue(Result, 0);
+ }
+
+ return Result;
+}
+
+// Returns true if the Node is an ISD::AND with a constant argument. If so,
+// sets Mask to that constant value.
+static bool isConstantMask(SDNode *Node, uint64_t &Mask) {
+ if (Node->getOpcode() == ISD::AND &&
+ Node->getOperand(1).getOpcode() == ISD::Constant) {
+ Mask = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ return true;
+ }
+ return false;
+}
+
+void RISCVDAGToDAGISel::Select(SDNode *Node) {
+ // If we have a custom node, we have already selected.
+ if (Node->isMachineOpcode()) {
+ LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
+ Node->setNodeId(-1);
+ return;
+ }
+
+ // Instruction Selection not handled by the auto-generated tablegen selection
+ // should be handled here.
+ unsigned Opcode = Node->getOpcode();
+ MVT XLenVT = Subtarget->getXLenVT();
+ SDLoc DL(Node);
+ EVT VT = Node->getValueType(0);
+
+ switch (Opcode) {
+ case ISD::Constant: {
+ auto ConstNode = cast<ConstantSDNode>(Node);
+ if (VT == XLenVT && ConstNode->isNullValue()) {
+ SDValue New = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
+ RISCV::X0, XLenVT);
+ ReplaceNode(Node, New.getNode());
+ return;
+ }
+ int64_t Imm = ConstNode->getSExtValue();
+ if (XLenVT == MVT::i64) {
+ ReplaceNode(Node, selectImm(CurDAG, SDLoc(Node), Imm, XLenVT));
+ return;
+ }
+ break;
+ }
+ case ISD::FrameIndex: {
+ SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
+ int FI = cast<FrameIndexSDNode>(Node)->getIndex();
+ SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
+ ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
+ return;
+ }
+ case ISD::SRL: {
+ if (!Subtarget->is64Bit())
+ break;
+ SDValue Op0 = Node->getOperand(0);
+ SDValue Op1 = Node->getOperand(1);
+ uint64_t Mask;
+ // Match (srl (and val, mask), imm) where the result would be a
+ // zero-extended 32-bit integer. i.e. the mask is 0xffffffff or the result
+ // is equivalent to this (SimplifyDemandedBits may have removed lower bits
+ // from the mask that aren't necessary due to the right-shifting).
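+    // For example (illustrative): (srl (and x, 0xfffffff0), 4) is selected
+    // as (SRLIW x, 4), since 0xfffffff0 | 0xf == 0xffffffff.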
+ if (Op1.getOpcode() == ISD::Constant &&
+ isConstantMask(Op0.getNode(), Mask)) {
+ uint64_t ShAmt = cast<ConstantSDNode>(Op1.getNode())->getZExtValue();
+
+ if ((Mask | maskTrailingOnes<uint64_t>(ShAmt)) == 0xffffffff) {
+ SDValue ShAmtVal =
+ CurDAG->getTargetConstant(ShAmt, SDLoc(Node), XLenVT);
+ CurDAG->SelectNodeTo(Node, RISCV::SRLIW, XLenVT, Op0.getOperand(0),
+ ShAmtVal);
+ return;
+ }
+ }
+ break;
+ }
+ case RISCVISD::READ_CYCLE_WIDE:
+ assert(!Subtarget->is64Bit() && "READ_CYCLE_WIDE is only used on riscv32");
+
+ ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ReadCycleWide, DL, MVT::i32,
+ MVT::i32, MVT::Other,
+ Node->getOperand(0)));
+ return;
+ }
+
+ // Select the default instruction.
+ SelectCode(Node);
+}
+
+bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
+ const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
+ switch (ConstraintID) {
+ case InlineAsm::Constraint_i:
+ case InlineAsm::Constraint_m:
+    // We only support simple memory operands that have a single address
+    // operand and need no special handling.
+ OutOps.push_back(Op);
+ return false;
+ case InlineAsm::Constraint_A:
+ OutOps.push_back(Op);
+ return false;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+bool RISCVDAGToDAGISel::SelectAddrFI(SDValue Addr, SDValue &Base) {
+ if (auto FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
+ return true;
+ }
+ return false;
+}
+
+// Merge an ADDI into the offset of a load/store instruction where possible.
+// (load (add base, off), 0) -> (load base, off)
+// (store val, (add base, off)) -> (store val, base, off)
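+// For example (illustrative), this folds
+//   addi a1, a0, %lo(var)
+//   lw   a2, 0(a1)
+// into
+//   lw   a2, %lo(var)(a0)
+// provided the ADDI has no other users.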
+void RISCVDAGToDAGISel::doPeepholeLoadStoreADDI() {
+ SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
+ ++Position;
+
+ while (Position != CurDAG->allnodes_begin()) {
+ SDNode *N = &*--Position;
+ // Skip dead nodes and any non-machine opcodes.
+ if (N->use_empty() || !N->isMachineOpcode())
+ continue;
+
+ int OffsetOpIdx;
+ int BaseOpIdx;
+
+ // Only attempt this optimisation for I-type loads and S-type stores.
+ switch (N->getMachineOpcode()) {
+ default:
+ continue;
+ case RISCV::LB:
+ case RISCV::LH:
+ case RISCV::LW:
+ case RISCV::LBU:
+ case RISCV::LHU:
+ case RISCV::LWU:
+ case RISCV::LD:
+ case RISCV::FLW:
+ case RISCV::FLD:
+ BaseOpIdx = 0;
+ OffsetOpIdx = 1;
+ break;
+ case RISCV::SB:
+ case RISCV::SH:
+ case RISCV::SW:
+ case RISCV::SD:
+ case RISCV::FSW:
+ case RISCV::FSD:
+ BaseOpIdx = 1;
+ OffsetOpIdx = 2;
+ break;
+ }
+
+ // Currently, the load/store offset must be 0 to be considered for this
+ // peephole optimisation.
+ if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)) ||
+ N->getConstantOperandVal(OffsetOpIdx) != 0)
+ continue;
+
+ SDValue Base = N->getOperand(BaseOpIdx);
+
+ // If the base is an ADDI, we can merge it in to the load/store.
+ if (!Base.isMachineOpcode() || Base.getMachineOpcode() != RISCV::ADDI)
+ continue;
+
+ SDValue ImmOperand = Base.getOperand(1);
+
+ if (auto Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
+ ImmOperand = CurDAG->getTargetConstant(
+ Const->getSExtValue(), SDLoc(ImmOperand), ImmOperand.getValueType());
+ } else if (auto GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
+ ImmOperand = CurDAG->getTargetGlobalAddress(
+ GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
+ GA->getOffset(), GA->getTargetFlags());
+ } else {
+ continue;
+ }
+
+ LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase: ");
+ LLVM_DEBUG(Base->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\nN: ");
+ LLVM_DEBUG(N->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\n");
+
+ // Modify the offset operand of the load/store.
+ if (BaseOpIdx == 0) // Load
+ CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
+ N->getOperand(2));
+ else // Store
+ CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
+ ImmOperand, N->getOperand(3));
+
+ // The add-immediate may now be dead, in which case remove it.
+ if (Base.getNode()->use_empty())
+ CurDAG->RemoveDeadNode(Base.getNode());
+ }
+}
+
+// This pass converts a legalized DAG into a RISCV-specific DAG, ready
+// for instruction scheduling.
+FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM) {
+ return new RISCVDAGToDAGISel(TM);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
new file mode 100644
index 000000000000..2b0f64fa6db6
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -0,0 +1,2665 @@
+//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that RISCV uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVISelLowering.h"
+#include "RISCV.h"
+#include "RISCVMachineFunctionInfo.h"
+#include "RISCVRegisterInfo.h"
+#include "RISCVSubtarget.h"
+#include "RISCVTargetMachine.h"
+#include "Utils/RISCVMatInt.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-lower"
+
+STATISTIC(NumTailCalls, "Number of tail calls");
+
+RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
+ const RISCVSubtarget &STI)
+ : TargetLowering(TM), Subtarget(STI) {
+
+ if (Subtarget.isRV32E())
+ report_fatal_error("Codegen not yet implemented for RV32E");
+
+ RISCVABI::ABI ABI = Subtarget.getTargetABI();
+ assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
+
+ switch (ABI) {
+ default:
+ report_fatal_error("Don't know how to lower this ABI");
+ case RISCVABI::ABI_ILP32:
+ case RISCVABI::ABI_ILP32F:
+ case RISCVABI::ABI_ILP32D:
+ case RISCVABI::ABI_LP64:
+ case RISCVABI::ABI_LP64F:
+ case RISCVABI::ABI_LP64D:
+ break;
+ }
+
+ MVT XLenVT = Subtarget.getXLenVT();
+
+ // Set up the register classes.
+ addRegisterClass(XLenVT, &RISCV::GPRRegClass);
+
+ if (Subtarget.hasStdExtF())
+ addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
+ if (Subtarget.hasStdExtD())
+ addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
+
+ // Compute derived properties from the register classes.
+ computeRegisterProperties(STI.getRegisterInfo());
+
+ setStackPointerRegisterToSaveRestore(RISCV::X2);
+
+ for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
+ setLoadExtAction(N, XLenVT, MVT::i1, Promote);
+
+ // TODO: add all necessary setOperationAction calls.
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
+
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+ setOperationAction(ISD::BR_CC, XLenVT, Expand);
+ setOperationAction(ISD::SELECT, XLenVT, Custom);
+ setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
+
+ setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
+ setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
+
+ setOperationAction(ISD::VASTART, MVT::Other, Custom);
+ setOperationAction(ISD::VAARG, MVT::Other, Expand);
+ setOperationAction(ISD::VACOPY, MVT::Other, Expand);
+ setOperationAction(ISD::VAEND, MVT::Other, Expand);
+
+ for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
+ setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
+
+ if (Subtarget.is64Bit()) {
+ setOperationAction(ISD::SHL, MVT::i32, Custom);
+ setOperationAction(ISD::SRA, MVT::i32, Custom);
+ setOperationAction(ISD::SRL, MVT::i32, Custom);
+ }
+
+ if (!Subtarget.hasStdExtM()) {
+ setOperationAction(ISD::MUL, XLenVT, Expand);
+ setOperationAction(ISD::MULHS, XLenVT, Expand);
+ setOperationAction(ISD::MULHU, XLenVT, Expand);
+ setOperationAction(ISD::SDIV, XLenVT, Expand);
+ setOperationAction(ISD::UDIV, XLenVT, Expand);
+ setOperationAction(ISD::SREM, XLenVT, Expand);
+ setOperationAction(ISD::UREM, XLenVT, Expand);
+ }
+
+ if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
+ setOperationAction(ISD::SDIV, MVT::i32, Custom);
+ setOperationAction(ISD::UDIV, MVT::i32, Custom);
+ setOperationAction(ISD::UREM, MVT::i32, Custom);
+ }
+
+ setOperationAction(ISD::SDIVREM, XLenVT, Expand);
+ setOperationAction(ISD::UDIVREM, XLenVT, Expand);
+ setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
+ setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
+
+ setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
+ setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
+ setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
+
+ setOperationAction(ISD::ROTL, XLenVT, Expand);
+ setOperationAction(ISD::ROTR, XLenVT, Expand);
+ setOperationAction(ISD::BSWAP, XLenVT, Expand);
+ setOperationAction(ISD::CTTZ, XLenVT, Expand);
+ setOperationAction(ISD::CTLZ, XLenVT, Expand);
+ setOperationAction(ISD::CTPOP, XLenVT, Expand);
+
+ ISD::CondCode FPCCToExtend[] = {
+ ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
+ ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
+ ISD::SETGE, ISD::SETNE};
+
+ ISD::NodeType FPOpToExtend[] = {
+ ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM};
+
+ if (Subtarget.hasStdExtF()) {
+ setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
+ setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
+ for (auto CC : FPCCToExtend)
+ setCondCodeAction(CC, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT, MVT::f32, Custom);
+ setOperationAction(ISD::BR_CC, MVT::f32, Expand);
+ for (auto Op : FPOpToExtend)
+ setOperationAction(Op, MVT::f32, Expand);
+ }
+
+ if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
+ setOperationAction(ISD::BITCAST, MVT::i32, Custom);
+
+ if (Subtarget.hasStdExtD()) {
+ setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
+ setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
+ for (auto CC : FPCCToExtend)
+ setCondCodeAction(CC, MVT::f64, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
+ setOperationAction(ISD::SELECT, MVT::f64, Custom);
+ setOperationAction(ISD::BR_CC, MVT::f64, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+ for (auto Op : FPOpToExtend)
+ setOperationAction(Op, MVT::f64, Expand);
+ }
+
+ setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
+ setOperationAction(ISD::BlockAddress, XLenVT, Custom);
+ setOperationAction(ISD::ConstantPool, XLenVT, Custom);
+
+ setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
+
+ // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
+ // Unfortunately this can't be determined just from the ISA naming string.
+ setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
+ Subtarget.is64Bit() ? Legal : Custom);
+
+ if (Subtarget.hasStdExtA()) {
+ setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
+ setMinCmpXchgSizeInBits(32);
+ } else {
+ setMaxAtomicSizeInBitsSupported(0);
+ }
+
+ setBooleanContents(ZeroOrOneBooleanContent);
+
+  // Function alignments (log2): 2 bytes when the compressed (C) extension is
+  // enabled, 4 bytes otherwise.
+ unsigned FunctionAlignment = Subtarget.hasStdExtC() ? 1 : 2;
+ setMinFunctionAlignment(FunctionAlignment);
+ setPrefFunctionAlignment(FunctionAlignment);
+
+ // Effectively disable jump table generation.
+ setMinimumJumpTableEntries(INT_MAX);
+}
+
+EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
+ EVT VT) const {
+ if (!VT.isVector())
+ return getPointerTy(DL);
+ return VT.changeVectorElementTypeToInteger();
+}
+
+bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
+ const CallInst &I,
+ MachineFunction &MF,
+ unsigned Intrinsic) const {
+ switch (Intrinsic) {
+ default:
+ return false;
+ case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
+ case Intrinsic::riscv_masked_atomicrmw_add_i32:
+ case Intrinsic::riscv_masked_atomicrmw_sub_i32:
+ case Intrinsic::riscv_masked_atomicrmw_nand_i32:
+ case Intrinsic::riscv_masked_atomicrmw_max_i32:
+ case Intrinsic::riscv_masked_atomicrmw_min_i32:
+ case Intrinsic::riscv_masked_atomicrmw_umax_i32:
+ case Intrinsic::riscv_masked_atomicrmw_umin_i32:
+ case Intrinsic::riscv_masked_cmpxchg_i32:
+ PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = MVT::getVT(PtrTy->getElementType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.align = 4;
+ Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
+ MachineMemOperand::MOVolatile;
+ return true;
+ }
+}
+
+bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
+ const AddrMode &AM, Type *Ty,
+ unsigned AS,
+ Instruction *I) const {
+ // No global is ever allowed as a base.
+ if (AM.BaseGV)
+ return false;
+
+ // Require a 12-bit signed offset.
+ if (!isInt<12>(AM.BaseOffs))
+ return false;
+
+ switch (AM.Scale) {
+ case 0: // "r+i" or just "i", depending on HasBaseReg.
+ break;
+ case 1:
+ if (!AM.HasBaseReg) // allow "r+i".
+ break;
+ return false; // disallow "r+r" or "r+r+i".
+ default:
+ return false;
+ }
+
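+  // In other words, only "imm", "reg" and "reg+imm" forms are accepted;
+  // RISC-V loads and stores provide no scaled or reg+reg addressing modes.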
+ return true;
+}
+
+bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
+ return isInt<12>(Imm);
+}
+
+bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
+ return isInt<12>(Imm);
+}
+
+// On RV32, 64-bit integers are split into their high and low parts and held
+// in two different registers, so the trunc is free since the low register can
+// just be used.
+bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
+ if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
+ return false;
+ unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
+ unsigned DestBits = DstTy->getPrimitiveSizeInBits();
+ return (SrcBits == 64 && DestBits == 32);
+}
+
+bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
+ if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
+ !SrcVT.isInteger() || !DstVT.isInteger())
+ return false;
+ unsigned SrcBits = SrcVT.getSizeInBits();
+ unsigned DestBits = DstVT.getSizeInBits();
+ return (SrcBits == 64 && DestBits == 32);
+}
+
+bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
+ // Zexts are free if they can be combined with a load.
+ if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
+ EVT MemVT = LD->getMemoryVT();
+ if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
+ (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
+ (LD->getExtensionType() == ISD::NON_EXTLOAD ||
+ LD->getExtensionType() == ISD::ZEXTLOAD))
+ return true;
+ }
+
+ return TargetLowering::isZExtFree(Val, VT2);
+}
+
+bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
+ return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
+}
+
+bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
+ return (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
+ (VT == MVT::f64 && Subtarget.hasStdExtD());
+}
+
+// Changes the condition code and swaps operands if necessary, so the SetCC
+// operation matches one of the comparisons supported directly in the RISC-V
+// ISA.
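+// For example, (setgt a, b) becomes (setlt b, a), which can then be selected
+// as BLT; RISC-V provides BEQ/BNE/BLT/BGE/BLTU/BGEU but no BGT/BLE/BGTU/BLEU
+// encodings.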
+static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
+ switch (CC) {
+ default:
+ break;
+ case ISD::SETGT:
+ case ISD::SETLE:
+ case ISD::SETUGT:
+ case ISD::SETULE:
+ CC = ISD::getSetCCSwappedOperands(CC);
+ std::swap(LHS, RHS);
+ break;
+ }
+}
+
+// Return the RISC-V branch opcode that matches the given DAG integer
+// condition code. The CondCode must be one of those supported by the RISC-V
+// ISA (see normaliseSetCC).
+static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
+ switch (CC) {
+ default:
+ llvm_unreachable("Unsupported CondCode");
+ case ISD::SETEQ:
+ return RISCV::BEQ;
+ case ISD::SETNE:
+ return RISCV::BNE;
+ case ISD::SETLT:
+ return RISCV::BLT;
+ case ISD::SETGE:
+ return RISCV::BGE;
+ case ISD::SETULT:
+ return RISCV::BLTU;
+ case ISD::SETUGE:
+ return RISCV::BGEU;
+ }
+}
+
+SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
+ SelectionDAG &DAG) const {
+ switch (Op.getOpcode()) {
+ default:
+ report_fatal_error("unimplemented operand");
+ case ISD::GlobalAddress:
+ return lowerGlobalAddress(Op, DAG);
+ case ISD::BlockAddress:
+ return lowerBlockAddress(Op, DAG);
+ case ISD::ConstantPool:
+ return lowerConstantPool(Op, DAG);
+ case ISD::GlobalTLSAddress:
+ return lowerGlobalTLSAddress(Op, DAG);
+ case ISD::SELECT:
+ return lowerSELECT(Op, DAG);
+ case ISD::VASTART:
+ return lowerVASTART(Op, DAG);
+ case ISD::FRAMEADDR:
+ return lowerFRAMEADDR(Op, DAG);
+ case ISD::RETURNADDR:
+ return lowerRETURNADDR(Op, DAG);
+ case ISD::SHL_PARTS:
+ return lowerShiftLeftParts(Op, DAG);
+ case ISD::SRA_PARTS:
+ return lowerShiftRightParts(Op, DAG, true);
+ case ISD::SRL_PARTS:
+ return lowerShiftRightParts(Op, DAG, false);
+ case ISD::BITCAST: {
+ assert(Subtarget.is64Bit() && Subtarget.hasStdExtF() &&
+ "Unexpected custom legalisation");
+ SDLoc DL(Op);
+ SDValue Op0 = Op.getOperand(0);
+ if (Op.getValueType() != MVT::f32 || Op0.getValueType() != MVT::i32)
+ return SDValue();
+ SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
+ SDValue FPConv = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
+ return FPConv;
+ }
+ }
+}
+
+static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
+ SelectionDAG &DAG, unsigned Flags) {
+ return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
+}
+
+static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
+ SelectionDAG &DAG, unsigned Flags) {
+ return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
+ Flags);
+}
+
+static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
+ SelectionDAG &DAG, unsigned Flags) {
+ return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
+ N->getOffset(), Flags);
+}
+
+template <class NodeTy>
+SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
+ bool IsLocal) const {
+ SDLoc DL(N);
+ EVT Ty = getPointerTy(DAG.getDataLayout());
+
+ if (isPositionIndependent()) {
+ SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
+ if (IsLocal)
+ // Use PC-relative addressing to access the symbol. This generates the
+ // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
+ // %pcrel_lo(auipc)).
+ return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
+
+ // Use PC-relative addressing to access the GOT for this symbol, then load
+ // the address from the GOT. This generates the pattern (PseudoLA sym),
+ // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
+ return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
+ }
+
+ switch (getTargetMachine().getCodeModel()) {
+ default:
+ report_fatal_error("Unsupported code model for lowering");
+ case CodeModel::Small: {
+ // Generate a sequence for accessing addresses within the first 2 GiB of
+ // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
+ SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
+ SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
+ SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
+ return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
+ }
+ case CodeModel::Medium: {
+ // Generate a sequence for accessing addresses within any 2GiB range within
+ // the address space. This generates the pattern (PseudoLLA sym), which
+ // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
+ SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
+ return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
+ }
+ }
+}
+
+SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT Ty = Op.getValueType();
+ GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
+ int64_t Offset = N->getOffset();
+ MVT XLenVT = Subtarget.getXLenVT();
+
+ const GlobalValue *GV = N->getGlobal();
+ bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
+ SDValue Addr = getAddr(N, DAG, IsLocal);
+
+ // In order to maximise the opportunity for common subexpression elimination,
+  // emit a separate ADD node for the global address offset instead of
+  // folding it into the global address node. Later peephole optimisations
+  // may choose to fold it back in when profitable.
+ if (Offset != 0)
+ return DAG.getNode(ISD::ADD, DL, Ty, Addr,
+ DAG.getConstant(Offset, DL, XLenVT));
+ return Addr;
+}
+
+SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
+
+ return getAddr(N, DAG);
+}
+
+SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
+ SelectionDAG &DAG) const {
+ ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
+
+ return getAddr(N, DAG);
+}
+
+SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
+ SelectionDAG &DAG,
+ bool UseGOT) const {
+ SDLoc DL(N);
+ EVT Ty = getPointerTy(DAG.getDataLayout());
+ const GlobalValue *GV = N->getGlobal();
+ MVT XLenVT = Subtarget.getXLenVT();
+
+ if (UseGOT) {
+ // Use PC-relative addressing to access the GOT for this TLS symbol, then
+ // load the address from the GOT and add the thread pointer. This generates
+ // the pattern (PseudoLA_TLS_IE sym), which expands to
+ // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
+ SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
+ SDValue Load =
+ SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
+
+ // Add the thread pointer.
+ SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
+ return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
+ }
+
+ // Generate a sequence for accessing the address relative to the thread
+ // pointer, with the appropriate adjustment for the thread pointer offset.
+ // This generates the pattern
+ // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
+ SDValue AddrHi =
+ DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
+ SDValue AddrAdd =
+ DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
+ SDValue AddrLo =
+ DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
+
+ SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
+ SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
+ SDValue MNAdd = SDValue(
+ DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
+ 0);
+ return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
+}
+
+SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
+ SelectionDAG &DAG) const {
+ SDLoc DL(N);
+ EVT Ty = getPointerTy(DAG.getDataLayout());
+ IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
+ const GlobalValue *GV = N->getGlobal();
+
+ // Use a PC-relative addressing mode to access the global dynamic GOT address.
+ // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
+ // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
+ SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
+ SDValue Load =
+ SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
+
+ // Prepare argument list to generate call.
+ ArgListTy Args;
+ ArgListEntry Entry;
+ Entry.Node = Load;
+ Entry.Ty = CallTy;
+ Args.push_back(Entry);
+
+ // Setup call to __tls_get_addr.
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(DL)
+ .setChain(DAG.getEntryNode())
+ .setLibCallee(CallingConv::C, CallTy,
+ DAG.getExternalSymbol("__tls_get_addr", Ty),
+ std::move(Args));
+
+ return LowerCallTo(CLI).first;
+}
+
+SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT Ty = Op.getValueType();
+ GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
+ int64_t Offset = N->getOffset();
+ MVT XLenVT = Subtarget.getXLenVT();
+
+ // Non-PIC TLS lowering should always use the LocalExec model.
+ TLSModel::Model Model = isPositionIndependent()
+ ? getTargetMachine().getTLSModel(N->getGlobal())
+ : TLSModel::LocalExec;
+
+ SDValue Addr;
+ switch (Model) {
+ case TLSModel::LocalExec:
+ Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
+ break;
+ case TLSModel::InitialExec:
+ Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
+ break;
+ case TLSModel::LocalDynamic:
+ case TLSModel::GeneralDynamic:
+ Addr = getDynamicTLSAddr(N, DAG);
+ break;
+ }
+
+ // In order to maximise the opportunity for common subexpression elimination,
+  // emit a separate ADD node for the global address offset instead of
+  // folding it into the global address node. Later peephole optimisations
+  // may choose to fold it back in when profitable.
+ if (Offset != 0)
+ return DAG.getNode(ISD::ADD, DL, Ty, Addr,
+ DAG.getConstant(Offset, DL, XLenVT));
+ return Addr;
+}
+
+SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
+ SDValue CondV = Op.getOperand(0);
+ SDValue TrueV = Op.getOperand(1);
+ SDValue FalseV = Op.getOperand(2);
+ SDLoc DL(Op);
+ MVT XLenVT = Subtarget.getXLenVT();
+
+ // If the result type is XLenVT and CondV is the output of a SETCC node
+ // which also operated on XLenVT inputs, then merge the SETCC node into the
+ // lowered RISCVISD::SELECT_CC to take advantage of the integer
+ // compare+branch instructions. i.e.:
+ // (select (setcc lhs, rhs, cc), truev, falsev)
+ // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
+ if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
+ CondV.getOperand(0).getSimpleValueType() == XLenVT) {
+ SDValue LHS = CondV.getOperand(0);
+ SDValue RHS = CondV.getOperand(1);
+ auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
+ ISD::CondCode CCVal = CC->get();
+
+ normaliseSetCC(LHS, RHS, CCVal);
+
+ SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
+ SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
+ return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
+ }
+
+ // Otherwise:
+ // (select condv, truev, falsev)
+ // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
+ SDValue Zero = DAG.getConstant(0, DL, XLenVT);
+ SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);
+
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
+ SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
+
+ return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
+}
+
+SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
+
+ SDLoc DL(Op);
+ SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
+ getPointerTy(MF.getDataLayout()));
+
+ // vastart just stores the address of the VarArgsFrameIndex slot into the
+ // memory location argument.
+ const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+ return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
+ MachinePointerInfo(SV));
+}
+
+SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ MFI.setFrameAddressIsTaken(true);
+ unsigned FrameReg = RI.getFrameRegister(MF);
+ int XLenInBytes = Subtarget.getXLen() / 8;
+
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+ SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ while (Depth--) {
+ int Offset = -(XLenInBytes * 2);
+ SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
+ DAG.getIntPtrConstant(Offset, DL));
+ FrameAddr =
+ DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
+ }
+ return FrameAddr;
+}
+
+SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ MFI.setReturnAddressIsTaken(true);
+ MVT XLenVT = Subtarget.getXLenVT();
+ int XLenInBytes = Subtarget.getXLen() / 8;
+
+ if (verifyReturnAddressArgumentIsConstant(Op, DAG))
+ return SDValue();
+
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ if (Depth) {
+ int Off = -XLenInBytes;
+ SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
+ SDValue Offset = DAG.getConstant(Off, DL, VT);
+ return DAG.getLoad(VT, DL, DAG.getEntryNode(),
+ DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
+ MachinePointerInfo());
+ }
+
+ // Return the value of the return address register, marking it an implicit
+ // live-in.
+ unsigned Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
+ return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
+}
+
+SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ SDValue Lo = Op.getOperand(0);
+ SDValue Hi = Op.getOperand(1);
+ SDValue Shamt = Op.getOperand(2);
+ EVT VT = Lo.getValueType();
+
+ // if Shamt-XLEN < 0: // Shamt < XLEN
+ // Lo = Lo << Shamt
+ // Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
+ // else:
+ // Lo = 0
+ // Hi = Lo << (Shamt-XLEN)
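+  //
+  // For example, shifting a 64-bit value left by 40 on RV32 takes the else
+  // branch: Lo = 0 and Hi = Lo << 8 (since Shamt - XLEN = 8).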
+
+ SDValue Zero = DAG.getConstant(0, DL, VT);
+ SDValue One = DAG.getConstant(1, DL, VT);
+ SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
+ SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
+ SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
+ SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
+
+ SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
+ SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
+ SDValue ShiftRightLo =
+ DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
+ SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
+ SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
+ SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
+
+ SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
+
+ Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
+ Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
+
+ SDValue Parts[2] = {Lo, Hi};
+ return DAG.getMergeValues(Parts, DL);
+}
+
+SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
+ bool IsSRA) const {
+ SDLoc DL(Op);
+ SDValue Lo = Op.getOperand(0);
+ SDValue Hi = Op.getOperand(1);
+ SDValue Shamt = Op.getOperand(2);
+ EVT VT = Lo.getValueType();
+
+ // SRA expansion:
+ // if Shamt-XLEN < 0: // Shamt < XLEN
+ // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
+ // Hi = Hi >>s Shamt
+ // else:
+ // Lo = Hi >>s (Shamt-XLEN);
+ // Hi = Hi >>s (XLEN-1)
+ //
+ // SRL expansion:
+ // if Shamt-XLEN < 0: // Shamt < XLEN
+ // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
+ // Hi = Hi >>u Shamt
+ // else:
+ // Lo = Hi >>u (Shamt-XLEN);
+ // Hi = 0;
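+  //
+  // For example, a 64-bit arithmetic shift right by 35 on RV32 takes the
+  // else branch of the SRA expansion: Lo = Hi >>s 3 and Hi = Hi >>s 31
+  // (filling the high word with sign bits).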
+
+ unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
+
+ SDValue Zero = DAG.getConstant(0, DL, VT);
+ SDValue One = DAG.getConstant(1, DL, VT);
+ SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
+ SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
+ SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
+ SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
+
+ SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
+ SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
+ SDValue ShiftLeftHi =
+ DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
+ SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
+ SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
+ SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
+ SDValue HiFalse =
+ IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
+
+ SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
+
+ Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
+ Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
+
+ SDValue Parts[2] = {Lo, Hi};
+ return DAG.getMergeValues(Parts, DL);
+}
+
+// Returns the opcode of the target-specific SDNode that implements the 32-bit
+// form of the given Opcode.
+static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
+ switch (Opcode) {
+ default:
+ llvm_unreachable("Unexpected opcode");
+ case ISD::SHL:
+ return RISCVISD::SLLW;
+ case ISD::SRA:
+ return RISCVISD::SRAW;
+ case ISD::SRL:
+ return RISCVISD::SRLW;
+ case ISD::SDIV:
+ return RISCVISD::DIVW;
+ case ISD::UDIV:
+ return RISCVISD::DIVUW;
+ case ISD::UREM:
+ return RISCVISD::REMUW;
+ }
+}
+
+// Converts the given 32-bit operation to a target-specific SelectionDAG node.
+// Because i32 isn't a legal type for RV64, these operations would otherwise
+// be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
+// instructions later, because the fact that the operation was originally of
+// type i32 is lost.
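+// For example, an i32 sdiv on RV64 is rewritten here as
+// (trunc (riscvisd::divw (anyext a), (anyext b))), which then selects to the
+// DIVW instruction.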
+static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG) {
+ SDLoc DL(N);
+ RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
+ SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
+ SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
+ SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
+ // ReplaceNodeResults requires we maintain the same type for the return value.
+ return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
+}
+
+void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const {
+ SDLoc DL(N);
+ switch (N->getOpcode()) {
+ default:
+ llvm_unreachable("Don't know how to custom type legalize this operation!");
+ case ISD::READCYCLECOUNTER: {
+ assert(!Subtarget.is64Bit() &&
+ "READCYCLECOUNTER only has custom type legalization on riscv32");
+
+ SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
+ SDValue RCW =
+ DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
+
+ Results.push_back(RCW);
+ Results.push_back(RCW.getValue(1));
+ Results.push_back(RCW.getValue(2));
+ break;
+ }
+ case ISD::SHL:
+ case ISD::SRA:
+ case ISD::SRL:
+ assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+ "Unexpected custom legalisation");
+ if (N->getOperand(1).getOpcode() == ISD::Constant)
+ return;
+ Results.push_back(customLegalizeToWOp(N, DAG));
+ break;
+ case ISD::SDIV:
+ case ISD::UDIV:
+ case ISD::UREM:
+ assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+ Subtarget.hasStdExtM() && "Unexpected custom legalisation");
+ if (N->getOperand(0).getOpcode() == ISD::Constant ||
+ N->getOperand(1).getOpcode() == ISD::Constant)
+ return;
+ Results.push_back(customLegalizeToWOp(N, DAG));
+ break;
+ case ISD::BITCAST: {
+ assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+ Subtarget.hasStdExtF() && "Unexpected custom legalisation");
+ SDLoc DL(N);
+ SDValue Op0 = N->getOperand(0);
+ if (Op0.getValueType() != MVT::f32)
+ return;
+ SDValue FPConv =
+ DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
+ break;
+ }
+ }
+}
+
+SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+
+ switch (N->getOpcode()) {
+ default:
+ break;
+ case RISCVISD::SplitF64: {
+ SDValue Op0 = N->getOperand(0);
+ // If the input to SplitF64 is just BuildPairF64 then the operation is
+ // redundant. Instead, use BuildPairF64's operands directly.
+ if (Op0->getOpcode() == RISCVISD::BuildPairF64)
+ return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
+
+ SDLoc DL(N);
+
+ // It's cheaper to materialise two 32-bit integers than to load a double
+ // from the constant pool and transfer it to integer registers through the
+ // stack.
+ if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
+ APInt V = C->getValueAPF().bitcastToAPInt();
+ SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
+ SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
+ return DCI.CombineTo(N, Lo, Hi);
+ }
+
+ // This is a target-specific version of a DAGCombine performed in
+ // DAGCombiner::visitBITCAST. It performs the equivalent of:
+ // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
+ // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
+ if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
+ !Op0.getNode()->hasOneUse())
+ break;
+ SDValue NewSplitF64 =
+ DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
+ Op0.getOperand(0));
+ SDValue Lo = NewSplitF64.getValue(0);
+ SDValue Hi = NewSplitF64.getValue(1);
+ APInt SignBit = APInt::getSignMask(32);
+ if (Op0.getOpcode() == ISD::FNEG) {
+ SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
+ DAG.getConstant(SignBit, DL, MVT::i32));
+ return DCI.CombineTo(N, Lo, NewHi);
+ }
+ assert(Op0.getOpcode() == ISD::FABS);
+ SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
+ DAG.getConstant(~SignBit, DL, MVT::i32));
+ return DCI.CombineTo(N, Lo, NewHi);
+ }
+ case RISCVISD::SLLW:
+ case RISCVISD::SRAW:
+ case RISCVISD::SRLW: {
+ // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
+ APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
+ if ((SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI)) ||
+ (SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)))
+ return SDValue();
+ break;
+ }
+ case RISCVISD::FMV_X_ANYEXTW_RV64: {
+ SDLoc DL(N);
+ SDValue Op0 = N->getOperand(0);
+ // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
+ // conversion is unnecessary and can be replaced with an ANY_EXTEND
+ // of the FMV_W_X_RV64 operand.
+ if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
+ SDValue AExtOp =
+ DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0.getOperand(0));
+ return DCI.CombineTo(N, AExtOp);
+ }
+
+ // This is a target-specific version of a DAGCombine performed in
+ // DAGCombiner::visitBITCAST. It performs the equivalent of:
+ // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
+ // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
+ if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
+ !Op0.getNode()->hasOneUse())
+ break;
+ SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
+ Op0.getOperand(0));
+ APInt SignBit = APInt::getSignMask(32).sext(64);
+ if (Op0.getOpcode() == ISD::FNEG) {
+ return DCI.CombineTo(N,
+ DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
+ DAG.getConstant(SignBit, DL, MVT::i64)));
+ }
+ assert(Op0.getOpcode() == ISD::FABS);
+ return DCI.CombineTo(N,
+ DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
+ DAG.getConstant(~SignBit, DL, MVT::i64)));
+ }
+ }
+
+ return SDValue();
+}
+
+bool RISCVTargetLowering::isDesirableToCommuteWithShift(
+ const SDNode *N, CombineLevel Level) const {
+ // The following folds are only desirable if `(OP _, c1 << c2)` can be
+ // materialised in fewer instructions than `(OP _, c1)`:
+ //
+ // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
+ // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
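+  //
+  // For example, with c1 = 7 and c2 = 2 the shifted constant 28 still fits
+  // in a 12-bit immediate, so the fold proceeds; with c1 = 2047 and c2 = 12,
+  // c1 << c2 needs extra instructions to materialise while c1 does not, so
+  // the fold is blocked.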
+ SDValue N0 = N->getOperand(0);
+ EVT Ty = N0.getValueType();
+ if (Ty.isScalarInteger() &&
+ (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
+ auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
+ auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (C1 && C2) {
+ APInt C1Int = C1->getAPIntValue();
+ APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
+
+ // We can materialise `c1 << c2` into an add immediate, so it's "free",
+ // and the combine should happen, to potentially allow further combines
+ // later.
+ if (ShiftedC1Int.getMinSignedBits() <= 64 &&
+ isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
+ return true;
+
+ // We can materialise `c1` in an add immediate, so it's "free", and the
+ // combine should be prevented.
+ if (C1Int.getMinSignedBits() <= 64 &&
+ isLegalAddImmediate(C1Int.getSExtValue()))
+ return false;
+
+ // Neither constant will fit into an immediate, so find materialisation
+ // costs.
+ int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
+ Subtarget.is64Bit());
+ int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
+ ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
+
+ // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
+ // combine should be prevented.
+ if (C1Cost < ShiftedC1Cost)
+ return false;
+ }
+ }
+ return true;
+}
+
+unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
+ SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
+ unsigned Depth) const {
+ switch (Op.getOpcode()) {
+ default:
+ break;
+ case RISCVISD::SLLW:
+ case RISCVISD::SRAW:
+ case RISCVISD::SRLW:
+ case RISCVISD::DIVW:
+ case RISCVISD::DIVUW:
+ case RISCVISD::REMUW:
+ // TODO: As the result is sign-extended, this is conservatively correct. A
+ // more precise answer could be calculated for SRAW depending on known
+ // bits in the shift amount.
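+    // The 32-bit result is kept sign-extended in an i64, so bits 63..31 are
+    // all copies of the sign bit, giving at least 33 known sign bits.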
+ return 33;
+ }
+
+ return 1;
+}
+
+static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
+                                                  MachineBasicBlock *BB) {
+ assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
+
+ // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
+ // Should the count have wrapped while it was being read, we need to try
+ // again.
+ // ...
+ // read:
+ // rdcycleh x3 # load high word of cycle
+ // rdcycle x2 # load low word of cycle
+ // rdcycleh x4 # load high word of cycle
+ // bne x3, x4, read # check if high word reads match, otherwise try again
+ // ...
+
+ MachineFunction &MF = *BB->getParent();
+ const BasicBlock *LLVM_BB = BB->getBasicBlock();
+ MachineFunction::iterator It = ++BB->getIterator();
+
+ MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
+ MF.insert(It, LoopMBB);
+
+ MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
+ MF.insert(It, DoneMBB);
+
+ // Transfer the remainder of BB and its successor edges to DoneMBB.
+ DoneMBB->splice(DoneMBB->begin(), BB,
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
+ DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
+
+ BB->addSuccessor(LoopMBB);
+
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+ unsigned ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
+ unsigned LoReg = MI.getOperand(0).getReg();
+ unsigned HiReg = MI.getOperand(1).getReg();
+ DebugLoc DL = MI.getDebugLoc();
+
+ const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+ BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
+ .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
+ .addReg(RISCV::X0);
+ BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
+ .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
+ .addReg(RISCV::X0);
+ BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
+ .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
+ .addReg(RISCV::X0);
+
+ BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
+ .addReg(HiReg)
+ .addReg(ReadAgainReg)
+ .addMBB(LoopMBB);
+
+ LoopMBB->addSuccessor(LoopMBB);
+ LoopMBB->addSuccessor(DoneMBB);
+
+ MI.eraseFromParent();
+
+ return DoneMBB;
+}
+
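+// RV32 provides no instructions for moving a 64-bit value directly between
+// an FPR and a pair of GPRs, so SplitF64Pseudo and BuildPairF64Pseudo
+// round-trip the value through a stack slot instead.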
+static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
+ MachineBasicBlock *BB) {
+ assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
+
+ MachineFunction &MF = *BB->getParent();
+ DebugLoc DL = MI.getDebugLoc();
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
+ unsigned LoReg = MI.getOperand(0).getReg();
+ unsigned HiReg = MI.getOperand(1).getReg();
+ unsigned SrcReg = MI.getOperand(2).getReg();
+ const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
+ int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();
+
+ TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
+ RI);
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
+ MachineMemOperand::MOLoad, 8, 8);
+ BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addMemOperand(MMO);
+ BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
+ .addFrameIndex(FI)
+ .addImm(4)
+ .addMemOperand(MMO);
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
+ MachineBasicBlock *BB) {
+ assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
+ "Unexpected instruction");
+
+ MachineFunction &MF = *BB->getParent();
+ DebugLoc DL = MI.getDebugLoc();
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned LoReg = MI.getOperand(1).getReg();
+ unsigned HiReg = MI.getOperand(2).getReg();
+ const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
+ int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();
+
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
+ MachineMemOperand::MOStore, 8, 8);
+ BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
+ .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addMemOperand(MMO);
+ BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
+ .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
+ .addFrameIndex(FI)
+ .addImm(4)
+ .addMemOperand(MMO);
+ TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+static bool isSelectPseudo(MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ default:
+ return false;
+ case RISCV::Select_GPR_Using_CC_GPR:
+ case RISCV::Select_FPR32_Using_CC_GPR:
+ case RISCV::Select_FPR64_Using_CC_GPR:
+ return true;
+ }
+}
+
+static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
+ MachineBasicBlock *BB) {
+ // To "insert" Select_* instructions, we actually have to insert the triangle
+ // control-flow pattern. The incoming instructions know the destination vreg
+ // to set, the condition code register to branch on, the true/false values to
+ // select between, and the condcode to use to select the appropriate branch.
+ //
+ // We produce the following control flow:
+ // HeadMBB
+ // | \
+ // | IfFalseMBB
+ // | /
+ // TailMBB
+ //
+ // When we find a sequence of selects we attempt to optimize their emission
+ // by sharing the control flow. Currently we only handle cases where we have
+ // multiple selects with the exact same condition (same LHS, RHS and CC).
+ // The selects may be interleaved with other instructions if the other
+ // instructions meet some requirements we deem safe:
+ // - They are debug instructions. Otherwise,
+ // - They do not have side-effects, do not access memory and their inputs do
+ // not depend on the results of the select pseudo-instructions.
+ // The TrueV/FalseV operands of the selects cannot depend on the result of
+ // previous selects in the sequence.
+ // These conditions could be further relaxed. See the X86 target for a
+ // related approach and more information.
+ unsigned LHS = MI.getOperand(1).getReg();
+ unsigned RHS = MI.getOperand(2).getReg();
+ auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
+
+ SmallVector<MachineInstr *, 4> SelectDebugValues;
+ SmallSet<unsigned, 4> SelectDests;
+ SelectDests.insert(MI.getOperand(0).getReg());
+
+ MachineInstr *LastSelectPseudo = &MI;
+
+ for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
+ SequenceMBBI != E; ++SequenceMBBI) {
+ if (SequenceMBBI->isDebugInstr())
+ continue;
+ else if (isSelectPseudo(*SequenceMBBI)) {
+ if (SequenceMBBI->getOperand(1).getReg() != LHS ||
+ SequenceMBBI->getOperand(2).getReg() != RHS ||
+ SequenceMBBI->getOperand(3).getImm() != CC ||
+ SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
+ SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
+ break;
+ LastSelectPseudo = &*SequenceMBBI;
+ SequenceMBBI->collectDebugValues(SelectDebugValues);
+ SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
+ } else {
+ if (SequenceMBBI->hasUnmodeledSideEffects() ||
+ SequenceMBBI->mayLoadOrStore())
+ break;
+ if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
+ return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
+ }))
+ break;
+ }
+ }
+
+ const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
+ const BasicBlock *LLVM_BB = BB->getBasicBlock();
+ DebugLoc DL = MI.getDebugLoc();
+ MachineFunction::iterator I = ++BB->getIterator();
+
+ MachineBasicBlock *HeadMBB = BB;
+ MachineFunction *F = BB->getParent();
+ MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
+
+ F->insert(I, IfFalseMBB);
+ F->insert(I, TailMBB);
+
+ // Transfer debug instructions associated with the selects to TailMBB.
+ for (MachineInstr *DebugInstr : SelectDebugValues) {
+ TailMBB->push_back(DebugInstr->removeFromParent());
+ }
+
+ // Move all instructions after the sequence to TailMBB.
+ TailMBB->splice(TailMBB->end(), HeadMBB,
+ std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
+ // Update machine-CFG edges by transferring all successors of the current
+ // block to the new block which will contain the Phi nodes for the selects.
+ TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
+ // Set the successors for HeadMBB.
+ HeadMBB->addSuccessor(IfFalseMBB);
+ HeadMBB->addSuccessor(TailMBB);
+
+ // Insert appropriate branch.
+ unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
+
+ BuildMI(HeadMBB, DL, TII.get(Opcode))
+ .addReg(LHS)
+ .addReg(RHS)
+ .addMBB(TailMBB);
+
+ // IfFalseMBB just falls through to TailMBB.
+ IfFalseMBB->addSuccessor(TailMBB);
+
+ // Create PHIs for all of the select pseudo-instructions.
+ auto SelectMBBI = MI.getIterator();
+ auto SelectEnd = std::next(LastSelectPseudo->getIterator());
+ auto InsertionPoint = TailMBB->begin();
+ while (SelectMBBI != SelectEnd) {
+ auto Next = std::next(SelectMBBI);
+ if (isSelectPseudo(*SelectMBBI)) {
+ // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
+ BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
+ TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
+ .addReg(SelectMBBI->getOperand(4).getReg())
+ .addMBB(HeadMBB)
+ .addReg(SelectMBBI->getOperand(5).getReg())
+ .addMBB(IfFalseMBB);
+ SelectMBBI->eraseFromParent();
+ }
+ SelectMBBI = Next;
+ }
+
+ F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
+ return TailMBB;
+}
+
+MachineBasicBlock *
+RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *BB) const {
+ switch (MI.getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected instr type to insert");
+ case RISCV::ReadCycleWide:
+    assert(!Subtarget.is64Bit() &&
+           "ReadCycleWide is only to be used on riscv32");
+ return emitReadCycleWidePseudo(MI, BB);
+ case RISCV::Select_GPR_Using_CC_GPR:
+ case RISCV::Select_FPR32_Using_CC_GPR:
+ case RISCV::Select_FPR64_Using_CC_GPR:
+ return emitSelectPseudo(MI, BB);
+ case RISCV::BuildPairF64Pseudo:
+ return emitBuildPairF64Pseudo(MI, BB);
+ case RISCV::SplitF64Pseudo:
+ return emitSplitF64Pseudo(MI, BB);
+ }
+}
+
+// Calling Convention Implementation.
+// The expectations for frontend ABI lowering vary from target to target.
+// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
+// details, but this is a longer term goal. For now, we simply try to keep the
+// role of the frontend as simple and well-defined as possible. The rules can
+// be summarised as:
+// * Never split up large scalar arguments. We handle them here.
+// * If a hardfloat calling convention is being used, and the struct may be
+// passed in a pair of registers (fp+fp, int+fp), and both registers are
+// available, then pass as two separate arguments. If either the GPRs or FPRs
+// are exhausted, then pass according to the rule below.
+// * If a struct could never be passed in registers or directly in a stack
+// slot (as it is larger than 2*XLEN and the floating point rules don't
+// apply), then pass it using a pointer with the byval attribute.
+// * If a struct is less than 2*XLEN, then coerce to either a two-element
+// word-sized array or a 2*XLEN scalar (depending on alignment).
+// * The frontend can determine whether a struct is returned by reference or
+// not based on its size and fields. If it will be returned by reference, the
+// frontend must modify the prototype so a pointer with the sret annotation is
+// passed as the first argument. This is not necessary for large scalar
+// returns.
+// * Struct return values and varargs should be coerced to structs containing
+// register-size fields in the same situations they would be for fixed
+// arguments.
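+//
+// For example, under ilp32 a 6-byte struct (under 2*XLEN) would typically be
+// coerced by the frontend to a two-element i32 array, while a 24-byte struct
+// with no floating-point fields (over 2*XLEN) would be passed by pointer
+// with the byval attribute.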
+
+static const MCPhysReg ArgGPRs[] = {
+ RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
+ RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
+};
+static const MCPhysReg ArgFPR32s[] = {
+ RISCV::F10_32, RISCV::F11_32, RISCV::F12_32, RISCV::F13_32,
+ RISCV::F14_32, RISCV::F15_32, RISCV::F16_32, RISCV::F17_32
+};
+static const MCPhysReg ArgFPR64s[] = {
+ RISCV::F10_64, RISCV::F11_64, RISCV::F12_64, RISCV::F13_64,
+ RISCV::F14_64, RISCV::F15_64, RISCV::F16_64, RISCV::F17_64
+};
+
+// Pass a 2*XLEN argument that has been split into two XLEN values through
+// registers or the stack as necessary.
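+// For example, an i64 argument on RV32 arrives here as two i32 halves; the
+// pair may end up in two GPRs, split between a GPR and a stack slot, or
+// placed entirely on the stack.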
+static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
+ ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
+ MVT ValVT2, MVT LocVT2,
+ ISD::ArgFlagsTy ArgFlags2) {
+ unsigned XLenInBytes = XLen / 8;
+ if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
+ // At least one half can be passed via register.
+ State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
+ VA1.getLocVT(), CCValAssign::Full));
+ } else {
+ // Both halves must be passed on the stack, with proper alignment.
+ unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign());
+ State.addLoc(
+ CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
+ State.AllocateStack(XLenInBytes, StackAlign),
+ VA1.getLocVT(), CCValAssign::Full));
+ State.addLoc(CCValAssign::getMem(
+ ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
+ CCValAssign::Full));
+ return false;
+ }
+
+ if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
+ // The second half can also be passed via register.
+ State.addLoc(
+ CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
+ } else {
+ // The second half is passed via the stack, without additional alignment.
+ State.addLoc(CCValAssign::getMem(
+ ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
+ CCValAssign::Full));
+ }
+
+ return false;
+}
+
+// Implements the RISC-V calling convention. Returns true upon failure.
+static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
+ MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
+ bool IsRet, Type *OrigTy) {
+ unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
+ assert(XLen == 32 || XLen == 64);
+ MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
+
+ // Any return value split in to more than two values can't be returned
+ // directly.
+ if (IsRet && ValNo > 1)
+ return true;
+
+  // UseGPRForF32 is true if targeting one of the soft-float ABIs, if passing
+  // a variadic argument, or if no F32 argument registers are available.
+  bool UseGPRForF32 = true;
+  // UseGPRForF64 is true if targeting a soft-float ABI or an FLEN=32 ABI, if
+  // passing a variadic argument, or if no F64 argument registers are
+  // available.
+  bool UseGPRForF64 = true;
+
+ switch (ABI) {
+ default:
+ llvm_unreachable("Unexpected ABI");
+ case RISCVABI::ABI_ILP32:
+ case RISCVABI::ABI_LP64:
+ break;
+ case RISCVABI::ABI_ILP32F:
+ case RISCVABI::ABI_LP64F:
+ UseGPRForF32 = !IsFixed;
+ break;
+ case RISCVABI::ABI_ILP32D:
+ case RISCVABI::ABI_LP64D:
+ UseGPRForF32 = !IsFixed;
+ UseGPRForF64 = !IsFixed;
+ break;
+ }
+
+ if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s))
+ UseGPRForF32 = true;
+ if (State.getFirstUnallocated(ArgFPR64s) == array_lengthof(ArgFPR64s))
+ UseGPRForF64 = true;
+
+ // From this point on, rely on UseGPRForF32, UseGPRForF64 and similar local
+ // variables rather than directly checking against the target ABI.
+
+ if (UseGPRForF32 && ValVT == MVT::f32) {
+ LocVT = XLenVT;
+ LocInfo = CCValAssign::BCvt;
+ } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
+ LocVT = MVT::i64;
+ LocInfo = CCValAssign::BCvt;
+ }
+
+ // If this is a variadic argument, the RISC-V calling convention requires
+ // that it is assigned an 'even' or 'aligned' register if it has 8-byte
+ // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
+ // be used regardless of whether the original argument was split during
+ // legalisation or not. The argument will not be passed by registers if the
+ // original type is larger than 2*XLEN, so the register alignment rule does
+ // not apply.
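+  //
+  // For example, a variadic double on RV32 has 8-byte size and 8-byte
+  // alignment, so if a1 were the next free register it would be skipped and
+  // the value assigned to the aligned pair a2/a3.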
+ unsigned TwoXLenInBytes = (2 * XLen) / 8;
+ if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes &&
+ DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
+ unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
+ // Skip 'odd' register if necessary.
+ if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
+ State.AllocateReg(ArgGPRs);
+ }
+
+ SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
+ SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
+ State.getPendingArgFlags();
+
+ assert(PendingLocs.size() == PendingArgFlags.size() &&
+ "PendingLocs and PendingArgFlags out of sync");
+
+ // Handle passing f64 on RV32D with a soft float ABI or when floating point
+ // registers are exhausted.
+ if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
+ assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
+ "Can't lower f64 if it is split");
+    // Depending on available argument GPRs, f64 may be passed in a pair of
+ // GPRs, split between a GPR and the stack, or passed completely on the
+ // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
+ // cases.
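+    // For example, if a7 is the only remaining argument GPR, the low half is
+    // passed in a7 and the high half at the start of the stack argument area
+    // (see unpackF64OnRV32DSoftABI).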
+ unsigned Reg = State.AllocateReg(ArgGPRs);
+ LocVT = MVT::i32;
+ if (!Reg) {
+ unsigned StackOffset = State.AllocateStack(8, 8);
+ State.addLoc(
+ CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
+ return false;
+ }
+ if (!State.AllocateReg(ArgGPRs))
+ State.AllocateStack(4, 4);
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+
+ // Split arguments might be passed indirectly, so keep track of the pending
+ // values.
+ if (ArgFlags.isSplit() || !PendingLocs.empty()) {
+ LocVT = XLenVT;
+ LocInfo = CCValAssign::Indirect;
+ PendingLocs.push_back(
+ CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
+ PendingArgFlags.push_back(ArgFlags);
+ if (!ArgFlags.isSplitEnd()) {
+ return false;
+ }
+ }
+
+ // If the split argument only had two elements, it should be passed directly
+ // in registers or on the stack.
+ if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
+ assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
+ // Apply the normal calling convention rules to the first half of the
+ // split argument.
+ CCValAssign VA = PendingLocs[0];
+ ISD::ArgFlagsTy AF = PendingArgFlags[0];
+ PendingLocs.clear();
+ PendingArgFlags.clear();
+ return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
+ ArgFlags);
+ }
+
+ // Allocate to a register if possible, or else a stack slot.
+ unsigned Reg;
+ if (ValVT == MVT::f32 && !UseGPRForF32)
+ Reg = State.AllocateReg(ArgFPR32s, ArgFPR64s);
+ else if (ValVT == MVT::f64 && !UseGPRForF64)
+ Reg = State.AllocateReg(ArgFPR64s, ArgFPR32s);
+ else
+ Reg = State.AllocateReg(ArgGPRs);
+ unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8);
+
+ // If we reach this point and PendingLocs is non-empty, we must be at the
+ // end of a split argument that must be passed indirectly.
+ if (!PendingLocs.empty()) {
+ assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
+ assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
+
+ for (auto &It : PendingLocs) {
+ if (Reg)
+ It.convertToReg(Reg);
+ else
+ It.convertToMem(StackOffset);
+ State.addLoc(It);
+ }
+ PendingLocs.clear();
+ PendingArgFlags.clear();
+ return false;
+ }
+
+ assert((!UseGPRForF32 || !UseGPRForF64 || LocVT == XLenVT) &&
+ "Expected an XLenVT at this stage");
+
+ if (Reg) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+
+ // When an f32 or f64 is passed on the stack, no bit-conversion is needed.
+ if (ValVT == MVT::f32 || ValVT == MVT::f64) {
+ LocVT = ValVT;
+ LocInfo = CCValAssign::Full;
+ }
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
+ return false;
+}
+
+void RISCVTargetLowering::analyzeInputArgs(
+ MachineFunction &MF, CCState &CCInfo,
+ const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
+ unsigned NumArgs = Ins.size();
+ FunctionType *FType = MF.getFunction().getFunctionType();
+
+ for (unsigned i = 0; i != NumArgs; ++i) {
+ MVT ArgVT = Ins[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
+
+ Type *ArgTy = nullptr;
+ if (IsRet)
+ ArgTy = FType->getReturnType();
+ else if (Ins[i].isOrigArg())
+ ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
+
+ RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
+    if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
+                 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
+ LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
+ << EVT(ArgVT).getEVTString() << '\n');
+ llvm_unreachable(nullptr);
+ }
+ }
+}
+
+void RISCVTargetLowering::analyzeOutputArgs(
+ MachineFunction &MF, CCState &CCInfo,
+ const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
+ CallLoweringInfo *CLI) const {
+ unsigned NumArgs = Outs.size();
+
+ for (unsigned i = 0; i != NumArgs; i++) {
+ MVT ArgVT = Outs[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
+
+ RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
+ if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
+ ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
+ LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
+ << EVT(ArgVT).getEVTString() << "\n");
+ llvm_unreachable(nullptr);
+ }
+ }
+}
+
+// Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
+// values.
+static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
+ const CCValAssign &VA, const SDLoc &DL) {
+ switch (VA.getLocInfo()) {
+ default:
+ llvm_unreachable("Unexpected CCValAssign::LocInfo");
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::BCvt:
+ if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
+ Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
+ break;
+ }
+ Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
+ break;
+ }
+ return Val;
+}
+
+// The caller is responsible for loading the full value if the argument is
+// passed with CCValAssign::Indirect.
+static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
+ const CCValAssign &VA, const SDLoc &DL) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+ EVT LocVT = VA.getLocVT();
+ SDValue Val;
+ const TargetRegisterClass *RC;
+
+ switch (LocVT.getSimpleVT().SimpleTy) {
+ default:
+ llvm_unreachable("Unexpected register type");
+ case MVT::i32:
+ case MVT::i64:
+ RC = &RISCV::GPRRegClass;
+ break;
+ case MVT::f32:
+ RC = &RISCV::FPR32RegClass;
+ break;
+ case MVT::f64:
+ RC = &RISCV::FPR64RegClass;
+ break;
+ }
+
+ unsigned VReg = RegInfo.createVirtualRegister(RC);
+ RegInfo.addLiveIn(VA.getLocReg(), VReg);
+ Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
+
+ if (VA.getLocInfo() == CCValAssign::Indirect)
+ return Val;
+
+ return convertLocVTToValVT(DAG, Val, VA, DL);
+}
+
+static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
+ const CCValAssign &VA, const SDLoc &DL) {
+ EVT LocVT = VA.getLocVT();
+
+ switch (VA.getLocInfo()) {
+ default:
+ llvm_unreachable("Unexpected CCValAssign::LocInfo");
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::BCvt:
+ if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
+ Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
+ break;
+ }
+ Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
+ break;
+ }
+ return Val;
+}
+
+// The caller is responsible for loading the full value if the argument is
+// passed with CCValAssign::Indirect.
+static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
+ const CCValAssign &VA, const SDLoc &DL) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ EVT LocVT = VA.getLocVT();
+ EVT ValVT = VA.getValVT();
+ EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
+ int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
+ VA.getLocMemOffset(), /*Immutable=*/true);
+ SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
+ SDValue Val;
+
+ ISD::LoadExtType ExtType;
+ switch (VA.getLocInfo()) {
+ default:
+ llvm_unreachable("Unexpected CCValAssign::LocInfo");
+ case CCValAssign::Full:
+ case CCValAssign::Indirect:
+ case CCValAssign::BCvt:
+ ExtType = ISD::NON_EXTLOAD;
+ break;
+ }
+ Val = DAG.getExtLoad(
+ ExtType, DL, LocVT, Chain, FIN,
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
+ return Val;
+}
+
+static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
+ const CCValAssign &VA, const SDLoc &DL) {
+ assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
+ "Unexpected VA");
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+
+ if (VA.isMemLoc()) {
+ // f64 is passed on the stack.
+ int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
+ SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+ return DAG.getLoad(MVT::f64, DL, Chain, FIN,
+ MachinePointerInfo::getFixedStack(MF, FI));
+ }
+
+ assert(VA.isRegLoc() && "Expected register VA assignment");
+
+ unsigned LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
+ RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
+ SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
+ SDValue Hi;
+ if (VA.getLocReg() == RISCV::X17) {
+ // Second half of f64 is passed on the stack.
+ int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
+ SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+ Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
+ MachinePointerInfo::getFixedStack(MF, FI));
+ } else {
+ // Second half of f64 is passed in another GPR.
+ unsigned HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
+ RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
+ Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
+ }
+ return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
+}
+
+// Transform physical registers into virtual registers.
+SDValue RISCVTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+
+ switch (CallConv) {
+ default:
+ report_fatal_error("Unsupported calling convention");
+ case CallingConv::C:
+ case CallingConv::Fast:
+ break;
+ }
+
+ MachineFunction &MF = DAG.getMachineFunction();
+
+ const Function &Func = MF.getFunction();
+ if (Func.hasFnAttribute("interrupt")) {
+ if (!Func.arg_empty())
+ report_fatal_error(
+ "Functions with the interrupt attribute cannot have arguments!");
+
+ StringRef Kind =
+ MF.getFunction().getFnAttribute("interrupt").getValueAsString();
+
+ if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
+ report_fatal_error(
+ "Function interrupt attribute argument not supported!");
+ }
+
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
+ MVT XLenVT = Subtarget.getXLenVT();
+ unsigned XLenInBytes = Subtarget.getXLen() / 8;
+  // Used with varargs to accumulate store chains.
+ std::vector<SDValue> OutChains;
+
+ // Assign locations to all of the incoming arguments.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
+ analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
+
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ SDValue ArgValue;
+ // Passing f64 on RV32D with a soft float ABI must be handled as a special
+ // case.
+ if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
+ ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
+ else if (VA.isRegLoc())
+ ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
+ else
+ ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
+
+ if (VA.getLocInfo() == CCValAssign::Indirect) {
+ // If the original argument was split and passed by reference (e.g. i128
+ // on RV32), we need to load all parts of it here (using the same
+ // address).
+ InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
+ MachinePointerInfo()));
+ unsigned ArgIndex = Ins[i].OrigArgIndex;
+ assert(Ins[i].PartOffset == 0);
+ while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
+ CCValAssign &PartVA = ArgLocs[i + 1];
+ unsigned PartOffset = Ins[i + 1].PartOffset;
+ SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
+ DAG.getIntPtrConstant(PartOffset, DL));
+ InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
+ MachinePointerInfo()));
+ ++i;
+ }
+ continue;
+ }
+ InVals.push_back(ArgValue);
+ }
+
+ if (IsVarArg) {
+ ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
+ unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
+ const TargetRegisterClass *RC = &RISCV::GPRRegClass;
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+ RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+
+ // Offset of the first variable argument from stack pointer, and size of
+ // the vararg save area. For now, the varargs save area is either zero or
+ // large enough to hold a0-a7.
+ int VaArgOffset, VarArgsSaveSize;
+
+ // If all registers are allocated, then all varargs must be passed on the
+ // stack and we don't need to save any argregs.
+ if (ArgRegs.size() == Idx) {
+ VaArgOffset = CCInfo.getNextStackOffset();
+ VarArgsSaveSize = 0;
+ } else {
+ VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
+ VaArgOffset = -VarArgsSaveSize;
+ }
+
+  // Record the frame index of the first variable argument, which is a value
+  // needed by VASTART.
+ int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
+ RVFI->setVarArgsFrameIndex(FI);
+
+  // If saving an odd number of registers, create an extra stack slot to
+  // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
+  // that offsets to even-numbered registers remain 2*XLEN-aligned.
+ if (Idx % 2) {
+ FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes,
+ true);
+ VarArgsSaveSize += XLenInBytes;
+ }
+
+ // Copy the integer registers that may have been used for passing varargs
+ // to the vararg save area.
+ for (unsigned I = Idx; I < ArgRegs.size();
+ ++I, VaArgOffset += XLenInBytes) {
+ const unsigned Reg = RegInfo.createVirtualRegister(RC);
+ RegInfo.addLiveIn(ArgRegs[I], Reg);
+ SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
+ FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
+ SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
+ SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
+ MachinePointerInfo::getFixedStack(MF, FI));
+ cast<StoreSDNode>(Store.getNode())
+ ->getMemOperand()
+ ->setValue((Value *)nullptr);
+ OutChains.push_back(Store);
+ }
+ RVFI->setVarArgsSaveSize(VarArgsSaveSize);
+ }
+
+  // All stores are grouped into one node so that the sizes of Ins and InVals
+  // can be matched. This only happens for vararg functions.
+ if (!OutChains.empty()) {
+ OutChains.push_back(Chain);
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
+ }
+
+ return Chain;
+}
+
+/// isEligibleForTailCallOptimization - Check whether the call is eligible
+/// for tail call optimization.
+/// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
+bool RISCVTargetLowering::isEligibleForTailCallOptimization(
+ CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
+ const SmallVector<CCValAssign, 16> &ArgLocs) const {
+
+ auto &Callee = CLI.Callee;
+ auto CalleeCC = CLI.CallConv;
+ auto IsVarArg = CLI.IsVarArg;
+ auto &Outs = CLI.Outs;
+ auto &Caller = MF.getFunction();
+ auto CallerCC = Caller.getCallingConv();
+
+ // Do not tail call opt functions with "disable-tail-calls" attribute.
+ if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
+ return false;
+
+ // Exception-handling functions need a special set of instructions to
+ // indicate a return to the hardware. Tail-calling another function would
+ // probably break this.
+ // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
+ // should be expanded as new function attributes are introduced.
+ if (Caller.hasFnAttribute("interrupt"))
+ return false;
+
+ // Do not tail call opt functions with varargs.
+ if (IsVarArg)
+ return false;
+
+ // Do not tail call opt if the stack is used to pass parameters.
+ if (CCInfo.getNextStackOffset() != 0)
+ return false;
+
+  // Do not tail call opt if any parameters need to be passed indirectly.
+  // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
+  // passed indirectly. The address of the value is then passed in a register,
+  // or, if no register is free, on the stack. Passing a value indirectly
+  // often requires allocating stack space to hold it, so the
+  // CCInfo.getNextStackOffset() != 0 check alone is not enough; we must also
+  // check whether any of the CCValAssigns in ArgLocs are passed
+  // CCValAssign::Indirect.
+ for (auto &VA : ArgLocs)
+ if (VA.getLocInfo() == CCValAssign::Indirect)
+ return false;
+
+ // Do not tail call opt if either caller or callee uses struct return
+ // semantics.
+ auto IsCallerStructRet = Caller.hasStructRetAttr();
+ auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
+ if (IsCallerStructRet || IsCalleeStructRet)
+ return false;
+
+ // Externally-defined functions with weak linkage should not be
+ // tail-called. The behaviour of branch instructions in this situation (as
+ // used for tail calls) is implementation-defined, so we cannot rely on the
+ // linker replacing the tail call with a return.
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ const GlobalValue *GV = G->getGlobal();
+ if (GV->hasExternalWeakLinkage())
+ return false;
+ }
+
+ // The callee has to preserve all registers the caller needs to preserve.
+ const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
+ const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
+ if (CalleeCC != CallerCC) {
+ const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
+ if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
+ return false;
+ }
+
+ // Byval parameters hand the function a pointer directly into the stack area
+ // we want to reuse during a tail call. Working around this *is* possible
+ // but less efficient and uglier in LowerCall.
+ for (auto &Arg : Outs)
+ if (Arg.Flags.isByVal())
+ return false;
+
+ return true;
+}
+
+// Lower a call to a callseq_start + CALL + callseq_end chain, and add input
+// and output parameter nodes.
+SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ SDLoc &DL = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &IsTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool IsVarArg = CLI.IsVarArg;
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
+ MVT XLenVT = Subtarget.getXLenVT();
+
+ MachineFunction &MF = DAG.getMachineFunction();
+
+ // Analyze the operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
+ analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
+
+ // Check if it's really possible to do a tail call.
+ if (IsTailCall)
+ IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
+
+ if (IsTailCall)
+ ++NumTailCalls;
+ else if (CLI.CS && CLI.CS.isMustTailCall())
+ report_fatal_error("failed to perform tail call elimination on a call "
+ "site marked musttail");
+
+ // Get a count of how many bytes are to be pushed on the stack.
+ unsigned NumBytes = ArgCCInfo.getNextStackOffset();
+
+ // Create local copies for byval args
+ SmallVector<SDValue, 8> ByValArgs;
+ for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+ if (!Flags.isByVal())
+ continue;
+
+ SDValue Arg = OutVals[i];
+ unsigned Size = Flags.getByValSize();
+ unsigned Align = Flags.getByValAlign();
+
+ int FI = MF.getFrameInfo().CreateStackObject(Size, Align, /*isSS=*/false);
+ SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
+ SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
+
+ Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Align,
+ /*IsVolatile=*/false,
+ /*AlwaysInline=*/false,
+ IsTailCall, MachinePointerInfo(),
+ MachinePointerInfo());
+ ByValArgs.push_back(FIPtr);
+ }
+
+ if (!IsTailCall)
+ Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
+
+ // Copy argument values to their designated locations.
+ SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
+ SmallVector<SDValue, 8> MemOpChains;
+ SDValue StackPtr;
+ for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ SDValue ArgValue = OutVals[i];
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+
+ // Handle passing f64 on RV32D with a soft float ABI as a special case.
+ bool IsF64OnRV32DSoftABI =
+ VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
+ if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
+ SDValue SplitF64 = DAG.getNode(
+ RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
+ SDValue Lo = SplitF64.getValue(0);
+ SDValue Hi = SplitF64.getValue(1);
+
+ unsigned RegLo = VA.getLocReg();
+ RegsToPass.push_back(std::make_pair(RegLo, Lo));
+
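+      // X17 (a7) is the last GPR argument register, so if the low half took
+      // it, the high half must go on the stack rather than in another GPR.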
+ if (RegLo == RISCV::X17) {
+ // Second half of f64 is passed on the stack.
+ // Work out the address of the stack slot.
+ if (!StackPtr.getNode())
+ StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
+ // Emit the store.
+ MemOpChains.push_back(
+ DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
+ } else {
+ // Second half of f64 is passed in another GPR.
+ unsigned RegHigh = RegLo + 1;
+ RegsToPass.push_back(std::make_pair(RegHigh, Hi));
+ }
+ continue;
+ }
+
+ // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
+ // as any other MemLoc.
+
+ // Promote the value if needed.
+ // For now, only handle fully promoted and indirect arguments.
+ if (VA.getLocInfo() == CCValAssign::Indirect) {
+ // Store the argument in a stack slot and pass its address.
+ SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
+ int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
+ MemOpChains.push_back(
+ DAG.getStore(Chain, DL, ArgValue, SpillSlot,
+ MachinePointerInfo::getFixedStack(MF, FI)));
+ // If the original argument was split (e.g. i128), we need
+ // to store all parts of it here (and pass just one address).
+ unsigned ArgIndex = Outs[i].OrigArgIndex;
+ assert(Outs[i].PartOffset == 0);
+ while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
+ SDValue PartValue = OutVals[i + 1];
+ unsigned PartOffset = Outs[i + 1].PartOffset;
+ SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
+ DAG.getIntPtrConstant(PartOffset, DL));
+ MemOpChains.push_back(
+ DAG.getStore(Chain, DL, PartValue, Address,
+ MachinePointerInfo::getFixedStack(MF, FI)));
+ ++i;
+ }
+ ArgValue = SpillSlot;
+ } else {
+ ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
+ }
+
+ // Use local copy if it is a byval arg.
+ if (Flags.isByVal())
+ ArgValue = ByValArgs[j++];
+
+ if (VA.isRegLoc()) {
+ // Queue up the argument copies and emit them at the end.
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
+ } else {
+ assert(VA.isMemLoc() && "Argument not register or memory");
+ assert(!IsTailCall && "Tail call not allowed if stack is used "
+ "for passing parameters");
+
+ // Work out the address of the stack slot.
+ if (!StackPtr.getNode())
+ StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
+ SDValue Address =
+ DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
+ DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
+
+ // Emit the store.
+ MemOpChains.push_back(
+ DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
+ }
+ }
+
+ // Join the stores, which are independent of one another.
+ if (!MemOpChains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
+
+ SDValue Glue;
+
+ // Build a sequence of copy-to-reg nodes, chained and glued together.
+ for (auto &Reg : RegsToPass) {
+ Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
+ Glue = Chain.getValue(1);
+ }
+
+ // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
+ // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
+ // split it and then direct call can be matched by PseudoCALL.
+ if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ const GlobalValue *GV = S->getGlobal();
+
+ unsigned OpFlags = RISCVII::MO_CALL;
+ if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
+ OpFlags = RISCVII::MO_PLT;
+
+ Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
+ } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ unsigned OpFlags = RISCVII::MO_CALL;
+
+ if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
+ nullptr))
+ OpFlags = RISCVII::MO_PLT;
+
+ Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
+ }
+
+ // The first call operand is the chain and the second is the target address.
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+
+ // Add argument registers to the end of the list so that they are
+ // known live into the call.
+ for (auto &Reg : RegsToPass)
+ Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
+
+ if (!IsTailCall) {
+ // Add a register mask operand representing the call-preserved registers.
+ const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
+ const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
+ }
+
+ // Glue the call to the argument copies, if any.
+ if (Glue.getNode())
+ Ops.push_back(Glue);
+
+ // Emit the call.
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+
+ if (IsTailCall) {
+ MF.getFrameInfo().setHasTailCall();
+ return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
+ }
+
+ Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
+ Glue = Chain.getValue(1);
+
+ // Mark the end of the call, which is glued to the call itself.
+ Chain = DAG.getCALLSEQ_END(Chain,
+ DAG.getConstant(NumBytes, DL, PtrVT, true),
+ DAG.getConstant(0, DL, PtrVT, true),
+ Glue, DL);
+ Glue = Chain.getValue(1);
+
+ // Assign locations to each value returned by this call.
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
+ analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);
+
+ // Copy all of the result registers out of their specified physreg.
+ for (auto &VA : RVLocs) {
+ // Copy the value out
+ SDValue RetValue =
+ DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
+ // Glue the RetValue to the end of the call sequence
+ Chain = RetValue.getValue(1);
+ Glue = RetValue.getValue(2);
+
+ if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
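+      // An f64 returned on RV32 with a soft-float ABI comes back in a0 (low
+      // half) and a1 (high half); fetch the high half and repack the f64.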
+ assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
+ SDValue RetValue2 =
+ DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
+ Chain = RetValue2.getValue(1);
+ Glue = RetValue2.getValue(2);
+ RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
+ RetValue2);
+ }
+
+ RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);
+
+ InVals.push_back(RetValue);
+ }
+
+ return Chain;
+}
+
+bool RISCVTargetLowering::CanLowerReturn(
+ CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
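+  // Run the return-value convention over each value; if CC_RISCV cannot
+  // assign one (e.g. it would need the stack), returning false causes the
+  // return value to be demoted to an sret argument instead.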
+ for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
+ MVT VT = Outs[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
+ if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
+ ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
+ return false;
+ }
+ return true;
+}
+
+SDValue
+RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &DL, SelectionDAG &DAG) const {
+ // Stores the assignment of the return value to a location.
+ SmallVector<CCValAssign, 16> RVLocs;
+
+ // Info about the registers and stack slot.
+ CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
+ *DAG.getContext());
+
+ analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
+ nullptr);
+
+ SDValue Glue;
+ SmallVector<SDValue, 4> RetOps(1, Chain);
+
+ // Copy the result values into the output registers.
+ for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
+ SDValue Val = OutVals[i];
+ CCValAssign &VA = RVLocs[i];
+ assert(VA.isRegLoc() && "Can only return in registers!");
+
+ if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
+ // Handle returning f64 on RV32D with a soft float ABI.
+ assert(VA.isRegLoc() && "Expected return via registers");
+ SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
+ DAG.getVTList(MVT::i32, MVT::i32), Val);
+ SDValue Lo = SplitF64.getValue(0);
+ SDValue Hi = SplitF64.getValue(1);
+ unsigned RegLo = VA.getLocReg();
+ unsigned RegHi = RegLo + 1;
+ Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
+ Glue = Chain.getValue(1);
+ RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
+ Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
+ Glue = Chain.getValue(1);
+ RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
+ } else {
+ // Handle a 'normal' return.
+ Val = convertValVTToLocVT(DAG, Val, VA, DL);
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
+
+ // Guarantee that all emitted copies are stuck together.
+ Glue = Chain.getValue(1);
+ RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
+ }
+ }
+
+ RetOps[0] = Chain; // Update chain.
+
+ // Add the glue node if we have it.
+ if (Glue.getNode()) {
+ RetOps.push_back(Glue);
+ }
+
+ // Interrupt service routines use different return instructions.
+ const Function &Func = DAG.getMachineFunction().getFunction();
+ if (Func.hasFnAttribute("interrupt")) {
+ if (!Func.getReturnType()->isVoidTy())
+ report_fatal_error(
+ "Functions with the interrupt attribute must have void return type!");
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ StringRef Kind =
+ MF.getFunction().getFnAttribute("interrupt").getValueAsString();
+
+ unsigned RetOpc;
+ if (Kind == "user")
+ RetOpc = RISCVISD::URET_FLAG;
+ else if (Kind == "supervisor")
+ RetOpc = RISCVISD::SRET_FLAG;
+ else
+ RetOpc = RISCVISD::MRET_FLAG;
+
+ return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
+ }
+
+ return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
+}
+
+const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
+ switch ((RISCVISD::NodeType)Opcode) {
+ case RISCVISD::FIRST_NUMBER:
+ break;
+ case RISCVISD::RET_FLAG:
+ return "RISCVISD::RET_FLAG";
+ case RISCVISD::URET_FLAG:
+ return "RISCVISD::URET_FLAG";
+ case RISCVISD::SRET_FLAG:
+ return "RISCVISD::SRET_FLAG";
+ case RISCVISD::MRET_FLAG:
+ return "RISCVISD::MRET_FLAG";
+ case RISCVISD::CALL:
+ return "RISCVISD::CALL";
+ case RISCVISD::SELECT_CC:
+ return "RISCVISD::SELECT_CC";
+ case RISCVISD::BuildPairF64:
+ return "RISCVISD::BuildPairF64";
+ case RISCVISD::SplitF64:
+ return "RISCVISD::SplitF64";
+ case RISCVISD::TAIL:
+ return "RISCVISD::TAIL";
+ case RISCVISD::SLLW:
+ return "RISCVISD::SLLW";
+ case RISCVISD::SRAW:
+ return "RISCVISD::SRAW";
+ case RISCVISD::SRLW:
+ return "RISCVISD::SRLW";
+ case RISCVISD::DIVW:
+ return "RISCVISD::DIVW";
+ case RISCVISD::DIVUW:
+ return "RISCVISD::DIVUW";
+ case RISCVISD::REMUW:
+ return "RISCVISD::REMUW";
+ case RISCVISD::FMV_W_X_RV64:
+ return "RISCVISD::FMV_W_X_RV64";
+ case RISCVISD::FMV_X_ANYEXTW_RV64:
+ return "RISCVISD::FMV_X_ANYEXTW_RV64";
+ case RISCVISD::READ_CYCLE_WIDE:
+ return "RISCVISD::READ_CYCLE_WIDE";
+ }
+ return nullptr;
+}
+
+/// getConstraintType - Given a constraint letter, return the type of
+/// constraint it is for this target.
+RISCVTargetLowering::ConstraintType
+RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ default:
+ break;
+ case 'f':
+ return C_RegisterClass;
+ case 'I':
+ case 'J':
+ case 'K':
+ return C_Immediate;
+ case 'A':
+ return C_Memory;
+ }
+ }
+ return TargetLowering::getConstraintType(Constraint);
+}
+
+std::pair<unsigned, const TargetRegisterClass *>
+RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ StringRef Constraint,
+ MVT VT) const {
+ // First, see if this is a constraint that directly corresponds to a
+ // RISCV register class.
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ case 'r':
+ return std::make_pair(0U, &RISCV::GPRRegClass);
+ case 'f':
+ if (Subtarget.hasStdExtF() && VT == MVT::f32)
+ return std::make_pair(0U, &RISCV::FPR32RegClass);
+ if (Subtarget.hasStdExtD() && VT == MVT::f64)
+ return std::make_pair(0U, &RISCV::FPR64RegClass);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+}
+
+unsigned
+RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
+ // Currently only support length 1 constraints.
+ if (ConstraintCode.size() == 1) {
+ switch (ConstraintCode[0]) {
+ case 'A':
+ return InlineAsm::Constraint_A;
+ default:
+ break;
+ }
+ }
+
+ return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
+}
+
+void RISCVTargetLowering::LowerAsmOperandForConstraint(
+ SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const {
+ // Currently only support length 1 constraints.
+ if (Constraint.length() == 1) {
+ switch (Constraint[0]) {
+ case 'I':
+ // Validate & create a 12-bit signed immediate operand.
+ if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
+ uint64_t CVal = C->getSExtValue();
+ if (isInt<12>(CVal))
+ Ops.push_back(
+ DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
+ }
+ return;
+ case 'J':
+ // Validate & create an integer zero operand.
+ if (auto *C = dyn_cast<ConstantSDNode>(Op))
+ if (C->getZExtValue() == 0)
+ Ops.push_back(
+ DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
+ return;
+ case 'K':
+ // Validate & create a 5-bit unsigned immediate operand.
+ if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
+ uint64_t CVal = C->getZExtValue();
+ if (isUInt<5>(CVal))
+ Ops.push_back(
+ DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
+ }
+ return;
+ default:
+ break;
+ }
+ }
+ TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
+}
+
+Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
+ Instruction *Inst,
+ AtomicOrdering Ord) const {
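+  // Atomic loads and stores are surrounded with fences (see
+  // shouldInsertFencesForAtomic): a seq_cst load needs a leading fence, and
+  // a release-or-stronger store needs a leading release fence.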
+ if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
+ return Builder.CreateFence(Ord);
+ if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
+ return Builder.CreateFence(AtomicOrdering::Release);
+ return nullptr;
+}
+
+Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
+ Instruction *Inst,
+ AtomicOrdering Ord) const {
+ if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
+ return Builder.CreateFence(AtomicOrdering::Acquire);
+ return nullptr;
+}
+
+TargetLowering::AtomicExpansionKind
+RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+ // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
+ // point operations can't be used in an lr/sc sequence without breaking the
+ // forward-progress guarantee.
+ if (AI->isFloatingPointOperation())
+ return AtomicExpansionKind::CmpXChg;
+
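+  // LR/SC only operate on naturally-aligned 32/64-bit words, so 8- and
+  // 16-bit atomicrmw operations are expanded to a masked operation on the
+  // word containing them.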
+ unsigned Size = AI->getType()->getPrimitiveSizeInBits();
+ if (Size == 8 || Size == 16)
+ return AtomicExpansionKind::MaskedIntrinsic;
+ return AtomicExpansionKind::None;
+}
+
+static Intrinsic::ID
+getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
+ if (XLen == 32) {
+ switch (BinOp) {
+ default:
+ llvm_unreachable("Unexpected AtomicRMW BinOp");
+ case AtomicRMWInst::Xchg:
+ return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
+ case AtomicRMWInst::Add:
+ return Intrinsic::riscv_masked_atomicrmw_add_i32;
+ case AtomicRMWInst::Sub:
+ return Intrinsic::riscv_masked_atomicrmw_sub_i32;
+ case AtomicRMWInst::Nand:
+ return Intrinsic::riscv_masked_atomicrmw_nand_i32;
+ case AtomicRMWInst::Max:
+ return Intrinsic::riscv_masked_atomicrmw_max_i32;
+ case AtomicRMWInst::Min:
+ return Intrinsic::riscv_masked_atomicrmw_min_i32;
+ case AtomicRMWInst::UMax:
+ return Intrinsic::riscv_masked_atomicrmw_umax_i32;
+ case AtomicRMWInst::UMin:
+ return Intrinsic::riscv_masked_atomicrmw_umin_i32;
+ }
+ }
+
+ if (XLen == 64) {
+ switch (BinOp) {
+ default:
+ llvm_unreachable("Unexpected AtomicRMW BinOp");
+ case AtomicRMWInst::Xchg:
+ return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
+ case AtomicRMWInst::Add:
+ return Intrinsic::riscv_masked_atomicrmw_add_i64;
+ case AtomicRMWInst::Sub:
+ return Intrinsic::riscv_masked_atomicrmw_sub_i64;
+ case AtomicRMWInst::Nand:
+ return Intrinsic::riscv_masked_atomicrmw_nand_i64;
+ case AtomicRMWInst::Max:
+ return Intrinsic::riscv_masked_atomicrmw_max_i64;
+ case AtomicRMWInst::Min:
+ return Intrinsic::riscv_masked_atomicrmw_min_i64;
+ case AtomicRMWInst::UMax:
+ return Intrinsic::riscv_masked_atomicrmw_umax_i64;
+ case AtomicRMWInst::UMin:
+ return Intrinsic::riscv_masked_atomicrmw_umin_i64;
+ }
+ }
+
+ llvm_unreachable("Unexpected XLen\n");
+}
+
+Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
+ IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
+ Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
+ unsigned XLen = Subtarget.getXLen();
+ Value *Ordering =
+ Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
+ Type *Tys[] = {AlignedAddr->getType()};
+ Function *LrwOpScwLoop = Intrinsic::getDeclaration(
+ AI->getModule(),
+ getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
+
+ if (XLen == 64) {
+ Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
+ Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
+ ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
+ }
+
+ Value *Result;
+
+ // Must pass the shift amount needed to sign extend the loaded value prior
+ // to performing a signed comparison for min/max. ShiftAmt is the number of
+ // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
+ // is the number of bits to left+right shift the value in order to
+ // sign-extend.
+ if (AI->getOperation() == AtomicRMWInst::Min ||
+ AI->getOperation() == AtomicRMWInst::Max) {
+ const DataLayout &DL = AI->getModule()->getDataLayout();
+ unsigned ValWidth =
+ DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
+ Value *SextShamt =
+ Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
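+    // e.g. for an i8 field at bit offset 8 on RV32: ShiftAmt = 8 and
+    // ValWidth = 8, so SextShamt = (32 - 8) - 8 = 16; shifting left then
+    // arithmetically right by 16 sign-extends the loaded field in place.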
+ Result = Builder.CreateCall(LrwOpScwLoop,
+ {AlignedAddr, Incr, Mask, SextShamt, Ordering});
+ } else {
+ Result =
+ Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
+ }
+
+ if (XLen == 64)
+ Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
+ return Result;
+}
+
+TargetLowering::AtomicExpansionKind
+RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
+ AtomicCmpXchgInst *CI) const {
+ unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
+ if (Size == 8 || Size == 16)
+ return AtomicExpansionKind::MaskedIntrinsic;
+ return AtomicExpansionKind::None;
+}
+
+Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
+ IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
+ Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
+ unsigned XLen = Subtarget.getXLen();
+ Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
+ Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
+ if (XLen == 64) {
+ CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
+ NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
+ Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
+ CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
+ }
+ Type *Tys[] = {AlignedAddr->getType()};
+ Function *MaskedCmpXchg =
+ Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
+ Value *Result = Builder.CreateCall(
+ MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
+ if (XLen == 64)
+ Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
+ return Result;
+}
+
+unsigned RISCVTargetLowering::getExceptionPointerRegister(
+ const Constant *PersonalityFn) const {
+ return RISCV::X10;
+}
+
+unsigned RISCVTargetLowering::getExceptionSelectorRegister(
+ const Constant *PersonalityFn) const {
+ return RISCV::X11;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h
new file mode 100644
index 000000000000..e2059e70831d
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -0,0 +1,214 @@
+//===-- RISCVISelLowering.h - RISCV DAG Lowering Interface ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that RISCV uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H
+#define LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H
+
+#include "RISCV.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/TargetLowering.h"
+
+namespace llvm {
+class RISCVSubtarget;
+namespace RISCVISD {
+enum NodeType : unsigned {
+ FIRST_NUMBER = ISD::BUILTIN_OP_END,
+ RET_FLAG,
+ URET_FLAG,
+ SRET_FLAG,
+ MRET_FLAG,
+ CALL,
+ SELECT_CC,
+ BuildPairF64,
+ SplitF64,
+ TAIL,
+ // RV64I shifts, directly matching the semantics of the named RISC-V
+ // instructions.
+ SLLW,
+ SRAW,
+ SRLW,
+ // 32-bit operations from RV64M that can't be simply matched with a pattern
+ // at instruction selection time.
+ DIVW,
+ DIVUW,
+ REMUW,
+ // FPR32<->GPR transfer operations for RV64. Needed as an i32<->f32 bitcast
+  // is not legal on RV64. FMV_W_X_RV64 matches the semantics of the FMV.W.X
+  // instruction. FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an
+  // any-extended result. This is a more convenient semantics for writing DAG
+  // combines that remove unnecessary GPR->FPR->GPR moves.
+ FMV_W_X_RV64,
+ FMV_X_ANYEXTW_RV64,
+ // READ_CYCLE_WIDE - A read of the 64-bit cycle CSR on a 32-bit target
+ // (returns (Lo, Hi)). It takes a chain operand.
+ READ_CYCLE_WIDE
+};
+}
+
+class RISCVTargetLowering : public TargetLowering {
+ const RISCVSubtarget &Subtarget;
+
+public:
+ explicit RISCVTargetLowering(const TargetMachine &TM,
+ const RISCVSubtarget &STI);
+
+ bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
+ MachineFunction &MF,
+ unsigned Intrinsic) const override;
+ bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
+ unsigned AS,
+ Instruction *I = nullptr) const override;
+ bool isLegalICmpImmediate(int64_t Imm) const override;
+ bool isLegalAddImmediate(int64_t Imm) const override;
+ bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
+ bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
+ bool isZExtFree(SDValue Val, EVT VT2) const override;
+ bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
+
+ bool hasBitPreservingFPLogic(EVT VT) const override;
+
+ // Provide custom lowering hooks for some operations.
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+ void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const override;
+
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
+
+ unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
+ const APInt &DemandedElts,
+ const SelectionDAG &DAG,
+ unsigned Depth) const override;
+
+ // This method returns the name of a target specific DAG node.
+ const char *getTargetNodeName(unsigned Opcode) const override;
+
+ ConstraintType getConstraintType(StringRef Constraint) const override;
+
+ unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override;
+
+ std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ StringRef Constraint, MVT VT) const override;
+
+ void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const override;
+
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *BB) const override;
+
+ EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
+ EVT VT) const override;
+
+ bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
+ return VT.isScalarInteger();
+ }
+
+ bool shouldInsertFencesForAtomic(const Instruction *I) const override {
+ return isa<LoadInst>(I) || isa<StoreInst>(I);
+ }
+ Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
+ AtomicOrdering Ord) const override;
+ Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
+ AtomicOrdering Ord) const override;
+
+ ISD::NodeType getExtendForAtomicOps() const override {
+ return ISD::SIGN_EXTEND;
+ }
+
+ bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
+ if (DAG.getMachineFunction().getFunction().hasMinSize())
+ return false;
+ return true;
+ }
+ bool isDesirableToCommuteWithShift(const SDNode *N,
+ CombineLevel Level) const override;
+
+ /// If a physical register, this returns the register that receives the
+ /// exception address on entry to an EH pad.
+ unsigned
+ getExceptionPointerRegister(const Constant *PersonalityFn) const override;
+
+ /// If a physical register, this returns the register that receives the
+ /// exception typeid on entry to a landing pad.
+ unsigned
+ getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
+
+private:
+ void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ bool IsRet) const;
+ void analyzeOutputArgs(MachineFunction &MF, CCState &CCInfo,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ bool IsRet, CallLoweringInfo *CLI) const;
+ // Lower incoming arguments, copy physregs into vregs
+ SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
+ bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const override;
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
+ SelectionDAG &DAG) const override;
+ SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const override;
+ bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
+ Type *Ty) const override {
+ return true;
+ }
+
+ template <class NodeTy>
+ SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true) const;
+
+ SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
+ bool UseGOT) const;
+ SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;
+
+ bool shouldConsiderGEPOffsetSplit() const override { return true; }
+ SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
+
+ bool isEligibleForTailCallOptimization(
+ CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
+ const SmallVector<CCValAssign, 16> &ArgLocs) const;
+
+ TargetLowering::AtomicExpansionKind
+ shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+ virtual Value *emitMaskedAtomicRMWIntrinsic(
+ IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
+ Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const override;
+ TargetLowering::AtomicExpansionKind
+ shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
+ virtual Value *
+ emitMaskedAtomicCmpXchgIntrinsic(IRBuilder<> &Builder, AtomicCmpXchgInst *CI,
+ Value *AlignedAddr, Value *CmpVal,
+ Value *NewVal, Value *Mask,
+ AtomicOrdering Ord) const override;
+};
+}
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormats.td
new file mode 100644
index 000000000000..7229ebfe1db0
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -0,0 +1,314 @@
+//===-- RISCVInstrFormats.td - RISCV Instruction Formats ---*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+//
+// These instruction format definitions are structured to match the
+// description in the RISC-V User-Level ISA specification as closely as
+// possible. For instance, the specification describes instructions with the
+// MSB (31st bit) on the left and the LSB (0th bit) on the right. This is
+// reflected in the order of parameters to each instruction class.
+//
+// One area of divergence is in the description of immediates. The
+// specification describes immediate encoding in terms of bit-slicing
+// operations on the logical value represented. The immediate argument to
+// these instruction formats instead represents the bit sequence that will be
+// inserted into the instruction. e.g. although JAL's immediate is logically
+// a 21-bit value (where the LSB is always zero), we describe it as an imm20
+// to match how it is encoded.
+//
+//===----------------------------------------------------------------------===//
+
+// Format specifies the encoding used by the instruction. This is used by
+// RISCVMCCodeEmitter to determine which form of fixup to use. These
+// definitions must be kept in sync with RISCVBaseInfo.h.
+class InstFormat<bits<5> val> {
+ bits<5> Value = val;
+}
+def InstFormatPseudo : InstFormat<0>;
+def InstFormatR : InstFormat<1>;
+def InstFormatR4 : InstFormat<2>;
+def InstFormatI : InstFormat<3>;
+def InstFormatS : InstFormat<4>;
+def InstFormatB : InstFormat<5>;
+def InstFormatU : InstFormat<6>;
+def InstFormatJ : InstFormat<7>;
+def InstFormatCR : InstFormat<8>;
+def InstFormatCI : InstFormat<9>;
+def InstFormatCSS : InstFormat<10>;
+def InstFormatCIW : InstFormat<11>;
+def InstFormatCL : InstFormat<12>;
+def InstFormatCS : InstFormat<13>;
+def InstFormatCA : InstFormat<14>;
+def InstFormatCB : InstFormat<15>;
+def InstFormatCJ : InstFormat<16>;
+def InstFormatOther : InstFormat<17>;
+
+// The following opcode names match those given in Table 19.1 in the
+// RISC-V User-level ISA specification ("RISC-V base opcode map").
+class RISCVOpcode<bits<7> val> {
+ bits<7> Value = val;
+}
+def OPC_LOAD : RISCVOpcode<0b0000011>;
+def OPC_LOAD_FP : RISCVOpcode<0b0000111>;
+def OPC_MISC_MEM : RISCVOpcode<0b0001111>;
+def OPC_OP_IMM : RISCVOpcode<0b0010011>;
+def OPC_AUIPC : RISCVOpcode<0b0010111>;
+def OPC_OP_IMM_32 : RISCVOpcode<0b0011011>;
+def OPC_STORE : RISCVOpcode<0b0100011>;
+def OPC_STORE_FP : RISCVOpcode<0b0100111>;
+def OPC_AMO : RISCVOpcode<0b0101111>;
+def OPC_OP : RISCVOpcode<0b0110011>;
+def OPC_LUI : RISCVOpcode<0b0110111>;
+def OPC_OP_32 : RISCVOpcode<0b0111011>;
+def OPC_MADD : RISCVOpcode<0b1000011>;
+def OPC_MSUB : RISCVOpcode<0b1000111>;
+def OPC_NMSUB : RISCVOpcode<0b1001011>;
+def OPC_NMADD : RISCVOpcode<0b1001111>;
+def OPC_OP_FP : RISCVOpcode<0b1010011>;
+def OPC_BRANCH : RISCVOpcode<0b1100011>;
+def OPC_JALR : RISCVOpcode<0b1100111>;
+def OPC_JAL : RISCVOpcode<0b1101111>;
+def OPC_SYSTEM : RISCVOpcode<0b1110011>;
+
+class RVInst<dag outs, dag ins, string opcodestr, string argstr,
+ list<dag> pattern, InstFormat format>
+ : Instruction {
+ field bits<32> Inst;
+ // SoftFail is a field the disassembler can use to provide a way for
+ // instructions to not match without killing the whole decode process. It is
+ // mainly used for ARM, but Tablegen expects this field to exist or it fails
+ // to build the decode table.
+ field bits<32> SoftFail = 0;
+ let Size = 4;
+
+ bits<7> Opcode = 0;
+
+ let Inst{6-0} = Opcode;
+
+ let Namespace = "RISCV";
+
+ dag OutOperandList = outs;
+ dag InOperandList = ins;
+ let AsmString = opcodestr # "\t" # argstr;
+ let Pattern = pattern;
+
+ let TSFlags{4-0} = format.Value;
+}
+
+// Pseudo instructions
+class Pseudo<dag outs, dag ins, list<dag> pattern, string opcodestr = "", string argstr = "">
+ : RVInst<outs, ins, opcodestr, argstr, pattern, InstFormatPseudo> {
+ let isPseudo = 1;
+ let isCodeGenOnly = 1;
+}
+
+// Pseudo load instructions.
+class PseudoLoad<string opcodestr, RegisterClass rdty = GPR>
+ : Pseudo<(outs rdty:$rd), (ins bare_symbol:$addr), [], opcodestr, "$rd, $addr"> {
+ let hasSideEffects = 0;
+ let mayLoad = 1;
+ let mayStore = 0;
+ let isCodeGenOnly = 0;
+ let isAsmParserOnly = 1;
+}
+
+class PseudoFloatLoad<string opcodestr, RegisterClass rdty = GPR>
+ : Pseudo<(outs rdty:$rd, GPR:$tmp), (ins bare_symbol:$addr), [], opcodestr, "$rd, $addr, $tmp"> {
+ let hasSideEffects = 0;
+ let mayLoad = 1;
+ let mayStore = 0;
+ let isCodeGenOnly = 0;
+ let isAsmParserOnly = 1;
+}
+
+// Pseudo store instructions.
+class PseudoStore<string opcodestr, RegisterClass rsty = GPR>
+ : Pseudo<(outs rsty:$rs, GPR:$tmp), (ins bare_symbol:$addr), [], opcodestr, "$rs, $addr, $tmp"> {
+ let hasSideEffects = 0;
+ let mayLoad = 0;
+ let mayStore = 1;
+ let isCodeGenOnly = 0;
+ let isAsmParserOnly = 1;
+}
+
+// Instruction formats are listed in the order they appear in the RISC-V
+// instruction set manual (R, I, S, B, U, J) with sub-formats (e.g. RVInstR4,
+// RVInstRAtomic) sorted alphabetically.
+
+class RVInstR<bits<7> funct7, bits<3> funct3, RISCVOpcode opcode, dag outs,
+ dag ins, string opcodestr, string argstr>
+ : RVInst<outs, ins, opcodestr, argstr, [], InstFormatR> {
+ bits<5> rs2;
+ bits<5> rs1;
+ bits<5> rd;
+
+ let Inst{31-25} = funct7;
+ let Inst{24-20} = rs2;
+ let Inst{19-15} = rs1;
+ let Inst{14-12} = funct3;
+ let Inst{11-7} = rd;
+ let Opcode = opcode.Value;
+}
+
+class RVInstR4<bits<2> funct2, RISCVOpcode opcode, dag outs, dag ins,
+ string opcodestr, string argstr>
+ : RVInst<outs, ins, opcodestr, argstr, [], InstFormatR4> {
+ bits<5> rs3;
+ bits<5> rs2;
+ bits<5> rs1;
+ bits<3> funct3;
+ bits<5> rd;
+
+ let Inst{31-27} = rs3;
+ let Inst{26-25} = funct2;
+ let Inst{24-20} = rs2;
+ let Inst{19-15} = rs1;
+ let Inst{14-12} = funct3;
+ let Inst{11-7} = rd;
+ let Opcode = opcode.Value;
+}
+
+class RVInstRAtomic<bits<5> funct5, bit aq, bit rl, bits<3> funct3,
+ RISCVOpcode opcode, dag outs, dag ins, string opcodestr,
+ string argstr>
+ : RVInst<outs, ins, opcodestr, argstr, [], InstFormatR> {
+ bits<5> rs2;
+ bits<5> rs1;
+ bits<5> rd;
+
+ let Inst{31-27} = funct5;
+ let Inst{26} = aq;
+ let Inst{25} = rl;
+ let Inst{24-20} = rs2;
+ let Inst{19-15} = rs1;
+ let Inst{14-12} = funct3;
+ let Inst{11-7} = rd;
+ let Opcode = opcode.Value;
+}
+
+class RVInstRFrm<bits<7> funct7, RISCVOpcode opcode, dag outs, dag ins,
+ string opcodestr, string argstr>
+ : RVInst<outs, ins, opcodestr, argstr, [], InstFormatR> {
+ bits<5> rs2;
+ bits<5> rs1;
+ bits<3> funct3;
+ bits<5> rd;
+
+ let Inst{31-25} = funct7;
+ let Inst{24-20} = rs2;
+ let Inst{19-15} = rs1;
+ let Inst{14-12} = funct3;
+ let Inst{11-7} = rd;
+ let Opcode = opcode.Value;
+}
+
+class RVInstI<bits<3> funct3, RISCVOpcode opcode, dag outs, dag ins,
+ string opcodestr, string argstr>
+ : RVInst<outs, ins, opcodestr, argstr, [], InstFormatI> {
+ bits<12> imm12;
+ bits<5> rs1;
+ bits<5> rd;
+
+ let Inst{31-20} = imm12;
+ let Inst{19-15} = rs1;
+ let Inst{14-12} = funct3;
+ let Inst{11-7} = rd;
+ let Opcode = opcode.Value;
+}
+
+class RVInstIShift<bit arithshift, bits<3> funct3, RISCVOpcode opcode,
+ dag outs, dag ins, string opcodestr, string argstr>
+ : RVInst<outs, ins, opcodestr, argstr, [], InstFormatI> {
+ bits<6> shamt;
+ bits<5> rs1;
+ bits<5> rd;
+
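+  // Inst{30} selects the arithmetic variant (e.g. SRAI vs. SRLI). The 6-bit
+  // shamt permits shift amounts up to 63 on RV64; on RV32 shamt{5}
+  // (Inst{25}) must be zero.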
+ let Inst{31} = 0;
+ let Inst{30} = arithshift;
+ let Inst{29-26} = 0;
+ let Inst{25-20} = shamt;
+ let Inst{19-15} = rs1;
+ let Inst{14-12} = funct3;
+ let Inst{11-7} = rd;
+ let Opcode = opcode.Value;
+}
+
+class RVInstIShiftW<bit arithshift, bits<3> funct3, RISCVOpcode opcode,
+ dag outs, dag ins, string opcodestr, string argstr>
+ : RVInst<outs, ins, opcodestr, argstr, [], InstFormatI> {
+ bits<5> shamt;
+ bits<5> rs1;
+ bits<5> rd;
+
+ let Inst{31} = 0;
+ let Inst{30} = arithshift;
+ let Inst{29-25} = 0;
+ let Inst{24-20} = shamt;
+ let Inst{19-15} = rs1;
+ let Inst{14-12} = funct3;
+ let Inst{11-7} = rd;
+ let Opcode = opcode.Value;
+}
+
+class RVInstS<bits<3> funct3, RISCVOpcode opcode, dag outs, dag ins,
+ string opcodestr, string argstr>
+ : RVInst<outs, ins, opcodestr, argstr, [], InstFormatS> {
+ bits<12> imm12;
+ bits<5> rs2;
+ bits<5> rs1;
+
+ let Inst{31-25} = imm12{11-5};
+ let Inst{24-20} = rs2;
+ let Inst{19-15} = rs1;
+ let Inst{14-12} = funct3;
+ let Inst{11-7} = imm12{4-0};
+ let Opcode = opcode.Value;
+}
+
+class RVInstB<bits<3> funct3, RISCVOpcode opcode, dag outs, dag ins,
+ string opcodestr, string argstr>
+ : RVInst<outs, ins, opcodestr, argstr, [], InstFormatB> {
+ bits<12> imm12;
+ bits<5> rs2;
+ bits<5> rs1;
+
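+  // imm12 holds branch offset bits [12:1] (bit 0 is implicitly zero); the
+  // encoding scatters them: Inst{31} = offset[12], Inst{30-25} =
+  // offset[10:5], Inst{11-8} = offset[4:1] and Inst{7} = offset[11].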
+ let Inst{31} = imm12{11};
+ let Inst{30-25} = imm12{9-4};
+ let Inst{24-20} = rs2;
+ let Inst{19-15} = rs1;
+ let Inst{14-12} = funct3;
+ let Inst{11-8} = imm12{3-0};
+ let Inst{7} = imm12{10};
+ let Opcode = opcode.Value;
+}
+
+class RVInstU<RISCVOpcode opcode, dag outs, dag ins, string opcodestr,
+ string argstr>
+ : RVInst<outs, ins, opcodestr, argstr, [], InstFormatU> {
+ bits<20> imm20;
+ bits<5> rd;
+
+ let Inst{31-12} = imm20;
+ let Inst{11-7} = rd;
+ let Opcode = opcode.Value;
+}
+
+class RVInstJ<RISCVOpcode opcode, dag outs, dag ins, string opcodestr,
+ string argstr>
+ : RVInst<outs, ins, opcodestr, argstr, [], InstFormatJ> {
+ bits<20> imm20;
+ bits<5> rd;
+
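+  // imm20 holds jump offset bits [20:1] (bit 0 is implicitly zero); the
+  // encoding scatters them: Inst{31} = offset[20], Inst{30-21} =
+  // offset[10:1], Inst{20} = offset[11] and Inst{19-12} = offset[19:12].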
+ let Inst{31} = imm20{19};
+ let Inst{30-21} = imm20{9-0};
+ let Inst{20} = imm20{10};
+ let Inst{19-12} = imm20{18-11};
+ let Inst{11-7} = rd;
+ let Opcode = opcode.Value;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormatsC.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormatsC.td
new file mode 100644
index 000000000000..690bec5181e2
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormatsC.td
@@ -0,0 +1,159 @@
+//===-- RISCVInstrFormatsC.td - RISCV C Instruction Formats --*- tablegen -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the RISC-V C extension instruction formats.
+//
+//===----------------------------------------------------------------------===//
+
+class RVInst16<dag outs, dag ins, string opcodestr, string argstr,
+ list<dag> pattern, InstFormat format>
+ : Instruction {
+ field bits<16> Inst;
+ // SoftFail is a field the disassembler can use to provide a way for
+ // instructions to not match without killing the whole decode process. It is
+ // mainly used for ARM, but Tablegen expects this field to exist or it fails
+ // to build the decode table.
+ field bits<16> SoftFail = 0;
+ let Size = 2;
+
+ bits<2> Opcode = 0;
+
+ let Namespace = "RISCV";
+
+ dag OutOperandList = outs;
+ dag InOperandList = ins;
+ let AsmString = opcodestr # "\t" # argstr;
+ let Pattern = pattern;
+
+ let TSFlags{4-0} = format.Value;
+}
+
+class RVInst16CR<bits<4> funct4, bits<2> opcode, dag outs, dag ins,
+ string opcodestr, string argstr>
+ : RVInst16<outs, ins, opcodestr, argstr, [], InstFormatCR> {
+ bits<5> rs1;
+ bits<5> rs2;
+
+ let Inst{15-12} = funct4;
+ let Inst{11-7} = rs1;
+ let Inst{6-2} = rs2;
+ let Inst{1-0} = opcode;
+}
+
+// The immediate value encoding differs for each instruction, so each subclass
+// is responsible for setting the appropriate bits in the Inst field.
+// The bits Inst{6-2} must be set for each instruction.
+class RVInst16CI<bits<3> funct3, bits<2> opcode, dag outs, dag ins,
+ string opcodestr, string argstr>
+ : RVInst16<outs, ins, opcodestr, argstr, [], InstFormatCI> {
+ bits<10> imm;
+ bits<5> rd;
+ bits<5> rs1;
+
+ let Inst{15-13} = funct3;
+ let Inst{12} = imm{5};
+ let Inst{11-7} = rd;
+ let Inst{1-0} = opcode;
+}
+
+// The immediate value encoding differs for each instruction, so each subclass
+// is responsible for setting the appropriate bits in the Inst field.
+// The bits Inst{12-7} must be set for each instruction.
+class RVInst16CSS<bits<3> funct3, bits<2> opcode, dag outs, dag ins,
+ string opcodestr, string argstr>
+ : RVInst16<outs, ins, opcodestr, argstr, [], InstFormatCSS> {
+ bits<10> imm;
+ bits<5> rs2;
+ bits<5> rs1;
+
+ let Inst{15-13} = funct3;
+ let Inst{6-2} = rs2;
+ let Inst{1-0} = opcode;
+}
+
+class RVInst16CIW<bits<3> funct3, bits<2> opcode, dag outs, dag ins,
+ string opcodestr, string argstr>
+ : RVInst16<outs, ins, opcodestr, argstr, [], InstFormatCIW> {
+ bits<10> imm;
+ bits<3> rd;
+
+ let Inst{15-13} = funct3;
+ let Inst{4-2} = rd;
+ let Inst{1-0} = opcode;
+}
+
+// The immediate value encoding differs for each instruction, so each subclass
+// is responsible for setting the appropriate bits in the Inst field.
+// The bits Inst{12-10} and Inst{6-5} must be set for each instruction.
+class RVInst16CL<bits<3> funct3, bits<2> opcode, dag outs, dag ins,
+ string opcodestr, string argstr>
+ : RVInst16<outs, ins, opcodestr, argstr, [], InstFormatCL> {
+ bits<3> rd;
+ bits<3> rs1;
+
+ let Inst{15-13} = funct3;
+ let Inst{9-7} = rs1;
+ let Inst{4-2} = rd;
+ let Inst{1-0} = opcode;
+}
+
+// The immediate value encoding differs for each instruction, so each subclass
+// is responsible for setting the appropriate bits in the Inst field.
+// The bits Inst{12-10} and Inst{6-5} must be set for each instruction.
+class RVInst16CS<bits<3> funct3, bits<2> opcode, dag outs, dag ins,
+ string opcodestr, string argstr>
+ : RVInst16<outs, ins, opcodestr, argstr, [], InstFormatCS> {
+ bits<3> rs2;
+ bits<3> rs1;
+
+ let Inst{15-13} = funct3;
+ let Inst{9-7} = rs1;
+ let Inst{4-2} = rs2;
+ let Inst{1-0} = opcode;
+}
+
+class RVInst16CA<bits<6> funct6, bits<2> funct2, bits<2> opcode, dag outs,
+ dag ins, string opcodestr, string argstr>
+ : RVInst16<outs, ins, opcodestr, argstr, [], InstFormatCA> {
+ bits<3> rs2;
+ bits<3> rs1;
+
+ let Inst{15-10} = funct6;
+ let Inst{9-7} = rs1;
+ let Inst{6-5} = funct2;
+ let Inst{4-2} = rs2;
+ let Inst{1-0} = opcode;
+}
+
+class RVInst16CB<bits<3> funct3, bits<2> opcode, dag outs, dag ins,
+ string opcodestr, string argstr>
+ : RVInst16<outs, ins, opcodestr, argstr, [], InstFormatCB> {
+ bits<9> imm;
+ bits<3> rs1;
+
+ let Inst{15-13} = funct3;
+ let Inst{9-7} = rs1;
+ let Inst{1-0} = opcode;
+}
+
+class RVInst16CJ<bits<3> funct3, bits<2> opcode, dag outs, dag ins,
+ string opcodestr, string argstr>
+ : RVInst16<outs, ins, opcodestr, argstr, [], InstFormatCJ> {
+ bits<11> offset;
+
+ let Inst{15-13} = funct3;
+ let Inst{12} = offset{10};
+ let Inst{11} = offset{3};
+ let Inst{10-9} = offset{8-7};
+ let Inst{8} = offset{9};
+ let Inst{7} = offset{5};
+ let Inst{6} = offset{6};
+ let Inst{5-3} = offset{2-0};
+ let Inst{2} = offset{4};
+ let Inst{1-0} = opcode;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
new file mode 100644
index 000000000000..99c8d2ef73de
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -0,0 +1,468 @@
+//===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the RISCV implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVInstrInfo.h"
+#include "RISCV.h"
+#include "RISCVSubtarget.h"
+#include "RISCVTargetMachine.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+
+#define GET_INSTRINFO_CTOR_DTOR
+#include "RISCVGenInstrInfo.inc"
+
+using namespace llvm;
+
+RISCVInstrInfo::RISCVInstrInfo()
+ : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP) {}
+
+unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const {
+ switch (MI.getOpcode()) {
+ default:
+ return 0;
+ case RISCV::LB:
+ case RISCV::LBU:
+ case RISCV::LH:
+ case RISCV::LHU:
+ case RISCV::LW:
+ case RISCV::FLW:
+ case RISCV::LWU:
+ case RISCV::LD:
+ case RISCV::FLD:
+ break;
+ }
+
+ if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
+ MI.getOperand(2).getImm() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
+ }
+
+ return 0;
+}
+
+unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const {
+ switch (MI.getOpcode()) {
+ default:
+ return 0;
+ case RISCV::SB:
+ case RISCV::SH:
+ case RISCV::SW:
+ case RISCV::FSW:
+ case RISCV::SD:
+ case RISCV::FSD:
+ break;
+ }
+
+ if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
+ MI.getOperand(1).getImm() == 0) {
+ FrameIndex = MI.getOperand(0).getIndex();
+ return MI.getOperand(2).getReg();
+ }
+
+ return 0;
+}
+
+void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, unsigned DstReg,
+ unsigned SrcReg, bool KillSrc) const {
+ if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
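+    // A GPR-to-GPR move is the canonical "mv" expansion: addi dst, src, 0.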
+ BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
+ .addReg(SrcReg, getKillRegState(KillSrc))
+ .addImm(0);
+ return;
+ }
+
+ // FPR->FPR copies
+ unsigned Opc;
+ if (RISCV::FPR32RegClass.contains(DstReg, SrcReg))
+ Opc = RISCV::FSGNJ_S;
+ else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg))
+ Opc = RISCV::FSGNJ_D;
+ else
+ llvm_unreachable("Impossible reg-to-reg copy");
+
+ BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
+ .addReg(SrcReg, getKillRegState(KillSrc))
+ .addReg(SrcReg, getKillRegState(KillSrc));
+}
+
+void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned SrcReg, bool IsKill, int FI,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ DebugLoc DL;
+ if (I != MBB.end())
+ DL = I->getDebugLoc();
+
+ unsigned Opcode;
+
+ if (RISCV::GPRRegClass.hasSubClassEq(RC))
+ Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
+ RISCV::SW : RISCV::SD;
+ else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
+ Opcode = RISCV::FSW;
+ else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
+ Opcode = RISCV::FSD;
+ else
+ llvm_unreachable("Can't store this register to stack slot");
+
+ BuildMI(MBB, I, DL, get(Opcode))
+ .addReg(SrcReg, getKillRegState(IsKill))
+ .addFrameIndex(FI)
+ .addImm(0);
+}
+
+void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DstReg, int FI,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ DebugLoc DL;
+ if (I != MBB.end())
+ DL = I->getDebugLoc();
+
+ unsigned Opcode;
+
+ if (RISCV::GPRRegClass.hasSubClassEq(RC))
+ Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
+ RISCV::LW : RISCV::LD;
+ else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
+ Opcode = RISCV::FLW;
+ else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
+ Opcode = RISCV::FLD;
+ else
+ llvm_unreachable("Can't load this register from stack slot");
+
+ BuildMI(MBB, I, DL, get(Opcode), DstReg).addFrameIndex(FI).addImm(0);
+}
+
+void RISCVInstrInfo::movImm32(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, unsigned DstReg, uint64_t Val,
+ MachineInstr::MIFlag Flag) const {
+ assert(isInt<32>(Val) && "Can only materialize 32-bit constants");
+
+ // TODO: If the value can be materialized using only one instruction, only
+ // insert a single instruction.
+
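+  // Worked example: for Val = 0x12345fff, Lo12 = SignExtend64<12>(0xfff) = -1
+  // and Hi20 = ((0x12345fff + 0x800) >> 12) & 0xfffff = 0x12346, so
+  // `lui dst, 0x12346` followed by `addi dst, dst, -1` yields 0x12345fff.
+  // The +0x800 rounds Hi20 up exactly when Lo12 is negative.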
+ uint64_t Hi20 = ((Val + 0x800) >> 12) & 0xfffff;
+ uint64_t Lo12 = SignExtend64<12>(Val);
+ BuildMI(MBB, MBBI, DL, get(RISCV::LUI), DstReg)
+ .addImm(Hi20)
+ .setMIFlag(Flag);
+ BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
+ .addReg(DstReg, RegState::Kill)
+ .addImm(Lo12)
+ .setMIFlag(Flag);
+}
+
+// The contents of values added to Cond are not examined outside of
+// RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
+// push BranchOpcode, Reg1, Reg2.
+static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
+ SmallVectorImpl<MachineOperand> &Cond) {
+ // Block ends with fall-through condbranch.
+ assert(LastInst.getDesc().isConditionalBranch() &&
+ "Unknown conditional branch");
+ Target = LastInst.getOperand(2).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(LastInst.getOpcode()));
+ Cond.push_back(LastInst.getOperand(0));
+ Cond.push_back(LastInst.getOperand(1));
+}
+
+static unsigned getOppositeBranchOpcode(int Opc) {
+ switch (Opc) {
+ default:
+ llvm_unreachable("Unrecognized conditional branch");
+ case RISCV::BEQ:
+ return RISCV::BNE;
+ case RISCV::BNE:
+ return RISCV::BEQ;
+ case RISCV::BLT:
+ return RISCV::BGE;
+ case RISCV::BGE:
+ return RISCV::BLT;
+ case RISCV::BLTU:
+ return RISCV::BGEU;
+ case RISCV::BGEU:
+ return RISCV::BLTU;
+ }
+}
+
+bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
+ TBB = FBB = nullptr;
+ Cond.clear();
+
+ // If the block has no terminators, it just falls into the block after it.
+ MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
+ if (I == MBB.end() || !isUnpredicatedTerminator(*I))
+ return false;
+
+ // Count the number of terminators and find the first unconditional or
+ // indirect branch.
+ MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
+ int NumTerminators = 0;
+ for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
+ J++) {
+ NumTerminators++;
+ if (J->getDesc().isUnconditionalBranch() ||
+ J->getDesc().isIndirectBranch()) {
+ FirstUncondOrIndirectBr = J.getReverse();
+ }
+ }
+
+ // If AllowModify is true, we can erase any terminators after
+ // FirstUncondOrIndirectBR.
+ if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
+ while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
+ std::next(FirstUncondOrIndirectBr)->eraseFromParent();
+ NumTerminators--;
+ }
+ I = FirstUncondOrIndirectBr;
+ }
+
+ // We can't handle blocks that end in an indirect branch.
+ if (I->getDesc().isIndirectBranch())
+ return true;
+
+ // We can't handle blocks with more than 2 terminators.
+ if (NumTerminators > 2)
+ return true;
+
+ // Handle a single unconditional branch.
+ if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
+ TBB = I->getOperand(0).getMBB();
+ return false;
+ }
+
+ // Handle a single conditional branch.
+ if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
+ parseCondBranch(*I, TBB, Cond);
+ return false;
+ }
+
+ // Handle a conditional branch followed by an unconditional branch.
+ if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
+ I->getDesc().isUnconditionalBranch()) {
+ parseCondBranch(*std::prev(I), TBB, Cond);
+ FBB = I->getOperand(0).getMBB();
+ return false;
+ }
+
+ // Otherwise, we can't handle this.
+ return true;
+}
+
+unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
+ int *BytesRemoved) const {
+ if (BytesRemoved)
+ *BytesRemoved = 0;
+ MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
+ if (I == MBB.end())
+ return 0;
+
+ if (!I->getDesc().isUnconditionalBranch() &&
+ !I->getDesc().isConditionalBranch())
+ return 0;
+
+ // Remove the branch.
+ if (BytesRemoved)
+ *BytesRemoved += getInstSizeInBytes(*I);
+ I->eraseFromParent();
+
+ I = MBB.end();
+
+ if (I == MBB.begin())
+ return 1;
+ --I;
+ if (!I->getDesc().isConditionalBranch())
+ return 1;
+
+ // Remove the branch.
+ if (BytesRemoved)
+ *BytesRemoved += getInstSizeInBytes(*I);
+ I->eraseFromParent();
+ return 2;
+}
+
+// Inserts a branch into the end of the specified MachineBasicBlock, returning
+// the number of instructions inserted.
+unsigned RISCVInstrInfo::insertBranch(
+ MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
+ if (BytesAdded)
+ *BytesAdded = 0;
+
+ // Shouldn't be a fall through.
+ assert(TBB && "InsertBranch must not be told to insert a fallthrough");
+  assert((Cond.size() == 3 || Cond.size() == 0) &&
+         "RISCV branch conditions have three components!");
+
+ // Unconditional branch.
+ if (Cond.empty()) {
+ MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
+ if (BytesAdded)
+ *BytesAdded += getInstSizeInBytes(MI);
+ return 1;
+ }
+
+ // Either a one or two-way conditional branch.
+ unsigned Opc = Cond[0].getImm();
+ MachineInstr &CondMI =
+ *BuildMI(&MBB, DL, get(Opc)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
+ if (BytesAdded)
+ *BytesAdded += getInstSizeInBytes(CondMI);
+
+ // One-way conditional branch.
+ if (!FBB)
+ return 1;
+
+ // Two-way conditional branch.
+ MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
+ if (BytesAdded)
+ *BytesAdded += getInstSizeInBytes(MI);
+ return 2;
+}
+
+unsigned RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock &DestBB,
+ const DebugLoc &DL,
+ int64_t BrOffset,
+ RegScavenger *RS) const {
+ assert(RS && "RegScavenger required for long branching");
+ assert(MBB.empty() &&
+ "new block should be inserted for expanding unconditional branch");
+ assert(MBB.pred_size() == 1);
+
+ MachineFunction *MF = MBB.getParent();
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
+
+ if (TM.isPositionIndependent())
+ report_fatal_error("Unable to insert indirect branch");
+
+ if (!isInt<32>(BrOffset))
+ report_fatal_error(
+ "Branch offsets outside of the signed 32-bit range not supported");
+
+ // FIXME: A virtual register must be used initially, as the register
+ // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
+ // uses the same workaround).
+ unsigned ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+ auto II = MBB.end();
+
+ MachineInstr &LuiMI = *BuildMI(MBB, II, DL, get(RISCV::LUI), ScratchReg)
+ .addMBB(&DestBB, RISCVII::MO_HI);
+ BuildMI(MBB, II, DL, get(RISCV::PseudoBRIND))
+ .addReg(ScratchReg, RegState::Kill)
+ .addMBB(&DestBB, RISCVII::MO_LO);
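+
+  // The LUI (%hi) and JALR (%lo) pair targets the absolute address of DestBB,
+  // which is why position-independent code was rejected above; two 4-byte
+  // instructions give the return value of 8.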
+
+ RS->enterBasicBlockEnd(MBB);
+ unsigned Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
+ LuiMI.getIterator(), false, 0);
+ MRI.replaceRegWith(ScratchReg, Scav);
+ MRI.clearVirtRegs();
+ RS->setRegUsed(Scav);
+ return 8;
+}
+
+bool RISCVInstrInfo::reverseBranchCondition(
+ SmallVectorImpl<MachineOperand> &Cond) const {
+ assert((Cond.size() == 3) && "Invalid branch condition!");
+ Cond[0].setImm(getOppositeBranchOpcode(Cond[0].getImm()));
+ return false;
+}
+
+MachineBasicBlock *
+RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
+ assert(MI.getDesc().isBranch() && "Unexpected opcode!");
+ // The branch target is always the last operand.
+ int NumOp = MI.getNumExplicitOperands();
+ return MI.getOperand(NumOp - 1).getMBB();
+}
+
+bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
+ int64_t BrOffset) const {
+ // Ideally we could determine the supported branch offset from the
+ // RISCVII::FormMask, but this can't be used for Pseudo instructions like
+ // PseudoBR.
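+  // BEQ..BGEU use the 13-bit B-type offset (+/-4 KiB); JAL uses the 21-bit
+  // J-type offset (+/-1 MiB). Both encodings have an implicit zero low bit.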
+ switch (BranchOp) {
+ default:
+ llvm_unreachable("Unexpected opcode!");
+ case RISCV::BEQ:
+ case RISCV::BNE:
+ case RISCV::BLT:
+ case RISCV::BGE:
+ case RISCV::BLTU:
+ case RISCV::BGEU:
+ return isIntN(13, BrOffset);
+ case RISCV::JAL:
+ case RISCV::PseudoBR:
+ return isIntN(21, BrOffset);
+ }
+}
+
+unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
+ unsigned Opcode = MI.getOpcode();
+
+ switch (Opcode) {
+ default: { return get(Opcode).getSize(); }
+ case TargetOpcode::EH_LABEL:
+ case TargetOpcode::IMPLICIT_DEF:
+ case TargetOpcode::KILL:
+ case TargetOpcode::DBG_VALUE:
+ return 0;
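+  // Each of these pseudos expands to a two-instruction auipc-based sequence
+  // (auipc+jalr for the call pseudos, auipc+addi/load for the la family),
+  // hence 8 bytes.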
+ case RISCV::PseudoCALLReg:
+ case RISCV::PseudoCALL:
+ case RISCV::PseudoTAIL:
+ case RISCV::PseudoLLA:
+ case RISCV::PseudoLA:
+ case RISCV::PseudoLA_TLS_IE:
+ case RISCV::PseudoLA_TLS_GD:
+ return 8;
+ case TargetOpcode::INLINEASM:
+ case TargetOpcode::INLINEASM_BR: {
+ const MachineFunction &MF = *MI.getParent()->getParent();
+ const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
+ return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
+ *TM.getMCAsmInfo());
+ }
+ }
+}
+
+bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
+ const unsigned Opcode = MI.getOpcode();
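+  // addi/ori/xori with x0 as the source register simply materializes an
+  // immediate (the `li` idiom), so it is as cheap as a register move.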
+  switch (Opcode) {
+ default:
+ break;
+ case RISCV::ADDI:
+ case RISCV::ORI:
+ case RISCV::XORI:
+ return (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0);
+ }
+ return MI.isAsCheapAsAMove();
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.h
new file mode 100644
index 000000000000..ff098e660d19
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -0,0 +1,85 @@
+//===-- RISCVInstrInfo.h - RISCV Instruction Information --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the RISCV implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVINSTRINFO_H
+#define LLVM_LIB_TARGET_RISCV_RISCVINSTRINFO_H
+
+#include "RISCVRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+
+#define GET_INSTRINFO_HEADER
+#include "RISCVGenInstrInfo.inc"
+
+namespace llvm {
+
+class RISCVInstrInfo : public RISCVGenInstrInfo {
+
+public:
+ RISCVInstrInfo();
+
+ unsigned isLoadFromStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const override;
+ unsigned isStoreToStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const override;
+
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, unsigned DstReg, unsigned SrcReg,
+ bool KillSrc) const override;
+
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, unsigned SrcReg,
+ bool IsKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, unsigned DstReg,
+ int FrameIndex, const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ // Materializes the given int32 Val into DstReg.
+ void movImm32(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, unsigned DstReg, uint64_t Val,
+ MachineInstr::MIFlag Flag = MachineInstr::NoFlags) const;
+
+ unsigned getInstSizeInBytes(const MachineInstr &MI) const override;
+
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const override;
+
+ unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
+ const DebugLoc &dl,
+ int *BytesAdded = nullptr) const override;
+
+ unsigned insertIndirectBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock &NewDestBB,
+ const DebugLoc &DL, int64_t BrOffset,
+ RegScavenger *RS = nullptr) const override;
+
+ unsigned removeBranch(MachineBasicBlock &MBB,
+ int *BytesRemoved = nullptr) const override;
+
+ bool
+ reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
+
+ MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;
+
+ bool isBranchOffsetInRange(unsigned BranchOpc,
+ int64_t BrOffset) const override;
+
+ bool isAsCheapAsAMove(const MachineInstr &MI) const override;
+};
+}
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.td
new file mode 100644
index 000000000000..69bde15f1218
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -0,0 +1,1086 @@
+//===-- RISCVInstrInfo.td - Target Description for RISCV ---*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the RISC-V instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// RISC-V specific DAG Nodes.
+//===----------------------------------------------------------------------===//
+
+// Target-independent type requirements, but with target-specific formats.
+def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
+ SDTCisVT<1, i32>]>;
+def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
+ SDTCisVT<1, i32>]>;
+
+// Target-dependent type requirements.
+def SDT_RISCVCall : SDTypeProfile<0, -1, [SDTCisVT<0, XLenVT>]>;
+def SDT_RISCVSelectCC : SDTypeProfile<1, 5, [SDTCisSameAs<1, 2>,
+ SDTCisSameAs<0, 4>,
+ SDTCisSameAs<4, 5>]>;
+
+// Target-independent nodes, but with target-specific formats.
+def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart,
+ [SDNPHasChain, SDNPOutGlue]>;
+def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+
+// Target-dependent nodes.
+def riscv_call : SDNode<"RISCVISD::CALL", SDT_RISCVCall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPVariadic]>;
+def riscv_ret_flag : SDNode<"RISCVISD::RET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+def riscv_uret_flag : SDNode<"RISCVISD::URET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue]>;
+def riscv_sret_flag : SDNode<"RISCVISD::SRET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue]>;
+def riscv_mret_flag : SDNode<"RISCVISD::MRET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue]>;
+def riscv_selectcc : SDNode<"RISCVISD::SELECT_CC", SDT_RISCVSelectCC,
+ [SDNPInGlue]>;
+def riscv_tail : SDNode<"RISCVISD::TAIL", SDT_RISCVCall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPVariadic]>;
+def riscv_sllw : SDNode<"RISCVISD::SLLW", SDTIntShiftOp>;
+def riscv_sraw : SDNode<"RISCVISD::SRAW", SDTIntShiftOp>;
+def riscv_srlw : SDNode<"RISCVISD::SRLW", SDTIntShiftOp>;
+
+//===----------------------------------------------------------------------===//
+// Operand and SDNode transformation definitions.
+//===----------------------------------------------------------------------===//
+
+class ImmXLenAsmOperand<string prefix, string suffix = ""> : AsmOperandClass {
+ let Name = prefix # "ImmXLen" # suffix;
+ let RenderMethod = "addImmOperands";
+ let DiagnosticType = !strconcat("Invalid", Name);
+}
+
+class ImmAsmOperand<string prefix, int width, string suffix> : AsmOperandClass {
+ let Name = prefix # "Imm" # width # suffix;
+ let RenderMethod = "addImmOperands";
+ let DiagnosticType = !strconcat("Invalid", Name);
+}
+
+class SImmAsmOperand<int width, string suffix = "">
+ : ImmAsmOperand<"S", width, suffix> {
+}
+
+class UImmAsmOperand<int width, string suffix = "">
+ : ImmAsmOperand<"U", width, suffix> {
+}
+
+def FenceArg : AsmOperandClass {
+ let Name = "FenceArg";
+ let RenderMethod = "addFenceArgOperands";
+ let DiagnosticType = "InvalidFenceArg";
+}
+
+def fencearg : Operand<XLenVT> {
+ let ParserMatchClass = FenceArg;
+ let PrintMethod = "printFenceArg";
+ let DecoderMethod = "decodeUImmOperand<4>";
+}
+
+def UImmLog2XLenAsmOperand : AsmOperandClass {
+ let Name = "UImmLog2XLen";
+ let RenderMethod = "addImmOperands";
+ let DiagnosticType = "InvalidUImmLog2XLen";
+}
+
+def uimmlog2xlen : Operand<XLenVT>, ImmLeaf<XLenVT, [{
+ if (Subtarget->is64Bit())
+ return isUInt<6>(Imm);
+ return isUInt<5>(Imm);
+}]> {
+ let ParserMatchClass = UImmLog2XLenAsmOperand;
+ // TODO: should ensure invalid shamt is rejected when decoding.
+ let DecoderMethod = "decodeUImmOperand<6>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ if (STI.getTargetTriple().isArch64Bit())
+ return isUInt<6>(Imm);
+ return isUInt<5>(Imm);
+ }];
+}
+
+def uimm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isUInt<5>(Imm);}]> {
+ let ParserMatchClass = UImmAsmOperand<5>;
+ let DecoderMethod = "decodeUImmOperand<5>";
+}
+
+def simm12 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<12>(Imm);}]> {
+ let ParserMatchClass = SImmAsmOperand<12>;
+ let EncoderMethod = "getImmOpValue";
+ let DecoderMethod = "decodeSImmOperand<12>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return isInt<12>(Imm);
+ return MCOp.isBareSymbolRef();
+ }];
+}
+
+// A 13-bit signed immediate where the least significant bit is zero.
+def simm13_lsb0 : Operand<OtherVT> {
+ let ParserMatchClass = SImmAsmOperand<13, "Lsb0">;
+ let EncoderMethod = "getImmOpValueAsr1";
+ let DecoderMethod = "decodeSImmOperandAndLsl1<13>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return isShiftedInt<12, 1>(Imm);
+ return MCOp.isBareSymbolRef();
+ }];
+}
+
+class UImm20Operand : Operand<XLenVT> {
+ let EncoderMethod = "getImmOpValue";
+ let DecoderMethod = "decodeUImmOperand<20>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return isUInt<20>(Imm);
+ return MCOp.isBareSymbolRef();
+ }];
+}
+
+def uimm20_lui : UImm20Operand {
+ let ParserMatchClass = UImmAsmOperand<20, "LUI">;
+}
+def uimm20_auipc : UImm20Operand {
+ let ParserMatchClass = UImmAsmOperand<20, "AUIPC">;
+}
+
+def Simm21Lsb0JALAsmOperand : SImmAsmOperand<21, "Lsb0JAL"> {
+ let ParserMethod = "parseJALOffset";
+}
+
+// A 21-bit signed immediate where the least significant bit is zero.
+def simm21_lsb0_jal : Operand<OtherVT> {
+ let ParserMatchClass = Simm21Lsb0JALAsmOperand;
+ let EncoderMethod = "getImmOpValueAsr1";
+ let DecoderMethod = "decodeSImmOperandAndLsl1<21>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return isShiftedInt<20, 1>(Imm);
+ return MCOp.isBareSymbolRef();
+ }];
+}
+
+def BareSymbol : AsmOperandClass {
+ let Name = "BareSymbol";
+ let RenderMethod = "addImmOperands";
+ let DiagnosticType = "InvalidBareSymbol";
+ let ParserMethod = "parseBareSymbol";
+}
+
+// A bare symbol.
+def bare_symbol : Operand<XLenVT> {
+ let ParserMatchClass = BareSymbol;
+}
+
+def CallSymbol : AsmOperandClass {
+ let Name = "CallSymbol";
+ let RenderMethod = "addImmOperands";
+ let DiagnosticType = "InvalidCallSymbol";
+ let ParserMethod = "parseCallSymbol";
+}
+
+// A bare symbol used in call/tail only.
+def call_symbol : Operand<XLenVT> {
+ let ParserMatchClass = CallSymbol;
+}
+
+def TPRelAddSymbol : AsmOperandClass {
+ let Name = "TPRelAddSymbol";
+ let RenderMethod = "addImmOperands";
+ let DiagnosticType = "InvalidTPRelAddSymbol";
+ let ParserMethod = "parseOperandWithModifier";
+}
+
+// A bare symbol with the %tprel_add variant.
+def tprel_add_symbol : Operand<XLenVT> {
+ let ParserMatchClass = TPRelAddSymbol;
+}
+
+def CSRSystemRegister : AsmOperandClass {
+ let Name = "CSRSystemRegister";
+ let ParserMethod = "parseCSRSystemRegister";
+ let DiagnosticType = "InvalidCSRSystemRegister";
+}
+
+def csr_sysreg : Operand<XLenVT> {
+ let ParserMatchClass = CSRSystemRegister;
+ let PrintMethod = "printCSRSystemRegister";
+ let DecoderMethod = "decodeUImmOperand<12>";
+}
+
+// A parameterized register class alternative to i32imm/i64imm from Target.td.
+def ixlenimm : Operand<XLenVT>;
+
+def ixlenimm_li : Operand<XLenVT> {
+ let ParserMatchClass = ImmXLenAsmOperand<"", "LI">;
+}
+
+// Standalone (codegen-only) immleaf patterns.
+def simm32 : ImmLeaf<XLenVT, [{return isInt<32>(Imm);}]>;
+def simm32hi20 : ImmLeaf<XLenVT, [{return isShiftedInt<20, 12>(Imm);}]>;
+// A mask value that won't affect significant shift bits.
+def immbottomxlenset : ImmLeaf<XLenVT, [{
+ if (Subtarget->is64Bit())
+ return countTrailingOnes<uint64_t>(Imm) >= 6;
+ return countTrailingOnes<uint64_t>(Imm) >= 5;
+}]>;
+
+// Addressing modes.
+// Necessary because a frameindex can't be matched directly in a pattern.
+def AddrFI : ComplexPattern<iPTR, 1, "SelectAddrFI", [frameindex], []>;
+
+// Extract least significant 12 bits from an immediate value and sign extend
+// them.
+def LO12Sext : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(SignExtend64<12>(N->getZExtValue()),
+ SDLoc(N), N->getValueType(0));
+}]>;
+
+// Extract the most significant 20 bits from an immediate value. Add 1 if bit
+// 11 is 1, to compensate for the low 12 bits in the matching immediate addi
+// or ld/st being negative.
+def HI20 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(((N->getZExtValue()+0x800) >> 12) & 0xfffff,
+ SDLoc(N), N->getValueType(0));
+}]>;
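+// This is the same Hi20/Lo12 split performed by RISCVInstrInfo::movImm32 when
+// materializing constants at the machine-instruction level.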
+
+//===----------------------------------------------------------------------===//
+// Instruction Formats
+//===----------------------------------------------------------------------===//
+
+include "RISCVInstrFormats.td"
+
+//===----------------------------------------------------------------------===//
+// Instruction Class Templates
+//===----------------------------------------------------------------------===//
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class BranchCC_rri<bits<3> funct3, string opcodestr>
+ : RVInstB<funct3, OPC_BRANCH, (outs),
+ (ins GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12),
+ opcodestr, "$rs1, $rs2, $imm12"> {
+ let isBranch = 1;
+ let isTerminator = 1;
+}
+
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
+class Load_ri<bits<3> funct3, string opcodestr>
+ : RVInstI<funct3, OPC_LOAD, (outs GPR:$rd), (ins GPR:$rs1, simm12:$imm12),
+ opcodestr, "$rd, ${imm12}(${rs1})">;
+
+// Operands for stores are in the order srcreg, base, offset rather than
+// reflecting the order these fields are specified in the instruction
+// encoding.
+let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
+class Store_rri<bits<3> funct3, string opcodestr>
+ : RVInstS<funct3, OPC_STORE, (outs),
+ (ins GPR:$rs2, GPR:$rs1, simm12:$imm12),
+ opcodestr, "$rs2, ${imm12}(${rs1})">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class ALU_ri<bits<3> funct3, string opcodestr>
+ : RVInstI<funct3, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1, simm12:$imm12),
+ opcodestr, "$rd, $rs1, $imm12">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class Shift_ri<bit arithshift, bits<3> funct3, string opcodestr>
+ : RVInstIShift<arithshift, funct3, OPC_OP_IMM, (outs GPR:$rd),
+ (ins GPR:$rs1, uimmlog2xlen:$shamt), opcodestr,
+ "$rd, $rs1, $shamt">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class ALU_rr<bits<7> funct7, bits<3> funct3, string opcodestr>
+ : RVInstR<funct7, funct3, OPC_OP, (outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
+ opcodestr, "$rd, $rs1, $rs2">;
+
+let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
+class CSR_ir<bits<3> funct3, string opcodestr>
+ : RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd), (ins csr_sysreg:$imm12, GPR:$rs1),
+ opcodestr, "$rd, $imm12, $rs1">;
+
+let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
+class CSR_ii<bits<3> funct3, string opcodestr>
+ : RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd),
+ (ins csr_sysreg:$imm12, uimm5:$rs1),
+ opcodestr, "$rd, $imm12, $rs1">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class ShiftW_ri<bit arithshift, bits<3> funct3, string opcodestr>
+ : RVInstIShiftW<arithshift, funct3, OPC_OP_IMM_32, (outs GPR:$rd),
+ (ins GPR:$rs1, uimm5:$shamt), opcodestr,
+ "$rd, $rs1, $shamt">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class ALUW_rr<bits<7> funct7, bits<3> funct3, string opcodestr>
+ : RVInstR<funct7, funct3, OPC_OP_32, (outs GPR:$rd),
+ (ins GPR:$rs1, GPR:$rs2), opcodestr, "$rd, $rs1, $rs2">;
+
+let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
+class Priv<string opcodestr, bits<7> funct7>
+ : RVInstR<funct7, 0b000, OPC_SYSTEM, (outs), (ins GPR:$rs1, GPR:$rs2),
+ opcodestr, "">;
+
+//===----------------------------------------------------------------------===//
+// Instructions
+//===----------------------------------------------------------------------===//
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in
+def LUI : RVInstU<OPC_LUI, (outs GPR:$rd), (ins uimm20_lui:$imm20),
+ "lui", "$rd, $imm20">;
+
+def AUIPC : RVInstU<OPC_AUIPC, (outs GPR:$rd), (ins uimm20_auipc:$imm20),
+ "auipc", "$rd, $imm20">;
+
+let isCall = 1 in
+def JAL : RVInstJ<OPC_JAL, (outs GPR:$rd), (ins simm21_lsb0_jal:$imm20),
+ "jal", "$rd, $imm20">;
+
+let isCall = 1 in
+def JALR : RVInstI<0b000, OPC_JALR, (outs GPR:$rd),
+ (ins GPR:$rs1, simm12:$imm12),
+ "jalr", "$rd, ${imm12}(${rs1})">;
+} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
+
+def BEQ : BranchCC_rri<0b000, "beq">;
+def BNE : BranchCC_rri<0b001, "bne">;
+def BLT : BranchCC_rri<0b100, "blt">;
+def BGE : BranchCC_rri<0b101, "bge">;
+def BLTU : BranchCC_rri<0b110, "bltu">;
+def BGEU : BranchCC_rri<0b111, "bgeu">;
+
+def LB : Load_ri<0b000, "lb">;
+def LH : Load_ri<0b001, "lh">;
+def LW : Load_ri<0b010, "lw">;
+def LBU : Load_ri<0b100, "lbu">;
+def LHU : Load_ri<0b101, "lhu">;
+
+def SB : Store_rri<0b000, "sb">;
+def SH : Store_rri<0b001, "sh">;
+def SW : Store_rri<0b010, "sw">;
+
+// ADDI isn't always rematerializable, but isReMaterializable will be used as
+// a hint which is verified in isReallyTriviallyReMaterializable.
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in
+def ADDI : ALU_ri<0b000, "addi">;
+
+def SLTI : ALU_ri<0b010, "slti">;
+def SLTIU : ALU_ri<0b011, "sltiu">;
+
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
+def XORI : ALU_ri<0b100, "xori">;
+def ORI : ALU_ri<0b110, "ori">;
+}
+
+def ANDI : ALU_ri<0b111, "andi">;
+
+def SLLI : Shift_ri<0, 0b001, "slli">;
+def SRLI : Shift_ri<0, 0b101, "srli">;
+def SRAI : Shift_ri<1, 0b101, "srai">;
+
+def ADD : ALU_rr<0b0000000, 0b000, "add">;
+def SUB : ALU_rr<0b0100000, 0b000, "sub">;
+def SLL : ALU_rr<0b0000000, 0b001, "sll">;
+def SLT : ALU_rr<0b0000000, 0b010, "slt">;
+def SLTU : ALU_rr<0b0000000, 0b011, "sltu">;
+def XOR : ALU_rr<0b0000000, 0b100, "xor">;
+def SRL : ALU_rr<0b0000000, 0b101, "srl">;
+def SRA : ALU_rr<0b0100000, 0b101, "sra">;
+def OR : ALU_rr<0b0000000, 0b110, "or">;
+def AND : ALU_rr<0b0000000, 0b111, "and">;
+
+let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
+def FENCE : RVInstI<0b000, OPC_MISC_MEM, (outs),
+ (ins fencearg:$pred, fencearg:$succ),
+ "fence", "$pred, $succ"> {
+ bits<4> pred;
+ bits<4> succ;
+
+ let rs1 = 0;
+ let rd = 0;
+ let imm12 = {0b0000,pred,succ};
+}
+
+def FENCE_TSO : RVInstI<0b000, OPC_MISC_MEM, (outs), (ins), "fence.tso", ""> {
+ let rs1 = 0;
+ let rd = 0;
+ let imm12 = {0b1000,0b0011,0b0011};
+}
+
+def FENCE_I : RVInstI<0b001, OPC_MISC_MEM, (outs), (ins), "fence.i", ""> {
+ let rs1 = 0;
+ let rd = 0;
+ let imm12 = 0;
+}
+
+def ECALL : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "ecall", ""> {
+ let rs1 = 0;
+ let rd = 0;
+ let imm12 = 0;
+}
+
+def EBREAK : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "ebreak", ""> {
+ let rs1 = 0;
+ let rd = 0;
+ let imm12 = 1;
+}
+
+// This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
+// instruction (i.e., it should always trap, if your implementation has invalid
+// instruction traps).
+def UNIMP : RVInstI<0b001, OPC_SYSTEM, (outs), (ins), "unimp", ""> {
+ let rs1 = 0;
+ let rd = 0;
+ let imm12 = 0b110000000000;
+}
+} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
+
+def CSRRW : CSR_ir<0b001, "csrrw">;
+def CSRRS : CSR_ir<0b010, "csrrs">;
+def CSRRC : CSR_ir<0b011, "csrrc">;
+
+def CSRRWI : CSR_ii<0b101, "csrrwi">;
+def CSRRSI : CSR_ii<0b110, "csrrsi">;
+def CSRRCI : CSR_ii<0b111, "csrrci">;
+
+/// RV64I instructions
+
+let Predicates = [IsRV64] in {
+def LWU : Load_ri<0b110, "lwu">;
+def LD : Load_ri<0b011, "ld">;
+def SD : Store_rri<0b011, "sd">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def ADDIW : RVInstI<0b000, OPC_OP_IMM_32, (outs GPR:$rd),
+ (ins GPR:$rs1, simm12:$imm12),
+ "addiw", "$rd, $rs1, $imm12">;
+
+def SLLIW : ShiftW_ri<0, 0b001, "slliw">;
+def SRLIW : ShiftW_ri<0, 0b101, "srliw">;
+def SRAIW : ShiftW_ri<1, 0b101, "sraiw">;
+
+def ADDW : ALUW_rr<0b0000000, 0b000, "addw">;
+def SUBW : ALUW_rr<0b0100000, 0b000, "subw">;
+def SLLW : ALUW_rr<0b0000000, 0b001, "sllw">;
+def SRLW : ALUW_rr<0b0000000, 0b101, "srlw">;
+def SRAW : ALUW_rr<0b0100000, 0b101, "sraw">;
+} // Predicates = [IsRV64]
+
+//===----------------------------------------------------------------------===//
+// Privileged instructions
+//===----------------------------------------------------------------------===//
+
+let isBarrier = 1, isReturn = 1, isTerminator = 1 in {
+def URET : Priv<"uret", 0b0000000> {
+ let rd = 0;
+ let rs1 = 0;
+ let rs2 = 0b00010;
+}
+
+def SRET : Priv<"sret", 0b0001000> {
+ let rd = 0;
+ let rs1 = 0;
+ let rs2 = 0b00010;
+}
+
+def MRET : Priv<"mret", 0b0011000> {
+ let rd = 0;
+ let rs1 = 0;
+ let rs2 = 0b00010;
+}
+} // isBarrier = 1, isReturn = 1, isTerminator = 1
+
+def WFI : Priv<"wfi", 0b0001000> {
+ let rd = 0;
+ let rs1 = 0;
+ let rs2 = 0b00101;
+}
+
+let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
+def SFENCE_VMA : RVInstR<0b0001001, 0b000, OPC_SYSTEM, (outs),
+ (ins GPR:$rs1, GPR:$rs2),
+ "sfence.vma", "$rs1, $rs2"> {
+ let rd = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Assembler Pseudo Instructions (User-Level ISA, Version 2.2, Chapter 20)
+//===----------------------------------------------------------------------===//
+
+def : InstAlias<"nop", (ADDI X0, X0, 0)>;
+
+// Note that the size is 32 because up to 8 32-bit instructions are needed to
+// generate an arbitrary 64-bit immediate. However, the size does not really
+// matter since PseudoLI is currently only used in the AsmParser where it gets
+// expanded to real instructions immediately.
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 32,
+ isCodeGenOnly = 0, isAsmParserOnly = 1 in
+def PseudoLI : Pseudo<(outs GPR:$rd), (ins ixlenimm_li:$imm), [],
+ "li", "$rd, $imm">;
+
+def PseudoLB : PseudoLoad<"lb">;
+def PseudoLBU : PseudoLoad<"lbu">;
+def PseudoLH : PseudoLoad<"lh">;
+def PseudoLHU : PseudoLoad<"lhu">;
+def PseudoLW : PseudoLoad<"lw">;
+
+def PseudoSB : PseudoStore<"sb">;
+def PseudoSH : PseudoStore<"sh">;
+def PseudoSW : PseudoStore<"sw">;
+
+let Predicates = [IsRV64] in {
+def PseudoLWU : PseudoLoad<"lwu">;
+def PseudoLD : PseudoLoad<"ld">;
+def PseudoSD : PseudoStore<"sd">;
+} // Predicates = [IsRV64]
+
+def : InstAlias<"mv $rd, $rs", (ADDI GPR:$rd, GPR:$rs, 0)>;
+def : InstAlias<"not $rd, $rs", (XORI GPR:$rd, GPR:$rs, -1)>;
+def : InstAlias<"neg $rd, $rs", (SUB GPR:$rd, X0, GPR:$rs)>;
+
+let Predicates = [IsRV64] in {
+def : InstAlias<"negw $rd, $rs", (SUBW GPR:$rd, X0, GPR:$rs)>;
+def : InstAlias<"sext.w $rd, $rs", (ADDIW GPR:$rd, GPR:$rs, 0)>;
+} // Predicates = [IsRV64]
+
+def : InstAlias<"seqz $rd, $rs", (SLTIU GPR:$rd, GPR:$rs, 1)>;
+def : InstAlias<"snez $rd, $rs", (SLTU GPR:$rd, X0, GPR:$rs)>;
+def : InstAlias<"sltz $rd, $rs", (SLT GPR:$rd, GPR:$rs, X0)>;
+def : InstAlias<"sgtz $rd, $rs", (SLT GPR:$rd, X0, GPR:$rs)>;
+
+// sgt/sgtu are recognised by the GNU assembler but the canonical slt/sltu
+// form will always be printed. Therefore, set a zero weight.
+def : InstAlias<"sgt $rd, $rs, $rt", (SLT GPR:$rd, GPR:$rt, GPR:$rs), 0>;
+def : InstAlias<"sgtu $rd, $rs, $rt", (SLTU GPR:$rd, GPR:$rt, GPR:$rs), 0>;
+
+def : InstAlias<"beqz $rs, $offset",
+ (BEQ GPR:$rs, X0, simm13_lsb0:$offset)>;
+def : InstAlias<"bnez $rs, $offset",
+ (BNE GPR:$rs, X0, simm13_lsb0:$offset)>;
+def : InstAlias<"blez $rs, $offset",
+ (BGE X0, GPR:$rs, simm13_lsb0:$offset)>;
+def : InstAlias<"bgez $rs, $offset",
+ (BGE GPR:$rs, X0, simm13_lsb0:$offset)>;
+def : InstAlias<"bltz $rs, $offset",
+ (BLT GPR:$rs, X0, simm13_lsb0:$offset)>;
+def : InstAlias<"bgtz $rs, $offset",
+ (BLT X0, GPR:$rs, simm13_lsb0:$offset)>;
+
+// Always output the canonical mnemonic for the pseudo branch instructions.
+// The GNU tools emit the canonical mnemonic for the branch pseudo instructions
+// as well (e.g. "bgt" will be recognised by the assembler but never printed by
+// objdump). Match this behaviour by setting a zero weight.
+def : InstAlias<"bgt $rs, $rt, $offset",
+ (BLT GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
+def : InstAlias<"ble $rs, $rt, $offset",
+ (BGE GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
+def : InstAlias<"bgtu $rs, $rt, $offset",
+ (BLTU GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
+def : InstAlias<"bleu $rs, $rt, $offset",
+ (BGEU GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
+
+def : InstAlias<"j $offset", (JAL X0, simm21_lsb0_jal:$offset)>;
+def : InstAlias<"jal $offset", (JAL X1, simm21_lsb0_jal:$offset)>;
+
+// Non-zero offset aliases of "jalr" are the lowest weight, followed by the
+// two-register form, then the one-register forms and finally "ret".
+def : InstAlias<"jr $rs", (JALR X0, GPR:$rs, 0), 3>;
+def : InstAlias<"jr ${offset}(${rs})", (JALR X0, GPR:$rs, simm12:$offset)>;
+def : InstAlias<"jalr $rs", (JALR X1, GPR:$rs, 0), 3>;
+def : InstAlias<"jalr ${offset}(${rs})", (JALR X1, GPR:$rs, simm12:$offset)>;
+def : InstAlias<"jalr $rd, $rs", (JALR GPR:$rd, GPR:$rs, 0), 2>;
+def : InstAlias<"ret", (JALR X0, X1, 0), 4>;
+
+// Non-canonical forms for jump targets also accepted by the assembler.
+def : InstAlias<"jr $rs, $offset", (JALR X0, GPR:$rs, simm12:$offset), 0>;
+def : InstAlias<"jalr $rs, $offset", (JALR X1, GPR:$rs, simm12:$offset), 0>;
+def : InstAlias<"jalr $rd, $rs, $offset", (JALR GPR:$rd, GPR:$rs, simm12:$offset), 0>;
+
+// TODO call
+// TODO tail
+
+def : InstAlias<"fence", (FENCE 0xF, 0xF)>; // 0xF == iorw
+
+def : InstAlias<"rdinstret $rd", (CSRRS GPR:$rd, INSTRET.Encoding, X0)>;
+def : InstAlias<"rdcycle $rd", (CSRRS GPR:$rd, CYCLE.Encoding, X0)>;
+def : InstAlias<"rdtime $rd", (CSRRS GPR:$rd, TIME.Encoding, X0)>;
+
+let Predicates = [IsRV32] in {
+def : InstAlias<"rdinstreth $rd", (CSRRS GPR:$rd, INSTRETH.Encoding, X0)>;
+def : InstAlias<"rdcycleh $rd", (CSRRS GPR:$rd, CYCLEH.Encoding, X0)>;
+def : InstAlias<"rdtimeh $rd", (CSRRS GPR:$rd, TIMEH.Encoding, X0)>;
+} // Predicates = [IsRV32]
+
+def : InstAlias<"csrr $rd, $csr", (CSRRS GPR:$rd, csr_sysreg:$csr, X0)>;
+def : InstAlias<"csrw $csr, $rs", (CSRRW X0, csr_sysreg:$csr, GPR:$rs)>;
+def : InstAlias<"csrs $csr, $rs", (CSRRS X0, csr_sysreg:$csr, GPR:$rs)>;
+def : InstAlias<"csrc $csr, $rs", (CSRRC X0, csr_sysreg:$csr, GPR:$rs)>;
+
+def : InstAlias<"csrwi $csr, $imm", (CSRRWI X0, csr_sysreg:$csr, uimm5:$imm)>;
+def : InstAlias<"csrsi $csr, $imm", (CSRRSI X0, csr_sysreg:$csr, uimm5:$imm)>;
+def : InstAlias<"csrci $csr, $imm", (CSRRCI X0, csr_sysreg:$csr, uimm5:$imm)>;
+
+let EmitPriority = 0 in {
+def : InstAlias<"csrw $csr, $imm", (CSRRWI X0, csr_sysreg:$csr, uimm5:$imm)>;
+def : InstAlias<"csrs $csr, $imm", (CSRRSI X0, csr_sysreg:$csr, uimm5:$imm)>;
+def : InstAlias<"csrc $csr, $imm", (CSRRCI X0, csr_sysreg:$csr, uimm5:$imm)>;
+
+def : InstAlias<"csrrw $rd, $csr, $imm", (CSRRWI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
+def : InstAlias<"csrrs $rd, $csr, $imm", (CSRRSI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
+def : InstAlias<"csrrc $rd, $csr, $imm", (CSRRCI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
+}
+
+def : InstAlias<"sfence.vma", (SFENCE_VMA X0, X0)>;
+def : InstAlias<"sfence.vma $rs", (SFENCE_VMA GPR:$rs, X0)>;
+
+let EmitPriority = 0 in {
+def : InstAlias<"lb $rd, (${rs1})",
+ (LB GPR:$rd, GPR:$rs1, 0)>;
+def : InstAlias<"lh $rd, (${rs1})",
+ (LH GPR:$rd, GPR:$rs1, 0)>;
+def : InstAlias<"lw $rd, (${rs1})",
+ (LW GPR:$rd, GPR:$rs1, 0)>;
+def : InstAlias<"lbu $rd, (${rs1})",
+ (LBU GPR:$rd, GPR:$rs1, 0)>;
+def : InstAlias<"lhu $rd, (${rs1})",
+ (LHU GPR:$rd, GPR:$rs1, 0)>;
+
+def : InstAlias<"sb $rs2, (${rs1})",
+ (SB GPR:$rs2, GPR:$rs1, 0)>;
+def : InstAlias<"sh $rs2, (${rs1})",
+ (SH GPR:$rs2, GPR:$rs1, 0)>;
+def : InstAlias<"sw $rs2, (${rs1})",
+ (SW GPR:$rs2, GPR:$rs1, 0)>;
+
+def : InstAlias<"add $rd, $rs1, $imm12",
+ (ADDI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
+def : InstAlias<"and $rd, $rs1, $imm12",
+ (ANDI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
+def : InstAlias<"xor $rd, $rs1, $imm12",
+ (XORI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
+def : InstAlias<"or $rd, $rs1, $imm12",
+ (ORI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
+def : InstAlias<"sll $rd, $rs1, $shamt",
+ (SLLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
+def : InstAlias<"srl $rd, $rs1, $shamt",
+ (SRLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
+def : InstAlias<"sra $rd, $rs1, $shamt",
+ (SRAI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
+let Predicates = [IsRV64] in {
+def : InstAlias<"lwu $rd, (${rs1})",
+ (LWU GPR:$rd, GPR:$rs1, 0)>;
+def : InstAlias<"ld $rd, (${rs1})",
+ (LD GPR:$rd, GPR:$rs1, 0)>;
+def : InstAlias<"sd $rs2, (${rs1})",
+ (SD GPR:$rs2, GPR:$rs1, 0)>;
+
+def : InstAlias<"addw $rd, $rs1, $imm12",
+ (ADDIW GPR:$rd, GPR:$rs1, simm12:$imm12)>;
+def : InstAlias<"sllw $rd, $rs1, $shamt",
+ (SLLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
+def : InstAlias<"srlw $rd, $rs1, $shamt",
+ (SRLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
+def : InstAlias<"sraw $rd, $rs1, $shamt",
+ (SRAIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
+} // Predicates = [IsRV64]
+def : InstAlias<"slt $rd, $rs1, $imm12",
+ (SLTI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
+def : InstAlias<"sltu $rd, $rs1, $imm12",
+ (SLTIU GPR:$rd, GPR:$rs1, simm12:$imm12)>;
+}
+
+def : MnemonicAlias<"move", "mv">;
+
+// The SCALL and SBREAK instructions were renamed to ECALL and EBREAK in
+// version 2.1 of the user-level ISA. Like the GNU toolchain, we still accept
+// the old names for backwards compatibility.
+def : MnemonicAlias<"scall", "ecall">;
+def : MnemonicAlias<"sbreak", "ebreak">;
+
+//===----------------------------------------------------------------------===//
+// Pseudo-instructions and codegen patterns
+//
+// Naming convention: For 'generic' pattern classes, we use the naming
+// convention PatTy1Ty2. For pattern classes which offer a more complex
+// expansion, prefix the class name, e.g. BccPat.
+//===----------------------------------------------------------------------===//
+
+/// Generic pattern classes
+
+class PatGprGpr<SDPatternOperator OpNode, RVInst Inst>
+ : Pat<(OpNode GPR:$rs1, GPR:$rs2), (Inst GPR:$rs1, GPR:$rs2)>;
+class PatGprSimm12<SDPatternOperator OpNode, RVInstI Inst>
+ : Pat<(OpNode GPR:$rs1, simm12:$imm12), (Inst GPR:$rs1, simm12:$imm12)>;
+class PatGprUimmLog2XLen<SDPatternOperator OpNode, RVInstIShift Inst>
+ : Pat<(OpNode GPR:$rs1, uimmlog2xlen:$shamt),
+ (Inst GPR:$rs1, uimmlog2xlen:$shamt)>;
+
+/// Predicates
+
+def IsOrAdd: PatFrag<(ops node:$A, node:$B), (or node:$A, node:$B), [{
+ return isOrEquivalentToAdd(N);
+}]>;
+def assertsexti32 : PatFrag<(ops node:$src), (assertsext node:$src), [{
+ return cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32;
+}]>;
+def sexti32 : PatFrags<(ops node:$src),
+ [(sext_inreg node:$src, i32),
+ (assertsexti32 node:$src)]>;
+def assertzexti32 : PatFrag<(ops node:$src), (assertzext node:$src), [{
+ return cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32;
+}]>;
+def zexti32 : PatFrags<(ops node:$src),
+ [(and node:$src, 0xffffffff),
+ (assertzexti32 node:$src)]>;
+
+/// Immediates
+
+def : Pat<(simm12:$imm), (ADDI X0, simm12:$imm)>;
+def : Pat<(simm32hi20:$imm), (LUI (HI20 imm:$imm))>;
+def : Pat<(simm32:$imm), (ADDI (LUI (HI20 imm:$imm)), (LO12Sext imm:$imm))>,
+ Requires<[IsRV32]>;
+
+/// Simple arithmetic operations
+
+def : PatGprGpr<add, ADD>;
+def : PatGprSimm12<add, ADDI>;
+def : PatGprGpr<sub, SUB>;
+def : PatGprGpr<or, OR>;
+def : PatGprSimm12<or, ORI>;
+def : PatGprGpr<and, AND>;
+def : PatGprSimm12<and, ANDI>;
+def : PatGprGpr<xor, XOR>;
+def : PatGprSimm12<xor, XORI>;
+def : PatGprUimmLog2XLen<shl, SLLI>;
+def : PatGprUimmLog2XLen<srl, SRLI>;
+def : PatGprUimmLog2XLen<sra, SRAI>;
+
+// Match both a plain shift and one where the shift amount is masked (this is
+// typically introduced when the legalizer promotes the shift amount and
+// zero-extends it). For RISC-V, the mask is unnecessary as shifts in the base
+// ISA only read the least significant 5 bits (RV32I) or 6 bits (RV64I).
+class shiftop<SDPatternOperator operator>
+ : PatFrags<(ops node:$val, node:$count),
+ [(operator node:$val, node:$count),
+ (operator node:$val, (and node:$count, immbottomxlenset))]>;
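+// e.g. on RV32, (srl GPR:$x, (and GPR:$y, 31)) matches plain SRL, since the
+// hardware already reads only the low 5 bits of the shift amount.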
+
+def : PatGprGpr<shiftop<shl>, SLL>;
+def : PatGprGpr<shiftop<srl>, SRL>;
+def : PatGprGpr<shiftop<sra>, SRA>;
+
+// This is a special case of the ADD instruction used to facilitate the use of a
+// fourth operand to emit a relocation on a symbol relating to this instruction.
+// The relocation does not affect any bits of the instruction itself but is used
+// as a hint to the linker.
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0 in
+def PseudoAddTPRel : Pseudo<(outs GPR:$rd),
+ (ins GPR:$rs1, GPR:$rs2, tprel_add_symbol:$src), [],
+ "add", "$rd, $rs1, $rs2, $src">;
+
+/// FrameIndex calculations
+
+def : Pat<(add (i32 AddrFI:$Rs), simm12:$imm12),
+ (ADDI (i32 AddrFI:$Rs), simm12:$imm12)>;
+def : Pat<(IsOrAdd (i32 AddrFI:$Rs), simm12:$imm12),
+ (ADDI (i32 AddrFI:$Rs), simm12:$imm12)>;
+
+/// Setcc
+
+def : PatGprGpr<setlt, SLT>;
+def : PatGprSimm12<setlt, SLTI>;
+def : PatGprGpr<setult, SLTU>;
+def : PatGprSimm12<setult, SLTIU>;
+
+// Define pattern expansions for setcc operations that aren't directly
+// handled by a RISC-V instruction.
+def : Pat<(seteq GPR:$rs1, 0), (SLTIU GPR:$rs1, 1)>;
+def : Pat<(seteq GPR:$rs1, GPR:$rs2), (SLTIU (XOR GPR:$rs1, GPR:$rs2), 1)>;
+def : Pat<(seteq GPR:$rs1, simm12:$imm12),
+ (SLTIU (XORI GPR:$rs1, simm12:$imm12), 1)>;
+def : Pat<(setne GPR:$rs1, 0), (SLTU X0, GPR:$rs1)>;
+def : Pat<(setne GPR:$rs1, GPR:$rs2), (SLTU X0, (XOR GPR:$rs1, GPR:$rs2))>;
+def : Pat<(setne GPR:$rs1, simm12:$imm12),
+ (SLTU X0, (XORI GPR:$rs1, simm12:$imm12))>;
+def : Pat<(setugt GPR:$rs1, GPR:$rs2), (SLTU GPR:$rs2, GPR:$rs1)>;
+def : Pat<(setuge GPR:$rs1, GPR:$rs2), (XORI (SLTU GPR:$rs1, GPR:$rs2), 1)>;
+def : Pat<(setule GPR:$rs1, GPR:$rs2), (XORI (SLTU GPR:$rs2, GPR:$rs1), 1)>;
+def : Pat<(setgt GPR:$rs1, GPR:$rs2), (SLT GPR:$rs2, GPR:$rs1)>;
+def : Pat<(setge GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs1, GPR:$rs2), 1)>;
+def : Pat<(setle GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs2, GPR:$rs1), 1)>;
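+// For example, (seteq $a, $b) lowers to `xor t, a, b; sltiu rd, t, 1`, which
+// produces 1 iff a == b; the setge/setle/setuge/setule patterns invert the
+// strict comparison by flipping its low bit with XORI.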
+
+let usesCustomInserter = 1 in
+class SelectCC_rrirr<RegisterClass valty, RegisterClass cmpty>
+ : Pseudo<(outs valty:$dst),
+ (ins cmpty:$lhs, cmpty:$rhs, ixlenimm:$imm,
+ valty:$truev, valty:$falsev),
+ [(set valty:$dst, (riscv_selectcc cmpty:$lhs, cmpty:$rhs,
+ (XLenVT imm:$imm), valty:$truev, valty:$falsev))]>;
+
+def Select_GPR_Using_CC_GPR : SelectCC_rrirr<GPR, GPR>;
+
+/// Branches and jumps
+
+// Match `(brcond (CondOp ..), ..)` and lower to the appropriate RISC-V branch
+// instruction.
+class BccPat<PatFrag CondOp, RVInstB Inst>
+ : Pat<(brcond (XLenVT (CondOp GPR:$rs1, GPR:$rs2)), bb:$imm12),
+ (Inst GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12)>;
+
+def : BccPat<seteq, BEQ>;
+def : BccPat<setne, BNE>;
+def : BccPat<setlt, BLT>;
+def : BccPat<setge, BGE>;
+def : BccPat<setult, BLTU>;
+def : BccPat<setuge, BGEU>;
+
+class BccSwapPat<PatFrag CondOp, RVInst InstBcc>
+ : Pat<(brcond (XLenVT (CondOp GPR:$rs1, GPR:$rs2)), bb:$imm12),
+ (InstBcc GPR:$rs2, GPR:$rs1, bb:$imm12)>;
+
+// Condition codes that don't have matching RISC-V branch instructions but
+// are trivially supported by swapping the two input operands.
+def : BccSwapPat<setgt, BLT>;
+def : BccSwapPat<setle, BGE>;
+def : BccSwapPat<setugt, BLTU>;
+def : BccSwapPat<setule, BGEU>;
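+// e.g. (brcond (setgt $a, $b), bb) becomes (BLT $b, $a, bb).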
+
+// An extra pattern is needed for a brcond without a setcc (i.e. where the
+// condition was calculated elsewhere).
+def : Pat<(brcond GPR:$cond, bb:$imm12), (BNE GPR:$cond, X0, bb:$imm12)>;
+
+let isBarrier = 1, isBranch = 1, isTerminator = 1 in
+def PseudoBR : Pseudo<(outs), (ins simm21_lsb0_jal:$imm20), [(br bb:$imm20)]>,
+ PseudoInstExpansion<(JAL X0, simm21_lsb0_jal:$imm20)>;
+
+let isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in
+def PseudoBRIND : Pseudo<(outs), (ins GPR:$rs1, simm12:$imm12), []>,
+ PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12:$imm12)>;
+
+def : Pat<(brind GPR:$rs1), (PseudoBRIND GPR:$rs1, 0)>;
+def : Pat<(brind (add GPR:$rs1, simm12:$imm12)),
+ (PseudoBRIND GPR:$rs1, simm12:$imm12)>;
+
+// PseudoCALLReg is a generic pseudo instruction for calls which will
+// eventually expand to auipc and jalr during encoding, with any given
+// register used as the destination.
+// Define AsmString to print "call" when compiling with the -S flag.
+// Define isCodeGenOnly = 0 to support parsing the assembly "call" instruction.
+let isCall = 1, isBarrier = 1, isCodeGenOnly = 0, hasSideEffects = 0,
+ mayStore = 0, mayLoad = 0 in
+def PseudoCALLReg : Pseudo<(outs GPR:$rd), (ins call_symbol:$func), []> {
+ let AsmString = "call\t$rd, $func";
+}
+
+// PseudoCALL is a pseudo instruction which will eventually expand to auipc
+// and jalr during encoding. This is desirable, as an auipc+jalr pair with
+// R_RISCV_CALL and R_RISCV_RELAX relocations can be relaxed by the linker
+// if the offset fits in a signed 21-bit immediate.
+// Define AsmString to print "call" when compiling with the -S flag.
+// Define isCodeGenOnly = 0 to support parsing the assembly "call" instruction.
+let isCall = 1, Defs = [X1], isCodeGenOnly = 0 in
+def PseudoCALL : Pseudo<(outs), (ins call_symbol:$func), []> {
+ let AsmString = "call\t$func";
+}
+
+def : Pat<(riscv_call tglobaladdr:$func), (PseudoCALL tglobaladdr:$func)>;
+def : Pat<(riscv_call texternalsym:$func), (PseudoCALL texternalsym:$func)>;
+
+def : Pat<(riscv_uret_flag), (URET X0, X0)>;
+def : Pat<(riscv_sret_flag), (SRET X0, X0)>;
+def : Pat<(riscv_mret_flag), (MRET X0, X0)>;
+
+let isCall = 1, Defs = [X1] in
+def PseudoCALLIndirect : Pseudo<(outs), (ins GPR:$rs1),
+ [(riscv_call GPR:$rs1)]>,
+ PseudoInstExpansion<(JALR X1, GPR:$rs1, 0)>;
+
+let isBarrier = 1, isReturn = 1, isTerminator = 1 in
+def PseudoRET : Pseudo<(outs), (ins), [(riscv_ret_flag)]>,
+ PseudoInstExpansion<(JALR X0, X1, 0)>;
+
+// PseudoTAIL is a pseudo instruction similar to PseudoCALL and will
+// eventually expand to auipc and jalr during encoding.
+// Define AsmString to print "tail" when compiling with the -S flag.
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2],
+ isCodeGenOnly = 0 in
+def PseudoTAIL : Pseudo<(outs), (ins call_symbol:$dst), []> {
+ let AsmString = "tail\t$dst";
+}
+
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2] in
+def PseudoTAILIndirect : Pseudo<(outs), (ins GPRTC:$rs1),
+ [(riscv_tail GPRTC:$rs1)]>,
+ PseudoInstExpansion<(JALR X0, GPR:$rs1, 0)>;
+
+def : Pat<(riscv_tail (iPTR tglobaladdr:$dst)),
+ (PseudoTAIL texternalsym:$dst)>;
+def : Pat<(riscv_tail (iPTR texternalsym:$dst)),
+ (PseudoTAIL texternalsym:$dst)>;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0,
+ isAsmParserOnly = 1 in
+def PseudoLLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
+ "lla", "$dst, $src">;
+
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0,
+ isAsmParserOnly = 1 in
+def PseudoLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
+ "la", "$dst, $src">;
+
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0,
+ isAsmParserOnly = 1 in
+def PseudoLA_TLS_IE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
+ "la.tls.ie", "$dst, $src">;
+
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0,
+ isAsmParserOnly = 1 in
+def PseudoLA_TLS_GD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
+ "la.tls.gd", "$dst, $src">;
+
+/// Loads
+
+multiclass LdPat<PatFrag LoadOp, RVInst Inst> {
+ def : Pat<(LoadOp GPR:$rs1), (Inst GPR:$rs1, 0)>;
+ def : Pat<(LoadOp AddrFI:$rs1), (Inst AddrFI:$rs1, 0)>;
+ def : Pat<(LoadOp (add GPR:$rs1, simm12:$imm12)),
+ (Inst GPR:$rs1, simm12:$imm12)>;
+ def : Pat<(LoadOp (add AddrFI:$rs1, simm12:$imm12)),
+ (Inst AddrFI:$rs1, simm12:$imm12)>;
+ def : Pat<(LoadOp (IsOrAdd AddrFI:$rs1, simm12:$imm12)),
+ (Inst AddrFI:$rs1, simm12:$imm12)>;
+}
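+// Each LdPat instantiation therefore covers five addressing shapes: a bare
+// register, a bare frame index, and register/frame-index/or-as-add bases with
+// a simm12 offset folded into the instruction's immediate field.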
+
+defm : LdPat<sextloadi8, LB>;
+defm : LdPat<extloadi8, LB>;
+defm : LdPat<sextloadi16, LH>;
+defm : LdPat<extloadi16, LH>;
+defm : LdPat<load, LW>, Requires<[IsRV32]>;
+defm : LdPat<zextloadi8, LBU>;
+defm : LdPat<zextloadi16, LHU>;
+
+/// Stores
+
+multiclass StPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy> {
+ def : Pat<(StoreOp StTy:$rs2, GPR:$rs1), (Inst StTy:$rs2, GPR:$rs1, 0)>;
+ def : Pat<(StoreOp StTy:$rs2, AddrFI:$rs1), (Inst StTy:$rs2, AddrFI:$rs1, 0)>;
+ def : Pat<(StoreOp StTy:$rs2, (add GPR:$rs1, simm12:$imm12)),
+ (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;
+ def : Pat<(StoreOp StTy:$rs2, (add AddrFI:$rs1, simm12:$imm12)),
+ (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
+ def : Pat<(StoreOp StTy:$rs2, (IsOrAdd AddrFI:$rs1, simm12:$imm12)),
+ (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
+}
+
+defm : StPat<truncstorei8, SB, GPR>;
+defm : StPat<truncstorei16, SH, GPR>;
+defm : StPat<store, SW, GPR>, Requires<[IsRV32]>;
+
+/// Fences
+
+// Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
+// Manual: Volume I.
+
+// fence acquire -> fence r, rw
+def : Pat<(atomic_fence (XLenVT 4), (imm)), (FENCE 0b10, 0b11)>;
+// fence release -> fence rw, w
+def : Pat<(atomic_fence (XLenVT 5), (imm)), (FENCE 0b11, 0b1)>;
+// fence acq_rel -> fence.tso
+def : Pat<(atomic_fence (XLenVT 6), (imm)), (FENCE_TSO)>;
+// fence seq_cst -> fence rw, rw
+def : Pat<(atomic_fence (XLenVT 7), (imm)), (FENCE 0b11, 0b11)>;
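+// The XLenVT immediates above are the numeric values of llvm::AtomicOrdering
+// (Acquire = 4, Release = 5, AcquireRelease = 6, SequentiallyConsistent = 7).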
+
+// Lowering for atomic load and store is defined in RISCVInstrInfoA.td.
+// Although these are lowered to fence+load/store instructions defined in the
+// base RV32I/RV64I ISA, this lowering is only used when the A extension is
+// present. This is necessary as it isn't valid to mix __atomic_* libcalls
+// with inline atomic operations for the same object.
+
+/// Other pseudo-instructions
+
+// Pessimistically assume the stack pointer will be clobbered
+let Defs = [X2], Uses = [X2] in {
+def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ [(callseq_start timm:$amt1, timm:$amt2)]>;
+def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ [(callseq_end timm:$amt1, timm:$amt2)]>;
+} // Defs = [X2], Uses = [X2]
+
+/// RV64 patterns
+
+let Predicates = [IsRV64] in {
+
+/// sext and zext
+
+def : Pat<(sext_inreg GPR:$rs1, i32), (ADDIW GPR:$rs1, 0)>;
+def : Pat<(and GPR:$rs1, 0xffffffff), (SRLI (SLLI GPR:$rs1, 32), 32)>;
+
+/// ALU operations
+
+def : Pat<(sext_inreg (add GPR:$rs1, GPR:$rs2), i32),
+ (ADDW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (add GPR:$rs1, simm12:$imm12), i32),
+ (ADDIW GPR:$rs1, simm12:$imm12)>;
+def : Pat<(sext_inreg (sub GPR:$rs1, GPR:$rs2), i32),
+ (SUBW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (shl GPR:$rs1, uimm5:$shamt), i32),
+ (SLLIW GPR:$rs1, uimm5:$shamt)>;
+// (srl (zexti32 ...), uimm5:$shamt) is matched with custom code due to the
+// need to undo manipulation of the mask value performed by DAGCombine.
+def : Pat<(sra (sext_inreg GPR:$rs1, i32), uimm5:$shamt),
+ (SRAIW GPR:$rs1, uimm5:$shamt)>;
+
+def : PatGprGpr<riscv_sllw, SLLW>;
+def : PatGprGpr<riscv_srlw, SRLW>;
+def : PatGprGpr<riscv_sraw, SRAW>;
+
+/// Loads
+
+defm : LdPat<sextloadi32, LW>;
+defm : LdPat<extloadi32, LW>;
+defm : LdPat<zextloadi32, LWU>;
+defm : LdPat<load, LD>;
+
+/// Stores
+
+defm : StPat<truncstorei32, SW, GPR>;
+defm : StPat<store, SD, GPR>;
+} // Predicates = [IsRV64]
+
+/// readcyclecounter
+// On RV64, we can directly read the 64-bit "cycle" CSR.
+let Predicates = [IsRV64] in
+def : Pat<(readcyclecounter), (CSRRS CYCLE.Encoding, X0)>;
+// On RV32, ReadCycleWide will be expanded to the suggested loop reading both
+// halves of the 64-bit "cycle" CSR.
+let Predicates = [IsRV32], usesCustomInserter = 1, hasSideEffects = 0,
+    mayLoad = 0, mayStore = 0, hasNoSchedulingInfo = 1 in
+def ReadCycleWide : Pseudo<(outs GPR:$lo, GPR:$hi), (ins), [], "", "">;
+
+//===----------------------------------------------------------------------===//
+// Standard extensions
+//===----------------------------------------------------------------------===//
+
+include "RISCVInstrInfoM.td"
+include "RISCVInstrInfoA.td"
+include "RISCVInstrInfoF.td"
+include "RISCVInstrInfoD.td"
+include "RISCVInstrInfoC.td"
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoA.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
new file mode 100644
index 000000000000..1484ba9f0687
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
@@ -0,0 +1,371 @@
+//===-- RISCVInstrInfoA.td - RISC-V 'A' instructions -------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the RISC-V instructions from the standard 'A', Atomic
+// Instructions extension.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Operand and SDNode transformation definitions.
+//===----------------------------------------------------------------------===//
+
+// A parse method for (${gpr}) or 0(${gpr}), where the 0 is silently ignored.
+// Used for compatibility with GNU as.
+def AtomicMemOpOperand : AsmOperandClass {
+ let Name = "AtomicMemOpOperand";
+ let RenderMethod = "addRegOperands";
+ let PredicateMethod = "isReg";
+ let ParserMethod = "parseAtomicMemOp";
+}
+
+def GPRMemAtomic : RegisterOperand<GPR> {
+ let ParserMatchClass = AtomicMemOpOperand;
+ let PrintMethod = "printAtomicMemOp";
+}
+
+//===----------------------------------------------------------------------===//
+// Instruction class templates
+//===----------------------------------------------------------------------===//
+
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
+class LR_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
+ : RVInstRAtomic<0b00010, aq, rl, funct3, OPC_AMO,
+ (outs GPR:$rd), (ins GPRMemAtomic:$rs1),
+ opcodestr, "$rd, $rs1"> {
+ let rs2 = 0;
+}
+
+multiclass LR_r_aq_rl<bits<3> funct3, string opcodestr> {
+ def "" : LR_r<0, 0, funct3, opcodestr>;
+ def _AQ : LR_r<1, 0, funct3, opcodestr # ".aq">;
+ def _RL : LR_r<0, 1, funct3, opcodestr # ".rl">;
+ def _AQ_RL : LR_r<1, 1, funct3, opcodestr # ".aqrl">;
+}
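+// Each use of this multiclass (and of AMO_rr_aq_rl below) defines the base
+// instruction together with its .aq, .rl and .aqrl variants; e.g. the LR_W
+// defm below yields LR_W, LR_W_AQ, LR_W_RL and LR_W_AQ_RL.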
+
+let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
+class AMO_rr<bits<5> funct5, bit aq, bit rl, bits<3> funct3, string opcodestr>
+ : RVInstRAtomic<funct5, aq, rl, funct3, OPC_AMO,
+ (outs GPR:$rd), (ins GPRMemAtomic:$rs1, GPR:$rs2),
+ opcodestr, "$rd, $rs2, $rs1">;
+
+multiclass AMO_rr_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr> {
+ def "" : AMO_rr<funct5, 0, 0, funct3, opcodestr>;
+ def _AQ : AMO_rr<funct5, 1, 0, funct3, opcodestr # ".aq">;
+ def _RL : AMO_rr<funct5, 0, 1, funct3, opcodestr # ".rl">;
+ def _AQ_RL : AMO_rr<funct5, 1, 1, funct3, opcodestr # ".aqrl">;
+}
+
+multiclass AtomicStPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy> {
+ def : Pat<(StoreOp GPR:$rs1, StTy:$rs2), (Inst StTy:$rs2, GPR:$rs1, 0)>;
+ def : Pat<(StoreOp AddrFI:$rs1, StTy:$rs2), (Inst StTy:$rs2, AddrFI:$rs1, 0)>;
+ def : Pat<(StoreOp (add GPR:$rs1, simm12:$imm12), StTy:$rs2),
+ (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;
+ def : Pat<(StoreOp (add AddrFI:$rs1, simm12:$imm12), StTy:$rs2),
+ (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
+ def : Pat<(StoreOp (IsOrAdd AddrFI:$rs1, simm12:$imm12), StTy:$rs2),
+ (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
+}
+
+//===----------------------------------------------------------------------===//
+// Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtA] in {
+defm LR_W : LR_r_aq_rl<0b010, "lr.w">;
+defm SC_W : AMO_rr_aq_rl<0b00011, 0b010, "sc.w">;
+defm AMOSWAP_W : AMO_rr_aq_rl<0b00001, 0b010, "amoswap.w">;
+defm AMOADD_W : AMO_rr_aq_rl<0b00000, 0b010, "amoadd.w">;
+defm AMOXOR_W : AMO_rr_aq_rl<0b00100, 0b010, "amoxor.w">;
+defm AMOAND_W : AMO_rr_aq_rl<0b01100, 0b010, "amoand.w">;
+defm AMOOR_W : AMO_rr_aq_rl<0b01000, 0b010, "amoor.w">;
+defm AMOMIN_W : AMO_rr_aq_rl<0b10000, 0b010, "amomin.w">;
+defm AMOMAX_W : AMO_rr_aq_rl<0b10100, 0b010, "amomax.w">;
+defm AMOMINU_W : AMO_rr_aq_rl<0b11000, 0b010, "amominu.w">;
+defm AMOMAXU_W : AMO_rr_aq_rl<0b11100, 0b010, "amomaxu.w">;
+} // Predicates = [HasStdExtA]
+
+let Predicates = [HasStdExtA, IsRV64] in {
+defm LR_D : LR_r_aq_rl<0b011, "lr.d">;
+defm SC_D : AMO_rr_aq_rl<0b00011, 0b011, "sc.d">;
+defm AMOSWAP_D : AMO_rr_aq_rl<0b00001, 0b011, "amoswap.d">;
+defm AMOADD_D : AMO_rr_aq_rl<0b00000, 0b011, "amoadd.d">;
+defm AMOXOR_D : AMO_rr_aq_rl<0b00100, 0b011, "amoxor.d">;
+defm AMOAND_D : AMO_rr_aq_rl<0b01100, 0b011, "amoand.d">;
+defm AMOOR_D : AMO_rr_aq_rl<0b01000, 0b011, "amoor.d">;
+defm AMOMIN_D : AMO_rr_aq_rl<0b10000, 0b011, "amomin.d">;
+defm AMOMAX_D : AMO_rr_aq_rl<0b10100, 0b011, "amomax.d">;
+defm AMOMINU_D : AMO_rr_aq_rl<0b11000, 0b011, "amominu.d">;
+defm AMOMAXU_D : AMO_rr_aq_rl<0b11100, 0b011, "amomaxu.d">;
+} // Predicates = [HasStdExtA, IsRV64]
+
+//===----------------------------------------------------------------------===//
+// Pseudo-instructions and codegen patterns
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtA] in {
+
+/// Atomic loads and stores
+
+// Fences will be inserted for atomic loads/stores according to the logic in
+// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
+
+defm : LdPat<atomic_load_8, LB>;
+defm : LdPat<atomic_load_16, LH>;
+defm : LdPat<atomic_load_32, LW>;
+
+defm : AtomicStPat<atomic_store_8, SB, GPR>;
+defm : AtomicStPat<atomic_store_16, SH, GPR>;
+defm : AtomicStPat<atomic_store_32, SW, GPR>;
+
+/// AMOs
+
+multiclass AMOPat<string AtomicOp, string BaseInst> {
+ def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
+ !cast<RVInst>(BaseInst)>;
+ def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
+ !cast<RVInst>(BaseInst#"_AQ")>;
+ def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
+ !cast<RVInst>(BaseInst#"_RL")>;
+ def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
+ !cast<RVInst>(BaseInst#"_AQ_RL")>;
+ def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
+ !cast<RVInst>(BaseInst#"_AQ_RL")>;
+}
+
+defm : AMOPat<"atomic_swap_32", "AMOSWAP_W">;
+defm : AMOPat<"atomic_load_add_32", "AMOADD_W">;
+defm : AMOPat<"atomic_load_and_32", "AMOAND_W">;
+defm : AMOPat<"atomic_load_or_32", "AMOOR_W">;
+defm : AMOPat<"atomic_load_xor_32", "AMOXOR_W">;
+defm : AMOPat<"atomic_load_max_32", "AMOMAX_W">;
+defm : AMOPat<"atomic_load_min_32", "AMOMIN_W">;
+defm : AMOPat<"atomic_load_umax_32", "AMOMAXU_W">;
+defm : AMOPat<"atomic_load_umin_32", "AMOMINU_W">;
+
+def : Pat<(atomic_load_sub_32_monotonic GPR:$addr, GPR:$incr),
+ (AMOADD_W GPR:$addr, (SUB X0, GPR:$incr))>;
+def : Pat<(atomic_load_sub_32_acquire GPR:$addr, GPR:$incr),
+ (AMOADD_W_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
+def : Pat<(atomic_load_sub_32_release GPR:$addr, GPR:$incr),
+ (AMOADD_W_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+def : Pat<(atomic_load_sub_32_acq_rel GPR:$addr, GPR:$incr),
+ (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+def : Pat<(atomic_load_sub_32_seq_cst GPR:$addr, GPR:$incr),
+ (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
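+// There is no amosub.w, so a 32-bit atomic subtraction is performed by
+// adding the negated increment, e.g.:
+//   neg      t0, a1
+//   amoadd.w a0, t0, (a0)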
+
+/// Pseudo AMOs
+
+class PseudoAMO : Pseudo<(outs GPR:$res, GPR:$scratch),
+ (ins GPR:$addr, GPR:$incr, ixlenimm:$ordering), []> {
+ let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
+ let mayLoad = 1;
+ let mayStore = 1;
+ let hasSideEffects = 0;
+}
+
+def PseudoAtomicLoadNand32 : PseudoAMO;
+// Ordering constants must be kept in sync with the AtomicOrdering enum in
+// AtomicOrdering.h.
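+// The values used below are 2 (Monotonic), 4 (Acquire), 5 (Release),
+// 6 (AcquireRelease) and 7 (SequentiallyConsistent).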
+def : Pat<(atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr),
+ (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
+def : Pat<(atomic_load_nand_32_acquire GPR:$addr, GPR:$incr),
+ (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
+def : Pat<(atomic_load_nand_32_release GPR:$addr, GPR:$incr),
+ (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
+def : Pat<(atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr),
+ (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
+def : Pat<(atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr),
+ (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;
+
+class PseudoMaskedAMO
+ : Pseudo<(outs GPR:$res, GPR:$scratch),
+ (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
+ let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
+ let mayLoad = 1;
+ let mayStore = 1;
+ let hasSideEffects = 0;
+}
+
+class PseudoMaskedAMOMinMax
+ : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
+ (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$sextshamt,
+ ixlenimm:$ordering), []> {
+ let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
+ "@earlyclobber $scratch2";
+ let mayLoad = 1;
+ let mayStore = 1;
+ let hasSideEffects = 0;
+}
+
+class PseudoMaskedAMOUMinUMax
+ : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
+ (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
+ let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
+ "@earlyclobber $scratch2";
+ let mayLoad = 1;
+ let mayStore = 1;
+ let hasSideEffects = 0;
+}
+
+class PseudoMaskedAMOPat<Intrinsic intrin, Pseudo AMOInst>
+ : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, imm:$ordering),
+ (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, imm:$ordering)>;
+
+class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
+ : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
+ imm:$ordering),
+ (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
+ imm:$ordering)>;
+
+def PseudoMaskedAtomicSwap32 : PseudoMaskedAMO;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
+ PseudoMaskedAtomicSwap32>;
+def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAMO;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i32,
+ PseudoMaskedAtomicLoadAdd32>;
+def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAMO;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i32,
+ PseudoMaskedAtomicLoadSub32>;
+def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAMO;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i32,
+ PseudoMaskedAtomicLoadNand32>;
+def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMOMinMax;
+def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i32,
+ PseudoMaskedAtomicLoadMax32>;
+def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMOMinMax;
+def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i32,
+ PseudoMaskedAtomicLoadMin32>;
+def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i32,
+ PseudoMaskedAtomicLoadUMax32>;
+def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
+ PseudoMaskedAtomicLoadUMin32>;
+
+/// Compare and exchange
+
+class PseudoCmpXchg
+ : Pseudo<(outs GPR:$res, GPR:$scratch),
+ (ins GPR:$addr, GPR:$cmpval, GPR:$newval, ixlenimm:$ordering), []> {
+ let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
+ let mayLoad = 1;
+ let mayStore = 1;
+ let hasSideEffects = 0;
+}
+
+// Ordering constants must be kept in sync with the AtomicOrdering enum in
+// AtomicOrdering.h.
+multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst> {
+ def : Pat<(!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$cmp, GPR:$new),
+ (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
+ def : Pat<(!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$cmp, GPR:$new),
+ (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
+ def : Pat<(!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$cmp, GPR:$new),
+ (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
+ def : Pat<(!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new),
+ (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
+ def : Pat<(!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new),
+ (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
+}
+
+def PseudoCmpXchg32 : PseudoCmpXchg;
+defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32>;
+
+def PseudoMaskedCmpXchg32
+ : Pseudo<(outs GPR:$res, GPR:$scratch),
+ (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
+ ixlenimm:$ordering), []> {
+ let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
+ let mayLoad = 1;
+ let mayStore = 1;
+ let hasSideEffects = 0;
+}
+
+def : Pat<(int_riscv_masked_cmpxchg_i32
+ GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering),
+ (PseudoMaskedCmpXchg32
+ GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering)>;
+
+} // Predicates = [HasStdExtA]
+
+let Predicates = [HasStdExtA, IsRV64] in {
+
+/// 64-bit atomic loads and stores
+
+// Fences will be inserted for atomic loads/stores according to the logic in
+// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
+defm : LdPat<atomic_load_64, LD>;
+defm : AtomicStPat<atomic_store_64, SD, GPR>;
+
+defm : AMOPat<"atomic_swap_64", "AMOSWAP_D">;
+defm : AMOPat<"atomic_load_add_64", "AMOADD_D">;
+defm : AMOPat<"atomic_load_and_64", "AMOAND_D">;
+defm : AMOPat<"atomic_load_or_64", "AMOOR_D">;
+defm : AMOPat<"atomic_load_xor_64", "AMOXOR_D">;
+defm : AMOPat<"atomic_load_max_64", "AMOMAX_D">;
+defm : AMOPat<"atomic_load_min_64", "AMOMIN_D">;
+defm : AMOPat<"atomic_load_umax_64", "AMOMAXU_D">;
+defm : AMOPat<"atomic_load_umin_64", "AMOMINU_D">;
+
+/// 64-bit AMOs
+
+def : Pat<(atomic_load_sub_64_monotonic GPR:$addr, GPR:$incr),
+ (AMOADD_D GPR:$addr, (SUB X0, GPR:$incr))>;
+def : Pat<(atomic_load_sub_64_acquire GPR:$addr, GPR:$incr),
+ (AMOADD_D_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
+def : Pat<(atomic_load_sub_64_release GPR:$addr, GPR:$incr),
+ (AMOADD_D_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+def : Pat<(atomic_load_sub_64_acq_rel GPR:$addr, GPR:$incr),
+ (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+def : Pat<(atomic_load_sub_64_seq_cst GPR:$addr, GPR:$incr),
+ (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+
+/// 64-bit pseudo AMOs
+
+def PseudoAtomicLoadNand64 : PseudoAMO;
+// Ordering constants must be kept in sync with the AtomicOrdering enum in
+// AtomicOrdering.h.
+def : Pat<(atomic_load_nand_64_monotonic GPR:$addr, GPR:$incr),
+ (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>;
+def : Pat<(atomic_load_nand_64_acquire GPR:$addr, GPR:$incr),
+ (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>;
+def : Pat<(atomic_load_nand_64_release GPR:$addr, GPR:$incr),
+ (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>;
+def : Pat<(atomic_load_nand_64_acq_rel GPR:$addr, GPR:$incr),
+ (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
+def : Pat<(atomic_load_nand_64_seq_cst GPR:$addr, GPR:$incr),
+ (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;
+
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
+ PseudoMaskedAtomicSwap32>;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
+ PseudoMaskedAtomicLoadAdd32>;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
+ PseudoMaskedAtomicLoadSub32>;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
+ PseudoMaskedAtomicLoadNand32>;
+def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
+ PseudoMaskedAtomicLoadMax32>;
+def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
+ PseudoMaskedAtomicLoadMin32>;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
+ PseudoMaskedAtomicLoadUMax32>;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
+ PseudoMaskedAtomicLoadUMin32>;
+
+/// 64-bit compare and exchange
+
+def PseudoCmpXchg64 : PseudoCmpXchg;
+defm : PseudoCmpXchgPat<"atomic_cmp_swap_64", PseudoCmpXchg64>;
+
+def : Pat<(int_riscv_masked_cmpxchg_i64
+ GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering),
+ (PseudoMaskedCmpXchg32
+ GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering)>;
+} // Predicates = [HasStdExtA, IsRV64]
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoC.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
new file mode 100644
index 000000000000..94477341eea7
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
@@ -0,0 +1,763 @@
+//===-- RISCVInstrInfoC.td - Compressed RISC-V instructions -*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+include "RISCVInstrFormatsC.td"
+
+//===----------------------------------------------------------------------===//
+// Operand definitions.
+//===----------------------------------------------------------------------===//
+
+def UImmLog2XLenNonZeroAsmOperand : AsmOperandClass {
+ let Name = "UImmLog2XLenNonZero";
+ let RenderMethod = "addImmOperands";
+ let DiagnosticType = "InvalidUImmLog2XLenNonZero";
+}
+
+def uimmlog2xlennonzero : Operand<XLenVT>, ImmLeaf<XLenVT, [{
+ if (Subtarget->is64Bit())
+ return isUInt<6>(Imm) && (Imm != 0);
+ return isUInt<5>(Imm) && (Imm != 0);
+}]> {
+ let ParserMatchClass = UImmLog2XLenNonZeroAsmOperand;
+ // TODO: should ensure invalid shamt is rejected when decoding.
+ let DecoderMethod = "decodeUImmOperand<6>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ if (STI.getTargetTriple().isArch64Bit())
+ return isUInt<6>(Imm) && (Imm != 0);
+ return isUInt<5>(Imm) && (Imm != 0);
+ }];
+}
+
+def simm6 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<6>(Imm);}]> {
+ let ParserMatchClass = SImmAsmOperand<6>;
+ let EncoderMethod = "getImmOpValue";
+ let DecoderMethod = "decodeSImmOperand<6>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return isInt<6>(Imm);
+ return MCOp.isBareSymbolRef();
+ }];
+}
+
+def simm6nonzero : Operand<XLenVT>,
+ ImmLeaf<XLenVT, [{return (Imm != 0) && isInt<6>(Imm);}]> {
+ let ParserMatchClass = SImmAsmOperand<6, "NonZero">;
+ let EncoderMethod = "getImmOpValue";
+ let DecoderMethod = "decodeSImmOperand<6>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return (Imm != 0) && isInt<6>(Imm);
+ return MCOp.isBareSymbolRef();
+ }];
+}
+
+def CLUIImmAsmOperand : AsmOperandClass {
+ let Name = "CLUIImm";
+ let RenderMethod = "addImmOperands";
+ let DiagnosticType = !strconcat("Invalid", Name);
+}
+
+// c_lui_imm checks that the immediate is in the range [1, 31] or
+// [0xfffe0, 0xfffff]. The RISC-V ISA describes the constraint as [1, 63],
+// with that value being loaded into bits 17-12 of the destination register
+// and sign-extended from bit 17. Therefore, this 6-bit immediate can
+// represent values in the ranges [1, 31] and [0xfffe0, 0xfffff].
+def c_lui_imm : Operand<XLenVT>,
+ ImmLeaf<XLenVT, [{return (Imm != 0) &&
+ (isUInt<5>(Imm) ||
+ (Imm >= 0xfffe0 && Imm <= 0xfffff));}]> {
+ let ParserMatchClass = CLUIImmAsmOperand;
+ let EncoderMethod = "getImmOpValue";
+ let DecoderMethod = "decodeCLUIImmOperand";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return (Imm != 0) && (isUInt<5>(Imm) ||
+ (Imm >= 0xfffe0 && Imm <= 0xfffff));
+ return MCOp.isBareSymbolRef();
+ }];
+}
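+// For example, "c.lui a1, 0xfffe0" encodes the six bits 0b100000; expanding
+// them into bits 17-12 and sign-extending from bit 17 recovers 0xfffe0, so
+// a1 receives that value shifted left by 12.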
+
+// A 7-bit unsigned immediate where the least significant two bits are zero.
+def uimm7_lsb00 : Operand<XLenVT>,
+ ImmLeaf<XLenVT, [{return isShiftedUInt<5, 2>(Imm);}]> {
+ let ParserMatchClass = UImmAsmOperand<7, "Lsb00">;
+ let EncoderMethod = "getImmOpValue";
+ let DecoderMethod = "decodeUImmOperand<7>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ return isShiftedUInt<5, 2>(Imm);
+ }];
+}
+
+// An 8-bit unsigned immediate where the least significant two bits are zero.
+def uimm8_lsb00 : Operand<XLenVT>,
+ ImmLeaf<XLenVT, [{return isShiftedUInt<6, 2>(Imm);}]> {
+ let ParserMatchClass = UImmAsmOperand<8, "Lsb00">;
+ let EncoderMethod = "getImmOpValue";
+ let DecoderMethod = "decodeUImmOperand<8>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ return isShiftedUInt<6, 2>(Imm);
+ }];
+}
+
+// An 8-bit unsigned immediate where the least significant three bits are zero.
+def uimm8_lsb000 : Operand<XLenVT>,
+ ImmLeaf<XLenVT, [{return isShiftedUInt<5, 3>(Imm);}]> {
+ let ParserMatchClass = UImmAsmOperand<8, "Lsb000">;
+ let EncoderMethod = "getImmOpValue";
+ let DecoderMethod = "decodeUImmOperand<8>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ return isShiftedUInt<5, 3>(Imm);
+ }];
+}
+
+// A 9-bit signed immediate where the least significant bit is zero.
+def simm9_lsb0 : Operand<OtherVT> {
+ let ParserMatchClass = SImmAsmOperand<9, "Lsb0">;
+ let EncoderMethod = "getImmOpValueAsr1";
+ let DecoderMethod = "decodeSImmOperandAndLsl1<9>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return isShiftedInt<8, 1>(Imm);
+ return MCOp.isBareSymbolRef();
+ }];
+}
+
+// A 9-bit unsigned immediate where the least significant three bits are zero.
+def uimm9_lsb000 : Operand<XLenVT>,
+ ImmLeaf<XLenVT, [{return isShiftedUInt<6, 3>(Imm);}]> {
+ let ParserMatchClass = UImmAsmOperand<9, "Lsb000">;
+ let EncoderMethod = "getImmOpValue";
+ let DecoderMethod = "decodeUImmOperand<9>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ return isShiftedUInt<6, 3>(Imm);
+ }];
+}
+
+// A 10-bit unsigned immediate where the least significant two bits are zero
+// and the immediate can't be zero.
+def uimm10_lsb00nonzero : Operand<XLenVT>,
+ ImmLeaf<XLenVT,
+ [{return isShiftedUInt<8, 2>(Imm) && (Imm != 0);}]> {
+ let ParserMatchClass = UImmAsmOperand<10, "Lsb00NonZero">;
+ let EncoderMethod = "getImmOpValue";
+ let DecoderMethod = "decodeUImmNonZeroOperand<10>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ return isShiftedUInt<8, 2>(Imm) && (Imm != 0);
+ }];
+}
+
+// A 10-bit signed immediate where the least significant four bits are zero
+// and the immediate can't be zero.
+def simm10_lsb0000nonzero : Operand<XLenVT>,
+ ImmLeaf<XLenVT,
+ [{return (Imm != 0) && isShiftedInt<6, 4>(Imm);}]> {
+ let ParserMatchClass = SImmAsmOperand<10, "Lsb0000NonZero">;
+ let EncoderMethod = "getImmOpValue";
+ let DecoderMethod = "decodeSImmNonZeroOperand<10>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ return isShiftedInt<6, 4>(Imm) && (Imm != 0);
+ }];
+}
+
+// A 12-bit signed immediate where the least significant bit is zero.
+def simm12_lsb0 : Operand<XLenVT> {
+ let ParserMatchClass = SImmAsmOperand<12, "Lsb0">;
+ let EncoderMethod = "getImmOpValueAsr1";
+ let DecoderMethod = "decodeSImmOperandAndLsl1<12>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return isShiftedInt<11, 1>(Imm);
+ return MCOp.isBareSymbolRef();
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// Instruction Class Templates
+//===----------------------------------------------------------------------===//
+
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
+class CStackLoad<bits<3> funct3, string OpcodeStr,
+ RegisterClass cls, DAGOperand opnd>
+ : RVInst16CI<funct3, 0b10, (outs cls:$rd), (ins SP:$rs1, opnd:$imm),
+ OpcodeStr, "$rd, ${imm}(${rs1})">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
+class CStackStore<bits<3> funct3, string OpcodeStr,
+ RegisterClass cls, DAGOperand opnd>
+ : RVInst16CSS<funct3, 0b10, (outs), (ins cls:$rs2, SP:$rs1, opnd:$imm),
+ OpcodeStr, "$rs2, ${imm}(${rs1})">;
+
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
+class CLoad_ri<bits<3> funct3, string OpcodeStr,
+ RegisterClass cls, DAGOperand opnd>
+ : RVInst16CL<funct3, 0b00, (outs cls:$rd), (ins GPRC:$rs1, opnd:$imm),
+ OpcodeStr, "$rd, ${imm}(${rs1})">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
+class CStore_rri<bits<3> funct3, string OpcodeStr,
+ RegisterClass cls, DAGOperand opnd>
+ : RVInst16CS<funct3, 0b00, (outs), (ins cls:$rs2, GPRC:$rs1, opnd:$imm),
+ OpcodeStr, "$rs2, ${imm}(${rs1})">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class Bcz<bits<3> funct3, string OpcodeStr, PatFrag CondOp,
+ RegisterClass cls>
+ : RVInst16CB<funct3, 0b01, (outs), (ins cls:$rs1, simm9_lsb0:$imm),
+ OpcodeStr, "$rs1, $imm"> {
+ let isBranch = 1;
+ let isTerminator = 1;
+ let Inst{12} = imm{7};
+ let Inst{11-10} = imm{3-2};
+ let Inst{6-5} = imm{6-5};
+ let Inst{4-3} = imm{1-0};
+ let Inst{2} = imm{4};
+}
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class Shift_right<bits<2> funct2, string OpcodeStr, RegisterClass cls,
+ Operand ImmOpnd>
+ : RVInst16CB<0b100, 0b01, (outs cls:$rs1_wb), (ins cls:$rs1, ImmOpnd:$imm),
+ OpcodeStr, "$rs1, $imm"> {
+ let Constraints = "$rs1 = $rs1_wb";
+ let Inst{12} = imm{5};
+ let Inst{11-10} = funct2;
+ let Inst{6-2} = imm{4-0};
+}
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class CS_ALU<bits<6> funct6, bits<2> funct2, string OpcodeStr,
+ RegisterClass cls>
+ : RVInst16CA<funct6, funct2, 0b01, (outs cls:$rd_wb), (ins cls:$rd, cls:$rs2),
+ OpcodeStr, "$rd, $rs2"> {
+ bits<3> rd;
+ let Constraints = "$rd = $rd_wb";
+ let Inst{9-7} = rd;
+}
+
+//===----------------------------------------------------------------------===//
+// Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtC] in {
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [X2] in
+def C_ADDI4SPN : RVInst16CIW<0b000, 0b00, (outs GPRC:$rd),
+ (ins SP:$rs1, uimm10_lsb00nonzero:$imm),
+ "c.addi4spn", "$rd, $rs1, $imm"> {
+ bits<5> rs1;
+ let Inst{12-11} = imm{5-4};
+ let Inst{10-7} = imm{9-6};
+ let Inst{6} = imm{2};
+ let Inst{5} = imm{3};
+}
+
+let Predicates = [HasStdExtC, HasStdExtD] in
+def C_FLD : CLoad_ri<0b001, "c.fld", FPR64C, uimm8_lsb000> {
+ bits<8> imm;
+ let Inst{12-10} = imm{5-3};
+ let Inst{6-5} = imm{7-6};
+}
+
+def C_LW : CLoad_ri<0b010, "c.lw", GPRC, uimm7_lsb00> {
+ bits<7> imm;
+ let Inst{12-10} = imm{5-3};
+ let Inst{6} = imm{2};
+ let Inst{5} = imm{6};
+}
+
+let DecoderNamespace = "RISCV32Only_",
+ Predicates = [HasStdExtC, HasStdExtF, IsRV32] in
+def C_FLW : CLoad_ri<0b011, "c.flw", FPR32C, uimm7_lsb00> {
+ bits<7> imm;
+ let Inst{12-10} = imm{5-3};
+ let Inst{6} = imm{2};
+ let Inst{5} = imm{6};
+}
+
+let Predicates = [HasStdExtC, IsRV64] in
+def C_LD : CLoad_ri<0b011, "c.ld", GPRC, uimm8_lsb000> {
+ bits<8> imm;
+ let Inst{12-10} = imm{5-3};
+ let Inst{6-5} = imm{7-6};
+}
+
+let Predicates = [HasStdExtC, HasStdExtD] in
+def C_FSD : CStore_rri<0b101, "c.fsd", FPR64C, uimm8_lsb000> {
+ bits<8> imm;
+ let Inst{12-10} = imm{5-3};
+ let Inst{6-5} = imm{7-6};
+}
+
+def C_SW : CStore_rri<0b110, "c.sw", GPRC, uimm7_lsb00> {
+ bits<7> imm;
+ let Inst{12-10} = imm{5-3};
+ let Inst{6} = imm{2};
+ let Inst{5} = imm{6};
+}
+
+let DecoderNamespace = "RISCV32Only_",
+ Predicates = [HasStdExtC, HasStdExtF, IsRV32] in
+def C_FSW : CStore_rri<0b111, "c.fsw", FPR32C, uimm7_lsb00> {
+ bits<7> imm;
+ let Inst{12-10} = imm{5-3};
+ let Inst{6} = imm{2};
+ let Inst{5} = imm{6};
+}
+
+let Predicates = [HasStdExtC, IsRV64] in
+def C_SD : CStore_rri<0b111, "c.sd", GPRC, uimm8_lsb000> {
+ bits<8> imm;
+ let Inst{12-10} = imm{5-3};
+ let Inst{6-5} = imm{7-6};
+}
+
+let rd = 0, imm = 0, hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def C_NOP : RVInst16CI<0b000, 0b01, (outs), (ins), "c.nop", "">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def C_ADDI : RVInst16CI<0b000, 0b01, (outs GPRNoX0:$rd_wb),
+ (ins GPRNoX0:$rd, simm6nonzero:$imm),
+ "c.addi", "$rd, $imm"> {
+ let Constraints = "$rd = $rd_wb";
+ let Inst{6-2} = imm{4-0};
+}
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCall = 1,
+ DecoderNamespace = "RISCV32Only_", Defs = [X1],
+ Predicates = [HasStdExtC, IsRV32] in
+def C_JAL : RVInst16CJ<0b001, 0b01, (outs), (ins simm12_lsb0:$offset),
+ "c.jal", "$offset">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
+ Predicates = [HasStdExtC, IsRV64] in
+def C_ADDIW : RVInst16CI<0b001, 0b01, (outs GPRNoX0:$rd_wb),
+ (ins GPRNoX0:$rd, simm6:$imm),
+ "c.addiw", "$rd, $imm"> {
+ let Constraints = "$rd = $rd_wb";
+ let Inst{6-2} = imm{4-0};
+}
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def C_LI : RVInst16CI<0b010, 0b01, (outs GPRNoX0:$rd), (ins simm6:$imm),
+ "c.li", "$rd, $imm"> {
+ let Inst{6-2} = imm{4-0};
+}
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def C_ADDI16SP : RVInst16CI<0b011, 0b01, (outs SP:$rd_wb),
+ (ins SP:$rd, simm10_lsb0000nonzero:$imm),
+ "c.addi16sp", "$rd, $imm"> {
+ let Constraints = "$rd = $rd_wb";
+ let Inst{12} = imm{9};
+ let Inst{11-7} = 2;
+ let Inst{6} = imm{4};
+ let Inst{5} = imm{6};
+ let Inst{4-3} = imm{8-7};
+ let Inst{2} = imm{5};
+}
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def C_LUI : RVInst16CI<0b011, 0b01, (outs GPRNoX0X2:$rd),
+ (ins c_lui_imm:$imm),
+ "c.lui", "$rd, $imm"> {
+ let Inst{6-2} = imm{4-0};
+}
+
+def C_SRLI : Shift_right<0b00, "c.srli", GPRC, uimmlog2xlennonzero>;
+def C_SRAI : Shift_right<0b01, "c.srai", GPRC, uimmlog2xlennonzero>;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def C_ANDI : RVInst16CB<0b100, 0b01, (outs GPRC:$rs1_wb), (ins GPRC:$rs1, simm6:$imm),
+ "c.andi", "$rs1, $imm"> {
+ let Constraints = "$rs1 = $rs1_wb";
+ let Inst{12} = imm{5};
+ let Inst{11-10} = 0b10;
+ let Inst{6-2} = imm{4-0};
+}
+
+def C_SUB : CS_ALU<0b100011, 0b00, "c.sub", GPRC>;
+def C_XOR : CS_ALU<0b100011, 0b01, "c.xor", GPRC>;
+def C_OR : CS_ALU<0b100011, 0b10, "c.or" , GPRC>;
+def C_AND : CS_ALU<0b100011, 0b11, "c.and", GPRC>;
+
+let Predicates = [HasStdExtC, IsRV64] in {
+def C_SUBW : CS_ALU<0b100111, 0b00, "c.subw", GPRC>;
+def C_ADDW : CS_ALU<0b100111, 0b01, "c.addw", GPRC>;
+}
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def C_J : RVInst16CJ<0b101, 0b01, (outs), (ins simm12_lsb0:$offset),
+ "c.j", "$offset"> {
+ let isBranch = 1;
+ let isTerminator = 1;
+ let isBarrier = 1;
+}
+
+def C_BEQZ : Bcz<0b110, "c.beqz", seteq, GPRC>;
+def C_BNEZ : Bcz<0b111, "c.bnez", setne, GPRC>;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def C_SLLI : RVInst16CI<0b000, 0b10, (outs GPRNoX0:$rd_wb),
+ (ins GPRNoX0:$rd, uimmlog2xlennonzero:$imm),
+ "c.slli" ,"$rd, $imm"> {
+ let Constraints = "$rd = $rd_wb";
+ let Inst{6-2} = imm{4-0};
+}
+
+let Predicates = [HasStdExtC, HasStdExtD] in
+def C_FLDSP : CStackLoad<0b001, "c.fldsp", FPR64, uimm9_lsb000> {
+ let Inst{6-5} = imm{4-3};
+ let Inst{4-2} = imm{8-6};
+}
+
+def C_LWSP : CStackLoad<0b010, "c.lwsp", GPRNoX0, uimm8_lsb00> {
+ let Inst{6-4} = imm{4-2};
+ let Inst{3-2} = imm{7-6};
+}
+
+let DecoderNamespace = "RISCV32Only_",
+ Predicates = [HasStdExtC, HasStdExtF, IsRV32] in
+def C_FLWSP : CStackLoad<0b011, "c.flwsp", FPR32, uimm8_lsb00> {
+ let Inst{6-4} = imm{4-2};
+ let Inst{3-2} = imm{7-6};
+}
+
+let Predicates = [HasStdExtC, IsRV64] in
+def C_LDSP : CStackLoad<0b011, "c.ldsp", GPRNoX0, uimm9_lsb000> {
+ let Inst{6-5} = imm{4-3};
+ let Inst{4-2} = imm{8-6};
+}
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def C_JR : RVInst16CR<0b1000, 0b10, (outs), (ins GPRNoX0:$rs1),
+ "c.jr", "$rs1"> {
+ let isBranch = 1;
+ let isBarrier = 1;
+ let isTerminator = 1;
+ let isIndirectBranch = 1;
+ let rs2 = 0;
+}
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def C_MV : RVInst16CR<0b1000, 0b10, (outs GPRNoX0:$rs1), (ins GPRNoX0:$rs2),
+ "c.mv", "$rs1, $rs2">;
+
+let rs1 = 0, rs2 = 0, hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
+def C_EBREAK : RVInst16CR<0b1001, 0b10, (outs), (ins), "c.ebreak", "">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
+ isCall = 1, Defs = [X1], rs2 = 0 in
+def C_JALR : RVInst16CR<0b1001, 0b10, (outs), (ins GPRNoX0:$rs1),
+ "c.jalr", "$rs1">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def C_ADD : RVInst16CR<0b1001, 0b10, (outs GPRNoX0:$rs1_wb),
+ (ins GPRNoX0:$rs1, GPRNoX0:$rs2),
+ "c.add", "$rs1, $rs2"> {
+ let Constraints = "$rs1 = $rs1_wb";
+}
+
+let Predicates = [HasStdExtC, HasStdExtD] in
+def C_FSDSP : CStackStore<0b101, "c.fsdsp", FPR64, uimm9_lsb000> {
+ let Inst{12-10} = imm{5-3};
+ let Inst{9-7} = imm{8-6};
+}
+
+def C_SWSP : CStackStore<0b110, "c.swsp", GPR, uimm8_lsb00> {
+ let Inst{12-9} = imm{5-2};
+ let Inst{8-7} = imm{7-6};
+}
+
+let DecoderNamespace = "RISCV32Only_",
+ Predicates = [HasStdExtC, HasStdExtF, IsRV32] in
+def C_FSWSP : CStackStore<0b111, "c.fswsp", FPR32, uimm8_lsb00> {
+ let Inst{12-9} = imm{5-2};
+ let Inst{8-7} = imm{7-6};
+}
+
+let Predicates = [HasStdExtC, IsRV64] in
+def C_SDSP : CStackStore<0b111, "c.sdsp", GPR, uimm9_lsb000> {
+ let Inst{12-10} = imm{5-3};
+ let Inst{9-7} = imm{8-6};
+}
+
+// The all-zeros pattern isn't a valid RISC-V instruction. It's used by GNU
+// binutils as a 16-bit instruction known to be unimplemented (i.e., trapping).
+let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
+def C_UNIMP : RVInst16<(outs), (ins), "c.unimp", "", [], InstFormatOther> {
+ let Inst{15-0} = 0;
+}
+
+} // Predicates = [HasStdExtC]
+
+//===----------------------------------------------------------------------===//
+// Assembler Pseudo Instructions
+//===----------------------------------------------------------------------===//
+
+let EmitPriority = 0 in {
+let Predicates = [HasStdExtC, HasStdExtD] in
+def : InstAlias<"c.fld $rd, (${rs1})", (C_FLD FPR64C:$rd, GPRC:$rs1, 0)>;
+
+def : InstAlias<"c.lw $rd, (${rs1})", (C_LW GPRC:$rd, GPRC:$rs1, 0)>;
+
+let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in
+def : InstAlias<"c.flw $rd, (${rs1})", (C_FLW FPR32C:$rd, GPRC:$rs1, 0)>;
+
+let Predicates = [HasStdExtC, IsRV64] in
+def : InstAlias<"c.ld $rd, (${rs1})", (C_LD GPRC:$rd, GPRC:$rs1, 0)>;
+
+let Predicates = [HasStdExtC, HasStdExtD] in
+def : InstAlias<"c.fsd $rs2, (${rs1})", (C_FSD FPR64C:$rs2, GPRC:$rs1, 0)>;
+
+def : InstAlias<"c.sw $rs2, (${rs1})", (C_SW GPRC:$rs2, GPRC:$rs1, 0)>;
+
+let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in
+def : InstAlias<"c.fsw $rs2, (${rs1})", (C_FSW FPR32C:$rs2, GPRC:$rs1, 0)>;
+
+let Predicates = [HasStdExtC, IsRV64] in
+def : InstAlias<"c.sd $rs2, (${rs1})", (C_SD GPRC:$rs2, GPRC:$rs1, 0)>;
+
+let Predicates = [HasStdExtC, HasStdExtD] in
+def : InstAlias<"c.fldsp $rd, (${rs1})", (C_FLDSP FPR64C:$rd, SP:$rs1, 0)>;
+
+def : InstAlias<"c.lwsp $rd, (${rs1})", (C_LWSP GPRC:$rd, SP:$rs1, 0)>;
+
+let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in
+def : InstAlias<"c.flwsp $rd, (${rs1})", (C_FLWSP FPR32C:$rd, SP:$rs1, 0)>;
+
+let Predicates = [HasStdExtC, IsRV64] in
+def : InstAlias<"c.ldsp $rd, (${rs1})", (C_LDSP GPRC:$rd, SP:$rs1, 0)>;
+
+let Predicates = [HasStdExtC, HasStdExtD] in
+def : InstAlias<"c.fsdsp $rs2, (${rs1})", (C_FSDSP FPR64C:$rs2, SP:$rs1, 0)>;
+
+def : InstAlias<"c.swsp $rs2, (${rs1})", (C_SWSP GPRC:$rs2, SP:$rs1, 0)>;
+
+let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in
+def : InstAlias<"c.fswsp $rs2, (${rs1})", (C_FSWSP FPR32C:$rs2, SP:$rs1, 0)>;
+
+let Predicates = [HasStdExtC, IsRV64] in
+def : InstAlias<"c.sdsp $rs2, (${rs1})", (C_SDSP GPRC:$rs2, SP:$rs1, 0)>;
+}
+
+//===----------------------------------------------------------------------===//
+// Compress Instruction tablegen backend.
+//===----------------------------------------------------------------------===//
+
+class CompressPat<dag input, dag output> {
+ dag Input = input;
+ dag Output = output;
+ list<Predicate> Predicates = [];
+}
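+// From these CompressPat definitions the RISCVCompressInstEmitter tablegen
+// backend generates compressInst/uncompressInst helpers that convert MCInsts
+// between the 32-bit and 16-bit forms of each pair.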
+
+// Patterns are defined in the same order the compressed instructions appear
+// on page 82 of the ISA manual.
+
+// Quadrant 0
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(ADDI GPRC:$rd, SP:$rs1, uimm10_lsb00nonzero:$imm),
+ (C_ADDI4SPN GPRC:$rd, SP:$rs1, uimm10_lsb00nonzero:$imm)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, HasStdExtD] in {
+def : CompressPat<(FLD FPR64C:$rd, GPRC:$rs1, uimm8_lsb000:$imm),
+ (C_FLD FPR64C:$rd, GPRC:$rs1, uimm8_lsb000:$imm)>;
+} // Predicates = [HasStdExtC, HasStdExtD]
+
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(LW GPRC:$rd, GPRC:$rs1, uimm7_lsb00:$imm),
+ (C_LW GPRC:$rd, GPRC:$rs1, uimm7_lsb00:$imm)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in {
+def : CompressPat<(FLW FPR32C:$rd, GPRC:$rs1, uimm7_lsb00:$imm),
+ (C_FLW FPR32C:$rd, GPRC:$rs1, uimm7_lsb00:$imm)>;
+} // Predicates = [HasStdExtC, HasStdExtF, IsRV32]
+
+let Predicates = [HasStdExtC, IsRV64] in {
+def : CompressPat<(LD GPRC:$rd, GPRC:$rs1, uimm8_lsb000:$imm),
+ (C_LD GPRC:$rd, GPRC:$rs1, uimm8_lsb000:$imm)>;
+} // Predicates = [HasStdExtC, IsRV64]
+
+let Predicates = [HasStdExtC, HasStdExtD] in {
+def : CompressPat<(FSD FPR64C:$rs2, GPRC:$rs1, uimm8_lsb000:$imm),
+ (C_FSD FPR64C:$rs2, GPRC:$rs1, uimm8_lsb000:$imm)>;
+} // Predicates = [HasStdExtC, HasStdExtD]
+
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(SW GPRC:$rs2, GPRC:$rs1, uimm7_lsb00:$imm),
+ (C_SW GPRC:$rs2, GPRC:$rs1, uimm7_lsb00:$imm)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in {
+def : CompressPat<(FSW FPR32C:$rs2, GPRC:$rs1, uimm7_lsb00:$imm),
+ (C_FSW FPR32C:$rs2, GPRC:$rs1, uimm7_lsb00:$imm)>;
+} // Predicates = [HasStdExtC, HasStdExtF, IsRV32]
+
+let Predicates = [HasStdExtC, IsRV64] in {
+def : CompressPat<(SD GPRC:$rs2, GPRC:$rs1, uimm8_lsb000:$imm),
+ (C_SD GPRC:$rs2, GPRC:$rs1, uimm8_lsb000:$imm)>;
+} // Predicates = [HasStdExtC, IsRV64]
+
+// Quadrant 1
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(ADDI X0, X0, 0), (C_NOP)>;
+def : CompressPat<(ADDI GPRNoX0:$rs1, GPRNoX0:$rs1, simm6nonzero:$imm),
+ (C_ADDI GPRNoX0:$rs1, simm6nonzero:$imm)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, IsRV32] in {
+def : CompressPat<(JAL X1, simm12_lsb0:$offset),
+ (C_JAL simm12_lsb0:$offset)>;
+} // Predicates = [HasStdExtC, IsRV32]
+
+let Predicates = [HasStdExtC, IsRV64] in {
+def : CompressPat<(ADDIW GPRNoX0:$rs1, GPRNoX0:$rs1, simm6:$imm),
+ (C_ADDIW GPRNoX0:$rs1, simm6:$imm)>;
+} // Predicates = [HasStdExtC, IsRV64]
+
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(ADDI GPRNoX0:$rd, X0, simm6:$imm),
+ (C_LI GPRNoX0:$rd, simm6:$imm)>;
+def : CompressPat<(ADDI X2, X2, simm10_lsb0000nonzero:$imm),
+ (C_ADDI16SP X2, simm10_lsb0000nonzero:$imm)>;
+def : CompressPat<(LUI GPRNoX0X2:$rd, c_lui_imm:$imm),
+ (C_LUI GPRNoX0X2:$rd, c_lui_imm:$imm)>;
+def : CompressPat<(SRLI GPRC:$rs1, GPRC:$rs1, uimmlog2xlennonzero:$imm),
+ (C_SRLI GPRC:$rs1, uimmlog2xlennonzero:$imm)>;
+def : CompressPat<(SRAI GPRC:$rs1, GPRC:$rs1, uimmlog2xlennonzero:$imm),
+ (C_SRAI GPRC:$rs1, uimmlog2xlennonzero:$imm)>;
+def : CompressPat<(ANDI GPRC:$rs1, GPRC:$rs1, simm6:$imm),
+ (C_ANDI GPRC:$rs1, simm6:$imm)>;
+def : CompressPat<(SUB GPRC:$rs1, GPRC:$rs1, GPRC:$rs2),
+ (C_SUB GPRC:$rs1, GPRC:$rs2)>;
+def : CompressPat<(XOR GPRC:$rs1, GPRC:$rs1, GPRC:$rs2),
+ (C_XOR GPRC:$rs1, GPRC:$rs2)>;
+def : CompressPat<(XOR GPRC:$rs1, GPRC:$rs2, GPRC:$rs1),
+ (C_XOR GPRC:$rs1, GPRC:$rs2)>;
+def : CompressPat<(OR GPRC:$rs1, GPRC:$rs1, GPRC:$rs2),
+ (C_OR GPRC:$rs1, GPRC:$rs2)>;
+def : CompressPat<(OR GPRC:$rs1, GPRC:$rs2, GPRC:$rs1),
+ (C_OR GPRC:$rs1, GPRC:$rs2)>;
+def : CompressPat<(AND GPRC:$rs1, GPRC:$rs1, GPRC:$rs2),
+ (C_AND GPRC:$rs1, GPRC:$rs2)>;
+def : CompressPat<(AND GPRC:$rs1, GPRC:$rs2, GPRC:$rs1),
+ (C_AND GPRC:$rs1, GPRC:$rs2)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, IsRV64] in {
+def : CompressPat<(ADDIW GPRNoX0:$rd, X0, simm6:$imm),
+ (C_LI GPRNoX0:$rd, simm6:$imm)>;
+def : CompressPat<(SUBW GPRC:$rs1, GPRC:$rs1, GPRC:$rs2),
+ (C_SUBW GPRC:$rs1, GPRC:$rs2)>;
+def : CompressPat<(ADDW GPRC:$rs1, GPRC:$rs1, GPRC:$rs2),
+ (C_ADDW GPRC:$rs1, GPRC:$rs2)>;
+def : CompressPat<(ADDW GPRC:$rs1, GPRC:$rs2, GPRC:$rs1),
+ (C_ADDW GPRC:$rs1, GPRC:$rs2)>;
+} // Predicates = [HasStdExtC, IsRV64]
+
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(JAL X0, simm12_lsb0:$offset),
+ (C_J simm12_lsb0:$offset)>;
+def : CompressPat<(BEQ GPRC:$rs1, X0, simm9_lsb0:$imm),
+ (C_BEQZ GPRC:$rs1, simm9_lsb0:$imm)>;
+def : CompressPat<(BNE GPRC:$rs1, X0, simm9_lsb0:$imm),
+ (C_BNEZ GPRC:$rs1, simm9_lsb0:$imm)>;
+} // Predicates = [HasStdExtC]
+
+// Quadrant 2
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(SLLI GPRNoX0:$rs1, GPRNoX0:$rs1, uimmlog2xlennonzero:$imm),
+ (C_SLLI GPRNoX0:$rs1, uimmlog2xlennonzero:$imm)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, HasStdExtD] in {
+def : CompressPat<(FLD FPR64:$rd, SP:$rs1, uimm9_lsb000:$imm),
+ (C_FLDSP FPR64:$rd, SP:$rs1, uimm9_lsb000:$imm)>;
+} // Predicates = [HasStdExtC, HasStdExtD]
+
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(LW GPRNoX0:$rd, SP:$rs1, uimm8_lsb00:$imm),
+ (C_LWSP GPRNoX0:$rd, SP:$rs1, uimm8_lsb00:$imm)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in {
+def : CompressPat<(FLW FPR32:$rd, SP:$rs1, uimm8_lsb00:$imm),
+ (C_FLWSP FPR32:$rd, SP:$rs1, uimm8_lsb00:$imm)>;
+} // Predicates = [HasStdExtC, HasStdExtF, IsRV32]
+
+let Predicates = [HasStdExtC, IsRV64] in {
+def : CompressPat<(LD GPRNoX0:$rd, SP:$rs1, uimm9_lsb000:$imm),
+ (C_LDSP GPRNoX0:$rd, SP:$rs1, uimm9_lsb000:$imm)>;
+} // Predicates = [HasStdExtC, IsRV64]
+
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(JALR X0, GPRNoX0:$rs1, 0),
+ (C_JR GPRNoX0:$rs1)>;
+def : CompressPat<(ADD GPRNoX0:$rs1, X0, GPRNoX0:$rs2),
+ (C_MV GPRNoX0:$rs1, GPRNoX0:$rs2)>;
+def : CompressPat<(ADD GPRNoX0:$rs1, GPRNoX0:$rs2, X0),
+ (C_MV GPRNoX0:$rs1, GPRNoX0:$rs2)>;
+def : CompressPat<(ADDI GPRNoX0:$rs1, GPRNoX0:$rs2, 0),
+ (C_MV GPRNoX0:$rs1, GPRNoX0:$rs2)>;
+def : CompressPat<(EBREAK), (C_EBREAK)>;
+def : CompressPat<(UNIMP), (C_UNIMP)>;
+def : CompressPat<(JALR X1, GPRNoX0:$rs1, 0),
+ (C_JALR GPRNoX0:$rs1)>;
+def : CompressPat<(ADD GPRNoX0:$rs1, GPRNoX0:$rs1, GPRNoX0:$rs2),
+ (C_ADD GPRNoX0:$rs1, GPRNoX0:$rs2)>;
+def : CompressPat<(ADD GPRNoX0:$rs1, GPRNoX0:$rs2, GPRNoX0:$rs1),
+ (C_ADD GPRNoX0:$rs1, GPRNoX0:$rs2)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, HasStdExtD] in {
+def : CompressPat<(FSD FPR64:$rs2, SP:$rs1, uimm9_lsb000:$imm),
+ (C_FSDSP FPR64:$rs2, SP:$rs1, uimm9_lsb000:$imm)>;
+} // Predicates = [HasStdExtC, HasStdExtD]
+
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(SW GPR:$rs2, SP:$rs1, uimm8_lsb00:$imm),
+ (C_SWSP GPR:$rs2, SP:$rs1, uimm8_lsb00:$imm)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in {
+def : CompressPat<(FSW FPR32:$rs2, SP:$rs1, uimm8_lsb00:$imm),
+ (C_FSWSP FPR32:$rs2, SP:$rs1, uimm8_lsb00:$imm)>;
+} // Predicates = [HasStdExtC, HasStdExtF, IsRV32]
+
+let Predicates = [HasStdExtC, IsRV64] in {
+def : CompressPat<(SD GPR:$rs2, SP:$rs1, uimm9_lsb000:$imm),
+ (C_SDSP GPR:$rs2, SP:$rs1, uimm9_lsb000:$imm)>;
+} // Predicates = [HasStdExtC, IsRV64]
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
new file mode 100644
index 000000000000..fe38c4ff02d3
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -0,0 +1,339 @@
+//===-- RISCVInstrInfoD.td - RISC-V 'D' instructions -------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the RISC-V instructions from the standard 'D',
+// Double-Precision Floating-Point instruction set extension.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// RISC-V specific DAG Nodes.
+//===----------------------------------------------------------------------===//
+
+def SDT_RISCVBuildPairF64 : SDTypeProfile<1, 2, [SDTCisVT<0, f64>,
+ SDTCisVT<1, i32>,
+ SDTCisSameAs<1, 2>]>;
+def SDT_RISCVSplitF64 : SDTypeProfile<2, 1, [SDTCisVT<0, i32>,
+ SDTCisVT<1, i32>,
+ SDTCisVT<2, f64>]>;
+
+def RISCVBuildPairF64 : SDNode<"RISCVISD::BuildPairF64", SDT_RISCVBuildPairF64>;
+def RISCVSplitF64 : SDNode<"RISCVISD::SplitF64", SDT_RISCVSplitF64>;
+
+//===----------------------------------------------------------------------===//
+// Instruction Class Templates
+//===----------------------------------------------------------------------===//
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class FPFMAD_rrr_frm<RISCVOpcode opcode, string opcodestr>
+ : RVInstR4<0b01, opcode, (outs FPR64:$rd),
+ (ins FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, frmarg:$funct3),
+ opcodestr, "$rd, $rs1, $rs2, $rs3, $funct3">;
+
+class FPFMADDynFrmAlias<FPFMAD_rrr_frm Inst, string OpcodeStr>
+ : InstAlias<OpcodeStr#" $rd, $rs1, $rs2, $rs3",
+ (Inst FPR64:$rd, FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, 0b111)>;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class FPALUD_rr<bits<7> funct7, bits<3> funct3, string opcodestr>
+ : RVInstR<funct7, funct3, OPC_OP_FP, (outs FPR64:$rd),
+ (ins FPR64:$rs1, FPR64:$rs2), opcodestr, "$rd, $rs1, $rs2">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class FPALUD_rr_frm<bits<7> funct7, string opcodestr>
+ : RVInstRFrm<funct7, OPC_OP_FP, (outs FPR64:$rd),
+ (ins FPR64:$rs1, FPR64:$rs2, frmarg:$funct3), opcodestr,
+ "$rd, $rs1, $rs2, $funct3">;
+
+class FPALUDDynFrmAlias<FPALUD_rr_frm Inst, string OpcodeStr>
+ : InstAlias<OpcodeStr#" $rd, $rs1, $rs2",
+ (Inst FPR64:$rd, FPR64:$rs1, FPR64:$rs2, 0b111)>;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class FPCmpD_rr<bits<3> funct3, string opcodestr>
+ : RVInstR<0b1010001, funct3, OPC_OP_FP, (outs GPR:$rd),
+ (ins FPR64:$rs1, FPR64:$rs2), opcodestr, "$rd, $rs1, $rs2">;
+
+//===----------------------------------------------------------------------===//
+// Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtD] in {
+
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
+def FLD : RVInstI<0b011, OPC_LOAD_FP, (outs FPR64:$rd),
+ (ins GPR:$rs1, simm12:$imm12),
+ "fld", "$rd, ${imm12}(${rs1})">;
+
+// Operands for stores are in the order srcreg, base, offset rather than
+// reflecting the order these fields are specified in the instruction
+// encoding.
+let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
+def FSD : RVInstS<0b011, OPC_STORE_FP, (outs),
+ (ins FPR64:$rs2, GPR:$rs1, simm12:$imm12),
+ "fsd", "$rs2, ${imm12}(${rs1})">;
+
+def FMADD_D : FPFMAD_rrr_frm<OPC_MADD, "fmadd.d">;
+def : FPFMADDynFrmAlias<FMADD_D, "fmadd.d">;
+def FMSUB_D : FPFMAD_rrr_frm<OPC_MSUB, "fmsub.d">;
+def : FPFMADDynFrmAlias<FMSUB_D, "fmsub.d">;
+def FNMSUB_D : FPFMAD_rrr_frm<OPC_NMSUB, "fnmsub.d">;
+def : FPFMADDynFrmAlias<FNMSUB_D, "fnmsub.d">;
+def FNMADD_D : FPFMAD_rrr_frm<OPC_NMADD, "fnmadd.d">;
+def : FPFMADDynFrmAlias<FNMADD_D, "fnmadd.d">;
+
+def FADD_D : FPALUD_rr_frm<0b0000001, "fadd.d">;
+def : FPALUDDynFrmAlias<FADD_D, "fadd.d">;
+def FSUB_D : FPALUD_rr_frm<0b0000101, "fsub.d">;
+def : FPALUDDynFrmAlias<FSUB_D, "fsub.d">;
+def FMUL_D : FPALUD_rr_frm<0b0001001, "fmul.d">;
+def : FPALUDDynFrmAlias<FMUL_D, "fmul.d">;
+def FDIV_D : FPALUD_rr_frm<0b0001101, "fdiv.d">;
+def : FPALUDDynFrmAlias<FDIV_D, "fdiv.d">;
+
+def FSQRT_D : FPUnaryOp_r_frm<0b0101101, FPR64, FPR64, "fsqrt.d"> {
+ let rs2 = 0b00000;
+}
+def : FPUnaryOpDynFrmAlias<FSQRT_D, "fsqrt.d", FPR64, FPR64>;
+
+def FSGNJ_D : FPALUD_rr<0b0010001, 0b000, "fsgnj.d">;
+def FSGNJN_D : FPALUD_rr<0b0010001, 0b001, "fsgnjn.d">;
+def FSGNJX_D : FPALUD_rr<0b0010001, 0b010, "fsgnjx.d">;
+def FMIN_D : FPALUD_rr<0b0010101, 0b000, "fmin.d">;
+def FMAX_D : FPALUD_rr<0b0010101, 0b001, "fmax.d">;
+
+def FCVT_S_D : FPUnaryOp_r_frm<0b0100000, FPR32, FPR64, "fcvt.s.d"> {
+ let rs2 = 0b00001;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_S_D, "fcvt.s.d", FPR32, FPR64>;
+
+def FCVT_D_S : FPUnaryOp_r<0b0100001, 0b000, FPR64, FPR32, "fcvt.d.s"> {
+ let rs2 = 0b00000;
+}
+
+def FEQ_D : FPCmpD_rr<0b010, "feq.d">;
+def FLT_D : FPCmpD_rr<0b001, "flt.d">;
+def FLE_D : FPCmpD_rr<0b000, "fle.d">;
+
+def FCLASS_D : FPUnaryOp_r<0b1110001, 0b001, GPR, FPR64, "fclass.d"> {
+ let rs2 = 0b00000;
+}
+
+def FCVT_W_D : FPUnaryOp_r_frm<0b1100001, GPR, FPR64, "fcvt.w.d"> {
+ let rs2 = 0b00000;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_W_D, "fcvt.w.d", GPR, FPR64>;
+
+def FCVT_WU_D : FPUnaryOp_r_frm<0b1100001, GPR, FPR64, "fcvt.wu.d"> {
+ let rs2 = 0b00001;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_WU_D, "fcvt.wu.d", GPR, FPR64>;
+
+def FCVT_D_W : FPUnaryOp_r<0b1101001, 0b000, FPR64, GPR, "fcvt.d.w"> {
+ let rs2 = 0b00000;
+}
+
+def FCVT_D_WU : FPUnaryOp_r<0b1101001, 0b000, FPR64, GPR, "fcvt.d.wu"> {
+ let rs2 = 0b00001;
+}
+} // Predicates = [HasStdExtD]
+
+let Predicates = [HasStdExtD, IsRV64] in {
+def FCVT_L_D : FPUnaryOp_r_frm<0b1100001, GPR, FPR64, "fcvt.l.d"> {
+ let rs2 = 0b00010;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_L_D, "fcvt.l.d", GPR, FPR64>;
+
+def FCVT_LU_D : FPUnaryOp_r_frm<0b1100001, GPR, FPR64, "fcvt.lu.d"> {
+ let rs2 = 0b00011;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_LU_D, "fcvt.lu.d", GPR, FPR64>;
+
+def FMV_X_D : FPUnaryOp_r<0b1110001, 0b000, GPR, FPR64, "fmv.x.d"> {
+ let rs2 = 0b00000;
+}
+
+def FCVT_D_L : FPUnaryOp_r_frm<0b1101001, FPR64, GPR, "fcvt.d.l"> {
+ let rs2 = 0b00010;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_D_L, "fcvt.d.l", FPR64, GPR>;
+
+def FCVT_D_LU : FPUnaryOp_r_frm<0b1101001, FPR64, GPR, "fcvt.d.lu"> {
+ let rs2 = 0b00011;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_D_LU, "fcvt.d.lu", FPR64, GPR>;
+
+def FMV_D_X : FPUnaryOp_r<0b1111001, 0b000, FPR64, GPR, "fmv.d.x"> {
+ let rs2 = 0b00000;
+}
+} // Predicates = [HasStdExtD, IsRV64]
+
+//===----------------------------------------------------------------------===//
+// Assembler Pseudo Instructions (User-Level ISA, Version 2.2, Chapter 20)
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtD] in {
+def : InstAlias<"fld $rd, (${rs1})", (FLD FPR64:$rd, GPR:$rs1, 0), 0>;
+def : InstAlias<"fsd $rs2, (${rs1})", (FSD FPR64:$rs2, GPR:$rs1, 0), 0>;
+
+def : InstAlias<"fmv.d $rd, $rs", (FSGNJ_D FPR64:$rd, FPR64:$rs, FPR64:$rs)>;
+def : InstAlias<"fabs.d $rd, $rs", (FSGNJX_D FPR64:$rd, FPR64:$rs, FPR64:$rs)>;
+def : InstAlias<"fneg.d $rd, $rs", (FSGNJN_D FPR64:$rd, FPR64:$rs, FPR64:$rs)>;
+
+// fgt.d/fge.d are recognised by the GNU assembler but the canonical
+// flt.d/fle.d forms will always be printed. Therefore, set a zero weight.
+def : InstAlias<"fgt.d $rd, $rs, $rt",
+ (FLT_D GPR:$rd, FPR64:$rt, FPR64:$rs), 0>;
+def : InstAlias<"fge.d $rd, $rs, $rt",
+ (FLE_D GPR:$rd, FPR64:$rt, FPR64:$rs), 0>;
+
+def PseudoFLD : PseudoFloatLoad<"fld", FPR64>;
+def PseudoFSD : PseudoStore<"fsd", FPR64>;
+} // Predicates = [HasStdExtD]
+
+//===----------------------------------------------------------------------===//
+// Pseudo-instructions and codegen patterns
+//===----------------------------------------------------------------------===//
+
+class PatFpr64Fpr64<SDPatternOperator OpNode, RVInstR Inst>
+ : Pat<(OpNode FPR64:$rs1, FPR64:$rs2), (Inst $rs1, $rs2)>;
+
+class PatFpr64Fpr64DynFrm<SDPatternOperator OpNode, RVInstRFrm Inst>
+ : Pat<(OpNode FPR64:$rs1, FPR64:$rs2), (Inst $rs1, $rs2, 0b111)>;
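+// The trailing 0b111 operand selects the dynamic rounding mode, i.e. whatever
+// mode is currently programmed in the frm CSR.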
+
+let Predicates = [HasStdExtD] in {
+
+/// Float conversion operations
+
+// f64 -> f32, f32 -> f64
+def : Pat<(fpround FPR64:$rs1), (FCVT_S_D FPR64:$rs1, 0b111)>;
+def : Pat<(fpextend FPR32:$rs1), (FCVT_D_S FPR32:$rs1)>;
+
+// [u]int<->double conversion patterns must be gated on IsRV32 or IsRV64, so
+// they are defined later.
+
+/// Float arithmetic operations
+
+def : PatFpr64Fpr64DynFrm<fadd, FADD_D>;
+def : PatFpr64Fpr64DynFrm<fsub, FSUB_D>;
+def : PatFpr64Fpr64DynFrm<fmul, FMUL_D>;
+def : PatFpr64Fpr64DynFrm<fdiv, FDIV_D>;
+
+def : Pat<(fsqrt FPR64:$rs1), (FSQRT_D FPR64:$rs1, 0b111)>;
+
+def : Pat<(fneg FPR64:$rs1), (FSGNJN_D $rs1, $rs1)>;
+def : Pat<(fabs FPR64:$rs1), (FSGNJX_D $rs1, $rs1)>;
+
+def : PatFpr64Fpr64<fcopysign, FSGNJ_D>;
+def : Pat<(fcopysign FPR64:$rs1, (fneg FPR64:$rs2)), (FSGNJN_D $rs1, $rs2)>;
+
+// fmadd: rs1 * rs2 + rs3
+def : Pat<(fma FPR64:$rs1, FPR64:$rs2, FPR64:$rs3),
+ (FMADD_D $rs1, $rs2, $rs3, 0b111)>;
+
+// fmsub: rs1 * rs2 - rs3
+def : Pat<(fma FPR64:$rs1, FPR64:$rs2, (fneg FPR64:$rs3)),
+ (FMSUB_D FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, 0b111)>;
+
+// fnmsub: -rs1 * rs2 + rs3
+def : Pat<(fma (fneg FPR64:$rs1), FPR64:$rs2, FPR64:$rs3),
+ (FNMSUB_D FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, 0b111)>;
+
+// fnmadd: -rs1 * rs2 - rs3
+def : Pat<(fma (fneg FPR64:$rs1), FPR64:$rs2, (fneg FPR64:$rs3)),
+ (FNMADD_D FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, 0b111)>;
+
+// The RISC-V 2.2 user-level ISA spec defines fmin and fmax as returning the
+// canonical NaN when given a signaling NaN. This doesn't match the LLVM
+// behaviour (see https://bugs.llvm.org/show_bug.cgi?id=27363). However, the
+// draft 2.3 ISA spec changes the definition of fmin and fmax in a way that
+// matches LLVM's fminnum and fmaxnum
+// <https://github.com/riscv/riscv-isa-manual/commit/cd20cee7efd9bac7c5aa127ec3b451749d2b3cce>.
+def : PatFpr64Fpr64<fminnum, FMIN_D>;
+def : PatFpr64Fpr64<fmaxnum, FMAX_D>;
+
+/// Setcc
+
+def : PatFpr64Fpr64<seteq, FEQ_D>;
+def : PatFpr64Fpr64<setoeq, FEQ_D>;
+def : PatFpr64Fpr64<setlt, FLT_D>;
+def : PatFpr64Fpr64<setolt, FLT_D>;
+def : PatFpr64Fpr64<setle, FLE_D>;
+def : PatFpr64Fpr64<setole, FLE_D>;
+
+// Define pattern expansions for setcc operations which aren't directly
+// handled by a RISC-V instruction and aren't expanded in the SelectionDAG
+// Legalizer.
+
+def : Pat<(seto FPR64:$rs1, FPR64:$rs2),
+ (AND (FEQ_D FPR64:$rs1, FPR64:$rs1),
+ (FEQ_D FPR64:$rs2, FPR64:$rs2))>;
+
+def : Pat<(setuo FPR64:$rs1, FPR64:$rs2),
+ (SLTIU (AND (FEQ_D FPR64:$rs1, FPR64:$rs1),
+ (FEQ_D FPR64:$rs2, FPR64:$rs2)),
+ 1)>;
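+// feq.d rd, rs, rs yields 1 exactly when rs is not NaN, so ANDing the two
+// self-comparisons gives "both operands ordered"; setuo then inverts that
+// with sltiu rd, rd, 1, which tests for equality with zero.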
+
+def Select_FPR64_Using_CC_GPR : SelectCC_rrirr<FPR64, GPR>;
+
+/// Loads
+
+defm : LdPat<load, FLD>;
+
+/// Stores
+
+defm : StPat<store, FSD, FPR64>;
+
+/// Pseudo-instructions needed for the soft-float ABI with RV32D
+
+// Moves two GPRs to an FPR.
+let usesCustomInserter = 1 in
+def BuildPairF64Pseudo
+ : Pseudo<(outs FPR64:$dst), (ins GPR:$src1, GPR:$src2),
+ [(set FPR64:$dst, (RISCVBuildPairF64 GPR:$src1, GPR:$src2))]>;
+
+// Moves an FPR to two GPRs.
+let usesCustomInserter = 1 in
+def SplitF64Pseudo
+ : Pseudo<(outs GPR:$dst1, GPR:$dst2), (ins FPR64:$src),
+ [(set GPR:$dst1, GPR:$dst2, (RISCVSplitF64 FPR64:$src))]>;
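+// For example, with the ilp32 ABI an f64 argument arrives split across a
+// GPR pair (e.g. a0/a1); BuildPairF64Pseudo reassembles it into an FPR so the
+// D-extension instructions above can operate on it, and SplitF64Pseudo
+// performs the inverse for f64 values leaving in GPRs.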
+
+} // Predicates = [HasStdExtD]
+
+let Predicates = [HasStdExtD, IsRV32] in {
+// double->[u]int. Round-to-zero must be used.
+def : Pat<(fp_to_sint FPR64:$rs1), (FCVT_W_D FPR64:$rs1, 0b001)>;
+def : Pat<(fp_to_uint FPR64:$rs1), (FCVT_WU_D FPR64:$rs1, 0b001)>;
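+// 0b001 is the RTZ encoding of the rm field (0b000 RNE, 0b001 RTZ, 0b010 RDN,
+// 0b011 RUP, 0b100 RMM), matching the truncating semantics of fp_to_[su]int.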
+
+// [u]int->double.
+def : Pat<(sint_to_fp GPR:$rs1), (FCVT_D_W GPR:$rs1)>;
+def : Pat<(uint_to_fp GPR:$rs1), (FCVT_D_WU GPR:$rs1)>;
+} // Predicates = [HasStdExtD, IsRV32]
+
+let Predicates = [HasStdExtD, IsRV64] in {
+def : Pat<(bitconvert GPR:$rs1), (FMV_D_X GPR:$rs1)>;
+def : Pat<(bitconvert FPR64:$rs1), (FMV_X_D FPR64:$rs1)>;
+
+// FP->[u]int32 is mostly handled by the FP->[u]int64 patterns. This is safe
+// because fpto[u|s]i produces poison if the value can't fit into the target.
+// We match the single case below because fcvt.wu.d sign-extends its result,
+// so it is cheaper than fcvt.lu.d+sext.w.
+def : Pat<(sext_inreg (zexti32 (fp_to_uint FPR64:$rs1)), i32),
+ (FCVT_WU_D $rs1, 0b001)>;
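+// That is, when the result is only consumed as an i32, e.g.
+//   fcvt.wu.d a0, fa0, rtz
+// already leaves a sign-extended value in a0, avoiding the extra sext.w that
+// fcvt.lu.d would need.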
+
+// [u]int32->fp
+def : Pat<(sint_to_fp (sext_inreg GPR:$rs1, i32)), (FCVT_D_W $rs1)>;
+def : Pat<(uint_to_fp (zexti32 GPR:$rs1)), (FCVT_D_WU $rs1)>;
+
+def : Pat<(fp_to_sint FPR64:$rs1), (FCVT_L_D FPR64:$rs1, 0b001)>;
+def : Pat<(fp_to_uint FPR64:$rs1), (FCVT_LU_D FPR64:$rs1, 0b001)>;
+
+// [u]int64->fp. Match GCC and default to using dynamic rounding mode.
+def : Pat<(sint_to_fp GPR:$rs1), (FCVT_D_L GPR:$rs1, 0b111)>;
+def : Pat<(uint_to_fp GPR:$rs1), (FCVT_D_LU GPR:$rs1, 0b111)>;
+} // Predicates = [HasStdExtD, IsRV64]
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
new file mode 100644
index 000000000000..032642942f2b
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -0,0 +1,389 @@
+//===-- RISCVInstrInfoF.td - RISC-V 'F' instructions -------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the RISC-V instructions from the standard 'F',
+// Single-Precision Floating-Point instruction set extension.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// RISC-V specific DAG Nodes.
+//===----------------------------------------------------------------------===//
+
+def SDT_RISCVFMV_W_X_RV64
+ : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisVT<1, i64>]>;
+def SDT_RISCVFMV_X_ANYEXTW_RV64
+ : SDTypeProfile<1, 1, [SDTCisVT<0, i64>, SDTCisVT<1, f32>]>;
+
+def riscv_fmv_w_x_rv64
+ : SDNode<"RISCVISD::FMV_W_X_RV64", SDT_RISCVFMV_W_X_RV64>;
+def riscv_fmv_x_anyextw_rv64
+ : SDNode<"RISCVISD::FMV_X_ANYEXTW_RV64", SDT_RISCVFMV_X_ANYEXTW_RV64>;
+
+//===----------------------------------------------------------------------===//
+// Operand and SDNode transformation definitions.
+//===----------------------------------------------------------------------===//
+
+// Floating-point rounding mode
+
+def FRMArg : AsmOperandClass {
+ let Name = "FRMArg";
+ let RenderMethod = "addFRMArgOperands";
+ let DiagnosticType = "InvalidFRMArg";
+}
+
+def frmarg : Operand<XLenVT> {
+ let ParserMatchClass = FRMArg;
+ let PrintMethod = "printFRMArg";
+ let DecoderMethod = "decodeFRMArg";
+}
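+// In assembly the rounding mode appears as an optional final operand, e.g.
+//   fadd.s ft0, ft1, ft2, rtz
+// and is printed/parsed by name (rne, rtz, rdn, rup, rmm, dyn).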
+
+//===----------------------------------------------------------------------===//
+// Instruction class templates
+//===----------------------------------------------------------------------===//
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class FPFMAS_rrr_frm<RISCVOpcode opcode, string opcodestr>
+ : RVInstR4<0b00, opcode, (outs FPR32:$rd),
+ (ins FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, frmarg:$funct3),
+ opcodestr, "$rd, $rs1, $rs2, $rs3, $funct3">;
+
+class FPFMASDynFrmAlias<FPFMAS_rrr_frm Inst, string OpcodeStr>
+ : InstAlias<OpcodeStr#" $rd, $rs1, $rs2, $rs3",
+ (Inst FPR32:$rd, FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, 0b111)>;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class FPALUS_rr<bits<7> funct7, bits<3> funct3, string opcodestr>
+ : RVInstR<funct7, funct3, OPC_OP_FP, (outs FPR32:$rd),
+ (ins FPR32:$rs1, FPR32:$rs2), opcodestr, "$rd, $rs1, $rs2">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class FPALUS_rr_frm<bits<7> funct7, string opcodestr>
+ : RVInstRFrm<funct7, OPC_OP_FP, (outs FPR32:$rd),
+ (ins FPR32:$rs1, FPR32:$rs2, frmarg:$funct3), opcodestr,
+ "$rd, $rs1, $rs2, $funct3">;
+
+class FPALUSDynFrmAlias<FPALUS_rr_frm Inst, string OpcodeStr>
+ : InstAlias<OpcodeStr#" $rd, $rs1, $rs2",
+ (Inst FPR32:$rd, FPR32:$rs1, FPR32:$rs2, 0b111)>;
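+// For example, "fadd.s fa0, fa1, fa2" is accepted as an alias for
+// "fadd.s fa0, fa1, fa2, dyn" (dynamic rounding, frm field 0b111).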
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class FPUnaryOp_r<bits<7> funct7, bits<3> funct3, RegisterClass rdty,
+ RegisterClass rs1ty, string opcodestr>
+ : RVInstR<funct7, funct3, OPC_OP_FP, (outs rdty:$rd), (ins rs1ty:$rs1),
+ opcodestr, "$rd, $rs1">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class FPUnaryOp_r_frm<bits<7> funct7, RegisterClass rdty, RegisterClass rs1ty,
+ string opcodestr>
+ : RVInstRFrm<funct7, OPC_OP_FP, (outs rdty:$rd),
+ (ins rs1ty:$rs1, frmarg:$funct3), opcodestr,
+ "$rd, $rs1, $funct3">;
+
+class FPUnaryOpDynFrmAlias<FPUnaryOp_r_frm Inst, string OpcodeStr,
+ RegisterClass rdty, RegisterClass rs1ty>
+ : InstAlias<OpcodeStr#" $rd, $rs1",
+ (Inst rdty:$rd, rs1ty:$rs1, 0b111)>;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class FPCmpS_rr<bits<3> funct3, string opcodestr>
+ : RVInstR<0b1010000, funct3, OPC_OP_FP, (outs GPR:$rd),
+ (ins FPR32:$rs1, FPR32:$rs2), opcodestr, "$rd, $rs1, $rs2">;
+
+//===----------------------------------------------------------------------===//
+// Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtF] in {
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
+def FLW : RVInstI<0b010, OPC_LOAD_FP, (outs FPR32:$rd),
+ (ins GPR:$rs1, simm12:$imm12),
+ "flw", "$rd, ${imm12}(${rs1})">;
+
+// Operands for stores are in the order srcreg, base, offset rather than
+// reflecting the order these fields are specified in the instruction
+// encoding.
+let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
+def FSW : RVInstS<0b010, OPC_STORE_FP, (outs),
+ (ins FPR32:$rs2, GPR:$rs1, simm12:$imm12),
+ "fsw", "$rs2, ${imm12}(${rs1})">;
+
+def FMADD_S : FPFMAS_rrr_frm<OPC_MADD, "fmadd.s">;
+def : FPFMASDynFrmAlias<FMADD_S, "fmadd.s">;
+def FMSUB_S : FPFMAS_rrr_frm<OPC_MSUB, "fmsub.s">;
+def : FPFMASDynFrmAlias<FMSUB_S, "fmsub.s">;
+def FNMSUB_S : FPFMAS_rrr_frm<OPC_NMSUB, "fnmsub.s">;
+def : FPFMASDynFrmAlias<FNMSUB_S, "fnmsub.s">;
+def FNMADD_S : FPFMAS_rrr_frm<OPC_NMADD, "fnmadd.s">;
+def : FPFMASDynFrmAlias<FNMADD_S, "fnmadd.s">;
+
+def FADD_S : FPALUS_rr_frm<0b0000000, "fadd.s">;
+def : FPALUSDynFrmAlias<FADD_S, "fadd.s">;
+def FSUB_S : FPALUS_rr_frm<0b0000100, "fsub.s">;
+def : FPALUSDynFrmAlias<FSUB_S, "fsub.s">;
+def FMUL_S : FPALUS_rr_frm<0b0001000, "fmul.s">;
+def : FPALUSDynFrmAlias<FMUL_S, "fmul.s">;
+def FDIV_S : FPALUS_rr_frm<0b0001100, "fdiv.s">;
+def : FPALUSDynFrmAlias<FDIV_S, "fdiv.s">;
+
+def FSQRT_S : FPUnaryOp_r_frm<0b0101100, FPR32, FPR32, "fsqrt.s"> {
+ let rs2 = 0b00000;
+}
+def : FPUnaryOpDynFrmAlias<FSQRT_S, "fsqrt.s", FPR32, FPR32>;
+
+def FSGNJ_S : FPALUS_rr<0b0010000, 0b000, "fsgnj.s">;
+def FSGNJN_S : FPALUS_rr<0b0010000, 0b001, "fsgnjn.s">;
+def FSGNJX_S : FPALUS_rr<0b0010000, 0b010, "fsgnjx.s">;
+def FMIN_S : FPALUS_rr<0b0010100, 0b000, "fmin.s">;
+def FMAX_S : FPALUS_rr<0b0010100, 0b001, "fmax.s">;
+
+def FCVT_W_S : FPUnaryOp_r_frm<0b1100000, GPR, FPR32, "fcvt.w.s"> {
+ let rs2 = 0b00000;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_W_S, "fcvt.w.s", GPR, FPR32>;
+
+def FCVT_WU_S : FPUnaryOp_r_frm<0b1100000, GPR, FPR32, "fcvt.wu.s"> {
+ let rs2 = 0b00001;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_WU_S, "fcvt.wu.s", GPR, FPR32>;
+
+def FMV_X_W : FPUnaryOp_r<0b1110000, 0b000, GPR, FPR32, "fmv.x.w"> {
+ let rs2 = 0b00000;
+}
+
+def FEQ_S : FPCmpS_rr<0b010, "feq.s">;
+def FLT_S : FPCmpS_rr<0b001, "flt.s">;
+def FLE_S : FPCmpS_rr<0b000, "fle.s">;
+
+def FCLASS_S : FPUnaryOp_r<0b1110000, 0b001, GPR, FPR32, "fclass.s"> {
+ let rs2 = 0b00000;
+}
+
+def FCVT_S_W : FPUnaryOp_r_frm<0b1101000, FPR32, GPR, "fcvt.s.w"> {
+ let rs2 = 0b00000;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_S_W, "fcvt.s.w", FPR32, GPR>;
+
+def FCVT_S_WU : FPUnaryOp_r_frm<0b1101000, FPR32, GPR, "fcvt.s.wu"> {
+ let rs2 = 0b00001;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_S_WU, "fcvt.s.wu", FPR32, GPR>;
+
+def FMV_W_X : FPUnaryOp_r<0b1111000, 0b000, FPR32, GPR, "fmv.w.x"> {
+ let rs2 = 0b00000;
+}
+} // Predicates = [HasStdExtF]
+
+let Predicates = [HasStdExtF, IsRV64] in {
+def FCVT_L_S : FPUnaryOp_r_frm<0b1100000, GPR, FPR32, "fcvt.l.s"> {
+ let rs2 = 0b00010;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_L_S, "fcvt.l.s", GPR, FPR32>;
+
+def FCVT_LU_S : FPUnaryOp_r_frm<0b1100000, GPR, FPR32, "fcvt.lu.s"> {
+ let rs2 = 0b00011;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_LU_S, "fcvt.lu.s", GPR, FPR32>;
+
+def FCVT_S_L : FPUnaryOp_r_frm<0b1101000, FPR32, GPR, "fcvt.s.l"> {
+ let rs2 = 0b00010;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_S_L, "fcvt.s.l", FPR32, GPR>;
+
+def FCVT_S_LU : FPUnaryOp_r_frm<0b1101000, FPR32, GPR, "fcvt.s.lu"> {
+ let rs2 = 0b00011;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_S_LU, "fcvt.s.lu", FPR32, GPR>;
+} // Predicates = [HasStdExtF, IsRV64]
+
+//===----------------------------------------------------------------------===//
+// Assembler Pseudo Instructions (User-Level ISA, Version 2.2, Chapter 20)
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtF] in {
+def : InstAlias<"flw $rd, (${rs1})", (FLW FPR32:$rd, GPR:$rs1, 0), 0>;
+def : InstAlias<"fsw $rs2, (${rs1})", (FSW FPR32:$rs2, GPR:$rs1, 0), 0>;
+
+def : InstAlias<"fmv.s $rd, $rs", (FSGNJ_S FPR32:$rd, FPR32:$rs, FPR32:$rs)>;
+def : InstAlias<"fabs.s $rd, $rs", (FSGNJX_S FPR32:$rd, FPR32:$rs, FPR32:$rs)>;
+def : InstAlias<"fneg.s $rd, $rs", (FSGNJN_S FPR32:$rd, FPR32:$rs, FPR32:$rs)>;
+
+// fgt.s/fge.s are recognised by the GNU assembler but the canonical
+// flt.s/fle.s forms will always be printed. Therefore, set a zero weight.
+def : InstAlias<"fgt.s $rd, $rs, $rt",
+ (FLT_S GPR:$rd, FPR32:$rt, FPR32:$rs), 0>;
+def : InstAlias<"fge.s $rd, $rs, $rt",
+ (FLE_S GPR:$rd, FPR32:$rt, FPR32:$rs), 0>;
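+// (The trailing integer on these InstAlias defs is the emit priority: 0 makes
+// the alias parse-only, never used when printing, while values above the
+// default of 1 make the alias the preferred printed form.)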
+
+// The following csr instructions actually alias instructions from the base ISA.
+// However, it only makes sense to support them when the F extension is enabled.
+// NOTE: "frcsr", "frrm", and "frflags" are more specialized versions of "csrr".
+def : InstAlias<"frcsr $rd", (CSRRS GPR:$rd, FCSR.Encoding, X0), 2>;
+def : InstAlias<"fscsr $rd, $rs", (CSRRW GPR:$rd, FCSR.Encoding, GPR:$rs)>;
+def : InstAlias<"fscsr $rs", (CSRRW X0, FCSR.Encoding, GPR:$rs), 2>;
+
+def : InstAlias<"frrm $rd", (CSRRS GPR:$rd, FRM.Encoding, X0), 2>;
+def : InstAlias<"fsrm $rd, $rs", (CSRRW GPR:$rd, FRM.Encoding, GPR:$rs)>;
+def : InstAlias<"fsrm $rs", (CSRRW X0, FRM.Encoding, GPR:$rs), 2>;
+def : InstAlias<"fsrmi $rd, $imm", (CSRRWI GPR:$rd, FRM.Encoding, uimm5:$imm)>;
+def : InstAlias<"fsrmi $imm", (CSRRWI X0, FRM.Encoding, uimm5:$imm), 2>;
+
+def : InstAlias<"frflags $rd", (CSRRS GPR:$rd, FFLAGS.Encoding, X0), 2>;
+def : InstAlias<"fsflags $rd, $rs", (CSRRW GPR:$rd, FFLAGS.Encoding, GPR:$rs)>;
+def : InstAlias<"fsflags $rs", (CSRRW X0, FFLAGS.Encoding, GPR:$rs), 2>;
+def : InstAlias<"fsflagsi $rd, $imm", (CSRRWI GPR:$rd, FFLAGS.Encoding, uimm5:$imm)>;
+def : InstAlias<"fsflagsi $imm", (CSRRWI X0, FFLAGS.Encoding, uimm5:$imm), 2>;
+
+// fmv.w.x and fmv.x.w were previously known as fmv.s.x and fmv.x.s. Both
+// spellings should be supported by standard tools.
+def : MnemonicAlias<"fmv.s.x", "fmv.w.x">;
+def : MnemonicAlias<"fmv.x.s", "fmv.x.w">;
+
+def PseudoFLW : PseudoFloatLoad<"flw", FPR32>;
+def PseudoFSW : PseudoStore<"fsw", FPR32>;
+} // Predicates = [HasStdExtF]
+
+//===----------------------------------------------------------------------===//
+// Pseudo-instructions and codegen patterns
+//===----------------------------------------------------------------------===//
+
+/// Generic pattern classes
+class PatFpr32Fpr32<SDPatternOperator OpNode, RVInstR Inst>
+ : Pat<(OpNode FPR32:$rs1, FPR32:$rs2), (Inst $rs1, $rs2)>;
+
+class PatFpr32Fpr32DynFrm<SDPatternOperator OpNode, RVInstRFrm Inst>
+ : Pat<(OpNode FPR32:$rs1, FPR32:$rs2), (Inst $rs1, $rs2, 0b111)>;
+
+let Predicates = [HasStdExtF] in {
+
+/// Float conversion operations
+
+// Moves (no conversion)
+def : Pat<(bitconvert GPR:$rs1), (FMV_W_X GPR:$rs1)>;
+def : Pat<(bitconvert FPR32:$rs1), (FMV_X_W FPR32:$rs1)>;
+
+// [u]int32<->float conversion patterns must be gated on IsRV32 or IsRV64, so
+// are defined later.
+
+/// Float arithmetic operations
+
+def : PatFpr32Fpr32DynFrm<fadd, FADD_S>;
+def : PatFpr32Fpr32DynFrm<fsub, FSUB_S>;
+def : PatFpr32Fpr32DynFrm<fmul, FMUL_S>;
+def : PatFpr32Fpr32DynFrm<fdiv, FDIV_S>;
+
+def : Pat<(fsqrt FPR32:$rs1), (FSQRT_S FPR32:$rs1, 0b111)>;
+
+def : Pat<(fneg FPR32:$rs1), (FSGNJN_S $rs1, $rs1)>;
+def : Pat<(fabs FPR32:$rs1), (FSGNJX_S $rs1, $rs1)>;
+
+def : PatFpr32Fpr32<fcopysign, FSGNJ_S>;
+def : Pat<(fcopysign FPR32:$rs1, (fneg FPR32:$rs2)), (FSGNJN_S $rs1, $rs2)>;
+
+// fmadd: rs1 * rs2 + rs3
+def : Pat<(fma FPR32:$rs1, FPR32:$rs2, FPR32:$rs3),
+ (FMADD_S $rs1, $rs2, $rs3, 0b111)>;
+
+// fmsub: rs1 * rs2 - rs3
+def : Pat<(fma FPR32:$rs1, FPR32:$rs2, (fneg FPR32:$rs3)),
+ (FMSUB_S FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, 0b111)>;
+
+// fnmsub: -rs1 * rs2 + rs3
+def : Pat<(fma (fneg FPR32:$rs1), FPR32:$rs2, FPR32:$rs3),
+ (FNMSUB_S FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, 0b111)>;
+
+// fnmadd: -rs1 * rs2 - rs3
+def : Pat<(fma (fneg FPR32:$rs1), FPR32:$rs2, (fneg FPR32:$rs3)),
+ (FNMADD_S FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, 0b111)>;
+
+// The RISC-V 2.2 user-level ISA spec defines fmin and fmax as returning the
+// canonical NaN when given a signaling NaN. This doesn't match the LLVM
+// behaviour (see https://bugs.llvm.org/show_bug.cgi?id=27363). However, the
+// draft 2.3 ISA spec changes the definition of fmin and fmax in a way that
+// matches LLVM's fminnum and fmaxnum
+// <https://github.com/riscv/riscv-isa-manual/commit/cd20cee7efd9bac7c5aa127ec3b451749d2b3cce>.
+def : PatFpr32Fpr32<fminnum, FMIN_S>;
+def : PatFpr32Fpr32<fmaxnum, FMAX_S>;
+
+/// Setcc
+
+def : PatFpr32Fpr32<seteq, FEQ_S>;
+def : PatFpr32Fpr32<setoeq, FEQ_S>;
+def : PatFpr32Fpr32<setlt, FLT_S>;
+def : PatFpr32Fpr32<setolt, FLT_S>;
+def : PatFpr32Fpr32<setle, FLE_S>;
+def : PatFpr32Fpr32<setole, FLE_S>;
+
+// Define pattern expansions for setcc operations which aren't directly
+// handled by a RISC-V instruction and aren't expanded in the SelectionDAG
+// Legalizer.
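+//
+// seto checks that each input equals itself (i.e. neither is NaN); setuo is
+// its complement, computed with SLTIU x, 1, which sets x to (x == 0).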
+
+def : Pat<(seto FPR32:$rs1, FPR32:$rs2),
+ (AND (FEQ_S FPR32:$rs1, FPR32:$rs1),
+ (FEQ_S FPR32:$rs2, FPR32:$rs2))>;
+
+def : Pat<(setuo FPR32:$rs1, FPR32:$rs2),
+ (SLTIU (AND (FEQ_S FPR32:$rs1, FPR32:$rs1),
+ (FEQ_S FPR32:$rs2, FPR32:$rs2)),
+ 1)>;
+
+def Select_FPR32_Using_CC_GPR : SelectCC_rrirr<FPR32, GPR>;
+
+/// Loads
+
+defm : LdPat<load, FLW>;
+
+/// Stores
+
+defm : StPat<store, FSW, FPR32>;
+
+} // Predicates = [HasStdExtF]
+
+let Predicates = [HasStdExtF, IsRV32] in {
+// float->[u]int. Round-to-zero must be used.
+def : Pat<(fp_to_sint FPR32:$rs1), (FCVT_W_S $rs1, 0b001)>;
+def : Pat<(fp_to_uint FPR32:$rs1), (FCVT_WU_S $rs1, 0b001)>;
+
+// [u]int->float. Match GCC and default to using dynamic rounding mode.
+def : Pat<(sint_to_fp GPR:$rs1), (FCVT_S_W $rs1, 0b111)>;
+def : Pat<(uint_to_fp GPR:$rs1), (FCVT_S_WU $rs1, 0b111)>;
+} // Predicates = [HasStdExtF, IsRV32]
+
+let Predicates = [HasStdExtF, IsRV64] in {
+def : Pat<(riscv_fmv_w_x_rv64 GPR:$src), (FMV_W_X GPR:$src)>;
+def : Pat<(riscv_fmv_x_anyextw_rv64 FPR32:$src), (FMV_X_W FPR32:$src)>;
+def : Pat<(sexti32 (riscv_fmv_x_anyextw_rv64 FPR32:$src)),
+ (FMV_X_W FPR32:$src)>;
+
+// FP->[u]int32 is mostly handled by the FP->[u]int64 patterns. This is safe
+// because fpto[u|s]i produces poison if the value can't fit into the target
+// type. We match the single case below because fcvt.wu.s sign-extends its
+// result, so it is cheaper than fcvt.lu.s+sext.w.
+def : Pat<(sext_inreg (assertzexti32 (fp_to_uint FPR32:$rs1)), i32),
+ (FCVT_WU_S $rs1, 0b001)>;
+
+// FP->[u]int64
+def : Pat<(fp_to_sint FPR32:$rs1), (FCVT_L_S $rs1, 0b001)>;
+def : Pat<(fp_to_uint FPR32:$rs1), (FCVT_LU_S $rs1, 0b001)>;
+
+// [u]int->fp. Match GCC and default to using dynamic rounding mode.
+def : Pat<(sint_to_fp (sext_inreg GPR:$rs1, i32)), (FCVT_S_W $rs1, 0b111)>;
+def : Pat<(uint_to_fp (zexti32 GPR:$rs1)), (FCVT_S_WU $rs1, 0b111)>;
+def : Pat<(sint_to_fp GPR:$rs1), (FCVT_S_L $rs1, 0b111)>;
+def : Pat<(uint_to_fp GPR:$rs1), (FCVT_S_LU $rs1, 0b111)>;
+} // Predicates = [HasStdExtF, IsRV64]
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoM.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoM.td
new file mode 100644
index 000000000000..e75151ba99c7
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoM.td
@@ -0,0 +1,84 @@
+//===-- RISCVInstrInfoM.td - RISC-V 'M' instructions -------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the RISC-V instructions from the standard 'M', Integer
+// Multiplication and Division instruction set extension.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// RISC-V specific DAG Nodes.
+//===----------------------------------------------------------------------===//
+
+def riscv_divw : SDNode<"RISCVISD::DIVW", SDTIntBinOp>;
+def riscv_divuw : SDNode<"RISCVISD::DIVUW", SDTIntBinOp>;
+def riscv_remuw : SDNode<"RISCVISD::REMUW", SDTIntBinOp>;
+
+//===----------------------------------------------------------------------===//
+// Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtM] in {
+def MUL : ALU_rr<0b0000001, 0b000, "mul">;
+def MULH : ALU_rr<0b0000001, 0b001, "mulh">;
+def MULHSU : ALU_rr<0b0000001, 0b010, "mulhsu">;
+def MULHU : ALU_rr<0b0000001, 0b011, "mulhu">;
+def DIV : ALU_rr<0b0000001, 0b100, "div">;
+def DIVU : ALU_rr<0b0000001, 0b101, "divu">;
+def REM : ALU_rr<0b0000001, 0b110, "rem">;
+def REMU : ALU_rr<0b0000001, 0b111, "remu">;
+} // Predicates = [HasStdExtM]
+
+let Predicates = [HasStdExtM, IsRV64] in {
+def MULW : ALUW_rr<0b0000001, 0b000, "mulw">;
+def DIVW : ALUW_rr<0b0000001, 0b100, "divw">;
+def DIVUW : ALUW_rr<0b0000001, 0b101, "divuw">;
+def REMW : ALUW_rr<0b0000001, 0b110, "remw">;
+def REMUW : ALUW_rr<0b0000001, 0b111, "remuw">;
+} // Predicates = [HasStdExtM, IsRV64]
+
+//===----------------------------------------------------------------------===//
+// Pseudo-instructions and codegen patterns
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtM] in {
+def : PatGprGpr<mul, MUL>;
+def : PatGprGpr<mulhs, MULH>;
+def : PatGprGpr<mulhu, MULHU>;
+// No ISDOpcode for mulhsu
+def : PatGprGpr<sdiv, DIV>;
+def : PatGprGpr<udiv, DIVU>;
+def : PatGprGpr<srem, REM>;
+def : PatGprGpr<urem, REMU>;
+} // Predicates = [HasStdExtM]
+
+let Predicates = [HasStdExtM, IsRV64] in {
+def : Pat<(sext_inreg (mul GPR:$rs1, GPR:$rs2), i32),
+ (MULW GPR:$rs1, GPR:$rs2)>;
+
+def : PatGprGpr<riscv_divw, DIVW>;
+def : PatGprGpr<riscv_divuw, DIVUW>;
+def : PatGprGpr<riscv_remuw, REMUW>;
+
+// Handle the specific cases where using DIVU/REMU would be correct and result
+// in fewer instructions than emitting DIVUW/REMUW then zero-extending the
+// result.
+def : Pat<(zexti32 (riscv_divuw (zexti32 GPR:$rs1), (zexti32 GPR:$rs2))),
+ (DIVU GPR:$rs1, GPR:$rs2)>;
+def : Pat<(zexti32 (riscv_remuw (zexti32 GPR:$rs1), (zexti32 GPR:$rs2))),
+ (REMU GPR:$rs1, GPR:$rs2)>;
+
+// Although the sexti32 operands may not have originated from an i32 srem,
+// this pattern is safe as it is impossible for two sign extended inputs to
+// produce a result where res[63:32]=0 and res[31]=1.
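+// For example, srem of the sign-extended inputs -7 and 3 yields -1, whose
+// 64-bit pattern is all ones and already equals sext.w of the i32 remainder.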
+def : Pat<(srem (sexti32 GPR:$rs1), (sexti32 GPR:$rs2)),
+ (REMW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (srem (sexti32 GPR:$rs1),
+ (sexti32 GPR:$rs2)), i32),
+ (REMW GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtM, IsRV64]
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
new file mode 100644
index 000000000000..b1dbcfa7f738
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -0,0 +1,137 @@
+//===-- RISCVMCInstLower.cpp - Convert RISCV MachineInstr to an MCInst -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code to lower RISCV MachineInstrs to their corresponding
+// MCInst records.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "MCTargetDesc/RISCVMCExpr.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
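+// Lowers a symbol machine operand to an MCOperand, wrapping it in the
+// RISCVMCExpr variant selected by the operand's target flag; e.g. an MO_LO
+// reference to global "foo" with offset 4 lowers to %lo(foo+4).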
+static MCOperand lowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym,
+ const AsmPrinter &AP) {
+ MCContext &Ctx = AP.OutContext;
+ RISCVMCExpr::VariantKind Kind;
+
+ switch (MO.getTargetFlags()) {
+ default:
+ llvm_unreachable("Unknown target flag on GV operand");
+ case RISCVII::MO_None:
+ Kind = RISCVMCExpr::VK_RISCV_None;
+ break;
+ case RISCVII::MO_CALL:
+ Kind = RISCVMCExpr::VK_RISCV_CALL;
+ break;
+ case RISCVII::MO_PLT:
+ Kind = RISCVMCExpr::VK_RISCV_CALL_PLT;
+ break;
+ case RISCVII::MO_LO:
+ Kind = RISCVMCExpr::VK_RISCV_LO;
+ break;
+ case RISCVII::MO_HI:
+ Kind = RISCVMCExpr::VK_RISCV_HI;
+ break;
+ case RISCVII::MO_PCREL_LO:
+ Kind = RISCVMCExpr::VK_RISCV_PCREL_LO;
+ break;
+ case RISCVII::MO_PCREL_HI:
+ Kind = RISCVMCExpr::VK_RISCV_PCREL_HI;
+ break;
+ case RISCVII::MO_GOT_HI:
+ Kind = RISCVMCExpr::VK_RISCV_GOT_HI;
+ break;
+ case RISCVII::MO_TPREL_LO:
+ Kind = RISCVMCExpr::VK_RISCV_TPREL_LO;
+ break;
+ case RISCVII::MO_TPREL_HI:
+ Kind = RISCVMCExpr::VK_RISCV_TPREL_HI;
+ break;
+ case RISCVII::MO_TPREL_ADD:
+ Kind = RISCVMCExpr::VK_RISCV_TPREL_ADD;
+ break;
+ case RISCVII::MO_TLS_GOT_HI:
+ Kind = RISCVMCExpr::VK_RISCV_TLS_GOT_HI;
+ break;
+ case RISCVII::MO_TLS_GD_HI:
+ Kind = RISCVMCExpr::VK_RISCV_TLS_GD_HI;
+ break;
+ }
+
+ const MCExpr *ME =
+ MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, Ctx);
+
+ if (!MO.isJTI() && !MO.isMBB() && MO.getOffset())
+ ME = MCBinaryExpr::createAdd(
+ ME, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
+
+ if (Kind != RISCVMCExpr::VK_RISCV_None)
+ ME = RISCVMCExpr::create(ME, Kind, Ctx);
+ return MCOperand::createExpr(ME);
+}
+
+bool llvm::LowerRISCVMachineOperandToMCOperand(const MachineOperand &MO,
+ MCOperand &MCOp,
+ const AsmPrinter &AP) {
+ switch (MO.getType()) {
+ default:
+ report_fatal_error("LowerRISCVMachineInstrToMCInst: unknown operand type");
+ case MachineOperand::MO_Register:
+ // Ignore all implicit register operands.
+ if (MO.isImplicit())
+ return false;
+ MCOp = MCOperand::createReg(MO.getReg());
+ break;
+ case MachineOperand::MO_RegisterMask:
+ // Regmasks are like implicit defs.
+ return false;
+ case MachineOperand::MO_Immediate:
+ MCOp = MCOperand::createImm(MO.getImm());
+ break;
+ case MachineOperand::MO_MachineBasicBlock:
+ MCOp = lowerSymbolOperand(MO, MO.getMBB()->getSymbol(), AP);
+ break;
+ case MachineOperand::MO_GlobalAddress:
+ MCOp = lowerSymbolOperand(MO, AP.getSymbol(MO.getGlobal()), AP);
+ break;
+ case MachineOperand::MO_BlockAddress:
+ MCOp = lowerSymbolOperand(
+ MO, AP.GetBlockAddressSymbol(MO.getBlockAddress()), AP);
+ break;
+ case MachineOperand::MO_ExternalSymbol:
+ MCOp = lowerSymbolOperand(
+ MO, AP.GetExternalSymbolSymbol(MO.getSymbolName()), AP);
+ break;
+ case MachineOperand::MO_ConstantPoolIndex:
+ MCOp = lowerSymbolOperand(MO, AP.GetCPISymbol(MO.getIndex()), AP);
+ break;
+ }
+ return true;
+}
+
+void llvm::LowerRISCVMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
+ const AsmPrinter &AP) {
+ OutMI.setOpcode(MI->getOpcode());
+
+ for (const MachineOperand &MO : MI->operands()) {
+ MCOperand MCOp;
+ if (LowerRISCVMachineOperandToMCOperand(MO, MCOp, AP))
+ OutMI.addOperand(MCOp);
+ }
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h
new file mode 100644
index 000000000000..585bff2bc20a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMachineFunctionInfo.h
@@ -0,0 +1,52 @@
+//=- RISCVMachineFunctionInfo.h - RISCV machine function info -----*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares RISCV-specific per-machine-function information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVMACHINEFUNCTIONINFO_H
+#define LLVM_LIB_TARGET_RISCV_RISCVMACHINEFUNCTIONINFO_H
+
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+
+namespace llvm {
+
+/// RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo
+/// and contains private RISCV-specific information for each MachineFunction.
+class RISCVMachineFunctionInfo : public MachineFunctionInfo {
+private:
+ MachineFunction &MF;
+ /// FrameIndex for start of varargs area
+ int VarArgsFrameIndex = 0;
+ /// Size of the save area used for varargs
+ int VarArgsSaveSize = 0;
+ /// FrameIndex used for transferring values between 64-bit FPRs and a pair
+ /// of 32-bit GPRs via the stack.
+ int MoveF64FrameIndex = -1;
+
+public:
+ RISCVMachineFunctionInfo(MachineFunction &MF) : MF(MF) {}
+
+ int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
+ void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; }
+
+ unsigned getVarArgsSaveSize() const { return VarArgsSaveSize; }
+ void setVarArgsSaveSize(int Size) { VarArgsSaveSize = Size; }
+
+ int getMoveF64FrameIndex() {
+ if (MoveF64FrameIndex == -1)
+ MoveF64FrameIndex = MF.getFrameInfo().CreateStackObject(8, 8, false);
+ return MoveF64FrameIndex;
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_RISCV_RISCVMACHINEFUNCTIONINFO_H
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
new file mode 100644
index 000000000000..82b1209cb8e7
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
@@ -0,0 +1,285 @@
+//===----- RISCVMergeBaseOffset.cpp - Optimise address calculations ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Merge the offset of address calculation into the offset field
+// of instructions in a global address lowering sequence. This pass transforms:
+// lui vreg1, %hi(s)
+// addi vreg2, vreg1, %lo(s)
+//   addi vreg3, vreg2, Offset
+//
+// Into:
+// lui vreg1, %hi(s+Offset)
+// addi vreg2, vreg1, %lo(s+Offset)
+//
+// The transformation is carried out under certain conditions:
+// 1) The offset field in the base of global address lowering sequence is zero.
+// 2) The lowered global address has only one use.
+//
+// The offset field can take several forms. This pass handles all of them.
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVTargetMachine.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Target/TargetOptions.h"
+#include <set>
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-merge-base-offset"
+#define RISCV_MERGE_BASE_OFFSET_NAME "RISCV Merge Base Offset"
+namespace {
+
+struct RISCVMergeBaseOffsetOpt : public MachineFunctionPass {
+ static char ID;
+ const MachineFunction *MF;
+ bool runOnMachineFunction(MachineFunction &Fn) override;
+ bool detectLuiAddiGlobal(MachineInstr &LUI, MachineInstr *&ADDI);
+
+ bool detectAndFoldOffset(MachineInstr &HiLUI, MachineInstr &LoADDI);
+ void foldOffset(MachineInstr &HiLUI, MachineInstr &LoADDI, MachineInstr &Tail,
+ int64_t Offset);
+  bool matchLargeOffset(MachineInstr &TailAdd, unsigned GAReg, int64_t &Offset);
+ RISCVMergeBaseOffsetOpt() : MachineFunctionPass(ID) {}
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::IsSSA);
+ }
+
+ StringRef getPassName() const override {
+ return RISCV_MERGE_BASE_OFFSET_NAME;
+ }
+
+private:
+ MachineRegisterInfo *MRI;
+ std::set<MachineInstr *> DeadInstrs;
+};
+} // end anonymous namespace
+
+char RISCVMergeBaseOffsetOpt::ID = 0;
+INITIALIZE_PASS(RISCVMergeBaseOffsetOpt, "riscv-merge-base-offset",
+ RISCV_MERGE_BASE_OFFSET_NAME, false, false)
+
+// Detect the pattern:
+// lui vreg1, %hi(s)
+// addi vreg2, vreg1, %lo(s)
+//
+// Pattern only accepted if:
+// 1) ADDI has only one use.
+// 2) LUI has only one use; which is the ADDI.
+// 3) Both ADDI and LUI have GlobalAddress type which indicates that these
+// are generated from global address lowering.
+// 4) Offset value in the Global Address is 0.
+bool RISCVMergeBaseOffsetOpt::detectLuiAddiGlobal(MachineInstr &HiLUI,
+ MachineInstr *&LoADDI) {
+ if (HiLUI.getOpcode() != RISCV::LUI ||
+ HiLUI.getOperand(1).getTargetFlags() != RISCVII::MO_HI ||
+ HiLUI.getOperand(1).getType() != MachineOperand::MO_GlobalAddress ||
+ HiLUI.getOperand(1).getOffset() != 0 ||
+ !MRI->hasOneUse(HiLUI.getOperand(0).getReg()))
+ return false;
+ unsigned HiLuiDestReg = HiLUI.getOperand(0).getReg();
+ LoADDI = MRI->use_begin(HiLuiDestReg)->getParent();
+ if (LoADDI->getOpcode() != RISCV::ADDI ||
+ LoADDI->getOperand(2).getTargetFlags() != RISCVII::MO_LO ||
+ LoADDI->getOperand(2).getType() != MachineOperand::MO_GlobalAddress ||
+ LoADDI->getOperand(2).getOffset() != 0 ||
+ !MRI->hasOneUse(LoADDI->getOperand(0).getReg()))
+ return false;
+ return true;
+}
+
+// Update the offset in HiLUI and LoADDI instructions.
+// Delete the tail instruction and update all the uses to use the
+// output from LoADDI.
+void RISCVMergeBaseOffsetOpt::foldOffset(MachineInstr &HiLUI,
+ MachineInstr &LoADDI,
+ MachineInstr &Tail, int64_t Offset) {
+ // Put the offset back in HiLUI and the LoADDI
+ HiLUI.getOperand(1).setOffset(Offset);
+ LoADDI.getOperand(2).setOffset(Offset);
+ // Delete the tail instruction.
+ DeadInstrs.insert(&Tail);
+ MRI->replaceRegWith(Tail.getOperand(0).getReg(),
+ LoADDI.getOperand(0).getReg());
+ LLVM_DEBUG(dbgs() << " Merged offset " << Offset << " into base.\n"
+ << " " << HiLUI << " " << LoADDI;);
+}
+
+// Detect patterns for large offsets that are passed into an ADD instruction.
+//
+// Base address lowering is of the form:
+// HiLUI: lui vreg1, %hi(s)
+// LoADDI: addi vreg2, vreg1, %lo(s)
+// / \
+// / \
+// / \
+// / The large offset can be of two forms: \
+// 1) Offset that has non zero bits in lower 2) Offset that has non zero
+// 12 bits and upper 20 bits bits in upper 20 bits only
+//  OffsetLui: lui   vreg3, 4
+// OffsetTail: addi voff, vreg3, 188 OffsetTail: lui voff, 128
+// \ /
+// \ /
+// \ /
+// \ /
+// TailAdd: add vreg4, vreg2, voff
+bool RISCVMergeBaseOffsetOpt::matchLargeOffset(MachineInstr &TailAdd,
+ unsigned GAReg,
+ int64_t &Offset) {
+ assert((TailAdd.getOpcode() == RISCV::ADD) && "Expected ADD instruction!");
+ unsigned Rs = TailAdd.getOperand(1).getReg();
+ unsigned Rt = TailAdd.getOperand(2).getReg();
+ unsigned Reg = Rs == GAReg ? Rt : Rs;
+
+ // Can't fold if the register has more than one use.
+ if (!MRI->hasOneUse(Reg))
+ return false;
+ // This can point to an ADDI or a LUI:
+ MachineInstr &OffsetTail = *MRI->getVRegDef(Reg);
+ if (OffsetTail.getOpcode() == RISCV::ADDI) {
+ // The offset value has non zero bits in both %hi and %lo parts.
+ // Detect an ADDI that feeds from a LUI instruction.
+ MachineOperand &AddiImmOp = OffsetTail.getOperand(2);
+ if (AddiImmOp.getTargetFlags() != RISCVII::MO_None)
+ return false;
+ int64_t OffLo = AddiImmOp.getImm();
+ MachineInstr &OffsetLui =
+ *MRI->getVRegDef(OffsetTail.getOperand(1).getReg());
+ MachineOperand &LuiImmOp = OffsetLui.getOperand(1);
+ if (OffsetLui.getOpcode() != RISCV::LUI ||
+ LuiImmOp.getTargetFlags() != RISCVII::MO_None ||
+ !MRI->hasOneUse(OffsetLui.getOperand(0).getReg()))
+ return false;
+ int64_t OffHi = OffsetLui.getOperand(1).getImm();
+ Offset = (OffHi << 12) + OffLo;
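+    // Using the example from the diagram above: (4 << 12) + 188 = 16572.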
+ LLVM_DEBUG(dbgs() << " Offset Instrs: " << OffsetTail
+ << " " << OffsetLui);
+ DeadInstrs.insert(&OffsetTail);
+ DeadInstrs.insert(&OffsetLui);
+ return true;
+ } else if (OffsetTail.getOpcode() == RISCV::LUI) {
+ // The offset value has all zero bits in the lower 12 bits. Only LUI
+ // exists.
+ LLVM_DEBUG(dbgs() << " Offset Instr: " << OffsetTail);
+ Offset = OffsetTail.getOperand(1).getImm() << 12;
+ DeadInstrs.insert(&OffsetTail);
+ return true;
+ }
+ return false;
+}
+
+bool RISCVMergeBaseOffsetOpt::detectAndFoldOffset(MachineInstr &HiLUI,
+ MachineInstr &LoADDI) {
+ unsigned DestReg = LoADDI.getOperand(0).getReg();
+ assert(MRI->hasOneUse(DestReg) && "expected one use for LoADDI");
+ // LoADDI has only one use.
+ MachineInstr &Tail = *MRI->use_begin(DestReg)->getParent();
+ switch (Tail.getOpcode()) {
+ default:
+ LLVM_DEBUG(dbgs() << "Don't know how to get offset from this instr:"
+ << Tail);
+ return false;
+ case RISCV::ADDI: {
+ // Offset is simply an immediate operand.
+ int64_t Offset = Tail.getOperand(2).getImm();
+ LLVM_DEBUG(dbgs() << " Offset Instr: " << Tail);
+ foldOffset(HiLUI, LoADDI, Tail, Offset);
+ return true;
+ } break;
+ case RISCV::ADD: {
+ // The offset is too large to fit in the immediate field of ADDI.
+ // This can be in two forms:
+ // 1) LUI hi_Offset followed by:
+ // ADDI lo_offset
+ // This happens in case the offset has non zero bits in
+ // both hi 20 and lo 12 bits.
+ // 2) LUI (offset20)
+ // This happens in case the lower 12 bits of the offset are zeros.
+ int64_t Offset;
+ if (!matchLargeOffset(Tail, DestReg, Offset))
+ return false;
+ foldOffset(HiLUI, LoADDI, Tail, Offset);
+ return true;
+ } break;
+ case RISCV::LB:
+ case RISCV::LH:
+ case RISCV::LW:
+ case RISCV::LBU:
+ case RISCV::LHU:
+ case RISCV::LWU:
+ case RISCV::LD:
+ case RISCV::FLW:
+ case RISCV::FLD:
+ case RISCV::SB:
+ case RISCV::SH:
+ case RISCV::SW:
+ case RISCV::SD:
+ case RISCV::FSW:
+ case RISCV::FSD: {
+ // Transforms the sequence: Into:
+ // HiLUI: lui vreg1, %hi(foo) ---> lui vreg1, %hi(foo+8)
+    // LoADDI: addi vreg2, vreg1, %lo(foo) ---> lw vreg3, %lo(foo+8)(vreg1)
+ // Tail: lw vreg3, 8(vreg2)
+ if (Tail.getOperand(1).isFI())
+ return false;
+ // Register defined by LoADDI should be used in the base part of the
+    // load/store instruction. Otherwise, no folding is possible.
+ unsigned BaseAddrReg = Tail.getOperand(1).getReg();
+ if (DestReg != BaseAddrReg)
+ return false;
+ MachineOperand &TailImmOp = Tail.getOperand(2);
+ int64_t Offset = TailImmOp.getImm();
+ // Update the offsets in global address lowering.
+ HiLUI.getOperand(1).setOffset(Offset);
+ // Update the immediate in the Tail instruction to add the offset.
+ Tail.RemoveOperand(2);
+ MachineOperand &ImmOp = LoADDI.getOperand(2);
+ ImmOp.setOffset(Offset);
+ Tail.addOperand(ImmOp);
+ // Update the base reg in the Tail instruction to feed from LUI.
+ // Output of HiLUI is only used in LoADDI, no need to use
+ // MRI->replaceRegWith().
+ Tail.getOperand(1).setReg(HiLUI.getOperand(0).getReg());
+ DeadInstrs.insert(&LoADDI);
+ return true;
+ } break;
+ }
+ return false;
+}
+
+bool RISCVMergeBaseOffsetOpt::runOnMachineFunction(MachineFunction &Fn) {
+ if (skipFunction(Fn.getFunction()))
+ return false;
+
+ DeadInstrs.clear();
+ MRI = &Fn.getRegInfo();
+ for (MachineBasicBlock &MBB : Fn) {
+ LLVM_DEBUG(dbgs() << "MBB: " << MBB.getName() << "\n");
+ for (MachineInstr &HiLUI : MBB) {
+ MachineInstr *LoADDI = nullptr;
+ if (!detectLuiAddiGlobal(HiLUI, LoADDI))
+ continue;
+ LLVM_DEBUG(dbgs() << " Found lowered global address with one use: "
+ << *LoADDI->getOperand(2).getGlobal() << "\n");
+ // If the use count is only one, merge the offset
+ detectAndFoldOffset(HiLUI, *LoADDI);
+ }
+ }
+ // Delete dead instructions.
+ for (auto *MI : DeadInstrs)
+ MI->eraseFromParent();
+ return true;
+}
+
+/// Returns an instance of the Merge Base Offset Optimization pass.
+FunctionPass *llvm::createRISCVMergeBaseOffsetOptPass() {
+ return new RISCVMergeBaseOffsetOpt();
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
new file mode 100644
index 000000000000..e6a126e3e513
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -0,0 +1,157 @@
+//===-- RISCVRegisterInfo.cpp - RISCV Register Information ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the RISCV implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVRegisterInfo.h"
+#include "RISCV.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#define GET_REGINFO_TARGET_DESC
+#include "RISCVGenRegisterInfo.inc"
+
+using namespace llvm;
+
+RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
+ : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavor*/0,
+ /*PC*/0, HwMode) {}
+
+const MCPhysReg *
+RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
+ auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
+ if (MF->getFunction().hasFnAttribute("interrupt")) {
+ if (Subtarget.hasStdExtD())
+ return CSR_XLEN_F64_Interrupt_SaveList;
+ if (Subtarget.hasStdExtF())
+ return CSR_XLEN_F32_Interrupt_SaveList;
+ return CSR_Interrupt_SaveList;
+ }
+
+ switch (Subtarget.getTargetABI()) {
+ default:
+ llvm_unreachable("Unrecognized ABI");
+ case RISCVABI::ABI_ILP32:
+ case RISCVABI::ABI_LP64:
+ return CSR_ILP32_LP64_SaveList;
+ case RISCVABI::ABI_ILP32F:
+ case RISCVABI::ABI_LP64F:
+ return CSR_ILP32F_LP64F_SaveList;
+ case RISCVABI::ABI_ILP32D:
+ case RISCVABI::ABI_LP64D:
+ return CSR_ILP32D_LP64D_SaveList;
+ }
+}
+
+BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
+ const TargetFrameLowering *TFI = getFrameLowering(MF);
+ BitVector Reserved(getNumRegs());
+
+ // Use markSuperRegs to ensure any register aliases are also reserved
+ markSuperRegs(Reserved, RISCV::X0); // zero
+ markSuperRegs(Reserved, RISCV::X1); // ra
+ markSuperRegs(Reserved, RISCV::X2); // sp
+ markSuperRegs(Reserved, RISCV::X3); // gp
+ markSuperRegs(Reserved, RISCV::X4); // tp
+ if (TFI->hasFP(MF))
+ markSuperRegs(Reserved, RISCV::X8); // fp
+ assert(checkAllSuperRegsMarked(Reserved));
+ return Reserved;
+}
+
+bool RISCVRegisterInfo::isConstantPhysReg(unsigned PhysReg) const {
+ return PhysReg == RISCV::X0;
+}
+
+const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
+ return CSR_NoRegs_RegMask;
+}
+
+void RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
+ int SPAdj, unsigned FIOperandNum,
+ RegScavenger *RS) const {
+ assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");
+
+ MachineInstr &MI = *II;
+ MachineFunction &MF = *MI.getParent()->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const RISCVInstrInfo *TII = MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
+
+ int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
+ unsigned FrameReg;
+ int Offset =
+ getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg) +
+ MI.getOperand(FIOperandNum + 1).getImm();
+
+ if (!isInt<32>(Offset)) {
+ report_fatal_error(
+ "Frame offsets outside of the signed 32-bit range not supported");
+ }
+
+ MachineBasicBlock &MBB = *MI.getParent();
+ bool FrameRegIsKill = false;
+
+ if (!isInt<12>(Offset)) {
+ assert(isInt<32>(Offset) && "Int32 expected");
+ // The offset won't fit in an immediate, so use a scratch register instead
+ // Modify Offset and FrameReg appropriately
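+    // (e.g. an offset needing more than 12 bits, such as 0x12345, is built in
+    // ScratchReg by movImm32, then the ADD below folds it into the frame
+    // address, leaving a zero immediate on the memory operand).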
+ unsigned ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+ TII->movImm32(MBB, II, DL, ScratchReg, Offset);
+ BuildMI(MBB, II, DL, TII->get(RISCV::ADD), ScratchReg)
+ .addReg(FrameReg)
+ .addReg(ScratchReg, RegState::Kill);
+ Offset = 0;
+ FrameReg = ScratchReg;
+ FrameRegIsKill = true;
+ }
+
+ MI.getOperand(FIOperandNum)
+ .ChangeToRegister(FrameReg, false, false, FrameRegIsKill);
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
+}
+
+Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+ const TargetFrameLowering *TFI = getFrameLowering(MF);
+ return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
+}
+
+const uint32_t *
+RISCVRegisterInfo::getCallPreservedMask(const MachineFunction & MF,
+ CallingConv::ID /*CC*/) const {
+ auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
+ if (MF.getFunction().hasFnAttribute("interrupt")) {
+ if (Subtarget.hasStdExtD())
+ return CSR_XLEN_F64_Interrupt_RegMask;
+ if (Subtarget.hasStdExtF())
+ return CSR_XLEN_F32_Interrupt_RegMask;
+ return CSR_Interrupt_RegMask;
+ }
+
+ switch (Subtarget.getTargetABI()) {
+ default:
+ llvm_unreachable("Unrecognized ABI");
+ case RISCVABI::ABI_ILP32:
+ case RISCVABI::ABI_LP64:
+ return CSR_ILP32_LP64_RegMask;
+ case RISCVABI::ABI_ILP32F:
+ case RISCVABI::ABI_LP64F:
+ return CSR_ILP32F_LP64F_RegMask;
+ case RISCVABI::ABI_ILP32D:
+ case RISCVABI::ABI_LP64D:
+ return CSR_ILP32D_LP64D_RegMask;
+ }
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
new file mode 100644
index 000000000000..4f339475508f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
@@ -0,0 +1,58 @@
+//===-- RISCVRegisterInfo.h - RISCV Register Information Impl ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the RISCV implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVREGISTERINFO_H
+#define LLVM_LIB_TARGET_RISCV_RISCVREGISTERINFO_H
+
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+
+#define GET_REGINFO_HEADER
+#include "RISCVGenRegisterInfo.inc"
+
+namespace llvm {
+
+struct RISCVRegisterInfo : public RISCVGenRegisterInfo {
+
+ RISCVRegisterInfo(unsigned HwMode);
+
+ const uint32_t *getCallPreservedMask(const MachineFunction &MF,
+ CallingConv::ID) const override;
+
+ const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
+
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
+
+ bool isConstantPhysReg(unsigned PhysReg) const override;
+
+ const uint32_t *getNoPreservedMask() const override;
+
+ void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
+ unsigned FIOperandNum,
+ RegScavenger *RS = nullptr) const override;
+
+ Register getFrameRegister(const MachineFunction &MF) const override;
+
+ bool requiresRegisterScavenging(const MachineFunction &MF) const override {
+ return true;
+ }
+
+ bool requiresFrameIndexScavenging(const MachineFunction &MF) const override {
+ return true;
+ }
+
+ bool trackLivenessAfterRegAlloc(const MachineFunction &) const override {
+ return true;
+ }
+};
+}
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
new file mode 100644
index 000000000000..79f8ab12f6c0
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -0,0 +1,229 @@
+//===-- RISCVRegisterInfo.td - RISC-V Register defs --------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Declarations that describe the RISC-V register files
+//===----------------------------------------------------------------------===//
+
+let Namespace = "RISCV" in {
+class RISCVReg<bits<5> Enc, string n, list<string> alt = []> : Register<n> {
+ let HWEncoding{4-0} = Enc;
+ let AltNames = alt;
+}
+
+class RISCVReg32<bits<5> Enc, string n, list<string> alt = []> : Register<n> {
+ let HWEncoding{4-0} = Enc;
+ let AltNames = alt;
+}
+
+// Because RISCVReg64 registers have AsmName and AltNames that alias with their
+// 32-bit sub-register, RISCVAsmParser will need to coerce a register number
+// from a RISCVReg32 to the equivalent RISCVReg64 when appropriate.
+def sub_32 : SubRegIndex<32>;
+class RISCVReg64<RISCVReg32 subreg> : Register<""> {
+ let HWEncoding{4-0} = subreg.HWEncoding{4-0};
+ let SubRegs = [subreg];
+ let SubRegIndices = [sub_32];
+ let AsmName = subreg.AsmName;
+ let AltNames = subreg.AltNames;
+}
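+// For example, F0_64 wraps F0_32 and is printed and parsed as "f0" (ABI name
+// "ft0"); see the foreach below that instantiates F0_64 through F31_64.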
+
+def ABIRegAltName : RegAltNameIndex;
+} // Namespace = "RISCV"
+
+// Integer registers
+// CostPerUse is set higher for registers that may not be compressible as they
+// are not part of GPRC, the most restrictive register class used by the
+// compressed instruction set. This will influence the greedy register
+// allocator to reduce the use of registers that can't be encoded in 16 bit
+// instructions. This affects register allocation even when the compressed
+// instruction set isn't targeted; we see no major negative codegen impact.
+
+let RegAltNameIndices = [ABIRegAltName] in {
+ def X0 : RISCVReg<0, "x0", ["zero"]>, DwarfRegNum<[0]>;
+ let CostPerUse = 1 in {
+ def X1 : RISCVReg<1, "x1", ["ra"]>, DwarfRegNum<[1]>;
+ def X2 : RISCVReg<2, "x2", ["sp"]>, DwarfRegNum<[2]>;
+ def X3 : RISCVReg<3, "x3", ["gp"]>, DwarfRegNum<[3]>;
+ def X4 : RISCVReg<4, "x4", ["tp"]>, DwarfRegNum<[4]>;
+ def X5 : RISCVReg<5, "x5", ["t0"]>, DwarfRegNum<[5]>;
+ def X6 : RISCVReg<6, "x6", ["t1"]>, DwarfRegNum<[6]>;
+ def X7 : RISCVReg<7, "x7", ["t2"]>, DwarfRegNum<[7]>;
+ }
+ def X8 : RISCVReg<8, "x8", ["s0", "fp"]>, DwarfRegNum<[8]>;
+ def X9 : RISCVReg<9, "x9", ["s1"]>, DwarfRegNum<[9]>;
+ def X10 : RISCVReg<10,"x10", ["a0"]>, DwarfRegNum<[10]>;
+ def X11 : RISCVReg<11,"x11", ["a1"]>, DwarfRegNum<[11]>;
+ def X12 : RISCVReg<12,"x12", ["a2"]>, DwarfRegNum<[12]>;
+ def X13 : RISCVReg<13,"x13", ["a3"]>, DwarfRegNum<[13]>;
+ def X14 : RISCVReg<14,"x14", ["a4"]>, DwarfRegNum<[14]>;
+ def X15 : RISCVReg<15,"x15", ["a5"]>, DwarfRegNum<[15]>;
+ let CostPerUse = 1 in {
+ def X16 : RISCVReg<16,"x16", ["a6"]>, DwarfRegNum<[16]>;
+ def X17 : RISCVReg<17,"x17", ["a7"]>, DwarfRegNum<[17]>;
+ def X18 : RISCVReg<18,"x18", ["s2"]>, DwarfRegNum<[18]>;
+ def X19 : RISCVReg<19,"x19", ["s3"]>, DwarfRegNum<[19]>;
+ def X20 : RISCVReg<20,"x20", ["s4"]>, DwarfRegNum<[20]>;
+ def X21 : RISCVReg<21,"x21", ["s5"]>, DwarfRegNum<[21]>;
+ def X22 : RISCVReg<22,"x22", ["s6"]>, DwarfRegNum<[22]>;
+ def X23 : RISCVReg<23,"x23", ["s7"]>, DwarfRegNum<[23]>;
+ def X24 : RISCVReg<24,"x24", ["s8"]>, DwarfRegNum<[24]>;
+ def X25 : RISCVReg<25,"x25", ["s9"]>, DwarfRegNum<[25]>;
+ def X26 : RISCVReg<26,"x26", ["s10"]>, DwarfRegNum<[26]>;
+ def X27 : RISCVReg<27,"x27", ["s11"]>, DwarfRegNum<[27]>;
+ def X28 : RISCVReg<28,"x28", ["t3"]>, DwarfRegNum<[28]>;
+ def X29 : RISCVReg<29,"x29", ["t4"]>, DwarfRegNum<[29]>;
+ def X30 : RISCVReg<30,"x30", ["t5"]>, DwarfRegNum<[30]>;
+ def X31 : RISCVReg<31,"x31", ["t6"]>, DwarfRegNum<[31]>;
+ }
+}
+
+def XLenVT : ValueTypeByHwMode<[RV32, RV64, DefaultMode],
+ [i32, i64, i32]>;
+
+// The order of registers represents the preferred allocation sequence.
+// Registers are listed in the order caller-save, callee-save, specials.
+def GPR : RegisterClass<"RISCV", [XLenVT], 32, (add
+ (sequence "X%u", 10, 17),
+ (sequence "X%u", 5, 7),
+ (sequence "X%u", 28, 31),
+ (sequence "X%u", 8, 9),
+ (sequence "X%u", 18, 27),
+ (sequence "X%u", 0, 4)
+ )> {
+ let RegInfos = RegInfoByHwMode<
+ [RV32, RV64, DefaultMode],
+ [RegInfo<32,32,32>, RegInfo<64,64,64>, RegInfo<32,32,32>]>;
+}
+
+// The order of registers represents the preferred allocation sequence.
+// Registers are listed in the order caller-save, callee-save, specials.
+def GPRNoX0 : RegisterClass<"RISCV", [XLenVT], 32, (add
+ (sequence "X%u", 10, 17),
+ (sequence "X%u", 5, 7),
+ (sequence "X%u", 28, 31),
+ (sequence "X%u", 8, 9),
+ (sequence "X%u", 18, 27),
+ (sequence "X%u", 1, 4)
+ )> {
+ let RegInfos = RegInfoByHwMode<
+ [RV32, RV64, DefaultMode],
+ [RegInfo<32,32,32>, RegInfo<64,64,64>, RegInfo<32,32,32>]>;
+}
+
+def GPRNoX0X2 : RegisterClass<"RISCV", [XLenVT], 32, (add
+ (sequence "X%u", 10, 17),
+ (sequence "X%u", 5, 7),
+ (sequence "X%u", 28, 31),
+ (sequence "X%u", 8, 9),
+ (sequence "X%u", 18, 27),
+ X1, X3, X4
+ )> {
+ let RegInfos = RegInfoByHwMode<
+ [RV32, RV64, DefaultMode],
+ [RegInfo<32,32,32>, RegInfo<64,64,64>, RegInfo<32,32,32>]>;
+}
+
+def GPRC : RegisterClass<"RISCV", [XLenVT], 32, (add
+ (sequence "X%u", 10, 15),
+ (sequence "X%u", 8, 9)
+ )> {
+ let RegInfos = RegInfoByHwMode<
+ [RV32, RV64, DefaultMode],
+ [RegInfo<32,32,32>, RegInfo<64,64,64>, RegInfo<32,32,32>]>;
+}
+
+// For indirect tail calls, we can't use callee-saved registers, as they are
+// restored to the saved value before the tail call, which would clobber a call
+// address.
+def GPRTC : RegisterClass<"RISCV", [XLenVT], 32, (add
+ (sequence "X%u", 5, 7),
+ (sequence "X%u", 10, 17),
+ (sequence "X%u", 28, 31)
+ )> {
+ let RegInfos = RegInfoByHwMode<
+ [RV32, RV64, DefaultMode],
+ [RegInfo<32,32,32>, RegInfo<64,64,64>, RegInfo<32,32,32>]>;
+}
+
+def SP : RegisterClass<"RISCV", [XLenVT], 32, (add X2)> {
+ let RegInfos = RegInfoByHwMode<
+ [RV32, RV64, DefaultMode],
+ [RegInfo<32,32,32>, RegInfo<64,64,64>, RegInfo<32,32,32>]>;
+}
+
+// Floating point registers
+let RegAltNameIndices = [ABIRegAltName] in {
+ def F0_32 : RISCVReg32<0, "f0", ["ft0"]>, DwarfRegNum<[32]>;
+ def F1_32 : RISCVReg32<1, "f1", ["ft1"]>, DwarfRegNum<[33]>;
+ def F2_32 : RISCVReg32<2, "f2", ["ft2"]>, DwarfRegNum<[34]>;
+ def F3_32 : RISCVReg32<3, "f3", ["ft3"]>, DwarfRegNum<[35]>;
+ def F4_32 : RISCVReg32<4, "f4", ["ft4"]>, DwarfRegNum<[36]>;
+ def F5_32 : RISCVReg32<5, "f5", ["ft5"]>, DwarfRegNum<[37]>;
+ def F6_32 : RISCVReg32<6, "f6", ["ft6"]>, DwarfRegNum<[38]>;
+ def F7_32 : RISCVReg32<7, "f7", ["ft7"]>, DwarfRegNum<[39]>;
+ def F8_32 : RISCVReg32<8, "f8", ["fs0"]>, DwarfRegNum<[40]>;
+ def F9_32 : RISCVReg32<9, "f9", ["fs1"]>, DwarfRegNum<[41]>;
+ def F10_32 : RISCVReg32<10,"f10", ["fa0"]>, DwarfRegNum<[42]>;
+ def F11_32 : RISCVReg32<11,"f11", ["fa1"]>, DwarfRegNum<[43]>;
+ def F12_32 : RISCVReg32<12,"f12", ["fa2"]>, DwarfRegNum<[44]>;
+ def F13_32 : RISCVReg32<13,"f13", ["fa3"]>, DwarfRegNum<[45]>;
+ def F14_32 : RISCVReg32<14,"f14", ["fa4"]>, DwarfRegNum<[46]>;
+ def F15_32 : RISCVReg32<15,"f15", ["fa5"]>, DwarfRegNum<[47]>;
+ def F16_32 : RISCVReg32<16,"f16", ["fa6"]>, DwarfRegNum<[48]>;
+ def F17_32 : RISCVReg32<17,"f17", ["fa7"]>, DwarfRegNum<[49]>;
+ def F18_32 : RISCVReg32<18,"f18", ["fs2"]>, DwarfRegNum<[50]>;
+ def F19_32 : RISCVReg32<19,"f19", ["fs3"]>, DwarfRegNum<[51]>;
+ def F20_32 : RISCVReg32<20,"f20", ["fs4"]>, DwarfRegNum<[52]>;
+ def F21_32 : RISCVReg32<21,"f21", ["fs5"]>, DwarfRegNum<[53]>;
+ def F22_32 : RISCVReg32<22,"f22", ["fs6"]>, DwarfRegNum<[54]>;
+ def F23_32 : RISCVReg32<23,"f23", ["fs7"]>, DwarfRegNum<[55]>;
+ def F24_32 : RISCVReg32<24,"f24", ["fs8"]>, DwarfRegNum<[56]>;
+ def F25_32 : RISCVReg32<25,"f25", ["fs9"]>, DwarfRegNum<[57]>;
+ def F26_32 : RISCVReg32<26,"f26", ["fs10"]>, DwarfRegNum<[58]>;
+ def F27_32 : RISCVReg32<27,"f27", ["fs11"]>, DwarfRegNum<[59]>;
+ def F28_32 : RISCVReg32<28,"f28", ["ft8"]>, DwarfRegNum<[60]>;
+ def F29_32 : RISCVReg32<29,"f29", ["ft9"]>, DwarfRegNum<[61]>;
+ def F30_32 : RISCVReg32<30,"f30", ["ft10"]>, DwarfRegNum<[62]>;
+ def F31_32 : RISCVReg32<31,"f31", ["ft11"]>, DwarfRegNum<[63]>;
+
+ foreach Index = 0-31 in {
+ def F#Index#_64 : RISCVReg64<!cast<RISCVReg32>("F"#Index#"_32")>,
+ DwarfRegNum<[!add(Index, 32)]>;
+ }
+}
+
+// The order of registers represents the preferred allocation sequence,
+// meaning caller-save regs are listed before callee-save.
+def FPR32 : RegisterClass<"RISCV", [f32], 32, (add
+ (sequence "F%u_32", 0, 7),
+ (sequence "F%u_32", 10, 17),
+ (sequence "F%u_32", 28, 31),
+ (sequence "F%u_32", 8, 9),
+ (sequence "F%u_32", 18, 27)
+)>;
+
+def FPR32C : RegisterClass<"RISCV", [f32], 32, (add
+ (sequence "F%u_32", 10, 15),
+ (sequence "F%u_32", 8, 9)
+)>;
+
+// The order of registers represents the preferred allocation sequence,
+// meaning caller-save regs are listed before callee-save.
+def FPR64 : RegisterClass<"RISCV", [f64], 64, (add
+ (sequence "F%u_64", 0, 7),
+ (sequence "F%u_64", 10, 17),
+ (sequence "F%u_64", 28, 31),
+ (sequence "F%u_64", 8, 9),
+ (sequence "F%u_64", 18, 27)
+)>;
+
+def FPR64C : RegisterClass<"RISCV", [f64], 64, (add
+ (sequence "F%u_64", 10, 15),
+ (sequence "F%u_64", 8, 9)
+)>;
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
new file mode 100644
index 000000000000..6902ed75d852
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
@@ -0,0 +1,50 @@
+//===-- RISCVSubtarget.cpp - RISCV Subtarget Information ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the RISCV specific subclass of TargetSubtargetInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVSubtarget.h"
+#include "RISCV.h"
+#include "RISCVFrameLowering.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-subtarget"
+
+#define GET_SUBTARGETINFO_TARGET_DESC
+#define GET_SUBTARGETINFO_CTOR
+#include "RISCVGenSubtargetInfo.inc"
+
+void RISCVSubtarget::anchor() {}
+
+RISCVSubtarget &RISCVSubtarget::initializeSubtargetDependencies(
+ const Triple &TT, StringRef CPU, StringRef FS, StringRef ABIName) {
+ // Determine default and user-specified characteristics
+ bool Is64Bit = TT.isArch64Bit();
+ std::string CPUName = CPU;
+ if (CPUName.empty())
+ CPUName = Is64Bit ? "generic-rv64" : "generic-rv32";
+ ParseSubtargetFeatures(CPUName, FS);
+ if (Is64Bit) {
+ XLenVT = MVT::i64;
+ XLen = 64;
+ }
+
+ TargetABI = RISCVABI::computeTargetABI(TT, getFeatureBits(), ABIName);
+ RISCVFeatures::validate(TT, getFeatureBits());
+ return *this;
+}
+
+RISCVSubtarget::RISCVSubtarget(const Triple &TT, StringRef CPU, StringRef FS,
+ StringRef ABIName, const TargetMachine &TM)
+ : RISCVGenSubtargetInfo(TT, CPU, FS),
+ FrameLowering(initializeSubtargetDependencies(TT, CPU, FS, ABIName)),
+ InstrInfo(), RegInfo(getHwMode()), TLInfo(TM, *this) {}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.h
new file mode 100644
index 000000000000..106ff49f021a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -0,0 +1,92 @@
+//===-- RISCVSubtarget.h - Define Subtarget for the RISCV -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the RISCV specific subclass of TargetSubtargetInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVSUBTARGET_H
+#define LLVM_LIB_TARGET_RISCV_RISCVSUBTARGET_H
+
+#include "RISCVFrameLowering.h"
+#include "RISCVISelLowering.h"
+#include "RISCVInstrInfo.h"
+#include "Utils/RISCVBaseInfo.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Target/TargetMachine.h"
+
+#define GET_SUBTARGETINFO_HEADER
+#include "RISCVGenSubtargetInfo.inc"
+
+namespace llvm {
+class StringRef;
+
+class RISCVSubtarget : public RISCVGenSubtargetInfo {
+ virtual void anchor();
+ bool HasStdExtM = false;
+ bool HasStdExtA = false;
+ bool HasStdExtF = false;
+ bool HasStdExtD = false;
+ bool HasStdExtC = false;
+ bool HasRV64 = false;
+ bool IsRV32E = false;
+ bool EnableLinkerRelax = false;
+ unsigned XLen = 32;
+ MVT XLenVT = MVT::i32;
+ RISCVABI::ABI TargetABI = RISCVABI::ABI_Unknown;
+ RISCVFrameLowering FrameLowering;
+ RISCVInstrInfo InstrInfo;
+ RISCVRegisterInfo RegInfo;
+ RISCVTargetLowering TLInfo;
+ SelectionDAGTargetInfo TSInfo;
+
+ /// Initializes using the passed in CPU and feature strings so that we can
+ /// use initializer lists for subtarget initialization.
+ RISCVSubtarget &initializeSubtargetDependencies(const Triple &TT,
+ StringRef CPU, StringRef FS,
+ StringRef ABIName);
+
+public:
+ // Initializes the data members to match that of the specified triple.
+ RISCVSubtarget(const Triple &TT, StringRef CPU, StringRef FS,
+ StringRef ABIName, const TargetMachine &TM);
+
+ // Parses features string setting specified subtarget options. The
+ // definition of this function is auto-generated by tblgen.
+ void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
+
+ const RISCVFrameLowering *getFrameLowering() const override {
+ return &FrameLowering;
+ }
+ const RISCVInstrInfo *getInstrInfo() const override { return &InstrInfo; }
+ const RISCVRegisterInfo *getRegisterInfo() const override {
+ return &RegInfo;
+ }
+ const RISCVTargetLowering *getTargetLowering() const override {
+ return &TLInfo;
+ }
+ const SelectionDAGTargetInfo *getSelectionDAGInfo() const override {
+ return &TSInfo;
+ }
+ bool hasStdExtM() const { return HasStdExtM; }
+ bool hasStdExtA() const { return HasStdExtA; }
+ bool hasStdExtF() const { return HasStdExtF; }
+ bool hasStdExtD() const { return HasStdExtD; }
+ bool hasStdExtC() const { return HasStdExtC; }
+ bool is64Bit() const { return HasRV64; }
+ bool isRV32E() const { return IsRV32E; }
+ bool enableLinkerRelax() const { return EnableLinkerRelax; }
+ MVT getXLenVT() const { return XLenVT; }
+ unsigned getXLen() const { return XLen; }
+ RISCVABI::ABI getTargetABI() const { return TargetABI; }
+};
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSystemOperands.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSystemOperands.td
new file mode 100644
index 000000000000..a46a32c4e7f2
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSystemOperands.td
@@ -0,0 +1,349 @@
+//===- RISCVSystemOperands.td ----------------------------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the symbolic operands permitted for various kinds of
+// RISC-V system instruction.
+//
+//===----------------------------------------------------------------------===//
+
+include "llvm/TableGen/SearchableTable.td"
+
+//===----------------------------------------------------------------------===//
+// CSR (control and status register read/write) instruction options.
+//===----------------------------------------------------------------------===//
+
+class SysReg<string name, bits<12> op> {
+ string Name = name;
+ bits<12> Encoding = op;
+ // FIXME: add these additional fields when needed.
+ // Privilege Access: Read and Write = 0, 1, 2; Read-Only = 3.
+  // Privilege Mode: User = 0, Supervisor = 1, or Machine = 3.
+ // bits<2> ReadWrite = op{11 - 10};
+ // bits<2> XMode = op{9 - 8};
+ // Check Extra field name and what bits 7-6 correspond to.
+ // bits<2> Extra = op{7 - 6};
+ // Register number without the privilege bits.
+ // bits<6> Number = op{5 - 0};
+ code FeaturesRequired = [{ {} }];
+ bit isRV32Only = 0;
+}
+
+def SysRegsList : GenericTable {
+ let FilterClass = "SysReg";
+ // FIXME: add "ReadWrite", "Mode", "Extra", "Number" fields when needed.
+ let Fields = [ "Name", "Encoding", "FeaturesRequired", "isRV32Only" ];
+
+ let PrimaryKey = [ "Encoding" ];
+ let PrimaryKeyName = "lookupSysRegByEncoding";
+}
+
+def lookupSysRegByName : SearchIndex {
+ let Table = SysRegsList;
+ let Key = [ "Name" ];
+}
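+
+// From the table and index definitions above, tblgen emits lookup helpers
+// into RISCVGenSystemOperands.inc. A sketch of the generated C++ interface
+// (the names come from PrimaryKeyName and the SearchIndex above; the exact
+// signatures are determined by tblgen):
+//
+//   const SysReg *lookupSysRegByEncoding(uint16_t Encoding);
+//   const SysReg *lookupSysRegByName(StringRef Name);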
+
+// The following CSR encodings match those given in Tables 2.2,
+// 2.3, 2.4 and 2.5 in the RISC-V Instruction Set Manual
+// Volume II: Privileged Architecture.
+
+//===--------------------------
+// User Trap Setup
+//===--------------------------
+def : SysReg<"ustatus", 0x000>;
+def : SysReg<"uie", 0x004>;
+def : SysReg<"utvec", 0x005>;
+
+//===--------------------------
+// User Trap Handling
+//===--------------------------
+def : SysReg<"uscratch", 0x040>;
+def : SysReg<"uepc", 0x041>;
+def : SysReg<"ucause", 0x042>;
+def : SysReg<"utval", 0x043>;
+def : SysReg<"uip", 0x044>;
+
+//===--------------------------
+// User Floating-Point CSRs
+//===--------------------------
+
+def FFLAGS : SysReg<"fflags", 0x001>;
+def FRM : SysReg<"frm", 0x002>;
+def FCSR : SysReg<"fcsr", 0x003>;
+
+//===--------------------------
+// User Counter/Timers
+//===--------------------------
+def CYCLE : SysReg<"cycle", 0xC00>;
+def TIME : SysReg<"time", 0xC01>;
+def INSTRET : SysReg<"instret", 0xC02>;
+
+def : SysReg<"hpmcounter3", 0xC03>;
+def : SysReg<"hpmcounter4", 0xC04>;
+def : SysReg<"hpmcounter5", 0xC05>;
+def : SysReg<"hpmcounter6", 0xC06>;
+def : SysReg<"hpmcounter7", 0xC07>;
+def : SysReg<"hpmcounter8", 0xC08>;
+def : SysReg<"hpmcounter9", 0xC09>;
+def : SysReg<"hpmcounter10", 0xC0A>;
+def : SysReg<"hpmcounter11", 0xC0B>;
+def : SysReg<"hpmcounter12", 0xC0C>;
+def : SysReg<"hpmcounter13", 0xC0D>;
+def : SysReg<"hpmcounter14", 0xC0E>;
+def : SysReg<"hpmcounter15", 0xC0F>;
+def : SysReg<"hpmcounter16", 0xC10>;
+def : SysReg<"hpmcounter17", 0xC11>;
+def : SysReg<"hpmcounter18", 0xC12>;
+def : SysReg<"hpmcounter19", 0xC13>;
+def : SysReg<"hpmcounter20", 0xC14>;
+def : SysReg<"hpmcounter21", 0xC15>;
+def : SysReg<"hpmcounter22", 0xC16>;
+def : SysReg<"hpmcounter23", 0xC17>;
+def : SysReg<"hpmcounter24", 0xC18>;
+def : SysReg<"hpmcounter25", 0xC19>;
+def : SysReg<"hpmcounter26", 0xC1A>;
+def : SysReg<"hpmcounter27", 0xC1B>;
+def : SysReg<"hpmcounter28", 0xC1C>;
+def : SysReg<"hpmcounter29", 0xC1D>;
+def : SysReg<"hpmcounter30", 0xC1E>;
+def : SysReg<"hpmcounter31", 0xC1F>;
+
+let isRV32Only = 1 in {
+def CYCLEH : SysReg<"cycleh", 0xC80>;
+def TIMEH : SysReg<"timeh", 0xC81>;
+def INSTRETH : SysReg<"instreth", 0xC82>;
+
+def: SysReg<"hpmcounter3h", 0xC83>;
+def: SysReg<"hpmcounter4h", 0xC84>;
+def: SysReg<"hpmcounter5h", 0xC85>;
+def: SysReg<"hpmcounter6h", 0xC86>;
+def: SysReg<"hpmcounter7h", 0xC87>;
+def: SysReg<"hpmcounter8h", 0xC88>;
+def: SysReg<"hpmcounter9h", 0xC89>;
+def: SysReg<"hpmcounter10h", 0xC8A>;
+def: SysReg<"hpmcounter11h", 0xC8B>;
+def: SysReg<"hpmcounter12h", 0xC8C>;
+def: SysReg<"hpmcounter13h", 0xC8D>;
+def: SysReg<"hpmcounter14h", 0xC8E>;
+def: SysReg<"hpmcounter15h", 0xC8F>;
+def: SysReg<"hpmcounter16h", 0xC90>;
+def: SysReg<"hpmcounter17h", 0xC91>;
+def: SysReg<"hpmcounter18h", 0xC92>;
+def: SysReg<"hpmcounter19h", 0xC93>;
+def: SysReg<"hpmcounter20h", 0xC94>;
+def: SysReg<"hpmcounter21h", 0xC95>;
+def: SysReg<"hpmcounter22h", 0xC96>;
+def: SysReg<"hpmcounter23h", 0xC97>;
+def: SysReg<"hpmcounter24h", 0xC98>;
+def: SysReg<"hpmcounter25h", 0xC99>;
+def: SysReg<"hpmcounter26h", 0xC9A>;
+def: SysReg<"hpmcounter27h", 0xC9B>;
+def: SysReg<"hpmcounter28h", 0xC9C>;
+def: SysReg<"hpmcounter29h", 0xC9D>;
+def: SysReg<"hpmcounter30h", 0xC9E>;
+def: SysReg<"hpmcounter31h", 0xC9F>;
+}
+
+//===--------------------------
+// Supervisor Trap Setup
+//===--------------------------
+def : SysReg<"sstatus", 0x100>;
+def : SysReg<"sedeleg", 0x102>;
+def : SysReg<"sideleg", 0x103>;
+def : SysReg<"sie", 0x104>;
+def : SysReg<"stvec", 0x105>;
+def : SysReg<"scounteren", 0x106>;
+
+//===--------------------------
+// Supervisor Trap Handling
+//===--------------------------
+def : SysReg<"sscratch", 0x140>;
+def : SysReg<"sepc", 0x141>;
+def : SysReg<"scause", 0x142>;
+def : SysReg<"stval", 0x143>;
+def : SysReg<"sip", 0x144>;
+
+//===-------------------------------------
+// Supervisor Protection and Translation
+//===-------------------------------------
+def : SysReg<"satp", 0x180>;
+
+//===-----------------------------
+// Machine Information Registers
+//===-----------------------------
+
+def : SysReg<"mvendorid", 0xF11>;
+def : SysReg<"marchid", 0xF12>;
+def : SysReg<"mimpid", 0xF13>;
+def : SysReg<"mhartid", 0xF14>;
+
+//===-----------------------------
+// Machine Trap Setup
+//===-----------------------------
+def : SysReg<"mstatus", 0x300>;
+def : SysReg<"misa", 0x301>;
+def : SysReg<"medeleg", 0x302>;
+def : SysReg<"mideleg", 0x303>;
+def : SysReg<"mie", 0x304>;
+def : SysReg<"mtvec", 0x305>;
+def : SysReg<"mcounteren", 0x306>;
+
+//===-----------------------------
+// Machine Trap Handling
+//===-----------------------------
+def : SysReg<"mscratch", 0x340>;
+def : SysReg<"mepc", 0x341>;
+def : SysReg<"mcause", 0x342>;
+def : SysReg<"mtval", 0x343>;
+def : SysReg<"mip", 0x344>;
+
+//===----------------------------------
+// Machine Protection and Translation
+//===----------------------------------
+def : SysReg<"pmpcfg0", 0x3A0>;
+def : SysReg<"pmpcfg2", 0x3A2>;
+let isRV32Only = 1 in {
+def : SysReg<"pmpcfg1", 0x3A1>;
+def : SysReg<"pmpcfg3", 0x3A3>;
+}
+
+def : SysReg<"pmpaddr0", 0x3B0>;
+def : SysReg<"pmpaddr1", 0x3B1>;
+def : SysReg<"pmpaddr2", 0x3B2>;
+def : SysReg<"pmpaddr3", 0x3B3>;
+def : SysReg<"pmpaddr4", 0x3B4>;
+def : SysReg<"pmpaddr5", 0x3B5>;
+def : SysReg<"pmpaddr6", 0x3B6>;
+def : SysReg<"pmpaddr7", 0x3B7>;
+def : SysReg<"pmpaddr8", 0x3B8>;
+def : SysReg<"pmpaddr9", 0x3B9>;
+def : SysReg<"pmpaddr10", 0x3BA>;
+def : SysReg<"pmpaddr11", 0x3BB>;
+def : SysReg<"pmpaddr12", 0x3BC>;
+def : SysReg<"pmpaddr13", 0x3BD>;
+def : SysReg<"pmpaddr14", 0x3BE>;
+def : SysReg<"pmpaddr15", 0x3BF>;
+
+
+//===--------------------------
+// Machine Counter and Timers
+//===--------------------------
+def : SysReg<"mcycle", 0xB00>;
+def : SysReg<"minstret", 0xB02>;
+
+def : SysReg<"mhpmcounter3", 0xB03>;
+def : SysReg<"mhpmcounter4", 0xB04>;
+def : SysReg<"mhpmcounter5", 0xB05>;
+def : SysReg<"mhpmcounter6", 0xB06>;
+def : SysReg<"mhpmcounter7", 0xB07>;
+def : SysReg<"mhpmcounter8", 0xB08>;
+def : SysReg<"mhpmcounter9", 0xB09>;
+def : SysReg<"mhpmcounter10", 0xB0A>;
+def : SysReg<"mhpmcounter11", 0xB0B>;
+def : SysReg<"mhpmcounter12", 0xB0C>;
+def : SysReg<"mhpmcounter13", 0xB0D>;
+def : SysReg<"mhpmcounter14", 0xB0E>;
+def : SysReg<"mhpmcounter15", 0xB0F>;
+def : SysReg<"mhpmcounter16", 0xB10>;
+def : SysReg<"mhpmcounter17", 0xB11>;
+def : SysReg<"mhpmcounter18", 0xB12>;
+def : SysReg<"mhpmcounter19", 0xB13>;
+def : SysReg<"mhpmcounter20", 0xB14>;
+def : SysReg<"mhpmcounter21", 0xB15>;
+def : SysReg<"mhpmcounter22", 0xB16>;
+def : SysReg<"mhpmcounter23", 0xB17>;
+def : SysReg<"mhpmcounter24", 0xB18>;
+def : SysReg<"mhpmcounter25", 0xB19>;
+def : SysReg<"mhpmcounter26", 0xB1A>;
+def : SysReg<"mhpmcounter27", 0xB1B>;
+def : SysReg<"mhpmcounter28", 0xB1C>;
+def : SysReg<"mhpmcounter29", 0xB1D>;
+def : SysReg<"mhpmcounter30", 0xB1E>;
+def : SysReg<"mhpmcounter31", 0xB1F>;
+
+let isRV32Only = 1 in {
+def: SysReg<"mcycleh", 0xB80>;
+def: SysReg<"minstreth", 0xB82>;
+
+def: SysReg<"mhpmcounter3h", 0xB83>;
+def: SysReg<"mhpmcounter4h", 0xB84>;
+def: SysReg<"mhpmcounter5h", 0xB85>;
+def: SysReg<"mhpmcounter6h", 0xB86>;
+def: SysReg<"mhpmcounter7h", 0xB87>;
+def: SysReg<"mhpmcounter8h", 0xB88>;
+def: SysReg<"mhpmcounter9h", 0xB89>;
+def: SysReg<"mhpmcounter10h", 0xB8A>;
+def: SysReg<"mhpmcounter11h", 0xB8B>;
+def: SysReg<"mhpmcounter12h", 0xB8C>;
+def: SysReg<"mhpmcounter13h", 0xB8D>;
+def: SysReg<"mhpmcounter14h", 0xB8E>;
+def: SysReg<"mhpmcounter15h", 0xB8F>;
+def: SysReg<"mhpmcounter16h", 0xB90>;
+def: SysReg<"mhpmcounter17h", 0xB91>;
+def: SysReg<"mhpmcounter18h", 0xB92>;
+def: SysReg<"mhpmcounter19h", 0xB93>;
+def: SysReg<"mhpmcounter20h", 0xB94>;
+def: SysReg<"mhpmcounter21h", 0xB95>;
+def: SysReg<"mhpmcounter22h", 0xB96>;
+def: SysReg<"mhpmcounter23h", 0xB97>;
+def: SysReg<"mhpmcounter24h", 0xB98>;
+def: SysReg<"mhpmcounter25h", 0xB99>;
+def: SysReg<"mhpmcounter26h", 0xB9A>;
+def: SysReg<"mhpmcounter27h", 0xB9B>;
+def: SysReg<"mhpmcounter28h", 0xB9C>;
+def: SysReg<"mhpmcounter29h", 0xB9D>;
+def: SysReg<"mhpmcounter30h", 0xB9E>;
+def: SysReg<"mhpmcounter31h", 0xB9F>;
+}
+
+//===--------------------------
+// Machine Counter Setup
+//===--------------------------
+def : SysReg<"mhpmevent3", 0x323>;
+def : SysReg<"mhpmevent4", 0x324>;
+def : SysReg<"mhpmevent5", 0x325>;
+def : SysReg<"mhpmevent6", 0x326>;
+def : SysReg<"mhpmevent7", 0x327>;
+def : SysReg<"mhpmevent8", 0x328>;
+def : SysReg<"mhpmevent9", 0x329>;
+def : SysReg<"mhpmevent10", 0x32A>;
+def : SysReg<"mhpmevent11", 0x32B>;
+def : SysReg<"mhpmevent12", 0x32C>;
+def : SysReg<"mhpmevent13", 0x32D>;
+def : SysReg<"mhpmevent14", 0x32E>;
+def : SysReg<"mhpmevent15", 0x32F>;
+def : SysReg<"mhpmevent16", 0x330>;
+def : SysReg<"mhpmevent17", 0x331>;
+def : SysReg<"mhpmevent18", 0x332>;
+def : SysReg<"mhpmevent19", 0x333>;
+def : SysReg<"mhpmevent20", 0x334>;
+def : SysReg<"mhpmevent21", 0x335>;
+def : SysReg<"mhpmevent22", 0x336>;
+def : SysReg<"mhpmevent23", 0x337>;
+def : SysReg<"mhpmevent24", 0x338>;
+def : SysReg<"mhpmevent25", 0x339>;
+def : SysReg<"mhpmevent26", 0x33A>;
+def : SysReg<"mhpmevent27", 0x33B>;
+def : SysReg<"mhpmevent28", 0x33C>;
+def : SysReg<"mhpmevent29", 0x33D>;
+def : SysReg<"mhpmevent30", 0x33E>;
+def : SysReg<"mhpmevent31", 0x33F>;
+
+//===-----------------------------------------------
+// Debug/Trace Registers (shared with Debug Mode)
+//===-----------------------------------------------
+def : SysReg<"tselect", 0x7A0>;
+def : SysReg<"tdata1", 0x7A1>;
+def : SysReg<"tdata2", 0x7A2>;
+def : SysReg<"tdata3", 0x7A3>;
+
+//===-----------------------------------------------
+// Debug Mode Registers
+//===-----------------------------------------------
+def : SysReg<"dcsr", 0x7B0>;
+def : SysReg<"dpc", 0x7B1>;
+def : SysReg<"dscratch", 0x7B2>;
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
new file mode 100644
index 000000000000..f4e6ed9f6284
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -0,0 +1,115 @@
+//===-- RISCVTargetMachine.cpp - Define TargetMachine for RISCV -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the RISCV specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVTargetMachine.h"
+#include "RISCV.h"
+#include "RISCVTargetObjectFile.h"
+#include "RISCVTargetTransformInfo.h"
+#include "TargetInfo/RISCVTargetInfo.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Target/TargetOptions.h"
+using namespace llvm;
+
+extern "C" void LLVMInitializeRISCVTarget() {
+ RegisterTargetMachine<RISCVTargetMachine> X(getTheRISCV32Target());
+ RegisterTargetMachine<RISCVTargetMachine> Y(getTheRISCV64Target());
+ auto PR = PassRegistry::getPassRegistry();
+ initializeRISCVExpandPseudoPass(*PR);
+}
+
+static StringRef computeDataLayout(const Triple &TT) {
+ if (TT.isArch64Bit()) {
+ return "e-m:e-p:64:64-i64:64-i128:128-n64-S128";
+ } else {
+ assert(TT.isArch32Bit() && "only RV32 and RV64 are currently supported");
+ return "e-m:e-p:32:32-i64:64-n32-S128";
+ }
+}
+
+static Reloc::Model getEffectiveRelocModel(const Triple &TT,
+ Optional<Reloc::Model> RM) {
+ if (!RM.hasValue())
+ return Reloc::Static;
+ return *RM;
+}
+
+RISCVTargetMachine::RISCVTargetMachine(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Optional<Reloc::Model> RM,
+ Optional<CodeModel::Model> CM,
+ CodeGenOpt::Level OL, bool JIT)
+ : LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS, Options,
+ getEffectiveRelocModel(TT, RM),
+ getEffectiveCodeModel(CM, CodeModel::Small), OL),
+ TLOF(make_unique<RISCVELFTargetObjectFile>()),
+ Subtarget(TT, CPU, FS, Options.MCOptions.getABIName(), *this) {
+ initAsmInfo();
+}
+
+TargetTransformInfo
+RISCVTargetMachine::getTargetTransformInfo(const Function &F) {
+ return TargetTransformInfo(RISCVTTIImpl(this, F));
+}
+
+namespace {
+class RISCVPassConfig : public TargetPassConfig {
+public:
+ RISCVPassConfig(RISCVTargetMachine &TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ RISCVTargetMachine &getRISCVTargetMachine() const {
+ return getTM<RISCVTargetMachine>();
+ }
+
+ void addIRPasses() override;
+ bool addInstSelector() override;
+ void addPreEmitPass() override;
+ void addPreEmitPass2() override;
+ void addPreRegAlloc() override;
+};
+}
+
+TargetPassConfig *RISCVTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new RISCVPassConfig(*this, PM);
+}
+
+void RISCVPassConfig::addIRPasses() {
+ addPass(createAtomicExpandPass());
+ TargetPassConfig::addIRPasses();
+}
+
+bool RISCVPassConfig::addInstSelector() {
+ addPass(createRISCVISelDag(getRISCVTargetMachine()));
+
+ return false;
+}
+
+void RISCVPassConfig::addPreEmitPass() { addPass(&BranchRelaxationPassID); }
+
+void RISCVPassConfig::addPreEmitPass2() {
+ // Schedule the expansion of AMOs at the last possible moment, avoiding the
+ // possibility for other passes to break the requirements for forward
+ // progress in the LR/SC block.
+ addPass(createRISCVExpandPseudoPass());
+}
+
+void RISCVPassConfig::addPreRegAlloc() {
+ addPass(createRISCVMergeBaseOffsetOptPass());
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.h
new file mode 100644
index 000000000000..ebf3f3c07955
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.h
@@ -0,0 +1,47 @@
+//===-- RISCVTargetMachine.h - Define TargetMachine for RISCV ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the RISCV specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETMACHINE_H
+#define LLVM_LIB_TARGET_RISCV_RISCVTARGETMACHINE_H
+
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+class RISCVTargetMachine : public LLVMTargetMachine {
+ std::unique_ptr<TargetLoweringObjectFile> TLOF;
+ RISCVSubtarget Subtarget;
+
+public:
+ RISCVTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Optional<Reloc::Model> RM, Optional<CodeModel::Model> CM,
+ CodeGenOpt::Level OL, bool JIT);
+
+ const RISCVSubtarget *getSubtargetImpl(const Function &) const override {
+ return &Subtarget;
+ }
+
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+
+ TargetLoweringObjectFile *getObjFileLowering() const override {
+ return TLOF.get();
+ }
+
+ TargetTransformInfo getTargetTransformInfo(const Function &F) override;
+};
+}
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetObjectFile.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetObjectFile.cpp
new file mode 100644
index 000000000000..bbd45c970d3d
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetObjectFile.cpp
@@ -0,0 +1,114 @@
+//===-- RISCVTargetObjectFile.cpp - RISCV Object Info -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVTargetObjectFile.h"
+#include "RISCVTargetMachine.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSectionELF.h"
+
+using namespace llvm;
+
+void RISCVELFTargetObjectFile::Initialize(MCContext &Ctx,
+ const TargetMachine &TM) {
+ TargetLoweringObjectFileELF::Initialize(Ctx, TM);
+ InitializeELF(TM.Options.UseInitArray);
+
+ SmallDataSection = getContext().getELFSection(
+ ".sdata", ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC);
+ SmallBSSSection = getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
+ ELF::SHF_WRITE | ELF::SHF_ALLOC);
+}
+
+// A global must be placed in a small section if its size is less than the
+// small section size threshold. Data in this section can be addressed using
+// the gp_rel operator.
+bool RISCVELFTargetObjectFile::isInSmallSection(uint64_t Size) const {
+ // gcc has traditionally not treated zero-sized objects as small data, so this
+ // is effectively part of the ABI.
+ return Size > 0 && Size <= SSThreshold;
+}
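+
+// For example, with the default SSThreshold of 8, a 4-byte 'int' global is
+// treated as small data, while a 16-byte aggregate is not (an illustration of
+// the size check above; the threshold may be overridden via module metadata).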
+
+// Return true if this global address should be placed into the small data/bss
+// section.
+bool RISCVELFTargetObjectFile::isGlobalInSmallSection(
+ const GlobalObject *GO, const TargetMachine &TM) const {
+ // Only global variables, not functions.
+ const GlobalVariable *GVA = dyn_cast<GlobalVariable>(GO);
+ if (!GVA)
+ return false;
+
+ // If the variable has an explicit section, it is placed in that section.
+ if (GVA->hasSection()) {
+ StringRef Section = GVA->getSection();
+
+ // Explicitly placing any variable in the small data section overrides
+ // the global -G value.
+ if (Section == ".sdata" || Section == ".sbss")
+ return true;
+
+    // Otherwise don't put the variable in the small section if it has an
+    // explicit section name.
+ return false;
+ }
+
+ if (((GVA->hasExternalLinkage() && GVA->isDeclaration()) ||
+ GVA->hasCommonLinkage()))
+ return false;
+
+ Type *Ty = GVA->getValueType();
+ // It is possible that the type of the global is unsized, i.e. a declaration
+  // of an extern struct. In this case don't presume it is in the small data
+ // section. This happens e.g. when building the FreeBSD kernel.
+ if (!Ty->isSized())
+ return false;
+
+ return isInSmallSection(
+ GVA->getParent()->getDataLayout().getTypeAllocSize(Ty));
+}
+
+MCSection *RISCVELFTargetObjectFile::SelectSectionForGlobal(
+ const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
+ // Handle Small Section classification here.
+ if (Kind.isBSS() && isGlobalInSmallSection(GO, TM))
+ return SmallBSSSection;
+ if (Kind.isData() && isGlobalInSmallSection(GO, TM))
+ return SmallDataSection;
+
+ // Otherwise, we work the same as ELF.
+ return TargetLoweringObjectFileELF::SelectSectionForGlobal(GO, Kind, TM);
+}
+
+void RISCVELFTargetObjectFile::getModuleMetadata(Module &M) {
+ SmallVector<Module::ModuleFlagEntry, 8> ModuleFlags;
+ M.getModuleFlagsMetadata(ModuleFlags);
+
+ for (const auto &MFE : ModuleFlags) {
+ StringRef Key = MFE.Key->getString();
+ if (Key == "SmallDataLimit") {
+ SSThreshold = mdconst::extract<ConstantInt>(MFE.Val)->getZExtValue();
+ break;
+ }
+ }
+}
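+
+// As an illustration (a sketch, not part of this change), the threshold read
+// above comes from IR module metadata such as:
+//
+//   !llvm.module.flags = !{!0}
+//   !0 = !{i32 1, !"SmallDataLimit", i32 16}
+//
+// which would set SSThreshold to 16 for the module.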
+
+/// Return true if this constant should be placed into the small data section.
+bool RISCVELFTargetObjectFile::isConstantInSmallSection(
+ const DataLayout &DL, const Constant *CN) const {
+ return isInSmallSection(DL.getTypeAllocSize(CN->getType()));
+}
+
+MCSection *RISCVELFTargetObjectFile::getSectionForConstant(
+ const DataLayout &DL, SectionKind Kind, const Constant *C,
+ unsigned &Align) const {
+ if (isConstantInSmallSection(DL, C))
+ return SmallDataSection;
+
+ // Otherwise, we work the same as ELF.
+ return TargetLoweringObjectFileELF::getSectionForConstant(DL, Kind, C, Align);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetObjectFile.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetObjectFile.h
new file mode 100644
index 000000000000..b2daaaa9d364
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetObjectFile.h
@@ -0,0 +1,48 @@
+//===-- RISCVTargetObjectFile.h - RISCV Object Info -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETOBJECTFILE_H
+#define LLVM_LIB_TARGET_RISCV_RISCVTARGETOBJECTFILE_H
+
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+
+namespace llvm {
+class RISCVTargetMachine;
+
+/// This implementation is used for RISCV ELF targets.
+class RISCVELFTargetObjectFile : public TargetLoweringObjectFileELF {
+ MCSection *SmallDataSection;
+ MCSection *SmallBSSSection;
+ unsigned SSThreshold = 8;
+
+public:
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+
+  /// Return true if this global address should be placed into the small data/bss
+ /// section.
+ bool isGlobalInSmallSection(const GlobalObject *GO,
+ const TargetMachine &TM) const;
+
+ MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
+ const TargetMachine &TM) const override;
+
+  /// Return true if this constant should be placed into the small data section.
+ bool isConstantInSmallSection(const DataLayout &DL, const Constant *CN) const;
+
+ MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
+ const Constant *C,
+ unsigned &Align) const override;
+
+ void getModuleMetadata(Module &M) override;
+
+ bool isInSmallSection(uint64_t Size) const;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
new file mode 100644
index 000000000000..2c6400cbb1eb
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -0,0 +1,92 @@
+//===-- RISCVTargetTransformInfo.cpp - RISC-V specific TTI ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVTargetTransformInfo.h"
+#include "Utils/RISCVMatInt.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
+#include "llvm/CodeGen/TargetLowering.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "riscvtti"
+
+int RISCVTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
+ assert(Ty->isIntegerTy() &&
+ "getIntImmCost can only estimate cost of materialising integers");
+
+ // We have a Zero register, so 0 is always free.
+ if (Imm == 0)
+ return TTI::TCC_Free;
+
+ // Otherwise, we check how many instructions it will take to materialise.
+ const DataLayout &DL = getDataLayout();
+ return RISCVMatInt::getIntMatCost(Imm, DL.getTypeSizeInBits(Ty),
+ getST()->is64Bit());
+}
+
+int RISCVTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
+ Type *Ty) {
+ assert(Ty->isIntegerTy() &&
+ "getIntImmCost can only estimate cost of materialising integers");
+
+ // We have a Zero register, so 0 is always free.
+ if (Imm == 0)
+ return TTI::TCC_Free;
+
+ // Some instructions in RISC-V can take a 12-bit immediate. Some of these are
+ // commutative, in others the immediate comes from a specific argument index.
+ bool Takes12BitImm = false;
+ unsigned ImmArgIdx = ~0U;
+
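+  // As an illustrative sketch of the checks below: for an 'add' with
+  // Imm = 2047 the immediate fits the 12-bit ADDI field and costs nothing, so
+  // TCC_Free is returned; for Imm = 4096 the isLegalAddImmediate check fails
+  // and the full materialisation cost is returned instead.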
+ switch (Opcode) {
+ case Instruction::GetElementPtr:
+ // Never hoist any arguments to a GetElementPtr. CodeGenPrepare will
+ // split up large offsets in GEP into better parts than ConstantHoisting
+ // can.
+ return TTI::TCC_Free;
+ case Instruction::Add:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ case Instruction::Mul:
+ Takes12BitImm = true;
+ break;
+ case Instruction::Sub:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ Takes12BitImm = true;
+ ImmArgIdx = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (Takes12BitImm) {
+ // Check immediate is the correct argument...
+ if (Instruction::isCommutative(Opcode) || Idx == ImmArgIdx) {
+ // ... and fits into the 12-bit immediate.
+ if (Imm.getMinSignedBits() <= 64 &&
+ getTLI()->isLegalAddImmediate(Imm.getSExtValue())) {
+ return TTI::TCC_Free;
+ }
+ }
+
+ // Otherwise, use the full materialisation cost.
+ return getIntImmCost(Imm, Ty);
+ }
+
+ // By default, prevent hoisting.
+ return TTI::TCC_Free;
+}
+
+int RISCVTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
+ const APInt &Imm, Type *Ty) {
+ // Prevent hoisting in unknown cases.
+ return TTI::TCC_Free;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
new file mode 100644
index 000000000000..f361b25a0c70
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -0,0 +1,52 @@
+//===- RISCVTargetTransformInfo.h - RISC-V specific TTI ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines a TargetTransformInfo::Concept conforming object specific
+/// to the RISC-V target machine. It uses the target's detailed information to
+/// provide more precise answers to certain TTI queries, while letting the
+/// target independent and default TTI implementations handle the rest.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
+#define LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
+
+#include "RISCVSubtarget.h"
+#include "RISCVTargetMachine.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
+#include "llvm/IR/Function.h"
+
+namespace llvm {
+
+class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
+ using BaseT = BasicTTIImplBase<RISCVTTIImpl>;
+ using TTI = TargetTransformInfo;
+
+ friend BaseT;
+
+ const RISCVSubtarget *ST;
+ const RISCVTargetLowering *TLI;
+
+ const RISCVSubtarget *getST() const { return ST; }
+ const RISCVTargetLowering *getTLI() const { return TLI; }
+
+public:
+ explicit RISCVTTIImpl(const RISCVTargetMachine *TM, const Function &F)
+ : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
+ TLI(ST->getTargetLowering()) {}
+
+ int getIntImmCost(const APInt &Imm, Type *Ty);
+ int getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty);
+ int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
+ Type *Ty);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
\ No newline at end of file
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/TargetInfo/RISCVTargetInfo.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/TargetInfo/RISCVTargetInfo.cpp
new file mode 100644
index 000000000000..e44984a3fcc5
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/TargetInfo/RISCVTargetInfo.cpp
@@ -0,0 +1,28 @@
+//===-- RISCVTargetInfo.cpp - RISCV Target Implementation -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "TargetInfo/RISCVTargetInfo.h"
+#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
+Target &llvm::getTheRISCV32Target() {
+ static Target TheRISCV32Target;
+ return TheRISCV32Target;
+}
+
+Target &llvm::getTheRISCV64Target() {
+ static Target TheRISCV64Target;
+ return TheRISCV64Target;
+}
+
+extern "C" void LLVMInitializeRISCVTargetInfo() {
+ RegisterTarget<Triple::riscv32> X(getTheRISCV32Target(), "riscv32",
+ "32-bit RISC-V", "RISCV");
+ RegisterTarget<Triple::riscv64> Y(getTheRISCV64Target(), "riscv64",
+ "64-bit RISC-V", "RISCV");
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/TargetInfo/RISCVTargetInfo.h b/contrib/llvm-project/llvm/lib/Target/RISCV/TargetInfo/RISCVTargetInfo.h
new file mode 100644
index 000000000000..ef3d9d116efa
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/TargetInfo/RISCVTargetInfo.h
@@ -0,0 +1,21 @@
+//===-- RISCVTargetInfo.h - RISCV Target Implementation ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_TARGETINFO_RISCVTARGETINFO_H
+#define LLVM_LIB_TARGET_RISCV_TARGETINFO_RISCVTARGETINFO_H
+
+namespace llvm {
+
+class Target;
+
+Target &getTheRISCV32Target();
+Target &getTheRISCV64Target();
+
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_RISCV_TARGETINFO_RISCVTARGETINFO_H
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp
new file mode 100644
index 000000000000..bc5395768ca1
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp
@@ -0,0 +1,80 @@
+#include "RISCVBaseInfo.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace RISCVSysReg {
+#define GET_SysRegsList_IMPL
+#include "RISCVGenSystemOperands.inc"
+} // namespace RISCVSysReg
+
+namespace RISCVABI {
+ABI computeTargetABI(const Triple &TT, FeatureBitset FeatureBits,
+ StringRef ABIName) {
+ auto TargetABI = StringSwitch<ABI>(ABIName)
+ .Case("ilp32", ABI_ILP32)
+ .Case("ilp32f", ABI_ILP32F)
+ .Case("ilp32d", ABI_ILP32D)
+ .Case("ilp32e", ABI_ILP32E)
+ .Case("lp64", ABI_LP64)
+ .Case("lp64f", ABI_LP64F)
+ .Case("lp64d", ABI_LP64D)
+ .Default(ABI_Unknown);
+
+ bool IsRV64 = TT.isArch64Bit();
+ bool IsRV32E = FeatureBits[RISCV::FeatureRV32E];
+
+ if (!ABIName.empty() && TargetABI == ABI_Unknown) {
+ errs()
+ << "'" << ABIName
+ << "' is not a recognized ABI for this target (ignoring target-abi)\n";
+ } else if (ABIName.startswith("ilp32") && IsRV64) {
+ errs() << "32-bit ABIs are not supported for 64-bit targets (ignoring "
+ "target-abi)\n";
+ TargetABI = ABI_Unknown;
+ } else if (ABIName.startswith("lp64") && !IsRV64) {
+ errs() << "64-bit ABIs are not supported for 32-bit targets (ignoring "
+ "target-abi)\n";
+ TargetABI = ABI_Unknown;
+ } else if (ABIName.endswith("f") && !FeatureBits[RISCV::FeatureStdExtF]) {
+ errs() << "Hard-float 'f' ABI can't be used for a target that "
+ "doesn't support the F instruction set extension (ignoring "
+ "target-abi)\n";
+ TargetABI = ABI_Unknown;
+ } else if (ABIName.endswith("d") && !FeatureBits[RISCV::FeatureStdExtD]) {
+ errs() << "Hard-float 'd' ABI can't be used for a target that "
+ "doesn't support the D instruction set extension (ignoring "
+ "target-abi)\n";
+ TargetABI = ABI_Unknown;
+ } else if (IsRV32E && TargetABI != ABI_ILP32E && TargetABI != ABI_Unknown) {
+ errs()
+ << "Only the ilp32e ABI is supported for RV32E (ignoring target-abi)\n";
+ TargetABI = ABI_Unknown;
+ }
+
+ if (TargetABI != ABI_Unknown)
+ return TargetABI;
+
+ // For now, default to the ilp32/ilp32e/lp64 ABI if no explicit ABI is given
+ // or an invalid/unrecognised string is given. In the future, it might be
+ // worth changing this to default to ilp32f/lp64f and ilp32d/lp64d when
+ // hardware support for floating point is present.
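+  // (For example, a riscv64 triple with an empty ABIName therefore yields
+  // ABI_LP64 even when the F and D extensions are enabled.)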
+ if (IsRV32E)
+ return ABI_ILP32E;
+ if (IsRV64)
+ return ABI_LP64;
+ return ABI_ILP32;
+}
+} // namespace RISCVABI
+
+namespace RISCVFeatures {
+
+void validate(const Triple &TT, const FeatureBitset &FeatureBits) {
+ if (TT.isArch64Bit() && FeatureBits[RISCV::FeatureRV32E])
+ report_fatal_error("RV32E can't be enabled for an RV64 target");
+}
+
+} // namespace RISCVFeatures
+
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h b/contrib/llvm-project/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
new file mode 100644
index 000000000000..c33c72f24319
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
@@ -0,0 +1,194 @@
+//===-- RISCVBaseInfo.h - Top level definitions for RISCV MC ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains small standalone enum definitions for the RISCV target
+// useful for the compiler back-end and the MC libraries.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVBASEINFO_H
+#define LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVBASEINFO_H
+
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/MC/SubtargetFeature.h"
+
+namespace llvm {
+
+// RISCVII - This namespace holds all of the target specific flags that
+// instruction info tracks. All definitions must match RISCVInstrFormats.td.
+namespace RISCVII {
+enum {
+ InstFormatPseudo = 0,
+ InstFormatR = 1,
+ InstFormatR4 = 2,
+ InstFormatI = 3,
+ InstFormatS = 4,
+ InstFormatB = 5,
+ InstFormatU = 6,
+ InstFormatJ = 7,
+ InstFormatCR = 8,
+ InstFormatCI = 9,
+ InstFormatCSS = 10,
+ InstFormatCIW = 11,
+ InstFormatCL = 12,
+ InstFormatCS = 13,
+ InstFormatCA = 14,
+ InstFormatCB = 15,
+ InstFormatCJ = 16,
+ InstFormatOther = 17,
+
+ InstFormatMask = 31
+};
+
+enum {
+ MO_None,
+ MO_CALL,
+ MO_PLT,
+ MO_LO,
+ MO_HI,
+ MO_PCREL_LO,
+ MO_PCREL_HI,
+ MO_GOT_HI,
+ MO_TPREL_LO,
+ MO_TPREL_HI,
+ MO_TPREL_ADD,
+ MO_TLS_GOT_HI,
+ MO_TLS_GD_HI,
+};
+} // namespace RISCVII
+
+// Describes the predecessor/successor bits used in the FENCE instruction.
+namespace RISCVFenceField {
+enum FenceField {
+ I = 8,
+ O = 4,
+ R = 2,
+ W = 1
+};
+}
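+
+// For example, the assembler operand "iorw" sets all four bits, so
+// "fence iorw, iorw" encodes pred = succ = (I | O | R | W) = 0b1111.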
+
+// Describes the supported floating point rounding mode encodings.
+namespace RISCVFPRndMode {
+enum RoundingMode {
+ RNE = 0,
+ RTZ = 1,
+ RDN = 2,
+ RUP = 3,
+ RMM = 4,
+ DYN = 7,
+ Invalid
+};
+
+inline static StringRef roundingModeToString(RoundingMode RndMode) {
+ switch (RndMode) {
+ default:
+ llvm_unreachable("Unknown floating point rounding mode");
+ case RISCVFPRndMode::RNE:
+ return "rne";
+ case RISCVFPRndMode::RTZ:
+ return "rtz";
+ case RISCVFPRndMode::RDN:
+ return "rdn";
+ case RISCVFPRndMode::RUP:
+ return "rup";
+ case RISCVFPRndMode::RMM:
+ return "rmm";
+ case RISCVFPRndMode::DYN:
+ return "dyn";
+ }
+}
+
+inline static RoundingMode stringToRoundingMode(StringRef Str) {
+ return StringSwitch<RoundingMode>(Str)
+ .Case("rne", RISCVFPRndMode::RNE)
+ .Case("rtz", RISCVFPRndMode::RTZ)
+ .Case("rdn", RISCVFPRndMode::RDN)
+ .Case("rup", RISCVFPRndMode::RUP)
+ .Case("rmm", RISCVFPRndMode::RMM)
+ .Case("dyn", RISCVFPRndMode::DYN)
+ .Default(RISCVFPRndMode::Invalid);
+}
+
+inline static bool isValidRoundingMode(unsigned Mode) {
+ switch (Mode) {
+ default:
+ return false;
+ case RISCVFPRndMode::RNE:
+ case RISCVFPRndMode::RTZ:
+ case RISCVFPRndMode::RDN:
+ case RISCVFPRndMode::RUP:
+ case RISCVFPRndMode::RMM:
+ case RISCVFPRndMode::DYN:
+ return true;
+ }
+}
+} // namespace RISCVFPRndMode
+
+namespace RISCVSysReg {
+struct SysReg {
+ const char *Name;
+ unsigned Encoding;
+ // FIXME: add these additional fields when needed.
+ // Privilege Access: Read, Write, Read-Only.
+ // unsigned ReadWrite;
+ // Privilege Mode: User, System or Machine.
+ // unsigned Mode;
+ // Check field name.
+ // unsigned Extra;
+ // Register number without the privilege bits.
+ // unsigned Number;
+ FeatureBitset FeaturesRequired;
+ bool isRV32Only;
+
+ bool haveRequiredFeatures(FeatureBitset ActiveFeatures) const {
+ // Not in 32-bit mode.
+ if (isRV32Only && ActiveFeatures[RISCV::Feature64Bit])
+ return false;
+ // No required feature associated with the system register.
+ if (FeaturesRequired.none())
+ return true;
+ return (FeaturesRequired & ActiveFeatures) == FeaturesRequired;
+ }
+};
+
+#define GET_SysRegsList_DECL
+#include "RISCVGenSystemOperands.inc"
+} // end namespace RISCVSysReg
+
+namespace RISCVABI {
+
+enum ABI {
+ ABI_ILP32,
+ ABI_ILP32F,
+ ABI_ILP32D,
+ ABI_ILP32E,
+ ABI_LP64,
+ ABI_LP64F,
+ ABI_LP64D,
+ ABI_Unknown
+};
+
+// Returns the requested target ABI if it is valid for the given TT and
+// FeatureBits combination; otherwise prints a diagnostic and returns a
+// sensible default ABI.
+ABI computeTargetABI(const Triple &TT, FeatureBitset FeatureBits,
+ StringRef ABIName);
+
+} // namespace RISCVABI
+
+namespace RISCVFeatures {
+
+// Validates if the given combination of features are valid for the target
+// triple. Exits with report_fatal_error if not.
+void validate(const Triple &TT, const FeatureBitset &FeatureBits);
+
+} // namespace RISCVFeatures
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/Utils/RISCVMatInt.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/Utils/RISCVMatInt.cpp
new file mode 100644
index 000000000000..f390ddb89e3c
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/Utils/RISCVMatInt.cpp
@@ -0,0 +1,93 @@
+//===- RISCVMatInt.cpp - Immediate materialisation --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVMatInt.h"
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Support/MathExtras.h"
+#include <cstdint>
+
+namespace llvm {
+
+namespace RISCVMatInt {
+void generateInstSeq(int64_t Val, bool IsRV64, InstSeq &Res) {
+ if (isInt<32>(Val)) {
+ // Depending on the active bits in the immediate Value v, the following
+ // instruction sequences are emitted:
+ //
+ // v == 0 : ADDI
+ // v[0,12) != 0 && v[12,32) == 0 : ADDI
+ // v[0,12) == 0 && v[12,32) != 0 : LUI
+ // v[0,32) != 0 : LUI+ADDI(W)
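+    //
+    // As a worked illustration (not part of the original comment): for
+    // Val = 0xFFF, Hi20 = ((0xFFF + 0x800) >> 12) & 0xFFFFF = 1 and
+    // Lo12 = -1, so the sequence is LUI 1 then ADDI(W) -1, producing
+    // 0x1000 - 1 = 0xFFF. The +0x800 rounding compensates for the sign
+    // extension that ADDI performs on Lo12.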
+ int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
+ int64_t Lo12 = SignExtend64<12>(Val);
+
+ if (Hi20)
+ Res.push_back(Inst(RISCV::LUI, Hi20));
+
+ if (Lo12 || Hi20 == 0) {
+ unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
+ Res.push_back(Inst(AddiOpc, Lo12));
+ }
+ return;
+ }
+
+ assert(IsRV64 && "Can't emit >32-bit imm for non-RV64 target");
+
+ // In the worst case, for a full 64-bit constant, a sequence of 8 instructions
+// (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
+ // that the first two instructions (LUI+ADDIW) can contribute up to 32 bits
+ // while the following ADDI instructions contribute up to 12 bits each.
+ //
+// At first glance, implementing this seems to be possible by simply
+ // emitting the most significant 32 bits (LUI+ADDIW) followed by as many left
+ // shift (SLLI) and immediate additions (ADDI) as needed. However, due to the
+ // fact that ADDI performs a sign extended addition, doing it like that would
+ // only be possible when at most 11 bits of the ADDI instructions are used.
+ // Using all 12 bits of the ADDI instructions, like done by GAS, actually
+ // requires that the constant is processed starting with the least significant
+ // bit.
+ //
+ // In the following, constants are processed from LSB to MSB but instruction
+ // emission is performed from MSB to LSB by recursively calling
+ // generateInstSeq. In each recursion, first the lowest 12 bits are removed
+ // from the constant and the optimal shift amount, which can be greater than
+ // 12 bits if the constant is sparse, is determined. Then, the shifted
+ // remaining constant is processed recursively and gets emitted as soon as it
+ // fits into 32 bits. The emission of the shifts and additions is subsequently
+ // performed when the recursion returns.
+
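+  // For instance (an illustrative walk-through of the code below), for
+  // Val = 0x100000001 on RV64: Lo12 = 1, Hi52 = 0x100000, so
+  // ShiftAmount = 12 + findFirstSet(0x100000) = 32 and the recursion runs on
+  // Hi52 >> 20 = 1. The overall sequence is ADDI 1; SLLI 32; ADDI 1.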
+ int64_t Lo12 = SignExtend64<12>(Val);
+ int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
+ int ShiftAmount = 12 + findFirstSet((uint64_t)Hi52);
+ Hi52 = SignExtend64(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);
+
+ generateInstSeq(Hi52, IsRV64, Res);
+
+ Res.push_back(Inst(RISCV::SLLI, ShiftAmount));
+ if (Lo12)
+ Res.push_back(Inst(RISCV::ADDI, Lo12));
+}
+
+int getIntMatCost(const APInt &Val, unsigned Size, bool IsRV64) {
+ int PlatRegSize = IsRV64 ? 64 : 32;
+
+ // Split the constant into platform register sized chunks, and calculate cost
+ // of each chunk.
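+  // For example (illustrative), a 128-bit immediate on RV64 is split into two
+  // 64-bit chunks that are each costed independently.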
+ int Cost = 0;
+ for (unsigned ShiftVal = 0; ShiftVal < Size; ShiftVal += PlatRegSize) {
+ APInt Chunk = Val.ashr(ShiftVal).sextOrTrunc(PlatRegSize);
+ InstSeq MatSeq;
+ generateInstSeq(Chunk.getSExtValue(), IsRV64, MatSeq);
+ Cost += MatSeq.size();
+ }
+ return std::max(1, Cost);
+}
+} // namespace RISCVMatInt
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/Utils/RISCVMatInt.h b/contrib/llvm-project/llvm/lib/Target/RISCV/Utils/RISCVMatInt.h
new file mode 100644
index 000000000000..b12ae2eade99
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/Utils/RISCVMatInt.h
@@ -0,0 +1,44 @@
+//===- RISCVMatInt.h - Immediate materialisation ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_MATINT_H
+#define LLVM_LIB_TARGET_RISCV_MATINT_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/MachineValueType.h"
+#include <cstdint>
+
+namespace llvm {
+
+namespace RISCVMatInt {
+struct Inst {
+ unsigned Opc;
+ int64_t Imm;
+
+ Inst(unsigned Opc, int64_t Imm) : Opc(Opc), Imm(Imm) {}
+};
+using InstSeq = SmallVector<Inst, 8>;
+
+// Helper to generate an instruction sequence that will materialise the given
+// immediate value into a register. A sequence of instructions represented by
+// a simple struct is produced, rather than directly emitting the instructions,
+// in order to allow this helper to be used from both the MC layer and during
+// instruction selection.
+void generateInstSeq(int64_t Val, bool IsRV64, InstSeq &Res);
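+//
+// A minimal usage sketch (assuming an RV64 target):
+//
+//   RISCVMatInt::InstSeq Seq;
+//   RISCVMatInt::generateInstSeq(0xFFF, /*IsRV64=*/true, Seq);
+//   // Seq now holds {LUI 1, ADDIW -1}.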
+
+// Helper to estimate the number of instructions required to materialise the
+// given immediate value into a register. This estimate does not account for
+// `Val` possibly fitting into an immediate, and so may over-estimate.
+//
+// This will attempt to produce instructions to materialise `Val` as an
+// `Size`-bit immediate. `IsRV64` should match the target architecture.
+int getIntMatCost(const APInt &Val, unsigned Size, bool IsRV64);
+} // namespace RISCVMatInt
+} // namespace llvm
+#endif