Diffstat (limited to 'contrib/llvm-project/llvm/lib/Target/WebAssembly')
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp  1178
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp  420
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h  75
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp  302
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp  129
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h  29
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp  368
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.h  59
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp  55
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h  32
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp  188
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp  151
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h  552
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTypeUtilities.cpp  124
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTypeUtilities.h  73
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp  139
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h  115
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp  166
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/README.txt  173
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp  41
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.h  34
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WasmAddressSpaces.h  48
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp  95
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h  68
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssembly.h  106
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssembly.td  139
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp  154
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp  97
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp  750
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.h  93
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp  398
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp  1776
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyDebugFixup.cpp  161
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp  415
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.h  57
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp  371
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.h  176
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp  463
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp  1435
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFixBrTableDefaults.cpp  199
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp  303
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp  560
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp  396
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h  80
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISD.def  52
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp  413
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp  2889
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h  159
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td  536
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrBulkMemory.td  75
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td  84
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td  168
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td  240
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td  127
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td  68
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp  233
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h  79
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td  442
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td  131
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td  195
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrRef.td  46
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td  1505
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrTable.td  89
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp  322
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp  209
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp  1870
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerRefTypesIntPtrConv.cpp  86
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp  303
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h  47
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMCLowerPrePass.cpp  102
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp  170
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h  225
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp  210
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyNullifyDebugValueLists.cpp  64
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp  124
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp  79
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp  165
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp  332
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp  110
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp  984
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp  160
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h  53
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td  69
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp  107
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp  907
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h  37
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp  59
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h  46
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp  97
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySortRegion.cpp  78
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySortRegion.h  91
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp  60
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h  114
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp  634
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h  69
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp  24
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h  29
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp  144
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h  83
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp  181
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h  70
101 files changed, 28788 insertions, 0 deletions
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
new file mode 100644
index 000000000000..1b92997f03f1
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
@@ -0,0 +1,1178 @@
+//==- WebAssemblyAsmParser.cpp - Assembler for WebAssembly -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is part of the WebAssembly Assembler.
+///
+/// It contains code to translate a parsed .s file into MCInsts.
+///
+//===----------------------------------------------------------------------===//
+
+#include "AsmParser/WebAssemblyAsmTypeCheck.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "MCTargetDesc/WebAssemblyMCTypeUtilities.h"
+#include "MCTargetDesc/WebAssemblyTargetStreamer.h"
+#include "TargetInfo/WebAssemblyTargetInfo.h"
+#include "WebAssembly.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
+#include "llvm/MC/MCSectionWasm.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCSymbolWasm.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/SourceMgr.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-asm-parser"
+
+static const char *getSubtargetFeatureName(uint64_t Val);
+
+namespace {
+
+/// WebAssemblyOperand - Instances of this class represent the operands in a
+/// parsed Wasm machine instruction.
+struct WebAssemblyOperand : public MCParsedAsmOperand {
+ enum KindTy { Token, Integer, Float, Symbol, BrList } Kind;
+
+ SMLoc StartLoc, EndLoc;
+
+ struct TokOp {
+ StringRef Tok;
+ };
+
+ struct IntOp {
+ int64_t Val;
+ };
+
+ struct FltOp {
+ double Val;
+ };
+
+ struct SymOp {
+ const MCExpr *Exp;
+ };
+
+ struct BrLOp {
+ std::vector<unsigned> List;
+ };
+
+ union {
+ struct TokOp Tok;
+ struct IntOp Int;
+ struct FltOp Flt;
+ struct SymOp Sym;
+ struct BrLOp BrL;
+ };
+
+ WebAssemblyOperand(KindTy K, SMLoc Start, SMLoc End, TokOp T)
+ : Kind(K), StartLoc(Start), EndLoc(End), Tok(T) {}
+ WebAssemblyOperand(KindTy K, SMLoc Start, SMLoc End, IntOp I)
+ : Kind(K), StartLoc(Start), EndLoc(End), Int(I) {}
+ WebAssemblyOperand(KindTy K, SMLoc Start, SMLoc End, FltOp F)
+ : Kind(K), StartLoc(Start), EndLoc(End), Flt(F) {}
+ WebAssemblyOperand(KindTy K, SMLoc Start, SMLoc End, SymOp S)
+ : Kind(K), StartLoc(Start), EndLoc(End), Sym(S) {}
+ WebAssemblyOperand(KindTy K, SMLoc Start, SMLoc End)
+ : Kind(K), StartLoc(Start), EndLoc(End), BrL() {}
+
+ ~WebAssemblyOperand() {
+ if (isBrList())
+ BrL.~BrLOp();
+ }
+
+ bool isToken() const override { return Kind == Token; }
+ bool isImm() const override { return Kind == Integer || Kind == Symbol; }
+ bool isFPImm() const { return Kind == Float; }
+ bool isMem() const override { return false; }
+ bool isReg() const override { return false; }
+ bool isBrList() const { return Kind == BrList; }
+
+ unsigned getReg() const override {
+ llvm_unreachable("Assembly inspects a register operand");
+ return 0;
+ }
+
+ StringRef getToken() const {
+ assert(isToken());
+ return Tok.Tok;
+ }
+
+ SMLoc getStartLoc() const override { return StartLoc; }
+ SMLoc getEndLoc() const override { return EndLoc; }
+
+ void addRegOperands(MCInst &, unsigned) const {
+ // Required by the assembly matcher.
+ llvm_unreachable("Assembly matcher creates register operands");
+ }
+
+ void addImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ if (Kind == Integer)
+ Inst.addOperand(MCOperand::createImm(Int.Val));
+ else if (Kind == Symbol)
+ Inst.addOperand(MCOperand::createExpr(Sym.Exp));
+ else
+ llvm_unreachable("Should be integer immediate or symbol!");
+ }
+
+ void addFPImmf32Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ if (Kind == Float)
+ Inst.addOperand(
+ MCOperand::createSFPImm(bit_cast<uint32_t>(float(Flt.Val))));
+ else
+ llvm_unreachable("Should be float immediate!");
+ }
+
+ void addFPImmf64Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ if (Kind == Float)
+ Inst.addOperand(MCOperand::createDFPImm(bit_cast<uint64_t>(Flt.Val)));
+ else
+ llvm_unreachable("Should be float immediate!");
+ }
+
+ void addBrListOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && isBrList() && "Invalid BrList!");
+ for (auto Br : BrL.List)
+ Inst.addOperand(MCOperand::createImm(Br));
+ }
+
+ void print(raw_ostream &OS) const override {
+ switch (Kind) {
+ case Token:
+ OS << "Tok:" << Tok.Tok;
+ break;
+ case Integer:
+ OS << "Int:" << Int.Val;
+ break;
+ case Float:
+ OS << "Flt:" << Flt.Val;
+ break;
+ case Symbol:
+ OS << "Sym:" << Sym.Exp;
+ break;
+ case BrList:
+ OS << "BrList:" << BrL.List.size();
+ break;
+ }
+ }
+};
+
+// Perhaps this should go somewhere common.
+static wasm::WasmLimits DefaultLimits() {
+ return {wasm::WASM_LIMITS_FLAG_NONE, 0, 0};
+}
+
+static MCSymbolWasm *GetOrCreateFunctionTableSymbol(MCContext &Ctx,
+ const StringRef &Name) {
+ MCSymbolWasm *Sym = cast_or_null<MCSymbolWasm>(Ctx.lookupSymbol(Name));
+ if (Sym) {
+ if (!Sym->isFunctionTable())
+ Ctx.reportError(SMLoc(), "symbol is not a wasm funcref table");
+ } else {
+ Sym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(Name));
+ Sym->setFunctionTable();
+ // The default function table is synthesized by the linker.
+ Sym->setUndefined();
+ }
+ return Sym;
+}
+
+class WebAssemblyAsmParser final : public MCTargetAsmParser {
+ MCAsmParser &Parser;
+ MCAsmLexer &Lexer;
+
+ // Much like WebAssemblyAsmPrinter in the backend, we have to own these.
+ std::vector<std::unique_ptr<wasm::WasmSignature>> Signatures;
+ std::vector<std::unique_ptr<std::string>> Names;
+
+  // The order of labels, directives and instructions in a .s file has no
+  // syntactic enforcement. This class is a callback from the actual parser,
+  // yet we have to feed data to the streamer in a very particular order to
+  // ensure a binary encoding that matches the regular backend (the streamer
+  // does not enforce this). This "state machine" enum helps guarantee that
+  // correct order.
+ enum ParserState {
+ FileStart,
+ FunctionLabel,
+ FunctionStart,
+ FunctionLocals,
+ Instructions,
+ EndFunction,
+ DataSection,
+ } CurrentState = FileStart;
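+
+  // For illustration only (symbol names are hypothetical), a well-formed
+  // function walks through these states roughly as follows:
+  //   foo:                          # FunctionLabel
+  //     .functype foo () -> (i32)   # FunctionStart
+  //     .local i32                  # FunctionLocals
+  //     i32.const 42                # Instructions
+  //     end_function                # EndFunction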
+
+ // For ensuring blocks are properly nested.
+ enum NestingType {
+ Function,
+ Block,
+ Loop,
+ Try,
+ CatchAll,
+ If,
+ Else,
+ Undefined,
+ };
+ struct Nested {
+ NestingType NT;
+ wasm::WasmSignature Sig;
+ };
+ std::vector<Nested> NestingStack;
+
+ MCSymbolWasm *DefaultFunctionTable = nullptr;
+ MCSymbol *LastFunctionLabel = nullptr;
+
+ bool is64;
+
+ WebAssemblyAsmTypeCheck TC;
+ // Don't type check if -no-type-check was set.
+ bool SkipTypeCheck;
+
+public:
+ WebAssemblyAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
+ const MCInstrInfo &MII, const MCTargetOptions &Options)
+ : MCTargetAsmParser(Options, STI, MII), Parser(Parser),
+ Lexer(Parser.getLexer()), is64(STI.getTargetTriple().isArch64Bit()),
+ TC(Parser, MII, is64), SkipTypeCheck(Options.MCNoTypeCheck) {
+ setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
+ // Don't type check if this is inline asm, since that is a naked sequence of
+ // instructions without a function/locals decl.
+ auto &SM = Parser.getSourceManager();
+ auto BufferName =
+ SM.getBufferInfo(SM.getMainFileID()).Buffer->getBufferIdentifier();
+ if (BufferName == "<inline asm>")
+ SkipTypeCheck = true;
+ }
+
+ void Initialize(MCAsmParser &Parser) override {
+ MCAsmParserExtension::Initialize(Parser);
+
+ DefaultFunctionTable = GetOrCreateFunctionTableSymbol(
+ getContext(), "__indirect_function_table");
+ if (!STI->checkFeatures("+reference-types"))
+ DefaultFunctionTable->setOmitFromLinkingSection();
+ }
+
+#define GET_ASSEMBLER_HEADER
+#include "WebAssemblyGenAsmMatcher.inc"
+
+ // TODO: This is required to be implemented, but appears unused.
+ bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override {
+ llvm_unreachable("parseRegister is not implemented.");
+ }
+ ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
+ SMLoc &EndLoc) override {
+ llvm_unreachable("tryParseRegister is not implemented.");
+ }
+
+ bool error(const Twine &Msg, const AsmToken &Tok) {
+ return Parser.Error(Tok.getLoc(), Msg + Tok.getString());
+ }
+
+ bool error(const Twine &Msg, SMLoc Loc = SMLoc()) {
+ return Parser.Error(Loc.isValid() ? Loc : Lexer.getTok().getLoc(), Msg);
+ }
+
+ void addSignature(std::unique_ptr<wasm::WasmSignature> &&Sig) {
+ Signatures.push_back(std::move(Sig));
+ }
+
+ StringRef storeName(StringRef Name) {
+ std::unique_ptr<std::string> N = std::make_unique<std::string>(Name);
+ Names.push_back(std::move(N));
+ return *Names.back();
+ }
+
+ std::pair<StringRef, StringRef> nestingString(NestingType NT) {
+ switch (NT) {
+ case Function:
+ return {"function", "end_function"};
+ case Block:
+ return {"block", "end_block"};
+ case Loop:
+ return {"loop", "end_loop"};
+ case Try:
+ return {"try", "end_try/delegate"};
+ case CatchAll:
+ return {"catch_all", "end_try"};
+ case If:
+ return {"if", "end_if"};
+ case Else:
+ return {"else", "end_if"};
+ default:
+ llvm_unreachable("unknown NestingType");
+ }
+ }
+
+ void push(NestingType NT, wasm::WasmSignature Sig = wasm::WasmSignature()) {
+ NestingStack.push_back({NT, Sig});
+ }
+
+ bool pop(StringRef Ins, NestingType NT1, NestingType NT2 = Undefined) {
+ if (NestingStack.empty())
+ return error(Twine("End of block construct with no start: ") + Ins);
+ auto Top = NestingStack.back();
+ if (Top.NT != NT1 && Top.NT != NT2)
+ return error(Twine("Block construct type mismatch, expected: ") +
+ nestingString(Top.NT).second + ", instead got: " + Ins);
+ TC.setLastSig(Top.Sig);
+ NestingStack.pop_back();
+ return false;
+ }
+
+ // Pop a NestingType and push a new NestingType with the same signature. Used
+ // for if-else and try-catch(_all).
+ bool popAndPushWithSameSignature(StringRef Ins, NestingType PopNT,
+ NestingType PushNT) {
+ if (NestingStack.empty())
+ return error(Twine("End of block construct with no start: ") + Ins);
+ auto Sig = NestingStack.back().Sig;
+ if (pop(Ins, PopNT))
+ return true;
+ push(PushNT, Sig);
+ return false;
+ }
+
+ bool ensureEmptyNestingStack(SMLoc Loc = SMLoc()) {
+ auto Err = !NestingStack.empty();
+ while (!NestingStack.empty()) {
+ error(Twine("Unmatched block construct(s) at function end: ") +
+ nestingString(NestingStack.back().NT).first,
+ Loc);
+ NestingStack.pop_back();
+ }
+ return Err;
+ }
+
+ bool isNext(AsmToken::TokenKind Kind) {
+ auto Ok = Lexer.is(Kind);
+ if (Ok)
+ Parser.Lex();
+ return Ok;
+ }
+
+ bool expect(AsmToken::TokenKind Kind, const char *KindName) {
+ if (!isNext(Kind))
+ return error(std::string("Expected ") + KindName + ", instead got: ",
+ Lexer.getTok());
+ return false;
+ }
+
+ StringRef expectIdent() {
+ if (!Lexer.is(AsmToken::Identifier)) {
+ error("Expected identifier, got: ", Lexer.getTok());
+ return StringRef();
+ }
+ auto Name = Lexer.getTok().getString();
+ Parser.Lex();
+ return Name;
+ }
+
+ bool parseRegTypeList(SmallVectorImpl<wasm::ValType> &Types) {
+ while (Lexer.is(AsmToken::Identifier)) {
+ auto Type = WebAssembly::parseType(Lexer.getTok().getString());
+ if (!Type)
+ return error("unknown type: ", Lexer.getTok());
+ Types.push_back(*Type);
+ Parser.Lex();
+ if (!isNext(AsmToken::Comma))
+ break;
+ }
+ return false;
+ }
+
+ void parseSingleInteger(bool IsNegative, OperandVector &Operands) {
+ auto &Int = Lexer.getTok();
+ int64_t Val = Int.getIntVal();
+ if (IsNegative)
+ Val = -Val;
+ Operands.push_back(std::make_unique<WebAssemblyOperand>(
+ WebAssemblyOperand::Integer, Int.getLoc(), Int.getEndLoc(),
+ WebAssemblyOperand::IntOp{Val}));
+ Parser.Lex();
+ }
+
+ bool parseSingleFloat(bool IsNegative, OperandVector &Operands) {
+ auto &Flt = Lexer.getTok();
+ double Val;
+ if (Flt.getString().getAsDouble(Val, false))
+ return error("Cannot parse real: ", Flt);
+ if (IsNegative)
+ Val = -Val;
+ Operands.push_back(std::make_unique<WebAssemblyOperand>(
+ WebAssemblyOperand::Float, Flt.getLoc(), Flt.getEndLoc(),
+ WebAssemblyOperand::FltOp{Val}));
+ Parser.Lex();
+ return false;
+ }
+
+ bool parseSpecialFloatMaybe(bool IsNegative, OperandVector &Operands) {
+ if (Lexer.isNot(AsmToken::Identifier))
+ return true;
+ auto &Flt = Lexer.getTok();
+ auto S = Flt.getString();
+ double Val;
+ if (S.compare_insensitive("infinity") == 0) {
+ Val = std::numeric_limits<double>::infinity();
+ } else if (S.compare_insensitive("nan") == 0) {
+ Val = std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return true;
+ }
+ if (IsNegative)
+ Val = -Val;
+ Operands.push_back(std::make_unique<WebAssemblyOperand>(
+ WebAssemblyOperand::Float, Flt.getLoc(), Flt.getEndLoc(),
+ WebAssemblyOperand::FltOp{Val}));
+ Parser.Lex();
+ return false;
+ }
+
+ bool checkForP2AlignIfLoadStore(OperandVector &Operands, StringRef InstName) {
+ // FIXME: there is probably a cleaner way to do this.
+ auto IsLoadStore = InstName.contains(".load") ||
+ InstName.contains(".store") ||
+ InstName.contains("prefetch");
+ auto IsAtomic = InstName.contains("atomic.");
+ if (IsLoadStore || IsAtomic) {
+ // Parse load/store operands of the form: offset:p2align=align
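+      // For example (a hedged illustration), in `i32.load 8:p2align=2` the
+      // offset 8 has already been parsed; this clause overrides the natural
+      // alignment with 2^2 = 4 bytes.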
+ if (IsLoadStore && isNext(AsmToken::Colon)) {
+ auto Id = expectIdent();
+ if (Id != "p2align")
+ return error("Expected p2align, instead got: " + Id);
+ if (expect(AsmToken::Equal, "="))
+ return true;
+ if (!Lexer.is(AsmToken::Integer))
+ return error("Expected integer constant");
+ parseSingleInteger(false, Operands);
+ } else {
+ // v128.{load,store}{8,16,32,64}_lane has both a memarg and a lane
+ // index. We need to avoid parsing an extra alignment operand for the
+ // lane index.
+ auto IsLoadStoreLane = InstName.contains("_lane");
+ if (IsLoadStoreLane && Operands.size() == 4)
+ return false;
+        // Alignment not specified (or this is an atomic, which must use the
+        // default alignment).
+ // We can't just call WebAssembly::GetDefaultP2Align since we don't have
+ // an opcode until after the assembly matcher, so set a default to fix
+ // up later.
+ auto Tok = Lexer.getTok();
+ Operands.push_back(std::make_unique<WebAssemblyOperand>(
+ WebAssemblyOperand::Integer, Tok.getLoc(), Tok.getEndLoc(),
+ WebAssemblyOperand::IntOp{-1}));
+ }
+ }
+ return false;
+ }
+
+ void addBlockTypeOperand(OperandVector &Operands, SMLoc NameLoc,
+ WebAssembly::BlockType BT) {
+ if (BT != WebAssembly::BlockType::Void) {
+ wasm::WasmSignature Sig({static_cast<wasm::ValType>(BT)}, {});
+ TC.setLastSig(Sig);
+ NestingStack.back().Sig = Sig;
+ }
+ Operands.push_back(std::make_unique<WebAssemblyOperand>(
+ WebAssemblyOperand::Integer, NameLoc, NameLoc,
+ WebAssemblyOperand::IntOp{static_cast<int64_t>(BT)}));
+ }
+
+ bool parseLimits(wasm::WasmLimits *Limits) {
+ auto Tok = Lexer.getTok();
+ if (!Tok.is(AsmToken::Integer))
+ return error("Expected integer constant, instead got: ", Tok);
+ int64_t Val = Tok.getIntVal();
+ assert(Val >= 0);
+ Limits->Minimum = Val;
+ Parser.Lex();
+
+ if (isNext(AsmToken::Comma)) {
+ Limits->Flags |= wasm::WASM_LIMITS_FLAG_HAS_MAX;
+ auto Tok = Lexer.getTok();
+ if (!Tok.is(AsmToken::Integer))
+ return error("Expected integer constant, instead got: ", Tok);
+ int64_t Val = Tok.getIntVal();
+ assert(Val >= 0);
+ Limits->Maximum = Val;
+ Parser.Lex();
+ }
+ return false;
+ }
+
+ bool parseFunctionTableOperand(std::unique_ptr<WebAssemblyOperand> *Op) {
+ if (STI->checkFeatures("+reference-types")) {
+ // If the reference-types feature is enabled, there is an explicit table
+ // operand. To allow the same assembly to be compiled with or without
+ // reference types, we allow the operand to be omitted, in which case we
+ // default to __indirect_function_table.
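+      // For example (hedged; `my_table` is a hypothetical table symbol), both
+      //   call_indirect (i32) -> ()
+      //   call_indirect my_table, (i32) -> ()
+      // are accepted when reference types are enabled.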
+ auto &Tok = Lexer.getTok();
+ if (Tok.is(AsmToken::Identifier)) {
+ auto *Sym =
+ GetOrCreateFunctionTableSymbol(getContext(), Tok.getString());
+ const auto *Val = MCSymbolRefExpr::create(Sym, getContext());
+ *Op = std::make_unique<WebAssemblyOperand>(
+ WebAssemblyOperand::Symbol, Tok.getLoc(), Tok.getEndLoc(),
+ WebAssemblyOperand::SymOp{Val});
+ Parser.Lex();
+ return expect(AsmToken::Comma, ",");
+ } else {
+ const auto *Val =
+ MCSymbolRefExpr::create(DefaultFunctionTable, getContext());
+ *Op = std::make_unique<WebAssemblyOperand>(
+ WebAssemblyOperand::Symbol, SMLoc(), SMLoc(),
+ WebAssemblyOperand::SymOp{Val});
+ return false;
+ }
+ } else {
+ // For the MVP there is at most one table whose number is 0, but we can't
+ // write a table symbol or issue relocations. Instead we just ensure the
+ // table is live and write a zero.
+ getStreamer().emitSymbolAttribute(DefaultFunctionTable, MCSA_NoDeadStrip);
+ *Op = std::make_unique<WebAssemblyOperand>(WebAssemblyOperand::Integer,
+ SMLoc(), SMLoc(),
+ WebAssemblyOperand::IntOp{0});
+ return false;
+ }
+ }
+
+ bool ParseInstruction(ParseInstructionInfo & /*Info*/, StringRef Name,
+ SMLoc NameLoc, OperandVector &Operands) override {
+    // Note: Name does NOT point into the source code, but to a local, so
+ // use NameLoc instead.
+ Name = StringRef(NameLoc.getPointer(), Name.size());
+
+ // WebAssembly has instructions with / in them, which AsmLexer parses
+ // as separate tokens, so if we find such tokens immediately adjacent (no
+ // whitespace), expand the name to include them:
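+    // (For example, a legacy pre-standard mnemonic such as `i32.trunc_s/f32`
+    // would lex as `i32.trunc_s`, `/`, `f32`; this loop glues the pieces
+    // back together. The specific mnemonic is illustrative only.)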
+ for (;;) {
+ auto &Sep = Lexer.getTok();
+ if (Sep.getLoc().getPointer() != Name.end() ||
+ Sep.getKind() != AsmToken::Slash)
+ break;
+ // Extend name with /
+ Name = StringRef(Name.begin(), Name.size() + Sep.getString().size());
+ Parser.Lex();
+ // We must now find another identifier, or error.
+ auto &Id = Lexer.getTok();
+ if (Id.getKind() != AsmToken::Identifier ||
+ Id.getLoc().getPointer() != Name.end())
+ return error("Incomplete instruction name: ", Id);
+ Name = StringRef(Name.begin(), Name.size() + Id.getString().size());
+ Parser.Lex();
+ }
+
+ // Now construct the name as first operand.
+ Operands.push_back(std::make_unique<WebAssemblyOperand>(
+ WebAssemblyOperand::Token, NameLoc, SMLoc::getFromPointer(Name.end()),
+ WebAssemblyOperand::TokOp{Name}));
+
+ // If this instruction is part of a control flow structure, ensure
+ // proper nesting.
+ bool ExpectBlockType = false;
+ bool ExpectFuncType = false;
+ std::unique_ptr<WebAssemblyOperand> FunctionTable;
+ if (Name == "block") {
+ push(Block);
+ ExpectBlockType = true;
+ } else if (Name == "loop") {
+ push(Loop);
+ ExpectBlockType = true;
+ } else if (Name == "try") {
+ push(Try);
+ ExpectBlockType = true;
+ } else if (Name == "if") {
+ push(If);
+ ExpectBlockType = true;
+ } else if (Name == "else") {
+ if (popAndPushWithSameSignature(Name, If, Else))
+ return true;
+ } else if (Name == "catch") {
+ if (popAndPushWithSameSignature(Name, Try, Try))
+ return true;
+ } else if (Name == "catch_all") {
+ if (popAndPushWithSameSignature(Name, Try, CatchAll))
+ return true;
+ } else if (Name == "end_if") {
+ if (pop(Name, If, Else))
+ return true;
+ } else if (Name == "end_try") {
+ if (pop(Name, Try, CatchAll))
+ return true;
+ } else if (Name == "delegate") {
+ if (pop(Name, Try))
+ return true;
+ } else if (Name == "end_loop") {
+ if (pop(Name, Loop))
+ return true;
+ } else if (Name == "end_block") {
+ if (pop(Name, Block))
+ return true;
+ } else if (Name == "end_function") {
+ ensureLocals(getStreamer());
+ CurrentState = EndFunction;
+ if (pop(Name, Function) || ensureEmptyNestingStack())
+ return true;
+ } else if (Name == "call_indirect" || Name == "return_call_indirect") {
+ // These instructions have differing operand orders in the text format vs
+ // the binary formats. The MC instructions follow the binary format, so
+ // here we stash away the operand and append it later.
+ if (parseFunctionTableOperand(&FunctionTable))
+ return true;
+ ExpectFuncType = true;
+ }
+
+ if (ExpectFuncType || (ExpectBlockType && Lexer.is(AsmToken::LParen))) {
+      // This has a special TYPEINDEX operand, which in text we represent as
+      // a signature. We rebuild this signature and attach it to an anonymous
+      // symbol, which is what WasmObjectWriter expects in order to recreate
+      // the actual uniquified type indices.
+ auto Loc = Parser.getTok();
+ auto Signature = std::make_unique<wasm::WasmSignature>();
+ if (parseSignature(Signature.get()))
+ return true;
+ // Got signature as block type, don't need more
+ TC.setLastSig(*Signature.get());
+ if (ExpectBlockType)
+ NestingStack.back().Sig = *Signature.get();
+ ExpectBlockType = false;
+ auto &Ctx = getContext();
+ // The "true" here will cause this to be a nameless symbol.
+ MCSymbol *Sym = Ctx.createTempSymbol("typeindex", true);
+ auto *WasmSym = cast<MCSymbolWasm>(Sym);
+ WasmSym->setSignature(Signature.get());
+ addSignature(std::move(Signature));
+ WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION);
+ const MCExpr *Expr = MCSymbolRefExpr::create(
+ WasmSym, MCSymbolRefExpr::VK_WASM_TYPEINDEX, Ctx);
+ Operands.push_back(std::make_unique<WebAssemblyOperand>(
+ WebAssemblyOperand::Symbol, Loc.getLoc(), Loc.getEndLoc(),
+ WebAssemblyOperand::SymOp{Expr}));
+ }
+
+ while (Lexer.isNot(AsmToken::EndOfStatement)) {
+ auto &Tok = Lexer.getTok();
+ switch (Tok.getKind()) {
+ case AsmToken::Identifier: {
+ if (!parseSpecialFloatMaybe(false, Operands))
+ break;
+ auto &Id = Lexer.getTok();
+ if (ExpectBlockType) {
+ // Assume this identifier is a block_type.
+ auto BT = WebAssembly::parseBlockType(Id.getString());
+ if (BT == WebAssembly::BlockType::Invalid)
+ return error("Unknown block type: ", Id);
+ addBlockTypeOperand(Operands, NameLoc, BT);
+ Parser.Lex();
+ } else {
+ // Assume this identifier is a label.
+ const MCExpr *Val;
+ SMLoc Start = Id.getLoc();
+ SMLoc End;
+ if (Parser.parseExpression(Val, End))
+ return error("Cannot parse symbol: ", Lexer.getTok());
+ Operands.push_back(std::make_unique<WebAssemblyOperand>(
+ WebAssemblyOperand::Symbol, Start, End,
+ WebAssemblyOperand::SymOp{Val}));
+ if (checkForP2AlignIfLoadStore(Operands, Name))
+ return true;
+ }
+ break;
+ }
+ case AsmToken::Minus:
+ Parser.Lex();
+ if (Lexer.is(AsmToken::Integer)) {
+ parseSingleInteger(true, Operands);
+ if (checkForP2AlignIfLoadStore(Operands, Name))
+ return true;
+ } else if (Lexer.is(AsmToken::Real)) {
+ if (parseSingleFloat(true, Operands))
+ return true;
+ } else if (!parseSpecialFloatMaybe(true, Operands)) {
+ } else {
+ return error("Expected numeric constant instead got: ",
+ Lexer.getTok());
+ }
+ break;
+ case AsmToken::Integer:
+ parseSingleInteger(false, Operands);
+ if (checkForP2AlignIfLoadStore(Operands, Name))
+ return true;
+ break;
+ case AsmToken::Real: {
+ if (parseSingleFloat(false, Operands))
+ return true;
+ break;
+ }
+ case AsmToken::LCurly: {
+ Parser.Lex();
+ auto Op = std::make_unique<WebAssemblyOperand>(
+ WebAssemblyOperand::BrList, Tok.getLoc(), Tok.getEndLoc());
+ if (!Lexer.is(AsmToken::RCurly))
+ for (;;) {
+ Op->BrL.List.push_back(Lexer.getTok().getIntVal());
+ expect(AsmToken::Integer, "integer");
+ if (!isNext(AsmToken::Comma))
+ break;
+ }
+ expect(AsmToken::RCurly, "}");
+ Operands.push_back(std::move(Op));
+ break;
+ }
+ default:
+ return error("Unexpected token in operand: ", Tok);
+ }
+ if (Lexer.isNot(AsmToken::EndOfStatement)) {
+ if (expect(AsmToken::Comma, ","))
+ return true;
+ }
+ }
+ if (ExpectBlockType && Operands.size() == 1) {
+      // Support blocks with no operands by defaulting to void.
+ addBlockTypeOperand(Operands, NameLoc, WebAssembly::BlockType::Void);
+ }
+ if (FunctionTable)
+ Operands.push_back(std::move(FunctionTable));
+ Parser.Lex();
+ return false;
+ }
+
+ bool parseSignature(wasm::WasmSignature *Signature) {
+ if (expect(AsmToken::LParen, "("))
+ return true;
+ if (parseRegTypeList(Signature->Params))
+ return true;
+ if (expect(AsmToken::RParen, ")"))
+ return true;
+ if (expect(AsmToken::MinusGreater, "->"))
+ return true;
+ if (expect(AsmToken::LParen, "("))
+ return true;
+ if (parseRegTypeList(Signature->Returns))
+ return true;
+ if (expect(AsmToken::RParen, ")"))
+ return true;
+ return false;
+ }
+
+ bool CheckDataSection() {
+ if (CurrentState != DataSection) {
+ auto WS = cast<MCSectionWasm>(getStreamer().getCurrentSection().first);
+ if (WS && WS->getKind().isText())
+ return error("data directive must occur in a data segment: ",
+ Lexer.getTok());
+ }
+ CurrentState = DataSection;
+ return false;
+ }
+
+ // This function processes wasm-specific directives streamed to
+  // WebAssemblyTargetStreamer; all others go to the generic parser
+ // (see WasmAsmParser).
+ ParseStatus parseDirective(AsmToken DirectiveID) override {
+ assert(DirectiveID.getKind() == AsmToken::Identifier);
+ auto &Out = getStreamer();
+ auto &TOut =
+ reinterpret_cast<WebAssemblyTargetStreamer &>(*Out.getTargetStreamer());
+ auto &Ctx = Out.getContext();
+
+ if (DirectiveID.getString() == ".globaltype") {
+ auto SymName = expectIdent();
+ if (SymName.empty())
+ return ParseStatus::Failure;
+ if (expect(AsmToken::Comma, ","))
+ return ParseStatus::Failure;
+ auto TypeTok = Lexer.getTok();
+ auto TypeName = expectIdent();
+ if (TypeName.empty())
+ return ParseStatus::Failure;
+ auto Type = WebAssembly::parseType(TypeName);
+ if (!Type)
+ return error("Unknown type in .globaltype directive: ", TypeTok);
+ // Optional mutable modifier. Default to mutable for historical reasons.
+ // Ideally we would have gone with immutable as the default and used `mut`
+ // as the modifier to match the `.wat` format.
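+    // For example (a hedged sketch of the accepted syntax; `foo` is a
+    // hypothetical symbol):
+    //   .globaltype __stack_pointer, i32
+    //   .globaltype foo, i64, immutable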
+ bool Mutable = true;
+ if (isNext(AsmToken::Comma)) {
+ TypeTok = Lexer.getTok();
+ auto Id = expectIdent();
+ if (Id.empty())
+ return ParseStatus::Failure;
+ if (Id == "immutable")
+ Mutable = false;
+ else
+ // Should we also allow `mutable` and `mut` here for clarity?
+ return error("Unknown type in .globaltype modifier: ", TypeTok);
+ }
+ // Now set this symbol with the correct type.
+ auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
+ WasmSym->setType(wasm::WASM_SYMBOL_TYPE_GLOBAL);
+ WasmSym->setGlobalType(wasm::WasmGlobalType{uint8_t(*Type), Mutable});
+ // And emit the directive again.
+ TOut.emitGlobalType(WasmSym);
+ return expect(AsmToken::EndOfStatement, "EOL");
+ }
+
+ if (DirectiveID.getString() == ".tabletype") {
+ // .tabletype SYM, ELEMTYPE[, MINSIZE[, MAXSIZE]]
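+    // e.g. `.tabletype __indirect_function_table, funcref`, or with limits
+    // (a hedged illustration): `.tabletype t, externref, 1, 10`.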
+ auto SymName = expectIdent();
+ if (SymName.empty())
+ return ParseStatus::Failure;
+ if (expect(AsmToken::Comma, ","))
+ return ParseStatus::Failure;
+
+ auto ElemTypeTok = Lexer.getTok();
+ auto ElemTypeName = expectIdent();
+ if (ElemTypeName.empty())
+ return ParseStatus::Failure;
+ std::optional<wasm::ValType> ElemType =
+ WebAssembly::parseType(ElemTypeName);
+ if (!ElemType)
+ return error("Unknown type in .tabletype directive: ", ElemTypeTok);
+
+ wasm::WasmLimits Limits = DefaultLimits();
+ if (isNext(AsmToken::Comma) && parseLimits(&Limits))
+ return ParseStatus::Failure;
+
+ // Now that we have the name and table type, we can actually create the
+ // symbol
+ auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
+ WasmSym->setType(wasm::WASM_SYMBOL_TYPE_TABLE);
+ wasm::WasmTableType Type = {uint8_t(*ElemType), Limits};
+ WasmSym->setTableType(Type);
+ TOut.emitTableType(WasmSym);
+ return expect(AsmToken::EndOfStatement, "EOL");
+ }
+
+ if (DirectiveID.getString() == ".functype") {
+ // This code has to send things to the streamer similar to
+ // WebAssemblyAsmPrinter::EmitFunctionBodyStart.
+ // TODO: would be good to factor this into a common function, but the
+ // assembler and backend really don't share any common code, and this code
+ // parses the locals separately.
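+    // A declaration such as `.functype foo (i32, i64) -> (f32)` (symbol name
+    // hypothetical) types the symbol and, if it is defined here, marks the
+    // start of its body.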
+ auto SymName = expectIdent();
+ if (SymName.empty())
+ return ParseStatus::Failure;
+ auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
+ if (WasmSym->isDefined()) {
+ // We push 'Function' either when a label is parsed or a .functype
+ // directive is parsed. The reason it is not easy to do this uniformly
+      // in a single place is:
+      // 1. We can't do this at label parsing time alone, because there are
+      //    cases where we don't have a .functype directive before a function
+      //    label, in which case we don't know whether the label is a
+      //    function at the time of parsing.
+      // 2. We can't do this at .functype parsing time alone, because we want
+      //    to detect a function that starts with a label and does not end
+      //    correctly without encountering a .functype directive after the
+      //    label.
+ if (CurrentState != FunctionLabel) {
+ // This .functype indicates a start of a function.
+ if (ensureEmptyNestingStack())
+ return ParseStatus::Failure;
+ push(Function);
+ }
+ CurrentState = FunctionStart;
+ LastFunctionLabel = WasmSym;
+ }
+ auto Signature = std::make_unique<wasm::WasmSignature>();
+ if (parseSignature(Signature.get()))
+ return ParseStatus::Failure;
+ TC.funcDecl(*Signature);
+ WasmSym->setSignature(Signature.get());
+ addSignature(std::move(Signature));
+ WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION);
+ TOut.emitFunctionType(WasmSym);
+ // TODO: backend also calls TOut.emitIndIdx, but that is not implemented.
+ return expect(AsmToken::EndOfStatement, "EOL");
+ }
+
+ if (DirectiveID.getString() == ".export_name") {
+ auto SymName = expectIdent();
+ if (SymName.empty())
+ return ParseStatus::Failure;
+ if (expect(AsmToken::Comma, ","))
+ return ParseStatus::Failure;
+ auto ExportName = expectIdent();
+ if (ExportName.empty())
+ return ParseStatus::Failure;
+ auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
+ WasmSym->setExportName(storeName(ExportName));
+ TOut.emitExportName(WasmSym, ExportName);
+ return expect(AsmToken::EndOfStatement, "EOL");
+ }
+
+ if (DirectiveID.getString() == ".import_module") {
+ auto SymName = expectIdent();
+ if (SymName.empty())
+ return ParseStatus::Failure;
+ if (expect(AsmToken::Comma, ","))
+ return ParseStatus::Failure;
+ auto ImportModule = expectIdent();
+ if (ImportModule.empty())
+ return ParseStatus::Failure;
+ auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
+ WasmSym->setImportModule(storeName(ImportModule));
+ TOut.emitImportModule(WasmSym, ImportModule);
+ return expect(AsmToken::EndOfStatement, "EOL");
+ }
+
+ if (DirectiveID.getString() == ".import_name") {
+ auto SymName = expectIdent();
+ if (SymName.empty())
+ return ParseStatus::Failure;
+ if (expect(AsmToken::Comma, ","))
+ return ParseStatus::Failure;
+ auto ImportName = expectIdent();
+ if (ImportName.empty())
+ return ParseStatus::Failure;
+ auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
+ WasmSym->setImportName(storeName(ImportName));
+ TOut.emitImportName(WasmSym, ImportName);
+ return expect(AsmToken::EndOfStatement, "EOL");
+ }
+
+ if (DirectiveID.getString() == ".tagtype") {
+ auto SymName = expectIdent();
+ if (SymName.empty())
+ return ParseStatus::Failure;
+ auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
+ auto Signature = std::make_unique<wasm::WasmSignature>();
+ if (parseRegTypeList(Signature->Params))
+ return ParseStatus::Failure;
+ WasmSym->setSignature(Signature.get());
+ addSignature(std::move(Signature));
+ WasmSym->setType(wasm::WASM_SYMBOL_TYPE_TAG);
+ TOut.emitTagType(WasmSym);
+ // TODO: backend also calls TOut.emitIndIdx, but that is not implemented.
+ return expect(AsmToken::EndOfStatement, "EOL");
+ }
+
+ if (DirectiveID.getString() == ".local") {
+ if (CurrentState != FunctionStart)
+ return error(".local directive should follow the start of a function: ",
+ Lexer.getTok());
+ SmallVector<wasm::ValType, 4> Locals;
+ if (parseRegTypeList(Locals))
+ return ParseStatus::Failure;
+ TC.localDecl(Locals);
+ TOut.emitLocal(Locals);
+ CurrentState = FunctionLocals;
+ return expect(AsmToken::EndOfStatement, "EOL");
+ }
+
+ if (DirectiveID.getString() == ".int8" ||
+ DirectiveID.getString() == ".int16" ||
+ DirectiveID.getString() == ".int32" ||
+ DirectiveID.getString() == ".int64") {
+ if (CheckDataSection())
+ return ParseStatus::Failure;
+ const MCExpr *Val;
+ SMLoc End;
+ if (Parser.parseExpression(Val, End))
+ return error("Cannot parse .int expression: ", Lexer.getTok());
+ size_t NumBits = 0;
+ DirectiveID.getString().drop_front(4).getAsInteger(10, NumBits);
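+      // (".intN".drop_front(4) leaves the bit width, e.g. "32" for ".int32",
+      // so `.int32 5` emits NumBits / 8 = 4 bytes.)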
+ Out.emitValue(Val, NumBits / 8, End);
+ return expect(AsmToken::EndOfStatement, "EOL");
+ }
+
+ if (DirectiveID.getString() == ".asciz") {
+ if (CheckDataSection())
+ return ParseStatus::Failure;
+ std::string S;
+ if (Parser.parseEscapedString(S))
+ return error("Cannot parse string constant: ", Lexer.getTok());
+ Out.emitBytes(StringRef(S.c_str(), S.length() + 1));
+ return expect(AsmToken::EndOfStatement, "EOL");
+ }
+
+ return ParseStatus::NoMatch; // We didn't process this directive.
+ }
+
+  // Called either when the first instruction is parsed or when the function
+  // ends.
+ void ensureLocals(MCStreamer &Out) {
+ if (CurrentState == FunctionStart) {
+ // We haven't seen a .local directive yet. The streamer requires locals to
+ // be encoded as a prelude to the instructions, so emit an empty list of
+ // locals here.
+ auto &TOut = reinterpret_cast<WebAssemblyTargetStreamer &>(
+ *Out.getTargetStreamer());
+ TOut.emitLocal(SmallVector<wasm::ValType, 0>());
+ CurrentState = FunctionLocals;
+ }
+ }
+
+ bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned & /*Opcode*/,
+ OperandVector &Operands, MCStreamer &Out,
+ uint64_t &ErrorInfo,
+ bool MatchingInlineAsm) override {
+ MCInst Inst;
+ Inst.setLoc(IDLoc);
+ FeatureBitset MissingFeatures;
+ unsigned MatchResult = MatchInstructionImpl(
+ Operands, Inst, ErrorInfo, MissingFeatures, MatchingInlineAsm);
+ switch (MatchResult) {
+ case Match_Success: {
+ ensureLocals(Out);
+ // Fix unknown p2align operands.
+ auto Align = WebAssembly::GetDefaultP2AlignAny(Inst.getOpcode());
+ if (Align != -1U) {
+ auto &Op0 = Inst.getOperand(0);
+ if (Op0.getImm() == -1)
+ Op0.setImm(Align);
+ }
+ if (is64) {
+ // Upgrade 32-bit loads/stores to 64-bit. These mostly differ by having
+      // an offset64 arg instead of offset32, but to the assembler matcher
+      // they're both immediates, so the matcher cannot distinguish them;
+      // upgrade the opcode here instead.
+ auto Opc64 = WebAssembly::getWasm64Opcode(
+ static_cast<uint16_t>(Inst.getOpcode()));
+ if (Opc64 >= 0) {
+ Inst.setOpcode(Opc64);
+ }
+ }
+ if (!SkipTypeCheck && TC.typeCheck(IDLoc, Inst, Operands))
+ return true;
+ Out.emitInstruction(Inst, getSTI());
+ if (CurrentState == EndFunction) {
+ onEndOfFunction(IDLoc);
+ } else {
+ CurrentState = Instructions;
+ }
+ return false;
+ }
+ case Match_MissingFeature: {
+ assert(MissingFeatures.count() > 0 && "Expected missing features");
+ SmallString<128> Message;
+ raw_svector_ostream OS(Message);
+ OS << "instruction requires:";
+ for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
+ if (MissingFeatures.test(i))
+ OS << ' ' << getSubtargetFeatureName(i);
+ return Parser.Error(IDLoc, Message);
+ }
+ case Match_MnemonicFail:
+ return Parser.Error(IDLoc, "invalid instruction");
+ case Match_NearMisses:
+ return Parser.Error(IDLoc, "ambiguous instruction");
+ case Match_InvalidTiedOperand:
+ case Match_InvalidOperand: {
+ SMLoc ErrorLoc = IDLoc;
+ if (ErrorInfo != ~0ULL) {
+ if (ErrorInfo >= Operands.size())
+ return Parser.Error(IDLoc, "too few operands for instruction");
+ ErrorLoc = Operands[ErrorInfo]->getStartLoc();
+ if (ErrorLoc == SMLoc())
+ ErrorLoc = IDLoc;
+ }
+ return Parser.Error(ErrorLoc, "invalid operand for instruction");
+ }
+ }
+ llvm_unreachable("Implement any new match types added!");
+ }
+
+ void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) override {
+ // Code below only applies to labels in text sections.
+ auto CWS = cast<MCSectionWasm>(getStreamer().getCurrentSection().first);
+ if (!CWS || !CWS->getKind().isText())
+ return;
+
+ auto WasmSym = cast<MCSymbolWasm>(Symbol);
+ // Unlike other targets, we don't allow data in text sections (labels
+ // declared with .type @object).
+ if (WasmSym->getType() == wasm::WASM_SYMBOL_TYPE_DATA) {
+ Parser.Error(IDLoc,
+ "Wasm doesn\'t support data symbols in text sections");
+ return;
+ }
+
+ // Start a new section for the next function automatically, since our
+ // object writer expects each function to have its own section. This way
+    // the user can't forget this "convention".
+ auto SymName = Symbol->getName();
+ if (SymName.starts_with(".L"))
+ return; // Local Symbol.
+
+ // TODO: If the user explicitly creates a new function section, we ignore
+ // its name when we create this one. It would be nice to honor their
+ // choice, while still ensuring that we create one if they forget.
+ // (that requires coordination with WasmAsmParser::parseSectionDirective)
+ auto SecName = ".text." + SymName;
+
+ auto *Group = CWS->getGroup();
+ // If the current section is a COMDAT, also set the flag on the symbol.
+ // TODO: Currently the only place that the symbols' comdat flag matters is
+ // for importing comdat functions. But there's no way to specify that in
+ // assembly currently.
+ if (Group)
+ WasmSym->setComdat(true);
+ auto *WS =
+ getContext().getWasmSection(SecName, SectionKind::getText(), 0, Group,
+ MCContext::GenericSectionID, nullptr);
+ getStreamer().switchSection(WS);
+ // Also generate DWARF for this section if requested.
+ if (getContext().getGenDwarfForAssembly())
+ getContext().addGenDwarfSection(WS);
+
+ if (WasmSym->isFunction()) {
+ // We give the location of the label (IDLoc) here, because otherwise the
+ // lexer's next location will be used, which can be confusing. For
+ // example:
+ //
+ // test0: ; This function does not end properly
+ // ...
+ //
+      // test1: ; We would like to point to this line for the error,
+      // ...   ; not to this line, which can contain any instruction.
+ ensureEmptyNestingStack(IDLoc);
+ CurrentState = FunctionLabel;
+ LastFunctionLabel = Symbol;
+ push(Function);
+ }
+ }
+
+ void onEndOfFunction(SMLoc ErrorLoc) {
+ if (!SkipTypeCheck)
+ TC.endOfFunction(ErrorLoc);
+ // Reset the type checker state.
+ TC.Clear();
+ }
+
+ void onEndOfFile() override { ensureEmptyNestingStack(); }
+};
+} // end anonymous namespace
+
+// Force static initialization.
+extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeWebAssemblyAsmParser() {
+ RegisterMCAsmParser<WebAssemblyAsmParser> X(getTheWebAssemblyTarget32());
+ RegisterMCAsmParser<WebAssemblyAsmParser> Y(getTheWebAssemblyTarget64());
+}
+
+#define GET_REGISTER_MATCHER
+#define GET_SUBTARGET_FEATURE_NAME
+#define GET_MATCHER_IMPLEMENTATION
+#include "WebAssemblyGenAsmMatcher.inc"
+
+StringRef GetMnemonic(unsigned Opc) {
+ // FIXME: linear search!
+ for (auto &ME : MatchTable0) {
+ if (ME.Opcode == Opc) {
+ return ME.getMnemonic();
+ }
+ }
+ assert(false && "mnemonic not found");
+ return StringRef();
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
new file mode 100644
index 000000000000..69466667e45a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
@@ -0,0 +1,420 @@
+//==- WebAssemblyAsmTypeCheck.cpp - Assembler for WebAssembly -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is part of the WebAssembly Assembler.
+///
+/// It contains the type checker that validates the instructions of a parsed
+/// .s file against WebAssembly's value-stack typing rules.
+///
+//===----------------------------------------------------------------------===//
+
+#include "AsmParser/WebAssemblyAsmTypeCheck.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "MCTargetDesc/WebAssemblyMCTypeUtilities.h"
+#include "MCTargetDesc/WebAssemblyTargetStreamer.h"
+#include "TargetInfo/WebAssemblyTargetInfo.h"
+#include "WebAssembly.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
+#include "llvm/MC/MCSectionWasm.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCSymbolWasm.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/SourceMgr.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-asm-parser"
+
+extern StringRef GetMnemonic(unsigned Opc);
+
+namespace llvm {
+
+WebAssemblyAsmTypeCheck::WebAssemblyAsmTypeCheck(MCAsmParser &Parser,
+ const MCInstrInfo &MII,
+ bool is64)
+ : Parser(Parser), MII(MII), is64(is64) {}
+
+void WebAssemblyAsmTypeCheck::funcDecl(const wasm::WasmSignature &Sig) {
+ LocalTypes.assign(Sig.Params.begin(), Sig.Params.end());
+ ReturnTypes.assign(Sig.Returns.begin(), Sig.Returns.end());
+ BrStack.emplace_back(Sig.Returns.begin(), Sig.Returns.end());
+}
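+
+// The function itself is the outermost branch target, so its return types
+// seed BrStack above: e.g. (a hedged illustration) in a function typed
+// () -> (i32), a top-level `br 0` must have an i32 on top of the value stack.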
+
+void WebAssemblyAsmTypeCheck::localDecl(
+ const SmallVectorImpl<wasm::ValType> &Locals) {
+ LocalTypes.insert(LocalTypes.end(), Locals.begin(), Locals.end());
+}
+
+void WebAssemblyAsmTypeCheck::dumpTypeStack(Twine Msg) {
+ LLVM_DEBUG({
+ std::string s;
+ for (auto VT : Stack) {
+ s += WebAssembly::typeToString(VT);
+ s += " ";
+ }
+ dbgs() << Msg << s << '\n';
+ });
+}
+
+bool WebAssemblyAsmTypeCheck::typeError(SMLoc ErrorLoc, const Twine &Msg) {
+  // Once you get one type error in a function, it will likely trigger more,
+  // mostly unhelpful ones, so only the first is reported.
+ if (TypeErrorThisFunction)
+ return true;
+ // If we're currently in unreachable code, we suppress errors completely.
+ if (Unreachable)
+ return false;
+ TypeErrorThisFunction = true;
+ dumpTypeStack("current stack: ");
+ return Parser.Error(ErrorLoc, Msg);
+}
+
+bool WebAssemblyAsmTypeCheck::popType(SMLoc ErrorLoc,
+ std::optional<wasm::ValType> EVT) {
+ if (Stack.empty()) {
+ return typeError(ErrorLoc,
+ EVT ? StringRef("empty stack while popping ") +
+ WebAssembly::typeToString(*EVT)
+ : StringRef("empty stack while popping value"));
+ }
+ auto PVT = Stack.pop_back_val();
+ if (EVT && *EVT != PVT) {
+ return typeError(ErrorLoc,
+ StringRef("popped ") + WebAssembly::typeToString(PVT) +
+ ", expected " + WebAssembly::typeToString(*EVT));
+ }
+ return false;
+}
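+
+// e.g. (hedged) after `i32.const 1`, popType(Loc, wasm::ValType::I64) would
+// report "popped i32, expected i64".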
+
+bool WebAssemblyAsmTypeCheck::popRefType(SMLoc ErrorLoc) {
+ if (Stack.empty()) {
+ return typeError(ErrorLoc, StringRef("empty stack while popping reftype"));
+ }
+ auto PVT = Stack.pop_back_val();
+ if (!WebAssembly::isRefType(PVT)) {
+ return typeError(ErrorLoc, StringRef("popped ") +
+ WebAssembly::typeToString(PVT) +
+ ", expected reftype");
+ }
+ return false;
+}
+
+bool WebAssemblyAsmTypeCheck::getLocal(SMLoc ErrorLoc, const MCInst &Inst,
+ wasm::ValType &Type) {
+ auto Local = static_cast<size_t>(Inst.getOperand(0).getImm());
+ if (Local >= LocalTypes.size())
+ return typeError(ErrorLoc, StringRef("no local type specified for index ") +
+ std::to_string(Local));
+ Type = LocalTypes[Local];
+ return false;
+}
+
+static std::optional<std::string>
+checkStackTop(const SmallVectorImpl<wasm::ValType> &ExpectedStackTop,
+ const SmallVectorImpl<wasm::ValType> &Got) {
+ for (size_t I = 0; I < ExpectedStackTop.size(); I++) {
+ auto EVT = ExpectedStackTop[I];
+ auto PVT = Got[Got.size() - ExpectedStackTop.size() + I];
+ if (PVT != EVT)
+ return std::string{"got "} + WebAssembly::typeToString(PVT) +
+ ", expected " + WebAssembly::typeToString(EVT);
+ }
+ return std::nullopt;
+}
+
+bool WebAssemblyAsmTypeCheck::checkBr(SMLoc ErrorLoc, size_t Level) {
+ if (Level >= BrStack.size())
+ return typeError(ErrorLoc,
+ StringRef("br: invalid depth ") + std::to_string(Level));
+ const SmallVector<wasm::ValType, 4> &Expected =
+ BrStack[BrStack.size() - Level - 1];
+ if (Expected.size() > Stack.size())
+ return typeError(ErrorLoc, "br: insufficient values on the type stack");
+ auto IsStackTopInvalid = checkStackTop(Expected, Stack);
+ if (IsStackTopInvalid)
+ return typeError(ErrorLoc, "br " + IsStackTopInvalid.value());
+ return false;
+}
+
+bool WebAssemblyAsmTypeCheck::checkEnd(SMLoc ErrorLoc, bool PopVals) {
+ if (!PopVals)
+ BrStack.pop_back();
+ if (LastSig.Returns.size() > Stack.size())
+ return typeError(ErrorLoc, "end: insufficient values on the type stack");
+
+ if (PopVals) {
+ for (auto VT : llvm::reverse(LastSig.Returns)) {
+ if (popType(ErrorLoc, VT))
+ return true;
+ }
+ return false;
+ }
+
+ auto IsStackTopInvalid = checkStackTop(LastSig.Returns, Stack);
+ if (IsStackTopInvalid)
+ return typeError(ErrorLoc, "end " + IsStackTopInvalid.value());
+ return false;
+}
+
+bool WebAssemblyAsmTypeCheck::checkSig(SMLoc ErrorLoc,
+ const wasm::WasmSignature &Sig) {
+ for (auto VT : llvm::reverse(Sig.Params))
+ if (popType(ErrorLoc, VT))
+ return true;
+ Stack.insert(Stack.end(), Sig.Returns.begin(), Sig.Returns.end());
+ return false;
+}
+
+bool WebAssemblyAsmTypeCheck::getSymRef(SMLoc ErrorLoc, const MCInst &Inst,
+ const MCSymbolRefExpr *&SymRef) {
+ auto Op = Inst.getOperand(0);
+ if (!Op.isExpr())
+ return typeError(ErrorLoc, StringRef("expected expression operand"));
+ SymRef = dyn_cast<MCSymbolRefExpr>(Op.getExpr());
+ if (!SymRef)
+ return typeError(ErrorLoc, StringRef("expected symbol operand"));
+ return false;
+}
+
+bool WebAssemblyAsmTypeCheck::getGlobal(SMLoc ErrorLoc, const MCInst &Inst,
+ wasm::ValType &Type) {
+ const MCSymbolRefExpr *SymRef;
+ if (getSymRef(ErrorLoc, Inst, SymRef))
+ return true;
+ auto WasmSym = cast<MCSymbolWasm>(&SymRef->getSymbol());
+ switch (WasmSym->getType().value_or(wasm::WASM_SYMBOL_TYPE_DATA)) {
+ case wasm::WASM_SYMBOL_TYPE_GLOBAL:
+ Type = static_cast<wasm::ValType>(WasmSym->getGlobalType().Type);
+ break;
+ case wasm::WASM_SYMBOL_TYPE_FUNCTION:
+ case wasm::WASM_SYMBOL_TYPE_DATA:
+ switch (SymRef->getKind()) {
+ case MCSymbolRefExpr::VK_GOT:
+ case MCSymbolRefExpr::VK_WASM_GOT_TLS:
+ Type = is64 ? wasm::ValType::I64 : wasm::ValType::I32;
+ return false;
+ default:
+ break;
+ }
+ [[fallthrough]];
+ default:
+ return typeError(ErrorLoc, StringRef("symbol ") + WasmSym->getName() +
+ " missing .globaltype");
+ }
+ return false;
+}
+
+bool WebAssemblyAsmTypeCheck::getTable(SMLoc ErrorLoc, const MCInst &Inst,
+ wasm::ValType &Type) {
+ const MCSymbolRefExpr *SymRef;
+ if (getSymRef(ErrorLoc, Inst, SymRef))
+ return true;
+ auto WasmSym = cast<MCSymbolWasm>(&SymRef->getSymbol());
+ if (WasmSym->getType().value_or(wasm::WASM_SYMBOL_TYPE_DATA) !=
+ wasm::WASM_SYMBOL_TYPE_TABLE)
+ return typeError(ErrorLoc, StringRef("symbol ") + WasmSym->getName() +
+ " missing .tabletype");
+ Type = static_cast<wasm::ValType>(WasmSym->getTableType().ElemType);
+ return false;
+}
+
+bool WebAssemblyAsmTypeCheck::endOfFunction(SMLoc ErrorLoc) {
+ // Check the return types.
+ for (auto RVT : llvm::reverse(ReturnTypes)) {
+ if (popType(ErrorLoc, RVT))
+ return true;
+ }
+ if (!Stack.empty()) {
+ return typeError(ErrorLoc, std::to_string(Stack.size()) +
+ " superfluous return values");
+ }
+ Unreachable = true;
+ return false;
+}
+
+bool WebAssemblyAsmTypeCheck::typeCheck(SMLoc ErrorLoc, const MCInst &Inst,
+ OperandVector &Operands) {
+ auto Opc = Inst.getOpcode();
+ auto Name = GetMnemonic(Opc);
+ dumpTypeStack("typechecking " + Name + ": ");
+ wasm::ValType Type;
+ if (Name == "local.get") {
+ if (getLocal(Operands[1]->getStartLoc(), Inst, Type))
+ return true;
+ Stack.push_back(Type);
+ } else if (Name == "local.set") {
+ if (getLocal(Operands[1]->getStartLoc(), Inst, Type))
+ return true;
+ if (popType(ErrorLoc, Type))
+ return true;
+ } else if (Name == "local.tee") {
+ if (getLocal(Operands[1]->getStartLoc(), Inst, Type))
+ return true;
+ if (popType(ErrorLoc, Type))
+ return true;
+ Stack.push_back(Type);
+ } else if (Name == "global.get") {
+ if (getGlobal(Operands[1]->getStartLoc(), Inst, Type))
+ return true;
+ Stack.push_back(Type);
+ } else if (Name == "global.set") {
+ if (getGlobal(Operands[1]->getStartLoc(), Inst, Type))
+ return true;
+ if (popType(ErrorLoc, Type))
+ return true;
+ } else if (Name == "table.get") {
+ if (getTable(Operands[1]->getStartLoc(), Inst, Type))
+ return true;
+ if (popType(ErrorLoc, wasm::ValType::I32))
+ return true;
+ Stack.push_back(Type);
+ } else if (Name == "table.set") {
+ if (getTable(Operands[1]->getStartLoc(), Inst, Type))
+ return true;
+ if (popType(ErrorLoc, Type))
+ return true;
+ if (popType(ErrorLoc, wasm::ValType::I32))
+ return true;
+ } else if (Name == "table.fill") {
+ if (getTable(Operands[1]->getStartLoc(), Inst, Type))
+ return true;
+ if (popType(ErrorLoc, wasm::ValType::I32))
+ return true;
+ if (popType(ErrorLoc, Type))
+ return true;
+ if (popType(ErrorLoc, wasm::ValType::I32))
+ return true;
+ } else if (Name == "memory.fill") {
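+    // Bulk memory ops use the pointer width for addresses and lengths, so
+    // memory.fill is (dest:addr, value:i32, len:addr), popped in reverse.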
+ Type = is64 ? wasm::ValType::I64 : wasm::ValType::I32;
+ if (popType(ErrorLoc, Type))
+ return true;
+ if (popType(ErrorLoc, wasm::ValType::I32))
+ return true;
+ if (popType(ErrorLoc, Type))
+ return true;
+ } else if (Name == "memory.copy") {
+ Type = is64 ? wasm::ValType::I64 : wasm::ValType::I32;
+ if (popType(ErrorLoc, Type))
+ return true;
+ if (popType(ErrorLoc, Type))
+ return true;
+ if (popType(ErrorLoc, Type))
+ return true;
+ } else if (Name == "memory.init") {
+ Type = is64 ? wasm::ValType::I64 : wasm::ValType::I32;
+ if (popType(ErrorLoc, wasm::ValType::I32))
+ return true;
+ if (popType(ErrorLoc, wasm::ValType::I32))
+ return true;
+ if (popType(ErrorLoc, Type))
+ return true;
+ } else if (Name == "drop") {
+ if (popType(ErrorLoc, {}))
+ return true;
+ } else if (Name == "try" || Name == "block" || Name == "loop" ||
+ Name == "if") {
+ if (Name == "if" && popType(ErrorLoc, wasm::ValType::I32))
+ return true;
+ if (Name == "loop")
+ BrStack.emplace_back(LastSig.Params.begin(), LastSig.Params.end());
+ else
+ BrStack.emplace_back(LastSig.Returns.begin(), LastSig.Returns.end());
+ } else if (Name == "end_block" || Name == "end_loop" || Name == "end_if" ||
+ Name == "else" || Name == "end_try" || Name == "catch" ||
+ Name == "catch_all" || Name == "delegate") {
+ if (checkEnd(ErrorLoc,
+ Name == "else" || Name == "catch" || Name == "catch_all"))
+ return true;
+ Unreachable = false;
+ if (Name == "catch") {
+ const MCSymbolRefExpr *SymRef;
+ if (getSymRef(Operands[1]->getStartLoc(), Inst, SymRef))
+ return true;
+ const auto *WasmSym = cast<MCSymbolWasm>(&SymRef->getSymbol());
+ const auto *Sig = WasmSym->getSignature();
+ if (!Sig || WasmSym->getType() != wasm::WASM_SYMBOL_TYPE_TAG)
+ return typeError(Operands[1]->getStartLoc(), StringRef("symbol ") +
+ WasmSym->getName() +
+ " missing .tagtype");
+      // A 'catch' instruction pushes values whose types are specified in the
+      // tag's "params" part.
+ Stack.insert(Stack.end(), Sig->Params.begin(), Sig->Params.end());
+ }
+ } else if (Name == "br") {
+ const MCOperand &Operand = Inst.getOperand(0);
+ if (!Operand.isImm())
+ return false;
+ if (checkBr(ErrorLoc, static_cast<size_t>(Operand.getImm())))
+ return true;
+ } else if (Name == "return") {
+ if (endOfFunction(ErrorLoc))
+ return true;
+ } else if (Name == "call_indirect" || Name == "return_call_indirect") {
+ // Function value.
+ if (popType(ErrorLoc, wasm::ValType::I32))
+ return true;
+ if (checkSig(ErrorLoc, LastSig))
+ return true;
+ if (Name == "return_call_indirect" && endOfFunction(ErrorLoc))
+ return true;
+ } else if (Name == "call" || Name == "return_call") {
+ const MCSymbolRefExpr *SymRef;
+ if (getSymRef(Operands[1]->getStartLoc(), Inst, SymRef))
+ return true;
+ auto WasmSym = cast<MCSymbolWasm>(&SymRef->getSymbol());
+ auto Sig = WasmSym->getSignature();
+ if (!Sig || WasmSym->getType() != wasm::WASM_SYMBOL_TYPE_FUNCTION)
+ return typeError(Operands[1]->getStartLoc(), StringRef("symbol ") +
+ WasmSym->getName() +
+ " missing .functype");
+ if (checkSig(ErrorLoc, *Sig))
+ return true;
+ if (Name == "return_call" && endOfFunction(ErrorLoc))
+ return true;
+ } else if (Name == "unreachable") {
+ Unreachable = true;
+ } else if (Name == "ref.is_null") {
+ if (popRefType(ErrorLoc))
+ return true;
+ Stack.push_back(wasm::ValType::I32);
+ } else {
+ // The current instruction is a stack instruction which doesn't have
+ // explicit operands that indicate push/pop types, so we get those from
+ // the register version of the same instruction.
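+    // For example, i32.add's register form is (i32 def, i32 use, i32 use),
+    // so here we pop two i32s and then push one i32.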
+ auto RegOpc = WebAssembly::getRegisterOpcode(Opc);
+ assert(RegOpc != -1 && "Failed to get register version of MC instruction");
+ const auto &II = MII.get(RegOpc);
+ // First pop all the uses off the stack and check them.
+ for (unsigned I = II.getNumOperands(); I > II.getNumDefs(); I--) {
+ const auto &Op = II.operands()[I - 1];
+ if (Op.OperandType == MCOI::OPERAND_REGISTER) {
+ auto VT = WebAssembly::regClassToValType(Op.RegClass);
+ if (popType(ErrorLoc, VT))
+ return true;
+ }
+ }
+ // Now push all the defs onto the stack.
+ for (unsigned I = 0; I < II.getNumDefs(); I++) {
+ const auto &Op = II.operands()[I];
+ assert(Op.OperandType == MCOI::OPERAND_REGISTER && "Register expected");
+ auto VT = WebAssembly::regClassToValType(Op.RegClass);
+ Stack.push_back(VT);
+ }
+ }
+ return false;
+}
+
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h
new file mode 100644
index 000000000000..6fa95c392975
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h
@@ -0,0 +1,75 @@
+//==- WebAssemblyAsmTypeCheck.h - Assembler for WebAssembly -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is part of the WebAssembly Assembler.
+///
+/// It contains the type checker used to validate the instructions of a
+/// parsed .s file before they become MCInsts.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_ASMPARSER_TYPECHECK_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_ASMPARSER_TYPECHECK_H
+
+#include "llvm/BinaryFormat/Wasm.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
+#include "llvm/MC/MCSymbol.h"
+
+namespace llvm {
+
+class WebAssemblyAsmTypeCheck final {
+ MCAsmParser &Parser;
+ const MCInstrInfo &MII;
+
+ SmallVector<wasm::ValType, 8> Stack;
+ SmallVector<SmallVector<wasm::ValType, 4>, 8> BrStack;
+ SmallVector<wasm::ValType, 16> LocalTypes;
+ SmallVector<wasm::ValType, 4> ReturnTypes;
+ wasm::WasmSignature LastSig;
+ bool TypeErrorThisFunction = false;
+ bool Unreachable = false;
+ bool is64;
+
+ void dumpTypeStack(Twine Msg);
+ bool typeError(SMLoc ErrorLoc, const Twine &Msg);
+ bool popType(SMLoc ErrorLoc, std::optional<wasm::ValType> EVT);
+ bool popRefType(SMLoc ErrorLoc);
+ bool getLocal(SMLoc ErrorLoc, const MCInst &Inst, wasm::ValType &Type);
+ bool checkEnd(SMLoc ErrorLoc, bool PopVals = false);
+ bool checkBr(SMLoc ErrorLoc, size_t Level);
+ bool checkSig(SMLoc ErrorLoc, const wasm::WasmSignature &Sig);
+ bool getSymRef(SMLoc ErrorLoc, const MCInst &Inst,
+ const MCSymbolRefExpr *&SymRef);
+ bool getGlobal(SMLoc ErrorLoc, const MCInst &Inst, wasm::ValType &Type);
+ bool getTable(SMLoc ErrorLoc, const MCInst &Inst, wasm::ValType &Type);
+
+public:
+ WebAssemblyAsmTypeCheck(MCAsmParser &Parser, const MCInstrInfo &MII,
+ bool is64);
+
+ void funcDecl(const wasm::WasmSignature &Sig);
+ void localDecl(const SmallVectorImpl<wasm::ValType> &Locals);
+ void setLastSig(const wasm::WasmSignature &Sig) { LastSig = Sig; }
+ bool endOfFunction(SMLoc ErrorLoc);
+ bool typeCheck(SMLoc ErrorLoc, const MCInst &Inst, OperandVector &Operands);
+
+ void Clear() {
+ Stack.clear();
+ BrStack.clear();
+ LocalTypes.clear();
+ ReturnTypes.clear();
+ TypeErrorThisFunction = false;
+ Unreachable = false;
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_WEBASSEMBLY_ASMPARSER_TYPECHECK_H
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp
new file mode 100644
index 000000000000..ed7757be6615
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp
@@ -0,0 +1,302 @@
+//==- WebAssemblyDisassembler.cpp - Disassembler for WebAssembly -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is part of the WebAssembly Disassembler.
+///
+/// It contains code to translate the data produced by the decoder into
+/// MCInsts.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTypeUtilities.h"
+#include "TargetInfo/WebAssemblyTargetInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDecoderOps.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCSymbolWasm.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/LEB128.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-disassembler"
+
+using DecodeStatus = MCDisassembler::DecodeStatus;
+
+#include "WebAssemblyGenDisassemblerTables.inc"
+
+namespace {
+static constexpr int WebAssemblyInstructionTableSize = 256;
+
+class WebAssemblyDisassembler final : public MCDisassembler {
+ std::unique_ptr<const MCInstrInfo> MCII;
+
+ DecodeStatus getInstruction(MCInst &Instr, uint64_t &Size,
+ ArrayRef<uint8_t> Bytes, uint64_t Address,
+ raw_ostream &CStream) const override;
+ std::optional<DecodeStatus>
+ onSymbolStart(SymbolInfoTy &Symbol, uint64_t &Size, ArrayRef<uint8_t> Bytes,
+ uint64_t Address, raw_ostream &CStream) const override;
+
+public:
+ WebAssemblyDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx,
+ std::unique_ptr<const MCInstrInfo> MCII)
+ : MCDisassembler(STI, Ctx), MCII(std::move(MCII)) {}
+};
+} // end anonymous namespace
+
+static MCDisassembler *createWebAssemblyDisassembler(const Target &T,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ std::unique_ptr<const MCInstrInfo> MCII(T.createMCInstrInfo());
+ return new WebAssemblyDisassembler(STI, Ctx, std::move(MCII));
+}
+
+extern "C" LLVM_EXTERNAL_VISIBILITY void
+LLVMInitializeWebAssemblyDisassembler() {
+ // Register the disassembler for each target.
+ TargetRegistry::RegisterMCDisassembler(getTheWebAssemblyTarget32(),
+ createWebAssemblyDisassembler);
+ TargetRegistry::RegisterMCDisassembler(getTheWebAssemblyTarget64(),
+ createWebAssemblyDisassembler);
+}
+
+static int nextByte(ArrayRef<uint8_t> Bytes, uint64_t &Size) {
+ if (Size >= Bytes.size())
+ return -1;
+ auto V = Bytes[Size];
+ Size++;
+ return V;
+}
+
+static bool nextLEB(int64_t &Val, ArrayRef<uint8_t> Bytes, uint64_t &Size,
+ bool Signed) {
+ unsigned N = 0;
+ const char *Error = nullptr;
+ Val = Signed ? decodeSLEB128(Bytes.data() + Size, &N,
+ Bytes.data() + Bytes.size(), &Error)
+ : static_cast<int64_t>(decodeULEB128(Bytes.data() + Size, &N,
+ Bytes.data() + Bytes.size(),
+ &Error));
+ if (Error)
+ return false;
+ Size += N;
+ return true;
+}
+
+static bool parseLEBImmediate(MCInst &MI, uint64_t &Size,
+ ArrayRef<uint8_t> Bytes, bool Signed) {
+ int64_t Val;
+ if (!nextLEB(Val, Bytes, Size, Signed))
+ return false;
+ MI.addOperand(MCOperand::createImm(Val));
+ return true;
+}
+
+template <typename T>
+bool parseImmediate(MCInst &MI, uint64_t &Size, ArrayRef<uint8_t> Bytes) {
+ if (Size + sizeof(T) > Bytes.size())
+ return false;
+ T Val =
+ support::endian::read<T, llvm::endianness::little>(Bytes.data() + Size);
+ Size += sizeof(T);
+ if (std::is_floating_point<T>::value) {
+ MI.addOperand(
+ MCOperand::createDFPImm(bit_cast<uint64_t>(static_cast<double>(Val))));
+ } else {
+ MI.addOperand(MCOperand::createImm(static_cast<int64_t>(Val)));
+ }
+ return true;
+}
+
+std::optional<MCDisassembler::DecodeStatus>
+WebAssemblyDisassembler::onSymbolStart(SymbolInfoTy &Symbol, uint64_t &Size,
+ ArrayRef<uint8_t> Bytes,
+ uint64_t Address,
+ raw_ostream &CStream) const {
+ Size = 0;
+ if (Address == 0) {
+ // Start of a code section: we're parsing only the function count.
+ int64_t FunctionCount;
+ if (!nextLEB(FunctionCount, Bytes, Size, false))
+ return std::nullopt;
+ outs() << " # " << FunctionCount << " functions in section.";
+ } else {
+ // Parse the start of a single function.
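+    // A function body is encoded as: body_size:ULEB, local_entry_count:ULEB,
+    // then (count:ULEB, type) pairs, followed by the opcode stream.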
+ int64_t BodySize, LocalEntryCount;
+ if (!nextLEB(BodySize, Bytes, Size, false) ||
+ !nextLEB(LocalEntryCount, Bytes, Size, false))
+ return std::nullopt;
+ if (LocalEntryCount) {
+ outs() << " .local ";
+ for (int64_t I = 0; I < LocalEntryCount; I++) {
+ int64_t Count, Type;
+ if (!nextLEB(Count, Bytes, Size, false) ||
+ !nextLEB(Type, Bytes, Size, false))
+ return std::nullopt;
+ for (int64_t J = 0; J < Count; J++) {
+ if (I || J)
+ outs() << ", ";
+ outs() << WebAssembly::anyTypeToString(Type);
+ }
+ }
+ }
+ }
+ outs() << "\n";
+ return MCDisassembler::Success;
+}
+
+MCDisassembler::DecodeStatus WebAssemblyDisassembler::getInstruction(
+ MCInst &MI, uint64_t &Size, ArrayRef<uint8_t> Bytes, uint64_t /*Address*/,
+ raw_ostream &CS) const {
+ CommentStream = &CS;
+ Size = 0;
+ int Opc = nextByte(Bytes, Size);
+ if (Opc < 0)
+ return MCDisassembler::Fail;
+ const auto *WasmInst = &InstructionTable0[Opc];
+ // If this is a prefix byte, indirect to another table.
+ if (WasmInst->ET == ET_Prefix) {
+ WasmInst = nullptr;
+ // Linear search, so far only 2 entries.
+ for (auto PT = PrefixTable; PT->Table; PT++) {
+ if (PT->Prefix == Opc) {
+ WasmInst = PT->Table;
+ break;
+ }
+ }
+ if (!WasmInst)
+ return MCDisassembler::Fail;
+ int64_t PrefixedOpc;
+ if (!nextLEB(PrefixedOpc, Bytes, Size, false))
+ return MCDisassembler::Fail;
+ if (PrefixedOpc < 0 || PrefixedOpc >= WebAssemblyInstructionTableSize)
+ return MCDisassembler::Fail;
+ WasmInst += PrefixedOpc;
+ }
+ if (WasmInst->ET == ET_Unused)
+ return MCDisassembler::Fail;
+ // At this point we must have a valid instruction to decode.
+ assert(WasmInst->ET == ET_Instruction);
+ MI.setOpcode(WasmInst->Opcode);
+ // Parse any operands.
+ for (uint8_t OPI = 0; OPI < WasmInst->NumOperands; OPI++) {
+ auto OT = OperandTable[WasmInst->OperandStart + OPI];
+ switch (OT) {
+ // ULEB operands:
+ case WebAssembly::OPERAND_BASIC_BLOCK:
+ case WebAssembly::OPERAND_LOCAL:
+ case WebAssembly::OPERAND_GLOBAL:
+ case WebAssembly::OPERAND_FUNCTION32:
+ case WebAssembly::OPERAND_TABLE:
+ case WebAssembly::OPERAND_OFFSET32:
+ case WebAssembly::OPERAND_OFFSET64:
+ case WebAssembly::OPERAND_P2ALIGN:
+ case WebAssembly::OPERAND_TYPEINDEX:
+ case WebAssembly::OPERAND_TAG:
+ case MCOI::OPERAND_IMMEDIATE: {
+ if (!parseLEBImmediate(MI, Size, Bytes, false))
+ return MCDisassembler::Fail;
+ break;
+ }
+ // SLEB operands:
+ case WebAssembly::OPERAND_I32IMM:
+ case WebAssembly::OPERAND_I64IMM: {
+ if (!parseLEBImmediate(MI, Size, Bytes, true))
+ return MCDisassembler::Fail;
+ break;
+ }
+ // block_type operands:
+ case WebAssembly::OPERAND_SIGNATURE: {
+ int64_t Val;
+ uint64_t PrevSize = Size;
+ if (!nextLEB(Val, Bytes, Size, true))
+ return MCDisassembler::Fail;
+ if (Val < 0) {
+        // Negative values are single-septet value types or empty types.
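+        // (E.g. 0x7f is i32, 0x7e is i64, 0x7d is f32, 0x7c is f64, and 0x40
+        // is the empty block type; non-negative values index the type
+        // section.)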
+ if (Size != PrevSize + 1) {
+ MI.addOperand(
+ MCOperand::createImm(int64_t(WebAssembly::BlockType::Invalid)));
+ } else {
+ MI.addOperand(MCOperand::createImm(Val & 0x7f));
+ }
+ } else {
+ // We don't have access to the signature, so create a symbol without one
+ MCSymbol *Sym = getContext().createTempSymbol("typeindex", true);
+ auto *WasmSym = cast<MCSymbolWasm>(Sym);
+ WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION);
+ const MCExpr *Expr = MCSymbolRefExpr::create(
+ WasmSym, MCSymbolRefExpr::VK_WASM_TYPEINDEX, getContext());
+ MI.addOperand(MCOperand::createExpr(Expr));
+ }
+ break;
+ }
+ // FP operands.
+ case WebAssembly::OPERAND_F32IMM: {
+ if (!parseImmediate<float>(MI, Size, Bytes))
+ return MCDisassembler::Fail;
+ break;
+ }
+ case WebAssembly::OPERAND_F64IMM: {
+ if (!parseImmediate<double>(MI, Size, Bytes))
+ return MCDisassembler::Fail;
+ break;
+ }
+ // Vector lane operands (not LEB encoded).
+ case WebAssembly::OPERAND_VEC_I8IMM: {
+ if (!parseImmediate<uint8_t>(MI, Size, Bytes))
+ return MCDisassembler::Fail;
+ break;
+ }
+ case WebAssembly::OPERAND_VEC_I16IMM: {
+ if (!parseImmediate<uint16_t>(MI, Size, Bytes))
+ return MCDisassembler::Fail;
+ break;
+ }
+ case WebAssembly::OPERAND_VEC_I32IMM: {
+ if (!parseImmediate<uint32_t>(MI, Size, Bytes))
+ return MCDisassembler::Fail;
+ break;
+ }
+ case WebAssembly::OPERAND_VEC_I64IMM: {
+ if (!parseImmediate<uint64_t>(MI, Size, Bytes))
+ return MCDisassembler::Fail;
+ break;
+ }
+ case WebAssembly::OPERAND_BRLIST: {
+ int64_t TargetTableLen;
+ if (!nextLEB(TargetTableLen, Bytes, Size, false))
+ return MCDisassembler::Fail;
+ for (int64_t I = 0; I < TargetTableLen; I++) {
+ if (!parseLEBImmediate(MI, Size, Bytes, false))
+ return MCDisassembler::Fail;
+ }
+ // Default case.
+ if (!parseLEBImmediate(MI, Size, Bytes, false))
+ return MCDisassembler::Fail;
+ break;
+ }
+ case MCOI::OPERAND_REGISTER:
+ // The tablegen header currently does not have any register operands since
+ // we use only the stack (_S) instructions.
+      // If you hit this, it probably means there is a bad instruction
+      // definition in tablegen.
+ llvm_unreachable("Register operand in WebAssemblyDisassembler");
+ default:
+ llvm_unreachable("Unknown operand type in WebAssemblyDisassembler");
+ }
+ }
+ return MCDisassembler::Success;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp
new file mode 100644
index 000000000000..ffab67f8ab2b
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp
@@ -0,0 +1,129 @@
+//===-- WebAssemblyAsmBackend.cpp - WebAssembly Assembler Backend ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the WebAssemblyAsmBackend class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyFixupKinds.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCWasmObjectWriter.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+namespace {
+
+class WebAssemblyAsmBackend final : public MCAsmBackend {
+ bool Is64Bit;
+ bool IsEmscripten;
+
+public:
+ explicit WebAssemblyAsmBackend(bool Is64Bit, bool IsEmscripten)
+ : MCAsmBackend(llvm::endianness::little), Is64Bit(Is64Bit),
+ IsEmscripten(IsEmscripten) {}
+
+ unsigned getNumFixupKinds() const override {
+ return WebAssembly::NumTargetFixupKinds;
+ }
+
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
+
+ void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
+ const MCValue &Target, MutableArrayRef<char> Data,
+ uint64_t Value, bool IsPCRel,
+ const MCSubtargetInfo *STI) const override;
+
+ std::unique_ptr<MCObjectTargetWriter>
+ createObjectTargetWriter() const override;
+
+ // No instruction requires relaxation
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const override {
+ return false;
+ }
+
+ bool writeNopData(raw_ostream &OS, uint64_t Count,
+ const MCSubtargetInfo *STI) const override;
+};
+
+const MCFixupKindInfo &
+WebAssemblyAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
+ const static MCFixupKindInfo Infos[WebAssembly::NumTargetFixupKinds] = {
+ // This table *must* be in the order that the fixup_* kinds are defined in
+ // WebAssemblyFixupKinds.h.
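+      //
+      // Sizes are maximum padded LEB128 widths: up to 5 bytes encode a
+      // 32-bit value and up to 10 bytes a 64-bit value.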
+ //
+ // Name Offset (bits) Size (bits) Flags
+ {"fixup_sleb128_i32", 0, 5 * 8, 0},
+ {"fixup_sleb128_i64", 0, 10 * 8, 0},
+ {"fixup_uleb128_i32", 0, 5 * 8, 0},
+ {"fixup_uleb128_i64", 0, 10 * 8, 0},
+ };
+
+ if (Kind < FirstTargetFixupKind)
+ return MCAsmBackend::getFixupKindInfo(Kind);
+
+ assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
+ "Invalid kind!");
+ return Infos[Kind - FirstTargetFixupKind];
+}
+
+bool WebAssemblyAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
+ const MCSubtargetInfo *STI) const {
+ for (uint64_t I = 0; I < Count; ++I)
+ OS << char(WebAssembly::Nop);
+
+ return true;
+}
+
+void WebAssemblyAsmBackend::applyFixup(const MCAssembler &Asm,
+ const MCFixup &Fixup,
+ const MCValue &Target,
+ MutableArrayRef<char> Data,
+ uint64_t Value, bool IsPCRel,
+ const MCSubtargetInfo *STI) const {
+ const MCFixupKindInfo &Info = getFixupKindInfo(Fixup.getKind());
+ assert(Info.Flags == 0 && "WebAssembly does not use MCFixupKindInfo flags");
+
+ unsigned NumBytes = alignTo(Info.TargetSize, 8) / 8;
+ if (Value == 0)
+ return; // Doesn't change encoding.
+
+ // Shift the value into position.
+ Value <<= Info.TargetOffset;
+
+ unsigned Offset = Fixup.getOffset();
+ assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
+
+ // For each byte of the fragment that the fixup touches, mask in the
+ // bits from the fixup value.
+ for (unsigned I = 0; I != NumBytes; ++I)
+ Data[Offset + I] |= uint8_t((Value >> (I * 8)) & 0xff);
+}
+
+std::unique_ptr<MCObjectTargetWriter>
+WebAssemblyAsmBackend::createObjectTargetWriter() const {
+ return createWebAssemblyWasmObjectWriter(Is64Bit, IsEmscripten);
+}
+
+} // end anonymous namespace
+
+MCAsmBackend *llvm::createWebAssemblyAsmBackend(const Triple &TT) {
+ return new WebAssemblyAsmBackend(TT.isArch64Bit(), TT.isOSEmscripten());
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h
new file mode 100644
index 000000000000..92708dadd3e0
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h
@@ -0,0 +1,29 @@
+//=- WebAssemblyFixupKinds.h - WebAssembly Specific Fixup Entries -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYFIXUPKINDS_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYFIXUPKINDS_H
+
+#include "llvm/MC/MCFixup.h"
+
+namespace llvm {
+namespace WebAssembly {
+enum Fixups {
+ fixup_sleb128_i32 = FirstTargetFixupKind, // 32-bit signed
+ fixup_sleb128_i64, // 64-bit signed
+ fixup_uleb128_i32, // 32-bit unsigned
+ fixup_uleb128_i64, // 64-bit unsigned
+
+ // Marker
+ LastTargetFixupKind,
+ NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
+};
+} // end namespace WebAssembly
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp
new file mode 100644
index 000000000000..bf6d6dce1f8a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp
@@ -0,0 +1,368 @@
+//=- WebAssemblyInstPrinter.cpp - WebAssembly assembly instruction printing -=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Print MCInst instructions to wasm format.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyInstPrinter.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "MCTargetDesc/WebAssemblyMCTypeUtilities.h"
+#include "WebAssembly.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCSymbolWasm.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "asm-printer"
+
+#include "WebAssemblyGenAsmWriter.inc"
+
+WebAssemblyInstPrinter::WebAssemblyInstPrinter(const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI)
+ : MCInstPrinter(MAI, MII, MRI) {}
+
+void WebAssemblyInstPrinter::printRegName(raw_ostream &OS,
+ MCRegister Reg) const {
+ assert(Reg.id() != WebAssembly::UnusedReg);
+ // Note that there's an implicit local.get/local.set here!
+ OS << "$" << Reg.id();
+}
+
+void WebAssemblyInstPrinter::printInst(const MCInst *MI, uint64_t Address,
+ StringRef Annot,
+ const MCSubtargetInfo &STI,
+ raw_ostream &OS) {
+ switch (MI->getOpcode()) {
+ case WebAssembly::CALL_INDIRECT_S:
+ case WebAssembly::RET_CALL_INDIRECT_S: {
+    // A special case for call_indirect (and ret_call_indirect): if the table
+    // operand is a symbol, the order of the type and table operands is
+    // inverted in the text format relative to the binary format. Otherwise,
+    // if the table operand isn't a symbol, we have an MVP compilation unit,
+    // and the table shouldn't appear in the output.
+ OS << "\t";
+ OS << getMnemonic(MI).first;
+ OS << " ";
+
+ assert(MI->getNumOperands() == 2);
+ const unsigned TypeOperand = 0;
+ const unsigned TableOperand = 1;
+ if (MI->getOperand(TableOperand).isExpr()) {
+ printOperand(MI, TableOperand, OS);
+ OS << ", ";
+ } else {
+ assert(MI->getOperand(TableOperand).getImm() == 0);
+ }
+ printOperand(MI, TypeOperand, OS);
+ break;
+ }
+ default:
+ // Print the instruction (this uses the AsmStrings from the .td files).
+ printInstruction(MI, Address, OS);
+ break;
+ }
+
+ // Print any additional variadic operands.
+ const MCInstrDesc &Desc = MII.get(MI->getOpcode());
+ if (Desc.isVariadic()) {
+ if ((Desc.getNumOperands() == 0 && MI->getNumOperands() > 0) ||
+ Desc.variadicOpsAreDefs())
+ OS << "\t";
+ unsigned Start = Desc.getNumOperands();
+ unsigned NumVariadicDefs = 0;
+ if (Desc.variadicOpsAreDefs()) {
+      // The number of variadic defs is encoded in an immediate by MCInstLower.
+ NumVariadicDefs = MI->getOperand(0).getImm();
+ Start = 1;
+ }
+ bool NeedsComma = Desc.getNumOperands() > 0 && !Desc.variadicOpsAreDefs();
+ for (auto I = Start, E = MI->getNumOperands(); I < E; ++I) {
+ if (MI->getOpcode() == WebAssembly::CALL_INDIRECT &&
+ I - Start == NumVariadicDefs) {
+ // Skip type and table arguments when printing for tests.
+ ++I;
+ continue;
+ }
+ if (NeedsComma)
+ OS << ", ";
+ printOperand(MI, I, OS, I - Start < NumVariadicDefs);
+ NeedsComma = true;
+ }
+ }
+
+ // Print any added annotation.
+ printAnnotation(OS, Annot);
+
+ if (CommentStream) {
+ // Observe any effects on the control flow stack, for use in annotating
+ // control flow label references.
+ unsigned Opc = MI->getOpcode();
+ switch (Opc) {
+ default:
+ break;
+
+ case WebAssembly::LOOP:
+ case WebAssembly::LOOP_S:
+ printAnnotation(OS, "label" + utostr(ControlFlowCounter) + ':');
+ ControlFlowStack.push_back(std::make_pair(ControlFlowCounter++, true));
+ return;
+
+ case WebAssembly::BLOCK:
+ case WebAssembly::BLOCK_S:
+ ControlFlowStack.push_back(std::make_pair(ControlFlowCounter++, false));
+ return;
+
+ case WebAssembly::TRY:
+ case WebAssembly::TRY_S:
+ ControlFlowStack.push_back(std::make_pair(ControlFlowCounter, false));
+ TryStack.push_back(ControlFlowCounter++);
+ EHInstStack.push_back(TRY);
+ return;
+
+ case WebAssembly::END_LOOP:
+ case WebAssembly::END_LOOP_S:
+ if (ControlFlowStack.empty()) {
+ printAnnotation(OS, "End marker mismatch!");
+ } else {
+ ControlFlowStack.pop_back();
+ }
+ return;
+
+ case WebAssembly::END_BLOCK:
+ case WebAssembly::END_BLOCK_S:
+ if (ControlFlowStack.empty()) {
+ printAnnotation(OS, "End marker mismatch!");
+ } else {
+ printAnnotation(
+ OS, "label" + utostr(ControlFlowStack.pop_back_val().first) + ':');
+ }
+ return;
+
+ case WebAssembly::END_TRY:
+ case WebAssembly::END_TRY_S:
+ if (ControlFlowStack.empty() || EHInstStack.empty()) {
+ printAnnotation(OS, "End marker mismatch!");
+ } else {
+ printAnnotation(
+ OS, "label" + utostr(ControlFlowStack.pop_back_val().first) + ':');
+ EHInstStack.pop_back();
+ }
+ return;
+
+ case WebAssembly::CATCH:
+ case WebAssembly::CATCH_S:
+ case WebAssembly::CATCH_ALL:
+ case WebAssembly::CATCH_ALL_S:
+      // There can be multiple catch instructions for one try instruction, so
+      // we print the label only on the first 'catch'.
+ if (EHInstStack.empty()) {
+ printAnnotation(OS, "try-catch mismatch!");
+ } else if (EHInstStack.back() == CATCH_ALL) {
+ printAnnotation(OS, "catch/catch_all cannot occur after catch_all");
+ } else if (EHInstStack.back() == TRY) {
+ if (TryStack.empty()) {
+ printAnnotation(OS, "try-catch mismatch!");
+ } else {
+ printAnnotation(OS, "catch" + utostr(TryStack.pop_back_val()) + ':');
+ }
+ EHInstStack.pop_back();
+ if (Opc == WebAssembly::CATCH || Opc == WebAssembly::CATCH_S) {
+ EHInstStack.push_back(CATCH);
+ } else {
+ EHInstStack.push_back(CATCH_ALL);
+ }
+ }
+ return;
+
+ case WebAssembly::RETHROW:
+ case WebAssembly::RETHROW_S:
+ // 'rethrow' rethrows to the nearest enclosing catch scope, if any. If
+ // there's no enclosing catch scope, it throws up to the caller.
+ if (TryStack.empty()) {
+ printAnnotation(OS, "to caller");
+ } else {
+ printAnnotation(OS, "down to catch" + utostr(TryStack.back()));
+ }
+ return;
+
+ case WebAssembly::DELEGATE:
+ case WebAssembly::DELEGATE_S:
+ if (ControlFlowStack.empty() || TryStack.empty() || EHInstStack.empty()) {
+ printAnnotation(OS, "try-delegate mismatch!");
+ } else {
+ // 'delegate' is
+        // 1. A marker for the end-of-block label
+ // 2. A destination for throwing instructions
+ // 3. An instruction that itself rethrows to another 'catch'
+ assert(ControlFlowStack.back().first == TryStack.back());
+ std::string Label = "label/catch" +
+ utostr(ControlFlowStack.pop_back_val().first) +
+ ": ";
+ TryStack.pop_back();
+ EHInstStack.pop_back();
+ uint64_t Depth = MI->getOperand(0).getImm();
+ if (Depth >= ControlFlowStack.size()) {
+ Label += "to caller";
+ } else {
+ const auto &Pair = ControlFlowStack.rbegin()[Depth];
+ if (Pair.second)
+ printAnnotation(OS, "delegate cannot target a loop");
+ else
+ Label += "down to catch" + utostr(Pair.first);
+ }
+ printAnnotation(OS, Label);
+ }
+ return;
+ }
+
+ // Annotate any control flow label references.
+
+ unsigned NumFixedOperands = Desc.NumOperands;
+ SmallSet<uint64_t, 8> Printed;
+ for (unsigned I = 0, E = MI->getNumOperands(); I < E; ++I) {
+ // See if this operand denotes a basic block target.
+ if (I < NumFixedOperands) {
+ // A non-variable_ops operand, check its type.
+ if (Desc.operands()[I].OperandType != WebAssembly::OPERAND_BASIC_BLOCK)
+ continue;
+ } else {
+ // A variable_ops operand, which currently can be immediates (used in
+ // br_table) which are basic block targets, or for call instructions
+ // when using -wasm-keep-registers (in which case they are registers,
+ // and should not be processed).
+ if (!MI->getOperand(I).isImm())
+ continue;
+ }
+ uint64_t Depth = MI->getOperand(I).getImm();
+ if (!Printed.insert(Depth).second)
+ continue;
+ if (Depth >= ControlFlowStack.size()) {
+ printAnnotation(OS, "Invalid depth argument!");
+ } else {
+ const auto &Pair = ControlFlowStack.rbegin()[Depth];
+ printAnnotation(OS, utostr(Depth) + ": " +
+ (Pair.second ? "up" : "down") + " to label" +
+ utostr(Pair.first));
+ }
+ }
+ }
+}
+
+static std::string toString(const APFloat &FP) {
+ // Print NaNs with custom payloads specially.
+ if (FP.isNaN() && !FP.bitwiseIsEqual(APFloat::getQNaN(FP.getSemantics())) &&
+ !FP.bitwiseIsEqual(
+ APFloat::getQNaN(FP.getSemantics(), /*Negative=*/true))) {
+ APInt AI = FP.bitcastToAPInt();
+ return std::string(AI.isNegative() ? "-" : "") + "nan:0x" +
+ utohexstr(AI.getZExtValue() &
+ (AI.getBitWidth() == 32 ? INT64_C(0x007fffff)
+ : INT64_C(0x000fffffffffffff)),
+ /*LowerCase=*/true);
+ }
+
+ // Use C99's hexadecimal floating-point representation.
+ static const size_t BufBytes = 128;
+ char Buf[BufBytes];
+ auto Written = FP.convertToHexString(
+ Buf, /*HexDigits=*/0, /*UpperCase=*/false, APFloat::rmNearestTiesToEven);
+ (void)Written;
+ assert(Written != 0);
+ assert(Written < BufBytes);
+ return Buf;
+}
+
+void WebAssemblyInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O, bool IsVariadicDef) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isReg()) {
+ const MCInstrDesc &Desc = MII.get(MI->getOpcode());
+ unsigned WAReg = Op.getReg();
+ if (int(WAReg) >= 0)
+ printRegName(O, WAReg);
+ else if (OpNo >= Desc.getNumDefs() && !IsVariadicDef)
+ O << "$pop" << WebAssembly::getWARegStackId(WAReg);
+ else if (WAReg != WebAssembly::UnusedReg)
+ O << "$push" << WebAssembly::getWARegStackId(WAReg);
+ else
+ O << "$drop";
+ // Add a '=' suffix if this is a def.
+ if (OpNo < MII.get(MI->getOpcode()).getNumDefs() || IsVariadicDef)
+ O << '=';
+ } else if (Op.isImm()) {
+ O << Op.getImm();
+ } else if (Op.isSFPImm()) {
+ O << ::toString(APFloat(APFloat::IEEEsingle(), APInt(32, Op.getSFPImm())));
+ } else if (Op.isDFPImm()) {
+ O << ::toString(APFloat(APFloat::IEEEdouble(), APInt(64, Op.getDFPImm())));
+ } else {
+ assert(Op.isExpr() && "unknown operand kind in printOperand");
+ // call_indirect instructions have a TYPEINDEX operand that we print
+ // as a signature here, such that the assembler can recover this
+ // information.
+ auto SRE = static_cast<const MCSymbolRefExpr *>(Op.getExpr());
+ if (SRE->getKind() == MCSymbolRefExpr::VK_WASM_TYPEINDEX) {
+ auto &Sym = static_cast<const MCSymbolWasm &>(SRE->getSymbol());
+ O << WebAssembly::signatureToString(Sym.getSignature());
+ } else {
+ Op.getExpr()->print(O, &MAI);
+ }
+ }
+}
+
+void WebAssemblyInstPrinter::printBrList(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ O << "{";
+ for (unsigned I = OpNo, E = MI->getNumOperands(); I != E; ++I) {
+ if (I != OpNo)
+ O << ", ";
+ O << MI->getOperand(I).getImm();
+ }
+ O << "}";
+}
+
+void WebAssemblyInstPrinter::printWebAssemblyP2AlignOperand(const MCInst *MI,
+ unsigned OpNo,
+ raw_ostream &O) {
+ int64_t Imm = MI->getOperand(OpNo).getImm();
+ if (Imm == WebAssembly::GetDefaultP2Align(MI->getOpcode()))
+ return;
+ O << ":p2align=" << Imm;
+}
+
+void WebAssemblyInstPrinter::printWebAssemblySignatureOperand(const MCInst *MI,
+ unsigned OpNo,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isImm()) {
+ auto Imm = static_cast<unsigned>(Op.getImm());
+ if (Imm != wasm::WASM_TYPE_NORESULT)
+ O << WebAssembly::anyTypeToString(Imm);
+ } else {
+ auto Expr = cast<MCSymbolRefExpr>(Op.getExpr());
+ auto *Sym = cast<MCSymbolWasm>(&Expr->getSymbol());
+ if (Sym->getSignature()) {
+ O << WebAssembly::signatureToString(Sym->getSignature());
+ } else {
+ // Disassembler does not currently produce a signature
+ O << "unknown_type";
+ }
+ }
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.h
new file mode 100644
index 000000000000..51e4d3656ba4
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.h
@@ -0,0 +1,59 @@
+// WebAssemblyInstPrinter.h - Print wasm MCInst to assembly syntax -*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This class prints a WebAssembly MCInst to wasm file syntax.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_INSTPRINTER_WEBASSEMBLYINSTPRINTER_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_INSTPRINTER_WEBASSEMBLYINSTPRINTER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/BinaryFormat/Wasm.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/MC/MCInstPrinter.h"
+
+namespace llvm {
+
+class MCSubtargetInfo;
+
+class WebAssemblyInstPrinter final : public MCInstPrinter {
+ uint64_t ControlFlowCounter = 0;
+ SmallVector<std::pair<uint64_t, bool>, 4> ControlFlowStack;
+ SmallVector<uint64_t, 4> TryStack;
+
+ enum EHInstKind { TRY, CATCH, CATCH_ALL };
+ SmallVector<EHInstKind, 4> EHInstStack;
+
+public:
+ WebAssemblyInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI);
+
+ void printRegName(raw_ostream &OS, MCRegister Reg) const override;
+ void printInst(const MCInst *MI, uint64_t Address, StringRef Annot,
+ const MCSubtargetInfo &STI, raw_ostream &OS) override;
+
+  // Used by tblgen code.
+ void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O,
+ bool IsVariadicDef = false);
+ void printBrList(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printWebAssemblyP2AlignOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O);
+ void printWebAssemblySignatureOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O);
+
+ // Autogenerated by tblgen.
+ std::pair<const char *, uint64_t> getMnemonic(const MCInst *MI) override;
+ void printInstruction(const MCInst *MI, uint64_t Address, raw_ostream &O);
+ static const char *getRegisterName(MCRegister Reg);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp
new file mode 100644
index 000000000000..9d43c0052d52
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp
@@ -0,0 +1,55 @@
+//===-- WebAssemblyMCAsmInfo.cpp - WebAssembly asm properties -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the declarations of the WebAssemblyMCAsmInfo
+/// properties.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyMCAsmInfo.h"
+#include "WebAssemblyMCTargetDesc.h"
+#include "llvm/TargetParser/Triple.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-mc-asm-info"
+
+WebAssemblyMCAsmInfo::~WebAssemblyMCAsmInfo() = default; // anchor.
+
+WebAssemblyMCAsmInfo::WebAssemblyMCAsmInfo(const Triple &T,
+ const MCTargetOptions &Options) {
+ CodePointerSize = CalleeSaveStackSlotSize = T.isArch64Bit() ? 8 : 4;
+
+ // TODO: What should MaxInstLength be?
+
+ UseDataRegionDirectives = true;
+
+ // Use .skip instead of .zero because .zero is confusing when used with two
+ // arguments (it doesn't actually zero things out).
+ ZeroDirective = "\t.skip\t";
+
+ Data8bitsDirective = "\t.int8\t";
+ Data16bitsDirective = "\t.int16\t";
+ Data32bitsDirective = "\t.int32\t";
+ Data64bitsDirective = "\t.int64\t";
+
+ AlignmentIsInBytes = false;
+ COMMDirectiveAlignmentIsInBytes = false;
+ LCOMMDirectiveAlignmentType = LCOMM::Log2Alignment;
+
+ SupportsDebugInformation = true;
+
+ // When compilation is done on a cpp file by clang, the exception model info
+ // is stored in LangOptions, which is later used to set the info in
+ // TargetOptions and then MCAsmInfo in LLVMTargetMachine::initAsmInfo(). But
+ // this process does not happen when compiling bitcode directly with clang, so
+ // we make sure this info is set correctly.
+ if (WebAssembly::WasmEnableEH || WebAssembly::WasmEnableSjLj)
+ ExceptionsType = ExceptionHandling::Wasm;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h
new file mode 100644
index 000000000000..5ba4dcf8c4b3
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h
@@ -0,0 +1,32 @@
+//===-- WebAssemblyMCAsmInfo.h - WebAssembly asm properties -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the declaration of the WebAssemblyMCAsmInfo class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYMCASMINFO_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYMCASMINFO_H
+
+#include "llvm/MC/MCAsmInfoWasm.h"
+
+namespace llvm {
+
+class Triple;
+
+class WebAssemblyMCAsmInfo final : public MCAsmInfoWasm {
+public:
+ explicit WebAssemblyMCAsmInfo(const Triple &T,
+ const MCTargetOptions &Options);
+ ~WebAssemblyMCAsmInfo() override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
new file mode 100644
index 000000000000..aaca213c4afe
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
@@ -0,0 +1,188 @@
+//=- WebAssemblyMCCodeEmitter.cpp - Convert WebAssembly code to machine code -//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the WebAssemblyMCCodeEmitter class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyFixupKinds.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/LEB128.h"
+#include "llvm/Support/SMLoc.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "mccodeemitter"
+
+STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
+STATISTIC(MCNumFixups, "Number of MC fixups created.");
+
+namespace {
+class WebAssemblyMCCodeEmitter final : public MCCodeEmitter {
+ const MCInstrInfo &MCII;
+ MCContext &Ctx;
+ // Implementation generated by tablegen.
+ uint64_t getBinaryCodeForInstr(const MCInst &MI,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
+
+public:
+ WebAssemblyMCCodeEmitter(const MCInstrInfo &MCII, MCContext &Ctx)
+ : MCII(MCII), Ctx{Ctx} {}
+};
+} // end anonymous namespace
+
+MCCodeEmitter *llvm::createWebAssemblyMCCodeEmitter(const MCInstrInfo &MCII,
+ MCContext &Ctx) {
+ return new WebAssemblyMCCodeEmitter(MCII, Ctx);
+}
+
+void WebAssemblyMCCodeEmitter::encodeInstruction(
+ const MCInst &MI, SmallVectorImpl<char> &CB,
+ SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
+ raw_svector_ostream OS(CB);
+ uint64_t Start = OS.tell();
+
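+  // Wasm opcodes are either a single byte or a one-byte prefix followed by a
+  // ULEB128-encoded sub-opcode; tablegen packs both into one integer, with
+  // the prefix in the high byte(s).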
+ uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
+ if (Binary < (1 << 8)) {
+ OS << uint8_t(Binary);
+ } else if (Binary < (1 << 16)) {
+ OS << uint8_t(Binary >> 8);
+ encodeULEB128(uint8_t(Binary), OS);
+ } else if (Binary < (1 << 24)) {
+ OS << uint8_t(Binary >> 16);
+ encodeULEB128(uint16_t(Binary), OS);
+ } else {
+ llvm_unreachable("Very large (prefix + 3 byte) opcodes not supported");
+ }
+
+ // For br_table instructions, encode the size of the table. In the MCInst,
+ // there's an index operand (if not a stack instruction), one operand for
+ // each table entry, and the default operand.
+ if (MI.getOpcode() == WebAssembly::BR_TABLE_I32_S ||
+ MI.getOpcode() == WebAssembly::BR_TABLE_I64_S)
+ encodeULEB128(MI.getNumOperands() - 1, OS);
+ if (MI.getOpcode() == WebAssembly::BR_TABLE_I32 ||
+ MI.getOpcode() == WebAssembly::BR_TABLE_I64)
+ encodeULEB128(MI.getNumOperands() - 2, OS);
+
+ const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
+ for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
+ const MCOperand &MO = MI.getOperand(I);
+ if (MO.isReg()) {
+ /* nothing to encode */
+
+ } else if (MO.isImm()) {
+ if (I < Desc.getNumOperands()) {
+ const MCOperandInfo &Info = Desc.operands()[I];
+ LLVM_DEBUG(dbgs() << "Encoding immediate: type="
+ << int(Info.OperandType) << "\n");
+ switch (Info.OperandType) {
+ case WebAssembly::OPERAND_I32IMM:
+ encodeSLEB128(int32_t(MO.getImm()), OS);
+ break;
+ case WebAssembly::OPERAND_OFFSET32:
+ encodeULEB128(uint32_t(MO.getImm()), OS);
+ break;
+ case WebAssembly::OPERAND_I64IMM:
+ encodeSLEB128(int64_t(MO.getImm()), OS);
+ break;
+ case WebAssembly::OPERAND_SIGNATURE:
+ case WebAssembly::OPERAND_VEC_I8IMM:
+ support::endian::write<uint8_t>(OS, MO.getImm(),
+ llvm::endianness::little);
+ break;
+ case WebAssembly::OPERAND_VEC_I16IMM:
+ support::endian::write<uint16_t>(OS, MO.getImm(),
+ llvm::endianness::little);
+ break;
+ case WebAssembly::OPERAND_VEC_I32IMM:
+ support::endian::write<uint32_t>(OS, MO.getImm(),
+ llvm::endianness::little);
+ break;
+ case WebAssembly::OPERAND_VEC_I64IMM:
+ support::endian::write<uint64_t>(OS, MO.getImm(),
+ llvm::endianness::little);
+ break;
+ case WebAssembly::OPERAND_GLOBAL:
+ Ctx.reportError(
+ SMLoc(),
+ Twine("Wasm globals should only be accessed symbolically!"));
+ break;
+ default:
+ encodeULEB128(uint64_t(MO.getImm()), OS);
+ }
+ } else {
+ encodeULEB128(uint64_t(MO.getImm()), OS);
+ }
+
+ } else if (MO.isSFPImm()) {
+ uint32_t F = MO.getSFPImm();
+ support::endian::write<uint32_t>(OS, F, llvm::endianness::little);
+ } else if (MO.isDFPImm()) {
+ uint64_t D = MO.getDFPImm();
+ support::endian::write<uint64_t>(OS, D, llvm::endianness::little);
+ } else if (MO.isExpr()) {
+ const MCOperandInfo &Info = Desc.operands()[I];
+ llvm::MCFixupKind FixupKind;
+ size_t PaddedSize = 5;
+ switch (Info.OperandType) {
+ case WebAssembly::OPERAND_I32IMM:
+ FixupKind = MCFixupKind(WebAssembly::fixup_sleb128_i32);
+ break;
+ case WebAssembly::OPERAND_I64IMM:
+ FixupKind = MCFixupKind(WebAssembly::fixup_sleb128_i64);
+ PaddedSize = 10;
+ break;
+ case WebAssembly::OPERAND_FUNCTION32:
+ case WebAssembly::OPERAND_TABLE:
+ case WebAssembly::OPERAND_OFFSET32:
+ case WebAssembly::OPERAND_SIGNATURE:
+ case WebAssembly::OPERAND_TYPEINDEX:
+ case WebAssembly::OPERAND_GLOBAL:
+ case WebAssembly::OPERAND_TAG:
+ FixupKind = MCFixupKind(WebAssembly::fixup_uleb128_i32);
+ break;
+ case WebAssembly::OPERAND_OFFSET64:
+ FixupKind = MCFixupKind(WebAssembly::fixup_uleb128_i64);
+ PaddedSize = 10;
+ break;
+ default:
+ llvm_unreachable("unexpected symbolic operand kind");
+ }
+ Fixups.push_back(MCFixup::create(OS.tell() - Start, MO.getExpr(),
+ FixupKind, MI.getLoc()));
+ ++MCNumFixups;
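+      // Emit a zero placeholder padded to the fixup's full width; applyFixup
+      // later ORs the real value into these bytes in place.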
+ encodeULEB128(0, OS, PaddedSize);
+ } else {
+ llvm_unreachable("unexpected operand kind");
+ }
+ }
+
+ ++MCNumEmitted; // Keep track of the # of mi's emitted.
+}
+
+#include "WebAssemblyGenMCCodeEmitter.inc"
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp
new file mode 100644
index 000000000000..e8f58a19d25e
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp
@@ -0,0 +1,151 @@
+//===-- WebAssemblyMCTargetDesc.cpp - WebAssembly Target Descriptions -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file provides WebAssembly-specific target descriptions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "MCTargetDesc/WebAssemblyInstPrinter.h"
+#include "MCTargetDesc/WebAssemblyMCAsmInfo.h"
+#include "MCTargetDesc/WebAssemblyTargetStreamer.h"
+#include "TargetInfo/WebAssemblyTargetInfo.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-mc-target-desc"
+
+#define GET_INSTRINFO_MC_DESC
+#define ENABLE_INSTR_PREDICATE_VERIFIER
+#include "WebAssemblyGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_MC_DESC
+#include "WebAssemblyGenSubtargetInfo.inc"
+
+#define GET_REGINFO_MC_DESC
+#include "WebAssemblyGenRegisterInfo.inc"
+
+// Exception handling & setjmp-longjmp handling related options.
+
+// Emscripten's asm.js-style exception handling
+cl::opt<bool> WebAssembly::WasmEnableEmEH(
+ "enable-emscripten-cxx-exceptions",
+ cl::desc("WebAssembly Emscripten-style exception handling"),
+ cl::init(false));
+// Emscripten's asm.js-style setjmp/longjmp handling
+cl::opt<bool> WebAssembly::WasmEnableEmSjLj(
+ "enable-emscripten-sjlj",
+ cl::desc("WebAssembly Emscripten-style setjmp/longjmp handling"),
+ cl::init(false));
+// Exception handling using wasm EH instructions
+cl::opt<bool>
+ WebAssembly::WasmEnableEH("wasm-enable-eh",
+ cl::desc("WebAssembly exception handling"));
+// setjmp/longjmp handling using wasm EH instructions
+cl::opt<bool> WebAssembly::WasmEnableSjLj(
+ "wasm-enable-sjlj", cl::desc("WebAssembly setjmp/longjmp handling"));
+
+static MCAsmInfo *createMCAsmInfo(const MCRegisterInfo & /*MRI*/,
+ const Triple &TT,
+ const MCTargetOptions &Options) {
+ return new WebAssemblyMCAsmInfo(TT, Options);
+}
+
+static MCInstrInfo *createMCInstrInfo() {
+ auto *X = new MCInstrInfo();
+ InitWebAssemblyMCInstrInfo(X);
+ return X;
+}
+
+static MCRegisterInfo *createMCRegisterInfo(const Triple & /*T*/) {
+ auto *X = new MCRegisterInfo();
+ InitWebAssemblyMCRegisterInfo(X, 0);
+ return X;
+}
+
+static MCInstPrinter *createMCInstPrinter(const Triple & /*T*/,
+ unsigned SyntaxVariant,
+ const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI) {
+ assert(SyntaxVariant == 0 && "WebAssembly only has one syntax variant");
+ return new WebAssemblyInstPrinter(MAI, MII, MRI);
+}
+
+static MCCodeEmitter *createCodeEmitter(const MCInstrInfo &MCII,
+ MCContext &Ctx) {
+ return createWebAssemblyMCCodeEmitter(MCII, Ctx);
+}
+
+static MCAsmBackend *createAsmBackend(const Target & /*T*/,
+ const MCSubtargetInfo &STI,
+ const MCRegisterInfo & /*MRI*/,
+ const MCTargetOptions & /*Options*/) {
+ return createWebAssemblyAsmBackend(STI.getTargetTriple());
+}
+
+static MCSubtargetInfo *createMCSubtargetInfo(const Triple &TT, StringRef CPU,
+ StringRef FS) {
+ return createWebAssemblyMCSubtargetInfoImpl(TT, CPU, /*TuneCPU*/ CPU, FS);
+}
+
+static MCTargetStreamer *
+createObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI) {
+ return new WebAssemblyTargetWasmStreamer(S);
+}
+
+static MCTargetStreamer *createAsmTargetStreamer(MCStreamer &S,
+ formatted_raw_ostream &OS,
+ MCInstPrinter * /*InstPrint*/,
+ bool /*isVerboseAsm*/) {
+ return new WebAssemblyTargetAsmStreamer(S, OS);
+}
+
+static MCTargetStreamer *createNullTargetStreamer(MCStreamer &S) {
+ return new WebAssemblyTargetNullStreamer(S);
+}
+
+// Force static initialization.
+extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeWebAssemblyTargetMC() {
+ for (Target *T :
+ {&getTheWebAssemblyTarget32(), &getTheWebAssemblyTarget64()}) {
+ // Register the MC asm info.
+ RegisterMCAsmInfoFn X(*T, createMCAsmInfo);
+
+ // Register the MC instruction info.
+ TargetRegistry::RegisterMCInstrInfo(*T, createMCInstrInfo);
+
+ // Register the MC register info.
+ TargetRegistry::RegisterMCRegInfo(*T, createMCRegisterInfo);
+
+ // Register the MCInstPrinter.
+ TargetRegistry::RegisterMCInstPrinter(*T, createMCInstPrinter);
+
+ // Register the MC code emitter.
+ TargetRegistry::RegisterMCCodeEmitter(*T, createCodeEmitter);
+
+ // Register the ASM Backend.
+ TargetRegistry::RegisterMCAsmBackend(*T, createAsmBackend);
+
+ // Register the MC subtarget info.
+ TargetRegistry::RegisterMCSubtargetInfo(*T, createMCSubtargetInfo);
+
+ // Register the object target streamer.
+ TargetRegistry::RegisterObjectTargetStreamer(*T,
+ createObjectTargetStreamer);
+ // Register the asm target streamer.
+ TargetRegistry::RegisterAsmTargetStreamer(*T, createAsmTargetStreamer);
+ // Register the null target streamer.
+ TargetRegistry::RegisterNullTargetStreamer(*T, createNullTargetStreamer);
+ }
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
new file mode 100644
index 000000000000..15aeaaeb8c4a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
@@ -0,0 +1,552 @@
+//==- WebAssemblyMCTargetDesc.h - WebAssembly Target Descriptions -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file provides WebAssembly-specific target descriptions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYMCTARGETDESC_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYMCTARGETDESC_H
+
+#include "llvm/BinaryFormat/Wasm.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/DataTypes.h"
+#include <memory>
+
+namespace llvm {
+
+class MCAsmBackend;
+class MCCodeEmitter;
+class MCInstrInfo;
+class MCObjectTargetWriter;
+class Triple;
+
+MCCodeEmitter *createWebAssemblyMCCodeEmitter(const MCInstrInfo &MCII,
+ MCContext &Ctx);
+
+MCAsmBackend *createWebAssemblyAsmBackend(const Triple &TT);
+
+std::unique_ptr<MCObjectTargetWriter>
+createWebAssemblyWasmObjectWriter(bool Is64Bit, bool IsEmscripten);
+
+namespace WebAssembly {
+
+// Exception handling / setjmp-longjmp handling command-line options
+extern cl::opt<bool> WasmEnableEmEH; // asm.js-style EH
+extern cl::opt<bool> WasmEnableEmSjLj; // asm.js-style SjLj
+extern cl::opt<bool> WasmEnableEH; // EH using Wasm EH instructions
+extern cl::opt<bool> WasmEnableSjLj; // SjLj using Wasm EH instructions
+
+enum OperandType {
+ /// Basic block label in a branch construct.
+ OPERAND_BASIC_BLOCK = MCOI::OPERAND_FIRST_TARGET,
+ /// Local index.
+ OPERAND_LOCAL,
+ /// Global index.
+ OPERAND_GLOBAL,
+ /// 32-bit integer immediates.
+ OPERAND_I32IMM,
+ /// 64-bit integer immediates.
+ OPERAND_I64IMM,
+ /// 32-bit floating-point immediates.
+ OPERAND_F32IMM,
+ /// 64-bit floating-point immediates.
+ OPERAND_F64IMM,
+ /// 8-bit vector lane immediate
+ OPERAND_VEC_I8IMM,
+ /// 16-bit vector lane immediate
+ OPERAND_VEC_I16IMM,
+ /// 32-bit vector lane immediate
+ OPERAND_VEC_I32IMM,
+ /// 64-bit vector lane immediate
+ OPERAND_VEC_I64IMM,
+ /// 32-bit unsigned function indices.
+ OPERAND_FUNCTION32,
+ /// 32-bit unsigned memory offsets.
+ OPERAND_OFFSET32,
+ /// 64-bit unsigned memory offsets.
+ OPERAND_OFFSET64,
+ /// p2align immediate for load and store address alignment.
+ OPERAND_P2ALIGN,
+ /// signature immediate for block/loop.
+ OPERAND_SIGNATURE,
+ /// type signature immediate for call_indirect.
+ OPERAND_TYPEINDEX,
+ /// Tag index.
+ OPERAND_TAG,
+ /// A list of branch targets for br_list.
+ OPERAND_BRLIST,
+ /// 32-bit unsigned table number.
+ OPERAND_TABLE,
+};
+} // end namespace WebAssembly
+
+namespace WebAssemblyII {
+
+/// Target Operand Flag enum.
+enum TOF {
+ MO_NO_FLAG = 0,
+
+ // On a symbol operand this indicates that the immediate is a wasm global
+ // index. The value of the wasm global will be set to the symbol address at
+ // runtime. This adds a level of indirection similar to the GOT on native
+ // platforms.
+ MO_GOT,
+
+ // Same as MO_GOT but the address stored in the global is a TLS address.
+ MO_GOT_TLS,
+
+ // On a symbol operand this indicates that the immediate is the symbol
+ // address relative the __memory_base wasm global.
+ // Only applicable to data symbols.
+ MO_MEMORY_BASE_REL,
+
+ // On a symbol operand this indicates that the immediate is the symbol
+ // address relative the __tls_base wasm global.
+ // Only applicable to data symbols.
+ MO_TLS_BASE_REL,
+
+ // On a symbol operand this indicates that the immediate is the symbol
+ // address relative the __table_base wasm global.
+ // Only applicable to function symbols.
+ MO_TABLE_BASE_REL,
+};
+
+} // end namespace WebAssemblyII
+
+} // end namespace llvm
+
+// Defines symbolic names for WebAssembly registers. This defines a mapping from
+// register name to register number.
+//
+#define GET_REGINFO_ENUM
+#include "WebAssemblyGenRegisterInfo.inc"
+
+// Defines symbolic names for the WebAssembly instructions.
+//
+#define GET_INSTRINFO_ENUM
+#define GET_INSTRINFO_MC_HELPER_DECLS
+#include "WebAssemblyGenInstrInfo.inc"
+
+namespace llvm {
+namespace WebAssembly {
+
+/// Instruction opcodes emitted via means other than CodeGen.
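+/// These are raw wasm binary opcode bytes (0x01 is the encoding of `nop`,
+/// 0x0b of `end`), not values from the generated instruction info enum.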
+static const unsigned Nop = 0x01;
+static const unsigned End = 0x0b;
+
+/// Return the default p2align value for a load or store with the given opcode.
+inline unsigned GetDefaultP2AlignAny(unsigned Opc) {
+ switch (Opc) {
+#define WASM_LOAD_STORE(NAME) \
+ case WebAssembly::NAME##_A32: \
+ case WebAssembly::NAME##_A64: \
+ case WebAssembly::NAME##_A32_S: \
+ case WebAssembly::NAME##_A64_S:
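+  // Each WASM_LOAD_STORE(NAME) expands to four case labels: the 32-bit- and
+  // 64-bit-address forms of NAME plus their stack-form (_S) counterparts, so
+  // every group of opcodes below shares one return value.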
+ WASM_LOAD_STORE(LOAD8_S_I32)
+ WASM_LOAD_STORE(LOAD8_U_I32)
+ WASM_LOAD_STORE(LOAD8_S_I64)
+ WASM_LOAD_STORE(LOAD8_U_I64)
+ WASM_LOAD_STORE(ATOMIC_LOAD8_U_I32)
+ WASM_LOAD_STORE(ATOMIC_LOAD8_U_I64)
+ WASM_LOAD_STORE(STORE8_I32)
+ WASM_LOAD_STORE(STORE8_I64)
+ WASM_LOAD_STORE(ATOMIC_STORE8_I32)
+ WASM_LOAD_STORE(ATOMIC_STORE8_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW8_U_ADD_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW8_U_ADD_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW8_U_SUB_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW8_U_SUB_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW8_U_AND_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW8_U_AND_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW8_U_OR_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW8_U_OR_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW8_U_XOR_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW8_U_XOR_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW8_U_XCHG_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW8_U_XCHG_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW8_U_CMPXCHG_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW8_U_CMPXCHG_I64)
+ WASM_LOAD_STORE(LOAD8_SPLAT)
+ WASM_LOAD_STORE(LOAD_LANE_I8x16)
+ WASM_LOAD_STORE(STORE_LANE_I8x16)
+ return 0;
+ WASM_LOAD_STORE(LOAD16_S_I32)
+ WASM_LOAD_STORE(LOAD16_U_I32)
+ WASM_LOAD_STORE(LOAD16_S_I64)
+ WASM_LOAD_STORE(LOAD16_U_I64)
+ WASM_LOAD_STORE(ATOMIC_LOAD16_U_I32)
+ WASM_LOAD_STORE(ATOMIC_LOAD16_U_I64)
+ WASM_LOAD_STORE(STORE16_I32)
+ WASM_LOAD_STORE(STORE16_I64)
+ WASM_LOAD_STORE(ATOMIC_STORE16_I32)
+ WASM_LOAD_STORE(ATOMIC_STORE16_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW16_U_ADD_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW16_U_ADD_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW16_U_SUB_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW16_U_SUB_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW16_U_AND_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW16_U_AND_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW16_U_OR_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW16_U_OR_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW16_U_XOR_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW16_U_XOR_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW16_U_XCHG_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW16_U_XCHG_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW16_U_CMPXCHG_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW16_U_CMPXCHG_I64)
+ WASM_LOAD_STORE(LOAD16_SPLAT)
+ WASM_LOAD_STORE(LOAD_LANE_I16x8)
+ WASM_LOAD_STORE(STORE_LANE_I16x8)
+ return 1;
+ WASM_LOAD_STORE(LOAD_I32)
+ WASM_LOAD_STORE(LOAD_F32)
+ WASM_LOAD_STORE(STORE_I32)
+ WASM_LOAD_STORE(STORE_F32)
+ WASM_LOAD_STORE(LOAD32_S_I64)
+ WASM_LOAD_STORE(LOAD32_U_I64)
+ WASM_LOAD_STORE(STORE32_I64)
+ WASM_LOAD_STORE(ATOMIC_LOAD_I32)
+ WASM_LOAD_STORE(ATOMIC_LOAD32_U_I64)
+ WASM_LOAD_STORE(ATOMIC_STORE_I32)
+ WASM_LOAD_STORE(ATOMIC_STORE32_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW_ADD_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW32_U_ADD_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW_SUB_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW32_U_SUB_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW_AND_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW32_U_AND_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW_OR_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW32_U_OR_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW_XOR_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW32_U_XOR_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW_XCHG_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW32_U_XCHG_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW_CMPXCHG_I32)
+ WASM_LOAD_STORE(ATOMIC_RMW32_U_CMPXCHG_I64)
+ WASM_LOAD_STORE(MEMORY_ATOMIC_NOTIFY)
+ WASM_LOAD_STORE(MEMORY_ATOMIC_WAIT32)
+ WASM_LOAD_STORE(LOAD32_SPLAT)
+ WASM_LOAD_STORE(LOAD_ZERO_I32x4)
+ WASM_LOAD_STORE(LOAD_LANE_I32x4)
+ WASM_LOAD_STORE(STORE_LANE_I32x4)
+ return 2;
+ WASM_LOAD_STORE(LOAD_I64)
+ WASM_LOAD_STORE(LOAD_F64)
+ WASM_LOAD_STORE(STORE_I64)
+ WASM_LOAD_STORE(STORE_F64)
+ WASM_LOAD_STORE(ATOMIC_LOAD_I64)
+ WASM_LOAD_STORE(ATOMIC_STORE_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW_ADD_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW_SUB_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW_AND_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW_OR_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW_XOR_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW_XCHG_I64)
+ WASM_LOAD_STORE(ATOMIC_RMW_CMPXCHG_I64)
+ WASM_LOAD_STORE(MEMORY_ATOMIC_WAIT64)
+ WASM_LOAD_STORE(LOAD64_SPLAT)
+ WASM_LOAD_STORE(LOAD_EXTEND_S_I16x8)
+ WASM_LOAD_STORE(LOAD_EXTEND_U_I16x8)
+ WASM_LOAD_STORE(LOAD_EXTEND_S_I32x4)
+ WASM_LOAD_STORE(LOAD_EXTEND_U_I32x4)
+ WASM_LOAD_STORE(LOAD_EXTEND_S_I64x2)
+ WASM_LOAD_STORE(LOAD_EXTEND_U_I64x2)
+ WASM_LOAD_STORE(LOAD_ZERO_I64x2)
+ WASM_LOAD_STORE(LOAD_LANE_I64x2)
+ WASM_LOAD_STORE(STORE_LANE_I64x2)
+ return 3;
+ WASM_LOAD_STORE(LOAD_V128)
+ WASM_LOAD_STORE(STORE_V128)
+ return 4;
+ default:
+ return -1;
+ }
+#undef WASM_LOAD_STORE
+}
+
+inline unsigned GetDefaultP2Align(unsigned Opc) {
+ auto Align = GetDefaultP2AlignAny(Opc);
+ if (Align == -1U) {
+ llvm_unreachable("Only loads and stores have p2align values");
+ }
+ return Align;
+}
+
+inline bool isConst(unsigned Opc) {
+ switch (Opc) {
+ case WebAssembly::CONST_I32:
+ case WebAssembly::CONST_I32_S:
+ case WebAssembly::CONST_I64:
+ case WebAssembly::CONST_I64_S:
+ case WebAssembly::CONST_F32:
+ case WebAssembly::CONST_F32_S:
+ case WebAssembly::CONST_F64:
+ case WebAssembly::CONST_F64_S:
+ case WebAssembly::CONST_V128_I8x16:
+ case WebAssembly::CONST_V128_I8x16_S:
+ case WebAssembly::CONST_V128_I16x8:
+ case WebAssembly::CONST_V128_I16x8_S:
+ case WebAssembly::CONST_V128_I32x4:
+ case WebAssembly::CONST_V128_I32x4_S:
+ case WebAssembly::CONST_V128_I64x2:
+ case WebAssembly::CONST_V128_I64x2_S:
+ case WebAssembly::CONST_V128_F32x4:
+ case WebAssembly::CONST_V128_F32x4_S:
+ case WebAssembly::CONST_V128_F64x2:
+ case WebAssembly::CONST_V128_F64x2_S:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool isScalarConst(unsigned Opc) {
+ switch (Opc) {
+ case WebAssembly::CONST_I32:
+ case WebAssembly::CONST_I32_S:
+ case WebAssembly::CONST_I64:
+ case WebAssembly::CONST_I64_S:
+ case WebAssembly::CONST_F32:
+ case WebAssembly::CONST_F32_S:
+ case WebAssembly::CONST_F64:
+ case WebAssembly::CONST_F64_S:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool isArgument(unsigned Opc) {
+ switch (Opc) {
+ case WebAssembly::ARGUMENT_i32:
+ case WebAssembly::ARGUMENT_i32_S:
+ case WebAssembly::ARGUMENT_i64:
+ case WebAssembly::ARGUMENT_i64_S:
+ case WebAssembly::ARGUMENT_f32:
+ case WebAssembly::ARGUMENT_f32_S:
+ case WebAssembly::ARGUMENT_f64:
+ case WebAssembly::ARGUMENT_f64_S:
+ case WebAssembly::ARGUMENT_v16i8:
+ case WebAssembly::ARGUMENT_v16i8_S:
+ case WebAssembly::ARGUMENT_v8i16:
+ case WebAssembly::ARGUMENT_v8i16_S:
+ case WebAssembly::ARGUMENT_v4i32:
+ case WebAssembly::ARGUMENT_v4i32_S:
+ case WebAssembly::ARGUMENT_v2i64:
+ case WebAssembly::ARGUMENT_v2i64_S:
+ case WebAssembly::ARGUMENT_v4f32:
+ case WebAssembly::ARGUMENT_v4f32_S:
+ case WebAssembly::ARGUMENT_v2f64:
+ case WebAssembly::ARGUMENT_v2f64_S:
+ case WebAssembly::ARGUMENT_funcref:
+ case WebAssembly::ARGUMENT_funcref_S:
+ case WebAssembly::ARGUMENT_externref:
+ case WebAssembly::ARGUMENT_externref_S:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool isCopy(unsigned Opc) {
+ switch (Opc) {
+ case WebAssembly::COPY_I32:
+ case WebAssembly::COPY_I32_S:
+ case WebAssembly::COPY_I64:
+ case WebAssembly::COPY_I64_S:
+ case WebAssembly::COPY_F32:
+ case WebAssembly::COPY_F32_S:
+ case WebAssembly::COPY_F64:
+ case WebAssembly::COPY_F64_S:
+ case WebAssembly::COPY_V128:
+ case WebAssembly::COPY_V128_S:
+ case WebAssembly::COPY_FUNCREF:
+ case WebAssembly::COPY_FUNCREF_S:
+ case WebAssembly::COPY_EXTERNREF:
+ case WebAssembly::COPY_EXTERNREF_S:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool isTee(unsigned Opc) {
+ switch (Opc) {
+ case WebAssembly::TEE_I32:
+ case WebAssembly::TEE_I32_S:
+ case WebAssembly::TEE_I64:
+ case WebAssembly::TEE_I64_S:
+ case WebAssembly::TEE_F32:
+ case WebAssembly::TEE_F32_S:
+ case WebAssembly::TEE_F64:
+ case WebAssembly::TEE_F64_S:
+ case WebAssembly::TEE_V128:
+ case WebAssembly::TEE_V128_S:
+ case WebAssembly::TEE_FUNCREF:
+ case WebAssembly::TEE_FUNCREF_S:
+ case WebAssembly::TEE_EXTERNREF:
+ case WebAssembly::TEE_EXTERNREF_S:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool isCallDirect(unsigned Opc) {
+ switch (Opc) {
+ case WebAssembly::CALL:
+ case WebAssembly::CALL_S:
+ case WebAssembly::RET_CALL:
+ case WebAssembly::RET_CALL_S:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool isCallIndirect(unsigned Opc) {
+ switch (Opc) {
+ case WebAssembly::CALL_INDIRECT:
+ case WebAssembly::CALL_INDIRECT_S:
+ case WebAssembly::RET_CALL_INDIRECT:
+ case WebAssembly::RET_CALL_INDIRECT_S:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool isBrTable(unsigned Opc) {
+ switch (Opc) {
+ case WebAssembly::BR_TABLE_I32:
+ case WebAssembly::BR_TABLE_I32_S:
+ case WebAssembly::BR_TABLE_I64:
+ case WebAssembly::BR_TABLE_I64_S:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool isMarker(unsigned Opc) {
+ switch (Opc) {
+ case WebAssembly::BLOCK:
+ case WebAssembly::BLOCK_S:
+ case WebAssembly::END_BLOCK:
+ case WebAssembly::END_BLOCK_S:
+ case WebAssembly::LOOP:
+ case WebAssembly::LOOP_S:
+ case WebAssembly::END_LOOP:
+ case WebAssembly::END_LOOP_S:
+ case WebAssembly::TRY:
+ case WebAssembly::TRY_S:
+ case WebAssembly::END_TRY:
+ case WebAssembly::END_TRY_S:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool isCatch(unsigned Opc) {
+ switch (Opc) {
+ case WebAssembly::CATCH:
+ case WebAssembly::CATCH_S:
+ case WebAssembly::CATCH_ALL:
+ case WebAssembly::CATCH_ALL_S:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool isLocalGet(unsigned Opc) {
+ switch (Opc) {
+ case WebAssembly::LOCAL_GET_I32:
+ case WebAssembly::LOCAL_GET_I32_S:
+ case WebAssembly::LOCAL_GET_I64:
+ case WebAssembly::LOCAL_GET_I64_S:
+ case WebAssembly::LOCAL_GET_F32:
+ case WebAssembly::LOCAL_GET_F32_S:
+ case WebAssembly::LOCAL_GET_F64:
+ case WebAssembly::LOCAL_GET_F64_S:
+ case WebAssembly::LOCAL_GET_V128:
+ case WebAssembly::LOCAL_GET_V128_S:
+ case WebAssembly::LOCAL_GET_FUNCREF:
+ case WebAssembly::LOCAL_GET_FUNCREF_S:
+ case WebAssembly::LOCAL_GET_EXTERNREF:
+ case WebAssembly::LOCAL_GET_EXTERNREF_S:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool isLocalSet(unsigned Opc) {
+ switch (Opc) {
+ case WebAssembly::LOCAL_SET_I32:
+ case WebAssembly::LOCAL_SET_I32_S:
+ case WebAssembly::LOCAL_SET_I64:
+ case WebAssembly::LOCAL_SET_I64_S:
+ case WebAssembly::LOCAL_SET_F32:
+ case WebAssembly::LOCAL_SET_F32_S:
+ case WebAssembly::LOCAL_SET_F64:
+ case WebAssembly::LOCAL_SET_F64_S:
+ case WebAssembly::LOCAL_SET_V128:
+ case WebAssembly::LOCAL_SET_V128_S:
+ case WebAssembly::LOCAL_SET_FUNCREF:
+ case WebAssembly::LOCAL_SET_FUNCREF_S:
+ case WebAssembly::LOCAL_SET_EXTERNREF:
+ case WebAssembly::LOCAL_SET_EXTERNREF_S:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool isLocalTee(unsigned Opc) {
+ switch (Opc) {
+ case WebAssembly::LOCAL_TEE_I32:
+ case WebAssembly::LOCAL_TEE_I32_S:
+ case WebAssembly::LOCAL_TEE_I64:
+ case WebAssembly::LOCAL_TEE_I64_S:
+ case WebAssembly::LOCAL_TEE_F32:
+ case WebAssembly::LOCAL_TEE_F32_S:
+ case WebAssembly::LOCAL_TEE_F64:
+ case WebAssembly::LOCAL_TEE_F64_S:
+ case WebAssembly::LOCAL_TEE_V128:
+ case WebAssembly::LOCAL_TEE_V128_S:
+ case WebAssembly::LOCAL_TEE_FUNCREF:
+ case WebAssembly::LOCAL_TEE_FUNCREF_S:
+ case WebAssembly::LOCAL_TEE_EXTERNREF:
+ case WebAssembly::LOCAL_TEE_EXTERNREF_S:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const unsigned UnusedReg = -1u;
+
+// For a given stackified WAReg, return the id number to print with push/pop.
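+// Stackified registers are tagged by setting the high bit, so e.g. the
+// stackified register with id 5 is stored as 0x80000005 and this returns 5.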
+inline unsigned getWARegStackId(unsigned Reg) {
+ assert(Reg & INT32_MIN);
+ return Reg & INT32_MAX;
+}
+
+} // end namespace WebAssembly
+} // end namespace llvm
+
+#define GET_SUBTARGETINFO_ENUM
+#include "WebAssemblyGenSubtargetInfo.inc"
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTypeUtilities.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTypeUtilities.cpp
new file mode 100644
index 000000000000..b7b5b2a97c59
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTypeUtilities.cpp
@@ -0,0 +1,124 @@
+//===- WebAssemblyMCTypeUtilities.cpp - WebAssembly Type Utility Functions-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements several utility functions for WebAssembly type parsing.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyMCTypeUtilities.h"
+#include "WebAssemblyMCTargetDesc.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace llvm;
+
+std::optional<wasm::ValType> WebAssembly::parseType(StringRef Type) {
+  // FIXME: can't use StringSwitch because wasm::ValType doesn't have an
+  // "invalid" value.
+ if (Type == "i32")
+ return wasm::ValType::I32;
+ if (Type == "i64")
+ return wasm::ValType::I64;
+ if (Type == "f32")
+ return wasm::ValType::F32;
+ if (Type == "f64")
+ return wasm::ValType::F64;
+ if (Type == "v128" || Type == "i8x16" || Type == "i16x8" || Type == "i32x4" ||
+ Type == "i64x2" || Type == "f32x4" || Type == "f64x2")
+ return wasm::ValType::V128;
+ if (Type == "funcref")
+ return wasm::ValType::FUNCREF;
+ if (Type == "externref")
+ return wasm::ValType::EXTERNREF;
+ return std::nullopt;
+}
+
+WebAssembly::BlockType WebAssembly::parseBlockType(StringRef Type) {
+ // Multivalue block types are handled separately in parseSignature
+ return StringSwitch<WebAssembly::BlockType>(Type)
+ .Case("i32", WebAssembly::BlockType::I32)
+ .Case("i64", WebAssembly::BlockType::I64)
+ .Case("f32", WebAssembly::BlockType::F32)
+ .Case("f64", WebAssembly::BlockType::F64)
+ .Case("v128", WebAssembly::BlockType::V128)
+ .Case("funcref", WebAssembly::BlockType::Funcref)
+ .Case("externref", WebAssembly::BlockType::Externref)
+ .Case("void", WebAssembly::BlockType::Void)
+ .Default(WebAssembly::BlockType::Invalid);
+}
+
+// We have various enums representing a subset of these types; use this
+// function to convert any of them to text.
+const char *WebAssembly::anyTypeToString(unsigned Type) {
+ switch (Type) {
+ case wasm::WASM_TYPE_I32:
+ return "i32";
+ case wasm::WASM_TYPE_I64:
+ return "i64";
+ case wasm::WASM_TYPE_F32:
+ return "f32";
+ case wasm::WASM_TYPE_F64:
+ return "f64";
+ case wasm::WASM_TYPE_V128:
+ return "v128";
+ case wasm::WASM_TYPE_FUNCREF:
+ return "funcref";
+ case wasm::WASM_TYPE_EXTERNREF:
+ return "externref";
+ case wasm::WASM_TYPE_FUNC:
+ return "func";
+ case wasm::WASM_TYPE_NORESULT:
+ return "void";
+ default:
+ return "invalid_type";
+ }
+}
+
+const char *WebAssembly::typeToString(wasm::ValType Type) {
+ return anyTypeToString(static_cast<unsigned>(Type));
+}
+
+std::string WebAssembly::typeListToString(ArrayRef<wasm::ValType> List) {
+ std::string S;
+ for (const auto &Type : List) {
+ if (&Type != &List[0])
+ S += ", ";
+ S += WebAssembly::typeToString(Type);
+ }
+ return S;
+}
+
+std::string WebAssembly::signatureToString(const wasm::WasmSignature *Sig) {
+ std::string S("(");
+ S += typeListToString(Sig->Params);
+ S += ") -> (";
+ S += typeListToString(Sig->Returns);
+ S += ")";
+ return S;
+}
+
+wasm::ValType WebAssembly::regClassToValType(unsigned RC) {
+ switch (RC) {
+ case WebAssembly::I32RegClassID:
+ return wasm::ValType::I32;
+ case WebAssembly::I64RegClassID:
+ return wasm::ValType::I64;
+ case WebAssembly::F32RegClassID:
+ return wasm::ValType::F32;
+ case WebAssembly::F64RegClassID:
+ return wasm::ValType::F64;
+ case WebAssembly::V128RegClassID:
+ return wasm::ValType::V128;
+ case WebAssembly::FUNCREFRegClassID:
+ return wasm::ValType::FUNCREF;
+ case WebAssembly::EXTERNREFRegClassID:
+ return wasm::ValType::EXTERNREF;
+ default:
+ llvm_unreachable("unexpected type");
+ }
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTypeUtilities.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTypeUtilities.h
new file mode 100644
index 000000000000..18018dfc6d6f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTypeUtilities.h
@@ -0,0 +1,73 @@
+//===-- WebAssemblyMCTypeUtilities - WebAssembly Type Utilities-*- C++ -*-====//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the declaration of the WebAssembly-specific type parsing
+/// utility functions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYMCTYPEUTILITIES_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYMCTYPEUTILITIES_H
+
+#include "llvm/BinaryFormat/Wasm.h"
+
+namespace llvm {
+
+namespace WebAssembly {
+
+/// Used as immediate MachineOperands for block signatures
+enum class BlockType : unsigned {
+ Invalid = 0x00,
+ Void = 0x40,
+ I32 = unsigned(wasm::ValType::I32),
+ I64 = unsigned(wasm::ValType::I64),
+ F32 = unsigned(wasm::ValType::F32),
+ F64 = unsigned(wasm::ValType::F64),
+ V128 = unsigned(wasm::ValType::V128),
+ Externref = unsigned(wasm::ValType::EXTERNREF),
+ Funcref = unsigned(wasm::ValType::FUNCREF),
+ // Multivalue blocks (and other non-void blocks) are only emitted when the
+ // blocks will never be exited and are at the ends of functions (see
+ // WebAssemblyCFGStackify::fixEndsAtEndOfFunction). They also are never made
+ // to pop values off the stack, so the exact multivalue signature can always
+ // be inferred from the return type of the parent function in MCInstLower.
+ Multivalue = 0xffff,
+};
+
+inline bool isRefType(wasm::ValType Type) {
+ return Type == wasm::ValType::EXTERNREF || Type == wasm::ValType::FUNCREF;
+}
+
+// Convert ValType or a list/signature of ValTypes to a string.
+
+// Convert an unsigned integer, which may be a value of the wasm::ValType
+// enum, to its type name string. If the input is not within wasm::ValType,
+// returns "invalid_type".
+const char *anyTypeToString(unsigned Type);
+const char *typeToString(wasm::ValType Type);
+// Convert a list of ValTypes into a string in the format of
+// "type0, type1, ... typeN"
+std::string typeListToString(ArrayRef<wasm::ValType> List);
+// Convert a wasm signature into a string in the format of
+// "(params) -> (results)", where params and results are a string of ValType
+// lists.
+std::string signatureToString(const wasm::WasmSignature *Sig);
+
+// Convert a register class ID to a wasm ValType.
+wasm::ValType regClassToValType(unsigned RC);
+
+// Convert StringRef to ValType / HeapType / BlockType
+
+std::optional<wasm::ValType> parseType(StringRef Type);
+BlockType parseBlockType(StringRef Type);
+
+} // end namespace WebAssembly
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
new file mode 100644
index 000000000000..f389ee2f50d8
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
@@ -0,0 +1,139 @@
+//==-- WebAssemblyTargetStreamer.cpp - WebAssembly Target Streamer Methods --=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines WebAssembly-specific target streamer classes.
+/// These are for implementing support for target-specific assembly directives.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyTargetStreamer.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "MCTargetDesc/WebAssemblyMCTypeUtilities.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSectionWasm.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbolWasm.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
+using namespace llvm;
+
+WebAssemblyTargetStreamer::WebAssemblyTargetStreamer(MCStreamer &S)
+ : MCTargetStreamer(S) {}
+
+void WebAssemblyTargetStreamer::emitValueType(wasm::ValType Type) {
+ Streamer.emitIntValue(uint8_t(Type), 1);
+}
+
+WebAssemblyTargetAsmStreamer::WebAssemblyTargetAsmStreamer(
+ MCStreamer &S, formatted_raw_ostream &OS)
+ : WebAssemblyTargetStreamer(S), OS(OS) {}
+
+WebAssemblyTargetWasmStreamer::WebAssemblyTargetWasmStreamer(MCStreamer &S)
+ : WebAssemblyTargetStreamer(S) {}
+
+static void printTypes(formatted_raw_ostream &OS,
+ ArrayRef<wasm::ValType> Types) {
+ bool First = true;
+ for (auto Type : Types) {
+ if (First)
+ First = false;
+ else
+ OS << ", ";
+ OS << WebAssembly::typeToString(Type);
+ }
+ OS << '\n';
+}
+
+void WebAssemblyTargetAsmStreamer::emitLocal(ArrayRef<wasm::ValType> Types) {
+ if (!Types.empty()) {
+ OS << "\t.local \t";
+ printTypes(OS, Types);
+ }
+}
+
+void WebAssemblyTargetAsmStreamer::emitFunctionType(const MCSymbolWasm *Sym) {
+ assert(Sym->isFunction());
+ OS << "\t.functype\t" << Sym->getName() << " ";
+ OS << WebAssembly::signatureToString(Sym->getSignature());
+ OS << "\n";
+}
+
+void WebAssemblyTargetAsmStreamer::emitGlobalType(const MCSymbolWasm *Sym) {
+ assert(Sym->isGlobal());
+ OS << "\t.globaltype\t" << Sym->getName() << ", "
+ << WebAssembly::typeToString(
+ static_cast<wasm::ValType>(Sym->getGlobalType().Type));
+ if (!Sym->getGlobalType().Mutable)
+ OS << ", immutable";
+ OS << '\n';
+}
+
+void WebAssemblyTargetAsmStreamer::emitTableType(const MCSymbolWasm *Sym) {
+ assert(Sym->isTable());
+ const wasm::WasmTableType &Type = Sym->getTableType();
+ OS << "\t.tabletype\t" << Sym->getName() << ", "
+ << WebAssembly::typeToString(static_cast<wasm::ValType>(Type.ElemType));
+ bool HasMaximum = Type.Limits.Flags & wasm::WASM_LIMITS_FLAG_HAS_MAX;
+ if (Type.Limits.Minimum != 0 || HasMaximum) {
+ OS << ", " << Type.Limits.Minimum;
+ if (HasMaximum)
+ OS << ", " << Type.Limits.Maximum;
+ }
+ OS << '\n';
+}
+
+void WebAssemblyTargetAsmStreamer::emitTagType(const MCSymbolWasm *Sym) {
+ assert(Sym->isTag());
+ OS << "\t.tagtype\t" << Sym->getName() << " ";
+ OS << WebAssembly::typeListToString(Sym->getSignature()->Params);
+ OS << "\n";
+}
+
+void WebAssemblyTargetAsmStreamer::emitImportModule(const MCSymbolWasm *Sym,
+ StringRef ImportModule) {
+ OS << "\t.import_module\t" << Sym->getName() << ", "
+ << ImportModule << '\n';
+}
+
+void WebAssemblyTargetAsmStreamer::emitImportName(const MCSymbolWasm *Sym,
+ StringRef ImportName) {
+ OS << "\t.import_name\t" << Sym->getName() << ", "
+ << ImportName << '\n';
+}
+
+void WebAssemblyTargetAsmStreamer::emitExportName(const MCSymbolWasm *Sym,
+ StringRef ExportName) {
+ OS << "\t.export_name\t" << Sym->getName() << ", "
+ << ExportName << '\n';
+}
+
+void WebAssemblyTargetAsmStreamer::emitIndIdx(const MCExpr *Value) {
+ OS << "\t.indidx \t" << *Value << '\n';
+}
+
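+// The binary "locals" encoding run-length groups consecutive locals of the
+// same type: first the number of groups, then for each group a ULEB128 count
+// followed by the value type. For example, locals (i32, i32, f64) are
+// emitted as: 2, (2, i32), (1, f64).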
+void WebAssemblyTargetWasmStreamer::emitLocal(ArrayRef<wasm::ValType> Types) {
+ SmallVector<std::pair<wasm::ValType, uint32_t>, 4> Grouped;
+ for (auto Type : Types) {
+ if (Grouped.empty() || Grouped.back().first != Type)
+ Grouped.push_back(std::make_pair(Type, 1));
+ else
+ ++Grouped.back().second;
+ }
+
+ Streamer.emitULEB128IntValue(Grouped.size());
+ for (auto Pair : Grouped) {
+ Streamer.emitULEB128IntValue(Pair.second);
+ emitValueType(Pair.first);
+ }
+}
+
+void WebAssemblyTargetWasmStreamer::emitIndIdx(const MCExpr *Value) {
+ llvm_unreachable(".indidx encoding not yet implemented");
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h
new file mode 100644
index 000000000000..72d36a251a91
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h
@@ -0,0 +1,115 @@
+//==-- WebAssemblyTargetStreamer.h - WebAssembly Target Streamer -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares WebAssembly-specific target streamer classes.
+/// These are for implementing support for target-specific assembly directives.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYTARGETSTREAMER_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYTARGETSTREAMER_H
+
+#include "llvm/BinaryFormat/Wasm.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/MC/MCStreamer.h"
+
+namespace llvm {
+
+class MCSymbolWasm;
+class formatted_raw_ostream;
+
+/// WebAssembly-specific streamer interface, to implement support for
+/// WebAssembly-specific assembly directives.
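+///
+/// For illustration (the symbol names here are made up), the asm streamer
+/// prints these directives in forms such as:
+///   .functype foo (i32) -> (i32)
+///   .globaltype __stack_pointer, i32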
+class WebAssemblyTargetStreamer : public MCTargetStreamer {
+public:
+ explicit WebAssemblyTargetStreamer(MCStreamer &S);
+
+ /// .local
+ virtual void emitLocal(ArrayRef<wasm::ValType> Types) = 0;
+ /// .functype
+ virtual void emitFunctionType(const MCSymbolWasm *Sym) = 0;
+ /// .indidx
+ virtual void emitIndIdx(const MCExpr *Value) = 0;
+ /// .globaltype
+ virtual void emitGlobalType(const MCSymbolWasm *Sym) = 0;
+ /// .tabletype
+ virtual void emitTableType(const MCSymbolWasm *Sym) = 0;
+ /// .tagtype
+ virtual void emitTagType(const MCSymbolWasm *Sym) = 0;
+ /// .import_module
+ virtual void emitImportModule(const MCSymbolWasm *Sym,
+ StringRef ImportModule) = 0;
+ /// .import_name
+ virtual void emitImportName(const MCSymbolWasm *Sym,
+ StringRef ImportName) = 0;
+ /// .export_name
+ virtual void emitExportName(const MCSymbolWasm *Sym,
+ StringRef ExportName) = 0;
+
+protected:
+ void emitValueType(wasm::ValType Type);
+};
+
+/// This part is for ASCII assembly output
+class WebAssemblyTargetAsmStreamer final : public WebAssemblyTargetStreamer {
+ formatted_raw_ostream &OS;
+
+public:
+ WebAssemblyTargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS);
+
+ void emitLocal(ArrayRef<wasm::ValType> Types) override;
+ void emitFunctionType(const MCSymbolWasm *Sym) override;
+ void emitIndIdx(const MCExpr *Value) override;
+ void emitGlobalType(const MCSymbolWasm *Sym) override;
+ void emitTableType(const MCSymbolWasm *Sym) override;
+ void emitTagType(const MCSymbolWasm *Sym) override;
+  void emitImportModule(const MCSymbolWasm *Sym,
+                        StringRef ImportModule) override;
+ void emitImportName(const MCSymbolWasm *Sym, StringRef ImportName) override;
+ void emitExportName(const MCSymbolWasm *Sym, StringRef ExportName) override;
+};
+
+/// This part is for Wasm object output
+class WebAssemblyTargetWasmStreamer final : public WebAssemblyTargetStreamer {
+public:
+ explicit WebAssemblyTargetWasmStreamer(MCStreamer &S);
+
+ void emitLocal(ArrayRef<wasm::ValType> Types) override;
+ void emitFunctionType(const MCSymbolWasm *Sym) override {}
+ void emitIndIdx(const MCExpr *Value) override;
+ void emitGlobalType(const MCSymbolWasm *Sym) override {}
+ void emitTableType(const MCSymbolWasm *Sym) override {}
+ void emitTagType(const MCSymbolWasm *Sym) override {}
+ void emitImportModule(const MCSymbolWasm *Sym,
+ StringRef ImportModule) override {}
+ void emitImportName(const MCSymbolWasm *Sym,
+ StringRef ImportName) override {}
+ void emitExportName(const MCSymbolWasm *Sym,
+ StringRef ExportName) override {}
+};
+
+/// This part is for null output
+class WebAssemblyTargetNullStreamer final : public WebAssemblyTargetStreamer {
+public:
+ explicit WebAssemblyTargetNullStreamer(MCStreamer &S)
+ : WebAssemblyTargetStreamer(S) {}
+
+ void emitLocal(ArrayRef<wasm::ValType>) override {}
+ void emitFunctionType(const MCSymbolWasm *) override {}
+ void emitIndIdx(const MCExpr *) override {}
+ void emitGlobalType(const MCSymbolWasm *) override {}
+ void emitTableType(const MCSymbolWasm *) override {}
+ void emitTagType(const MCSymbolWasm *) override {}
+ void emitImportModule(const MCSymbolWasm *, StringRef) override {}
+ void emitImportName(const MCSymbolWasm *, StringRef) override {}
+ void emitExportName(const MCSymbolWasm *, StringRef) override {}
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp
new file mode 100644
index 000000000000..43c67b4b4749
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp
@@ -0,0 +1,166 @@
+//===-- WebAssemblyWasmObjectWriter.cpp - WebAssembly Wasm Writer ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file handles Wasm-specific object emission, converting LLVM's
+/// internal fixups into the appropriate relocations.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyFixupKinds.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "llvm/BinaryFormat/Wasm.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSectionWasm.h"
+#include "llvm/MC/MCSymbolWasm.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/MC/MCWasmObjectWriter.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+namespace {
+class WebAssemblyWasmObjectWriter final : public MCWasmObjectTargetWriter {
+public:
+ explicit WebAssemblyWasmObjectWriter(bool Is64Bit, bool IsEmscripten);
+
+private:
+ unsigned getRelocType(const MCValue &Target, const MCFixup &Fixup,
+ const MCSectionWasm &FixupSection,
+ bool IsLocRel) const override;
+};
+} // end anonymous namespace
+
+WebAssemblyWasmObjectWriter::WebAssemblyWasmObjectWriter(bool Is64Bit,
+ bool IsEmscripten)
+ : MCWasmObjectTargetWriter(Is64Bit, IsEmscripten) {}
+
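+// Determine the section that an expression's fixup ultimately targets. A
+// difference of two symbols within the same section folds to a constant and
+// needs no section-relative relocation, so it reports no section.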
+static const MCSection *getTargetSection(const MCExpr *Expr) {
+ if (auto SyExp = dyn_cast<MCSymbolRefExpr>(Expr)) {
+ if (SyExp->getSymbol().isInSection())
+ return &SyExp->getSymbol().getSection();
+ return nullptr;
+ }
+
+ if (auto BinOp = dyn_cast<MCBinaryExpr>(Expr)) {
+ auto SectionLHS = getTargetSection(BinOp->getLHS());
+ auto SectionRHS = getTargetSection(BinOp->getRHS());
+ return SectionLHS == SectionRHS ? nullptr : SectionLHS;
+ }
+
+ if (auto UnOp = dyn_cast<MCUnaryExpr>(Expr))
+ return getTargetSection(UnOp->getSubExpr());
+
+ return nullptr;
+}
+
+unsigned WebAssemblyWasmObjectWriter::getRelocType(
+ const MCValue &Target, const MCFixup &Fixup,
+ const MCSectionWasm &FixupSection, bool IsLocRel) const {
+ const MCSymbolRefExpr *RefA = Target.getSymA();
+ assert(RefA);
+ auto& SymA = cast<MCSymbolWasm>(RefA->getSymbol());
+
+ MCSymbolRefExpr::VariantKind Modifier = Target.getAccessVariant();
+
+ switch (Modifier) {
+ case MCSymbolRefExpr::VK_GOT:
+ case MCSymbolRefExpr::VK_WASM_GOT_TLS:
+ return wasm::R_WASM_GLOBAL_INDEX_LEB;
+ case MCSymbolRefExpr::VK_WASM_TBREL:
+ assert(SymA.isFunction());
+ return is64Bit() ? wasm::R_WASM_TABLE_INDEX_REL_SLEB64
+ : wasm::R_WASM_TABLE_INDEX_REL_SLEB;
+ case MCSymbolRefExpr::VK_WASM_TLSREL:
+ return is64Bit() ? wasm::R_WASM_MEMORY_ADDR_TLS_SLEB64
+ : wasm::R_WASM_MEMORY_ADDR_TLS_SLEB;
+ case MCSymbolRefExpr::VK_WASM_MBREL:
+ assert(SymA.isData());
+ return is64Bit() ? wasm::R_WASM_MEMORY_ADDR_REL_SLEB64
+ : wasm::R_WASM_MEMORY_ADDR_REL_SLEB;
+ case MCSymbolRefExpr::VK_WASM_TYPEINDEX:
+ return wasm::R_WASM_TYPE_INDEX_LEB;
+ case MCSymbolRefExpr::VK_None:
+ break;
+ case MCSymbolRefExpr::VK_WASM_FUNCINDEX:
+ return wasm::R_WASM_FUNCTION_INDEX_I32;
+ default:
+ report_fatal_error("unknown VariantKind");
+ break;
+ }
+
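+  // With no modifier, the relocation type is determined by the fixup width
+  // and by the kind of symbol (function, global, tag, table, or data) being
+  // referenced.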
+ switch (unsigned(Fixup.getKind())) {
+ case WebAssembly::fixup_sleb128_i32:
+ if (SymA.isFunction())
+ return wasm::R_WASM_TABLE_INDEX_SLEB;
+ return wasm::R_WASM_MEMORY_ADDR_SLEB;
+ case WebAssembly::fixup_sleb128_i64:
+ if (SymA.isFunction())
+ return wasm::R_WASM_TABLE_INDEX_SLEB64;
+ return wasm::R_WASM_MEMORY_ADDR_SLEB64;
+ case WebAssembly::fixup_uleb128_i32:
+ if (SymA.isGlobal())
+ return wasm::R_WASM_GLOBAL_INDEX_LEB;
+ if (SymA.isFunction())
+ return wasm::R_WASM_FUNCTION_INDEX_LEB;
+ if (SymA.isTag())
+ return wasm::R_WASM_TAG_INDEX_LEB;
+ if (SymA.isTable())
+ return wasm::R_WASM_TABLE_NUMBER_LEB;
+ return wasm::R_WASM_MEMORY_ADDR_LEB;
+ case WebAssembly::fixup_uleb128_i64:
+ assert(SymA.isData());
+ return wasm::R_WASM_MEMORY_ADDR_LEB64;
+ case FK_Data_4:
+ if (SymA.isFunction()) {
+ if (FixupSection.getKind().isMetadata())
+ return wasm::R_WASM_FUNCTION_OFFSET_I32;
+ assert(FixupSection.isWasmData());
+ return wasm::R_WASM_TABLE_INDEX_I32;
+ }
+ if (SymA.isGlobal())
+ return wasm::R_WASM_GLOBAL_INDEX_I32;
+ if (auto Section = static_cast<const MCSectionWasm *>(
+ getTargetSection(Fixup.getValue()))) {
+ if (Section->getKind().isText())
+ return wasm::R_WASM_FUNCTION_OFFSET_I32;
+ else if (!Section->isWasmData())
+ return wasm::R_WASM_SECTION_OFFSET_I32;
+ }
+ return IsLocRel ? wasm::R_WASM_MEMORY_ADDR_LOCREL_I32
+ : wasm::R_WASM_MEMORY_ADDR_I32;
+ case FK_Data_8:
+ if (SymA.isFunction()) {
+ if (FixupSection.getKind().isMetadata())
+ return wasm::R_WASM_FUNCTION_OFFSET_I64;
+ return wasm::R_WASM_TABLE_INDEX_I64;
+ }
+ if (SymA.isGlobal())
+ llvm_unreachable("unimplemented R_WASM_GLOBAL_INDEX_I64");
+ if (auto Section = static_cast<const MCSectionWasm *>(
+ getTargetSection(Fixup.getValue()))) {
+ if (Section->getKind().isText())
+ return wasm::R_WASM_FUNCTION_OFFSET_I64;
+ else if (!Section->isWasmData())
+ llvm_unreachable("unimplemented R_WASM_SECTION_OFFSET_I64");
+ }
+ assert(SymA.isData());
+ return wasm::R_WASM_MEMORY_ADDR_I64;
+ default:
+ llvm_unreachable("unimplemented fixup kind");
+ }
+}
+
+std::unique_ptr<MCObjectTargetWriter>
+llvm::createWebAssemblyWasmObjectWriter(bool Is64Bit, bool IsEmscripten) {
+ return std::make_unique<WebAssemblyWasmObjectWriter>(Is64Bit, IsEmscripten);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/README.txt b/contrib/llvm-project/llvm/lib/Target/WebAssembly/README.txt
new file mode 100644
index 000000000000..8dc2d7fcc733
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/README.txt
@@ -0,0 +1,173 @@
+//===-- README.txt - Notes for WebAssembly code gen -----------------------===//
+
+The object format emitted by the WebAssembly backend is documented in:
+
+ * https://github.com/WebAssembly/tool-conventions/blob/main/Linking.md
+
+The C ABI is described in:
+
+ * https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md
+
+For more information on WebAssembly itself, see the home page:
+
+ * https://webassembly.github.io/
+
+Emscripten provides a C/C++ compilation environment based on clang which
+includes standard libraries, tools, and packaging for producing WebAssembly
+applications that can run in browsers and other environments.
+
+wasi-sdk provides a more minimal C/C++ SDK based on clang, llvm and a libc based
+on musl, for producing WebAssembly applications that use the WASI ABI.
+
+Rust provides WebAssembly support integrated into Cargo. There are two
+main options:
+ - wasm32-unknown-unknown, which provides a relatively minimal environment
+ that has an emphasis on being "native"
+ - wasm32-unknown-emscripten, which uses Emscripten internally and
+ provides standard C/C++ libraries, filesystem emulation, GL and SDL
+ bindings
+For more information, see:
+ * https://www.hellorust.com/
+
+The following documents contain some information on the semantics and binary
+encoding of WebAssembly itself:
+ * https://github.com/WebAssembly/design/blob/main/Semantics.md
+ * https://github.com/WebAssembly/design/blob/main/BinaryEncoding.md
+
+Some notes on ways that the generated code could be improved follow:
+
+//===---------------------------------------------------------------------===//
+
+The br, br_if, and br_table instructions can sometimes keep a value on the
+value stack across the jump. We should (a) model this, and (b) extend the
+stackifier to take advantage of it.
+
+//===---------------------------------------------------------------------===//
+
+The min/max instructions aren't exactly a<b?a:b because of NaN and negative
+zero behavior: for example, f32.min(-0.0, +0.0) is -0.0 whereas a<b?a:b
+yields +0.0, and min with a NaN operand returns NaN whereas a<b?a:b returns
+b. The ARM target has the same kind of min/max instructions and has
+implemented optimizations for them; we should do similar optimizations for
+WebAssembly.
+
+//===---------------------------------------------------------------------===//
+
+AArch64 runs SeparateConstOffsetFromGEPPass, followed by EarlyCSE and LICM.
+Would these be useful to run for WebAssembly too? Also, it has an option to
+run SimplifyCFG after running the AtomicExpand pass. Would this be useful for
+us too?
+
+//===---------------------------------------------------------------------===//
+
+Register stackification uses the VALUE_STACK physical register to impose
+ordering dependencies on instructions with stack operands. This is pessimistic;
+we should consider alternate ways to model stack dependencies.
+
+//===---------------------------------------------------------------------===//
+
+Lots of things could be done in WebAssemblyTargetTransformInfo.cpp. Similarly,
+there are numerous optimization-related hooks that can be overridden in
+WebAssemblyTargetLowering.
+
+//===---------------------------------------------------------------------===//
+
+Instead of the OptimizeReturned pass, consider preserving the "returned"
+attribute through to MachineInstrs and extending the MemIntrinsicResults pass
+to do this optimization on calls too. That would also let the
+WebAssemblyPeephole pass clean up dead defs for such calls, as it does for
+stores.
+
+//===---------------------------------------------------------------------===//
+
+Consider implementing optimizeSelect, optimizeCompareInstr, optimizeCondBranch,
+optimizeLoadInstr, and/or getMachineCombinerPatterns.
+
+//===---------------------------------------------------------------------===//
+
+Find a clean way to fix the problem which leads to the Shrink Wrapping pass
+being run after the WebAssembly PEI pass.
+
+//===---------------------------------------------------------------------===//
+
+When setting multiple local variables to the same constant, we currently get
+code like this:
+
+ i32.const $4=, 0
+ i32.const $3=, 0
+
+It could be done with a smaller encoding like this:
+
+ i32.const $push5=, 0
+ local.tee $push6=, $4=, $pop5
+ local.copy $3=, $pop6
+
+//===---------------------------------------------------------------------===//
+
+WebAssembly registers are implicitly initialized to zero. Explicit zeroing is
+therefore often redundant and could be optimized away.
+
+//===---------------------------------------------------------------------===//
+
+Small indices may use smaller encodings than large indices.
+WebAssemblyRegColoring and/or WebAssemblyRegRenumbering should sort registers
+according to their usage frequency to maximize the usage of smaller encodings.
+
+//===---------------------------------------------------------------------===//
+
+Many cases of irreducible control flow could be transformed more optimally
+than via the transform in WebAssemblyFixIrreducibleControlFlow.cpp.
+
+It may also be worthwhile to do transforms before register coloring,
+particularly when duplicating code, to allow register coloring to be aware of
+the duplication.
+
+//===---------------------------------------------------------------------===//
+
+WebAssemblyRegStackify could use AliasAnalysis to reorder loads and stores more
+aggressively.
+
+//===---------------------------------------------------------------------===//
+
+WebAssemblyRegStackify is currently a greedy algorithm. This means that, for
+example, a binary operator will stackify with its user before its operands.
+However, if moving the binary operator to its user moves it to a place where
+its operands can't be moved to, it would be better to leave it in place, or
+perhaps move it up, so that it can stackify its operands. A binary operator
+has two operands and one result, so in such cases there could be a net win by
+preferring the operands.
+
+//===---------------------------------------------------------------------===//
+
+Instruction ordering has a significant influence on register stackification and
+coloring. Consider experimenting with the MachineScheduler (enable via
+enableMachineScheduler) and determine if it can be configured to schedule
+instructions advantageously for this purpose.
+
+//===---------------------------------------------------------------------===//
+
+WebAssemblyRegStackify currently assumes that the stack must be empty after
+an instruction with no return values, however wasm doesn't actually require
+this. WebAssemblyRegStackify could be extended, or possibly rewritten, to take
+full advantage of what WebAssembly permits.
+
+//===---------------------------------------------------------------------===//
+
+Add support for mergeable sections in the Wasm writer, such as for strings and
+floating-point constants.
+
+//===---------------------------------------------------------------------===//
+
+The function @dynamic_alloca_redzone in test/CodeGen/WebAssembly/userstack.ll
+ends up with a local.tee in its prologue which has an unused result, requiring
+an extra drop:
+
+ global.get $push8=, 0
+ local.tee $push9=, 1, $pop8
+ drop $pop9
+ [...]
+
+The prologue code initially thinks it needs an FP register, but later it
+turns out to be unneeded, so one could approach this either by being more
+clever about not inserting code for an FP in the first place, or by
+optimizing away the copy later.
+
+//===---------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp
new file mode 100644
index 000000000000..ef2c77ade8cc
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp
@@ -0,0 +1,41 @@
+//===-- WebAssemblyTargetInfo.cpp - WebAssembly Target Implementation -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file registers the WebAssembly target.
+///
+//===----------------------------------------------------------------------===//
+
+#include "TargetInfo/WebAssemblyTargetInfo.h"
+#include "llvm/MC/TargetRegistry.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-target-info"
+
+Target &llvm::getTheWebAssemblyTarget32() {
+ static Target TheWebAssemblyTarget32;
+ return TheWebAssemblyTarget32;
+}
+Target &llvm::getTheWebAssemblyTarget64() {
+ static Target TheWebAssemblyTarget64;
+ return TheWebAssemblyTarget64;
+}
+
+extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeWebAssemblyTargetInfo() {
+ RegisterTarget<Triple::wasm32> X(getTheWebAssemblyTarget32(), "wasm32",
+ "WebAssembly 32-bit", "WebAssembly");
+ RegisterTarget<Triple::wasm64> Y(getTheWebAssemblyTarget64(), "wasm64",
+ "WebAssembly 64-bit", "WebAssembly");
+}
+
+// Defines llvm::WebAssembly::getWasm64Opcode and
+// llvm::WebAssembly::getStackOpcode, which have to be in a shared location
+// between CodeGen and MC.
+#define GET_INSTRMAP_INFO 1
+#define GET_INSTRINFO_ENUM 1
+#define GET_INSTRINFO_MC_HELPER_DECLS
+#include "WebAssemblyGenInstrInfo.inc"
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.h
new file mode 100644
index 000000000000..741cc002f9e2
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.h
@@ -0,0 +1,34 @@
+//===-- WebAssemblyTargetInfo.h - WebAssembly Target Impl -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file registers the WebAssembly target.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_TARGETINFO_WEBASSEMBLYTARGETINFO_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_TARGETINFO_WEBASSEMBLYTARGETINFO_H
+
+namespace llvm {
+
+class Target;
+
+Target &getTheWebAssemblyTarget32();
+Target &getTheWebAssemblyTarget64();
+
+namespace WebAssembly {
+
+int getStackOpcode(unsigned short Opcode);
+int getRegisterOpcode(unsigned short Opcode);
+int getWasm64Opcode(unsigned short Opcode);
+
+} // namespace WebAssembly
+
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_WEBASSEMBLY_TARGETINFO_WEBASSEMBLYTARGETINFO_H
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WasmAddressSpaces.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WasmAddressSpaces.h
new file mode 100644
index 000000000000..2239badca69c
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WasmAddressSpaces.h
@@ -0,0 +1,48 @@
+//===--- llvm/CodeGen/WasmAddressSpaces.h -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Address Spaces for WebAssembly Type Handling
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_UTILS_WASMADDRESSSPACES_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_UTILS_WASMADDRESSSPACES_H
+
+namespace llvm {
+
+namespace WebAssembly {
+
+enum WasmAddressSpace : unsigned {
+ // Default address space, for pointers to linear memory (stack, heap, data).
+ WASM_ADDRESS_SPACE_DEFAULT = 0,
+ // A non-integral address space for pointers to named objects outside of
+ // linear memory: WebAssembly globals or WebAssembly locals. Loads and stores
+ // to these pointers are lowered to global.get / global.set or local.get /
+ // local.set, as appropriate.
+ WASM_ADDRESS_SPACE_VAR = 1,
+ // A non-integral address space for externref values
+ WASM_ADDRESS_SPACE_EXTERNREF = 10,
+ // A non-integral address space for funcref values
+ WASM_ADDRESS_SPACE_FUNCREF = 20,
+};
+
+inline bool isDefaultAddressSpace(unsigned AS) {
+ return AS == WASM_ADDRESS_SPACE_DEFAULT;
+}
+inline bool isWasmVarAddressSpace(unsigned AS) {
+ return AS == WASM_ADDRESS_SPACE_VAR;
+}
+inline bool isValidAddressSpace(unsigned AS) {
+ return isDefaultAddressSpace(AS) || isWasmVarAddressSpace(AS);
+}
+
+} // namespace WebAssembly
+
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_WEBASSEMBLY_UTILS_WASMADDRESSSPACES_H
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp
new file mode 100644
index 000000000000..86fb99cc98a9
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp
@@ -0,0 +1,95 @@
+//===-- WebAssemblyTypeUtilities.cpp - WebAssembly Type Utility Functions -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements several utility functions for WebAssembly type parsing.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyTypeUtilities.h"
+#include "llvm/ADT/StringSwitch.h"
+
+// Get register classes enum.
+#define GET_REGINFO_ENUM
+#include "WebAssemblyGenRegisterInfo.inc"
+
+using namespace llvm;
+
+MVT WebAssembly::parseMVT(StringRef Type) {
+ return StringSwitch<MVT>(Type)
+ .Case("i32", MVT::i32)
+ .Case("i64", MVT::i64)
+ .Case("f32", MVT::f32)
+ .Case("f64", MVT::f64)
+ .Case("i64", MVT::i64)
+ .Case("v16i8", MVT::v16i8)
+ .Case("v8i16", MVT::v8i16)
+ .Case("v4i32", MVT::v4i32)
+ .Case("v2i64", MVT::v2i64)
+ .Case("funcref", MVT::funcref)
+ .Case("externref", MVT::externref)
+ .Default(MVT::INVALID_SIMPLE_VALUE_TYPE);
+}
+
+wasm::ValType WebAssembly::toValType(MVT Type) {
+ switch (Type.SimpleTy) {
+ case MVT::i32:
+ return wasm::ValType::I32;
+ case MVT::i64:
+ return wasm::ValType::I64;
+ case MVT::f32:
+ return wasm::ValType::F32;
+ case MVT::f64:
+ return wasm::ValType::F64;
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v4f32:
+ case MVT::v2f64:
+ return wasm::ValType::V128;
+ case MVT::funcref:
+ return wasm::ValType::FUNCREF;
+ case MVT::externref:
+ return wasm::ValType::EXTERNREF;
+ default:
+ llvm_unreachable("unexpected type");
+ }
+}
+
+void WebAssembly::wasmSymbolSetType(MCSymbolWasm *Sym, const Type *GlobalVT,
+ const ArrayRef<MVT> &VTs) {
+ assert(!Sym->getType());
+
+  // Tables are represented as arrays in LLVM IR, so they reach this point as
+  // aggregate array types with an element type that is a reference type.
+ wasm::ValType ValTy;
+ bool IsTable = false;
+ if (WebAssembly::isWebAssemblyTableType(GlobalVT)) {
+ IsTable = true;
+ const Type *ElTy = GlobalVT->getArrayElementType();
+ if (WebAssembly::isWebAssemblyExternrefType(ElTy))
+ ValTy = wasm::ValType::EXTERNREF;
+ else if (WebAssembly::isWebAssemblyFuncrefType(ElTy))
+ ValTy = wasm::ValType::FUNCREF;
+ else
+ report_fatal_error("unhandled reference type");
+ } else if (VTs.size() == 1) {
+ ValTy = WebAssembly::toValType(VTs[0]);
+ } else
+ report_fatal_error("Aggregate globals not yet implemented");
+
+ if (IsTable) {
+ Sym->setType(wasm::WASM_SYMBOL_TYPE_TABLE);
+ Sym->setTableType(ValTy);
+ } else {
+ Sym->setType(wasm::WASM_SYMBOL_TYPE_GLOBAL);
+ Sym->setGlobalType(wasm::WasmGlobalType{uint8_t(ValTy), /*Mutable=*/true});
+ }
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h
new file mode 100644
index 000000000000..a8860477a247
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h
@@ -0,0 +1,68 @@
+//===-- WebAssemblyTypeUtilities - WebAssembly Type Utilities---*- C++ -*-====//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the declaration of the WebAssembly-specific type parsing
+/// utility functions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_UTILS_WEBASSEMBLYTYPEUTILITIES_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_UTILS_WEBASSEMBLYTYPEUTILITIES_H
+
+#include "MCTargetDesc/WebAssemblyMCTypeUtilities.h"
+#include "WasmAddressSpaces.h"
+#include "llvm/BinaryFormat/Wasm.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/MC/MCSymbolWasm.h"
+
+namespace llvm {
+
+namespace WebAssembly {
+
+/// Return true if this is a WebAssembly Externref Type.
+inline bool isWebAssemblyExternrefType(const Type *Ty) {
+ return Ty->isPointerTy() &&
+ Ty->getPointerAddressSpace() ==
+ WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF;
+}
+
+/// Return true if this is a WebAssembly Funcref Type.
+inline bool isWebAssemblyFuncrefType(const Type *Ty) {
+ return Ty->isPointerTy() &&
+ Ty->getPointerAddressSpace() ==
+ WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF;
+}
+
+/// Return true if this is a WebAssembly Reference Type.
+inline bool isWebAssemblyReferenceType(const Type *Ty) {
+ return isWebAssemblyExternrefType(Ty) || isWebAssemblyFuncrefType(Ty);
+}
+
+/// Return true if the table represents a WebAssembly table type.
+inline bool isWebAssemblyTableType(const Type *Ty) {
+ return Ty->isArrayTy() &&
+ isWebAssemblyReferenceType(Ty->getArrayElementType());
+}
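+
+// Editor's note (illustrative, not part of the original header): with opaque
+// pointers, an externref value is written in IR as 'ptr addrspace(N)' where
+// N is WASM_ADDRESS_SPACE_EXTERNREF, a funcref likewise uses
+// WASM_ADDRESS_SPACE_FUNCREF, and a table is an array of such pointers,
+// e.g. '[0 x ptr addrspace(N)]'.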
+
+// Conversions between type-name strings, MVTs, and wasm ValTypes.
+
+MVT parseMVT(StringRef Type);
+
+// Convert an MVT into its corresponding wasm ValType.
+wasm::ValType toValType(MVT Type);
+
+/// Sets the type (global or table) of a Wasm symbol.
+void wasmSymbolSetType(MCSymbolWasm *Sym, const Type *GlobalVT,
+ const ArrayRef<MVT> &VTs);
+
+} // end namespace WebAssembly
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssembly.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssembly.h
new file mode 100644
index 000000000000..91765ad117bd
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssembly.h
@@ -0,0 +1,106 @@
+//===-- WebAssembly.h - Top-level interface for WebAssembly ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the entry points for global functions defined in
+/// the LLVM WebAssembly back-end.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLY_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLY_H
+
+#include "llvm/PassRegistry.h"
+#include "llvm/Support/CodeGen.h"
+
+namespace llvm {
+
+class WebAssemblyTargetMachine;
+class ModulePass;
+class FunctionPass;
+
+// LLVM IR passes.
+ModulePass *createWebAssemblyLowerEmscriptenEHSjLj();
+ModulePass *createWebAssemblyAddMissingPrototypes();
+ModulePass *createWebAssemblyFixFunctionBitcasts();
+FunctionPass *createWebAssemblyOptimizeReturned();
+FunctionPass *createWebAssemblyLowerRefTypesIntPtrConv();
+
+// ISel and immediate followup passes.
+FunctionPass *createWebAssemblyISelDag(WebAssemblyTargetMachine &TM,
+ CodeGenOptLevel OptLevel);
+FunctionPass *createWebAssemblyArgumentMove();
+FunctionPass *createWebAssemblySetP2AlignOperands();
+
+// Late passes.
+FunctionPass *createWebAssemblyReplacePhysRegs();
+FunctionPass *createWebAssemblyNullifyDebugValueLists();
+FunctionPass *createWebAssemblyOptimizeLiveIntervals();
+FunctionPass *createWebAssemblyMemIntrinsicResults();
+FunctionPass *createWebAssemblyRegStackify();
+FunctionPass *createWebAssemblyRegColoring();
+FunctionPass *createWebAssemblyFixBrTableDefaults();
+FunctionPass *createWebAssemblyFixIrreducibleControlFlow();
+FunctionPass *createWebAssemblyLateEHPrepare();
+FunctionPass *createWebAssemblyCFGSort();
+FunctionPass *createWebAssemblyCFGStackify();
+FunctionPass *createWebAssemblyExplicitLocals();
+FunctionPass *createWebAssemblyLowerBrUnless();
+FunctionPass *createWebAssemblyRegNumbering();
+FunctionPass *createWebAssemblyDebugFixup();
+FunctionPass *createWebAssemblyPeephole();
+ModulePass *createWebAssemblyMCLowerPrePass();
+
+// PassRegistry initialization declarations.
+void initializeFixFunctionBitcastsPass(PassRegistry &);
+void initializeOptimizeReturnedPass(PassRegistry &);
+void initializeWebAssemblyAddMissingPrototypesPass(PassRegistry &);
+void initializeWebAssemblyArgumentMovePass(PassRegistry &);
+void initializeWebAssemblyCFGSortPass(PassRegistry &);
+void initializeWebAssemblyCFGStackifyPass(PassRegistry &);
+void initializeWebAssemblyDAGToDAGISelPass(PassRegistry &);
+void initializeWebAssemblyDebugFixupPass(PassRegistry &);
+void initializeWebAssemblyExceptionInfoPass(PassRegistry &);
+void initializeWebAssemblyExplicitLocalsPass(PassRegistry &);
+void initializeWebAssemblyFixBrTableDefaultsPass(PassRegistry &);
+void initializeWebAssemblyFixIrreducibleControlFlowPass(PassRegistry &);
+void initializeWebAssemblyLateEHPreparePass(PassRegistry &);
+void initializeWebAssemblyLowerBrUnlessPass(PassRegistry &);
+void initializeWebAssemblyLowerEmscriptenEHSjLjPass(PassRegistry &);
+void initializeWebAssemblyLowerRefTypesIntPtrConvPass(PassRegistry &);
+void initializeWebAssemblyMCLowerPrePassPass(PassRegistry &);
+void initializeWebAssemblyMemIntrinsicResultsPass(PassRegistry &);
+void initializeWebAssemblyNullifyDebugValueListsPass(PassRegistry &);
+void initializeWebAssemblyOptimizeLiveIntervalsPass(PassRegistry &);
+void initializeWebAssemblyPeepholePass(PassRegistry &);
+void initializeWebAssemblyRegColoringPass(PassRegistry &);
+void initializeWebAssemblyRegNumberingPass(PassRegistry &);
+void initializeWebAssemblyRegStackifyPass(PassRegistry &);
+void initializeWebAssemblyReplacePhysRegsPass(PassRegistry &);
+void initializeWebAssemblySetP2AlignOperandsPass(PassRegistry &);
+
+namespace WebAssembly {
+enum TargetIndex {
+ // Followed by a local index (ULEB).
+ TI_LOCAL,
+ // Followed by an absolute global index (ULEB). DEPRECATED.
+ TI_GLOBAL_FIXED,
+ // Followed by the index from the bottom of the Wasm stack.
+ TI_OPERAND_STACK,
+ // Followed by a compilation unit relative global index (uint32_t)
+ // that will have an associated relocation.
+ TI_GLOBAL_RELOC,
+ // Like TI_LOCAL, but indicates an indirect value (e.g. byval arg
+ // passed by pointer).
+ TI_LOCAL_INDIRECT
+};
+} // end namespace WebAssembly
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssembly.td b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssembly.td
new file mode 100644
index 000000000000..d538197450b6
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssembly.td
@@ -0,0 +1,139 @@
+//- WebAssembly.td - Describe the WebAssembly Target Machine --*- tablegen -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This is a target description file for the WebAssembly architecture,
+/// which is also known as "wasm".
+///
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Target-independent interfaces which we are implementing
+//===----------------------------------------------------------------------===//
+
+include "llvm/Target/Target.td"
+
+//===----------------------------------------------------------------------===//
+// WebAssembly Subtarget features.
+//===----------------------------------------------------------------------===//
+
+def FeatureSIMD128 : SubtargetFeature<"simd128", "SIMDLevel", "SIMD128",
+ "Enable 128-bit SIMD">;
+
+def FeatureRelaxedSIMD : SubtargetFeature<"relaxed-simd", "SIMDLevel", "RelaxedSIMD",
+ "Enable relaxed-simd instructions">;
+
+def FeatureAtomics : SubtargetFeature<"atomics", "HasAtomics", "true",
+ "Enable Atomics">;
+
+def FeatureNontrappingFPToInt :
+ SubtargetFeature<"nontrapping-fptoint",
+ "HasNontrappingFPToInt", "true",
+ "Enable non-trapping float-to-int conversion operators">;
+
+def FeatureSignExt :
+ SubtargetFeature<"sign-ext",
+ "HasSignExt", "true",
+ "Enable sign extension operators">;
+
+def FeatureTailCall :
+ SubtargetFeature<"tail-call",
+ "HasTailCall", "true",
+ "Enable tail call instructions">;
+
+def FeatureExceptionHandling :
+ SubtargetFeature<"exception-handling", "HasExceptionHandling", "true",
+ "Enable Wasm exception handling">;
+
+def FeatureBulkMemory :
+ SubtargetFeature<"bulk-memory", "HasBulkMemory", "true",
+ "Enable bulk memory operations">;
+
+def FeatureMultivalue :
+ SubtargetFeature<"multivalue",
+ "HasMultivalue", "true",
+ "Enable multivalue blocks, instructions, and functions">;
+
+def FeatureMutableGlobals :
+ SubtargetFeature<"mutable-globals", "HasMutableGlobals", "true",
+ "Enable mutable globals">;
+
+def FeatureReferenceTypes :
+ SubtargetFeature<"reference-types", "HasReferenceTypes", "true",
+ "Enable reference types">;
+
+def FeatureExtendedConst :
+ SubtargetFeature<"extended-const", "HasExtendedConst", "true",
+ "Enable extended const expressions">;
+
+def FeatureMultiMemory :
+ SubtargetFeature<"multimemory", "HasMultiMemory", "true",
+ "Enable multiple memories">;
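+
+// Editor's note (illustrative, not part of the original change): these
+// features map onto the usual LLVM attribute machinery, so they can be
+// toggled from the command line, e.g.
+//   llc -mtriple=wasm32-unknown-unknown -mattr=+simd128,+atomics
+// or picked up as a set via one of the processor models defined below.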
+
+//===----------------------------------------------------------------------===//
+// Architectures.
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Register File Description
+//===----------------------------------------------------------------------===//
+
+include "WebAssemblyRegisterInfo.td"
+
+//===----------------------------------------------------------------------===//
+// Instruction Descriptions
+//===----------------------------------------------------------------------===//
+
+include "WebAssemblyInstrInfo.td"
+
+def WebAssemblyInstrInfo : InstrInfo;
+
+//===----------------------------------------------------------------------===//
+// WebAssembly Processors supported.
+//===----------------------------------------------------------------------===//
+
+// Minimal Viable Product.
+def : ProcessorModel<"mvp", NoSchedModel, []>;
+
+// Generic processor: latest stable version.
+//
+// This includes features that have achieved phase 4 of the standards process,
+// and that are expected to work for most users at the current time, with
+// consideration given to available support in relevant engines and tools, and
+// the importance of the features.
+def : ProcessorModel<"generic", NoSchedModel,
+ [FeatureSignExt, FeatureMutableGlobals]>;
+
+// Latest and greatest experimental version of WebAssembly. Bugs included!
+def : ProcessorModel<"bleeding-edge", NoSchedModel,
+ [FeatureSIMD128, FeatureAtomics,
+ FeatureNontrappingFPToInt, FeatureSignExt,
+ FeatureMutableGlobals, FeatureBulkMemory,
+ FeatureTailCall]>;
+
+//===----------------------------------------------------------------------===//
+// Target Declaration
+//===----------------------------------------------------------------------===//
+
+def WebAssemblyAsmParser : AsmParser {
+ // The physical register names are not in the binary format or asm text
+ let ShouldEmitMatchRegisterName = 0;
+}
+
+def WebAssemblyAsmWriter : AsmWriter {
+ string AsmWriterClassName = "InstPrinter";
+ int PassSubtarget = 0;
+ int Variant = 0;
+ bit isMCAsmWriter = 1;
+}
+
+def WebAssembly : Target {
+ let InstructionSet = WebAssemblyInstrInfo;
+ let AssemblyParsers = [WebAssemblyAsmParser];
+ let AssemblyWriters = [WebAssemblyAsmWriter];
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp
new file mode 100644
index 000000000000..90e819912847
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp
@@ -0,0 +1,154 @@
+//===-- WebAssemblyAddMissingPrototypes.cpp - Fix prototypeless functions -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Add prototypes to prototype-less functions.
+///
+/// WebAssembly has strict function prototype checking, so we need function
+/// declarations to match the call sites. Clang treats prototype-less functions
+/// as varargs (foo(...)), which happens to work on existing platforms but
+/// doesn't under WebAssembly. This pass finds all the call sites of each
+/// prototype-less function, ensures they agree, and then sets the signature
+/// on the function declaration accordingly.
+///
+//===----------------------------------------------------------------------===//
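+
+// Illustrative example (editor's sketch, not from the original sources):
+// given K&R-style C such as
+//
+//   void foo();                      // declared without a prototype
+//   void bar(void) { foo(42); }
+//
+// clang emits 'declare void @foo(...)' tagged with the "no-prototype"
+// attribute, and this pass rewrites that declaration to
+// 'declare void @foo(i32)' based on the call site in bar.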
+
+#include "WebAssembly.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-add-missing-prototypes"
+
+namespace {
+class WebAssemblyAddMissingPrototypes final : public ModulePass {
+ StringRef getPassName() const override {
+ return "Add prototypes to prototypes-less functions";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ ModulePass::getAnalysisUsage(AU);
+ }
+
+ bool runOnModule(Module &M) override;
+
+public:
+ static char ID;
+ WebAssemblyAddMissingPrototypes() : ModulePass(ID) {}
+};
+} // End anonymous namespace
+
+char WebAssemblyAddMissingPrototypes::ID = 0;
+INITIALIZE_PASS(WebAssemblyAddMissingPrototypes, DEBUG_TYPE,
+                "Add prototypes to prototype-less functions", false, false)
+
+ModulePass *llvm::createWebAssemblyAddMissingPrototypes() {
+ return new WebAssemblyAddMissingPrototypes();
+}
+
+bool WebAssemblyAddMissingPrototypes::runOnModule(Module &M) {
+ LLVM_DEBUG(dbgs() << "********** Add Missing Prototypes **********\n");
+
+ std::vector<std::pair<Function *, Function *>> Replacements;
+
+ // Find all the prototype-less function declarations
+ for (Function &F : M) {
+ if (!F.isDeclaration() || !F.hasFnAttribute("no-prototype"))
+ continue;
+
+ LLVM_DEBUG(dbgs() << "Found no-prototype function: " << F.getName()
+ << "\n");
+
+    // When clang emits prototype-less C functions it uses (...), i.e. a
+    // varargs function that takes no arguments (and has no sentinel). When we
+    // see a no-prototype attribute we expect the function to have these
+    // properties.
+ if (!F.isVarArg())
+ report_fatal_error(
+ "Functions with 'no-prototype' attribute must take varargs: " +
+ F.getName());
+ unsigned NumParams = F.getFunctionType()->getNumParams();
+ if (NumParams != 0) {
+ if (!(NumParams == 1 && F.arg_begin()->hasStructRetAttr()))
+ report_fatal_error("Functions with 'no-prototype' attribute should "
+ "not have params: " +
+ F.getName());
+ }
+
+ // Find calls of this function, looking through bitcasts.
+ SmallVector<CallBase *> Calls;
+ SmallVector<Value *> Worklist;
+ Worklist.push_back(&F);
+ while (!Worklist.empty()) {
+ Value *V = Worklist.pop_back_val();
+ for (User *U : V->users()) {
+ if (auto *BC = dyn_cast<BitCastOperator>(U))
+ Worklist.push_back(BC);
+ else if (auto *CB = dyn_cast<CallBase>(U))
+ if (CB->getCalledOperand() == V)
+ Calls.push_back(CB);
+ }
+ }
+
+ // Create a function prototype based on the first call site that we find.
+ FunctionType *NewType = nullptr;
+ for (CallBase *CB : Calls) {
+ LLVM_DEBUG(dbgs() << "prototype-less call of " << F.getName() << ":\n");
+ LLVM_DEBUG(dbgs() << *CB << "\n");
+ FunctionType *DestType = CB->getFunctionType();
+ if (!NewType) {
+ // Create a new function with the correct type
+ NewType = DestType;
+ LLVM_DEBUG(dbgs() << "found function type: " << *NewType << "\n");
+ } else if (NewType != DestType) {
+ errs() << "warning: prototype-less function used with "
+ "conflicting signatures: "
+ << F.getName() << "\n";
+ LLVM_DEBUG(dbgs() << " " << *DestType << "\n");
+ LLVM_DEBUG(dbgs() << " " << *NewType << "\n");
+ }
+ }
+
+ if (!NewType) {
+ LLVM_DEBUG(
+ dbgs() << "could not derive a function prototype from usage: " +
+ F.getName() + "\n");
+      // We could not derive a type for this function. In this case, strip
+      // the isVarArg flag and make it a simple zero-arg function. This has a
+      // better chance of being correct. The current signature of (...) is
+      // illegal in C since it doesn't have any arguments before the "...",
+      // so this at least makes it possible for the symbol to be resolved by
+      // the linker.
+ NewType = FunctionType::get(F.getFunctionType()->getReturnType(), false);
+ }
+
+ Function *NewF =
+ Function::Create(NewType, F.getLinkage(), F.getName() + ".fixed_sig");
+ NewF->setAttributes(F.getAttributes());
+ NewF->removeFnAttr("no-prototype");
+ Replacements.emplace_back(&F, NewF);
+ }
+
+ for (auto &Pair : Replacements) {
+ Function *OldF = Pair.first;
+ Function *NewF = Pair.second;
+ std::string Name = std::string(OldF->getName());
+ M.getFunctionList().push_back(NewF);
+ OldF->replaceAllUsesWith(
+ ConstantExpr::getPointerBitCastOrAddrSpaceCast(NewF, OldF->getType()));
+ OldF->eraseFromParent();
+ NewF->setName(Name);
+ }
+
+ return !Replacements.empty();
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp
new file mode 100644
index 000000000000..02f5cc6da77c
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp
@@ -0,0 +1,97 @@
+//===-- WebAssemblyArgumentMove.cpp - Argument instruction moving ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file moves ARGUMENT instructions after ScheduleDAG scheduling.
+///
+/// Arguments are really live-in registers; however, since we use virtual
+/// registers and LLVM doesn't support live-in virtual registers, we're
+/// currently making do with ARGUMENT instructions, which are placed at the
+/// top of the entry block. The trick is to get them to *stay* at the top of
+/// the entry block.
+///
+/// The ARGUMENTS physical register keeps these instructions pinned in place
+/// during liveness-aware CodeGen passes; however, one thing that does not
+/// respect this is the ScheduleDAG scheduler. This pass is therefore run
+/// immediately after that scheduler.
+///
+/// This is all hopefully a temporary solution until we find a better way to
+/// describe the live-in nature of arguments.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-argument-move"
+
+namespace {
+class WebAssemblyArgumentMove final : public MachineFunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyArgumentMove() : MachineFunctionPass(ID) {}
+
+ StringRef getPassName() const override { return "WebAssembly Argument Move"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addPreserved<MachineBlockFrequencyInfo>();
+ AU.addPreservedID(MachineDominatorsID);
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // end anonymous namespace
+
+char WebAssemblyArgumentMove::ID = 0;
+INITIALIZE_PASS(WebAssemblyArgumentMove, DEBUG_TYPE,
+ "Move ARGUMENT instructions for WebAssembly", false, false)
+
+FunctionPass *llvm::createWebAssemblyArgumentMove() {
+ return new WebAssemblyArgumentMove();
+}
+
+bool WebAssemblyArgumentMove::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG({
+ dbgs() << "********** Argument Move **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
+
+ bool Changed = false;
+ MachineBasicBlock &EntryMBB = MF.front();
+ MachineBasicBlock::iterator InsertPt = EntryMBB.end();
+
+  // Look for the first non-ARGUMENT instruction.
+ for (MachineInstr &MI : EntryMBB) {
+ if (!WebAssembly::isArgument(MI.getOpcode())) {
+ InsertPt = MI;
+ break;
+ }
+ }
+
+  // Now move any ARGUMENT instructions later in the block to before our
+  // first non-ARGUMENT instruction.
+ for (MachineInstr &MI : llvm::make_range(InsertPt, EntryMBB.end())) {
+ if (WebAssembly::isArgument(MI.getOpcode())) {
+ EntryMBB.insert(InsertPt, MI.removeFromParent());
+ Changed = true;
+ }
+ }
+
+ return Changed;
+}
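+
+// Schematic before/after (editor's note, not part of the original change):
+// if post-scheduling the entry block looked like
+//   ARGUMENT_i32 ..., CONST_I32 ..., ARGUMENT_i32 ...
+// this pass moves the trailing ARGUMENT above the CONST_I32, so all ARGUMENT
+// instructions remain contiguous at the top of the entry block.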
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
new file mode 100644
index 000000000000..908efbb8d321
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
@@ -0,0 +1,750 @@
+//===-- WebAssemblyAsmPrinter.cpp - WebAssembly LLVM assembly writer ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains a printer that converts from our internal
+/// representation of machine-dependent LLVM code to the WebAssembly assembly
+/// language.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyAsmPrinter.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "MCTargetDesc/WebAssemblyTargetStreamer.h"
+#include "TargetInfo/WebAssemblyTargetInfo.h"
+#include "Utils/WebAssemblyTypeUtilities.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMCInstLower.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblyRegisterInfo.h"
+#include "WebAssemblyRuntimeLibcallSignatures.h"
+#include "WebAssemblyTargetMachine.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/BinaryFormat/Wasm.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSectionWasm.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCSymbolWasm.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "asm-printer"
+
+extern cl::opt<bool> WasmKeepRegisters;
+
+//===----------------------------------------------------------------------===//
+// Helpers.
+//===----------------------------------------------------------------------===//
+
+MVT WebAssemblyAsmPrinter::getRegType(unsigned RegNo) const {
+ const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
+ const TargetRegisterClass *TRC = MRI->getRegClass(RegNo);
+ for (MVT T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64, MVT::v16i8, MVT::v8i16,
+ MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64})
+ if (TRI->isTypeLegalForClass(*TRC, T))
+ return T;
+ LLVM_DEBUG(errs() << "Unknown type for register number: " << RegNo);
+ llvm_unreachable("Unknown register type");
+ return MVT::Other;
+}
+
+std::string WebAssemblyAsmPrinter::regToString(const MachineOperand &MO) {
+ Register RegNo = MO.getReg();
+ assert(RegNo.isVirtual() &&
+ "Unlowered physical register encountered during assembly printing");
+ assert(!MFI->isVRegStackified(RegNo));
+ unsigned WAReg = MFI->getWAReg(RegNo);
+ assert(WAReg != WebAssembly::UnusedReg);
+ return '$' + utostr(WAReg);
+}
+
+WebAssemblyTargetStreamer *WebAssemblyAsmPrinter::getTargetStreamer() {
+ MCTargetStreamer *TS = OutStreamer->getTargetStreamer();
+ return static_cast<WebAssemblyTargetStreamer *>(TS);
+}
+
+// Emscripten exception handling helpers
+//
+// This converts invoke names generated by LowerEmscriptenEHSjLj to the real
+// names that are expected by JavaScript glue code. The invoke names expected
+// by Emscripten's JS glue code are based on the function's argument and
+// return types; for example, for a function that takes an i32 and returns
+// nothing, it is 'invoke_vi'. But the invoke names generated by the
+// LowerEmscriptenEHSjLj pass contain a mangled string generated from their
+// IR types, for example, "__invoke_void_%struct.mystruct*_int", because the
+// final wasm types are not available in that IR pass. So we convert those
+// names to the form that the Emscripten JS code expects.
+//
+// Refer to LowerEmscriptenEHSjLj pass for more details.
+
+// Returns true if the given function name is an invoke name generated by
+// LowerEmscriptenEHSjLj pass.
+static bool isEmscriptenInvokeName(StringRef Name) {
+ if (Name.front() == '"' && Name.back() == '"')
+ Name = Name.substr(1, Name.size() - 2);
+ return Name.starts_with("__invoke_");
+}
+
+// Returns a character that represents the given wasm value type in invoke
+// signatures.
+static char getInvokeSig(wasm::ValType VT) {
+ switch (VT) {
+ case wasm::ValType::I32:
+ return 'i';
+ case wasm::ValType::I64:
+ return 'j';
+ case wasm::ValType::F32:
+ return 'f';
+ case wasm::ValType::F64:
+ return 'd';
+ case wasm::ValType::V128:
+ return 'V';
+ case wasm::ValType::FUNCREF:
+ return 'F';
+ case wasm::ValType::EXTERNREF:
+ return 'X';
+ }
+ llvm_unreachable("Unhandled wasm::ValType enum");
+}
+
+// Given the wasm signature, generate the invoke name in the format JS glue code
+// expects.
+static std::string getEmscriptenInvokeSymbolName(wasm::WasmSignature *Sig) {
+ assert(Sig->Returns.size() <= 1);
+ std::string Ret = "invoke_";
+ if (!Sig->Returns.empty())
+ for (auto VT : Sig->Returns)
+ Ret += getInvokeSig(VT);
+ else
+ Ret += 'v';
+ // Invokes' first argument is a pointer to the original function, so skip it
+ for (unsigned I = 1, E = Sig->Params.size(); I < E; I++)
+ Ret += getInvokeSig(Sig->Params[I]);
+ return Ret;
+}
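+
+// Worked example (editor's note, not from the original sources): a signature
+// with Returns = {I32} and Params = {I32, F64} produces "invoke_id": 'i' for
+// the i32 return, then 'd' for the f64 parameter, with the leading i32 (the
+// pointer to the original function) skipped.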
+
+//===----------------------------------------------------------------------===//
+// WebAssemblyAsmPrinter Implementation.
+//===----------------------------------------------------------------------===//
+
+MCSymbolWasm *WebAssemblyAsmPrinter::getMCSymbolForFunction(
+ const Function *F, bool EnableEmEH, wasm::WasmSignature *Sig,
+ bool &InvokeDetected) {
+ MCSymbolWasm *WasmSym = nullptr;
+ if (EnableEmEH && isEmscriptenInvokeName(F->getName())) {
+ assert(Sig);
+ InvokeDetected = true;
+ if (Sig->Returns.size() > 1) {
+ std::string Msg =
+ "Emscripten EH/SjLj does not support multivalue returns: " +
+ std::string(F->getName()) + ": " +
+ WebAssembly::signatureToString(Sig);
+ report_fatal_error(Twine(Msg));
+ }
+ WasmSym = cast<MCSymbolWasm>(
+ GetExternalSymbolSymbol(getEmscriptenInvokeSymbolName(Sig)));
+ } else {
+ WasmSym = cast<MCSymbolWasm>(getSymbol(F));
+ }
+ return WasmSym;
+}
+
+void WebAssemblyAsmPrinter::emitGlobalVariable(const GlobalVariable *GV) {
+ if (!WebAssembly::isWasmVarAddressSpace(GV->getAddressSpace())) {
+ AsmPrinter::emitGlobalVariable(GV);
+ return;
+ }
+
+ assert(!GV->isThreadLocal());
+
+ MCSymbolWasm *Sym = cast<MCSymbolWasm>(getSymbol(GV));
+
+ if (!Sym->getType()) {
+ SmallVector<MVT, 1> VTs;
+ Type *GlobalVT = GV->getValueType();
+ if (Subtarget) {
+ // Subtarget is only set when a function is defined, because
+ // each function can declare a different subtarget. For example,
+ // on ARM a compilation unit might have a function on ARM and
+      // another on Thumb. Therefore, only if Subtarget is non-null can we
+      // actually calculate the legal VTs.
+ const WebAssemblyTargetLowering &TLI = *Subtarget->getTargetLowering();
+ computeLegalValueVTs(TLI, GV->getParent()->getContext(),
+ GV->getParent()->getDataLayout(), GlobalVT, VTs);
+ }
+ WebAssembly::wasmSymbolSetType(Sym, GlobalVT, VTs);
+ }
+
+ emitVisibility(Sym, GV->getVisibility(), !GV->isDeclaration());
+ emitSymbolType(Sym);
+ if (GV->hasInitializer()) {
+ assert(getSymbolPreferLocal(*GV) == Sym);
+ emitLinkage(GV, Sym);
+ OutStreamer->emitLabel(Sym);
+ // TODO: Actually emit the initializer value. Otherwise the global has the
+ // default value for its type (0, ref.null, etc).
+ OutStreamer->addBlankLine();
+ }
+}
+
+MCSymbol *WebAssemblyAsmPrinter::getOrCreateWasmSymbol(StringRef Name) {
+ auto *WasmSym = cast<MCSymbolWasm>(GetExternalSymbolSymbol(Name));
+
+ // May be called multiple times, so early out.
+ if (WasmSym->getType())
+ return WasmSym;
+
+ const WebAssemblySubtarget &Subtarget = getSubtarget();
+
+ // Except for certain known symbols, all symbols used by CodeGen are
+ // functions. It's OK to hardcode knowledge of specific symbols here; this
+ // method is precisely there for fetching the signatures of known
+ // Clang-provided symbols.
+ if (Name == "__stack_pointer" || Name == "__tls_base" ||
+ Name == "__memory_base" || Name == "__table_base" ||
+ Name == "__tls_size" || Name == "__tls_align") {
+ bool Mutable =
+ Name == "__stack_pointer" || Name == "__tls_base";
+ WasmSym->setType(wasm::WASM_SYMBOL_TYPE_GLOBAL);
+ WasmSym->setGlobalType(wasm::WasmGlobalType{
+ uint8_t(Subtarget.hasAddr64() ? wasm::WASM_TYPE_I64
+ : wasm::WASM_TYPE_I32),
+ Mutable});
+ return WasmSym;
+ }
+
+ if (Name.starts_with("GCC_except_table")) {
+ WasmSym->setType(wasm::WASM_SYMBOL_TYPE_DATA);
+ return WasmSym;
+ }
+
+ SmallVector<wasm::ValType, 4> Returns;
+ SmallVector<wasm::ValType, 4> Params;
+ if (Name == "__cpp_exception" || Name == "__c_longjmp") {
+ WasmSym->setType(wasm::WASM_SYMBOL_TYPE_TAG);
+ // In static linking we define tag symbols in WasmException::endModule().
+ // But we may have multiple objects to be linked together, each of which
+ // defines the tag symbols. To resolve them, we declare them as weak. In
+    // dynamic linking we make tag symbols undefined in the backend, define
+    // them in JS, and feed them to each importing module.
+ if (!isPositionIndependent())
+ WasmSym->setWeak(true);
+ WasmSym->setExternal(true);
+
+ // Currently both C++ exceptions and C longjmps have a single pointer type
+ // param. For C++ exceptions it is a pointer to an exception object, and for
+    // C longjmps it is a pointer to a struct that contains a setjmp buffer
+    // and a longjmp return value. We may consider using multiple value
+    // parameters for longjmps later when multivalue support is ready.
+ wasm::ValType AddrType =
+ Subtarget.hasAddr64() ? wasm::ValType::I64 : wasm::ValType::I32;
+ Params.push_back(AddrType);
+ } else { // Function symbols
+ WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION);
+ getLibcallSignature(Subtarget, Name, Returns, Params);
+ }
+ auto Signature = std::make_unique<wasm::WasmSignature>(std::move(Returns),
+ std::move(Params));
+ WasmSym->setSignature(Signature.get());
+ addSignature(std::move(Signature));
+
+ return WasmSym;
+}
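+
+// For illustration (editor's sketch, not part of the original change): on
+// wasm32 this types "__stack_pointer" as a mutable i32 global, so its
+// declaration would be printed as something like
+//   .globaltype __stack_pointer, i32
+// while "__tls_size" gets the same value type but is immutable.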
+
+void WebAssemblyAsmPrinter::emitSymbolType(const MCSymbolWasm *Sym) {
+ std::optional<wasm::WasmSymbolType> WasmTy = Sym->getType();
+ if (!WasmTy)
+ return;
+
+ switch (*WasmTy) {
+ case wasm::WASM_SYMBOL_TYPE_GLOBAL:
+ getTargetStreamer()->emitGlobalType(Sym);
+ break;
+ case wasm::WASM_SYMBOL_TYPE_TAG:
+ getTargetStreamer()->emitTagType(Sym);
+ break;
+ case wasm::WASM_SYMBOL_TYPE_TABLE:
+ getTargetStreamer()->emitTableType(Sym);
+ break;
+ default:
+ break; // We only handle globals, tags and tables here
+ }
+}
+
+void WebAssemblyAsmPrinter::emitDecls(const Module &M) {
+ if (signaturesEmitted)
+ return;
+ signaturesEmitted = true;
+
+ // Normally symbols for globals get discovered as the MI gets lowered,
+  // but we need to know about them ahead of time. This will, however,
+  // only find symbols that have been used. Unused symbols from globals will
+ // not be found here.
+ MachineModuleInfoWasm &MMIW = MMI->getObjFileInfo<MachineModuleInfoWasm>();
+ for (StringRef Name : MMIW.MachineSymbolsUsed) {
+ auto *WasmSym = cast<MCSymbolWasm>(getOrCreateWasmSymbol(Name));
+ if (WasmSym->isFunction()) {
+ // TODO(wvo): is there any case where this overlaps with the call to
+ // emitFunctionType in the loop below?
+ getTargetStreamer()->emitFunctionType(WasmSym);
+ }
+ }
+
+ for (auto &It : OutContext.getSymbols()) {
+ // Emit .globaltype, .tagtype, or .tabletype declarations for extern
+ // declarations, i.e. those that have only been declared (but not defined)
+ // in the current module
+ auto Sym = cast<MCSymbolWasm>(It.getValue());
+ if (!Sym->isDefined())
+ emitSymbolType(Sym);
+ }
+
+ DenseSet<MCSymbol *> InvokeSymbols;
+ for (const auto &F : M) {
+ if (F.isIntrinsic())
+ continue;
+
+ // Emit function type info for all functions. This will emit duplicate
+ // information for defined functions (which already have function type
+ // info emitted alongside their definition), but this is necessary in
+ // order to enable the single-pass WebAssemblyAsmTypeCheck to succeed.
+ SmallVector<MVT, 4> Results;
+ SmallVector<MVT, 4> Params;
+ computeSignatureVTs(F.getFunctionType(), &F, F, TM, Params, Results);
+ // At this point these MCSymbols may or may not have been created already
+ // and thus also contain a signature, but we need to get the signature
+ // anyway here in case it is an invoke that has not yet been created. We
+ // will discard it later if it turns out not to be necessary.
+ auto Signature = signatureFromMVTs(Results, Params);
+ bool InvokeDetected = false;
+ auto *Sym = getMCSymbolForFunction(
+ &F, WebAssembly::WasmEnableEmEH || WebAssembly::WasmEnableEmSjLj,
+ Signature.get(), InvokeDetected);
+
+ // Multiple functions can be mapped to the same invoke symbol. For
+ // example, two IR functions '__invoke_void_i8*' and '__invoke_void_i32'
+    // are both mapped to 'invoke_vi'. We keep them in a set once we emit an
+ // Emscripten EH symbol so we don't emit the same symbol twice.
+ if (InvokeDetected && !InvokeSymbols.insert(Sym).second)
+ continue;
+
+ Sym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION);
+ if (!Sym->getSignature()) {
+ Sym->setSignature(Signature.get());
+ addSignature(std::move(Signature));
+ } else {
+ // This symbol has already been created and had a signature. Discard it.
+ Signature.reset();
+ }
+
+ getTargetStreamer()->emitFunctionType(Sym);
+
+ if (F.hasFnAttribute("wasm-import-module")) {
+ StringRef Name =
+ F.getFnAttribute("wasm-import-module").getValueAsString();
+ Sym->setImportModule(storeName(Name));
+ getTargetStreamer()->emitImportModule(Sym, Name);
+ }
+ if (F.hasFnAttribute("wasm-import-name")) {
+ // If this is a converted Emscripten EH/SjLj symbol, we shouldn't use
+ // the original function name but the converted symbol name.
+ StringRef Name =
+ InvokeDetected
+ ? Sym->getName()
+ : F.getFnAttribute("wasm-import-name").getValueAsString();
+ Sym->setImportName(storeName(Name));
+ getTargetStreamer()->emitImportName(Sym, Name);
+ }
+
+ if (F.hasFnAttribute("wasm-export-name")) {
+ auto *Sym = cast<MCSymbolWasm>(getSymbol(&F));
+ StringRef Name = F.getFnAttribute("wasm-export-name").getValueAsString();
+ Sym->setExportName(storeName(Name));
+ getTargetStreamer()->emitExportName(Sym, Name);
+ }
+ }
+}
+
+void WebAssemblyAsmPrinter::emitEndOfAsmFile(Module &M) {
+  // This is required to emit external declarations (like .functype) when
+  // no functions are defined in the compilation unit and, therefore,
+  // emitDecls() has not been called until now.
+ emitDecls(M);
+
+ // When a function's address is taken, a TABLE_INDEX relocation is emitted
+ // against the function symbol at the use site. However the relocation
+ // doesn't explicitly refer to the table. In the future we may want to
+ // define a new kind of reloc against both the function and the table, so
+ // that the linker can see that the function symbol keeps the table alive,
+ // but for now manually mark the table as live.
+ for (const auto &F : M) {
+ if (!F.isIntrinsic() && F.hasAddressTaken()) {
+ MCSymbolWasm *FunctionTable =
+ WebAssembly::getOrCreateFunctionTableSymbol(OutContext, Subtarget);
+ OutStreamer->emitSymbolAttribute(FunctionTable, MCSA_NoDeadStrip);
+ break;
+ }
+ }
+
+ for (const auto &G : M.globals()) {
+ if (!G.hasInitializer() && G.hasExternalLinkage() &&
+ !WebAssembly::isWasmVarAddressSpace(G.getAddressSpace()) &&
+ G.getValueType()->isSized()) {
+ uint16_t Size = M.getDataLayout().getTypeAllocSize(G.getValueType());
+ OutStreamer->emitELFSize(getSymbol(&G),
+ MCConstantExpr::create(Size, OutContext));
+ }
+ }
+
+ if (const NamedMDNode *Named = M.getNamedMetadata("wasm.custom_sections")) {
+ for (const Metadata *MD : Named->operands()) {
+ const auto *Tuple = dyn_cast<MDTuple>(MD);
+ if (!Tuple || Tuple->getNumOperands() != 2)
+ continue;
+ const MDString *Name = dyn_cast<MDString>(Tuple->getOperand(0));
+ const MDString *Contents = dyn_cast<MDString>(Tuple->getOperand(1));
+ if (!Name || !Contents)
+ continue;
+
+ OutStreamer->pushSection();
+ std::string SectionName = (".custom_section." + Name->getString()).str();
+ MCSectionWasm *MySection =
+ OutContext.getWasmSection(SectionName, SectionKind::getMetadata());
+ OutStreamer->switchSection(MySection);
+ OutStreamer->emitBytes(Contents->getString());
+ OutStreamer->popSection();
+ }
+ }
+
+ EmitProducerInfo(M);
+ EmitTargetFeatures(M);
+ EmitFunctionAttributes(M);
+}
+
+void WebAssemblyAsmPrinter::EmitProducerInfo(Module &M) {
+ llvm::SmallVector<std::pair<std::string, std::string>, 4> Languages;
+ if (const NamedMDNode *Debug = M.getNamedMetadata("llvm.dbg.cu")) {
+ llvm::SmallSet<StringRef, 4> SeenLanguages;
+ for (size_t I = 0, E = Debug->getNumOperands(); I < E; ++I) {
+ const auto *CU = cast<DICompileUnit>(Debug->getOperand(I));
+ StringRef Language = dwarf::LanguageString(CU->getSourceLanguage());
+ Language.consume_front("DW_LANG_");
+ if (SeenLanguages.insert(Language).second)
+ Languages.emplace_back(Language.str(), "");
+ }
+ }
+
+ llvm::SmallVector<std::pair<std::string, std::string>, 4> Tools;
+ if (const NamedMDNode *Ident = M.getNamedMetadata("llvm.ident")) {
+ llvm::SmallSet<StringRef, 4> SeenTools;
+ for (size_t I = 0, E = Ident->getNumOperands(); I < E; ++I) {
+ const auto *S = cast<MDString>(Ident->getOperand(I)->getOperand(0));
+ std::pair<StringRef, StringRef> Field = S->getString().split("version");
+ StringRef Name = Field.first.trim();
+ StringRef Version = Field.second.trim();
+ if (SeenTools.insert(Name).second)
+ Tools.emplace_back(Name.str(), Version.str());
+ }
+ }
+
+ int FieldCount = int(!Languages.empty()) + int(!Tools.empty());
+ if (FieldCount != 0) {
+ MCSectionWasm *Producers = OutContext.getWasmSection(
+ ".custom_section.producers", SectionKind::getMetadata());
+ OutStreamer->pushSection();
+ OutStreamer->switchSection(Producers);
+ OutStreamer->emitULEB128IntValue(FieldCount);
+ for (auto &Producers : {std::make_pair("language", &Languages),
+ std::make_pair("processed-by", &Tools)}) {
+ if (Producers.second->empty())
+ continue;
+ OutStreamer->emitULEB128IntValue(strlen(Producers.first));
+ OutStreamer->emitBytes(Producers.first);
+ OutStreamer->emitULEB128IntValue(Producers.second->size());
+ for (auto &Producer : *Producers.second) {
+ OutStreamer->emitULEB128IntValue(Producer.first.size());
+ OutStreamer->emitBytes(Producer.first);
+ OutStreamer->emitULEB128IntValue(Producer.second.size());
+ OutStreamer->emitBytes(Producer.second);
+ }
+ }
+ OutStreamer->popSection();
+ }
+}
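+
+// Illustrative layout (editor's note, not from the original sources): for a
+// C compilation unit built by clang 17.0.0, the "producers" custom section
+// emitted above would contain roughly
+//   field_count = 2
+//   "language"      1 entry:  ("C99", "")
+//   "processed-by"  1 entry:  ("clang", "17.0.0")
+// with every count and string length encoded as a ULEB128.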
+
+void WebAssemblyAsmPrinter::EmitTargetFeatures(Module &M) {
+ struct FeatureEntry {
+ uint8_t Prefix;
+ std::string Name;
+ };
+
+ // Read target features and linkage policies from module metadata
+ SmallVector<FeatureEntry, 4> EmittedFeatures;
+ auto EmitFeature = [&](std::string Feature) {
+ std::string MDKey = (StringRef("wasm-feature-") + Feature).str();
+ Metadata *Policy = M.getModuleFlag(MDKey);
+ if (Policy == nullptr)
+ return;
+
+ FeatureEntry Entry;
+ Entry.Prefix = 0;
+ Entry.Name = Feature;
+
+ if (auto *MD = cast<ConstantAsMetadata>(Policy))
+ if (auto *I = cast<ConstantInt>(MD->getValue()))
+ Entry.Prefix = I->getZExtValue();
+
+ // Silently ignore invalid metadata
+ if (Entry.Prefix != wasm::WASM_FEATURE_PREFIX_USED &&
+ Entry.Prefix != wasm::WASM_FEATURE_PREFIX_REQUIRED &&
+ Entry.Prefix != wasm::WASM_FEATURE_PREFIX_DISALLOWED)
+ return;
+
+ EmittedFeatures.push_back(Entry);
+ };
+
+ for (const SubtargetFeatureKV &KV : WebAssemblyFeatureKV) {
+ EmitFeature(KV.Key);
+ }
+ // This pseudo-feature tells the linker whether shared memory would be safe
+ EmitFeature("shared-mem");
+
+ // This is an "architecture", not a "feature", but we emit it as such for
+ // the benefit of tools like Binaryen and consistency with other producers.
+  // FIXME: Subtarget is null here, so we can't use Subtarget->hasAddr64()?
+ if (M.getDataLayout().getPointerSize() == 8) {
+ // Can't use EmitFeature since "wasm-feature-memory64" is not a module
+ // flag.
+ EmittedFeatures.push_back({wasm::WASM_FEATURE_PREFIX_USED, "memory64"});
+ }
+
+  if (EmittedFeatures.empty())
+ return;
+
+ // Emit features and linkage policies into the "target_features" section
+ MCSectionWasm *FeaturesSection = OutContext.getWasmSection(
+ ".custom_section.target_features", SectionKind::getMetadata());
+ OutStreamer->pushSection();
+ OutStreamer->switchSection(FeaturesSection);
+
+ OutStreamer->emitULEB128IntValue(EmittedFeatures.size());
+ for (auto &F : EmittedFeatures) {
+ OutStreamer->emitIntValue(F.Prefix, 1);
+ OutStreamer->emitULEB128IntValue(F.Name.size());
+ OutStreamer->emitBytes(F.Name);
+ }
+
+ OutStreamer->popSection();
+}
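+
+// Illustrative layout (editor's note, not from the original sources): a
+// module built with mutable-globals and sign-ext under the "used" policy
+// would yield a "target_features" section of the form
+//   count = 2
+//   '+' (WASM_FEATURE_PREFIX_USED) "mutable-globals"
+//   '+' "sign-ext"
+// where each entry is a one-byte prefix, a ULEB128 length, and the name.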
+
+void WebAssemblyAsmPrinter::EmitFunctionAttributes(Module &M) {
+ auto V = M.getNamedGlobal("llvm.global.annotations");
+ if (!V)
+ return;
+
+ // Group all the custom attributes by name.
+ MapVector<StringRef, SmallVector<MCSymbol *, 4>> CustomSections;
+ const ConstantArray *CA = cast<ConstantArray>(V->getOperand(0));
+ for (Value *Op : CA->operands()) {
+ auto *CS = cast<ConstantStruct>(Op);
+ // The first field is a pointer to the annotated variable.
+ Value *AnnotatedVar = CS->getOperand(0)->stripPointerCasts();
+ // Only annotated functions are supported for now.
+ if (!isa<Function>(AnnotatedVar))
+ continue;
+ auto *F = cast<Function>(AnnotatedVar);
+
+ // The second field is a pointer to a global annotation string.
+ auto *GV = cast<GlobalVariable>(CS->getOperand(1)->stripPointerCasts());
+ StringRef AnnotationString;
+ getConstantStringInfo(GV, AnnotationString);
+ auto *Sym = cast<MCSymbolWasm>(getSymbol(F));
+ CustomSections[AnnotationString].push_back(Sym);
+ }
+
+ // Emit a custom section for each unique attribute.
+ for (const auto &[Name, Symbols] : CustomSections) {
+ MCSectionWasm *CustomSection = OutContext.getWasmSection(
+ ".custom_section.llvm.func_attr.annotate." + Name, SectionKind::getMetadata());
+ OutStreamer->pushSection();
+ OutStreamer->switchSection(CustomSection);
+
+ for (auto &Sym : Symbols) {
+ OutStreamer->emitValue(
+ MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_WASM_FUNCINDEX,
+ OutContext),
+ 4);
+ }
+ OutStreamer->popSection();
+ }
+}
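+
+// For illustration (editor's sketch, not part of the original change): C
+// source such as
+//   __attribute__((annotate("mytag"))) void f(void) {}
+// populates llvm.global.annotations, and the loop above then emits a
+// ".custom_section.llvm.func_attr.annotate.mytag" section holding the 4-byte
+// function index of f.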
+
+void WebAssemblyAsmPrinter::emitConstantPool() {
+ emitDecls(*MMI->getModule());
+ assert(MF->getConstantPool()->getConstants().empty() &&
+ "WebAssembly disables constant pools");
+}
+
+void WebAssemblyAsmPrinter::emitJumpTableInfo() {
+ // Nothing to do; jump tables are incorporated into the instruction stream.
+}
+
+void WebAssemblyAsmPrinter::emitFunctionBodyStart() {
+ const Function &F = MF->getFunction();
+ SmallVector<MVT, 1> ResultVTs;
+ SmallVector<MVT, 4> ParamVTs;
+ computeSignatureVTs(F.getFunctionType(), &F, F, TM, ParamVTs, ResultVTs);
+
+ auto Signature = signatureFromMVTs(ResultVTs, ParamVTs);
+ auto *WasmSym = cast<MCSymbolWasm>(CurrentFnSym);
+ WasmSym->setSignature(Signature.get());
+ addSignature(std::move(Signature));
+ WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION);
+
+ getTargetStreamer()->emitFunctionType(WasmSym);
+
+ // Emit the function index.
+ if (MDNode *Idx = F.getMetadata("wasm.index")) {
+ assert(Idx->getNumOperands() == 1);
+
+ getTargetStreamer()->emitIndIdx(AsmPrinter::lowerConstant(
+ cast<ConstantAsMetadata>(Idx->getOperand(0))->getValue()));
+ }
+
+ SmallVector<wasm::ValType, 16> Locals;
+ valTypesFromMVTs(MFI->getLocals(), Locals);
+ getTargetStreamer()->emitLocal(Locals);
+
+ AsmPrinter::emitFunctionBodyStart();
+}
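+
+// Illustrative output (editor's note, not from the original sources): for
+// 'int add(int a, int b)' on wasm32, the directives emitted here would look
+// roughly like
+//   .functype add (i32, i32) -> (i32)
+//   .local i32                        # only if the body needs extra locals
+// before the base class emits the function body itself.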
+
+void WebAssemblyAsmPrinter::emitInstruction(const MachineInstr *MI) {
+ LLVM_DEBUG(dbgs() << "EmitInstruction: " << *MI << '\n');
+ WebAssembly_MC::verifyInstructionPredicates(MI->getOpcode(),
+ Subtarget->getFeatureBits());
+
+ switch (MI->getOpcode()) {
+ case WebAssembly::ARGUMENT_i32:
+ case WebAssembly::ARGUMENT_i32_S:
+ case WebAssembly::ARGUMENT_i64:
+ case WebAssembly::ARGUMENT_i64_S:
+ case WebAssembly::ARGUMENT_f32:
+ case WebAssembly::ARGUMENT_f32_S:
+ case WebAssembly::ARGUMENT_f64:
+ case WebAssembly::ARGUMENT_f64_S:
+ case WebAssembly::ARGUMENT_v16i8:
+ case WebAssembly::ARGUMENT_v16i8_S:
+ case WebAssembly::ARGUMENT_v8i16:
+ case WebAssembly::ARGUMENT_v8i16_S:
+ case WebAssembly::ARGUMENT_v4i32:
+ case WebAssembly::ARGUMENT_v4i32_S:
+ case WebAssembly::ARGUMENT_v2i64:
+ case WebAssembly::ARGUMENT_v2i64_S:
+ case WebAssembly::ARGUMENT_v4f32:
+ case WebAssembly::ARGUMENT_v4f32_S:
+ case WebAssembly::ARGUMENT_v2f64:
+ case WebAssembly::ARGUMENT_v2f64_S:
+ // These represent values which are live into the function entry, so there's
+ // no instruction to emit.
+ break;
+ case WebAssembly::FALLTHROUGH_RETURN: {
+ // These instructions represent the implicit return at the end of a
+ // function body.
+ if (isVerbose()) {
+ OutStreamer->AddComment("fallthrough-return");
+ OutStreamer->addBlankLine();
+ }
+ break;
+ }
+ case WebAssembly::COMPILER_FENCE:
+ // This is a compiler barrier that prevents instruction reordering during
+ // backend compilation, and should not be emitted.
+ break;
+ default: {
+ WebAssemblyMCInstLower MCInstLowering(OutContext, *this);
+ MCInst TmpInst;
+ MCInstLowering.lower(MI, TmpInst);
+ EmitToStreamer(*OutStreamer, TmpInst);
+ break;
+ }
+ }
+}
+
+bool WebAssemblyAsmPrinter::PrintAsmOperand(const MachineInstr *MI,
+ unsigned OpNo,
+ const char *ExtraCode,
+ raw_ostream &OS) {
+ // First try the generic code, which knows about modifiers like 'c' and 'n'.
+ if (!AsmPrinter::PrintAsmOperand(MI, OpNo, ExtraCode, OS))
+ return false;
+
+ if (!ExtraCode) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+ switch (MO.getType()) {
+ case MachineOperand::MO_Immediate:
+ OS << MO.getImm();
+ return false;
+ case MachineOperand::MO_Register:
+      // FIXME: INLINEASM is the only opcode that still contains registers, as
+      // required by MachineInstr::getDebugVariable().
+ assert(MI->getOpcode() == WebAssembly::INLINEASM);
+ OS << regToString(MO);
+ return false;
+ case MachineOperand::MO_GlobalAddress:
+ PrintSymbolOperand(MO, OS);
+ return false;
+ case MachineOperand::MO_ExternalSymbol:
+ GetExternalSymbolSymbol(MO.getSymbolName())->print(OS, MAI);
+ printOffset(MO.getOffset(), OS);
+ return false;
+ case MachineOperand::MO_MachineBasicBlock:
+ MO.getMBB()->getSymbol()->print(OS, MAI);
+ return false;
+ default:
+ break;
+ }
+ }
+
+ return true;
+}
+
+bool WebAssemblyAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
+ unsigned OpNo,
+ const char *ExtraCode,
+ raw_ostream &OS) {
+  // The current approach to inline asm is that "r" constraints are expressed
+  // as local indices, rather than values on the operand stack. This simplifies
+  // using "r", as it eliminates the need to push and pop the values in a
+  // particular order; however, it also makes it impossible to have an "m"
+  // constraint, so we don't support one.
+
+ return AsmPrinter::PrintAsmMemoryOperand(MI, OpNo, ExtraCode, OS);
+}
+
+// Force static initialization.
+extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeWebAssemblyAsmPrinter() {
+ RegisterAsmPrinter<WebAssemblyAsmPrinter> X(getTheWebAssemblyTarget32());
+ RegisterAsmPrinter<WebAssemblyAsmPrinter> Y(getTheWebAssemblyTarget64());
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.h
new file mode 100644
index 000000000000..c30e0155c81e
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.h
@@ -0,0 +1,93 @@
+// WebAssemblyAsmPrinter.h - WebAssembly implementation of AsmPrinter-*- C++ -*-
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYASMPRINTER_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYASMPRINTER_H
+
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+class WebAssemblyTargetStreamer;
+
+class LLVM_LIBRARY_VISIBILITY WebAssemblyAsmPrinter final : public AsmPrinter {
+ const WebAssemblySubtarget *Subtarget;
+ const MachineRegisterInfo *MRI;
+ WebAssemblyFunctionInfo *MFI;
+ // TODO: Do the uniquing of Signatures here instead of ObjectFileWriter?
+ std::vector<std::unique_ptr<wasm::WasmSignature>> Signatures;
+ std::vector<std::unique_ptr<std::string>> Names;
+ bool signaturesEmitted = false;
+
+ StringRef storeName(StringRef Name) {
+ std::unique_ptr<std::string> N = std::make_unique<std::string>(Name);
+ Names.push_back(std::move(N));
+ return *Names.back();
+ }
+
+public:
+ explicit WebAssemblyAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)), Subtarget(nullptr), MRI(nullptr),
+ MFI(nullptr) {}
+
+ StringRef getPassName() const override {
+ return "WebAssembly Assembly Printer";
+ }
+
+ const WebAssemblySubtarget &getSubtarget() const { return *Subtarget; }
+ void addSignature(std::unique_ptr<wasm::WasmSignature> &&Sig) {
+ Signatures.push_back(std::move(Sig));
+ }
+
+ //===------------------------------------------------------------------===//
+ // MachineFunctionPass Implementation.
+ //===------------------------------------------------------------------===//
+
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ Subtarget = &MF.getSubtarget<WebAssemblySubtarget>();
+ MRI = &MF.getRegInfo();
+ MFI = MF.getInfo<WebAssemblyFunctionInfo>();
+ return AsmPrinter::runOnMachineFunction(MF);
+ }
+
+ //===------------------------------------------------------------------===//
+ // AsmPrinter Implementation.
+ //===------------------------------------------------------------------===//
+
+ void emitEndOfAsmFile(Module &M) override;
+ void EmitProducerInfo(Module &M);
+ void EmitTargetFeatures(Module &M);
+ void EmitFunctionAttributes(Module &M);
+ void emitSymbolType(const MCSymbolWasm *Sym);
+ void emitGlobalVariable(const GlobalVariable *GV) override;
+ void emitJumpTableInfo() override;
+ void emitConstantPool() override;
+ void emitFunctionBodyStart() override;
+ void emitInstruction(const MachineInstr *MI) override;
+ bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ const char *ExtraCode, raw_ostream &OS) override;
+ bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ const char *ExtraCode, raw_ostream &OS) override;
+
+ MVT getRegType(unsigned RegNo) const;
+ std::string regToString(const MachineOperand &MO);
+ WebAssemblyTargetStreamer *getTargetStreamer();
+ MCSymbolWasm *getMCSymbolForFunction(const Function *F, bool EnableEmEH,
+ wasm::WasmSignature *Sig,
+ bool &InvokeDetected);
+ MCSymbol *getOrCreateWasmSymbol(StringRef Name);
+ void emitDecls(const Module &M);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp
new file mode 100644
index 000000000000..06758e465197
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp
@@ -0,0 +1,398 @@
+//===-- WebAssemblyCFGSort.cpp - CFG Sorting ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements a CFG sorting pass.
+///
+/// This pass reorders the blocks in a function to put them into topological
+/// order, ignoring loop backedges, and without any loop or exception being
+/// interrupted by a block not dominated by its header, with special care
+/// to keep the order as similar as possible to the original order.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyExceptionInfo.h"
+#include "WebAssemblySortRegion.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/ADT/PriorityQueue.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/WasmEHFuncInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+using WebAssembly::SortRegion;
+using WebAssembly::SortRegionInfo;
+
+#define DEBUG_TYPE "wasm-cfg-sort"
+
+// Option to disable EH pad first sorting. Only for testing unwind destination
+// mismatches in CFGStackify.
+static cl::opt<bool> WasmDisableEHPadSort(
+ "wasm-disable-ehpad-sort", cl::ReallyHidden,
+ cl::desc(
+ "WebAssembly: Disable EH pad-first sort order. Testing purpose only."),
+ cl::init(false));
+
+namespace {
+
+class WebAssemblyCFGSort final : public MachineFunctionPass {
+ StringRef getPassName() const override { return "WebAssembly CFG Sort"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ AU.addRequired<WebAssemblyExceptionInfo>();
+ AU.addPreserved<WebAssemblyExceptionInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyCFGSort() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyCFGSort::ID = 0;
+INITIALIZE_PASS(WebAssemblyCFGSort, DEBUG_TYPE,
+ "Reorders blocks in topological order", false, false)
+
+FunctionPass *llvm::createWebAssemblyCFGSort() {
+ return new WebAssemblyCFGSort();
+}
+
+static void maybeUpdateTerminator(MachineBasicBlock *MBB) {
+#ifndef NDEBUG
+ bool AnyBarrier = false;
+#endif
+ bool AllAnalyzable = true;
+ for (const MachineInstr &Term : MBB->terminators()) {
+#ifndef NDEBUG
+ AnyBarrier |= Term.isBarrier();
+#endif
+ AllAnalyzable &= Term.isBranch() && !Term.isIndirectBranch();
+ }
+ assert((AnyBarrier || AllAnalyzable) &&
+ "analyzeBranch needs to analyze any block with a fallthrough");
+
+ // Find the layout successor from the original block order.
+ MachineFunction *MF = MBB->getParent();
+ MachineBasicBlock *OriginalSuccessor =
+ unsigned(MBB->getNumber() + 1) < MF->getNumBlockIDs()
+ ? MF->getBlockNumbered(MBB->getNumber() + 1)
+ : nullptr;
+
+ if (AllAnalyzable)
+ MBB->updateTerminator(OriginalSuccessor);
+}
+
+namespace {
+// EH pads are selected first regardless of the block comparison order.
+// When only one of the BBs is an EH pad, we give a higher priority to it, to
+// prevent common mismatches between possibly-throwing calls and the EH pads
+// they unwind to, as in the example below:
+//
+// bb0:
+// call @foo // If this throws, unwind to bb2
+// bb1:
+// call @bar // If this throws, unwind to bb3
+// bb2 (ehpad):
+// handler_bb2
+// bb3 (ehpad):
+// handler_bb3
+// continuing code
+//
+// Because this pass tries to preserve the original BB order, this order will
+// not change. But it will produce the following try-catch structure in
+// CFGStackify, resulting in a mismatch:
+// try
+// try
+// call @foo
+// call @bar // This should unwind to bb3, not bb2!
+// catch
+// handler_bb2
+// end
+// catch
+// handler_bb3
+// end
+// continuing code
+//
+// If we give a higher priority to an EH pad whenever it is ready, then in this
+// example, when both bb1 and bb2 are ready, we would pick bb2 first.
+
+/// Sort blocks by their number.
+struct CompareBlockNumbers {
+ bool operator()(const MachineBasicBlock *A,
+ const MachineBasicBlock *B) const {
+ if (!WasmDisableEHPadSort) {
+ if (A->isEHPad() && !B->isEHPad())
+ return false;
+ if (!A->isEHPad() && B->isEHPad())
+ return true;
+ }
+
+ return A->getNumber() > B->getNumber();
+ }
+};
+/// Sort blocks by their number in the opposite order.
+struct CompareBlockNumbersBackwards {
+ bool operator()(const MachineBasicBlock *A,
+ const MachineBasicBlock *B) const {
+ if (!WasmDisableEHPadSort) {
+ if (A->isEHPad() && !B->isEHPad())
+ return false;
+ if (!A->isEHPad() && B->isEHPad())
+ return true;
+ }
+
+ return A->getNumber() < B->getNumber();
+ }
+};
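+
+// For illustration, with the default options and hypothetical block numbers:
+// given a ready set { bb7, bb2 (ehpad), bb4 }, Preferred pops
+//   bb2, bb4, bb7   (EH pads first, then ascending block number)
+// while Ready pops
+//   bb2, bb7, bb4   (EH pads first, then descending block number).
+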
+/// Bookkeeping for a region to help ensure that we don't mix blocks not
+/// dominated by the region's header among its blocks.
+struct Entry {
+ const SortRegion *TheRegion;
+ unsigned NumBlocksLeft;
+
+ /// List of blocks not dominated by the region's header that are deferred
+ /// until after all of the region's blocks have been seen.
+ std::vector<MachineBasicBlock *> Deferred;
+
+ explicit Entry(const SortRegion *R)
+ : TheRegion(R), NumBlocksLeft(R->getNumBlocks()) {}
+};
+} // end anonymous namespace
+
+/// Sort the blocks, taking special care to make sure that regions are not
+/// interrupted by blocks not dominated by their header.
+/// TODO: There are many opportunities for improving the heuristics here.
+/// Explore them.
+static void sortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
+ const WebAssemblyExceptionInfo &WEI,
+ const MachineDominatorTree &MDT) {
+ // Remember original layout ordering, so we can update terminators after
+ // reordering to point to the original layout successor.
+ MF.RenumberBlocks();
+
+ // Prepare for a topological sort: Record the number of predecessors each
+ // block has, ignoring loop backedges.
+ SmallVector<unsigned, 16> NumPredsLeft(MF.getNumBlockIDs(), 0);
+ for (MachineBasicBlock &MBB : MF) {
+ unsigned N = MBB.pred_size();
+ if (MachineLoop *L = MLI.getLoopFor(&MBB))
+ if (L->getHeader() == &MBB)
+ for (const MachineBasicBlock *Pred : MBB.predecessors())
+ if (L->contains(Pred))
+ --N;
+ NumPredsLeft[MBB.getNumber()] = N;
+ }
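+
+ // A minimal sketch (not part of this pass) of the ready-list discipline
+ // these counts feed, in the style of Kahn's algorithm: popping a block
+ // decrements each non-backedge successor's count, and a successor becomes
+ // ready once its count reaches zero:
+ //   while (!Ready.empty()) {
+ //     MachineBasicBlock *MBB = Ready.top();
+ //     Ready.pop();
+ //     for (MachineBasicBlock *Succ : MBB->successors())
+ //       if (--NumPredsLeft[Succ->getNumber()] == 0)
+ //         Ready.push(Succ);
+ //   }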
+
+ // Topological sort the CFG, with additional constraints:
+ // - Between a region header and the last block in the region, there can be
+ // no blocks not dominated by its header.
+ // - It's desirable to preserve the original block order when possible.
+ // We use two ready lists; Preferred and Ready. Preferred has recently
+ // processed successors, to help preserve block sequences from the original
+ // order. Ready has the remaining ready blocks. EH blocks are picked first
+ // from both queues.
+ PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
+ CompareBlockNumbers>
+ Preferred;
+ PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
+ CompareBlockNumbersBackwards>
+ Ready;
+
+ const auto *EHInfo = MF.getWasmEHFuncInfo();
+ SortRegionInfo SRI(MLI, WEI);
+ SmallVector<Entry, 4> Entries;
+ for (MachineBasicBlock *MBB = &MF.front();;) {
+ const SortRegion *R = SRI.getRegionFor(MBB);
+ if (R) {
+ // If MBB is a region header, add it to the active region list. We can't
+ // put any blocks that it doesn't dominate until we see the end of the
+ // region.
+ if (R->getHeader() == MBB)
+ Entries.push_back(Entry(R));
+ // For each active region the block is in, decrement the count. If MBB is
+ // the last block in an active region, take it off the list and pick up
+ // any blocks deferred because the header didn't dominate them.
+ for (Entry &E : Entries)
+ if (E.TheRegion->contains(MBB) && --E.NumBlocksLeft == 0)
+ for (auto *DeferredBlock : E.Deferred)
+ Ready.push(DeferredBlock);
+ while (!Entries.empty() && Entries.back().NumBlocksLeft == 0)
+ Entries.pop_back();
+ }
+ // The main topological sort logic.
+ for (MachineBasicBlock *Succ : MBB->successors()) {
+ // Ignore backedges.
+ if (MachineLoop *SuccL = MLI.getLoopFor(Succ))
+ if (SuccL->getHeader() == Succ && SuccL->contains(MBB))
+ continue;
+ // Decrement the predecessor count. If it's now zero, it's ready.
+ if (--NumPredsLeft[Succ->getNumber()] == 0) {
+ // When we are in a SortRegion, we allow sorting of not only BBs that
+ // belong to the current (innermost) region but also BBs that are
+ // dominated by the current region header. But we should not do this for
+ // exceptions because there can be cases in which, for example:
+ // EHPad A's unwind destination (where the exception lands when it is
+ // not caught by EHPad A) is EHPad B, so EHPad B does not belong to the
+ // exception dominated by EHPad A. But EHPad B is dominated by EHPad A,
+ // so EHPad B can be sorted within EHPad A's exception. This is
+ // incorrect because we may end up delegating/rethrowing to an inner
+ // scope in CFGStackify. So here we make sure those unwind destinations
+ // are deferred until their unwind source's exception is sorted.
+ if (EHInfo && EHInfo->hasUnwindSrcs(Succ)) {
+ SmallPtrSet<MachineBasicBlock *, 4> UnwindSrcs =
+ EHInfo->getUnwindSrcs(Succ);
+ bool IsDeferred = false;
+ for (Entry &E : Entries) {
+ if (UnwindSrcs.count(E.TheRegion->getHeader())) {
+ E.Deferred.push_back(Succ);
+ IsDeferred = true;
+ break;
+ }
+ }
+ if (IsDeferred)
+ continue;
+ }
+ Preferred.push(Succ);
+ }
+ }
+ // Determine the block to follow MBB. First try to find a preferred block,
+ // to preserve the original block order when possible.
+ MachineBasicBlock *Next = nullptr;
+ while (!Preferred.empty()) {
+ Next = Preferred.top();
+ Preferred.pop();
+ // If Next isn't dominated by the top active region header, defer it until
+ // that region is done.
+ if (!Entries.empty() &&
+ !MDT.dominates(Entries.back().TheRegion->getHeader(), Next)) {
+ Entries.back().Deferred.push_back(Next);
+ Next = nullptr;
+ continue;
+ }
+ // If Next was originally ordered before MBB, and it isn't because it was
+ // loop-rotated above the header, it's not preferred.
+ if (Next->getNumber() < MBB->getNumber() &&
+ (WasmDisableEHPadSort || !Next->isEHPad()) &&
+ (!R || !R->contains(Next) ||
+ R->getHeader()->getNumber() < Next->getNumber())) {
+ Ready.push(Next);
+ Next = nullptr;
+ continue;
+ }
+ break;
+ }
+ // If we didn't find a suitable block in the Preferred list, check the
+ // general Ready list.
+ if (!Next) {
+ // If there are no more blocks to process, we're done.
+ if (Ready.empty()) {
+ maybeUpdateTerminator(MBB);
+ break;
+ }
+ for (;;) {
+ Next = Ready.top();
+ Ready.pop();
+ // If Next isn't dominated by the top active region header, defer it
+ // until that region is done.
+ if (!Entries.empty() &&
+ !MDT.dominates(Entries.back().TheRegion->getHeader(), Next)) {
+ Entries.back().Deferred.push_back(Next);
+ continue;
+ }
+ break;
+ }
+ }
+ // Move the next block into place and iterate.
+ Next->moveAfter(MBB);
+ maybeUpdateTerminator(MBB);
+ MBB = Next;
+ }
+ assert(Entries.empty() && "Active sort region list not finished");
+ MF.RenumberBlocks();
+
+#ifndef NDEBUG
+ SmallSetVector<const SortRegion *, 8> OnStack;
+
+ // Insert a sentinel representing the degenerate loop that starts at the
+ // function entry block and includes the entire function as a "loop" that
+ // executes once.
+ OnStack.insert(nullptr);
+
+ for (auto &MBB : MF) {
+ assert(MBB.getNumber() >= 0 && "Renumbered blocks should be non-negative.");
+ const SortRegion *Region = SRI.getRegionFor(&MBB);
+
+ if (Region && &MBB == Region->getHeader()) {
+ // Region header.
+ if (Region->isLoop()) {
+ // Loop header. The loop predecessor should be sorted above, and the
+ // other predecessors should be backedges below.
+ for (auto *Pred : MBB.predecessors())
+ assert(
+ (Pred->getNumber() < MBB.getNumber() || Region->contains(Pred)) &&
+ "Loop header predecessors must be loop predecessors or "
+ "backedges");
+ } else {
+ // Exception header. All predecessors should be sorted above.
+ for (auto *Pred : MBB.predecessors())
+ assert(Pred->getNumber() < MBB.getNumber() &&
+ "Non-loop-header predecessors should be topologically sorted");
+ }
+ assert(OnStack.insert(Region) &&
+ "Regions should be declared at most once.");
+
+ } else {
+ // Not a region header. All predecessors should be sorted above.
+ for (auto *Pred : MBB.predecessors())
+ assert(Pred->getNumber() < MBB.getNumber() &&
+ "Non-loop-header predecessors should be topologically sorted");
+ assert(OnStack.count(SRI.getRegionFor(&MBB)) &&
+ "Blocks must be nested in their regions");
+ }
+ while (OnStack.size() > 1 && &MBB == SRI.getBottom(OnStack.back()))
+ OnStack.pop_back();
+ }
+ assert(OnStack.pop_back_val() == nullptr &&
+ "The function entry block shouldn't actually be a region header");
+ assert(OnStack.empty() &&
+ "Control flow stack pushes and pops should be balanced.");
+#endif
+}
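+
+// For illustration, assuming the only edges are the ones shown: with EH pad
+// first ordering, the bb0..bb3 example near the top of this file sorts as
+// bb0, bb2, bb1, bb3, so each call is immediately followed by the EH pad it
+// unwinds to, and CFGStackify can emit two sibling try-catches instead of the
+// mismatched nested ones.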
+
+bool WebAssemblyCFGSort::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG(dbgs() << "********** CFG Sorting **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ const auto &MLI = getAnalysis<MachineLoopInfo>();
+ const auto &WEI = getAnalysis<WebAssemblyExceptionInfo>();
+ auto &MDT = getAnalysis<MachineDominatorTree>();
+ // Liveness is not tracked for VALUE_STACK physreg.
+ MF.getRegInfo().invalidateLiveness();
+
+ // Sort the blocks, with contiguous sort regions.
+ sortBlocks(MF, MLI, WEI, MDT);
+
+ return true;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
new file mode 100644
index 000000000000..d8cbddf74545
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
@@ -0,0 +1,1776 @@
+//===-- WebAssemblyCFGStackify.cpp - CFG Stackification -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements a CFG stacking pass.
+///
+/// This pass inserts BLOCK, LOOP, and TRY markers to mark the start of scopes,
+/// since scope boundaries serve as the labels for WebAssembly's control
+/// transfers.
+///
+/// This is sufficient to convert arbitrary CFGs into a form that works on
+/// WebAssembly, provided that all loops are single-entry.
+///
+/// In case we use exceptions, this pass also fixes mismatches in unwind
+/// destinations created during transforming CFG into wasm structured format.
+///
+//===----------------------------------------------------------------------===//
+
+#include "Utils/WebAssemblyTypeUtilities.h"
+#include "WebAssembly.h"
+#include "WebAssemblyExceptionInfo.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySortRegion.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/WasmEHFuncInfo.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Target/TargetMachine.h"
+using namespace llvm;
+using WebAssembly::SortRegionInfo;
+
+#define DEBUG_TYPE "wasm-cfg-stackify"
+
+STATISTIC(NumCallUnwindMismatches, "Number of call unwind mismatches found");
+STATISTIC(NumCatchUnwindMismatches, "Number of catch unwind mismatches found");
+
+namespace {
+class WebAssemblyCFGStackify final : public MachineFunctionPass {
+ StringRef getPassName() const override { return "WebAssembly CFG Stackify"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addRequired<WebAssemblyExceptionInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ // For each block whose label represents the end of a scope, record the block
+ // which holds the beginning of the scope. This will allow us to quickly skip
+ // over scoped regions when walking blocks.
+ SmallVector<MachineBasicBlock *, 8> ScopeTops;
+ void updateScopeTops(MachineBasicBlock *Begin, MachineBasicBlock *End) {
+ int EndNo = End->getNumber();
+ if (!ScopeTops[EndNo] || ScopeTops[EndNo]->getNumber() > Begin->getNumber())
+ ScopeTops[EndNo] = Begin;
+ }
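+
+ // Illustrative example (hypothetical numbering): if a BLOCK begun in bb1 and
+ // a TRY begun in bb0 both end in bb5, ScopeTops[5] records bb0, the
+ // farthest-spanning scope begin, so a walk can skip from bb5 straight to
+ // bb0.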
+
+ // Placing markers.
+ void placeMarkers(MachineFunction &MF);
+ void placeBlockMarker(MachineBasicBlock &MBB);
+ void placeLoopMarker(MachineBasicBlock &MBB);
+ void placeTryMarker(MachineBasicBlock &MBB);
+
+ // Exception handling related functions
+ bool fixCallUnwindMismatches(MachineFunction &MF);
+ bool fixCatchUnwindMismatches(MachineFunction &MF);
+ void addTryDelegate(MachineInstr *RangeBegin, MachineInstr *RangeEnd,
+ MachineBasicBlock *DelegateDest);
+ void recalculateScopeTops(MachineFunction &MF);
+ void removeUnnecessaryInstrs(MachineFunction &MF);
+
+ // Wrap-up
+ using EndMarkerInfo =
+ std::pair<const MachineBasicBlock *, const MachineInstr *>;
+ unsigned getBranchDepth(const SmallVectorImpl<EndMarkerInfo> &Stack,
+ const MachineBasicBlock *MBB);
+ unsigned getDelegateDepth(const SmallVectorImpl<EndMarkerInfo> &Stack,
+ const MachineBasicBlock *MBB);
+ unsigned
+ getRethrowDepth(const SmallVectorImpl<EndMarkerInfo> &Stack,
+ const SmallVectorImpl<const MachineBasicBlock *> &EHPadStack);
+ void rewriteDepthImmediates(MachineFunction &MF);
+ void fixEndsAtEndOfFunction(MachineFunction &MF);
+ void cleanupFunctionData(MachineFunction &MF);
+
+ // For each BLOCK|LOOP|TRY, the corresponding END_(BLOCK|LOOP|TRY) or DELEGATE
+ // (in case of TRY).
+ DenseMap<const MachineInstr *, MachineInstr *> BeginToEnd;
+ // For each END_(BLOCK|LOOP|TRY) or DELEGATE, the corresponding
+ // BLOCK|LOOP|TRY.
+ DenseMap<const MachineInstr *, MachineInstr *> EndToBegin;
+ // <TRY marker, EH pad> map
+ DenseMap<const MachineInstr *, MachineBasicBlock *> TryToEHPad;
+ // <EH pad, TRY marker> map
+ DenseMap<const MachineBasicBlock *, MachineInstr *> EHPadToTry;
+
+ // We need an appendix block in which to place an 'end_loop' or 'end_try'
+ // marker when the bottom block of a loop or exception is the last block in
+ // the function.
+ MachineBasicBlock *AppendixBB = nullptr;
+ MachineBasicBlock *getAppendixBlock(MachineFunction &MF) {
+ if (!AppendixBB) {
+ AppendixBB = MF.CreateMachineBasicBlock();
+ // Give it a fake predecessor so that AsmPrinter prints its label.
+ AppendixBB->addSuccessor(AppendixBB);
+ MF.push_back(AppendixBB);
+ }
+ return AppendixBB;
+ }
+
+ // Before the rewriteDepthImmediates function runs, 'delegate' has a BB as its
+ // destination operand. getFakeCallerBlock() returns a fake BB that will be
+ // used for the operand when 'delegate' needs to rethrow to the caller. This
+ // will be rewritten as an immediate value that is the number of block depths
+ // + 1 in rewriteDepthImmediates, and this fake BB will be removed at the end
+ // of the pass.
+ MachineBasicBlock *FakeCallerBB = nullptr;
+ MachineBasicBlock *getFakeCallerBlock(MachineFunction &MF) {
+ if (!FakeCallerBB)
+ FakeCallerBB = MF.CreateMachineBasicBlock();
+ return FakeCallerBB;
+ }
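+
+ // Usage sketch (for illustration): while mismatches are being fixed, a
+ // 'delegate' that must rethrow to the caller is built with the fake BB as
+ // its operand, e.g.
+ //   BuildMI(DelegateBB, DL, TII.get(WebAssembly::DELEGATE))
+ //       .addMBB(getFakeCallerBlock(MF));
+ // and rewriteDepthImmediates later replaces that operand with the depth
+ // immediate denoting the caller.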
+
+ // Helper functions to register / unregister scope information created by
+ // marker instructions.
+ void registerScope(MachineInstr *Begin, MachineInstr *End);
+ void registerTryScope(MachineInstr *Begin, MachineInstr *End,
+ MachineBasicBlock *EHPad);
+ void unregisterScope(MachineInstr *Begin);
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyCFGStackify() : MachineFunctionPass(ID) {}
+ ~WebAssemblyCFGStackify() override { releaseMemory(); }
+ void releaseMemory() override;
+};
+} // end anonymous namespace
+
+char WebAssemblyCFGStackify::ID = 0;
+INITIALIZE_PASS(WebAssemblyCFGStackify, DEBUG_TYPE,
+ "Insert BLOCK/LOOP/TRY markers for WebAssembly scopes", false,
+ false)
+
+FunctionPass *llvm::createWebAssemblyCFGStackify() {
+ return new WebAssemblyCFGStackify();
+}
+
+/// Test whether Pred has any terminators explicitly branching to MBB, as
+/// opposed to falling through. Note that it's possible (e.g. in unoptimized
+/// code) for a branch instruction to both branch to a block and fallthrough
+/// to it, so we check the actual branch operands to see if there are any
+/// explicit mentions.
+static bool explicitlyBranchesTo(MachineBasicBlock *Pred,
+ MachineBasicBlock *MBB) {
+ for (MachineInstr &MI : Pred->terminators())
+ for (MachineOperand &MO : MI.explicit_operands())
+ if (MO.isMBB() && MO.getMBB() == MBB)
+ return true;
+ return false;
+}
+
+// Returns an iterator to the earliest position possible within the MBB,
+// satisfying the restrictions given by BeforeSet and AfterSet. BeforeSet
+// contains instructions that should go before the marker, and AfterSet contains
+// ones that should go after the marker. In this function, AfterSet is only
+// used for validation checking.
+template <typename Container>
+static MachineBasicBlock::iterator
+getEarliestInsertPos(MachineBasicBlock *MBB, const Container &BeforeSet,
+ const Container &AfterSet) {
+ auto InsertPos = MBB->end();
+ while (InsertPos != MBB->begin()) {
+ if (BeforeSet.count(&*std::prev(InsertPos))) {
+#ifndef NDEBUG
+ // Validation check
+ for (auto Pos = InsertPos, E = MBB->begin(); Pos != E; --Pos)
+ assert(!AfterSet.count(&*std::prev(Pos)));
+#endif
+ break;
+ }
+ --InsertPos;
+ }
+ return InsertPos;
+}
+
+// Returns an iterator to the latest position possible within the MBB,
+// satisfying the restrictions given by BeforeSet and AfterSet. BeforeSet
+// contains instructions that should go before the marker, and AfterSet contains
+// ones that should go after the marker. In this function, BeforeSet is only
+// used for validation checking.
+template <typename Container>
+static MachineBasicBlock::iterator
+getLatestInsertPos(MachineBasicBlock *MBB, const Container &BeforeSet,
+ const Container &AfterSet) {
+ auto InsertPos = MBB->begin();
+ while (InsertPos != MBB->end()) {
+ if (AfterSet.count(&*InsertPos)) {
+#ifndef NDEBUG
+ // Validation check
+ for (auto Pos = InsertPos, E = MBB->end(); Pos != E; ++Pos)
+ assert(!BeforeSet.count(&*Pos));
+#endif
+ break;
+ }
+ ++InsertPos;
+ }
+ return InsertPos;
+}
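+
+// For illustration (hypothetical contents): in a block [I1, I2, I3] with
+// BeforeSet = {I1} and AfterSet = {I3}, getEarliestInsertPos returns the
+// iterator just after I1 and getLatestInsertPos returns the iterator at I3,
+// so either way the marker lands between I1 and I3.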
+
+void WebAssemblyCFGStackify::registerScope(MachineInstr *Begin,
+ MachineInstr *End) {
+ BeginToEnd[Begin] = End;
+ EndToBegin[End] = Begin;
+}
+
+// When 'End' is not an 'end_try' but a 'delegate', EHPad is nullptr.
+void WebAssemblyCFGStackify::registerTryScope(MachineInstr *Begin,
+ MachineInstr *End,
+ MachineBasicBlock *EHPad) {
+ registerScope(Begin, End);
+ TryToEHPad[Begin] = EHPad;
+ EHPadToTry[EHPad] = Begin;
+}
+
+void WebAssemblyCFGStackify::unregisterScope(MachineInstr *Begin) {
+ assert(BeginToEnd.count(Begin));
+ MachineInstr *End = BeginToEnd[Begin];
+ assert(EndToBegin.count(End));
+ BeginToEnd.erase(Begin);
+ EndToBegin.erase(End);
+ MachineBasicBlock *EHPad = TryToEHPad.lookup(Begin);
+ if (EHPad) {
+ assert(EHPadToTry.count(EHPad));
+ TryToEHPad.erase(Begin);
+ EHPadToTry.erase(EHPad);
+ }
+}
+
+/// Insert a BLOCK marker for branches to MBB (if needed).
+// TODO Consider a more generalized way of handling block (and also loop and
+// try) signatures when we implement the multi-value proposal later.
+void WebAssemblyCFGStackify::placeBlockMarker(MachineBasicBlock &MBB) {
+ assert(!MBB.isEHPad());
+ MachineFunction &MF = *MBB.getParent();
+ auto &MDT = getAnalysis<MachineDominatorTree>();
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ const auto &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+
+ // First compute the nearest common dominator of all forward non-fallthrough
+ // predecessors so that we minimize the time that the BLOCK is on the stack,
+ // which reduces overall stack height.
+ MachineBasicBlock *Header = nullptr;
+ bool IsBranchedTo = false;
+ int MBBNumber = MBB.getNumber();
+ for (MachineBasicBlock *Pred : MBB.predecessors()) {
+ if (Pred->getNumber() < MBBNumber) {
+ Header = Header ? MDT.findNearestCommonDominator(Header, Pred) : Pred;
+ if (explicitlyBranchesTo(Pred, &MBB))
+ IsBranchedTo = true;
+ }
+ }
+ if (!Header)
+ return;
+ if (!IsBranchedTo)
+ return;
+
+ assert(&MBB != &MF.front() && "Header blocks shouldn't have predecessors");
+ MachineBasicBlock *LayoutPred = MBB.getPrevNode();
+
+ // If the nearest common dominator is inside a more deeply nested context,
+ // walk out to the nearest scope which isn't more deeply nested.
+ for (MachineFunction::iterator I(LayoutPred), E(Header); I != E; --I) {
+ if (MachineBasicBlock *ScopeTop = ScopeTops[I->getNumber()]) {
+ if (ScopeTop->getNumber() > Header->getNumber()) {
+ // Skip over an intervening scope.
+ I = std::next(ScopeTop->getIterator());
+ } else {
+ // We found a scope level at an appropriate depth.
+ Header = ScopeTop;
+ break;
+ }
+ }
+ }
+
+ // Decide where in Header to put the BLOCK.
+
+ // Instructions that should go before the BLOCK.
+ SmallPtrSet<const MachineInstr *, 4> BeforeSet;
+ // Instructions that should go after the BLOCK.
+ SmallPtrSet<const MachineInstr *, 4> AfterSet;
+ for (const auto &MI : *Header) {
+ // If there is a previously placed LOOP marker and the bottom block of the
+ // loop is above MBB, it should be after the BLOCK, because the loop is
+ // nested in this BLOCK. Otherwise it should be before the BLOCK.
+ if (MI.getOpcode() == WebAssembly::LOOP) {
+ auto *LoopBottom = BeginToEnd[&MI]->getParent()->getPrevNode();
+ if (MBB.getNumber() > LoopBottom->getNumber())
+ AfterSet.insert(&MI);
+#ifndef NDEBUG
+ else
+ BeforeSet.insert(&MI);
+#endif
+ }
+
+ // If there is a previously placed BLOCK/TRY marker and its corresponding
+ // END marker is before the current BLOCK's END marker, that should be
+ // placed after this BLOCK. Otherwise it should be placed before this BLOCK
+ // marker.
+ if (MI.getOpcode() == WebAssembly::BLOCK ||
+ MI.getOpcode() == WebAssembly::TRY) {
+ if (BeginToEnd[&MI]->getParent()->getNumber() <= MBB.getNumber())
+ AfterSet.insert(&MI);
+#ifndef NDEBUG
+ else
+ BeforeSet.insert(&MI);
+#endif
+ }
+
+#ifndef NDEBUG
+ // All END_(BLOCK|LOOP|TRY) markers should be before the BLOCK.
+ if (MI.getOpcode() == WebAssembly::END_BLOCK ||
+ MI.getOpcode() == WebAssembly::END_LOOP ||
+ MI.getOpcode() == WebAssembly::END_TRY)
+ BeforeSet.insert(&MI);
+#endif
+
+ // Terminators should go after the BLOCK.
+ if (MI.isTerminator())
+ AfterSet.insert(&MI);
+ }
+
+ // Local expression tree should go after the BLOCK.
+ for (auto I = Header->getFirstTerminator(), E = Header->begin(); I != E;
+ --I) {
+ if (std::prev(I)->isDebugInstr() || std::prev(I)->isPosition())
+ continue;
+ if (WebAssembly::isChild(*std::prev(I), MFI))
+ AfterSet.insert(&*std::prev(I));
+ else
+ break;
+ }
+
+ // Add the BLOCK.
+ WebAssembly::BlockType ReturnType = WebAssembly::BlockType::Void;
+ auto InsertPos = getLatestInsertPos(Header, BeforeSet, AfterSet);
+ MachineInstr *Begin =
+ BuildMI(*Header, InsertPos, Header->findDebugLoc(InsertPos),
+ TII.get(WebAssembly::BLOCK))
+ .addImm(int64_t(ReturnType));
+
+ // Decide where in Header to put the END_BLOCK.
+ BeforeSet.clear();
+ AfterSet.clear();
+ for (auto &MI : MBB) {
+#ifndef NDEBUG
+ // END_BLOCK should precede existing LOOP and TRY markers.
+ if (MI.getOpcode() == WebAssembly::LOOP ||
+ MI.getOpcode() == WebAssembly::TRY)
+ AfterSet.insert(&MI);
+#endif
+
+ // If there is a previously placed END_LOOP marker and the header of the
+ // loop is above this block's header, the END_LOOP should be placed after
+ // the BLOCK, because the loop contains this block. Otherwise the END_LOOP
+ // should be placed before the BLOCK. The same for END_TRY.
+ if (MI.getOpcode() == WebAssembly::END_LOOP ||
+ MI.getOpcode() == WebAssembly::END_TRY) {
+ if (EndToBegin[&MI]->getParent()->getNumber() >= Header->getNumber())
+ BeforeSet.insert(&MI);
+#ifndef NDEBUG
+ else
+ AfterSet.insert(&MI);
+#endif
+ }
+ }
+
+ // Mark the end of the block.
+ InsertPos = getEarliestInsertPos(&MBB, BeforeSet, AfterSet);
+ MachineInstr *End = BuildMI(MBB, InsertPos, MBB.findPrevDebugLoc(InsertPos),
+ TII.get(WebAssembly::END_BLOCK));
+ registerScope(Begin, End);
+
+ // Track the farthest-spanning scope that ends at this point.
+ updateScopeTops(Header, &MBB);
+}
+
+/// Insert a LOOP marker for a loop starting at MBB (if it's a loop header).
+void WebAssemblyCFGStackify::placeLoopMarker(MachineBasicBlock &MBB) {
+ MachineFunction &MF = *MBB.getParent();
+ const auto &MLI = getAnalysis<MachineLoopInfo>();
+ const auto &WEI = getAnalysis<WebAssemblyExceptionInfo>();
+ SortRegionInfo SRI(MLI, WEI);
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+
+ MachineLoop *Loop = MLI.getLoopFor(&MBB);
+ if (!Loop || Loop->getHeader() != &MBB)
+ return;
+
+ // The operand of a LOOP is the first block after the loop. If the loop is the
+ // bottom of the function, insert a dummy block at the end.
+ MachineBasicBlock *Bottom = SRI.getBottom(Loop);
+ auto Iter = std::next(Bottom->getIterator());
+ if (Iter == MF.end()) {
+ getAppendixBlock(MF);
+ Iter = std::next(Bottom->getIterator());
+ }
+ MachineBasicBlock *AfterLoop = &*Iter;
+
+ // Decide where in Header to put the LOOP.
+ SmallPtrSet<const MachineInstr *, 4> BeforeSet;
+ SmallPtrSet<const MachineInstr *, 4> AfterSet;
+ for (const auto &MI : MBB) {
+ // LOOP marker should be after any existing loop that ends here. Otherwise
+ // we assume the instruction belongs to the loop.
+ if (MI.getOpcode() == WebAssembly::END_LOOP)
+ BeforeSet.insert(&MI);
+#ifndef NDEBUG
+ else
+ AfterSet.insert(&MI);
+#endif
+ }
+
+ // Mark the beginning of the loop.
+ auto InsertPos = getEarliestInsertPos(&MBB, BeforeSet, AfterSet);
+ MachineInstr *Begin = BuildMI(MBB, InsertPos, MBB.findDebugLoc(InsertPos),
+ TII.get(WebAssembly::LOOP))
+ .addImm(int64_t(WebAssembly::BlockType::Void));
+
+ // Decide where in Header to put the END_LOOP.
+ BeforeSet.clear();
+ AfterSet.clear();
+#ifndef NDEBUG
+ for (const auto &MI : MBB)
+ // Existing END_LOOP markers belong to parent loops of this loop
+ if (MI.getOpcode() == WebAssembly::END_LOOP)
+ AfterSet.insert(&MI);
+#endif
+
+ // Mark the end of the loop (using arbitrary debug location that branched to
+ // the loop end as its location).
+ InsertPos = getEarliestInsertPos(AfterLoop, BeforeSet, AfterSet);
+ DebugLoc EndDL = AfterLoop->pred_empty()
+ ? DebugLoc()
+ : (*AfterLoop->pred_rbegin())->findBranchDebugLoc();
+ MachineInstr *End =
+ BuildMI(*AfterLoop, InsertPos, EndDL, TII.get(WebAssembly::END_LOOP));
+ registerScope(Begin, End);
+
+ assert((!ScopeTops[AfterLoop->getNumber()] ||
+ ScopeTops[AfterLoop->getNumber()]->getNumber() < MBB.getNumber()) &&
+ "With block sorting the outermost loop for a block should be first.");
+ updateScopeTops(&MBB, AfterLoop);
+}
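+
+// Shape of the result, for illustration (hypothetical single-block loop):
+//   loop          ;; LOOP marker, placed in the header
+//     ...
+//     br_if 0     ;; backedge branching back to the start of the loop
+//   end_loop      ;; END_LOOP, placed in the first block after the loop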
+
+void WebAssemblyCFGStackify::placeTryMarker(MachineBasicBlock &MBB) {
+ assert(MBB.isEHPad());
+ MachineFunction &MF = *MBB.getParent();
+ auto &MDT = getAnalysis<MachineDominatorTree>();
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ const auto &MLI = getAnalysis<MachineLoopInfo>();
+ const auto &WEI = getAnalysis<WebAssemblyExceptionInfo>();
+ SortRegionInfo SRI(MLI, WEI);
+ const auto &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+
+ // Compute the nearest common dominator of all unwind predecessors
+ MachineBasicBlock *Header = nullptr;
+ int MBBNumber = MBB.getNumber();
+ for (auto *Pred : MBB.predecessors()) {
+ if (Pred->getNumber() < MBBNumber) {
+ Header = Header ? MDT.findNearestCommonDominator(Header, Pred) : Pred;
+ assert(!explicitlyBranchesTo(Pred, &MBB) &&
+ "Explicit branch to an EH pad!");
+ }
+ }
+ if (!Header)
+ return;
+
+ // If this try is at the bottom of the function, insert a dummy block at the
+ // end.
+ WebAssemblyException *WE = WEI.getExceptionFor(&MBB);
+ assert(WE);
+ MachineBasicBlock *Bottom = SRI.getBottom(WE);
+
+ auto Iter = std::next(Bottom->getIterator());
+ if (Iter == MF.end()) {
+ getAppendixBlock(MF);
+ Iter = std::next(Bottom->getIterator());
+ }
+ MachineBasicBlock *Cont = &*Iter;
+
+ assert(Cont != &MF.front());
+ MachineBasicBlock *LayoutPred = Cont->getPrevNode();
+
+ // If the nearest common dominator is inside a more deeply nested context,
+ // walk out to the nearest scope which isn't more deeply nested.
+ for (MachineFunction::iterator I(LayoutPred), E(Header); I != E; --I) {
+ if (MachineBasicBlock *ScopeTop = ScopeTops[I->getNumber()]) {
+ if (ScopeTop->getNumber() > Header->getNumber()) {
+ // Skip over an intervening scope.
+ I = std::next(ScopeTop->getIterator());
+ } else {
+ // We found a scope level at an appropriate depth.
+ Header = ScopeTop;
+ break;
+ }
+ }
+ }
+
+ // Decide where in Header to put the TRY.
+
+ // Instructions that should go before the TRY.
+ SmallPtrSet<const MachineInstr *, 4> BeforeSet;
+ // Instructions that should go after the TRY.
+ SmallPtrSet<const MachineInstr *, 4> AfterSet;
+ for (const auto &MI : *Header) {
+ // If there is a previously placed LOOP marker and the bottom block of the
+ // loop is above MBB, it should be after the TRY, because the loop is nested
+ // in this TRY. Otherwise it should be before the TRY.
+ if (MI.getOpcode() == WebAssembly::LOOP) {
+ auto *LoopBottom = BeginToEnd[&MI]->getParent()->getPrevNode();
+ if (MBB.getNumber() > LoopBottom->getNumber())
+ AfterSet.insert(&MI);
+#ifndef NDEBUG
+ else
+ BeforeSet.insert(&MI);
+#endif
+ }
+
+ // All previously inserted BLOCK/TRY markers should be after the TRY because
+ // they are all nested trys.
+ if (MI.getOpcode() == WebAssembly::BLOCK ||
+ MI.getOpcode() == WebAssembly::TRY)
+ AfterSet.insert(&MI);
+
+#ifndef NDEBUG
+ // All END_(BLOCK/LOOP/TRY) markers should be before the TRY.
+ if (MI.getOpcode() == WebAssembly::END_BLOCK ||
+ MI.getOpcode() == WebAssembly::END_LOOP ||
+ MI.getOpcode() == WebAssembly::END_TRY)
+ BeforeSet.insert(&MI);
+#endif
+
+ // Terminators should go after the TRY.
+ if (MI.isTerminator())
+ AfterSet.insert(&MI);
+ }
+
+ // If Header unwinds to MBB (= Header contains 'invoke'), the try block should
+ // contain the call within it. So the call should go after the TRY. The
+ // exception is when the header's terminator is a rethrow instruction, in
+ // which case that instruction, not a call instruction before it, is the one
+ // that will throw.
+ MachineInstr *ThrowingCall = nullptr;
+ if (MBB.isPredecessor(Header)) {
+ auto TermPos = Header->getFirstTerminator();
+ if (TermPos == Header->end() ||
+ TermPos->getOpcode() != WebAssembly::RETHROW) {
+ for (auto &MI : reverse(*Header)) {
+ if (MI.isCall()) {
+ AfterSet.insert(&MI);
+ ThrowingCall = &MI;
+ // Possibly throwing calls are usually wrapped by EH_LABEL
+ // instructions. We don't want to split them and the call.
+ if (MI.getIterator() != Header->begin() &&
+ std::prev(MI.getIterator())->isEHLabel()) {
+ AfterSet.insert(&*std::prev(MI.getIterator()));
+ ThrowingCall = &*std::prev(MI.getIterator());
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ // Local expression tree should go after the TRY.
+ // For BLOCK placement, we start the search from the previous instruction of a
+ // BB's terminator, but in TRY's case, we should start from the previous
+ // instruction of a call that can throw, or an EH_LABEL that precedes the call,
+ // because the return values of the call's previous instructions can be
+ // stackified and consumed by the throwing call.
+ auto SearchStartPt = ThrowingCall ? MachineBasicBlock::iterator(ThrowingCall)
+ : Header->getFirstTerminator();
+ for (auto I = SearchStartPt, E = Header->begin(); I != E; --I) {
+ if (std::prev(I)->isDebugInstr() || std::prev(I)->isPosition())
+ continue;
+ if (WebAssembly::isChild(*std::prev(I), MFI))
+ AfterSet.insert(&*std::prev(I));
+ else
+ break;
+ }
+
+ // Add the TRY.
+ auto InsertPos = getLatestInsertPos(Header, BeforeSet, AfterSet);
+ MachineInstr *Begin =
+ BuildMI(*Header, InsertPos, Header->findDebugLoc(InsertPos),
+ TII.get(WebAssembly::TRY))
+ .addImm(int64_t(WebAssembly::BlockType::Void));
+
+ // Decide where in Header to put the END_TRY.
+ BeforeSet.clear();
+ AfterSet.clear();
+ for (const auto &MI : *Cont) {
+#ifndef NDEBUG
+ // END_TRY should precede existing LOOP and BLOCK markers.
+ if (MI.getOpcode() == WebAssembly::LOOP ||
+ MI.getOpcode() == WebAssembly::BLOCK)
+ AfterSet.insert(&MI);
+
+ // All END_TRY markers placed earlier belong to exceptions that contain
+ // this one.
+ if (MI.getOpcode() == WebAssembly::END_TRY)
+ AfterSet.insert(&MI);
+#endif
+
+ // If there is a previously placed END_LOOP marker and its header is after
+ // where TRY marker is, this loop is contained within the 'catch' part, so
+ // the END_TRY marker should go after that. Otherwise, the whole try-catch
+ // is contained within this loop, so the END_TRY should go before that.
+ if (MI.getOpcode() == WebAssembly::END_LOOP) {
+ // For a LOOP to be after TRY, LOOP's BB should be after TRY's BB; if they
+ // are in the same BB, LOOP is always before TRY.
+ if (EndToBegin[&MI]->getParent()->getNumber() > Header->getNumber())
+ BeforeSet.insert(&MI);
+#ifndef NDEBUG
+ else
+ AfterSet.insert(&MI);
+#endif
+ }
+
+ // It is not possible for an END_BLOCK to be already in this block.
+ }
+
+ // Mark the end of the TRY.
+ InsertPos = getEarliestInsertPos(Cont, BeforeSet, AfterSet);
+ MachineInstr *End =
+ BuildMI(*Cont, InsertPos, Bottom->findBranchDebugLoc(),
+ TII.get(WebAssembly::END_TRY));
+ registerTryScope(Begin, End, &MBB);
+
+ // Track the farthest-spanning scope that ends at this point. We create two
+ // mappings: (BB with 'end_try' -> BB with 'try') and (BB with 'catch' -> BB
+ // with 'try'). We need to create 'catch' -> 'try' mapping here too because
+ // markers should not span across 'catch'. For example, this should not
+ // happen:
+ //
+ // try
+ // block --| (X)
+ // catch |
+ // end_block --|
+ // end_try
+ for (auto *End : {&MBB, Cont})
+ updateScopeTops(Header, End);
+}
+
+void WebAssemblyCFGStackify::removeUnnecessaryInstrs(MachineFunction &MF) {
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+
+ // When there is an unconditional branch right before a catch instruction and
+ // it branches to the end of end_try marker, we don't need the branch, because
+ // if there is no exception, the control flow transfers to that point anyway.
+ // bb0:
+ // try
+ // ...
+ // br bb2 <- Not necessary
+ // bb1 (ehpad):
+ // catch
+ // ...
+ // bb2: <- Continuation BB
+ // end
+ //
+ // A more involved case: when the BB where 'end' is located is another EH
+ // pad, the Cont (= continuation) BB is that EH pad's 'end' BB. For example,
+ // bb0:
+ // try
+ // try
+ // ...
+ // br bb3 <- Not necessary
+ // bb1 (ehpad):
+ // catch
+ // bb2 (ehpad):
+ // end
+ // catch
+ // ...
+ // bb3: <- Continuation BB
+ // end
+ //
+ // When the EH pad at hand is bb1, its matching end_try is in bb2. But it is
+ // another EH pad, so bb0's continuation BB becomes bb3. So 'br bb3' in the
+ // code can be deleted. This is why we run the 'while' loop below until
+ // 'Cont' is not an EH pad.
+ for (auto &MBB : MF) {
+ if (!MBB.isEHPad())
+ continue;
+
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
+ SmallVector<MachineOperand, 4> Cond;
+ MachineBasicBlock *EHPadLayoutPred = MBB.getPrevNode();
+
+ MachineBasicBlock *Cont = &MBB;
+ while (Cont->isEHPad()) {
+ MachineInstr *Try = EHPadToTry[Cont];
+ MachineInstr *EndTry = BeginToEnd[Try];
+ // We started from an EH pad, so the end marker cannot be a delegate
+ assert(EndTry->getOpcode() != WebAssembly::DELEGATE);
+ Cont = EndTry->getParent();
+ }
+
+ bool Analyzable = !TII.analyzeBranch(*EHPadLayoutPred, TBB, FBB, Cond);
+ // This condition means either
+ // 1. This BB ends with a single unconditional branch whose destination is
+ // Cont.
+ // 2. This BB ends with a conditional branch followed by an unconditional
+ // branch, and the unconditional branch's destination is Cont.
+ // In both cases, we want to remove the last (= unconditional) branch.
+ if (Analyzable && ((Cond.empty() && TBB && TBB == Cont) ||
+ (!Cond.empty() && FBB && FBB == Cont))) {
+ bool ErasedUncondBr = false;
+ (void)ErasedUncondBr;
+ for (auto I = EHPadLayoutPred->end(), E = EHPadLayoutPred->begin();
+ I != E; --I) {
+ auto PrevI = std::prev(I);
+ if (PrevI->isTerminator()) {
+ assert(PrevI->getOpcode() == WebAssembly::BR);
+ PrevI->eraseFromParent();
+ ErasedUncondBr = true;
+ break;
+ }
+ }
+ assert(ErasedUncondBr && "Unconditional branch not erased!");
+ }
+ }
+
+ // When there are block / end_block markers that overlap with try / end_try
+ // markers, and the block and try markers' return types are the same, the
+ // block / end_block markers are not necessary, because try / end_try markers
+ // also can serve as boundaries for branches.
+ // block <- Not necessary
+ // try
+ // ...
+ // catch
+ // ...
+ // end
+ // end <- Not necessary
+ SmallVector<MachineInstr *, 32> ToDelete;
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ if (MI.getOpcode() != WebAssembly::TRY)
+ continue;
+ MachineInstr *Try = &MI, *EndTry = BeginToEnd[Try];
+ if (EndTry->getOpcode() == WebAssembly::DELEGATE)
+ continue;
+
+ MachineBasicBlock *TryBB = Try->getParent();
+ MachineBasicBlock *Cont = EndTry->getParent();
+ int64_t RetType = Try->getOperand(0).getImm();
+ for (auto B = Try->getIterator(), E = std::next(EndTry->getIterator());
+ B != TryBB->begin() && E != Cont->end() &&
+ std::prev(B)->getOpcode() == WebAssembly::BLOCK &&
+ E->getOpcode() == WebAssembly::END_BLOCK &&
+ std::prev(B)->getOperand(0).getImm() == RetType;
+ --B, ++E) {
+ ToDelete.push_back(&*std::prev(B));
+ ToDelete.push_back(&*E);
+ }
+ }
+ }
+ for (auto *MI : ToDelete) {
+ if (MI->getOpcode() == WebAssembly::BLOCK)
+ unregisterScope(MI);
+ MI->eraseFromParent();
+ }
+}
+
+// When MBB is split into MBB and Split, we should unstackify defs in MBB that
+// have their uses in Split.
+static void unstackifyVRegsUsedInSplitBB(MachineBasicBlock &MBB,
+ MachineBasicBlock &Split) {
+ MachineFunction &MF = *MBB.getParent();
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ auto &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ auto &MRI = MF.getRegInfo();
+
+ for (auto &MI : Split) {
+ for (auto &MO : MI.explicit_uses()) {
+ if (!MO.isReg() || MO.getReg().isPhysical())
+ continue;
+ if (MachineInstr *Def = MRI.getUniqueVRegDef(MO.getReg()))
+ if (Def->getParent() == &MBB)
+ MFI.unstackifyVReg(MO.getReg());
+ }
+ }
+
+ // In RegStackify, when a register definition is used multiple times,
+ // Reg = INST ...
+ // INST ..., Reg, ...
+ // INST ..., Reg, ...
+ // INST ..., Reg, ...
+ //
+ // we introduce a TEE, which has the following form:
+ // DefReg = INST ...
+ // TeeReg, Reg = TEE_... DefReg
+ // INST ..., TeeReg, ...
+ // INST ..., Reg, ...
+ // INST ..., Reg, ...
+ // with DefReg and TeeReg stackified but Reg not stackified.
+ //
+ // But the invariant that TeeReg should be stackified can be violated while we
+ // unstackify registers in the split BB above. In this case, we convert TEEs
+ // into two COPYs. This COPY will be eventually eliminated in ExplicitLocals.
+ // DefReg = INST ...
+ // TeeReg = COPY DefReg
+ // Reg = COPY DefReg
+ // INST ..., TeeReg, ...
+ // INST ..., Reg, ...
+ // INST ..., Reg, ...
+ for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
+ if (!WebAssembly::isTee(MI.getOpcode()))
+ continue;
+ Register TeeReg = MI.getOperand(0).getReg();
+ Register Reg = MI.getOperand(1).getReg();
+ Register DefReg = MI.getOperand(2).getReg();
+ if (!MFI.isVRegStackified(TeeReg)) {
+ // Now we are not using TEE anymore, so unstackify DefReg too
+ MFI.unstackifyVReg(DefReg);
+ unsigned CopyOpc =
+ WebAssembly::getCopyOpcodeForRegClass(MRI.getRegClass(DefReg));
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII.get(CopyOpc), TeeReg)
+ .addReg(DefReg);
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII.get(CopyOpc), Reg).addReg(DefReg);
+ MI.eraseFromParent();
+ }
+ }
+}
+
+// Wrap the given range of instruction with try-delegate. RangeBegin and
+// RangeEnd are inclusive.
+void WebAssemblyCFGStackify::addTryDelegate(MachineInstr *RangeBegin,
+ MachineInstr *RangeEnd,
+ MachineBasicBlock *DelegateDest) {
+ auto *BeginBB = RangeBegin->getParent();
+ auto *EndBB = RangeEnd->getParent();
+ MachineFunction &MF = *BeginBB->getParent();
+ const auto &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+
+ // Local expression tree before the first call of this range should go
+ // after the nested TRY.
+ SmallPtrSet<const MachineInstr *, 4> AfterSet;
+ AfterSet.insert(RangeBegin);
+ for (auto I = MachineBasicBlock::iterator(RangeBegin), E = BeginBB->begin();
+ I != E; --I) {
+ if (std::prev(I)->isDebugInstr() || std::prev(I)->isPosition())
+ continue;
+ if (WebAssembly::isChild(*std::prev(I), MFI))
+ AfterSet.insert(&*std::prev(I));
+ else
+ break;
+ }
+
+ // Create the nested try instruction.
+ auto TryPos = getLatestInsertPos(
+ BeginBB, SmallPtrSet<const MachineInstr *, 4>(), AfterSet);
+ MachineInstr *Try = BuildMI(*BeginBB, TryPos, RangeBegin->getDebugLoc(),
+ TII.get(WebAssembly::TRY))
+ .addImm(int64_t(WebAssembly::BlockType::Void));
+
+ // Create a BB to insert the 'delegate' instruction.
+ MachineBasicBlock *DelegateBB = MF.CreateMachineBasicBlock();
+ // If the destination of 'delegate' is not the caller, add the destination to
+ // the BB's successors.
+ if (DelegateDest != FakeCallerBB)
+ DelegateBB->addSuccessor(DelegateDest);
+
+ auto SplitPos = std::next(RangeEnd->getIterator());
+ if (SplitPos == EndBB->end()) {
+ // If the range's end instruction is at the end of the BB, insert the new
+ // delegate BB after the current BB.
+ MF.insert(std::next(EndBB->getIterator()), DelegateBB);
+ EndBB->addSuccessor(DelegateBB);
+
+ } else {
+ // When the split pos is in the middle of a BB, we split the BB into two and
+ // put the 'delegate' BB in between. We normally create a split BB and make
+ // it a successor of the original BB (PostSplit == true), but in case the BB
+ // is an EH pad and the split pos is before 'catch', we should preserve the
+ // BB's property, including that it is an EH pad, in the later part of the
+ // BB, where 'catch' is. In this case we set PostSplit to false.
+ bool PostSplit = true;
+ if (EndBB->isEHPad()) {
+ for (auto I = MachineBasicBlock::iterator(SplitPos), E = EndBB->end();
+ I != E; ++I) {
+ if (WebAssembly::isCatch(I->getOpcode())) {
+ PostSplit = false;
+ break;
+ }
+ }
+ }
+
+ MachineBasicBlock *PreBB = nullptr, *PostBB = nullptr;
+ if (PostSplit) {
+ // If the range's end instruction is in the middle of the BB, we split the
+ // BB into two and insert the delegate BB in between.
+ // - Before:
+ // bb:
+ // range_end
+ // other_insts
+ //
+ // - After:
+ // pre_bb: (previous 'bb')
+ // range_end
+ // delegate_bb: (new)
+ // delegate
+ // post_bb: (new)
+ // other_insts
+ PreBB = EndBB;
+ PostBB = MF.CreateMachineBasicBlock();
+ MF.insert(std::next(PreBB->getIterator()), PostBB);
+ MF.insert(std::next(PreBB->getIterator()), DelegateBB);
+ PostBB->splice(PostBB->end(), PreBB, SplitPos, PreBB->end());
+ PostBB->transferSuccessors(PreBB);
+ } else {
+ // - Before:
+ // ehpad:
+ // range_end
+ // catch
+ // ...
+ //
+ // - After:
+ // pre_bb: (new)
+ // range_end
+ // delegate_bb: (new)
+ // delegate
+ // post_bb: (previous 'ehpad')
+ // catch
+ // ...
+ assert(EndBB->isEHPad());
+ PreBB = MF.CreateMachineBasicBlock();
+ PostBB = EndBB;
+ MF.insert(PostBB->getIterator(), PreBB);
+ MF.insert(PostBB->getIterator(), DelegateBB);
+ PreBB->splice(PreBB->end(), PostBB, PostBB->begin(), SplitPos);
+ // We don't need to transfer predecessors of the EH pad to 'PreBB',
+ // because an EH pad's predecessors are all through unwind edges and they
+ // should still unwind to the EH pad, not PreBB.
+ }
+ unstackifyVRegsUsedInSplitBB(*PreBB, *PostBB);
+ PreBB->addSuccessor(DelegateBB);
+ PreBB->addSuccessor(PostBB);
+ }
+
+ // Add 'delegate' instruction in the delegate BB created above.
+ MachineInstr *Delegate = BuildMI(DelegateBB, RangeEnd->getDebugLoc(),
+ TII.get(WebAssembly::DELEGATE))
+ .addMBB(DelegateDest);
+ registerTryScope(Try, Delegate, nullptr);
+}
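+
+// Usage sketch, mirroring the call sites below: a call unwind mismatch wraps
+// just the offending instruction range,
+//   addTryDelegate(RangeBegin, RangeEnd, CorrectUnwindDest);
+// while a catch unwind mismatch wraps a whole try-catch-end,
+//   addTryDelegate(Try, EndTry, CorrectUnwindDest);
+// ('CorrectUnwindDest' is a placeholder for the destination each fix
+// computes.)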
+
+bool WebAssemblyCFGStackify::fixCallUnwindMismatches(MachineFunction &MF) {
+ // Linearizing the control flow by placing TRY / END_TRY markers can create
+ // mismatches in unwind destinations for throwing instructions, such as calls.
+ //
+ // We use the 'delegate' instruction to fix the unwind mismatches. 'delegate'
+ // instruction delegates an exception to an outer 'catch'. It can target not
+ // only 'catch' but all block-like structures including another 'delegate',
+ // but with slightly different semantics than branches. When it targets a
+ // 'catch', it will delegate the exception to that catch. It is being
+ // discussed how to define the semantics when 'delegate''s target is a non-try
+ // block: it will either be a validation failure or it will target the next
+ // outer try-catch. But anyway our LLVM backend currently does not generate
+ // such code. The example below illustrates where the 'delegate' instruction
+ // in the middle will delegate the exception to, depending on the value of N.
+ // try
+ // try
+ // block
+ // try
+ // try
+ // call @foo
+ // delegate N ;; Where will this delegate to?
+ // catch ;; N == 0
+ // end
+ // end ;; N == 1 (invalid; will not be generated)
+ // delegate ;; N == 2
+ // catch ;; N == 3
+ // end
+ // ;; N == 4 (to caller)
+
+ // 1. When an instruction may throw, but the EH pad it will unwind to can be
+ // different from the original CFG.
+ //
+ // Example: we have the following CFG:
+ // bb0:
+ // call @foo ; if it throws, unwind to bb2
+ // bb1:
+ // call @bar ; if it throws, unwind to bb3
+ // bb2 (ehpad):
+ // catch
+ // ...
+ // bb3 (ehpad)
+ // catch
+ // ...
+ //
+ // And the CFG is sorted in this order. Then after placing TRY markers, it
+ // will look like: (BB markers are omitted)
+ // try
+ // try
+ // call @foo
+ // call @bar ;; if it throws, unwind to bb3
+ // catch ;; ehpad (bb2)
+ // ...
+ // end_try
+ // catch ;; ehpad (bb3)
+ // ...
+ // end_try
+ //
+ // Now if bar() throws, it is going to end up in bb2, not bb3, where it
+ // is supposed to end up. We solve this problem by wrapping the mismatching
+ // call with an inner try-delegate that rethrows the exception to the right
+ // 'catch'.
+ //
+ // try
+ // try
+ // call @foo
+ // try ;; (new)
+ // call @bar
+ // delegate 1 (bb3) ;; (new)
+ // catch ;; ehpad (bb2)
+ // ...
+ // end_try
+ // catch ;; ehpad (bb3)
+ // ...
+ // end_try
+ //
+ // ---
+ // 2. The same as 1, but in this case an instruction unwinds to a caller
+ // function and not another EH pad.
+ //
+ // Example: we have the following CFG:
+ // bb0:
+ // call @foo ; if it throws, unwind to bb2
+ // bb1:
+ // call @bar ; if it throws, unwind to caller
+ // bb2 (ehpad):
+ // catch
+ // ...
+ //
+ // And the CFG is sorted in this order. Then after placing TRY markers, it
+ // will look like:
+ // try
+ // call @foo
+ // call @bar ;; if it throws, unwind to caller
+ // catch ;; ehpad (bb2)
+ // ...
+ // end_try
+ //
+ // Now if bar() throws, it is going to end up in bb2, when it is supposed to
+ // throw up to the caller. We solve this problem in the same way, but in this
+ // case 'delegate's immediate argument is the number of block depths + 1,
+ // which means it rethrows to the caller.
+ // try
+ // call @foo
+ // try ;; (new)
+ // call @bar
+ // delegate 1 (caller) ;; (new)
+ // catch ;; ehpad (bb2)
+ // ...
+ // end_try
+ //
+ // Before rewriteDepthImmediates, delegate's argument is a BB. In case of the
+ // caller, it will take a fake BB generated by getFakeCallerBlock(), which
+ // will be converted to a correct immediate argument later.
+ //
+ // In case there are multiple calls in a BB that may throw to the caller, they
+ // can be wrapped together in one nested try-delegate scope. (In 1, this
+ // couldn't happen, because may-throwing instruction there had an unwind
+ // destination, i.e., it was an invoke before, and there could be only one
+ // invoke within a BB.)
+
+ SmallVector<const MachineBasicBlock *, 8> EHPadStack;
+ // Range of instructions to be wrapped in a new nested try/catch. A range
+ // exists in a single BB and does not span multiple BBs.
+ using TryRange = std::pair<MachineInstr *, MachineInstr *>;
+ // In original CFG, <unwind destination BB, a vector of try ranges>
+ DenseMap<MachineBasicBlock *, SmallVector<TryRange, 4>> UnwindDestToTryRanges;
+
+ // Gather possibly throwing calls (i.e., previously invokes) whose current
+ // unwind destination is not the same as the original CFG. (Case 1)
+
+ for (auto &MBB : reverse(MF)) {
+ bool SeenThrowableInstInBB = false;
+ for (auto &MI : reverse(MBB)) {
+ if (MI.getOpcode() == WebAssembly::TRY)
+ EHPadStack.pop_back();
+ else if (WebAssembly::isCatch(MI.getOpcode()))
+ EHPadStack.push_back(MI.getParent());
+
+ // In this loop we only gather calls that have an EH pad to unwind. So
+ // there will be at most 1 such call (= invoke) in a BB, so after we've
+ // seen one, we can skip the rest of the BB. Also if MBB has no EH pad
+ // successor or MI does not throw, this is not an invoke.
+ if (SeenThrowableInstInBB || !MBB.hasEHPadSuccessor() ||
+ !WebAssembly::mayThrow(MI))
+ continue;
+ SeenThrowableInstInBB = true;
+
+ // If the EH pad on the stack top is where this instruction should unwind
+ // next, we're good.
+ MachineBasicBlock *UnwindDest = getFakeCallerBlock(MF);
+ for (auto *Succ : MBB.successors()) {
+ // Even though semantically a BB can have multiple successors in case an
+ // exception is not caught by a catchpad, in our backend implementation
+ // it is guaranteed that a BB can have at most one EH pad successor. For
+ // details, refer to comments in findWasmUnwindDestinations function in
+ // SelectionDAGBuilder.cpp.
+ if (Succ->isEHPad()) {
+ UnwindDest = Succ;
+ break;
+ }
+ }
+ if (EHPadStack.back() == UnwindDest)
+ continue;
+
+ // Include EH_LABELs in the range before and after the invoke
+ MachineInstr *RangeBegin = &MI, *RangeEnd = &MI;
+ if (RangeBegin->getIterator() != MBB.begin() &&
+ std::prev(RangeBegin->getIterator())->isEHLabel())
+ RangeBegin = &*std::prev(RangeBegin->getIterator());
+ if (std::next(RangeEnd->getIterator()) != MBB.end() &&
+ std::next(RangeEnd->getIterator())->isEHLabel())
+ RangeEnd = &*std::next(RangeEnd->getIterator());
+
+ // If not, record the range.
+ UnwindDestToTryRanges[UnwindDest].push_back(
+ TryRange(RangeBegin, RangeEnd));
+ LLVM_DEBUG(dbgs() << "- Call unwind mismatch: MBB = " << MBB.getName()
+ << "\nCall = " << MI
+ << "\nOriginal dest = " << UnwindDest->getName()
+ << " Current dest = " << EHPadStack.back()->getName()
+ << "\n\n");
+ }
+ }
+
+ assert(EHPadStack.empty());
+
+ // Gather possibly throwing calls that are supposed to unwind up to the caller
+ // if they throw, but currently unwind to an incorrect destination. Unlike the
+ // loop above, there can be multiple calls within a BB that unwind to the
+ // caller, which we should group together in a range. (Case 2)
+
+ MachineInstr *RangeBegin = nullptr, *RangeEnd = nullptr; // inclusive
+
+ // Record the range.
+ auto RecordCallerMismatchRange = [&](const MachineBasicBlock *CurrentDest) {
+ UnwindDestToTryRanges[getFakeCallerBlock(MF)].push_back(
+ TryRange(RangeBegin, RangeEnd));
+ LLVM_DEBUG(dbgs() << "- Call unwind mismatch: MBB = "
+ << RangeBegin->getParent()->getName()
+ << "\nRange begin = " << *RangeBegin
+ << "Range end = " << *RangeEnd
+ << "\nOriginal dest = caller Current dest = "
+ << CurrentDest->getName() << "\n\n");
+ RangeBegin = RangeEnd = nullptr; // Reset range pointers
+ };
+
+ for (auto &MBB : reverse(MF)) {
+ bool SeenThrowableInstInBB = false;
+ for (auto &MI : reverse(MBB)) {
+ bool MayThrow = WebAssembly::mayThrow(MI);
+
+ // If MBB has an EH pad successor and this is the last instruction that
+ // may throw, this instruction unwinds to the EH pad and not to the
+ // caller.
+ if (MBB.hasEHPadSuccessor() && MayThrow && !SeenThrowableInstInBB)
+ SeenThrowableInstInBB = true;
+
+ // We wrap up the current range when we see a marker even if we haven't
+ // finished a BB.
+ else if (RangeEnd && WebAssembly::isMarker(MI.getOpcode()))
+ RecordCallerMismatchRange(EHPadStack.back());
+
+ // If EHPadStack is empty, that means it correctly unwinds to the caller
+ // if it throws, so we're good. If MI does not throw, we're good too.
+ else if (EHPadStack.empty() || !MayThrow) {
+ }
+
+ // We found an instruction that unwinds to the caller but currently has an
+ // incorrect unwind destination. Create a new range or increment the
+ // currently existing range.
+ else {
+ if (!RangeEnd)
+ RangeBegin = RangeEnd = &MI;
+ else
+ RangeBegin = &MI;
+ }
+
+ // Update EHPadStack.
+ if (MI.getOpcode() == WebAssembly::TRY)
+ EHPadStack.pop_back();
+ else if (WebAssembly::isCatch(MI.getOpcode()))
+ EHPadStack.push_back(MI.getParent());
+ }
+
+ if (RangeEnd)
+ RecordCallerMismatchRange(EHPadStack.back());
+ }
+
+ assert(EHPadStack.empty());
+
+ // We don't have any unwind destination mismatches to resolve.
+ if (UnwindDestToTryRanges.empty())
+ return false;
+
+ // Now we fix the mismatches by wrapping calls with inner try-delegates.
+ for (auto &P : UnwindDestToTryRanges) {
+ NumCallUnwindMismatches += P.second.size();
+ MachineBasicBlock *UnwindDest = P.first;
+ auto &TryRanges = P.second;
+
+ for (auto Range : TryRanges) {
+ MachineInstr *RangeBegin = nullptr, *RangeEnd = nullptr;
+ std::tie(RangeBegin, RangeEnd) = Range;
+ auto *MBB = RangeBegin->getParent();
+
+ // If this BB has an EH pad successor, i.e., ends with an 'invoke', now we
+ // are going to wrap the invoke with try-delegate, making the 'delegate'
+ // BB the new successor instead, so remove the EH pad successor here. The
+ // BB may not have an EH pad successor if calls in this BB throw to the
+ // caller.
+ MachineBasicBlock *EHPad = nullptr;
+ for (auto *Succ : MBB->successors()) {
+ if (Succ->isEHPad()) {
+ EHPad = Succ;
+ break;
+ }
+ }
+ if (EHPad)
+ MBB->removeSuccessor(EHPad);
+
+ addTryDelegate(RangeBegin, RangeEnd, UnwindDest);
+ }
+ }
+
+ return true;
+}
+
+bool WebAssemblyCFGStackify::fixCatchUnwindMismatches(MachineFunction &MF) {
+ // There is another kind of unwind destination mismatch besides call unwind
+ // mismatches, which we will call "catch unwind mismatches". See this example
+ // after the marker placement:
+ // try
+ // try
+ // call @foo
+ // catch __cpp_exception ;; ehpad A (next unwind dest: caller)
+ // ...
+ // end_try
+ // catch_all ;; ehpad B
+ // ...
+ // end_try
+ //
+ // 'call @foo's unwind destination is the ehpad A. But suppose 'call @foo'
+ // throws a foreign exception that is not caught by ehpad A, and its next
+ // destination should be the caller. But after control flow linearization,
+ // another EH pad can be placed in between (e.g. ehpad B here), making the
+ // next unwind destination incorrect. In this case, the foreign exception
+ // will instead go to ehpad B and will be caught there instead. In this
+ // example the correct next unwind destination is the caller, but it can be
+ // another outer catch in other cases.
+ //
+ // There is no specific 'call' or 'throw' instruction to wrap with a
+ // try-delegate, so we wrap the whole try-catch-end with a try-delegate and
+ // make it rethrow to the right destination, as in the example below:
+ // try
+ // try ;; (new)
+ // try
+ // call @foo
+ // catch __cpp_exception ;; ehpad A (next unwind dest: caller)
+ // ...
+ // end_try
+ // delegate 1 (caller) ;; (new)
+ // catch_all ;; ehpad B
+ // ...
+ // end_try
+
+ const auto *EHInfo = MF.getWasmEHFuncInfo();
+ assert(EHInfo);
+ SmallVector<const MachineBasicBlock *, 8> EHPadStack;
+ // For EH pads that have catch unwind mismatches, a map of <EH pad, its
+ // correct unwind destination>.
+ DenseMap<MachineBasicBlock *, MachineBasicBlock *> EHPadToUnwindDest;
+
+ for (auto &MBB : reverse(MF)) {
+ for (auto &MI : reverse(MBB)) {
+ if (MI.getOpcode() == WebAssembly::TRY)
+ EHPadStack.pop_back();
+ else if (MI.getOpcode() == WebAssembly::DELEGATE)
+ EHPadStack.push_back(&MBB);
+ else if (WebAssembly::isCatch(MI.getOpcode())) {
+ auto *EHPad = &MBB;
+
+ // catch_all always catches an exception, so we don't need to do
+ // anything
+ if (MI.getOpcode() == WebAssembly::CATCH_ALL) {
+ }
+
+ // This can happen when the unwind dest was removed during
+ // optimization, e.g. because it was unreachable.
+ else if (EHPadStack.empty() && EHInfo->hasUnwindDest(EHPad)) {
+ LLVM_DEBUG(dbgs() << "EHPad (" << EHPad->getName()
+ << ")'s unwind destination does not exist anymore"
+ << "\n\n");
+ }
+
+ // The EHPad's next unwind destination is the caller, but we incorrectly
+ // unwind to another EH pad.
+ else if (!EHPadStack.empty() && !EHInfo->hasUnwindDest(EHPad)) {
+ EHPadToUnwindDest[EHPad] = getFakeCallerBlock(MF);
+ LLVM_DEBUG(dbgs()
+ << "- Catch unwind mismatch:\nEHPad = " << EHPad->getName()
+ << " Original dest = caller Current dest = "
+ << EHPadStack.back()->getName() << "\n\n");
+ }
+
+ // The EHPad's next unwind destination is an EH pad, whereas we
+ // incorrectly unwind to another EH pad.
+ else if (!EHPadStack.empty() && EHInfo->hasUnwindDest(EHPad)) {
+ auto *UnwindDest = EHInfo->getUnwindDest(EHPad);
+ if (EHPadStack.back() != UnwindDest) {
+ EHPadToUnwindDest[EHPad] = UnwindDest;
+ LLVM_DEBUG(dbgs() << "- Catch unwind mismatch:\nEHPad = "
+ << EHPad->getName() << " Original dest = "
+ << UnwindDest->getName() << " Current dest = "
+ << EHPadStack.back()->getName() << "\n\n");
+ }
+ }
+
+ EHPadStack.push_back(EHPad);
+ }
+ }
+ }
+
+ assert(EHPadStack.empty());
+ if (EHPadToUnwindDest.empty())
+ return false;
+ NumCatchUnwindMismatches += EHPadToUnwindDest.size();
+ SmallPtrSet<MachineBasicBlock *, 4> NewEndTryBBs;
+
+ for (auto &P : EHPadToUnwindDest) {
+ MachineBasicBlock *EHPad = P.first;
+ MachineBasicBlock *UnwindDest = P.second;
+ MachineInstr *Try = EHPadToTry[EHPad];
+ MachineInstr *EndTry = BeginToEnd[Try];
+ addTryDelegate(Try, EndTry, UnwindDest);
+ NewEndTryBBs.insert(EndTry->getParent());
+ }
+
+ // Adding a try-delegate wrapping an existing try-catch-end can make existing
+ // branch destination BBs invalid. For example,
+ //
+ // - Before:
+ // bb0:
+ // block
+ // br bb3
+ // bb1:
+ // try
+ // ...
+ // bb2: (ehpad)
+ // catch
+ // bb3:
+ // end_try
+ // end_block ;; 'br bb3' targets here
+ //
+ // Suppose this try-catch-end has a catch unwind mismatch, so we need to wrap
+ // this with a try-delegate. Then this becomes:
+ //
+ // - After:
+ // bb0:
+ // block
+ // br bb3 ;; invalid destination!
+ // bb1:
+ // try ;; (new instruction)
+ // try
+ // ...
+ // bb2: (ehpad)
+ // catch
+ // bb3:
+ // end_try ;; 'br bb3' still incorrectly targets here!
+ // delegate_bb: ;; (new BB)
+ // delegate ;; (new instruction)
+ // split_bb: ;; (new BB)
+ // end_block
+ //
+ // Now 'br bb3' incorrectly branches to an inner scope.
+ //
+ // As we can see in this case, when branches target a BB that has both
+ // 'end_try' and 'end_block' and the BB is split to insert a 'delegate', we
+ // have to remap existing branch destinations so that they target not the
+ // 'end_try' BB but the new 'end_block' BB. There can be multiple 'delegate's
+ // in between, so we try to find the next BB with an 'end_block' instruction.
+ // In this example, the 'br bb3' instruction should be remapped to
+ // 'br split_bb'.
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ if (MI.isTerminator()) {
+ for (auto &MO : MI.operands()) {
+ if (MO.isMBB() && NewEndTryBBs.count(MO.getMBB())) {
+ auto *BrDest = MO.getMBB();
+ bool FoundEndBlock = false;
+ for (; std::next(BrDest->getIterator()) != MF.end();
+ BrDest = BrDest->getNextNode()) {
+ for (const auto &MI : *BrDest) {
+ if (MI.getOpcode() == WebAssembly::END_BLOCK) {
+ FoundEndBlock = true;
+ break;
+ }
+ }
+ if (FoundEndBlock)
+ break;
+ }
+ assert(FoundEndBlock);
+ MO.setMBB(BrDest);
+ }
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+void WebAssemblyCFGStackify::recalculateScopeTops(MachineFunction &MF) {
+ // Renumber BBs and recalculate ScopeTop info because new BBs might have been
+ // created and inserted during fixing unwind mismatches.
+ MF.RenumberBlocks();
+ ScopeTops.clear();
+ ScopeTops.resize(MF.getNumBlockIDs());
+ for (auto &MBB : reverse(MF)) {
+ for (auto &MI : reverse(MBB)) {
+ if (ScopeTops[MBB.getNumber()])
+ break;
+ switch (MI.getOpcode()) {
+ case WebAssembly::END_BLOCK:
+ case WebAssembly::END_LOOP:
+ case WebAssembly::END_TRY:
+ case WebAssembly::DELEGATE:
+ updateScopeTops(EndToBegin[&MI]->getParent(), &MBB);
+ break;
+ case WebAssembly::CATCH:
+ case WebAssembly::CATCH_ALL:
+ updateScopeTops(EHPadToTry[&MBB]->getParent(), &MBB);
+ break;
+ }
+ }
+ }
+}
+
+/// In normal assembly languages, when the end of a function is unreachable
+/// (because the function ends in an infinite loop, a noreturn call, or
+/// similar), there is no need to worry about the function's return type at
+/// the end of the function, since it is never reached. In WebAssembly,
+/// however, blocks that end at the function end need a return type signature
+/// that matches the function signature, even though that point is
+/// unreachable. This function checks for such cases and fixes up the
+/// signatures.
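+///
+/// For illustration, in the informal wasm notation used elsewhere in this
+/// file, a function returning i32 that ends like
+/// block ;; rewritten by this function to carry the i32 result
+/// loop ;; likewise rewritten if its 'end' is at the function end
+/// br 0 ;; infinite loop; everything below is unreachable
+/// end_loop
+/// end_block
+/// end_function
+/// still needs its markers typed to match the function signature.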
+void WebAssemblyCFGStackify::fixEndsAtEndOfFunction(MachineFunction &MF) {
+ const auto &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+
+ if (MFI.getResults().empty())
+ return;
+
+ // MCInstLower will add the proper types to multivalue signatures based on the
+ // function return type
+ WebAssembly::BlockType RetType =
+ MFI.getResults().size() > 1
+ ? WebAssembly::BlockType::Multivalue
+ : WebAssembly::BlockType(
+ WebAssembly::toValType(MFI.getResults().front()));
+
+ SmallVector<MachineBasicBlock::reverse_iterator, 4> Worklist;
+ Worklist.push_back(MF.rbegin()->rbegin());
+
+ auto Process = [&](MachineBasicBlock::reverse_iterator It) {
+ auto *MBB = It->getParent();
+ while (It != MBB->rend()) {
+ MachineInstr &MI = *It++;
+ if (MI.isPosition() || MI.isDebugInstr())
+ continue;
+ switch (MI.getOpcode()) {
+ case WebAssembly::END_TRY: {
+ // If a 'try''s return type is fixed, both its try body and catch body
+ // should satisfy the return type, so we need to search 'end'
+ // instructions before its corresponding 'catch' too.
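+ // For illustration, in
+ // try i32
+ // ... ;; try body: its trailing 'end's are found via this
+ // ;; extra worklist entry
+ // catch
+ // ... ;; catch body: reached by the main backward scan
+ // end_try
+ // both bodies produce the try's result type.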
+ auto *EHPad = TryToEHPad.lookup(EndToBegin[&MI]);
+ assert(EHPad);
+ auto NextIt =
+ std::next(WebAssembly::findCatch(EHPad)->getReverseIterator());
+ if (NextIt != EHPad->rend())
+ Worklist.push_back(NextIt);
+ [[fallthrough]];
+ }
+ case WebAssembly::END_BLOCK:
+ case WebAssembly::END_LOOP:
+ case WebAssembly::DELEGATE:
+ EndToBegin[&MI]->getOperand(0).setImm(int32_t(RetType));
+ continue;
+ default:
+ // Something other than an `end`. We're done for this BB.
+ return;
+ }
+ }
+ // We've reached the beginning of a BB. Continue the search in the previous
+ // BB.
+ Worklist.push_back(MBB->getPrevNode()->rbegin());
+ };
+
+ while (!Worklist.empty())
+ Process(Worklist.pop_back_val());
+}
+
+// WebAssembly functions end with an end instruction, as if the function body
+// were a block.
+static void appendEndToFunction(MachineFunction &MF,
+ const WebAssemblyInstrInfo &TII) {
+ BuildMI(MF.back(), MF.back().end(),
+ MF.back().findPrevDebugLoc(MF.back().end()),
+ TII.get(WebAssembly::END_FUNCTION));
+}
+
+/// Insert LOOP/TRY/BLOCK markers at appropriate places.
+void WebAssemblyCFGStackify::placeMarkers(MachineFunction &MF) {
+ // We allocate one more than the number of blocks in the function to
+ // accommodate the possible fake block we may insert at the end.
+ ScopeTops.resize(MF.getNumBlockIDs() + 1);
+ // Place the LOOP for MBB if MBB is the header of a loop.
+ for (auto &MBB : MF)
+ placeLoopMarker(MBB);
+
+ const MCAsmInfo *MCAI = MF.getTarget().getMCAsmInfo();
+ for (auto &MBB : MF) {
+ if (MBB.isEHPad()) {
+ // Place the TRY for MBB if MBB is the EH pad of an exception.
+ if (MCAI->getExceptionHandlingType() == ExceptionHandling::Wasm &&
+ MF.getFunction().hasPersonalityFn())
+ placeTryMarker(MBB);
+ } else {
+ // Place the BLOCK for MBB if MBB is branched to from above.
+ placeBlockMarker(MBB);
+ }
+ }
+ // Fix mismatches in unwind destinations induced by linearizing the code.
+ if (MCAI->getExceptionHandlingType() == ExceptionHandling::Wasm &&
+ MF.getFunction().hasPersonalityFn()) {
+ bool Changed = fixCallUnwindMismatches(MF);
+ Changed |= fixCatchUnwindMismatches(MF);
+ if (Changed)
+ recalculateScopeTops(MF);
+ }
+}
+
+unsigned WebAssemblyCFGStackify::getBranchDepth(
+ const SmallVectorImpl<EndMarkerInfo> &Stack, const MachineBasicBlock *MBB) {
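+ // A wasm branch immediate counts how many enclosing scopes to skip, with 0
+ // denoting the innermost one. Stack holds the currently open scopes
+ // innermost-last, so the depth is the number of entries popped before
+ // reaching MBB's marker. E.g. with open scopes [A, B] (B innermost), a
+ // branch targeting A has depth 1.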
+ unsigned Depth = 0;
+ for (auto X : reverse(Stack)) {
+ if (X.first == MBB)
+ break;
+ ++Depth;
+ }
+ assert(Depth < Stack.size() && "Branch destination should be in scope");
+ return Depth;
+}
+
+unsigned WebAssemblyCFGStackify::getDelegateDepth(
+ const SmallVectorImpl<EndMarkerInfo> &Stack, const MachineBasicBlock *MBB) {
+ if (MBB == FakeCallerBB)
+ return Stack.size();
+ // A delegate's destination is either a catch BB or another delegate BB. When
+ // the destination is another delegate, we can compute the argument in the
+ // same way as for branches, because the target delegate BB contains only the
+ // single delegate instruction.
+ if (!MBB->isEHPad()) // Target is a delegate BB
+ return getBranchDepth(Stack, MBB);
+
+ // When the delegate's destination is a catch BB, we need to use its
+ // corresponding try's end_try BB because Stack contains each marker's end BB.
+ // Also we need to check if the end marker instruction matches, because a
+ // single BB can contain multiple end markers, like this:
+ // bb:
+ // END_BLOCK
+ // END_TRY
+ // END_BLOCK
+ // END_TRY
+ // ...
+ //
+ // In case of branches getting the immediate that targets any of these is
+ // fine, but delegate has to exactly target the correct try.
+ unsigned Depth = 0;
+ const MachineInstr *EndTry = BeginToEnd[EHPadToTry[MBB]];
+ for (auto X : reverse(Stack)) {
+ if (X.first == EndTry->getParent() && X.second == EndTry)
+ break;
+ ++Depth;
+ }
+ assert(Depth < Stack.size() && "Delegate destination should be in scope");
+ return Depth;
+}
+
+unsigned WebAssemblyCFGStackify::getRethrowDepth(
+ const SmallVectorImpl<EndMarkerInfo> &Stack,
+ const SmallVectorImpl<const MachineBasicBlock *> &EHPadStack) {
+ unsigned Depth = 0;
+ // In our current implementation, rethrows always rethrow the exception caught
+ // by the innermost enclosing catch. This means while traversing Stack in the
+ // reverse direction, when we encounter END_TRY, we should check if the
+ // END_TRY corresponds to the current innermost EH pad. For example:
+ // try
+ // ...
+ // catch ;; (a)
+ // try
+ // rethrow 1 ;; (b)
+ // catch ;; (c)
+ // rethrow 0 ;; (d)
+ // end ;; (e)
+ // end ;; (f)
+ //
+ // When we are at 'rethrow' (d), while reversely traversing Stack the first
+ // 'end' we encounter is the 'end' (e), which corresponds to the 'catch' (c).
+ // And 'rethrow' (d) rethrows the exception caught by 'catch' (c), so we stop
+ // there and the depth should be 0. But when we are at 'rethrow' (b), it
+ // rethrows the exception caught by 'catch' (a), so when traversing Stack
+ // reversely, we should skip the 'end' (e) and choose 'end' (f), which
+ // corresponds to 'catch' (a).
+ for (auto X : reverse(Stack)) {
+ const MachineInstr *End = X.second;
+ if (End->getOpcode() == WebAssembly::END_TRY) {
+ auto *EHPad = TryToEHPad[EndToBegin[End]];
+ if (EHPadStack.back() == EHPad)
+ break;
+ }
+ ++Depth;
+ }
+ assert(Depth < Stack.size() && "Rethrow destination should be in scope");
+ return Depth;
+}
+
+void WebAssemblyCFGStackify::rewriteDepthImmediates(MachineFunction &MF) {
+ // Now rewrite references to basic blocks to be depth immediates.
+ SmallVector<EndMarkerInfo, 8> Stack;
+ SmallVector<const MachineBasicBlock *, 8> EHPadStack;
+ for (auto &MBB : reverse(MF)) {
+ for (MachineInstr &MI : llvm::reverse(MBB)) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::BLOCK:
+ case WebAssembly::TRY:
+ assert(ScopeTops[Stack.back().first->getNumber()]->getNumber() <=
+ MBB.getNumber() &&
+ "Block/try marker should be balanced");
+ Stack.pop_back();
+ break;
+
+ case WebAssembly::LOOP:
+ assert(Stack.back().first == &MBB && "Loop top should be balanced");
+ Stack.pop_back();
+ break;
+
+ case WebAssembly::END_BLOCK:
+ Stack.push_back(std::make_pair(&MBB, &MI));
+ break;
+
+ case WebAssembly::END_TRY: {
+ // We handle DELEGATE in the default case below, because DELEGATE has
+ // immediate operands to rewrite.
+ Stack.push_back(std::make_pair(&MBB, &MI));
+ auto *EHPad = TryToEHPad[EndToBegin[&MI]];
+ EHPadStack.push_back(EHPad);
+ break;
+ }
+
+ case WebAssembly::END_LOOP:
+ Stack.push_back(std::make_pair(EndToBegin[&MI]->getParent(), &MI));
+ break;
+
+ case WebAssembly::CATCH:
+ case WebAssembly::CATCH_ALL:
+ EHPadStack.pop_back();
+ break;
+
+ case WebAssembly::RETHROW:
+ MI.getOperand(0).setImm(getRethrowDepth(Stack, EHPadStack));
+ break;
+
+ default:
+ if (MI.isTerminator()) {
+ // Rewrite MBB operands to be depth immediates.
+ SmallVector<MachineOperand, 4> Ops(MI.operands());
+ while (MI.getNumOperands() > 0)
+ MI.removeOperand(MI.getNumOperands() - 1);
+ for (auto MO : Ops) {
+ if (MO.isMBB()) {
+ if (MI.getOpcode() == WebAssembly::DELEGATE)
+ MO = MachineOperand::CreateImm(
+ getDelegateDepth(Stack, MO.getMBB()));
+ else
+ MO = MachineOperand::CreateImm(
+ getBranchDepth(Stack, MO.getMBB()));
+ }
+ MI.addOperand(MF, MO);
+ }
+ }
+
+ if (MI.getOpcode() == WebAssembly::DELEGATE)
+ Stack.push_back(std::make_pair(&MBB, &MI));
+ break;
+ }
+ }
+ }
+ assert(Stack.empty() && "Control flow should be balanced");
+}
+
+void WebAssemblyCFGStackify::cleanupFunctionData(MachineFunction &MF) {
+ if (FakeCallerBB)
+ MF.deleteMachineBasicBlock(FakeCallerBB);
+ AppendixBB = FakeCallerBB = nullptr;
+}
+
+void WebAssemblyCFGStackify::releaseMemory() {
+ ScopeTops.clear();
+ BeginToEnd.clear();
+ EndToBegin.clear();
+ TryToEHPad.clear();
+ EHPadToTry.clear();
+}
+
+bool WebAssemblyCFGStackify::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG(dbgs() << "********** CFG Stackifying **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+ const MCAsmInfo *MCAI = MF.getTarget().getMCAsmInfo();
+
+ releaseMemory();
+
+ // Liveness is not tracked for VALUE_STACK physreg.
+ MF.getRegInfo().invalidateLiveness();
+
+ // Place the BLOCK/LOOP/TRY markers to indicate the beginnings of scopes.
+ placeMarkers(MF);
+
+ // Remove unnecessary instructions possibly introduced by try/end_trys.
+ if (MCAI->getExceptionHandlingType() == ExceptionHandling::Wasm &&
+ MF.getFunction().hasPersonalityFn())
+ removeUnnecessaryInstrs(MF);
+
+ // Convert MBB operands in terminators to relative depth immediates.
+ rewriteDepthImmediates(MF);
+
+ // Fix up block/loop/try signatures at the end of the function to conform to
+ // WebAssembly's rules.
+ fixEndsAtEndOfFunction(MF);
+
+ // Add an end instruction at the end of the function body.
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ if (!MF.getSubtarget<WebAssemblySubtarget>()
+ .getTargetTriple()
+ .isOSBinFormatELF())
+ appendEndToFunction(MF, TII);
+
+ cleanupFunctionData(MF);
+
+ MF.getInfo<WebAssemblyFunctionInfo>()->setCFGStackified();
+ return true;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyDebugFixup.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyDebugFixup.cpp
new file mode 100644
index 000000000000..4a75bab6b95d
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyDebugFixup.cpp
@@ -0,0 +1,161 @@
+//===-- WebAssemblyDebugFixup.cpp - Debug Fixup ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Several prior passes may "stackify" registers; here we ensure that any
+/// references to such registers in debug_value instructions become stack
+/// relative as well.
+/// This is done in a separate pass such that not all previous passes need to
+/// track stack depth when values get stackified.
+///
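+/// For illustration (a sketch, not exact MIR syntax): a DBG_VALUE such as
+/// DBG_VALUE %5, !"x", !DIExpression()
+/// whose %5 has been stackified is rewritten here into a TI_OPERAND_STACK
+/// target index carrying the value's depth on the wasm operand stack, so the
+/// debugger can still locate the value without a register.
+///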
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/ADT/SCCIterator.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-debug-fixup"
+
+namespace {
+class WebAssemblyDebugFixup final : public MachineFunctionPass {
+ StringRef getPassName() const override { return "WebAssembly Debug Fixup"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyDebugFixup() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyDebugFixup::ID = 0;
+INITIALIZE_PASS(
+ WebAssemblyDebugFixup, DEBUG_TYPE,
+ "Ensures debug_value's that have been stackified become stack relative",
+ false, false)
+
+FunctionPass *llvm::createWebAssemblyDebugFixup() {
+ return new WebAssemblyDebugFixup();
+}
+
+// At this very end of the compilation pipeline, if any DBG_VALUEs with
+// registers remain, they are dangling info that we failed to update when the
+// corresponding def instruction was transformed, moved, split, etc. Because
+// Wasm cannot access values in LLVM virtual registers in the debugger, such a
+// dangling DBG_VALUE in effect cancels any previous DBG_VALUE associated with
+// the variable, which will then appear as "optimized out".
+static void setDanglingDebugValuesUndef(MachineBasicBlock &MBB,
+ const TargetInstrInfo *TII) {
+ for (auto &MI : llvm::make_early_inc_range(MBB)) {
+ if (MI.isDebugValue() && MI.getDebugOperand(0).isReg() &&
+ !MI.isUndefDebugValue()) {
+ LLVM_DEBUG(dbgs() << "Warning: dangling DBG_VALUE set to undef: " << MI
+ << "\n");
+ MI.setDebugValueUndef();
+ }
+ }
+}
+
+bool WebAssemblyDebugFixup::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG(dbgs() << "********** Debug Fixup **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+
+ struct StackElem {
+ unsigned Reg;
+ MachineInstr *DebugValue;
+ };
+ std::vector<StackElem> Stack;
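+ // Stack mirrors the wasm operand stack: a stackified def pushes an entry
+ // and a stackified use pops one, in execution order. For illustration:
+ // %0 = CONST_I32 1 ;; push %0 (depth 0)
+ // %1 = CONST_I32 2 ;; push %1 (depth 1)
+ // DBG_VALUE %1, ... ;; rewritten below to stack-relative depth 1
+ // %2 = ADD_I32 %0, %1 ;; pops %1 then %0, then pushes %2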
+ for (MachineBasicBlock &MBB : MF) {
+ // We may insert into this list.
+ for (auto MII = MBB.begin(); MII != MBB.end(); ++MII) {
+ MachineInstr &MI = *MII;
+ if (MI.isDebugValue()) {
+ auto &MO = MI.getOperand(0);
+ // Also check if not a $noreg: likely a DBG_VALUE we just inserted.
+ if (MO.isReg() && MO.getReg().isValid() &&
+ MFI.isVRegStackified(MO.getReg())) {
+ // Found a DBG_VALUE with a stackified register we will
+ // change into a stack operand.
+ // Search for the register rather than assuming it is on top (which
+ // it typically is if it appears right after the def), since
+ // DBG_VALUEs may shift under some circumstances.
+ for (auto &Elem : reverse(Stack)) {
+ if (MO.getReg() == Elem.Reg) {
+ auto Depth = static_cast<unsigned>(&Elem - &Stack[0]);
+ LLVM_DEBUG(dbgs() << "Debug Value VReg " << MO.getReg()
+ << " -> Stack Relative " << Depth << "\n");
+ MO.ChangeToTargetIndex(WebAssembly::TI_OPERAND_STACK, Depth);
+ // Save the DBG_VALUE instruction that defined this stackified
+ // variable since later we need it to construct another one on
+ // pop.
+ Elem.DebugValue = &MI;
+ break;
+ }
+ }
+ // If the Reg was not found, we have a DBG_VALUE outside of its
+ // def-use range, and we leave it unmodified as reg, which means
+ // it will be culled later.
+ }
+ } else {
+ // Track stack depth.
+ for (MachineOperand &MO : reverse(MI.explicit_uses())) {
+ if (MO.isReg() && MFI.isVRegStackified(MO.getReg())) {
+ auto Prev = Stack.back();
+ Stack.pop_back();
+ assert(Prev.Reg == MO.getReg() &&
+ "WebAssemblyDebugFixup: Pop: Register not matched!");
+ // We should not put a DBG_VALUE after a terminator; debug ranges
+ // are terminated at the end of a BB anyway.
+ if (Prev.DebugValue && !MI.isTerminator()) {
+ // This stackified reg is a variable that started life at
+ // Prev.DebugValue, so now that we're popping it we must insert
+ // a $noreg DBG_VALUE for the variable to end it, right after
+ // the current instruction.
+ BuildMI(*Prev.DebugValue->getParent(), std::next(MII),
+ Prev.DebugValue->getDebugLoc(),
+ TII->get(WebAssembly::DBG_VALUE), false, Register(),
+ Prev.DebugValue->getOperand(2).getMetadata(),
+ Prev.DebugValue->getOperand(3).getMetadata());
+ }
+ }
+ }
+ for (MachineOperand &MO : MI.defs()) {
+ if (MO.isReg() && MFI.isVRegStackified(MO.getReg())) {
+ Stack.push_back({MO.getReg(), nullptr});
+ }
+ }
+ }
+ }
+ assert(Stack.empty() &&
+ "WebAssemblyDebugFixup: Stack not empty at end of basic block!");
+
+ setDanglingDebugValuesUndef(MBB, TII);
+ }
+
+ return true;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp
new file mode 100644
index 000000000000..fd510f85a8a3
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp
@@ -0,0 +1,415 @@
+//===-- WebAssemblyDebugValueManager.cpp - WebAssembly DebugValue Manager -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the manager for MachineInstr DebugValues.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyDebugValueManager.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+
+using namespace llvm;
+
+WebAssemblyDebugValueManager::WebAssemblyDebugValueManager(MachineInstr *Def)
+ : Def(Def) {
+ // This code differs from MachineInstr::collectDebugValues in that it scans
+ // the whole BB, not just contiguous DBG_VALUEs, until another definition to
+ // the same register is encountered.
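+ // For illustration:
+ // %0 = someinst // Def
+ // DBG_VALUE %0, ... // collected
+ // %1 = otherinst // scanned past; does not redefine %0
+ // DBG_VALUE %0, ... // also collected, though not contiguous
+ // %0 = someinst // redefinition of %0: the scan stops here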
+ if (!Def->getOperand(0).isReg())
+ return;
+ CurrentReg = Def->getOperand(0).getReg();
+
+ for (MachineBasicBlock::iterator MI = std::next(Def->getIterator()),
+ ME = Def->getParent()->end();
+ MI != ME; ++MI) {
+ // If another definition appears, stop
+ if (MI->definesRegister(CurrentReg))
+ break;
+ if (MI->isDebugValue() && MI->hasDebugOperandForReg(CurrentReg))
+ DbgValues.push_back(&*MI);
+ }
+}
+
+// Returns true if both A and B are the same CONST_I32/I64/F32/F64 instructions.
+// Doesn't include CONST_V128.
+static bool isSameScalarConst(const MachineInstr *A, const MachineInstr *B) {
+ if (A->getOpcode() != B->getOpcode() ||
+ !WebAssembly::isScalarConst(A->getOpcode()) ||
+ !WebAssembly::isScalarConst(B->getOpcode()))
+ return false;
+ const MachineOperand &OpA = A->getOperand(1), &OpB = B->getOperand(1);
+ if ((OpA.isImm() && OpB.isImm() && OpA.getImm() == OpB.getImm()) ||
+ (OpA.isFPImm() && OpB.isFPImm() && OpA.getFPImm() == OpB.getFPImm()) ||
+ (OpA.isGlobal() && OpB.isGlobal() && OpA.getGlobal() == OpB.getGlobal()))
+ return true;
+ return false;
+}
+
+SmallVector<MachineInstr *, 1>
+WebAssemblyDebugValueManager::getSinkableDebugValues(
+ MachineInstr *Insert) const {
+ if (DbgValues.empty())
+ return {};
+ // DBG_VALUEs between Def and Insert
+ SmallVector<MachineInstr *, 8> DbgValuesInBetween;
+
+ if (Def->getParent() == Insert->getParent()) {
+ // When Def and Insert are within the same BB, check if Insert comes after
+ // Def, because we only support sinking.
+ bool DefFirst = false;
+ for (MachineBasicBlock::iterator MI = std::next(Def->getIterator()),
+ ME = Def->getParent()->end();
+ MI != ME; ++MI) {
+ if (&*MI == Insert) {
+ DefFirst = true;
+ break;
+ }
+ if (MI->isDebugValue())
+ DbgValuesInBetween.push_back(&*MI);
+ }
+ if (!DefFirst) // Not a sink
+ return {};
+
+ } else { // Def and Insert are in different BBs
+ // If Def and Insert are in different BBs, we only handle a simple case in
+ // which Insert's BB is a successor of Def's BB.
+ if (!Def->getParent()->isSuccessor(Insert->getParent()))
+ return {};
+
+ // Gather DBG_VALUEs between Def and the end of Def's BB, and between the
+ // beginning of Insert's BB and Insert.
+ for (MachineBasicBlock::iterator MI = std::next(Def->getIterator()),
+ ME = Def->getParent()->end();
+ MI != ME; ++MI) {
+ if (MI->isDebugValue())
+ DbgValuesInBetween.push_back(&*MI);
+ }
+ for (MachineBasicBlock::iterator MI = Insert->getParent()->begin(),
+ ME = Insert->getIterator();
+ MI != ME; ++MI) {
+ if (MI->isDebugValue())
+ DbgValuesInBetween.push_back(&*MI);
+ }
+ }
+
+ // Gather DebugVariables that are seen between Def and Insert, excluding our
+ // own DBG_VALUEs in DbgValues.
+ SmallDenseMap<DebugVariable, SmallVector<MachineInstr *, 2>>
+ SeenDbgVarToDbgValues;
+ for (auto *DV : DbgValuesInBetween) {
+ if (!llvm::is_contained(DbgValues, DV)) {
+ DebugVariable Var(DV->getDebugVariable(), DV->getDebugExpression(),
+ DV->getDebugLoc()->getInlinedAt());
+ SeenDbgVarToDbgValues[Var].push_back(DV);
+ }
+ }
+
+ // Gather sinkable DBG_VALUEs. We should not sink a DBG_VALUE if there is
+ // another DBG_VALUE between Def and Insert referring to the same
+ // DebugVariable. For example,
+ // %0 = someinst
+ // DBG_VALUE %0, !"a", !DIExpression() // Should not sink with %0
+ // %1 = anotherinst
+ // DBG_VALUE %1, !"a", !DIExpression()
+ // If %0 were to sink, its DBG_VALUE should not sink with it, as that
+ // would re-order the assignments.
+ SmallVector<MachineInstr *, 1> SinkableDbgValues;
+ MachineRegisterInfo &MRI = Def->getParent()->getParent()->getRegInfo();
+ for (auto *DV : DbgValues) {
+ DebugVariable Var(DV->getDebugVariable(), DV->getDebugExpression(),
+ DV->getDebugLoc()->getInlinedAt());
+ auto It = SeenDbgVarToDbgValues.find(Var);
+ if (It == SeenDbgVarToDbgValues.end()) {
+ SinkableDbgValues.push_back(DV);
+ continue;
+ }
+ if (!WebAssembly::isScalarConst(Def->getOpcode()))
+ continue;
+ auto &OverlappingDbgValues = It->second;
+ bool Sinkable = true;
+ for (auto *OverlappingDV : OverlappingDbgValues) {
+ MachineOperand &DbgOp = OverlappingDV->getDebugOperand(0);
+ if (!DbgOp.isReg()) {
+ Sinkable = false;
+ break;
+ }
+ Register OtherReg = DbgOp.getReg();
+ MachineInstr *OtherDef = MRI.getUniqueVRegDef(OtherReg);
+ // We make an exception and allow encountering other DBG_VALUEs with the
+ // same DebugVariable, but only when they refer to the same scalar CONST
+ // instruction. For example,
+ // %0 = CONST_I32 1
+ // DBG_VALUE %0, !"a", !DIExpression() // Can sink with %0
+ // %1 = CONST_I32 1
+ // DBG_VALUE %1, !"a", !DIExpression()
+ // If %0 were to be sunk/cloned, its DBG_VALUE can be sunk/cloned with it
+ // because, even though the second DBG_VALUE refers to the same
+ // DebugVariable, its value in effect is the same CONST instruction.
+ //
+ // This is to allow a case that can happen with RegStackify's
+ // "rematerializeCheapDef". For example, we have this program with two
+ // BBs:
+ // bb0:
+ // %0 = CONST_I32 1
+ // DBG_VALUE %0, !"a", ...
+ // ...
+ // INST0 ..., $0 ...
+ // bb1:
+ // INST1 ..., $0 ...
+ // INST2 ..., $0 ...
+ //
+ // We process bb0 first. Because %0 is used multiple times, %0 is cloned
+ // before INST0:
+ // bb0:
+ // %0 = CONST_I32 1
+ // DBG_VALUE %0, !"a", ...
+ // ...
+ // %1 = CONST_I32 1
+ // DBG_VALUE %1, !"a", ...
+ // INST0 ..., $1 ...
+ //
+ // And when we process bb1, we clone %0 and its DBG_VALUE again:
+ // bb0:
+ // %0 = CONST_I32 1
+ // DBG_VALUE %0, !"a", ...
+ // ...
+ // %1 = CONST_I32 1
+ // DBG_VALUE %1, !"a", ...
+ // INST0 ..., $1 ...
+ // bb1:
+ // %2 = CONST_I32 1
+ // DBG_VALUE %2, !"a", ... // !!!
+ // INST1 ..., $2 ...
+ // %3 = CONST_I32 1
+ // DBG_VALUE %3, !"a", ... // !!!
+ // INST2 ..., $3 ...
+ //
+ // Without this exception, the cloned DBG_VALUEs marked with !!! could not
+ // be cloned, because there is a previously cloned 'DBG_VALUE %1, !"a"' at
+ // the end of bb0 referring to the same DebugVariable "a". In this case,
+ // however, they are OK to clone, because the interfering DBG_VALUE points
+ // to the same 'CONST_I32 1', having been cloned from the same instruction.
+ if (!OtherDef || !isSameScalarConst(Def, OtherDef)) {
+ Sinkable = false;
+ break;
+ }
+ }
+ if (Sinkable)
+ SinkableDbgValues.push_back(DV);
+ }
+ return SinkableDbgValues;
+}
+
+// Returns true if the insertion point is the same as the current place.
+// Following DBG_VALUEs for 'Def' are ignored.
+bool WebAssemblyDebugValueManager::isInsertSamePlace(
+ MachineInstr *Insert) const {
+ if (Def->getParent() != Insert->getParent())
+ return false;
+ for (MachineBasicBlock::iterator MI = std::next(Def->getIterator()),
+ ME = Insert;
+ MI != ME; ++MI) {
+ if (!llvm::is_contained(DbgValues, MI)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Returns true if any instruction in MBB has the same debug location as DL.
+// Also returns true if DL is an empty location.
+static bool hasSameDebugLoc(const MachineBasicBlock *MBB, DebugLoc DL) {
+ for (const auto &MI : *MBB)
+ if (MI.getDebugLoc() == DL)
+ return true;
+ return false;
+}
+
+// Sink 'Def', and also sink its eligible DBG_VALUEs to the place before
+// 'Insert'. Convert the original DBG_VALUEs into undefs.
+//
+// For DBG_VALUEs to sink properly, if 'Def' and 'Insert' are within the same
+// BB, 'Insert' should be below 'Def'; if they are in different BBs, 'Insert'
+// should be in one of 'Def's BBs successors. Def will be sunk regardless of the
+// location.
+//
+// This DebugValueManager's Def and DbgValues will be updated to the newly
+// sunk Def + DBG_VALUEs.
+void WebAssemblyDebugValueManager::sink(MachineInstr *Insert) {
+ // If Def is requested to be sunk to the same place, we don't need to do
+ // anything. If we actually did the sink, it would create unnecessary undef
+ // DBG_VALUEs. For example, if the original code is:
+ // %0 = someinst // Def
+ // DBG_VALUE %0, ...
+ // %1 = anotherinst // Insert
+ //
+ // then actually sinking %0 and the following DBG_VALUE, and setting the
+ // original DBG_VALUE to undef, would produce:
+ // DBG_VALUE %noreg, ... // Unnecessary!
+ // %0 = someinst // Def
+ // DBG_VALUE %0, ...
+ // %1 = anotherinst // Insert
+ if (isInsertSamePlace(Insert))
+ return;
+
+ MachineBasicBlock *MBB = Insert->getParent();
+ MachineFunction *MF = MBB->getParent();
+
+ // Get the list of sinkable DBG_VALUEs. This should be done before sinking
+ // Def, because we need to examine instructions between Def and Insert.
+ SmallVector<MachineInstr *, 1> SinkableDbgValues =
+ getSinkableDebugValues(Insert);
+
+ // Sink Def first.
+ //
+ // When moving to a different BB, we preserve the debug loc only if the
+ // destination BB contains the same location. See
+ // https://llvm.org/docs/HowToUpdateDebugInfo.html#when-to-preserve-an-instruction-location.
+ if (Def->getParent() != MBB && !hasSameDebugLoc(MBB, Def->getDebugLoc()))
+ Def->setDebugLoc(DebugLoc());
+ MBB->splice(Insert, Def->getParent(), Def);
+
+ if (DbgValues.empty())
+ return;
+
+ // Clone sinkable DBG_VALUEs and insert them.
+ SmallVector<MachineInstr *, 1> NewDbgValues;
+ for (MachineInstr *DV : SinkableDbgValues) {
+ MachineInstr *Clone = MF->CloneMachineInstr(DV);
+ MBB->insert(Insert, Clone);
+ NewDbgValues.push_back(Clone);
+ }
+
+ // When sinking a Def and its DBG_VALUEs, we shouldn't just remove the
+ // original DBG_VALUE instructions; we should set them to undef so as not to
+ // create an impossible combination of variable assignments in the program.
+ // For example, this is the original program in order:
+ // %0 = CONST_I32 0
+ // DBG_VALUE %0, !"a", !DIExpression() // a = 0, b = ?
+ // %1 = CONST_I32 1
+ // DBG_VALUE %1, !"b", !DIExpression() // a = 0, b = 1
+ // %2 = CONST_I32 2
+ // DBG_VALUE %2, !"a", !DIExpression() // a = 2, b = 1
+ // %3 = CONST_I32 3
+ // DBG_VALUE %3, !"b", !DIExpression() // a = 2, b = 3
+ //
+ // If %2 were to sink below %3 and we just sank DBG_VALUE %2 with it, the
+ // debug info would show "b" updated to 3 while "a" still appears to be 0,
+ // creating the variable assignment combination of (a = 0, b = 3), which is
+ // not possible in the original program:
+ // %0 = CONST_I32 0
+ // DBG_VALUE %0, !"a", !DIExpression() // a = 0, b = ?
+ // %1 = CONST_I32 1
+ // DBG_VALUE %1, !"b", !DIExpression() // a = 0, b = 1
+ // %3 = CONST_I32 3
+ // DBG_VALUE %3, !"b", !DIExpression() // a = 0, b = 3 (Incorrect!)
+ // %2 = CONST_I32 2
+ // DBG_VALUE %2, !"a", !DIExpression() // a = 2, b = 3
+ //
+ // To fix this, we leave an undef DBG_VALUE in its original place, so that
+ // the result will be:
+ // %0 = CONST_I32 0
+ // DBG_VALUE %0, !"a", !DIExpression() // a = 0, b = ?
+ // %1 = CONST_I32 1
+ // DBG_VALUE %1, !"b", !DIExpression() // a = 0, b = 1
+ // DBG_VALUE $noreg, !"a", !DIExpression() // a = ?, b = 1
+ // %3 = CONST_I32 3
+ // DBG_VALUE %3, !"b", !DIExpression() // a = ?, b = 3
+ // %2 = CONST_I32 2
+ // DBG_VALUE %2, !"a", !DIExpression() // a = 2, b = 3
+ // Now in the middle "a" will be shown as "optimized out", but it wouldn't
+ // show the impossible combination of (a = 0, b = 3).
+ for (MachineInstr *DV : DbgValues)
+ DV->setDebugValueUndef();
+
+ DbgValues.swap(NewDbgValues);
+}
+
+// Clone 'Def', and also clone its eligible DBG_VALUEs to the place before
+// 'Insert'.
+//
+// For DBG_VALUEs to be cloned properly, if 'Def' and 'Insert' are within the
+// same BB, 'Insert' should be below 'Def'; if they are in different BBs,
+// 'Insert' should be in one of 'Def's BBs successors. Def will be cloned
+// regardless of the location.
+//
+// If NewReg is not $noreg, the newly cloned DBG_VALUEs will have the new
+// register as their operand.
+void WebAssemblyDebugValueManager::cloneSink(MachineInstr *Insert,
+ Register NewReg,
+ bool CloneDef) const {
+ MachineBasicBlock *MBB = Insert->getParent();
+ MachineFunction *MF = MBB->getParent();
+
+ SmallVector<MachineInstr *> SinkableDbgValues =
+ getSinkableDebugValues(Insert);
+
+ // Clone Def first.
+ if (CloneDef) {
+ MachineInstr *Clone = MF->CloneMachineInstr(Def);
+ // When cloning to a different BB, we preserve the debug loc only if the
+ // destination BB contains the same location. See
+ // https://llvm.org/docs/HowToUpdateDebugInfo.html#when-to-preserve-an-instruction-location.
+ if (Def->getParent() != MBB && !hasSameDebugLoc(MBB, Def->getDebugLoc()))
+ Clone->setDebugLoc(DebugLoc());
+ if (NewReg != CurrentReg && NewReg.isValid())
+ Clone->getOperand(0).setReg(NewReg);
+ MBB->insert(Insert, Clone);
+ }
+
+ if (DbgValues.empty())
+ return;
+
+ // Clone sinkable DBG_VALUEs and insert them.
+ SmallVector<MachineInstr *, 1> NewDbgValues;
+ for (MachineInstr *DV : SinkableDbgValues) {
+ MachineInstr *Clone = MF->CloneMachineInstr(DV);
+ MBB->insert(Insert, Clone);
+ NewDbgValues.push_back(Clone);
+ }
+
+ if (NewReg != CurrentReg && NewReg.isValid())
+ for (auto *DBI : NewDbgValues)
+ for (auto &MO : DBI->getDebugOperandsForReg(CurrentReg))
+ MO.setReg(NewReg);
+}
+
+// Update the register for Def and DBG_VALUEs.
+void WebAssemblyDebugValueManager::updateReg(Register Reg) {
+ if (Reg != CurrentReg && Reg.isValid()) {
+ for (auto *DBI : DbgValues)
+ for (auto &MO : DBI->getDebugOperandsForReg(CurrentReg))
+ MO.setReg(Reg);
+ CurrentReg = Reg;
+ Def->getOperand(0).setReg(Reg);
+ }
+}
+
+void WebAssemblyDebugValueManager::replaceWithLocal(unsigned LocalId) {
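+ // For illustration: each DBG_VALUE's register operand for CurrentReg is
+ // changed to a TI_LOCAL (or TI_LOCAL_INDIRECT, for indirect debug values)
+ // target index carrying LocalId, since the value now lives in a wasm local.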
+ for (auto *DBI : DbgValues) {
+ auto IndexType = DBI->isIndirectDebugValue()
+ ? llvm::WebAssembly::TI_LOCAL_INDIRECT
+ : llvm::WebAssembly::TI_LOCAL;
+ for (auto &MO : DBI->getDebugOperandsForReg(CurrentReg))
+ MO.ChangeToTargetIndex(IndexType, LocalId);
+ }
+}
+
+// Remove Def, and set its DBG_VALUEs to undef.
+void WebAssemblyDebugValueManager::removeDef() {
+ Def->removeFromParent();
+ for (MachineInstr *DV : DbgValues)
+ DV->setDebugValueUndef();
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.h
new file mode 100644
index 000000000000..9ef3da758947
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.h
@@ -0,0 +1,57 @@
+// WebAssemblyDebugValueManager.h - WebAssembly DebugValue Manager -*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the declaration of the WebAssembly-specific
+/// manager for DebugValues associated with the specific MachineInstr.
+/// This class currently does not handle DBG_VALUE_LISTs; they are assumed to
+/// have been set to undef in the NullifyDebugValueLists pass.
+/// TODO: Handle DBG_VALUE_LIST.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYDEBUGVALUEMANAGER_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYDEBUGVALUEMANAGER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/Register.h"
+
+namespace llvm {
+
+class MachineInstr;
+
+class WebAssemblyDebugValueManager {
+ MachineInstr *Def;
+ SmallVector<MachineInstr *, 1> DbgValues;
+ Register CurrentReg;
+ SmallVector<MachineInstr *, 1>
+ getSinkableDebugValues(MachineInstr *Insert) const;
+ bool isInsertSamePlace(MachineInstr *Insert) const;
+
+public:
+ WebAssemblyDebugValueManager(MachineInstr *Def);
+
+ // Sink 'Def', and also sink its eligible DBG_VALUEs to the place before
+ // 'Insert'. Convert the original DBG_VALUEs into undefs.
+ void sink(MachineInstr *Insert);
+ // Clone 'Def' (optionally), and also clone its eligible DBG_VALUEs to the
+ // place before 'Insert'.
+ void cloneSink(MachineInstr *Insert, Register NewReg = Register(),
+ bool CloneDef = true) const;
+ // Update the register for Def and DBG_VALUEs.
+ void updateReg(Register Reg);
+ // Replace the current register in DBG_VALUEs with the given LocalId target
+ // index.
+ void replaceWithLocal(unsigned LocalId);
+ // Remove Def, and set its DBG_VALUEs to undef.
+ void removeDef();
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp
new file mode 100644
index 000000000000..8deac76b2bc3
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp
@@ -0,0 +1,371 @@
+//===--- WebAssemblyExceptionInfo.cpp - Exception Information -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements WebAssemblyException information analysis.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyExceptionInfo.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/CodeGen/MachineDominanceFrontier.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/WasmEHFuncInfo.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-exception-info"
+
+char WebAssemblyExceptionInfo::ID = 0;
+
+INITIALIZE_PASS_BEGIN(WebAssemblyExceptionInfo, DEBUG_TYPE,
+ "WebAssembly Exception Information", true, true)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MachineDominanceFrontier)
+INITIALIZE_PASS_END(WebAssemblyExceptionInfo, DEBUG_TYPE,
+ "WebAssembly Exception Information", true, true)
+
+bool WebAssemblyExceptionInfo::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG(dbgs() << "********** Exception Info Calculation **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+ releaseMemory();
+ if (MF.getTarget().getMCAsmInfo()->getExceptionHandlingType() !=
+ ExceptionHandling::Wasm ||
+ !MF.getFunction().hasPersonalityFn())
+ return false;
+ auto &MDT = getAnalysis<MachineDominatorTree>();
+ auto &MDF = getAnalysis<MachineDominanceFrontier>();
+ recalculate(MF, MDT, MDF);
+ LLVM_DEBUG(dump());
+ return false;
+}
+
+// Check if Dst is reachable from Src using a worklist search. Search only
+// within BBs dominated by Header.
+static bool isReachableAmongDominated(const MachineBasicBlock *Src,
+ const MachineBasicBlock *Dst,
+ const MachineBasicBlock *Header,
+ const MachineDominatorTree &MDT) {
+ assert(MDT.dominates(Header, Dst));
+ SmallVector<const MachineBasicBlock *, 8> WL;
+ SmallPtrSet<const MachineBasicBlock *, 8> Visited;
+ WL.push_back(Src);
+
+ while (!WL.empty()) {
+ const auto *MBB = WL.pop_back_val();
+ if (MBB == Dst)
+ return true;
+ Visited.insert(MBB);
+ for (auto *Succ : MBB->successors())
+ if (!Visited.count(Succ) && MDT.dominates(Header, Succ))
+ WL.push_back(Succ);
+ }
+ return false;
+}
+
+void WebAssemblyExceptionInfo::recalculate(
+ MachineFunction &MF, MachineDominatorTree &MDT,
+ const MachineDominanceFrontier &MDF) {
+ // Postorder traversal of the dominator tree.
+ SmallVector<std::unique_ptr<WebAssemblyException>, 8> Exceptions;
+ for (auto *DomNode : post_order(&MDT)) {
+ MachineBasicBlock *EHPad = DomNode->getBlock();
+ if (!EHPad->isEHPad())
+ continue;
+ auto WE = std::make_unique<WebAssemblyException>(EHPad);
+ discoverAndMapException(WE.get(), MDT, MDF);
+ Exceptions.push_back(std::move(WE));
+ }
+
+ // WasmEHFuncInfo contains a map of <catchpad, its next unwind destination>,
+ // which means, if an exception is not caught by the catchpad, it should end
+ // up in the next unwind destination stored in this data structure. (It is
+ // written as catchswitch's 'unwind' destination in ll files.) The below is an
+ // intuitive example of their relationship in C++ code:
+ // try {
+ // try {
+ // } catch (int) { // catchpad
+ // ... // this catch (int) { ... } is grouped as an exception
+ // }
+ // } catch (...) { // next unwind destination
+ // }
+ // (The example uses try-catches for illustration purposes, but the unwind
+ // destination can also be a cleanuppad generated by destructor calls.) So
+ // the unwind destination lies outside the catchpad's exception.
+ //
+ // We group exceptions in this analysis simply by including all BBs dominated
+ // by an EH pad. But in case the EH pad's unwind destination does not have any
+ // children outside of the exception, that unwind destination ends up also
+ // being dominated by the EH pad and included in the exception, which is not
+ // semantically correct, because it unwinds/rethrows into an inner scope.
+ //
+ // Here we extract those unwind destinations from their (incorrect) parent
+ // exception. Note that an unwind destination may not be an immediate child
+ // of the parent exception, so we have to traverse the parent chain.
+ //
+ // We should traverse BBs in the preorder of the dominator tree, because
+ // otherwise the result can be incorrect. For example, when there are three
+ // exceptions A, B, and C and A > B > C (> is subexception relationship here),
+ // and A's unwind destination is B and B's is C. When we visit B before A, we
+ // end up extracting C only out of B but not out of A.
+ const auto *EHInfo = MF.getWasmEHFuncInfo();
+ assert(EHInfo);
+ SmallVector<std::pair<WebAssemblyException *, WebAssemblyException *>>
+ UnwindWEVec;
+ for (auto *DomNode : depth_first(&MDT)) {
+ MachineBasicBlock *EHPad = DomNode->getBlock();
+ if (!EHPad->isEHPad())
+ continue;
+ if (!EHInfo->hasUnwindDest(EHPad))
+ continue;
+ auto *UnwindDest = EHInfo->getUnwindDest(EHPad);
+ auto *SrcWE = getExceptionFor(EHPad);
+ auto *DstWE = getExceptionFor(UnwindDest);
+ if (SrcWE->contains(DstWE)) {
+ UnwindWEVec.push_back(std::make_pair(SrcWE, DstWE));
+ LLVM_DEBUG(dbgs() << "Unwind destination ExceptionInfo fix:\n "
+ << DstWE->getEHPad()->getNumber() << "."
+ << DstWE->getEHPad()->getName()
+ << "'s exception is taken out of "
+ << SrcWE->getEHPad()->getNumber() << "."
+ << SrcWE->getEHPad()->getName() << "'s exception\n");
+ DstWE->setParentException(SrcWE->getParentException());
+ }
+ }
+
+ // After fixing subexception relationship between unwind destinations above,
+ // there can still be remaining discrepancies.
+ //
+ // For example, suppose Exception A is dominated by EHPad A and Exception B is
+ // dominated by EHPad B. EHPad A's unwind destination is EHPad B, but because
+ // EHPad B is dominated by EHPad A, the initial grouping makes Exception B a
+ // subexception of Exception A, and we fix it by taking Exception B out of
+ // Exception A above. But there can still be remaining BBs within Exception A
+ // that are reachable from Exception B. These BBs semantically don't belong
+ // to Exception A and were not part of its 'catch' clause or cleanup code in
+ // the original program; they just happened to be grouped within Exception A
+ // because they were dominated by EHPad A. We fix this case by taking those
+ // BBs out of the incorrect exception and out of all of its subexceptions
+ // they belong to.
+ //
+ // 1. First, we take out the remaining incorrect subexceptions. This part is
+ // easier: because we haven't added BBs to exceptions yet, we only need to
+ // change the parent exception pointer.
+ for (auto *DomNode : depth_first(&MDT)) {
+ MachineBasicBlock *EHPad = DomNode->getBlock();
+ if (!EHPad->isEHPad())
+ continue;
+ auto *WE = getExceptionFor(EHPad);
+
+ // For each source EHPad -> unwind destination EHPad
+ for (auto &P : UnwindWEVec) {
+ auto *SrcWE = P.first;
+ auto *DstWE = P.second;
+ // If WE (the current EH pad's exception) is still contained in SrcWE but
+ // is reachable from DstWE, which was taken out of SrcWE above, we have to
+ // take WE out of SrcWE too.
+ if (WE != SrcWE && SrcWE->contains(WE) && !DstWE->contains(WE) &&
+ isReachableAmongDominated(DstWE->getEHPad(), EHPad, SrcWE->getEHPad(),
+ MDT)) {
+ LLVM_DEBUG(dbgs() << "Remaining reachable ExceptionInfo fix:\n "
+ << WE->getEHPad()->getNumber() << "."
+ << WE->getEHPad()->getName()
+ << "'s exception is taken out of "
+ << SrcWE->getEHPad()->getNumber() << "."
+ << SrcWE->getEHPad()->getName() << "'s exception\n");
+ WE->setParentException(SrcWE->getParentException());
+ }
+ }
+ }
+
+ // Add BBs to exceptions' block sets. This is preparation for taking the
+ // remaining incorrect BBs out of exceptions, because we need to iterate
+ // over the BBs of each exception.
+ for (auto *DomNode : post_order(&MDT)) {
+ MachineBasicBlock *MBB = DomNode->getBlock();
+ WebAssemblyException *WE = getExceptionFor(MBB);
+ for (; WE; WE = WE->getParentException())
+ WE->addToBlocksSet(MBB);
+ }
+
+ // 2. Next, we take the remaining individual BBs out. Now that we have added
+ // BBs to each exception's BlockSet, when we take a BB out of an exception we
+ // need to fix those sets too.
+ for (auto &P : UnwindWEVec) {
+ auto *SrcWE = P.first;
+ auto *DstWE = P.second;
+
+ for (auto *MBB : SrcWE->getBlocksSet()) {
+ if (MBB->isEHPad()) {
+ assert(!isReachableAmongDominated(DstWE->getEHPad(), MBB,
+ SrcWE->getEHPad(), MDT) &&
+ "We already handled EH pads above");
+ continue;
+ }
+ if (isReachableAmongDominated(DstWE->getEHPad(), MBB, SrcWE->getEHPad(),
+ MDT)) {
+ LLVM_DEBUG(dbgs() << "Remainder BB: " << MBB->getNumber() << "."
+ << MBB->getName() << " is\n");
+ WebAssemblyException *InnerWE = getExceptionFor(MBB);
+ while (InnerWE != SrcWE) {
+ LLVM_DEBUG(dbgs()
+ << " removed from " << InnerWE->getEHPad()->getNumber()
+ << "." << InnerWE->getEHPad()->getName()
+ << "'s exception\n");
+ InnerWE->removeFromBlocksSet(MBB);
+ InnerWE = InnerWE->getParentException();
+ }
+ SrcWE->removeFromBlocksSet(MBB);
+ LLVM_DEBUG(dbgs() << " removed from " << SrcWE->getEHPad()->getNumber()
+ << "." << SrcWE->getEHPad()->getName()
+ << "'s exception\n");
+ changeExceptionFor(MBB, SrcWE->getParentException());
+ if (SrcWE->getParentException())
+ SrcWE->getParentException()->addToBlocksSet(MBB);
+ }
+ }
+ }
+
+ // Add BBs to exceptions' block vector
+ for (auto *DomNode : post_order(&MDT)) {
+ MachineBasicBlock *MBB = DomNode->getBlock();
+ WebAssemblyException *WE = getExceptionFor(MBB);
+ for (; WE; WE = WE->getParentException())
+ WE->addToBlocksVector(MBB);
+ }
+
+ SmallVector<WebAssemblyException*, 8> ExceptionPointers;
+ ExceptionPointers.reserve(Exceptions.size());
+
+ // Add subexceptions to exceptions
+ for (auto &WE : Exceptions) {
+ ExceptionPointers.push_back(WE.get());
+ if (WE->getParentException())
+ WE->getParentException()->getSubExceptions().push_back(std::move(WE));
+ else
+ addTopLevelException(std::move(WE));
+ }
+
+ // For convenience, Blocks and SubExceptions are inserted in postorder.
+ // Reverse the lists.
+ for (auto *WE : ExceptionPointers) {
+ WE->reverseBlock();
+ std::reverse(WE->getSubExceptions().begin(), WE->getSubExceptions().end());
+ }
+}
+
+void WebAssemblyExceptionInfo::releaseMemory() {
+ BBMap.clear();
+ TopLevelExceptions.clear();
+}
+
+void WebAssemblyExceptionInfo::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addRequired<MachineDominanceFrontier>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+void WebAssemblyExceptionInfo::discoverAndMapException(
+ WebAssemblyException *WE, const MachineDominatorTree &MDT,
+ const MachineDominanceFrontier &MDF) {
+ unsigned NumBlocks = 0;
+ unsigned NumSubExceptions = 0;
+
+ // Map blocks that belong to a catchpad / cleanuppad
+ MachineBasicBlock *EHPad = WE->getEHPad();
+ SmallVector<MachineBasicBlock *, 8> WL;
+ WL.push_back(EHPad);
+ while (!WL.empty()) {
+ MachineBasicBlock *MBB = WL.pop_back_val();
+
+ // Find its outermost discovered exception. If this is a discovered block,
+ // check whether it has already been discovered to be a subexception of this
+ // exception.
+ WebAssemblyException *SubE = getOutermostException(MBB);
+ if (SubE) {
+ if (SubE != WE) {
+ // Discover a subexception of this exception.
+ SubE->setParentException(WE);
+ ++NumSubExceptions;
+ NumBlocks += SubE->getBlocksVector().capacity();
+ // All blocks that belong to this subexception have been already
+ // discovered. Skip all of them. Add the subexception's landing pad's
+ // dominance frontier to the worklist.
+ for (auto &Frontier : MDF.find(SubE->getEHPad())->second)
+ if (MDT.dominates(EHPad, Frontier))
+ WL.push_back(Frontier);
+ }
+ continue;
+ }
+
+ // This is an undiscovered block. Map it to the current exception.
+ changeExceptionFor(MBB, WE);
+ ++NumBlocks;
+
+ // Add successors dominated by the current BB to the worklist.
+ for (auto *Succ : MBB->successors())
+ if (MDT.dominates(EHPad, Succ))
+ WL.push_back(Succ);
+ }
+
+ WE->getSubExceptions().reserve(NumSubExceptions);
+ WE->reserveBlocks(NumBlocks);
+}
+
+WebAssemblyException *
+WebAssemblyExceptionInfo::getOutermostException(MachineBasicBlock *MBB) const {
+ WebAssemblyException *WE = getExceptionFor(MBB);
+ if (WE) {
+ while (WebAssemblyException *Parent = WE->getParentException())
+ WE = Parent;
+ }
+ return WE;
+}
+
+void WebAssemblyException::print(raw_ostream &OS, unsigned Depth) const {
+ OS.indent(Depth * 2) << "Exception at depth " << getExceptionDepth()
+ << " containing: ";
+
+ for (unsigned I = 0; I < getBlocks().size(); ++I) {
+ MachineBasicBlock *MBB = getBlocks()[I];
+ if (I)
+ OS << ", ";
+ OS << "%bb." << MBB->getNumber();
+ if (const auto *BB = MBB->getBasicBlock())
+ if (BB->hasName())
+ OS << "." << BB->getName();
+
+ if (getEHPad() == MBB)
+ OS << " (landing-pad)";
+ }
+ OS << "\n";
+
+ for (auto &SubE : SubExceptions)
+ SubE->print(OS, Depth + 2);
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void WebAssemblyException::dump() const { print(dbgs()); }
+#endif
+
+raw_ostream &operator<<(raw_ostream &OS, const WebAssemblyException &WE) {
+ WE.print(OS);
+ return OS;
+}
+
+void WebAssemblyExceptionInfo::print(raw_ostream &OS, const Module *) const {
+ for (auto &WE : TopLevelExceptions)
+ WE->print(OS);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.h
new file mode 100644
index 000000000000..832ef1e49d78
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.h
@@ -0,0 +1,176 @@
+//===-- WebAssemblyExceptionInfo.h - WebAssembly Exception Info -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements WebAssemblyException information analysis.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYEXCEPTIONINFO_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYEXCEPTIONINFO_H
+
+#include "WebAssembly.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class MachineDominatorTree;
+class MachineDominanceFrontier;
+
+// WebAssembly instructions for exception handling are structured as follows:
+// try
+// instructions*
+// catch ----|
+// instructions* | -> A WebAssemblyException consists of this region
+// end ----|
+//
+// A WebAssemblyException object contains BBs that belong to a 'catch' part of
+// the try-catch-end structure to be created later. 'try' and 'end' markers
+// are not present at this stage and will be generated in the CFGStackify
+// pass. Because CFGSort requires all the BBs within a catch part to be sorted
+// together, as it does for loops, this pass calculates the nesting structure
+// of the catch parts of exceptions in a function.
+//
+// An exception's catch part is defined as a BB with a catch instruction and
+// all other BBs dominated by this BB.
+class WebAssemblyException {
+ MachineBasicBlock *EHPad = nullptr;
+
+ WebAssemblyException *ParentException = nullptr;
+ std::vector<std::unique_ptr<WebAssemblyException>> SubExceptions;
+ std::vector<MachineBasicBlock *> Blocks;
+ SmallPtrSet<MachineBasicBlock *, 8> BlockSet;
+
+public:
+ WebAssemblyException(MachineBasicBlock *EHPad) : EHPad(EHPad) {}
+ WebAssemblyException(const WebAssemblyException &) = delete;
+ const WebAssemblyException &operator=(const WebAssemblyException &) = delete;
+
+ MachineBasicBlock *getEHPad() const { return EHPad; }
+ MachineBasicBlock *getHeader() const { return EHPad; }
+ WebAssemblyException *getParentException() const { return ParentException; }
+ void setParentException(WebAssemblyException *WE) { ParentException = WE; }
+
+ bool contains(const WebAssemblyException *WE) const {
+ if (WE == this)
+ return true;
+ if (!WE)
+ return false;
+ return contains(WE->getParentException());
+ }
+ bool contains(const MachineBasicBlock *MBB) const {
+ return BlockSet.count(MBB);
+ }
+
+ void addToBlocksSet(MachineBasicBlock *MBB) { BlockSet.insert(MBB); }
+ void removeFromBlocksSet(MachineBasicBlock *MBB) { BlockSet.erase(MBB); }
+ void addToBlocksVector(MachineBasicBlock *MBB) { Blocks.push_back(MBB); }
+ void addBlock(MachineBasicBlock *MBB) {
+ Blocks.push_back(MBB);
+ BlockSet.insert(MBB);
+ }
+ ArrayRef<MachineBasicBlock *> getBlocks() const { return Blocks; }
+ using block_iterator = typename ArrayRef<MachineBasicBlock *>::const_iterator;
+ block_iterator block_begin() const { return getBlocks().begin(); }
+ block_iterator block_end() const { return getBlocks().end(); }
+ inline iterator_range<block_iterator> blocks() const {
+ return make_range(block_begin(), block_end());
+ }
+ unsigned getNumBlocks() const { return Blocks.size(); }
+ std::vector<MachineBasicBlock *> &getBlocksVector() { return Blocks; }
+ SmallPtrSetImpl<MachineBasicBlock *> &getBlocksSet() { return BlockSet; }
+
+ const std::vector<std::unique_ptr<WebAssemblyException>> &
+ getSubExceptions() const {
+ return SubExceptions;
+ }
+ std::vector<std::unique_ptr<WebAssemblyException>> &getSubExceptions() {
+ return SubExceptions;
+ }
+ void addSubException(std::unique_ptr<WebAssemblyException> E) {
+ SubExceptions.push_back(std::move(E));
+ }
+ using iterator = typename decltype(SubExceptions)::const_iterator;
+ iterator begin() const { return SubExceptions.begin(); }
+ iterator end() const { return SubExceptions.end(); }
+
+ void reserveBlocks(unsigned Size) { Blocks.reserve(Size); }
+ void reverseBlock(unsigned From = 0) {
+ std::reverse(Blocks.begin() + From, Blocks.end());
+ }
+
+  // Return the nesting level. The outermost exception has depth 1.
+ unsigned getExceptionDepth() const {
+ unsigned D = 1;
+ for (const WebAssemblyException *CurException = ParentException;
+ CurException; CurException = CurException->ParentException)
+ ++D;
+ return D;
+ }
+
+ void print(raw_ostream &OS, unsigned Depth = 0) const;
+ void dump() const;
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const WebAssemblyException &WE);
+
+class WebAssemblyExceptionInfo final : public MachineFunctionPass {
+ // Mapping of basic blocks to the innermost exception they occur in
+ DenseMap<const MachineBasicBlock *, WebAssemblyException *> BBMap;
+ std::vector<std::unique_ptr<WebAssemblyException>> TopLevelExceptions;
+
+ void discoverAndMapException(WebAssemblyException *WE,
+ const MachineDominatorTree &MDT,
+ const MachineDominanceFrontier &MDF);
+ WebAssemblyException *getOutermostException(MachineBasicBlock *MBB) const;
+
+public:
+ static char ID;
+ WebAssemblyExceptionInfo() : MachineFunctionPass(ID) {
+ initializeWebAssemblyExceptionInfoPass(*PassRegistry::getPassRegistry());
+ }
+ ~WebAssemblyExceptionInfo() override { releaseMemory(); }
+ WebAssemblyExceptionInfo(const WebAssemblyExceptionInfo &) = delete;
+ WebAssemblyExceptionInfo &
+ operator=(const WebAssemblyExceptionInfo &) = delete;
+
+ bool runOnMachineFunction(MachineFunction &) override;
+ void releaseMemory() override;
+ void recalculate(MachineFunction &MF, MachineDominatorTree &MDT,
+ const MachineDominanceFrontier &MDF);
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ bool empty() const { return TopLevelExceptions.empty(); }
+
+ // Return the innermost exception that MBB lives in. If the block is not in an
+ // exception, null is returned.
+ WebAssemblyException *getExceptionFor(const MachineBasicBlock *MBB) const {
+ return BBMap.lookup(MBB);
+ }
+
+ void changeExceptionFor(const MachineBasicBlock *MBB,
+ WebAssemblyException *WE) {
+ if (!WE) {
+ BBMap.erase(MBB);
+ return;
+ }
+ BBMap[MBB] = WE;
+ }
+
+ void addTopLevelException(std::unique_ptr<WebAssemblyException> WE) {
+ assert(!WE->getParentException() && "Not a top level exception!");
+ TopLevelExceptions.push_back(std::move(WE));
+ }
+
+ void print(raw_ostream &OS, const Module *M = nullptr) const override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
new file mode 100644
index 000000000000..0159c44a79b7
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
@@ -0,0 +1,463 @@
+//===-- WebAssemblyExplicitLocals.cpp - Make Locals Explicit --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file converts any remaining registers into WebAssembly locals.
+///
+/// After register stackification and register coloring, convert non-stackified
+/// registers into locals, inserting explicit local.get and local.set
+/// instructions.
+///
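+/// For example (a sketch of the transformation, using hypothetical vregs and
+/// local index N), a def and a later use of a non-stackified i32 register %v
+///
+///   %v = ADD_I32 %a, %b
+///   ...
+///   STORE_I32 ..., %v
+///
+/// become
+///
+///   %n1 = ADD_I32 %a, %b
+///   LOCAL_SET_I32 N, %n1
+///   ...
+///   %n2 = LOCAL_GET_I32 N
+///   STORE_I32 ..., %n2
+///
+/// where %n1 and %n2 are fresh registers that are marked stackified.
+///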
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyDebugValueManager.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-explicit-locals"
+
+namespace {
+class WebAssemblyExplicitLocals final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly Explicit Locals";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addPreserved<MachineBlockFrequencyInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyExplicitLocals() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyExplicitLocals::ID = 0;
+INITIALIZE_PASS(WebAssemblyExplicitLocals, DEBUG_TYPE,
+ "Convert registers to WebAssembly locals", false, false)
+
+FunctionPass *llvm::createWebAssemblyExplicitLocals() {
+ return new WebAssemblyExplicitLocals();
+}
+
+static void checkFrameBase(WebAssemblyFunctionInfo &MFI, unsigned Local,
+ unsigned Reg) {
+ // Mark a local for the frame base vreg.
+ if (MFI.isFrameBaseVirtual() && Reg == MFI.getFrameBaseVreg()) {
+ LLVM_DEBUG({
+      dbgs() << "Allocating local " << Local << " for VReg "
+ << Register::virtReg2Index(Reg) << '\n';
+ });
+ MFI.setFrameBaseLocal(Local);
+ }
+}
+
+/// Return a local id number for the given register, assigning it a new one
+/// if it doesn't yet have one.
+static unsigned getLocalId(DenseMap<unsigned, unsigned> &Reg2Local,
+ WebAssemblyFunctionInfo &MFI, unsigned &CurLocal,
+ unsigned Reg) {
+ auto P = Reg2Local.insert(std::make_pair(Reg, CurLocal));
+ if (P.second) {
+ checkFrameBase(MFI, CurLocal, Reg);
+ ++CurLocal;
+ }
+ return P.first->second;
+}
+
+/// Get the appropriate drop opcode for the given register class.
+static unsigned getDropOpcode(const TargetRegisterClass *RC) {
+ if (RC == &WebAssembly::I32RegClass)
+ return WebAssembly::DROP_I32;
+ if (RC == &WebAssembly::I64RegClass)
+ return WebAssembly::DROP_I64;
+ if (RC == &WebAssembly::F32RegClass)
+ return WebAssembly::DROP_F32;
+ if (RC == &WebAssembly::F64RegClass)
+ return WebAssembly::DROP_F64;
+ if (RC == &WebAssembly::V128RegClass)
+ return WebAssembly::DROP_V128;
+ if (RC == &WebAssembly::FUNCREFRegClass)
+ return WebAssembly::DROP_FUNCREF;
+ if (RC == &WebAssembly::EXTERNREFRegClass)
+ return WebAssembly::DROP_EXTERNREF;
+ llvm_unreachable("Unexpected register class");
+}
+
+/// Get the appropriate local.get opcode for the given register class.
+static unsigned getLocalGetOpcode(const TargetRegisterClass *RC) {
+ if (RC == &WebAssembly::I32RegClass)
+ return WebAssembly::LOCAL_GET_I32;
+ if (RC == &WebAssembly::I64RegClass)
+ return WebAssembly::LOCAL_GET_I64;
+ if (RC == &WebAssembly::F32RegClass)
+ return WebAssembly::LOCAL_GET_F32;
+ if (RC == &WebAssembly::F64RegClass)
+ return WebAssembly::LOCAL_GET_F64;
+ if (RC == &WebAssembly::V128RegClass)
+ return WebAssembly::LOCAL_GET_V128;
+ if (RC == &WebAssembly::FUNCREFRegClass)
+ return WebAssembly::LOCAL_GET_FUNCREF;
+ if (RC == &WebAssembly::EXTERNREFRegClass)
+ return WebAssembly::LOCAL_GET_EXTERNREF;
+ llvm_unreachable("Unexpected register class");
+}
+
+/// Get the appropriate local.set opcode for the given register class.
+static unsigned getLocalSetOpcode(const TargetRegisterClass *RC) {
+ if (RC == &WebAssembly::I32RegClass)
+ return WebAssembly::LOCAL_SET_I32;
+ if (RC == &WebAssembly::I64RegClass)
+ return WebAssembly::LOCAL_SET_I64;
+ if (RC == &WebAssembly::F32RegClass)
+ return WebAssembly::LOCAL_SET_F32;
+ if (RC == &WebAssembly::F64RegClass)
+ return WebAssembly::LOCAL_SET_F64;
+ if (RC == &WebAssembly::V128RegClass)
+ return WebAssembly::LOCAL_SET_V128;
+ if (RC == &WebAssembly::FUNCREFRegClass)
+ return WebAssembly::LOCAL_SET_FUNCREF;
+ if (RC == &WebAssembly::EXTERNREFRegClass)
+ return WebAssembly::LOCAL_SET_EXTERNREF;
+ llvm_unreachable("Unexpected register class");
+}
+
+/// Get the appropriate local.tee opcode for the given register class.
+static unsigned getLocalTeeOpcode(const TargetRegisterClass *RC) {
+ if (RC == &WebAssembly::I32RegClass)
+ return WebAssembly::LOCAL_TEE_I32;
+ if (RC == &WebAssembly::I64RegClass)
+ return WebAssembly::LOCAL_TEE_I64;
+ if (RC == &WebAssembly::F32RegClass)
+ return WebAssembly::LOCAL_TEE_F32;
+ if (RC == &WebAssembly::F64RegClass)
+ return WebAssembly::LOCAL_TEE_F64;
+ if (RC == &WebAssembly::V128RegClass)
+ return WebAssembly::LOCAL_TEE_V128;
+ if (RC == &WebAssembly::FUNCREFRegClass)
+ return WebAssembly::LOCAL_TEE_FUNCREF;
+ if (RC == &WebAssembly::EXTERNREFRegClass)
+ return WebAssembly::LOCAL_TEE_EXTERNREF;
+ llvm_unreachable("Unexpected register class");
+}
+
+/// Get the type associated with the given register class.
+static MVT typeForRegClass(const TargetRegisterClass *RC) {
+ if (RC == &WebAssembly::I32RegClass)
+ return MVT::i32;
+ if (RC == &WebAssembly::I64RegClass)
+ return MVT::i64;
+ if (RC == &WebAssembly::F32RegClass)
+ return MVT::f32;
+ if (RC == &WebAssembly::F64RegClass)
+ return MVT::f64;
+ if (RC == &WebAssembly::V128RegClass)
+ return MVT::v16i8;
+ if (RC == &WebAssembly::FUNCREFRegClass)
+ return MVT::funcref;
+ if (RC == &WebAssembly::EXTERNREFRegClass)
+ return MVT::externref;
+ llvm_unreachable("unrecognized register class");
+}
+
+/// Given a MachineOperand of a stackified vreg, return the instruction at the
+/// start of the expression tree.
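+///
+/// For example (a sketch), if MO uses %c in the stackified tree
+///   %a = CONST_I32 1
+///   %b = CONST_I32 2
+///   %c = ADD_I32 %a, %b
+/// the walk follows %c to its def (the ADD), then recurses through the ADD's
+/// first register use %a, returning the CONST_I32 that defines %a: the point
+/// at which the tree's operands start being pushed onto the value stack.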
+static MachineInstr *findStartOfTree(MachineOperand &MO,
+ MachineRegisterInfo &MRI,
+ const WebAssemblyFunctionInfo &MFI) {
+ Register Reg = MO.getReg();
+ assert(MFI.isVRegStackified(Reg));
+ MachineInstr *Def = MRI.getVRegDef(Reg);
+
+ // If this instruction has any non-stackified defs, it is the start
+ for (auto DefReg : Def->defs()) {
+ if (!MFI.isVRegStackified(DefReg.getReg())) {
+ return Def;
+ }
+ }
+
+ // Find the first stackified use and proceed from there.
+ for (MachineOperand &DefMO : Def->explicit_uses()) {
+ if (!DefMO.isReg())
+ continue;
+ return findStartOfTree(DefMO, MRI, MFI);
+ }
+
+ // If there were no stackified uses, we've reached the start.
+ return Def;
+}
+
+bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG(dbgs() << "********** Make Locals Explicit **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ bool Changed = false;
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+
+ // Map non-stackified virtual registers to their local ids.
+ DenseMap<unsigned, unsigned> Reg2Local;
+
+ // Handle ARGUMENTS first to ensure that they get the designated numbers.
+ for (MachineBasicBlock::iterator I = MF.begin()->begin(),
+ E = MF.begin()->end();
+ I != E;) {
+ MachineInstr &MI = *I++;
+ if (!WebAssembly::isArgument(MI.getOpcode()))
+ break;
+ Register Reg = MI.getOperand(0).getReg();
+ assert(!MFI.isVRegStackified(Reg));
+ auto Local = static_cast<unsigned>(MI.getOperand(1).getImm());
+ Reg2Local[Reg] = Local;
+ checkFrameBase(MFI, Local, Reg);
+
+    // Update debug values to point to the local before removing the
+    // instruction.
+ WebAssemblyDebugValueManager(&MI).replaceWithLocal(Local);
+
+ MI.eraseFromParent();
+ Changed = true;
+ }
+
+  // Start assigning local numbers after the last parameter and after any
+  // already-assigned locals. In wasm, parameters and locals share a single
+  // index space, with the parameters occupying indices [0, #params).
+ unsigned CurLocal = static_cast<unsigned>(MFI.getParams().size());
+ CurLocal += static_cast<unsigned>(MFI.getLocals().size());
+
+  // Precompute the set of registers that are unused, so that we can insert
+  // drops for their defs instead of allocating locals.
+ BitVector UseEmpty(MRI.getNumVirtRegs());
+ for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I)
+ UseEmpty[I] = MRI.use_empty(Register::index2VirtReg(I));
+
+ // Visit each instruction in the function.
+ for (MachineBasicBlock &MBB : MF) {
+ for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
+ assert(!WebAssembly::isArgument(MI.getOpcode()));
+
+ if (MI.isDebugInstr() || MI.isLabel())
+ continue;
+
+ if (MI.getOpcode() == WebAssembly::IMPLICIT_DEF) {
+ MI.eraseFromParent();
+ Changed = true;
+ continue;
+ }
+
+ // Replace tee instructions with local.tee. The difference is that tee
+ // instructions have two defs, while local.tee instructions have one def
+ // and an index of a local to write to.
+ //
+ // - Before:
+ // TeeReg, Reg = TEE DefReg
+ // INST ..., TeeReg, ...
+ // INST ..., Reg, ...
+ // INST ..., Reg, ...
+ // * DefReg: may or may not be stackified
+ // * Reg: not stackified
+ // * TeeReg: stackified
+ //
+ // - After (when DefReg was already stackified):
+ // TeeReg = LOCAL_TEE LocalId1, DefReg
+ // INST ..., TeeReg, ...
+ // INST ..., Reg, ...
+ // INST ..., Reg, ...
+ // * Reg: mapped to LocalId1
+ // * TeeReg: stackified
+ //
+ // - After (when DefReg was not already stackified):
+ // NewReg = LOCAL_GET LocalId1
+ // TeeReg = LOCAL_TEE LocalId2, NewReg
+ // INST ..., TeeReg, ...
+ // INST ..., Reg, ...
+ // INST ..., Reg, ...
+ // * DefReg: mapped to LocalId1
+ // * Reg: mapped to LocalId2
+ // * TeeReg: stackified
+ if (WebAssembly::isTee(MI.getOpcode())) {
+ assert(MFI.isVRegStackified(MI.getOperand(0).getReg()));
+ assert(!MFI.isVRegStackified(MI.getOperand(1).getReg()));
+ Register DefReg = MI.getOperand(2).getReg();
+ const TargetRegisterClass *RC = MRI.getRegClass(DefReg);
+
+ // Stackify the input if it isn't stackified yet.
+ if (!MFI.isVRegStackified(DefReg)) {
+ unsigned LocalId = getLocalId(Reg2Local, MFI, CurLocal, DefReg);
+ Register NewReg = MRI.createVirtualRegister(RC);
+ unsigned Opc = getLocalGetOpcode(RC);
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(Opc), NewReg)
+ .addImm(LocalId);
+ MI.getOperand(2).setReg(NewReg);
+ MFI.stackifyVReg(MRI, NewReg);
+ }
+
+ // Replace the TEE with a LOCAL_TEE.
+ unsigned LocalId =
+ getLocalId(Reg2Local, MFI, CurLocal, MI.getOperand(1).getReg());
+ unsigned Opc = getLocalTeeOpcode(RC);
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(Opc),
+ MI.getOperand(0).getReg())
+ .addImm(LocalId)
+ .addReg(MI.getOperand(2).getReg());
+
+ WebAssemblyDebugValueManager(&MI).replaceWithLocal(LocalId);
+
+ MI.eraseFromParent();
+ Changed = true;
+ continue;
+ }
+
+ // Insert local.sets for any defs that aren't stackified yet.
+ for (auto &Def : MI.defs()) {
+ Register OldReg = Def.getReg();
+ if (!MFI.isVRegStackified(OldReg)) {
+ const TargetRegisterClass *RC = MRI.getRegClass(OldReg);
+ Register NewReg = MRI.createVirtualRegister(RC);
+ auto InsertPt = std::next(MI.getIterator());
+ if (UseEmpty[Register::virtReg2Index(OldReg)]) {
+ unsigned Opc = getDropOpcode(RC);
+ MachineInstr *Drop =
+ BuildMI(MBB, InsertPt, MI.getDebugLoc(), TII->get(Opc))
+ .addReg(NewReg);
+            // After the drop instruction, this reg operand will not be used
+            // again.
+ Drop->getOperand(0).setIsKill();
+ if (MFI.isFrameBaseVirtual() && OldReg == MFI.getFrameBaseVreg())
+ MFI.clearFrameBaseVreg();
+ } else {
+ unsigned LocalId = getLocalId(Reg2Local, MFI, CurLocal, OldReg);
+ unsigned Opc = getLocalSetOpcode(RC);
+
+ WebAssemblyDebugValueManager(&MI).replaceWithLocal(LocalId);
+
+ BuildMI(MBB, InsertPt, MI.getDebugLoc(), TII->get(Opc))
+ .addImm(LocalId)
+ .addReg(NewReg);
+ }
+          // This register operand of the original instruction is now used
+          // by the inserted drop or local.set instruction, so it is no
+          // longer dead; clear its dead flag.
+ Def.setReg(NewReg);
+ Def.setIsDead(false);
+ MFI.stackifyVReg(MRI, NewReg);
+ Changed = true;
+ }
+ }
+
+ // Insert local.gets for any uses that aren't stackified yet.
+ MachineInstr *InsertPt = &MI;
+ for (MachineOperand &MO : reverse(MI.explicit_uses())) {
+ if (!MO.isReg())
+ continue;
+
+ Register OldReg = MO.getReg();
+
+ // Inline asm may have a def in the middle of the operands. Our contract
+ // with inline asm register operands is to provide local indices as
+ // immediates.
+ if (MO.isDef()) {
+ assert(MI.isInlineAsm());
+ unsigned LocalId = getLocalId(Reg2Local, MFI, CurLocal, OldReg);
+ // If this register operand is tied to another operand, we can't
+ // change it to an immediate. Untie it first.
+ MI.untieRegOperand(MO.getOperandNo());
+ MO.ChangeToImmediate(LocalId);
+ continue;
+ }
+
+ // If we see a stackified register, prepare to insert subsequent
+ // local.gets before the start of its tree.
+ if (MFI.isVRegStackified(OldReg)) {
+ InsertPt = findStartOfTree(MO, MRI, MFI);
+ continue;
+ }
+
+ // Our contract with inline asm register operands is to provide local
+ // indices as immediates.
+ if (MI.isInlineAsm()) {
+ unsigned LocalId = getLocalId(Reg2Local, MFI, CurLocal, OldReg);
+ // Untie it first if this reg operand is tied to another operand.
+ MI.untieRegOperand(MO.getOperandNo());
+ MO.ChangeToImmediate(LocalId);
+ continue;
+ }
+
+ // Insert a local.get.
+ unsigned LocalId = getLocalId(Reg2Local, MFI, CurLocal, OldReg);
+ const TargetRegisterClass *RC = MRI.getRegClass(OldReg);
+ Register NewReg = MRI.createVirtualRegister(RC);
+ unsigned Opc = getLocalGetOpcode(RC);
+        // Use InsertPt's DebugLoc rather than MI's, since MI may be
+        // discontinuous from where this local.get is being inserted, causing
+        // non-linear stepping in the debugger or at function entry points
+        // where variables aren't live yet. The alternative, the previous
+        // instruction's location, is strictly worse since it can point at
+        // the previous statement.
+        // See crbug.com/1251909, crbug.com/1249745
+ InsertPt = BuildMI(MBB, InsertPt, InsertPt->getDebugLoc(),
+ TII->get(Opc), NewReg).addImm(LocalId);
+ MO.setReg(NewReg);
+ MFI.stackifyVReg(MRI, NewReg);
+ Changed = true;
+ }
+
+ // Coalesce and eliminate COPY instructions.
+ if (WebAssembly::isCopy(MI.getOpcode())) {
+ MRI.replaceRegWith(MI.getOperand(1).getReg(),
+ MI.getOperand(0).getReg());
+ MI.eraseFromParent();
+ Changed = true;
+ }
+ }
+ }
+
+ // Define the locals.
+ // TODO: Sort the locals for better compression.
+ MFI.setNumLocals(CurLocal - MFI.getParams().size());
+ for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I) {
+ Register Reg = Register::index2VirtReg(I);
+ auto RL = Reg2Local.find(Reg);
+ if (RL == Reg2Local.end() || RL->second < MFI.getParams().size())
+ continue;
+
+ MFI.setLocal(RL->second - MFI.getParams().size(),
+ typeForRegClass(MRI.getRegClass(Reg)));
+ Changed = true;
+ }
+
+#ifndef NDEBUG
+ // Assert that all registers have been stackified at this point.
+ for (const MachineBasicBlock &MBB : MF) {
+ for (const MachineInstr &MI : MBB) {
+ if (MI.isDebugInstr() || MI.isLabel())
+ continue;
+ for (const MachineOperand &MO : MI.explicit_operands()) {
+ assert(
+ (!MO.isReg() || MRI.use_empty(MO.getReg()) ||
+ MFI.isVRegStackified(MO.getReg())) &&
+ "WebAssemblyExplicitLocals failed to stackify a register operand");
+ }
+ }
+ }
+#endif
+
+ return Changed;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
new file mode 100644
index 000000000000..37abbb072cdd
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
@@ -0,0 +1,1435 @@
+//===-- WebAssemblyFastISel.cpp - WebAssembly FastISel implementation -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the WebAssembly-specific support for the FastISel
+/// class. Some of the target-specific code is generated by tablegen in the file
+/// WebAssemblyGenFastISel.inc, which is #included here.
+///
+/// TODO: kill flags
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "Utils/WebAssemblyTypeUtilities.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyTargetMachine.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/CodeGen/FastISel.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/PatternMatch.h"
+
+using namespace llvm;
+using namespace PatternMatch;
+
+#define DEBUG_TYPE "wasm-fastisel"
+
+namespace {
+
+class WebAssemblyFastISel final : public FastISel {
+ // All possible address modes.
+ class Address {
+ public:
+ using BaseKind = enum { RegBase, FrameIndexBase };
+
+ private:
+ BaseKind Kind = RegBase;
+ union {
+ unsigned Reg;
+ int FI;
+ } Base;
+
+ // Whether the base has been determined yet
+ bool IsBaseSet = false;
+
+ int64_t Offset = 0;
+
+ const GlobalValue *GV = nullptr;
+
+ public:
+ // Innocuous defaults for our address.
+ Address() { Base.Reg = 0; }
+ void setKind(BaseKind K) {
+ assert(!isSet() && "Can't change kind with non-zero base");
+ Kind = K;
+ }
+ BaseKind getKind() const { return Kind; }
+ bool isRegBase() const { return Kind == RegBase; }
+ bool isFIBase() const { return Kind == FrameIndexBase; }
+ void setReg(unsigned Reg) {
+ assert(isRegBase() && "Invalid base register access!");
+ assert(!IsBaseSet && "Base cannot be reset");
+ Base.Reg = Reg;
+ IsBaseSet = true;
+ }
+ unsigned getReg() const {
+ assert(isRegBase() && "Invalid base register access!");
+ return Base.Reg;
+ }
+ void setFI(unsigned FI) {
+ assert(isFIBase() && "Invalid base frame index access!");
+ assert(!IsBaseSet && "Base cannot be reset");
+ Base.FI = FI;
+ IsBaseSet = true;
+ }
+ unsigned getFI() const {
+ assert(isFIBase() && "Invalid base frame index access!");
+ return Base.FI;
+ }
+
+ void setOffset(int64_t NewOffset) {
+ assert(NewOffset >= 0 && "Offsets must be non-negative");
+ Offset = NewOffset;
+ }
+ int64_t getOffset() const { return Offset; }
+ void setGlobalValue(const GlobalValue *G) { GV = G; }
+ const GlobalValue *getGlobalValue() const { return GV; }
+ bool isSet() const { return IsBaseSet; }
+ };
+
+ /// Keep a pointer to the WebAssemblySubtarget around so that we can make the
+ /// right decision when generating code for different targets.
+ const WebAssemblySubtarget *Subtarget;
+ LLVMContext *Context;
+
+private:
+ // Utility helper routines
+ MVT::SimpleValueType getSimpleType(Type *Ty) {
+ EVT VT = TLI.getValueType(DL, Ty, /*AllowUnknown=*/true);
+ return VT.isSimple() ? VT.getSimpleVT().SimpleTy
+ : MVT::INVALID_SIMPLE_VALUE_TYPE;
+ }
+ MVT::SimpleValueType getLegalType(MVT::SimpleValueType VT) {
+ switch (VT) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ return MVT::i32;
+ case MVT::i32:
+ case MVT::i64:
+ case MVT::f32:
+ case MVT::f64:
+ return VT;
+ case MVT::funcref:
+ case MVT::externref:
+ if (Subtarget->hasReferenceTypes())
+ return VT;
+ break;
+ case MVT::f16:
+ return MVT::f32;
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32:
+ case MVT::v4f32:
+ case MVT::v2i64:
+ case MVT::v2f64:
+ if (Subtarget->hasSIMD128())
+ return VT;
+ break;
+ default:
+ break;
+ }
+ return MVT::INVALID_SIMPLE_VALUE_TYPE;
+ }
+ bool computeAddress(const Value *Obj, Address &Addr);
+ void materializeLoadStoreOperands(Address &Addr);
+ void addLoadStoreOperands(const Address &Addr, const MachineInstrBuilder &MIB,
+ MachineMemOperand *MMO);
+ unsigned maskI1Value(unsigned Reg, const Value *V);
+ unsigned getRegForI1Value(const Value *V, const BasicBlock *BB, bool &Not);
+ unsigned zeroExtendToI32(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From);
+ unsigned signExtendToI32(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From);
+ unsigned zeroExtend(unsigned Reg, const Value *V, MVT::SimpleValueType From,
+ MVT::SimpleValueType To);
+ unsigned signExtend(unsigned Reg, const Value *V, MVT::SimpleValueType From,
+ MVT::SimpleValueType To);
+ unsigned getRegForUnsignedValue(const Value *V);
+ unsigned getRegForSignedValue(const Value *V);
+ unsigned getRegForPromotedValue(const Value *V, bool IsSigned);
+ unsigned notValue(unsigned Reg);
+ unsigned copyValue(unsigned Reg);
+
+ // Backend specific FastISel code.
+ unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
+ unsigned fastMaterializeConstant(const Constant *C) override;
+ bool fastLowerArguments() override;
+
+ // Selection routines.
+ bool selectCall(const Instruction *I);
+ bool selectSelect(const Instruction *I);
+ bool selectTrunc(const Instruction *I);
+ bool selectZExt(const Instruction *I);
+ bool selectSExt(const Instruction *I);
+ bool selectICmp(const Instruction *I);
+ bool selectFCmp(const Instruction *I);
+ bool selectBitCast(const Instruction *I);
+ bool selectLoad(const Instruction *I);
+ bool selectStore(const Instruction *I);
+ bool selectBr(const Instruction *I);
+ bool selectRet(const Instruction *I);
+ bool selectUnreachable(const Instruction *I);
+
+public:
+ // Backend specific FastISel code.
+ WebAssemblyFastISel(FunctionLoweringInfo &FuncInfo,
+ const TargetLibraryInfo *LibInfo)
+ : FastISel(FuncInfo, LibInfo, /*SkipTargetIndependentISel=*/true) {
+ Subtarget = &FuncInfo.MF->getSubtarget<WebAssemblySubtarget>();
+ Context = &FuncInfo.Fn->getContext();
+ }
+
+ bool fastSelectInstruction(const Instruction *I) override;
+
+#include "WebAssemblyGenFastISel.inc"
+};
+
+} // end anonymous namespace
+
+bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
+ const User *U = nullptr;
+ unsigned Opcode = Instruction::UserOp1;
+ if (const auto *I = dyn_cast<Instruction>(Obj)) {
+ // Don't walk into other basic blocks unless the object is an alloca from
+ // another block, otherwise it may not have a virtual register assigned.
+ if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
+ FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
+ Opcode = I->getOpcode();
+ U = I;
+ }
+ } else if (const auto *C = dyn_cast<ConstantExpr>(Obj)) {
+ Opcode = C->getOpcode();
+ U = C;
+ }
+
+ if (auto *Ty = dyn_cast<PointerType>(Obj->getType()))
+ if (Ty->getAddressSpace() > 255)
+ // Fast instruction selection doesn't support the special
+ // address spaces.
+ return false;
+
+ if (const auto *GV = dyn_cast<GlobalValue>(Obj)) {
+ if (TLI.isPositionIndependent())
+ return false;
+ if (Addr.getGlobalValue())
+ return false;
+ if (GV->isThreadLocal())
+ return false;
+ Addr.setGlobalValue(GV);
+ return true;
+ }
+
+ switch (Opcode) {
+ default:
+ break;
+ case Instruction::BitCast: {
+ // Look through bitcasts.
+ return computeAddress(U->getOperand(0), Addr);
+ }
+ case Instruction::IntToPtr: {
+ // Look past no-op inttoptrs.
+ if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
+ TLI.getPointerTy(DL))
+ return computeAddress(U->getOperand(0), Addr);
+ break;
+ }
+ case Instruction::PtrToInt: {
+ // Look past no-op ptrtoints.
+ if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
+ return computeAddress(U->getOperand(0), Addr);
+ break;
+ }
+ case Instruction::GetElementPtr: {
+ Address SavedAddr = Addr;
+ uint64_t TmpOffset = Addr.getOffset();
+ // Non-inbounds geps can wrap; wasm's offsets can't.
+ if (!cast<GEPOperator>(U)->isInBounds())
+ goto unsupported_gep;
+    // Iterate through the GEP, folding the constants into the offset where
+    // we can.
+ for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
+ GTI != E; ++GTI) {
+ const Value *Op = GTI.getOperand();
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
+ const StructLayout *SL = DL.getStructLayout(STy);
+ unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
+ TmpOffset += SL->getElementOffset(Idx);
+ } else {
+ uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
+ for (;;) {
+ if (const auto *CI = dyn_cast<ConstantInt>(Op)) {
+ // Constant-offset addressing.
+ TmpOffset += CI->getSExtValue() * S;
+ break;
+ }
+ if (S == 1 && Addr.isRegBase() && Addr.getReg() == 0) {
+ // An unscaled add of a register. Set it as the new base.
+ Register Reg = getRegForValue(Op);
+ if (Reg == 0)
+ return false;
+ Addr.setReg(Reg);
+ break;
+ }
+ if (canFoldAddIntoGEP(U, Op)) {
+ // A compatible add with a constant operand. Fold the constant.
+ auto *CI = cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
+ TmpOffset += CI->getSExtValue() * S;
+ // Iterate on the other operand.
+ Op = cast<AddOperator>(Op)->getOperand(0);
+ continue;
+ }
+ // Unsupported
+ goto unsupported_gep;
+ }
+ }
+ }
+ // Don't fold in negative offsets.
+ if (int64_t(TmpOffset) >= 0) {
+ // Try to grab the base operand now.
+ Addr.setOffset(TmpOffset);
+ if (computeAddress(U->getOperand(0), Addr))
+ return true;
+ }
+ // We failed, restore everything and try the other options.
+ Addr = SavedAddr;
+ unsupported_gep:
+ break;
+ }
+ case Instruction::Alloca: {
+ const auto *AI = cast<AllocaInst>(Obj);
+ DenseMap<const AllocaInst *, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ if (Addr.isSet()) {
+ return false;
+ }
+ Addr.setKind(Address::FrameIndexBase);
+ Addr.setFI(SI->second);
+ return true;
+ }
+ break;
+ }
+ case Instruction::Add: {
+ // Adds of constants are common and easy enough.
+ const Value *LHS = U->getOperand(0);
+ const Value *RHS = U->getOperand(1);
+
+ if (isa<ConstantInt>(LHS))
+ std::swap(LHS, RHS);
+
+ if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
+ uint64_t TmpOffset = Addr.getOffset() + CI->getSExtValue();
+ if (int64_t(TmpOffset) >= 0) {
+ Addr.setOffset(TmpOffset);
+ return computeAddress(LHS, Addr);
+ }
+ }
+
+ Address Backup = Addr;
+ if (computeAddress(LHS, Addr) && computeAddress(RHS, Addr))
+ return true;
+ Addr = Backup;
+
+ break;
+ }
+ case Instruction::Sub: {
+ // Subs of constants are common and easy enough.
+ const Value *LHS = U->getOperand(0);
+ const Value *RHS = U->getOperand(1);
+
+ if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
+ int64_t TmpOffset = Addr.getOffset() - CI->getSExtValue();
+ if (TmpOffset >= 0) {
+ Addr.setOffset(TmpOffset);
+ return computeAddress(LHS, Addr);
+ }
+ }
+ break;
+ }
+ }
+ if (Addr.isSet()) {
+ return false;
+ }
+ Register Reg = getRegForValue(Obj);
+ if (Reg == 0)
+ return false;
+ Addr.setReg(Reg);
+ return Addr.getReg() != 0;
+}
+
+void WebAssemblyFastISel::materializeLoadStoreOperands(Address &Addr) {
+ if (Addr.isRegBase()) {
+ unsigned Reg = Addr.getReg();
+ if (Reg == 0) {
+ Reg = createResultReg(Subtarget->hasAddr64() ? &WebAssembly::I64RegClass
+ : &WebAssembly::I32RegClass);
+ unsigned Opc = Subtarget->hasAddr64() ? WebAssembly::CONST_I64
+ : WebAssembly::CONST_I32;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), Reg)
+ .addImm(0);
+ Addr.setReg(Reg);
+ }
+ }
+}
+
+void WebAssemblyFastISel::addLoadStoreOperands(const Address &Addr,
+ const MachineInstrBuilder &MIB,
+ MachineMemOperand *MMO) {
+ // Set the alignment operand (this is rewritten in SetP2AlignOperands).
+ // TODO: Disable SetP2AlignOperands for FastISel and just do it here.
+ MIB.addImm(0);
+
+ if (const GlobalValue *GV = Addr.getGlobalValue())
+ MIB.addGlobalAddress(GV, Addr.getOffset());
+ else
+ MIB.addImm(Addr.getOffset());
+
+ if (Addr.isRegBase())
+ MIB.addReg(Addr.getReg());
+ else
+ MIB.addFrameIndex(Addr.getFI());
+
+ MIB.addMemOperand(MMO);
+}
+
+unsigned WebAssemblyFastISel::maskI1Value(unsigned Reg, const Value *V) {
+ return zeroExtendToI32(Reg, V, MVT::i1);
+}
+
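+// For an i1 condition, prefer to reuse the comparison that produced it; for
+// example (a sketch), given
+//   %c = icmp eq i32 %x, 0
+//   br i1 %c, ...
+// this returns the register for %x with Not set, so selectBr can emit
+// BR_UNLESS on %x instead of materializing %c and masking it.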
+unsigned WebAssemblyFastISel::getRegForI1Value(const Value *V,
+ const BasicBlock *BB,
+ bool &Not) {
+ if (const auto *ICmp = dyn_cast<ICmpInst>(V))
+ if (const ConstantInt *C = dyn_cast<ConstantInt>(ICmp->getOperand(1)))
+ if (ICmp->isEquality() && C->isZero() && C->getType()->isIntegerTy(32) &&
+ ICmp->getParent() == BB) {
+ Not = ICmp->isTrueWhenEqual();
+ return getRegForValue(ICmp->getOperand(0));
+ }
+
+ Not = false;
+ Register Reg = getRegForValue(V);
+ if (Reg == 0)
+ return 0;
+ return maskI1Value(Reg, V);
+}
+
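+// For narrow integer widths, zero-extension to i32 is an AND with the mask
+// ((1 << width) - 1); for example (a sketch), extending an i8 emits the
+// equivalent of:
+//   i32.const 255
+//   i32.and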
+unsigned WebAssemblyFastISel::zeroExtendToI32(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From) {
+ if (Reg == 0)
+ return 0;
+
+ switch (From) {
+ case MVT::i1:
+ // If the value is naturally an i1, we don't need to mask it. We only know
+ // if a value is naturally an i1 if it is definitely lowered by FastISel,
+ // not a DAG ISel fallback.
+ if (V != nullptr && isa<Argument>(V) && cast<Argument>(V)->hasZExtAttr())
+ return copyValue(Reg);
+ break;
+ case MVT::i8:
+ case MVT::i16:
+ break;
+ case MVT::i32:
+ return copyValue(Reg);
+ default:
+ return 0;
+ }
+
+ Register Imm = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
+ TII.get(WebAssembly::CONST_I32), Imm)
+ .addImm(~(~uint64_t(0) << MVT(From).getSizeInBits()));
+
+ Register Result = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
+ TII.get(WebAssembly::AND_I32), Result)
+ .addReg(Reg)
+ .addReg(Imm);
+
+ return Result;
+}
+
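+// Sign-extension to i32 is a left shift followed by an arithmetic right
+// shift by (32 - width); for example (a sketch), extending an i8 emits the
+// equivalent of:
+//   i32.const 24
+//   i32.shl
+//   i32.const 24
+//   i32.shr_s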
+unsigned WebAssemblyFastISel::signExtendToI32(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From) {
+ if (Reg == 0)
+ return 0;
+
+ switch (From) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ break;
+ case MVT::i32:
+ return copyValue(Reg);
+ default:
+ return 0;
+ }
+
+ Register Imm = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
+ TII.get(WebAssembly::CONST_I32), Imm)
+ .addImm(32 - MVT(From).getSizeInBits());
+
+ Register Left = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
+ TII.get(WebAssembly::SHL_I32), Left)
+ .addReg(Reg)
+ .addReg(Imm);
+
+ Register Right = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
+ TII.get(WebAssembly::SHR_S_I32), Right)
+ .addReg(Left)
+ .addReg(Imm);
+
+ return Right;
+}
+
+unsigned WebAssemblyFastISel::zeroExtend(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From,
+ MVT::SimpleValueType To) {
+ if (To == MVT::i64) {
+ if (From == MVT::i64)
+ return copyValue(Reg);
+
+ Reg = zeroExtendToI32(Reg, V, From);
+
+ Register Result = createResultReg(&WebAssembly::I64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
+ TII.get(WebAssembly::I64_EXTEND_U_I32), Result)
+ .addReg(Reg);
+ return Result;
+ }
+
+ if (To == MVT::i32)
+ return zeroExtendToI32(Reg, V, From);
+
+ return 0;
+}
+
+unsigned WebAssemblyFastISel::signExtend(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From,
+ MVT::SimpleValueType To) {
+ if (To == MVT::i64) {
+ if (From == MVT::i64)
+ return copyValue(Reg);
+
+ Reg = signExtendToI32(Reg, V, From);
+
+ Register Result = createResultReg(&WebAssembly::I64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
+ TII.get(WebAssembly::I64_EXTEND_S_I32), Result)
+ .addReg(Reg);
+ return Result;
+ }
+
+ if (To == MVT::i32)
+ return signExtendToI32(Reg, V, From);
+
+ return 0;
+}
+
+unsigned WebAssemblyFastISel::getRegForUnsignedValue(const Value *V) {
+ MVT::SimpleValueType From = getSimpleType(V->getType());
+ MVT::SimpleValueType To = getLegalType(From);
+ Register VReg = getRegForValue(V);
+ if (VReg == 0)
+ return 0;
+ return zeroExtend(VReg, V, From, To);
+}
+
+unsigned WebAssemblyFastISel::getRegForSignedValue(const Value *V) {
+ MVT::SimpleValueType From = getSimpleType(V->getType());
+ MVT::SimpleValueType To = getLegalType(From);
+ Register VReg = getRegForValue(V);
+ if (VReg == 0)
+ return 0;
+ return signExtend(VReg, V, From, To);
+}
+
+unsigned WebAssemblyFastISel::getRegForPromotedValue(const Value *V,
+ bool IsSigned) {
+ return IsSigned ? getRegForSignedValue(V) : getRegForUnsignedValue(V);
+}
+
+unsigned WebAssemblyFastISel::notValue(unsigned Reg) {
+ assert(MRI.getRegClass(Reg) == &WebAssembly::I32RegClass);
+
+ Register NotReg = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
+ TII.get(WebAssembly::EQZ_I32), NotReg)
+ .addReg(Reg);
+ return NotReg;
+}
+
+unsigned WebAssemblyFastISel::copyValue(unsigned Reg) {
+ Register ResultReg = createResultReg(MRI.getRegClass(Reg));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::COPY),
+ ResultReg)
+ .addReg(Reg);
+ return ResultReg;
+}
+
+unsigned WebAssemblyFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
+ DenseMap<const AllocaInst *, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ Register ResultReg =
+ createResultReg(Subtarget->hasAddr64() ? &WebAssembly::I64RegClass
+ : &WebAssembly::I32RegClass);
+ unsigned Opc =
+ Subtarget->hasAddr64() ? WebAssembly::COPY_I64 : WebAssembly::COPY_I32;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
+ .addFrameIndex(SI->second);
+ return ResultReg;
+ }
+
+ return 0;
+}
+
+unsigned WebAssemblyFastISel::fastMaterializeConstant(const Constant *C) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
+ if (TLI.isPositionIndependent())
+ return 0;
+ if (GV->isThreadLocal())
+ return 0;
+ Register ResultReg =
+ createResultReg(Subtarget->hasAddr64() ? &WebAssembly::I64RegClass
+ : &WebAssembly::I32RegClass);
+ unsigned Opc = Subtarget->hasAddr64() ? WebAssembly::CONST_I64
+ : WebAssembly::CONST_I32;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
+ .addGlobalAddress(GV);
+ return ResultReg;
+ }
+
+ // Let target-independent code handle it.
+ return 0;
+}
+
+bool WebAssemblyFastISel::fastLowerArguments() {
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ const Function *F = FuncInfo.Fn;
+ if (F->isVarArg())
+ return false;
+
+ if (FuncInfo.Fn->getCallingConv() == CallingConv::Swift)
+ return false;
+
+ unsigned I = 0;
+ for (auto const &Arg : F->args()) {
+ const AttributeList &Attrs = F->getAttributes();
+ if (Attrs.hasParamAttr(I, Attribute::ByVal) ||
+ Attrs.hasParamAttr(I, Attribute::SwiftSelf) ||
+ Attrs.hasParamAttr(I, Attribute::SwiftError) ||
+ Attrs.hasParamAttr(I, Attribute::InAlloca) ||
+ Attrs.hasParamAttr(I, Attribute::Nest))
+ return false;
+
+ Type *ArgTy = Arg.getType();
+ if (ArgTy->isStructTy() || ArgTy->isArrayTy())
+ return false;
+ if (!Subtarget->hasSIMD128() && ArgTy->isVectorTy())
+ return false;
+
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ switch (getSimpleType(ArgTy)) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ Opc = WebAssembly::ARGUMENT_i32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::ARGUMENT_i64;
+ RC = &WebAssembly::I64RegClass;
+ break;
+ case MVT::f32:
+ Opc = WebAssembly::ARGUMENT_f32;
+ RC = &WebAssembly::F32RegClass;
+ break;
+ case MVT::f64:
+ Opc = WebAssembly::ARGUMENT_f64;
+ RC = &WebAssembly::F64RegClass;
+ break;
+ case MVT::v16i8:
+ Opc = WebAssembly::ARGUMENT_v16i8;
+ RC = &WebAssembly::V128RegClass;
+ break;
+ case MVT::v8i16:
+ Opc = WebAssembly::ARGUMENT_v8i16;
+ RC = &WebAssembly::V128RegClass;
+ break;
+ case MVT::v4i32:
+ Opc = WebAssembly::ARGUMENT_v4i32;
+ RC = &WebAssembly::V128RegClass;
+ break;
+ case MVT::v2i64:
+ Opc = WebAssembly::ARGUMENT_v2i64;
+ RC = &WebAssembly::V128RegClass;
+ break;
+ case MVT::v4f32:
+ Opc = WebAssembly::ARGUMENT_v4f32;
+ RC = &WebAssembly::V128RegClass;
+ break;
+ case MVT::v2f64:
+ Opc = WebAssembly::ARGUMENT_v2f64;
+ RC = &WebAssembly::V128RegClass;
+ break;
+ case MVT::funcref:
+ Opc = WebAssembly::ARGUMENT_funcref;
+ RC = &WebAssembly::FUNCREFRegClass;
+ break;
+ case MVT::externref:
+ Opc = WebAssembly::ARGUMENT_externref;
+ RC = &WebAssembly::EXTERNREFRegClass;
+ break;
+ default:
+ return false;
+ }
+ Register ResultReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
+ .addImm(I);
+ updateValueMap(&Arg, ResultReg);
+
+ ++I;
+ }
+
+ MRI.addLiveIn(WebAssembly::ARGUMENTS);
+
+ auto *MFI = MF->getInfo<WebAssemblyFunctionInfo>();
+ for (auto const &Arg : F->args()) {
+ MVT::SimpleValueType ArgTy = getLegalType(getSimpleType(Arg.getType()));
+ if (ArgTy == MVT::INVALID_SIMPLE_VALUE_TYPE) {
+ MFI->clearParamsAndResults();
+ return false;
+ }
+ MFI->addParam(ArgTy);
+ }
+
+ if (!F->getReturnType()->isVoidTy()) {
+ MVT::SimpleValueType RetTy =
+ getLegalType(getSimpleType(F->getReturnType()));
+ if (RetTy == MVT::INVALID_SIMPLE_VALUE_TYPE) {
+ MFI->clearParamsAndResults();
+ return false;
+ }
+ MFI->addResult(RetTy);
+ }
+
+ return true;
+}
+
+bool WebAssemblyFastISel::selectCall(const Instruction *I) {
+ const auto *Call = cast<CallInst>(I);
+
+ // TODO: Support tail calls in FastISel
+ if (Call->isMustTailCall() || Call->isInlineAsm() ||
+ Call->getFunctionType()->isVarArg())
+ return false;
+
+ Function *Func = Call->getCalledFunction();
+ if (Func && Func->isIntrinsic())
+ return false;
+
+ if (Call->getCallingConv() == CallingConv::Swift)
+ return false;
+
+ bool IsDirect = Func != nullptr;
+ if (!IsDirect && isa<ConstantExpr>(Call->getCalledOperand()))
+ return false;
+
+ FunctionType *FuncTy = Call->getFunctionType();
+ unsigned Opc = IsDirect ? WebAssembly::CALL : WebAssembly::CALL_INDIRECT;
+ bool IsVoid = FuncTy->getReturnType()->isVoidTy();
+ unsigned ResultReg;
+ if (!IsVoid) {
+ if (!Subtarget->hasSIMD128() && Call->getType()->isVectorTy())
+ return false;
+
+ MVT::SimpleValueType RetTy = getSimpleType(Call->getType());
+ switch (RetTy) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ ResultReg = createResultReg(&WebAssembly::I32RegClass);
+ break;
+ case MVT::i64:
+ ResultReg = createResultReg(&WebAssembly::I64RegClass);
+ break;
+ case MVT::f32:
+ ResultReg = createResultReg(&WebAssembly::F32RegClass);
+ break;
+ case MVT::f64:
+ ResultReg = createResultReg(&WebAssembly::F64RegClass);
+ break;
+ case MVT::v16i8:
+ ResultReg = createResultReg(&WebAssembly::V128RegClass);
+ break;
+ case MVT::v8i16:
+ ResultReg = createResultReg(&WebAssembly::V128RegClass);
+ break;
+ case MVT::v4i32:
+ ResultReg = createResultReg(&WebAssembly::V128RegClass);
+ break;
+ case MVT::v2i64:
+ ResultReg = createResultReg(&WebAssembly::V128RegClass);
+ break;
+ case MVT::v4f32:
+ ResultReg = createResultReg(&WebAssembly::V128RegClass);
+ break;
+ case MVT::v2f64:
+ ResultReg = createResultReg(&WebAssembly::V128RegClass);
+ break;
+ case MVT::funcref:
+ ResultReg = createResultReg(&WebAssembly::FUNCREFRegClass);
+ break;
+ case MVT::externref:
+ ResultReg = createResultReg(&WebAssembly::EXTERNREFRegClass);
+ break;
+ default:
+ return false;
+ }
+ }
+
+ SmallVector<unsigned, 8> Args;
+ for (unsigned I = 0, E = Call->arg_size(); I < E; ++I) {
+ Value *V = Call->getArgOperand(I);
+ MVT::SimpleValueType ArgTy = getSimpleType(V->getType());
+ if (ArgTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
+ return false;
+
+ const AttributeList &Attrs = Call->getAttributes();
+ if (Attrs.hasParamAttr(I, Attribute::ByVal) ||
+ Attrs.hasParamAttr(I, Attribute::SwiftSelf) ||
+ Attrs.hasParamAttr(I, Attribute::SwiftError) ||
+ Attrs.hasParamAttr(I, Attribute::InAlloca) ||
+ Attrs.hasParamAttr(I, Attribute::Nest))
+ return false;
+
+ unsigned Reg;
+
+ if (Attrs.hasParamAttr(I, Attribute::SExt))
+ Reg = getRegForSignedValue(V);
+ else if (Attrs.hasParamAttr(I, Attribute::ZExt))
+ Reg = getRegForUnsignedValue(V);
+ else
+ Reg = getRegForValue(V);
+
+ if (Reg == 0)
+ return false;
+
+ Args.push_back(Reg);
+ }
+
+ unsigned CalleeReg = 0;
+ if (!IsDirect) {
+ CalleeReg = getRegForValue(Call->getCalledOperand());
+ if (!CalleeReg)
+ return false;
+ }
+
+ auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc));
+
+ if (!IsVoid)
+ MIB.addReg(ResultReg, RegState::Define);
+
+ if (IsDirect) {
+ MIB.addGlobalAddress(Func);
+ } else {
+ // Placeholder for the type index.
+ MIB.addImm(0);
+ // The table into which this call_indirect indexes.
+ MCSymbolWasm *Table = WebAssembly::getOrCreateFunctionTableSymbol(
+ MF->getMMI().getContext(), Subtarget);
+ if (Subtarget->hasReferenceTypes()) {
+ MIB.addSym(Table);
+ } else {
+ // Otherwise for the MVP there is at most one table whose number is 0, but
+ // we can't write a table symbol or issue relocations. Instead we just
+ // ensure the table is live.
+ Table->setNoStrip();
+ MIB.addImm(0);
+ }
+ // See if we must truncate the function pointer.
+ // CALL_INDIRECT takes an i32, but in wasm64 we represent function pointers
+ // as 64-bit for uniformity with other pointer types.
+ // See also: WebAssemblyISelLowering.cpp: LowerCallResults
+ if (Subtarget->hasAddr64()) {
+ auto Wrap = BuildMI(*FuncInfo.MBB, std::prev(FuncInfo.InsertPt), MIMD,
+ TII.get(WebAssembly::I32_WRAP_I64));
+ Register Reg32 = createResultReg(&WebAssembly::I32RegClass);
+ Wrap.addReg(Reg32, RegState::Define);
+ Wrap.addReg(CalleeReg);
+ CalleeReg = Reg32;
+ }
+ }
+
+ for (unsigned ArgReg : Args)
+ MIB.addReg(ArgReg);
+
+ if (!IsDirect)
+ MIB.addReg(CalleeReg);
+
+ if (!IsVoid)
+ updateValueMap(Call, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectSelect(const Instruction *I) {
+ const auto *Select = cast<SelectInst>(I);
+
+ bool Not;
+ unsigned CondReg =
+ getRegForI1Value(Select->getCondition(), I->getParent(), Not);
+ if (CondReg == 0)
+ return false;
+
+ Register TrueReg = getRegForValue(Select->getTrueValue());
+ if (TrueReg == 0)
+ return false;
+
+ Register FalseReg = getRegForValue(Select->getFalseValue());
+ if (FalseReg == 0)
+ return false;
+
+ if (Not)
+ std::swap(TrueReg, FalseReg);
+
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ switch (getSimpleType(Select->getType())) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ Opc = WebAssembly::SELECT_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::SELECT_I64;
+ RC = &WebAssembly::I64RegClass;
+ break;
+ case MVT::f32:
+ Opc = WebAssembly::SELECT_F32;
+ RC = &WebAssembly::F32RegClass;
+ break;
+ case MVT::f64:
+ Opc = WebAssembly::SELECT_F64;
+ RC = &WebAssembly::F64RegClass;
+ break;
+ case MVT::funcref:
+ Opc = WebAssembly::SELECT_FUNCREF;
+ RC = &WebAssembly::FUNCREFRegClass;
+ break;
+ case MVT::externref:
+ Opc = WebAssembly::SELECT_EXTERNREF;
+ RC = &WebAssembly::EXTERNREFRegClass;
+ break;
+ default:
+ return false;
+ }
+
+ Register ResultReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
+ .addReg(TrueReg)
+ .addReg(FalseReg)
+ .addReg(CondReg);
+
+ updateValueMap(Select, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectTrunc(const Instruction *I) {
+ const auto *Trunc = cast<TruncInst>(I);
+
+ Register Reg = getRegForValue(Trunc->getOperand(0));
+ if (Reg == 0)
+ return false;
+
+ if (Trunc->getOperand(0)->getType()->isIntegerTy(64)) {
+ Register Result = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
+ TII.get(WebAssembly::I32_WRAP_I64), Result)
+ .addReg(Reg);
+ Reg = Result;
+ }
+
+ updateValueMap(Trunc, Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectZExt(const Instruction *I) {
+ const auto *ZExt = cast<ZExtInst>(I);
+
+ const Value *Op = ZExt->getOperand(0);
+ MVT::SimpleValueType From = getSimpleType(Op->getType());
+ MVT::SimpleValueType To = getLegalType(getSimpleType(ZExt->getType()));
+ Register In = getRegForValue(Op);
+ if (In == 0)
+ return false;
+ unsigned Reg = zeroExtend(In, Op, From, To);
+ if (Reg == 0)
+ return false;
+
+ updateValueMap(ZExt, Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectSExt(const Instruction *I) {
+ const auto *SExt = cast<SExtInst>(I);
+
+ const Value *Op = SExt->getOperand(0);
+ MVT::SimpleValueType From = getSimpleType(Op->getType());
+ MVT::SimpleValueType To = getLegalType(getSimpleType(SExt->getType()));
+ Register In = getRegForValue(Op);
+ if (In == 0)
+ return false;
+ unsigned Reg = signExtend(In, Op, From, To);
+ if (Reg == 0)
+ return false;
+
+ updateValueMap(SExt, Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectICmp(const Instruction *I) {
+ const auto *ICmp = cast<ICmpInst>(I);
+
+ bool I32 = getSimpleType(ICmp->getOperand(0)->getType()) != MVT::i64;
+ unsigned Opc;
+ bool IsSigned = false;
+ switch (ICmp->getPredicate()) {
+ case ICmpInst::ICMP_EQ:
+ Opc = I32 ? WebAssembly::EQ_I32 : WebAssembly::EQ_I64;
+ break;
+ case ICmpInst::ICMP_NE:
+ Opc = I32 ? WebAssembly::NE_I32 : WebAssembly::NE_I64;
+ break;
+ case ICmpInst::ICMP_UGT:
+ Opc = I32 ? WebAssembly::GT_U_I32 : WebAssembly::GT_U_I64;
+ break;
+ case ICmpInst::ICMP_UGE:
+ Opc = I32 ? WebAssembly::GE_U_I32 : WebAssembly::GE_U_I64;
+ break;
+ case ICmpInst::ICMP_ULT:
+ Opc = I32 ? WebAssembly::LT_U_I32 : WebAssembly::LT_U_I64;
+ break;
+ case ICmpInst::ICMP_ULE:
+ Opc = I32 ? WebAssembly::LE_U_I32 : WebAssembly::LE_U_I64;
+ break;
+ case ICmpInst::ICMP_SGT:
+ Opc = I32 ? WebAssembly::GT_S_I32 : WebAssembly::GT_S_I64;
+ IsSigned = true;
+ break;
+ case ICmpInst::ICMP_SGE:
+ Opc = I32 ? WebAssembly::GE_S_I32 : WebAssembly::GE_S_I64;
+ IsSigned = true;
+ break;
+ case ICmpInst::ICMP_SLT:
+ Opc = I32 ? WebAssembly::LT_S_I32 : WebAssembly::LT_S_I64;
+ IsSigned = true;
+ break;
+ case ICmpInst::ICMP_SLE:
+ Opc = I32 ? WebAssembly::LE_S_I32 : WebAssembly::LE_S_I64;
+ IsSigned = true;
+ break;
+ default:
+ return false;
+ }
+
+ unsigned LHS = getRegForPromotedValue(ICmp->getOperand(0), IsSigned);
+ if (LHS == 0)
+ return false;
+
+ unsigned RHS = getRegForPromotedValue(ICmp->getOperand(1), IsSigned);
+ if (RHS == 0)
+ return false;
+
+ Register ResultReg = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
+ .addReg(LHS)
+ .addReg(RHS);
+ updateValueMap(ICmp, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectFCmp(const Instruction *I) {
+ const auto *FCmp = cast<FCmpInst>(I);
+
+ Register LHS = getRegForValue(FCmp->getOperand(0));
+ if (LHS == 0)
+ return false;
+
+ Register RHS = getRegForValue(FCmp->getOperand(1));
+ if (RHS == 0)
+ return false;
+
+ bool F32 = getSimpleType(FCmp->getOperand(0)->getType()) != MVT::f64;
+ unsigned Opc;
+ bool Not = false;
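+  // WebAssembly only provides ordered comparisons (plus NE, which is already
+  // "unordered or unequal"), so the remaining unordered predicates are
+  // lowered as the negation of their ordered complement; e.g. UGT(x, y) is
+  // emitted as !OLE(x, y) via the Not flag below.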
+ switch (FCmp->getPredicate()) {
+ case FCmpInst::FCMP_OEQ:
+ Opc = F32 ? WebAssembly::EQ_F32 : WebAssembly::EQ_F64;
+ break;
+ case FCmpInst::FCMP_UNE:
+ Opc = F32 ? WebAssembly::NE_F32 : WebAssembly::NE_F64;
+ break;
+ case FCmpInst::FCMP_OGT:
+ Opc = F32 ? WebAssembly::GT_F32 : WebAssembly::GT_F64;
+ break;
+ case FCmpInst::FCMP_OGE:
+ Opc = F32 ? WebAssembly::GE_F32 : WebAssembly::GE_F64;
+ break;
+ case FCmpInst::FCMP_OLT:
+ Opc = F32 ? WebAssembly::LT_F32 : WebAssembly::LT_F64;
+ break;
+ case FCmpInst::FCMP_OLE:
+ Opc = F32 ? WebAssembly::LE_F32 : WebAssembly::LE_F64;
+ break;
+ case FCmpInst::FCMP_UGT:
+ Opc = F32 ? WebAssembly::LE_F32 : WebAssembly::LE_F64;
+ Not = true;
+ break;
+ case FCmpInst::FCMP_UGE:
+ Opc = F32 ? WebAssembly::LT_F32 : WebAssembly::LT_F64;
+ Not = true;
+ break;
+ case FCmpInst::FCMP_ULT:
+ Opc = F32 ? WebAssembly::GE_F32 : WebAssembly::GE_F64;
+ Not = true;
+ break;
+ case FCmpInst::FCMP_ULE:
+ Opc = F32 ? WebAssembly::GT_F32 : WebAssembly::GT_F64;
+ Not = true;
+ break;
+ default:
+ return false;
+ }
+
+ Register ResultReg = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
+ .addReg(LHS)
+ .addReg(RHS);
+
+ if (Not)
+ ResultReg = notValue(ResultReg);
+
+ updateValueMap(FCmp, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectBitCast(const Instruction *I) {
+ // Target-independent code can handle this, except it doesn't set the dead
+ // flag on the ARGUMENTS clobber, so we have to do that manually in order
+ // to satisfy code that expects this of isBitcast() instructions.
+ EVT VT = TLI.getValueType(DL, I->getOperand(0)->getType());
+ EVT RetVT = TLI.getValueType(DL, I->getType());
+ if (!VT.isSimple() || !RetVT.isSimple())
+ return false;
+
+ Register In = getRegForValue(I->getOperand(0));
+ if (In == 0)
+ return false;
+
+ if (VT == RetVT) {
+ // No-op bitcast.
+ updateValueMap(I, In);
+ return true;
+ }
+
+ Register Reg = fastEmit_ISD_BITCAST_r(VT.getSimpleVT(), RetVT.getSimpleVT(),
+ In);
+ if (!Reg)
+ return false;
+ MachineBasicBlock::iterator Iter = FuncInfo.InsertPt;
+ --Iter;
+ assert(Iter->isBitcast());
+ Iter->setPhysRegsDeadExcept(ArrayRef<Register>(), TRI);
+ updateValueMap(I, Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectLoad(const Instruction *I) {
+ const auto *Load = cast<LoadInst>(I);
+ if (Load->isAtomic())
+ return false;
+ if (!WebAssembly::isDefaultAddressSpace(Load->getPointerAddressSpace()))
+ return false;
+ if (!Subtarget->hasSIMD128() && Load->getType()->isVectorTy())
+ return false;
+
+ Address Addr;
+ if (!computeAddress(Load->getPointerOperand(), Addr))
+ return false;
+
+ // TODO: Fold a following sign-/zero-extend into the load instruction.
+
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ bool A64 = Subtarget->hasAddr64();
+ switch (getSimpleType(Load->getType())) {
+ case MVT::i1:
+ case MVT::i8:
+ Opc = A64 ? WebAssembly::LOAD8_U_I32_A64 : WebAssembly::LOAD8_U_I32_A32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i16:
+ Opc = A64 ? WebAssembly::LOAD16_U_I32_A64 : WebAssembly::LOAD16_U_I32_A32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i32:
+ Opc = A64 ? WebAssembly::LOAD_I32_A64 : WebAssembly::LOAD_I32_A32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i64:
+ Opc = A64 ? WebAssembly::LOAD_I64_A64 : WebAssembly::LOAD_I64_A32;
+ RC = &WebAssembly::I64RegClass;
+ break;
+ case MVT::f32:
+ Opc = A64 ? WebAssembly::LOAD_F32_A64 : WebAssembly::LOAD_F32_A32;
+ RC = &WebAssembly::F32RegClass;
+ break;
+ case MVT::f64:
+ Opc = A64 ? WebAssembly::LOAD_F64_A64 : WebAssembly::LOAD_F64_A32;
+ RC = &WebAssembly::F64RegClass;
+ break;
+ default:
+ return false;
+ }
+
+ materializeLoadStoreOperands(Addr);
+
+ Register ResultReg = createResultReg(RC);
+ auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
+ ResultReg);
+
+ addLoadStoreOperands(Addr, MIB, createMachineMemOperandFor(Load));
+
+ updateValueMap(Load, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectStore(const Instruction *I) {
+ const auto *Store = cast<StoreInst>(I);
+ if (Store->isAtomic())
+ return false;
+ if (!WebAssembly::isDefaultAddressSpace(Store->getPointerAddressSpace()))
+ return false;
+ if (!Subtarget->hasSIMD128() &&
+ Store->getValueOperand()->getType()->isVectorTy())
+ return false;
+
+ Address Addr;
+ if (!computeAddress(Store->getPointerOperand(), Addr))
+ return false;
+
+ unsigned Opc;
+ bool VTIsi1 = false;
+ bool A64 = Subtarget->hasAddr64();
+ switch (getSimpleType(Store->getValueOperand()->getType())) {
+ case MVT::i1:
+ VTIsi1 = true;
+ [[fallthrough]];
+ case MVT::i8:
+ Opc = A64 ? WebAssembly::STORE8_I32_A64 : WebAssembly::STORE8_I32_A32;
+ break;
+ case MVT::i16:
+ Opc = A64 ? WebAssembly::STORE16_I32_A64 : WebAssembly::STORE16_I32_A32;
+ break;
+ case MVT::i32:
+ Opc = A64 ? WebAssembly::STORE_I32_A64 : WebAssembly::STORE_I32_A32;
+ break;
+ case MVT::i64:
+ Opc = A64 ? WebAssembly::STORE_I64_A64 : WebAssembly::STORE_I64_A32;
+ break;
+ case MVT::f32:
+ Opc = A64 ? WebAssembly::STORE_F32_A64 : WebAssembly::STORE_F32_A32;
+ break;
+ case MVT::f64:
+ Opc = A64 ? WebAssembly::STORE_F64_A64 : WebAssembly::STORE_F64_A32;
+ break;
+ default:
+ return false;
+ }
+
+ materializeLoadStoreOperands(Addr);
+
+ Register ValueReg = getRegForValue(Store->getValueOperand());
+ if (ValueReg == 0)
+ return false;
+ if (VTIsi1)
+ ValueReg = maskI1Value(ValueReg, Store->getValueOperand());
+
+ auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc));
+
+ addLoadStoreOperands(Addr, MIB, createMachineMemOperandFor(Store));
+
+ MIB.addReg(ValueReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectBr(const Instruction *I) {
+ const auto *Br = cast<BranchInst>(I);
+ if (Br->isUnconditional()) {
+ MachineBasicBlock *MSucc = FuncInfo.MBBMap[Br->getSuccessor(0)];
+ fastEmitBranch(MSucc, Br->getDebugLoc());
+ return true;
+ }
+
+ MachineBasicBlock *TBB = FuncInfo.MBBMap[Br->getSuccessor(0)];
+ MachineBasicBlock *FBB = FuncInfo.MBBMap[Br->getSuccessor(1)];
+
+ bool Not;
+ unsigned CondReg = getRegForI1Value(Br->getCondition(), Br->getParent(), Not);
+ if (CondReg == 0)
+ return false;
+
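+  // BR_IF branches when its condition is non-zero; BR_UNLESS, a codegen-only
+  // pseudo lowered later in the backend, branches when it is zero. Using it
+  // here folds the inverted condition (Not) into the branch itself.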
+ unsigned Opc = WebAssembly::BR_IF;
+ if (Not)
+ Opc = WebAssembly::BR_UNLESS;
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
+ .addMBB(TBB)
+ .addReg(CondReg);
+
+ finishCondBranch(Br->getParent(), TBB, FBB);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectRet(const Instruction *I) {
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ const auto *Ret = cast<ReturnInst>(I);
+
+ if (Ret->getNumOperands() == 0) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
+ TII.get(WebAssembly::RETURN));
+ return true;
+ }
+
+ // TODO: support multiple return in FastISel
+ if (Ret->getNumOperands() > 1)
+ return false;
+
+ Value *RV = Ret->getOperand(0);
+ if (!Subtarget->hasSIMD128() && RV->getType()->isVectorTy())
+ return false;
+
+ switch (getSimpleType(RV->getType())) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ case MVT::i64:
+ case MVT::f32:
+ case MVT::f64:
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v4f32:
+ case MVT::v2f64:
+ case MVT::funcref:
+ case MVT::externref:
+ break;
+ default:
+ return false;
+ }
+
+ unsigned Reg;
+ if (FuncInfo.Fn->getAttributes().hasRetAttr(Attribute::SExt))
+ Reg = getRegForSignedValue(RV);
+ else if (FuncInfo.Fn->getAttributes().hasRetAttr(Attribute::ZExt))
+ Reg = getRegForUnsignedValue(RV);
+ else
+ Reg = getRegForValue(RV);
+
+ if (Reg == 0)
+ return false;
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
+ TII.get(WebAssembly::RETURN))
+ .addReg(Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectUnreachable(const Instruction *I) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
+ TII.get(WebAssembly::UNREACHABLE));
+ return true;
+}
+
+bool WebAssemblyFastISel::fastSelectInstruction(const Instruction *I) {
+ switch (I->getOpcode()) {
+ case Instruction::Call:
+ if (selectCall(I))
+ return true;
+ break;
+ case Instruction::Select:
+ return selectSelect(I);
+ case Instruction::Trunc:
+ return selectTrunc(I);
+ case Instruction::ZExt:
+ return selectZExt(I);
+ case Instruction::SExt:
+ return selectSExt(I);
+ case Instruction::ICmp:
+ return selectICmp(I);
+ case Instruction::FCmp:
+ return selectFCmp(I);
+ case Instruction::BitCast:
+ return selectBitCast(I);
+ case Instruction::Load:
+ return selectLoad(I);
+ case Instruction::Store:
+ return selectStore(I);
+ case Instruction::Br:
+ return selectBr(I);
+ case Instruction::Ret:
+ return selectRet(I);
+ case Instruction::Unreachable:
+ return selectUnreachable(I);
+ default:
+ break;
+ }
+
+ // Fall back to target-independent instruction selection.
+ return selectOperator(I, I->getOpcode());
+}
+
+FastISel *WebAssembly::createFastISel(FunctionLoweringInfo &FuncInfo,
+ const TargetLibraryInfo *LibInfo) {
+ return new WebAssemblyFastISel(FuncInfo, LibInfo);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFixBrTableDefaults.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFixBrTableDefaults.cpp
new file mode 100644
index 000000000000..495f19a7ccde
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFixBrTableDefaults.cpp
@@ -0,0 +1,199 @@
+//=- WebAssemblyFixBrTableDefaults.cpp - Fix br_table default branch targets -//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file This file implements a pass that eliminates redundant range checks
+/// guarding br_table instructions. Since jump tables on most targets cannot
+/// handle out-of-range indices, LLVM emits these checks before most jump
+/// tables. But br_table takes a default branch target as an argument, so it
+/// does not need the range checks.
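+///
+/// For illustration, a sketch in Wasm text (label names hypothetical) of the
+/// kind of guard a naive switch lowering emits:
+///
+///   i32.const 3
+///   i32.gt_u                         ;; index > 3?
+///   br_if $default                   ;; redundant range check
+///   br_table $a $b $c $d $default    ;; already falls back to $default
+///
+/// Since br_table branches to its final operand whenever the index is out of
+/// range, the preceding check and conditional branch can be removed.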
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Pass.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-fix-br-table-defaults"
+
+namespace {
+
+class WebAssemblyFixBrTableDefaults final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly Fix br_table Defaults";
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyFixBrTableDefaults() : MachineFunctionPass(ID) {}
+};
+
+char WebAssemblyFixBrTableDefaults::ID = 0;
+
+// Target-independent selection DAG assumes that it is OK to use PointerTy
+// as the index for a "switch", whereas Wasm so far only has a 32-bit br_table.
+// See e.g. SelectionDAGBuilder::visitJumpTableHeader.
+// As a result we have a 64-bit br_table in the tablegen defs, which does get
+// selected, and thus we get incorrect truncates/extensions on wasm64. Here we
+// fix that.
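+//
+// A sketch of the two rewrites below (pseudo machine IR, vreg names
+// hypothetical):
+//
+//   %idx64 = I64_EXTEND_U_I32 %idx32      ; redundant extend: drop it and
+//   BR_TABLE_I64 %idx64, ...              ; emit BR_TABLE_I32 %idx32, ...
+//
+//   BR_TABLE_I64 %idx64, ...              ; genuine i64 index: emit
+//                                         ; %idx32 = I32_WRAP_I64 %idx64
+//                                         ; BR_TABLE_I32 %idx32, ...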
+void fixBrTableIndex(MachineInstr &MI, MachineBasicBlock *MBB,
+ MachineFunction &MF) {
+ // Only happens on wasm64.
+ auto &WST = MF.getSubtarget<WebAssemblySubtarget>();
+ if (!WST.hasAddr64())
+ return;
+
+ assert(MI.getDesc().getOpcode() == WebAssembly::BR_TABLE_I64 &&
+ "64-bit br_table pseudo instruction expected");
+
+ // Find extension op, if any. It sits in the previous BB before the branch.
+ auto ExtMI = MF.getRegInfo().getVRegDef(MI.getOperand(0).getReg());
+ if (ExtMI->getOpcode() == WebAssembly::I64_EXTEND_U_I32) {
+ // Unnecessarily extending a 32-bit value to 64, remove it.
+ auto ExtDefReg = ExtMI->getOperand(0).getReg();
+ assert(MI.getOperand(0).getReg() == ExtDefReg);
+ MI.getOperand(0).setReg(ExtMI->getOperand(1).getReg());
+ if (MF.getRegInfo().use_nodbg_empty(ExtDefReg)) {
+ // No more users of extend, delete it.
+ ExtMI->eraseFromParent();
+ }
+ } else {
+ // Incoming 64-bit value that needs to be truncated.
+ Register Reg32 =
+ MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
+ BuildMI(*MBB, MI.getIterator(), MI.getDebugLoc(),
+ WST.getInstrInfo()->get(WebAssembly::I32_WRAP_I64), Reg32)
+ .addReg(MI.getOperand(0).getReg());
+ MI.getOperand(0).setReg(Reg32);
+ }
+
+ // We now have a 32-bit operand in all cases, so change the instruction
+ // accordingly.
+ MI.setDesc(WST.getInstrInfo()->get(WebAssembly::BR_TABLE_I32));
+}
+
+// `MI` is a br_table instruction with a dummy default target argument. This
+// function finds and adds the default target argument and removes any redundant
+// range check preceding the br_table. Returns the MBB that the br_table is
+// moved into so it can be removed from further consideration, or nullptr if the
+// br_table cannot be optimized.
+MachineBasicBlock *fixBrTableDefault(MachineInstr &MI, MachineBasicBlock *MBB,
+ MachineFunction &MF) {
+ // Get the header block, which contains the redundant range check.
+ assert(MBB->pred_size() == 1 && "Expected a single guard predecessor");
+ auto *HeaderMBB = *MBB->pred_begin();
+
+ // Find the conditional jump to the default target. If it doesn't exist, the
+ // default target is unreachable anyway, so we can keep the existing dummy
+ // target.
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
+ SmallVector<MachineOperand, 2> Cond;
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ bool Analyzed = !TII.analyzeBranch(*HeaderMBB, TBB, FBB, Cond);
+ assert(Analyzed && "Could not analyze jump header branches");
+ (void)Analyzed;
+
+  // Here are the possible outcomes. '_' is nullptr, 'J' is the jump table block
+  // (aka MBB), 'D' is the default block.
+ //
+ // TBB | FBB | Meaning
+ // _ | _ | No default block, header falls through to jump table
+ // J | _ | No default block, header jumps to the jump table
+ // D | _ | Header jumps to the default and falls through to the jump table
+ // D | J | Header jumps to the default and also to the jump table
+ if (TBB && TBB != MBB) {
+ assert((FBB == nullptr || FBB == MBB) &&
+ "Expected jump or fallthrough to br_table block");
+ assert(Cond.size() == 2 && Cond[1].isReg() && "Unexpected condition info");
+
+ // If the range check checks an i64 value, we cannot optimize it out because
+ // the i64 index is truncated to an i32, making values over 2^32
+ // indistinguishable from small numbers. There are also other strange edge
+ // cases that can arise in practice that we don't want to reason about, so
+ // conservatively only perform the optimization if the range check is the
+ // normal case of an i32.gt_u.
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ auto *RangeCheck = MRI.getVRegDef(Cond[1].getReg());
+ assert(RangeCheck != nullptr);
+ if (RangeCheck->getOpcode() != WebAssembly::GT_U_I32)
+ return nullptr;
+
+ // Remove the dummy default target and install the real one.
+ MI.removeOperand(MI.getNumExplicitOperands() - 1);
+ MI.addOperand(MF, MachineOperand::CreateMBB(TBB));
+ }
+
+ // Remove any branches from the header and splice in the jump table instead
+ TII.removeBranch(*HeaderMBB, nullptr);
+ HeaderMBB->splice(HeaderMBB->end(), MBB, MBB->begin(), MBB->end());
+
+ // Update CFG to skip the old jump table block. Remove shared successors
+ // before transferring to avoid duplicated successors.
+ HeaderMBB->removeSuccessor(MBB);
+ for (auto &Succ : MBB->successors())
+ if (HeaderMBB->isSuccessor(Succ))
+ HeaderMBB->removeSuccessor(Succ);
+ HeaderMBB->transferSuccessorsAndUpdatePHIs(MBB);
+
+ // Remove the old jump table block from the function
+ MF.erase(MBB);
+
+ return HeaderMBB;
+}
+
+bool WebAssemblyFixBrTableDefaults::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG(dbgs() << "********** Fixing br_table Default Targets **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ bool Changed = false;
+ SmallPtrSet<MachineBasicBlock *, 16> MBBSet;
+ for (auto &MBB : MF)
+ MBBSet.insert(&MBB);
+
+ while (!MBBSet.empty()) {
+ MachineBasicBlock *MBB = *MBBSet.begin();
+ MBBSet.erase(MBB);
+ for (auto &MI : *MBB) {
+ if (WebAssembly::isBrTable(MI.getOpcode())) {
+ fixBrTableIndex(MI, MBB, MF);
+ auto *Fixed = fixBrTableDefault(MI, MBB, MF);
+ if (Fixed != nullptr) {
+ MBBSet.erase(Fixed);
+ Changed = true;
+ }
+ break;
+ }
+ }
+ }
+
+ if (Changed) {
+ // We rewrote part of the function; recompute relevant things.
+ MF.RenumberBlocks();
+ return true;
+ }
+
+ return false;
+}
+
+} // end anonymous namespace
+
+INITIALIZE_PASS(WebAssemblyFixBrTableDefaults, DEBUG_TYPE,
+ "Removes range checks and sets br_table default targets", false,
+ false)
+
+FunctionPass *llvm::createWebAssemblyFixBrTableDefaults() {
+ return new WebAssemblyFixBrTableDefaults();
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp
new file mode 100644
index 000000000000..81a450dbc0d9
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp
@@ -0,0 +1,303 @@
+//===-- WebAssemblyFixFunctionBitcasts.cpp - Fix function bitcasts --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Fix bitcasted functions.
+///
+/// WebAssembly requires caller and callee signatures to match; in LLVM,
+/// however, some amount of slop is vaguely permitted. We detect mismatches by
+/// looking for bitcasts of functions and rewrite them to use wrapper functions
+/// instead.
+///
+/// This doesn't catch all cases, such as when a function's address is taken in
+/// one place and cast in another, but it works for many common cases.
+///
+/// Note that LLVM already optimizes away function bitcasts in common cases by
+/// dropping arguments as needed, so this pass only ends up getting used in less
+/// common cases.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssembly.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-fix-function-bitcasts"
+
+namespace {
+class FixFunctionBitcasts final : public ModulePass {
+ StringRef getPassName() const override {
+ return "WebAssembly Fix Function Bitcasts";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ ModulePass::getAnalysisUsage(AU);
+ }
+
+ bool runOnModule(Module &M) override;
+
+public:
+ static char ID;
+ FixFunctionBitcasts() : ModulePass(ID) {}
+};
+} // End anonymous namespace
+
+char FixFunctionBitcasts::ID = 0;
+INITIALIZE_PASS(FixFunctionBitcasts, DEBUG_TYPE,
+ "Fix mismatching bitcasts for WebAssembly", false, false)
+
+ModulePass *llvm::createWebAssemblyFixFunctionBitcasts() {
+ return new FixFunctionBitcasts();
+}
+
+// Recursively descend the def-use lists from V to find non-bitcast users of
+// bitcasts of V.
+static void findUses(Value *V, Function &F,
+ SmallVectorImpl<std::pair<CallBase *, Function *>> &Uses) {
+ for (User *U : V->users()) {
+ if (auto *BC = dyn_cast<BitCastOperator>(U))
+ findUses(BC, F, Uses);
+ else if (auto *A = dyn_cast<GlobalAlias>(U))
+ findUses(A, F, Uses);
+ else if (auto *CB = dyn_cast<CallBase>(U)) {
+ Value *Callee = CB->getCalledOperand();
+ if (Callee != V)
+ // Skip calls where the function isn't the callee
+ continue;
+ if (CB->getFunctionType() == F.getValueType())
+ // Skip uses that are immediately called
+ continue;
+ Uses.push_back(std::make_pair(CB, &F));
+ }
+ }
+}
+
+// Create a wrapper function with type Ty that calls F (which may have a
+// different type). Attempt to support common bitcasted function idioms:
+// - Call with more arguments than needed: arguments are dropped
+// - Call with fewer arguments than needed: arguments are filled in with undef
+// - Return value is not needed: drop it
+// - Return value needed but not present: supply an undef
+//
+// If all the argument types are trivially castable to one another (e.g.
+// i32 vs. a pointer type) then we don't create a wrapper at all (we return
+// nullptr instead).
+//
+// If there is a type mismatch that we know would result in an invalid wasm
+// module then we generate a wrapper that contains unreachable (i.e. aborts at
+// runtime). Such programs are deep into undefined-behaviour territory,
+// but we choose to fail at runtime rather than generate an invalid module
+// or fail at compile time. The reason we delay the error is that we want
+// to support CMake, which expects to be able to compile and link programs
+// that refer to functions with entirely incorrect signatures (this is how
+// CMake detects the existence of a function in a toolchain).
+//
+// For bitcasts that involve struct types we don't know at this stage if they
+// would be equivalent at the wasm level and so we can't know if we need to
+// generate a wrapper.
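+//
+// A sketch of the common case (IR types and names hypothetical): calling
+//
+//   declare i32 @f(i32, i32)
+//
+// through a bitcast of type void (i32) produces a wrapper roughly like
+//
+//   define private void @f_bitcast(i32 %a) {
+//     %r = call i32 @f(i32 %a, i32 undef)   ; missing argument filled in
+//     ret void                              ; unused return value dropped
+//   }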
+static Function *createWrapper(Function *F, FunctionType *Ty) {
+ Module *M = F->getParent();
+
+ Function *Wrapper = Function::Create(Ty, Function::PrivateLinkage,
+ F->getName() + "_bitcast", M);
+ BasicBlock *BB = BasicBlock::Create(M->getContext(), "body", Wrapper);
+ const DataLayout &DL = BB->getModule()->getDataLayout();
+
+ // Determine what arguments to pass.
+ SmallVector<Value *, 4> Args;
+ Function::arg_iterator AI = Wrapper->arg_begin();
+ Function::arg_iterator AE = Wrapper->arg_end();
+ FunctionType::param_iterator PI = F->getFunctionType()->param_begin();
+ FunctionType::param_iterator PE = F->getFunctionType()->param_end();
+ bool TypeMismatch = false;
+ bool WrapperNeeded = false;
+
+ Type *ExpectedRtnType = F->getFunctionType()->getReturnType();
+ Type *RtnType = Ty->getReturnType();
+
+ if ((F->getFunctionType()->getNumParams() != Ty->getNumParams()) ||
+ (F->getFunctionType()->isVarArg() != Ty->isVarArg()) ||
+ (ExpectedRtnType != RtnType))
+ WrapperNeeded = true;
+
+ for (; AI != AE && PI != PE; ++AI, ++PI) {
+ Type *ArgType = AI->getType();
+ Type *ParamType = *PI;
+
+ if (ArgType == ParamType) {
+ Args.push_back(&*AI);
+ } else {
+ if (CastInst::isBitOrNoopPointerCastable(ArgType, ParamType, DL)) {
+ Instruction *PtrCast =
+ CastInst::CreateBitOrPointerCast(AI, ParamType, "cast");
+ PtrCast->insertInto(BB, BB->end());
+ Args.push_back(PtrCast);
+ } else if (ArgType->isStructTy() || ParamType->isStructTy()) {
+ LLVM_DEBUG(dbgs() << "createWrapper: struct param type in bitcast: "
+ << F->getName() << "\n");
+ WrapperNeeded = false;
+ } else {
+ LLVM_DEBUG(dbgs() << "createWrapper: arg type mismatch calling: "
+ << F->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Arg[" << Args.size() << "] Expected: "
+ << *ParamType << " Got: " << *ArgType << "\n");
+ TypeMismatch = true;
+ break;
+ }
+ }
+ }
+
+ if (WrapperNeeded && !TypeMismatch) {
+ for (; PI != PE; ++PI)
+ Args.push_back(UndefValue::get(*PI));
+ if (F->isVarArg())
+ for (; AI != AE; ++AI)
+ Args.push_back(&*AI);
+
+ CallInst *Call = CallInst::Create(F, Args, "", BB);
+
+ Type *ExpectedRtnType = F->getFunctionType()->getReturnType();
+ Type *RtnType = Ty->getReturnType();
+ // Determine what value to return.
+ if (RtnType->isVoidTy()) {
+ ReturnInst::Create(M->getContext(), BB);
+ } else if (ExpectedRtnType->isVoidTy()) {
+ LLVM_DEBUG(dbgs() << "Creating dummy return: " << *RtnType << "\n");
+ ReturnInst::Create(M->getContext(), UndefValue::get(RtnType), BB);
+ } else if (RtnType == ExpectedRtnType) {
+ ReturnInst::Create(M->getContext(), Call, BB);
+ } else if (CastInst::isBitOrNoopPointerCastable(ExpectedRtnType, RtnType,
+ DL)) {
+ Instruction *Cast =
+ CastInst::CreateBitOrPointerCast(Call, RtnType, "cast");
+ Cast->insertInto(BB, BB->end());
+ ReturnInst::Create(M->getContext(), Cast, BB);
+ } else if (RtnType->isStructTy() || ExpectedRtnType->isStructTy()) {
+ LLVM_DEBUG(dbgs() << "createWrapper: struct return type in bitcast: "
+ << F->getName() << "\n");
+ WrapperNeeded = false;
+ } else {
+ LLVM_DEBUG(dbgs() << "createWrapper: return type mismatch calling: "
+ << F->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Expected: " << *ExpectedRtnType
+ << " Got: " << *RtnType << "\n");
+ TypeMismatch = true;
+ }
+ }
+
+ if (TypeMismatch) {
+ // Create a new wrapper that simply contains `unreachable`.
+ Wrapper->eraseFromParent();
+ Wrapper = Function::Create(Ty, Function::PrivateLinkage,
+ F->getName() + "_bitcast_invalid", M);
+ BasicBlock *BB = BasicBlock::Create(M->getContext(), "body", Wrapper);
+ new UnreachableInst(M->getContext(), BB);
+ Wrapper->setName(F->getName() + "_bitcast_invalid");
+ } else if (!WrapperNeeded) {
+ LLVM_DEBUG(dbgs() << "createWrapper: no wrapper needed: " << F->getName()
+ << "\n");
+ Wrapper->eraseFromParent();
+ return nullptr;
+ }
+ LLVM_DEBUG(dbgs() << "createWrapper: " << F->getName() << "\n");
+ return Wrapper;
+}
+
+// Test whether a main function with type FuncTy should be rewritten to have
+// type MainTy.
+static bool shouldFixMainFunction(FunctionType *FuncTy, FunctionType *MainTy) {
+ // Only fix the main function if it's the standard zero-arg form. That way,
+ // the standard cases will work as expected, and users will see signature
+ // mismatches from the linker for non-standard cases.
+ return FuncTy->getReturnType() == MainTy->getReturnType() &&
+ FuncTy->getNumParams() == 0 &&
+ !FuncTy->isVarArg();
+}
+
+bool FixFunctionBitcasts::runOnModule(Module &M) {
+ LLVM_DEBUG(dbgs() << "********** Fix Function Bitcasts **********\n");
+
+ Function *Main = nullptr;
+ CallInst *CallMain = nullptr;
+ SmallVector<std::pair<CallBase *, Function *>, 0> Uses;
+
+ // Collect all the places that need wrappers.
+ for (Function &F : M) {
+    // Skip the fix when the function is swiftcc, because swiftcc allows
+    // bitcast type differences for swiftself and swifterror.
+ if (F.getCallingConv() == CallingConv::Swift)
+ continue;
+ findUses(&F, F, Uses);
+
+ // If we have a "main" function, and its type isn't
+ // "int main(int argc, char *argv[])", create an artificial call with it
+ // bitcasted to that type so that we generate a wrapper for it, so that
+ // the C runtime can call it.
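+    // Sketch of the net effect: a C `int main(void)` is renamed
+    // __original_main, and a generated `main(int, char **)` wrapper that
+    // ignores its arguments calls it, so startup code can call `main` with
+    // the standard signature.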
+ if (F.getName() == "main") {
+ Main = &F;
+ LLVMContext &C = M.getContext();
+ Type *MainArgTys[] = {Type::getInt32Ty(C), PointerType::get(C, 0)};
+ FunctionType *MainTy = FunctionType::get(Type::getInt32Ty(C), MainArgTys,
+ /*isVarArg=*/false);
+ if (shouldFixMainFunction(F.getFunctionType(), MainTy)) {
+ LLVM_DEBUG(dbgs() << "Found `main` function with incorrect type: "
+ << *F.getFunctionType() << "\n");
+ Value *Args[] = {UndefValue::get(MainArgTys[0]),
+ UndefValue::get(MainArgTys[1])};
+ CallMain = CallInst::Create(MainTy, Main, Args, "call_main");
+ Uses.push_back(std::make_pair(CallMain, &F));
+ }
+ }
+ }
+
+ DenseMap<std::pair<Function *, FunctionType *>, Function *> Wrappers;
+
+ for (auto &UseFunc : Uses) {
+ CallBase *CB = UseFunc.first;
+ Function *F = UseFunc.second;
+ FunctionType *Ty = CB->getFunctionType();
+
+ auto Pair = Wrappers.insert(std::make_pair(std::make_pair(F, Ty), nullptr));
+ if (Pair.second)
+ Pair.first->second = createWrapper(F, Ty);
+
+ Function *Wrapper = Pair.first->second;
+ if (!Wrapper)
+ continue;
+
+ CB->setCalledOperand(Wrapper);
+ }
+
+ // If we created a wrapper for main, rename the wrapper so that it's the
+ // one that gets called from startup.
+ if (CallMain) {
+ Main->setName("__original_main");
+ auto *MainWrapper =
+ cast<Function>(CallMain->getCalledOperand()->stripPointerCasts());
+ delete CallMain;
+ if (Main->isDeclaration()) {
+ // The wrapper is not needed in this case as we don't need to export
+ // it to anyone else.
+ MainWrapper->eraseFromParent();
+ } else {
+ // Otherwise give the wrapper the same linkage as the original main
+ // function, so that it can be called from the same places.
+ MainWrapper->setName("main");
+ MainWrapper->setLinkage(Main->getLinkage());
+ MainWrapper->setVisibility(Main->getVisibility());
+ }
+ }
+
+ return true;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp
new file mode 100644
index 000000000000..6c46673c36bf
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp
@@ -0,0 +1,560 @@
+//=- WebAssemblyFixIrreducibleControlFlow.cpp - Fix irreducible control flow -//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements a pass that removes irreducible control flow.
+/// Irreducible control flow means multiple-entry loops, which this pass
+/// transforms to have a single entry.
+///
+/// Note that LLVM has a generic pass that lowers irreducible control flow, but
+/// it linearizes control flow, turning diamonds into two triangles, which is
+/// both unnecessary and undesirable for WebAssembly.
+///
+/// The big picture: We recursively process each "region", defined as a group
+/// of blocks with a single entry and no branches back to that entry. A region
+/// may be the entire function body, or the inner part of a loop, i.e., the
+/// loop's body without branches back to the loop entry. In each region we fix
+/// up multi-entry loops by adding a new block that can dispatch to each of the
+/// loop entries, based on the value of a label "helper" variable, and we
+/// replace direct branches to the entries with assignments to the label
+/// variable and a branch to the dispatch block. Then the dispatch block is the
+/// single entry in the loop containing the previous multiple entries. After
+/// ensuring all the loops in a region are reducible, we recurse into them. The
+/// total time complexity of this pass is:
+///
+/// O(NumBlocks * NumNestedLoops * NumIrreducibleLoops +
+/// NumLoops * NumLoops)
+///
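+/// For example (a sketch, block names hypothetical), a loop entered at both A
+/// and B from outside becomes:
+///
+///   before:  P1 -> A,  P2 -> B,  A <-> B
+///   after:   P1 -> (label = 0) -> Dispatch
+///            P2 -> (label = 1) -> Dispatch
+///            Dispatch -(br_table label)-> A or B
+///
+/// so Dispatch is the loop's single entry.
+///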
+/// This pass is similar to what the Relooper [1] does. Both identify looping
+/// code that requires multiple entries, and resolve it in a similar way (in
+/// Relooper terminology, we implement a Multiple shape in a Loop shape). Note
+/// also that like the Relooper, we implement a "minimal" intervention: we only
+/// use the "label" helper for the blocks we absolutely must and no others. We
+/// also prioritize code size and do not duplicate code in order to resolve
+/// irreducibility. The graph algorithms for finding loops and entries and so
+/// forth are also similar to the Relooper. The main differences between this
+/// pass and the Relooper are:
+///
+/// * We just care about irreducibility, so we just look at loops.
+/// * The Relooper emits structured control flow (with ifs etc.), while we
+/// emit a CFG.
+///
+/// [1] Alon Zakai. 2011. Emscripten: an LLVM-to-JavaScript compiler. In
+/// Proceedings of the ACM international conference companion on Object oriented
+/// programming systems languages and applications companion (SPLASH '11). ACM,
+/// New York, NY, USA, 301-312. DOI=10.1145/2048147.2048224
+/// http://doi.acm.org/10.1145/2048147.2048224
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Support/Debug.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-fix-irreducible-control-flow"
+
+namespace {
+
+using BlockVector = SmallVector<MachineBasicBlock *, 4>;
+using BlockSet = SmallPtrSet<MachineBasicBlock *, 4>;
+
+static BlockVector getSortedEntries(const BlockSet &Entries) {
+ BlockVector SortedEntries(Entries.begin(), Entries.end());
+ llvm::sort(SortedEntries,
+ [](const MachineBasicBlock *A, const MachineBasicBlock *B) {
+ auto ANum = A->getNumber();
+ auto BNum = B->getNumber();
+ return ANum < BNum;
+ });
+ return SortedEntries;
+}
+
+// Calculates reachability in a region. Ignores branches to blocks outside of
+// the region, and ignores branches to the region entry (for the case where
+// the region is the inner part of a loop).
+class ReachabilityGraph {
+public:
+ ReachabilityGraph(MachineBasicBlock *Entry, const BlockSet &Blocks)
+ : Entry(Entry), Blocks(Blocks) {
+#ifndef NDEBUG
+ // The region must have a single entry.
+ for (auto *MBB : Blocks) {
+ if (MBB != Entry) {
+ for (auto *Pred : MBB->predecessors()) {
+ assert(inRegion(Pred));
+ }
+ }
+ }
+#endif
+ calculate();
+ }
+
+ bool canReach(MachineBasicBlock *From, MachineBasicBlock *To) const {
+ assert(inRegion(From) && inRegion(To));
+ auto I = Reachable.find(From);
+ if (I == Reachable.end())
+ return false;
+ return I->second.count(To);
+ }
+
+ // "Loopers" are blocks that are in a loop. We detect these by finding blocks
+ // that can reach themselves.
+ const BlockSet &getLoopers() const { return Loopers; }
+
+ // Get all blocks that are loop entries.
+ const BlockSet &getLoopEntries() const { return LoopEntries; }
+
+ // Get all blocks that enter a particular loop from outside.
+ const BlockSet &getLoopEnterers(MachineBasicBlock *LoopEntry) const {
+ assert(inRegion(LoopEntry));
+ auto I = LoopEnterers.find(LoopEntry);
+ assert(I != LoopEnterers.end());
+ return I->second;
+ }
+
+private:
+ MachineBasicBlock *Entry;
+ const BlockSet &Blocks;
+
+ BlockSet Loopers, LoopEntries;
+ DenseMap<MachineBasicBlock *, BlockSet> LoopEnterers;
+
+ bool inRegion(MachineBasicBlock *MBB) const { return Blocks.count(MBB); }
+
+ // Maps a block to all the other blocks it can reach.
+ DenseMap<MachineBasicBlock *, BlockSet> Reachable;
+
+ void calculate() {
+ // Reachability computation work list. Contains pairs of recent additions
+ // (A, B) where we just added a link A => B.
+ using BlockPair = std::pair<MachineBasicBlock *, MachineBasicBlock *>;
+ SmallVector<BlockPair, 4> WorkList;
+
+ // Add all relevant direct branches.
+ for (auto *MBB : Blocks) {
+ for (auto *Succ : MBB->successors()) {
+ if (Succ != Entry && inRegion(Succ)) {
+ Reachable[MBB].insert(Succ);
+ WorkList.emplace_back(MBB, Succ);
+ }
+ }
+ }
+
+ while (!WorkList.empty()) {
+ MachineBasicBlock *MBB, *Succ;
+ std::tie(MBB, Succ) = WorkList.pop_back_val();
+ assert(inRegion(MBB) && Succ != Entry && inRegion(Succ));
+ if (MBB != Entry) {
+ // We recently added MBB => Succ, and that means we may have enabled
+ // Pred => MBB => Succ.
+ for (auto *Pred : MBB->predecessors()) {
+ if (Reachable[Pred].insert(Succ).second) {
+ WorkList.emplace_back(Pred, Succ);
+ }
+ }
+ }
+ }
+
+ // Blocks that can return to themselves are in a loop.
+ for (auto *MBB : Blocks) {
+ if (canReach(MBB, MBB)) {
+ Loopers.insert(MBB);
+ }
+ }
+ assert(!Loopers.count(Entry));
+
+    // Find the loop entries - loopers reachable from blocks not in that loop -
+    // and the outside blocks that reach them, the "loop enterers".
+ for (auto *Looper : Loopers) {
+ for (auto *Pred : Looper->predecessors()) {
+ // Pred can reach Looper. If Looper can reach Pred, it is in the loop;
+ // otherwise, it is a block that enters into the loop.
+ if (!canReach(Looper, Pred)) {
+ LoopEntries.insert(Looper);
+ LoopEnterers[Looper].insert(Pred);
+ }
+ }
+ }
+ }
+};
+
+// Finds the blocks in a single-entry loop, given the loop entry and the
+// list of blocks that enter the loop.
+class LoopBlocks {
+public:
+ LoopBlocks(MachineBasicBlock *Entry, const BlockSet &Enterers)
+ : Entry(Entry), Enterers(Enterers) {
+ calculate();
+ }
+
+ BlockSet &getBlocks() { return Blocks; }
+
+private:
+ MachineBasicBlock *Entry;
+ const BlockSet &Enterers;
+
+ BlockSet Blocks;
+
+ void calculate() {
+ // Going backwards from the loop entry, if we ignore the blocks entering
+ // from outside, we will traverse all the blocks in the loop.
+ BlockVector WorkList;
+ BlockSet AddedToWorkList;
+ Blocks.insert(Entry);
+ for (auto *Pred : Entry->predecessors()) {
+ if (!Enterers.count(Pred)) {
+ WorkList.push_back(Pred);
+ AddedToWorkList.insert(Pred);
+ }
+ }
+
+ while (!WorkList.empty()) {
+ auto *MBB = WorkList.pop_back_val();
+ assert(!Enterers.count(MBB));
+ if (Blocks.insert(MBB).second) {
+ for (auto *Pred : MBB->predecessors()) {
+ if (AddedToWorkList.insert(Pred).second)
+ WorkList.push_back(Pred);
+ }
+ }
+ }
+ }
+};
+
+class WebAssemblyFixIrreducibleControlFlow final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly Fix Irreducible Control Flow";
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ bool processRegion(MachineBasicBlock *Entry, BlockSet &Blocks,
+ MachineFunction &MF);
+
+ void makeSingleEntryLoop(BlockSet &Entries, BlockSet &Blocks,
+ MachineFunction &MF, const ReachabilityGraph &Graph);
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyFixIrreducibleControlFlow() : MachineFunctionPass(ID) {}
+};
+
+bool WebAssemblyFixIrreducibleControlFlow::processRegion(
+ MachineBasicBlock *Entry, BlockSet &Blocks, MachineFunction &MF) {
+ bool Changed = false;
+ // Remove irreducibility before processing child loops, which may take
+ // multiple iterations.
+ while (true) {
+ ReachabilityGraph Graph(Entry, Blocks);
+
+ bool FoundIrreducibility = false;
+
+ for (auto *LoopEntry : getSortedEntries(Graph.getLoopEntries())) {
+ // Find mutual entries - all entries which can reach this one, and
+ // are reached by it (that always includes LoopEntry itself). All mutual
+ // entries must be in the same loop, so if we have more than one, then we
+ // have irreducible control flow.
+ //
+ // (Note that we need to sort the entries here, as otherwise the order can
+ // matter: being mutual is a symmetric relationship, and each set of
+ // mutuals will be handled properly no matter which we see first. However,
+ // there can be multiple disjoint sets of mutuals, and which we process
+ // first changes the output.)
+ //
+ // Note that irreducibility may involve inner loops, e.g. imagine A
+ // starts one loop, and it has B inside it which starts an inner loop.
+ // If we add a branch from all the way on the outside to B, then in a
+ // sense B is no longer an "inner" loop, semantically speaking. We will
+      // fix that irreducibility by adding a block that dispatches to
+      // either A or B, so B will no longer be an inner loop in our output.
+ // (A fancier approach might try to keep it as such.)
+ //
+ // Note that we still need to recurse into inner loops later, to handle
+ // the case where the irreducibility is entirely nested - we would not
+ // be able to identify that at this point, since the enclosing loop is
+      // a group of blocks all of which can reach each other. (We'll see the
+ // irreducibility after removing branches to the top of that enclosing
+ // loop.)
+ BlockSet MutualLoopEntries;
+ MutualLoopEntries.insert(LoopEntry);
+ for (auto *OtherLoopEntry : Graph.getLoopEntries()) {
+ if (OtherLoopEntry != LoopEntry &&
+ Graph.canReach(LoopEntry, OtherLoopEntry) &&
+ Graph.canReach(OtherLoopEntry, LoopEntry)) {
+ MutualLoopEntries.insert(OtherLoopEntry);
+ }
+ }
+
+ if (MutualLoopEntries.size() > 1) {
+ makeSingleEntryLoop(MutualLoopEntries, Blocks, MF, Graph);
+ FoundIrreducibility = true;
+ Changed = true;
+ break;
+ }
+ }
+ // Only go on to actually process the inner loops when we are done
+ // removing irreducible control flow and changing the graph. Modifying
+ // the graph as we go is possible, and that might let us avoid looking at
+ // the already-fixed loops again if we are careful, but all that is
+ // complex and bug-prone. Since irreducible loops are rare, just starting
+ // another iteration is best.
+ if (FoundIrreducibility) {
+ continue;
+ }
+
+ for (auto *LoopEntry : Graph.getLoopEntries()) {
+ LoopBlocks InnerBlocks(LoopEntry, Graph.getLoopEnterers(LoopEntry));
+ // Each of these calls to processRegion may change the graph, but are
+ // guaranteed not to interfere with each other. The only changes we make
+ // to the graph are to add blocks on the way to a loop entry. As the
+ // loops are disjoint, that means we may only alter branches that exit
+ // another loop, which are ignored when recursing into that other loop
+ // anyhow.
+ if (processRegion(LoopEntry, InnerBlocks.getBlocks(), MF)) {
+ Changed = true;
+ }
+ }
+
+ return Changed;
+ }
+}
+
+// Given a set of entries to a single loop, create a single entry for that
+// loop by creating a dispatch block for them, routing control flow using
+// a helper variable. Also updates Blocks with any new blocks created, so
+// that we properly track all the blocks in the region. But this does not update
+// ReachabilityGraph; that is updated in the caller of this function as
+// needed.
+void WebAssemblyFixIrreducibleControlFlow::makeSingleEntryLoop(
+ BlockSet &Entries, BlockSet &Blocks, MachineFunction &MF,
+ const ReachabilityGraph &Graph) {
+ assert(Entries.size() >= 2);
+
+ // Sort the entries to ensure a deterministic build.
+ BlockVector SortedEntries = getSortedEntries(Entries);
+
+#ifndef NDEBUG
+ for (auto *Block : SortedEntries)
+ assert(Block->getNumber() != -1);
+ if (SortedEntries.size() > 1) {
+ for (auto I = SortedEntries.begin(), E = SortedEntries.end() - 1; I != E;
+ ++I) {
+ auto ANum = (*I)->getNumber();
+ auto BNum = (*(std::next(I)))->getNumber();
+ assert(ANum != BNum);
+ }
+ }
+#endif
+
+ // Create a dispatch block which will contain a jump table to the entries.
+ MachineBasicBlock *Dispatch = MF.CreateMachineBasicBlock();
+ MF.insert(MF.end(), Dispatch);
+ Blocks.insert(Dispatch);
+
+ // Add the jump table.
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ MachineInstrBuilder MIB =
+ BuildMI(Dispatch, DebugLoc(), TII.get(WebAssembly::BR_TABLE_I32));
+
+ // Add the register which will be used to tell the jump table which block to
+ // jump to.
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ Register Reg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
+ MIB.addReg(Reg);
+
+ // Compute the indices in the superheader, one for each bad block, and
+ // add them as successors.
+ DenseMap<MachineBasicBlock *, unsigned> Indices;
+ for (auto *Entry : SortedEntries) {
+ auto Pair = Indices.insert(std::make_pair(Entry, 0));
+ assert(Pair.second);
+
+ unsigned Index = MIB.getInstr()->getNumExplicitOperands() - 1;
+ Pair.first->second = Index;
+
+ MIB.addMBB(Entry);
+ Dispatch->addSuccessor(Entry);
+ }
+
+ // Rewrite the problematic successors for every block that wants to reach
+ // the bad blocks. For simplicity, we just introduce a new block for every
+ // edge we need to rewrite. (Fancier things are possible.)
+
+ BlockVector AllPreds;
+ for (auto *Entry : SortedEntries) {
+ for (auto *Pred : Entry->predecessors()) {
+ if (Pred != Dispatch) {
+ AllPreds.push_back(Pred);
+ }
+ }
+ }
+
+ // This set stores predecessors within this loop.
+ DenseSet<MachineBasicBlock *> InLoop;
+ for (auto *Pred : AllPreds) {
+ for (auto *Entry : Pred->successors()) {
+ if (!Entries.count(Entry))
+ continue;
+ if (Graph.canReach(Entry, Pred)) {
+ InLoop.insert(Pred);
+ break;
+ }
+ }
+ }
+
+ // Record if each entry has a layout predecessor. This map stores
+ // <<loop entry, Predecessor is within the loop?>, layout predecessor>
+ DenseMap<PointerIntPair<MachineBasicBlock *, 1, bool>, MachineBasicBlock *>
+ EntryToLayoutPred;
+ for (auto *Pred : AllPreds) {
+ bool PredInLoop = InLoop.count(Pred);
+ for (auto *Entry : Pred->successors())
+ if (Entries.count(Entry) && Pred->isLayoutSuccessor(Entry))
+ EntryToLayoutPred[{Entry, PredInLoop}] = Pred;
+ }
+
+ // We need to create at most two routing blocks per entry: one for
+ // predecessors outside the loop and one for predecessors inside the loop.
+ // This map stores
+ // <<loop entry, Predecessor is within the loop?>, routing block>
+ DenseMap<PointerIntPair<MachineBasicBlock *, 1, bool>, MachineBasicBlock *>
+ Map;
+ for (auto *Pred : AllPreds) {
+ bool PredInLoop = InLoop.count(Pred);
+ for (auto *Entry : Pred->successors()) {
+ if (!Entries.count(Entry) || Map.count({Entry, PredInLoop}))
+ continue;
+      // If this entry has a layout predecessor and the current predecessor is
+      // not it, we would rather create the routing block after that layout
+      // predecessor, to save a branch.
+ if (auto *OtherPred = EntryToLayoutPred.lookup({Entry, PredInLoop}))
+ if (OtherPred != Pred)
+ continue;
+
+ // This is a successor we need to rewrite.
+ MachineBasicBlock *Routing = MF.CreateMachineBasicBlock();
+ MF.insert(Pred->isLayoutSuccessor(Entry)
+ ? MachineFunction::iterator(Entry)
+ : MF.end(),
+ Routing);
+ Blocks.insert(Routing);
+
+ // Set the jump table's register of the index of the block we wish to
+ // jump to, and jump to the jump table.
+ BuildMI(Routing, DebugLoc(), TII.get(WebAssembly::CONST_I32), Reg)
+ .addImm(Indices[Entry]);
+ BuildMI(Routing, DebugLoc(), TII.get(WebAssembly::BR)).addMBB(Dispatch);
+ Routing->addSuccessor(Dispatch);
+ Map[{Entry, PredInLoop}] = Routing;
+ }
+ }
+
+ for (auto *Pred : AllPreds) {
+ bool PredInLoop = InLoop.count(Pred);
+ // Remap the terminator operands and the successor list.
+ for (MachineInstr &Term : Pred->terminators())
+ for (auto &Op : Term.explicit_uses())
+ if (Op.isMBB() && Indices.count(Op.getMBB()))
+ Op.setMBB(Map[{Op.getMBB(), PredInLoop}]);
+
+ for (auto *Succ : Pred->successors()) {
+ if (!Entries.count(Succ))
+ continue;
+ auto *Routing = Map[{Succ, PredInLoop}];
+ Pred->replaceSuccessor(Succ, Routing);
+ }
+ }
+
+ // Create a fake default label, because br_table requires one.
+ MIB.addMBB(MIB.getInstr()
+ ->getOperand(MIB.getInstr()->getNumExplicitOperands() - 1)
+ .getMBB());
+}
+
+} // end anonymous namespace
+
+char WebAssemblyFixIrreducibleControlFlow::ID = 0;
+INITIALIZE_PASS(WebAssemblyFixIrreducibleControlFlow, DEBUG_TYPE,
+ "Removes irreducible control flow", false, false)
+
+FunctionPass *llvm::createWebAssemblyFixIrreducibleControlFlow() {
+ return new WebAssemblyFixIrreducibleControlFlow();
+}
+
+// Test whether the given register has an ARGUMENT def.
+static bool hasArgumentDef(unsigned Reg, const MachineRegisterInfo &MRI) {
+ for (const auto &Def : MRI.def_instructions(Reg))
+ if (WebAssembly::isArgument(Def.getOpcode()))
+ return true;
+ return false;
+}
+
+// Add a register definition with IMPLICIT_DEFs for every register to cover for
+// register uses that don't have defs in every possible path.
+// TODO: This is fairly heavy-handed; find a better approach.
+static void addImplicitDefs(MachineFunction &MF) {
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ MachineBasicBlock &Entry = *MF.begin();
+ for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I) {
+ Register Reg = Register::index2VirtReg(I);
+
+ // Skip unused registers.
+ if (MRI.use_nodbg_empty(Reg))
+ continue;
+
+ // Skip registers that have an ARGUMENT definition.
+ if (hasArgumentDef(Reg, MRI))
+ continue;
+
+ BuildMI(Entry, Entry.begin(), DebugLoc(),
+ TII.get(WebAssembly::IMPLICIT_DEF), Reg);
+ }
+
+ // Move ARGUMENT_* instructions to the top of the entry block, so that their
+ // liveness reflects the fact that these really are live-in values.
+ for (MachineInstr &MI : llvm::make_early_inc_range(Entry)) {
+ if (WebAssembly::isArgument(MI.getOpcode())) {
+ MI.removeFromParent();
+ Entry.insert(Entry.begin(), &MI);
+ }
+ }
+}
+
+bool WebAssemblyFixIrreducibleControlFlow::runOnMachineFunction(
+ MachineFunction &MF) {
+ LLVM_DEBUG(dbgs() << "********** Fixing Irreducible Control Flow **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ // Start the recursive process on the entire function body.
+ BlockSet AllBlocks;
+ for (auto &MBB : MF) {
+ AllBlocks.insert(&MBB);
+ }
+
+ if (LLVM_UNLIKELY(processRegion(&*MF.begin(), AllBlocks, MF))) {
+ // We rewrote part of the function; recompute relevant things.
+ MF.RenumberBlocks();
+ // Now we've inserted dispatch blocks, some register uses can have incoming
+ // paths without a def. For example, before this pass register %a was
+    // defined in BB1 and used in BB2, and there was only one path from BB1 to
+ // BB2. But if this pass inserts a dispatch block having multiple
+ // predecessors between the two BBs, now there are paths to BB2 without
+ // visiting BB1, and %a's use in BB2 is not dominated by its def. Adding
+ // IMPLICIT_DEFs to all regs is one simple way to fix it.
+ addImplicitDefs(MF);
+ return true;
+ }
+
+ return false;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp
new file mode 100644
index 000000000000..8f3ad167ae41
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp
@@ -0,0 +1,396 @@
+//===-- WebAssemblyFrameLowering.cpp - WebAssembly Frame Lowering ----------==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the WebAssembly implementation of
+/// the TargetFrameLowering class.
+///
+/// On WebAssembly, there aren't a lot of things to do here. There are no
+/// callee-saved registers to save, and no spill slots.
+///
+/// The stack grows downward.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyFrameLowering.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "Utils/WebAssemblyTypeUtilities.h"
+#include "WebAssembly.h"
+#include "WebAssemblyInstrInfo.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyTargetMachine.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Support/Debug.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-frame-info"
+
+// TODO: wasm64
+// TODO: Emit TargetOpcode::CFI_INSTRUCTION instructions
+
+// In an ideal world, when objects are added to the MachineFrameInfo by
+// FunctionLoweringInfo::set, we could somehow hook into target-specific code to
+// ensure they are assigned the right stack ID. However, there isn't a hook that
+// runs between then and DAG building time, so instead we hoist stack
+// objects lazily when they are first used, and comprehensively after the DAG is
+// built via the PreprocessISelDAG hook, called by
+// SelectionDAGISel::runOnMachineFunction. We have to do it in two places
+// because we want to do it while building the selection DAG for uses of alloca,
+// but not all alloca instructions are used, so we have to follow up afterwards.
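+//
+// For example (a sketch), an `alloca {i32, double}` in the wasm-var address
+// space becomes two Wasm locals (one i32, one f64); the frame object's offset
+// records the index of the first local and its size records the local count,
+// as the code below arranges.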
+std::optional<unsigned>
+WebAssemblyFrameLowering::getLocalForStackObject(MachineFunction &MF,
+ int FrameIndex) {
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+
+ // If already hoisted to a local, done.
+ if (MFI.getStackID(FrameIndex) == TargetStackID::WasmLocal)
+ return static_cast<unsigned>(MFI.getObjectOffset(FrameIndex));
+
+ // If not allocated in the object address space, this object will be in
+ // linear memory.
+ const AllocaInst *AI = MFI.getObjectAllocation(FrameIndex);
+ if (!AI || !WebAssembly::isWasmVarAddressSpace(AI->getAddressSpace()))
+ return std::nullopt;
+
+ // Otherwise, allocate this object in the named value stack, outside of linear
+ // memory.
+ SmallVector<EVT, 4> ValueVTs;
+ const WebAssemblyTargetLowering &TLI =
+ *MF.getSubtarget<WebAssemblySubtarget>().getTargetLowering();
+ WebAssemblyFunctionInfo *FuncInfo = MF.getInfo<WebAssemblyFunctionInfo>();
+ ComputeValueVTs(TLI, MF.getDataLayout(), AI->getAllocatedType(), ValueVTs);
+ MFI.setStackID(FrameIndex, TargetStackID::WasmLocal);
+ // Abuse SP offset to record the index of the first local in the object.
+ unsigned Local = FuncInfo->getParams().size() + FuncInfo->getLocals().size();
+ MFI.setObjectOffset(FrameIndex, Local);
+ // Allocate WebAssembly locals for each non-aggregate component of the
+ // allocation.
+ for (EVT ValueVT : ValueVTs)
+ FuncInfo->addLocal(ValueVT.getSimpleVT());
+ // Abuse object size to record number of WebAssembly locals allocated to
+ // this object.
+ MFI.setObjectSize(FrameIndex, ValueVTs.size());
+ return static_cast<unsigned>(Local);
+}
+
+/// We need a base pointer in the case of having items on the stack that
+/// require stricter alignment than the stack pointer itself. Because we need
+/// to shift the stack pointer by some unknown amount to force the alignment,
+/// we need to record the value of the stack pointer on entry to the function.
+bool WebAssemblyFrameLowering::hasBP(const MachineFunction &MF) const {
+ const auto *RegInfo =
+ MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo();
+ return RegInfo->hasStackRealignment(MF);
+}
+
+/// Return true if the specified function should have a dedicated frame pointer
+/// register.
+bool WebAssemblyFrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+
+ // When we have var-sized objects, we move the stack pointer by an unknown
+ // amount, and need to emit a frame pointer to restore the stack to where we
+ // were on function entry.
+ // If we already need a base pointer, we use that to fix up the stack pointer.
+ // If there are no fixed-size objects, we would have no use of a frame
+ // pointer, and thus should not emit one.
+ bool HasFixedSizedObjects = MFI.getStackSize() > 0;
+ bool NeedsFixedReference = !hasBP(MF) || HasFixedSizedObjects;
+
+ return MFI.isFrameAddressTaken() ||
+ (MFI.hasVarSizedObjects() && NeedsFixedReference) ||
+ MFI.hasStackMap() || MFI.hasPatchPoint();
+}
+
+/// Under normal circumstances, when a frame pointer is not required, we reserve
+/// argument space for call sites in the function immediately on entry to the
+/// current function. This eliminates the need for add/sub sp brackets around
+/// call sites. Returns true if the call frame is included as part of the stack
+/// frame.
+bool WebAssemblyFrameLowering::hasReservedCallFrame(
+ const MachineFunction &MF) const {
+ return !MF.getFrameInfo().hasVarSizedObjects();
+}
+
+// Returns true if this function needs a local user-space stack pointer for its
+// local frame (not for exception handling).
+bool WebAssemblyFrameLowering::needsSPForLocalFrame(
+ const MachineFunction &MF) const {
+ auto &MFI = MF.getFrameInfo();
+ auto &MRI = MF.getRegInfo();
+ // llvm.stacksave can explicitly read SP register and it can appear without
+ // dynamic alloca.
+ bool HasExplicitSPUse =
+ any_of(MRI.use_operands(getSPReg(MF)),
+ [](MachineOperand &MO) { return !MO.isImplicit(); });
+
+ return MFI.getStackSize() || MFI.adjustsStack() || hasFP(MF) ||
+ HasExplicitSPUse;
+}
+
+// In function with EH pads, we need to make a copy of the value of
+// __stack_pointer global in SP32/64 register, in order to use it when
+// restoring __stack_pointer after an exception is caught.
+bool WebAssemblyFrameLowering::needsPrologForEH(
+ const MachineFunction &MF) const {
+ auto EHType = MF.getTarget().getMCAsmInfo()->getExceptionHandlingType();
+ return EHType == ExceptionHandling::Wasm &&
+ MF.getFunction().hasPersonalityFn() && MF.getFrameInfo().hasCalls();
+}
+
+/// Returns true if this function needs a local user-space stack pointer.
+/// Unlike a machine stack pointer, the wasm user stack pointer is a global
+/// variable, so it is loaded into a register in the prolog.
+bool WebAssemblyFrameLowering::needsSP(const MachineFunction &MF) const {
+ return needsSPForLocalFrame(MF) || needsPrologForEH(MF);
+}
+
+/// Returns true if the local user-space stack pointer needs to be written back
+/// to __stack_pointer global by this function (this is not meaningful if
+/// needsSP is false). If false, the stack red zone can be used and only a local
+/// SP is needed.
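+/// For example (a sketch of the reasoning): a leaf function with a 16-byte
+/// frame and no calls can keep its frame in the 128-byte red zone just below
+/// the current __stack_pointer value; since it calls nothing that could reuse
+/// that region, the bumped SP never needs to be published to the global.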
+bool WebAssemblyFrameLowering::needsSPWriteback(
+ const MachineFunction &MF) const {
+ auto &MFI = MF.getFrameInfo();
+ assert(needsSP(MF));
+ // When we don't need a local stack pointer for its local frame but only to
+ // support EH, we don't need to write SP back in the epilog, because we don't
+ // bump down the stack pointer in the prolog. We need to write SP back in the
+ // epilog only if
+ // 1. We need SP not only for EH support but also because we actually use
+ // stack or we have a frame address taken.
+ // 2. We cannot use the red zone.
+ bool CanUseRedZone = MFI.getStackSize() <= RedZoneSize && !MFI.hasCalls() &&
+ !MF.getFunction().hasFnAttribute(Attribute::NoRedZone);
+ return needsSPForLocalFrame(MF) && !CanUseRedZone;
+}
+
+unsigned WebAssemblyFrameLowering::getSPReg(const MachineFunction &MF) {
+ return MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()
+ ? WebAssembly::SP64
+ : WebAssembly::SP32;
+}
+
+unsigned WebAssemblyFrameLowering::getFPReg(const MachineFunction &MF) {
+ return MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()
+ ? WebAssembly::FP64
+ : WebAssembly::FP32;
+}
+
+unsigned
+WebAssemblyFrameLowering::getOpcConst(const MachineFunction &MF) {
+ return MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()
+ ? WebAssembly::CONST_I64
+ : WebAssembly::CONST_I32;
+}
+
+unsigned WebAssemblyFrameLowering::getOpcAdd(const MachineFunction &MF) {
+ return MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()
+ ? WebAssembly::ADD_I64
+ : WebAssembly::ADD_I32;
+}
+
+unsigned WebAssemblyFrameLowering::getOpcSub(const MachineFunction &MF) {
+ return MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()
+ ? WebAssembly::SUB_I64
+ : WebAssembly::SUB_I32;
+}
+
+unsigned WebAssemblyFrameLowering::getOpcAnd(const MachineFunction &MF) {
+ return MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()
+ ? WebAssembly::AND_I64
+ : WebAssembly::AND_I32;
+}
+
+unsigned
+WebAssemblyFrameLowering::getOpcGlobGet(const MachineFunction &MF) {
+ return MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()
+ ? WebAssembly::GLOBAL_GET_I64
+ : WebAssembly::GLOBAL_GET_I32;
+}
+
+unsigned
+WebAssemblyFrameLowering::getOpcGlobSet(const MachineFunction &MF) {
+ return MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()
+ ? WebAssembly::GLOBAL_SET_I64
+ : WebAssembly::GLOBAL_SET_I32;
+}
+
+void WebAssemblyFrameLowering::writeSPToGlobal(
+ unsigned SrcReg, MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &InsertStore, const DebugLoc &DL) const {
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+
+ const char *ES = "__stack_pointer";
+ auto *SPSymbol = MF.createExternalSymbolName(ES);
+
+ BuildMI(MBB, InsertStore, DL, TII->get(getOpcGlobSet(MF)))
+ .addExternalSymbol(SPSymbol)
+ .addReg(SrcReg);
+}
+
+MachineBasicBlock::iterator
+WebAssemblyFrameLowering::eliminateCallFramePseudoInstr(
+ MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
+ assert(!I->getOperand(0).getImm() && (hasFP(MF) || hasBP(MF)) &&
+ "Call frame pseudos should only be used for dynamic stack adjustment");
+ auto &ST = MF.getSubtarget<WebAssemblySubtarget>();
+ const auto *TII = ST.getInstrInfo();
+ if (I->getOpcode() == TII->getCallFrameDestroyOpcode() &&
+ needsSPWriteback(MF)) {
+ DebugLoc DL = I->getDebugLoc();
+ writeSPToGlobal(getSPReg(MF), MF, MBB, I, DL);
+ }
+ return MBB.erase(I);
+}
+
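+// A sketch of the prolog this emits for a 16-byte frame on wasm32, in the
+// simple case with no FP/BP (the global.set is skipped when the red zone
+// suffices):
+//
+//   global.get __stack_pointer
+//   i32.const  16
+//   i32.sub
+//   global.set __stack_pointer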
+void WebAssemblyFrameLowering::emitPrologue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ // TODO: Do ".setMIFlag(MachineInstr::FrameSetup)" on emitted instructions
+ auto &MFI = MF.getFrameInfo();
+ assert(MFI.getCalleeSavedInfo().empty() &&
+ "WebAssembly should not have callee-saved registers");
+
+ if (!needsSP(MF))
+ return;
+ uint64_t StackSize = MFI.getStackSize();
+
+ auto &ST = MF.getSubtarget<WebAssemblySubtarget>();
+ const auto *TII = ST.getInstrInfo();
+ auto &MRI = MF.getRegInfo();
+
+ auto InsertPt = MBB.begin();
+ while (InsertPt != MBB.end() &&
+ WebAssembly::isArgument(InsertPt->getOpcode()))
+ ++InsertPt;
+ DebugLoc DL;
+
+ const TargetRegisterClass *PtrRC =
+ MRI.getTargetRegisterInfo()->getPointerRegClass(MF);
+ unsigned SPReg = getSPReg(MF);
+ if (StackSize)
+ SPReg = MRI.createVirtualRegister(PtrRC);
+
+ const char *ES = "__stack_pointer";
+ auto *SPSymbol = MF.createExternalSymbolName(ES);
+ BuildMI(MBB, InsertPt, DL, TII->get(getOpcGlobGet(MF)), SPReg)
+ .addExternalSymbol(SPSymbol);
+
+ bool HasBP = hasBP(MF);
+ if (HasBP) {
+ auto FI = MF.getInfo<WebAssemblyFunctionInfo>();
+ Register BasePtr = MRI.createVirtualRegister(PtrRC);
+ FI->setBasePointerVreg(BasePtr);
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::COPY), BasePtr)
+ .addReg(SPReg);
+ }
+ if (StackSize) {
+ // Subtract the frame size
+ Register OffsetReg = MRI.createVirtualRegister(PtrRC);
+ BuildMI(MBB, InsertPt, DL, TII->get(getOpcConst(MF)), OffsetReg)
+ .addImm(StackSize);
+ BuildMI(MBB, InsertPt, DL, TII->get(getOpcSub(MF)), getSPReg(MF))
+ .addReg(SPReg)
+ .addReg(OffsetReg);
+ }
+ if (HasBP) {
+ Register BitmaskReg = MRI.createVirtualRegister(PtrRC);
+ Align Alignment = MFI.getMaxAlign();
+ BuildMI(MBB, InsertPt, DL, TII->get(getOpcConst(MF)), BitmaskReg)
+ .addImm((int64_t) ~(Alignment.value() - 1));
+ BuildMI(MBB, InsertPt, DL, TII->get(getOpcAnd(MF)), getSPReg(MF))
+ .addReg(getSPReg(MF))
+ .addReg(BitmaskReg);
+ }
+ if (hasFP(MF)) {
+ // Unlike most conventional targets (where FP points to the saved FP),
+ // FP points to the bottom of the fixed-size locals, so we can use positive
+ // offsets in load/store instructions.
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::COPY), getFPReg(MF))
+ .addReg(getSPReg(MF));
+ }
+ if (StackSize && needsSPWriteback(MF)) {
+ writeSPToGlobal(getSPReg(MF), MF, MBB, InsertPt, DL);
+ }
+}
+
+void WebAssemblyFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ uint64_t StackSize = MF.getFrameInfo().getStackSize();
+ if (!needsSP(MF) || !needsSPWriteback(MF))
+ return;
+ auto &ST = MF.getSubtarget<WebAssemblySubtarget>();
+ const auto *TII = ST.getInstrInfo();
+ auto &MRI = MF.getRegInfo();
+ auto InsertPt = MBB.getFirstTerminator();
+ DebugLoc DL;
+
+ if (InsertPt != MBB.end())
+ DL = InsertPt->getDebugLoc();
+
+ // Restore the stack pointer. If we had fixed-size locals, add the offset
+ // subtracted in the prolog.
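+  // Roughly, when there are fixed-size locals and no base pointer, this emits
+  // (wasm32 forms shown):
+  //   i32.const <StackSize>
+  //   i32.add                      ;; FP (or SP) plus the frame size
+  //   global.set __stack_pointer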
+ unsigned SPReg = 0;
+ unsigned SPFPReg = hasFP(MF) ? getFPReg(MF) : getSPReg(MF);
+ if (hasBP(MF)) {
+ auto FI = MF.getInfo<WebAssemblyFunctionInfo>();
+ SPReg = FI->getBasePointerVreg();
+ } else if (StackSize) {
+ const TargetRegisterClass *PtrRC =
+ MRI.getTargetRegisterInfo()->getPointerRegClass(MF);
+ Register OffsetReg = MRI.createVirtualRegister(PtrRC);
+ BuildMI(MBB, InsertPt, DL, TII->get(getOpcConst(MF)), OffsetReg)
+ .addImm(StackSize);
+ // In the epilog we don't need to write the result back to the SP32/64
+ // physreg because it won't be used again. We can use a stackified register
+ // instead.
+ SPReg = MRI.createVirtualRegister(PtrRC);
+ BuildMI(MBB, InsertPt, DL, TII->get(getOpcAdd(MF)), SPReg)
+ .addReg(SPFPReg)
+ .addReg(OffsetReg);
+ } else {
+ SPReg = SPFPReg;
+ }
+
+ writeSPToGlobal(SPReg, MF, MBB, InsertPt, DL);
+}
+
+bool WebAssemblyFrameLowering::isSupportedStackID(
+ TargetStackID::Value ID) const {
+ // Use the Object stack for WebAssembly locals which can only be accessed
+ // by name, not via an address in linear memory.
+ if (ID == TargetStackID::WasmLocal)
+ return true;
+
+ return TargetFrameLowering::isSupportedStackID(ID);
+}
+
+TargetFrameLowering::DwarfFrameBase
+WebAssemblyFrameLowering::getDwarfFrameBase(const MachineFunction &MF) const {
+ DwarfFrameBase Loc;
+ Loc.Kind = DwarfFrameBase::WasmFrameBase;
+ const WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ if (needsSP(MF) && MFI.isFrameBaseVirtual()) {
+ unsigned LocalNum = MFI.getFrameBaseLocal();
+ Loc.Location.WasmLoc = {WebAssembly::TI_LOCAL, LocalNum};
+ } else {
+ // TODO: This should work on a breakpoint at a function with no frame,
+ // but probably won't work for traversing up the stack.
+ Loc.Location.WasmLoc = {WebAssembly::TI_GLOBAL_RELOC, 0};
+ }
+ return Loc;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h
new file mode 100644
index 000000000000..528b33e34bee
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h
@@ -0,0 +1,80 @@
+// WebAssemblyFrameLowering.h - TargetFrameLowering for WebAssembly -*- C++ -*-/
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This class implements the WebAssembly-specific parts of the
+/// TargetFrameLowering class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYFRAMELOWERING_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYFRAMELOWERING_H
+
+#include "llvm/CodeGen/TargetFrameLowering.h"
+
+namespace llvm {
+
+class WebAssemblyFrameLowering final : public TargetFrameLowering {
+public:
+  /// Size of the red zone for the user stack (leaf functions can use this
+  /// much space below the stack pointer without writing it back to the
+  /// __stack_pointer global).
+ // TODO: (ABI) Revisit and decide how large it should be.
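+  // For example, a leaf function whose frame fits within these 128 bytes can
+  // use that space without publishing the adjusted SP to the global.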
+ static const size_t RedZoneSize = 128;
+
+ WebAssemblyFrameLowering()
+ : TargetFrameLowering(StackGrowsDown, /*StackAlignment=*/Align(16),
+ /*LocalAreaOffset=*/0,
+ /*TransientStackAlignment=*/Align(16),
+ /*StackRealignable=*/true) {}
+
+ MachineBasicBlock::iterator
+ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
+
+ /// These methods insert prolog and epilog code into the function.
+ void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+
+ bool hasFP(const MachineFunction &MF) const override;
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
+ bool isSupportedStackID(TargetStackID::Value ID) const override;
+ DwarfFrameBase getDwarfFrameBase(const MachineFunction &MF) const override;
+
+ bool needsPrologForEH(const MachineFunction &MF) const;
+
+ /// Write SP back to __stack_pointer global.
+ void writeSPToGlobal(unsigned SrcReg, MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &InsertStore,
+ const DebugLoc &DL) const;
+
+ // Returns the index of the WebAssembly local to which the stack object
+ // FrameIndex in MF should be allocated, or std::nullopt.
+ static std::optional<unsigned> getLocalForStackObject(MachineFunction &MF,
+ int FrameIndex);
+
+ static unsigned getSPReg(const MachineFunction &MF);
+ static unsigned getFPReg(const MachineFunction &MF);
+ static unsigned getOpcConst(const MachineFunction &MF);
+ static unsigned getOpcAdd(const MachineFunction &MF);
+ static unsigned getOpcSub(const MachineFunction &MF);
+ static unsigned getOpcAnd(const MachineFunction &MF);
+ static unsigned getOpcGlobGet(const MachineFunction &MF);
+ static unsigned getOpcGlobSet(const MachineFunction &MF);
+
+private:
+ bool hasBP(const MachineFunction &MF) const;
+ bool needsSPForLocalFrame(const MachineFunction &MF) const;
+ bool needsSP(const MachineFunction &MF) const;
+ bool needsSPWriteback(const MachineFunction &MF) const;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISD.def b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISD.def
new file mode 100644
index 000000000000..b8954f4693f0
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISD.def
@@ -0,0 +1,52 @@
+//- WebAssemblyISD.def - WebAssembly ISD ---------------------------*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes the various WebAssembly ISD node types.
+///
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+HANDLE_NODETYPE(CALL)
+HANDLE_NODETYPE(RET_CALL)
+HANDLE_NODETYPE(RETURN)
+HANDLE_NODETYPE(ARGUMENT)
+HANDLE_NODETYPE(LOCAL_GET)
+HANDLE_NODETYPE(LOCAL_SET)
+// A wrapper node for TargetExternalSymbol, TargetGlobalAddress, and MCSymbol
+HANDLE_NODETYPE(Wrapper)
+// A special node for TargetGlobalAddress used in PIC code for
+// __memory_base/__table_base relative access.
+HANDLE_NODETYPE(WrapperREL)
+HANDLE_NODETYPE(BR_IF)
+HANDLE_NODETYPE(BR_TABLE)
+HANDLE_NODETYPE(SHUFFLE)
+HANDLE_NODETYPE(SWIZZLE)
+HANDLE_NODETYPE(VEC_SHL)
+HANDLE_NODETYPE(VEC_SHR_S)
+HANDLE_NODETYPE(VEC_SHR_U)
+HANDLE_NODETYPE(NARROW_U)
+HANDLE_NODETYPE(EXTEND_LOW_S)
+HANDLE_NODETYPE(EXTEND_LOW_U)
+HANDLE_NODETYPE(EXTEND_HIGH_S)
+HANDLE_NODETYPE(EXTEND_HIGH_U)
+HANDLE_NODETYPE(CONVERT_LOW_S)
+HANDLE_NODETYPE(CONVERT_LOW_U)
+HANDLE_NODETYPE(PROMOTE_LOW)
+HANDLE_NODETYPE(TRUNC_SAT_ZERO_S)
+HANDLE_NODETYPE(TRUNC_SAT_ZERO_U)
+HANDLE_NODETYPE(DEMOTE_ZERO)
+HANDLE_NODETYPE(MEMORY_COPY)
+HANDLE_NODETYPE(MEMORY_FILL)
+
+// Memory intrinsics
+HANDLE_MEM_NODETYPE(GLOBAL_GET)
+HANDLE_MEM_NODETYPE(GLOBAL_SET)
+HANDLE_MEM_NODETYPE(TABLE_GET)
+HANDLE_MEM_NODETYPE(TABLE_SET)
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp
new file mode 100644
index 000000000000..8833aee02a6a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp
@@ -0,0 +1,413 @@
+//- WebAssemblyISelDAGToDAG.cpp - A dag to dag inst selector for WebAssembly -//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines an instruction selector for the WebAssembly target.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyISelLowering.h"
+#include "WebAssemblyTargetMachine.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/WasmEHFuncInfo.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/Function.h" // To access function attributes.
+#include "llvm/IR/IntrinsicsWebAssembly.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/KnownBits.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-isel"
+#define PASS_NAME "WebAssembly Instruction Selection"
+
+//===--------------------------------------------------------------------===//
+/// WebAssembly-specific code to select WebAssembly machine instructions for
+/// SelectionDAG operations.
+///
+namespace {
+class WebAssemblyDAGToDAGISel final : public SelectionDAGISel {
+ /// Keep a pointer to the WebAssemblySubtarget around so that we can make the
+ /// right decision when generating code for different targets.
+ const WebAssemblySubtarget *Subtarget;
+
+public:
+ static char ID;
+
+ WebAssemblyDAGToDAGISel() = delete;
+
+ WebAssemblyDAGToDAGISel(WebAssemblyTargetMachine &TM,
+ CodeGenOptLevel OptLevel)
+ : SelectionDAGISel(ID, TM, OptLevel), Subtarget(nullptr) {}
+
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ LLVM_DEBUG(dbgs() << "********** ISelDAGToDAG **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ Subtarget = &MF.getSubtarget<WebAssemblySubtarget>();
+
+ return SelectionDAGISel::runOnMachineFunction(MF);
+ }
+
+ void PreprocessISelDAG() override;
+
+ void Select(SDNode *Node) override;
+
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+ InlineAsm::ConstraintCode ConstraintID,
+ std::vector<SDValue> &OutOps) override;
+
+ bool SelectAddrOperands32(SDValue Op, SDValue &Offset, SDValue &Addr);
+ bool SelectAddrOperands64(SDValue Op, SDValue &Offset, SDValue &Addr);
+
+// Include the pieces autogenerated from the target description.
+#include "WebAssemblyGenDAGISel.inc"
+
+private:
+ // add select functions here...
+
+ bool SelectAddrOperands(MVT AddrType, unsigned ConstOpc, SDValue Op,
+ SDValue &Offset, SDValue &Addr);
+ bool SelectAddrAddOperands(MVT OffsetType, SDValue N, SDValue &Offset,
+ SDValue &Addr);
+};
+} // end anonymous namespace
+
+char WebAssemblyDAGToDAGISel::ID;
+
+INITIALIZE_PASS(WebAssemblyDAGToDAGISel, DEBUG_TYPE, PASS_NAME, false, false)
+
+void WebAssemblyDAGToDAGISel::PreprocessISelDAG() {
+  // Stack objects that should be allocated to locals are hoisted to
+  // WebAssembly locals when they are first used. However, for those without
+  // uses, we hoist them here. It would be nice if there were some hook to do
+  // this when they are added to the MachineFrameInfo, but that's not the case
+  // right now.
+ MachineFrameInfo &FrameInfo = MF->getFrameInfo();
+ for (int Idx = 0; Idx < FrameInfo.getObjectIndexEnd(); Idx++)
+ WebAssemblyFrameLowering::getLocalForStackObject(*MF, Idx);
+
+ SelectionDAGISel::PreprocessISelDAG();
+}
+
+static SDValue getTagSymNode(int Tag, SelectionDAG *DAG) {
+  assert(Tag == WebAssembly::CPP_EXCEPTION || Tag == WebAssembly::C_LONGJMP);
+ auto &MF = DAG->getMachineFunction();
+ const auto &TLI = DAG->getTargetLoweringInfo();
+ MVT PtrVT = TLI.getPointerTy(DAG->getDataLayout());
+ const char *SymName = Tag == WebAssembly::CPP_EXCEPTION
+ ? MF.createExternalSymbolName("__cpp_exception")
+ : MF.createExternalSymbolName("__c_longjmp");
+ return DAG->getTargetExternalSymbol(SymName, PtrVT);
+}
+
+void WebAssemblyDAGToDAGISel::Select(SDNode *Node) {
+ // If we have a custom node, we already have selected!
+ if (Node->isMachineOpcode()) {
+ LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
+ Node->setNodeId(-1);
+ return;
+ }
+
+ MVT PtrVT = TLI->getPointerTy(CurDAG->getDataLayout());
+ auto GlobalGetIns = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
+ : WebAssembly::GLOBAL_GET_I32;
+
+  // A few custom selection cases.
+ SDLoc DL(Node);
+ MachineFunction &MF = CurDAG->getMachineFunction();
+ switch (Node->getOpcode()) {
+ case ISD::ATOMIC_FENCE: {
+ if (!MF.getSubtarget<WebAssemblySubtarget>().hasAtomics())
+ break;
+
+ uint64_t SyncScopeID = Node->getConstantOperandVal(2);
+ MachineSDNode *Fence = nullptr;
+ switch (SyncScopeID) {
+ case SyncScope::SingleThread:
+      // We lower a single-thread fence to a pseudo compiler barrier
+      // instruction that prevents instruction reordering. This will not be
+      // emitted in the final binary.
+ Fence = CurDAG->getMachineNode(WebAssembly::COMPILER_FENCE,
+ DL, // debug loc
+ MVT::Other, // outchain type
+ Node->getOperand(0) // inchain
+ );
+ break;
+ case SyncScope::System:
+ // Currently wasm only supports sequentially consistent atomics, so we
+ // always set the order to 0 (sequentially consistent).
+ Fence = CurDAG->getMachineNode(
+ WebAssembly::ATOMIC_FENCE,
+ DL, // debug loc
+ MVT::Other, // outchain type
+ CurDAG->getTargetConstant(0, DL, MVT::i32), // order
+ Node->getOperand(0) // inchain
+ );
+ break;
+ default:
+ llvm_unreachable("Unknown scope!");
+ }
+
+ ReplaceNode(Node, Fence);
+ CurDAG->RemoveDeadNode(Node);
+ return;
+ }
+
+ case ISD::INTRINSIC_WO_CHAIN: {
+ unsigned IntNo = Node->getConstantOperandVal(0);
+ switch (IntNo) {
+ case Intrinsic::wasm_tls_size: {
+ MachineSDNode *TLSSize = CurDAG->getMachineNode(
+ GlobalGetIns, DL, PtrVT,
+ CurDAG->getTargetExternalSymbol("__tls_size", PtrVT));
+ ReplaceNode(Node, TLSSize);
+ return;
+ }
+
+ case Intrinsic::wasm_tls_align: {
+ MachineSDNode *TLSAlign = CurDAG->getMachineNode(
+ GlobalGetIns, DL, PtrVT,
+ CurDAG->getTargetExternalSymbol("__tls_align", PtrVT));
+ ReplaceNode(Node, TLSAlign);
+ return;
+ }
+ }
+ break;
+ }
+
+ case ISD::INTRINSIC_W_CHAIN: {
+ unsigned IntNo = Node->getConstantOperandVal(1);
+ const auto &TLI = CurDAG->getTargetLoweringInfo();
+ MVT PtrVT = TLI.getPointerTy(CurDAG->getDataLayout());
+ switch (IntNo) {
+ case Intrinsic::wasm_tls_base: {
+ MachineSDNode *TLSBase = CurDAG->getMachineNode(
+ GlobalGetIns, DL, PtrVT, MVT::Other,
+ CurDAG->getTargetExternalSymbol("__tls_base", PtrVT),
+ Node->getOperand(0));
+ ReplaceNode(Node, TLSBase);
+ return;
+ }
+
+ case Intrinsic::wasm_catch: {
+ int Tag = Node->getConstantOperandVal(2);
+ SDValue SymNode = getTagSymNode(Tag, CurDAG);
+ MachineSDNode *Catch =
+ CurDAG->getMachineNode(WebAssembly::CATCH, DL,
+ {
+ PtrVT, // exception pointer
+ MVT::Other // outchain type
+ },
+ {
+ SymNode, // exception symbol
+ Node->getOperand(0) // inchain
+ });
+ ReplaceNode(Node, Catch);
+ return;
+ }
+ }
+ break;
+ }
+
+ case ISD::INTRINSIC_VOID: {
+ unsigned IntNo = Node->getConstantOperandVal(1);
+ switch (IntNo) {
+ case Intrinsic::wasm_throw: {
+ int Tag = Node->getConstantOperandVal(2);
+ SDValue SymNode = getTagSymNode(Tag, CurDAG);
+ MachineSDNode *Throw =
+ CurDAG->getMachineNode(WebAssembly::THROW, DL,
+ MVT::Other, // outchain type
+ {
+ SymNode, // exception symbol
+ Node->getOperand(3), // thrown value
+ Node->getOperand(0) // inchain
+ });
+ ReplaceNode(Node, Throw);
+ return;
+ }
+ }
+ break;
+ }
+
+ case WebAssemblyISD::CALL:
+ case WebAssemblyISD::RET_CALL: {
+ // CALL has both variable operands and variable results, but ISel only
+ // supports one or the other. Split calls into two nodes glued together, one
+ // for the operands and one for the results. These two nodes will be
+ // recombined in a custom inserter hook into a single MachineInstr.
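+    // For example, a direct call with two i32 arguments becomes, roughly:
+    //   t0: Glue  = CALL_PARAMS callee, %a, %b, <chain>
+    //   results   = CALL_RESULTS t0
+    // and LowerCallResults later fuses the pair into a single CALL.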
+ SmallVector<SDValue, 16> Ops;
+ for (size_t i = 1; i < Node->getNumOperands(); ++i) {
+ SDValue Op = Node->getOperand(i);
+ // Remove the wrapper when the call target is a function, an external
+ // symbol (which will be lowered to a library function), or an alias of
+ // a function. If the target is not a function/external symbol, we
+ // shouldn't remove the wrapper, because we cannot call it directly and
+ // instead we want it to be loaded with a CONST instruction and called
+ // with a call_indirect later.
+ if (i == 1 && Op->getOpcode() == WebAssemblyISD::Wrapper) {
+ SDValue NewOp = Op->getOperand(0);
+ if (auto *GlobalOp = dyn_cast<GlobalAddressSDNode>(NewOp.getNode())) {
+ if (isa<Function>(
+ GlobalOp->getGlobal()->stripPointerCastsAndAliases()))
+ Op = NewOp;
+ } else if (isa<ExternalSymbolSDNode>(NewOp.getNode())) {
+ Op = NewOp;
+ }
+ }
+ Ops.push_back(Op);
+ }
+
+ // Add the chain last
+ Ops.push_back(Node->getOperand(0));
+ MachineSDNode *CallParams =
+ CurDAG->getMachineNode(WebAssembly::CALL_PARAMS, DL, MVT::Glue, Ops);
+
+ unsigned Results = Node->getOpcode() == WebAssemblyISD::CALL
+ ? WebAssembly::CALL_RESULTS
+ : WebAssembly::RET_CALL_RESULTS;
+
+ SDValue Link(CallParams, 0);
+ MachineSDNode *CallResults =
+ CurDAG->getMachineNode(Results, DL, Node->getVTList(), Link);
+ ReplaceNode(Node, CallResults);
+ return;
+ }
+
+ default:
+ break;
+ }
+
+ // Select the default instruction.
+ SelectCode(Node);
+}
+
+bool WebAssemblyDAGToDAGISel::SelectInlineAsmMemoryOperand(
+ const SDValue &Op, InlineAsm::ConstraintCode ConstraintID,
+ std::vector<SDValue> &OutOps) {
+ switch (ConstraintID) {
+ case InlineAsm::ConstraintCode::m:
+    // We only support simple memory operands that have a single address
+    // operand and need no special handling.
+ OutOps.push_back(Op);
+ return false;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+bool WebAssemblyDAGToDAGISel::SelectAddrAddOperands(MVT OffsetType, SDValue N,
+ SDValue &Offset,
+ SDValue &Addr) {
+ assert(N.getNumOperands() == 2 && "Attempting to fold in a non-binary op");
+
+ // WebAssembly constant offsets are performed as unsigned with infinite
+ // precision, so we need to check for NoUnsignedWrap so that we don't fold an
+ // offset for an add that needs wrapping.
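+  // For example, folding the -4 in (add %base, -4) would produce the offset
+  // 0xFFFFFFFC; with infinite-precision unsigned addressing that means
+  // %base + 0xFFFFFFFC, not %base - 4.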
+ if (N.getOpcode() == ISD::ADD && !N.getNode()->getFlags().hasNoUnsignedWrap())
+ return false;
+
+ // Folds constants in an add into the offset.
+ for (size_t i = 0; i < 2; ++i) {
+ SDValue Op = N.getOperand(i);
+ SDValue OtherOp = N.getOperand(i == 0 ? 1 : 0);
+
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
+ Offset =
+ CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(N), OffsetType);
+ Addr = OtherOp;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool WebAssemblyDAGToDAGISel::SelectAddrOperands(MVT AddrType,
+ unsigned ConstOpc, SDValue N,
+ SDValue &Offset,
+ SDValue &Addr) {
+ SDLoc DL(N);
+
+ // Fold target global addresses into the offset.
+ if (!TM.isPositionIndependent()) {
+ SDValue Op(N);
+ if (Op.getOpcode() == WebAssemblyISD::Wrapper)
+ Op = Op.getOperand(0);
+
+ if (Op.getOpcode() == ISD::TargetGlobalAddress) {
+ Offset = Op;
+ Addr = SDValue(
+ CurDAG->getMachineNode(ConstOpc, DL, AddrType,
+ CurDAG->getTargetConstant(0, DL, AddrType)),
+ 0);
+ return true;
+ }
+ }
+
+ // Fold anything inside an add into the offset.
+ if (N.getOpcode() == ISD::ADD &&
+ SelectAddrAddOperands(AddrType, N, Offset, Addr))
+ return true;
+
+ // Likewise, treat an 'or' node as an 'add' if the or'ed bits are known to be
+ // zero and fold them into the offset too.
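+  // For example, (or (shl %x, 4), 7) is equivalent to (add (shl %x, 4), 7)
+  // because the shift leaves the low four bits known zero.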
+ if (N.getOpcode() == ISD::OR) {
+ bool OrIsAdd;
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
+ OrIsAdd =
+ CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
+ } else {
+ KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
+ KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0);
+ OrIsAdd = (~Known0.Zero & ~Known1.Zero) == 0;
+ }
+
+ if (OrIsAdd && SelectAddrAddOperands(AddrType, N, Offset, Addr))
+ return true;
+ }
+
+ // Fold constant addresses into the offset.
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
+ Offset = CurDAG->getTargetConstant(CN->getZExtValue(), DL, AddrType);
+ Addr = SDValue(
+ CurDAG->getMachineNode(ConstOpc, DL, AddrType,
+ CurDAG->getTargetConstant(0, DL, AddrType)),
+ 0);
+ return true;
+ }
+
+ // Else it's a plain old load/store with no offset.
+ Offset = CurDAG->getTargetConstant(0, DL, AddrType);
+ Addr = N;
+ return true;
+}
+
+bool WebAssemblyDAGToDAGISel::SelectAddrOperands32(SDValue Op, SDValue &Offset,
+ SDValue &Addr) {
+ return SelectAddrOperands(MVT::i32, WebAssembly::CONST_I32, Op, Offset, Addr);
+}
+
+bool WebAssemblyDAGToDAGISel::SelectAddrOperands64(SDValue Op, SDValue &Offset,
+ SDValue &Addr) {
+ return SelectAddrOperands(MVT::i64, WebAssembly::CONST_I64, Op, Offset, Addr);
+}
+
+/// This pass converts a legalized DAG into a WebAssembly-specific DAG, ready
+/// for instruction scheduling.
+FunctionPass *llvm::createWebAssemblyISelDag(WebAssemblyTargetMachine &TM,
+ CodeGenOptLevel OptLevel) {
+ return new WebAssemblyDAGToDAGISel(TM, OptLevel);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
new file mode 100644
index 000000000000..4bcf89690505
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -0,0 +1,2889 @@
+//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the WebAssemblyTargetLowering class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyISelLowering.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "Utils/WebAssemblyTypeUtilities.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyTargetMachine.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsWebAssembly.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetOptions.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-lower"
+
+WebAssemblyTargetLowering::WebAssemblyTargetLowering(
+ const TargetMachine &TM, const WebAssemblySubtarget &STI)
+ : TargetLowering(TM), Subtarget(&STI) {
+ auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
+
+ // Booleans always contain 0 or 1.
+ setBooleanContents(ZeroOrOneBooleanContent);
+ // Except in SIMD vectors
+ setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
+ // We don't know the microarchitecture here, so just reduce register pressure.
+ setSchedulingPreference(Sched::RegPressure);
+ // Tell ISel that we have a stack pointer.
+ setStackPointerRegisterToSaveRestore(
+ Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
+ // Set up the register classes.
+ addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
+ addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
+ addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
+ addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
+ if (Subtarget->hasSIMD128()) {
+ addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
+ addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
+ addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
+ addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
+ addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
+ addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
+ }
+ if (Subtarget->hasReferenceTypes()) {
+ addRegisterClass(MVT::externref, &WebAssembly::EXTERNREFRegClass);
+ addRegisterClass(MVT::funcref, &WebAssembly::FUNCREFRegClass);
+ }
+ // Compute derived properties from the register classes.
+ computeRegisterProperties(Subtarget->getRegisterInfo());
+
+ // Transform loads and stores to pointers in address space 1 to loads and
+ // stores to WebAssembly global variables, outside linear memory.
+ for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
+ setOperationAction(ISD::LOAD, T, Custom);
+ setOperationAction(ISD::STORE, T, Custom);
+ }
+ if (Subtarget->hasSIMD128()) {
+ for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
+ MVT::v2f64}) {
+ setOperationAction(ISD::LOAD, T, Custom);
+ setOperationAction(ISD::STORE, T, Custom);
+ }
+ }
+ if (Subtarget->hasReferenceTypes()) {
+    // We need custom load and store lowering for externref, funcref, and
+    // Other. The MVT::Other here represents tables of reference types.
+ for (auto T : {MVT::externref, MVT::funcref, MVT::Other}) {
+ setOperationAction(ISD::LOAD, T, Custom);
+ setOperationAction(ISD::STORE, T, Custom);
+ }
+ }
+
+ setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
+ setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom);
+ setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
+ setOperationAction(ISD::JumpTable, MVTPtr, Custom);
+ setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
+ setOperationAction(ISD::BRIND, MVT::Other, Custom);
+
+  // Take the default expansion for va_arg, va_copy, and va_end. There is no
+  // default action for va_start, so we lower it with custom code.
+ setOperationAction(ISD::VASTART, MVT::Other, Custom);
+ setOperationAction(ISD::VAARG, MVT::Other, Expand);
+ setOperationAction(ISD::VACOPY, MVT::Other, Expand);
+ setOperationAction(ISD::VAEND, MVT::Other, Expand);
+
+ for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
+ // Don't expand the floating-point types to constant pools.
+ setOperationAction(ISD::ConstantFP, T, Legal);
+ // Expand floating-point comparisons.
+ for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
+ ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
+ setCondCodeAction(CC, T, Expand);
+ // Expand floating-point library function operators.
+ for (auto Op :
+ {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
+ setOperationAction(Op, T, Expand);
+    // Mark as legal the floating-point library function operators that
+    // otherwise default to expand.
+ for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT,
+ ISD::FRINT, ISD::FROUNDEVEN})
+ setOperationAction(Op, T, Legal);
+ // Support minimum and maximum, which otherwise default to expand.
+ setOperationAction(ISD::FMINIMUM, T, Legal);
+ setOperationAction(ISD::FMAXIMUM, T, Legal);
+ // WebAssembly currently has no builtin f16 support.
+ setOperationAction(ISD::FP16_TO_FP, T, Expand);
+ setOperationAction(ISD::FP_TO_FP16, T, Expand);
+ setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
+ setTruncStoreAction(T, MVT::f16, Expand);
+ }
+
+ // Expand unavailable integer operations.
+ for (auto Op :
+ {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
+ ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
+ ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
+ for (auto T : {MVT::i32, MVT::i64})
+ setOperationAction(Op, T, Expand);
+ if (Subtarget->hasSIMD128())
+ for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
+ setOperationAction(Op, T, Expand);
+ }
+
+ if (Subtarget->hasNontrappingFPToInt())
+ for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
+ for (auto T : {MVT::i32, MVT::i64})
+ setOperationAction(Op, T, Custom);
+
+ // SIMD-specific configuration
+ if (Subtarget->hasSIMD128()) {
+ // Combine vector mask reductions into alltrue/anytrue
+ setTargetDAGCombine(ISD::SETCC);
+
+ // Convert vector to integer bitcasts to bitmask
+ setTargetDAGCombine(ISD::BITCAST);
+
+ // Hoist bitcasts out of shuffles
+ setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
+
+ // Combine extends of extract_subvectors into widening ops
+ setTargetDAGCombine({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND});
+
+    // Combine int_to_fp or fp_extend of extract_vectors and vice versa into
+    // conversion ops
+ setTargetDAGCombine({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_EXTEND,
+ ISD::EXTRACT_SUBVECTOR});
+
+ // Combine fp_to_{s,u}int_sat or fp_round of concat_vectors or vice versa
+ // into conversion ops
+ setTargetDAGCombine({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
+ ISD::FP_ROUND, ISD::CONCAT_VECTORS});
+
+ setTargetDAGCombine(ISD::TRUNCATE);
+
+ // Support saturating add for i8x16 and i16x8
+ for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
+ for (auto T : {MVT::v16i8, MVT::v8i16})
+ setOperationAction(Op, T, Legal);
+
+ // Support integer abs
+ for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
+ setOperationAction(ISD::ABS, T, Legal);
+
+ // Custom lower BUILD_VECTORs to minimize number of replace_lanes
+ for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
+ MVT::v2f64})
+ setOperationAction(ISD::BUILD_VECTOR, T, Custom);
+
+ // We have custom shuffle lowering to expose the shuffle mask
+ for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
+ MVT::v2f64})
+ setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
+
+ // Support splatting
+ for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
+ MVT::v2f64})
+ setOperationAction(ISD::SPLAT_VECTOR, T, Legal);
+
+ // Custom lowering since wasm shifts must have a scalar shift amount
+ for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
+ for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
+ setOperationAction(Op, T, Custom);
+
+ // Custom lower lane accesses to expand out variable indices
+ for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
+ for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
+ MVT::v2f64})
+ setOperationAction(Op, T, Custom);
+
+ // There is no i8x16.mul instruction
+ setOperationAction(ISD::MUL, MVT::v16i8, Expand);
+
+ // There is no vector conditional select instruction
+ for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
+ MVT::v2f64})
+ setOperationAction(ISD::SELECT_CC, T, Expand);
+
+ // Expand integer operations supported for scalars but not SIMD
+ for (auto Op :
+ {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
+ for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
+ setOperationAction(Op, T, Expand);
+
+ // But we do have integer min and max operations
+ for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
+ for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
+ setOperationAction(Op, T, Legal);
+
+ // And we have popcnt for i8x16. It can be used to expand ctlz/cttz.
+ setOperationAction(ISD::CTPOP, MVT::v16i8, Legal);
+ setOperationAction(ISD::CTLZ, MVT::v16i8, Expand);
+ setOperationAction(ISD::CTTZ, MVT::v16i8, Expand);
+
+ // Custom lower bit counting operations for other types to scalarize them.
+ for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP})
+ for (auto T : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
+ setOperationAction(Op, T, Custom);
+
+ // Expand float operations supported for scalars but not SIMD
+ for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
+ ISD::FEXP, ISD::FEXP2})
+ for (auto T : {MVT::v4f32, MVT::v2f64})
+ setOperationAction(Op, T, Expand);
+
+ // Unsigned comparison operations are unavailable for i64x2 vectors.
+ for (auto CC : {ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE})
+ setCondCodeAction(CC, MVT::v2i64, Custom);
+
+ // 64x2 conversions are not in the spec
+ for (auto Op :
+ {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
+ for (auto T : {MVT::v2i64, MVT::v2f64})
+ setOperationAction(Op, T, Expand);
+
+    // But saturating fp_to_int conversions are
+ for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
+ setOperationAction(Op, MVT::v4i32, Custom);
+
+ // Support vector extending
+ for (auto T : MVT::integer_fixedlen_vector_valuetypes()) {
+ setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, T, Custom);
+ setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, T, Custom);
+ }
+ }
+
+ // As a special case, these operators use the type to mean the type to
+ // sign-extend from.
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+ if (!Subtarget->hasSignExt()) {
+ // Sign extends are legal only when extending a vector extract
+ auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
+ for (auto T : {MVT::i8, MVT::i16, MVT::i32})
+ setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
+ }
+ for (auto T : MVT::integer_fixedlen_vector_valuetypes())
+ setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);
+
+ // Dynamic stack allocation: use the default expansion.
+ setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
+ setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);
+
+ setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
+ setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
+ setOperationAction(ISD::CopyToReg, MVT::Other, Custom);
+
+ // Expand these forms; we pattern-match the forms that we can handle in isel.
+ for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
+ for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
+ setOperationAction(Op, T, Expand);
+
+ // We have custom switch handling.
+ setOperationAction(ISD::BR_JT, MVT::Other, Custom);
+
+ // WebAssembly doesn't have:
+ // - Floating-point extending loads.
+ // - Floating-point truncating stores.
+ // - i1 extending loads.
+ // - truncating SIMD stores and most extending loads
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+ for (auto T : MVT::integer_valuetypes())
+ for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
+ setLoadExtAction(Ext, T, MVT::i1, Promote);
+ if (Subtarget->hasSIMD128()) {
+ for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
+ MVT::v2f64}) {
+ for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
+ if (MVT(T) != MemT) {
+ setTruncStoreAction(T, MemT, Expand);
+ for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
+ setLoadExtAction(Ext, T, MemT, Expand);
+ }
+ }
+ }
+ // But some vector extending loads are legal
+ for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
+ setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
+ setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
+ setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
+ }
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Legal);
+ }
+
+ // Don't do anything clever with build_pairs
+ setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
+
+ // Trap lowers to wasm unreachable
+ setOperationAction(ISD::TRAP, MVT::Other, Legal);
+ setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
+
+ // Exception handling intrinsics
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
+ setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
+
+ setMaxAtomicSizeInBitsSupported(64);
+
+ // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
+ // consistent with the f64 and f128 names.
+ setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
+ setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
+
+ // Define the emscripten name for return address helper.
+ // TODO: when implementing other Wasm backends, make this generic or only do
+ // this on emscripten depending on what they end up doing.
+ setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");
+
+ // Always convert switches to br_tables unless there is only one case, which
+ // is equivalent to a simple branch. This reduces code size for wasm, and we
+ // defer possible jump table optimizations to the VM.
+ setMinimumJumpTableEntries(2);
+}
+
+MVT WebAssemblyTargetLowering::getPointerTy(const DataLayout &DL,
+ uint32_t AS) const {
+ if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
+ return MVT::externref;
+ if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
+ return MVT::funcref;
+ return TargetLowering::getPointerTy(DL, AS);
+}
+
+MVT WebAssemblyTargetLowering::getPointerMemTy(const DataLayout &DL,
+ uint32_t AS) const {
+ if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
+ return MVT::externref;
+ if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
+ return MVT::funcref;
+ return TargetLowering::getPointerMemTy(DL, AS);
+}
+
+TargetLowering::AtomicExpansionKind
+WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+ // We have wasm instructions for these
+ switch (AI->getOperation()) {
+ case AtomicRMWInst::Add:
+ case AtomicRMWInst::Sub:
+ case AtomicRMWInst::And:
+ case AtomicRMWInst::Or:
+ case AtomicRMWInst::Xor:
+ case AtomicRMWInst::Xchg:
+ return AtomicExpansionKind::None;
+ default:
+ break;
+ }
+ return AtomicExpansionKind::CmpXChg;
+}
+
+bool WebAssemblyTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
+ // Implementation copied from X86TargetLowering.
+ unsigned Opc = VecOp.getOpcode();
+
+ // Assume target opcodes can't be scalarized.
+ // TODO - do we have any exceptions?
+ if (Opc >= ISD::BUILTIN_OP_END)
+ return false;
+
+ // If the vector op is not supported, try to convert to scalar.
+ EVT VecVT = VecOp.getValueType();
+ if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
+ return true;
+
+ // If the vector op is supported, but the scalar op is not, the transform may
+ // not be worthwhile.
+ EVT ScalarVT = VecVT.getScalarType();
+ return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
+}
+
+FastISel *WebAssemblyTargetLowering::createFastISel(
+ FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
+ return WebAssembly::createFastISel(FuncInfo, LibInfo);
+}
+
+MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
+ EVT VT) const {
+ unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
+ if (BitWidth > 1 && BitWidth < 8)
+ BitWidth = 8;
+
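+  // For example, an i128 shift computes BitWidth 128 above, which the branch
+  // below clamps to an i32 shift amount for the libcall.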
+ if (BitWidth > 64) {
+ // The shift will be lowered to a libcall, and compiler-rt libcalls expect
+ // the count to be an i32.
+ BitWidth = 32;
+ assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
+ "32-bit shift counts ought to be enough for anyone");
+ }
+
+ MVT Result = MVT::getIntegerVT(BitWidth);
+ assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
+ "Unable to represent scalar shift amount type");
+ return Result;
+}
+
+// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
+// undefined result on invalid/overflow, to the WebAssembly opcode, which
+// traps on invalid/overflow.
+static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
+ MachineBasicBlock *BB,
+ const TargetInstrInfo &TII,
+ bool IsUnsigned, bool Int64,
+ bool Float64, unsigned LoweredOpcode) {
+ MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+
+ Register OutReg = MI.getOperand(0).getReg();
+ Register InReg = MI.getOperand(1).getReg();
+
+ unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
+ unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
+ unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
+ unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
+ unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
+ unsigned Eqz = WebAssembly::EQZ_I32;
+ unsigned And = WebAssembly::AND_I32;
+ int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
+ int64_t Substitute = IsUnsigned ? 0 : Limit;
+ double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
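+  // For example, for a signed i32 conversion Limit is INT32_MIN, so CmpVal is
+  // 2^31 = 2147483648.0 and the real conversion runs only when fabs(x) < 2^31;
+  // for the unsigned i32 case CmpVal is 2^32.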
+ auto &Context = BB->getParent()->getFunction().getContext();
+ Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
+
+ const BasicBlock *LLVMBB = BB->getBasicBlock();
+ MachineFunction *F = BB->getParent();
+ MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
+ MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
+ MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
+
+ MachineFunction::iterator It = ++BB->getIterator();
+ F->insert(It, FalseMBB);
+ F->insert(It, TrueMBB);
+ F->insert(It, DoneMBB);
+
+ // Transfer the remainder of BB and its successor edges to DoneMBB.
+ DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
+ DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
+
+ BB->addSuccessor(TrueMBB);
+ BB->addSuccessor(FalseMBB);
+ TrueMBB->addSuccessor(DoneMBB);
+ FalseMBB->addSuccessor(DoneMBB);
+
+ unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
+ Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
+ Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
+ CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
+ EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
+ FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
+ TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
+
+ MI.eraseFromParent();
+ // For signed numbers, we can do a single comparison to determine whether
+ // fabs(x) is within range.
+ if (IsUnsigned) {
+ Tmp0 = InReg;
+ } else {
+ BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
+ }
+ BuildMI(BB, DL, TII.get(FConst), Tmp1)
+ .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
+ BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
+
+ // For unsigned numbers, we have to do a separate comparison with zero.
+ if (IsUnsigned) {
+ Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
+ Register SecondCmpReg =
+ MRI.createVirtualRegister(&WebAssembly::I32RegClass);
+ Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
+ BuildMI(BB, DL, TII.get(FConst), Tmp1)
+ .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
+ BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
+ BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
+ CmpReg = AndReg;
+ }
+
+ BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
+
+ // Create the CFG diamond to select between doing the conversion or using
+ // the substitute value.
+ BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
+ BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
+ BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
+ BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
+ BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
+ .addReg(FalseReg)
+ .addMBB(FalseMBB)
+ .addReg(TrueReg)
+ .addMBB(TrueMBB);
+
+ return DoneMBB;
+}
+
+static MachineBasicBlock *
+LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
+ const WebAssemblySubtarget *Subtarget,
+ const TargetInstrInfo &TII) {
+ MachineInstr &CallParams = *CallResults.getPrevNode();
+ assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
+ assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
+ CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);
+
+ bool IsIndirect =
+ CallParams.getOperand(0).isReg() || CallParams.getOperand(0).isFI();
+ bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;
+
+ bool IsFuncrefCall = false;
+ if (IsIndirect && CallParams.getOperand(0).isReg()) {
+ Register Reg = CallParams.getOperand(0).getReg();
+ const MachineFunction *MF = BB->getParent();
+ const MachineRegisterInfo &MRI = MF->getRegInfo();
+ const TargetRegisterClass *TRC = MRI.getRegClass(Reg);
+ IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
+ assert(!IsFuncrefCall || Subtarget->hasReferenceTypes());
+ }
+
+ unsigned CallOp;
+ if (IsIndirect && IsRetCall) {
+ CallOp = WebAssembly::RET_CALL_INDIRECT;
+ } else if (IsIndirect) {
+ CallOp = WebAssembly::CALL_INDIRECT;
+ } else if (IsRetCall) {
+ CallOp = WebAssembly::RET_CALL;
+ } else {
+ CallOp = WebAssembly::CALL;
+ }
+
+ MachineFunction &MF = *BB->getParent();
+ const MCInstrDesc &MCID = TII.get(CallOp);
+ MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));
+
+ // See if we must truncate the function pointer.
+ // CALL_INDIRECT takes an i32, but in wasm64 we represent function pointers
+ // as 64-bit for uniformity with other pointer types.
+ // See also: WebAssemblyFastISel::selectCall
+ if (IsIndirect && MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()) {
+ Register Reg32 =
+ MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
+ auto &FnPtr = CallParams.getOperand(0);
+ BuildMI(*BB, CallResults.getIterator(), DL,
+ TII.get(WebAssembly::I32_WRAP_I64), Reg32)
+ .addReg(FnPtr.getReg());
+ FnPtr.setReg(Reg32);
+ }
+
+ // Move the function pointer to the end of the arguments for indirect calls
+ if (IsIndirect) {
+ auto FnPtr = CallParams.getOperand(0);
+ CallParams.removeOperand(0);
+
+    // For funcrefs, call_indirect is done through __funcref_call_table, and
+    // the funcref is always installed in slot 0 of the table. Therefore,
+    // instead of adding the function pointer at the end of the params list,
+    // a zero (the index into __funcref_call_table) is added.
+ if (IsFuncrefCall) {
+ Register RegZero =
+ MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
+ MachineInstrBuilder MIBC0 =
+ BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);
+
+ BB->insert(CallResults.getIterator(), MIBC0);
+ MachineInstrBuilder(MF, CallParams).addReg(RegZero);
+ } else
+ CallParams.addOperand(FnPtr);
+ }
+
+ for (auto Def : CallResults.defs())
+ MIB.add(Def);
+
+ if (IsIndirect) {
+ // Placeholder for the type index.
+ MIB.addImm(0);
+ // The table into which this call_indirect indexes.
+ MCSymbolWasm *Table = IsFuncrefCall
+ ? WebAssembly::getOrCreateFuncrefCallTableSymbol(
+ MF.getContext(), Subtarget)
+ : WebAssembly::getOrCreateFunctionTableSymbol(
+ MF.getContext(), Subtarget);
+ if (Subtarget->hasReferenceTypes()) {
+ MIB.addSym(Table);
+ } else {
+ // For the MVP there is at most one table whose number is 0, but we can't
+ // write a table symbol or issue relocations. Instead we just ensure the
+ // table is live and write a zero.
+ Table->setNoStrip();
+ MIB.addImm(0);
+ }
+ }
+
+ for (auto Use : CallParams.uses())
+ MIB.add(Use);
+
+ BB->insert(CallResults.getIterator(), MIB);
+ CallParams.eraseFromParent();
+ CallResults.eraseFromParent();
+
+ // If this is a funcref call, to avoid hidden GC roots, we need to clear the
+ // table slot with ref.null upon call_indirect return.
+ //
+ // This generates the following code, which comes right after a call_indirect
+ // of a funcref:
+ //
+ // i32.const 0
+ // ref.null func
+ // table.set __funcref_call_table
+ if (IsIndirect && IsFuncrefCall) {
+ MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
+ MF.getContext(), Subtarget);
+ Register RegZero =
+ MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
+ MachineInstr *Const0 =
+ BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);
+ BB->insertAfter(MIB.getInstr()->getIterator(), Const0);
+
+ Register RegFuncref =
+ MF.getRegInfo().createVirtualRegister(&WebAssembly::FUNCREFRegClass);
+ MachineInstr *RefNull =
+ BuildMI(MF, DL, TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref);
+ BB->insertAfter(Const0->getIterator(), RefNull);
+
+ MachineInstr *TableSet =
+ BuildMI(MF, DL, TII.get(WebAssembly::TABLE_SET_FUNCREF))
+ .addSym(Table)
+ .addReg(RegZero)
+ .addReg(RegFuncref);
+ BB->insertAfter(RefNull->getIterator(), TableSet);
+ }
+
+ return BB;
+}
+
+MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
+ MachineInstr &MI, MachineBasicBlock *BB) const {
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
+
+ switch (MI.getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected instr type to insert");
+ case WebAssembly::FP_TO_SINT_I32_F32:
+ return LowerFPToInt(MI, DL, BB, TII, false, false, false,
+ WebAssembly::I32_TRUNC_S_F32);
+ case WebAssembly::FP_TO_UINT_I32_F32:
+ return LowerFPToInt(MI, DL, BB, TII, true, false, false,
+ WebAssembly::I32_TRUNC_U_F32);
+ case WebAssembly::FP_TO_SINT_I64_F32:
+ return LowerFPToInt(MI, DL, BB, TII, false, true, false,
+ WebAssembly::I64_TRUNC_S_F32);
+ case WebAssembly::FP_TO_UINT_I64_F32:
+ return LowerFPToInt(MI, DL, BB, TII, true, true, false,
+ WebAssembly::I64_TRUNC_U_F32);
+ case WebAssembly::FP_TO_SINT_I32_F64:
+ return LowerFPToInt(MI, DL, BB, TII, false, false, true,
+ WebAssembly::I32_TRUNC_S_F64);
+ case WebAssembly::FP_TO_UINT_I32_F64:
+ return LowerFPToInt(MI, DL, BB, TII, true, false, true,
+ WebAssembly::I32_TRUNC_U_F64);
+ case WebAssembly::FP_TO_SINT_I64_F64:
+ return LowerFPToInt(MI, DL, BB, TII, false, true, true,
+ WebAssembly::I64_TRUNC_S_F64);
+ case WebAssembly::FP_TO_UINT_I64_F64:
+ return LowerFPToInt(MI, DL, BB, TII, true, true, true,
+ WebAssembly::I64_TRUNC_U_F64);
+ case WebAssembly::CALL_RESULTS:
+ case WebAssembly::RET_CALL_RESULTS:
+ return LowerCallResults(MI, DL, BB, Subtarget, TII);
+ }
+}
+
+const char *
+WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
+ switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
+ case WebAssemblyISD::FIRST_NUMBER:
+ case WebAssemblyISD::FIRST_MEM_OPCODE:
+ break;
+#define HANDLE_NODETYPE(NODE) \
+ case WebAssemblyISD::NODE: \
+ return "WebAssemblyISD::" #NODE;
+#define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
+#include "WebAssemblyISD.def"
+#undef HANDLE_MEM_NODETYPE
+#undef HANDLE_NODETYPE
+ }
+ return nullptr;
+}
+
+std::pair<unsigned, const TargetRegisterClass *>
+WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
+ const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
+ // First, see if this is a constraint that directly corresponds to a
+ // WebAssembly register class.
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ case 'r':
+ assert(VT != MVT::iPTR && "Pointer MVT not expected here");
+ if (Subtarget->hasSIMD128() && VT.isVector()) {
+ if (VT.getSizeInBits() == 128)
+ return std::make_pair(0U, &WebAssembly::V128RegClass);
+ }
+ if (VT.isInteger() && !VT.isVector()) {
+ if (VT.getSizeInBits() <= 32)
+ return std::make_pair(0U, &WebAssembly::I32RegClass);
+ if (VT.getSizeInBits() <= 64)
+ return std::make_pair(0U, &WebAssembly::I64RegClass);
+ }
+ if (VT.isFloatingPoint() && !VT.isVector()) {
+ switch (VT.getSizeInBits()) {
+ case 32:
+ return std::make_pair(0U, &WebAssembly::F32RegClass);
+ case 64:
+ return std::make_pair(0U, &WebAssembly::F64RegClass);
+ default:
+ break;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+}
+
+bool WebAssemblyTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
+ // Assume ctz is a relatively cheap operation.
+ return true;
+}
+
+bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
+ // Assume clz is a relatively cheap operation.
+ return true;
+}
+
+bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
+ const AddrMode &AM,
+ Type *Ty, unsigned AS,
+ Instruction *I) const {
+ // WebAssembly offsets are added as unsigned without wrapping. The
+ // isLegalAddressingMode gives us no way to determine if wrapping could be
+ // happening, so we approximate this by accepting only non-negative offsets.
+ if (AM.BaseOffs < 0)
+ return false;
+
+ // WebAssembly has no scale register operands.
+ if (AM.Scale != 0)
+ return false;
+
+ // Everything else is legal.
+ return true;
+}
+
+bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
+ EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
+ MachineMemOperand::Flags /*Flags*/, unsigned *Fast) const {
+  // WebAssembly supports unaligned accesses, though loads and stores that
+  // perform them should carry the p2align attribute, and there may be a
+  // performance impact. We tell LLVM they're "fast" because, for the kinds of
+  // things that LLVM uses this for (merging adjacent stores of constants,
+  // etc.), WebAssembly implementations will either want the unaligned access
+  // or they'll split anyway.
+ if (Fast)
+ *Fast = 1;
+ return true;
+}
+
+bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
+ AttributeList Attr) const {
+ // The current thinking is that wasm engines will perform this optimization,
+ // so we can save on code size.
+ return true;
+}
+
+bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
+ EVT ExtT = ExtVal.getValueType();
+ EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
+ return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
+ (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
+ (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
+}
+
+bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
+ const GlobalAddressSDNode *GA) const {
+ // Wasm doesn't support function addresses with offsets
+ const GlobalValue *GV = GA->getGlobal();
+ return isa<Function>(GV) ? false : TargetLowering::isOffsetFoldingLegal(GA);
+}
+
+bool WebAssemblyTargetLowering::shouldSinkOperands(
+ Instruction *I, SmallVectorImpl<Use *> &Ops) const {
+ using namespace llvm::PatternMatch;
+
+ if (!I->getType()->isVectorTy() || !I->isShift())
+ return false;
+
+ Value *V = I->getOperand(1);
+  // We don't need to sink a constant splat.
+  if (isa<Constant>(V))
+ return false;
+
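+  // A splatted shift amount appears as a shuffle of an insertelement at lane
+  // 0 with a zero mask; sinking both next to the shift lets isel use the
+  // scalar value directly as the wasm shift amount.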
+ if (match(V, m_Shuffle(m_InsertElt(m_Value(), m_Value(), m_ZeroInt()),
+ m_Value(), m_ZeroMask()))) {
+ // Sink insert
+ Ops.push_back(&cast<Instruction>(V)->getOperandUse(0));
+ // Sink shuffle
+ Ops.push_back(&I->getOperandUse(1));
+ return true;
+ }
+
+ return false;
+}
+
+EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
+ LLVMContext &C,
+ EVT VT) const {
+ if (VT.isVector())
+ return VT.changeVectorElementTypeToInteger();
+
+ // So far, all branch instructions in Wasm take an I32 condition.
+ // The default TargetLowering::getSetCCResultType returns the pointer size,
+ // which would be useful to reduce instruction counts when testing
+ // against 64-bit pointers/values if at some point Wasm supports that.
+ return EVT::getIntegerVT(C, 32);
+}
+
+bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
+ const CallInst &I,
+ MachineFunction &MF,
+ unsigned Intrinsic) const {
+ switch (Intrinsic) {
+ case Intrinsic::wasm_memory_atomic_notify:
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = MVT::i32;
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.align = Align(4);
+    // The atomic.notify instruction does not really load the memory specified
+    // with this argument, but a MachineMemOperand must be either a load or a
+    // store, so we set this to a load.
+ // FIXME Volatile isn't really correct, but currently all LLVM atomic
+ // instructions are treated as volatiles in the backend, so we should be
+ // consistent. The same applies for wasm_atomic_wait intrinsics too.
+ Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
+ return true;
+ case Intrinsic::wasm_memory_atomic_wait32:
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = MVT::i32;
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.align = Align(4);
+ Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
+ return true;
+ case Intrinsic::wasm_memory_atomic_wait64:
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = MVT::i64;
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.align = Align(8);
+ Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
+ return true;
+ default:
+ return false;
+ }
+}
+
+void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
+ const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
+ const SelectionDAG &DAG, unsigned Depth) const {
+ switch (Op.getOpcode()) {
+ default:
+ break;
+ case ISD::INTRINSIC_WO_CHAIN: {
+ unsigned IntNo = Op.getConstantOperandVal(0);
+ switch (IntNo) {
+ default:
+ break;
+ case Intrinsic::wasm_bitmask: {
+ unsigned BitWidth = Known.getBitWidth();
+ EVT VT = Op.getOperand(1).getSimpleValueType();
+ unsigned PossibleBits = VT.getVectorNumElements();
+ APInt ZeroMask = APInt::getHighBitsSet(BitWidth, BitWidth - PossibleBits);
+ Known.Zero |= ZeroMask;
+ break;
+ }
+ }
+ }
+ }
+}
+
+TargetLoweringBase::LegalizeTypeAction
+WebAssemblyTargetLowering::getPreferredVectorAction(MVT VT) const {
+ if (VT.isFixedLengthVector()) {
+ MVT EltVT = VT.getVectorElementType();
+ // We have legal vector types with these lane types, so widening the
+ // vector would let us use some of the lanes directly without having to
+ // extend or truncate values.
+ if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
+ EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
+ return TypeWidenVector;
+ }
+
+ return TargetLoweringBase::getPreferredVectorAction(VT);
+}
+
+bool WebAssemblyTargetLowering::shouldSimplifyDemandedVectorElts(
+ SDValue Op, const TargetLoweringOpt &TLO) const {
+ // ISel runs DAGCombiner again after legalization; this step is called the
+ // SelectionDAG optimization phase. This post-legalization combining process
+ // runs DAGCombiner on each node, and if a change was made, re-runs
+ // legalization on the node and its user nodes to make sure everything is in
+ // a legalized state.
+ //
+ // Legalization calls lowering routines, and we do custom lowering for
+ // build_vectors (LowerBUILD_VECTOR), which converts undef vector elements
+ // into zeros. But a set of routines in DAGCombiner turns unused (= not
+ // demanded) nodes into undef, among which SimplifyDemandedVectorElts turns
+ // unused vector elements into undefs. That routine does not interact well
+ // with our custom LowerBUILD_VECTOR, which turns undefs into zeros. The
+ // combination can result in an infinite loop, in which undefs are converted
+ // to zeros in legalization and back to undefs in combining.
+ //
+ // So after DAG is legalized, we prevent SimplifyDemandedVectorElts from
+ // running for build_vectors.
+ if (Op.getOpcode() == ISD::BUILD_VECTOR && TLO.LegalOps && TLO.LegalTys)
+ return false;
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// WebAssembly Lowering private implementation.
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Lowering Code
+//===----------------------------------------------------------------------===//
+
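+// Emit a diagnostic for an unsupported construct; lowering then continues on
+// a best-effort basis rather than crashing.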
+static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ DAG.getContext()->diagnose(
+ DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
+}
+
+// Test whether the given calling convention is supported.
+static bool callingConvSupported(CallingConv::ID CallConv) {
+ // We currently support the language-independent, target-independent
+ // conventions. We don't yet have a way to annotate calls with properties like
+ // "cold", and we don't have any call-clobbered registers, so these are mostly
+ // all handled the same.
+ return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
+ CallConv == CallingConv::Cold ||
+ CallConv == CallingConv::PreserveMost ||
+ CallConv == CallingConv::PreserveAll ||
+ CallConv == CallingConv::CXX_FAST_TLS ||
+ CallConv == CallingConv::WASM_EmscriptenInvoke ||
+ CallConv == CallingConv::Swift;
+}
+
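+// Lower an outgoing call: diagnose unsupported conventions and argument
+// kinds, demote tail calls that cannot be lowered as such, copy byval
+// arguments into fresh stack objects, and pack non-fixed varargs into a
+// caller-allocated stack buffer whose address is passed as a final argument.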
+SDValue
+WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ SDLoc DL = CLI.DL;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ MachineFunction &MF = DAG.getMachineFunction();
+ auto Layout = MF.getDataLayout();
+
+ CallingConv::ID CallConv = CLI.CallConv;
+ if (!callingConvSupported(CallConv))
+ fail(DL, DAG,
+ "WebAssembly doesn't support language-specific or target-specific "
+ "calling conventions yet");
+ if (CLI.IsPatchPoint)
+ fail(DL, DAG, "WebAssembly doesn't support patch point yet");
+
+ if (CLI.IsTailCall) {
+ auto NoTail = [&](const char *Msg) {
+ if (CLI.CB && CLI.CB->isMustTailCall())
+ fail(DL, DAG, Msg);
+ CLI.IsTailCall = false;
+ };
+
+ if (!Subtarget->hasTailCall())
+ NoTail("WebAssembly 'tail-call' feature not enabled");
+
+ // Varargs calls cannot be tail calls because the buffer is on the stack
+ if (CLI.IsVarArg)
+ NoTail("WebAssembly does not support varargs tail calls");
+
+ // Do not tail call unless caller and callee return types match
+ const Function &F = MF.getFunction();
+ const TargetMachine &TM = getTargetMachine();
+ Type *RetTy = F.getReturnType();
+ SmallVector<MVT, 4> CallerRetTys;
+ SmallVector<MVT, 4> CalleeRetTys;
+ computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
+ computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
+ bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
+ std::equal(CallerRetTys.begin(), CallerRetTys.end(),
+ CalleeRetTys.begin());
+ if (!TypesMatch)
+ NoTail("WebAssembly tail call requires caller and callee return types to "
+ "match");
+
+ // If pointers to local stack values are passed, we cannot tail call
+ if (CLI.CB) {
+ for (auto &Arg : CLI.CB->args()) {
+ Value *Val = Arg.get();
+ // Trace the value back through pointer operations
+ while (true) {
+ Value *Src = Val->stripPointerCastsAndAliases();
+ if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
+ Src = GEP->getPointerOperand();
+ if (Val == Src)
+ break;
+ Val = Src;
+ }
+ if (isa<AllocaInst>(Val)) {
+ NoTail(
+ "WebAssembly does not support tail calling with stack arguments");
+ break;
+ }
+ }
+ }
+ }
+
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+
+ // The generic code may have added an sret argument. If we're lowering an
+ // invoke function, the ABI requires that the function pointer be the first
+ // argument, so we may have to swap the arguments.
+ if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
+ Outs[0].Flags.isSRet()) {
+ std::swap(Outs[0], Outs[1]);
+ std::swap(OutVals[0], OutVals[1]);
+ }
+
+ bool HasSwiftSelfArg = false;
+ bool HasSwiftErrorArg = false;
+ unsigned NumFixedArgs = 0;
+ for (unsigned I = 0; I < Outs.size(); ++I) {
+ const ISD::OutputArg &Out = Outs[I];
+ SDValue &OutVal = OutVals[I];
+ HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
+ HasSwiftErrorArg |= Out.Flags.isSwiftError();
+ if (Out.Flags.isNest())
+ fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
+ if (Out.Flags.isInAlloca())
+ fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
+ if (Out.Flags.isInConsecutiveRegs())
+ fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
+ if (Out.Flags.isInConsecutiveRegsLast())
+ fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
+ if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
+ auto &MFI = MF.getFrameInfo();
+ int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
+ Out.Flags.getNonZeroByValAlign(),
+ /*isSS=*/false);
+ SDValue SizeNode =
+ DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
+ SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
+ Chain = DAG.getMemcpy(
+ Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
+ /*isVolatile*/ false, /*AlwaysInline=*/false,
+ /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
+ OutVal = FINode;
+ }
+ // Count the number of fixed args *after* legalization.
+ NumFixedArgs += Out.IsFixed;
+ }
+
+ bool IsVarArg = CLI.IsVarArg;
+ auto PtrVT = getPointerTy(Layout);
+
+ // For swiftcc, emit additional swiftself and swifterror arguments if they
+ // are not already present. These additional arguments are also added to the
+ // callee signature; they are necessary to match the callee and caller
+ // signatures for indirect calls.
+ if (CallConv == CallingConv::Swift) {
+ if (!HasSwiftSelfArg) {
+ NumFixedArgs++;
+ ISD::OutputArg Arg;
+ Arg.Flags.setSwiftSelf();
+ CLI.Outs.push_back(Arg);
+ SDValue ArgVal = DAG.getUNDEF(PtrVT);
+ CLI.OutVals.push_back(ArgVal);
+ }
+ if (!HasSwiftErrorArg) {
+ NumFixedArgs++;
+ ISD::OutputArg Arg;
+ Arg.Flags.setSwiftError();
+ CLI.Outs.push_back(Arg);
+ SDValue ArgVal = DAG.getUNDEF(PtrVT);
+ CLI.OutVals.push_back(ArgVal);
+ }
+ }
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
+
+ if (IsVarArg) {
+ // Outgoing non-fixed arguments are placed in a buffer. First
+ // compute their offsets and the total amount of buffer space needed.
+ for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
+ const ISD::OutputArg &Out = Outs[I];
+ SDValue &Arg = OutVals[I];
+ EVT VT = Arg.getValueType();
+ assert(VT != MVT::iPTR && "Legalized args should be concrete");
+ Type *Ty = VT.getTypeForEVT(*DAG.getContext());
+ Align Alignment =
+ std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
+ unsigned Offset =
+ CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
+ CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
+ Offset, VT.getSimpleVT(),
+ CCValAssign::Full));
+ }
+ }
+
+ unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
+
+ SDValue FINode;
+ if (IsVarArg && NumBytes) {
+ // For non-fixed arguments, next emit stores to store the argument values
+ // to the stack buffer at the offsets computed above.
+ int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
+ Layout.getStackAlignment(),
+ /*isSS=*/false);
+ unsigned ValNo = 0;
+ SmallVector<SDValue, 8> Chains;
+ for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
+ assert(ArgLocs[ValNo].getValNo() == ValNo &&
+ "ArgLocs should remain in order and only hold varargs args");
+ unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
+ FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
+ SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
+ DAG.getConstant(Offset, DL, PtrVT));
+ Chains.push_back(
+ DAG.getStore(Chain, DL, Arg, Add,
+ MachinePointerInfo::getFixedStack(MF, FI, Offset)));
+ }
+ if (!Chains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
+ } else if (IsVarArg) {
+ FINode = DAG.getIntPtrConstant(0, DL);
+ }
+
+ if (Callee->getOpcode() == ISD::GlobalAddress) {
+ // If the callee is a GlobalAddress node (quite common, every direct call
+ // is), turn it into a TargetGlobalAddress node so that LowerGlobalAddress
+ // doesn't add MO_GOT, which is not needed for direct calls.
+ GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Callee);
+ Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
+ getPointerTy(DAG.getDataLayout()),
+ GA->getOffset());
+ Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
+ getPointerTy(DAG.getDataLayout()), Callee);
+ }
+
+ // Compute the operands for the CALLn node.
+ SmallVector<SDValue, 16> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+
+ // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
+ // isn't reliable.
+ Ops.append(OutVals.begin(),
+ IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
+ // Add a pointer to the vararg buffer.
+ if (IsVarArg)
+ Ops.push_back(FINode);
+
+ SmallVector<EVT, 8> InTys;
+ for (const auto &In : Ins) {
+ assert(!In.Flags.isByVal() && "byval is not valid for return values");
+ assert(!In.Flags.isNest() && "nest is not valid for return values");
+ if (In.Flags.isInAlloca())
+ fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
+ if (In.Flags.isInConsecutiveRegs())
+ fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
+ if (In.Flags.isInConsecutiveRegsLast())
+ fail(DL, DAG,
+ "WebAssembly hasn't implemented cons regs last return values");
+ // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
+ // registers.
+ InTys.push_back(In.VT);
+ }
+
+ // Lastly, if this is a call to a funcref, we need to add a table.set
+ // instruction to the chain and transform the call.
+ if (CLI.CB && WebAssembly::isWebAssemblyFuncrefType(
+ CLI.CB->getCalledOperand()->getType())) {
+ // In the absence of the function references proposal, where a funcref call
+ // would be lowered to call_ref, we use reference types instead: we generate
+ // a table.set that writes the funcref into a special table used solely for
+ // this purpose, followed by a call_indirect. Here we just generate the
+ // table.set and make it the new chain, so that the rest of LowerCall can
+ // finalize the lowering by generating the call_indirect.
+ SDValue Chain = Ops[0];
+
+ MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
+ MF.getContext(), Subtarget);
+ SDValue Sym = DAG.getMCSymbol(Table, PtrVT);
+ SDValue TableSlot = DAG.getConstant(0, DL, MVT::i32);
+ SDValue TableSetOps[] = {Chain, Sym, TableSlot, Callee};
+ SDValue TableSet = DAG.getMemIntrinsicNode(
+ WebAssemblyISD::TABLE_SET, DL, DAG.getVTList(MVT::Other), TableSetOps,
+ MVT::funcref,
+ // Machine Mem Operand args
+ MachinePointerInfo(
+ WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF),
+ CLI.CB->getCalledOperand()->getPointerAlignment(DAG.getDataLayout()),
+ MachineMemOperand::MOStore);
+
+ Ops[0] = TableSet; // The new chain is the TableSet itself
+ }
+
+ if (CLI.IsTailCall) {
+ // ret_calls do not return values to the current frame
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
+ }
+
+ InTys.push_back(MVT::Other);
+ SDVTList InTyList = DAG.getVTList(InTys);
+ SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);
+
+ for (size_t I = 0; I < Ins.size(); ++I)
+ InVals.push_back(Res.getValue(I));
+
+ // Return the chain
+ return Res.getValue(Ins.size());
+}
+
+bool WebAssemblyTargetLowering::CanLowerReturn(
+ CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext & /*Context*/) const {
+ // WebAssembly can only handle returning tuples with multivalue enabled
+ return Subtarget->hasMultivalue() || Outs.size() <= 1;
+}
+
+SDValue WebAssemblyTargetLowering::LowerReturn(
+ SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
+ SelectionDAG &DAG) const {
+ assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
+ "MVP WebAssembly can only return up to one value");
+ if (!callingConvSupported(CallConv))
+ fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
+
+ SmallVector<SDValue, 4> RetOps(1, Chain);
+ RetOps.append(OutVals.begin(), OutVals.end());
+ Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
+
+ // Record the number and types of the return values.
+ for (const ISD::OutputArg &Out : Outs) {
+ assert(!Out.Flags.isByVal() && "byval is not valid for return values");
+ assert(!Out.Flags.isNest() && "nest is not valid for return values");
+ assert(Out.IsFixed && "non-fixed return value is not valid");
+ if (Out.Flags.isInAlloca())
+ fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
+ if (Out.Flags.isInConsecutiveRegs())
+ fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
+ if (Out.Flags.isInConsecutiveRegsLast())
+ fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
+ }
+
+ return Chain;
+}
+
+SDValue WebAssemblyTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+ if (!callingConvSupported(CallConv))
+ fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
+
+ // Set up the incoming ARGUMENTS value, which serves to represent the liveness
+ // of the incoming values before they're represented by virtual registers.
+ MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);
+
+ bool HasSwiftErrorArg = false;
+ bool HasSwiftSelfArg = false;
+ for (const ISD::InputArg &In : Ins) {
+ HasSwiftSelfArg |= In.Flags.isSwiftSelf();
+ HasSwiftErrorArg |= In.Flags.isSwiftError();
+ if (In.Flags.isInAlloca())
+ fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
+ if (In.Flags.isNest())
+ fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
+ if (In.Flags.isInConsecutiveRegs())
+ fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
+ if (In.Flags.isInConsecutiveRegsLast())
+ fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
+ // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
+ // registers.
+ InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
+ DAG.getTargetConstant(InVals.size(),
+ DL, MVT::i32))
+ : DAG.getUNDEF(In.VT));
+
+ // Record the number and types of arguments.
+ MFI->addParam(In.VT);
+ }
+
+ // For swiftcc, emit additional swiftself and swifterror arguments if they
+ // are not already present. These additional arguments are also added to the
+ // callee signature; they are necessary to match the callee and caller
+ // signatures for indirect calls.
+ auto PtrVT = getPointerTy(MF.getDataLayout());
+ if (CallConv == CallingConv::Swift) {
+ if (!HasSwiftSelfArg) {
+ MFI->addParam(PtrVT);
+ }
+ if (!HasSwiftErrorArg) {
+ MFI->addParam(PtrVT);
+ }
+ }
+ // Varargs are copied into a buffer allocated by the caller, and a pointer to
+ // the buffer is passed as an argument.
+ if (IsVarArg) {
+ MVT PtrVT = getPointerTy(MF.getDataLayout());
+ Register VarargVreg =
+ MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
+ MFI->setVarargBufferVreg(VarargVreg);
+ Chain = DAG.getCopyToReg(
+ Chain, DL, VarargVreg,
+ DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
+ DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
+ MFI->addParam(PtrVT);
+ }
+
+ // Record the number and types of arguments and results.
+ SmallVector<MVT, 4> Params;
+ SmallVector<MVT, 4> Results;
+ computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(),
+ MF.getFunction(), DAG.getTarget(), Params, Results);
+ for (MVT VT : Results)
+ MFI->addResult(VT);
+ // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
+ // the param logic here with computeSignatureVTs
+ assert(MFI->getParams().size() == Params.size() &&
+ std::equal(MFI->getParams().begin(), MFI->getParams().end(),
+ Params.begin()));
+
+ return Chain;
+}
+
+void WebAssemblyTargetLowering::ReplaceNodeResults(
+ SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
+ switch (N->getOpcode()) {
+ case ISD::SIGN_EXTEND_INREG:
+ // Do not add any results, signifying that N should not be custom lowered
+ // after all. This happens because simd128 turns on custom lowering for
+ // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
+ // illegal type.
+ break;
+ case ISD::SIGN_EXTEND_VECTOR_INREG:
+ case ISD::ZERO_EXTEND_VECTOR_INREG:
+ // Do not add any results, signifying that N should not be custom lowered.
+ // EXTEND_VECTOR_INREG is implemented for some vectors, but not all.
+ break;
+ default:
+ llvm_unreachable(
+ "ReplaceNodeResults not implemented for this op for WebAssembly!");
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Custom lowering hooks.
+//===----------------------------------------------------------------------===//
+
+SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ switch (Op.getOpcode()) {
+ default:
+ llvm_unreachable("unimplemented operation lowering");
+ return SDValue();
+ case ISD::FrameIndex:
+ return LowerFrameIndex(Op, DAG);
+ case ISD::GlobalAddress:
+ return LowerGlobalAddress(Op, DAG);
+ case ISD::GlobalTLSAddress:
+ return LowerGlobalTLSAddress(Op, DAG);
+ case ISD::ExternalSymbol:
+ return LowerExternalSymbol(Op, DAG);
+ case ISD::JumpTable:
+ return LowerJumpTable(Op, DAG);
+ case ISD::BR_JT:
+ return LowerBR_JT(Op, DAG);
+ case ISD::VASTART:
+ return LowerVASTART(Op, DAG);
+ case ISD::BlockAddress:
+ case ISD::BRIND:
+ fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
+ return SDValue();
+ case ISD::RETURNADDR:
+ return LowerRETURNADDR(Op, DAG);
+ case ISD::FRAMEADDR:
+ return LowerFRAMEADDR(Op, DAG);
+ case ISD::CopyToReg:
+ return LowerCopyToReg(Op, DAG);
+ case ISD::EXTRACT_VECTOR_ELT:
+ case ISD::INSERT_VECTOR_ELT:
+ return LowerAccessVectorElement(Op, DAG);
+ case ISD::INTRINSIC_VOID:
+ case ISD::INTRINSIC_WO_CHAIN:
+ case ISD::INTRINSIC_W_CHAIN:
+ return LowerIntrinsic(Op, DAG);
+ case ISD::SIGN_EXTEND_INREG:
+ return LowerSIGN_EXTEND_INREG(Op, DAG);
+ case ISD::ZERO_EXTEND_VECTOR_INREG:
+ case ISD::SIGN_EXTEND_VECTOR_INREG:
+ return LowerEXTEND_VECTOR_INREG(Op, DAG);
+ case ISD::BUILD_VECTOR:
+ return LowerBUILD_VECTOR(Op, DAG);
+ case ISD::VECTOR_SHUFFLE:
+ return LowerVECTOR_SHUFFLE(Op, DAG);
+ case ISD::SETCC:
+ return LowerSETCC(Op, DAG);
+ case ISD::SHL:
+ case ISD::SRA:
+ case ISD::SRL:
+ return LowerShift(Op, DAG);
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
+ return LowerFP_TO_INT_SAT(Op, DAG);
+ case ISD::LOAD:
+ return LowerLoad(Op, DAG);
+ case ISD::STORE:
+ return LowerStore(Op, DAG);
+ case ISD::CTPOP:
+ case ISD::CTLZ:
+ case ISD::CTTZ:
+ return DAG.UnrollVectorOp(Op.getNode());
+ }
+}
+
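+// Test whether Op is a global address living in the wasm "var" address
+// space, i.e. something accessed with global.get/global.set.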
+static bool IsWebAssemblyGlobal(SDValue Op) {
+ if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
+ return WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace());
+
+ return false;
+}
+
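+// If Op is a frame index that has been lowered to a wasm local (rather than
+// to linear-memory stack space), return the index of that local.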
+static std::optional<unsigned> IsWebAssemblyLocal(SDValue Op,
+ SelectionDAG &DAG) {
+ const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op);
+ if (!FI)
+ return std::nullopt;
+
+ auto &MF = DAG.getMachineFunction();
+ return WebAssemblyFrameLowering::getLocalForStackObject(MF, FI->getIndex());
+}
+
+SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
+ const SDValue &Value = SN->getValue();
+ const SDValue &Base = SN->getBasePtr();
+ const SDValue &Offset = SN->getOffset();
+
+ if (IsWebAssemblyGlobal(Base)) {
+ if (!Offset->isUndef())
+ report_fatal_error("unexpected offset when storing to webassembly global",
+ false);
+
+ SDVTList Tys = DAG.getVTList(MVT::Other);
+ SDValue Ops[] = {SN->getChain(), Value, Base};
+ return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_SET, DL, Tys, Ops,
+ SN->getMemoryVT(), SN->getMemOperand());
+ }
+
+ if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
+ if (!Offset->isUndef())
+ report_fatal_error("unexpected offset when storing to webassembly local",
+ false);
+
+ SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
+ SDVTList Tys = DAG.getVTList(MVT::Other); // The chain.
+ SDValue Ops[] = {SN->getChain(), Idx, Value};
+ return DAG.getNode(WebAssemblyISD::LOCAL_SET, DL, Tys, Ops);
+ }
+
+ if (WebAssembly::isWasmVarAddressSpace(SN->getAddressSpace()))
+ report_fatal_error(
+ "Encountered an unlowerable store to the wasm_var address space",
+ false);
+
+ return Op;
+}
+
+SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
+ const SDValue &Base = LN->getBasePtr();
+ const SDValue &Offset = LN->getOffset();
+
+ if (IsWebAssemblyGlobal(Base)) {
+ if (!Offset->isUndef())
+ report_fatal_error(
+ "unexpected offset when loading from webassembly global", false);
+
+ SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
+ SDValue Ops[] = {LN->getChain(), Base};
+ return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops,
+ LN->getMemoryVT(), LN->getMemOperand());
+ }
+
+ if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
+ if (!Offset->isUndef())
+ report_fatal_error(
+ "unexpected offset when loading from webassembly local", false);
+
+ SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
+ EVT LocalVT = LN->getValueType(0);
+ SDValue LocalGet = DAG.getNode(WebAssemblyISD::LOCAL_GET, DL, LocalVT,
+ {LN->getChain(), Idx});
+ SDValue Result = DAG.getMergeValues({LocalGet, LN->getChain()}, DL);
+ assert(Result->getNumValues() == 2 && "Loads must carry a chain!");
+ return Result;
+ }
+
+ if (WebAssembly::isWasmVarAddressSpace(LN->getAddressSpace()))
+ report_fatal_error(
+ "Encountered an unlowerable load from the wasm_var address space",
+ false);
+
+ return Op;
+}
+
+SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue Src = Op.getOperand(2);
+ if (isa<FrameIndexSDNode>(Src.getNode())) {
+ // CopyToReg nodes don't support FrameIndex operands. Other targets select
+ // the FI to some LEA-like instruction, but since we don't have that, we
+ // need to insert some kind of instruction that can take an FI operand and
+ // produce a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
+ // local.copy between Op and its FI operand.
+ SDValue Chain = Op.getOperand(0);
+ SDLoc DL(Op);
+ Register Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
+ EVT VT = Src.getValueType();
+ SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
+ : WebAssembly::COPY_I64,
+ DL, VT, Src),
+ 0);
+ return Op.getNode()->getNumValues() == 1
+ ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
+ : DAG.getCopyToReg(Chain, DL, Reg, Copy,
+ Op.getNumOperands() == 4 ? Op.getOperand(3)
+ : SDValue());
+ }
+ return SDValue();
+}
+
+SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
+ SelectionDAG &DAG) const {
+ int FI = cast<FrameIndexSDNode>(Op)->getIndex();
+ return DAG.getTargetFrameIndex(FI, Op.getValueType());
+}
+
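+// __builtin_return_address is only supported on Emscripten, where it lowers
+// to a call into the runtime via the RETURN_ADDRESS libcall.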
+SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+
+ if (!Subtarget->getTargetTriple().isOSEmscripten()) {
+ fail(DL, DAG,
+ "Non-Emscripten WebAssembly hasn't implemented "
+ "__builtin_return_address");
+ return SDValue();
+ }
+
+ if (verifyReturnAddressArgumentIsConstant(Op, DAG))
+ return SDValue();
+
+ unsigned Depth = Op.getConstantOperandVal(0);
+ MakeLibCallOptions CallOptions;
+ return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
+ {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
+ .first;
+}
+
+SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ // Non-zero depths are not supported by WebAssembly currently. Use the
+ // legalizer's default expansion, which is to return 0 (what this function is
+ // documented to do).
+ if (Op.getConstantOperandVal(0) > 0)
+ return SDValue();
+
+ DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
+ EVT VT = Op.getValueType();
+ Register FP =
+ Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
+ return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
+}
+
+SDValue
+WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ const auto *GA = cast<GlobalAddressSDNode>(Op);
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
+ report_fatal_error("cannot use thread-local storage without bulk memory",
+ false);
+
+ const GlobalValue *GV = GA->getGlobal();
+
+ // Currently only Emscripten supports dynamic linking with threads. Therefore,
+ // on other targets, if we have thread-local storage, only the local-exec
+ // model is possible.
+ auto model = Subtarget->getTargetTriple().isOSEmscripten()
+ ? GV->getThreadLocalMode()
+ : GlobalValue::LocalExecTLSModel;
+
+ // Unsupported TLS modes
+ assert(model != GlobalValue::NotThreadLocal);
+ assert(model != GlobalValue::InitialExecTLSModel);
+
+ if (model == GlobalValue::LocalExecTLSModel ||
+ model == GlobalValue::LocalDynamicTLSModel ||
+ (model == GlobalValue::GeneralDynamicTLSModel &&
+ getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))) {
+ // For DSO-local TLS variables we use an offset from __tls_base.
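+ // Roughly, with 32-bit pointers, this emits:
+ //   global.get __tls_base
+ //   i32.const  GV@TLSREL
+ //   i32.add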
+
+ MVT PtrVT = getPointerTy(DAG.getDataLayout());
+ auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
+ : WebAssembly::GLOBAL_GET_I32;
+ const char *BaseName = MF.createExternalSymbolName("__tls_base");
+
+ SDValue BaseAddr(
+ DAG.getMachineNode(GlobalGet, DL, PtrVT,
+ DAG.getTargetExternalSymbol(BaseName, PtrVT)),
+ 0);
+
+ SDValue TLSOffset = DAG.getTargetGlobalAddress(
+ GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
+ SDValue SymOffset =
+ DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, TLSOffset);
+
+ return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymOffset);
+ }
+
+ assert(model == GlobalValue::GeneralDynamicTLSModel);
+
+ EVT VT = Op.getValueType();
+ return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
+ DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
+ GA->getOffset(),
+ WebAssemblyII::MO_GOT_TLS));
+}
+
+SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ const auto *GA = cast<GlobalAddressSDNode>(Op);
+ EVT VT = Op.getValueType();
+ assert(GA->getTargetFlags() == 0 &&
+ "Unexpected target flags on generic GlobalAddressSDNode");
+ if (!WebAssembly::isValidAddressSpace(GA->getAddressSpace()))
+ fail(DL, DAG, "Invalid address space for WebAssembly target");
+
+ unsigned OperandFlags = 0;
+ const GlobalValue *GV = GA->getGlobal();
+ // Since WebAssembly tables cannot yet be shared across modules, we don't
+ // need special treatment for tables in PIC mode.
+ if (isPositionIndependent() &&
+ !WebAssembly::isWebAssemblyTableType(GV->getValueType())) {
+ if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MVT PtrVT = getPointerTy(MF.getDataLayout());
+ const char *BaseName;
+ if (GV->getValueType()->isFunctionTy()) {
+ BaseName = MF.createExternalSymbolName("__table_base");
+ OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
+ } else {
+ BaseName = MF.createExternalSymbolName("__memory_base");
+ OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
+ }
+ SDValue BaseAddr =
+ DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
+ DAG.getTargetExternalSymbol(BaseName, PtrVT));
+
+ SDValue SymAddr = DAG.getNode(
+ WebAssemblyISD::WrapperREL, DL, VT,
+ DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
+ OperandFlags));
+
+ return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
+ }
+ OperandFlags = WebAssemblyII::MO_GOT;
+ }
+
+ return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
+ DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
+ GA->getOffset(), OperandFlags));
+}
+
+SDValue
+WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ const auto *ES = cast<ExternalSymbolSDNode>(Op);
+ EVT VT = Op.getValueType();
+ assert(ES->getTargetFlags() == 0 &&
+ "Unexpected target flags on generic ExternalSymbolSDNode");
+ return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
+ DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
+}
+
+SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
+ SelectionDAG &DAG) const {
+ // There's no need for a Wrapper node because we always incorporate a jump
+ // table operand into a BR_TABLE instruction, rather than ever
+ // materializing it in a register.
+ const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
+ return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
+ JT->getTargetFlags());
+}
+
+SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ SDValue Chain = Op.getOperand(0);
+ const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
+ SDValue Index = Op.getOperand(2);
+ assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
+
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Index);
+
+ MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
+ const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
+
+ // Add an operand for each case.
+ for (auto *MBB : MBBs)
+ Ops.push_back(DAG.getBasicBlock(MBB));
+
+ // Add the first MBB as a dummy default target for now. This will be replaced
+ // with the proper default target (and the preceding range check eliminated)
+ // if possible by WebAssemblyFixBrTableDefaults.
+ Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
+ return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
+}
+
+SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
+
+ auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
+ const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+
+ SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
+ MFI->getVarargBufferVreg(), PtrVT);
+ return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
+ MachinePointerInfo(SV));
+}
+
+SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
+ SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ unsigned IntNo;
+ switch (Op.getOpcode()) {
+ case ISD::INTRINSIC_VOID:
+ case ISD::INTRINSIC_W_CHAIN:
+ IntNo = Op.getConstantOperandVal(1);
+ break;
+ case ISD::INTRINSIC_WO_CHAIN:
+ IntNo = Op.getConstantOperandVal(0);
+ break;
+ default:
+ llvm_unreachable("Invalid intrinsic");
+ }
+ SDLoc DL(Op);
+
+ switch (IntNo) {
+ default:
+ return SDValue(); // Don't custom lower most intrinsics.
+
+ case Intrinsic::wasm_lsda: {
+ auto PtrVT = getPointerTy(MF.getDataLayout());
+ const char *SymName = MF.createExternalSymbolName(
+ "GCC_except_table" + std::to_string(MF.getFunctionNumber()));
+ if (isPositionIndependent()) {
+ SDValue Node = DAG.getTargetExternalSymbol(
+ SymName, PtrVT, WebAssemblyII::MO_MEMORY_BASE_REL);
+ const char *BaseName = MF.createExternalSymbolName("__memory_base");
+ SDValue BaseAddr =
+ DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
+ DAG.getTargetExternalSymbol(BaseName, PtrVT));
+ SDValue SymAddr =
+ DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, Node);
+ return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
+ }
+ SDValue Node = DAG.getTargetExternalSymbol(SymName, PtrVT);
+ return DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, Node);
+ }
+
+ case Intrinsic::wasm_shuffle: {
+ // Drop in-chain and replace undefs, but otherwise pass through unchanged
+ SDValue Ops[18];
+ size_t OpIdx = 0;
+ Ops[OpIdx++] = Op.getOperand(1);
+ Ops[OpIdx++] = Op.getOperand(2);
+ while (OpIdx < 18) {
+ const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
+ if (MaskIdx.isUndef() ||
+ cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
+ bool isTarget = MaskIdx.getNode()->getOpcode() == ISD::TargetConstant;
+ Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32, isTarget);
+ } else {
+ Ops[OpIdx++] = MaskIdx;
+ }
+ }
+ return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
+ }
+ }
+}
+
+SDValue
+WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ // If sign extension operations are disabled, allow sext_inreg only if operand
+ // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
+ // extension operations, but allowing sext_inreg in this context lets us have
+ // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
+ // everywhere would be simpler in this file, but would necessitate large and
+ // brittle patterns to undo the expansion and select extract_lane_s
+ // instructions.
+ assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
+ if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+ return SDValue();
+
+ const SDValue &Extract = Op.getOperand(0);
+ MVT VecT = Extract.getOperand(0).getSimpleValueType();
+ if (VecT.getVectorElementType().getSizeInBits() > 32)
+ return SDValue();
+ MVT ExtractedLaneT =
+ cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
+ MVT ExtractedVecT =
+ MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
+ if (ExtractedVecT == VecT)
+ return Op;
+
+ // Bitcast vector to appropriate type to ensure ISel pattern coverage
+ const SDNode *Index = Extract.getOperand(1).getNode();
+ if (!isa<ConstantSDNode>(Index))
+ return SDValue();
+ unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
+ unsigned Scale =
+ ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
+ assert(Scale > 1);
+ SDValue NewIndex =
+ DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
+ SDValue NewExtract = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
+ DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
+ return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
+ Op.getOperand(1));
+}
+
+SDValue
+WebAssemblyTargetLowering::LowerEXTEND_VECTOR_INREG(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ SDValue Src = Op.getOperand(0);
+ EVT SrcVT = Src.getValueType();
+
+ if (SrcVT.getVectorElementType() == MVT::i1 ||
+ SrcVT.getVectorElementType() == MVT::i64)
+ return SDValue();
+
+ assert(VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits() == 0 &&
+ "Unexpected extension factor.");
+ unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
+
+ if (Scale != 2 && Scale != 4 && Scale != 8)
+ return SDValue();
+
+ unsigned Ext;
+ switch (Op.getOpcode()) {
+ case ISD::ZERO_EXTEND_VECTOR_INREG:
+ Ext = WebAssemblyISD::EXTEND_LOW_U;
+ break;
+ case ISD::SIGN_EXTEND_VECTOR_INREG:
+ Ext = WebAssemblyISD::EXTEND_LOW_S;
+ break;
+ }
+
+ SDValue Ret = Src;
+ while (Scale != 1) {
+ Ret = DAG.getNode(Ext, DL,
+ Ret.getValueType()
+ .widenIntegerVectorElementType(*DAG.getContext())
+ .getHalfNumVectorElementsVT(*DAG.getContext()),
+ Ret);
+ Scale /= 2;
+ }
+ assert(Ret.getValueType() == VT);
+ return Ret;
+}
+
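+// Lower a v2f64 BUILD_VECTOR whose two lanes are conversions of lanes of a
+// single source vector into a convert_low / promote_low node, shuffling the
+// converted lanes into the low half first if necessary.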
+static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG) {
+ SDLoc DL(Op);
+ if (Op.getValueType() != MVT::v2f64)
+ return SDValue();
+
+ auto GetConvertedLane = [](SDValue Op, unsigned &Opcode, SDValue &SrcVec,
+ unsigned &Index) -> bool {
+ switch (Op.getOpcode()) {
+ case ISD::SINT_TO_FP:
+ Opcode = WebAssemblyISD::CONVERT_LOW_S;
+ break;
+ case ISD::UINT_TO_FP:
+ Opcode = WebAssemblyISD::CONVERT_LOW_U;
+ break;
+ case ISD::FP_EXTEND:
+ Opcode = WebAssemblyISD::PROMOTE_LOW;
+ break;
+ default:
+ return false;
+ }
+
+ auto ExtractVector = Op.getOperand(0);
+ if (ExtractVector.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+ return false;
+
+ if (!isa<ConstantSDNode>(ExtractVector.getOperand(1).getNode()))
+ return false;
+
+ SrcVec = ExtractVector.getOperand(0);
+ Index = ExtractVector.getConstantOperandVal(1);
+ return true;
+ };
+
+ unsigned LHSOpcode, RHSOpcode, LHSIndex, RHSIndex;
+ SDValue LHSSrcVec, RHSSrcVec;
+ if (!GetConvertedLane(Op.getOperand(0), LHSOpcode, LHSSrcVec, LHSIndex) ||
+ !GetConvertedLane(Op.getOperand(1), RHSOpcode, RHSSrcVec, RHSIndex))
+ return SDValue();
+
+ if (LHSOpcode != RHSOpcode)
+ return SDValue();
+
+ MVT ExpectedSrcVT;
+ switch (LHSOpcode) {
+ case WebAssemblyISD::CONVERT_LOW_S:
+ case WebAssemblyISD::CONVERT_LOW_U:
+ ExpectedSrcVT = MVT::v4i32;
+ break;
+ case WebAssemblyISD::PROMOTE_LOW:
+ ExpectedSrcVT = MVT::v4f32;
+ break;
+ }
+ if (LHSSrcVec.getValueType() != ExpectedSrcVT)
+ return SDValue();
+
+ auto Src = LHSSrcVec;
+ if (LHSIndex != 0 || RHSIndex != 1 || LHSSrcVec != RHSSrcVec) {
+ // Shuffle the source vector so that the converted lanes are the low lanes.
+ Src = DAG.getVectorShuffle(
+ ExpectedSrcVT, DL, LHSSrcVec, RHSSrcVec,
+ {static_cast<int>(LHSIndex), static_cast<int>(RHSIndex) + 4, -1, -1});
+ }
+ return DAG.getNode(LHSOpcode, DL, MVT::v2f64, Src);
+}
+
+SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
+ SelectionDAG &DAG) const {
+ if (auto ConvertLow = LowerConvertLow(Op, DAG))
+ return ConvertLow;
+
+ SDLoc DL(Op);
+ const EVT VecT = Op.getValueType();
+ const EVT LaneT = Op.getOperand(0).getValueType();
+ const size_t Lanes = Op.getNumOperands();
+ bool CanSwizzle = VecT == MVT::v16i8;
+
+ // BUILD_VECTORs are lowered to the instruction that initializes the highest
+ // possible number of lanes at once followed by a sequence of replace_lane
+ // instructions to individually initialize any remaining lanes.
+
+ // TODO: Tune this. For example, lanewise swizzling is very expensive, so
+ // swizzled lanes should be given greater weight.
+
+ // TODO: Investigate looping rather than always extracting/replacing specific
+ // lanes to fill gaps.
+
+ auto IsConstant = [](const SDValue &V) {
+ return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
+ };
+
+ // Returns the source vector and index vector pair if they exist. Checks for:
+ // (extract_vector_elt
+ // $src,
+ // (sign_extend_inreg (extract_vector_elt $indices, $i))
+ // )
+ auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
+ auto Bail = std::make_pair(SDValue(), SDValue());
+ if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+ return Bail;
+ const SDValue &SwizzleSrc = Lane->getOperand(0);
+ const SDValue &IndexExt = Lane->getOperand(1);
+ if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
+ return Bail;
+ const SDValue &Index = IndexExt->getOperand(0);
+ if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+ return Bail;
+ const SDValue &SwizzleIndices = Index->getOperand(0);
+ if (SwizzleSrc.getValueType() != MVT::v16i8 ||
+ SwizzleIndices.getValueType() != MVT::v16i8 ||
+ Index->getOperand(1)->getOpcode() != ISD::Constant ||
+ Index->getConstantOperandVal(1) != I)
+ return Bail;
+ return std::make_pair(SwizzleSrc, SwizzleIndices);
+ };
+
+ // If the lane is extracted from another vector at a constant index, return
+ // that vector. The source vector must not have more lanes than the dest
+ // because the shufflevector indices are in terms of the destination lanes and
+ // would not be able to address the smaller individual source lanes.
+ auto GetShuffleSrc = [&](const SDValue &Lane) {
+ if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+ return SDValue();
+ if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
+ return SDValue();
+ if (Lane->getOperand(0).getValueType().getVectorNumElements() >
+ VecT.getVectorNumElements())
+ return SDValue();
+ return Lane->getOperand(0);
+ };
+
+ using ValueEntry = std::pair<SDValue, size_t>;
+ SmallVector<ValueEntry, 16> SplatValueCounts;
+
+ using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
+ SmallVector<SwizzleEntry, 16> SwizzleCounts;
+
+ using ShuffleEntry = std::pair<SDValue, size_t>;
+ SmallVector<ShuffleEntry, 16> ShuffleCounts;
+
+ auto AddCount = [](auto &Counts, const auto &Val) {
+ auto CountIt =
+ llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
+ if (CountIt == Counts.end()) {
+ Counts.emplace_back(Val, 1);
+ } else {
+ CountIt->second++;
+ }
+ };
+
+ auto GetMostCommon = [](auto &Counts) {
+ auto CommonIt =
+ std::max_element(Counts.begin(), Counts.end(), llvm::less_second());
+ assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector");
+ return *CommonIt;
+ };
+
+ size_t NumConstantLanes = 0;
+
+ // Count eligible lanes for each type of vector creation op
+ for (size_t I = 0; I < Lanes; ++I) {
+ const SDValue &Lane = Op->getOperand(I);
+ if (Lane.isUndef())
+ continue;
+
+ AddCount(SplatValueCounts, Lane);
+
+ if (IsConstant(Lane))
+ NumConstantLanes++;
+ if (auto ShuffleSrc = GetShuffleSrc(Lane))
+ AddCount(ShuffleCounts, ShuffleSrc);
+ if (CanSwizzle) {
+ auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
+ if (SwizzleSrcs.first)
+ AddCount(SwizzleCounts, SwizzleSrcs);
+ }
+ }
+
+ SDValue SplatValue;
+ size_t NumSplatLanes;
+ std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
+
+ SDValue SwizzleSrc;
+ SDValue SwizzleIndices;
+ size_t NumSwizzleLanes = 0;
+ if (SwizzleCounts.size())
+ std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
+ NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
+
+ // Shuffles can draw from up to two vectors, so find the two most common
+ // sources.
+ SDValue ShuffleSrc1, ShuffleSrc2;
+ size_t NumShuffleLanes = 0;
+ if (ShuffleCounts.size()) {
+ std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
+ llvm::erase_if(ShuffleCounts,
+ [&](const auto &Pair) { return Pair.first == ShuffleSrc1; });
+ }
+ if (ShuffleCounts.size()) {
+ size_t AdditionalShuffleLanes;
+ std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
+ GetMostCommon(ShuffleCounts);
+ NumShuffleLanes += AdditionalShuffleLanes;
+ }
+
+ // Predicate returning true if the lane is properly initialized by the
+ // original instruction
+ std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
+ SDValue Result;
+ // Prefer swizzles over shuffles over vector consts over splats
+ if (NumSwizzleLanes >= NumShuffleLanes &&
+ NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
+ Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
+ SwizzleIndices);
+ auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
+ IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
+ return Swizzled == GetSwizzleSrcs(I, Lane);
+ };
+ } else if (NumShuffleLanes >= NumConstantLanes &&
+ NumShuffleLanes >= NumSplatLanes) {
+ size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
+ size_t DestLaneCount = VecT.getVectorNumElements();
+ size_t Scale1 = 1;
+ size_t Scale2 = 1;
+ SDValue Src1 = ShuffleSrc1;
+ SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
+ if (Src1.getValueType() != VecT) {
+ size_t LaneSize =
+ Src1.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
+ assert(LaneSize > DestLaneSize);
+ Scale1 = LaneSize / DestLaneSize;
+ Src1 = DAG.getBitcast(VecT, Src1);
+ }
+ if (Src2.getValueType() != VecT) {
+ size_t LaneSize =
+ Src2.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
+ assert(LaneSize > DestLaneSize);
+ Scale2 = LaneSize / DestLaneSize;
+ Src2 = DAG.getBitcast(VecT, Src2);
+ }
+
+ int Mask[16];
+ assert(DestLaneCount <= 16);
+ for (size_t I = 0; I < DestLaneCount; ++I) {
+ const SDValue &Lane = Op->getOperand(I);
+ SDValue Src = GetShuffleSrc(Lane);
+ if (Src == ShuffleSrc1) {
+ Mask[I] = Lane->getConstantOperandVal(1) * Scale1;
+ } else if (Src && Src == ShuffleSrc2) {
+ Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2;
+ } else {
+ Mask[I] = -1;
+ }
+ }
+ ArrayRef<int> MaskRef(Mask, DestLaneCount);
+ Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef);
+ IsLaneConstructed = [&](size_t, const SDValue &Lane) {
+ auto Src = GetShuffleSrc(Lane);
+ return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
+ };
+ } else if (NumConstantLanes >= NumSplatLanes) {
+ SmallVector<SDValue, 16> ConstLanes;
+ for (const SDValue &Lane : Op->op_values()) {
+ if (IsConstant(Lane)) {
+ // Values may need to be fixed so that they will sign extend to be
+ // within the expected range during ISel. Check whether the value is in
+ // bounds based on the lane bit width and if it is out of bounds, lop
+ // off the extra bits and subtract 2^n to reflect giving the high bit
+ // value -2^(n-1) rather than +2^(n-1). Skip the i64 case because it
+ // cannot possibly be out of range.
+ auto *Const = dyn_cast<ConstantSDNode>(Lane.getNode());
+ int64_t Val = Const ? Const->getSExtValue() : 0;
+ uint64_t LaneBits = 128 / Lanes;
+ assert((LaneBits == 64 || Val >= -(1ll << (LaneBits - 1))) &&
+ "Unexpected out of bounds negative value");
+ if (Const && LaneBits != 64 && Val > (1ll << (LaneBits - 1)) - 1) {
+ uint64_t Mask = (1ll << LaneBits) - 1;
+ auto NewVal = (((uint64_t)Val & Mask) - (1ll << LaneBits)) & Mask;
+ ConstLanes.push_back(DAG.getConstant(NewVal, SDLoc(Lane), LaneT));
+ } else {
+ ConstLanes.push_back(Lane);
+ }
+ } else if (LaneT.isFloatingPoint()) {
+ ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
+ } else {
+ ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
+ }
+ }
+ Result = DAG.getBuildVector(VecT, DL, ConstLanes);
+ IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
+ return IsConstant(Lane);
+ };
+ } else {
+ // Use a splat (which might be selected as a load splat)
+ Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
+ IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
+ return Lane == SplatValue;
+ };
+ }
+
+ assert(Result);
+ assert(IsLaneConstructed);
+
+ // Add replace_lane instructions for any unhandled values
+ for (size_t I = 0; I < Lanes; ++I) {
+ const SDValue &Lane = Op->getOperand(I);
+ if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
+ Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
+ DAG.getConstant(I, DL, MVT::i32));
+ }
+
+ return Result;
+}
+
+SDValue
+WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
+ MVT VecType = Op.getOperand(0).getSimpleValueType();
+ assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
+ size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
+
+ // Space for two vector args and sixteen mask indices
+ SDValue Ops[18];
+ size_t OpIdx = 0;
+ Ops[OpIdx++] = Op.getOperand(0);
+ Ops[OpIdx++] = Op.getOperand(1);
+
+ // Expand mask indices to byte indices and materialize them as operands
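+ // E.g. for a v4i32 shuffle, mask entry 1 expands to byte indices 4..7.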
+ for (int M : Mask) {
+ for (size_t J = 0; J < LaneBytes; ++J) {
+ // Lower undefs (represented by -1 in the mask) to {0..J}, which uses a
+ // whole lane of vector input, to allow further reduction by the VM, e.g.
+ // matching an 8x16 byte shuffle to an equivalent, cheaper 32x4 shuffle.
+ uint64_t ByteIndex = M == -1 ? J : (uint64_t)M * LaneBytes + J;
+ Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
+ }
+ }
+
+ return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
+}
+
+SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ // The legalizer does not know how to expand the unsupported comparison modes
+ // of i64x2 vectors, so we manually unroll them here.
+ assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
+ SmallVector<SDValue, 2> LHS, RHS;
+ DAG.ExtractVectorElements(Op->getOperand(0), LHS);
+ DAG.ExtractVectorElements(Op->getOperand(1), RHS);
+ const SDValue &CC = Op->getOperand(2);
+ auto MakeLane = [&](unsigned I) {
+ return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
+ DAG.getConstant(uint64_t(-1), DL, MVT::i64),
+ DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
+ };
+ return DAG.getBuildVector(Op->getValueType(0), DL,
+ {MakeLane(0), MakeLane(1)});
+}
+
+SDValue
+WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
+ SelectionDAG &DAG) const {
+ // Allow constant lane indices, expand variable lane indices
+ SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
+ if (isa<ConstantSDNode>(IdxNode)) {
+ // Ensure the index type is i32 to match the tablegen patterns
+ uint64_t Idx = cast<ConstantSDNode>(IdxNode)->getZExtValue();
+ SmallVector<SDValue, 3> Ops(Op.getNode()->ops());
+ Ops[Op.getNumOperands() - 1] =
+ DAG.getConstant(Idx, SDLoc(IdxNode), MVT::i32);
+ return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), Ops);
+ }
+ // Perform default expansion
+ return SDValue();
+}
+
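+// Scalarize a vector shift, masking each shift amount so that 8- and 16-bit
+// lanes keep the modulo-lane-width semantics of the vector instruction when
+// shifted as 32-bit scalars.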
+static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
+ EVT LaneT = Op.getSimpleValueType().getVectorElementType();
+ // 32-bit and 64-bit unrolled shifts will have proper semantics
+ if (LaneT.bitsGE(MVT::i32))
+ return DAG.UnrollVectorOp(Op.getNode());
+ // Otherwise mask the shift value to get proper semantics from 32-bit shift
+ SDLoc DL(Op);
+ size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
+ SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
+ unsigned ShiftOpcode = Op.getOpcode();
+ SmallVector<SDValue, 16> ShiftedElements;
+ DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
+ SmallVector<SDValue, 16> ShiftElements;
+ DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
+ SmallVector<SDValue, 16> UnrolledOps;
+ for (size_t i = 0; i < NumLanes; ++i) {
+ SDValue MaskedShiftValue =
+ DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
+ SDValue ShiftedValue = ShiftedElements[i];
+ if (ShiftOpcode == ISD::SRA)
+ ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
+ ShiftedValue, DAG.getValueType(LaneT));
+ UnrolledOps.push_back(
+ DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
+ }
+ return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
+}
+
+SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+
+ // Only manually lower vector shifts
+ assert(Op.getSimpleValueType().isVector());
+
+ uint64_t LaneBits = Op.getValueType().getScalarSizeInBits();
+ auto ShiftVal = Op.getOperand(1);
+
+ // Try to skip bitmask operation since it is implied inside shift instruction
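+ // E.g. for an i8x16 shift, (and $amt, 7) is redundant because the wasm
+ // shift instruction already takes the shift amount modulo the lane width.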
+ auto SkipImpliedMask = [](SDValue MaskOp, uint64_t MaskBits) {
+ if (MaskOp.getOpcode() != ISD::AND)
+ return MaskOp;
+ SDValue LHS = MaskOp.getOperand(0);
+ SDValue RHS = MaskOp.getOperand(1);
+ if (MaskOp.getValueType().isVector()) {
+ APInt MaskVal;
+ if (!ISD::isConstantSplatVector(RHS.getNode(), MaskVal))
+ std::swap(LHS, RHS);
+
+ if (ISD::isConstantSplatVector(RHS.getNode(), MaskVal) &&
+ MaskVal == MaskBits)
+ MaskOp = LHS;
+ } else {
+ if (!isa<ConstantSDNode>(RHS.getNode()))
+ std::swap(LHS, RHS);
+
+ auto ConstantRHS = dyn_cast<ConstantSDNode>(RHS.getNode());
+ if (ConstantRHS && ConstantRHS->getAPIntValue() == MaskBits)
+ MaskOp = LHS;
+ }
+
+ return MaskOp;
+ };
+
+ // Skip the vector AND, if present.
+ ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
+ ShiftVal = DAG.getSplatValue(ShiftVal);
+ if (!ShiftVal)
+ return unrollVectorShift(Op, DAG);
+
+ // Skip the scalar AND, if present.
+ ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
+ // Use anyext because none of the high bits can affect the shift
+ ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);
+
+ unsigned Opcode;
+ switch (Op.getOpcode()) {
+ case ISD::SHL:
+ Opcode = WebAssemblyISD::VEC_SHL;
+ break;
+ case ISD::SRA:
+ Opcode = WebAssemblyISD::VEC_SHR_S;
+ break;
+ case ISD::SRL:
+ Opcode = WebAssemblyISD::VEC_SHR_U;
+ break;
+ default:
+ llvm_unreachable("unexpected opcode");
+ }
+
+ return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
+}
+
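+// Saturating float-to-int conversions whose types match a wasm instruction
+// (scalar i32/i64 results, or the i32x4.trunc_sat forms) are kept as-is;
+// everything else falls back to the default expansion.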
+SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT ResT = Op.getValueType();
+ EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+
+ if ((ResT == MVT::i32 || ResT == MVT::i64) &&
+ (SatVT == MVT::i32 || SatVT == MVT::i64))
+ return Op;
+
+ if (ResT == MVT::v4i32 && SatVT == MVT::i32)
+ return Op;
+
+ return SDValue();
+}
+
+//===----------------------------------------------------------------------===//
+// Custom DAG combine hooks
+//===----------------------------------------------------------------------===//
+static SDValue
+performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
+ auto &DAG = DCI.DAG;
+ auto Shuffle = cast<ShuffleVectorSDNode>(N);
+
+ // Hoist vector bitcasts that don't change the number of lanes out of unary
+ // shuffles, where they are less likely to get in the way of other combines.
+ // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
+ // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
+ SDValue Bitcast = N->getOperand(0);
+ if (Bitcast.getOpcode() != ISD::BITCAST)
+ return SDValue();
+ if (!N->getOperand(1).isUndef())
+ return SDValue();
+ SDValue CastOp = Bitcast.getOperand(0);
+ EVT SrcType = CastOp.getValueType();
+ EVT DstType = Bitcast.getValueType();
+ if (!SrcType.is128BitVector() ||
+ SrcType.getVectorNumElements() != DstType.getVectorNumElements())
+ return SDValue();
+ SDValue NewShuffle = DAG.getVectorShuffle(
+ SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
+ return DAG.getBitcast(DstType, NewShuffle);
+}
+
+/// Convert ({u,s}itofp vec) --> ({u,s}itofp ({s,z}ext vec)) so it doesn't get
+/// split up into scalar instructions during legalization, and the vector
+/// extending instructions are selected in performVectorExtendCombine below.
+static SDValue
+performVectorExtendToFPCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ auto &DAG = DCI.DAG;
+ assert(N->getOpcode() == ISD::UINT_TO_FP ||
+ N->getOpcode() == ISD::SINT_TO_FP);
+
+ EVT InVT = N->getOperand(0)->getValueType(0);
+ EVT ResVT = N->getValueType(0);
+ MVT ExtVT;
+ if (ResVT == MVT::v4f32 && (InVT == MVT::v4i16 || InVT == MVT::v4i8))
+ ExtVT = MVT::v4i32;
+ else if (ResVT == MVT::v2f64 && (InVT == MVT::v2i16 || InVT == MVT::v2i8))
+ ExtVT = MVT::v2i32;
+ else
+ return SDValue();
+
+ unsigned Op =
+ N->getOpcode() == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
+ SDValue Conv = DAG.getNode(Op, SDLoc(N), ExtVT, N->getOperand(0));
+ return DAG.getNode(N->getOpcode(), SDLoc(N), ResVT, Conv);
+}
+
+static SDValue
+performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
+ auto &DAG = DCI.DAG;
+ assert(N->getOpcode() == ISD::SIGN_EXTEND ||
+ N->getOpcode() == ISD::ZERO_EXTEND);
+
+ // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
+ // possible before the extract_subvector can be expanded.
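+  // For example (an illustrative sketch):
+  //   (v8i16 (zero_extend (v8i8 (extract_subvector (v16i8 $x), 8))))
+  //     --> (v8i16 (extend_high_u $x))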
+ auto Extract = N->getOperand(0);
+ if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
+ return SDValue();
+ auto Source = Extract.getOperand(0);
+ auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
+ if (IndexNode == nullptr)
+ return SDValue();
+ auto Index = IndexNode->getZExtValue();
+
+ // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the
+ // extracted subvector is the low or high half of its source.
+ EVT ResVT = N->getValueType(0);
+ if (ResVT == MVT::v8i16) {
+ if (Extract.getValueType() != MVT::v8i8 ||
+ Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
+ return SDValue();
+ } else if (ResVT == MVT::v4i32) {
+ if (Extract.getValueType() != MVT::v4i16 ||
+ Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
+ return SDValue();
+ } else if (ResVT == MVT::v2i64) {
+ if (Extract.getValueType() != MVT::v2i32 ||
+ Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
+ return SDValue();
+ } else {
+ return SDValue();
+ }
+
+ bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
+ bool IsLow = Index == 0;
+
+ unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
+ : WebAssemblyISD::EXTEND_HIGH_S)
+ : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
+ : WebAssemblyISD::EXTEND_HIGH_U);
+
+ return DAG.getNode(Op, SDLoc(N), ResVT, Source);
+}
+
+static SDValue
+performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
+ auto &DAG = DCI.DAG;
+
+ auto GetWasmConversionOp = [](unsigned Op) {
+ switch (Op) {
+ case ISD::FP_TO_SINT_SAT:
+ return WebAssemblyISD::TRUNC_SAT_ZERO_S;
+ case ISD::FP_TO_UINT_SAT:
+ return WebAssemblyISD::TRUNC_SAT_ZERO_U;
+ case ISD::FP_ROUND:
+ return WebAssemblyISD::DEMOTE_ZERO;
+ }
+ llvm_unreachable("unexpected op");
+ };
+
+ auto IsZeroSplat = [](SDValue SplatVal) {
+ auto *Splat = dyn_cast<BuildVectorSDNode>(SplatVal.getNode());
+ APInt SplatValue, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+ // Endianness doesn't matter in this context because we are looking for
+ // an all-zero value.
+ return Splat &&
+ Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
+ HasAnyUndefs) &&
+ SplatValue == 0;
+ };
+
+ if (N->getOpcode() == ISD::CONCAT_VECTORS) {
+ // Combine this:
+ //
+ // (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)), (v2i32 (splat 0)))
+ //
+ // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
+ //
+ // Or this:
+ //
+ // (concat_vectors (v2f32 (fp_round (v2f64 $x))), (v2f32 (splat 0)))
+ //
+ // into (f32x4.demote_zero_f64x2 $x).
+ EVT ResVT;
+ EVT ExpectedConversionType;
+ auto Conversion = N->getOperand(0);
+ auto ConversionOp = Conversion.getOpcode();
+ switch (ConversionOp) {
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
+ ResVT = MVT::v4i32;
+ ExpectedConversionType = MVT::v2i32;
+ break;
+ case ISD::FP_ROUND:
+ ResVT = MVT::v4f32;
+ ExpectedConversionType = MVT::v2f32;
+ break;
+ default:
+ return SDValue();
+ }
+
+ if (N->getValueType(0) != ResVT)
+ return SDValue();
+
+ if (Conversion.getValueType() != ExpectedConversionType)
+ return SDValue();
+
+ auto Source = Conversion.getOperand(0);
+ if (Source.getValueType() != MVT::v2f64)
+ return SDValue();
+
+ if (!IsZeroSplat(N->getOperand(1)) ||
+ N->getOperand(1).getValueType() != ExpectedConversionType)
+ return SDValue();
+
+ unsigned Op = GetWasmConversionOp(ConversionOp);
+ return DAG.getNode(Op, SDLoc(N), ResVT, Source);
+ }
+
+ // Combine this:
+ //
+ // (fp_to_{s,u}int_sat (concat_vectors $x, (v2f64 (splat 0))), 32)
+ //
+ // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
+ //
+ // Or this:
+ //
+ // (v4f32 (fp_round (concat_vectors $x, (v2f64 (splat 0)))))
+ //
+ // into (f32x4.demote_zero_f64x2 $x).
+ EVT ResVT;
+ auto ConversionOp = N->getOpcode();
+ switch (ConversionOp) {
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
+ ResVT = MVT::v4i32;
+ break;
+ case ISD::FP_ROUND:
+ ResVT = MVT::v4f32;
+ break;
+ default:
+ llvm_unreachable("unexpected op");
+ }
+
+ if (N->getValueType(0) != ResVT)
+ return SDValue();
+
+ auto Concat = N->getOperand(0);
+ if (Concat.getValueType() != MVT::v4f64)
+ return SDValue();
+
+ auto Source = Concat.getOperand(0);
+ if (Source.getValueType() != MVT::v2f64)
+ return SDValue();
+
+ if (!IsZeroSplat(Concat.getOperand(1)) ||
+ Concat.getOperand(1).getValueType() != MVT::v2f64)
+ return SDValue();
+
+ unsigned Op = GetWasmConversionOp(ConversionOp);
+ return DAG.getNode(Op, SDLoc(N), ResVT, Source);
+}
+
+// Helper to extract VectorWidth bits from Vec, starting from IdxVal.
+static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
+ const SDLoc &DL, unsigned VectorWidth) {
+ EVT VT = Vec.getValueType();
+ EVT ElVT = VT.getVectorElementType();
+ unsigned Factor = VT.getSizeInBits() / VectorWidth;
+ EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
+ VT.getVectorNumElements() / Factor);
+
+  // Extract the relevant VectorWidth bits, generating an EXTRACT_SUBVECTOR.
+ unsigned ElemsPerChunk = VectorWidth / ElVT.getSizeInBits();
+ assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
+
+ // This is the index of the first element of the VectorWidth-bit chunk
+  // we want. Since ElemsPerChunk is a power of 2, we just need to clear the
+  // low bits.
+ IdxVal &= ~(ElemsPerChunk - 1);
+
+ // If the input is a buildvector just emit a smaller one.
+ if (Vec.getOpcode() == ISD::BUILD_VECTOR)
+ return DAG.getBuildVector(ResultVT, DL,
+ Vec->ops().slice(IdxVal, ElemsPerChunk));
+
+ SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, DL);
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResultVT, Vec, VecIdx);
+}
+
+// Helper to recursively truncate vector elements in half with NARROW_U. DstVT
+// is the expected destination value type after recursion. In is the initial
+// input. Note that the input should have enough leading zero bits to prevent
+// NARROW_U from saturating results.
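+//
+// For example (an illustrative sketch): truncating a v8i32 with zeroed high
+// bits to v8i16 splits the input into two v4i32 halves and narrows them with
+// a single i16x8.narrow_i32x4_u.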
+static SDValue truncateVectorWithNARROW(EVT DstVT, SDValue In, const SDLoc &DL,
+ SelectionDAG &DAG) {
+ EVT SrcVT = In.getValueType();
+
+  // No truncation required; we might get here due to recursive calls.
+ if (SrcVT == DstVT)
+ return In;
+
+ unsigned SrcSizeInBits = SrcVT.getSizeInBits();
+ unsigned NumElems = SrcVT.getVectorNumElements();
+ if (!isPowerOf2_32(NumElems))
+ return SDValue();
+ assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
+ assert(SrcSizeInBits > DstVT.getSizeInBits() && "Illegal truncation");
+
+ LLVMContext &Ctx = *DAG.getContext();
+ EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
+
+ // Narrow to the largest type possible:
+ // vXi64/vXi32 -> i16x8.narrow_i32x4_u and vXi16 -> i8x16.narrow_i16x8_u.
+ EVT InVT = MVT::i16, OutVT = MVT::i8;
+ if (SrcVT.getScalarSizeInBits() > 16) {
+ InVT = MVT::i32;
+ OutVT = MVT::i16;
+ }
+ unsigned SubSizeInBits = SrcSizeInBits / 2;
+ InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
+ OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
+
+ // Split lower/upper subvectors.
+ SDValue Lo = extractSubVector(In, 0, DAG, DL, SubSizeInBits);
+ SDValue Hi = extractSubVector(In, NumElems / 2, DAG, DL, SubSizeInBits);
+
+ // 256bit -> 128bit truncate - Narrow lower/upper 128-bit subvectors.
+ if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
+ Lo = DAG.getBitcast(InVT, Lo);
+ Hi = DAG.getBitcast(InVT, Hi);
+ SDValue Res = DAG.getNode(WebAssemblyISD::NARROW_U, DL, OutVT, Lo, Hi);
+ return DAG.getBitcast(DstVT, Res);
+ }
+
+ // Recursively narrow lower/upper subvectors, concat result and narrow again.
+ EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2);
+ Lo = truncateVectorWithNARROW(PackedVT, Lo, DL, DAG);
+ Hi = truncateVectorWithNARROW(PackedVT, Hi, DL, DAG);
+
+ PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
+ SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
+ return truncateVectorWithNARROW(DstVT, Res, DL, DAG);
+}
+
+static SDValue performTruncateCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ auto &DAG = DCI.DAG;
+
+ SDValue In = N->getOperand(0);
+ EVT InVT = In.getValueType();
+ if (!InVT.isSimple())
+ return SDValue();
+
+ EVT OutVT = N->getValueType(0);
+ if (!OutVT.isVector())
+ return SDValue();
+
+ EVT OutSVT = OutVT.getVectorElementType();
+ EVT InSVT = InVT.getVectorElementType();
+  // Currently we only cover truncates to v16i8 or v8i16.
+ if (!((InSVT == MVT::i16 || InSVT == MVT::i32 || InSVT == MVT::i64) &&
+ (OutSVT == MVT::i8 || OutSVT == MVT::i16) && OutVT.is128BitVector()))
+ return SDValue();
+
+ SDLoc DL(N);
+ APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
+ OutVT.getScalarSizeInBits());
+ In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
+ return truncateVectorWithNARROW(OutVT, In, DL, DAG);
+}
+
+static SDValue performBitcastCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ auto &DAG = DCI.DAG;
+ SDLoc DL(N);
+ SDValue Src = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+ EVT SrcVT = Src.getValueType();
+
+ // bitcast <N x i1> to iN
+ // ==> bitmask
+ if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
+ SrcVT.isFixedLengthVector() && SrcVT.getScalarType() == MVT::i1) {
+ unsigned NumElts = SrcVT.getVectorNumElements();
+ if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
+ return SDValue();
+ EVT Width = MVT::getIntegerVT(128 / NumElts);
+ return DAG.getZExtOrTrunc(
+ DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
+ {DAG.getConstant(Intrinsic::wasm_bitmask, DL, MVT::i32),
+ DAG.getSExtOrTrunc(N->getOperand(0), DL,
+ SrcVT.changeVectorElementType(Width))}),
+ DL, VT);
+ }
+
+ return SDValue();
+}
+
+static SDValue performSETCCCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ auto &DAG = DCI.DAG;
+
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
+
+ // setcc (iN (bitcast (vNi1 X))), 0, ne
+ // ==> any_true (vNi1 X)
+ // setcc (iN (bitcast (vNi1 X))), 0, eq
+ // ==> xor (any_true (vNi1 X)), -1
+ // setcc (iN (bitcast (vNi1 X))), -1, eq
+ // ==> all_true (vNi1 X)
+ // setcc (iN (bitcast (vNi1 X))), -1, ne
+ // ==> xor (all_true (vNi1 X)), -1
+ if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
+ (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
+ (isNullConstant(RHS) || isAllOnesConstant(RHS)) &&
+ LHS->getOpcode() == ISD::BITCAST) {
+ EVT FromVT = LHS->getOperand(0).getValueType();
+ if (FromVT.isFixedLengthVector() &&
+ FromVT.getVectorElementType() == MVT::i1) {
+ int Intrin = isNullConstant(RHS) ? Intrinsic::wasm_anytrue
+ : Intrinsic::wasm_alltrue;
+ unsigned NumElts = FromVT.getVectorNumElements();
+ if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
+ return SDValue();
+ EVT Width = MVT::getIntegerVT(128 / NumElts);
+ SDValue Ret = DAG.getZExtOrTrunc(
+ DAG.getNode(
+ ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
+ {DAG.getConstant(Intrin, DL, MVT::i32),
+ DAG.getSExtOrTrunc(LHS->getOperand(0), DL,
+ FromVT.changeVectorElementType(Width))}),
+ DL, MVT::i1);
+ if ((isNullConstant(RHS) && (Cond == ISD::SETEQ)) ||
+ (isAllOnesConstant(RHS) && (Cond == ISD::SETNE))) {
+ Ret = DAG.getNOT(DL, Ret, MVT::i1);
+ }
+ return DAG.getZExtOrTrunc(Ret, DL, VT);
+ }
+ }
+
+ return SDValue();
+}
+
+SDValue
+WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ switch (N->getOpcode()) {
+ default:
+ return SDValue();
+ case ISD::BITCAST:
+ return performBitcastCombine(N, DCI);
+ case ISD::SETCC:
+ return performSETCCCombine(N, DCI);
+ case ISD::VECTOR_SHUFFLE:
+ return performVECTOR_SHUFFLECombine(N, DCI);
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND:
+ return performVectorExtendCombine(N, DCI);
+ case ISD::UINT_TO_FP:
+ case ISD::SINT_TO_FP:
+ return performVectorExtendToFPCombine(N, DCI);
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
+ case ISD::FP_ROUND:
+ case ISD::CONCAT_VECTORS:
+ return performVectorTruncZeroCombine(N, DCI);
+ case ISD::TRUNCATE:
+ return performTruncateCombine(N, DCI);
+ }
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
new file mode 100644
index 000000000000..1d1338ab40d0
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
@@ -0,0 +1,159 @@
+//- WebAssemblyISelLowering.h - WebAssembly DAG Lowering Interface -*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the interfaces that WebAssembly uses to lower LLVM
+/// code into a selection DAG.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYISELLOWERING_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYISELLOWERING_H
+
+#include "llvm/CodeGen/TargetLowering.h"
+
+namespace llvm {
+
+namespace WebAssemblyISD {
+
+enum NodeType : unsigned {
+ FIRST_NUMBER = ISD::BUILTIN_OP_END,
+#define HANDLE_NODETYPE(NODE) NODE,
+#define HANDLE_MEM_NODETYPE(NODE)
+#include "WebAssemblyISD.def"
+ FIRST_MEM_OPCODE = ISD::FIRST_TARGET_MEMORY_OPCODE,
+#undef HANDLE_NODETYPE
+#undef HANDLE_MEM_NODETYPE
+#define HANDLE_NODETYPE(NODE)
+#define HANDLE_MEM_NODETYPE(NODE) NODE,
+#include "WebAssemblyISD.def"
+#undef HANDLE_NODETYPE
+#undef HANDLE_MEM_NODETYPE
+};
+
+} // end namespace WebAssemblyISD
+
+class WebAssemblySubtarget;
+
+class WebAssemblyTargetLowering final : public TargetLowering {
+public:
+ WebAssemblyTargetLowering(const TargetMachine &TM,
+ const WebAssemblySubtarget &STI);
+
+ MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override;
+ MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const override;
+
+private:
+ /// Keep a pointer to the WebAssemblySubtarget around so that we can make the
+ /// right decision when generating code for different targets.
+ const WebAssemblySubtarget *Subtarget;
+
+ AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
+ bool shouldScalarizeBinop(SDValue VecOp) const override;
+ FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
+ const TargetLibraryInfo *LibInfo) const override;
+ MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *MBB) const override;
+ const char *getTargetNodeName(unsigned Opcode) const override;
+ std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ StringRef Constraint, MVT VT) const override;
+ bool isCheapToSpeculateCttz(Type *Ty) const override;
+ bool isCheapToSpeculateCtlz(Type *Ty) const override;
+ bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
+ unsigned AS,
+ Instruction *I = nullptr) const override;
+ bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace, Align Alignment,
+ MachineMemOperand::Flags Flags,
+ unsigned *Fast) const override;
+ bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
+ bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
+ bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
+ bool shouldSinkOperands(Instruction *I,
+ SmallVectorImpl<Use *> &Ops) const override;
+ EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
+ EVT VT) const override;
+ bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
+ MachineFunction &MF,
+ unsigned Intrinsic) const override;
+
+ void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
+ const APInt &DemandedElts,
+ const SelectionDAG &DAG,
+ unsigned Depth) const override;
+
+ TargetLoweringBase::LegalizeTypeAction
+ getPreferredVectorAction(MVT VT) const override;
+
+ SDValue LowerCall(CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const override;
+ bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const override;
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals, const SDLoc &dl,
+ SelectionDAG &DAG) const override;
+ SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
+
+ void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const override;
+
+ const char *getClearCacheBuiltinName() const override {
+ report_fatal_error("llvm.clear_cache is not supported on wasm");
+ }
+
+ bool
+ shouldSimplifyDemandedVectorElts(SDValue Op,
+ const TargetLoweringOpt &TLO) const override;
+
+ // Custom lowering hooks.
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+ SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerCopyToReg(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerIntrinsic(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerEXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerAccessVectorElement(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerLoad(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerStore(SDValue Op, SelectionDAG &DAG) const;
+
+ // Custom DAG combine hooks
+ SDValue
+ PerformDAGCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) const override;
+};
+
+namespace WebAssembly {
+FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo);
+} // end namespace WebAssembly
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
new file mode 100644
index 000000000000..4623ce9b5c38
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
@@ -0,0 +1,536 @@
+// WebAssemblyInstrAtomics.td-WebAssembly Atomic codegen support-*- tablegen -*-
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// WebAssembly Atomic operand code-gen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+let UseNamedOperandTable = 1 in
+multiclass ATOMIC_I<dag oops_r, dag iops_r, dag oops_s, dag iops_s,
+ list<dag> pattern_r, string asmstr_r,
+ string asmstr_s, bits<32> atomic_op,
+ bit is64 = false> {
+ defm "" : I<oops_r, iops_r, oops_s, iops_s, pattern_r, asmstr_r, asmstr_s,
+ !or(0xfe00, !and(0xff, atomic_op)), is64>,
+ Requires<[HasAtomics]>;
+}
+
+multiclass ATOMIC_NRI<dag oops, dag iops, list<dag> pattern, string asmstr = "",
+ bits<32> atomic_op = -1> {
+ defm "" : NRI<oops, iops, pattern, asmstr,
+ !or(0xfe00, !and(0xff, atomic_op))>,
+ Requires<[HasAtomics]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Atomic wait / notify
+//===----------------------------------------------------------------------===//
+
+let hasSideEffects = 1 in {
+defm MEMORY_ATOMIC_NOTIFY_A32 :
+ ATOMIC_I<(outs I32:$dst),
+ (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I32:$count),
+ (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+ "memory.atomic.notify \t$dst, ${off}(${addr})${p2align}, $count",
+ "memory.atomic.notify \t${off}${p2align}", 0x00, false>;
+defm MEMORY_ATOMIC_NOTIFY_A64 :
+ ATOMIC_I<(outs I32:$dst),
+ (ins P2Align:$p2align, offset64_op:$off, I64:$addr, I32:$count),
+ (outs), (ins P2Align:$p2align, offset64_op:$off), [],
+ "memory.atomic.notify \t$dst, ${off}(${addr})${p2align}, $count",
+ "memory.atomic.notify \t${off}${p2align}", 0x00, true>;
+let mayLoad = 1 in {
+defm MEMORY_ATOMIC_WAIT32_A32 :
+ ATOMIC_I<(outs I32:$dst),
+ (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I32:$exp,
+ I64:$timeout),
+ (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+ "memory.atomic.wait32 \t$dst, ${off}(${addr})${p2align}, $exp, $timeout",
+ "memory.atomic.wait32 \t${off}${p2align}", 0x01, false>;
+defm MEMORY_ATOMIC_WAIT32_A64 :
+ ATOMIC_I<(outs I32:$dst),
+ (ins P2Align:$p2align, offset64_op:$off, I64:$addr, I32:$exp,
+ I64:$timeout),
+ (outs), (ins P2Align:$p2align, offset64_op:$off), [],
+ "memory.atomic.wait32 \t$dst, ${off}(${addr})${p2align}, $exp, $timeout",
+ "memory.atomic.wait32 \t${off}${p2align}", 0x01, true>;
+defm MEMORY_ATOMIC_WAIT64_A32 :
+ ATOMIC_I<(outs I32:$dst),
+ (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I64:$exp,
+ I64:$timeout),
+ (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+ "memory.atomic.wait64 \t$dst, ${off}(${addr})${p2align}, $exp, $timeout",
+ "memory.atomic.wait64 \t${off}${p2align}", 0x02, false>;
+defm MEMORY_ATOMIC_WAIT64_A64 :
+ ATOMIC_I<(outs I32:$dst),
+ (ins P2Align:$p2align, offset64_op:$off, I64:$addr, I64:$exp,
+ I64:$timeout),
+ (outs), (ins P2Align:$p2align, offset64_op:$off), [],
+ "memory.atomic.wait64 \t$dst, ${off}(${addr})${p2align}, $exp, $timeout",
+ "memory.atomic.wait64 \t${off}${p2align}", 0x02, true>;
+} // mayLoad = 1
+} // hasSideEffects = 1
+
+def NotifyPat_A32 :
+ Pat<(i32 (int_wasm_memory_atomic_notify (AddrOps32 offset32_op:$offset, I32:$addr), I32:$count)),
+ (MEMORY_ATOMIC_NOTIFY_A32 0, $offset, $addr, $count)>,
+ Requires<[HasAddr32, HasAtomics]>;
+def NotifyPat_A64 :
+ Pat<(i32 (int_wasm_memory_atomic_notify (AddrOps64 offset64_op:$offset, I64:$addr), I32:$count)),
+ (MEMORY_ATOMIC_NOTIFY_A64 0, $offset, $addr, $count)>,
+ Requires<[HasAddr64, HasAtomics]>;
+
+
+multiclass WaitPat<ValueType ty, Intrinsic kind, string inst> {
+ def WaitPat_A32 :
+ Pat<(i32 (kind (AddrOps32 offset32_op:$offset, I32:$addr), ty:$exp, I64:$timeout)),
+ (!cast<NI>(inst#_A32) 0, $offset, $addr, $exp, $timeout)>,
+ Requires<[HasAddr32, HasAtomics]>;
+ def WaitPat_A64 :
+ Pat<(i32 (kind (AddrOps64 offset64_op:$offset, I64:$addr), ty:$exp, I64:$timeout)),
+ (!cast<NI>(inst#_A64) 0, $offset, $addr, $exp, $timeout)>,
+ Requires<[HasAddr64, HasAtomics]>;
+}
+
+defm : WaitPat<i32, int_wasm_memory_atomic_wait32, "MEMORY_ATOMIC_WAIT32">;
+defm : WaitPat<i64, int_wasm_memory_atomic_wait64, "MEMORY_ATOMIC_WAIT64">;
+
+//===----------------------------------------------------------------------===//
+// Atomic fences
+//===----------------------------------------------------------------------===//
+
+// A compiler fence instruction that prevents reordering of instructions.
+let Defs = [ARGUMENTS] in {
+let isPseudo = 1, hasSideEffects = 1 in
+defm COMPILER_FENCE : ATOMIC_NRI<(outs), (ins), [], "compiler_fence">;
+let hasSideEffects = 1 in
+defm ATOMIC_FENCE : ATOMIC_NRI<(outs), (ins i8imm:$flags), [], "atomic.fence",
+ 0x03>;
+} // Defs = [ARGUMENTS]
+
+//===----------------------------------------------------------------------===//
+// Atomic loads
+//===----------------------------------------------------------------------===//
+
+multiclass AtomicLoad<WebAssemblyRegClass rc, string name, int atomic_op> {
+ defm "" : WebAssemblyLoad<rc, name, !or(0xfe00, !and(0xff, atomic_op)),
+ [HasAtomics]>;
+}
+
+defm ATOMIC_LOAD_I32 : AtomicLoad<I32, "i32.atomic.load", 0x10>;
+defm ATOMIC_LOAD_I64 : AtomicLoad<I64, "i64.atomic.load", 0x11>;
+
+// Select loads
+defm : LoadPat<i32, atomic_load_32, "ATOMIC_LOAD_I32">;
+defm : LoadPat<i64, atomic_load_64, "ATOMIC_LOAD_I64">;
+
+// Extending loads. Note that there are only zero-extending atomic loads, no
+// sign-extending loads.
+defm ATOMIC_LOAD8_U_I32 : AtomicLoad<I32, "i32.atomic.load8_u", 0x12>;
+defm ATOMIC_LOAD16_U_I32 : AtomicLoad<I32, "i32.atomic.load16_u", 0x13>;
+defm ATOMIC_LOAD8_U_I64 : AtomicLoad<I64, "i64.atomic.load8_u", 0x14>;
+defm ATOMIC_LOAD16_U_I64 : AtomicLoad<I64, "i64.atomic.load16_u", 0x15>;
+defm ATOMIC_LOAD32_U_I64 : AtomicLoad<I64, "i64.atomic.load32_u", 0x16>;
+
+// Fragments for extending loads. These are different from regular loads because
+// the SDNodes are derived from AtomicSDNode rather than LoadSDNode and
+// therefore don't have the extension type field. So instead of matching that,
+// we match the patterns that the type legalizer expands them to.
+
+// Unlike regular loads, extension to i64 is handled differently than i32.
+// i64 (zext (i8 (atomic_load_8))) gets legalized to
+// i64 (and (i64 (anyext (i32 (atomic_load_8)))), 255)
+// Extension to i32 is elided by SelectionDAG as our atomic loads are
+// zero-extending.
+def zext_aload_8_64 :
+ PatFrag<(ops node:$addr),
+ (i64 (zext (i32 (atomic_load_8 node:$addr))))>;
+def zext_aload_16_64 :
+ PatFrag<(ops node:$addr),
+ (i64 (zext (i32 (atomic_load_16 node:$addr))))>;
+def zext_aload_32_64 :
+ PatFrag<(ops node:$addr),
+ (i64 (zext (i32 (atomic_load_32 node:$addr))))>;
+
+// We don't have dedicated sign-extending atomic loads. So for sext loads, we
+// match bare subword loads (for 32-bit results) and anyext loads (for 64-bit
+// results) and select a zext load; the next instruction will be sext_inreg
+// which is selected by itself.
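+//
+// For example (an illustrative sketch):
+//   (i32 (sext (i8 (atomic_load_8 $addr))))
+// is legalized to (sext_inreg (atomic_load_8 $addr), i8); the load selects to
+// i32.atomic.load8_u and the sext_inreg is selected separately.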
+def sext_aload_8_64 :
+ PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_8 node:$addr)))>;
+def sext_aload_16_64 :
+ PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_16 node:$addr)))>;
+
+// Select zero-extending loads
+defm : LoadPat<i64, zext_aload_8_64, "ATOMIC_LOAD8_U_I64">;
+defm : LoadPat<i64, zext_aload_16_64, "ATOMIC_LOAD16_U_I64">;
+defm : LoadPat<i64, zext_aload_32_64, "ATOMIC_LOAD32_U_I64">;
+
+// Select sign-extending loads
+defm : LoadPat<i32, atomic_load_8, "ATOMIC_LOAD8_U_I32">;
+defm : LoadPat<i32, atomic_load_16, "ATOMIC_LOAD16_U_I32">;
+defm : LoadPat<i64, sext_aload_8_64, "ATOMIC_LOAD8_U_I64">;
+defm : LoadPat<i64, sext_aload_16_64, "ATOMIC_LOAD16_U_I64">;
+// 32->64 sext load gets selected as i32.atomic.load, i64.extend_i32_s
+
+
+//===----------------------------------------------------------------------===//
+// Atomic stores
+//===----------------------------------------------------------------------===//
+
+multiclass AtomicStore<WebAssemblyRegClass rc, string name, int atomic_op> {
+ defm "" : WebAssemblyStore<rc, name, !or(0xfe00, !and(0xff, atomic_op)),
+ [HasAtomics]>;
+}
+
+defm ATOMIC_STORE_I32 : AtomicStore<I32, "i32.atomic.store", 0x17>;
+defm ATOMIC_STORE_I64 : AtomicStore<I64, "i64.atomic.store", 0x18>;
+
+// We used to need a separate 'atomic' version of the store patterns because
+// store and atomic_store nodes had different operand orders.
+//
+// TODO: This is no longer the case, so the atomic_store and store patterns
+// can be unified.
+
+multiclass AStorePat<ValueType ty, PatFrag kind, string inst> {
+ def : Pat<(kind ty:$val, (AddrOps32 offset32_op:$offset, I32:$addr)),
+ (!cast<NI>(inst#_A32) 0, $offset, $addr, $val)>,
+ Requires<[HasAddr32, HasAtomics]>;
+ def : Pat<(kind ty:$val, (AddrOps64 offset64_op:$offset, I64:$addr)),
+ (!cast<NI>(inst#_A64) 0, $offset, $addr, $val)>,
+ Requires<[HasAddr64, HasAtomics]>;
+}
+defm : AStorePat<i32, atomic_store_32, "ATOMIC_STORE_I32">;
+defm : AStorePat<i64, atomic_store_64, "ATOMIC_STORE_I64">;
+
+// Truncating stores.
+defm ATOMIC_STORE8_I32 : AtomicStore<I32, "i32.atomic.store8", 0x19>;
+defm ATOMIC_STORE16_I32 : AtomicStore<I32, "i32.atomic.store16", 0x1a>;
+defm ATOMIC_STORE8_I64 : AtomicStore<I64, "i64.atomic.store8", 0x1b>;
+defm ATOMIC_STORE16_I64 : AtomicStore<I64, "i64.atomic.store16", 0x1c>;
+defm ATOMIC_STORE32_I64 : AtomicStore<I64, "i64.atomic.store32", 0x1d>;
+
+// Fragments for truncating stores.
+
+// We don't have dedicated truncating atomic store instructions. For 32-bit
+// values, we just need to match bare atomic stores. On the other hand,
+// truncating stores from i64 values are first truncated to i32.
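+//
+// For example (an illustrative sketch):
+//   (atomic_store_8 (i32 (trunc (i64 $val))), $addr)
+// matches trunc_astore_8_64 and selects i64.atomic.store8.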
+class trunc_astore_64<PatFrag kind> :
+ PatFrag<(ops node:$val, node:$addr),
+ (kind (i32 (trunc (i64 node:$val))), node:$addr)>;
+def trunc_astore_8_64 : trunc_astore_64<atomic_store_8>;
+def trunc_astore_16_64 : trunc_astore_64<atomic_store_16>;
+def trunc_astore_32_64 : trunc_astore_64<atomic_store_32>;
+
+// Truncating stores with no constant offset
+defm : AStorePat<i32, atomic_store_8, "ATOMIC_STORE8_I32">;
+defm : AStorePat<i32, atomic_store_16, "ATOMIC_STORE16_I32">;
+defm : AStorePat<i64, trunc_astore_8_64, "ATOMIC_STORE8_I64">;
+defm : AStorePat<i64, trunc_astore_16_64, "ATOMIC_STORE16_I64">;
+defm : AStorePat<i64, trunc_astore_32_64, "ATOMIC_STORE32_I64">;
+
+//===----------------------------------------------------------------------===//
+// Atomic binary read-modify-writes
+//===----------------------------------------------------------------------===//
+
+multiclass WebAssemblyBinRMW<WebAssemblyRegClass rc, string name,
+ int atomic_op> {
+ defm "_A32" :
+ ATOMIC_I<(outs rc:$dst),
+ (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$val),
+ (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+ !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $val"),
+ !strconcat(name, "\t${off}${p2align}"), atomic_op, false>;
+ defm "_A64" :
+ ATOMIC_I<(outs rc:$dst),
+ (ins P2Align:$p2align, offset64_op:$off, I64:$addr, rc:$val),
+ (outs), (ins P2Align:$p2align, offset64_op:$off), [],
+ !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $val"),
+ !strconcat(name, "\t${off}${p2align}"), atomic_op, true>;
+}
+
+defm ATOMIC_RMW_ADD_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.add", 0x1e>;
+defm ATOMIC_RMW_ADD_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.add", 0x1f>;
+defm ATOMIC_RMW8_U_ADD_I32 :
+ WebAssemblyBinRMW<I32, "i32.atomic.rmw8.add_u", 0x20>;
+defm ATOMIC_RMW16_U_ADD_I32 :
+ WebAssemblyBinRMW<I32, "i32.atomic.rmw16.add_u", 0x21>;
+defm ATOMIC_RMW8_U_ADD_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw8.add_u", 0x22>;
+defm ATOMIC_RMW16_U_ADD_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw16.add_u", 0x23>;
+defm ATOMIC_RMW32_U_ADD_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw32.add_u", 0x24>;
+
+defm ATOMIC_RMW_SUB_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.sub", 0x25>;
+defm ATOMIC_RMW_SUB_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.sub", 0x26>;
+defm ATOMIC_RMW8_U_SUB_I32 :
+ WebAssemblyBinRMW<I32, "i32.atomic.rmw8.sub_u", 0x27>;
+defm ATOMIC_RMW16_U_SUB_I32 :
+ WebAssemblyBinRMW<I32, "i32.atomic.rmw16.sub_u", 0x28>;
+defm ATOMIC_RMW8_U_SUB_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw8.sub_u", 0x29>;
+defm ATOMIC_RMW16_U_SUB_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw16.sub_u", 0x2a>;
+defm ATOMIC_RMW32_U_SUB_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw32.sub_u", 0x2b>;
+
+defm ATOMIC_RMW_AND_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.and", 0x2c>;
+defm ATOMIC_RMW_AND_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.and", 0x2d>;
+defm ATOMIC_RMW8_U_AND_I32 :
+ WebAssemblyBinRMW<I32, "i32.atomic.rmw8.and_u", 0x2e>;
+defm ATOMIC_RMW16_U_AND_I32 :
+ WebAssemblyBinRMW<I32, "i32.atomic.rmw16.and_u", 0x2f>;
+defm ATOMIC_RMW8_U_AND_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw8.and_u", 0x30>;
+defm ATOMIC_RMW16_U_AND_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw16.and_u", 0x31>;
+defm ATOMIC_RMW32_U_AND_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw32.and_u", 0x32>;
+
+defm ATOMIC_RMW_OR_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.or", 0x33>;
+defm ATOMIC_RMW_OR_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.or", 0x34>;
+defm ATOMIC_RMW8_U_OR_I32 :
+ WebAssemblyBinRMW<I32, "i32.atomic.rmw8.or_u", 0x35>;
+defm ATOMIC_RMW16_U_OR_I32 :
+ WebAssemblyBinRMW<I32, "i32.atomic.rmw16.or_u", 0x36>;
+defm ATOMIC_RMW8_U_OR_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw8.or_u", 0x37>;
+defm ATOMIC_RMW16_U_OR_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw16.or_u", 0x38>;
+defm ATOMIC_RMW32_U_OR_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw32.or_u", 0x39>;
+
+defm ATOMIC_RMW_XOR_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.xor", 0x3a>;
+defm ATOMIC_RMW_XOR_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.xor", 0x3b>;
+defm ATOMIC_RMW8_U_XOR_I32 :
+ WebAssemblyBinRMW<I32, "i32.atomic.rmw8.xor_u", 0x3c>;
+defm ATOMIC_RMW16_U_XOR_I32 :
+ WebAssemblyBinRMW<I32, "i32.atomic.rmw16.xor_u", 0x3d>;
+defm ATOMIC_RMW8_U_XOR_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw8.xor_u", 0x3e>;
+defm ATOMIC_RMW16_U_XOR_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw16.xor_u", 0x3f>;
+defm ATOMIC_RMW32_U_XOR_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw32.xor_u", 0x40>;
+
+defm ATOMIC_RMW_XCHG_I32 :
+ WebAssemblyBinRMW<I32, "i32.atomic.rmw.xchg", 0x41>;
+defm ATOMIC_RMW_XCHG_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw.xchg", 0x42>;
+defm ATOMIC_RMW8_U_XCHG_I32 :
+ WebAssemblyBinRMW<I32, "i32.atomic.rmw8.xchg_u", 0x43>;
+defm ATOMIC_RMW16_U_XCHG_I32 :
+ WebAssemblyBinRMW<I32, "i32.atomic.rmw16.xchg_u", 0x44>;
+defm ATOMIC_RMW8_U_XCHG_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw8.xchg_u", 0x45>;
+defm ATOMIC_RMW16_U_XCHG_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw16.xchg_u", 0x46>;
+defm ATOMIC_RMW32_U_XCHG_I64 :
+ WebAssemblyBinRMW<I64, "i64.atomic.rmw32.xchg_u", 0x47>;
+
+multiclass BinRMWPat<ValueType ty, PatFrag kind, string inst> {
+ def : Pat<(ty (kind (AddrOps32 offset32_op:$offset, I32:$addr), ty:$val)),
+ (!cast<NI>(inst#_A32) 0, $offset, $addr, $val)>,
+ Requires<[HasAddr32, HasAtomics]>;
+ def : Pat<(ty (kind (AddrOps64 offset64_op:$offset, I64:$addr), ty:$val)),
+ (!cast<NI>(inst#_A64) 0, $offset, $addr, $val)>,
+ Requires<[HasAddr64, HasAtomics]>;
+}
+
+// Patterns for various addressing modes.
+multiclass BinRMWPattern<PatFrag rmw_32, PatFrag rmw_64, string inst_32,
+ string inst_64> {
+ defm : BinRMWPat<i32, rmw_32, inst_32>;
+ defm : BinRMWPat<i64, rmw_64, inst_64>;
+}
+
+defm : BinRMWPattern<atomic_load_add_32, atomic_load_add_64,
+ "ATOMIC_RMW_ADD_I32", "ATOMIC_RMW_ADD_I64">;
+defm : BinRMWPattern<atomic_load_sub_32, atomic_load_sub_64,
+ "ATOMIC_RMW_SUB_I32", "ATOMIC_RMW_SUB_I64">;
+defm : BinRMWPattern<atomic_load_and_32, atomic_load_and_64,
+ "ATOMIC_RMW_AND_I32", "ATOMIC_RMW_AND_I64">;
+defm : BinRMWPattern<atomic_load_or_32, atomic_load_or_64,
+ "ATOMIC_RMW_OR_I32", "ATOMIC_RMW_OR_I64">;
+defm : BinRMWPattern<atomic_load_xor_32, atomic_load_xor_64,
+ "ATOMIC_RMW_XOR_I32", "ATOMIC_RMW_XOR_I64">;
+defm : BinRMWPattern<atomic_swap_32, atomic_swap_64,
+ "ATOMIC_RMW_XCHG_I32", "ATOMIC_RMW_XCHG_I64">;
+
+// Truncating & zero-extending binary RMW patterns.
+// These are combined patterns of truncating store patterns and zero-extending
+// load patterns above.
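+//
+// For example (an illustrative sketch):
+//   (i64 (zext (i32 (atomic_load_add_8 $addr, (i32 (trunc (i64 $val)))))))
+// matches zext_bin_rmw_8_64<atomic_load_add_8> and selects
+// i64.atomic.rmw8.add_u.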
+class zext_bin_rmw_8_32<PatFrag kind> :
+ PatFrag<(ops node:$addr, node:$val), (i32 (kind node:$addr, node:$val))>;
+class zext_bin_rmw_16_32<PatFrag kind> : zext_bin_rmw_8_32<kind>;
+class zext_bin_rmw_8_64<PatFrag kind> :
+ PatFrag<(ops node:$addr, node:$val),
+ (zext (i32 (kind node:$addr, (i32 (trunc (i64 node:$val))))))>;
+class zext_bin_rmw_16_64<PatFrag kind> : zext_bin_rmw_8_64<kind>;
+class zext_bin_rmw_32_64<PatFrag kind> : zext_bin_rmw_8_64<kind>;
+
+// Truncating & sign-extending binary RMW patterns.
+// These are combined patterns of truncating store patterns and sign-extending
+// load patterns above. We match subword RMWs (for 32-bit) and anyext RMWs (for
+// 64-bit) and select a zext RMW; the next instruction will be sext_inreg which
+// is selected by itself.
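+//
+// For example (an illustrative sketch):
+//   (i64 (anyext (i32 (atomic_load_add_8 $addr, (i32 (trunc (i64 $val)))))))
+// matches sext_bin_rmw_8_64<atomic_load_add_8> and also selects
+// i64.atomic.rmw8.add_u; the following sext_inreg is selected separately.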
+class sext_bin_rmw_8_32<PatFrag kind> :
+ PatFrag<(ops node:$addr, node:$val), (kind node:$addr, node:$val)>;
+class sext_bin_rmw_16_32<PatFrag kind> : sext_bin_rmw_8_32<kind>;
+class sext_bin_rmw_8_64<PatFrag kind> :
+ PatFrag<(ops node:$addr, node:$val),
+ (anyext (i32 (kind node:$addr, (i32 (trunc (i64 node:$val))))))>;
+class sext_bin_rmw_16_64<PatFrag kind> : sext_bin_rmw_8_64<kind>;
+// 32->64 sext RMW gets selected as i32.atomic.rmw.***, i64.extend_i32_s
+
+// Patterns for various addressing modes for truncating-extending binary RMWs.
+multiclass BinRMWTruncExtPattern<
+ PatFrag rmw_8, PatFrag rmw_16, PatFrag rmw_32,
+ string inst8_32, string inst16_32, string inst8_64, string inst16_64, string inst32_64> {
+ // Truncating-extending binary RMWs
+ defm : BinRMWPat<i32, zext_bin_rmw_8_32<rmw_8>, inst8_32>;
+ defm : BinRMWPat<i32, zext_bin_rmw_16_32<rmw_16>, inst16_32>;
+ defm : BinRMWPat<i64, zext_bin_rmw_8_64<rmw_8>, inst8_64>;
+ defm : BinRMWPat<i64, zext_bin_rmw_16_64<rmw_16>, inst16_64>;
+ defm : BinRMWPat<i64, zext_bin_rmw_32_64<rmw_32>, inst32_64>;
+
+ defm : BinRMWPat<i32, sext_bin_rmw_8_32<rmw_8>, inst8_32>;
+ defm : BinRMWPat<i32, sext_bin_rmw_16_32<rmw_16>, inst16_32>;
+ defm : BinRMWPat<i64, sext_bin_rmw_8_64<rmw_8>, inst8_64>;
+ defm : BinRMWPat<i64, sext_bin_rmw_16_64<rmw_16>, inst16_64>;
+}
+
+defm : BinRMWTruncExtPattern<
+ atomic_load_add_8, atomic_load_add_16, atomic_load_add_32,
+ "ATOMIC_RMW8_U_ADD_I32", "ATOMIC_RMW16_U_ADD_I32",
+ "ATOMIC_RMW8_U_ADD_I64", "ATOMIC_RMW16_U_ADD_I64", "ATOMIC_RMW32_U_ADD_I64">;
+defm : BinRMWTruncExtPattern<
+ atomic_load_sub_8, atomic_load_sub_16, atomic_load_sub_32,
+ "ATOMIC_RMW8_U_SUB_I32", "ATOMIC_RMW16_U_SUB_I32",
+ "ATOMIC_RMW8_U_SUB_I64", "ATOMIC_RMW16_U_SUB_I64", "ATOMIC_RMW32_U_SUB_I64">;
+defm : BinRMWTruncExtPattern<
+ atomic_load_and_8, atomic_load_and_16, atomic_load_and_32,
+ "ATOMIC_RMW8_U_AND_I32", "ATOMIC_RMW16_U_AND_I32",
+ "ATOMIC_RMW8_U_AND_I64", "ATOMIC_RMW16_U_AND_I64", "ATOMIC_RMW32_U_AND_I64">;
+defm : BinRMWTruncExtPattern<
+ atomic_load_or_8, atomic_load_or_16, atomic_load_or_32,
+ "ATOMIC_RMW8_U_OR_I32", "ATOMIC_RMW16_U_OR_I32",
+ "ATOMIC_RMW8_U_OR_I64", "ATOMIC_RMW16_U_OR_I64", "ATOMIC_RMW32_U_OR_I64">;
+defm : BinRMWTruncExtPattern<
+ atomic_load_xor_8, atomic_load_xor_16, atomic_load_xor_32,
+ "ATOMIC_RMW8_U_XOR_I32", "ATOMIC_RMW16_U_XOR_I32",
+ "ATOMIC_RMW8_U_XOR_I64", "ATOMIC_RMW16_U_XOR_I64", "ATOMIC_RMW32_U_XOR_I64">;
+defm : BinRMWTruncExtPattern<
+ atomic_swap_8, atomic_swap_16, atomic_swap_32,
+ "ATOMIC_RMW8_U_XCHG_I32", "ATOMIC_RMW16_U_XCHG_I32",
+ "ATOMIC_RMW8_U_XCHG_I64", "ATOMIC_RMW16_U_XCHG_I64",
+ "ATOMIC_RMW32_U_XCHG_I64">;
+
+//===----------------------------------------------------------------------===//
+// Atomic ternary read-modify-writes
+//===----------------------------------------------------------------------===//
+
+// TODO: LLVM IR's cmpxchg instruction returns a pair of {loaded value, success
+// flag}. When we use the success flag or both values, we can't make use of i64
+// truncate/extend versions of instructions for now, which is suboptimal.
+// Consider adding a pass after instruction selection that optimizes this case
+// if it is frequent.
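+//
+// For example (an illustrative sketch): an i8 cmpxchg whose loaded result is
+// extended to i64 normally matches the zext_ter_rmw_8_64 pattern below, but
+// when its success flag is also used, the combined pattern cannot match and
+// separate extend instructions are emitted instead.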
+
+multiclass WebAssemblyTerRMW<WebAssemblyRegClass rc, string name,
+ int atomic_op> {
+ defm "_A32" :
+ ATOMIC_I<(outs rc:$dst),
+ (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$exp,
+ rc:$new_),
+ (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+ !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $exp, $new_"),
+ !strconcat(name, "\t${off}${p2align}"), atomic_op, false>;
+ defm "_A64" :
+ ATOMIC_I<(outs rc:$dst),
+ (ins P2Align:$p2align, offset64_op:$off, I64:$addr, rc:$exp,
+ rc:$new_),
+ (outs), (ins P2Align:$p2align, offset64_op:$off), [],
+ !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $exp, $new_"),
+ !strconcat(name, "\t${off}${p2align}"), atomic_op, true>;
+}
+
+defm ATOMIC_RMW_CMPXCHG_I32 :
+ WebAssemblyTerRMW<I32, "i32.atomic.rmw.cmpxchg", 0x48>;
+defm ATOMIC_RMW_CMPXCHG_I64 :
+ WebAssemblyTerRMW<I64, "i64.atomic.rmw.cmpxchg", 0x49>;
+defm ATOMIC_RMW8_U_CMPXCHG_I32 :
+ WebAssemblyTerRMW<I32, "i32.atomic.rmw8.cmpxchg_u", 0x4a>;
+defm ATOMIC_RMW16_U_CMPXCHG_I32 :
+ WebAssemblyTerRMW<I32, "i32.atomic.rmw16.cmpxchg_u", 0x4b>;
+defm ATOMIC_RMW8_U_CMPXCHG_I64 :
+ WebAssemblyTerRMW<I64, "i64.atomic.rmw8.cmpxchg_u", 0x4c>;
+defm ATOMIC_RMW16_U_CMPXCHG_I64 :
+ WebAssemblyTerRMW<I64, "i64.atomic.rmw16.cmpxchg_u", 0x4d>;
+defm ATOMIC_RMW32_U_CMPXCHG_I64 :
+ WebAssemblyTerRMW<I64, "i64.atomic.rmw32.cmpxchg_u", 0x4e>;
+
+multiclass TerRMWPat<ValueType ty, PatFrag kind, string inst> {
+ def : Pat<(ty (kind (AddrOps32 offset32_op:$offset, I32:$addr), ty:$exp, ty:$new)),
+ (!cast<NI>(inst#_A32) 0, $offset, $addr, $exp, $new)>,
+ Requires<[HasAddr32, HasAtomics]>;
+ def : Pat<(ty (kind (AddrOps64 offset64_op:$offset, I64:$addr), ty:$exp, ty:$new)),
+ (!cast<NI>(inst#_A64) 0, $offset, $addr, $exp, $new)>,
+ Requires<[HasAddr64, HasAtomics]>;
+}
+
+defm : TerRMWPat<i32, atomic_cmp_swap_32, "ATOMIC_RMW_CMPXCHG_I32">;
+defm : TerRMWPat<i64, atomic_cmp_swap_64, "ATOMIC_RMW_CMPXCHG_I64">;
+
+// Truncating & zero-extending ternary RMW patterns.
+// DAG legalization & optimization before instruction selection may introduce
+// additional nodes such as anyext or assertzext depending on operand types.
+class zext_ter_rmw_8_32<PatFrag kind> :
+ PatFrag<(ops node:$addr, node:$exp, node:$new),
+ (i32 (kind node:$addr, node:$exp, node:$new))>;
+class zext_ter_rmw_16_32<PatFrag kind> : zext_ter_rmw_8_32<kind>;
+class zext_ter_rmw_8_64<PatFrag kind> :
+ PatFrag<(ops node:$addr, node:$exp, node:$new),
+ (zext (i32 (assertzext (i32 (kind node:$addr,
+ (i32 (trunc (i64 node:$exp))),
+ (i32 (trunc (i64 node:$new))))))))>;
+class zext_ter_rmw_16_64<PatFrag kind> : zext_ter_rmw_8_64<kind>;
+class zext_ter_rmw_32_64<PatFrag kind> :
+ PatFrag<(ops node:$addr, node:$exp, node:$new),
+ (zext (i32 (kind node:$addr,
+ (i32 (trunc (i64 node:$exp))),
+ (i32 (trunc (i64 node:$new))))))>;
+
+// Truncating & sign-extending ternary RMW patterns.
+// We match subword RMWs (for 32-bit) and anyext RMWs (for 64-bit) and select a
+// zext RMW; the next instruction will be sext_inreg which is selected by
+// itself.
+class sext_ter_rmw_8_32<PatFrag kind> :
+ PatFrag<(ops node:$addr, node:$exp, node:$new),
+ (kind node:$addr, node:$exp, node:$new)>;
+class sext_ter_rmw_16_32<PatFrag kind> : sext_ter_rmw_8_32<kind>;
+class sext_ter_rmw_8_64<PatFrag kind> :
+ PatFrag<(ops node:$addr, node:$exp, node:$new),
+ (anyext (i32 (assertzext (i32
+ (kind node:$addr,
+ (i32 (trunc (i64 node:$exp))),
+ (i32 (trunc (i64 node:$new))))))))>;
+class sext_ter_rmw_16_64<PatFrag kind> : sext_ter_rmw_8_64<kind>;
+// 32->64 sext RMW gets selected as i32.atomic.rmw.***, i64.extend_i32_s
+
+defm : TerRMWPat<i32, zext_ter_rmw_8_32<atomic_cmp_swap_8>, "ATOMIC_RMW8_U_CMPXCHG_I32">;
+defm : TerRMWPat<i32, zext_ter_rmw_16_32<atomic_cmp_swap_16>, "ATOMIC_RMW16_U_CMPXCHG_I32">;
+defm : TerRMWPat<i64, zext_ter_rmw_8_64<atomic_cmp_swap_8>, "ATOMIC_RMW8_U_CMPXCHG_I64">;
+defm : TerRMWPat<i64, zext_ter_rmw_16_64<atomic_cmp_swap_16>, "ATOMIC_RMW16_U_CMPXCHG_I64">;
+defm : TerRMWPat<i64, zext_ter_rmw_32_64<atomic_cmp_swap_32>, "ATOMIC_RMW32_U_CMPXCHG_I64">;
+
+defm : TerRMWPat<i32, sext_ter_rmw_8_32<atomic_cmp_swap_8>, "ATOMIC_RMW8_U_CMPXCHG_I32">;
+defm : TerRMWPat<i32, sext_ter_rmw_16_32<atomic_cmp_swap_16>, "ATOMIC_RMW16_U_CMPXCHG_I32">;
+defm : TerRMWPat<i64, sext_ter_rmw_8_64<atomic_cmp_swap_8>, "ATOMIC_RMW8_U_CMPXCHG_I64">;
+defm : TerRMWPat<i64, sext_ter_rmw_16_64<atomic_cmp_swap_16>, "ATOMIC_RMW16_U_CMPXCHG_I64">;
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrBulkMemory.td b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrBulkMemory.td
new file mode 100644
index 000000000000..7aeae54d95a8
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrBulkMemory.td
@@ -0,0 +1,75 @@
+// WebAssemblyInstrBulkMemory.td - bulk memory codegen support --*- tablegen -*-
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// WebAssembly bulk memory codegen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+// Instructions requiring HasBulkMemory and the bulk memory prefix byte.
+multiclass BULK_I<dag oops_r, dag iops_r, dag oops_s, dag iops_s,
+ list<dag> pattern_r, string asmstr_r = "",
+ string asmstr_s = "", bits<32> simdop = -1> {
+ defm "" : I<oops_r, iops_r, oops_s, iops_s, pattern_r, asmstr_r, asmstr_s,
+ !or(0xfc00, !and(0xff, simdop))>,
+ Requires<[HasBulkMemory]>;
+}
+
+// Bespoke types and nodes for bulk memory ops
+def wasm_memcpy_t : SDTypeProfile<0, 5,
+ [SDTCisInt<0>, SDTCisInt<1>, SDTCisPtrTy<2>, SDTCisPtrTy<3>, SDTCisInt<4>]
+>;
+def wasm_memcpy : SDNode<"WebAssemblyISD::MEMORY_COPY", wasm_memcpy_t,
+ [SDNPHasChain, SDNPMayLoad, SDNPMayStore]>;
+
+def wasm_memset_t : SDTypeProfile<0, 4,
+ [SDTCisInt<0>, SDTCisPtrTy<1>, SDTCisInt<2>, SDTCisInt<3>]
+>;
+def wasm_memset : SDNode<"WebAssemblyISD::MEMORY_FILL", wasm_memset_t,
+ [SDNPHasChain, SDNPMayStore]>;
+
+multiclass BulkMemoryOps<WebAssemblyRegClass rc, string B> {
+
+let mayStore = 1, hasSideEffects = 1 in
+defm MEMORY_INIT_A#B :
+ BULK_I<(outs),
+ (ins i32imm_op:$seg, i32imm_op:$idx, rc:$dest,
+ I32:$offset, I32:$size),
+ (outs), (ins i32imm_op:$seg, i32imm_op:$idx),
+ [],
+ "memory.init\t$seg, $idx, $dest, $offset, $size",
+ "memory.init\t$seg, $idx", 0x08>;
+
+let hasSideEffects = 1 in
+defm DATA_DROP :
+ BULK_I<(outs), (ins i32imm_op:$seg), (outs), (ins i32imm_op:$seg),
+ [],
+ "data.drop\t$seg", "data.drop\t$seg", 0x09>;
+
+let mayLoad = 1, mayStore = 1 in
+defm MEMORY_COPY_A#B :
+ BULK_I<(outs), (ins i32imm_op:$src_idx, i32imm_op:$dst_idx,
+ rc:$dst, rc:$src, rc:$len),
+ (outs), (ins i32imm_op:$src_idx, i32imm_op:$dst_idx),
+ [(wasm_memcpy (i32 imm:$src_idx), (i32 imm:$dst_idx),
+ rc:$dst, rc:$src, rc:$len
+ )],
+ "memory.copy\t$src_idx, $dst_idx, $dst, $src, $len",
+ "memory.copy\t$src_idx, $dst_idx", 0x0a>;
+
+let mayStore = 1 in
+defm MEMORY_FILL_A#B :
+ BULK_I<(outs), (ins i32imm_op:$idx, rc:$dst, I32:$value, rc:$size),
+ (outs), (ins i32imm_op:$idx),
+ [(wasm_memset (i32 imm:$idx), rc:$dst, I32:$value, rc:$size)],
+ "memory.fill\t$idx, $dst, $value, $size",
+ "memory.fill\t$idx", 0x0b>;
+}
+
+defm : BulkMemoryOps<I32, "32">;
+defm : BulkMemoryOps<I64, "64">;
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td
new file mode 100644
index 000000000000..ca9a5ef9dda1
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td
@@ -0,0 +1,84 @@
+//===- WebAssemblyInstrCall.td-WebAssembly Call codegen support -*- tablegen -*-
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// WebAssembly Call operand code-gen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+// TODO: addr64: These currently assume the callee address is 32-bit.
+// FIXME: add $type to first call_indirect asmstr (and maybe $flags)
+
+// Call sequence markers. These have an immediate which represents the amount of
+// stack space to allocate or free, which is used for varargs lowering.
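+//
+// For example (an illustrative sketch): a varargs call needing 16 bytes of
+// outgoing stack space is bracketed by ADJCALLSTACKDOWN 16, 0 and
+// ADJCALLSTACKUP 16, 0.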
+let Uses = [SP32, SP64], Defs = [SP32, SP64], isCodeGenOnly = 1 in {
+defm ADJCALLSTACKDOWN : NRI<(outs), (ins i32imm:$amt, i32imm:$amt2),
+ [(WebAssemblycallseq_start timm:$amt, timm:$amt2)]>;
+defm ADJCALLSTACKUP : NRI<(outs), (ins i32imm:$amt, i32imm:$amt2),
+ [(WebAssemblycallseq_end timm:$amt, timm:$amt2)]>;
+} // Uses = [SP32, SP64], Defs = [SP32, SP64], isCodeGenOnly = 1
+
+
+let Uses = [SP32, SP64], isCall = 1 in {
+
+// CALL should take both variadic arguments and produce variadic results, but
+// this is not possible to model directly. Instead, we select calls to a
+// CALL_PARAMS taking variadic arguments linked with a CALL_RESULTS that handles
+// producing the call's variadic results. We recombine the two in a custom
+// inserter hook after DAG ISel, so passes over MachineInstrs will only ever
+// observe CALL nodes with all of the expected variadic uses and defs.
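+//
+// For example (an illustrative sketch): a two-result call is first selected
+// roughly as a CALL_RESULTS defining $r0, $r1 paired with a CALL_PARAMS taking
+// @callee, $a0, $a1; the custom inserter then fuses them into a single
+// CALL $r0, $r1, @callee, $a0, $a1.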
+let isPseudo = 1 in
+defm CALL_PARAMS :
+ I<(outs), (ins function32_op:$callee, variable_ops),
+ (outs), (ins function32_op:$callee), [],
+ "call_params\t$callee", "call_params\t$callee", -1>;
+
+let variadicOpsAreDefs = 1, usesCustomInserter = 1, isPseudo = 1 in
+defm CALL_RESULTS :
+ I<(outs), (ins variable_ops), (outs), (ins), [],
+ "call_results", "call_results", -1>;
+
+let variadicOpsAreDefs = 1, usesCustomInserter = 1, isPseudo = 1 in
+defm RET_CALL_RESULTS :
+ I<(outs), (ins variable_ops), (outs), (ins), [],
+ "return_call_results", "return_call_results", -1>;
+
+// Note that instructions with variable_ops have custom printers in
+// WebAssemblyInstPrinter.cpp.
+
+let variadicOpsAreDefs = 1 in
+defm CALL :
+ I<(outs), (ins function32_op:$callee, variable_ops),
+ (outs), (ins function32_op:$callee), [],
+ "call", "call\t$callee", 0x10>;
+
+let variadicOpsAreDefs = 1 in
+defm CALL_INDIRECT :
+ I<(outs),
+ (ins TypeIndex:$type, table32_op:$table, variable_ops),
+ (outs),
+ (ins TypeIndex:$type, table32_op:$table),
+ [],
+ "call_indirect", "call_indirect\t$type, $table", 0x11>;
+
+let isReturn = 1, isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in
+defm RET_CALL :
+ I<(outs), (ins function32_op:$callee, variable_ops),
+ (outs), (ins function32_op:$callee), [],
+ "return_call \t$callee", "return_call\t$callee", 0x12>,
+ Requires<[HasTailCall]>;
+
+let isReturn = 1, isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in
+defm RET_CALL_INDIRECT :
+ I<(outs), (ins TypeIndex:$type, table32_op:$table, variable_ops),
+ (outs), (ins TypeIndex:$type, table32_op:$table), [],
+ "return_call_indirect\t", "return_call_indirect\t$type, $table",
+ 0x13>,
+ Requires<[HasTailCall]>;
+
+} // Uses = [SP32,SP64], isCall = 1
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td
new file mode 100644
index 000000000000..be6547007aaf
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td
@@ -0,0 +1,168 @@
+//===- WebAssemblyInstrControl.td-WebAssembly control-flow ------*- tablegen -*-
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// WebAssembly control-flow code-gen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+let isBranch = 1, isTerminator = 1, hasCtrlDep = 1 in {
+// The condition operand is a boolean value which WebAssembly represents as i32.
+defm BR_IF : I<(outs), (ins bb_op:$dst, I32:$cond),
+ (outs), (ins bb_op:$dst),
+ [(brcond I32:$cond, bb:$dst)],
+ "br_if \t$dst, $cond", "br_if \t$dst", 0x0d>;
+let isCodeGenOnly = 1 in
+defm BR_UNLESS : I<(outs), (ins bb_op:$dst, I32:$cond),
+ (outs), (ins bb_op:$dst), []>;
+let isBarrier = 1 in
+defm BR : NRI<(outs), (ins bb_op:$dst),
+ [(br bb:$dst)],
+ "br \t$dst", 0x0c>;
+} // isBranch = 1, isTerminator = 1, hasCtrlDep = 1
+
+def : Pat<(brcond (i32 (setne I32:$cond, 0)), bb:$dst),
+ (BR_IF bb_op:$dst, I32:$cond)>;
+def : Pat<(brcond (i32 (seteq I32:$cond, 0)), bb:$dst),
+ (BR_UNLESS bb_op:$dst, I32:$cond)>;
+def : Pat<(brcond (i32 (xor bool_node:$cond, (i32 1))), bb:$dst),
+ (BR_UNLESS bb_op:$dst, I32:$cond)>;
+
+// A list of branch targets enclosed in {} and separated by comma.
+// Used by br_table only.
+def BrListAsmOperand : AsmOperandClass { let Name = "BrList"; }
+let OperandNamespace = "WebAssembly", OperandType = "OPERAND_BRLIST" in
+def brlist : Operand<i32> {
+ let ParserMatchClass = BrListAsmOperand;
+ let PrintMethod = "printBrList";
+}
+
+// Duplicating a BR_TABLE is almost never a good idea. In particular, it can
+// lead to some nasty irreducibility due to tail merging when the br_table is in
+// a loop.
+let isTerminator = 1, hasCtrlDep = 1, isBarrier = 1, isNotDuplicable = 1 in {
+
+defm BR_TABLE_I32 : I<(outs), (ins I32:$index, variable_ops),
+ (outs), (ins brlist:$brl),
+ [(WebAssemblybr_table I32:$index)],
+ "br_table \t$index", "br_table \t$brl",
+ 0x0e>;
+// TODO: SelectionDAG's lowering insists on using a pointer as the index for
+// jump tables, so in practice we don't ever use BR_TABLE_I64 in wasm32 mode
+// currently.
+defm BR_TABLE_I64 : I<(outs), (ins I64:$index, variable_ops),
+ (outs), (ins brlist:$brl),
+ [(WebAssemblybr_table I64:$index)],
+ "br_table \t$index", "br_table \t$brl",
+ 0x0e>;
+} // isTerminator = 1, hasCtrlDep = 1, isBarrier = 1, isNotDuplicable = 1
+
+// This is technically a control-flow instruction, since all it affects is the
+// IP.
+defm NOP : NRI<(outs), (ins), [], "nop", 0x01>;
+
+// Placemarkers to indicate the start or end of a block or loop scope.
+// These use/clobber VALUE_STACK to prevent them from being moved into the
+// middle of an expression tree.
+let Uses = [VALUE_STACK], Defs = [VALUE_STACK] in {
+defm BLOCK : NRI<(outs), (ins Signature:$sig), [], "block \t$sig", 0x02>;
+defm LOOP : NRI<(outs), (ins Signature:$sig), [], "loop \t$sig", 0x03>;
+
+defm IF : I<(outs), (ins Signature:$sig, I32:$cond),
+ (outs), (ins Signature:$sig),
+ [], "if \t$sig, $cond", "if \t$sig", 0x04>;
+defm ELSE : NRI<(outs), (ins), [], "else", 0x05>;
+
+// END_BLOCK, END_LOOP, END_IF and END_FUNCTION are represented with the same
+// opcode in wasm.
+defm END_BLOCK : NRI<(outs), (ins), [], "end_block", 0x0b>;
+defm END_LOOP : NRI<(outs), (ins), [], "end_loop", 0x0b>;
+defm END_IF : NRI<(outs), (ins), [], "end_if", 0x0b>;
+// Generic instruction, for disassembler.
+let IsCanonical = 1 in
+defm END : NRI<(outs), (ins), [], "end", 0x0b>;
+let isTerminator = 1, isBarrier = 1 in
+defm END_FUNCTION : NRI<(outs), (ins), [], "end_function", 0x0b>;
+} // Uses = [VALUE_STACK], Defs = [VALUE_STACK]
+
+
+let hasCtrlDep = 1, isBarrier = 1 in {
+let isTerminator = 1 in {
+let isReturn = 1 in {
+
+defm RETURN : I<(outs), (ins variable_ops), (outs), (ins),
+ [(WebAssemblyreturn)],
+ "return", "return", 0x0f>;
+// Equivalent to RETURN, for use at the end of a function when wasm
+// semantics return by falling off the end of the block.
+let isCodeGenOnly = 1 in
+defm FALLTHROUGH_RETURN : I<(outs), (ins variable_ops), (outs), (ins), []>;
+
+} // isReturn = 1
+
+let IsCanonical = 1, isTrap = 1 in
+defm UNREACHABLE : NRI<(outs), (ins), [(trap)], "unreachable", 0x00>;
+
+} // isTerminator = 1
+
+// debugtrap explicitly returns despite trapping because it is supposed to just
+// get the attention of the debugger. Unfortunately, because UNREACHABLE is a
+// terminator, lowering debugtrap to UNREACHABLE can create an invalid
+// MachineBasicBlock when there is additional code after it. Lower it to this
+// non-terminator version instead.
+// TODO: Actually execute the debugger statement when running on the Web
+let isTrap = 1 in
+defm DEBUG_UNREACHABLE : NRI<(outs), (ins), [(debugtrap)], "unreachable", 0x00>;
+
+} // hasCtrlDep = 1, isBarrier = 1
+
+//===----------------------------------------------------------------------===//
+// Exception handling instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasExceptionHandling] in {
+
+// Throwing an exception: throw / rethrow
+let isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in {
+defm THROW : I<(outs), (ins tag_op:$tag, variable_ops),
+ (outs), (ins tag_op:$tag), [],
+ "throw \t$tag", "throw \t$tag", 0x08>;
+defm RETHROW : NRI<(outs), (ins i32imm:$depth), [], "rethrow \t$depth", 0x09>;
+} // isTerminator = 1, hasCtrlDep = 1, isBarrier = 1
+// The depth argument will be computed in CFGStackify. We set it to 0 here for
+// now.
+def : Pat<(int_wasm_rethrow), (RETHROW 0)>;
+
+// Region within which an exception is caught: try / end_try
+let Uses = [VALUE_STACK], Defs = [VALUE_STACK] in {
+defm TRY : NRI<(outs), (ins Signature:$sig), [], "try \t$sig", 0x06>;
+defm END_TRY : NRI<(outs), (ins), [], "end_try", 0x0b>;
+} // Uses = [VALUE_STACK], Defs = [VALUE_STACK]
+
+// Catching an exception: catch / catch_all
+let hasCtrlDep = 1, hasSideEffects = 1 in {
+let variadicOpsAreDefs = 1 in
+defm CATCH : I<(outs), (ins tag_op:$tag, variable_ops),
+ (outs), (ins tag_op:$tag), [],
+ "catch", "catch \t$tag", 0x07>;
+defm CATCH_ALL : NRI<(outs), (ins), [], "catch_all", 0x19>;
+}
+
+// Delegating an exception: delegate
+let isTerminator = 1, hasCtrlDep = 1, hasSideEffects = 1 in
+defm DELEGATE : NRI<(outs), (ins bb_op:$dst), [], "delegate \t $dst", 0x18>;
+
+// Pseudo instructions: cleanupret / catchret
+let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
+ isPseudo = 1, isEHScopeReturn = 1 in {
+ defm CLEANUPRET : NRI<(outs), (ins), [(cleanupret)], "cleanupret", 0>;
+ defm CATCHRET : NRI<(outs), (ins bb_op:$dst, bb_op:$from),
+ [(catchret bb:$dst, bb:$from)], "catchret", 0>;
+} // isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
+ // isPseudo = 1, isEHScopeReturn = 1
+} // Predicates = [HasExceptionHandling]
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td
new file mode 100644
index 000000000000..262d5f6ebc47
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td
@@ -0,0 +1,240 @@
+//===-- WebAssemblyInstrConv.td-WebAssembly Conversion support -*- tablegen -*-=
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// WebAssembly datatype conversions, truncations, reinterpretations,
+/// promotions, and demotions operand code-gen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+defm I32_WRAP_I64 : I<(outs I32:$dst), (ins I64:$src), (outs), (ins),
+ [(set I32:$dst, (trunc I64:$src))],
+ "i32.wrap_i64\t$dst, $src", "i32.wrap_i64", 0xa7>;
+
+defm I64_EXTEND_S_I32 : I<(outs I64:$dst), (ins I32:$src), (outs), (ins),
+ [(set I64:$dst, (sext I32:$src))],
+ "i64.extend_i32_s\t$dst, $src", "i64.extend_i32_s",
+ 0xac>;
+defm I64_EXTEND_U_I32 : I<(outs I64:$dst), (ins I32:$src), (outs), (ins),
+ [(set I64:$dst, (zext I32:$src))],
+ "i64.extend_i32_u\t$dst, $src", "i64.extend_i32_u",
+ 0xad>;
+
+let Predicates = [HasSignExt] in {
+defm I32_EXTEND8_S_I32 : I<(outs I32:$dst), (ins I32:$src), (outs), (ins),
+ [(set I32:$dst, (sext_inreg I32:$src, i8))],
+ "i32.extend8_s\t$dst, $src", "i32.extend8_s",
+ 0xc0>;
+defm I32_EXTEND16_S_I32 : I<(outs I32:$dst), (ins I32:$src), (outs), (ins),
+ [(set I32:$dst, (sext_inreg I32:$src, i16))],
+ "i32.extend16_s\t$dst, $src", "i32.extend16_s",
+ 0xc1>;
+defm I64_EXTEND8_S_I64 : I<(outs I64:$dst), (ins I64:$src), (outs), (ins),
+ [(set I64:$dst, (sext_inreg I64:$src, i8))],
+ "i64.extend8_s\t$dst, $src", "i64.extend8_s",
+ 0xc2>;
+defm I64_EXTEND16_S_I64 : I<(outs I64:$dst), (ins I64:$src), (outs), (ins),
+ [(set I64:$dst, (sext_inreg I64:$src, i16))],
+ "i64.extend16_s\t$dst, $src", "i64.extend16_s",
+ 0xc3>;
+defm I64_EXTEND32_S_I64 : I<(outs I64:$dst), (ins I64:$src), (outs), (ins),
+ [(set I64:$dst, (sext_inreg I64:$src, i32))],
+ "i64.extend32_s\t$dst, $src", "i64.extend32_s",
+ 0xc4>;
+} // Predicates = [HasSignExt]
+
+// Expand a "don't care" extend into zero-extend (chosen over sign-extend
+// somewhat arbitrarily, although it favors popular hardware architectures
+// and is conceptually a simpler operation).
+def : Pat<(i64 (anyext I32:$src)), (I64_EXTEND_U_I32 I32:$src)>;
+
+// Conversion from floating point to integer instructions which don't trap on
+// overflow or invalid.
+defm I32_TRUNC_S_SAT_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins),
+ [(set I32:$dst, (fp_to_sint F32:$src))],
+ "i32.trunc_sat_f32_s\t$dst, $src",
+ "i32.trunc_sat_f32_s", 0xfc00>,
+ Requires<[HasNontrappingFPToInt]>;
+defm I32_TRUNC_U_SAT_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins),
+ [(set I32:$dst, (fp_to_uint F32:$src))],
+ "i32.trunc_sat_f32_u\t$dst, $src",
+ "i32.trunc_sat_f32_u", 0xfc01>,
+ Requires<[HasNontrappingFPToInt]>;
+defm I64_TRUNC_S_SAT_F32 : I<(outs I64:$dst), (ins F32:$src), (outs), (ins),
+ [(set I64:$dst, (fp_to_sint F32:$src))],
+ "i64.trunc_sat_f32_s\t$dst, $src",
+ "i64.trunc_sat_f32_s", 0xfc04>,
+ Requires<[HasNontrappingFPToInt]>;
+defm I64_TRUNC_U_SAT_F32 : I<(outs I64:$dst), (ins F32:$src), (outs), (ins),
+ [(set I64:$dst, (fp_to_uint F32:$src))],
+ "i64.trunc_sat_f32_u\t$dst, $src",
+ "i64.trunc_sat_f32_u", 0xfc05>,
+ Requires<[HasNontrappingFPToInt]>;
+defm I32_TRUNC_S_SAT_F64 : I<(outs I32:$dst), (ins F64:$src), (outs), (ins),
+ [(set I32:$dst, (fp_to_sint F64:$src))],
+ "i32.trunc_sat_f64_s\t$dst, $src",
+ "i32.trunc_sat_f64_s", 0xfc02>,
+ Requires<[HasNontrappingFPToInt]>;
+defm I32_TRUNC_U_SAT_F64 : I<(outs I32:$dst), (ins F64:$src), (outs), (ins),
+ [(set I32:$dst, (fp_to_uint F64:$src))],
+ "i32.trunc_sat_f64_u\t$dst, $src",
+ "i32.trunc_sat_f64_u", 0xfc03>,
+ Requires<[HasNontrappingFPToInt]>;
+defm I64_TRUNC_S_SAT_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins),
+ [(set I64:$dst, (fp_to_sint F64:$src))],
+ "i64.trunc_sat_f64_s\t$dst, $src",
+ "i64.trunc_sat_f64_s", 0xfc06>,
+ Requires<[HasNontrappingFPToInt]>;
+defm I64_TRUNC_U_SAT_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins),
+ [(set I64:$dst, (fp_to_uint F64:$src))],
+ "i64.trunc_sat_f64_u\t$dst, $src",
+ "i64.trunc_sat_f64_u", 0xfc07>,
+ Requires<[HasNontrappingFPToInt]>;
+
+// Support the explicitly saturating operations as well.
+def : Pat<(fp_to_sint_sat F32:$src, i32), (I32_TRUNC_S_SAT_F32 F32:$src)>;
+def : Pat<(fp_to_uint_sat F32:$src, i32), (I32_TRUNC_U_SAT_F32 F32:$src)>;
+def : Pat<(fp_to_sint_sat F64:$src, i32), (I32_TRUNC_S_SAT_F64 F64:$src)>;
+def : Pat<(fp_to_uint_sat F64:$src, i32), (I32_TRUNC_U_SAT_F64 F64:$src)>;
+def : Pat<(fp_to_sint_sat F32:$src, i64), (I64_TRUNC_S_SAT_F32 F32:$src)>;
+def : Pat<(fp_to_uint_sat F32:$src, i64), (I64_TRUNC_U_SAT_F32 F32:$src)>;
+def : Pat<(fp_to_sint_sat F64:$src, i64), (I64_TRUNC_S_SAT_F64 F64:$src)>;
+def : Pat<(fp_to_uint_sat F64:$src, i64), (I64_TRUNC_U_SAT_F64 F64:$src)>;
+
+// Conversion from floating point to integer pseudo-instructions which don't
+// trap on overflow or invalid.
+let usesCustomInserter = 1, isCodeGenOnly = 1 in {
+defm FP_TO_SINT_I32_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins),
+ [(set I32:$dst, (fp_to_sint F32:$src))], "", "", 0>,
+ Requires<[NotHasNontrappingFPToInt]>;
+defm FP_TO_UINT_I32_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins),
+ [(set I32:$dst, (fp_to_uint F32:$src))], "", "", 0>,
+ Requires<[NotHasNontrappingFPToInt]>;
+defm FP_TO_SINT_I64_F32 : I<(outs I64:$dst), (ins F32:$src), (outs), (ins),
+ [(set I64:$dst, (fp_to_sint F32:$src))], "", "", 0>,
+ Requires<[NotHasNontrappingFPToInt]>;
+defm FP_TO_UINT_I64_F32 : I<(outs I64:$dst), (ins F32:$src), (outs), (ins),
+ [(set I64:$dst, (fp_to_uint F32:$src))], "", "", 0>,
+ Requires<[NotHasNontrappingFPToInt]>;
+defm FP_TO_SINT_I32_F64 : I<(outs I32:$dst), (ins F64:$src), (outs), (ins),
+ [(set I32:$dst, (fp_to_sint F64:$src))], "", "", 0>,
+ Requires<[NotHasNontrappingFPToInt]>;
+defm FP_TO_UINT_I32_F64 : I<(outs I32:$dst), (ins F64:$src), (outs), (ins),
+ [(set I32:$dst, (fp_to_uint F64:$src))], "", "", 0>,
+ Requires<[NotHasNontrappingFPToInt]>;
+defm FP_TO_SINT_I64_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins),
+ [(set I64:$dst, (fp_to_sint F64:$src))], "", "", 0>,
+ Requires<[NotHasNontrappingFPToInt]>;
+defm FP_TO_UINT_I64_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins),
+ [(set I64:$dst, (fp_to_uint F64:$src))], "", "", 0>,
+ Requires<[NotHasNontrappingFPToInt]>;
+} // usesCustomInserter, isCodeGenOnly = 1
+
+// Conversion from floating point to integer traps on overflow and invalid.
+let hasSideEffects = 1 in {
+defm I32_TRUNC_S_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins),
+ [], "i32.trunc_f32_s\t$dst, $src", "i32.trunc_f32_s",
+ 0xa8>;
+defm I32_TRUNC_U_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins),
+ [], "i32.trunc_f32_u\t$dst, $src", "i32.trunc_f32_u",
+ 0xa9>;
+defm I64_TRUNC_S_F32 : I<(outs I64:$dst), (ins F32:$src), (outs), (ins),
+ [], "i64.trunc_f32_s\t$dst, $src", "i64.trunc_f32_s",
+ 0xae>;
+defm I64_TRUNC_U_F32 : I<(outs I64:$dst), (ins F32:$src), (outs), (ins),
+ [], "i64.trunc_f32_u\t$dst, $src", "i64.trunc_f32_u",
+ 0xaf>;
+defm I32_TRUNC_S_F64 : I<(outs I32:$dst), (ins F64:$src), (outs), (ins),
+ [], "i32.trunc_f64_s\t$dst, $src", "i32.trunc_f64_s",
+ 0xaa>;
+defm I32_TRUNC_U_F64 : I<(outs I32:$dst), (ins F64:$src), (outs), (ins),
+ [], "i32.trunc_f64_u\t$dst, $src", "i32.trunc_f64_u",
+ 0xab>;
+defm I64_TRUNC_S_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins),
+ [], "i64.trunc_f64_s\t$dst, $src", "i64.trunc_f64_s",
+ 0xb0>;
+defm I64_TRUNC_U_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins),
+ [], "i64.trunc_f64_u\t$dst, $src", "i64.trunc_f64_u",
+ 0xb1>;
+} // hasSideEffects = 1
+
+def : Pat<(int_wasm_trunc_signed F32:$src),
+ (I32_TRUNC_S_F32 F32:$src)>;
+def : Pat<(int_wasm_trunc_unsigned F32:$src),
+ (I32_TRUNC_U_F32 F32:$src)>;
+def : Pat<(int_wasm_trunc_signed F64:$src),
+ (I32_TRUNC_S_F64 F64:$src)>;
+def : Pat<(int_wasm_trunc_unsigned F64:$src),
+ (I32_TRUNC_U_F64 F64:$src)>;
+def : Pat<(int_wasm_trunc_signed F32:$src),
+ (I64_TRUNC_S_F32 F32:$src)>;
+def : Pat<(int_wasm_trunc_unsigned F32:$src),
+ (I64_TRUNC_U_F32 F32:$src)>;
+def : Pat<(int_wasm_trunc_signed F64:$src),
+ (I64_TRUNC_S_F64 F64:$src)>;
+def : Pat<(int_wasm_trunc_unsigned F64:$src),
+ (I64_TRUNC_U_F64 F64:$src)>;
+
+defm F32_CONVERT_S_I32 : I<(outs F32:$dst), (ins I32:$src), (outs), (ins),
+ [(set F32:$dst, (sint_to_fp I32:$src))],
+ "f32.convert_i32_s\t$dst, $src", "f32.convert_i32_s",
+ 0xb2>;
+defm F32_CONVERT_U_I32 : I<(outs F32:$dst), (ins I32:$src), (outs), (ins),
+ [(set F32:$dst, (uint_to_fp I32:$src))],
+ "f32.convert_i32_u\t$dst, $src", "f32.convert_i32_u",
+ 0xb3>;
+defm F64_CONVERT_S_I32 : I<(outs F64:$dst), (ins I32:$src), (outs), (ins),
+ [(set F64:$dst, (sint_to_fp I32:$src))],
+ "f64.convert_i32_s\t$dst, $src", "f64.convert_i32_s",
+ 0xb7>;
+defm F64_CONVERT_U_I32 : I<(outs F64:$dst), (ins I32:$src), (outs), (ins),
+ [(set F64:$dst, (uint_to_fp I32:$src))],
+ "f64.convert_i32_u\t$dst, $src", "f64.convert_i32_u",
+ 0xb8>;
+defm F32_CONVERT_S_I64 : I<(outs F32:$dst), (ins I64:$src), (outs), (ins),
+ [(set F32:$dst, (sint_to_fp I64:$src))],
+ "f32.convert_i64_s\t$dst, $src", "f32.convert_i64_s",
+ 0xb4>;
+defm F32_CONVERT_U_I64 : I<(outs F32:$dst), (ins I64:$src), (outs), (ins),
+ [(set F32:$dst, (uint_to_fp I64:$src))],
+ "f32.convert_i64_u\t$dst, $src", "f32.convert_i64_u",
+ 0xb5>;
+defm F64_CONVERT_S_I64 : I<(outs F64:$dst), (ins I64:$src), (outs), (ins),
+ [(set F64:$dst, (sint_to_fp I64:$src))],
+ "f64.convert_i64_s\t$dst, $src", "f64.convert_i64_s",
+ 0xb9>;
+defm F64_CONVERT_U_I64 : I<(outs F64:$dst), (ins I64:$src), (outs), (ins),
+ [(set F64:$dst, (uint_to_fp I64:$src))],
+ "f64.convert_i64_u\t$dst, $src", "f64.convert_i64_u",
+ 0xba>;
+
+defm F64_PROMOTE_F32 : I<(outs F64:$dst), (ins F32:$src), (outs), (ins),
+ [(set F64:$dst, (fpextend F32:$src))],
+ "f64.promote_f32\t$dst, $src", "f64.promote_f32",
+ 0xbb>;
+defm F32_DEMOTE_F64 : I<(outs F32:$dst), (ins F64:$src), (outs), (ins),
+ [(set F32:$dst, (fpround F64:$src))],
+ "f32.demote_f64\t$dst, $src", "f32.demote_f64",
+ 0xb6>;
+
+defm I32_REINTERPRET_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins),
+ [(set I32:$dst, (bitconvert F32:$src))],
+ "i32.reinterpret_f32\t$dst, $src",
+ "i32.reinterpret_f32", 0xbc>;
+defm F32_REINTERPRET_I32 : I<(outs F32:$dst), (ins I32:$src), (outs), (ins),
+ [(set F32:$dst, (bitconvert I32:$src))],
+ "f32.reinterpret_i32\t$dst, $src",
+ "f32.reinterpret_i32", 0xbe>;
+defm I64_REINTERPRET_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins),
+ [(set I64:$dst, (bitconvert F64:$src))],
+ "i64.reinterpret_f64\t$dst, $src",
+ "i64.reinterpret_f64", 0xbd>;
+defm F64_REINTERPRET_I64 : I<(outs F64:$dst), (ins I64:$src), (outs), (ins),
+ [(set F64:$dst, (bitconvert I64:$src))],
+ "f64.reinterpret_i64\t$dst, $src",
+ "f64.reinterpret_i64", 0xbf>;
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td
new file mode 100644
index 000000000000..cc9a9f86f683
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td
@@ -0,0 +1,127 @@
+// WebAssemblyInstrFloat.td-WebAssembly Float codegen support ---*- tablegen -*-
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// WebAssembly Floating-point operand code-gen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+multiclass UnaryFP<SDNode node, string name, bits<32> f32Inst,
+ bits<32> f64Inst> {
+ defm _F32 : I<(outs F32:$dst), (ins F32:$src), (outs), (ins),
+ [(set F32:$dst, (node F32:$src))],
+ !strconcat("f32.", !strconcat(name, "\t$dst, $src")),
+ !strconcat("f32.", name), f32Inst>;
+ defm _F64 : I<(outs F64:$dst), (ins F64:$src), (outs), (ins),
+ [(set F64:$dst, (node F64:$src))],
+ !strconcat("f64.", !strconcat(name, "\t$dst, $src")),
+ !strconcat("f64.", name), f64Inst>;
+}
+multiclass BinaryFP<SDNode node, string name, bits<32> f32Inst,
+ bits<32> f64Inst> {
+ defm _F32 : I<(outs F32:$dst), (ins F32:$lhs, F32:$rhs), (outs), (ins),
+ [(set F32:$dst, (node F32:$lhs, F32:$rhs))],
+ !strconcat("f32.", !strconcat(name, "\t$dst, $lhs, $rhs")),
+ !strconcat("f32.", name), f32Inst>;
+ defm _F64 : I<(outs F64:$dst), (ins F64:$lhs, F64:$rhs), (outs), (ins),
+ [(set F64:$dst, (node F64:$lhs, F64:$rhs))],
+ !strconcat("f64.", !strconcat(name, "\t$dst, $lhs, $rhs")),
+ !strconcat("f64.", name), f64Inst>;
+}
+multiclass ComparisonFP<CondCode cond, string name, bits<32> f32Inst,
+                        bits<32> f64Inst> {
+ defm _F32 : I<(outs I32:$dst), (ins F32:$lhs, F32:$rhs), (outs), (ins),
+ [(set I32:$dst, (setcc F32:$lhs, F32:$rhs, cond))],
+ !strconcat("f32.", !strconcat(name, "\t$dst, $lhs, $rhs")),
+ !strconcat("f32.", name), f32Inst>;
+ defm _F64 : I<(outs I32:$dst), (ins F64:$lhs, F64:$rhs), (outs), (ins),
+ [(set I32:$dst, (setcc F64:$lhs, F64:$rhs, cond))],
+ !strconcat("f64.", !strconcat(name, "\t$dst, $lhs, $rhs")),
+ !strconcat("f64.", name), f64Inst>;
+}
+
+let isCommutable = 1 in
+defm ADD : BinaryFP<fadd, "add ", 0x92, 0xa0>;
+defm SUB : BinaryFP<fsub, "sub ", 0x93, 0xa1>;
+let isCommutable = 1 in
+defm MUL : BinaryFP<fmul, "mul ", 0x94, 0xa2>;
+defm DIV : BinaryFP<fdiv, "div ", 0x95, 0xa3>;
+defm SQRT : UnaryFP<fsqrt, "sqrt", 0x91, 0x9f>;
+
+defm ABS : UnaryFP<fabs, "abs ", 0x8b, 0x99>;
+defm NEG : UnaryFP<fneg, "neg ", 0x8c, 0x9a>;
+defm COPYSIGN : BinaryFP<fcopysign, "copysign", 0x98, 0xa6>;
+
+let isCommutable = 1 in {
+defm MIN : BinaryFP<fminimum, "min ", 0x96, 0xa4>;
+defm MAX : BinaryFP<fmaximum, "max ", 0x97, 0xa5>;
+} // isCommutable = 1
+
+defm CEIL : UnaryFP<fceil, "ceil", 0x8d, 0x9b>;
+defm FLOOR : UnaryFP<ffloor, "floor", 0x8e, 0x9c>;
+defm TRUNC : UnaryFP<ftrunc, "trunc", 0x8f, 0x9d>;
+defm NEAREST : UnaryFP<fnearbyint, "nearest", 0x90, 0x9e>;
+
+// DAGCombine oddly folds casts into the rhs of copysign. Unfold them.
+def : Pat<(fcopysign F64:$lhs, F32:$rhs),
+ (COPYSIGN_F64 F64:$lhs, (F64_PROMOTE_F32 F32:$rhs))>;
+def : Pat<(fcopysign F32:$lhs, F64:$rhs),
+ (COPYSIGN_F32 F32:$lhs, (F32_DEMOTE_F64 F64:$rhs))>;
+
+// WebAssembly doesn't expose inexact exceptions, so map frint to fnearbyint.
+def : Pat<(frint f32:$src), (NEAREST_F32 f32:$src)>;
+def : Pat<(frint f64:$src), (NEAREST_F64 f64:$src)>;
+
+// WebAssembly always rounds ties-to-even, so map froundeven to fnearbyint.
+def : Pat<(froundeven f32:$src), (NEAREST_F32 f32:$src)>;
+def : Pat<(froundeven f64:$src), (NEAREST_F64 f64:$src)>;
+
+let isCommutable = 1 in {
+defm EQ : ComparisonFP<SETOEQ, "eq ", 0x5b, 0x61>;
+defm NE : ComparisonFP<SETUNE, "ne ", 0x5c, 0x62>;
+} // isCommutable = 1
+defm LT : ComparisonFP<SETOLT, "lt ", 0x5d, 0x63>;
+defm LE : ComparisonFP<SETOLE, "le ", 0x5f, 0x65>;
+defm GT : ComparisonFP<SETOGT, "gt ", 0x5e, 0x64>;
+defm GE : ComparisonFP<SETOGE, "ge ", 0x60, 0x66>;
+
+// Don't-care floating-point comparisons, supported via other comparisons.
+def : Pat<(seteq f32:$lhs, f32:$rhs), (EQ_F32 f32:$lhs, f32:$rhs)>;
+def : Pat<(setne f32:$lhs, f32:$rhs), (NE_F32 f32:$lhs, f32:$rhs)>;
+def : Pat<(setlt f32:$lhs, f32:$rhs), (LT_F32 f32:$lhs, f32:$rhs)>;
+def : Pat<(setle f32:$lhs, f32:$rhs), (LE_F32 f32:$lhs, f32:$rhs)>;
+def : Pat<(setgt f32:$lhs, f32:$rhs), (GT_F32 f32:$lhs, f32:$rhs)>;
+def : Pat<(setge f32:$lhs, f32:$rhs), (GE_F32 f32:$lhs, f32:$rhs)>;
+def : Pat<(seteq f64:$lhs, f64:$rhs), (EQ_F64 f64:$lhs, f64:$rhs)>;
+def : Pat<(setne f64:$lhs, f64:$rhs), (NE_F64 f64:$lhs, f64:$rhs)>;
+def : Pat<(setlt f64:$lhs, f64:$rhs), (LT_F64 f64:$lhs, f64:$rhs)>;
+def : Pat<(setle f64:$lhs, f64:$rhs), (LE_F64 f64:$lhs, f64:$rhs)>;
+def : Pat<(setgt f64:$lhs, f64:$rhs), (GT_F64 f64:$lhs, f64:$rhs)>;
+def : Pat<(setge f64:$lhs, f64:$rhs), (GE_F64 f64:$lhs, f64:$rhs)>;
+
+defm SELECT_F32 : I<(outs F32:$dst), (ins F32:$lhs, F32:$rhs, I32:$cond),
+ (outs), (ins),
+ [(set F32:$dst, (select I32:$cond, F32:$lhs, F32:$rhs))],
+ "f32.select\t$dst, $lhs, $rhs, $cond", "f32.select", 0x1b>;
+defm SELECT_F64 : I<(outs F64:$dst), (ins F64:$lhs, F64:$rhs, I32:$cond),
+ (outs), (ins),
+ [(set F64:$dst, (select I32:$cond, F64:$lhs, F64:$rhs))],
+ "f64.select\t$dst, $lhs, $rhs, $cond", "f64.select", 0x1b>;
+
+// ISD::SELECT requires its operand to conform to getBooleanContents, but
+// WebAssembly's select interprets any non-zero value as true, so we can fold
+// a setne with 0 into a select.
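+// For example (sketch), (select (setne I32:$c, 0), $a, $b) produces the same
+// "f32.select $a, $b, $c" that a plain (select I32:$c, $a, $b) would.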
+def : Pat<(select (i32 (setne I32:$cond, 0)), F32:$lhs, F32:$rhs),
+ (SELECT_F32 F32:$lhs, F32:$rhs, I32:$cond)>;
+def : Pat<(select (i32 (setne I32:$cond, 0)), F64:$lhs, F64:$rhs),
+ (SELECT_F64 F64:$lhs, F64:$rhs, I32:$cond)>;
+
+// And again, this time with seteq instead of setne and the arms reversed.
+def : Pat<(select (i32 (seteq I32:$cond, 0)), F32:$lhs, F32:$rhs),
+ (SELECT_F32 F32:$rhs, F32:$lhs, I32:$cond)>;
+def : Pat<(select (i32 (seteq I32:$cond, 0)), F64:$lhs, F64:$rhs),
+ (SELECT_F64 F64:$rhs, F64:$lhs, I32:$cond)>;
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td
new file mode 100644
index 000000000000..f2e73dd19d6b
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td
@@ -0,0 +1,68 @@
+//=- WebAssemblyInstrFormats.td - WebAssembly Instr. Formats -*- tablegen -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// WebAssembly instruction format definitions.
+///
+//===----------------------------------------------------------------------===//
+
+// WebAssembly Instruction Format.
+// We instantiate 2 of these for every actual instruction (register based
+// and stack based), see below.
+class WebAssemblyInst<bits<32> inst, string asmstr, bit stack, bit is64>
+ : StackRel, RegisterRel, Wasm64Rel, Instruction {
+ bits<32> Inst = inst; // Instruction encoding.
+ bit StackBased = stack;
+ string BaseName = NAME;
+ bit IsWasm64 = is64;
+ string Wasm32Name = !subst("_A64", "_A32", NAME);
+ let Namespace = "WebAssembly";
+ let Pattern = [];
+ let AsmString = asmstr;
+ // When there are multiple instructions that map to the same encoding (e.g.
+ // in the disassembler use case), prefer the one where IsCanonical == 1.
+ bit IsCanonical = 0;
+}
+
+// Normal instructions. Default instantiation of a WebAssemblyInst.
+class NI<dag oops, dag iops, list<dag> pattern, bit stack,
+ string asmstr = "", bits<32> inst = -1, bit is64 = false>
+ : WebAssemblyInst<inst, asmstr, stack, is64> {
+ dag OutOperandList = oops;
+ dag InOperandList = iops;
+ let Pattern = pattern;
+ let Defs = [ARGUMENTS];
+}
+
+// Generates both register and stack based versions of one actual instruction.
+// We have 2 sets of operands (oops & iops) for the register and stack
+// based version of this instruction, as well as the corresponding asmstr.
+// The register versions have virtual-register operands which correspond to wasm
+// locals or stack locations. Each use and def of the register corresponds to an
+// implicit local.get / local.set or access of stack operands in wasm. These
+// instructions are used for ISel and all MI passes. The stack versions of the
+// instructions do not have register operands (they implicitly operate on the
+// stack), and local.gets and local.sets are explicit. The register instructions
+// are converted to their corresponding stack instructions before lowering to
+// MC.
+// Every instruction should be based on this multiclass to guarantee there is
+// always an equivalent pair of instructions.
+multiclass I<dag oops_r, dag iops_r, dag oops_s, dag iops_s,
+ list<dag> pattern_r, string asmstr_r = "", string asmstr_s = "",
+ bits<32> inst = -1, bit is64 = false> {
+ let isCodeGenOnly = 1 in
+ def "" : NI<oops_r, iops_r, pattern_r, false, asmstr_r, inst, is64>;
+ let BaseName = NAME in
+ def _S : NI<oops_s, iops_s, [], true, asmstr_s, inst, is64>;
+}
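+
+// For example (sketch), WebAssemblyInstrInteger.td's
+//   defm ADD : BinaryInt<add, "add ", 0x6a, 0x7c>;
+// ultimately instantiates this multiclass so that ADD_I32 (register-based,
+// isCodeGenOnly) and ADD_I32_S (stack-based, no register operands) are
+// created as a pair sharing BaseName "ADD_I32".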
+
+// For instructions that have no register ops, so both sets are the same.
+multiclass NRI<dag oops, dag iops, list<dag> pattern, string asmstr = "",
+ bits<32> inst = -1> {
+ defm "": I<oops, iops, oops, iops, pattern, asmstr, asmstr, inst>;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp
new file mode 100644
index 000000000000..32a4accd040e
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp
@@ -0,0 +1,233 @@
+//===-- WebAssemblyInstrInfo.cpp - WebAssembly Instruction Information ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the WebAssembly implementation of the
+/// TargetInstrInfo class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyInstrInfo.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-instr-info"
+
+#define GET_INSTRINFO_CTOR_DTOR
+#include "WebAssemblyGenInstrInfo.inc"
+
+// Defines WebAssembly::getNamedOperandIdx.
+#define GET_INSTRINFO_NAMED_OPS
+#include "WebAssemblyGenInstrInfo.inc"
+
+WebAssemblyInstrInfo::WebAssemblyInstrInfo(const WebAssemblySubtarget &STI)
+ : WebAssemblyGenInstrInfo(WebAssembly::ADJCALLSTACKDOWN,
+ WebAssembly::ADJCALLSTACKUP,
+ WebAssembly::CATCHRET),
+ RI(STI.getTargetTriple()) {}
+
+bool WebAssemblyInstrInfo::isReallyTriviallyReMaterializable(
+ const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ case WebAssembly::CONST_I32:
+ case WebAssembly::CONST_I64:
+ case WebAssembly::CONST_F32:
+ case WebAssembly::CONST_F64:
+ // TargetInstrInfo::isReallyTriviallyReMaterializable misses these
+ // because of the ARGUMENTS implicit def, so we manually override it here.
+ return true;
+ default:
+ return TargetInstrInfo::isReallyTriviallyReMaterializable(MI);
+ }
+}
+
+void WebAssemblyInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, MCRegister DestReg,
+ MCRegister SrcReg, bool KillSrc) const {
+ // This method is called by post-RA expansion, which expects only physical
+ // registers to exist. However, we need to handle both virtual and physical
+ // registers here.
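+ // For example (sketch), copying an i32 value selects COPY_I32 below, which
+ // the ExplicitLocals pass later replaces with explicit local.get/local.set.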
+ auto &MRI = MBB.getParent()->getRegInfo();
+ const TargetRegisterClass *RC =
+ Register::isVirtualRegister(DestReg)
+ ? MRI.getRegClass(DestReg)
+ : MRI.getTargetRegisterInfo()->getMinimalPhysRegClass(DestReg);
+
+ unsigned CopyOpcode = WebAssembly::getCopyOpcodeForRegClass(RC);
+
+ BuildMI(MBB, I, DL, get(CopyOpcode), DestReg)
+ .addReg(SrcReg, KillSrc ? RegState::Kill : 0);
+}
+
+MachineInstr *WebAssemblyInstrInfo::commuteInstructionImpl(
+ MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const {
+ // If the operands are stackified, we can't reorder them.
+ WebAssemblyFunctionInfo &MFI =
+ *MI.getParent()->getParent()->getInfo<WebAssemblyFunctionInfo>();
+ if (MFI.isVRegStackified(MI.getOperand(OpIdx1).getReg()) ||
+ MFI.isVRegStackified(MI.getOperand(OpIdx2).getReg()))
+ return nullptr;
+
+ // Otherwise use the default implementation.
+ return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
+}
+
+// Branch analysis.
+bool WebAssemblyInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool /*AllowModify*/) const {
+ const auto &MFI = *MBB.getParent()->getInfo<WebAssemblyFunctionInfo>();
+ // WebAssembly has control flow that doesn't have explicit branches or direct
+ // fallthrough (e.g. try/catch), which can't be modeled by analyzeBranch.
+ // Such control flow is created by CFGStackify, so give up once that pass has
+ // run.
+ if (MFI.isCFGStackified())
+ return true;
+
+ bool HaveCond = false;
+ for (MachineInstr &MI : MBB.terminators()) {
+ switch (MI.getOpcode()) {
+ default:
+ // Unhandled instruction; bail out.
+ return true;
+ case WebAssembly::BR_IF:
+ if (HaveCond)
+ return true;
+ Cond.push_back(MachineOperand::CreateImm(true));
+ Cond.push_back(MI.getOperand(1));
+ TBB = MI.getOperand(0).getMBB();
+ HaveCond = true;
+ break;
+ case WebAssembly::BR_UNLESS:
+ if (HaveCond)
+ return true;
+ Cond.push_back(MachineOperand::CreateImm(false));
+ Cond.push_back(MI.getOperand(1));
+ TBB = MI.getOperand(0).getMBB();
+ HaveCond = true;
+ break;
+ case WebAssembly::BR:
+ if (!HaveCond)
+ TBB = MI.getOperand(0).getMBB();
+ else
+ FBB = MI.getOperand(0).getMBB();
+ break;
+ }
+ if (MI.isBarrier())
+ break;
+ }
+
+ return false;
+}
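+
+// Example (sketch): for a block whose terminators are
+//   BR_IF %bb.1, %cond
+//   BR %bb.2
+// analyzeBranch returns TBB = %bb.1, FBB = %bb.2, and Cond = {imm(1), %cond};
+// insertBranch below re-emits the same sequence from those values.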
+
+unsigned WebAssemblyInstrInfo::removeBranch(MachineBasicBlock &MBB,
+ int *BytesRemoved) const {
+ assert(!BytesRemoved && "code size not handled");
+
+ MachineBasicBlock::instr_iterator I = MBB.instr_end();
+ unsigned Count = 0;
+
+ while (I != MBB.instr_begin()) {
+ --I;
+ if (I->isDebugInstr())
+ continue;
+ if (!I->isTerminator())
+ break;
+ // Remove the branch.
+ I->eraseFromParent();
+ I = MBB.instr_end();
+ ++Count;
+ }
+
+ return Count;
+}
+
+unsigned WebAssemblyInstrInfo::insertBranch(
+ MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
+ assert(!BytesAdded && "code size not handled");
+
+ if (Cond.empty()) {
+ if (!TBB)
+ return 0;
+
+ BuildMI(&MBB, DL, get(WebAssembly::BR)).addMBB(TBB);
+ return 1;
+ }
+
+ assert(Cond.size() == 2 && "Expected a flag and a successor block");
+
+ if (Cond[0].getImm())
+ BuildMI(&MBB, DL, get(WebAssembly::BR_IF)).addMBB(TBB).add(Cond[1]);
+ else
+ BuildMI(&MBB, DL, get(WebAssembly::BR_UNLESS)).addMBB(TBB).add(Cond[1]);
+ if (!FBB)
+ return 1;
+
+ BuildMI(&MBB, DL, get(WebAssembly::BR)).addMBB(FBB);
+ return 2;
+}
+
+bool WebAssemblyInstrInfo::reverseBranchCondition(
+ SmallVectorImpl<MachineOperand> &Cond) const {
+ assert(Cond.size() == 2 && "Expected a flag and a condition expression");
+ Cond.front() = MachineOperand::CreateImm(!Cond.front().getImm());
+ return false;
+}
+
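+// These names let target-index machine operands round-trip through printed
+// MIR, where a wasm local appears roughly as "target-index(wasm-local) + N"
+// (sketch).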
+ArrayRef<std::pair<int, const char *>>
+WebAssemblyInstrInfo::getSerializableTargetIndices() const {
+ static const std::pair<int, const char *> TargetIndices[] = {
+ {WebAssembly::TI_LOCAL, "wasm-local"},
+ {WebAssembly::TI_GLOBAL_FIXED, "wasm-global-fixed"},
+ {WebAssembly::TI_OPERAND_STACK, "wasm-operand-stack"},
+ {WebAssembly::TI_GLOBAL_RELOC, "wasm-global-reloc"},
+ {WebAssembly::TI_LOCAL_INDIRECT, "wasm-local-indirect"}};
+ return ArrayRef(TargetIndices);
+}
+
+const MachineOperand &
+WebAssemblyInstrInfo::getCalleeOperand(const MachineInstr &MI) const {
+ return WebAssembly::getCalleeOp(MI);
+}
+
+// This returns true when the instruction defines a value of a TargetIndex
+// operand that can be tracked by offsets. For Wasm, this returns true for only
+// local.set/local.tees. This is currently used by LiveDebugValues analysis.
+//
+// These are not included:
+// - In theory we need to add global.set here too, but we don't have global
+// indices at this point because they are relocatable and we address them by
+// names until linking, so we don't have 'offsets' (which are used to store
+// local/global indices) to deal with in LiveDebugValues. And we don't
+// associate debug info with values in globals anyway.
+// - All other value-producing instructions, i.e. instructions with defs, can
+// define values in the Wasm stack, which is represented by TI_OPERAND_STACK
+// TargetIndex. But they don't have offset info within the instruction itself,
+// and debug info analysis for them is handled separately in
+// WebAssemblyDebugFixup pass, so we don't worry about them here.
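+//
+// Example (sketch): for a register-form LOCAL_SET_I32 whose operands are
+// (local 3, %src), this sets Index = TI_LOCAL and Offset = 3, the local index
+// immediate (the first explicit use).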
+bool WebAssemblyInstrInfo::isExplicitTargetIndexDef(const MachineInstr &MI,
+ int &Index,
+ int64_t &Offset) const {
+ unsigned Opc = MI.getOpcode();
+ if (WebAssembly::isLocalSet(Opc) || WebAssembly::isLocalTee(Opc)) {
+ Index = WebAssembly::TI_LOCAL;
+ Offset = MI.explicit_uses().begin()->getImm();
+ return true;
+ }
+ return false;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h
new file mode 100644
index 000000000000..c1e1a790c60e
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h
@@ -0,0 +1,79 @@
+//=- WebAssemblyInstrInfo.h - WebAssembly Instruction Information -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the WebAssembly implementation of the
+/// TargetInstrInfo class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYINSTRINFO_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYINSTRINFO_H
+
+#include "WebAssemblyRegisterInfo.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+
+#define GET_INSTRINFO_HEADER
+#include "WebAssemblyGenInstrInfo.inc"
+
+#define GET_INSTRINFO_OPERAND_ENUM
+#include "WebAssemblyGenInstrInfo.inc"
+
+namespace llvm {
+
+namespace WebAssembly {
+
+int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex);
+
+}
+
+class WebAssemblySubtarget;
+
+class WebAssemblyInstrInfo final : public WebAssemblyGenInstrInfo {
+ const WebAssemblyRegisterInfo RI;
+
+public:
+ explicit WebAssemblyInstrInfo(const WebAssemblySubtarget &STI);
+
+ const WebAssemblyRegisterInfo &getRegisterInfo() const { return RI; }
+
+ bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override;
+
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
+ bool KillSrc) const override;
+ MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
+ unsigned OpIdx1,
+ unsigned OpIdx2) const override;
+
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify = false) const override;
+ unsigned removeBranch(MachineBasicBlock &MBB,
+ int *BytesRemoved = nullptr) const override;
+ unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL,
+ int *BytesAdded = nullptr) const override;
+ bool
+ reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
+
+ ArrayRef<std::pair<int, const char *>>
+ getSerializableTargetIndices() const override;
+
+ const MachineOperand &getCalleeOperand(const MachineInstr &MI) const override;
+
+ bool isExplicitTargetIndexDef(const MachineInstr &MI, int &Index,
+ int64_t &Offset) const override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
new file mode 100644
index 000000000000..59ea9247bd86
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
@@ -0,0 +1,442 @@
+// WebAssemblyInstrInfo.td-Describe the WebAssembly Instructions-*- tablegen -*-
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// WebAssembly Instruction definitions.
+///
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// WebAssembly Instruction Predicate Definitions.
+//===----------------------------------------------------------------------===//
+
+def IsPIC : Predicate<"TM.isPositionIndependent()">;
+def IsNotPIC : Predicate<"!TM.isPositionIndependent()">;
+
+def HasAddr32 : Predicate<"!Subtarget->hasAddr64()">;
+
+def HasAddr64 : Predicate<"Subtarget->hasAddr64()">;
+
+def HasSIMD128 :
+ Predicate<"Subtarget->hasSIMD128()">,
+ AssemblerPredicate<(all_of FeatureSIMD128), "simd128">;
+
+def HasRelaxedSIMD :
+ Predicate<"Subtarget->hasRelaxedSIMD()">,
+ AssemblerPredicate<(all_of FeatureRelaxedSIMD), "relaxed-simd">;
+
+def HasAtomics :
+ Predicate<"Subtarget->hasAtomics()">,
+ AssemblerPredicate<(all_of FeatureAtomics), "atomics">;
+
+def HasMultivalue :
+ Predicate<"Subtarget->hasMultivalue()">,
+ AssemblerPredicate<(all_of FeatureMultivalue), "multivalue">;
+
+def HasNontrappingFPToInt :
+ Predicate<"Subtarget->hasNontrappingFPToInt()">,
+ AssemblerPredicate<(all_of FeatureNontrappingFPToInt), "nontrapping-fptoint">;
+
+def NotHasNontrappingFPToInt :
+ Predicate<"!Subtarget->hasNontrappingFPToInt()">,
+ AssemblerPredicate<(all_of (not FeatureNontrappingFPToInt)), "nontrapping-fptoint">;
+
+def HasSignExt :
+ Predicate<"Subtarget->hasSignExt()">,
+ AssemblerPredicate<(all_of FeatureSignExt), "sign-ext">;
+
+def HasTailCall :
+ Predicate<"Subtarget->hasTailCall()">,
+ AssemblerPredicate<(all_of FeatureTailCall), "tail-call">;
+
+def HasExceptionHandling :
+ Predicate<"Subtarget->hasExceptionHandling()">,
+ AssemblerPredicate<(all_of FeatureExceptionHandling), "exception-handling">;
+
+def HasBulkMemory :
+ Predicate<"Subtarget->hasBulkMemory()">,
+ AssemblerPredicate<(all_of FeatureBulkMemory), "bulk-memory">;
+
+def HasReferenceTypes :
+ Predicate<"Subtarget->hasReferenceTypes()">,
+ AssemblerPredicate<(all_of FeatureReferenceTypes), "reference-types">;
+
+def HasExtendedConst :
+ Predicate<"Subtarget->hasExtendedConst()">,
+ AssemblerPredicate<(all_of FeatureExtendedConst), "extended-const">;
+
+def HasMultiMemory :
+ Predicate<"Subtarget->hasMultiMemory()">,
+ AssemblerPredicate<(all_of FeatureMultiMemory), "multimemory">;
+
+//===----------------------------------------------------------------------===//
+// WebAssembly-specific DAG Node Types.
+//===----------------------------------------------------------------------===//
+
+def SDT_WebAssemblyCallSeqStart : SDCallSeqStart<[SDTCisVT<0, iPTR>,
+ SDTCisVT<1, iPTR>]>;
+def SDT_WebAssemblyCallSeqEnd :
+ SDCallSeqEnd<[SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
+def SDT_WebAssemblyBrTable : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
+def SDT_WebAssemblyArgument : SDTypeProfile<1, 1, [SDTCisVT<1, i32>]>;
+def SDT_WebAssemblyLocalGet : SDTypeProfile<1, 1, [SDTCisVT<1, i32>]>;
+def SDT_WebAssemblyLocalSet : SDTypeProfile<0, 2, [SDTCisVT<0, i32>]>;
+def SDT_WebAssemblyReturn : SDTypeProfile<0, -1, []>;
+def SDT_WebAssemblyWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
+ SDTCisPtrTy<0>]>;
+def SDT_WebAssemblyGlobalGet : SDTypeProfile<1, 1, [SDTCisPtrTy<1>]>;
+def SDT_WebAssemblyGlobalSet : SDTypeProfile<0, 2, [SDTCisPtrTy<1>]>;
+
+//===----------------------------------------------------------------------===//
+// WebAssembly-specific DAG Nodes.
+//===----------------------------------------------------------------------===//
+
+def WebAssemblycallseq_start :
+ SDNode<"ISD::CALLSEQ_START", SDT_WebAssemblyCallSeqStart,
+ [SDNPHasChain, SDNPOutGlue]>;
+def WebAssemblycallseq_end :
+ SDNode<"ISD::CALLSEQ_END", SDT_WebAssemblyCallSeqEnd,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+def WebAssemblybr_table : SDNode<"WebAssemblyISD::BR_TABLE",
+ SDT_WebAssemblyBrTable,
+ [SDNPHasChain, SDNPVariadic]>;
+def WebAssemblyargument : SDNode<"WebAssemblyISD::ARGUMENT",
+ SDT_WebAssemblyArgument>;
+def WebAssemblyreturn : SDNode<"WebAssemblyISD::RETURN",
+ SDT_WebAssemblyReturn,
+ [SDNPHasChain, SDNPVariadic]>;
+def WebAssemblyWrapper : SDNode<"WebAssemblyISD::Wrapper",
+ SDT_WebAssemblyWrapper>;
+def WebAssemblyWrapperREL : SDNode<"WebAssemblyISD::WrapperREL",
+ SDT_WebAssemblyWrapper>;
+def WebAssemblyglobal_get :
+ SDNode<"WebAssemblyISD::GLOBAL_GET", SDT_WebAssemblyGlobalGet,
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+def WebAssemblyglobal_set :
+ SDNode<"WebAssemblyISD::GLOBAL_SET", SDT_WebAssemblyGlobalSet,
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+def WebAssemblylocal_get :
+ SDNode<"WebAssemblyISD::LOCAL_GET", SDT_WebAssemblyLocalGet,
+ [SDNPHasChain, SDNPMayLoad]>;
+def WebAssemblylocal_set :
+ SDNode<"WebAssemblyISD::LOCAL_SET", SDT_WebAssemblyLocalSet,
+ [SDNPHasChain, SDNPMayStore]>;
+
+//===----------------------------------------------------------------------===//
+// WebAssembly-specific Operands.
+//===----------------------------------------------------------------------===//
+
+// Default Operand has AsmOperandClass "Imm" which is for integers (and
+// symbols), so specialize one for floats:
+class FPImmAsmOperand<ValueType ty> : AsmOperandClass {
+ let Name = "FPImm" # ty;
+ let PredicateMethod = "isFPImm";
+}
+
+class FPOperand<ValueType ty> : Operand<ty> {
+ AsmOperandClass ParserMatchClass = FPImmAsmOperand<ty>;
+}
+
+let OperandNamespace = "WebAssembly" in {
+
+let OperandType = "OPERAND_BASIC_BLOCK" in
+def bb_op : Operand<OtherVT>;
+
+let OperandType = "OPERAND_LOCAL" in
+def local_op : Operand<i32>;
+
+let OperandType = "OPERAND_GLOBAL" in {
+ // The operand to global instructions is always a 32-bit index.
+ def global_op32 : Operand<i32>;
+ // In PIC mode, however, we temporarily represent this index as an external
+ // symbol, which to LLVM is a pointer, so in wasm64 mode it is easiest to
+ // pretend we use a 64-bit index for it.
+ def global_op64 : Operand<i64>;
+}
+
+let OperandType = "OPERAND_I32IMM" in
+def i32imm_op : Operand<i32>;
+
+let OperandType = "OPERAND_I64IMM" in
+def i64imm_op : Operand<i64>;
+
+let OperandType = "OPERAND_F32IMM" in
+def f32imm_op : FPOperand<f32>;
+
+let OperandType = "OPERAND_F64IMM" in
+def f64imm_op : FPOperand<f64>;
+
+let OperandType = "OPERAND_VEC_I8IMM" in
+def vec_i8imm_op : Operand<i32>;
+
+let OperandType = "OPERAND_VEC_I16IMM" in
+def vec_i16imm_op : Operand<i32>;
+
+let OperandType = "OPERAND_VEC_I32IMM" in
+def vec_i32imm_op : Operand<i32>;
+
+let OperandType = "OPERAND_VEC_I64IMM" in
+def vec_i64imm_op : Operand<i64>;
+
+let OperandType = "OPERAND_FUNCTION32" in
+def function32_op : Operand<i32>;
+
+let OperandType = "OPERAND_TABLE" in
+def table32_op : Operand<i32>;
+
+let OperandType = "OPERAND_OFFSET32" in
+def offset32_op : Operand<i32>;
+
+let OperandType = "OPERAND_OFFSET64" in
+def offset64_op : Operand<i64>;
+
+let OperandType = "OPERAND_P2ALIGN" in {
+def P2Align : Operand<i32> {
+ let PrintMethod = "printWebAssemblyP2AlignOperand";
+}
+
+let OperandType = "OPERAND_TAG" in
+def tag_op : Operand<i32>;
+
+} // OperandType = "OPERAND_P2ALIGN"
+
+let OperandType = "OPERAND_SIGNATURE" in
+def Signature : Operand<i32> {
+ let PrintMethod = "printWebAssemblySignatureOperand";
+}
+
+let OperandType = "OPERAND_TYPEINDEX" in
+def TypeIndex : Operand<i32>;
+
+} // OperandNamespace = "WebAssembly"
+
+// TODO: Find more places to use this.
+def bool_node : PatLeaf<(i32 I32:$cond), [{
+ return CurDAG->computeKnownBits(SDValue(N, 0)).countMinLeadingZeros() == 31;
+}]>;
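+// (31 leading zero bits on an i32 means the value is known to be 0 or 1,
+// i.e. already a boolean.)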
+
+//===----------------------------------------------------------------------===//
+// WebAssembly Register to Stack instruction mapping
+//===----------------------------------------------------------------------===//
+
+class StackRel;
+def getStackOpcode : InstrMapping {
+ let FilterClass = "StackRel";
+ let RowFields = ["BaseName"];
+ let ColFields = ["StackBased"];
+ let KeyCol = ["0"];
+ let ValueCols = [["1"]];
+}
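+
+// TableGen emits this mapping as a generated function in
+// WebAssemblyGenInstrInfo.inc (roughly int getStackOpcode(uint16_t Opcode)),
+// returning e.g. ADD_I32_S for ADD_I32, or -1 when there is no counterpart.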
+
+//===----------------------------------------------------------------------===//
+// WebAssembly Stack to Register instruction mapping
+//===----------------------------------------------------------------------===//
+
+class RegisterRel;
+def getRegisterOpcode : InstrMapping {
+ let FilterClass = "RegisterRel";
+ let RowFields = ["BaseName"];
+ let ColFields = ["StackBased"];
+ let KeyCol = ["1"];
+ let ValueCols = [["0"]];
+}
+
+//===----------------------------------------------------------------------===//
+// WebAssembly 32 to 64-bit instruction mapping
+//===----------------------------------------------------------------------===//
+
+class Wasm64Rel;
+def getWasm64Opcode : InstrMapping {
+ let FilterClass = "Wasm64Rel";
+ let RowFields = ["Wasm32Name"];
+ let ColFields = ["IsWasm64"];
+ let KeyCol = ["0"];
+ let ValueCols = [["1"]];
+}
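+
+// For example (sketch), this maps memory instructions such as LOAD_I32_A32 to
+// their 64-bit-address twins like LOAD_I32_A64, keyed by the Wasm32Name
+// string computed in WebAssemblyInstrFormats.td.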
+
+//===----------------------------------------------------------------------===//
+// WebAssembly Instruction Format Definitions.
+//===----------------------------------------------------------------------===//
+
+include "WebAssemblyInstrFormats.td"
+
+//===----------------------------------------------------------------------===//
+// Additional instructions.
+//===----------------------------------------------------------------------===//
+
+multiclass ARGUMENT<WebAssemblyRegClass rc, ValueType vt> {
+ let hasSideEffects = 1, isCodeGenOnly = 1, Defs = []<Register>,
+ Uses = [ARGUMENTS] in
+ defm ARGUMENT_#vt :
+ I<(outs rc:$res), (ins i32imm:$argno), (outs), (ins i32imm:$argno),
+ [(set (vt rc:$res), (WebAssemblyargument timm:$argno))]>;
+}
+defm "": ARGUMENT<I32, i32>;
+defm "": ARGUMENT<I64, i64>;
+defm "": ARGUMENT<F32, f32>;
+defm "": ARGUMENT<F64, f64>;
+defm "": ARGUMENT<FUNCREF, funcref>;
+defm "": ARGUMENT<EXTERNREF, externref>;
+
+// local.get and local.set are not generated by instruction selection; they
+// are implied by virtual register uses and defs.
+multiclass LOCAL<WebAssemblyRegClass rc, Operand global_op> {
+ let hasSideEffects = 0 in {
+ // COPY is not an actual instruction in wasm, but since we allow local.get and
+ // local.set to be implicit during most of codegen, we can have a COPY which
+ // is actually a no-op because all the work is done in the implied local.get
+ // and local.set. COPYs are eliminated (and replaced with
+ // local.get/local.set) in the ExplicitLocals pass.
+ let isAsCheapAsAMove = 1, isCodeGenOnly = 1 in
+ defm COPY_#rc : I<(outs rc:$res), (ins rc:$src), (outs), (ins), [],
+ "local.copy\t$res, $src", "local.copy">;
+
+ // TEE is similar to COPY, but writes two copies of its result. Typically
+ // this would be used to stackify one result and write the other result to a
+ // local.
+ let isAsCheapAsAMove = 1, isCodeGenOnly = 1 in
+ defm TEE_#rc : I<(outs rc:$res, rc:$also), (ins rc:$src), (outs), (ins), [],
+ "local.tee\t$res, $also, $src", "local.tee">;
+
+ // This is the actual local.get instruction in wasm. These are made explicit
+ // by the ExplicitLocals pass. It has mayLoad because it reads from a wasm
+ // local, which is a side effect not otherwise modeled in LLVM.
+ let mayLoad = 1, isAsCheapAsAMove = 1 in
+ defm LOCAL_GET_#rc : I<(outs rc:$res), (ins local_op:$local),
+ (outs), (ins local_op:$local), [],
+ "local.get\t$res, $local", "local.get\t$local", 0x20>;
+
+ // This is the actual local.set instruction in wasm. These are made explicit
+ // by the ExplicitLocals pass. It has mayStore because it writes to a wasm
+ // local, which is a side effect not otherwise modeled in LLVM.
+ let mayStore = 1, isAsCheapAsAMove = 1 in
+ defm LOCAL_SET_#rc : I<(outs), (ins local_op:$local, rc:$src),
+ (outs), (ins local_op:$local), [],
+ "local.set\t$local, $src", "local.set\t$local", 0x21>;
+
+ // This is the actual local.tee instruction in wasm. TEEs are turned into
+ // LOCAL_TEEs by the ExplicitLocals pass. It has mayStore for the same reason
+ // as LOCAL_SET.
+ let mayStore = 1, isAsCheapAsAMove = 1 in
+ defm LOCAL_TEE_#rc : I<(outs rc:$res), (ins local_op:$local, rc:$src),
+ (outs), (ins local_op:$local), [],
+ "local.tee\t$res, $local, $src", "local.tee\t$local",
+ 0x22>;
+
+ // Unused values must be dropped in some contexts.
+ defm DROP_#rc : I<(outs), (ins rc:$src), (outs), (ins), [],
+ "drop\t$src", "drop", 0x1a>;
+
+ let mayLoad = 1 in
+ defm GLOBAL_GET_#rc : I<(outs rc:$res), (ins global_op:$addr),
+ (outs), (ins global_op:$addr), [],
+ "global.get\t$res, $addr", "global.get\t$addr",
+ 0x23>;
+
+ let mayStore = 1 in
+ defm GLOBAL_SET_#rc : I<(outs), (ins global_op:$addr, rc:$src),
+ (outs), (ins global_op:$addr), [],
+ "global.set\t$addr, $src", "global.set\t$addr",
+ 0x24>;
+
+ } // hasSideEffects = 0
+ foreach vt = rc.RegTypes in {
+ def : Pat<(vt (WebAssemblyglobal_get
+ (WebAssemblyWrapper tglobaladdr:$addr))),
+ (!cast<NI>("GLOBAL_GET_" # rc) tglobaladdr:$addr)>;
+ def : Pat<(WebAssemblyglobal_set
+ vt:$src, (WebAssemblyWrapper tglobaladdr:$addr)),
+ (!cast<NI>("GLOBAL_SET_" # rc) tglobaladdr:$addr, vt:$src)>;
+ def : Pat<(vt (WebAssemblylocal_get (i32 timm:$local))),
+ (!cast<NI>("LOCAL_GET_" # rc) timm:$local)>;
+ def : Pat<(WebAssemblylocal_set timm:$local, vt:$src),
+ (!cast<NI>("LOCAL_SET_" # rc) timm:$local, vt:$src)>;
+ }
+}
+defm "" : LOCAL<I32, global_op32>;
+defm "" : LOCAL<I64, global_op64>; // 64-bit only needed for pointers.
+defm "" : LOCAL<F32, global_op32>;
+defm "" : LOCAL<F64, global_op32>;
+defm "" : LOCAL<V128, global_op32>, Requires<[HasSIMD128]>;
+defm "" : LOCAL<FUNCREF, global_op32>, Requires<[HasReferenceTypes]>;
+defm "" : LOCAL<EXTERNREF, global_op32>, Requires<[HasReferenceTypes]>;
+
+let isMoveImm = 1, isAsCheapAsAMove = 1, isReMaterializable = 1 in {
+defm CONST_I32 : I<(outs I32:$res), (ins i32imm_op:$imm),
+ (outs), (ins i32imm_op:$imm),
+ [(set I32:$res, imm:$imm)],
+ "i32.const\t$res, $imm", "i32.const\t$imm", 0x41>;
+defm CONST_I64 : I<(outs I64:$res), (ins i64imm_op:$imm),
+ (outs), (ins i64imm_op:$imm),
+ [(set I64:$res, imm:$imm)],
+ "i64.const\t$res, $imm", "i64.const\t$imm", 0x42>;
+defm CONST_F32 : I<(outs F32:$res), (ins f32imm_op:$imm),
+ (outs), (ins f32imm_op:$imm),
+ [(set F32:$res, fpimm:$imm)],
+ "f32.const\t$res, $imm", "f32.const\t$imm", 0x43>;
+defm CONST_F64 : I<(outs F64:$res), (ins f64imm_op:$imm),
+ (outs), (ins f64imm_op:$imm),
+ [(set F64:$res, fpimm:$imm)],
+ "f64.const\t$res, $imm", "f64.const\t$imm", 0x44>;
+} // isMoveImm = 1, isAsCheapAsAMove = 1, isReMaterializable = 1
+
+def : Pat<(i32 (WebAssemblyWrapper tglobaladdr:$addr)),
+ (CONST_I32 tglobaladdr:$addr)>, Requires<[IsNotPIC, HasAddr32]>;
+def : Pat<(i64 (WebAssemblyWrapper tglobaladdr:$addr)),
+ (CONST_I64 tglobaladdr:$addr)>, Requires<[IsNotPIC, HasAddr64]>;
+
+def : Pat<(i32 (WebAssemblyWrapper tglobaladdr:$addr)),
+ (GLOBAL_GET_I32 tglobaladdr:$addr)>, Requires<[IsPIC, HasAddr32]>;
+def : Pat<(i64 (WebAssemblyWrapper tglobaladdr:$addr)),
+ (GLOBAL_GET_I64 tglobaladdr:$addr)>, Requires<[IsPIC, HasAddr64]>;
+
+def : Pat<(i32 (WebAssemblyWrapperREL tglobaladdr:$addr)),
+ (CONST_I32 tglobaladdr:$addr)>, Requires<[IsPIC, HasAddr32]>;
+def : Pat<(i64 (WebAssemblyWrapperREL tglobaladdr:$addr)),
+ (CONST_I64 tglobaladdr:$addr)>, Requires<[IsPIC, HasAddr64]>;
+
+def : Pat<(i32 (WebAssemblyWrapperREL tglobaltlsaddr:$addr)),
+ (CONST_I32 tglobaltlsaddr:$addr)>, Requires<[HasAddr32]>;
+def : Pat<(i64 (WebAssemblyWrapperREL tglobaltlsaddr:$addr)),
+ (CONST_I64 tglobaltlsaddr:$addr)>, Requires<[HasAddr64]>;
+
+def : Pat<(i32 (WebAssemblyWrapper tglobaltlsaddr:$addr)),
+ (GLOBAL_GET_I32 tglobaltlsaddr:$addr)>, Requires<[HasAddr32]>;
+def : Pat<(i64 (WebAssemblyWrapper tglobaltlsaddr:$addr)),
+ (GLOBAL_GET_I64 tglobaltlsaddr:$addr)>, Requires<[HasAddr64]>;
+
+def : Pat<(i32 (WebAssemblyWrapper texternalsym:$addr)),
+ (GLOBAL_GET_I32 texternalsym:$addr)>, Requires<[IsPIC, HasAddr32]>;
+def : Pat<(i64 (WebAssemblyWrapper texternalsym:$addr)),
+ (GLOBAL_GET_I64 texternalsym:$addr)>, Requires<[IsPIC, HasAddr64]>;
+
+def : Pat<(i32 (WebAssemblyWrapper texternalsym:$addr)),
+ (CONST_I32 texternalsym:$addr)>, Requires<[IsNotPIC, HasAddr32]>;
+def : Pat<(i64 (WebAssemblyWrapper texternalsym:$addr)),
+ (CONST_I64 texternalsym:$addr)>, Requires<[IsNotPIC, HasAddr64]>;
+
+def : Pat<(i32 (WebAssemblyWrapperREL texternalsym:$addr)),
+ (CONST_I32 texternalsym:$addr)>, Requires<[IsPIC, HasAddr32]>;
+def : Pat<(i64 (WebAssemblyWrapperREL texternalsym:$addr)),
+ (CONST_I64 texternalsym:$addr)>, Requires<[IsPIC, HasAddr64]>;
+
+//===----------------------------------------------------------------------===//
+// Additional sets of instructions.
+//===----------------------------------------------------------------------===//
+
+include "WebAssemblyInstrMemory.td"
+include "WebAssemblyInstrCall.td"
+include "WebAssemblyInstrControl.td"
+include "WebAssemblyInstrInteger.td"
+include "WebAssemblyInstrConv.td"
+include "WebAssemblyInstrFloat.td"
+include "WebAssemblyInstrAtomics.td"
+include "WebAssemblyInstrSIMD.td"
+include "WebAssemblyInstrRef.td"
+include "WebAssemblyInstrBulkMemory.td"
+include "WebAssemblyInstrTable.td"
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td
new file mode 100644
index 000000000000..7a0c524d63b0
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td
@@ -0,0 +1,131 @@
+// WebAssemblyInstrInteger.td-WebAssembly Integer codegen -------*- tablegen -*-
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// WebAssembly Integer operand code-gen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+multiclass UnaryInt<SDNode node, string name, bits<32> i32Inst,
+ bits<32> i64Inst> {
+ defm _I32 : I<(outs I32:$dst), (ins I32:$src), (outs), (ins),
+ [(set I32:$dst, (node I32:$src))],
+ !strconcat("i32.", !strconcat(name, "\t$dst, $src")),
+ !strconcat("i32.", name), i32Inst>;
+ defm _I64 : I<(outs I64:$dst), (ins I64:$src), (outs), (ins),
+ [(set I64:$dst, (node I64:$src))],
+ !strconcat("i64.", !strconcat(name, "\t$dst, $src")),
+ !strconcat("i64.", name), i64Inst>;
+}
+multiclass BinaryInt<SDNode node, string name, bits<32> i32Inst,
+ bits<32> i64Inst> {
+ defm _I32 : I<(outs I32:$dst), (ins I32:$lhs, I32:$rhs), (outs), (ins),
+ [(set I32:$dst, (node I32:$lhs, I32:$rhs))],
+ !strconcat("i32.", !strconcat(name, "\t$dst, $lhs, $rhs")),
+ !strconcat("i32.", name), i32Inst>;
+ defm _I64 : I<(outs I64:$dst), (ins I64:$lhs, I64:$rhs), (outs), (ins),
+ [(set I64:$dst, (node I64:$lhs, I64:$rhs))],
+ !strconcat("i64.", !strconcat(name, "\t$dst, $lhs, $rhs")),
+ !strconcat("i64.", name), i64Inst>;
+}
+multiclass ComparisonInt<CondCode cond, string name, bits<32> i32Inst,
+                         bits<32> i64Inst> {
+ defm _I32 : I<(outs I32:$dst), (ins I32:$lhs, I32:$rhs), (outs), (ins),
+ [(set I32:$dst, (setcc I32:$lhs, I32:$rhs, cond))],
+ !strconcat("i32.", !strconcat(name, "\t$dst, $lhs, $rhs")),
+ !strconcat("i32.", name), i32Inst>;
+ defm _I64 : I<(outs I32:$dst), (ins I64:$lhs, I64:$rhs), (outs), (ins),
+ [(set I32:$dst, (setcc I64:$lhs, I64:$rhs, cond))],
+ !strconcat("i64.", !strconcat(name, "\t$dst, $lhs, $rhs")),
+ !strconcat("i64.", name), i64Inst>;
+}
+
+// The spaces after the names are for aesthetic purposes only, to make
+// operands line up vertically after tab expansion.
+let isCommutable = 1 in
+defm ADD : BinaryInt<add, "add ", 0x6a, 0x7c>;
+defm SUB : BinaryInt<sub, "sub ", 0x6b, 0x7d>;
+let isCommutable = 1 in
+defm MUL : BinaryInt<mul, "mul ", 0x6c, 0x7e>;
+// Divide and remainder trap on a zero denominator.
+let hasSideEffects = 1 in {
+defm DIV_S : BinaryInt<sdiv, "div_s", 0x6d, 0x7f>;
+defm DIV_U : BinaryInt<udiv, "div_u", 0x6e, 0x80>;
+defm REM_S : BinaryInt<srem, "rem_s", 0x6f, 0x81>;
+defm REM_U : BinaryInt<urem, "rem_u", 0x70, 0x82>;
+} // hasSideEffects = 1
+let isCommutable = 1 in {
+defm AND : BinaryInt<and, "and ", 0x71, 0x83>;
+defm OR : BinaryInt<or, "or ", 0x72, 0x84>;
+defm XOR : BinaryInt<xor, "xor ", 0x73, 0x85>;
+} // isCommutable = 1
+defm SHL : BinaryInt<shl, "shl ", 0x74, 0x86>;
+defm SHR_S : BinaryInt<sra, "shr_s", 0x75, 0x87>;
+defm SHR_U : BinaryInt<srl, "shr_u", 0x76, 0x88>;
+defm ROTL : BinaryInt<rotl, "rotl", 0x77, 0x89>;
+defm ROTR : BinaryInt<rotr, "rotr", 0x78, 0x8a>;
+
+let isCommutable = 1 in {
+defm EQ : ComparisonInt<SETEQ, "eq ", 0x46, 0x51>;
+defm NE : ComparisonInt<SETNE, "ne ", 0x47, 0x52>;
+} // isCommutable = 1
+defm LT_S : ComparisonInt<SETLT, "lt_s", 0x48, 0x53>;
+defm LT_U : ComparisonInt<SETULT, "lt_u", 0x49, 0x54>;
+defm GT_S : ComparisonInt<SETGT, "gt_s", 0x4a, 0x55>;
+defm GT_U : ComparisonInt<SETUGT, "gt_u", 0x4b, 0x56>;
+defm LE_S : ComparisonInt<SETLE, "le_s", 0x4c, 0x57>;
+defm LE_U : ComparisonInt<SETULE, "le_u", 0x4d, 0x58>;
+defm GE_S : ComparisonInt<SETGE, "ge_s", 0x4e, 0x59>;
+defm GE_U : ComparisonInt<SETUGE, "ge_u", 0x4f, 0x5a>;
+
+defm CLZ : UnaryInt<ctlz, "clz ", 0x67, 0x79>;
+defm CTZ : UnaryInt<cttz, "ctz ", 0x68, 0x7a>;
+defm POPCNT : UnaryInt<ctpop, "popcnt", 0x69, 0x7b>;
+
+defm EQZ_I32 : I<(outs I32:$dst), (ins I32:$src), (outs), (ins),
+ [(set I32:$dst, (setcc I32:$src, 0, SETEQ))],
+ "i32.eqz \t$dst, $src", "i32.eqz", 0x45>;
+defm EQZ_I64 : I<(outs I32:$dst), (ins I64:$src), (outs), (ins),
+ [(set I32:$dst, (setcc I64:$src, 0, SETEQ))],
+ "i64.eqz \t$dst, $src", "i64.eqz", 0x50>;
+
+// Optimize away an explicit mask on a shift count: wasm shifts interpret the
+// count modulo the bitwidth, so the mask is redundant.
+def : Pat<(shl I32:$lhs, (and I32:$rhs, 31)), (SHL_I32 I32:$lhs, I32:$rhs)>;
+def : Pat<(sra I32:$lhs, (and I32:$rhs, 31)), (SHR_S_I32 I32:$lhs, I32:$rhs)>;
+def : Pat<(srl I32:$lhs, (and I32:$rhs, 31)), (SHR_U_I32 I32:$lhs, I32:$rhs)>;
+def : Pat<(shl I64:$lhs, (and I64:$rhs, 63)), (SHL_I64 I64:$lhs, I64:$rhs)>;
+def : Pat<(sra I64:$lhs, (and I64:$rhs, 63)), (SHR_S_I64 I64:$lhs, I64:$rhs)>;
+def : Pat<(srl I64:$lhs, (and I64:$rhs, 63)), (SHR_U_I64 I64:$lhs, I64:$rhs)>;
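+// For example (sketch), C code like `x << (n & 31)` selects a bare i32.shl,
+// because WebAssembly shift instructions already mask the count to the
+// operand width.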
+
+// Optimize away an explicit mask on a rotate count.
+def : Pat<(rotl I32:$lhs, (and I32:$rhs, 31)), (ROTL_I32 I32:$lhs, I32:$rhs)>;
+def : Pat<(rotr I32:$lhs, (and I32:$rhs, 31)), (ROTR_I32 I32:$lhs, I32:$rhs)>;
+def : Pat<(rotl I64:$lhs, (and I64:$rhs, 63)), (ROTL_I64 I64:$lhs, I64:$rhs)>;
+def : Pat<(rotr I64:$lhs, (and I64:$rhs, 63)), (ROTR_I64 I64:$lhs, I64:$rhs)>;
+
+defm SELECT_I32 : I<(outs I32:$dst), (ins I32:$lhs, I32:$rhs, I32:$cond),
+ (outs), (ins),
+ [(set I32:$dst, (select I32:$cond, I32:$lhs, I32:$rhs))],
+ "i32.select\t$dst, $lhs, $rhs, $cond", "i32.select", 0x1b>;
+defm SELECT_I64 : I<(outs I64:$dst), (ins I64:$lhs, I64:$rhs, I32:$cond),
+ (outs), (ins),
+ [(set I64:$dst, (select I32:$cond, I64:$lhs, I64:$rhs))],
+ "i64.select\t$dst, $lhs, $rhs, $cond", "i64.select", 0x1b>;
+
+// ISD::SELECT requires its operand to conform to getBooleanContents, but
+// WebAssembly's select interprets any non-zero value as true, so we can fold
+// a setne with 0 into a select.
+def : Pat<(select (i32 (setne I32:$cond, 0)), I32:$lhs, I32:$rhs),
+ (SELECT_I32 I32:$lhs, I32:$rhs, I32:$cond)>;
+def : Pat<(select (i32 (setne I32:$cond, 0)), I64:$lhs, I64:$rhs),
+ (SELECT_I64 I64:$lhs, I64:$rhs, I32:$cond)>;
+
+// And again, this time with seteq instead of setne and the arms reversed.
+def : Pat<(select (i32 (seteq I32:$cond, 0)), I32:$lhs, I32:$rhs),
+ (SELECT_I32 I32:$rhs, I32:$lhs, I32:$cond)>;
+def : Pat<(select (i32 (seteq I32:$cond, 0)), I64:$lhs, I64:$rhs),
+ (SELECT_I64 I64:$rhs, I64:$lhs, I32:$cond)>;
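+
+// For example (an illustrative sketch, not an additional pattern), IR like
+//   %t = icmp ne i32 %c, 0
+//   %r = select i1 %t, i32 %a, i32 %b
+// selects a single i32.select with no explicit i32.ne feeding the condition.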
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td
new file mode 100644
index 000000000000..01c0909af72e
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td
@@ -0,0 +1,195 @@
+// WebAssemblyInstrMemory.td-WebAssembly Memory codegen support -*- tablegen -*-
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// WebAssembly Memory operand code-gen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+// TODO:
+// - WebAssemblyTargetLowering hooks having to do with atomics
+// - Each has optional alignment.
+
+// WebAssembly has i8/i16/i32/i64/f32/f64 memory types, but doesn't have i8/i16
+// local types. These memory-only types instead zero- or sign-extend into local
+// types when loading, and truncate when storing.
+
+// Address Operands
+
+// These patterns match the static (offset) and dynamic (address stack operand)
+// operands for loads and stores, based on a combination of target global
+// addresses and constants.
+// For example,
+// (load (add tga, x)) -> load offset=tga, addr=x
+// (store v, tga) -> store v, offset=tga, addr=0
+// (load (add const, x)) -> load offset=const, addr=x
+// (store v, const) -> store v, offset=const, addr=0
+// (load x) -> load offset=0, addr=x
+def AddrOps32 : ComplexPattern<i32, 2, "SelectAddrOperands32">;
+def AddrOps64 : ComplexPattern<i64, 2, "SelectAddrOperands64">;
+
+// Defines atomic and non-atomic loads, regular and extending.
+multiclass WebAssemblyLoad<WebAssemblyRegClass rc, string Name, int Opcode,
+ list<Predicate> reqs = []> {
+ let mayLoad = 1, UseNamedOperandTable = 1 in {
+ defm "_A32": I<(outs rc:$dst),
+ (ins P2Align:$p2align, offset32_op:$off, I32:$addr),
+ (outs), (ins P2Align:$p2align, offset32_op:$off),
+ [], !strconcat(Name, "\t$dst, ${off}(${addr})${p2align}"),
+ !strconcat(Name, "\t${off}${p2align}"), Opcode, false>,
+ Requires<reqs>;
+ defm "_A64": I<(outs rc:$dst),
+ (ins P2Align:$p2align, offset64_op:$off, I64:$addr),
+ (outs), (ins P2Align:$p2align, offset64_op:$off),
+ [], !strconcat(Name, "\t$dst, ${off}(${addr})${p2align}"),
+ !strconcat(Name, "\t${off}${p2align}"), Opcode, true>,
+ Requires<reqs>;
+ }
+}
+
+// Basic load.
+// FIXME: When we can break syntax compatibility, reorder the fields in the
+// asmstrings to match the binary encoding.
+defm LOAD_I32 : WebAssemblyLoad<I32, "i32.load", 0x28, []>;
+defm LOAD_I64 : WebAssemblyLoad<I64, "i64.load", 0x29, []>;
+defm LOAD_F32 : WebAssemblyLoad<F32, "f32.load", 0x2a, []>;
+defm LOAD_F64 : WebAssemblyLoad<F64, "f64.load", 0x2b, []>;
+
+// Extending load.
+defm LOAD8_S_I32 : WebAssemblyLoad<I32, "i32.load8_s", 0x2c, []>;
+defm LOAD8_U_I32 : WebAssemblyLoad<I32, "i32.load8_u", 0x2d, []>;
+defm LOAD16_S_I32 : WebAssemblyLoad<I32, "i32.load16_s", 0x2e, []>;
+defm LOAD16_U_I32 : WebAssemblyLoad<I32, "i32.load16_u", 0x2f, []>;
+defm LOAD8_S_I64 : WebAssemblyLoad<I64, "i64.load8_s", 0x30, []>;
+defm LOAD8_U_I64 : WebAssemblyLoad<I64, "i64.load8_u", 0x31, []>;
+defm LOAD16_S_I64 : WebAssemblyLoad<I64, "i64.load16_s", 0x32, []>;
+defm LOAD16_U_I64 : WebAssemblyLoad<I64, "i64.load16_u", 0x33, []>;
+defm LOAD32_S_I64 : WebAssemblyLoad<I64, "i64.load32_s", 0x34, []>;
+defm LOAD32_U_I64 : WebAssemblyLoad<I64, "i64.load32_u", 0x35, []>;
+
+// Pattern matching
+
+multiclass LoadPat<ValueType ty, SDPatternOperator kind, string Name> {
+ def : Pat<(ty (kind (AddrOps32 offset32_op:$offset, I32:$addr))),
+ (!cast<NI>(Name # "_A32") 0,
+ offset32_op:$offset,
+ I32:$addr)>,
+ Requires<[HasAddr32]>;
+
+ def : Pat<(ty (kind (AddrOps64 offset64_op:$offset, I64:$addr))),
+ (!cast<NI>(Name # "_A64") 0,
+ offset64_op:$offset,
+ I64:$addr)>,
+ Requires<[HasAddr64]>;
+}
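+// Illustratively (a sketch of the TableGen expansion, not extra input), the
+// first instantiation below, `defm : LoadPat<i32, load, "LOAD_I32">`, yields
+// roughly:
+//   def : Pat<(i32 (load (AddrOps32 offset32_op:$offset, I32:$addr))),
+//             (LOAD_I32_A32 0, offset32_op:$offset, I32:$addr)>;
+// where the leading 0 is the default p2align, plus an analogous _A64 pattern.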
+
+defm : LoadPat<i32, load, "LOAD_I32">;
+defm : LoadPat<i64, load, "LOAD_I64">;
+defm : LoadPat<f32, load, "LOAD_F32">;
+defm : LoadPat<f64, load, "LOAD_F64">;
+
+defm : LoadPat<i32, sextloadi8, "LOAD8_S_I32">;
+defm : LoadPat<i32, sextloadi16, "LOAD16_S_I32">;
+defm : LoadPat<i64, sextloadi8, "LOAD8_S_I64">;
+defm : LoadPat<i64, sextloadi16, "LOAD16_S_I64">;
+defm : LoadPat<i64, sextloadi32, "LOAD32_S_I64">;
+
+defm : LoadPat<i32, zextloadi8, "LOAD8_U_I32">;
+defm : LoadPat<i32, zextloadi16, "LOAD16_U_I32">;
+defm : LoadPat<i64, zextloadi8, "LOAD8_U_I64">;
+defm : LoadPat<i64, zextloadi16, "LOAD16_U_I64">;
+defm : LoadPat<i64, zextloadi32, "LOAD32_U_I64">;
+
+defm : LoadPat<i32, extloadi8, "LOAD8_U_I32">;
+defm : LoadPat<i32, extloadi16, "LOAD16_U_I32">;
+defm : LoadPat<i64, extloadi8, "LOAD8_U_I64">;
+defm : LoadPat<i64, extloadi16, "LOAD16_U_I64">;
+defm : LoadPat<i64, extloadi32, "LOAD32_U_I64">;
+
+// Defines atomic and non-atomic stores, regular and truncating.
+multiclass WebAssemblyStore<WebAssemblyRegClass rc, string Name, int Opcode,
+ list<Predicate> reqs = []> {
+ let mayStore = 1, UseNamedOperandTable = 1 in
+ defm "_A32" : I<(outs),
+ (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$val),
+ (outs),
+ (ins P2Align:$p2align, offset32_op:$off), [],
+ !strconcat(Name, "\t${off}(${addr})${p2align}, $val"),
+ !strconcat(Name, "\t${off}${p2align}"), Opcode, false>,
+ Requires<reqs>;
+ let mayStore = 1, UseNamedOperandTable = 1 in
+ defm "_A64" : I<(outs),
+ (ins P2Align:$p2align, offset64_op:$off, I64:$addr, rc:$val),
+ (outs),
+ (ins P2Align:$p2align, offset64_op:$off), [],
+ !strconcat(Name, "\t${off}(${addr})${p2align}, $val"),
+ !strconcat(Name, "\t${off}${p2align}"), Opcode, true>,
+ Requires<reqs>;
+}
+
+// Basic store.
+// Note: WebAssembly inverts SelectionDAG's usual operand order.
+defm STORE_I32 : WebAssemblyStore<I32, "i32.store", 0x36>;
+defm STORE_I64 : WebAssemblyStore<I64, "i64.store", 0x37>;
+defm STORE_F32 : WebAssemblyStore<F32, "f32.store", 0x38>;
+defm STORE_F64 : WebAssemblyStore<F64, "f64.store", 0x39>;
+
+multiclass StorePat<ValueType ty, SDPatternOperator kind, string Name> {
+ def : Pat<(kind ty:$val, (AddrOps32 offset32_op:$offset, I32:$addr)),
+ (!cast<NI>(Name # "_A32") 0,
+ offset32_op:$offset,
+ I32:$addr,
+ ty:$val)>,
+ Requires<[HasAddr32]>;
+ def : Pat<(kind ty:$val, (AddrOps64 offset64_op:$offset, I64:$addr)),
+ (!cast<NI>(Name # "_A64") 0,
+ offset64_op:$offset,
+ I64:$addr,
+ ty:$val)>,
+ Requires<[HasAddr64]>;
+}
+
+defm : StorePat<i32, store, "STORE_I32">;
+defm : StorePat<i64, store, "STORE_I64">;
+defm : StorePat<f32, store, "STORE_F32">;
+defm : StorePat<f64, store, "STORE_F64">;
+
+// Truncating store.
+defm STORE8_I32 : WebAssemblyStore<I32, "i32.store8", 0x3a>;
+defm STORE16_I32 : WebAssemblyStore<I32, "i32.store16", 0x3b>;
+defm STORE8_I64 : WebAssemblyStore<I64, "i64.store8", 0x3c>;
+defm STORE16_I64 : WebAssemblyStore<I64, "i64.store16", 0x3d>;
+defm STORE32_I64 : WebAssemblyStore<I64, "i64.store32", 0x3e>;
+
+defm : StorePat<i32, truncstorei8, "STORE8_I32">;
+defm : StorePat<i32, truncstorei16, "STORE16_I32">;
+defm : StorePat<i64, truncstorei8, "STORE8_I64">;
+defm : StorePat<i64, truncstorei16, "STORE16_I64">;
+defm : StorePat<i64, truncstorei32, "STORE32_I64">;
+
+multiclass MemoryOps<WebAssemblyRegClass rc, string B> {
+// Current memory size.
+defm MEMORY_SIZE_A#B : I<(outs rc:$dst), (ins i32imm:$flags),
+ (outs), (ins i32imm:$flags),
+ [(set rc:$dst,
+ (int_wasm_memory_size (i32 imm:$flags)))],
+ "memory.size\t$dst, $flags", "memory.size\t$flags",
+ 0x3f>;
+
+// Grow memory.
+defm MEMORY_GROW_A#B : I<(outs rc:$dst), (ins i32imm:$flags, rc:$delta),
+ (outs), (ins i32imm:$flags),
+ [(set rc:$dst,
+ (int_wasm_memory_grow (i32 imm:$flags),
+ rc:$delta))],
+ "memory.grow\t$dst, $flags, $delta",
+ "memory.grow\t$flags", 0x40>;
+}
+
+defm : MemoryOps<I32, "32">;
+defm : MemoryOps<I64, "64">;
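+
+// From C, these are typically reached through clang's builtins, e.g. (sketch;
+// the result type follows the address width):
+//   size_t pages = __builtin_wasm_memory_size(0);     // memory.size
+//   size_t prev  = __builtin_wasm_memory_grow(0, 16); // grow by 16 pages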
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrRef.td b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrRef.td
new file mode 100644
index 000000000000..608963d58863
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrRef.td
@@ -0,0 +1,46 @@
+// WebAssemblyInstrRef.td - WebAssembly reference type codegen --*- tablegen -*-
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// WebAssembly reference type operand codegen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+multiclass REF_I<WebAssemblyRegClass rc, ValueType vt, string ht> {
+ defm REF_NULL_#rc : I<(outs rc:$dst), (ins),
+ (outs), (ins),
+ [(set rc:$dst, (!cast<Intrinsic>("int_wasm_ref_null_" # ht)))],
+ "ref.null_" # ht # "$dst",
+ "ref.null_" # ht,
+ !cond(!eq(ht, "func") : 0xd070,
+ !eq(ht, "extern") : 0xd06f)>,
+ Requires<[HasReferenceTypes]>;
+ defm SELECT_#rc: I<(outs rc:$dst), (ins rc:$lhs, rc:$rhs, I32:$cond),
+ (outs), (ins),
+ [(set rc:$dst,
+ (select I32:$cond, rc:$lhs, rc:$rhs))],
+ vt#".select\t$dst, $lhs, $rhs, $cond",
+ vt#".select", 0x1b>,
+ Requires<[HasReferenceTypes]>;
+ defm REF_IS_NULL_#rc
+ : I<(outs I32:$dst), (ins rc:$ref), (outs), (ins),
+ [(set I32:$dst, (!cast<Intrinsic>("int_wasm_ref_is_null_" # ht) rc:$ref))],
+ "ref.is_null\t$ref",
+ "ref.is_null", 0xd1>,
+ Requires<[HasReferenceTypes]>;
+}
+
+defm "" : REF_I<FUNCREF, funcref, "func">;
+defm "" : REF_I<EXTERNREF, externref, "extern">;
+
+foreach rc = [FUNCREF, EXTERNREF] in {
+def : Pat<(select (i32 (setne I32:$cond, 0)), rc:$lhs, rc:$rhs),
+ (!cast<Instruction>("SELECT_"#rc) rc:$lhs, rc:$rhs, I32:$cond)>;
+def : Pat<(select (i32 (seteq I32:$cond, 0)), rc:$lhs, rc:$rhs),
+ (!cast<Instruction>("SELECT_"#rc) rc:$rhs, rc:$lhs, I32:$cond)>;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
new file mode 100644
index 000000000000..8cd41d7017a0
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
@@ -0,0 +1,1505 @@
+// WebAssemblyInstrSIMD.td - WebAssembly SIMD codegen support -*- tablegen -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// WebAssembly SIMD operand code-gen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+// Instructions using the SIMD opcode prefix and requiring one of the SIMD
+// feature predicates.
+multiclass ABSTRACT_SIMD_I<dag oops_r, dag iops_r, dag oops_s, dag iops_s,
+ list<dag> pattern_r, string asmstr_r,
+ string asmstr_s, bits<32> simdop,
+ Predicate simd_level> {
+ defm "" : I<oops_r, iops_r, oops_s, iops_s, pattern_r, asmstr_r, asmstr_s,
+ !if(!ge(simdop, 0x100),
+ !or(0xfd0000, !and(0xffff, simdop)),
+ !or(0xfd00, !and(0xff, simdop)))>,
+ Requires<[simd_level]>;
+}
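+// Encoding sketch: the !if above places a one-byte simdop after the 0xfd
+// prefix (e.g. simdop 14, i8x16.swizzle, encodes as 0xfd0e), while simdops
+// >= 0x100 keep two bytes after the prefix (e.g. 0x102 becomes 0xfd0102).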
+
+multiclass SIMD_I<dag oops_r, dag iops_r, dag oops_s, dag iops_s,
+ list<dag> pattern_r, string asmstr_r = "",
+ string asmstr_s = "", bits<32> simdop = -1> {
+ defm "" : ABSTRACT_SIMD_I<oops_r, iops_r, oops_s, iops_s, pattern_r, asmstr_r,
+ asmstr_s, simdop, HasSIMD128>;
+}
+
+multiclass RELAXED_I<dag oops_r, dag iops_r, dag oops_s, dag iops_s,
+ list<dag> pattern_r, string asmstr_r = "",
+ string asmstr_s = "", bits<32> simdop = -1> {
+ defm "" : ABSTRACT_SIMD_I<oops_r, iops_r, oops_s, iops_s, pattern_r, asmstr_r,
+ asmstr_s, simdop, HasRelaxedSIMD>;
+}
+
+
+defm "" : ARGUMENT<V128, v16i8>;
+defm "" : ARGUMENT<V128, v8i16>;
+defm "" : ARGUMENT<V128, v4i32>;
+defm "" : ARGUMENT<V128, v2i64>;
+defm "" : ARGUMENT<V128, v4f32>;
+defm "" : ARGUMENT<V128, v2f64>;
+
+// Constrained immediate argument types
+foreach SIZE = [8, 16] in
+def ImmI#SIZE : ImmLeaf<i32,
+ "return -(1 << ("#SIZE#" - 1)) <= Imm && Imm < (1 << ("#SIZE#" - 1));"
+>;
+foreach SIZE = [2, 4, 8, 16, 32] in
+def LaneIdx#SIZE : ImmLeaf<i32, "return 0 <= Imm && Imm < "#SIZE#";">;
+
+class Vec {
+ ValueType vt;
+ ValueType int_vt;
+ ValueType lane_vt;
+ WebAssemblyRegClass lane_rc;
+ int lane_bits;
+ ImmLeaf lane_idx;
+ SDPatternOperator lane_load;
+ PatFrag splat;
+ string prefix;
+ Vec split;
+}
+
+def I8x16 : Vec {
+ let vt = v16i8;
+ let int_vt = vt;
+ let lane_vt = i32;
+ let lane_rc = I32;
+ let lane_bits = 8;
+ let lane_idx = LaneIdx16;
+ let lane_load = extloadi8;
+ let splat = PatFrag<(ops node:$x), (v16i8 (splat_vector (i8 $x)))>;
+ let prefix = "i8x16";
+}
+
+def I16x8 : Vec {
+ let vt = v8i16;
+ let int_vt = vt;
+ let lane_vt = i32;
+ let lane_rc = I32;
+ let lane_bits = 16;
+ let lane_idx = LaneIdx8;
+ let lane_load = extloadi16;
+ let splat = PatFrag<(ops node:$x), (v8i16 (splat_vector (i16 $x)))>;
+ let prefix = "i16x8";
+ let split = I8x16;
+}
+
+def I32x4 : Vec {
+ let vt = v4i32;
+ let int_vt = vt;
+ let lane_vt = i32;
+ let lane_rc = I32;
+ let lane_bits = 32;
+ let lane_idx = LaneIdx4;
+ let lane_load = load;
+ let splat = PatFrag<(ops node:$x), (v4i32 (splat_vector (i32 $x)))>;
+ let prefix = "i32x4";
+ let split = I16x8;
+}
+
+def I64x2 : Vec {
+ let vt = v2i64;
+ let int_vt = vt;
+ let lane_vt = i64;
+ let lane_rc = I64;
+ let lane_bits = 64;
+ let lane_idx = LaneIdx2;
+ let lane_load = load;
+ let splat = PatFrag<(ops node:$x), (v2i64 (splat_vector (i64 $x)))>;
+ let prefix = "i64x2";
+ let split = I32x4;
+}
+
+def F32x4 : Vec {
+ let vt = v4f32;
+ let int_vt = v4i32;
+ let lane_vt = f32;
+ let lane_rc = F32;
+ let lane_bits = 32;
+ let lane_idx = LaneIdx4;
+ let lane_load = load;
+ let splat = PatFrag<(ops node:$x), (v4f32 (splat_vector (f32 $x)))>;
+ let prefix = "f32x4";
+}
+
+def F64x2 : Vec {
+ let vt = v2f64;
+ let int_vt = v2i64;
+ let lane_vt = f64;
+ let lane_rc = F64;
+ let lane_bits = 64;
+ let lane_idx = LaneIdx2;
+ let lane_load = load;
+ let splat = PatFrag<(ops node:$x), (v2f64 (splat_vector (f64 $x)))>;
+ let prefix = "f64x2";
+}
+
+defvar AllVecs = [I8x16, I16x8, I32x4, I64x2, F32x4, F64x2];
+defvar IntVecs = [I8x16, I16x8, I32x4, I64x2];
+
+//===----------------------------------------------------------------------===//
+// Load and store
+//===----------------------------------------------------------------------===//
+
+// Load: v128.load
+let mayLoad = 1, UseNamedOperandTable = 1 in {
+defm LOAD_V128_A32 :
+ SIMD_I<(outs V128:$dst), (ins P2Align:$p2align, offset32_op:$off, I32:$addr),
+ (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+ "v128.load\t$dst, ${off}(${addr})$p2align",
+ "v128.load\t$off$p2align", 0>;
+defm LOAD_V128_A64 :
+ SIMD_I<(outs V128:$dst), (ins P2Align:$p2align, offset64_op:$off, I64:$addr),
+ (outs), (ins P2Align:$p2align, offset64_op:$off), [],
+ "v128.load\t$dst, ${off}(${addr})$p2align",
+ "v128.load\t$off$p2align", 0>;
+}
+
+// Def load patterns from WebAssemblyInstrMemory.td for vector types
+foreach vec = AllVecs in {
+defm : LoadPat<vec.vt, load, "LOAD_V128">;
+}
+
+// v128.loadX_splat
+multiclass SIMDLoadSplat<int size, bits<32> simdop> {
+ let mayLoad = 1, UseNamedOperandTable = 1 in {
+ defm LOAD#size#_SPLAT_A32 :
+ SIMD_I<(outs V128:$dst),
+ (ins P2Align:$p2align, offset32_op:$off, I32:$addr),
+ (outs),
+ (ins P2Align:$p2align, offset32_op:$off), [],
+ "v128.load"#size#"_splat\t$dst, ${off}(${addr})$p2align",
+ "v128.load"#size#"_splat\t$off$p2align", simdop>;
+ defm LOAD#size#_SPLAT_A64 :
+ SIMD_I<(outs V128:$dst),
+ (ins P2Align:$p2align, offset64_op:$off, I64:$addr),
+ (outs),
+ (ins P2Align:$p2align, offset64_op:$off), [],
+ "v128.load"#size#"_splat\t$dst, ${off}(${addr})$p2align",
+ "v128.load"#size#"_splat\t$off$p2align", simdop>;
+ }
+}
+
+defm "" : SIMDLoadSplat<8, 7>;
+defm "" : SIMDLoadSplat<16, 8>;
+defm "" : SIMDLoadSplat<32, 9>;
+defm "" : SIMDLoadSplat<64, 10>;
+
+foreach vec = AllVecs in {
+ defvar inst = "LOAD"#vec.lane_bits#"_SPLAT";
+ defm : LoadPat<vec.vt,
+ PatFrag<(ops node:$addr), (splat_vector (vec.lane_vt (vec.lane_load node:$addr)))>,
+ inst>;
+}
+
+// Load and extend
+multiclass SIMDLoadExtend<Vec vec, string loadPat, bits<32> simdop> {
+ defvar signed = vec.prefix#".load"#loadPat#"_s";
+ defvar unsigned = vec.prefix#".load"#loadPat#"_u";
+ let mayLoad = 1, UseNamedOperandTable = 1 in {
+ defm LOAD_EXTEND_S_#vec#_A32 :
+ SIMD_I<(outs V128:$dst),
+ (ins P2Align:$p2align, offset32_op:$off, I32:$addr),
+ (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+ signed#"\t$dst, ${off}(${addr})$p2align",
+ signed#"\t$off$p2align", simdop>;
+ defm LOAD_EXTEND_U_#vec#_A32 :
+ SIMD_I<(outs V128:$dst),
+ (ins P2Align:$p2align, offset32_op:$off, I32:$addr),
+ (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+ unsigned#"\t$dst, ${off}(${addr})$p2align",
+ unsigned#"\t$off$p2align", !add(simdop, 1)>;
+ defm LOAD_EXTEND_S_#vec#_A64 :
+ SIMD_I<(outs V128:$dst),
+ (ins P2Align:$p2align, offset64_op:$off, I64:$addr),
+ (outs), (ins P2Align:$p2align, offset64_op:$off), [],
+ signed#"\t$dst, ${off}(${addr})$p2align",
+ signed#"\t$off$p2align", simdop>;
+ defm LOAD_EXTEND_U_#vec#_A64 :
+ SIMD_I<(outs V128:$dst),
+ (ins P2Align:$p2align, offset64_op:$off, I64:$addr),
+ (outs), (ins P2Align:$p2align, offset64_op:$off), [],
+ unsigned#"\t$dst, ${off}(${addr})$p2align",
+ unsigned#"\t$off$p2align", !add(simdop, 1)>;
+ }
+}
+
+defm "" : SIMDLoadExtend<I16x8, "8x8", 1>;
+defm "" : SIMDLoadExtend<I32x4, "16x4", 3>;
+defm "" : SIMDLoadExtend<I64x2, "32x2", 5>;
+
+foreach vec = [I16x8, I32x4, I64x2] in
+foreach exts = [["sextloadvi", "_S"],
+ ["zextloadvi", "_U"],
+ ["extloadvi", "_U"]] in {
+defvar loadpat = !cast<PatFrag>(exts[0]#vec.split.lane_bits);
+defvar inst = "LOAD_EXTEND"#exts[1]#"_"#vec;
+defm : LoadPat<vec.vt, loadpat, inst>;
+}
+
+// Load lane into zero vector
+multiclass SIMDLoadZero<Vec vec, bits<32> simdop> {
+ defvar name = "v128.load"#vec.lane_bits#"_zero";
+ let mayLoad = 1, UseNamedOperandTable = 1 in {
+ defm LOAD_ZERO_#vec#_A32 :
+ SIMD_I<(outs V128:$dst),
+ (ins P2Align:$p2align, offset32_op:$off, I32:$addr),
+ (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+ name#"\t$dst, ${off}(${addr})$p2align",
+ name#"\t$off$p2align", simdop>;
+ defm LOAD_ZERO_#vec#_A64 :
+ SIMD_I<(outs V128:$dst),
+ (ins P2Align:$p2align, offset64_op:$off, I64:$addr),
+ (outs), (ins P2Align:$p2align, offset64_op:$off), [],
+ name#"\t$dst, ${off}(${addr})$p2align",
+ name#"\t$off$p2align", simdop>;
+ } // mayLoad = 1, UseNamedOperandTable = 1
+}
+
+defm "" : SIMDLoadZero<I32x4, 0x5c>;
+defm "" : SIMDLoadZero<I64x2, 0x5d>;
+
+// Where possible, also use load_zero to load scalars into vectors.
+// TODO: i16 and i8 scalars
+foreach vec = [I32x4, I64x2] in {
+ defvar inst = "LOAD_ZERO_"#vec;
+ defvar pat = PatFrag<(ops node:$addr), (scalar_to_vector (vec.lane_vt (load $addr)))>;
+ defm : LoadPat<vec.vt, pat, inst>;
+}
+
+// TODO: f32x4 and f64x2 as well
+foreach vec = [I32x4, I64x2] in {
+ defvar inst = "LOAD_ZERO_"#vec;
+ defvar pat = PatFrag<(ops node:$ptr),
+ (vector_insert (vec.splat (vec.lane_vt 0)), (vec.lane_vt (load $ptr)), 0)>;
+ defm : LoadPat<vec.vt, pat, inst>;
+}
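+// For example (sketch), inserting a loaded i32 into lane 0 of a zero vector,
+//   %v = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
+// with %x taken from a load, matches the pattern above and selects
+// v128.load32_zero.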
+
+// Load lane
+multiclass SIMDLoadLane<Vec vec, bits<32> simdop> {
+ defvar name = "v128.load"#vec.lane_bits#"_lane";
+ let mayLoad = 1, UseNamedOperandTable = 1 in {
+ defm LOAD_LANE_#vec#_A32 :
+ SIMD_I<(outs V128:$dst),
+ (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx,
+ I32:$addr, V128:$vec),
+ (outs), (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx),
+ [], name#"\t$dst, ${off}(${addr})$p2align, $vec, $idx",
+ name#"\t$off$p2align, $idx", simdop>;
+ defm LOAD_LANE_#vec#_A64 :
+ SIMD_I<(outs V128:$dst),
+ (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx,
+ I64:$addr, V128:$vec),
+ (outs), (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx),
+ [], name#"\t$dst, ${off}(${addr})$p2align, $vec, $idx",
+ name#"\t$off$p2align, $idx", simdop>;
+ } // mayLoad = 1, UseNamedOperandTable = 1
+}
+
+defm "" : SIMDLoadLane<I8x16, 0x54>;
+defm "" : SIMDLoadLane<I16x8, 0x55>;
+defm "" : SIMDLoadLane<I32x4, 0x56>;
+defm "" : SIMDLoadLane<I64x2, 0x57>;
+
+// Select loads with no constant offset.
+multiclass LoadLanePatNoOffset<Vec vec, SDPatternOperator kind> {
+ defvar load_lane_a32 = !cast<NI>("LOAD_LANE_"#vec#"_A32");
+ defvar load_lane_a64 = !cast<NI>("LOAD_LANE_"#vec#"_A64");
+ def : Pat<(vec.vt (kind (i32 I32:$addr),
+ (vec.vt V128:$vec), (i32 vec.lane_idx:$idx))),
+ (load_lane_a32 0, 0, imm:$idx, $addr, $vec)>,
+ Requires<[HasAddr32]>;
+ def : Pat<(vec.vt (kind (i64 I64:$addr),
+ (vec.vt V128:$vec), (i32 vec.lane_idx:$idx))),
+ (load_lane_a64 0, 0, imm:$idx, $addr, $vec)>,
+ Requires<[HasAddr64]>;
+}
+
+def load8_lane :
+ PatFrag<(ops node:$ptr, node:$vec, node:$idx),
+ (vector_insert $vec, (i32 (extloadi8 $ptr)), $idx)>;
+def load16_lane :
+ PatFrag<(ops node:$ptr, node:$vec, node:$idx),
+ (vector_insert $vec, (i32 (extloadi16 $ptr)), $idx)>;
+def load32_lane :
+ PatFrag<(ops node:$ptr, node:$vec, node:$idx),
+ (vector_insert $vec, (i32 (load $ptr)), $idx)>;
+def load64_lane :
+ PatFrag<(ops node:$ptr, node:$vec, node:$idx),
+ (vector_insert $vec, (i64 (load $ptr)), $idx)>;
+// TODO: floating point lanes as well
+
+defm : LoadLanePatNoOffset<I8x16, load8_lane>;
+defm : LoadLanePatNoOffset<I16x8, load16_lane>;
+defm : LoadLanePatNoOffset<I32x4, load32_lane>;
+defm : LoadLanePatNoOffset<I64x2, load64_lane>;
+
+// TODO: Also support the other load patterns for load_lane once the
+// instructions are merged into the proposal.
+
+// Store: v128.store
+let mayStore = 1, UseNamedOperandTable = 1 in {
+defm STORE_V128_A32 :
+ SIMD_I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, V128:$vec),
+ (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+ "v128.store\t${off}(${addr})$p2align, $vec",
+ "v128.store\t$off$p2align", 11>;
+defm STORE_V128_A64 :
+ SIMD_I<(outs), (ins P2Align:$p2align, offset64_op:$off, I64:$addr, V128:$vec),
+ (outs), (ins P2Align:$p2align, offset64_op:$off), [],
+ "v128.store\t${off}(${addr})$p2align, $vec",
+ "v128.store\t$off$p2align", 11>;
+}
+
+// Def store patterns from WebAssemblyInstrMemory.td for vector types
+foreach vec = AllVecs in {
+defm : StorePat<vec.vt, store, "STORE_V128">;
+}
+
+// Store lane
+multiclass SIMDStoreLane<Vec vec, bits<32> simdop> {
+ defvar name = "v128.store"#vec.lane_bits#"_lane";
+ let mayStore = 1, UseNamedOperandTable = 1 in {
+ defm STORE_LANE_#vec#_A32 :
+ SIMD_I<(outs),
+ (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx,
+ I32:$addr, V128:$vec),
+ (outs), (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx),
+ [], name#"\t${off}(${addr})$p2align, $vec, $idx",
+ name#"\t$off$p2align, $idx", simdop>;
+ defm STORE_LANE_#vec#_A64 :
+ SIMD_I<(outs),
+ (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx,
+ I64:$addr, V128:$vec),
+ (outs), (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx),
+ [], name#"\t${off}(${addr})$p2align, $vec, $idx",
+ name#"\t$off$p2align, $idx", simdop>;
+ } // mayStore = 1, UseNamedOperandTable = 1
+}
+
+defm "" : SIMDStoreLane<I8x16, 0x58>;
+defm "" : SIMDStoreLane<I16x8, 0x59>;
+defm "" : SIMDStoreLane<I32x4, 0x5a>;
+defm "" : SIMDStoreLane<I64x2, 0x5b>;
+
+multiclass StoreLanePat<Vec vec, SDPatternOperator kind> {
+ def : Pat<(kind (AddrOps32 offset32_op:$offset, I32:$addr),
+ (vec.vt V128:$vec),
+ (i32 vec.lane_idx:$idx)),
+ (!cast<NI>("STORE_LANE_"#vec#"_A32") 0, $offset, imm:$idx, $addr, $vec)>,
+ Requires<[HasAddr32]>;
+ def : Pat<(kind (AddrOps64 offset64_op:$offset, I64:$addr),
+ (vec.vt V128:$vec),
+ (i32 vec.lane_idx:$idx)),
+ (!cast<NI>("STORE_LANE_"#vec#"_A64") 0, $offset, imm:$idx, $addr, $vec)>,
+ Requires<[HasAddr64]>;
+}
+
+def store8_lane :
+ PatFrag<(ops node:$ptr, node:$vec, node:$idx),
+ (truncstorei8 (i32 (vector_extract $vec, $idx)), $ptr)>;
+def store16_lane :
+ PatFrag<(ops node:$ptr, node:$vec, node:$idx),
+ (truncstorei16 (i32 (vector_extract $vec, $idx)), $ptr)>;
+def store32_lane :
+ PatFrag<(ops node:$ptr, node:$vec, node:$idx),
+ (store (i32 (vector_extract $vec, $idx)), $ptr)>;
+def store64_lane :
+ PatFrag<(ops node:$ptr, node:$vec, node:$idx),
+ (store (i64 (vector_extract $vec, $idx)), $ptr)>;
+// TODO: floating point lanes as well
+
+let AddedComplexity = 1 in {
+defm : StoreLanePat<I8x16, store8_lane>;
+defm : StoreLanePat<I16x8, store16_lane>;
+defm : StoreLanePat<I32x4, store32_lane>;
+defm : StoreLanePat<I64x2, store64_lane>;
+}
+
+//===----------------------------------------------------------------------===//
+// Constructing SIMD values
+//===----------------------------------------------------------------------===//
+
+// Constant: v128.const
+multiclass ConstVec<Vec vec, dag ops, dag pat, string args> {
+ let isMoveImm = 1, isReMaterializable = 1 in
+ defm CONST_V128_#vec : SIMD_I<(outs V128:$dst), ops, (outs), ops,
+ [(set V128:$dst, (vec.vt pat))],
+ "v128.const\t$dst, "#args,
+ "v128.const\t"#args, 12>;
+}
+
+defm "" : ConstVec<I8x16,
+ (ins vec_i8imm_op:$i0, vec_i8imm_op:$i1,
+ vec_i8imm_op:$i2, vec_i8imm_op:$i3,
+ vec_i8imm_op:$i4, vec_i8imm_op:$i5,
+ vec_i8imm_op:$i6, vec_i8imm_op:$i7,
+ vec_i8imm_op:$i8, vec_i8imm_op:$i9,
+ vec_i8imm_op:$iA, vec_i8imm_op:$iB,
+ vec_i8imm_op:$iC, vec_i8imm_op:$iD,
+ vec_i8imm_op:$iE, vec_i8imm_op:$iF),
+ (build_vector ImmI8:$i0, ImmI8:$i1, ImmI8:$i2, ImmI8:$i3,
+ ImmI8:$i4, ImmI8:$i5, ImmI8:$i6, ImmI8:$i7,
+ ImmI8:$i8, ImmI8:$i9, ImmI8:$iA, ImmI8:$iB,
+ ImmI8:$iC, ImmI8:$iD, ImmI8:$iE, ImmI8:$iF),
+ !strconcat("$i0, $i1, $i2, $i3, $i4, $i5, $i6, $i7, ",
+ "$i8, $i9, $iA, $iB, $iC, $iD, $iE, $iF")>;
+defm "" : ConstVec<I16x8,
+ (ins vec_i16imm_op:$i0, vec_i16imm_op:$i1,
+ vec_i16imm_op:$i2, vec_i16imm_op:$i3,
+ vec_i16imm_op:$i4, vec_i16imm_op:$i5,
+ vec_i16imm_op:$i6, vec_i16imm_op:$i7),
+ (build_vector
+ ImmI16:$i0, ImmI16:$i1, ImmI16:$i2, ImmI16:$i3,
+ ImmI16:$i4, ImmI16:$i5, ImmI16:$i6, ImmI16:$i7),
+ "$i0, $i1, $i2, $i3, $i4, $i5, $i6, $i7">;
+let IsCanonical = 1 in
+defm "" : ConstVec<I32x4,
+ (ins vec_i32imm_op:$i0, vec_i32imm_op:$i1,
+ vec_i32imm_op:$i2, vec_i32imm_op:$i3),
+ (build_vector (i32 imm:$i0), (i32 imm:$i1),
+ (i32 imm:$i2), (i32 imm:$i3)),
+ "$i0, $i1, $i2, $i3">;
+defm "" : ConstVec<I64x2,
+ (ins vec_i64imm_op:$i0, vec_i64imm_op:$i1),
+ (build_vector (i64 imm:$i0), (i64 imm:$i1)),
+ "$i0, $i1">;
+defm "" : ConstVec<F32x4,
+ (ins f32imm_op:$i0, f32imm_op:$i1,
+ f32imm_op:$i2, f32imm_op:$i3),
+ (build_vector (f32 fpimm:$i0), (f32 fpimm:$i1),
+ (f32 fpimm:$i2), (f32 fpimm:$i3)),
+ "$i0, $i1, $i2, $i3">;
+defm "" : ConstVec<F64x2,
+ (ins f64imm_op:$i0, f64imm_op:$i1),
+ (build_vector (f64 fpimm:$i0), (f64 fpimm:$i1)),
+ "$i0, $i1">;
+
+// Match splat(x) -> v128.const(x, ..., x)
+foreach vec = AllVecs in {
+ defvar numEls = !div(vec.vt.Size, vec.lane_bits);
+ defvar isFloat = !or(!eq(vec.lane_vt, f32), !eq(vec.lane_vt, f64));
+ defvar immKind = !if(isFloat, fpimm, imm);
+ def : Pat<(vec.splat (vec.lane_vt immKind:$x)),
+ !dag(!cast<NI>("CONST_V128_"#vec),
+ !listsplat((vec.lane_vt immKind:$x), numEls),
+ ?)>;
+}
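+// For example (sketch), (v4i32 (splat_vector (i32 5))) materializes directly
+// as `v128.const 5, 5, 5, 5` rather than an i32.const followed by
+// i32x4.splat.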
+
+// Shuffle lanes: shuffle
+defm SHUFFLE :
+ SIMD_I<(outs V128:$dst),
+ (ins V128:$x, V128:$y,
+ vec_i8imm_op:$m0, vec_i8imm_op:$m1,
+ vec_i8imm_op:$m2, vec_i8imm_op:$m3,
+ vec_i8imm_op:$m4, vec_i8imm_op:$m5,
+ vec_i8imm_op:$m6, vec_i8imm_op:$m7,
+ vec_i8imm_op:$m8, vec_i8imm_op:$m9,
+ vec_i8imm_op:$mA, vec_i8imm_op:$mB,
+ vec_i8imm_op:$mC, vec_i8imm_op:$mD,
+ vec_i8imm_op:$mE, vec_i8imm_op:$mF),
+ (outs),
+ (ins
+ vec_i8imm_op:$m0, vec_i8imm_op:$m1,
+ vec_i8imm_op:$m2, vec_i8imm_op:$m3,
+ vec_i8imm_op:$m4, vec_i8imm_op:$m5,
+ vec_i8imm_op:$m6, vec_i8imm_op:$m7,
+ vec_i8imm_op:$m8, vec_i8imm_op:$m9,
+ vec_i8imm_op:$mA, vec_i8imm_op:$mB,
+ vec_i8imm_op:$mC, vec_i8imm_op:$mD,
+ vec_i8imm_op:$mE, vec_i8imm_op:$mF),
+ [],
+ "i8x16.shuffle\t$dst, $x, $y, "#
+ "$m0, $m1, $m2, $m3, $m4, $m5, $m6, $m7, "#
+ "$m8, $m9, $mA, $mB, $mC, $mD, $mE, $mF",
+ "i8x16.shuffle\t"#
+ "$m0, $m1, $m2, $m3, $m4, $m5, $m6, $m7, "#
+ "$m8, $m9, $mA, $mB, $mC, $mD, $mE, $mF",
+ 13>;
+
+// Shuffles after custom lowering
+def wasm_shuffle_t : SDTypeProfile<1, 18, []>;
+def wasm_shuffle : SDNode<"WebAssemblyISD::SHUFFLE", wasm_shuffle_t>;
+foreach vec = AllVecs in {
+// The @llvm.wasm.shuffle intrinsic has immediate arguments that become
+// TargetConstants.
+def : Pat<(vec.vt (wasm_shuffle (vec.vt V128:$x), (vec.vt V128:$y),
+ (i32 timm:$m0), (i32 timm:$m1),
+ (i32 timm:$m2), (i32 timm:$m3),
+ (i32 timm:$m4), (i32 timm:$m5),
+ (i32 timm:$m6), (i32 timm:$m7),
+ (i32 timm:$m8), (i32 timm:$m9),
+ (i32 timm:$mA), (i32 timm:$mB),
+ (i32 timm:$mC), (i32 timm:$mD),
+ (i32 timm:$mE), (i32 timm:$mF))),
+ (SHUFFLE $x, $y,
+ imm:$m0, imm:$m1, imm:$m2, imm:$m3,
+ imm:$m4, imm:$m5, imm:$m6, imm:$m7,
+ imm:$m8, imm:$m9, imm:$mA, imm:$mB,
+ imm:$mC, imm:$mD, imm:$mE, imm:$mF)>;
+// Normal shufflevector instructions may have plain constant arguments.
+def : Pat<(vec.vt (wasm_shuffle (vec.vt V128:$x), (vec.vt V128:$y),
+ (i32 LaneIdx32:$m0), (i32 LaneIdx32:$m1),
+ (i32 LaneIdx32:$m2), (i32 LaneIdx32:$m3),
+ (i32 LaneIdx32:$m4), (i32 LaneIdx32:$m5),
+ (i32 LaneIdx32:$m6), (i32 LaneIdx32:$m7),
+ (i32 LaneIdx32:$m8), (i32 LaneIdx32:$m9),
+ (i32 LaneIdx32:$mA), (i32 LaneIdx32:$mB),
+ (i32 LaneIdx32:$mC), (i32 LaneIdx32:$mD),
+ (i32 LaneIdx32:$mE), (i32 LaneIdx32:$mF))),
+ (SHUFFLE $x, $y,
+ imm:$m0, imm:$m1, imm:$m2, imm:$m3,
+ imm:$m4, imm:$m5, imm:$m6, imm:$m7,
+ imm:$m8, imm:$m9, imm:$mA, imm:$mB,
+ imm:$mC, imm:$mD, imm:$mE, imm:$mF)>;
+}
+
+// Swizzle lanes: i8x16.swizzle
+def wasm_swizzle_t : SDTypeProfile<1, 2, []>;
+def wasm_swizzle : SDNode<"WebAssemblyISD::SWIZZLE", wasm_swizzle_t>;
+defm SWIZZLE :
+ SIMD_I<(outs V128:$dst), (ins V128:$src, V128:$mask), (outs), (ins),
+ [(set (v16i8 V128:$dst),
+ (wasm_swizzle (v16i8 V128:$src), (v16i8 V128:$mask)))],
+ "i8x16.swizzle\t$dst, $src, $mask", "i8x16.swizzle", 14>;
+
+def : Pat<(int_wasm_swizzle (v16i8 V128:$src), (v16i8 V128:$mask)),
+ (SWIZZLE $src, $mask)>;
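+
+// Semantics sketch: each output byte i of i8x16.swizzle is $src[$mask[i]],
+// and mask bytes >= 16 produce 0 rather than trapping.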
+
+multiclass Splat<Vec vec, bits<32> simdop> {
+ defm SPLAT_#vec : SIMD_I<(outs V128:$dst), (ins vec.lane_rc:$x),
+ (outs), (ins),
+ [(set (vec.vt V128:$dst),
+ (vec.splat vec.lane_rc:$x))],
+ vec.prefix#".splat\t$dst, $x", vec.prefix#".splat",
+ simdop>;
+}
+
+defm "" : Splat<I8x16, 15>;
+defm "" : Splat<I16x8, 16>;
+defm "" : Splat<I32x4, 17>;
+defm "" : Splat<I64x2, 18>;
+defm "" : Splat<F32x4, 19>;
+defm "" : Splat<F64x2, 20>;
+
+// scalar_to_vector leaves high lanes undefined, so it can be lowered as a
+// splat.
+foreach vec = AllVecs in
+def : Pat<(vec.vt (scalar_to_vector (vec.lane_vt vec.lane_rc:$x))),
+ (!cast<Instruction>("SPLAT_"#vec) $x)>;
+
+//===----------------------------------------------------------------------===//
+// Accessing lanes
+//===----------------------------------------------------------------------===//
+
+// Extract lane as a scalar: extract_lane / extract_lane_s / extract_lane_u
+multiclass ExtractLane<Vec vec, bits<32> simdop, string suffix = ""> {
+ defm EXTRACT_LANE_#vec#suffix :
+ SIMD_I<(outs vec.lane_rc:$dst), (ins V128:$vec, vec_i8imm_op:$idx),
+ (outs), (ins vec_i8imm_op:$idx), [],
+ vec.prefix#".extract_lane"#suffix#"\t$dst, $vec, $idx",
+ vec.prefix#".extract_lane"#suffix#"\t$idx", simdop>;
+}
+
+defm "" : ExtractLane<I8x16, 21, "_s">;
+defm "" : ExtractLane<I8x16, 22, "_u">;
+defm "" : ExtractLane<I16x8, 24, "_s">;
+defm "" : ExtractLane<I16x8, 25, "_u">;
+defm "" : ExtractLane<I32x4, 27>;
+defm "" : ExtractLane<I64x2, 29>;
+defm "" : ExtractLane<F32x4, 31>;
+defm "" : ExtractLane<F64x2, 33>;
+
+def : Pat<(vector_extract (v16i8 V128:$vec), (i32 LaneIdx16:$idx)),
+ (EXTRACT_LANE_I8x16_u $vec, imm:$idx)>;
+def : Pat<(vector_extract (v8i16 V128:$vec), (i32 LaneIdx8:$idx)),
+ (EXTRACT_LANE_I16x8_u $vec, imm:$idx)>;
+def : Pat<(vector_extract (v4i32 V128:$vec), (i32 LaneIdx4:$idx)),
+ (EXTRACT_LANE_I32x4 $vec, imm:$idx)>;
+def : Pat<(vector_extract (v4f32 V128:$vec), (i32 LaneIdx4:$idx)),
+ (EXTRACT_LANE_F32x4 $vec, imm:$idx)>;
+def : Pat<(vector_extract (v2i64 V128:$vec), (i32 LaneIdx2:$idx)),
+ (EXTRACT_LANE_I64x2 $vec, imm:$idx)>;
+def : Pat<(vector_extract (v2f64 V128:$vec), (i32 LaneIdx2:$idx)),
+ (EXTRACT_LANE_F64x2 $vec, imm:$idx)>;
+
+def : Pat<
+ (sext_inreg (vector_extract (v16i8 V128:$vec), (i32 LaneIdx16:$idx)), i8),
+ (EXTRACT_LANE_I8x16_s $vec, imm:$idx)>;
+def : Pat<
+ (and (vector_extract (v16i8 V128:$vec), (i32 LaneIdx16:$idx)), (i32 0xff)),
+ (EXTRACT_LANE_I8x16_u $vec, imm:$idx)>;
+def : Pat<
+ (sext_inreg (vector_extract (v8i16 V128:$vec), (i32 LaneIdx8:$idx)), i16),
+ (EXTRACT_LANE_I16x8_s $vec, imm:$idx)>;
+def : Pat<
+ (and (vector_extract (v8i16 V128:$vec), (i32 LaneIdx8:$idx)), (i32 0xffff)),
+ (EXTRACT_LANE_I16x8_u $vec, imm:$idx)>;
+
+// Replace lane value: replace_lane
+multiclass ReplaceLane<Vec vec, bits<32> simdop> {
+ defm REPLACE_LANE_#vec :
+ SIMD_I<(outs V128:$dst), (ins V128:$vec, vec_i8imm_op:$idx, vec.lane_rc:$x),
+ (outs), (ins vec_i8imm_op:$idx),
+ [(set V128:$dst, (vector_insert
+ (vec.vt V128:$vec),
+ (vec.lane_vt vec.lane_rc:$x),
+ (i32 vec.lane_idx:$idx)))],
+ vec.prefix#".replace_lane\t$dst, $vec, $idx, $x",
+ vec.prefix#".replace_lane\t$idx", simdop>;
+}
+
+defm "" : ReplaceLane<I8x16, 23>;
+defm "" : ReplaceLane<I16x8, 26>;
+defm "" : ReplaceLane<I32x4, 28>;
+defm "" : ReplaceLane<I64x2, 30>;
+defm "" : ReplaceLane<F32x4, 32>;
+defm "" : ReplaceLane<F64x2, 34>;
+
+// Lower undef lane indices to zero
+def : Pat<(vector_insert (v16i8 V128:$vec), I32:$x, undef),
+ (REPLACE_LANE_I8x16 $vec, 0, $x)>;
+def : Pat<(vector_insert (v8i16 V128:$vec), I32:$x, undef),
+ (REPLACE_LANE_I16x8 $vec, 0, $x)>;
+def : Pat<(vector_insert (v4i32 V128:$vec), I32:$x, undef),
+ (REPLACE_LANE_I32x4 $vec, 0, $x)>;
+def : Pat<(vector_insert (v2i64 V128:$vec), I64:$x, undef),
+ (REPLACE_LANE_I64x2 $vec, 0, $x)>;
+def : Pat<(vector_insert (v4f32 V128:$vec), F32:$x, undef),
+ (REPLACE_LANE_F32x4 $vec, 0, $x)>;
+def : Pat<(vector_insert (v2f64 V128:$vec), F64:$x, undef),
+ (REPLACE_LANE_F64x2 $vec, 0, $x)>;
+
+//===----------------------------------------------------------------------===//
+// Comparisons
+//===----------------------------------------------------------------------===//
+
+multiclass SIMDCondition<Vec vec, string name, CondCode cond, bits<32> simdop> {
+ defm _#vec :
+ SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), (outs), (ins),
+ [(set (vec.int_vt V128:$dst),
+ (setcc (vec.vt V128:$lhs), (vec.vt V128:$rhs), cond))],
+ vec.prefix#"."#name#"\t$dst, $lhs, $rhs",
+ vec.prefix#"."#name, simdop>;
+}
+
+multiclass SIMDConditionInt<string name, CondCode cond, bits<32> baseInst> {
+ defm "" : SIMDCondition<I8x16, name, cond, baseInst>;
+ defm "" : SIMDCondition<I16x8, name, cond, !add(baseInst, 10)>;
+ defm "" : SIMDCondition<I32x4, name, cond, !add(baseInst, 20)>;
+}
+
+multiclass SIMDConditionFP<string name, CondCode cond, bits<32> baseInst> {
+ defm "" : SIMDCondition<F32x4, name, cond, baseInst>;
+ defm "" : SIMDCondition<F64x2, name, cond, !add(baseInst, 6)>;
+}
+
+// Equality: eq
+let isCommutable = 1 in {
+defm EQ : SIMDConditionInt<"eq", SETEQ, 35>;
+defm EQ : SIMDCondition<I64x2, "eq", SETEQ, 214>;
+defm EQ : SIMDConditionFP<"eq", SETOEQ, 65>;
+} // isCommutable = 1
+
+// Non-equality: ne
+let isCommutable = 1 in {
+defm NE : SIMDConditionInt<"ne", SETNE, 36>;
+defm NE : SIMDCondition<I64x2, "ne", SETNE, 215>;
+defm NE : SIMDConditionFP<"ne", SETUNE, 66>;
+} // isCommutable = 1
+
+// Less than: lt_s / lt_u / lt
+defm LT_S : SIMDConditionInt<"lt_s", SETLT, 37>;
+defm LT_S : SIMDCondition<I64x2, "lt_s", SETLT, 216>;
+defm LT_U : SIMDConditionInt<"lt_u", SETULT, 38>;
+defm LT : SIMDConditionFP<"lt", SETOLT, 67>;
+
+// Greater than: gt_s / gt_u / gt
+defm GT_S : SIMDConditionInt<"gt_s", SETGT, 39>;
+defm GT_S : SIMDCondition<I64x2, "gt_s", SETGT, 217>;
+defm GT_U : SIMDConditionInt<"gt_u", SETUGT, 40>;
+defm GT : SIMDConditionFP<"gt", SETOGT, 68>;
+
+// Less than or equal: le_s / le_u / le
+defm LE_S : SIMDConditionInt<"le_s", SETLE, 41>;
+defm LE_S : SIMDCondition<I64x2, "le_s", SETLE, 218>;
+defm LE_U : SIMDConditionInt<"le_u", SETULE, 42>;
+defm LE : SIMDConditionFP<"le", SETOLE, 69>;
+
+// Greater than or equal: ge_s / ge_u / ge
+defm GE_S : SIMDConditionInt<"ge_s", SETGE, 43>;
+defm GE_S : SIMDCondition<I64x2, "ge_s", SETGE, 219>;
+defm GE_U : SIMDConditionInt<"ge_u", SETUGE, 44>;
+defm GE : SIMDConditionFP<"ge", SETOGE, 70>;
+
+// Lower float comparisons that don't care about NaN to standard WebAssembly
+// float comparisons. These setcc nodes are generated when the comparison
+// carries the nnan flag and by the target-independent expansion of unordered
+// comparisons and ordered ne.
+foreach nodes = [[seteq, EQ_F32x4], [setne, NE_F32x4], [setlt, LT_F32x4],
+ [setgt, GT_F32x4], [setle, LE_F32x4], [setge, GE_F32x4]] in
+def : Pat<(v4i32 (nodes[0] (v4f32 V128:$lhs), (v4f32 V128:$rhs))),
+ (nodes[1] $lhs, $rhs)>;
+
+foreach nodes = [[seteq, EQ_F64x2], [setne, NE_F64x2], [setlt, LT_F64x2],
+ [setgt, GT_F64x2], [setle, LE_F64x2], [setge, GE_F64x2]] in
+def : Pat<(v2i64 (nodes[0] (v2f64 V128:$lhs), (v2f64 V128:$rhs))),
+ (nodes[1] $lhs, $rhs)>;
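+
+// For example (sketch), a fast-math comparison such as
+//   %c = fcmp nnan olt <4 x float> %a, %b
+// can select f32x4.lt directly instead of an expanded unordered sequence.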
+
+//===----------------------------------------------------------------------===//
+// Bitwise operations
+//===----------------------------------------------------------------------===//
+
+multiclass SIMDBinary<Vec vec, SDPatternOperator node, string name, bits<32> simdop> {
+ defm _#vec : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs),
+ (outs), (ins),
+ [(set (vec.vt V128:$dst),
+ (node (vec.vt V128:$lhs), (vec.vt V128:$rhs)))],
+ vec.prefix#"."#name#"\t$dst, $lhs, $rhs",
+ vec.prefix#"."#name, simdop>;
+}
+
+multiclass SIMDBitwise<SDPatternOperator node, string name, bits<32> simdop,
+ bit commutable = false> {
+ let isCommutable = commutable in
+ defm "" : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs),
+ (outs), (ins), [],
+ "v128."#name#"\t$dst, $lhs, $rhs", "v128."#name, simdop>;
+ foreach vec = IntVecs in
+ def : Pat<(node (vec.vt V128:$lhs), (vec.vt V128:$rhs)),
+ (!cast<NI>(NAME) $lhs, $rhs)>;
+}
+
+multiclass SIMDUnary<Vec vec, SDPatternOperator node, string name, bits<32> simdop> {
+ defm _#vec : SIMD_I<(outs V128:$dst), (ins V128:$v), (outs), (ins),
+ [(set (vec.vt V128:$dst),
+ (vec.vt (node (vec.vt V128:$v))))],
+ vec.prefix#"."#name#"\t$dst, $v",
+ vec.prefix#"."#name, simdop>;
+}
+
+// Bitwise logic: v128.not
+defm NOT : SIMD_I<(outs V128:$dst), (ins V128:$v), (outs), (ins), [],
+ "v128.not\t$dst, $v", "v128.not", 77>;
+foreach vec = IntVecs in
+def : Pat<(vnot (vec.vt V128:$v)), (NOT $v)>;
+
+// Bitwise logic: v128.and / v128.or / v128.xor
+defm AND : SIMDBitwise<and, "and", 78, true>;
+defm OR : SIMDBitwise<or, "or", 80, true>;
+defm XOR : SIMDBitwise<xor, "xor", 81, true>;
+
+// Bitwise logic: v128.andnot
+def andnot : PatFrag<(ops node:$left, node:$right), (and $left, (vnot $right))>;
+defm ANDNOT : SIMDBitwise<andnot, "andnot", 79>;
+
+// Bitwise select: v128.bitselect
+defm BITSELECT :
+ SIMD_I<(outs V128:$dst), (ins V128:$v1, V128:$v2, V128:$c), (outs), (ins), [],
+ "v128.bitselect\t$dst, $v1, $v2, $c", "v128.bitselect", 82>;
+
+foreach vec = AllVecs in
+def : Pat<(vec.vt (int_wasm_bitselect
+ (vec.vt V128:$v1), (vec.vt V128:$v2), (vec.vt V128:$c))),
+ (BITSELECT $v1, $v2, $c)>;
+
+// Bitselect is equivalent to (c & v1) | (~c & v2)
+foreach vec = IntVecs in
+def : Pat<(vec.vt (or (and (vec.vt V128:$c), (vec.vt V128:$v1)),
+ (and (vnot V128:$c), (vec.vt V128:$v2)))),
+ (BITSELECT $v1, $v2, $c)>;
+
+// Bitselect is also equivalent to ((v1 ^ v2) & c) ^ v2
+foreach vec = IntVecs in
+def : Pat<(vec.vt (xor (and (xor (vec.vt V128:$v1), (vec.vt V128:$v2)),
+ (vec.vt V128:$c)),
+ (vec.vt V128:$v2))),
+ (BITSELECT $v1, $v2, $c)>;
+
+// Same pattern with `c` negated, so `v1` and `v2` get swapped.
+foreach vec = IntVecs in
+def : Pat<(vec.vt (xor (and (xor (vec.vt V128:$v1), (vec.vt V128:$v2)),
+ (vnot (vec.vt V128:$c))),
+ (vec.vt V128:$v2))),
+ (BITSELECT $v2, $v1, $c)>;
+
+// Also implement vselect in terms of bitselect
+foreach vec = AllVecs in
+def : Pat<(vec.vt (vselect
+ (vec.int_vt V128:$c), (vec.vt V128:$v1), (vec.vt V128:$v2))),
+ (BITSELECT $v1, $v2, $c)>;
+
+// MVP select on v128 values
+defm SELECT_V128 :
+ I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs, I32:$cond), (outs), (ins), [],
+ "v128.select\t$dst, $lhs, $rhs, $cond", "v128.select", 0x1b>;
+
+foreach vec = AllVecs in {
+def : Pat<(select I32:$cond, (vec.vt V128:$lhs), (vec.vt V128:$rhs)),
+ (SELECT_V128 $lhs, $rhs, $cond)>;
+
+// ISD::SELECT requires its operand to conform to getBooleanContents, but
+// WebAssembly's select interprets any non-zero value as true, so we can fold
+// a setne with 0 into a select.
+def : Pat<(select
+ (i32 (setne I32:$cond, 0)), (vec.vt V128:$lhs), (vec.vt V128:$rhs)),
+ (SELECT_V128 $lhs, $rhs, $cond)>;
+
+// And again, this time with seteq instead of setne and the arms reversed.
+def : Pat<(select
+ (i32 (seteq I32:$cond, 0)), (vec.vt V128:$lhs), (vec.vt V128:$rhs)),
+ (SELECT_V128 $rhs, $lhs, $cond)>;
+} // foreach vec
+
+//===----------------------------------------------------------------------===//
+// Integer unary arithmetic
+//===----------------------------------------------------------------------===//
+
+multiclass SIMDUnaryInt<SDPatternOperator node, string name, bits<32> baseInst> {
+ defm "" : SIMDUnary<I8x16, node, name, baseInst>;
+ defm "" : SIMDUnary<I16x8, node, name, !add(baseInst, 32)>;
+ defm "" : SIMDUnary<I32x4, node, name, !add(baseInst, 64)>;
+ defm "" : SIMDUnary<I64x2, node, name, !add(baseInst, 96)>;
+}
+
+// Integer vector negation
+def ivneg : PatFrag<(ops node:$in), (sub immAllZerosV, $in)>;
+
+// Integer absolute value: abs
+defm ABS : SIMDUnaryInt<abs, "abs", 96>;
+
+// Integer negation: neg
+defm NEG : SIMDUnaryInt<ivneg, "neg", 97>;
+
+// Population count: popcnt
+defm POPCNT : SIMDUnary<I8x16, ctpop, "popcnt", 0x62>;
+
+// Any lane true: any_true
+defm ANYTRUE : SIMD_I<(outs I32:$dst), (ins V128:$vec), (outs), (ins), [],
+ "v128.any_true\t$dst, $vec", "v128.any_true", 0x53>;
+
+foreach vec = IntVecs in
+def : Pat<(int_wasm_anytrue (vec.vt V128:$vec)), (ANYTRUE V128:$vec)>;
+
+// All lanes true: all_true
+multiclass SIMDAllTrue<Vec vec, bits<32> simdop> {
+ defm ALLTRUE_#vec : SIMD_I<(outs I32:$dst), (ins V128:$vec), (outs), (ins),
+ [(set I32:$dst,
+ (i32 (int_wasm_alltrue (vec.vt V128:$vec))))],
+ vec.prefix#".all_true\t$dst, $vec",
+ vec.prefix#".all_true", simdop>;
+}
+
+defm "" : SIMDAllTrue<I8x16, 0x63>;
+defm "" : SIMDAllTrue<I16x8, 0x83>;
+defm "" : SIMDAllTrue<I32x4, 0xa3>;
+defm "" : SIMDAllTrue<I64x2, 0xc3>;
+
+// Reductions already return 0 or 1, so a following `and 1`, `setne 0`, or
+// `seteq 1` can be folded away.
+foreach reduction =
+ [["int_wasm_anytrue", "ANYTRUE", "I8x16"],
+ ["int_wasm_anytrue", "ANYTRUE", "I16x8"],
+ ["int_wasm_anytrue", "ANYTRUE", "I32x4"],
+ ["int_wasm_anytrue", "ANYTRUE", "I64x2"],
+ ["int_wasm_alltrue", "ALLTRUE_I8x16", "I8x16"],
+ ["int_wasm_alltrue", "ALLTRUE_I16x8", "I16x8"],
+ ["int_wasm_alltrue", "ALLTRUE_I32x4", "I32x4"],
+ ["int_wasm_alltrue", "ALLTRUE_I64x2", "I64x2"]] in {
+defvar intrinsic = !cast<Intrinsic>(reduction[0]);
+defvar inst = !cast<NI>(reduction[1]);
+defvar vec = !cast<Vec>(reduction[2]);
+def : Pat<(i32 (and (i32 (intrinsic (vec.vt V128:$x))), (i32 1))), (inst $x)>;
+def : Pat<(i32 (setne (i32 (intrinsic (vec.vt V128:$x))), (i32 0))), (inst $x)>;
+def : Pat<(i32 (seteq (i32 (intrinsic (vec.vt V128:$x))), (i32 1))), (inst $x)>;
+}
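+// For example (sketch), (and (i32 (int_wasm_alltrue $v)), 1) selects a bare
+// ALLTRUE instruction, since all_true already produces 0 or 1.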
+
+multiclass SIMDBitmask<Vec vec, bits<32> simdop> {
+ defm _#vec : SIMD_I<(outs I32:$dst), (ins V128:$vec), (outs), (ins),
+ [(set I32:$dst,
+ (i32 (int_wasm_bitmask (vec.vt V128:$vec))))],
+ vec.prefix#".bitmask\t$dst, $vec", vec.prefix#".bitmask",
+ simdop>;
+}
+
+defm BITMASK : SIMDBitmask<I8x16, 100>;
+defm BITMASK : SIMDBitmask<I16x8, 132>;
+defm BITMASK : SIMDBitmask<I32x4, 164>;
+defm BITMASK : SIMDBitmask<I64x2, 196>;
+
+//===----------------------------------------------------------------------===//
+// Bit shifts
+//===----------------------------------------------------------------------===//
+
+multiclass SIMDShift<Vec vec, SDNode node, string name, bits<32> simdop> {
+ defm _#vec : SIMD_I<(outs V128:$dst), (ins V128:$vec, I32:$x), (outs), (ins),
+ [(set (vec.vt V128:$dst), (node V128:$vec, I32:$x))],
+ vec.prefix#"."#name#"\t$dst, $vec, $x",
+ vec.prefix#"."#name, simdop>;
+}
+
+multiclass SIMDShiftInt<SDNode node, string name, bits<32> baseInst> {
+ defm "" : SIMDShift<I8x16, node, name, baseInst>;
+ defm "" : SIMDShift<I16x8, node, name, !add(baseInst, 32)>;
+ defm "" : SIMDShift<I32x4, node, name, !add(baseInst, 64)>;
+ defm "" : SIMDShift<I64x2, node, name, !add(baseInst, 96)>;
+}
+
+// WebAssembly SIMD shifts are nonstandard in that the shift amount is
+// an i32 rather than a vector, so they need custom nodes.
+def wasm_shift_t :
+ SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisVT<2, i32>]>;
+def wasm_shl : SDNode<"WebAssemblyISD::VEC_SHL", wasm_shift_t>;
+def wasm_shr_s : SDNode<"WebAssemblyISD::VEC_SHR_S", wasm_shift_t>;
+def wasm_shr_u : SDNode<"WebAssemblyISD::VEC_SHR_U", wasm_shift_t>;
+
+// Left shift by scalar: shl
+defm SHL : SIMDShiftInt<wasm_shl, "shl", 107>;
+
+// Right shift by scalar: shr_s / shr_u
+defm SHR_S : SIMDShiftInt<wasm_shr_s, "shr_s", 108>;
+defm SHR_U : SIMDShiftInt<wasm_shr_u, "shr_u", 109>;
+
+// Optimize away an explicit mask on a shift count.
+def : Pat<(wasm_shl (v16i8 V128:$lhs), (and I32:$rhs, 7)),
+ (SHL_I8x16 V128:$lhs, I32:$rhs)>;
+def : Pat<(wasm_shr_s (v16i8 V128:$lhs), (and I32:$rhs, 7)),
+ (SHR_S_I8x16 V128:$lhs, I32:$rhs)>;
+def : Pat<(wasm_shr_u (v16i8 V128:$lhs), (and I32:$rhs, 7)),
+ (SHR_U_I8x16 V128:$lhs, I32:$rhs)>;
+
+def : Pat<(wasm_shl (v8i16 V128:$lhs), (and I32:$rhs, 15)),
+ (SHL_I16x8 V128:$lhs, I32:$rhs)>;
+def : Pat<(wasm_shr_s (v8i16 V128:$lhs), (and I32:$rhs, 15)),
+ (SHR_S_I16x8 V128:$lhs, I32:$rhs)>;
+def : Pat<(wasm_shr_u (v8i16 V128:$lhs), (and I32:$rhs, 15)),
+ (SHR_U_I16x8 V128:$lhs, I32:$rhs)>;
+
+def : Pat<(wasm_shl (v4i32 V128:$lhs), (and I32:$rhs, 31)),
+ (SHL_I32x4 V128:$lhs, I32:$rhs)>;
+def : Pat<(wasm_shr_s (v4i32 V128:$lhs), (and I32:$rhs, 31)),
+ (SHR_S_I32x4 V128:$lhs, I32:$rhs)>;
+def : Pat<(wasm_shr_u (v4i32 V128:$lhs), (and I32:$rhs, 31)),
+ (SHR_U_I32x4 V128:$lhs, I32:$rhs)>;
+
+def : Pat<(wasm_shl (v2i64 V128:$lhs), (and I32:$rhs, 63)),
+ (SHL_I64x2 V128:$lhs, I32:$rhs)>;
+def : Pat<(wasm_shr_s (v2i64 V128:$lhs), (and I32:$rhs, 63)),
+ (SHR_S_I64x2 V128:$lhs, I32:$rhs)>;
+def : Pat<(wasm_shr_u (v2i64 V128:$lhs), (and I32:$rhs, 63)),
+ (SHR_U_I64x2 V128:$lhs, I32:$rhs)>;
+def : Pat<(wasm_shl (v2i64 V128:$lhs), (trunc (and I64:$rhs, 63))),
+ (SHL_I64x2 V128:$lhs, (I32_WRAP_I64 I64:$rhs))>;
+def : Pat<(wasm_shr_s (v2i64 V128:$lhs), (trunc (and I64:$rhs, 63))),
+ (SHR_S_I64x2 V128:$lhs, (I32_WRAP_I64 I64:$rhs))>;
+def : Pat<(wasm_shr_u (v2i64 V128:$lhs), (trunc (and I64:$rhs, 63))),
+ (SHR_U_I64x2 V128:$lhs, (I32_WRAP_I64 I64:$rhs))>;
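+
+// For example (sketch), `v << (n & 31)` on a v4i32 selects a bare i32x4.shl,
+// and the i64 variants above also fold the wrap of a masked 64-bit count
+// into the single i32.wrap_i64 feeding the shift.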
+
+//===----------------------------------------------------------------------===//
+// Integer binary arithmetic
+//===----------------------------------------------------------------------===//
+
+multiclass SIMDBinaryIntNoI8x16<SDPatternOperator node, string name, bits<32> baseInst> {
+ defm "" : SIMDBinary<I16x8, node, name, !add(baseInst, 32)>;
+ defm "" : SIMDBinary<I32x4, node, name, !add(baseInst, 64)>;
+ defm "" : SIMDBinary<I64x2, node, name, !add(baseInst, 96)>;
+}
+
+multiclass SIMDBinaryIntSmall<SDPatternOperator node, string name, bits<32> baseInst> {
+ defm "" : SIMDBinary<I8x16, node, name, baseInst>;
+ defm "" : SIMDBinary<I16x8, node, name, !add(baseInst, 32)>;
+}
+
+multiclass SIMDBinaryIntNoI64x2<SDPatternOperator node, string name, bits<32> baseInst> {
+ defm "" : SIMDBinaryIntSmall<node, name, baseInst>;
+ defm "" : SIMDBinary<I32x4, node, name, !add(baseInst, 64)>;
+}
+
+multiclass SIMDBinaryInt<SDPatternOperator node, string name, bits<32> baseInst> {
+ defm "" : SIMDBinaryIntNoI64x2<node, name, baseInst>;
+ defm "" : SIMDBinary<I64x2, node, name, !add(baseInst, 96)>;
+}
+
+// Integer addition: add / add_sat_s / add_sat_u
+let isCommutable = 1 in {
+defm ADD : SIMDBinaryInt<add, "add", 110>;
+defm ADD_SAT_S : SIMDBinaryIntSmall<saddsat, "add_sat_s", 111>;
+defm ADD_SAT_U : SIMDBinaryIntSmall<uaddsat, "add_sat_u", 112>;
+} // isCommutable = 1
+
+// Integer subtraction: sub / sub_sat_s / sub_sat_u
+defm SUB : SIMDBinaryInt<sub, "sub", 113>;
+defm SUB_SAT_S :
+ SIMDBinaryIntSmall<int_wasm_sub_sat_signed, "sub_sat_s", 114>;
+defm SUB_SAT_U :
+ SIMDBinaryIntSmall<int_wasm_sub_sat_unsigned, "sub_sat_u", 115>;
+
+// Integer multiplication: mul
+let isCommutable = 1 in
+defm MUL : SIMDBinaryIntNoI8x16<mul, "mul", 117>;
+
+// Integer min_s / min_u / max_s / max_u
+let isCommutable = 1 in {
+defm MIN_S : SIMDBinaryIntNoI64x2<smin, "min_s", 118>;
+defm MIN_U : SIMDBinaryIntNoI64x2<umin, "min_u", 119>;
+defm MAX_S : SIMDBinaryIntNoI64x2<smax, "max_s", 120>;
+defm MAX_U : SIMDBinaryIntNoI64x2<umax, "max_u", 121>;
+} // isCommutable = 1
+
+// Integer unsigned rounding average: avgr_u
+let isCommutable = 1 in {
+defm AVGR_U : SIMDBinary<I8x16, int_wasm_avgr_unsigned, "avgr_u", 123>;
+defm AVGR_U : SIMDBinary<I16x8, int_wasm_avgr_unsigned, "avgr_u", 155>;
+}
+
+def add_nuw : PatFrag<(ops node:$lhs, node:$rhs), (add $lhs, $rhs),
+ "return N->getFlags().hasNoUnsignedWrap();">;
+
+foreach vec = [I8x16, I16x8] in {
+defvar inst = !cast<NI>("AVGR_U_"#vec);
+def : Pat<(wasm_shr_u
+ (add_nuw
+ (add_nuw (vec.vt V128:$lhs), (vec.vt V128:$rhs)),
+ (vec.splat (i32 1))),
+ (i32 1)),
+ (inst $lhs, $rhs)>;
+}
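+// For example (sketch), the unsigned rounding-average idiom
+//   (a + b + 1) >> 1
+// with nuw additions on i8/i16 lanes selects i8x16.avgr_u or i16x8.avgr_u.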
+
+// Widening dot product: i32x4.dot_i16x8_s
+let isCommutable = 1 in
+defm DOT : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), (outs), (ins),
+ [(set V128:$dst, (int_wasm_dot V128:$lhs, V128:$rhs))],
+ "i32x4.dot_i16x8_s\t$dst, $lhs, $rhs", "i32x4.dot_i16x8_s",
+ 186>;
+
+// Extending multiplication: extmul_{low,high}_<src type>_{s,u}
+def extend_t : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>;
+def extend_low_s : SDNode<"WebAssemblyISD::EXTEND_LOW_S", extend_t>;
+def extend_high_s : SDNode<"WebAssemblyISD::EXTEND_HIGH_S", extend_t>;
+def extend_low_u : SDNode<"WebAssemblyISD::EXTEND_LOW_U", extend_t>;
+def extend_high_u : SDNode<"WebAssemblyISD::EXTEND_HIGH_U", extend_t>;
+
+multiclass SIMDExtBinary<Vec vec, SDPatternOperator node, string name,
+ bits<32> simdop> {
+ defm _#vec : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs),
+ (outs), (ins),
+ [(set (vec.vt V128:$dst), (node
+ (vec.split.vt V128:$lhs),(vec.split.vt V128:$rhs)))],
+ vec.prefix#"."#name#"\t$dst, $lhs, $rhs",
+ vec.prefix#"."#name, simdop>;
+}
+
+class ExtMulPat<SDNode extend> :
+ PatFrag<(ops node:$lhs, node:$rhs),
+ (mul (extend $lhs), (extend $rhs))> {}
+
+def extmul_low_s : ExtMulPat<extend_low_s>;
+def extmul_high_s : ExtMulPat<extend_high_s>;
+def extmul_low_u : ExtMulPat<extend_low_u>;
+def extmul_high_u : ExtMulPat<extend_high_u>;
+
+defm EXTMUL_LOW_S :
+ SIMDExtBinary<I16x8, extmul_low_s, "extmul_low_i8x16_s", 0x9c>;
+defm EXTMUL_HIGH_S :
+ SIMDExtBinary<I16x8, extmul_high_s, "extmul_high_i8x16_s", 0x9d>;
+defm EXTMUL_LOW_U :
+ SIMDExtBinary<I16x8, extmul_low_u, "extmul_low_i8x16_u", 0x9e>;
+defm EXTMUL_HIGH_U :
+ SIMDExtBinary<I16x8, extmul_high_u, "extmul_high_i8x16_u", 0x9f>;
+
+defm EXTMUL_LOW_S :
+ SIMDExtBinary<I32x4, extmul_low_s, "extmul_low_i16x8_s", 0xbc>;
+defm EXTMUL_HIGH_S :
+ SIMDExtBinary<I32x4, extmul_high_s, "extmul_high_i16x8_s", 0xbd>;
+defm EXTMUL_LOW_U :
+ SIMDExtBinary<I32x4, extmul_low_u, "extmul_low_i16x8_u", 0xbe>;
+defm EXTMUL_HIGH_U :
+ SIMDExtBinary<I32x4, extmul_high_u, "extmul_high_i16x8_u", 0xbf>;
+
+defm EXTMUL_LOW_S :
+ SIMDExtBinary<I64x2, extmul_low_s, "extmul_low_i32x4_s", 0xdc>;
+defm EXTMUL_HIGH_S :
+ SIMDExtBinary<I64x2, extmul_high_s, "extmul_high_i32x4_s", 0xdd>;
+defm EXTMUL_LOW_U :
+ SIMDExtBinary<I64x2, extmul_low_u, "extmul_low_i32x4_u", 0xde>;
+defm EXTMUL_HIGH_U :
+ SIMDExtBinary<I64x2, extmul_high_u, "extmul_high_i32x4_u", 0xdf>;
+
+//===----------------------------------------------------------------------===//
+// Floating-point unary arithmetic
+//===----------------------------------------------------------------------===//
+
+multiclass SIMDUnaryFP<SDNode node, string name, bits<32> baseInst> {
+ defm "" : SIMDUnary<F32x4, node, name, baseInst>;
+ defm "" : SIMDUnary<F64x2, node, name, !add(baseInst, 12)>;
+}
+
+// Absolute value: abs
+defm ABS : SIMDUnaryFP<fabs, "abs", 224>;
+
+// Negation: neg
+defm NEG : SIMDUnaryFP<fneg, "neg", 225>;
+
+// Square root: sqrt
+defm SQRT : SIMDUnaryFP<fsqrt, "sqrt", 227>;
+
+// Rounding: ceil, floor, trunc, nearest
+defm CEIL : SIMDUnary<F32x4, fceil, "ceil", 0x67>;
+defm FLOOR : SIMDUnary<F32x4, ffloor, "floor", 0x68>;
+defm TRUNC : SIMDUnary<F32x4, ftrunc, "trunc", 0x69>;
+defm NEAREST : SIMDUnary<F32x4, fnearbyint, "nearest", 0x6a>;
+defm CEIL : SIMDUnary<F64x2, fceil, "ceil", 0x74>;
+defm FLOOR : SIMDUnary<F64x2, ffloor, "floor", 0x75>;
+defm TRUNC : SIMDUnary<F64x2, ftrunc, "trunc", 0x7a>;
+defm NEAREST : SIMDUnary<F64x2, fnearbyint, "nearest", 0x94>;
+
+// WebAssembly doesn't expose inexact exceptions, so map frint to fnearbyint.
+def : Pat<(v4f32 (frint (v4f32 V128:$src))), (NEAREST_F32x4 V128:$src)>;
+def : Pat<(v2f64 (frint (v2f64 V128:$src))), (NEAREST_F64x2 V128:$src)>;
+
+// WebAssembly always rounds ties-to-even, so map froundeven to fnearbyint.
+def : Pat<(v4f32 (froundeven (v4f32 V128:$src))), (NEAREST_F32x4 V128:$src)>;
+def : Pat<(v2f64 (froundeven (v2f64 V128:$src))), (NEAREST_F64x2 V128:$src)>;
+
+//===----------------------------------------------------------------------===//
+// Floating-point binary arithmetic
+//===----------------------------------------------------------------------===//
+
+multiclass SIMDBinaryFP<SDPatternOperator node, string name, bits<32> baseInst> {
+ defm "" : SIMDBinary<F32x4, node, name, baseInst>;
+ defm "" : SIMDBinary<F64x2, node, name, !add(baseInst, 12)>;
+}
+
+// Addition: add
+let isCommutable = 1 in
+defm ADD : SIMDBinaryFP<fadd, "add", 228>;
+
+// Subtraction: sub
+defm SUB : SIMDBinaryFP<fsub, "sub", 229>;
+
+// Multiplication: mul
+let isCommutable = 1 in
+defm MUL : SIMDBinaryFP<fmul, "mul", 230>;
+
+// Division: div
+defm DIV : SIMDBinaryFP<fdiv, "div", 231>;
+
+// NaN-propagating minimum: min
+defm MIN : SIMDBinaryFP<fminimum, "min", 232>;
+
+// NaN-propagating maximum: max
+defm MAX : SIMDBinaryFP<fmaximum, "max", 233>;
+
+// Pseudo-minimum: pmin
+def pmin : PatFrags<(ops node:$lhs, node:$rhs), [
+ (vselect (setolt $rhs, $lhs), $rhs, $lhs),
+ (vselect (setole $rhs, $lhs), $rhs, $lhs),
+ (vselect (setogt $lhs, $rhs), $rhs, $lhs),
+ (vselect (setoge $lhs, $rhs), $rhs, $lhs)
+]>;
+defm PMIN : SIMDBinaryFP<pmin, "pmin", 234>;
+
+// Pseudo-maximum: pmax
+def pmax : PatFrags<(ops node:$lhs, node:$rhs), [
+ (vselect (setogt $rhs, $lhs), $rhs, $lhs),
+ (vselect (setoge $rhs, $lhs), $rhs, $lhs),
+ (vselect (setolt $lhs, $rhs), $rhs, $lhs),
+ (vselect (setole $lhs, $rhs), $rhs, $lhs)
+]>;
+defm PMAX : SIMDBinaryFP<pmax, "pmax", 235>;
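+// Note that the WebAssembly spec defines pmin and pmax directly in terms of
+// select rather than NaN propagation: pmin(x, y) = (y < x) ? y : x and
+// pmax(x, y) = (x < y) ? y : x. That is why the PatFrags above enumerate the
+// equivalent ordered-comparison spellings of the same select.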
+
+// Also match the pmin/pmax cases where the operands are int vectors (but the
+// comparison is still a floating point comparison). This can happen when using
+// the wasm_simd128.h intrinsics because v128_t is an integer vector.
+foreach vec = [F32x4, F64x2] in {
+defvar pmin = !cast<NI>("PMIN_"#vec);
+defvar pmax = !cast<NI>("PMAX_"#vec);
+def : Pat<(vec.int_vt (vselect
+ (setolt (vec.vt (bitconvert V128:$rhs)),
+ (vec.vt (bitconvert V128:$lhs))),
+ V128:$rhs, V128:$lhs)),
+ (pmin $lhs, $rhs)>;
+def : Pat<(vec.int_vt (vselect
+ (setolt (vec.vt (bitconvert V128:$lhs)),
+ (vec.vt (bitconvert V128:$rhs))),
+ V128:$rhs, V128:$lhs)),
+ (pmax $lhs, $rhs)>;
+}
+
+// And match the pmin/pmax LLVM intrinsics as well
+def : Pat<(v4f32 (int_wasm_pmin (v4f32 V128:$lhs), (v4f32 V128:$rhs))),
+ (PMIN_F32x4 V128:$lhs, V128:$rhs)>;
+def : Pat<(v4f32 (int_wasm_pmax (v4f32 V128:$lhs), (v4f32 V128:$rhs))),
+ (PMAX_F32x4 V128:$lhs, V128:$rhs)>;
+def : Pat<(v2f64 (int_wasm_pmin (v2f64 V128:$lhs), (v2f64 V128:$rhs))),
+ (PMIN_F64x2 V128:$lhs, V128:$rhs)>;
+def : Pat<(v2f64 (int_wasm_pmax (v2f64 V128:$lhs), (v2f64 V128:$rhs))),
+ (PMAX_F64x2 V128:$lhs, V128:$rhs)>;
+
+//===----------------------------------------------------------------------===//
+// Conversions
+//===----------------------------------------------------------------------===//
+
+multiclass SIMDConvert<Vec vec, Vec arg, SDPatternOperator op, string name,
+ bits<32> simdop> {
+ defm op#_#vec :
+ SIMD_I<(outs V128:$dst), (ins V128:$vec), (outs), (ins),
+ [(set (vec.vt V128:$dst), (vec.vt (op (arg.vt V128:$vec))))],
+ vec.prefix#"."#name#"\t$dst, $vec", vec.prefix#"."#name, simdop>;
+}
+
+// Floating point to integer with saturation: trunc_sat
+defm "" : SIMDConvert<I32x4, F32x4, fp_to_sint, "trunc_sat_f32x4_s", 248>;
+defm "" : SIMDConvert<I32x4, F32x4, fp_to_uint, "trunc_sat_f32x4_u", 249>;
+
+// Support the saturating variety as well.
+def trunc_s_sat32 : PatFrag<(ops node:$x), (fp_to_sint_sat $x, i32)>;
+def trunc_u_sat32 : PatFrag<(ops node:$x), (fp_to_uint_sat $x, i32)>;
+def : Pat<(v4i32 (trunc_s_sat32 (v4f32 V128:$src))), (fp_to_sint_I32x4 $src)>;
+def : Pat<(v4i32 (trunc_u_sat32 (v4f32 V128:$src))), (fp_to_uint_I32x4 $src)>;
+
+def trunc_sat_zero_t : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>;
+def trunc_sat_zero_s :
+ SDNode<"WebAssemblyISD::TRUNC_SAT_ZERO_S", trunc_sat_zero_t>;
+def trunc_sat_zero_u :
+ SDNode<"WebAssemblyISD::TRUNC_SAT_ZERO_U", trunc_sat_zero_t>;
+defm "" : SIMDConvert<I32x4, F64x2, trunc_sat_zero_s, "trunc_sat_f64x2_s_zero",
+ 0xfc>;
+defm "" : SIMDConvert<I32x4, F64x2, trunc_sat_zero_u, "trunc_sat_f64x2_u_zero",
+ 0xfd>;
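+// For example, i32x4.trunc_sat_f64x2_s_zero converts the two f64 lanes into
+// the two low i32 lanes of the result and zeroes the two high lanes.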
+
+// Integer to floating point: convert
+def convert_low_t : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>;
+def convert_low_s : SDNode<"WebAssemblyISD::CONVERT_LOW_S", convert_low_t>;
+def convert_low_u : SDNode<"WebAssemblyISD::CONVERT_LOW_U", convert_low_t>;
+defm "" : SIMDConvert<F32x4, I32x4, sint_to_fp, "convert_i32x4_s", 250>;
+defm "" : SIMDConvert<F32x4, I32x4, uint_to_fp, "convert_i32x4_u", 251>;
+defm "" : SIMDConvert<F64x2, I32x4, convert_low_s, "convert_low_i32x4_s", 0xfe>;
+defm "" : SIMDConvert<F64x2, I32x4, convert_low_u, "convert_low_i32x4_u", 0xff>;
+
+// Extending operations
+// TODO: refactor this to be uniform for i64x2 if the numbering is not changed.
+multiclass SIMDExtend<Vec vec, bits<32> baseInst> {
+ defm "" : SIMDConvert<vec, vec.split, extend_low_s,
+ "extend_low_"#vec.split.prefix#"_s", baseInst>;
+ defm "" : SIMDConvert<vec, vec.split, extend_high_s,
+ "extend_high_"#vec.split.prefix#"_s", !add(baseInst, 1)>;
+ defm "" : SIMDConvert<vec, vec.split, extend_low_u,
+ "extend_low_"#vec.split.prefix#"_u", !add(baseInst, 2)>;
+ defm "" : SIMDConvert<vec, vec.split, extend_high_u,
+ "extend_high_"#vec.split.prefix#"_u", !add(baseInst, 3)>;
+}
+
+defm "" : SIMDExtend<I16x8, 0x87>;
+defm "" : SIMDExtend<I32x4, 0xa7>;
+defm "" : SIMDExtend<I64x2, 0xc7>;
+
+// Narrowing operations
+multiclass SIMDNarrow<Vec vec, bits<32> baseInst> {
+ defvar name = vec.split.prefix#".narrow_"#vec.prefix;
+ defm NARROW_S_#vec.split :
+ SIMD_I<(outs V128:$dst), (ins V128:$low, V128:$high), (outs), (ins),
+ [(set (vec.split.vt V128:$dst), (vec.split.vt (int_wasm_narrow_signed
+ (vec.vt V128:$low), (vec.vt V128:$high))))],
+ name#"_s\t$dst, $low, $high", name#"_s", baseInst>;
+ defm NARROW_U_#vec.split :
+ SIMD_I<(outs V128:$dst), (ins V128:$low, V128:$high), (outs), (ins),
+ [(set (vec.split.vt V128:$dst), (vec.split.vt (int_wasm_narrow_unsigned
+ (vec.vt V128:$low), (vec.vt V128:$high))))],
+ name#"_u\t$dst, $low, $high", name#"_u", !add(baseInst, 1)>;
+}
+
+defm "" : SIMDNarrow<I16x8, 101>;
+defm "" : SIMDNarrow<I32x4, 133>;
+
+// WebAssemblyISD::NARROW_U
+def wasm_narrow_t : SDTypeProfile<1, 2, []>;
+def wasm_narrow_u : SDNode<"WebAssemblyISD::NARROW_U", wasm_narrow_t>;
+def : Pat<(v16i8 (wasm_narrow_u (v8i16 V128:$left), (v8i16 V128:$right))),
+ (NARROW_U_I8x16 $left, $right)>;
+def : Pat<(v8i16 (wasm_narrow_u (v4i32 V128:$left), (v4i32 V128:$right))),
+ (NARROW_U_I16x8 $left, $right)>;
+
+// Bitcasts are nops
+// Matching bitcast t1 to t1 causes strange errors, so avoid repeating types
+foreach t1 = AllVecs in
+foreach t2 = AllVecs in
+if !ne(t1, t2) then
+def : Pat<(t1.vt (bitconvert (t2.vt V128:$v))), (t1.vt V128:$v)>;
+
+// Extended pairwise addition
+defm "" : SIMDConvert<I16x8, I8x16, int_wasm_extadd_pairwise_signed,
+ "extadd_pairwise_i8x16_s", 0x7c>;
+defm "" : SIMDConvert<I16x8, I8x16, int_wasm_extadd_pairwise_unsigned,
+ "extadd_pairwise_i8x16_u", 0x7d>;
+defm "" : SIMDConvert<I32x4, I16x8, int_wasm_extadd_pairwise_signed,
+ "extadd_pairwise_i16x8_s", 0x7e>;
+defm "" : SIMDConvert<I32x4, I16x8, int_wasm_extadd_pairwise_unsigned,
+ "extadd_pairwise_i16x8_u", 0x7f>;
+
+// f64x2 <-> f32x4 conversions
+def demote_t : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>;
+def demote_zero : SDNode<"WebAssemblyISD::DEMOTE_ZERO", demote_t>;
+defm "" : SIMDConvert<F32x4, F64x2, demote_zero,
+ "demote_f64x2_zero", 0x5e>;
+
+def promote_t : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>;
+def promote_low : SDNode<"WebAssemblyISD::PROMOTE_LOW", promote_t>;
+defm "" : SIMDConvert<F64x2, F32x4, promote_low, "promote_low_f32x4", 0x5f>;
+
+// Lower extending loads to load64_zero + promote_low
+def extloadv2f32 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+ let MemoryVT = v2f32;
+}
+// Adapted from the body of LoadPatNoOffset
+// TODO: other addressing patterns
+def : Pat<(v2f64 (extloadv2f32 (i32 I32:$addr))),
+ (promote_low_F64x2 (LOAD_ZERO_I64x2_A32 0, 0, I32:$addr))>,
+ Requires<[HasAddr32]>;
+def : Pat<(v2f64 (extloadv2f32 (i64 I64:$addr))),
+ (promote_low_F64x2 (LOAD_ZERO_I64x2_A64 0, 0, I64:$addr))>,
+ Requires<[HasAddr64]>;
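+// For example, these patterns turn IR like the following sketch
+//   %v = load <2 x float>, ptr %p
+//   %w = fpext <2 x float> %v to <2 x double>
+// into f64x2.promote_low_f32x4 (v128.load64_zero $p).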
+
+//===----------------------------------------------------------------------===//
+// Saturating Rounding Q-Format Multiplication
+//===----------------------------------------------------------------------===//
+
+defm Q15MULR_SAT_S :
+ SIMDBinary<I16x8, int_wasm_q15mulr_sat_signed, "q15mulr_sat_s", 0x82>;
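+// Q15 is a signed 16-bit fixed-point format with 15 fractional bits; per the
+// SIMD spec, this computes saturate((x * y + 0x4000) >> 15) in each lane.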
+
+//===----------------------------------------------------------------------===//
+// Relaxed swizzle
+//===----------------------------------------------------------------------===//
+
+defm RELAXED_SWIZZLE :
+ RELAXED_I<(outs V128:$dst), (ins V128:$src, V128:$mask), (outs), (ins),
+ [(set (v16i8 V128:$dst),
+ (int_wasm_relaxed_swizzle (v16i8 V128:$src), (v16i8 V128:$mask)))],
+ "i8x16.relaxed_swizzle\t$dst, $src, $mask", "i8x16.relaxed_swizzle", 0x100>;
+
+//===----------------------------------------------------------------------===//
+// Relaxed floating-point to int conversions
+//===----------------------------------------------------------------------===//
+
+multiclass RelaxedConvert<Vec vec, Vec arg, SDPatternOperator op, string name,
+                          bits<32> simdop> {
+ defm op#_#vec :
+ RELAXED_I<(outs V128:$dst), (ins V128:$vec), (outs), (ins),
+ [(set (vec.vt V128:$dst), (vec.vt (op (arg.vt V128:$vec))))],
+ vec.prefix#"."#name#"\t$dst, $vec", vec.prefix#"."#name, simdop>;
+}
+
+defm "" : RelaxedConvert<I32x4, F32x4, int_wasm_relaxed_trunc_signed,
+ "relaxed_trunc_f32x4_s", 0x101>;
+defm "" : RelaxedConvert<I32x4, F32x4, int_wasm_relaxed_trunc_unsigned,
+ "relaxed_trunc_f32x4_u", 0x102>;
+defm "" : RelaxedConvert<I32x4, F64x2, int_wasm_relaxed_trunc_signed_zero,
+ "relaxed_trunc_f64x2_s_zero", 0x103>;
+defm "" : RelaxedConvert<I32x4, F64x2, int_wasm_relaxed_trunc_unsigned_zero,
+ "relaxed_trunc_f64x2_u_zero", 0x104>;
+
+//===----------------------------------------------------------------------===//
+// Relaxed (Negative) Multiply-Add (madd/nmadd)
+//===----------------------------------------------------------------------===//
+
+multiclass SIMDMADD<Vec vec, bits<32> simdopA, bits<32> simdopS> {
+ defm MADD_#vec :
+ RELAXED_I<(outs V128:$dst), (ins V128:$a, V128:$b, V128:$c), (outs), (ins),
+ [(set (vec.vt V128:$dst), (int_wasm_relaxed_madd
+ (vec.vt V128:$a), (vec.vt V128:$b), (vec.vt V128:$c)))],
+ vec.prefix#".relaxed_madd\t$dst, $a, $b, $c",
+ vec.prefix#".relaxed_madd", simdopA>;
+ defm NMADD_#vec :
+ RELAXED_I<(outs V128:$dst), (ins V128:$a, V128:$b, V128:$c), (outs), (ins),
+ [(set (vec.vt V128:$dst), (int_wasm_relaxed_nmadd
+ (vec.vt V128:$a), (vec.vt V128:$b), (vec.vt V128:$c)))],
+ vec.prefix#".relaxed_nmadd\t$dst, $a, $b, $c",
+ vec.prefix#".relaxed_nmadd", simdopS>;
+}
+
+defm "" : SIMDMADD<F32x4, 0x105, 0x106>;
+defm "" : SIMDMADD<F64x2, 0x107, 0x108>;
+
+//===----------------------------------------------------------------------===//
+// Laneselect
+//===----------------------------------------------------------------------===//
+
+multiclass SIMDLANESELECT<Vec vec, bits<32> op> {
+ defm LANESELECT_#vec :
+ RELAXED_I<(outs V128:$dst), (ins V128:$a, V128:$b, V128:$c), (outs), (ins),
+ [(set (vec.vt V128:$dst), (int_wasm_relaxed_laneselect
+ (vec.vt V128:$a), (vec.vt V128:$b), (vec.vt V128:$c)))],
+ vec.prefix#".relaxed_laneselect\t$dst, $a, $b, $c",
+ vec.prefix#".relaxed_laneselect", op>;
+}
+
+defm "" : SIMDLANESELECT<I8x16, 0x109>;
+defm "" : SIMDLANESELECT<I16x8, 0x10a>;
+defm "" : SIMDLANESELECT<I32x4, 0x10b>;
+defm "" : SIMDLANESELECT<I64x2, 0x10c>;
+
+//===----------------------------------------------------------------------===//
+// Relaxed floating-point min and max.
+//===----------------------------------------------------------------------===//
+
+multiclass RelaxedBinary<Vec vec, SDPatternOperator node, string name,
+ bits<32> simdop> {
+ defm _#vec : RELAXED_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs),
+ (outs), (ins),
+ [(set (vec.vt V128:$dst),
+ (node (vec.vt V128:$lhs), (vec.vt V128:$rhs)))],
+ vec.prefix#"."#name#"\t$dst, $lhs, $rhs",
+ vec.prefix#"."#name, simdop>;
+}
+
+defm SIMD_RELAXED_FMIN :
+ RelaxedBinary<F32x4, int_wasm_relaxed_min, "relaxed_min", 0x10d>;
+defm SIMD_RELAXED_FMAX :
+ RelaxedBinary<F32x4, int_wasm_relaxed_max, "relaxed_max", 0x10e>;
+defm SIMD_RELAXED_FMIN :
+ RelaxedBinary<F64x2, int_wasm_relaxed_min, "relaxed_min", 0x10f>;
+defm SIMD_RELAXED_FMAX :
+ RelaxedBinary<F64x2, int_wasm_relaxed_max, "relaxed_max", 0x110>;
+
+//===----------------------------------------------------------------------===//
+// Relaxed rounding q15 multiplication
+//===----------------------------------------------------------------------===//
+
+defm RELAXED_Q15MULR_S :
+ RelaxedBinary<I16x8, int_wasm_relaxed_q15mulr_signed, "relaxed_q15mulr_s",
+ 0x111>;
+
+//===----------------------------------------------------------------------===//
+// Relaxed integer dot product
+//===----------------------------------------------------------------------===//
+
+defm RELAXED_DOT :
+ RELAXED_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), (outs), (ins),
+ [(set (v8i16 V128:$dst), (int_wasm_relaxed_dot_i8x16_i7x16_signed
+ (v16i8 V128:$lhs), (v16i8 V128:$rhs)))],
+ "i16x8.relaxed_dot_i8x16_i7x16_s\t$dst, $lhs, $rhs",
+ "i16x8.relaxed_dot_i8x16_i7x16_s", 0x112>;
+
+defm RELAXED_DOT_ADD :
+ RELAXED_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs, V128:$acc),
+ (outs), (ins),
+ [(set (v4i32 V128:$dst), (int_wasm_relaxed_dot_i8x16_i7x16_add_signed
+ (v16i8 V128:$lhs), (v16i8 V128:$rhs), (v4i32 V128:$acc)))],
+ "i32x4.relaxed_dot_i8x16_i7x16_add_s\t$dst, $lhs, $rhs, $acc",
+ "i32x4.relaxed_dot_i8x16_i7x16_add_s", 0x113>;
+
+//===----------------------------------------------------------------------===//
+// Relaxed BFloat16 dot product
+//===----------------------------------------------------------------------===//
+
+defm RELAXED_DOT_BFLOAT :
+ RELAXED_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs, V128:$acc),
+ (outs), (ins),
+ [(set (v4f32 V128:$dst), (int_wasm_relaxed_dot_bf16x8_add_f32
+ (v8i16 V128:$lhs), (v8i16 V128:$rhs), (v4f32 V128:$acc)))],
+ "f32x4.relaxed_dot_bf16x8_add_f32\t$dst, $lhs, $rhs, $acc",
+ "f32x4.relaxed_dot_bf16x8_add_f32", 0x114>;
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrTable.td b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrTable.td
new file mode 100644
index 000000000000..069ce5e3bc94
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrTable.td
@@ -0,0 +1,89 @@
+// WebAssemblyInstrTable.td - WebAssembly Table codegen support -*- tablegen -*-
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// WebAssembly Table operand code-gen constructs.
+/// Instructions that handle tables.
+//===----------------------------------------------------------------------===//
+
+def WebAssemblyTableSet_t : SDTypeProfile<0, 3, [SDTCisPtrTy<1>]>;
+def WebAssemblyTableSet : SDNode<"WebAssemblyISD::TABLE_SET", WebAssemblyTableSet_t,
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+
+def WebAssemblyTableGet_t : SDTypeProfile<1, 2, [SDTCisPtrTy<1>]>;
+def WebAssemblyTableGet : SDNode<"WebAssemblyISD::TABLE_GET", WebAssemblyTableGet_t,
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+
+
+multiclass TABLE<WebAssemblyRegClass rc, string suffix> {
+ let mayLoad = 1 in
+ defm TABLE_GET_#rc : I<(outs rc:$res), (ins table32_op:$table, I32:$i),
+ (outs), (ins table32_op:$table),
+ [(set rc:$res, (!cast<Intrinsic>("int_wasm_table_get_" # suffix) (WebAssemblyWrapper tglobaladdr:$table), I32:$i))],
+ "table.get\t$res, $table, $i",
+ "table.get\t$table",
+ 0x25>;
+
+ let mayStore = 1 in
+ defm TABLE_SET_#rc : I<(outs), (ins table32_op:$table, I32:$i, rc:$val),
+ (outs), (ins table32_op:$table),
+ [(!cast<Intrinsic>("int_wasm_table_set_" # suffix) (WebAssemblyWrapper tglobaladdr:$table), I32:$i, rc:$val)],
+ "table.set\t$table, $i, $val",
+ "table.set\t$table",
+ 0x26>;
+
+ defm TABLE_GROW_#rc : I<(outs I32:$sz), (ins table32_op:$table, rc:$val, I32:$n),
+ (outs), (ins table32_op:$table),
+ [(set I32:$sz, (!cast<Intrinsic>("int_wasm_table_grow_" # suffix) (WebAssemblyWrapper tglobaladdr:$table), rc:$val, I32:$n))],
+ "table.grow\t$sz, $table, $val, $n",
+ "table.grow\t$table",
+ 0xfc0f>;
+
+ defm TABLE_FILL_#rc : I<(outs), (ins table32_op:$table, I32:$i, rc:$val, I32:$n),
+ (outs), (ins table32_op:$table),
+ [(!cast<Intrinsic>("int_wasm_table_fill_" # suffix) (WebAssemblyWrapper tglobaladdr:$table), I32:$i, rc:$val, I32:$n)],
+ "table.fill\t$table, $i, $val, $n",
+ "table.fill\t$table",
+ 0xfc11>;
+
+ foreach vt = rc.RegTypes in {
+ def : Pat<(vt (WebAssemblyTableGet (WebAssemblyWrapper tglobaladdr:$table), i32:$idx)),
+ (!cast<NI>("TABLE_GET_" # rc) tglobaladdr:$table, i32:$idx)>;
+ def : Pat<(WebAssemblyTableSet
+ (WebAssemblyWrapper tglobaladdr:$table),
+ i32:$idx,
+ vt:$src),
+ (!cast<NI>("TABLE_SET_" # rc) tglobaladdr:$table, i32:$idx, vt:$src)>;
+ }
+}
+
+defm "" : TABLE<FUNCREF, "funcref">, Requires<[HasReferenceTypes]>;
+defm "" : TABLE<EXTERNREF, "externref">, Requires<[HasReferenceTypes]>;
+
+def : Pat<(WebAssemblyTableSet mcsym:$table, i32:$idx, funcref:$r),
+ (TABLE_SET_FUNCREF mcsym:$table, i32:$idx, funcref:$r)>,
+ Requires<[HasReferenceTypes]>;
+
+defm TABLE_SIZE : I<(outs I32:$sz), (ins table32_op:$table),
+ (outs), (ins table32_op:$table),
+ [(set I32:$sz, (int_wasm_table_size (WebAssemblyWrapper tglobaladdr:$table)))],
+ "table.size\t$sz, $table",
+ "table.size\t$table",
+ 0xfc10>,
+ Requires<[HasReferenceTypes]>;
+
+
+defm TABLE_COPY : I<(outs), (ins table32_op:$table1, table32_op:$table2, I32:$d, I32:$s, I32:$n),
+ (outs), (ins table32_op:$table1, table32_op:$table2),
+ [(int_wasm_table_copy (WebAssemblyWrapper tglobaladdr:$table1),
+ (WebAssemblyWrapper tglobaladdr:$table2),
+ I32:$d, I32:$s, I32:$n)],
+ "table.copy\t$table1, $table2, $d, $s, $n",
+ "table.copy\t$table1, $table2",
+ 0xfc0e>,
+ Requires<[HasReferenceTypes]>;
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp
new file mode 100644
index 000000000000..94037b9ab189
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp
@@ -0,0 +1,322 @@
+//=== WebAssemblyLateEHPrepare.cpp - WebAssembly Exception Preparation -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Does various transformations for exception handling.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/WasmEHFuncInfo.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetMachine.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-late-eh-prepare"
+
+namespace {
+class WebAssemblyLateEHPrepare final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly Late Prepare Exception";
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+ bool removeUnreachableEHPads(MachineFunction &MF);
+ void recordCatchRetBBs(MachineFunction &MF);
+ bool hoistCatches(MachineFunction &MF);
+ bool addCatchAlls(MachineFunction &MF);
+ bool replaceFuncletReturns(MachineFunction &MF);
+ bool removeUnnecessaryUnreachables(MachineFunction &MF);
+ bool restoreStackPointer(MachineFunction &MF);
+
+ MachineBasicBlock *getMatchingEHPad(MachineInstr *MI);
+ SmallPtrSet<MachineBasicBlock *, 8> CatchRetBBs;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyLateEHPrepare() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyLateEHPrepare::ID = 0;
+INITIALIZE_PASS(WebAssemblyLateEHPrepare, DEBUG_TYPE,
+ "WebAssembly Late Exception Preparation", false, false)
+
+FunctionPass *llvm::createWebAssemblyLateEHPrepare() {
+ return new WebAssemblyLateEHPrepare();
+}
+
+// Returns the nearest EH pad that dominates this instruction. This does not
+// use dominator analysis; it just does a BFS on the predecessors until
+// arriving at an EH pad. This assumes valid EH scopes, so the first EH pad it
+// arrives at on all possible search paths should be the same.
+// Returns nullptr in case it does not find any EH pad in the search, or finds
+// multiple different EH pads.
+MachineBasicBlock *
+WebAssemblyLateEHPrepare::getMatchingEHPad(MachineInstr *MI) {
+ MachineFunction *MF = MI->getParent()->getParent();
+ SmallVector<MachineBasicBlock *, 2> WL;
+ SmallPtrSet<MachineBasicBlock *, 2> Visited;
+ WL.push_back(MI->getParent());
+ MachineBasicBlock *EHPad = nullptr;
+ while (!WL.empty()) {
+ MachineBasicBlock *MBB = WL.pop_back_val();
+ if (!Visited.insert(MBB).second)
+ continue;
+ if (MBB->isEHPad()) {
+ if (EHPad && EHPad != MBB)
+ return nullptr;
+ EHPad = MBB;
+ continue;
+ }
+ if (MBB == &MF->front())
+ return nullptr;
+ for (auto *Pred : MBB->predecessors())
+ if (!CatchRetBBs.count(Pred)) // We don't go into child scopes
+ WL.push_back(Pred);
+ }
+ return EHPad;
+}
+
+// Erase each of the specified BBs if it does not have any remaining
+// predecessors, and also erase all of its dead children.
+template <typename Container>
+static void eraseDeadBBsAndChildren(const Container &MBBs) {
+ SmallVector<MachineBasicBlock *, 8> WL(MBBs.begin(), MBBs.end());
+ SmallPtrSet<MachineBasicBlock *, 8> Deleted;
+ while (!WL.empty()) {
+ MachineBasicBlock *MBB = WL.pop_back_val();
+ if (Deleted.count(MBB) || !MBB->pred_empty())
+ continue;
+ SmallVector<MachineBasicBlock *, 4> Succs(MBB->successors());
+ WL.append(MBB->succ_begin(), MBB->succ_end());
+ for (auto *Succ : Succs)
+ MBB->removeSuccessor(Succ);
+ // To prevent deleting the same BB multiple times, which can happen when
+ // 'MBBs' contain both a parent and a child
+ Deleted.insert(MBB);
+ MBB->eraseFromParent();
+ }
+}
+
+bool WebAssemblyLateEHPrepare::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG(dbgs() << "********** Late EH Prepare **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ if (MF.getTarget().getMCAsmInfo()->getExceptionHandlingType() !=
+ ExceptionHandling::Wasm)
+ return false;
+
+ bool Changed = false;
+ if (MF.getFunction().hasPersonalityFn()) {
+ Changed |= removeUnreachableEHPads(MF);
+ recordCatchRetBBs(MF);
+ Changed |= hoistCatches(MF);
+ Changed |= addCatchAlls(MF);
+ Changed |= replaceFuncletReturns(MF);
+ }
+ Changed |= removeUnnecessaryUnreachables(MF);
+ if (MF.getFunction().hasPersonalityFn())
+ Changed |= restoreStackPointer(MF);
+ return Changed;
+}
+
+// Remove unreachable EH pads and their children. If they remain, CFG
+// stackification can be tricky.
+bool WebAssemblyLateEHPrepare::removeUnreachableEHPads(MachineFunction &MF) {
+ SmallVector<MachineBasicBlock *, 4> ToDelete;
+ for (auto &MBB : MF)
+ if (MBB.isEHPad() && MBB.pred_empty())
+ ToDelete.push_back(&MBB);
+ eraseDeadBBsAndChildren(ToDelete);
+ return !ToDelete.empty();
+}
+
+// Record which BBs end with a catchret instruction, because these will be
+// replaced with 'br's later. This set of catchret BBs is needed by the
+// 'getMatchingEHPad' function.
+void WebAssemblyLateEHPrepare::recordCatchRetBBs(MachineFunction &MF) {
+ CatchRetBBs.clear();
+ for (auto &MBB : MF) {
+ auto Pos = MBB.getFirstTerminator();
+ if (Pos == MBB.end())
+ continue;
+ MachineInstr *TI = &*Pos;
+ if (TI->getOpcode() == WebAssembly::CATCHRET)
+ CatchRetBBs.insert(&MBB);
+ }
+}
+
+// Hoist catch instructions to the beginning of their matching EH pad BBs in
+// case:
+// (1) the catch instruction is not the first instruction in the EH pad.
+// ehpad:
+// some_other_instruction
+// ...
+// %exn = catch 0
+// (2) the catch instruction is in a non-EH pad BB. For example,
+// ehpad:
+// br bb0
+// bb0:
+// %exn = catch 0
+bool WebAssemblyLateEHPrepare::hoistCatches(MachineFunction &MF) {
+ bool Changed = false;
+ SmallVector<MachineInstr *, 16> Catches;
+ for (auto &MBB : MF)
+ for (auto &MI : MBB)
+ if (WebAssembly::isCatch(MI.getOpcode()))
+ Catches.push_back(&MI);
+
+ for (auto *Catch : Catches) {
+ MachineBasicBlock *EHPad = getMatchingEHPad(Catch);
+ assert(EHPad && "No matching EH pad for catch");
+ auto InsertPos = EHPad->begin();
+ // Skip EH_LABELs in the beginning of an EH pad if present. We don't use
+ // these labels at the moment, but other targets also seem to have an
+ // EH_LABEL instruction in the beginning of an EH pad.
+ while (InsertPos != EHPad->end() && InsertPos->isEHLabel())
+ InsertPos++;
+ if (InsertPos == Catch)
+ continue;
+ Changed = true;
+ EHPad->insert(InsertPos, Catch->removeFromParent());
+ }
+ return Changed;
+}
+
+// Add catch_all to beginning of cleanup pads.
+bool WebAssemblyLateEHPrepare::addCatchAlls(MachineFunction &MF) {
+ bool Changed = false;
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+
+ for (auto &MBB : MF) {
+ if (!MBB.isEHPad())
+ continue;
+ auto InsertPos = MBB.begin();
+ // Skip EH_LABELs in the beginning of an EH pad if present.
+ while (InsertPos != MBB.end() && InsertPos->isEHLabel())
+ InsertPos++;
+ // This runs after hoistCatches(), so we assume that if there is a catch,
+ // that should be the first non-EH-label instruction in an EH pad.
+ if (InsertPos == MBB.end() ||
+ !WebAssembly::isCatch(InsertPos->getOpcode())) {
+ Changed = true;
+ BuildMI(MBB, InsertPos,
+ InsertPos == MBB.end() ? DebugLoc() : InsertPos->getDebugLoc(),
+ TII.get(WebAssembly::CATCH_ALL));
+ }
+ }
+ return Changed;
+}
+
+// Replace pseudo-instructions catchret and cleanupret with br and rethrow
+// respectively.
+bool WebAssemblyLateEHPrepare::replaceFuncletReturns(MachineFunction &MF) {
+ bool Changed = false;
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+
+ for (auto &MBB : MF) {
+ auto Pos = MBB.getFirstTerminator();
+ if (Pos == MBB.end())
+ continue;
+ MachineInstr *TI = &*Pos;
+
+ switch (TI->getOpcode()) {
+ case WebAssembly::CATCHRET: {
+ // Replace a catchret with a branch
+ MachineBasicBlock *TBB = TI->getOperand(0).getMBB();
+ if (!MBB.isLayoutSuccessor(TBB))
+ BuildMI(MBB, TI, TI->getDebugLoc(), TII.get(WebAssembly::BR))
+ .addMBB(TBB);
+ TI->eraseFromParent();
+ Changed = true;
+ break;
+ }
+ case WebAssembly::CLEANUPRET: {
+ // Replace a cleanupret with a rethrow. For C++ support, currently
+ // rethrow's immediate argument is always 0 (= the latest exception).
+ BuildMI(MBB, TI, TI->getDebugLoc(), TII.get(WebAssembly::RETHROW))
+ .addImm(0);
+ TI->eraseFromParent();
+ Changed = true;
+ break;
+ }
+ }
+ }
+ return Changed;
+}
+
+// Remove unnecessary unreachables after a throw or rethrow.
+bool WebAssemblyLateEHPrepare::removeUnnecessaryUnreachables(
+ MachineFunction &MF) {
+ bool Changed = false;
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ if (MI.getOpcode() != WebAssembly::THROW &&
+ MI.getOpcode() != WebAssembly::RETHROW)
+ continue;
+ Changed = true;
+
+ // The instruction after the throw should be an unreachable or a branch to
+ // another BB that should eventually lead to an unreachable. Delete it
+ // because throw itself is a terminator, and also delete successors if
+ // any.
+ MBB.erase(std::next(MI.getIterator()), MBB.end());
+ SmallVector<MachineBasicBlock *, 8> Succs(MBB.successors());
+ for (auto *Succ : Succs)
+ if (!Succ->isEHPad())
+ MBB.removeSuccessor(Succ);
+ eraseDeadBBsAndChildren(Succs);
+ }
+ }
+
+ return Changed;
+}
+
+// After the stack is unwound due to a thrown exception, the __stack_pointer
+// global can point to an invalid address. This function inserts instructions
+// that restore the __stack_pointer global.
+bool WebAssemblyLateEHPrepare::restoreStackPointer(MachineFunction &MF) {
+ const auto *FrameLowering = static_cast<const WebAssemblyFrameLowering *>(
+ MF.getSubtarget().getFrameLowering());
+ if (!FrameLowering->needsPrologForEH(MF))
+ return false;
+ bool Changed = false;
+
+ for (auto &MBB : MF) {
+ if (!MBB.isEHPad())
+ continue;
+ Changed = true;
+
+ // Insert __stack_pointer restoring instructions at the beginning of each EH
+ // pad, after the catch instruction. Here it is safe to assume that SP32
+ // holds the latest value of __stack_pointer, because the only exception to
+ // this is when a function uses the red zone, but that only happens
+ // with leaf functions, and we don't restore __stack_pointer in leaf
+ // functions anyway.
+ auto InsertPos = MBB.begin();
+ // Skip EH_LABELs in the beginning of an EH pad if present.
+ while (InsertPos != MBB.end() && InsertPos->isEHLabel())
+ InsertPos++;
+ assert(InsertPos != MBB.end() &&
+ WebAssembly::isCatch(InsertPos->getOpcode()) &&
+ "catch/catch_all should be present in every EH pad at this point");
+ ++InsertPos; // Skip the catch instruction
+ FrameLowering->writeSPToGlobal(FrameLowering->getSPReg(MF), MF, MBB,
+ InsertPos, MBB.begin()->getDebugLoc());
+ }
+ return Changed;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
new file mode 100644
index 000000000000..52226206eb32
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
@@ -0,0 +1,209 @@
+//===-- WebAssemblyLowerBrUnless.cpp - Lower br_unless --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file lowers br_unless into br_if with an inverted condition.
+///
+/// br_unless is not currently in the spec, but it's very convenient for LLVM
+/// to use. This pass allows LLVM to use it, for now.
+///
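+/// For example (a sketch in wasm-like pseudocode):
+///   br_unless $dst, (i32.gt_s $x, $y)
+/// becomes
+///   br_if $dst, (i32.le_s $x, $y)
+/// or, when the condition cannot be inverted in place,
+///   br_if $dst, (i32.eqz $cond)
+///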
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-lower-br_unless"
+
+namespace {
+class WebAssemblyLowerBrUnless final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly Lower br_unless";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyLowerBrUnless() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyLowerBrUnless::ID = 0;
+INITIALIZE_PASS(WebAssemblyLowerBrUnless, DEBUG_TYPE,
+ "Lowers br_unless into inverted br_if", false, false)
+
+FunctionPass *llvm::createWebAssemblyLowerBrUnless() {
+ return new WebAssemblyLowerBrUnless();
+}
+
+bool WebAssemblyLowerBrUnless::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG(dbgs() << "********** Lowering br_unless **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ auto &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ auto &MRI = MF.getRegInfo();
+
+ for (auto &MBB : MF) {
+ for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
+ if (MI.getOpcode() != WebAssembly::BR_UNLESS)
+ continue;
+
+ Register Cond = MI.getOperand(1).getReg();
+ bool Inverted = false;
+
+ // Attempt to invert the condition in place.
+ if (MFI.isVRegStackified(Cond)) {
+ assert(MRI.hasOneDef(Cond));
+ MachineInstr *Def = MRI.getVRegDef(Cond);
+ switch (Def->getOpcode()) {
+ using namespace WebAssembly;
+ case EQ_I32:
+ Def->setDesc(TII.get(NE_I32));
+ Inverted = true;
+ break;
+ case NE_I32:
+ Def->setDesc(TII.get(EQ_I32));
+ Inverted = true;
+ break;
+ case GT_S_I32:
+ Def->setDesc(TII.get(LE_S_I32));
+ Inverted = true;
+ break;
+ case GE_S_I32:
+ Def->setDesc(TII.get(LT_S_I32));
+ Inverted = true;
+ break;
+ case LT_S_I32:
+ Def->setDesc(TII.get(GE_S_I32));
+ Inverted = true;
+ break;
+ case LE_S_I32:
+ Def->setDesc(TII.get(GT_S_I32));
+ Inverted = true;
+ break;
+ case GT_U_I32:
+ Def->setDesc(TII.get(LE_U_I32));
+ Inverted = true;
+ break;
+ case GE_U_I32:
+ Def->setDesc(TII.get(LT_U_I32));
+ Inverted = true;
+ break;
+ case LT_U_I32:
+ Def->setDesc(TII.get(GE_U_I32));
+ Inverted = true;
+ break;
+ case LE_U_I32:
+ Def->setDesc(TII.get(GT_U_I32));
+ Inverted = true;
+ break;
+ case EQ_I64:
+ Def->setDesc(TII.get(NE_I64));
+ Inverted = true;
+ break;
+ case NE_I64:
+ Def->setDesc(TII.get(EQ_I64));
+ Inverted = true;
+ break;
+ case GT_S_I64:
+ Def->setDesc(TII.get(LE_S_I64));
+ Inverted = true;
+ break;
+ case GE_S_I64:
+ Def->setDesc(TII.get(LT_S_I64));
+ Inverted = true;
+ break;
+ case LT_S_I64:
+ Def->setDesc(TII.get(GE_S_I64));
+ Inverted = true;
+ break;
+ case LE_S_I64:
+ Def->setDesc(TII.get(GT_S_I64));
+ Inverted = true;
+ break;
+ case GT_U_I64:
+ Def->setDesc(TII.get(LE_U_I64));
+ Inverted = true;
+ break;
+ case GE_U_I64:
+ Def->setDesc(TII.get(LT_U_I64));
+ Inverted = true;
+ break;
+ case LT_U_I64:
+ Def->setDesc(TII.get(GE_U_I64));
+ Inverted = true;
+ break;
+ case LE_U_I64:
+ Def->setDesc(TII.get(GT_U_I64));
+ Inverted = true;
+ break;
+ case EQ_F32:
+ Def->setDesc(TII.get(NE_F32));
+ Inverted = true;
+ break;
+ case NE_F32:
+ Def->setDesc(TII.get(EQ_F32));
+ Inverted = true;
+ break;
+ case EQ_F64:
+ Def->setDesc(TII.get(NE_F64));
+ Inverted = true;
+ break;
+ case NE_F64:
+ Def->setDesc(TII.get(EQ_F64));
+ Inverted = true;
+ break;
+ case EQZ_I32: {
+ // Invert an eqz by replacing it with its operand.
+ Cond = Def->getOperand(1).getReg();
+ Def->eraseFromParent();
+ Inverted = true;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ // If we weren't able to invert the condition in place, insert an
+ // instruction to invert it.
+ if (!Inverted) {
+ Register Tmp = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII.get(WebAssembly::EQZ_I32), Tmp)
+ .addReg(Cond);
+ MFI.stackifyVReg(MRI, Tmp);
+ Cond = Tmp;
+ Inverted = true;
+ }
+
+ // The br_unless condition has now been inverted. Insert a br_if and
+ // delete the br_unless.
+ assert(Inverted);
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII.get(WebAssembly::BR_IF))
+ .add(MI.getOperand(0))
+ .addReg(Cond);
+ MBB.erase(&MI);
+ }
+ }
+
+ return true;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
new file mode 100644
index 000000000000..77e6640d5a82
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
@@ -0,0 +1,1870 @@
+//=== WebAssemblyLowerEmscriptenEHSjLj.cpp - Lower exceptions for Emscripten =//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file lowers exception-related instructions and setjmp/longjmp function
+/// calls to use Emscripten's library functions. The pass uses JavaScript's try
+/// and catch mechanism in case of Emscripten EH/SjLj and Wasm EH intrinsics in
+/// case of Wasm SjLj.
+///
+/// * Emscripten exception handling
+/// This pass lowers invokes and landingpads into library functions in JS glue
+/// code. Invokes are lowered into function wrappers called invoke wrappers that
+/// exist in JS side, which wraps the original function call with JS try-catch.
+/// If an exception occurred, cxa_throw() function in JS side sets some
+/// variables (see below) so we can check whether an exception occurred from
+/// wasm code and handle it appropriately.
+///
+/// * Emscripten setjmp-longjmp handling
+/// This pass lowers setjmp to a reasonably performant approach for Emscripten.
+/// The idea is that each block with a setjmp is broken up into two parts: the
+/// part containing setjmp and the part right after the setjmp. The latter part
+/// is either reached from the setjmp, or later from a longjmp. To handle the
+/// longjmp, all calls that might longjmp are also called using invoke wrappers
+/// and thus JS try-catch. The JS longjmp() function also sets some variables
+/// so we can check whether a longjmp occurred from wasm code. Each block with a
+/// function call that might longjmp is also split up after the longjmp call.
+/// After the longjmp call, we check whether a longjmp occurred, and if it did,
+/// which setjmp it corresponds to, and jump to the right post-setjmp block.
+/// We assume setjmp-longjmp handling always runs after EH handling, which means
+/// we don't expect any exception-related instructions when SjLj runs.
+/// FIXME Currently this scheme does not support indirect call of setjmp,
+/// because of the limitation of the scheme itself. fastcomp does not support it
+/// either.
+///
+/// In detail, this pass does following things:
+///
+/// 1) Assumes the existence of global variables: __THREW__, __threwValue
+/// __THREW__ and __threwValue are defined in compiler-rt in Emscripten.
+/// These variables are used for both exceptions and setjmp/longjmps.
+/// __THREW__ indicates whether an exception or a longjmp occurred or not. 0
+/// means nothing occurred, 1 means an exception occurred, and other numbers
+/// mean a longjmp occurred. In the case of longjmp, the __THREW__ variable
+/// indicates the setjmp buffer that the longjmp corresponds to.
+/// __threwValue is 0 for exceptions, and the argument to longjmp in case of
+/// longjmp.
+///
+/// * Emscripten exception handling
+///
+/// 2) We assume the existence of setThrew and setTempRet0/getTempRet0 functions
+/// at link time. setThrew exists in Emscripten's compiler-rt:
+///
+/// void setThrew(uintptr_t threw, int value) {
+/// if (__THREW__ == 0) {
+/// __THREW__ = threw;
+/// __threwValue = value;
+/// }
+/// }
+///
+/// setTempRet0 is called from __cxa_find_matching_catch() in JS glue code.
+/// In exception handling, getTempRet0 indicates the type of an exception
+/// caught, and in setjmp/longjmp, it means the second argument to the longjmp
+/// function.
+///
+/// 3) Lower
+/// invoke @func(arg1, arg2) to label %invoke.cont unwind label %lpad
+/// into
+/// __THREW__ = 0;
+/// call @__invoke_SIG(func, arg1, arg2)
+/// %__THREW__.val = __THREW__;
+/// __THREW__ = 0;
+/// if (%__THREW__.val == 1)
+/// goto %lpad
+/// else
+/// goto %invoke.cont
+/// SIG is a mangled string generated based on the LLVM IR-level function
+/// signature. After LLVM IR types are lowered to the target wasm types,
+/// the names for these wrappers will change based on wasm types as well,
+/// as in invoke_vi (function takes an int and returns void). The bodies of
+/// these wrappers will be generated in JS glue code, and inside those
+/// wrappers we use JS try-catch to generate actual exception effects. It
+/// also calls the original callee function. An example wrapper in JS code
+/// would look like this:
+/// function invoke_vi(index,a1) {
+/// try {
+/// Module["dynCall_vi"](index,a1); // This calls original callee
+/// } catch(e) {
+/// if (typeof e !== 'number' && e !== 'longjmp') throw e;
+/// _setThrew(1, 0); // setThrew is called here
+/// }
+/// }
+/// If an exception is thrown, __THREW__ will be set to true in a wrapper,
+/// so we can jump to the right BB based on this value.
+///
+/// 4) Lower
+/// %val = landingpad catch c1 catch c2 catch c3 ...
+/// ... use %val ...
+/// into
+/// %fmc = call @__cxa_find_matching_catch_N(c1, c2, c3, ...)
+/// %val = {%fmc, getTempRet0()}
+/// ... use %val ...
+/// Here N is a number calculated based on the number of clauses.
+/// setTempRet0 is called from __cxa_find_matching_catch() in JS glue code.
+///
+/// 5) Lower
+/// resume {%a, %b}
+/// into
+/// call @__resumeException(%a)
+/// where __resumeException() is a function in JS glue code.
+///
+/// 6) Lower
+/// call @llvm.eh.typeid.for(type) (intrinsic)
+/// into
+/// call @llvm_eh_typeid_for(type)
+/// llvm_eh_typeid_for function will be generated in JS glue code.
+///
+/// * Emscripten setjmp / longjmp handling
+///
+/// If there are calls to longjmp()
+///
+/// 1) Lower
+/// longjmp(env, val)
+/// into
+/// emscripten_longjmp(env, val)
+///
+/// If there are calls to setjmp()
+///
+/// 2) In the function entry that calls setjmp, initialize setjmpTable and
+/// setjmpTableSize as follows:
+/// setjmpTableSize = 4;
+/// setjmpTable = (int *) malloc(40);
+/// setjmpTable[0] = 0;
+/// setjmpTable and setjmpTableSize are used to call saveSetjmp() function in
+/// Emscripten compiler-rt.
+///
+/// 3) Lower
+/// setjmp(env)
+/// into
+/// setjmpTable = saveSetjmp(env, label, setjmpTable, setjmpTableSize);
+/// setjmpTableSize = getTempRet0();
+/// For each dynamic setjmp call, setjmpTable stores its ID (a number which
+/// is incrementally assigned from 0) and its label (a unique number that
+/// represents each callsite of setjmp). When we need more entries in
+/// setjmpTable, it is reallocated in saveSetjmp() in Emscripten's
+/// compiler-rt and it will return the new table address, and assign the new
+/// table size in setTempRet0(). saveSetjmp also stores the setjmp's ID into
+/// the buffer 'env'. A BB with setjmp is split into two after the setjmp call
+/// in order to make the post-setjmp BB a possible destination of the longjmp.
+///
+/// 4) Lower every call that might longjmp into
+/// __THREW__ = 0;
+/// call @__invoke_SIG(func, arg1, arg2)
+/// %__THREW__.val = __THREW__;
+/// __THREW__ = 0;
+/// %__threwValue.val = __threwValue;
+/// if (%__THREW__.val != 0 & %__threwValue.val != 0) {
+/// %label = testSetjmp(mem[%__THREW__.val], setjmpTable,
+/// setjmpTableSize);
+/// if (%label == 0)
+/// emscripten_longjmp(%__THREW__.val, %__threwValue.val);
+/// setTempRet0(%__threwValue.val);
+/// } else {
+/// %label = -1;
+/// }
+/// longjmp_result = getTempRet0();
+/// switch %label {
+/// label 1: goto post-setjmp BB 1
+/// label 2: goto post-setjmp BB 2
+/// ...
+/// default: goto split next BB
+/// }
+/// testSetjmp examines setjmpTable to see if there is a matching setjmp
+/// call. After calling an invoke wrapper, if a longjmp occurred, __THREW__
+/// will be the address of matching jmp_buf buffer and __threwValue be the
+/// second argument to longjmp. mem[%__THREW__.val] is a setjmp ID that is
+/// stored in saveSetjmp. testSetjmp returns a setjmp label, a unique ID to
+/// each setjmp callsite. Label 0 means this longjmp buffer does not
+/// correspond to one of the setjmp callsites in this function, so in this
+/// case we just chain the longjmp to the caller. Label -1 means no longjmp
+/// occurred. Otherwise we jump to the right post-setjmp BB based on the
+/// label.
+///
+/// * Wasm setjmp / longjmp handling
+/// This mode still uses some Emscripten library functions but not JavaScript's
+/// try-catch mechanism. It instead uses Wasm exception handling intrinsics,
+/// which will be lowered to exception handling instructions.
+///
+/// If there are calls to longjmp()
+///
+/// 1) Lower
+/// longjmp(env, val)
+/// into
+/// __wasm_longjmp(env, val)
+///
+/// If there are calls to setjmp()
+///
+/// 2) and 3): The same as 2) and 3) in Emscripten SjLj.
+/// (setjmpTable/setjmpTableSize initialization + setjmp callsite
+/// transformation)
+///
+/// 4) Create a catchpad with a wasm.catch() intrinsic, which returns the value
+/// thrown by __wasm_longjmp function. In Emscripten library, we have this
+/// struct:
+///
+/// struct __WasmLongjmpArgs {
+/// void *env;
+/// int val;
+/// };
+/// struct __WasmLongjmpArgs __wasm_longjmp_args;
+///
+/// The thrown value here is a pointer to the __wasm_longjmp_args struct
+/// object. We use this struct to transfer two values by throwing a single
+/// value. Wasm throw and catch instructions are capable of throwing and
+/// catching multiple values, but that also requires multivalue support, which
+/// is currently not very reliable.
+/// TODO Switch to throwing and catching two values without using the struct
+///
+/// All longjmpable function calls will be converted to an invoke that will
+/// unwind to this catchpad in case a longjmp occurs. Within the catchpad, we
+/// test the thrown values using testSetjmp function as we do for Emscripten
+/// SjLj. The main difference is, in Emscripten SjLj, we need to transform every
+/// longjmpable callsite into a sequence of code including a testSetjmp() call; in
+/// Wasm SjLj we do the testing in only one place, in this catchpad.
+///
+/// After calling testSetjmp(), if the longjmp does not correspond to one of
+/// the setjmps within the current function, we rethrow the longjmp by calling
+/// __wasm_longjmp(). If it corresponds to one of the setjmps in the function,
+/// we jump to the beginning of the function, which contains a switch
+/// to each post-setjmp BB. Again, in Emscripten SjLj, this switch is added for
+/// every longjmpable callsite; in Wasm SjLj we do this only once at the top of
+/// the function (after setjmpTable/setjmpTableSize initialization).
+///
+/// The below is the pseudocode for what we have described
+///
+/// entry:
+/// Initialize setjmpTable and setjmpTableSize
+///
+/// setjmp.dispatch:
+/// switch %label {
+/// label 1: goto post-setjmp BB 1
+/// label 2: goto post-setjmp BB 2
+/// ...
+/// default: goto split next BB
+/// }
+/// ...
+///
+/// bb:
+/// invoke void @foo() ;; foo is a longjmpable function
+/// to label %next unwind label %catch.dispatch.longjmp
+/// ...
+///
+/// catch.dispatch.longjmp:
+/// %0 = catchswitch within none [label %catch.longjmp] unwind to caller
+///
+/// catch.longjmp:
+/// %longjmp.args = wasm.catch() ;; struct __WasmLongjmpArgs
+/// %env = load 'env' field from __WasmLongjmpArgs
+/// %val = load 'val' field from __WasmLongjmpArgs
+/// %label = testSetjmp(mem[%env], setjmpTable, setjmpTableSize);
+/// if (%label == 0)
+/// __wasm_longjmp(%env, %val)
+/// catchret to %setjmp.dispatch
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyTargetMachine.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/CodeGen/WasmEHFuncInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/IntrinsicsWebAssembly.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/SSAUpdater.h"
+#include "llvm/Transforms/Utils/SSAUpdaterBulk.h"
+#include <set>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-lower-em-ehsjlj"
+
+static cl::list<std::string>
+ EHAllowlist("emscripten-cxx-exceptions-allowed",
+ cl::desc("The list of function names in which Emscripten-style "
+ "exception handling is enabled (see emscripten "
+ "EMSCRIPTEN_CATCHING_ALLOWED options)"),
+ cl::CommaSeparated);
+
+namespace {
+class WebAssemblyLowerEmscriptenEHSjLj final : public ModulePass {
+ bool EnableEmEH; // Enable Emscripten exception handling
+ bool EnableEmSjLj; // Enable Emscripten setjmp/longjmp handling
+ bool EnableWasmSjLj; // Enable Wasm setjmp/longjmp handling
+ bool DoSjLj; // Whether we actually perform setjmp/longjmp handling
+
+ GlobalVariable *ThrewGV = nullptr; // __THREW__ (Emscripten)
+ GlobalVariable *ThrewValueGV = nullptr; // __threwValue (Emscripten)
+ Function *GetTempRet0F = nullptr; // getTempRet0() (Emscripten)
+ Function *SetTempRet0F = nullptr; // setTempRet0() (Emscripten)
+ Function *ResumeF = nullptr; // __resumeException() (Emscripten)
+ Function *EHTypeIDF = nullptr; // llvm.eh.typeid.for() (intrinsic)
+ Function *EmLongjmpF = nullptr; // emscripten_longjmp() (Emscripten)
+ Function *SaveSetjmpF = nullptr; // saveSetjmp() (Emscripten)
+ Function *TestSetjmpF = nullptr; // testSetjmp() (Emscripten)
+ Function *WasmLongjmpF = nullptr; // __wasm_longjmp() (Emscripten)
+ Function *CatchF = nullptr; // wasm.catch() (intrinsic)
+
+ // type of 'struct __WasmLongjmpArgs' defined in emscripten
+ Type *LongjmpArgsTy = nullptr;
+
+ // __cxa_find_matching_catch_N functions.
+ // Indexed by the number of clauses in an original landingpad instruction.
+ DenseMap<int, Function *> FindMatchingCatches;
+ // Map of <function signature string, invoke_ wrappers>
+ StringMap<Function *> InvokeWrappers;
+ // Set of allowed function names for exception handling
+ std::set<std::string> EHAllowlistSet;
+ // Functions that contains calls to setjmp
+ SmallPtrSet<Function *, 8> SetjmpUsers;
+
+ StringRef getPassName() const override {
+ return "WebAssembly Lower Emscripten Exceptions";
+ }
+
+ using InstVector = SmallVectorImpl<Instruction *>;
+ bool runEHOnFunction(Function &F);
+ bool runSjLjOnFunction(Function &F);
+ void handleLongjmpableCallsForEmscriptenSjLj(
+ Function &F, InstVector &SetjmpTableInsts,
+ InstVector &SetjmpTableSizeInsts,
+ SmallVectorImpl<PHINode *> &SetjmpRetPHIs);
+ void
+ handleLongjmpableCallsForWasmSjLj(Function &F, InstVector &SetjmpTableInsts,
+ InstVector &SetjmpTableSizeInsts,
+ SmallVectorImpl<PHINode *> &SetjmpRetPHIs);
+ Function *getFindMatchingCatch(Module &M, unsigned NumClauses);
+
+ Value *wrapInvoke(CallBase *CI);
+ void wrapTestSetjmp(BasicBlock *BB, DebugLoc DL, Value *Threw,
+ Value *SetjmpTable, Value *SetjmpTableSize, Value *&Label,
+ Value *&LongjmpResult, BasicBlock *&CallEmLongjmpBB,
+ PHINode *&CallEmLongjmpBBThrewPHI,
+ PHINode *&CallEmLongjmpBBThrewValuePHI,
+ BasicBlock *&EndBB);
+ Function *getInvokeWrapper(CallBase *CI);
+
+ bool areAllExceptionsAllowed() const { return EHAllowlistSet.empty(); }
+ bool supportsException(const Function *F) const {
+ return EnableEmEH && (areAllExceptionsAllowed() ||
+ EHAllowlistSet.count(std::string(F->getName())));
+ }
+ void replaceLongjmpWith(Function *LongjmpF, Function *NewF);
+
+ void rebuildSSA(Function &F);
+
+public:
+ static char ID;
+
+ WebAssemblyLowerEmscriptenEHSjLj()
+ : ModulePass(ID), EnableEmEH(WebAssembly::WasmEnableEmEH),
+ EnableEmSjLj(WebAssembly::WasmEnableEmSjLj),
+ EnableWasmSjLj(WebAssembly::WasmEnableSjLj) {
+ assert(!(EnableEmSjLj && EnableWasmSjLj) &&
+ "Two SjLj modes cannot be turned on at the same time");
+ assert(!(EnableEmEH && EnableWasmSjLj) &&
+ "Wasm SjLj should be only used with Wasm EH");
+ EHAllowlistSet.insert(EHAllowlist.begin(), EHAllowlist.end());
+ }
+ bool runOnModule(Module &M) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DominatorTreeWrapperPass>();
+ }
+};
+} // End anonymous namespace
+
+char WebAssemblyLowerEmscriptenEHSjLj::ID = 0;
+INITIALIZE_PASS(WebAssemblyLowerEmscriptenEHSjLj, DEBUG_TYPE,
+ "WebAssembly Lower Emscripten Exceptions / Setjmp / Longjmp",
+ false, false)
+
+ModulePass *llvm::createWebAssemblyLowerEmscriptenEHSjLj() {
+ return new WebAssemblyLowerEmscriptenEHSjLj();
+}
+
+static bool canThrow(const Value *V) {
+ if (const auto *F = dyn_cast<const Function>(V)) {
+ // Intrinsics cannot throw
+ if (F->isIntrinsic())
+ return false;
+ StringRef Name = F->getName();
+ // Leave setjmp and longjmp (mostly) alone; we process them properly later.
+ if (Name == "setjmp" || Name == "longjmp" || Name == "emscripten_longjmp")
+ return false;
+ return !F->doesNotThrow();
+ }
+ // not a function, so an indirect call - can throw, we can't tell
+ return true;
+}
+
+// Get a thread-local global variable with the given name. If it doesn't exist
+// declare it, which will generate an import and assume that it will exist at
+// link time.
+static GlobalVariable *getGlobalVariable(Module &M, Type *Ty,
+ WebAssemblyTargetMachine &TM,
+ const char *Name) {
+ auto *GV = dyn_cast<GlobalVariable>(M.getOrInsertGlobal(Name, Ty));
+ if (!GV)
+ report_fatal_error(Twine("unable to create global: ") + Name);
+
+ // Variables created by this function are thread local. If the target does not
+ // support TLS, we depend on CoalesceFeaturesAndStripAtomics to downgrade them
+ // to non-thread-local ones, in which case we don't allow this object to be
+ // linked with other objects using shared memory.
+ GV->setThreadLocalMode(GlobalValue::GeneralDynamicTLSModel);
+ return GV;
+}
+
+// Simple function name mangler.
+// This function simply takes LLVM's string representation of parameter types
+// and concatenates them with '_'. The result contains non-alphanumeric
+// characters, but llc is ok with that, and we need to postprocess these names
+// after the lowering phase anyway.
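+// For example (a sketch, not an exhaustive mapping): a callee of LLVM type
+// 'void (i32, float)' yields the signature "void_i32_float", so its invoke
+// wrapper (see getInvokeWrapper below) is named "__invoke_void_i32_float".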
+static std::string getSignature(FunctionType *FTy) {
+ std::string Sig;
+ raw_string_ostream OS(Sig);
+ OS << *FTy->getReturnType();
+ for (Type *ParamTy : FTy->params())
+ OS << "_" << *ParamTy;
+ if (FTy->isVarArg())
+ OS << "_...";
+ Sig = OS.str();
+ erase_if(Sig, isSpace);
+ // When s2wasm parses .s file, a comma means the end of an argument. So a
+ // mangled function name can contain any character but a comma.
+ std::replace(Sig.begin(), Sig.end(), ',', '.');
+ return Sig;
+}
+
+static Function *getEmscriptenFunction(FunctionType *Ty, const Twine &Name,
+ Module *M) {
+ Function *F = Function::Create(Ty, GlobalValue::ExternalLinkage, Name, M);
+ // Tell the linker that this function is expected to be imported from the
+ // 'env' module.
+ if (!F->hasFnAttribute("wasm-import-module")) {
+ llvm::AttrBuilder B(M->getContext());
+ B.addAttribute("wasm-import-module", "env");
+ F->addFnAttrs(B);
+ }
+ if (!F->hasFnAttribute("wasm-import-name")) {
+ llvm::AttrBuilder B(M->getContext());
+ B.addAttribute("wasm-import-name", F->getName());
+ F->addFnAttrs(B);
+ }
+ return F;
+}
+
+// Returns an integer type for the target architecture's address space.
+// i32 for wasm32 and i64 for wasm64.
+static Type *getAddrIntType(Module *M) {
+ IRBuilder<> IRB(M->getContext());
+ return IRB.getIntNTy(M->getDataLayout().getPointerSizeInBits());
+}
+
+// Returns an integer pointer type for the target architecture's address space.
+// i32* for wasm32 and i64* for wasm64. With opaque pointers this is just a ptr
+// in address space zero.
+static Type *getAddrPtrType(Module *M) {
+ return PointerType::getUnqual(M->getContext());
+}
+
+// Returns an integer whose type is the integer type for the target's address
+// space. Returns (i32 C) for wasm32 and (i64 C) for wasm64, when C is the
+// integer.
+static Value *getAddrSizeInt(Module *M, uint64_t C) {
+ IRBuilder<> IRB(M->getContext());
+ return IRB.getIntN(M->getDataLayout().getPointerSizeInBits(), C);
+}
+
+// Returns __cxa_find_matching_catch_N function, where N = NumClauses + 2.
+// This is because a landingpad instruction contains two more arguments, a
+// personality function and a cleanup bit, and __cxa_find_matching_catch_N
+// functions are named after the number of arguments in the original landingpad
+// instruction.
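+// For example, a landingpad with two catch clauses is lowered to a call to
+// __cxa_find_matching_catch_4.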
+Function *
+WebAssemblyLowerEmscriptenEHSjLj::getFindMatchingCatch(Module &M,
+ unsigned NumClauses) {
+ if (FindMatchingCatches.count(NumClauses))
+ return FindMatchingCatches[NumClauses];
+ PointerType *Int8PtrTy = PointerType::getUnqual(M.getContext());
+ SmallVector<Type *, 16> Args(NumClauses, Int8PtrTy);
+ FunctionType *FTy = FunctionType::get(Int8PtrTy, Args, false);
+ Function *F = getEmscriptenFunction(
+ FTy, "__cxa_find_matching_catch_" + Twine(NumClauses + 2), &M);
+ FindMatchingCatches[NumClauses] = F;
+ return F;
+}
+
+// Generate an invoke wrapper sequence with preamble and postamble
+// Preamble:
+// __THREW__ = 0;
+// Postamble:
+// %__THREW__.val = __THREW__; __THREW__ = 0;
+// Returns %__THREW__.val, which indicates whether an exception is thrown (or
+// whether longjmp occurred), for future use.
+Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallBase *CI) {
+ Module *M = CI->getModule();
+ LLVMContext &C = M->getContext();
+
+ IRBuilder<> IRB(C);
+ IRB.SetInsertPoint(CI);
+
+ // Pre-invoke
+ // __THREW__ = 0;
+ IRB.CreateStore(getAddrSizeInt(M, 0), ThrewGV);
+
+ // Invoke function wrapper in JavaScript
+ SmallVector<Value *, 16> Args;
+ // Put the pointer to the callee as first argument, so it can be called
+ // within the invoke wrapper later
+ Args.push_back(CI->getCalledOperand());
+ Args.append(CI->arg_begin(), CI->arg_end());
+ CallInst *NewCall = IRB.CreateCall(getInvokeWrapper(CI), Args);
+ NewCall->takeName(CI);
+ NewCall->setCallingConv(CallingConv::WASM_EmscriptenInvoke);
+ NewCall->setDebugLoc(CI->getDebugLoc());
+
+ // Because we added the pointer to the callee as first argument, all
+ // argument attribute indices have to be incremented by one.
+ SmallVector<AttributeSet, 8> ArgAttributes;
+ const AttributeList &InvokeAL = CI->getAttributes();
+
+ // No attributes for the callee pointer.
+ ArgAttributes.push_back(AttributeSet());
+ // Copy the argument attributes from the original
+ for (unsigned I = 0, E = CI->arg_size(); I < E; ++I)
+ ArgAttributes.push_back(InvokeAL.getParamAttrs(I));
+
+ AttrBuilder FnAttrs(CI->getContext(), InvokeAL.getFnAttrs());
+ if (auto Args = FnAttrs.getAllocSizeArgs()) {
+ // The allocsize attribute (if any) refers to parameters by index and needs
+ // to be adjusted.
+ auto [SizeArg, NEltArg] = *Args;
+ SizeArg += 1;
+ if (NEltArg)
+ NEltArg = *NEltArg + 1;
+ FnAttrs.addAllocSizeAttr(SizeArg, NEltArg);
+ }
+ // If the callee has the 'noreturn' attribute, we need to remove it, because
+ // we expect invoke wrappers to return.
+ FnAttrs.removeAttribute(Attribute::NoReturn);
+
+ // Reconstruct the AttributesList based on the vector we constructed.
+ AttributeList NewCallAL = AttributeList::get(
+ C, AttributeSet::get(C, FnAttrs), InvokeAL.getRetAttrs(), ArgAttributes);
+ NewCall->setAttributes(NewCallAL);
+
+ CI->replaceAllUsesWith(NewCall);
+
+ // Post-invoke
+ // %__THREW__.val = __THREW__; __THREW__ = 0;
+ Value *Threw =
+ IRB.CreateLoad(getAddrIntType(M), ThrewGV, ThrewGV->getName() + ".val");
+ IRB.CreateStore(getAddrSizeInt(M, 0), ThrewGV);
+ return Threw;
+}
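+
+// For illustration only, a sketch of wrapInvoke's effect (names and exact IR
+// shapes are assumed, not emitted verbatim). On wasm32, a call
+// %ret = call i32 @foo(i32 %x)
+// becomes
+// store i32 0, ptr @__THREW__ ;; preamble
+// %ret = call i32 @__invoke_ii(ptr @foo, i32 %x) ;; call via the JS wrapper
+// %__THREW__.val = load i32, ptr @__THREW__ ;; postamble
+// store i32 0, ptr @__THREW__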
+
+// Get matching invoke wrapper based on callee signature
+Function *WebAssemblyLowerEmscriptenEHSjLj::getInvokeWrapper(CallBase *CI) {
+ Module *M = CI->getModule();
+ SmallVector<Type *, 16> ArgTys;
+ FunctionType *CalleeFTy = CI->getFunctionType();
+
+ std::string Sig = getSignature(CalleeFTy);
+ if (InvokeWrappers.contains(Sig))
+ return InvokeWrappers[Sig];
+
+ // Put the pointer to the callee as first argument
+ ArgTys.push_back(PointerType::getUnqual(CalleeFTy));
+ // Add argument types
+ ArgTys.append(CalleeFTy->param_begin(), CalleeFTy->param_end());
+
+ FunctionType *FTy = FunctionType::get(CalleeFTy->getReturnType(), ArgTys,
+ CalleeFTy->isVarArg());
+ Function *F = getEmscriptenFunction(FTy, "__invoke_" + Sig, M);
+ InvokeWrappers[Sig] = F;
+ return F;
+}
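+
+// Note: getSignature() (defined earlier in this file) encodes the callee type
+// using Emscripten's convention, roughly one letter per type with the return
+// type first ('v' void, 'i' i32, 'j' i64, 'f' f32, 'd' f64). So, as an
+// assumed example, a callee of type void(i32, i32) gets the wrapper
+// __invoke_vii, which Emscripten's JS glue provides at link time.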
+
+static bool canLongjmp(const Value *Callee) {
+ if (auto *CalleeF = dyn_cast<Function>(Callee))
+ if (CalleeF->isIntrinsic())
+ return false;
+
+ // Attempting to transform inline assembly will result in something like:
+ // call void @__invoke_void(void ()* asm ...)
+ // which is invalid because inline assembly blocks do not have addresses
+ // and can't be passed by pointer. The result is a crash with illegal IR.
+ if (isa<InlineAsm>(Callee))
+ return false;
+ StringRef CalleeName = Callee->getName();
+
+ // TODO Include more functions or consider checking with mangled prefixes
+
+ // The reason we include malloc/free here is to exclude the malloc/free
+ // calls generated in setjmp prep / cleanup routines.
+ if (CalleeName == "setjmp" || CalleeName == "malloc" || CalleeName == "free")
+ return false;
+
+ // These are functions in Emscripten's JS glue code or compiler-rt that
+ // cannot longjmp
+ if (CalleeName == "__resumeException" || CalleeName == "llvm_eh_typeid_for" ||
+ CalleeName == "saveSetjmp" || CalleeName == "testSetjmp" ||
+ CalleeName == "getTempRet0" || CalleeName == "setTempRet0")
+ return false;
+
+ // __cxa_find_matching_catch_N functions cannot longjmp
+ if (Callee->getName().starts_with("__cxa_find_matching_catch_"))
+ return false;
+
+ // Exception-catching related functions
+ //
+ // We intentionally treat __cxa_end_catch as longjmpable in Wasm SjLj even
+ // though it surely cannot longjmp, in order to maintain the unwind
+ // relationship from all existing catchpads (and calls within them) to
+ // catch.dispatch.longjmp.
+ //
+ // In Wasm EH + Wasm SjLj, we
+ // 1. Make all catchswitch and cleanuppad that unwind to caller unwind to
+ // catch.dispatch.longjmp instead
+ // 2. Convert all longjmpable calls to invokes that unwind to
+ // catch.dispatch.longjmp
+ // But catchswitch BBs are removed in isel, so if an EH catchswitch (generated
+ // from an exception)'s catchpad does not contain any calls that are converted
+ // into invokes unwinding to catch.dispatch.longjmp, this unwind relationship
+ // (EH catchswitch BB -> catch.dispatch.longjmp BB) is lost and
+ // catch.dispatch.longjmp BB can be placed before the EH catchswitch BB in
+ // CFGSort.
+ // int ret = setjmp(buf);
+ // try {
+ // foo(); // longjmps
+ // } catch (...) {
+ // }
+ // Then in this code, if 'foo' longjmps, it first unwinds to 'catch (...)'
+ // catchswitch, and is not caught by that catchswitch because it is a longjmp,
+ // then it should next unwind to catch.dispatch.longjmp BB. But if this 'catch
+ // (...)' catchswitch -> catch.dispatch.longjmp unwind relationship is lost,
+ // it will not unwind to catch.dispatch.longjmp, producing an incorrect
+ // result.
+ //
+ // Every catchpad generated by Wasm C++ contains __cxa_end_catch, so we
+ // intentionally treat it as longjmpable to work around this problem. This is
+ // a hacky fix but an easy one.
+ //
+ // The comment block in findWasmUnwindDestinations() in
+ // SelectionDAGBuilder.cpp is addressing a similar problem.
+ if (CalleeName == "__cxa_end_catch")
+ return WebAssembly::WasmEnableSjLj;
+ if (CalleeName == "__cxa_begin_catch" ||
+ CalleeName == "__cxa_allocate_exception" || CalleeName == "__cxa_throw" ||
+ CalleeName == "__clang_call_terminate")
+ return false;
+
+ // std::terminate, which is generated when another exception occurs while
+ // handling an exception, cannot longjmp.
+ if (CalleeName == "_ZSt9terminatev")
+ return false;
+
+ // Otherwise we don't know
+ return true;
+}
+
+static bool isEmAsmCall(const Value *Callee) {
+ StringRef CalleeName = Callee->getName();
+ // This is an exhaustive list from Emscripten's <emscripten/em_asm.h>.
+ return CalleeName == "emscripten_asm_const_int" ||
+ CalleeName == "emscripten_asm_const_double" ||
+ CalleeName == "emscripten_asm_const_int_sync_on_main_thread" ||
+ CalleeName == "emscripten_asm_const_double_sync_on_main_thread" ||
+ CalleeName == "emscripten_asm_const_async_on_main_thread";
+}
+
+// Generate testSetjmp function call sequence with preamble and postamble.
+// The code this generates is equivalent to the following pseudocode:
+// %__threwValue.val = __threwValue;
+// if (%__THREW__.val != 0 & %__threwValue.val != 0) {
+// %label = testSetjmp(mem[%__THREW__.val], setjmpTable, setjmpTableSize);
+// if (%label == 0)
+// emscripten_longjmp(%__THREW__.val, %__threwValue.val);
+// setTempRet0(%__threwValue.val);
+// } else {
+// %label = -1;
+// }
+// %longjmp_result = getTempRet0();
+//
+// As output parameters, returns %label, %longjmp_result, and the BB in which
+// the last instruction (%longjmp_result = ...) resides.
+void WebAssemblyLowerEmscriptenEHSjLj::wrapTestSetjmp(
+ BasicBlock *BB, DebugLoc DL, Value *Threw, Value *SetjmpTable,
+ Value *SetjmpTableSize, Value *&Label, Value *&LongjmpResult,
+ BasicBlock *&CallEmLongjmpBB, PHINode *&CallEmLongjmpBBThrewPHI,
+ PHINode *&CallEmLongjmpBBThrewValuePHI, BasicBlock *&EndBB) {
+ Function *F = BB->getParent();
+ Module *M = F->getParent();
+ LLVMContext &C = M->getContext();
+ IRBuilder<> IRB(C);
+ IRB.SetCurrentDebugLocation(DL);
+
+ // if (%__THREW__.val != 0 & %__threwValue.val != 0)
+ IRB.SetInsertPoint(BB);
+ BasicBlock *ThenBB1 = BasicBlock::Create(C, "if.then1", F);
+ BasicBlock *ElseBB1 = BasicBlock::Create(C, "if.else1", F);
+ BasicBlock *EndBB1 = BasicBlock::Create(C, "if.end", F);
+ Value *ThrewCmp = IRB.CreateICmpNE(Threw, getAddrSizeInt(M, 0));
+ Value *ThrewValue = IRB.CreateLoad(IRB.getInt32Ty(), ThrewValueGV,
+ ThrewValueGV->getName() + ".val");
+ Value *ThrewValueCmp = IRB.CreateICmpNE(ThrewValue, IRB.getInt32(0));
+ Value *Cmp1 = IRB.CreateAnd(ThrewCmp, ThrewValueCmp, "cmp1");
+ IRB.CreateCondBr(Cmp1, ThenBB1, ElseBB1);
+
+ // Generate call.em.longjmp BB once and share it within the function
+ if (!CallEmLongjmpBB) {
+ // emscripten_longjmp(%__THREW__.val, %__threwValue.val);
+ CallEmLongjmpBB = BasicBlock::Create(C, "call.em.longjmp", F);
+ IRB.SetInsertPoint(CallEmLongjmpBB);
+ CallEmLongjmpBBThrewPHI = IRB.CreatePHI(getAddrIntType(M), 4, "threw.phi");
+ CallEmLongjmpBBThrewValuePHI =
+ IRB.CreatePHI(IRB.getInt32Ty(), 4, "threwvalue.phi");
+ CallEmLongjmpBBThrewPHI->addIncoming(Threw, ThenBB1);
+ CallEmLongjmpBBThrewValuePHI->addIncoming(ThrewValue, ThenBB1);
+ IRB.CreateCall(EmLongjmpF,
+ {CallEmLongjmpBBThrewPHI, CallEmLongjmpBBThrewValuePHI});
+ IRB.CreateUnreachable();
+ } else {
+ CallEmLongjmpBBThrewPHI->addIncoming(Threw, ThenBB1);
+ CallEmLongjmpBBThrewValuePHI->addIncoming(ThrewValue, ThenBB1);
+ }
+
+ // %label = testSetjmp(mem[%__THREW__.val], setjmpTable, setjmpTableSize);
+ // if (%label == 0)
+ IRB.SetInsertPoint(ThenBB1);
+ BasicBlock *EndBB2 = BasicBlock::Create(C, "if.end2", F);
+ Value *ThrewPtr =
+ IRB.CreateIntToPtr(Threw, getAddrPtrType(M), Threw->getName() + ".p");
+ Value *LoadedThrew = IRB.CreateLoad(getAddrIntType(M), ThrewPtr,
+ ThrewPtr->getName() + ".loaded");
+ Value *ThenLabel = IRB.CreateCall(
+ TestSetjmpF, {LoadedThrew, SetjmpTable, SetjmpTableSize}, "label");
+ Value *Cmp2 = IRB.CreateICmpEQ(ThenLabel, IRB.getInt32(0));
+ IRB.CreateCondBr(Cmp2, CallEmLongjmpBB, EndBB2);
+
+ // setTempRet0(%__threwValue.val);
+ IRB.SetInsertPoint(EndBB2);
+ IRB.CreateCall(SetTempRet0F, ThrewValue);
+ IRB.CreateBr(EndBB1);
+
+ IRB.SetInsertPoint(ElseBB1);
+ IRB.CreateBr(EndBB1);
+
+ // longjmp_result = getTempRet0();
+ IRB.SetInsertPoint(EndBB1);
+ PHINode *LabelPHI = IRB.CreatePHI(IRB.getInt32Ty(), 2, "label");
+ LabelPHI->addIncoming(ThenLabel, EndBB2);
+ LabelPHI->addIncoming(IRB.getInt32(-1), ElseBB1);
+
+ // Output parameter assignment
+ Label = LabelPHI;
+ EndBB = EndBB1;
+ LongjmpResult = IRB.CreateCall(GetTempRet0F, std::nullopt, "longjmp_result");
+}
+
+void WebAssemblyLowerEmscriptenEHSjLj::rebuildSSA(Function &F) {
+ DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
+ DT.recalculate(F); // CFG has been changed
+
+ SSAUpdaterBulk SSA;
+ for (BasicBlock &BB : F) {
+ for (Instruction &I : BB) {
+ unsigned VarID = SSA.AddVariable(I.getName(), I.getType());
+ // If a value is defined by an invoke instruction, it is only available in
+ // its normal destination and not in its unwind destination.
+ if (auto *II = dyn_cast<InvokeInst>(&I))
+ SSA.AddAvailableValue(VarID, II->getNormalDest(), II);
+ else
+ SSA.AddAvailableValue(VarID, &BB, &I);
+ for (auto &U : I.uses()) {
+ auto *User = cast<Instruction>(U.getUser());
+ if (auto *UserPN = dyn_cast<PHINode>(User))
+ if (UserPN->getIncomingBlock(U) == &BB)
+ continue;
+ if (DT.dominates(&I, User))
+ continue;
+ SSA.AddUse(VarID, &U);
+ }
+ }
+ }
+ SSA.RewriteAllUses(&DT);
+}
+
+// Replace uses of longjmp with a new longjmp function in Emscripten library.
+// In Emscripten SjLj, the new function is
+// void emscripten_longjmp(uintptr_t, i32)
+// In Wasm SjLj, the new function is
+// void __wasm_longjmp(i8*, i32)
+// Because the original libc longjmp function takes (jmp_buf*, i32), we need a
+// ptrtoint/bitcast instruction here to make the type match. jmp_buf* will
+// eventually be lowered to i32/i64 in the wasm backend.
+void WebAssemblyLowerEmscriptenEHSjLj::replaceLongjmpWith(Function *LongjmpF,
+ Function *NewF) {
+ assert(NewF == EmLongjmpF || NewF == WasmLongjmpF);
+ Module *M = LongjmpF->getParent();
+ SmallVector<CallInst *, 8> ToErase;
+ LLVMContext &C = LongjmpF->getParent()->getContext();
+ IRBuilder<> IRB(C);
+
+ // Replace each call to longjmp with emscripten_longjmp/__wasm_longjmp and
+ // cast its first argument (jmp_buf*) appropriately
+ for (User *U : LongjmpF->users()) {
+ auto *CI = dyn_cast<CallInst>(U);
+ if (CI && CI->getCalledFunction() == LongjmpF) {
+ IRB.SetInsertPoint(CI);
+ Value *Env = nullptr;
+ if (NewF == EmLongjmpF)
+ Env =
+ IRB.CreatePtrToInt(CI->getArgOperand(0), getAddrIntType(M), "env");
+ else // WasmLongjmpF
+ Env = IRB.CreateBitCast(CI->getArgOperand(0), IRB.getPtrTy(), "env");
+ IRB.CreateCall(NewF, {Env, CI->getArgOperand(1)});
+ ToErase.push_back(CI);
+ }
+ }
+ for (auto *I : ToErase)
+ I->eraseFromParent();
+
+ // If we have any remaining uses of longjmp's function pointer, replace it
+ // with (void(*)(jmp_buf*, int))emscripten_longjmp / __wasm_longjmp.
+ if (!LongjmpF->uses().empty()) {
+ Value *NewLongjmp =
+ IRB.CreateBitCast(NewF, LongjmpF->getType(), "longjmp.cast");
+ LongjmpF->replaceAllUsesWith(NewLongjmp);
+ }
+}
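+
+// For illustration only (an assumed sketch; names are not emitted verbatim):
+// under Emscripten SjLj on wasm32,
+// call void @longjmp(ptr %buf, i32 1)
+// becomes
+// %env = ptrtoint ptr %buf to i32
+// call void @emscripten_longjmp(i32 %env, i32 1)
+// while under Wasm SjLj the jmp_buf pointer is instead passed through to
+// __wasm_longjmp as a pointer.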
+
+static bool containsLongjmpableCalls(const Function *F) {
+ for (const auto &BB : *F)
+ for (const auto &I : BB)
+ if (const auto *CB = dyn_cast<CallBase>(&I))
+ if (canLongjmp(CB->getCalledOperand()))
+ return true;
+ return false;
+}
+
+// When a function contains a setjmp call but no other calls that can longjmp,
+// we don't do the setjmp transformation for that setjmp. But we need to
+// convert the setjmp calls into "i32 0" so they don't cause link-time errors;
+// setjmp always returns 0 when called directly.
+static void nullifySetjmp(Function *F) {
+ Module &M = *F->getParent();
+ IRBuilder<> IRB(M.getContext());
+ Function *SetjmpF = M.getFunction("setjmp");
+ SmallVector<Instruction *, 1> ToErase;
+
+ for (User *U : make_early_inc_range(SetjmpF->users())) {
+ auto *CB = cast<CallBase>(U);
+ BasicBlock *BB = CB->getParent();
+ if (BB->getParent() != F) // in other function
+ continue;
+ CallInst *CI = nullptr;
+ // setjmp cannot throw. So if it is an invoke, lower it to a call
+ if (auto *II = dyn_cast<InvokeInst>(CB))
+ CI = llvm::changeToCall(II);
+ else
+ CI = cast<CallInst>(CB);
+ ToErase.push_back(CI);
+ CI->replaceAllUsesWith(IRB.getInt32(0));
+ }
+ for (auto *I : ToErase)
+ I->eraseFromParent();
+}
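+
+// For illustration only: after nullifySetjmp, an assumed call such as
+// %r = call i32 @setjmp(ptr %buf)
+// is erased and every use of %r is replaced with (i32 0), the value a direct
+// setjmp call returns.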
+
+bool WebAssemblyLowerEmscriptenEHSjLj::runOnModule(Module &M) {
+ LLVM_DEBUG(dbgs() << "********** Lower Emscripten EH & SjLj **********\n");
+
+ LLVMContext &C = M.getContext();
+ IRBuilder<> IRB(C);
+
+ Function *SetjmpF = M.getFunction("setjmp");
+ Function *LongjmpF = M.getFunction("longjmp");
+
+ // On some platforms _setjmp and _longjmp are used instead. Redirect these
+ // to setjmp/longjmp, because we later detect those functions by their names.
+ Function *SetjmpF2 = M.getFunction("_setjmp");
+ Function *LongjmpF2 = M.getFunction("_longjmp");
+ if (SetjmpF2) {
+ if (SetjmpF) {
+ if (SetjmpF->getFunctionType() != SetjmpF2->getFunctionType())
+ report_fatal_error("setjmp and _setjmp have different function types");
+ } else {
+ SetjmpF = Function::Create(SetjmpF2->getFunctionType(),
+ GlobalValue::ExternalLinkage, "setjmp", M);
+ }
+ SetjmpF2->replaceAllUsesWith(SetjmpF);
+ }
+ if (LongjmpF2) {
+ if (LongjmpF) {
+ if (LongjmpF->getFunctionType() != LongjmpF2->getFunctionType())
+ report_fatal_error(
+ "longjmp and _longjmp have different function types");
+ } else {
+ LongjmpF = Function::Create(LongjmpF2->getFunctionType(),
+ GlobalValue::ExternalLinkage, "longjmp", M);
+ }
+ LongjmpF2->replaceAllUsesWith(LongjmpF);
+ }
+
+ auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
+ assert(TPC && "Expected a TargetPassConfig");
+ auto &TM = TPC->getTM<WebAssemblyTargetMachine>();
+
+ // Declare (or get) global variables __THREW__, __threwValue, and
+ // getTempRet0/setTempRet0 function which are used in common for both
+ // exception handling and setjmp/longjmp handling
+ ThrewGV = getGlobalVariable(M, getAddrIntType(&M), TM, "__THREW__");
+ ThrewValueGV = getGlobalVariable(M, IRB.getInt32Ty(), TM, "__threwValue");
+ GetTempRet0F = getEmscriptenFunction(
+ FunctionType::get(IRB.getInt32Ty(), false), "getTempRet0", &M);
+ SetTempRet0F = getEmscriptenFunction(
+ FunctionType::get(IRB.getVoidTy(), IRB.getInt32Ty(), false),
+ "setTempRet0", &M);
+ GetTempRet0F->setDoesNotThrow();
+ SetTempRet0F->setDoesNotThrow();
+
+ bool Changed = false;
+
+ // Function registration for exception handling
+ if (EnableEmEH) {
+ // Register __resumeException function
+ FunctionType *ResumeFTy =
+ FunctionType::get(IRB.getVoidTy(), IRB.getPtrTy(), false);
+ ResumeF = getEmscriptenFunction(ResumeFTy, "__resumeException", &M);
+ ResumeF->addFnAttr(Attribute::NoReturn);
+
+ // Register llvm_eh_typeid_for function
+ FunctionType *EHTypeIDTy =
+ FunctionType::get(IRB.getInt32Ty(), IRB.getPtrTy(), false);
+ EHTypeIDF = getEmscriptenFunction(EHTypeIDTy, "llvm_eh_typeid_for", &M);
+ }
+
+ // Functions that contain calls to setjmp but don't have other longjmpable
+ // calls within them.
+ SmallPtrSet<Function *, 4> SetjmpUsersToNullify;
+
+ if ((EnableEmSjLj || EnableWasmSjLj) && SetjmpF) {
+ // Precompute setjmp users
+ for (User *U : SetjmpF->users()) {
+ if (auto *CB = dyn_cast<CallBase>(U)) {
+ auto *UserF = CB->getFunction();
+ // If a function that calls setjmp does not contain any other calls that
+ // can longjmp, we don't need to do any transformation on that function,
+ // so we can ignore it
+ if (containsLongjmpableCalls(UserF))
+ SetjmpUsers.insert(UserF);
+ else
+ SetjmpUsersToNullify.insert(UserF);
+ } else {
+ std::string S;
+ raw_string_ostream SS(S);
+ SS << *U;
+ report_fatal_error(Twine("Indirect use of setjmp is not supported: ") +
+ SS.str());
+ }
+ }
+ }
+
+ bool SetjmpUsed = SetjmpF && !SetjmpUsers.empty();
+ bool LongjmpUsed = LongjmpF && !LongjmpF->use_empty();
+ DoSjLj = (EnableEmSjLj || EnableWasmSjLj) && (SetjmpUsed || LongjmpUsed);
+
+ // Function registration and data pre-gathering for setjmp/longjmp handling
+ if (DoSjLj) {
+ assert(EnableEmSjLj || EnableWasmSjLj);
+ if (EnableEmSjLj) {
+ // Register emscripten_longjmp function
+ FunctionType *FTy = FunctionType::get(
+ IRB.getVoidTy(), {getAddrIntType(&M), IRB.getInt32Ty()}, false);
+ EmLongjmpF = getEmscriptenFunction(FTy, "emscripten_longjmp", &M);
+ EmLongjmpF->addFnAttr(Attribute::NoReturn);
+ } else { // EnableWasmSjLj
+ Type *Int8PtrTy = IRB.getPtrTy();
+ // Register __wasm_longjmp function, which calls __builtin_wasm_longjmp.
+ FunctionType *FTy = FunctionType::get(
+ IRB.getVoidTy(), {Int8PtrTy, IRB.getInt32Ty()}, false);
+ WasmLongjmpF = getEmscriptenFunction(FTy, "__wasm_longjmp", &M);
+ WasmLongjmpF->addFnAttr(Attribute::NoReturn);
+ }
+
+ if (SetjmpF) {
+ Type *Int8PtrTy = IRB.getPtrTy();
+ Type *Int32PtrTy = IRB.getPtrTy();
+ Type *Int32Ty = IRB.getInt32Ty();
+ // Register saveSetjmp function
+ FunctionType *SetjmpFTy = SetjmpF->getFunctionType();
+ FunctionType *FTy = FunctionType::get(
+ Int32PtrTy,
+ {SetjmpFTy->getParamType(0), Int32Ty, Int32PtrTy, Int32Ty}, false);
+ SaveSetjmpF = getEmscriptenFunction(FTy, "saveSetjmp", &M);
+
+ // Register testSetjmp function
+ FTy = FunctionType::get(Int32Ty,
+ {getAddrIntType(&M), Int32PtrTy, Int32Ty}, false);
+ TestSetjmpF = getEmscriptenFunction(FTy, "testSetjmp", &M);
+
+ // wasm.catch() will be lowered down to wasm 'catch' instruction in
+ // instruction selection.
+ CatchF = Intrinsic::getDeclaration(&M, Intrinsic::wasm_catch);
+ // Type for struct __WasmLongjmpArgs
+ LongjmpArgsTy = StructType::get(Int8PtrTy, // env
+ Int32Ty // val
+ );
+ }
+ }
+
+ // Exception handling transformation
+ if (EnableEmEH) {
+ for (Function &F : M) {
+ if (F.isDeclaration())
+ continue;
+ Changed |= runEHOnFunction(F);
+ }
+ }
+
+ // Setjmp/longjmp handling transformation
+ if (DoSjLj) {
+ Changed = true; // We have setjmp or longjmp somewhere
+ if (LongjmpF)
+ replaceLongjmpWith(LongjmpF, EnableEmSjLj ? EmLongjmpF : WasmLongjmpF);
+ // Only traverse functions that use setjmp in order not to insert
+ // unnecessary prep / cleanup code in every function
+ if (SetjmpF)
+ for (Function *F : SetjmpUsers)
+ runSjLjOnFunction(*F);
+ }
+
+ // Replace unnecessary setjmp calls with 0
+ if ((EnableEmSjLj || EnableWasmSjLj) && !SetjmpUsersToNullify.empty()) {
+ Changed = true;
+ assert(SetjmpF);
+ for (Function *F : SetjmpUsersToNullify)
+ nullifySetjmp(F);
+ }
+
+ // Delete unused global variables and functions
+ for (auto *V : {ThrewGV, ThrewValueGV})
+ if (V && V->use_empty())
+ V->eraseFromParent();
+ for (auto *V : {GetTempRet0F, SetTempRet0F, ResumeF, EHTypeIDF, EmLongjmpF,
+ SaveSetjmpF, TestSetjmpF, WasmLongjmpF, CatchF})
+ if (V && V->use_empty())
+ V->eraseFromParent();
+
+ return Changed;
+}
+
+bool WebAssemblyLowerEmscriptenEHSjLj::runEHOnFunction(Function &F) {
+ Module &M = *F.getParent();
+ LLVMContext &C = F.getContext();
+ IRBuilder<> IRB(C);
+ bool Changed = false;
+ SmallVector<Instruction *, 64> ToErase;
+ SmallPtrSet<LandingPadInst *, 32> LandingPads;
+
+ // rethrow.longjmp BB that will be shared within the function.
+ BasicBlock *RethrowLongjmpBB = nullptr;
+ // PHI node for the loaded value of __THREW__ global variable in
+ // rethrow.longjmp BB
+ PHINode *RethrowLongjmpBBThrewPHI = nullptr;
+
+ for (BasicBlock &BB : F) {
+ auto *II = dyn_cast<InvokeInst>(BB.getTerminator());
+ if (!II)
+ continue;
+ Changed = true;
+ LandingPads.insert(II->getLandingPadInst());
+ IRB.SetInsertPoint(II);
+
+ const Value *Callee = II->getCalledOperand();
+ bool NeedInvoke = supportsException(&F) && canThrow(Callee);
+ if (NeedInvoke) {
+ // Wrap invoke with invoke wrapper and generate preamble/postamble
+ Value *Threw = wrapInvoke(II);
+ ToErase.push_back(II);
+
+ // If setjmp/longjmp handling is enabled, the thrown value may be a longjmp
+ // rather than an exception. If the current function contains calls to
+ // setjmp, this will be appropriately handled in runSjLjOnFunction. But even
+ // if the function does not contain setjmp calls, we shouldn't silently
+ // ignore longjmps; we should rethrow them so they can be correctly
+ // handled somewhere up the call chain, where the corresponding setjmp is.
+ // __THREW__'s value is 0 when nothing happened, 1 when an exception is
+ // thrown, and other values when a longjmp is thrown.
+ //
+ // if (%__THREW__.val == 0 || %__THREW__.val == 1)
+ // goto %tail
+ // else
+ // goto %longjmp.rethrow
+ //
+ // rethrow.longjmp: ;; This is longjmp. Rethrow it
+ // %__threwValue.val = __threwValue
+ // emscripten_longjmp(%__THREW__.val, %__threwValue.val);
+ //
+ // tail: ;; Nothing happened or an exception is thrown
+ // ... Continue exception handling ...
+ if (DoSjLj && EnableEmSjLj && !SetjmpUsers.count(&F) &&
+ canLongjmp(Callee)) {
+ // Create longjmp.rethrow BB once and share it within the function
+ if (!RethrowLongjmpBB) {
+ RethrowLongjmpBB = BasicBlock::Create(C, "rethrow.longjmp", &F);
+ IRB.SetInsertPoint(RethrowLongjmpBB);
+ RethrowLongjmpBBThrewPHI =
+ IRB.CreatePHI(getAddrIntType(&M), 4, "threw.phi");
+ RethrowLongjmpBBThrewPHI->addIncoming(Threw, &BB);
+ Value *ThrewValue = IRB.CreateLoad(IRB.getInt32Ty(), ThrewValueGV,
+ ThrewValueGV->getName() + ".val");
+ IRB.CreateCall(EmLongjmpF, {RethrowLongjmpBBThrewPHI, ThrewValue});
+ IRB.CreateUnreachable();
+ } else {
+ RethrowLongjmpBBThrewPHI->addIncoming(Threw, &BB);
+ }
+
+ IRB.SetInsertPoint(II); // Restore the insert point back
+ BasicBlock *Tail = BasicBlock::Create(C, "tail", &F);
+ Value *CmpEqOne =
+ IRB.CreateICmpEQ(Threw, getAddrSizeInt(&M, 1), "cmp.eq.one");
+ Value *CmpEqZero =
+ IRB.CreateICmpEQ(Threw, getAddrSizeInt(&M, 0), "cmp.eq.zero");
+ Value *Or = IRB.CreateOr(CmpEqZero, CmpEqOne, "or");
+ IRB.CreateCondBr(Or, Tail, RethrowLongjmpBB);
+ IRB.SetInsertPoint(Tail);
+ BB.replaceSuccessorsPhiUsesWith(&BB, Tail);
+ }
+
+ // Insert a branch based on __THREW__ variable
+ Value *Cmp = IRB.CreateICmpEQ(Threw, getAddrSizeInt(&M, 1), "cmp");
+ IRB.CreateCondBr(Cmp, II->getUnwindDest(), II->getNormalDest());
+
+ } else {
+ // This can't throw, and we don't need this invoke, so just replace it with
+ // a call+branch
+ changeToCall(II);
+ }
+ }
+
+ // Process resume instructions
+ for (BasicBlock &BB : F) {
+ // Scan the body of the basic block for resumes
+ for (Instruction &I : BB) {
+ auto *RI = dyn_cast<ResumeInst>(&I);
+ if (!RI)
+ continue;
+ Changed = true;
+
+ // Split the input into legal values
+ Value *Input = RI->getValue();
+ IRB.SetInsertPoint(RI);
+ Value *Low = IRB.CreateExtractValue(Input, 0, "low");
+ // Create a call to __resumeException function
+ IRB.CreateCall(ResumeF, {Low});
+ // Add a terminator to the block
+ IRB.CreateUnreachable();
+ ToErase.push_back(RI);
+ }
+ }
+
+ // Process llvm.eh.typeid.for intrinsics
+ for (BasicBlock &BB : F) {
+ for (Instruction &I : BB) {
+ auto *CI = dyn_cast<CallInst>(&I);
+ if (!CI)
+ continue;
+ const Function *Callee = CI->getCalledFunction();
+ if (!Callee)
+ continue;
+ if (Callee->getIntrinsicID() != Intrinsic::eh_typeid_for)
+ continue;
+ Changed = true;
+
+ IRB.SetInsertPoint(CI);
+ CallInst *NewCI =
+ IRB.CreateCall(EHTypeIDF, CI->getArgOperand(0), "typeid");
+ CI->replaceAllUsesWith(NewCI);
+ ToErase.push_back(CI);
+ }
+ }
+
+ // Look for orphan landingpads, which can occur in blocks with no predecessors
+ for (BasicBlock &BB : F) {
+ Instruction *I = BB.getFirstNonPHI();
+ if (auto *LPI = dyn_cast<LandingPadInst>(I))
+ LandingPads.insert(LPI);
+ }
+ Changed |= !LandingPads.empty();
+
+ // Handle all the landingpads for this function together, as multiple
+ // invokes may share a single landingpad
+ for (LandingPadInst *LPI : LandingPads) {
+ IRB.SetInsertPoint(LPI);
+ SmallVector<Value *, 16> FMCArgs;
+ for (unsigned I = 0, E = LPI->getNumClauses(); I < E; ++I) {
+ Constant *Clause = LPI->getClause(I);
+ // TODO Handle filters (= exception specifications).
+ // https://github.com/llvm/llvm-project/issues/49740
+ if (LPI->isCatch(I))
+ FMCArgs.push_back(Clause);
+ }
+
+ // Create a call to __cxa_find_matching_catch_N function
+ Function *FMCF = getFindMatchingCatch(M, FMCArgs.size());
+ CallInst *FMCI = IRB.CreateCall(FMCF, FMCArgs, "fmc");
+ Value *Poison = PoisonValue::get(LPI->getType());
+ Value *Pair0 = IRB.CreateInsertValue(Poison, FMCI, 0, "pair0");
+ Value *TempRet0 = IRB.CreateCall(GetTempRet0F, std::nullopt, "tempret0");
+ Value *Pair1 = IRB.CreateInsertValue(Pair0, TempRet0, 1, "pair1");
+
+ LPI->replaceAllUsesWith(Pair1);
+ ToErase.push_back(LPI);
+ }
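+
+ // For illustration only (assumed IR shapes, not emitted verbatim): a
+ // landingpad such as
+ // %lp = landingpad { ptr, i32 } catch ptr @_ZTIi
+ // becomes
+ // %fmc = call ptr @__cxa_find_matching_catch_3(ptr @_ZTIi)
+ // %sel = call i32 @getTempRet0()
+ // with %lp's uses replaced by the { %fmc, %sel } pair built above.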
+
+ // Erase everything we no longer need in this function
+ for (Instruction *I : ToErase)
+ I->eraseFromParent();
+
+ return Changed;
+}
+
+// This tries to get debug info from the instruction before which a new
+// instruction will be inserted, and if there's no debug info in that
+// instruction, tries to get the info from the previous instruction instead
+// (if any). If neither has debug info and a DISubprogram is provided, it
+// creates dummy debug info with the first line of the function, because the
+// IR verifier requires that all inlinable callsites have debug info when both
+// the caller and the callee have a DISubprogram. If none of these conditions
+// is met, returns an empty DebugLoc.
+static DebugLoc getOrCreateDebugLoc(const Instruction *InsertBefore,
+ DISubprogram *SP) {
+ assert(InsertBefore);
+ if (InsertBefore->getDebugLoc())
+ return InsertBefore->getDebugLoc();
+ const Instruction *Prev = InsertBefore->getPrevNode();
+ if (Prev && Prev->getDebugLoc())
+ return Prev->getDebugLoc();
+ if (SP)
+ return DILocation::get(SP->getContext(), SP->getLine(), 1, SP);
+ return DebugLoc();
+}
+
+bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) {
+ assert(EnableEmSjLj || EnableWasmSjLj);
+ Module &M = *F.getParent();
+ LLVMContext &C = F.getContext();
+ IRBuilder<> IRB(C);
+ SmallVector<Instruction *, 64> ToErase;
+ // Vector of %setjmpTable values
+ SmallVector<Instruction *, 4> SetjmpTableInsts;
+ // Vector of %setjmpTableSize values
+ SmallVector<Instruction *, 4> SetjmpTableSizeInsts;
+
+ // Setjmp preparation
+
+ // This instruction effectively means %setjmpTableSize = 4.
+ // We create this as an instruction intentionally, and we don't want to fold
+ // this instruction to a constant 4, because this value will be used in
+ // SSAUpdater.AddAvailableValue(...) later.
+ BasicBlock *Entry = &F.getEntryBlock();
+ DebugLoc FirstDL = getOrCreateDebugLoc(&*Entry->begin(), F.getSubprogram());
+ SplitBlock(Entry, &*Entry->getFirstInsertionPt());
+
+ BinaryOperator *SetjmpTableSize =
+ BinaryOperator::Create(Instruction::Add, IRB.getInt32(4), IRB.getInt32(0),
+ "setjmpTableSize", Entry->getTerminator());
+ SetjmpTableSize->setDebugLoc(FirstDL);
+ // setjmpTable = (int *) malloc(40);
+ Type *IntPtrTy = getAddrIntType(&M);
+ Constant *size = ConstantInt::get(IntPtrTy, 40);
+ IRB.SetInsertPoint(SetjmpTableSize);
+ auto *SetjmpTable = IRB.CreateMalloc(IntPtrTy, IRB.getInt32Ty(), size,
+ nullptr, nullptr, "setjmpTable");
+ SetjmpTable->setDebugLoc(FirstDL);
+ // IRBuilder::CreateMalloc may return a bitcast instruction if the result
+ // types don't match. We need to set the debug loc for the original call too.
+ auto *MallocCall = SetjmpTable->stripPointerCasts();
+ if (auto *MallocCallI = dyn_cast<Instruction>(MallocCall)) {
+ MallocCallI->setDebugLoc(FirstDL);
+ }
+ // setjmpTable[0] = 0;
+ IRB.CreateStore(IRB.getInt32(0), SetjmpTable);
+ SetjmpTableInsts.push_back(SetjmpTable);
+ SetjmpTableSizeInsts.push_back(SetjmpTableSize);
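+
+ // For illustration only, the entry block now looks roughly like this
+ // (wasm32; names are assumed):
+ // entry:
+ // %setjmpTable = call ptr @malloc(i32 40)
+ // store i32 0, ptr %setjmpTable
+ // %setjmpTableSize = add i32 4, i32 0
+ // br label %entry.split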
+
+ // Setjmp transformation
+ SmallVector<PHINode *, 4> SetjmpRetPHIs;
+ Function *SetjmpF = M.getFunction("setjmp");
+ for (auto *U : make_early_inc_range(SetjmpF->users())) {
+ auto *CB = cast<CallBase>(U);
+ BasicBlock *BB = CB->getParent();
+ if (BB->getParent() != &F) // in other function
+ continue;
+ if (CB->getOperandBundle(LLVMContext::OB_funclet)) {
+ std::string S;
+ raw_string_ostream SS(S);
+ SS << "In function " + F.getName() +
+ ": setjmp within a catch clause is not supported in Wasm EH:\n";
+ SS << *CB;
+ report_fatal_error(StringRef(SS.str()));
+ }
+
+ CallInst *CI = nullptr;
+ // setjmp cannot throw. So if it is an invoke, lower it to a call
+ if (auto *II = dyn_cast<InvokeInst>(CB))
+ CI = llvm::changeToCall(II);
+ else
+ CI = cast<CallInst>(CB);
+
+ // The tail is everything right after the call, and will be reached once
+ // when setjmp is called, and later when longjmp returns to the setjmp
+ BasicBlock *Tail = SplitBlock(BB, CI->getNextNode());
+ // Add a phi to the tail, which will be the output of setjmp, which
+ // indicates if this is the first call or a longjmp back. The phi directly
+ // uses the right value based on where we arrive from
+ IRB.SetInsertPoint(Tail, Tail->getFirstNonPHIIt());
+ PHINode *SetjmpRet = IRB.CreatePHI(IRB.getInt32Ty(), 2, "setjmp.ret");
+
+ // setjmp initial call returns 0
+ SetjmpRet->addIncoming(IRB.getInt32(0), BB);
+ // The proper output is now this, not the setjmp call itself
+ CI->replaceAllUsesWith(SetjmpRet);
+ // longjmps that return to this setjmp will add incoming values to this phi
+ SetjmpRetPHIs.push_back(SetjmpRet);
+
+ // Fix call target
+ // Our index in the function is our place in the array + 1 to avoid index
+ // 0, because index 0 means the longjmp is not ours to handle.
+ IRB.SetInsertPoint(CI);
+ Value *Args[] = {CI->getArgOperand(0), IRB.getInt32(SetjmpRetPHIs.size()),
+ SetjmpTable, SetjmpTableSize};
+ Instruction *NewSetjmpTable =
+ IRB.CreateCall(SaveSetjmpF, Args, "setjmpTable");
+ Instruction *NewSetjmpTableSize =
+ IRB.CreateCall(GetTempRet0F, std::nullopt, "setjmpTableSize");
+ SetjmpTableInsts.push_back(NewSetjmpTable);
+ SetjmpTableSizeInsts.push_back(NewSetjmpTableSize);
+ ToErase.push_back(CI);
+ }
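+
+ // For illustration only (assumed IR shapes): each setjmp call site now
+ // looks roughly like
+ // %setjmpTable1 = call ptr @saveSetjmp(ptr %env, i32 1, ptr %setjmpTable,
+ // i32 %setjmpTableSize)
+ // %setjmpTableSize1 = call i32 @getTempRet0()
+ // br label %tail
+ // tail:
+ // %setjmp.ret = phi i32 [ 0, %bb ], ... ;; longjmp returns are added later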
+
+ // Handle longjmpable calls.
+ if (EnableEmSjLj)
+ handleLongjmpableCallsForEmscriptenSjLj(
+ F, SetjmpTableInsts, SetjmpTableSizeInsts, SetjmpRetPHIs);
+ else // EnableWasmSjLj
+ handleLongjmpableCallsForWasmSjLj(F, SetjmpTableInsts, SetjmpTableSizeInsts,
+ SetjmpRetPHIs);
+
+ // Erase everything we no longer need in this function
+ for (Instruction *I : ToErase)
+ I->eraseFromParent();
+
+ // Free setjmpTable buffer before each return instruction + function-exiting
+ // call
+ SmallVector<Instruction *, 16> ExitingInsts;
+ for (BasicBlock &BB : F) {
+ Instruction *TI = BB.getTerminator();
+ if (isa<ReturnInst>(TI))
+ ExitingInsts.push_back(TI);
+ // Any 'call' instruction with the 'noreturn' attribute exits the function
+ // at this point. If such a call could throw and unwind to another EH pad
+ // within this function instead of exiting, it would have been an 'invoke',
+ // which happens when we use Wasm EH or Wasm SjLj.
+ for (auto &I : BB) {
+ if (auto *CI = dyn_cast<CallInst>(&I)) {
+ bool IsNoReturn = CI->hasFnAttr(Attribute::NoReturn);
+ if (Function *CalleeF = CI->getCalledFunction())
+ IsNoReturn |= CalleeF->hasFnAttribute(Attribute::NoReturn);
+ if (IsNoReturn)
+ ExitingInsts.push_back(&I);
+ }
+ }
+ }
+ for (auto *I : ExitingInsts) {
+ DebugLoc DL = getOrCreateDebugLoc(I, F.getSubprogram());
+ // If this exiting instruction is a call within a catchpad, we should add
+ // it as "funclet" to the operand bundle of the 'free' call
+ SmallVector<OperandBundleDef, 1> Bundles;
+ if (auto *CB = dyn_cast<CallBase>(I))
+ if (auto Bundle = CB->getOperandBundle(LLVMContext::OB_funclet))
+ Bundles.push_back(OperandBundleDef(*Bundle));
+ IRB.SetInsertPoint(I);
+ auto *Free = IRB.CreateFree(SetjmpTable, Bundles);
+ Free->setDebugLoc(DL);
+ }
+
+ // Every call to saveSetjmp can change setjmpTable and setjmpTableSize
+ // (when buffer reallocation occurs)
+ // entry:
+ // setjmpTableSize = 4;
+ // setjmpTable = (int *) malloc(40);
+ // setjmpTable[0] = 0;
+ // ...
+ // somebb:
+ // setjmpTable = saveSetjmp(env, label, setjmpTable, setjmpTableSize);
+ // setjmpTableSize = getTempRet0();
+ // So we need to make sure the SSA for these variables is valid so that all
+ // saveSetjmp and testSetjmp calls have the correct arguments.
+ SSAUpdater SetjmpTableSSA;
+ SSAUpdater SetjmpTableSizeSSA;
+ SetjmpTableSSA.Initialize(PointerType::get(C, 0), "setjmpTable");
+ SetjmpTableSizeSSA.Initialize(Type::getInt32Ty(C), "setjmpTableSize");
+ for (Instruction *I : SetjmpTableInsts)
+ SetjmpTableSSA.AddAvailableValue(I->getParent(), I);
+ for (Instruction *I : SetjmpTableSizeInsts)
+ SetjmpTableSizeSSA.AddAvailableValue(I->getParent(), I);
+
+ for (auto &U : make_early_inc_range(SetjmpTable->uses()))
+ if (auto *I = dyn_cast<Instruction>(U.getUser()))
+ if (I->getParent() != Entry)
+ SetjmpTableSSA.RewriteUse(U);
+ for (auto &U : make_early_inc_range(SetjmpTableSize->uses()))
+ if (auto *I = dyn_cast<Instruction>(U.getUser()))
+ if (I->getParent() != Entry)
+ SetjmpTableSizeSSA.RewriteUse(U);
+
+ // Finally, our modifications to the CFG can break dominance of SSA variables.
+ // For example, in this code,
+ // if (x()) { .. setjmp() .. }
+ // if (y()) { .. longjmp() .. }
+ // We must split the longjmp block, and it can jump into the block split from
+ // the setjmp one. But that means that when we split the setjmp block, its
+ // first part no longer dominates its second part - there is a theoretically
+ // possible control flow path where x() is false, then y() is true and we
+ // reach the second part of the setjmp block, without ever reaching the first
+ // part. So, we rebuild SSA form here.
+ rebuildSSA(F);
+ return true;
+}
+
+// Update each call that can longjmp so it can return to the corresponding
+// setjmp. Refer to 4) of "Emscripten setjmp/longjmp handling" section in the
+// comments at top of the file for details.
+void WebAssemblyLowerEmscriptenEHSjLj::handleLongjmpableCallsForEmscriptenSjLj(
+ Function &F, InstVector &SetjmpTableInsts, InstVector &SetjmpTableSizeInsts,
+ SmallVectorImpl<PHINode *> &SetjmpRetPHIs) {
+ Module &M = *F.getParent();
+ LLVMContext &C = F.getContext();
+ IRBuilder<> IRB(C);
+ SmallVector<Instruction *, 64> ToErase;
+
+ // We need to pass setjmpTable and setjmpTableSize to the testSetjmp
+ // function. These values are defined in the beginning of the function and
+ // also in each setjmp callsite, but we don't know which values we should use
+ // at this point. So here we arbitrarily use the ones defined in the beginning
+ // of the function, and SSAUpdater will later update them to the correct values.
+ Instruction *SetjmpTable = *SetjmpTableInsts.begin();
+ Instruction *SetjmpTableSize = *SetjmpTableSizeInsts.begin();
+
+ // call.em.longjmp BB that will be shared within the function.
+ BasicBlock *CallEmLongjmpBB = nullptr;
+ // PHI node for the loaded value of __THREW__ global variable in
+ // call.em.longjmp BB
+ PHINode *CallEmLongjmpBBThrewPHI = nullptr;
+ // PHI node for the loaded value of __threwValue global variable in
+ // call.em.longjmp BB
+ PHINode *CallEmLongjmpBBThrewValuePHI = nullptr;
+ // rethrow.exn BB that will be shared within the function.
+ BasicBlock *RethrowExnBB = nullptr;
+
+ // Because we are creating new BBs while processing and don't want to make
+ // all these newly created BBs candidates again for longjmp processing, we
+ // first make the vector of candidate BBs.
+ std::vector<BasicBlock *> BBs;
+ for (BasicBlock &BB : F)
+ BBs.push_back(&BB);
+
+ // BBs.size() will change within the loop, so we query it every time
+ for (unsigned I = 0; I < BBs.size(); I++) {
+ BasicBlock *BB = BBs[I];
+ for (Instruction &I : *BB) {
+ if (isa<InvokeInst>(&I)) {
+ std::string S;
+ raw_string_ostream SS(S);
+ SS << "In function " << F.getName()
+ << ": When using Wasm EH with Emscripten SjLj, there is a "
+ "restriction that `setjmp` function call and exception cannot be "
+ "used within the same function:\n";
+ SS << I;
+ report_fatal_error(StringRef(SS.str()));
+ }
+ auto *CI = dyn_cast<CallInst>(&I);
+ if (!CI)
+ continue;
+
+ const Value *Callee = CI->getCalledOperand();
+ if (!canLongjmp(Callee))
+ continue;
+ if (isEmAsmCall(Callee))
+ report_fatal_error("Cannot use EM_ASM* alongside setjmp/longjmp in " +
+ F.getName() +
+ ". Please consider using EM_JS, or move the "
+ "EM_ASM into another function.",
+ false);
+
+ Value *Threw = nullptr;
+ BasicBlock *Tail;
+ if (Callee->getName().starts_with("__invoke_")) {
+ // If an invoke wrapper has already been generated for this call in the
+ // previous EH phase, search for the load instruction
+ // %__THREW__.val = __THREW__;
+ // in the postamble after the invoke wrapper call
+ LoadInst *ThrewLI = nullptr;
+ StoreInst *ThrewResetSI = nullptr;
+ for (auto I = std::next(BasicBlock::iterator(CI)), IE = BB->end();
+ I != IE; ++I) {
+ if (auto *LI = dyn_cast<LoadInst>(I))
+ if (auto *GV = dyn_cast<GlobalVariable>(LI->getPointerOperand()))
+ if (GV == ThrewGV) {
+ Threw = ThrewLI = LI;
+ break;
+ }
+ }
+ // Search for the store instruction after the load above
+ // __THREW__ = 0;
+ for (auto I = std::next(BasicBlock::iterator(ThrewLI)), IE = BB->end();
+ I != IE; ++I) {
+ if (auto *SI = dyn_cast<StoreInst>(I)) {
+ if (auto *GV = dyn_cast<GlobalVariable>(SI->getPointerOperand())) {
+ if (GV == ThrewGV &&
+ SI->getValueOperand() == getAddrSizeInt(&M, 0)) {
+ ThrewResetSI = SI;
+ break;
+ }
+ }
+ }
+ }
+ assert(Threw && ThrewLI && "Cannot find __THREW__ load after invoke");
+ assert(ThrewResetSI && "Cannot find __THREW__ store after invoke");
+ Tail = SplitBlock(BB, ThrewResetSI->getNextNode());
+
+ } else {
+ // Wrap call with invoke wrapper and generate preamble/postamble
+ Threw = wrapInvoke(CI);
+ ToErase.push_back(CI);
+ Tail = SplitBlock(BB, CI->getNextNode());
+
+ // If exception handling is enabled, the thrown value may be an exception
+ // rather than a longjmp, in which case we shouldn't silently ignore
+ // exceptions; we should rethrow them.
+ // __THREW__'s value is 0 when nothing happened, 1 when an exception is
+ // thrown, other values when longjmp is thrown.
+ //
+ // if (%__THREW__.val == 1)
+ // goto %eh.rethrow
+ // else
+ // goto %normal
+ //
+ // eh.rethrow: ;; Rethrow exception
+ // %exn = call @__cxa_find_matching_catch_2() ;; Retrieve thrown ptr
+ // __resumeException(%exn)
+ //
+ // normal:
+ // <-- Insertion point. Will insert sjlj handling code from here
+ // goto %tail
+ //
+ // tail:
+ // ...
+ if (supportsException(&F) && canThrow(Callee)) {
+ // We will add a new conditional branch. So remove the branch created
+ // when we split the BB
+ ToErase.push_back(BB->getTerminator());
+
+ // Generate rethrow.exn BB once and share it within the function
+ if (!RethrowExnBB) {
+ RethrowExnBB = BasicBlock::Create(C, "rethrow.exn", &F);
+ IRB.SetInsertPoint(RethrowExnBB);
+ CallInst *Exn =
+ IRB.CreateCall(getFindMatchingCatch(M, 0), {}, "exn");
+ IRB.CreateCall(ResumeF, {Exn});
+ IRB.CreateUnreachable();
+ }
+
+ IRB.SetInsertPoint(CI);
+ BasicBlock *NormalBB = BasicBlock::Create(C, "normal", &F);
+ Value *CmpEqOne =
+ IRB.CreateICmpEQ(Threw, getAddrSizeInt(&M, 1), "cmp.eq.one");
+ IRB.CreateCondBr(CmpEqOne, RethrowExnBB, NormalBB);
+
+ IRB.SetInsertPoint(NormalBB);
+ IRB.CreateBr(Tail);
+ BB = NormalBB; // New insertion point to insert testSetjmp()
+ }
+ }
+
+ // We need to replace the terminator in Tail: SplitBlock makes BB go
+ // straight to Tail, but we need to check whether a longjmp occurred and,
+ // if so, go to the right setjmp-tail
+ ToErase.push_back(BB->getTerminator());
+
+ // Generate a function call to testSetjmp function and preamble/postamble
+ // code to figure out (1) whether longjmp occurred (2) if longjmp
+ // occurred, which setjmp it corresponds to
+ Value *Label = nullptr;
+ Value *LongjmpResult = nullptr;
+ BasicBlock *EndBB = nullptr;
+ wrapTestSetjmp(BB, CI->getDebugLoc(), Threw, SetjmpTable, SetjmpTableSize,
+ Label, LongjmpResult, CallEmLongjmpBB,
+ CallEmLongjmpBBThrewPHI, CallEmLongjmpBBThrewValuePHI,
+ EndBB);
+ assert(Label && LongjmpResult && EndBB);
+
+ // Create switch instruction
+ IRB.SetInsertPoint(EndBB);
+ IRB.SetCurrentDebugLocation(EndBB->back().getDebugLoc());
+ SwitchInst *SI = IRB.CreateSwitch(Label, Tail, SetjmpRetPHIs.size());
+ // -1 means no longjmp happened, continue normally (will hit the default
+ // switch case). 0 means a longjmp that is not ours to handle, needs a
+ // rethrow. Otherwise the label is the index into SetjmpRetPHIs plus 1
+ // (to avoid 0).
+ for (unsigned I = 0; I < SetjmpRetPHIs.size(); I++) {
+ SI->addCase(IRB.getInt32(I + 1), SetjmpRetPHIs[I]->getParent());
+ SetjmpRetPHIs[I]->addIncoming(LongjmpResult, EndBB);
+ }
+
+ // We are splitting the block here, and must continue to find other calls
+ // in the block - which is now split. So continue to traverse in the Tail
+ BBs.push_back(Tail);
+ }
+ }
+
+ for (Instruction *I : ToErase)
+ I->eraseFromParent();
+}
+
+static BasicBlock *getCleanupRetUnwindDest(const CleanupPadInst *CPI) {
+ for (const User *U : CPI->users())
+ if (const auto *CRI = dyn_cast<CleanupReturnInst>(U))
+ return CRI->getUnwindDest();
+ return nullptr;
+}
+
+// Create a catchpad in which we catch a longjmp's env and val arguments, test
+// if the longjmp corresponds to one of setjmps in the current function, and if
+// so, jump to the setjmp dispatch BB from which we go to one of post-setjmp
+// BBs. Refer to 4) of "Wasm setjmp/longjmp handling" section in the comments at
+// top of the file for details.
+void WebAssemblyLowerEmscriptenEHSjLj::handleLongjmpableCallsForWasmSjLj(
+ Function &F, InstVector &SetjmpTableInsts, InstVector &SetjmpTableSizeInsts,
+ SmallVectorImpl<PHINode *> &SetjmpRetPHIs) {
+ Module &M = *F.getParent();
+ LLVMContext &C = F.getContext();
+ IRBuilder<> IRB(C);
+
+ // A function with catchswitch/catchpad instruction should have a personality
+ // function attached to it. Search for the wasm personality function, and if
+ // it exists, use it, and if it doesn't, create a dummy personality function.
+ // (SjLj is not going to call it anyway.)
+ if (!F.hasPersonalityFn()) {
+ StringRef PersName = getEHPersonalityName(EHPersonality::Wasm_CXX);
+ FunctionType *PersType =
+ FunctionType::get(IRB.getInt32Ty(), /* isVarArg */ true);
+ Value *PersF = M.getOrInsertFunction(PersName, PersType).getCallee();
+ F.setPersonalityFn(
+ cast<Constant>(IRB.CreateBitCast(PersF, IRB.getPtrTy())));
+ }
+
+ // Use the entry BB's debugloc as a fallback
+ BasicBlock *Entry = &F.getEntryBlock();
+ DebugLoc FirstDL = getOrCreateDebugLoc(&*Entry->begin(), F.getSubprogram());
+ IRB.SetCurrentDebugLocation(FirstDL);
+
+ // Arbitrarily use the ones defined in the beginning of the function.
+ // SSAUpdater will later update them to the correct values.
+ Instruction *SetjmpTable = *SetjmpTableInsts.begin();
+ Instruction *SetjmpTableSize = *SetjmpTableSizeInsts.begin();
+
+ // Add setjmp.dispatch BB right after the entry block. Because we have
+ // initialized setjmpTable/setjmpTableSize in the entry block and split the
+ // rest into another BB, here 'OrigEntry' is the function's original entry
+ // block before the transformation.
+ //
+ // entry:
+ // setjmpTable / setjmpTableSize initialization
+ // setjmp.dispatch:
+ // switch will be inserted here later
+ // entry.split: (OrigEntry)
+ // the original function starts here
+ BasicBlock *OrigEntry = Entry->getNextNode();
+ BasicBlock *SetjmpDispatchBB =
+ BasicBlock::Create(C, "setjmp.dispatch", &F, OrigEntry);
+ cast<BranchInst>(Entry->getTerminator())->setSuccessor(0, SetjmpDispatchBB);
+
+ // Create catch.dispatch.longjmp BB and a catchswitch instruction
+ BasicBlock *CatchDispatchLongjmpBB =
+ BasicBlock::Create(C, "catch.dispatch.longjmp", &F);
+ IRB.SetInsertPoint(CatchDispatchLongjmpBB);
+ CatchSwitchInst *CatchSwitchLongjmp =
+ IRB.CreateCatchSwitch(ConstantTokenNone::get(C), nullptr, 1);
+
+ // Create catch.longjmp BB and a catchpad instruction
+ BasicBlock *CatchLongjmpBB = BasicBlock::Create(C, "catch.longjmp", &F);
+ CatchSwitchLongjmp->addHandler(CatchLongjmpBB);
+ IRB.SetInsertPoint(CatchLongjmpBB);
+ CatchPadInst *CatchPad = IRB.CreateCatchPad(CatchSwitchLongjmp, {});
+
+ // Wasm throw and catch instructions can throw and catch multiple values, but
+ // that requires multivalue support in the toolchain, which is currently not
+ // very reliable. We instead throw and catch a pointer to a struct value of
+ // type 'struct __WasmLongjmpArgs', which is defined in Emscripten.
+ Instruction *LongjmpArgs =
+ IRB.CreateCall(CatchF, {IRB.getInt32(WebAssembly::C_LONGJMP)}, "thrown");
+ Value *EnvField =
+ IRB.CreateConstGEP2_32(LongjmpArgsTy, LongjmpArgs, 0, 0, "env_gep");
+ Value *ValField =
+ IRB.CreateConstGEP2_32(LongjmpArgsTy, LongjmpArgs, 0, 1, "val_gep");
+ // void *env = __wasm_longjmp_args.env;
+ Instruction *Env = IRB.CreateLoad(IRB.getPtrTy(), EnvField, "env");
+ // int val = __wasm_longjmp_args.val;
+ Instruction *Val = IRB.CreateLoad(IRB.getInt32Ty(), ValField, "val");
+
+ // %label = testSetjmp(mem[%env], setjmpTable, setjmpTableSize);
+ // if (%label == 0)
+ // __wasm_longjmp(%env, %val)
+ // catchret to %setjmp.dispatch
+ BasicBlock *ThenBB = BasicBlock::Create(C, "if.then", &F);
+ BasicBlock *EndBB = BasicBlock::Create(C, "if.end", &F);
+ Value *EnvP = IRB.CreateBitCast(Env, getAddrPtrType(&M), "env.p");
+ Value *SetjmpID = IRB.CreateLoad(getAddrIntType(&M), EnvP, "setjmp.id");
+ Value *Label =
+ IRB.CreateCall(TestSetjmpF, {SetjmpID, SetjmpTable, SetjmpTableSize},
+ OperandBundleDef("funclet", CatchPad), "label");
+ Value *Cmp = IRB.CreateICmpEQ(Label, IRB.getInt32(0));
+ IRB.CreateCondBr(Cmp, ThenBB, EndBB);
+
+ IRB.SetInsertPoint(ThenBB);
+ CallInst *WasmLongjmpCI = IRB.CreateCall(
+ WasmLongjmpF, {Env, Val}, OperandBundleDef("funclet", CatchPad));
+ IRB.CreateUnreachable();
+
+ IRB.SetInsertPoint(EndBB);
+ // Jump to setjmp.dispatch block
+ IRB.CreateCatchRet(CatchPad, SetjmpDispatchBB);
+
+ // Go back to setjmp.dispatch BB
+ // setjmp.dispatch:
+ // switch %label {
+ // label 1: goto post-setjmp BB 1
+ // label 2: goto post-setjmp BB 2
+ // ...
+ // default: goto the split next BB
+ // }
+ IRB.SetInsertPoint(SetjmpDispatchBB);
+ PHINode *LabelPHI = IRB.CreatePHI(IRB.getInt32Ty(), 2, "label.phi");
+ LabelPHI->addIncoming(Label, EndBB);
+ LabelPHI->addIncoming(IRB.getInt32(-1), Entry);
+ SwitchInst *SI = IRB.CreateSwitch(LabelPHI, OrigEntry, SetjmpRetPHIs.size());
+ // -1 means no longjmp happened, continue normally (will hit the default
+ // switch case). 0 means a longjmp that is not ours to handle, needs a
+ // rethrow. Otherwise the label is the index into SetjmpRetPHIs plus 1
+ // (to avoid 0).
+ for (unsigned I = 0; I < SetjmpRetPHIs.size(); I++) {
+ SI->addCase(IRB.getInt32(I + 1), SetjmpRetPHIs[I]->getParent());
+ SetjmpRetPHIs[I]->addIncoming(Val, SetjmpDispatchBB);
+ }
+
+ // Convert all longjmpable call instructions to invokes that unwind to the
+ // newly created catch.dispatch.longjmp BB.
+ SmallVector<CallInst *, 64> LongjmpableCalls;
+ for (auto *BB = &*F.begin(); BB; BB = BB->getNextNode()) {
+ for (auto &I : *BB) {
+ auto *CI = dyn_cast<CallInst>(&I);
+ if (!CI)
+ continue;
+ const Value *Callee = CI->getCalledOperand();
+ if (!canLongjmp(Callee))
+ continue;
+ if (isEmAsmCall(Callee))
+ report_fatal_error("Cannot use EM_ASM* alongside setjmp/longjmp in " +
+ F.getName() +
+ ". Please consider using EM_JS, or move the "
+ "EM_ASM into another function.",
+ false);
+ // This is the __wasm_longjmp() call we inserted in this function, which
+ // rethrows the longjmp when the longjmp does not correspond to one of the
+ // setjmps in this function. We should not convert this call to an invoke.
+ if (CI == WasmLongjmpCI)
+ continue;
+ LongjmpableCalls.push_back(CI);
+ }
+ }
+
+ for (auto *CI : LongjmpableCalls) {
+ // Even if the callee function has attribute 'nounwind', which is true for
+ // all C functions, it can longjmp, which means it can throw a Wasm
+ // exception now.
+ CI->removeFnAttr(Attribute::NoUnwind);
+ if (Function *CalleeF = CI->getCalledFunction())
+ CalleeF->removeFnAttr(Attribute::NoUnwind);
+
+ // Change it to an invoke and make it unwind to the catch.dispatch.longjmp
+ // BB. If the call is enclosed in another catchpad/cleanuppad scope, unwind
+ // to its parent pad's unwind destination instead to preserve the scope
+ // structure. It will eventually unwind to the catch.dispatch.longjmp.
+ SmallVector<OperandBundleDef, 1> Bundles;
+ BasicBlock *UnwindDest = nullptr;
+ if (auto Bundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
+ Instruction *FromPad = cast<Instruction>(Bundle->Inputs[0]);
+ while (!UnwindDest) {
+ if (auto *CPI = dyn_cast<CatchPadInst>(FromPad)) {
+ UnwindDest = CPI->getCatchSwitch()->getUnwindDest();
+ break;
+ }
+ if (auto *CPI = dyn_cast<CleanupPadInst>(FromPad)) {
+ // getCleanupRetUnwindDest() can return nullptr when
+ // 1. This cleanuppad's matching cleanupret unwinds to caller
+ // 2. There is no matching cleanupret because it ends with
+ // unreachable.
+ // In case of 2, we need to traverse the parent pad chain.
+ UnwindDest = getCleanupRetUnwindDest(CPI);
+ Value *ParentPad = CPI->getParentPad();
+ if (isa<ConstantTokenNone>(ParentPad))
+ break;
+ FromPad = cast<Instruction>(ParentPad);
+ }
+ }
+ }
+ if (!UnwindDest)
+ UnwindDest = CatchDispatchLongjmpBB;
+ changeToInvokeAndSplitBasicBlock(CI, UnwindDest);
+ }
+
+ SmallVector<Instruction *, 16> ToErase;
+ for (auto &BB : F) {
+ if (auto *CSI = dyn_cast<CatchSwitchInst>(BB.getFirstNonPHI())) {
+ if (CSI != CatchSwitchLongjmp && CSI->unwindsToCaller()) {
+ IRB.SetInsertPoint(CSI);
+ ToErase.push_back(CSI);
+ auto *NewCSI = IRB.CreateCatchSwitch(CSI->getParentPad(),
+ CatchDispatchLongjmpBB, 1);
+ NewCSI->addHandler(*CSI->handler_begin());
+ NewCSI->takeName(CSI);
+ CSI->replaceAllUsesWith(NewCSI);
+ }
+ }
+
+ if (auto *CRI = dyn_cast<CleanupReturnInst>(BB.getTerminator())) {
+ if (CRI->unwindsToCaller()) {
+ IRB.SetInsertPoint(CRI);
+ ToErase.push_back(CRI);
+ IRB.CreateCleanupRet(CRI->getCleanupPad(), CatchDispatchLongjmpBB);
+ }
+ }
+ }
+
+ for (Instruction *I : ToErase)
+ I->eraseFromParent();
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerRefTypesIntPtrConv.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerRefTypesIntPtrConv.cpp
new file mode 100644
index 000000000000..e0a219211228
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerRefTypesIntPtrConv.cpp
@@ -0,0 +1,86 @@
+//=== WebAssemblyLowerRefTypesIntPtrConv.cpp -
+// Lower IntToPtr and PtrToInt on Reference Types ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Lowers IntToPtr and PtrToInt instructions on reference types to
+/// trap instructions, since the IR allows these conversions on
+/// non-integral pointers even though they are meaningless for reference types.
+///
+//===----------------------------------------------------------------------===//
+
+#include "Utils/WebAssemblyTypeUtilities.h"
+#include "WebAssembly.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/Pass.h"
+#include <set>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-lower-reftypes-intptr-conv"
+
+namespace {
+class WebAssemblyLowerRefTypesIntPtrConv final : public FunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly Lower RefTypes Int-Ptr Conversions";
+ }
+
+ bool runOnFunction(Function &F) override;
+
+public:
+ static char ID; // Pass identification
+ WebAssemblyLowerRefTypesIntPtrConv() : FunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyLowerRefTypesIntPtrConv::ID = 0;
+INITIALIZE_PASS(WebAssemblyLowerRefTypesIntPtrConv, DEBUG_TYPE,
+ "WebAssembly Lower RefTypes Int-Ptr Conversions", false, false)
+
+FunctionPass *llvm::createWebAssemblyLowerRefTypesIntPtrConv() {
+ return new WebAssemblyLowerRefTypesIntPtrConv();
+}
+
+bool WebAssemblyLowerRefTypesIntPtrConv::runOnFunction(Function &F) {
+ LLVM_DEBUG(dbgs() << "********** Lower RefTypes IntPtr Convs **********\n"
+ "********** Function: "
+ << F.getName() << '\n');
+
+ // This function will check for uses of ptrtoint and inttoptr on reference
+ // types and replace them with a trap instruction.
+ //
+ // We replace each such instruction with a trap instruction and replace its
+ // uses with an undef value of the instruction's type.
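+ //
+ // For example (illustrative IR; the address space is an assumption based on
+ // the Wasm externref convention):
+ // %i = ptrtoint ptr addrspace(10) %ref to i32
+ // becomes
+ // call void @llvm.debugtrap()
+ // with all uses of %i replaced by undef.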
+ std::set<Instruction *> worklist;
+
+ for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
+ PtrToIntInst *PTI = dyn_cast<PtrToIntInst>(&*I);
+ IntToPtrInst *ITP = dyn_cast<IntToPtrInst>(&*I);
+ if (!(PTI && WebAssembly::isWebAssemblyReferenceType(
+ PTI->getPointerOperand()->getType())) &&
+ !(ITP && WebAssembly::isWebAssemblyReferenceType(ITP->getDestTy())))
+ continue;
+
+ UndefValue *U = UndefValue::get(I->getType());
+ I->replaceAllUsesWith(U);
+
+ Function *TrapIntrin =
+ Intrinsic::getDeclaration(F.getParent(), Intrinsic::debugtrap);
+ CallInst::Create(TrapIntrin, {}, "", &*I);
+
+ worklist.insert(&*I);
+ }
+
+ // erase each instruction replaced by trap
+ for (Instruction *I : worklist)
+ I->eraseFromParent();
+
+ return !worklist.empty();
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp
new file mode 100644
index 000000000000..f6e24f7aaece
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp
@@ -0,0 +1,303 @@
+// WebAssemblyMCInstLower.cpp - Convert WebAssembly MachineInstr to an MCInst //
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains code to lower WebAssembly MachineInstrs to their
+/// corresponding MCInst records.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyMCInstLower.h"
+#include "TargetInfo/WebAssemblyTargetInfo.h"
+#include "Utils/WebAssemblyTypeUtilities.h"
+#include "WebAssemblyAsmPrinter.h"
+#include "WebAssemblyISelLowering.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCSymbolWasm.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+// This disables the removal of registers when lowering into MC, as required
+// by some current tests.
+cl::opt<bool>
+ WasmKeepRegisters("wasm-keep-registers", cl::Hidden,
+ cl::desc("WebAssembly: output stack registers in"
+ " instruction output for test purposes only."),
+ cl::init(false));
+
+static void removeRegisterOperands(const MachineInstr *MI, MCInst &OutMI);
+
+MCSymbol *
+WebAssemblyMCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const {
+ const GlobalValue *Global = MO.getGlobal();
+ if (!isa<Function>(Global)) {
+ auto *WasmSym = cast<MCSymbolWasm>(Printer.getSymbol(Global));
+ // If the symbol doesn't have an explicit WasmSymbolType yet and the
+ // GlobalValue is actually a WebAssembly global, then ensure the symbol is a
+ // WASM_SYMBOL_TYPE_GLOBAL.
+ if (WebAssembly::isWasmVarAddressSpace(Global->getAddressSpace()) &&
+ !WasmSym->getType()) {
+ const MachineFunction &MF = *MO.getParent()->getParent()->getParent();
+ const TargetMachine &TM = MF.getTarget();
+ const Function &CurrentFunc = MF.getFunction();
+ Type *GlobalVT = Global->getValueType();
+ SmallVector<MVT, 1> VTs;
+ computeLegalValueVTs(CurrentFunc, TM, GlobalVT, VTs);
+
+ WebAssembly::wasmSymbolSetType(WasmSym, GlobalVT, VTs);
+ }
+ return WasmSym;
+ }
+
+ const auto *FuncTy = cast<FunctionType>(Global->getValueType());
+ const MachineFunction &MF = *MO.getParent()->getParent()->getParent();
+ const TargetMachine &TM = MF.getTarget();
+ const Function &CurrentFunc = MF.getFunction();
+
+ SmallVector<MVT, 1> ResultMVTs;
+ SmallVector<MVT, 4> ParamMVTs;
+ const auto *const F = dyn_cast<Function>(Global);
+ computeSignatureVTs(FuncTy, F, CurrentFunc, TM, ParamMVTs, ResultMVTs);
+ auto Signature = signatureFromMVTs(ResultMVTs, ParamMVTs);
+
+ bool InvokeDetected = false;
+ auto *WasmSym = Printer.getMCSymbolForFunction(
+ F, WebAssembly::WasmEnableEmEH || WebAssembly::WasmEnableEmSjLj,
+ Signature.get(), InvokeDetected);
+ WasmSym->setSignature(Signature.get());
+ Printer.addSignature(std::move(Signature));
+ WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION);
+ return WasmSym;
+}
+
+MCSymbol *WebAssemblyMCInstLower::GetExternalSymbolSymbol(
+ const MachineOperand &MO) const {
+ return Printer.getOrCreateWasmSymbol(MO.getSymbolName());
+}
+
+MCOperand WebAssemblyMCInstLower::lowerSymbolOperand(const MachineOperand &MO,
+ MCSymbol *Sym) const {
+ MCSymbolRefExpr::VariantKind Kind = MCSymbolRefExpr::VK_None;
+ unsigned TargetFlags = MO.getTargetFlags();
+
+ switch (TargetFlags) {
+ case WebAssemblyII::MO_NO_FLAG:
+ break;
+ case WebAssemblyII::MO_GOT_TLS:
+ Kind = MCSymbolRefExpr::VK_WASM_GOT_TLS;
+ break;
+ case WebAssemblyII::MO_GOT:
+ Kind = MCSymbolRefExpr::VK_GOT;
+ break;
+ case WebAssemblyII::MO_MEMORY_BASE_REL:
+ Kind = MCSymbolRefExpr::VK_WASM_MBREL;
+ break;
+ case WebAssemblyII::MO_TLS_BASE_REL:
+ Kind = MCSymbolRefExpr::VK_WASM_TLSREL;
+ break;
+ case WebAssemblyII::MO_TABLE_BASE_REL:
+ Kind = MCSymbolRefExpr::VK_WASM_TBREL;
+ break;
+ default:
+ llvm_unreachable("Unknown target flag on GV operand");
+ }
+
+ const MCExpr *Expr = MCSymbolRefExpr::create(Sym, Kind, Ctx);
+
+ if (MO.getOffset() != 0) {
+ const auto *WasmSym = cast<MCSymbolWasm>(Sym);
+ if (TargetFlags == WebAssemblyII::MO_GOT)
+ report_fatal_error("GOT symbol references do not support offsets");
+ if (WasmSym->isFunction())
+ report_fatal_error("Function addresses with offsets not supported");
+ if (WasmSym->isGlobal())
+ report_fatal_error("Global indexes with offsets not supported");
+ if (WasmSym->isTag())
+ report_fatal_error("Tag indexes with offsets not supported");
+ if (WasmSym->isTable())
+ report_fatal_error("Table indexes with offsets not supported");
+
+ Expr = MCBinaryExpr::createAdd(
+ Expr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
+ }
+
+ return MCOperand::createExpr(Expr);
+}
+
+MCOperand WebAssemblyMCInstLower::lowerTypeIndexOperand(
+ SmallVectorImpl<wasm::ValType> &&Returns,
+ SmallVectorImpl<wasm::ValType> &&Params) const {
+ auto Signature = std::make_unique<wasm::WasmSignature>(std::move(Returns),
+ std::move(Params));
+ MCSymbol *Sym = Printer.createTempSymbol("typeindex");
+ auto *WasmSym = cast<MCSymbolWasm>(Sym);
+ WasmSym->setSignature(Signature.get());
+ Printer.addSignature(std::move(Signature));
+ WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION);
+ const MCExpr *Expr =
+ MCSymbolRefExpr::create(WasmSym, MCSymbolRefExpr::VK_WASM_TYPEINDEX, Ctx);
+ return MCOperand::createExpr(Expr);
+}
+
+static void getFunctionReturns(const MachineInstr *MI,
+ SmallVectorImpl<wasm::ValType> &Returns) {
+ const Function &F = MI->getMF()->getFunction();
+ const TargetMachine &TM = MI->getMF()->getTarget();
+ Type *RetTy = F.getReturnType();
+ SmallVector<MVT, 4> CallerRetTys;
+ computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
+ valTypesFromMVTs(CallerRetTys, Returns);
+}
+
+void WebAssemblyMCInstLower::lower(const MachineInstr *MI,
+ MCInst &OutMI) const {
+ OutMI.setOpcode(MI->getOpcode());
+
+ const MCInstrDesc &Desc = MI->getDesc();
+ unsigned NumVariadicDefs = MI->getNumExplicitDefs() - Desc.getNumDefs();
+ for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
+ const MachineOperand &MO = MI->getOperand(I);
+
+ MCOperand MCOp;
+ switch (MO.getType()) {
+ default:
+ MI->print(errs());
+ llvm_unreachable("unknown operand type");
+ case MachineOperand::MO_MachineBasicBlock:
+ MI->print(errs());
+ llvm_unreachable("MachineBasicBlock operand should have been rewritten");
+ case MachineOperand::MO_Register: {
+ // Ignore all implicit register operands.
+ if (MO.isImplicit())
+ continue;
+ const WebAssemblyFunctionInfo &MFI =
+ *MI->getParent()->getParent()->getInfo<WebAssemblyFunctionInfo>();
+ unsigned WAReg = MFI.getWAReg(MO.getReg());
+ MCOp = MCOperand::createReg(WAReg);
+ break;
+ }
+ case MachineOperand::MO_Immediate: {
+ unsigned DescIndex = I - NumVariadicDefs;
+ if (DescIndex < Desc.NumOperands) {
+ const MCOperandInfo &Info = Desc.operands()[DescIndex];
+ if (Info.OperandType == WebAssembly::OPERAND_TYPEINDEX) {
+ SmallVector<wasm::ValType, 4> Returns;
+ SmallVector<wasm::ValType, 4> Params;
+
+ const MachineRegisterInfo &MRI =
+ MI->getParent()->getParent()->getRegInfo();
+ for (const MachineOperand &MO : MI->defs())
+ Returns.push_back(WebAssembly::regClassToValType(
+ MRI.getRegClass(MO.getReg())->getID()));
+ for (const MachineOperand &MO : MI->explicit_uses())
+ if (MO.isReg())
+ Params.push_back(WebAssembly::regClassToValType(
+ MRI.getRegClass(MO.getReg())->getID()));
+
+ // call_indirect instructions have a callee operand at the end which
+ // doesn't count as a param.
+ if (WebAssembly::isCallIndirect(MI->getOpcode()))
+ Params.pop_back();
+
+ // return_call_indirect instructions have the return type of the
+ // caller
+ if (MI->getOpcode() == WebAssembly::RET_CALL_INDIRECT)
+ getFunctionReturns(MI, Returns);
+
+ MCOp = lowerTypeIndexOperand(std::move(Returns), std::move(Params));
+ break;
+ } else if (Info.OperandType == WebAssembly::OPERAND_SIGNATURE) {
+ auto BT = static_cast<WebAssembly::BlockType>(MO.getImm());
+ assert(BT != WebAssembly::BlockType::Invalid);
+ if (BT == WebAssembly::BlockType::Multivalue) {
+ SmallVector<wasm::ValType, 1> Returns;
+ getFunctionReturns(MI, Returns);
+ MCOp = lowerTypeIndexOperand(std::move(Returns),
+ SmallVector<wasm::ValType, 4>());
+ break;
+ }
+ }
+ }
+ MCOp = MCOperand::createImm(MO.getImm());
+ break;
+ }
+ case MachineOperand::MO_FPImmediate: {
+ const ConstantFP *Imm = MO.getFPImm();
+ const uint64_t BitPattern =
+ Imm->getValueAPF().bitcastToAPInt().getZExtValue();
+ if (Imm->getType()->isFloatTy())
+ MCOp = MCOperand::createSFPImm(static_cast<uint32_t>(BitPattern));
+ else if (Imm->getType()->isDoubleTy())
+ MCOp = MCOperand::createDFPImm(BitPattern);
+ else
+ llvm_unreachable("unknown floating point immediate type");
+ break;
+ }
+ case MachineOperand::MO_GlobalAddress:
+ MCOp = lowerSymbolOperand(MO, GetGlobalAddressSymbol(MO));
+ break;
+ case MachineOperand::MO_ExternalSymbol:
+ MCOp = lowerSymbolOperand(MO, GetExternalSymbolSymbol(MO));
+ break;
+ case MachineOperand::MO_MCSymbol:
+ assert(MO.getTargetFlags() == 0 &&
+ "WebAssembly does not use target flags on MCSymbol");
+ MCOp = lowerSymbolOperand(MO, MO.getMCSymbol());
+ break;
+ }
+
+ OutMI.addOperand(MCOp);
+ }
+
+ if (!WasmKeepRegisters)
+ removeRegisterOperands(MI, OutMI);
+ else if (Desc.variadicOpsAreDefs())
+ OutMI.insert(OutMI.begin(), MCOperand::createImm(MI->getNumExplicitDefs()));
+}
+
+static void removeRegisterOperands(const MachineInstr *MI, MCInst &OutMI) {
+  // Remove all uses of stackified registers to bring the instruction format
+  // into its final stack form used throughout MC, and transition opcodes to
+  // their _S variant.
+  // We do this separately from the code above, which may still need these
+  // registers for e.g. call_indirect signatures.
+ // See comments in lib/Target/WebAssembly/WebAssemblyInstrFormats.td for
+ // details.
+ // TODO: the code above creates new registers which are then removed here.
+ // That code could be slightly simplified by not doing that, though maybe
+ // it is simpler conceptually to keep the code above in "register mode"
+ // until this transition point.
+ // FIXME: we are not processing inline assembly, which contains register
+ // operands, because it is used by later target generic code.
+ if (MI->isDebugInstr() || MI->isLabel() || MI->isInlineAsm())
+ return;
+
+ // Transform to _S instruction.
+ auto RegOpcode = OutMI.getOpcode();
+ auto StackOpcode = WebAssembly::getStackOpcode(RegOpcode);
+ assert(StackOpcode != -1 && "Failed to stackify instruction");
+ OutMI.setOpcode(StackOpcode);
+
+ // Remove register operands.
+ for (auto I = OutMI.getNumOperands(); I; --I) {
+ auto &MO = OutMI.getOperand(I - 1);
+ if (MO.isReg()) {
+ OutMI.erase(&MO);
+ }
+ }
+}
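
To make the stack-form rewrite concrete, here is a self-contained toy model of removeRegisterOperands; the Inst and Operand types are invented for illustration. The opcode gains its _S suffix and register operands are erased back-to-front, so removal doesn't shift unvisited elements:

#include <cassert>
#include <iostream>
#include <string>
#include <vector>

// Toy operand: either a register or an immediate.
struct Operand {
  bool IsReg;
  int Value;
};

struct Inst {
  std::string Opcode;
  std::vector<Operand> Ops;
};

// Mirror of removeRegisterOperands(): rename to the _S ("stack") opcode and
// drop every register operand, leaving only immediates.
static void toStackForm(Inst &I) {
  I.Opcode += "_S";
  for (size_t N = I.Ops.size(); N; --N)      // iterate backwards, like the pass
    if (I.Ops[N - 1].IsReg)
      I.Ops.erase(I.Ops.begin() + (N - 1));
}

int main() {
  Inst Add{"I32_ADD", {{true, 0}, {true, 1}, {true, 2}}};
  toStackForm(Add);
  assert(Add.Opcode == "I32_ADD_S" && Add.Ops.empty());
  std::cout << Add.Opcode << " has " << Add.Ops.size() << " operands left\n";
}
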
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h
new file mode 100644
index 000000000000..9f08499e5cde
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h
@@ -0,0 +1,47 @@
+//===-- WebAssemblyMCInstLower.h - Lower MachineInstr to MCInst -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares the class to lower WebAssembly MachineInstrs to
+/// their corresponding MCInst records.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYMCINSTLOWER_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYMCINSTLOWER_H
+
+#include "llvm/BinaryFormat/Wasm.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+class WebAssemblyAsmPrinter;
+class MCContext;
+class MCSymbol;
+class MachineInstr;
+class MachineOperand;
+
+/// This class is used to lower a MachineInstr into an MCInst.
+class LLVM_LIBRARY_VISIBILITY WebAssemblyMCInstLower {
+ MCContext &Ctx;
+ WebAssemblyAsmPrinter &Printer;
+
+ MCSymbol *GetGlobalAddressSymbol(const MachineOperand &MO) const;
+ MCSymbol *GetExternalSymbolSymbol(const MachineOperand &MO) const;
+ MCOperand lowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
+ MCOperand lowerTypeIndexOperand(SmallVectorImpl<wasm::ValType> &&,
+ SmallVectorImpl<wasm::ValType> &&) const;
+
+public:
+ WebAssemblyMCInstLower(MCContext &ctx, WebAssemblyAsmPrinter &printer)
+ : Ctx(ctx), Printer(printer) {}
+ void lower(const MachineInstr *MI, MCInst &OutMI) const;
+};
+} // end namespace llvm
+
+#endif
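
For context, a sketch of how an AsmPrinter typically drives this class; exampleLowerAndEmit is an invented helper, and the real WebAssemblyAsmPrinter::emitInstruction handles several pseudo-instructions before reaching the generic lowering path:

#include "WebAssemblyAsmPrinter.h"
#include "WebAssemblyMCInstLower.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/MC/MCStreamer.h"
using namespace llvm;

// Hypothetical driver: lower one MachineInstr and hand it to the streamer.
static void exampleLowerAndEmit(WebAssemblyAsmPrinter &Printer, MCContext &Ctx,
                                MCStreamer &Streamer,
                                const MCSubtargetInfo &STI,
                                const MachineInstr *MI) {
  WebAssemblyMCInstLower MCInstLowering(Ctx, Printer);
  MCInst TmpInst;
  MCInstLowering.lower(MI, TmpInst); // convert MI into TmpInst
  Streamer.emitInstruction(TmpInst, STI);
}
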
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMCLowerPrePass.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMCLowerPrePass.cpp
new file mode 100644
index 000000000000..13acbd2e24cc
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMCLowerPrePass.cpp
@@ -0,0 +1,102 @@
+//===-- WebAssemblyMCLowerPrePass.cpp - Prepare for MC lower --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Some information in MC lowering / asm printing gets generated as
+/// instructions get emitted, but may be necessary at the start, such as for
+/// .globaltype declarations. This pass collects this information.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/ADT/SCCIterator.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-mclower-prepass"
+
+namespace {
+class WebAssemblyMCLowerPrePass final : public ModulePass {
+ StringRef getPassName() const override {
+ return "WebAssembly MC Lower Pre Pass";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ ModulePass::getAnalysisUsage(AU);
+ }
+
+ bool runOnModule(Module &M) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyMCLowerPrePass() : ModulePass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyMCLowerPrePass::ID = 0;
+INITIALIZE_PASS(
+ WebAssemblyMCLowerPrePass, DEBUG_TYPE,
+ "Collects information ahead of time for MC lowering",
+ false, false)
+
+ModulePass *llvm::createWebAssemblyMCLowerPrePass() {
+ return new WebAssemblyMCLowerPrePass();
+}
+
+// NOTE: this is a ModulePass since we need to enforce that this code has run
+// for all functions before AsmPrinter. If this way of doing things is ever
+// suboptimal, we could opt to make it a MachineFunctionPass and instead use
+// something like createBarrierNoopPass() to enforce ordering.
+//
+// The information stored here is essential for emitExternalDecls in the Wasm
+// AsmPrinter.
+bool WebAssemblyMCLowerPrePass::runOnModule(Module &M) {
+ auto *MMIWP = getAnalysisIfAvailable<MachineModuleInfoWrapperPass>();
+ if (!MMIWP)
+ return true;
+
+ MachineModuleInfo &MMI = MMIWP->getMMI();
+ MachineModuleInfoWasm &MMIW = MMI.getObjFileInfo<MachineModuleInfoWasm>();
+
+ for (Function &F : M) {
+ MachineFunction *MF = MMI.getMachineFunction(F);
+ if (!MF)
+ continue;
+
+ LLVM_DEBUG(dbgs() << "********** MC Lower Pre Pass **********\n"
+ "********** Function: "
+ << MF->getName() << '\n');
+
+ for (MachineBasicBlock &MBB : *MF) {
+ for (auto &MI : MBB) {
+ // FIXME: what should all be filtered out beyond these?
+ if (MI.isDebugInstr() || MI.isInlineAsm())
+ continue;
+ for (MachineOperand &MO : MI.uses()) {
+ if (MO.isSymbol()) {
+ MMIW.MachineSymbolsUsed.insert(MO.getSymbolName());
+ }
+ }
+ }
+ }
+ }
+ return true;
+}
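
A toy model of the scan this pass performs: symbol operands are collected into a set so that declarations can be emitted before any instruction that references them. The operand encoding here is invented for the sketch:

#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
  // Toy stand-in for scanning MachineOperands: (IsSymbol, Name) pairs.
  std::vector<std::pair<bool, std::string>> Ops = {
      {false, "r1"}, {true, "memcpy"}, {true, "memset"}, {true, "memcpy"}};

  // Gather every symbol operand, deduplicated, like MachineSymbolsUsed.
  std::set<std::string> MachineSymbolsUsed;
  for (const auto &[IsSymbol, Name] : Ops)
    if (IsSymbol)
      MachineSymbolsUsed.insert(Name);

  for (const auto &S : MachineSymbolsUsed)
    std::cout << S << '\n'; // memcpy, memset -- each exactly once
}
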
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp
new file mode 100644
index 000000000000..1e959111a4db
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp
@@ -0,0 +1,170 @@
+//=- WebAssemblyMachineFunctionInfo.cpp - WebAssembly Machine Function Info -=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements WebAssembly-specific per-machine-function
+/// information.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "MCTargetDesc/WebAssemblyInstPrinter.h"
+#include "Utils/WebAssemblyTypeUtilities.h"
+#include "WebAssemblyISelLowering.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/WasmEHFuncInfo.h"
+#include "llvm/Target/TargetMachine.h"
+using namespace llvm;
+
+WebAssemblyFunctionInfo::~WebAssemblyFunctionInfo() = default; // anchor.
+
+MachineFunctionInfo *WebAssemblyFunctionInfo::clone(
+ BumpPtrAllocator &Allocator, MachineFunction &DestMF,
+ const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB)
+ const {
+ // TODO: Implement cloning for WasmEHFuncInfo. This will have invalid block
+ // references.
+ return DestMF.cloneInfo<WebAssemblyFunctionInfo>(*this);
+}
+
+void WebAssemblyFunctionInfo::initWARegs(MachineRegisterInfo &MRI) {
+ assert(WARegs.empty());
+ unsigned Reg = WebAssembly::UnusedReg;
+ WARegs.resize(MRI.getNumVirtRegs(), Reg);
+}
+
+void llvm::computeLegalValueVTs(const WebAssemblyTargetLowering &TLI,
+ LLVMContext &Ctx, const DataLayout &DL,
+ Type *Ty, SmallVectorImpl<MVT> &ValueVTs) {
+ SmallVector<EVT, 4> VTs;
+ ComputeValueVTs(TLI, DL, Ty, VTs);
+
+ for (EVT VT : VTs) {
+ unsigned NumRegs = TLI.getNumRegisters(Ctx, VT);
+ MVT RegisterVT = TLI.getRegisterType(Ctx, VT);
+ for (unsigned I = 0; I != NumRegs; ++I)
+ ValueVTs.push_back(RegisterVT);
+ }
+}
+
+void llvm::computeLegalValueVTs(const Function &F, const TargetMachine &TM,
+ Type *Ty, SmallVectorImpl<MVT> &ValueVTs) {
+ const DataLayout &DL(F.getParent()->getDataLayout());
+ const WebAssemblyTargetLowering &TLI =
+ *TM.getSubtarget<WebAssemblySubtarget>(F).getTargetLowering();
+ computeLegalValueVTs(TLI, F.getContext(), DL, Ty, ValueVTs);
+}
+
+void llvm::computeSignatureVTs(const FunctionType *Ty,
+ const Function *TargetFunc,
+ const Function &ContextFunc,
+ const TargetMachine &TM,
+ SmallVectorImpl<MVT> &Params,
+ SmallVectorImpl<MVT> &Results) {
+ computeLegalValueVTs(ContextFunc, TM, Ty->getReturnType(), Results);
+
+ MVT PtrVT = MVT::getIntegerVT(TM.createDataLayout().getPointerSizeInBits());
+ if (Results.size() > 1 &&
+ !TM.getSubtarget<WebAssemblySubtarget>(ContextFunc).hasMultivalue()) {
+ // WebAssembly can't lower returns of multiple values without demoting to
+ // sret unless multivalue is enabled (see
+ // WebAssemblyTargetLowering::CanLowerReturn). So replace multiple return
+    // values with a pointer parameter.
+ Results.clear();
+ Params.push_back(PtrVT);
+ }
+
+ for (auto *Param : Ty->params())
+ computeLegalValueVTs(ContextFunc, TM, Param, Params);
+ if (Ty->isVarArg())
+ Params.push_back(PtrVT);
+
+  // For swiftcc, emit additional swiftself and swifterror parameters if they
+  // are not already present. These additional parameters are also passed on
+  // the caller side; they are necessary to match the callee and caller
+  // signatures for indirect calls.
+
+ if (TargetFunc && TargetFunc->getCallingConv() == CallingConv::Swift) {
+ bool HasSwiftErrorArg = false;
+ bool HasSwiftSelfArg = false;
+ for (const auto &Arg : TargetFunc->args()) {
+ HasSwiftErrorArg |= Arg.hasAttribute(Attribute::SwiftError);
+ HasSwiftSelfArg |= Arg.hasAttribute(Attribute::SwiftSelf);
+ }
+ if (!HasSwiftErrorArg)
+ Params.push_back(PtrVT);
+ if (!HasSwiftSelfArg)
+ Params.push_back(PtrVT);
+ }
+}
+
+void llvm::valTypesFromMVTs(const ArrayRef<MVT> &In,
+ SmallVectorImpl<wasm::ValType> &Out) {
+ for (MVT Ty : In)
+ Out.push_back(WebAssembly::toValType(Ty));
+}
+
+std::unique_ptr<wasm::WasmSignature>
+llvm::signatureFromMVTs(const SmallVectorImpl<MVT> &Results,
+ const SmallVectorImpl<MVT> &Params) {
+ auto Sig = std::make_unique<wasm::WasmSignature>();
+ valTypesFromMVTs(Results, Sig->Returns);
+ valTypesFromMVTs(Params, Sig->Params);
+ return Sig;
+}
+
+yaml::WebAssemblyFunctionInfo::WebAssemblyFunctionInfo(
+ const llvm::MachineFunction &MF, const llvm::WebAssemblyFunctionInfo &MFI)
+ : CFGStackified(MFI.isCFGStackified()) {
+ for (auto VT : MFI.getParams())
+ Params.push_back(EVT(VT).getEVTString());
+ for (auto VT : MFI.getResults())
+ Results.push_back(EVT(VT).getEVTString());
+
+  // MFI.getWasmEHFuncInfo() is non-null only for functions with a
+  // personality function.
+
+ if (auto *EHInfo = MF.getWasmEHFuncInfo()) {
+    // SrcToUnwindDest can contain stale mappings when BBs are removed by
+    // optimizations, for example because they are unreachable. We should not
+    // include their info.
+ SmallPtrSet<const MachineBasicBlock *, 16> MBBs;
+ for (const auto &MBB : MF)
+ MBBs.insert(&MBB);
+ for (auto KV : EHInfo->SrcToUnwindDest) {
+ auto *SrcBB = KV.first.get<MachineBasicBlock *>();
+ auto *DestBB = KV.second.get<MachineBasicBlock *>();
+ if (MBBs.count(SrcBB) && MBBs.count(DestBB))
+ SrcToUnwindDest[SrcBB->getNumber()] = DestBB->getNumber();
+ }
+ }
+}
+
+void yaml::WebAssemblyFunctionInfo::mappingImpl(yaml::IO &YamlIO) {
+ MappingTraits<WebAssemblyFunctionInfo>::mapping(YamlIO, *this);
+}
+
+void WebAssemblyFunctionInfo::initializeBaseYamlFields(
+ MachineFunction &MF, const yaml::WebAssemblyFunctionInfo &YamlMFI) {
+ CFGStackified = YamlMFI.CFGStackified;
+ for (auto VT : YamlMFI.Params)
+ addParam(WebAssembly::parseMVT(VT.Value));
+ for (auto VT : YamlMFI.Results)
+ addResult(WebAssembly::parseMVT(VT.Value));
+
+ // FIXME: WasmEHInfo is defined in the MachineFunction, but serialized
+ // here. Either WasmEHInfo should be moved out of MachineFunction, or the
+ // serialization handling should be moved to MachineFunction.
+ if (WasmEHFuncInfo *WasmEHInfo = MF.getWasmEHFuncInfo()) {
+ for (auto KV : YamlMFI.SrcToUnwindDest)
+ WasmEHInfo->setUnwindDest(MF.getBlockNumbered(KV.first),
+ MF.getBlockNumbered(KV.second));
+ }
+}
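
The multivalue demotion above is the subtle part of computeSignatureVTs. A minimal sketch of just that rule, with types as plain strings and demoteResults an invented name:

#include <cassert>
#include <string>
#include <vector>

// Toy model: without the multivalue feature, more than one result is
// replaced by an sret-style pointer parameter.
static void demoteResults(std::vector<std::string> &Params,
                          std::vector<std::string> &Results,
                          bool HasMultivalue, const std::string &PtrVT) {
  if (Results.size() > 1 && !HasMultivalue) {
    Results.clear();
    Params.push_back(PtrVT);
  }
}

int main() {
  std::vector<std::string> Params = {"i32"};
  std::vector<std::string> Results = {"i32", "i64"};
  demoteResults(Params, Results, /*HasMultivalue=*/false, /*PtrVT=*/"i32");
  assert(Results.empty() && Params.back() == "i32"); // demoted to sret pointer
}
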
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h
new file mode 100644
index 000000000000..fe18347ad8c1
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h
@@ -0,0 +1,225 @@
+// WebAssemblyMachineFunctionInfo.h-WebAssembly machine function info-*- C++ -*-
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares WebAssembly-specific per-machine-function
+/// information.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYMACHINEFUNCTIONINFO_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYMACHINEFUNCTIONINFO_H
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "llvm/CodeGen/MIRYamlMapping.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/MC/MCSymbolWasm.h"
+
+namespace llvm {
+class WebAssemblyTargetLowering;
+
+struct WasmEHFuncInfo;
+
+namespace yaml {
+struct WebAssemblyFunctionInfo;
+}
+
+/// This class is derived from MachineFunctionInfo and contains private
+/// WebAssembly-specific information for each MachineFunction.
+class WebAssemblyFunctionInfo final : public MachineFunctionInfo {
+ std::vector<MVT> Params;
+ std::vector<MVT> Results;
+ std::vector<MVT> Locals;
+
+ /// A mapping from CodeGen vreg index to WebAssembly register number.
+ std::vector<unsigned> WARegs;
+
+ /// A mapping from CodeGen vreg index to a boolean value indicating whether
+ /// the given register is considered to be "stackified", meaning it has been
+ /// determined or made to meet the stack requirements:
+ /// - single use (per path)
+ /// - single def (per path)
+ /// - defined and used in LIFO order with other stack registers
+ BitVector VRegStackified;
+
+ // A virtual register holding the pointer to the vararg buffer for vararg
+ // functions. It is created and set in TLI::LowerFormalArguments and read by
+  // TLI::LowerVASTART.
+ unsigned VarargVreg = -1U;
+
+ // A virtual register holding the base pointer for functions that have
+ // overaligned values on the user stack.
+ unsigned BasePtrVreg = -1U;
+  // A virtual register holding the frame base. This is either FP or SP
+  // after it has been replaced by a vreg.
+  unsigned FrameBaseVreg = -1U;
+  // The local holding the frame base. This is either FP or SP
+  // after WebAssemblyExplicitLocals.
+  unsigned FrameBaseLocal = -1U;
+
+ // Function properties.
+ bool CFGStackified = false;
+
+public:
+ explicit WebAssemblyFunctionInfo(const Function &F,
+ const TargetSubtargetInfo *STI) {}
+ ~WebAssemblyFunctionInfo() override;
+
+ MachineFunctionInfo *
+ clone(BumpPtrAllocator &Allocator, MachineFunction &DestMF,
+ const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB)
+ const override;
+
+ void initializeBaseYamlFields(MachineFunction &MF,
+ const yaml::WebAssemblyFunctionInfo &YamlMFI);
+
+ void addParam(MVT VT) { Params.push_back(VT); }
+ const std::vector<MVT> &getParams() const { return Params; }
+
+ void addResult(MVT VT) { Results.push_back(VT); }
+ const std::vector<MVT> &getResults() const { return Results; }
+
+ void clearParamsAndResults() {
+ Params.clear();
+ Results.clear();
+ }
+
+ void setNumLocals(size_t NumLocals) { Locals.resize(NumLocals, MVT::i32); }
+ void setLocal(size_t i, MVT VT) { Locals[i] = VT; }
+ void addLocal(MVT VT) { Locals.push_back(VT); }
+ const std::vector<MVT> &getLocals() const { return Locals; }
+
+ unsigned getVarargBufferVreg() const {
+ assert(VarargVreg != -1U && "Vararg vreg hasn't been set");
+ return VarargVreg;
+ }
+ void setVarargBufferVreg(unsigned Reg) { VarargVreg = Reg; }
+
+ unsigned getBasePointerVreg() const {
+ assert(BasePtrVreg != -1U && "Base ptr vreg hasn't been set");
+ return BasePtrVreg;
+ }
+ void setFrameBaseVreg(unsigned Reg) { FrameBaseVreg = Reg; }
+ unsigned getFrameBaseVreg() const {
+ assert(FrameBaseVreg != -1U && "Frame base vreg hasn't been set");
+ return FrameBaseVreg;
+ }
+ void clearFrameBaseVreg() { FrameBaseVreg = -1U; }
+ // Return true if the frame base physreg has been replaced by a virtual reg.
+ bool isFrameBaseVirtual() const { return FrameBaseVreg != -1U; }
+ void setFrameBaseLocal(unsigned Local) { FrameBaseLocal = Local; }
+ unsigned getFrameBaseLocal() const {
+ assert(FrameBaseLocal != -1U && "Frame base local hasn't been set");
+ return FrameBaseLocal;
+ }
+ void setBasePointerVreg(unsigned Reg) { BasePtrVreg = Reg; }
+
+ void stackifyVReg(MachineRegisterInfo &MRI, unsigned VReg) {
+ assert(MRI.getUniqueVRegDef(VReg));
+ auto I = Register::virtReg2Index(VReg);
+ if (I >= VRegStackified.size())
+ VRegStackified.resize(I + 1);
+ VRegStackified.set(I);
+ }
+ void unstackifyVReg(unsigned VReg) {
+ auto I = Register::virtReg2Index(VReg);
+ if (I < VRegStackified.size())
+ VRegStackified.reset(I);
+ }
+ bool isVRegStackified(unsigned VReg) const {
+ auto I = Register::virtReg2Index(VReg);
+ if (I >= VRegStackified.size())
+ return false;
+ return VRegStackified.test(I);
+ }
+
+ void initWARegs(MachineRegisterInfo &MRI);
+ void setWAReg(unsigned VReg, unsigned WAReg) {
+ assert(WAReg != WebAssembly::UnusedReg);
+ auto I = Register::virtReg2Index(VReg);
+ assert(I < WARegs.size());
+ WARegs[I] = WAReg;
+ }
+ unsigned getWAReg(unsigned VReg) const {
+ auto I = Register::virtReg2Index(VReg);
+ assert(I < WARegs.size());
+ return WARegs[I];
+ }
+
+ bool isCFGStackified() const { return CFGStackified; }
+ void setCFGStackified(bool Value = true) { CFGStackified = Value; }
+};
+
+void computeLegalValueVTs(const WebAssemblyTargetLowering &TLI,
+ LLVMContext &Ctx, const DataLayout &DL, Type *Ty,
+ SmallVectorImpl<MVT> &ValueVTs);
+
+void computeLegalValueVTs(const Function &F, const TargetMachine &TM, Type *Ty,
+ SmallVectorImpl<MVT> &ValueVTs);
+
+// Compute the signature for a given FunctionType (Ty). Note that it's not the
+// signature for ContextFunc (ContextFunc is just used to get various context).
+void computeSignatureVTs(const FunctionType *Ty, const Function *TargetFunc,
+ const Function &ContextFunc, const TargetMachine &TM,
+ SmallVectorImpl<MVT> &Params,
+ SmallVectorImpl<MVT> &Results);
+
+void valTypesFromMVTs(const ArrayRef<MVT> &In,
+ SmallVectorImpl<wasm::ValType> &Out);
+
+std::unique_ptr<wasm::WasmSignature>
+signatureFromMVTs(const SmallVectorImpl<MVT> &Results,
+ const SmallVectorImpl<MVT> &Params);
+
+namespace yaml {
+
+using BBNumberMap = DenseMap<int, int>;
+
+struct WebAssemblyFunctionInfo final : public yaml::MachineFunctionInfo {
+ std::vector<FlowStringValue> Params;
+ std::vector<FlowStringValue> Results;
+ bool CFGStackified = false;
+  // The same as WasmEHFuncInfo's SrcToUnwindDest, but stored as a mapping of
+  // BB numbers.
+ BBNumberMap SrcToUnwindDest;
+
+ WebAssemblyFunctionInfo() = default;
+ WebAssemblyFunctionInfo(const llvm::MachineFunction &MF,
+ const llvm::WebAssemblyFunctionInfo &MFI);
+
+ void mappingImpl(yaml::IO &YamlIO) override;
+ ~WebAssemblyFunctionInfo() = default;
+};
+
+template <> struct MappingTraits<WebAssemblyFunctionInfo> {
+ static void mapping(IO &YamlIO, WebAssemblyFunctionInfo &MFI) {
+ YamlIO.mapOptional("params", MFI.Params, std::vector<FlowStringValue>());
+ YamlIO.mapOptional("results", MFI.Results, std::vector<FlowStringValue>());
+ YamlIO.mapOptional("isCFGStackified", MFI.CFGStackified, false);
+ YamlIO.mapOptional("wasmEHFuncInfo", MFI.SrcToUnwindDest);
+ }
+};
+
+template <> struct CustomMappingTraits<BBNumberMap> {
+ static void inputOne(IO &YamlIO, StringRef Key,
+ BBNumberMap &SrcToUnwindDest) {
+ YamlIO.mapRequired(Key.str().c_str(),
+ SrcToUnwindDest[std::atoi(Key.str().c_str())]);
+ }
+
+ static void output(IO &YamlIO, BBNumberMap &SrcToUnwindDest) {
+ for (auto KV : SrcToUnwindDest)
+ YamlIO.mapRequired(std::to_string(KV.first).c_str(), KV.second);
+ }
+};
+
+} // end namespace yaml
+
+} // end namespace llvm
+
+#endif
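
CustomMappingTraits<BBNumberMap> exists because YAML mapping keys are strings while SrcToUnwindDest is keyed by BB numbers. A standalone sketch of the string round-trip it performs, assuming nothing beyond the standard library:

#include <cstdlib>
#include <iostream>
#include <map>
#include <string>

int main() {
  // Output side: BB numbers become string keys via std::to_string.
  std::map<int, int> SrcToUnwindDest = {{1, 4}, {3, 4}};
  for (const auto &KV : SrcToUnwindDest)
    std::cout << std::to_string(KV.first) << ": " << KV.second << '\n';

  // Input side: each key is parsed back with std::atoi, as inputOne does.
  std::string Key = "1";
  std::map<int, int> Parsed;
  Parsed[std::atoi(Key.c_str())] = 4;
  std::cout << "parsed key " << Key << " -> " << Parsed[1] << '\n';
}
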
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp
new file mode 100644
index 000000000000..2180f57c106a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp
@@ -0,0 +1,210 @@
+//== WebAssemblyMemIntrinsicResults.cpp - Optimize memory intrinsic results ==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements an optimization pass using memory intrinsic results.
+///
+/// Calls to memory intrinsics (memcpy, memmove, memset) return the destination
+/// address. They are in the form of
+/// %dst_new = call @memcpy %dst, %src, %len
+/// where %dst and %dst_new registers contain the same value.
+///
+/// This is to enable an optimization wherein uses of the %dst register used in
+/// the parameter can be replaced by uses of the %dst_new register used in the
+/// result, making the %dst register more likely to be single-use, thus more
+/// likely to be useful to register stackifying, and potentially also exposing
+/// the call instruction itself to register stackifying. These both can reduce
+/// local.get/local.set traffic.
+///
+/// The LLVM intrinsics for these return void so they can't use the returned
+/// attribute and consequently aren't handled by the OptimizeReturned pass.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-mem-intrinsic-results"
+
+namespace {
+class WebAssemblyMemIntrinsicResults final : public MachineFunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyMemIntrinsicResults() : MachineFunctionPass(ID) {}
+
+ StringRef getPassName() const override {
+ return "WebAssembly Memory Intrinsic Results";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineBlockFrequencyInfo>();
+ AU.addPreserved<MachineBlockFrequencyInfo>();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<SlotIndexes>();
+ AU.addPreserved<LiveIntervals>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+};
+} // end anonymous namespace
+
+char WebAssemblyMemIntrinsicResults::ID = 0;
+INITIALIZE_PASS(WebAssemblyMemIntrinsicResults, DEBUG_TYPE,
+ "Optimize memory intrinsic result values for WebAssembly",
+ false, false)
+
+FunctionPass *llvm::createWebAssemblyMemIntrinsicResults() {
+ return new WebAssemblyMemIntrinsicResults();
+}
+
+// Replace uses of FromReg with ToReg if they are dominated by MI.
+static bool replaceDominatedUses(MachineBasicBlock &MBB, MachineInstr &MI,
+ unsigned FromReg, unsigned ToReg,
+ const MachineRegisterInfo &MRI,
+ MachineDominatorTree &MDT,
+ LiveIntervals &LIS) {
+ bool Changed = false;
+
+ LiveInterval *FromLI = &LIS.getInterval(FromReg);
+ LiveInterval *ToLI = &LIS.getInterval(ToReg);
+
+ SlotIndex FromIdx = LIS.getInstructionIndex(MI).getRegSlot();
+ VNInfo *FromVNI = FromLI->getVNInfoAt(FromIdx);
+
+ SmallVector<SlotIndex, 4> Indices;
+
+ for (MachineOperand &O :
+ llvm::make_early_inc_range(MRI.use_nodbg_operands(FromReg))) {
+ MachineInstr *Where = O.getParent();
+
+ // Check that MI dominates the instruction in the normal way.
+ if (&MI == Where || !MDT.dominates(&MI, Where))
+ continue;
+
+ // If this use gets a different value, skip it.
+ SlotIndex WhereIdx = LIS.getInstructionIndex(*Where);
+ VNInfo *WhereVNI = FromLI->getVNInfoAt(WhereIdx);
+ if (WhereVNI && WhereVNI != FromVNI)
+ continue;
+
+ // Make sure ToReg isn't clobbered before it gets there.
+ VNInfo *ToVNI = ToLI->getVNInfoAt(WhereIdx);
+ if (ToVNI && ToVNI != FromVNI)
+ continue;
+
+ Changed = true;
+ LLVM_DEBUG(dbgs() << "Setting operand " << O << " in " << *Where << " from "
+ << MI << "\n");
+ O.setReg(ToReg);
+
+ // If the store's def was previously dead, it is no longer.
+ if (!O.isUndef()) {
+ MI.getOperand(0).setIsDead(false);
+
+ Indices.push_back(WhereIdx.getRegSlot());
+ }
+ }
+
+ if (Changed) {
+ // Extend ToReg's liveness.
+ LIS.extendToIndices(*ToLI, Indices);
+
+ // Shrink FromReg's liveness.
+ LIS.shrinkToUses(FromLI);
+
+ // If we replaced all dominated uses, FromReg is now killed at MI.
+ if (!FromLI->liveAt(FromIdx.getDeadSlot()))
+ MI.addRegisterKilled(FromReg, MBB.getParent()
+ ->getSubtarget<WebAssemblySubtarget>()
+ .getRegisterInfo());
+ }
+
+ return Changed;
+}
+
+static bool optimizeCall(MachineBasicBlock &MBB, MachineInstr &MI,
+ const MachineRegisterInfo &MRI,
+ MachineDominatorTree &MDT, LiveIntervals &LIS,
+ const WebAssemblyTargetLowering &TLI,
+ const TargetLibraryInfo &LibInfo) {
+ MachineOperand &Op1 = MI.getOperand(1);
+ if (!Op1.isSymbol())
+ return false;
+
+ StringRef Name(Op1.getSymbolName());
+ bool CallReturnsInput = Name == TLI.getLibcallName(RTLIB::MEMCPY) ||
+ Name == TLI.getLibcallName(RTLIB::MEMMOVE) ||
+ Name == TLI.getLibcallName(RTLIB::MEMSET);
+ if (!CallReturnsInput)
+ return false;
+
+ LibFunc Func;
+ if (!LibInfo.getLibFunc(Name, Func))
+ return false;
+
+ Register FromReg = MI.getOperand(2).getReg();
+ Register ToReg = MI.getOperand(0).getReg();
+ if (MRI.getRegClass(FromReg) != MRI.getRegClass(ToReg))
+ report_fatal_error("Memory Intrinsic results: call to builtin function "
+ "with wrong signature, from/to mismatch");
+ return replaceDominatedUses(MBB, MI, FromReg, ToReg, MRI, MDT, LIS);
+}
+
+bool WebAssemblyMemIntrinsicResults::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG({
+ dbgs() << "********** Memory Intrinsic Results **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
+
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ auto &MDT = getAnalysis<MachineDominatorTree>();
+ const WebAssemblyTargetLowering &TLI =
+ *MF.getSubtarget<WebAssemblySubtarget>().getTargetLowering();
+ const auto &LibInfo =
+ getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(MF.getFunction());
+ auto &LIS = getAnalysis<LiveIntervals>();
+ bool Changed = false;
+
+ // We don't preserve SSA form.
+ MRI.leaveSSA();
+
+ assert(MRI.tracksLiveness() &&
+ "MemIntrinsicResults expects liveness tracking");
+
+ for (auto &MBB : MF) {
+ LLVM_DEBUG(dbgs() << "Basic Block: " << MBB.getName() << '\n');
+ for (auto &MI : MBB)
+ switch (MI.getOpcode()) {
+ default:
+ break;
+ case WebAssembly::CALL:
+ Changed |= optimizeCall(MBB, MI, MRI, MDT, LIS, TLI, LibInfo);
+ break;
+ }
+ }
+
+ return Changed;
+}
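
replaceDominatedUses is easiest to see on a straight-line block, where "MI dominates Where" reduces to an index comparison. A toy sketch under that simplification (the Use struct is invented for illustration):

#include <cassert>
#include <vector>

// Toy model of replaceDominatedUses() for a single straight-line block,
// where "instruction i dominates instruction j" reduces to i < j.
struct Use {
  size_t InstIdx;
  int Reg;
};

static bool replaceDominatedUses(std::vector<Use> &Uses, size_t CallIdx,
                                 int FromReg, int ToReg) {
  bool Changed = false;
  for (Use &U : Uses)
    if (U.Reg == FromReg && U.InstIdx > CallIdx) { // dominated by the call
      U.Reg = ToReg;
      Changed = true;
    }
  return Changed;
}

int main() {
  // %1 = call memcpy %0, ... at index 2; later uses of %0 become uses of %1.
  std::vector<Use> Uses = {{0, 0}, {1, 0}, {3, 0}, {5, 0}};
  bool Changed =
      replaceDominatedUses(Uses, /*CallIdx=*/2, /*FromReg=*/0, /*ToReg=*/1);
  assert(Changed && Uses[2].Reg == 1 && Uses[3].Reg == 1 && Uses[0].Reg == 0);
}
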
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyNullifyDebugValueLists.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyNullifyDebugValueLists.cpp
new file mode 100644
index 000000000000..b58f7a0152ae
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyNullifyDebugValueLists.cpp
@@ -0,0 +1,64 @@
+//=== WebAssemblyNullifyDebugValueLists.cpp - Nullify DBG_VALUE_LISTs ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Nullify DBG_VALUE_LIST instructions as a temporary measure before we
+/// implement DBG_VALUE_LIST handling in WebAssemblyDebugValueManager.
+/// See https://github.com/llvm/llvm-project/issues/49705.
+/// TODO: Correctly handle DBG_VALUE_LISTs.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssembly.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-nullify-dbg-value-lists"
+
+namespace {
+class WebAssemblyNullifyDebugValueLists final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly Nullify DBG_VALUE_LISTs";
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyNullifyDebugValueLists() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyNullifyDebugValueLists::ID = 0;
+INITIALIZE_PASS(WebAssemblyNullifyDebugValueLists, DEBUG_TYPE,
+ "WebAssembly Nullify DBG_VALUE_LISTs", false, false)
+
+FunctionPass *llvm::createWebAssemblyNullifyDebugValueLists() {
+ return new WebAssemblyNullifyDebugValueLists();
+}
+
+bool WebAssemblyNullifyDebugValueLists::runOnMachineFunction(
+ MachineFunction &MF) {
+ LLVM_DEBUG(dbgs() << "********** Nullify DBG_VALUE_LISTs **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+ bool Changed = false;
+  // Our backend, including WebAssemblyDebugValueManager, currently cannot
+  // handle DBG_VALUE_LISTs correctly, so this pass makes them undefined; they
+  // will then appear as "optimized out".
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ if (MI.getOpcode() == TargetOpcode::DBG_VALUE_LIST) {
+ MI.setDebugValueUndef();
+ Changed = true;
+ }
+ }
+ }
+ return Changed;
+}
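
The pass body reduces to a single scan-and-flag loop. A toy rendition with invented Opcode/Inst stand-ins:

#include <cassert>
#include <vector>

enum Opcode { ADD, DBG_VALUE, DBG_VALUE_LIST };

struct Inst {
  Opcode Op;
  bool Undef = false;
};

int main() {
  // Toy version of the pass body: every DBG_VALUE_LIST is forced to undef
  // ("optimized out"); everything else is left alone.
  std::vector<Inst> MBB = {{ADD}, {DBG_VALUE_LIST}, {DBG_VALUE}};
  bool Changed = false;
  for (Inst &MI : MBB)
    if (MI.Op == DBG_VALUE_LIST) {
      MI.Undef = true;
      Changed = true;
    }
  assert(Changed && MBB[1].Undef && !MBB[2].Undef);
}
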
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
new file mode 100644
index 000000000000..d542ddb45c2e
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
@@ -0,0 +1,124 @@
+//===--- WebAssemblyOptimizeLiveIntervals.cpp - LiveInterval processing ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Optimize LiveIntervals for use in a post-RA context.
+///
+/// LiveIntervals normally runs before register allocation when the code is
+/// only recently lowered out of SSA form, so it's uncommon for registers to
+/// have multiple defs, and when they do, the defs are usually closely related.
+/// Later, after coalescing, tail duplication, and other optimizations, it's
+/// more common to see registers with multiple unrelated defs. This pass
+/// updates LiveIntervals to distribute the value numbers across separate
+/// LiveIntervals.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-optimize-live-intervals"
+
+namespace {
+class WebAssemblyOptimizeLiveIntervals final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly Optimize Live Intervals";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<MachineBlockFrequencyInfo>();
+ AU.addPreserved<SlotIndexes>();
+ AU.addPreserved<LiveIntervals>();
+ AU.addPreservedID(LiveVariablesID);
+ AU.addPreservedID(MachineDominatorsID);
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::TracksLiveness);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyOptimizeLiveIntervals() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyOptimizeLiveIntervals::ID = 0;
+INITIALIZE_PASS(WebAssemblyOptimizeLiveIntervals, DEBUG_TYPE,
+ "Optimize LiveIntervals for WebAssembly", false, false)
+
+FunctionPass *llvm::createWebAssemblyOptimizeLiveIntervals() {
+ return new WebAssemblyOptimizeLiveIntervals();
+}
+
+bool WebAssemblyOptimizeLiveIntervals::runOnMachineFunction(
+ MachineFunction &MF) {
+ LLVM_DEBUG(dbgs() << "********** Optimize LiveIntervals **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ auto &LIS = getAnalysis<LiveIntervals>();
+
+ // We don't preserve SSA form.
+ MRI.leaveSSA();
+
+ assert(MRI.tracksLiveness() && "OptimizeLiveIntervals expects liveness");
+
+ // Split multiple-VN LiveIntervals into multiple LiveIntervals.
+ SmallVector<LiveInterval *, 4> SplitLIs;
+ for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I) {
+ Register Reg = Register::index2VirtReg(I);
+ auto &TRI = *MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo();
+
+ if (MRI.reg_nodbg_empty(Reg))
+ continue;
+
+ LIS.splitSeparateComponents(LIS.getInterval(Reg), SplitLIs);
+ if (Reg == TRI.getFrameRegister(MF) && SplitLIs.size() > 0) {
+ // The live interval for the frame register was split, resulting in a new
+ // VReg. For now we only support debug info output for a single frame base
+ // value for the function, so just use the last one. It will certainly be
+ // wrong for some part of the function, but until we are able to track
+ // values through live-range splitting and stackification, it will have to
+ // do.
+ MF.getInfo<WebAssemblyFunctionInfo>()->setFrameBaseVreg(
+ SplitLIs.back()->reg());
+ }
+ SplitLIs.clear();
+ }
+
+ // In FixIrreducibleControlFlow, we conservatively inserted IMPLICIT_DEF
+ // instructions to satisfy LiveIntervals' requirement that all uses be
+ // dominated by defs. Now that LiveIntervals has computed which of these
+ // defs are actually needed and which are dead, remove the dead ones.
+ for (MachineInstr &MI : llvm::make_early_inc_range(MF.front())) {
+ if (MI.isImplicitDef() && MI.getOperand(0).isDead()) {
+ LiveInterval &LI = LIS.getInterval(MI.getOperand(0).getReg());
+ LIS.removeVRegDefAt(LI, LIS.getInstructionIndex(MI).getRegSlot());
+ LIS.RemoveMachineInstrFromMaps(MI);
+ MI.eraseFromParent();
+ }
+ }
+
+ return true;
+}
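
The dead-IMPLICIT_DEF sweep relies on llvm::make_early_inc_range so instructions can be erased while iterating. The underlying advance-before-erase idiom, shown standalone on a std::list:

#include <cassert>
#include <list>

int main() {
  // Toy version of the sweep: erase elements while walking the list by
  // advancing the iterator before the erase, which is exactly what
  // llvm::make_early_inc_range automates.
  std::list<int> MBB = {0 /*dead def*/, 1, 0 /*dead def*/, 2};
  for (auto It = MBB.begin(); It != MBB.end();) {
    auto Cur = It++;  // advance first, like early_inc_range
    if (*Cur == 0)
      MBB.erase(Cur); // safe: It already points past Cur
  }
  assert(MBB == std::list<int>({1, 2}));
}
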
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp
new file mode 100644
index 000000000000..7912aeb4f502
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp
@@ -0,0 +1,79 @@
+//===-- WebAssemblyOptimizeReturned.cpp - Optimize "returned" attributes --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Optimize calls with "returned" attributes for WebAssembly.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssembly.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-optimize-returned"
+
+namespace {
+class OptimizeReturned final : public FunctionPass,
+ public InstVisitor<OptimizeReturned> {
+ StringRef getPassName() const override {
+ return "WebAssembly Optimize Returned";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
+ FunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnFunction(Function &F) override;
+
+ DominatorTree *DT = nullptr;
+
+public:
+ static char ID;
+ OptimizeReturned() : FunctionPass(ID) {}
+
+ void visitCallBase(CallBase &CB);
+};
+} // End anonymous namespace
+
+char OptimizeReturned::ID = 0;
+INITIALIZE_PASS(OptimizeReturned, DEBUG_TYPE,
+ "Optimize calls with \"returned\" attributes for WebAssembly",
+ false, false)
+
+FunctionPass *llvm::createWebAssemblyOptimizeReturned() {
+ return new OptimizeReturned();
+}
+
+void OptimizeReturned::visitCallBase(CallBase &CB) {
+ for (unsigned I = 0, E = CB.arg_size(); I < E; ++I)
+ if (CB.paramHasAttr(I, Attribute::Returned)) {
+ Value *Arg = CB.getArgOperand(I);
+ // Ignore constants, globals, undef, etc.
+ if (isa<Constant>(Arg))
+ continue;
+ // Like replaceDominatedUsesWith but using Instruction/Use dominance.
+ Arg->replaceUsesWithIf(&CB,
+ [&](Use &U) { return DT->dominates(&CB, U); });
+ }
+}
+
+bool OptimizeReturned::runOnFunction(Function &F) {
+ LLVM_DEBUG(dbgs() << "********** Optimize returned Attributes **********\n"
+ "********** Function: "
+ << F.getName() << '\n');
+
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ visit(F);
+ return true;
+}
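
Arg->replaceUsesWithIf with a dominance predicate is the heart of this pass. A toy analog of the same shape, with an invented Use struct and the predicate supplied by the caller:

#include <cassert>
#include <functional>
#include <vector>

// Toy analog of Value::replaceUsesWithIf: rewrite only the uses that a
// caller-supplied predicate approves, as OptimizeReturned does with
// dominance as the predicate.
struct Use {
  int Reg;
  bool DominatedByCall;
};

static void replaceUsesWithIf(std::vector<Use> &Uses, int From, int To,
                              const std::function<bool(const Use &)> &Pred) {
  for (Use &U : Uses)
    if (U.Reg == From && Pred(U))
      U.Reg = To;
}

int main() {
  std::vector<Use> Uses = {{7, false}, {7, true}, {7, true}};
  replaceUsesWithIf(Uses, /*From=*/7, /*To=*/9,
                    [](const Use &U) { return U.DominatedByCall; });
  assert(Uses[0].Reg == 7 && Uses[1].Reg == 9 && Uses[2].Reg == 9);
}
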
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp
new file mode 100644
index 000000000000..6e2d566d9b48
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp
@@ -0,0 +1,165 @@
+//===-- WebAssemblyPeephole.cpp - WebAssembly Peephole Optimizations ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Late peephole optimizations for WebAssembly.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-peephole"
+
+static cl::opt<bool> DisableWebAssemblyFallthroughReturnOpt(
+ "disable-wasm-fallthrough-return-opt", cl::Hidden,
+ cl::desc("WebAssembly: Disable fallthrough-return optimizations."),
+ cl::init(false));
+
+namespace {
+class WebAssemblyPeephole final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly late peephole optimizer";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID;
+ WebAssemblyPeephole() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyPeephole::ID = 0;
+INITIALIZE_PASS(WebAssemblyPeephole, DEBUG_TYPE,
+ "WebAssembly peephole optimizations", false, false)
+
+FunctionPass *llvm::createWebAssemblyPeephole() {
+ return new WebAssemblyPeephole();
+}
+
+/// If desirable, rewrite the call's result to a fresh dead register (a drop).
+static bool maybeRewriteToDrop(unsigned OldReg, unsigned NewReg,
+ MachineOperand &MO, WebAssemblyFunctionInfo &MFI,
+ MachineRegisterInfo &MRI) {
+ bool Changed = false;
+ if (OldReg == NewReg) {
+ Changed = true;
+ Register NewReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
+ MO.setReg(NewReg);
+ MO.setIsDead();
+ MFI.stackifyVReg(MRI, NewReg);
+ }
+ return Changed;
+}
+
+static bool maybeRewriteToFallthrough(MachineInstr &MI, MachineBasicBlock &MBB,
+ const MachineFunction &MF,
+ WebAssemblyFunctionInfo &MFI,
+ MachineRegisterInfo &MRI,
+ const WebAssemblyInstrInfo &TII) {
+ if (DisableWebAssemblyFallthroughReturnOpt)
+ return false;
+ if (&MBB != &MF.back())
+ return false;
+
+ MachineBasicBlock::iterator End = MBB.end();
+ --End;
+ assert(End->getOpcode() == WebAssembly::END_FUNCTION);
+ --End;
+ if (&MI != &*End)
+ return false;
+
+ for (auto &MO : MI.explicit_operands()) {
+    // If the operand isn't stackified, insert a COPY to read the operand and
+    // stackify it.
+ Register Reg = MO.getReg();
+ if (!MFI.isVRegStackified(Reg)) {
+      const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
+      unsigned CopyLocalOpc = WebAssembly::getCopyOpcodeForRegClass(RegClass);
+ Register NewReg = MRI.createVirtualRegister(RegClass);
+ BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(CopyLocalOpc), NewReg)
+ .addReg(Reg);
+ MO.setReg(NewReg);
+ MFI.stackifyVReg(MRI, NewReg);
+ }
+ }
+
+ MI.setDesc(TII.get(WebAssembly::FALLTHROUGH_RETURN));
+ return true;
+}
+
+bool WebAssemblyPeephole::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG({
+ dbgs() << "********** Peephole **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
+
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ const WebAssemblyTargetLowering &TLI =
+ *MF.getSubtarget<WebAssemblySubtarget>().getTargetLowering();
+ auto &LibInfo =
+ getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(MF.getFunction());
+ bool Changed = false;
+
+ for (auto &MBB : MF)
+ for (auto &MI : MBB)
+ switch (MI.getOpcode()) {
+ default:
+ break;
+ case WebAssembly::CALL: {
+ MachineOperand &Op1 = MI.getOperand(1);
+ if (Op1.isSymbol()) {
+ StringRef Name(Op1.getSymbolName());
+ if (Name == TLI.getLibcallName(RTLIB::MEMCPY) ||
+ Name == TLI.getLibcallName(RTLIB::MEMMOVE) ||
+ Name == TLI.getLibcallName(RTLIB::MEMSET)) {
+ LibFunc Func;
+ if (LibInfo.getLibFunc(Name, Func)) {
+ const auto &Op2 = MI.getOperand(2);
+ if (!Op2.isReg())
+ report_fatal_error("Peephole: call to builtin function with "
+ "wrong signature, not consuming reg");
+ MachineOperand &MO = MI.getOperand(0);
+ Register OldReg = MO.getReg();
+ Register NewReg = Op2.getReg();
+
+ if (MRI.getRegClass(NewReg) != MRI.getRegClass(OldReg))
+ report_fatal_error("Peephole: call to builtin function with "
+ "wrong signature, from/to mismatch");
+ Changed |= maybeRewriteToDrop(OldReg, NewReg, MO, MFI, MRI);
+ }
+ }
+ }
+ break;
+ }
+ // Optimize away an explicit void return at the end of the function.
+ case WebAssembly::RETURN:
+ Changed |= maybeRewriteToFallthrough(MI, MBB, MF, MFI, MRI, TII);
+ break;
+ }
+
+ return Changed;
+}
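
maybeRewriteToFallthrough only fires for the RETURN immediately before END_FUNCTION in the function's last block. A toy sketch of that gate, with opcodes as strings and rewriteIfFallthrough an invented name:

#include <cassert>
#include <string>
#include <vector>

// Toy gate from maybeRewriteToFallthrough(): a RETURN may become a
// FALLTHROUGH_RETURN only if it is the instruction immediately before
// END_FUNCTION in the function's final basic block.
static bool rewriteIfFallthrough(std::vector<std::string> &LastMBB,
                                 size_t Idx) {
  if (LastMBB.size() < 2 || LastMBB.back() != "END_FUNCTION")
    return false;
  if (Idx != LastMBB.size() - 2 || LastMBB[Idx] != "RETURN")
    return false;
  LastMBB[Idx] = "FALLTHROUGH_RETURN";
  return true;
}

int main() {
  std::vector<std::string> MBB = {"local.get", "RETURN", "END_FUNCTION"};
  assert(rewriteIfFallthrough(MBB, 1) && MBB[1] == "FALLTHROUGH_RETURN");
  assert(!rewriteIfFallthrough(MBB, 0)); // not the final RETURN
}
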
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
new file mode 100644
index 000000000000..c9ef17f92814
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
@@ -0,0 +1,332 @@
+//===-- WebAssemblyRegColoring.cpp - Register coloring --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements a virtual register coloring pass.
+///
+/// WebAssembly doesn't have a fixed number of registers, but it is still
+/// desirable to minimize the total number of registers used in each function.
+///
+/// This code is modeled after lib/CodeGen/StackSlotColoring.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-reg-coloring"
+
+namespace {
+class WebAssemblyRegColoring final : public MachineFunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyRegColoring() : MachineFunctionPass(ID) {}
+
+ StringRef getPassName() const override {
+ return "WebAssembly Register Coloring";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<LiveIntervals>();
+ AU.addRequired<MachineBlockFrequencyInfo>();
+ AU.addPreserved<MachineBlockFrequencyInfo>();
+ AU.addPreservedID(MachineDominatorsID);
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+};
+} // end anonymous namespace
+
+char WebAssemblyRegColoring::ID = 0;
+INITIALIZE_PASS(WebAssemblyRegColoring, DEBUG_TYPE,
+ "Minimize number of registers used", false, false)
+
+FunctionPass *llvm::createWebAssemblyRegColoring() {
+ return new WebAssemblyRegColoring();
+}
+
+// Compute the total spill weight for VReg.
+static float computeWeight(const MachineRegisterInfo *MRI,
+ const MachineBlockFrequencyInfo *MBFI,
+ unsigned VReg) {
+ float Weight = 0.0f;
+ for (MachineOperand &MO : MRI->reg_nodbg_operands(VReg))
+ Weight += LiveIntervals::getSpillWeight(MO.isDef(), MO.isUse(), MBFI,
+ *MO.getParent());
+ return Weight;
+}
+
+// Create a map of "Register -> vector of <SlotIndex, DBG_VALUE>".
+// The SlotIndex is the slot index of the next non-debug instruction or the end
+// of a BB, because DBG_VALUEs don't have slot indices themselves.
+// Adapted from RegisterCoalescer::buildVRegToDbgValueMap.
+static DenseMap<Register, std::vector<std::pair<SlotIndex, MachineInstr *>>>
+buildVRegToDbgValueMap(MachineFunction &MF, const LiveIntervals *Liveness) {
+ DenseMap<Register, std::vector<std::pair<SlotIndex, MachineInstr *>>>
+ DbgVRegToValues;
+ const SlotIndexes *Slots = Liveness->getSlotIndexes();
+ SmallVector<MachineInstr *, 8> ToInsert;
+
+ // After collecting a block of DBG_VALUEs into ToInsert, enter them into the
+ // map.
+ auto CloseNewDVRange = [&DbgVRegToValues, &ToInsert](SlotIndex Slot) {
+ for (auto *X : ToInsert) {
+ for (const auto &Op : X->debug_operands()) {
+ if (Op.isReg() && Op.getReg().isVirtual())
+ DbgVRegToValues[Op.getReg()].push_back({Slot, X});
+ }
+ }
+
+ ToInsert.clear();
+ };
+
+ // Iterate over all instructions, collecting them into the ToInsert vector.
+ // Once a non-debug instruction is found, record the slot index of the
+ // collected DBG_VALUEs.
+ for (auto &MBB : MF) {
+ SlotIndex CurrentSlot = Slots->getMBBStartIdx(&MBB);
+
+ for (auto &MI : MBB) {
+ if (MI.isDebugValue()) {
+ if (any_of(MI.debug_operands(), [](const MachineOperand &MO) {
+ return MO.isReg() && MO.getReg().isVirtual();
+ }))
+ ToInsert.push_back(&MI);
+ } else if (!MI.isDebugOrPseudoInstr()) {
+ CurrentSlot = Slots->getInstructionIndex(MI);
+ CloseNewDVRange(CurrentSlot);
+ }
+ }
+
+ // Close range of DBG_VALUEs at the end of blocks.
+ CloseNewDVRange(Slots->getMBBEndIdx(&MBB));
+ }
+
+ // Sort all DBG_VALUEs we've seen by slot number.
+ for (auto &Pair : DbgVRegToValues)
+ llvm::sort(Pair.second);
+ return DbgVRegToValues;
+}
+
+// After register coalescing, some DBG_VALUEs will be invalid. Set them undef.
+// This function has to run before the actual coalescing, i.e., before the
+// register operands are actually rewritten.
+static void undefInvalidDbgValues(
+ const LiveIntervals *Liveness,
+ const ArrayRef<SmallVector<LiveInterval *, 4>> &Assignments,
+ DenseMap<Register, std::vector<std::pair<SlotIndex, MachineInstr *>>>
+ &DbgVRegToValues) {
+#ifndef NDEBUG
+ DenseSet<Register> SeenRegs;
+#endif
+ for (size_t I = 0, E = Assignments.size(); I < E; ++I) {
+ const auto &CoalescedIntervals = Assignments[I];
+ if (CoalescedIntervals.empty())
+ continue;
+ for (LiveInterval *LI : CoalescedIntervals) {
+ Register Reg = LI->reg();
+#ifndef NDEBUG
+ // Ensure we don't process the same register twice
+ assert(SeenRegs.insert(Reg).second);
+#endif
+ auto RegMapIt = DbgVRegToValues.find(Reg);
+ if (RegMapIt == DbgVRegToValues.end())
+ continue;
+ SlotIndex LastSlot;
+ bool LastUndefResult = false;
+ for (auto [Slot, DbgValue] : RegMapIt->second) {
+        // Consecutive DBG_VALUEs often share a slot index: since DBG_VALUEs
+        // don't have slot indices of their own, each is keyed to the first
+        // non-debug instruction that follows it. Before doing live range
+        // queries, quickly check if the current DBG_VALUE has the same slot
+        // index as the previous one, in which case the same result applies.
+        // Note that RegMapIt->second, the vector of {SlotIndex, DBG_VALUE},
+        // is sorted by SlotIndex, which is necessary for this check.
+ if (Slot == LastSlot) {
+ if (LastUndefResult) {
+ LLVM_DEBUG(dbgs() << "Undefed: " << *DbgValue << "\n");
+ DbgValue->setDebugValueUndef();
+ }
+ continue;
+ }
+ LastSlot = Slot;
+ LastUndefResult = false;
+ for (LiveInterval *OtherLI : CoalescedIntervals) {
+ if (LI == OtherLI)
+ continue;
+
+ // This DBG_VALUE has 'Reg' (the current LiveInterval's register) as
+ // its operand. If this DBG_VALUE's slot index is within other
+ // registers' live ranges, this DBG_VALUE should be undefed. For
+ // example, suppose %0 and %1 are to be coalesced into %0.
+ // ; %0's live range starts
+ // %0 = value_0
+ // DBG_VALUE %0, !"a", ... (a)
+ // DBG_VALUE %1, !"b", ... (b)
+ // use %0
+ // ; %0's live range ends
+ // ...
+ // ; %1's live range starts
+ // %1 = value_1
+ // DBG_VALUE %0, !"c", ... (c)
+ // DBG_VALUE %1, !"d", ... (d)
+ // use %1
+ // ; %1's live range ends
+ //
+        // In this code, (b) and (c) should be set to undef. After the two
+        // registers are coalesced, (b) will incorrectly say the variable
+        // "b"'s value is 'value_0', and (c) will incorrectly say the
+        // variable "c"'s value is 'value_1'. Note it doesn't actually matter
+        // which register they are coalesced into (%0 or %1); (b) and (c)
+        // should be set to undef as well if they are coalesced into %1.
+        //
+        // This happens because DBG_VALUEs are not included when computing
+        // live ranges.
+ //
+        // Note that it is not possible for this DBG_VALUE to be
+        // simultaneously within 'Reg''s live range and one of the other
+        // coalesced registers' live ranges, because if their live ranges
+        // overlapped they would not have been selected as a coalescing
+        // candidate in the first place.
+ auto *SegmentIt = OtherLI->find(Slot);
+ if (SegmentIt != OtherLI->end() && SegmentIt->contains(Slot)) {
+ LLVM_DEBUG(dbgs() << "Undefed: " << *DbgValue << "\n");
+ DbgValue->setDebugValueUndef();
+ LastUndefResult = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+}
+
+bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG({
+ dbgs() << "********** Register Coloring **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
+
+ // If there are calls to setjmp or sigsetjmp, don't perform coloring. Virtual
+ // registers could be modified before the longjmp is executed, resulting in
+ // the wrong value being used afterwards.
+ // TODO: Does WebAssembly need to care about setjmp for register coloring?
+ if (MF.exposesReturnsTwice())
+ return false;
+
+ MachineRegisterInfo *MRI = &MF.getRegInfo();
+ LiveIntervals *Liveness = &getAnalysis<LiveIntervals>();
+ const MachineBlockFrequencyInfo *MBFI =
+ &getAnalysis<MachineBlockFrequencyInfo>();
+ WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+
+ // We don't preserve SSA form.
+ MRI->leaveSSA();
+
+ // Gather all register intervals into a list and sort them.
+ unsigned NumVRegs = MRI->getNumVirtRegs();
+ SmallVector<LiveInterval *, 0> SortedIntervals;
+ SortedIntervals.reserve(NumVRegs);
+
+ // Record DBG_VALUEs and their SlotIndexes.
+ auto DbgVRegToValues = buildVRegToDbgValueMap(MF, Liveness);
+
+ LLVM_DEBUG(dbgs() << "Interesting register intervals:\n");
+ for (unsigned I = 0; I < NumVRegs; ++I) {
+ Register VReg = Register::index2VirtReg(I);
+ if (MFI.isVRegStackified(VReg))
+ continue;
+ // Skip unused registers, which can use $drop.
+ if (MRI->use_empty(VReg))
+ continue;
+
+ LiveInterval *LI = &Liveness->getInterval(VReg);
+ assert(LI->weight() == 0.0f);
+ LI->setWeight(computeWeight(MRI, MBFI, VReg));
+ LLVM_DEBUG(LI->dump());
+ SortedIntervals.push_back(LI);
+ }
+ LLVM_DEBUG(dbgs() << '\n');
+
+ // Sort them to put arguments first (since we don't want to rename live-in
+ // registers), by weight next, and then by position.
+ // TODO: Investigate more intelligent sorting heuristics. For starters, we
+ // should try to coalesce adjacent live intervals before non-adjacent ones.
+ llvm::sort(SortedIntervals, [MRI](LiveInterval *LHS, LiveInterval *RHS) {
+ if (MRI->isLiveIn(LHS->reg()) != MRI->isLiveIn(RHS->reg()))
+ return MRI->isLiveIn(LHS->reg());
+ if (LHS->weight() != RHS->weight())
+ return LHS->weight() > RHS->weight();
+ if (LHS->empty() || RHS->empty())
+ return !LHS->empty() && RHS->empty();
+ return *LHS < *RHS;
+ });
+
+ LLVM_DEBUG(dbgs() << "Coloring register intervals:\n");
+ SmallVector<unsigned, 16> SlotMapping(SortedIntervals.size(), -1u);
+ SmallVector<SmallVector<LiveInterval *, 4>, 16> Assignments(
+ SortedIntervals.size());
+ BitVector UsedColors(SortedIntervals.size());
+ bool Changed = false;
+ for (size_t I = 0, E = SortedIntervals.size(); I < E; ++I) {
+ LiveInterval *LI = SortedIntervals[I];
+ Register Old = LI->reg();
+ size_t Color = I;
+ const TargetRegisterClass *RC = MRI->getRegClass(Old);
+
+ // Check if it's possible to reuse any of the used colors.
+ if (!MRI->isLiveIn(Old))
+ for (unsigned C : UsedColors.set_bits()) {
+ if (MRI->getRegClass(SortedIntervals[C]->reg()) != RC)
+ continue;
+ for (LiveInterval *OtherLI : Assignments[C])
+ if (!OtherLI->empty() && OtherLI->overlaps(*LI))
+ goto continue_outer;
+ Color = C;
+ break;
+ continue_outer:;
+ }
+
+ Register New = SortedIntervals[Color]->reg();
+ SlotMapping[I] = New;
+ Changed |= Old != New;
+ UsedColors.set(Color);
+ Assignments[Color].push_back(LI);
+ // If we reassigned the stack pointer, update the debug frame base info.
+ if (Old != New && MFI.isFrameBaseVirtual() && MFI.getFrameBaseVreg() == Old)
+ MFI.setFrameBaseVreg(New);
+ LLVM_DEBUG(dbgs() << "Assigning vreg" << Register::virtReg2Index(LI->reg())
+ << " to vreg" << Register::virtReg2Index(New) << "\n");
+ }
+ if (!Changed)
+ return false;
+
+ // Set DBG_VALUEs that will be invalid after coalescing to undef.
+ undefInvalidDbgValues(Liveness, Assignments, DbgVRegToValues);
+
+ // Rewrite register operands.
+ for (size_t I = 0, E = SortedIntervals.size(); I < E; ++I) {
+ Register Old = SortedIntervals[I]->reg();
+ unsigned New = SlotMapping[I];
+ if (Old != New)
+ MRI->replaceRegWith(Old, New);
+ }
+ return true;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
new file mode 100644
index 000000000000..1203b343bf24
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
@@ -0,0 +1,110 @@
+//===-- WebAssemblyRegNumbering.cpp - Register Numbering ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements a pass which assigns WebAssembly register
+/// numbers for CodeGen virtual registers.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/ADT/SCCIterator.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-reg-numbering"
+
+namespace {
+class WebAssemblyRegNumbering final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly Register Numbering";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyRegNumbering() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyRegNumbering::ID = 0;
+INITIALIZE_PASS(WebAssemblyRegNumbering, DEBUG_TYPE,
+ "Assigns WebAssembly register numbers for virtual registers",
+ false, false)
+
+FunctionPass *llvm::createWebAssemblyRegNumbering() {
+ return new WebAssemblyRegNumbering();
+}
+
+bool WebAssemblyRegNumbering::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG(dbgs() << "********** Register Numbering **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ MFI.initWARegs(MRI);
+
+ // WebAssembly argument registers are in the same index space as local
+ // variables. Assign the numbers for them first.
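+  // E.g. with two parameters, the parameters occupy local indices 0 and 1,
+  // and the first non-argument register is numbered 2 (CurReg starts at
+  // getParams().size() below).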
+ MachineBasicBlock &EntryMBB = MF.front();
+ for (MachineInstr &MI : EntryMBB) {
+ if (!WebAssembly::isArgument(MI.getOpcode()))
+ break;
+
+ int64_t Imm = MI.getOperand(1).getImm();
+ LLVM_DEBUG(dbgs() << "Arg VReg " << MI.getOperand(0).getReg()
+ << " -> WAReg " << Imm << "\n");
+ MFI.setWAReg(MI.getOperand(0).getReg(), Imm);
+ }
+
+ // Then assign regular WebAssembly registers for all remaining used
+ // virtual registers. TODO: Consider sorting the registers by frequency of
+ // use, to maximize usage of small immediate fields.
+ unsigned NumVRegs = MF.getRegInfo().getNumVirtRegs();
+ unsigned NumStackRegs = 0;
+ // Start the numbering for locals after the arg regs
+ unsigned CurReg = MFI.getParams().size();
+ for (unsigned VRegIdx = 0; VRegIdx < NumVRegs; ++VRegIdx) {
+ Register VReg = Register::index2VirtReg(VRegIdx);
+ // Skip unused registers.
+ if (MRI.use_empty(VReg))
+ continue;
+ // Handle stackified registers.
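+    // These are tagged by setting the sign bit (INT32_MIN | N) so they remain
+    // distinguishable from real local numbers.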
+ if (MFI.isVRegStackified(VReg)) {
+ LLVM_DEBUG(dbgs() << "VReg " << VReg << " -> WAReg "
+ << (INT32_MIN | NumStackRegs) << "\n");
+ MFI.setWAReg(VReg, INT32_MIN | NumStackRegs++);
+ continue;
+ }
+ if (MFI.getWAReg(VReg) == WebAssembly::UnusedReg) {
+ LLVM_DEBUG(dbgs() << "VReg " << VReg << " -> WAReg " << CurReg << "\n");
+ MFI.setWAReg(VReg, CurReg++);
+ }
+ }
+
+ return true;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
new file mode 100644
index 000000000000..3046f9476f91
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
@@ -0,0 +1,984 @@
+//===-- WebAssemblyRegStackify.cpp - Register Stackification --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements a register stacking pass.
+///
+/// This pass reorders instructions to put register uses and defs in an order
+/// such that they form single-use expression trees. Registers fitting this form
+/// are then marked as "stackified", meaning references to them are replaced by
+/// "push" and "pop" from the value stack.
+///
+/// This is primarily a code size optimization, since temporary values on the
+/// value stack don't need to be named.
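+///
+/// As an illustrative sketch (register numbers are hypothetical), a nested
+/// expression such as
+///
+///   %0 = CONST_I32 1
+///   %1 = CONST_I32 2
+///   %2 = ADD_I32 %0, %1
+///
+/// can be emitted as "i32.const 1; i32.const 2; i32.add", with no local.get
+/// or local.set needed for %0 and %1 once they are stackified.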
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h" // for WebAssembly::ARGUMENT_*
+#include "WebAssembly.h"
+#include "WebAssemblyDebugValueManager.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include <iterator>
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-reg-stackify"
+
+namespace {
+class WebAssemblyRegStackify final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly Register Stackify";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<MachineBlockFrequencyInfo>();
+ AU.addPreserved<SlotIndexes>();
+ AU.addPreserved<LiveIntervals>();
+ AU.addPreservedID(LiveVariablesID);
+ AU.addPreserved<MachineDominatorTree>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyRegStackify() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyRegStackify::ID = 0;
+INITIALIZE_PASS(WebAssemblyRegStackify, DEBUG_TYPE,
+ "Reorder instructions to use the WebAssembly value stack",
+ false, false)
+
+FunctionPass *llvm::createWebAssemblyRegStackify() {
+ return new WebAssemblyRegStackify();
+}
+
+// Decorate the given instruction with implicit operands that enforce the
+// expression stack ordering constraints for an instruction which is on
+// the expression stack.
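+//
+// Concretely, after this runs MI both defines and reads the opaque
+// VALUE_STACK register, so any two instructions decorated this way carry a
+// mutual dependence that keeps later passes from reordering them.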
+static void imposeStackOrdering(MachineInstr *MI) {
+ // Write the opaque VALUE_STACK register.
+ if (!MI->definesRegister(WebAssembly::VALUE_STACK))
+ MI->addOperand(MachineOperand::CreateReg(WebAssembly::VALUE_STACK,
+ /*isDef=*/true,
+ /*isImp=*/true));
+
+ // Also read the opaque VALUE_STACK register.
+ if (!MI->readsRegister(WebAssembly::VALUE_STACK))
+ MI->addOperand(MachineOperand::CreateReg(WebAssembly::VALUE_STACK,
+ /*isDef=*/false,
+ /*isImp=*/true));
+}
+
+// Convert an IMPLICIT_DEF instruction into an instruction which defines
+// a constant zero value.
+static void convertImplicitDefToConstZero(MachineInstr *MI,
+ MachineRegisterInfo &MRI,
+ const TargetInstrInfo *TII,
+ MachineFunction &MF,
+ LiveIntervals &LIS) {
+ assert(MI->getOpcode() == TargetOpcode::IMPLICIT_DEF);
+
+ const auto *RegClass = MRI.getRegClass(MI->getOperand(0).getReg());
+ if (RegClass == &WebAssembly::I32RegClass) {
+ MI->setDesc(TII->get(WebAssembly::CONST_I32));
+ MI->addOperand(MachineOperand::CreateImm(0));
+ } else if (RegClass == &WebAssembly::I64RegClass) {
+ MI->setDesc(TII->get(WebAssembly::CONST_I64));
+ MI->addOperand(MachineOperand::CreateImm(0));
+ } else if (RegClass == &WebAssembly::F32RegClass) {
+ MI->setDesc(TII->get(WebAssembly::CONST_F32));
+ auto *Val = cast<ConstantFP>(Constant::getNullValue(
+ Type::getFloatTy(MF.getFunction().getContext())));
+ MI->addOperand(MachineOperand::CreateFPImm(Val));
+ } else if (RegClass == &WebAssembly::F64RegClass) {
+ MI->setDesc(TII->get(WebAssembly::CONST_F64));
+ auto *Val = cast<ConstantFP>(Constant::getNullValue(
+ Type::getDoubleTy(MF.getFunction().getContext())));
+ MI->addOperand(MachineOperand::CreateFPImm(Val));
+ } else if (RegClass == &WebAssembly::V128RegClass) {
+ MI->setDesc(TII->get(WebAssembly::CONST_V128_I64x2));
+ MI->addOperand(MachineOperand::CreateImm(0));
+ MI->addOperand(MachineOperand::CreateImm(0));
+ } else {
+ llvm_unreachable("Unexpected reg class");
+ }
+}
+
+// Determine whether a call to the callee referenced by
+// MI->getOperand(CalleeOpNo) reads memory, writes memory, and/or has side
+// effects.
+static void queryCallee(const MachineInstr &MI, bool &Read, bool &Write,
+ bool &Effects, bool &StackPointer) {
+ // All calls can use the stack pointer.
+ StackPointer = true;
+
+ const MachineOperand &MO = WebAssembly::getCalleeOp(MI);
+ if (MO.isGlobal()) {
+ const Constant *GV = MO.getGlobal();
+ if (const auto *GA = dyn_cast<GlobalAlias>(GV))
+ if (!GA->isInterposable())
+ GV = GA->getAliasee();
+
+ if (const auto *F = dyn_cast<Function>(GV)) {
+ if (!F->doesNotThrow())
+ Effects = true;
+ if (F->doesNotAccessMemory())
+ return;
+ if (F->onlyReadsMemory()) {
+ Read = true;
+ return;
+ }
+ }
+ }
+
+ // Assume the worst.
+ Write = true;
+ Read = true;
+ Effects = true;
+}
+
+// Determine whether MI reads memory, writes memory, has side effects,
+// and/or uses the stack pointer value.
+static void query(const MachineInstr &MI, bool &Read, bool &Write,
+ bool &Effects, bool &StackPointer) {
+ assert(!MI.isTerminator());
+
+ if (MI.isDebugInstr() || MI.isPosition())
+ return;
+
+ // Check for loads.
+ if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
+ Read = true;
+
+ // Check for stores.
+ if (MI.mayStore()) {
+ Write = true;
+ } else if (MI.hasOrderedMemoryRef()) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::DIV_S_I32:
+ case WebAssembly::DIV_S_I64:
+ case WebAssembly::REM_S_I32:
+ case WebAssembly::REM_S_I64:
+ case WebAssembly::DIV_U_I32:
+ case WebAssembly::DIV_U_I64:
+ case WebAssembly::REM_U_I32:
+ case WebAssembly::REM_U_I64:
+ case WebAssembly::I32_TRUNC_S_F32:
+ case WebAssembly::I64_TRUNC_S_F32:
+ case WebAssembly::I32_TRUNC_S_F64:
+ case WebAssembly::I64_TRUNC_S_F64:
+ case WebAssembly::I32_TRUNC_U_F32:
+ case WebAssembly::I64_TRUNC_U_F32:
+ case WebAssembly::I32_TRUNC_U_F64:
+ case WebAssembly::I64_TRUNC_U_F64:
+      // These instructions have hasUnmodeledSideEffects() returning true
+      // because they trap on overflow and invalid input, so they can't be
+      // arbitrarily moved. However, hasOrderedMemoryRef() interprets this
+      // plus their lack of memoperands as a potential unknown memory
+      // reference.
+ break;
+ default:
+ // Record volatile accesses, unless it's a call, as calls are handled
+ // specially below.
+ if (!MI.isCall()) {
+ Write = true;
+ Effects = true;
+ }
+ break;
+ }
+ }
+
+ // Check for side effects.
+ if (MI.hasUnmodeledSideEffects()) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::DIV_S_I32:
+ case WebAssembly::DIV_S_I64:
+ case WebAssembly::REM_S_I32:
+ case WebAssembly::REM_S_I64:
+ case WebAssembly::DIV_U_I32:
+ case WebAssembly::DIV_U_I64:
+ case WebAssembly::REM_U_I32:
+ case WebAssembly::REM_U_I64:
+ case WebAssembly::I32_TRUNC_S_F32:
+ case WebAssembly::I64_TRUNC_S_F32:
+ case WebAssembly::I32_TRUNC_S_F64:
+ case WebAssembly::I64_TRUNC_S_F64:
+ case WebAssembly::I32_TRUNC_U_F32:
+ case WebAssembly::I64_TRUNC_U_F32:
+ case WebAssembly::I32_TRUNC_U_F64:
+ case WebAssembly::I64_TRUNC_U_F64:
+      // These instructions have hasUnmodeledSideEffects() returning true
+      // because they trap on overflow and invalid input, so they can't be
+      // arbitrarily moved. However, in the specific case of register
+      // stackifying, it is safe to move them because overflow and invalid
+      // input are Undefined Behavior.
+ break;
+ default:
+ Effects = true;
+ break;
+ }
+ }
+
+ // Check for writes to __stack_pointer global.
+ if ((MI.getOpcode() == WebAssembly::GLOBAL_SET_I32 ||
+ MI.getOpcode() == WebAssembly::GLOBAL_SET_I64) &&
+ strcmp(MI.getOperand(0).getSymbolName(), "__stack_pointer") == 0)
+ StackPointer = true;
+
+ // Analyze calls.
+ if (MI.isCall()) {
+ queryCallee(MI, Read, Write, Effects, StackPointer);
+ }
+}
+
+// Test whether Def is safe and profitable to rematerialize.
+static bool shouldRematerialize(const MachineInstr &Def,
+ const WebAssemblyInstrInfo *TII) {
+ return Def.isAsCheapAsAMove() && TII->isTriviallyReMaterializable(Def);
+}
+
+// Identify the definition for this register at this point. This is a
+// generalization of MachineRegisterInfo::getUniqueVRegDef that uses
+// LiveIntervals to handle complex cases.
+static MachineInstr *getVRegDef(unsigned Reg, const MachineInstr *Insert,
+ const MachineRegisterInfo &MRI,
+ const LiveIntervals &LIS) {
+ // Most registers are in SSA form here so we try a quick MRI query first.
+ if (MachineInstr *Def = MRI.getUniqueVRegDef(Reg))
+ return Def;
+
+ // MRI doesn't know what the Def is. Try asking LIS.
+ if (const VNInfo *ValNo = LIS.getInterval(Reg).getVNInfoBefore(
+ LIS.getInstructionIndex(*Insert)))
+ return LIS.getInstructionFromIndex(ValNo->def);
+
+ return nullptr;
+}
+
+// Test whether Reg, as defined at Def, has exactly one use. This is a
+// generalization of MachineRegisterInfo::hasOneNonDBGUse that uses
+// LiveIntervals to handle complex cases.
+static bool hasOneNonDBGUse(unsigned Reg, MachineInstr *Def,
+ MachineRegisterInfo &MRI, MachineDominatorTree &MDT,
+ LiveIntervals &LIS) {
+ // Most registers are in SSA form here so we try a quick MRI query first.
+ if (MRI.hasOneNonDBGUse(Reg))
+ return true;
+
+ bool HasOne = false;
+ const LiveInterval &LI = LIS.getInterval(Reg);
+ const VNInfo *DefVNI =
+ LI.getVNInfoAt(LIS.getInstructionIndex(*Def).getRegSlot());
+ assert(DefVNI);
+ for (auto &I : MRI.use_nodbg_operands(Reg)) {
+ const auto &Result = LI.Query(LIS.getInstructionIndex(*I.getParent()));
+ if (Result.valueIn() == DefVNI) {
+ if (!Result.isKill())
+ return false;
+ if (HasOne)
+ return false;
+ HasOne = true;
+ }
+ }
+ return HasOne;
+}
+
+// Test whether it's safe to move Def to just before Insert.
+// TODO: Compute memory dependencies in a way that doesn't require always
+// walking the block.
+// TODO: Compute memory dependencies in a way that uses AliasAnalysis to be
+// more precise.
+static bool isSafeToMove(const MachineOperand *Def, const MachineOperand *Use,
+ const MachineInstr *Insert,
+ const WebAssemblyFunctionInfo &MFI,
+ const MachineRegisterInfo &MRI) {
+ const MachineInstr *DefI = Def->getParent();
+ const MachineInstr *UseI = Use->getParent();
+ assert(DefI->getParent() == Insert->getParent());
+ assert(UseI->getParent() == Insert->getParent());
+
+ // The first def of a multivalue instruction can be stackified by moving,
+ // since the later defs can always be placed into locals if necessary. Later
+ // defs can only be stackified if all previous defs are already stackified
+ // since ExplicitLocals will not know how to place a def in a local if a
+ // subsequent def is stackified. But only one def can be stackified by moving
+ // the instruction, so it must be the first one.
+ //
+ // TODO: This could be loosened to be the first *live* def, but care would
+ // have to be taken to ensure the drops of the initial dead defs can be
+ // placed. This would require checking that no previous defs are used in the
+ // same instruction as subsequent defs.
+ if (Def != DefI->defs().begin())
+ return false;
+
+  // If any subsequent def is used prior to the current value by the same
+  // instruction in which the current value is used, we cannot stackify.
+  // Stackifying in this case would require moving that def below the
+  // current def in the stack, which cannot be achieved, even with locals.
+  // Also ensure we don't sink the def past any other prior uses.
+ for (const auto &SubsequentDef : drop_begin(DefI->defs())) {
+ auto I = std::next(MachineBasicBlock::const_iterator(DefI));
+ auto E = std::next(MachineBasicBlock::const_iterator(UseI));
+ for (; I != E; ++I) {
+ for (const auto &PriorUse : I->uses()) {
+ if (&PriorUse == Use)
+ break;
+ if (PriorUse.isReg() && SubsequentDef.getReg() == PriorUse.getReg())
+ return false;
+ }
+ }
+ }
+
+ // If moving is a semantic nop, it is always allowed
+ const MachineBasicBlock *MBB = DefI->getParent();
+ auto NextI = std::next(MachineBasicBlock::const_iterator(DefI));
+ for (auto E = MBB->end(); NextI != E && NextI->isDebugInstr(); ++NextI)
+ ;
+ if (NextI == Insert)
+ return true;
+
+ // 'catch' and 'catch_all' should be the first instruction of a BB and cannot
+ // move.
+ if (WebAssembly::isCatch(DefI->getOpcode()))
+ return false;
+
+ // Check for register dependencies.
+ SmallVector<unsigned, 4> MutableRegisters;
+ for (const MachineOperand &MO : DefI->operands()) {
+ if (!MO.isReg() || MO.isUndef())
+ continue;
+ Register Reg = MO.getReg();
+
+ // If the register is dead here and at Insert, ignore it.
+ if (MO.isDead() && Insert->definesRegister(Reg) &&
+ !Insert->readsRegister(Reg))
+ continue;
+
+ if (Reg.isPhysical()) {
+ // Ignore ARGUMENTS; it's just used to keep the ARGUMENT_* instructions
+ // from moving down, and we've already checked for that.
+ if (Reg == WebAssembly::ARGUMENTS)
+ continue;
+ // If the physical register is never modified, ignore it.
+ if (!MRI.isPhysRegModified(Reg))
+ continue;
+ // Otherwise, it's a physical register with unknown liveness.
+ return false;
+ }
+
+ // If one of the operands isn't in SSA form, it has different values at
+ // different times, and we need to make sure we don't move our use across
+ // a different def.
+ if (!MO.isDef() && !MRI.hasOneDef(Reg))
+ MutableRegisters.push_back(Reg);
+ }
+
+ bool Read = false, Write = false, Effects = false, StackPointer = false;
+ query(*DefI, Read, Write, Effects, StackPointer);
+
+ // If the instruction does not access memory and has no side effects, it has
+ // no additional dependencies.
+ bool HasMutableRegisters = !MutableRegisters.empty();
+ if (!Read && !Write && !Effects && !StackPointer && !HasMutableRegisters)
+ return true;
+
+ // Scan through the intervening instructions between DefI and Insert.
+ MachineBasicBlock::const_iterator D(DefI), I(Insert);
+ for (--I; I != D; --I) {
+ bool InterveningRead = false;
+ bool InterveningWrite = false;
+ bool InterveningEffects = false;
+ bool InterveningStackPointer = false;
+ query(*I, InterveningRead, InterveningWrite, InterveningEffects,
+ InterveningStackPointer);
+ if (Effects && InterveningEffects)
+ return false;
+ if (Read && InterveningWrite)
+ return false;
+ if (Write && (InterveningRead || InterveningWrite))
+ return false;
+ if (StackPointer && InterveningStackPointer)
+ return false;
+
+ for (unsigned Reg : MutableRegisters)
+ for (const MachineOperand &MO : I->operands())
+ if (MO.isReg() && MO.isDef() && MO.getReg() == Reg)
+ return false;
+ }
+
+ return true;
+}
+
+/// Test whether OneUse, a use of Reg, dominates all of Reg's other uses.
+static bool oneUseDominatesOtherUses(unsigned Reg, const MachineOperand &OneUse,
+ const MachineBasicBlock &MBB,
+ const MachineRegisterInfo &MRI,
+ const MachineDominatorTree &MDT,
+ LiveIntervals &LIS,
+ WebAssemblyFunctionInfo &MFI) {
+ const LiveInterval &LI = LIS.getInterval(Reg);
+
+ const MachineInstr *OneUseInst = OneUse.getParent();
+ VNInfo *OneUseVNI = LI.getVNInfoBefore(LIS.getInstructionIndex(*OneUseInst));
+
+ for (const MachineOperand &Use : MRI.use_nodbg_operands(Reg)) {
+ if (&Use == &OneUse)
+ continue;
+
+ const MachineInstr *UseInst = Use.getParent();
+ VNInfo *UseVNI = LI.getVNInfoBefore(LIS.getInstructionIndex(*UseInst));
+
+ if (UseVNI != OneUseVNI)
+ continue;
+
+ if (UseInst == OneUseInst) {
+ // Another use in the same instruction. We need to ensure that the one
+ // selected use happens "before" it.
+ if (&OneUse > &Use)
+ return false;
+ } else {
+ // Test that the use is dominated by the one selected use.
+ while (!MDT.dominates(OneUseInst, UseInst)) {
+ // Actually, dominating is over-conservative. Test that the use would
+ // happen after the one selected use in the stack evaluation order.
+ //
+ // This is needed as a consequence of using implicit local.gets for
+ // uses and implicit local.sets for defs.
+ if (UseInst->getDesc().getNumDefs() == 0)
+ return false;
+ const MachineOperand &MO = UseInst->getOperand(0);
+ if (!MO.isReg())
+ return false;
+ Register DefReg = MO.getReg();
+ if (!DefReg.isVirtual() || !MFI.isVRegStackified(DefReg))
+ return false;
+ assert(MRI.hasOneNonDBGUse(DefReg));
+ const MachineOperand &NewUse = *MRI.use_nodbg_begin(DefReg);
+ const MachineInstr *NewUseInst = NewUse.getParent();
+ if (NewUseInst == OneUseInst) {
+ if (&OneUse > &NewUse)
+ return false;
+ break;
+ }
+ UseInst = NewUseInst;
+ }
+ }
+ }
+ return true;
+}
+
+/// Get the appropriate tee opcode for the given register class.
+static unsigned getTeeOpcode(const TargetRegisterClass *RC) {
+ if (RC == &WebAssembly::I32RegClass)
+ return WebAssembly::TEE_I32;
+ if (RC == &WebAssembly::I64RegClass)
+ return WebAssembly::TEE_I64;
+ if (RC == &WebAssembly::F32RegClass)
+ return WebAssembly::TEE_F32;
+ if (RC == &WebAssembly::F64RegClass)
+ return WebAssembly::TEE_F64;
+ if (RC == &WebAssembly::V128RegClass)
+ return WebAssembly::TEE_V128;
+ if (RC == &WebAssembly::EXTERNREFRegClass)
+ return WebAssembly::TEE_EXTERNREF;
+ if (RC == &WebAssembly::FUNCREFRegClass)
+ return WebAssembly::TEE_FUNCREF;
+ llvm_unreachable("Unexpected register class");
+}
+
+// Shrink LI to its uses, cleaning up LI.
+static void shrinkToUses(LiveInterval &LI, LiveIntervals &LIS) {
+ if (LIS.shrinkToUses(&LI)) {
+ SmallVector<LiveInterval *, 4> SplitLIs;
+ LIS.splitSeparateComponents(LI, SplitLIs);
+ }
+}
+
+/// A single-use def in the same block with no intervening memory or register
+/// dependencies; move the def down and nest it with the current instruction.
+static MachineInstr *moveForSingleUse(unsigned Reg, MachineOperand &Op,
+ MachineInstr *Def, MachineBasicBlock &MBB,
+ MachineInstr *Insert, LiveIntervals &LIS,
+ WebAssemblyFunctionInfo &MFI,
+ MachineRegisterInfo &MRI) {
+ LLVM_DEBUG(dbgs() << "Move for single use: "; Def->dump());
+
+ WebAssemblyDebugValueManager DefDIs(Def);
+ DefDIs.sink(Insert);
+ LIS.handleMove(*Def);
+
+ if (MRI.hasOneDef(Reg) && MRI.hasOneNonDBGUse(Reg)) {
+ // No one else is using this register for anything so we can just stackify
+ // it in place.
+ MFI.stackifyVReg(MRI, Reg);
+ } else {
+ // The register may have unrelated uses or defs; create a new register for
+ // just our one def and use so that we can stackify it.
+ Register NewReg = MRI.createVirtualRegister(MRI.getRegClass(Reg));
+ Op.setReg(NewReg);
+ DefDIs.updateReg(NewReg);
+
+ // Tell LiveIntervals about the new register.
+ LIS.createAndComputeVirtRegInterval(NewReg);
+
+ // Tell LiveIntervals about the changes to the old register.
+ LiveInterval &LI = LIS.getInterval(Reg);
+ LI.removeSegment(LIS.getInstructionIndex(*Def).getRegSlot(),
+ LIS.getInstructionIndex(*Op.getParent()).getRegSlot(),
+ /*RemoveDeadValNo=*/true);
+
+ MFI.stackifyVReg(MRI, NewReg);
+
+ LLVM_DEBUG(dbgs() << " - Replaced register: "; Def->dump());
+ }
+
+ imposeStackOrdering(Def);
+ return Def;
+}
+
+static MachineInstr *getPrevNonDebugInst(MachineInstr *MI) {
+ for (auto *I = MI->getPrevNode(); I; I = I->getPrevNode())
+ if (!I->isDebugInstr())
+ return I;
+ return nullptr;
+}
+
+/// A trivially cloneable instruction; clone it and nest the new copy with the
+/// current instruction.
+static MachineInstr *rematerializeCheapDef(
+ unsigned Reg, MachineOperand &Op, MachineInstr &Def, MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator Insert, LiveIntervals &LIS,
+ WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI,
+ const WebAssemblyInstrInfo *TII, const WebAssemblyRegisterInfo *TRI) {
+ LLVM_DEBUG(dbgs() << "Rematerializing cheap def: "; Def.dump());
+ LLVM_DEBUG(dbgs() << " - for use in "; Op.getParent()->dump());
+
+ WebAssemblyDebugValueManager DefDIs(&Def);
+
+ Register NewReg = MRI.createVirtualRegister(MRI.getRegClass(Reg));
+ DefDIs.cloneSink(&*Insert, NewReg);
+ Op.setReg(NewReg);
+ MachineInstr *Clone = getPrevNonDebugInst(&*Insert);
+ assert(Clone);
+ LIS.InsertMachineInstrInMaps(*Clone);
+ LIS.createAndComputeVirtRegInterval(NewReg);
+ MFI.stackifyVReg(MRI, NewReg);
+ imposeStackOrdering(Clone);
+
+ LLVM_DEBUG(dbgs() << " - Cloned to "; Clone->dump());
+
+ // Shrink the interval.
+ bool IsDead = MRI.use_empty(Reg);
+ if (!IsDead) {
+ LiveInterval &LI = LIS.getInterval(Reg);
+ shrinkToUses(LI, LIS);
+ IsDead = !LI.liveAt(LIS.getInstructionIndex(Def).getDeadSlot());
+ }
+
+ // If that was the last use of the original, delete the original.
+ if (IsDead) {
+ LLVM_DEBUG(dbgs() << " - Deleting original\n");
+ SlotIndex Idx = LIS.getInstructionIndex(Def).getRegSlot();
+ LIS.removePhysRegDefAt(MCRegister::from(WebAssembly::ARGUMENTS), Idx);
+ LIS.removeInterval(Reg);
+ LIS.RemoveMachineInstrFromMaps(Def);
+ DefDIs.removeDef();
+ }
+
+ return Clone;
+}
+
+/// A multiple-use def in the same block with no intervening memory or register
+/// dependencies; move the def down, nest it with the current instruction, and
+/// insert a tee to satisfy the rest of the uses. As an illustration, rewrite
+/// this:
+///
+/// Reg = INST ... // Def
+/// INST ..., Reg, ... // Insert
+/// INST ..., Reg, ...
+/// INST ..., Reg, ...
+///
+/// to this:
+///
+/// DefReg = INST ... // Def (to become the new Insert)
+/// TeeReg, Reg = TEE_... DefReg
+/// INST ..., TeeReg, ... // Insert
+/// INST ..., Reg, ...
+/// INST ..., Reg, ...
+///
+/// with DefReg and TeeReg stackified. This eliminates a local.get from the
+/// resulting code.
+static MachineInstr *moveAndTeeForMultiUse(
+ unsigned Reg, MachineOperand &Op, MachineInstr *Def, MachineBasicBlock &MBB,
+ MachineInstr *Insert, LiveIntervals &LIS, WebAssemblyFunctionInfo &MFI,
+ MachineRegisterInfo &MRI, const WebAssemblyInstrInfo *TII) {
+ LLVM_DEBUG(dbgs() << "Move and tee for multi-use:"; Def->dump());
+
+ const auto *RegClass = MRI.getRegClass(Reg);
+ Register TeeReg = MRI.createVirtualRegister(RegClass);
+ Register DefReg = MRI.createVirtualRegister(RegClass);
+
+ // Move Def into place.
+ WebAssemblyDebugValueManager DefDIs(Def);
+ DefDIs.sink(Insert);
+ LIS.handleMove(*Def);
+
+ // Create the Tee and attach the registers.
+ MachineOperand &DefMO = Def->getOperand(0);
+ MachineInstr *Tee = BuildMI(MBB, Insert, Insert->getDebugLoc(),
+ TII->get(getTeeOpcode(RegClass)), TeeReg)
+ .addReg(Reg, RegState::Define)
+ .addReg(DefReg, getUndefRegState(DefMO.isDead()));
+ Op.setReg(TeeReg);
+ DefDIs.updateReg(DefReg);
+ SlotIndex TeeIdx = LIS.InsertMachineInstrInMaps(*Tee).getRegSlot();
+ SlotIndex DefIdx = LIS.getInstructionIndex(*Def).getRegSlot();
+
+ // Tell LiveIntervals we moved the original vreg def from Def to Tee.
+ LiveInterval &LI = LIS.getInterval(Reg);
+ LiveInterval::iterator I = LI.FindSegmentContaining(DefIdx);
+ VNInfo *ValNo = LI.getVNInfoAt(DefIdx);
+ I->start = TeeIdx;
+ ValNo->def = TeeIdx;
+ shrinkToUses(LI, LIS);
+
+ // Finish stackifying the new regs.
+ LIS.createAndComputeVirtRegInterval(TeeReg);
+ LIS.createAndComputeVirtRegInterval(DefReg);
+ MFI.stackifyVReg(MRI, DefReg);
+ MFI.stackifyVReg(MRI, TeeReg);
+ imposeStackOrdering(Def);
+ imposeStackOrdering(Tee);
+
+  // Even though 'TeeReg, Reg = TEE ...' has two defs, we don't need to clone
+ // DBG_VALUEs for both of them, given that the latter will cancel the former
+ // anyway. Here we only clone DBG_VALUEs for TeeReg, which will be converted
+ // to a local index in ExplicitLocals pass.
+ DefDIs.cloneSink(Insert, TeeReg, /* CloneDef */ false);
+
+ LLVM_DEBUG(dbgs() << " - Replaced register: "; Def->dump());
+ LLVM_DEBUG(dbgs() << " - Tee instruction: "; Tee->dump());
+ return Def;
+}
+
+namespace {
+/// A stack for walking the tree of instructions being built, visiting the
+/// MachineOperands in DFS order.
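+///
+/// Operands are visited last-to-first within each instruction, matching the
+/// LIFO order in which their values will be popped off the value stack.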
+class TreeWalkerState {
+ using mop_iterator = MachineInstr::mop_iterator;
+ using mop_reverse_iterator = std::reverse_iterator<mop_iterator>;
+ using RangeTy = iterator_range<mop_reverse_iterator>;
+ SmallVector<RangeTy, 4> Worklist;
+
+public:
+ explicit TreeWalkerState(MachineInstr *Insert) {
+ const iterator_range<mop_iterator> &Range = Insert->explicit_uses();
+ if (!Range.empty())
+ Worklist.push_back(reverse(Range));
+ }
+
+ bool done() const { return Worklist.empty(); }
+
+ MachineOperand &pop() {
+ RangeTy &Range = Worklist.back();
+ MachineOperand &Op = *Range.begin();
+ Range = drop_begin(Range);
+ if (Range.empty())
+ Worklist.pop_back();
+ assert((Worklist.empty() || !Worklist.back().empty()) &&
+ "Empty ranges shouldn't remain in the worklist");
+ return Op;
+ }
+
+ /// Push Instr's operands onto the stack to be visited.
+ void pushOperands(MachineInstr *Instr) {
+ const iterator_range<mop_iterator> &Range(Instr->explicit_uses());
+ if (!Range.empty())
+ Worklist.push_back(reverse(Range));
+ }
+
+ /// Some of Instr's operands are on the top of the stack; remove them and
+ /// re-insert them starting from the beginning (because we've commuted them).
+ void resetTopOperands(MachineInstr *Instr) {
+ assert(hasRemainingOperands(Instr) &&
+ "Reseting operands should only be done when the instruction has "
+ "an operand still on the stack");
+ Worklist.back() = reverse(Instr->explicit_uses());
+ }
+
+ /// Test whether Instr has operands remaining to be visited at the top of
+ /// the stack.
+ bool hasRemainingOperands(const MachineInstr *Instr) const {
+ if (Worklist.empty())
+ return false;
+ const RangeTy &Range = Worklist.back();
+ return !Range.empty() && Range.begin()->getParent() == Instr;
+ }
+
+ /// Test whether the given register is present on the stack, indicating an
+ /// operand in the tree that we haven't visited yet. Moving a definition of
+ /// Reg to a point in the tree after that would change its value.
+ ///
+ /// This is needed as a consequence of using implicit local.gets for
+ /// uses and implicit local.sets for defs.
+ bool isOnStack(unsigned Reg) const {
+ for (const RangeTy &Range : Worklist)
+ for (const MachineOperand &MO : Range)
+ if (MO.isReg() && MO.getReg() == Reg)
+ return true;
+ return false;
+ }
+};
+
+/// State to keep track of whether commuting is in flight or whether it's been
+/// tried for the current instruction and didn't work.
+class CommutingState {
+ /// There are effectively three states: the initial state where we haven't
+ /// started commuting anything and we don't know anything yet, the tentative
+ /// state where we've commuted the operands of the current instruction and are
+ /// revisiting it, and the declined state where we've reverted the operands
+ /// back to their original order and will no longer commute it further.
+ bool TentativelyCommuting = false;
+ bool Declined = false;
+
+ /// During the tentative state, these hold the operand indices of the commuted
+ /// operands.
+ unsigned Operand0, Operand1;
+
+public:
+ /// Stackification for an operand was not successful due to ordering
+ /// constraints. If possible, and if we haven't already tried it and declined
+ /// it, commute Insert's operands and prepare to revisit it.
+ void maybeCommute(MachineInstr *Insert, TreeWalkerState &TreeWalker,
+ const WebAssemblyInstrInfo *TII) {
+ if (TentativelyCommuting) {
+ assert(!Declined &&
+ "Don't decline commuting until you've finished trying it");
+ // Commuting didn't help. Revert it.
+ TII->commuteInstruction(*Insert, /*NewMI=*/false, Operand0, Operand1);
+ TentativelyCommuting = false;
+ Declined = true;
+ } else if (!Declined && TreeWalker.hasRemainingOperands(Insert)) {
+ Operand0 = TargetInstrInfo::CommuteAnyOperandIndex;
+ Operand1 = TargetInstrInfo::CommuteAnyOperandIndex;
+ if (TII->findCommutedOpIndices(*Insert, Operand0, Operand1)) {
+ // Tentatively commute the operands and try again.
+ TII->commuteInstruction(*Insert, /*NewMI=*/false, Operand0, Operand1);
+ TreeWalker.resetTopOperands(Insert);
+ TentativelyCommuting = true;
+ Declined = false;
+ }
+ }
+ }
+
+ /// Stackification for some operand was successful. Reset to the default
+ /// state.
+ void reset() {
+ TentativelyCommuting = false;
+ Declined = false;
+ }
+};
+} // end anonymous namespace
+
+bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG(dbgs() << "********** Register Stackifying **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ bool Changed = false;
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ const auto *TRI = MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo();
+ auto &MDT = getAnalysis<MachineDominatorTree>();
+ auto &LIS = getAnalysis<LiveIntervals>();
+
+ // Walk the instructions from the bottom up. Currently we don't look past
+ // block boundaries, and the blocks aren't ordered so the block visitation
+ // order isn't significant, but we may want to change this in the future.
+ for (MachineBasicBlock &MBB : MF) {
+ // Don't use a range-based for loop, because we modify the list as we're
+ // iterating over it and the end iterator may change.
+ for (auto MII = MBB.rbegin(); MII != MBB.rend(); ++MII) {
+ MachineInstr *Insert = &*MII;
+ // Don't nest anything inside an inline asm, because we don't have
+ // constraints for $push inputs.
+ if (Insert->isInlineAsm())
+ continue;
+
+ // Ignore debugging intrinsics.
+ if (Insert->isDebugValue())
+ continue;
+
+ // Iterate through the inputs in reverse order, since we'll be pulling
+ // operands off the stack in LIFO order.
+ CommutingState Commuting;
+ TreeWalkerState TreeWalker(Insert);
+ while (!TreeWalker.done()) {
+ MachineOperand &Use = TreeWalker.pop();
+
+ // We're only interested in explicit virtual register operands.
+ if (!Use.isReg())
+ continue;
+
+ Register Reg = Use.getReg();
+ assert(Use.isUse() && "explicit_uses() should only iterate over uses");
+ assert(!Use.isImplicit() &&
+ "explicit_uses() should only iterate over explicit operands");
+ if (Reg.isPhysical())
+ continue;
+
+ // Identify the definition for this register at this point.
+ MachineInstr *DefI = getVRegDef(Reg, Insert, MRI, LIS);
+ if (!DefI)
+ continue;
+
+ // Don't nest an INLINE_ASM def into anything, because we don't have
+ // constraints for $pop outputs.
+ if (DefI->isInlineAsm())
+ continue;
+
+ // Argument instructions represent live-in registers and not real
+ // instructions.
+ if (WebAssembly::isArgument(DefI->getOpcode()))
+ continue;
+
+ MachineOperand *Def = DefI->findRegisterDefOperand(Reg);
+ assert(Def != nullptr);
+
+ // Decide which strategy to take. Prefer to move a single-use value
+ // over cloning it, and prefer cloning over introducing a tee.
+ // For moving, we require the def to be in the same block as the use;
+ // this makes things simpler (LiveIntervals' handleMove function only
+ // supports intra-block moves) and it's MachineSink's job to catch all
+ // the sinking opportunities anyway.
+ bool SameBlock = DefI->getParent() == &MBB;
+ bool CanMove = SameBlock && isSafeToMove(Def, &Use, Insert, MFI, MRI) &&
+ !TreeWalker.isOnStack(Reg);
+ if (CanMove && hasOneNonDBGUse(Reg, DefI, MRI, MDT, LIS)) {
+ Insert = moveForSingleUse(Reg, Use, DefI, MBB, Insert, LIS, MFI, MRI);
+
+ // If we are removing the frame base reg completely, remove the debug
+ // info as well.
+ // TODO: Encode this properly as a stackified value.
+ if (MFI.isFrameBaseVirtual() && MFI.getFrameBaseVreg() == Reg)
+ MFI.clearFrameBaseVreg();
+ } else if (shouldRematerialize(*DefI, TII)) {
+ Insert =
+ rematerializeCheapDef(Reg, Use, *DefI, MBB, Insert->getIterator(),
+ LIS, MFI, MRI, TII, TRI);
+ } else if (CanMove && oneUseDominatesOtherUses(Reg, Use, MBB, MRI, MDT,
+ LIS, MFI)) {
+ Insert = moveAndTeeForMultiUse(Reg, Use, DefI, MBB, Insert, LIS, MFI,
+ MRI, TII);
+ } else {
+ // We failed to stackify the operand. If the problem was ordering
+ // constraints, Commuting may be able to help.
+ if (!CanMove && SameBlock)
+ Commuting.maybeCommute(Insert, TreeWalker, TII);
+ // Proceed to the next operand.
+ continue;
+ }
+
+ // Stackifying a multivalue def may unlock in-place stackification of
+ // subsequent defs. TODO: Handle the case where the consecutive uses are
+ // not all in the same instruction.
+ auto *SubsequentDef = Insert->defs().begin();
+ auto *SubsequentUse = &Use;
+ while (SubsequentDef != Insert->defs().end() &&
+ SubsequentUse != Use.getParent()->uses().end()) {
+ if (!SubsequentDef->isReg() || !SubsequentUse->isReg())
+ break;
+ Register DefReg = SubsequentDef->getReg();
+ Register UseReg = SubsequentUse->getReg();
+ // TODO: This single-use restriction could be relaxed by using tees
+ if (DefReg != UseReg || !MRI.hasOneNonDBGUse(DefReg))
+ break;
+ MFI.stackifyVReg(MRI, DefReg);
+ ++SubsequentDef;
+ ++SubsequentUse;
+ }
+
+ // If the instruction we just stackified is an IMPLICIT_DEF, convert it
+ // to a constant 0 so that the def is explicit, and the push/pop
+ // correspondence is maintained.
+ if (Insert->getOpcode() == TargetOpcode::IMPLICIT_DEF)
+ convertImplicitDefToConstZero(Insert, MRI, TII, MF, LIS);
+
+ // We stackified an operand. Add the defining instruction's operands to
+ // the worklist stack now to continue to build an ever deeper tree.
+ Commuting.reset();
+ TreeWalker.pushOperands(Insert);
+ }
+
+ // If we stackified any operands, skip over the tree to start looking for
+ // the next instruction we can build a tree on.
+ if (Insert != &*MII) {
+ imposeStackOrdering(&*MII);
+ MII = MachineBasicBlock::iterator(Insert).getReverse();
+ Changed = true;
+ }
+ }
+ }
+
+ // If we used VALUE_STACK anywhere, add it to the live-in sets everywhere so
+ // that it never looks like a use-before-def.
+ if (Changed) {
+ MF.getRegInfo().addLiveIn(WebAssembly::VALUE_STACK);
+ for (MachineBasicBlock &MBB : MF)
+ MBB.addLiveIn(WebAssembly::VALUE_STACK);
+ }
+
+#ifndef NDEBUG
+ // Verify that pushes and pops are performed in LIFO order.
+ SmallVector<unsigned, 0> Stack;
+ for (MachineBasicBlock &MBB : MF) {
+ for (MachineInstr &MI : MBB) {
+ if (MI.isDebugInstr())
+ continue;
+ for (MachineOperand &MO : reverse(MI.explicit_uses())) {
+ if (!MO.isReg())
+ continue;
+ Register Reg = MO.getReg();
+ if (MFI.isVRegStackified(Reg))
+ assert(Stack.pop_back_val() == Reg &&
+ "Register stack pop should be paired with a push");
+ }
+ for (MachineOperand &MO : MI.defs()) {
+ if (!MO.isReg())
+ continue;
+ Register Reg = MO.getReg();
+ if (MFI.isVRegStackified(Reg))
+ Stack.push_back(MO.getReg());
+ }
+ }
+ // TODO: Generalize this code to support keeping values on the stack across
+ // basic block boundaries.
+ assert(Stack.empty() &&
+ "Register stack pushes and pops should be balanced");
+ }
+#endif
+
+ return Changed;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp
new file mode 100644
index 000000000000..4ca262e248f7
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp
@@ -0,0 +1,160 @@
+//===-- WebAssemblyRegisterInfo.cpp - WebAssembly Register Information ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the WebAssembly implementation of the
+/// TargetRegisterInfo class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyRegisterInfo.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyFrameLowering.h"
+#include "WebAssemblyInstrInfo.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetOptions.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-reg-info"
+
+#define GET_REGINFO_TARGET_DESC
+#include "WebAssemblyGenRegisterInfo.inc"
+
+WebAssemblyRegisterInfo::WebAssemblyRegisterInfo(const Triple &TT)
+ : WebAssemblyGenRegisterInfo(0), TT(TT) {}
+
+const MCPhysReg *
+WebAssemblyRegisterInfo::getCalleeSavedRegs(const MachineFunction *) const {
+ static const MCPhysReg CalleeSavedRegs[] = {0};
+ return CalleeSavedRegs;
+}
+
+BitVector
+WebAssemblyRegisterInfo::getReservedRegs(const MachineFunction & /*MF*/) const {
+ BitVector Reserved(getNumRegs());
+ for (auto Reg : {WebAssembly::SP32, WebAssembly::SP64, WebAssembly::FP32,
+ WebAssembly::FP64})
+ Reserved.set(Reg);
+ return Reserved;
+}
+
+bool WebAssemblyRegisterInfo::eliminateFrameIndex(
+ MachineBasicBlock::iterator II, int SPAdj, unsigned FIOperandNum,
+ RegScavenger * /*RS*/) const {
+ assert(SPAdj == 0);
+ MachineInstr &MI = *II;
+
+ MachineBasicBlock &MBB = *MI.getParent();
+ MachineFunction &MF = *MBB.getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ int64_t FrameOffset = MFI.getStackSize() + MFI.getObjectOffset(FrameIndex);
+
+ assert(MFI.getObjectSize(FrameIndex) != 0 &&
+ "We assume that variable-sized objects have already been lowered, "
+ "and don't use FrameIndex operands.");
+ Register FrameRegister = getFrameRegister(MF);
+
+ // If this is the address operand of a load or store, make it relative to SP
+ // and fold the frame offset directly in.
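+  // E.g. (sketch) a load at offset "off" from the frame index becomes a load
+  // at "off + FrameOffset" from the frame register, provided the combined
+  // offset still fits in the unsigned 32-bit offset field (checked below).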
+ unsigned AddrOperandNum = WebAssembly::getNamedOperandIdx(
+ MI.getOpcode(), WebAssembly::OpName::addr);
+ if (AddrOperandNum == FIOperandNum) {
+ unsigned OffsetOperandNum = WebAssembly::getNamedOperandIdx(
+ MI.getOpcode(), WebAssembly::OpName::off);
+ assert(FrameOffset >= 0 && MI.getOperand(OffsetOperandNum).getImm() >= 0);
+ int64_t Offset = MI.getOperand(OffsetOperandNum).getImm() + FrameOffset;
+
+ if (static_cast<uint64_t>(Offset) <= std::numeric_limits<uint32_t>::max()) {
+ MI.getOperand(OffsetOperandNum).setImm(Offset);
+ MI.getOperand(FIOperandNum)
+ .ChangeToRegister(FrameRegister, /*isDef=*/false);
+ return false;
+ }
+ }
+
+ // If this is an address being added to a constant, fold the frame offset
+ // into the constant.
+ if (MI.getOpcode() == WebAssemblyFrameLowering::getOpcAdd(MF)) {
+ MachineOperand &OtherMO = MI.getOperand(3 - FIOperandNum);
+ if (OtherMO.isReg()) {
+ Register OtherMOReg = OtherMO.getReg();
+ if (OtherMOReg.isVirtual()) {
+ MachineInstr *Def = MF.getRegInfo().getUniqueVRegDef(OtherMOReg);
+ // TODO: For now we just opportunistically do this in the case where
+ // the CONST_I32/64 happens to have exactly one def and one use. We
+ // should generalize this to optimize in more cases.
+ if (Def && Def->getOpcode() ==
+ WebAssemblyFrameLowering::getOpcConst(MF) &&
+ MRI.hasOneNonDBGUse(Def->getOperand(0).getReg())) {
+ MachineOperand &ImmMO = Def->getOperand(1);
+ if (ImmMO.isImm()) {
+ ImmMO.setImm(ImmMO.getImm() + uint32_t(FrameOffset));
+ MI.getOperand(FIOperandNum)
+ .ChangeToRegister(FrameRegister, /*isDef=*/false);
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+ // Otherwise create an i32/64.add SP, offset and make it the operand.
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+
+ unsigned FIRegOperand = FrameRegister;
+ if (FrameOffset) {
+ // Create i32/64.add SP, offset and make it the operand.
+ const TargetRegisterClass *PtrRC =
+ MRI.getTargetRegisterInfo()->getPointerRegClass(MF);
+ Register OffsetOp = MRI.createVirtualRegister(PtrRC);
+ BuildMI(MBB, *II, II->getDebugLoc(),
+ TII->get(WebAssemblyFrameLowering::getOpcConst(MF)),
+ OffsetOp)
+ .addImm(FrameOffset);
+ FIRegOperand = MRI.createVirtualRegister(PtrRC);
+ BuildMI(MBB, *II, II->getDebugLoc(),
+ TII->get(WebAssemblyFrameLowering::getOpcAdd(MF)),
+ FIRegOperand)
+ .addReg(FrameRegister)
+ .addReg(OffsetOp);
+ }
+ MI.getOperand(FIOperandNum).ChangeToRegister(FIRegOperand, /*isDef=*/false);
+ return false;
+}
+
+Register
+WebAssemblyRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+ // If the PReg has been replaced by a VReg, return that.
+ const auto &MFI = MF.getInfo<WebAssemblyFunctionInfo>();
+ if (MFI->isFrameBaseVirtual())
+ return MFI->getFrameBaseVreg();
+ static const unsigned Regs[2][2] = {
+ /* !isArch64Bit isArch64Bit */
+ /* !hasFP */ {WebAssembly::SP32, WebAssembly::SP64},
+ /* hasFP */ {WebAssembly::FP32, WebAssembly::FP64}};
+ const WebAssemblyFrameLowering *TFI = getFrameLowering(MF);
+ return Regs[TFI->hasFP(MF)][TT.isArch64Bit()];
+}
+
+const TargetRegisterClass *
+WebAssemblyRegisterInfo::getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind) const {
+ assert(Kind == 0 && "Only one kind of pointer on WebAssembly");
+ if (MF.getSubtarget<WebAssemblySubtarget>().hasAddr64())
+ return &WebAssembly::I64RegClass;
+ return &WebAssembly::I32RegClass;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h
new file mode 100644
index 000000000000..d875e4b93603
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h
@@ -0,0 +1,53 @@
+// WebAssemblyRegisterInfo.h - WebAssembly Register Information Impl -*- C++ -*-
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the WebAssembly implementation of the
+/// WebAssemblyRegisterInfo class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYREGISTERINFO_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYREGISTERINFO_H
+
+#define GET_REGINFO_HEADER
+#include "WebAssemblyGenRegisterInfo.inc"
+
+namespace llvm {
+
+class MachineFunction;
+class RegScavenger;
+class TargetRegisterClass;
+class Triple;
+
+class WebAssemblyRegisterInfo final : public WebAssemblyGenRegisterInfo {
+ const Triple &TT;
+
+public:
+ explicit WebAssemblyRegisterInfo(const Triple &TT);
+
+ // Code Generation virtual methods.
+ const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
+ bool eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
+ unsigned FIOperandNum,
+ RegScavenger *RS = nullptr) const override;
+
+ // Debug information queries.
+ Register getFrameRegister(const MachineFunction &MF) const override;
+
+ const TargetRegisterClass *
+ getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind = 0) const override;
+ // This does not apply to wasm.
+ const uint32_t *getNoPreservedMask() const override { return nullptr; }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td
new file mode 100644
index 000000000000..ba2936b492a9
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td
@@ -0,0 +1,69 @@
+//WebAssemblyRegisterInfo.td-Describe the WebAssembly Registers -*- tablegen -*-
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes the WebAssembly register classes and some nominal
+/// physical registers.
+///
+//===----------------------------------------------------------------------===//
+
+class WebAssemblyReg<string n> : Register<n> {
+ let Namespace = "WebAssembly";
+}
+
+class WebAssemblyRegClass<list<ValueType> regTypes, int alignment, dag regList>
+ : RegisterClass<"WebAssembly", regTypes, alignment, regList>;
+
+//===----------------------------------------------------------------------===//
+// Registers
+//===----------------------------------------------------------------------===//
+
+// Special registers used as the frame and stack pointer.
+//
+// WebAssembly may someday support mixed 32-bit and 64-bit heaps in the same
+// application, which would require separate-width FP and SP registers.
+def FP32 : WebAssemblyReg<"%FP32">;
+def FP64 : WebAssemblyReg<"%FP64">;
+def SP32 : WebAssemblyReg<"%SP32">;
+def SP64 : WebAssemblyReg<"%SP64">;
+
+// The register allocation framework requires that register classes have at
+// least one register, so we define a few for the integer / floating-point
+// register classes, since we otherwise don't need a physical register in those
+// classes. These are also used as "types" in the generated assembly matcher.
+def I32_0 : WebAssemblyReg<"%i32.0">;
+def I64_0 : WebAssemblyReg<"%i64.0">;
+def F32_0 : WebAssemblyReg<"%f32.0">;
+def F64_0 : WebAssemblyReg<"%f64.0">;
+
+def V128_0: WebAssemblyReg<"%v128">;
+
+def FUNCREF_0 : WebAssemblyReg<"%funcref.0">;
+def EXTERNREF_0 : WebAssemblyReg<"%externref.0">;
+
+// The value stack "register". This is an opaque entity which serves to order
+// uses and defs that must remain in LIFO order.
+def VALUE_STACK : WebAssemblyReg<"STACK">;
+
+// The incoming arguments "register". This is an opaque entity which serves to
+// order the ARGUMENT instructions that are emulating live-in registers and
+// must not be scheduled below other instructions.
+def ARGUMENTS : WebAssemblyReg<"ARGUMENTS">;
+
+//===----------------------------------------------------------------------===//
+// Register classes
+//===----------------------------------------------------------------------===//
+
+def I32 : WebAssemblyRegClass<[i32], 32, (add FP32, SP32, I32_0)>;
+def I64 : WebAssemblyRegClass<[i64], 64, (add FP64, SP64, I64_0)>;
+def F32 : WebAssemblyRegClass<[f32], 32, (add F32_0)>;
+def F64 : WebAssemblyRegClass<[f64], 64, (add F64_0)>;
+def V128 : WebAssemblyRegClass<[v4f32, v2f64, v2i64, v4i32, v16i8, v8i16], 128,
+ (add V128_0)>;
+def FUNCREF : WebAssemblyRegClass<[funcref], 0, (add FUNCREF_0)>;
+def EXTERNREF : WebAssemblyRegClass<[externref], 0, (add EXTERNREF_0)>;
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp
new file mode 100644
index 000000000000..1e2bee7a5c73
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp
@@ -0,0 +1,107 @@
+//===-- WebAssemblyReplacePhysRegs.cpp - Replace phys regs with virt regs -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements a pass that replaces physical registers with
+/// virtual registers.
+///
+/// LLVM expects certain physical registers, such as a stack pointer. However,
+/// WebAssembly doesn't actually have such physical registers. This pass is run
+/// once LLVM no longer needs these registers, and replaces them with virtual
+/// registers, so they can participate in register stackifying and coloring in
+/// the normal way.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-replace-phys-regs"
+
+namespace {
+class WebAssemblyReplacePhysRegs final : public MachineFunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyReplacePhysRegs() : MachineFunctionPass(ID) {}
+
+private:
+ StringRef getPassName() const override {
+ return "WebAssembly Replace Physical Registers";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // end anonymous namespace
+
+char WebAssemblyReplacePhysRegs::ID = 0;
+INITIALIZE_PASS(WebAssemblyReplacePhysRegs, DEBUG_TYPE,
+ "Replace physical registers with virtual registers", false,
+ false)
+
+FunctionPass *llvm::createWebAssemblyReplacePhysRegs() {
+ return new WebAssemblyReplacePhysRegs();
+}
+
+bool WebAssemblyReplacePhysRegs::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG({
+ dbgs() << "********** Replace Physical Registers **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
+
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ auto &TRI = *MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo();
+ bool Changed = false;
+
+ assert(!mustPreserveAnalysisID(LiveIntervalsID) &&
+ "LiveIntervals shouldn't be active yet!");
+
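+  // For each physical register, lazily create a single replacement virtual
+  // register upon the first explicit use, and record it as the frame-base
+  // vreg when the register being replaced is the frame register.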
+ for (unsigned PReg = WebAssembly::NoRegister + 1;
+ PReg < WebAssembly::NUM_TARGET_REGS; ++PReg) {
+ // Skip fake registers that are never used explicitly.
+ if (PReg == WebAssembly::VALUE_STACK || PReg == WebAssembly::ARGUMENTS)
+ continue;
+
+ // Replace explicit uses of the physical register with a virtual register.
+ const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(PReg);
+ unsigned VReg = WebAssembly::NoRegister;
+ for (MachineOperand &MO :
+ llvm::make_early_inc_range(MRI.reg_operands(PReg))) {
+ if (!MO.isImplicit()) {
+ if (VReg == WebAssembly::NoRegister) {
+ VReg = MRI.createVirtualRegister(RC);
+ if (PReg == TRI.getFrameRegister(MF)) {
+ auto FI = MF.getInfo<WebAssemblyFunctionInfo>();
+ assert(!FI->isFrameBaseVirtual());
+ FI->setFrameBaseVreg(VReg);
+ LLVM_DEBUG({
+ dbgs() << "replacing preg " << PReg << " with " << VReg << " ("
+ << Register::virtReg2Index(VReg) << ")\n";
+ });
+ }
+ }
+ MO.setReg(VReg);
+ Changed = true;
+ }
+ }
+ }
+
+ return Changed;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp
new file mode 100644
index 000000000000..3e2e029695ab
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp
@@ -0,0 +1,907 @@
+// CodeGen/RuntimeLibcallSignatures.cpp - R.T. Lib. Call Signatures -*- C++ -*--
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains signature information for runtime libcalls.
+///
+/// CodeGen uses external symbols, which it refers to by name. The WebAssembly
+/// target needs type information for all functions. This file contains a big
+/// table providing type signatures for all runtime library functions that LLVM
+/// uses.
+///
+/// This is currently a fairly heavy-handed solution.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyRuntimeLibcallSignatures.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
+
+using namespace llvm;
+
+namespace {
+
+enum RuntimeLibcallSignature {
+ func,
+ f32_func_f32,
+ f32_func_f64,
+ f32_func_i32,
+ f32_func_i64,
+ f32_func_i16,
+ f64_func_f32,
+ f64_func_f64,
+ f64_func_i32,
+ f64_func_i64,
+ i32_func_f32,
+ i32_func_f64,
+ i32_func_i32,
+ i64_func_f32,
+ i64_func_f64,
+ i64_func_i64,
+ f32_func_f32_f32,
+ f32_func_f32_i32,
+ f32_func_i64_i64,
+ f64_func_f64_f64,
+ f64_func_f64_i32,
+ f64_func_i64_i64,
+ i16_func_f32,
+ i16_func_f64,
+ i16_func_i64_i64,
+ i8_func_i8_i8,
+ func_f32_iPTR_iPTR,
+ func_f64_iPTR_iPTR,
+ i16_func_i16_i16,
+ i32_func_f32_f32,
+ i32_func_f64_f64,
+ i32_func_i32_i32,
+ i32_func_i32_i32_iPTR,
+ i64_func_i64_i64,
+ i64_func_i64_i64_iPTR,
+ i64_i64_func_i32,
+ i64_i64_func_i64,
+ i64_i64_func_f32,
+ i64_i64_func_f64,
+ i16_i16_func_i16_i16,
+ i32_i32_func_i32_i32,
+ i64_i64_func_i64_i64,
+ i64_i64_func_i64_i64_i64_i64,
+ i64_i64_func_i64_i64_i64_i64_iPTR,
+ i64_i64_i64_i64_func_i64_i64_i64_i64,
+ i64_i64_func_i64_i64_i32,
+ i64_i64_func_i64_i64_i64_i64_i64_i64,
+ iPTR_func_i32,
+ iPTR_func_iPTR_i32_iPTR,
+ iPTR_func_iPTR_iPTR_iPTR,
+ f32_func_f32_f32_f32,
+ f64_func_f64_f64_f64,
+ func_i64_i64_iPTR_iPTR,
+ i32_func_i64_i64,
+ i32_func_i64_i64_i64_i64,
+ iPTR_func_f32,
+ iPTR_func_f64,
+ iPTR_func_i64_i64,
+ unsupported
+};
+
+struct RuntimeLibcallSignatureTable {
+ std::vector<RuntimeLibcallSignature> Table;
+
+ // Any newly-added libcalls will be unsupported by default.
+ RuntimeLibcallSignatureTable() : Table(RTLIB::UNKNOWN_LIBCALL, unsupported) {
+ // Integer
+ Table[RTLIB::SHL_I16] = i16_func_i16_i16;
+ Table[RTLIB::SHL_I32] = i32_func_i32_i32;
+ Table[RTLIB::SHL_I64] = i64_func_i64_i64;
+ Table[RTLIB::SHL_I128] = i64_i64_func_i64_i64_i32;
+ Table[RTLIB::SRL_I16] = i16_func_i16_i16;
+ Table[RTLIB::SRL_I32] = i32_func_i32_i32;
+ Table[RTLIB::SRL_I64] = i64_func_i64_i64;
+ Table[RTLIB::SRL_I128] = i64_i64_func_i64_i64_i32;
+ Table[RTLIB::SRA_I16] = i16_func_i16_i16;
+ Table[RTLIB::SRA_I32] = i32_func_i32_i32;
+ Table[RTLIB::SRA_I64] = i64_func_i64_i64;
+ Table[RTLIB::SRA_I128] = i64_i64_func_i64_i64_i32;
+ Table[RTLIB::MUL_I8] = i8_func_i8_i8;
+ Table[RTLIB::MUL_I16] = i16_func_i16_i16;
+ Table[RTLIB::MUL_I32] = i32_func_i32_i32;
+ Table[RTLIB::MUL_I64] = i64_func_i64_i64;
+ Table[RTLIB::MUL_I128] = i64_i64_func_i64_i64_i64_i64;
+ Table[RTLIB::MULO_I32] = i32_func_i32_i32_iPTR;
+ Table[RTLIB::MULO_I64] = i64_func_i64_i64_iPTR;
+ Table[RTLIB::MULO_I128] = i64_i64_func_i64_i64_i64_i64_iPTR;
+ Table[RTLIB::SDIV_I8] = i8_func_i8_i8;
+ Table[RTLIB::SDIV_I16] = i16_func_i16_i16;
+ Table[RTLIB::SDIV_I32] = i32_func_i32_i32;
+ Table[RTLIB::SDIV_I64] = i64_func_i64_i64;
+ Table[RTLIB::SDIV_I128] = i64_i64_func_i64_i64_i64_i64;
+ Table[RTLIB::UDIV_I8] = i8_func_i8_i8;
+ Table[RTLIB::UDIV_I16] = i16_func_i16_i16;
+ Table[RTLIB::UDIV_I32] = i32_func_i32_i32;
+ Table[RTLIB::UDIV_I64] = i64_func_i64_i64;
+ Table[RTLIB::UDIV_I128] = i64_i64_func_i64_i64_i64_i64;
+ Table[RTLIB::SREM_I8] = i8_func_i8_i8;
+ Table[RTLIB::SREM_I16] = i16_func_i16_i16;
+ Table[RTLIB::SREM_I32] = i32_func_i32_i32;
+ Table[RTLIB::SREM_I64] = i64_func_i64_i64;
+ Table[RTLIB::SREM_I128] = i64_i64_func_i64_i64_i64_i64;
+ Table[RTLIB::UREM_I8] = i8_func_i8_i8;
+ Table[RTLIB::UREM_I16] = i16_func_i16_i16;
+ Table[RTLIB::UREM_I32] = i32_func_i32_i32;
+ Table[RTLIB::UREM_I64] = i64_func_i64_i64;
+ Table[RTLIB::UREM_I128] = i64_i64_func_i64_i64_i64_i64;
+ Table[RTLIB::SDIVREM_I8] = i8_func_i8_i8;
+ Table[RTLIB::SDIVREM_I16] = i16_i16_func_i16_i16;
+ Table[RTLIB::SDIVREM_I32] = i32_i32_func_i32_i32;
+ Table[RTLIB::SDIVREM_I64] = i64_func_i64_i64;
+ Table[RTLIB::SDIVREM_I128] = i64_i64_i64_i64_func_i64_i64_i64_i64;
+ Table[RTLIB::UDIVREM_I8] = i8_func_i8_i8;
+ Table[RTLIB::UDIVREM_I16] = i16_i16_func_i16_i16;
+ Table[RTLIB::UDIVREM_I32] = i32_i32_func_i32_i32;
+ Table[RTLIB::UDIVREM_I64] = i64_i64_func_i64_i64;
+ Table[RTLIB::UDIVREM_I128] = i64_i64_i64_i64_func_i64_i64_i64_i64;
+ Table[RTLIB::NEG_I32] = i32_func_i32;
+ Table[RTLIB::NEG_I64] = i64_func_i64;
+
+ // Floating-point.
+ // All F80 and PPCF128 routines are unsupported.
+ Table[RTLIB::ADD_F32] = f32_func_f32_f32;
+ Table[RTLIB::ADD_F64] = f64_func_f64_f64;
+ Table[RTLIB::ADD_F128] = i64_i64_func_i64_i64_i64_i64;
+ Table[RTLIB::SUB_F32] = f32_func_f32_f32;
+ Table[RTLIB::SUB_F64] = f64_func_f64_f64;
+ Table[RTLIB::SUB_F128] = i64_i64_func_i64_i64_i64_i64;
+ Table[RTLIB::MUL_F32] = f32_func_f32_f32;
+ Table[RTLIB::MUL_F64] = f64_func_f64_f64;
+ Table[RTLIB::MUL_F128] = i64_i64_func_i64_i64_i64_i64;
+ Table[RTLIB::DIV_F32] = f32_func_f32_f32;
+ Table[RTLIB::DIV_F64] = f64_func_f64_f64;
+ Table[RTLIB::DIV_F128] = i64_i64_func_i64_i64_i64_i64;
+ Table[RTLIB::REM_F32] = f32_func_f32_f32;
+ Table[RTLIB::REM_F64] = f64_func_f64_f64;
+ Table[RTLIB::REM_F128] = i64_i64_func_i64_i64_i64_i64;
+ Table[RTLIB::FMA_F32] = f32_func_f32_f32_f32;
+ Table[RTLIB::FMA_F64] = f64_func_f64_f64_f64;
+ Table[RTLIB::FMA_F128] = i64_i64_func_i64_i64_i64_i64_i64_i64;
+ Table[RTLIB::POWI_F32] = f32_func_f32_i32;
+ Table[RTLIB::POWI_F64] = f64_func_f64_i32;
+ Table[RTLIB::POWI_F128] = i64_i64_func_i64_i64_i32;
+ Table[RTLIB::SQRT_F32] = f32_func_f32;
+ Table[RTLIB::SQRT_F64] = f64_func_f64;
+ Table[RTLIB::SQRT_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::CBRT_F32] = f32_func_f32;
+ Table[RTLIB::CBRT_F64] = f64_func_f64;
+ Table[RTLIB::CBRT_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::LOG_F32] = f32_func_f32;
+ Table[RTLIB::LOG_F64] = f64_func_f64;
+ Table[RTLIB::LOG_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::LOG2_F32] = f32_func_f32;
+ Table[RTLIB::LOG2_F64] = f64_func_f64;
+ Table[RTLIB::LOG2_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::LOG10_F32] = f32_func_f32;
+ Table[RTLIB::LOG10_F64] = f64_func_f64;
+ Table[RTLIB::LOG10_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::EXP_F32] = f32_func_f32;
+ Table[RTLIB::EXP_F64] = f64_func_f64;
+ Table[RTLIB::EXP_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::EXP2_F32] = f32_func_f32;
+ Table[RTLIB::EXP2_F64] = f64_func_f64;
+ Table[RTLIB::EXP2_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::EXP10_F32] = f32_func_f32;
+ Table[RTLIB::EXP10_F64] = f64_func_f64;
+ Table[RTLIB::EXP10_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::SIN_F32] = f32_func_f32;
+ Table[RTLIB::SIN_F64] = f64_func_f64;
+ Table[RTLIB::SIN_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::COS_F32] = f32_func_f32;
+ Table[RTLIB::COS_F64] = f64_func_f64;
+ Table[RTLIB::COS_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::SINCOS_F32] = func_f32_iPTR_iPTR;
+ Table[RTLIB::SINCOS_F64] = func_f64_iPTR_iPTR;
+ Table[RTLIB::SINCOS_F128] = func_i64_i64_iPTR_iPTR;
+ Table[RTLIB::POW_F32] = f32_func_f32_f32;
+ Table[RTLIB::POW_F64] = f64_func_f64_f64;
+ Table[RTLIB::POW_F128] = i64_i64_func_i64_i64_i64_i64;
+ Table[RTLIB::CEIL_F32] = f32_func_f32;
+ Table[RTLIB::CEIL_F64] = f64_func_f64;
+ Table[RTLIB::CEIL_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::TRUNC_F32] = f32_func_f32;
+ Table[RTLIB::TRUNC_F64] = f64_func_f64;
+ Table[RTLIB::TRUNC_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::RINT_F32] = f32_func_f32;
+ Table[RTLIB::RINT_F64] = f64_func_f64;
+ Table[RTLIB::RINT_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::NEARBYINT_F32] = f32_func_f32;
+ Table[RTLIB::NEARBYINT_F64] = f64_func_f64;
+ Table[RTLIB::NEARBYINT_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::ROUND_F32] = f32_func_f32;
+ Table[RTLIB::ROUND_F64] = f64_func_f64;
+ Table[RTLIB::ROUND_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::ROUNDEVEN_F32] = f32_func_f32;
+ Table[RTLIB::ROUNDEVEN_F64] = f64_func_f64;
+ Table[RTLIB::ROUNDEVEN_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::LROUND_F32] = iPTR_func_f32;
+ Table[RTLIB::LROUND_F64] = iPTR_func_f64;
+ Table[RTLIB::LROUND_F128] = iPTR_func_i64_i64;
+ Table[RTLIB::LLROUND_F32] = i64_func_f32;
+ Table[RTLIB::LLROUND_F64] = i64_func_f64;
+ Table[RTLIB::LLROUND_F128] = i64_func_i64_i64;
+ Table[RTLIB::LRINT_F32] = iPTR_func_f32;
+ Table[RTLIB::LRINT_F64] = iPTR_func_f64;
+ Table[RTLIB::LRINT_F128] = iPTR_func_i64_i64;
+ Table[RTLIB::LLRINT_F32] = i64_func_f32;
+ Table[RTLIB::LLRINT_F64] = i64_func_f64;
+ Table[RTLIB::LLRINT_F128] = i64_func_i64_i64;
+ Table[RTLIB::FLOOR_F32] = f32_func_f32;
+ Table[RTLIB::FLOOR_F64] = f64_func_f64;
+ Table[RTLIB::FLOOR_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::COPYSIGN_F32] = f32_func_f32_f32;
+ Table[RTLIB::COPYSIGN_F64] = f64_func_f64_f64;
+ Table[RTLIB::COPYSIGN_F128] = i64_i64_func_i64_i64_i64_i64;
+ Table[RTLIB::FMIN_F32] = f32_func_f32_f32;
+ Table[RTLIB::FMIN_F64] = f64_func_f64_f64;
+ Table[RTLIB::FMIN_F128] = i64_i64_func_i64_i64_i64_i64;
+ Table[RTLIB::FMAX_F32] = f32_func_f32_f32;
+ Table[RTLIB::FMAX_F64] = f64_func_f64_f64;
+ Table[RTLIB::FMAX_F128] = i64_i64_func_i64_i64_i64_i64;
+ Table[RTLIB::LDEXP_F32] = f32_func_f32_i32;
+ Table[RTLIB::LDEXP_F64] = f64_func_f64_i32;
+ Table[RTLIB::LDEXP_F128] = i64_i64_func_i64_i64_i32;
+ Table[RTLIB::FREXP_F32] = f32_func_f32_i32;
+ Table[RTLIB::FREXP_F64] = f64_func_f64_i32;
+ Table[RTLIB::FREXP_F128] = i64_i64_func_i64_i64_i32;
+
+ // Conversion
+ // All F80 and PPCF128 routines are unsupported.
+ Table[RTLIB::FPEXT_F64_F128] = i64_i64_func_f64;
+ Table[RTLIB::FPEXT_F32_F128] = i64_i64_func_f32;
+ Table[RTLIB::FPEXT_F32_F64] = f64_func_f32;
+ Table[RTLIB::FPEXT_F16_F32] = f32_func_i16;
+ Table[RTLIB::FPROUND_F32_F16] = i16_func_f32;
+ Table[RTLIB::FPROUND_F64_F16] = i16_func_f64;
+ Table[RTLIB::FPROUND_F64_F32] = f32_func_f64;
+ Table[RTLIB::FPROUND_F128_F16] = i16_func_i64_i64;
+ Table[RTLIB::FPROUND_F128_F32] = f32_func_i64_i64;
+ Table[RTLIB::FPROUND_F128_F64] = f64_func_i64_i64;
+ Table[RTLIB::FPTOSINT_F32_I32] = i32_func_f32;
+ Table[RTLIB::FPTOSINT_F32_I64] = i64_func_f32;
+ Table[RTLIB::FPTOSINT_F32_I128] = i64_i64_func_f32;
+ Table[RTLIB::FPTOSINT_F64_I32] = i32_func_f64;
+ Table[RTLIB::FPTOSINT_F64_I64] = i64_func_f64;
+ Table[RTLIB::FPTOSINT_F64_I128] = i64_i64_func_f64;
+ Table[RTLIB::FPTOSINT_F128_I32] = i32_func_i64_i64;
+ Table[RTLIB::FPTOSINT_F128_I64] = i64_func_i64_i64;
+ Table[RTLIB::FPTOSINT_F128_I128] = i64_i64_func_i64_i64;
+ Table[RTLIB::FPTOUINT_F32_I32] = i32_func_f32;
+ Table[RTLIB::FPTOUINT_F32_I64] = i64_func_f32;
+ Table[RTLIB::FPTOUINT_F32_I128] = i64_i64_func_f32;
+ Table[RTLIB::FPTOUINT_F64_I32] = i32_func_f64;
+ Table[RTLIB::FPTOUINT_F64_I64] = i64_func_f64;
+ Table[RTLIB::FPTOUINT_F64_I128] = i64_i64_func_f64;
+ Table[RTLIB::FPTOUINT_F128_I32] = i32_func_i64_i64;
+ Table[RTLIB::FPTOUINT_F128_I64] = i64_func_i64_i64;
+ Table[RTLIB::FPTOUINT_F128_I128] = i64_i64_func_i64_i64;
+ Table[RTLIB::SINTTOFP_I32_F32] = f32_func_i32;
+ Table[RTLIB::SINTTOFP_I32_F64] = f64_func_i32;
+ Table[RTLIB::SINTTOFP_I32_F128] = i64_i64_func_i32;
+ Table[RTLIB::SINTTOFP_I64_F32] = f32_func_i64;
+ Table[RTLIB::SINTTOFP_I64_F64] = f64_func_i64;
+ Table[RTLIB::SINTTOFP_I64_F128] = i64_i64_func_i64;
+ Table[RTLIB::SINTTOFP_I128_F32] = f32_func_i64_i64;
+ Table[RTLIB::SINTTOFP_I128_F64] = f64_func_i64_i64;
+ Table[RTLIB::SINTTOFP_I128_F128] = i64_i64_func_i64_i64;
+ Table[RTLIB::UINTTOFP_I32_F32] = f32_func_i32;
+    Table[RTLIB::UINTTOFP_I32_F64] = f64_func_i32;
+ Table[RTLIB::UINTTOFP_I32_F128] = i64_i64_func_i32;
+ Table[RTLIB::UINTTOFP_I64_F32] = f32_func_i64;
+ Table[RTLIB::UINTTOFP_I64_F64] = f64_func_i64;
+ Table[RTLIB::UINTTOFP_I64_F128] = i64_i64_func_i64;
+ Table[RTLIB::UINTTOFP_I128_F32] = f32_func_i64_i64;
+ Table[RTLIB::UINTTOFP_I128_F64] = f64_func_i64_i64;
+ Table[RTLIB::UINTTOFP_I128_F128] = i64_i64_func_i64_i64;
+
+ // Comparison
+    // All F80 and PPCF128 routines are unsupported.
+ Table[RTLIB::OEQ_F32] = i32_func_f32_f32;
+ Table[RTLIB::OEQ_F64] = i32_func_f64_f64;
+ Table[RTLIB::OEQ_F128] = i32_func_i64_i64_i64_i64;
+ Table[RTLIB::UNE_F32] = i32_func_f32_f32;
+ Table[RTLIB::UNE_F64] = i32_func_f64_f64;
+ Table[RTLIB::UNE_F128] = i32_func_i64_i64_i64_i64;
+ Table[RTLIB::OGE_F32] = i32_func_f32_f32;
+ Table[RTLIB::OGE_F64] = i32_func_f64_f64;
+ Table[RTLIB::OGE_F128] = i32_func_i64_i64_i64_i64;
+ Table[RTLIB::OLT_F32] = i32_func_f32_f32;
+ Table[RTLIB::OLT_F64] = i32_func_f64_f64;
+ Table[RTLIB::OLT_F128] = i32_func_i64_i64_i64_i64;
+ Table[RTLIB::OLE_F32] = i32_func_f32_f32;
+ Table[RTLIB::OLE_F64] = i32_func_f64_f64;
+ Table[RTLIB::OLE_F128] = i32_func_i64_i64_i64_i64;
+ Table[RTLIB::OGT_F32] = i32_func_f32_f32;
+ Table[RTLIB::OGT_F64] = i32_func_f64_f64;
+ Table[RTLIB::OGT_F128] = i32_func_i64_i64_i64_i64;
+ Table[RTLIB::UO_F32] = i32_func_f32_f32;
+ Table[RTLIB::UO_F64] = i32_func_f64_f64;
+ Table[RTLIB::UO_F128] = i32_func_i64_i64_i64_i64;
+
+ // Memory
+ Table[RTLIB::MEMCPY] = iPTR_func_iPTR_iPTR_iPTR;
+ Table[RTLIB::MEMSET] = iPTR_func_iPTR_i32_iPTR;
+ Table[RTLIB::MEMMOVE] = iPTR_func_iPTR_iPTR_iPTR;
+
+ // __stack_chk_fail
+ Table[RTLIB::STACKPROTECTOR_CHECK_FAIL] = func;
+
+ // Return address handling
+ Table[RTLIB::RETURN_ADDRESS] = iPTR_func_i32;
+
+ // Element-wise Atomic memory
+ // TODO: Fix these when we implement atomic support
+ Table[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_1] = unsupported;
+ Table[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_2] = unsupported;
+ Table[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_4] = unsupported;
+ Table[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_8] = unsupported;
+ Table[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_16] = unsupported;
+ Table[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1] = unsupported;
+ Table[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2] = unsupported;
+ Table[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4] = unsupported;
+ Table[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8] = unsupported;
+ Table[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16] = unsupported;
+
+ Table[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_1] = unsupported;
+ Table[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_2] = unsupported;
+ Table[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_4] = unsupported;
+ Table[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_8] = unsupported;
+ Table[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_16] = unsupported;
+
+ // Atomic '__sync_*' libcalls.
+ // TODO: Fix these when we implement atomic support
+ Table[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = unsupported;
+ Table[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = unsupported;
+ Table[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4] = unsupported;
+ Table[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8] = unsupported;
+ Table[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_16] = unsupported;
+ Table[RTLIB::SYNC_LOCK_TEST_AND_SET_1] = unsupported;
+ Table[RTLIB::SYNC_LOCK_TEST_AND_SET_2] = unsupported;
+ Table[RTLIB::SYNC_LOCK_TEST_AND_SET_4] = unsupported;
+ Table[RTLIB::SYNC_LOCK_TEST_AND_SET_8] = unsupported;
+ Table[RTLIB::SYNC_LOCK_TEST_AND_SET_16] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_ADD_1] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_ADD_2] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_ADD_4] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_ADD_8] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_ADD_16] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_SUB_1] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_SUB_2] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_SUB_4] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_SUB_8] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_SUB_16] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_AND_1] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_AND_2] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_AND_4] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_AND_8] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_AND_16] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_OR_1] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_OR_2] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_OR_4] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_OR_8] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_OR_16] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_XOR_1] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_XOR_2] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_XOR_4] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_XOR_8] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_XOR_16] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_NAND_1] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_NAND_2] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_NAND_4] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_NAND_8] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_NAND_16] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_MAX_1] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_MAX_2] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_MAX_4] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_MAX_8] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_MAX_16] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_UMAX_1] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_UMAX_2] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_UMAX_4] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_UMAX_8] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_UMAX_16] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_MIN_1] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_MIN_2] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_MIN_4] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_MIN_8] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_MIN_16] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_UMIN_1] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_UMIN_2] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_UMIN_4] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_UMIN_8] = unsupported;
+ Table[RTLIB::SYNC_FETCH_AND_UMIN_16] = unsupported;
+
+ // Atomic '__atomic_*' libcalls.
+ // TODO: Fix these when we implement atomic support
+ Table[RTLIB::ATOMIC_LOAD] = unsupported;
+ Table[RTLIB::ATOMIC_LOAD_1] = unsupported;
+ Table[RTLIB::ATOMIC_LOAD_2] = unsupported;
+ Table[RTLIB::ATOMIC_LOAD_4] = unsupported;
+ Table[RTLIB::ATOMIC_LOAD_8] = unsupported;
+ Table[RTLIB::ATOMIC_LOAD_16] = unsupported;
+
+ Table[RTLIB::ATOMIC_STORE] = unsupported;
+ Table[RTLIB::ATOMIC_STORE_1] = unsupported;
+ Table[RTLIB::ATOMIC_STORE_2] = unsupported;
+ Table[RTLIB::ATOMIC_STORE_4] = unsupported;
+ Table[RTLIB::ATOMIC_STORE_8] = unsupported;
+ Table[RTLIB::ATOMIC_STORE_16] = unsupported;
+
+ Table[RTLIB::ATOMIC_EXCHANGE] = unsupported;
+ Table[RTLIB::ATOMIC_EXCHANGE_1] = unsupported;
+ Table[RTLIB::ATOMIC_EXCHANGE_2] = unsupported;
+ Table[RTLIB::ATOMIC_EXCHANGE_4] = unsupported;
+ Table[RTLIB::ATOMIC_EXCHANGE_8] = unsupported;
+ Table[RTLIB::ATOMIC_EXCHANGE_16] = unsupported;
+
+ Table[RTLIB::ATOMIC_COMPARE_EXCHANGE] = unsupported;
+ Table[RTLIB::ATOMIC_COMPARE_EXCHANGE_1] = unsupported;
+ Table[RTLIB::ATOMIC_COMPARE_EXCHANGE_2] = unsupported;
+ Table[RTLIB::ATOMIC_COMPARE_EXCHANGE_4] = unsupported;
+ Table[RTLIB::ATOMIC_COMPARE_EXCHANGE_8] = unsupported;
+ Table[RTLIB::ATOMIC_COMPARE_EXCHANGE_16] = unsupported;
+
+ Table[RTLIB::ATOMIC_FETCH_ADD_1] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_ADD_2] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_ADD_4] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_ADD_8] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_ADD_16] = unsupported;
+
+ Table[RTLIB::ATOMIC_FETCH_SUB_1] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_SUB_2] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_SUB_4] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_SUB_8] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_SUB_16] = unsupported;
+
+ Table[RTLIB::ATOMIC_FETCH_AND_1] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_AND_2] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_AND_4] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_AND_8] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_AND_16] = unsupported;
+
+ Table[RTLIB::ATOMIC_FETCH_OR_1] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_OR_2] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_OR_4] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_OR_8] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_OR_16] = unsupported;
+
+ Table[RTLIB::ATOMIC_FETCH_XOR_1] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_XOR_2] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_XOR_4] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_XOR_8] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_XOR_16] = unsupported;
+
+ Table[RTLIB::ATOMIC_FETCH_NAND_1] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_NAND_2] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_NAND_4] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_NAND_8] = unsupported;
+ Table[RTLIB::ATOMIC_FETCH_NAND_16] = unsupported;
+ }
+};
+
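+// Constructs the signature table lazily on first use; initialization of a
+// function-local static is guaranteed to run exactly once.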
+RuntimeLibcallSignatureTable &getRuntimeLibcallSignatures() {
+ static RuntimeLibcallSignatureTable RuntimeLibcallSignatures;
+ return RuntimeLibcallSignatures;
+}
+
+// Maps libcall names to their RTLIB::Libcall numbers. The map is built in the
+// constructor so the whole table can live in a function-local static variable.
+struct StaticLibcallNameMap {
+ StringMap<RTLIB::Libcall> Map;
+ StaticLibcallNameMap() {
+ static const std::pair<const char *, RTLIB::Libcall> NameLibcalls[] = {
+#define HANDLE_LIBCALL(code, name) {(const char *)name, RTLIB::code},
+#include "llvm/IR/RuntimeLibcalls.def"
+#undef HANDLE_LIBCALL
+ };
+ for (const auto &NameLibcall : NameLibcalls) {
+ if (NameLibcall.first != nullptr &&
+ getRuntimeLibcallSignatures().Table[NameLibcall.second] !=
+ unsupported) {
+ assert(!Map.contains(NameLibcall.first) &&
+ "duplicate libcall names in name map");
+ Map[NameLibcall.first] = NameLibcall.second;
+ }
+ }
+ // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
+ // consistent with the f64 and f128 names.
+ Map["__extendhfsf2"] = RTLIB::FPEXT_F16_F32;
+ Map["__truncsfhf2"] = RTLIB::FPROUND_F32_F16;
+
+ Map["emscripten_return_address"] = RTLIB::RETURN_ADDRESS;
+ }
+};
+
+} // end anonymous namespace
+
+void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
+ RTLIB::Libcall LC,
+ SmallVectorImpl<wasm::ValType> &Rets,
+ SmallVectorImpl<wasm::ValType> &Params) {
+ assert(Rets.empty());
+ assert(Params.empty());
+
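+  // Signatures written with iPTR resolve to the pointer width of the target:
+  // i32 on wasm32 and i64 on wasm64.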
+ wasm::ValType PtrTy =
+ Subtarget.hasAddr64() ? wasm::ValType::I64 : wasm::ValType::I32;
+
+ auto &Table = getRuntimeLibcallSignatures().Table;
+ switch (Table[LC]) {
+ case func:
+ break;
+ case f32_func_f32:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case f32_func_f64:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case f32_func_i32:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case f32_func_i64:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case f32_func_i16:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case f64_func_f32:
+ Rets.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case f64_func_f64:
+ Rets.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case f64_func_i32:
+ Rets.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case f64_func_i64:
+ Rets.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i32_func_f32:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case i32_func_f64:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case i32_func_i32:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case i64_func_f32:
+ Rets.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case i64_func_f64:
+ Rets.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case i64_func_i64:
+ Rets.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case f32_func_f32_f32:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case f32_func_f32_i32:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case f32_func_i64_i64:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case f64_func_f64_f64:
+ Rets.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case f64_func_f64_i32:
+ Rets.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case f64_func_i64_i64:
+ Rets.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i16_func_f32:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case i16_func_f64:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case i16_func_i64_i64:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i8_func_i8_i8:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case func_f32_iPTR_iPTR:
+ Params.push_back(wasm::ValType::F32);
+ Params.push_back(PtrTy);
+ Params.push_back(PtrTy);
+ break;
+ case func_f64_iPTR_iPTR:
+ Params.push_back(wasm::ValType::F64);
+ Params.push_back(PtrTy);
+ Params.push_back(PtrTy);
+ break;
+ case i16_func_i16_i16:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case i32_func_f32_f32:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case i32_func_f64_f64:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case i32_func_i32_i32:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case i32_func_i32_i32_iPTR:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ Params.push_back(PtrTy);
+ break;
+ case i64_func_i64_i64:
+ Rets.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i64_func_i64_i64_iPTR:
+ Rets.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(PtrTy);
+ break;
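+  // For the multi-register cases below, wide results are returned directly as
+  // multiple values when the multivalue feature is available; otherwise they
+  // are written through an extra pointer parameter prepended to the parameter
+  // list, in an sret-like convention.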
+ case i64_i64_func_f32:
+ if (Subtarget.hasMultivalue()) {
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+ } else {
+ Params.push_back(PtrTy);
+ }
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case i64_i64_func_f64:
+ if (Subtarget.hasMultivalue()) {
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+ } else {
+ Params.push_back(PtrTy);
+ }
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case i16_i16_func_i16_i16:
+ if (Subtarget.hasMultivalue()) {
+ Rets.push_back(wasm::ValType::I32);
+ Rets.push_back(wasm::ValType::I32);
+ } else {
+ Params.push_back(PtrTy);
+ }
+ Params.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case i32_i32_func_i32_i32:
+ if (Subtarget.hasMultivalue()) {
+ Rets.push_back(wasm::ValType::I32);
+ Rets.push_back(wasm::ValType::I32);
+ } else {
+ Params.push_back(PtrTy);
+ }
+ Params.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case i64_i64_func_i64_i64:
+ if (Subtarget.hasMultivalue()) {
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+ } else {
+ Params.push_back(PtrTy);
+ }
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i64_i64_func_i64_i64_i64_i64:
+ if (Subtarget.hasMultivalue()) {
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+ } else {
+ Params.push_back(PtrTy);
+ }
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i64_i64_func_i64_i64_i64_i64_iPTR:
+ if (Subtarget.hasMultivalue()) {
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+ } else {
+ Params.push_back(PtrTy);
+ }
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(PtrTy);
+ break;
+ case i64_i64_i64_i64_func_i64_i64_i64_i64:
+ if (Subtarget.hasMultivalue()) {
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+ } else {
+ Params.push_back(PtrTy);
+ }
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i64_i64_func_i64_i64_i32:
+ if (Subtarget.hasMultivalue()) {
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+ } else {
+ Params.push_back(PtrTy);
+ }
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case iPTR_func_i32:
+ Rets.push_back(PtrTy);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case iPTR_func_iPTR_i32_iPTR:
+ Rets.push_back(PtrTy);
+ Params.push_back(PtrTy);
+ Params.push_back(wasm::ValType::I32);
+ Params.push_back(PtrTy);
+ break;
+ case iPTR_func_iPTR_iPTR_iPTR:
+ Rets.push_back(PtrTy);
+ Params.push_back(PtrTy);
+ Params.push_back(PtrTy);
+ Params.push_back(PtrTy);
+ break;
+ case f32_func_f32_f32_f32:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case f64_func_f64_f64_f64:
+ Rets.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case func_i64_i64_iPTR_iPTR:
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(PtrTy);
+ Params.push_back(PtrTy);
+ break;
+ case i32_func_i64_i64:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i32_func_i64_i64_i64_i64:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case iPTR_func_f32:
+ Rets.push_back(PtrTy);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case iPTR_func_f64:
+ Rets.push_back(PtrTy);
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case iPTR_func_i64_i64:
+ Rets.push_back(PtrTy);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i64_i64_func_i64_i64_i64_i64_i64_i64:
+ if (Subtarget.hasMultivalue()) {
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+ } else {
+ Params.push_back(PtrTy);
+ }
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i64_i64_func_i32:
+ if (Subtarget.hasMultivalue()) {
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+ } else {
+ Params.push_back(PtrTy);
+ }
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case i64_i64_func_i64:
+ if (Subtarget.hasMultivalue()) {
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+ } else {
+ Params.push_back(PtrTy);
+ }
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case unsupported:
+ llvm_unreachable("unsupported runtime library signature");
+ }
+}
+
+// TODO: If the RTLIB::Libcall-taking flavor of getLibcallSignature remains
+// unused other than here, just roll its logic into this version.
+void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
+ StringRef Name,
+ SmallVectorImpl<wasm::ValType> &Rets,
+ SmallVectorImpl<wasm::ValType> &Params) {
+ static StaticLibcallNameMap LibcallNameMap;
+ auto &Map = LibcallNameMap.Map;
+ auto Val = Map.find(Name);
+#ifndef NDEBUG
+ if (Val == Map.end()) {
+ auto Message =
+ std::string("unexpected runtime library name: ") + std::string(Name);
+ llvm_unreachable(Message.c_str());
+ }
+#endif
+ return getLibcallSignature(Subtarget, Val->second, Rets, Params);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h
new file mode 100644
index 000000000000..f7a94aa20bd4
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h
@@ -0,0 +1,37 @@
+// CodeGen/RuntimeLibcallSignatures.h - R.T. Lib. Call Signatures -*- C++ -*--//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file provides signature information for runtime libcalls.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_RUNTIME_LIBCALL_SIGNATURES_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_RUNTIME_LIBCALL_SIGNATURES_H
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
+
+namespace llvm {
+
+class WebAssemblySubtarget;
+
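+// Fill Rets and Params with the wasm value types of the given libcall's
+// signature; for example, RTLIB::MUL_I64 yields Rets = {I64} and
+// Params = {I64, I64}.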
+extern void getLibcallSignature(const WebAssemblySubtarget &Subtarget,
+ RTLIB::Libcall LC,
+ SmallVectorImpl<wasm::ValType> &Rets,
+ SmallVectorImpl<wasm::ValType> &Params);
+
+extern void getLibcallSignature(const WebAssemblySubtarget &Subtarget,
+ StringRef Name,
+ SmallVectorImpl<wasm::ValType> &Rets,
+ SmallVectorImpl<wasm::ValType> &Params);
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp
new file mode 100644
index 000000000000..74af4c8873f7
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp
@@ -0,0 +1,59 @@
+//===-- WebAssemblySelectionDAGInfo.cpp - WebAssembly SelectionDAG Info ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the WebAssemblySelectionDAGInfo class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyTargetMachine.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-selectiondag-info"
+
+WebAssemblySelectionDAGInfo::~WebAssemblySelectionDAGInfo() = default; // anchor
+
+SDValue WebAssemblySelectionDAGInfo::EmitTargetCodeForMemcpy(
+ SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, Align Alignment, bool IsVolatile, bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
+ auto &ST = DAG.getMachineFunction().getSubtarget<WebAssemblySubtarget>();
+ if (!ST.hasBulkMemory())
+ return SDValue();
+
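+  // Lower to a single `memory.copy` instruction from the bulk-memory
+  // proposal; both memory-index operands refer to the default linear memory.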
+ SDValue MemIdx = DAG.getConstant(0, DL, MVT::i32);
+ auto LenMVT = ST.hasAddr64() ? MVT::i64 : MVT::i32;
+ return DAG.getNode(WebAssemblyISD::MEMORY_COPY, DL, MVT::Other,
+ {Chain, MemIdx, MemIdx, Dst, Src,
+ DAG.getZExtOrTrunc(Size, DL, LenMVT)});
+}
+
+SDValue WebAssemblySelectionDAGInfo::EmitTargetCodeForMemmove(
+ SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Op1, SDValue Op2,
+ SDValue Op3, Align Alignment, bool IsVolatile,
+ MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
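+  // Wasm's `memory.copy` is specified to handle overlapping ranges, so
+  // memmove can reuse the memcpy lowering directly.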
+ return EmitTargetCodeForMemcpy(DAG, DL, Chain, Op1, Op2, Op3,
+ Alignment, IsVolatile, false,
+ DstPtrInfo, SrcPtrInfo);
+}
+
+SDValue WebAssemblySelectionDAGInfo::EmitTargetCodeForMemset(
+ SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dst, SDValue Val,
+ SDValue Size, Align Alignment, bool IsVolatile, bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo) const {
+ auto &ST = DAG.getMachineFunction().getSubtarget<WebAssemblySubtarget>();
+ if (!ST.hasBulkMemory())
+ return SDValue();
+
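+  // Lower to a single `memory.fill` instruction from the bulk-memory proposal.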
+ SDValue MemIdx = DAG.getConstant(0, DL, MVT::i32);
+ auto LenMVT = ST.hasAddr64() ? MVT::i64 : MVT::i32;
+  // Only the low byte of the value argument matters, so any-extend the i8.
+ return DAG.getNode(WebAssemblyISD::MEMORY_FILL, DL, MVT::Other, Chain, MemIdx,
+ Dst, DAG.getAnyExtOrTrunc(Val, DL, MVT::i32),
+ DAG.getZExtOrTrunc(Size, DL, LenMVT));
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h
new file mode 100644
index 000000000000..fd517b238715
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h
@@ -0,0 +1,46 @@
+//=- WebAssemblySelectionDAGInfo.h - WebAssembly SelectionDAG Info -*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
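+// Each enumerator encodes a signature as <rets>_func_<params>; for example,
+// i64_i64_func_f32 takes one f32 and returns an i128 as two i64 halves.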
+///
+/// \file
+/// This file defines the WebAssembly subclass for
+/// SelectionDAGTargetInfo.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYSELECTIONDAGINFO_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYSELECTIONDAGINFO_H
+
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
+
+namespace llvm {
+
+class WebAssemblySelectionDAGInfo final : public SelectionDAGTargetInfo {
+public:
+ ~WebAssemblySelectionDAGInfo() override;
+ SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
+ SDValue Chain, SDValue Op1, SDValue Op2,
+ SDValue Op3, Align Alignment, bool isVolatile,
+ bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const override;
+ SDValue
+ EmitTargetCodeForMemmove(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
+ SDValue Op1, SDValue Op2, SDValue Op3,
+ Align Alignment, bool isVolatile,
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const override;
+ SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &DL,
+ SDValue Chain, SDValue Op1, SDValue Op2,
+ SDValue Op3, Align Alignment, bool IsVolatile,
+ bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo) const override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
new file mode 100644
index 000000000000..a453e7388e27
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
@@ -0,0 +1,97 @@
+//=- WebAssemblySetP2AlignOperands.cpp - Set alignments on loads and stores -=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file sets the p2align operands on load and store instructions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyInstrInfo.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-set-p2align-operands"
+
+namespace {
+class WebAssemblySetP2AlignOperands final : public MachineFunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblySetP2AlignOperands() : MachineFunctionPass(ID) {}
+
+ StringRef getPassName() const override {
+ return "WebAssembly Set p2align Operands";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addPreserved<MachineBlockFrequencyInfo>();
+ AU.addPreservedID(MachineDominatorsID);
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // end anonymous namespace
+
+char WebAssemblySetP2AlignOperands::ID = 0;
+INITIALIZE_PASS(WebAssemblySetP2AlignOperands, DEBUG_TYPE,
+ "Set the p2align operands for WebAssembly loads and stores",
+ false, false)
+
+FunctionPass *llvm::createWebAssemblySetP2AlignOperands() {
+ return new WebAssemblySetP2AlignOperands();
+}
+
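+// Wasm memory instructions carry an alignment hint encoded as the log2 of the
+// byte alignment (hence "p2align"). ISel leaves the operand as 0; rewrite it
+// from the alignment recorded on the instruction's memory operand.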
+static void rewriteP2Align(MachineInstr &MI, unsigned OperandNo) {
+ assert(MI.getOperand(OperandNo).getImm() == 0 &&
+ "ISel should set p2align operands to 0");
+ assert(MI.hasOneMemOperand() &&
+ "Load and store instructions have exactly one mem operand");
+ assert((*MI.memoperands_begin())->getSize() ==
+ (UINT64_C(1) << WebAssembly::GetDefaultP2Align(MI.getOpcode())) &&
+ "Default p2align value should be natural");
+ assert(MI.getDesc().operands()[OperandNo].OperandType ==
+ WebAssembly::OPERAND_P2ALIGN &&
+ "Load and store instructions should have a p2align operand");
+ uint64_t P2Align = Log2((*MI.memoperands_begin())->getAlign());
+
+ // WebAssembly does not currently support supernatural alignment.
+ P2Align = std::min(P2Align,
+ uint64_t(WebAssembly::GetDefaultP2Align(MI.getOpcode())));
+
+ MI.getOperand(OperandNo).setImm(P2Align);
+}
+
+bool WebAssemblySetP2AlignOperands::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG({
+ dbgs() << "********** Set p2align Operands **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
+
+ bool Changed = false;
+
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ int16_t P2AlignOpNum = WebAssembly::getNamedOperandIdx(
+ MI.getOpcode(), WebAssembly::OpName::p2align);
+ if (P2AlignOpNum != -1) {
+ rewriteP2Align(MI, P2AlignOpNum);
+ Changed = true;
+ }
+ }
+ }
+
+ return Changed;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySortRegion.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySortRegion.cpp
new file mode 100644
index 000000000000..cd84e68aed14
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySortRegion.cpp
@@ -0,0 +1,78 @@
+#include "WebAssemblySortRegion.h"
+#include "WebAssemblyExceptionInfo.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+
+using namespace llvm;
+using namespace WebAssembly;
+
+namespace llvm {
+namespace WebAssembly {
+template <>
+bool ConcreteSortRegion<MachineLoop>::isLoop() const {
+ return true;
+}
+} // end namespace WebAssembly
+} // end namespace llvm
+
+const SortRegion *SortRegionInfo::getRegionFor(const MachineBasicBlock *MBB) {
+ const auto *ML = MLI.getLoopFor(MBB);
+ const auto *WE = WEI.getExceptionFor(MBB);
+ if (!ML && !WE)
+ return nullptr;
+ // We determine subregion relationship by domination of their headers, i.e.,
+ // if region A's header dominates region B's header, B is a subregion of A.
+ // WebAssemblyException contains BBs in all its subregions (loops or
+ // exceptions), but MachineLoop may not, because MachineLoop does not
+ // contain BBs that don't have a path to its header even if they are
+ // dominated by its header. So here we should use
+ // WE->contains(ML->getHeader()), but not ML->contains(WE->getHeader()).
+ if ((ML && !WE) || (ML && WE && WE->contains(ML->getHeader()))) {
+ // If the smallest region containing MBB is a loop
+ if (LoopMap.count(ML))
+ return LoopMap[ML].get();
+ LoopMap[ML] = std::make_unique<ConcreteSortRegion<MachineLoop>>(ML);
+ return LoopMap[ML].get();
+ } else {
+ // If the smallest region containing MBB is an exception
+ if (ExceptionMap.count(WE))
+ return ExceptionMap[WE].get();
+ ExceptionMap[WE] =
+ std::make_unique<ConcreteSortRegion<WebAssemblyException>>(WE);
+ return ExceptionMap[WE].get();
+ }
+}
+
+MachineBasicBlock *SortRegionInfo::getBottom(const SortRegion *R) {
+ if (R->isLoop())
+ return getBottom(MLI.getLoopFor(R->getHeader()));
+ else
+ return getBottom(WEI.getExceptionFor(R->getHeader()));
+}
+
+MachineBasicBlock *SortRegionInfo::getBottom(const MachineLoop *ML) {
+ MachineBasicBlock *Bottom = ML->getHeader();
+ for (MachineBasicBlock *MBB : ML->blocks()) {
+ if (MBB->getNumber() > Bottom->getNumber())
+ Bottom = MBB;
+    // MachineLoop does not contain all BBs dominated by its header. BBs that
+    // don't have a path back to the loop header aren't included. But for the
+    // purpose of CFG sorting and stackification, we need a bottom BB among all
+    // BBs that are dominated by the loop header. So we check if there is any
+    // WebAssemblyException contained in this loop, and compute the bottommost
+    // BB among them all.
+ if (MBB->isEHPad()) {
+ MachineBasicBlock *ExBottom = getBottom(WEI.getExceptionFor(MBB));
+ if (ExBottom->getNumber() > Bottom->getNumber())
+ Bottom = ExBottom;
+ }
+ }
+ return Bottom;
+}
+
+MachineBasicBlock *SortRegionInfo::getBottom(const WebAssemblyException *WE) {
+ MachineBasicBlock *Bottom = WE->getHeader();
+ for (MachineBasicBlock *MBB : WE->blocks())
+ if (MBB->getNumber() > Bottom->getNumber())
+ Bottom = MBB;
+ return Bottom;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySortRegion.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySortRegion.h
new file mode 100644
index 000000000000..e92bf1764185
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySortRegion.h
@@ -0,0 +1,91 @@
+//===-- WebAssemblySortRegion.h - WebAssembly Sort SortRegion ----*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements regions used in CFGSort and CFGStackify.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYSORTREGION_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYSORTREGION_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/iterator_range.h"
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineLoop;
+class MachineLoopInfo;
+class WebAssemblyException;
+class WebAssemblyExceptionInfo;
+
+namespace WebAssembly {
+
+// Wrapper for loops and exceptions
+class SortRegion {
+public:
+ virtual ~SortRegion() = default;
+ virtual MachineBasicBlock *getHeader() const = 0;
+ virtual bool contains(const MachineBasicBlock *MBB) const = 0;
+ virtual unsigned getNumBlocks() const = 0;
+ using block_iterator = typename ArrayRef<MachineBasicBlock *>::const_iterator;
+ virtual iterator_range<block_iterator> blocks() const = 0;
+ virtual bool isLoop() const = 0;
+};
+
+template <typename T> class ConcreteSortRegion : public SortRegion {
+ const T *Unit;
+
+public:
+ ConcreteSortRegion(const T *Unit) : Unit(Unit) {}
+ MachineBasicBlock *getHeader() const override { return Unit->getHeader(); }
+ bool contains(const MachineBasicBlock *MBB) const override {
+ return Unit->contains(MBB);
+ }
+ unsigned getNumBlocks() const override { return Unit->getNumBlocks(); }
+ iterator_range<block_iterator> blocks() const override {
+ return Unit->blocks();
+ }
+ bool isLoop() const override { return false; }
+};
+
+// This class holds information about nested SortRegions; it is analogous to
+// what LoopInfo is for loops.
+class SortRegionInfo {
+ friend class ConcreteSortRegion<MachineLoopInfo>;
+ friend class ConcreteSortRegion<WebAssemblyException>;
+
+ const MachineLoopInfo &MLI;
+ const WebAssemblyExceptionInfo &WEI;
+ DenseMap<const MachineLoop *, std::unique_ptr<SortRegion>> LoopMap;
+ DenseMap<const WebAssemblyException *, std::unique_ptr<SortRegion>>
+ ExceptionMap;
+
+public:
+ SortRegionInfo(const MachineLoopInfo &MLI,
+ const WebAssemblyExceptionInfo &WEI)
+ : MLI(MLI), WEI(WEI) {}
+
+  // Returns the smallest loop or exception that contains MBB.
+ const SortRegion *getRegionFor(const MachineBasicBlock *MBB);
+
+ // Return the "bottom" block among all blocks dominated by the region
+ // (MachineLoop or WebAssemblyException) header. This works when the entity is
+ // discontiguous.
+ MachineBasicBlock *getBottom(const SortRegion *R);
+ MachineBasicBlock *getBottom(const MachineLoop *ML);
+ MachineBasicBlock *getBottom(const WebAssemblyException *WE);
+};
+
+} // end namespace WebAssembly
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
new file mode 100644
index 000000000000..912f61765579
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
@@ -0,0 +1,60 @@
+//===-- WebAssemblySubtarget.cpp - WebAssembly Subtarget Information ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the WebAssembly-specific subclass of
+/// TargetSubtargetInfo.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblySubtarget.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyInstrInfo.h"
+#include "llvm/MC/TargetRegistry.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-subtarget"
+
+#define GET_SUBTARGETINFO_CTOR
+#define GET_SUBTARGETINFO_TARGET_DESC
+#include "WebAssemblyGenSubtargetInfo.inc"
+
+WebAssemblySubtarget &
+WebAssemblySubtarget::initializeSubtargetDependencies(StringRef CPU,
+ StringRef FS) {
+ // Determine default and user-specified characteristics
+ LLVM_DEBUG(llvm::dbgs() << "initializeSubtargetDependencies\n");
+
+ if (CPU.empty())
+ CPU = "generic";
+
+ ParseSubtargetFeatures(CPU, /*TuneCPU*/ CPU, FS);
+ return *this;
+}
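+
+// For reference: FS is a comma-separated subtarget feature string such as
+// "+simd128,+atomics". ParseSubtargetFeatures is generated by TableGen and
+// sets the corresponding feature flags declared in WebAssemblySubtarget.h.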
+
+WebAssemblySubtarget::WebAssemblySubtarget(const Triple &TT,
+ const std::string &CPU,
+ const std::string &FS,
+ const TargetMachine &TM)
+ : WebAssemblyGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS),
+ TargetTriple(TT), InstrInfo(initializeSubtargetDependencies(CPU, FS)),
+ TLInfo(TM, *this) {}
+
+bool WebAssemblySubtarget::enableAtomicExpand() const {
+ // If atomics are disabled, atomic ops are lowered instead of expanded
+ return hasAtomics();
+}
+
+bool WebAssemblySubtarget::enableMachineScheduler() const {
+ // Disable the MachineScheduler for now. Even with ShouldTrackPressure set and
+ // enableMachineSchedDefaultSched overridden, it appears to have an overall
+ // negative effect for the kinds of register optimizations we're doing.
+ return false;
+}
+
+bool WebAssemblySubtarget::useAA() const { return true; }
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
new file mode 100644
index 000000000000..85d02b087c78
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
@@ -0,0 +1,114 @@
+//=- WebAssemblySubtarget.h - Define Subtarget for the WebAssembly -*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares the WebAssembly-specific subclass of
+/// TargetSubtargetInfo.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYSUBTARGET_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYSUBTARGET_H
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyFrameLowering.h"
+#include "WebAssemblyISelLowering.h"
+#include "WebAssemblyInstrInfo.h"
+#include "WebAssemblySelectionDAGInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include <string>
+
+#define GET_SUBTARGETINFO_HEADER
+#include "WebAssemblyGenSubtargetInfo.inc"
+
+namespace llvm {
+
+// Defined in WebAssemblyGenSubtargetInfo.inc.
+extern const SubtargetFeatureKV
+ WebAssemblyFeatureKV[WebAssembly::NumSubtargetFeatures];
+
+class WebAssemblySubtarget final : public WebAssemblyGenSubtargetInfo {
+ enum SIMDEnum {
+ NoSIMD,
+ SIMD128,
+ RelaxedSIMD,
+ } SIMDLevel = NoSIMD;
+
+ bool HasAtomics = false;
+ bool HasNontrappingFPToInt = false;
+ bool HasSignExt = false;
+ bool HasExceptionHandling = false;
+ bool HasBulkMemory = false;
+ bool HasMultivalue = false;
+ bool HasMutableGlobals = false;
+ bool HasTailCall = false;
+ bool HasReferenceTypes = false;
+ bool HasExtendedConst = false;
+ bool HasMultiMemory = false;
+
+ /// What processor and OS we're targeting.
+ Triple TargetTriple;
+
+ WebAssemblyFrameLowering FrameLowering;
+ WebAssemblyInstrInfo InstrInfo;
+ WebAssemblySelectionDAGInfo TSInfo;
+ WebAssemblyTargetLowering TLInfo;
+
+ WebAssemblySubtarget &initializeSubtargetDependencies(StringRef CPU,
+ StringRef FS);
+
+public:
+ /// This constructor initializes the data members to match that
+ /// of the specified triple.
+ WebAssemblySubtarget(const Triple &TT, const std::string &CPU,
+ const std::string &FS, const TargetMachine &TM);
+
+ const WebAssemblySelectionDAGInfo *getSelectionDAGInfo() const override {
+ return &TSInfo;
+ }
+ const WebAssemblyFrameLowering *getFrameLowering() const override {
+ return &FrameLowering;
+ }
+ const WebAssemblyTargetLowering *getTargetLowering() const override {
+ return &TLInfo;
+ }
+ const WebAssemblyInstrInfo *getInstrInfo() const override {
+ return &InstrInfo;
+ }
+ const WebAssemblyRegisterInfo *getRegisterInfo() const override {
+ return &getInstrInfo()->getRegisterInfo();
+ }
+ const Triple &getTargetTriple() const { return TargetTriple; }
+ bool enableAtomicExpand() const override;
+ bool enableIndirectBrExpand() const override { return true; }
+ bool enableMachineScheduler() const override;
+ bool useAA() const override;
+
+ // Predicates used by WebAssemblyInstrInfo.td.
+ bool hasAddr64() const { return TargetTriple.isArch64Bit(); }
+ bool hasSIMD128() const { return SIMDLevel >= SIMD128; }
+ bool hasRelaxedSIMD() const { return SIMDLevel >= RelaxedSIMD; }
+ bool hasAtomics() const { return HasAtomics; }
+ bool hasNontrappingFPToInt() const { return HasNontrappingFPToInt; }
+ bool hasSignExt() const { return HasSignExt; }
+ bool hasExceptionHandling() const { return HasExceptionHandling; }
+ bool hasBulkMemory() const { return HasBulkMemory; }
+ bool hasMultivalue() const { return HasMultivalue; }
+ bool hasMutableGlobals() const { return HasMutableGlobals; }
+ bool hasTailCall() const { return HasTailCall; }
+ bool hasReferenceTypes() const { return HasReferenceTypes; }
+ bool hasMultiMemory() const { return HasMultiMemory; }
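+
+  // Illustrative use of these predicates at a hypothetical call site:
+  //   if (Subtarget->hasSIMD128())
+  //     ... select v128 instructions ...
+  //   else
+  //     ... fall back to scalar lowering ...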
+
+  /// Parses the feature string, setting the specified subtarget options. The
+  /// definition of this function is auto-generated by TableGen.
+ void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
new file mode 100644
index 000000000000..2db1b6493cc4
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
@@ -0,0 +1,634 @@
+//===- WebAssemblyTargetMachine.cpp - Define TargetMachine for WebAssembly -==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the WebAssembly-specific subclass of TargetMachine.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyTargetMachine.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "TargetInfo/WebAssemblyTargetInfo.h"
+#include "WebAssembly.h"
+#include "WebAssemblyISelLowering.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblyTargetObjectFile.h"
+#include "WebAssemblyTargetTransformInfo.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/CodeGen/MIRParser/MIParser.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/Function.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Scalar/LowerAtomicPass.h"
+#include "llvm/Transforms/Utils.h"
+#include <optional>
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm"
+
+// A command-line option to keep implicit locals
+// for the purpose of testing with lit/llc ONLY.
+// This produces output that is not valid WebAssembly and is not supported
+// by assemblers/disassemblers or other MC-based tools.
+static cl::opt<bool> WasmDisableExplicitLocals(
+ "wasm-disable-explicit-locals", cl::Hidden,
+ cl::desc("WebAssembly: output implicit locals in"
+ " instruction output for test purposes only."),
+ cl::init(false));
+
+static cl::opt<bool> WasmDisableFixIrreducibleControlFlowPass(
+ "wasm-disable-fix-irreducible-control-flow-pass", cl::Hidden,
+ cl::desc("webassembly: disables the fix "
+ " irreducible control flow optimization pass"),
+ cl::init(false));
+
+extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeWebAssemblyTarget() {
+ // Register the target.
+ RegisterTargetMachine<WebAssemblyTargetMachine> X(
+ getTheWebAssemblyTarget32());
+ RegisterTargetMachine<WebAssemblyTargetMachine> Y(
+ getTheWebAssemblyTarget64());
+
+ // Register backend passes
+ auto &PR = *PassRegistry::getPassRegistry();
+ initializeWebAssemblyAddMissingPrototypesPass(PR);
+ initializeWebAssemblyLowerEmscriptenEHSjLjPass(PR);
+ initializeLowerGlobalDtorsLegacyPassPass(PR);
+ initializeFixFunctionBitcastsPass(PR);
+ initializeOptimizeReturnedPass(PR);
+ initializeWebAssemblyArgumentMovePass(PR);
+ initializeWebAssemblySetP2AlignOperandsPass(PR);
+ initializeWebAssemblyReplacePhysRegsPass(PR);
+ initializeWebAssemblyOptimizeLiveIntervalsPass(PR);
+ initializeWebAssemblyMemIntrinsicResultsPass(PR);
+ initializeWebAssemblyRegStackifyPass(PR);
+ initializeWebAssemblyRegColoringPass(PR);
+ initializeWebAssemblyNullifyDebugValueListsPass(PR);
+ initializeWebAssemblyFixIrreducibleControlFlowPass(PR);
+ initializeWebAssemblyLateEHPreparePass(PR);
+ initializeWebAssemblyExceptionInfoPass(PR);
+ initializeWebAssemblyCFGSortPass(PR);
+ initializeWebAssemblyCFGStackifyPass(PR);
+ initializeWebAssemblyExplicitLocalsPass(PR);
+ initializeWebAssemblyLowerBrUnlessPass(PR);
+ initializeWebAssemblyRegNumberingPass(PR);
+ initializeWebAssemblyDebugFixupPass(PR);
+ initializeWebAssemblyPeepholePass(PR);
+ initializeWebAssemblyMCLowerPrePassPass(PR);
+ initializeWebAssemblyLowerRefTypesIntPtrConvPass(PR);
+ initializeWebAssemblyFixBrTableDefaultsPass(PR);
+ initializeWebAssemblyDAGToDAGISelPass(PR);
+}
+
+//===----------------------------------------------------------------------===//
+// WebAssembly Lowering public interface.
+//===----------------------------------------------------------------------===//
+
+static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM,
+ const Triple &TT) {
+ if (!RM) {
+    // Default to the static relocation model. This should always be more optimal
+ // than PIC since the static linker can determine all global addresses and
+ // assume direct function calls.
+ return Reloc::Static;
+ }
+
+ return *RM;
+}
+
+/// Create a WebAssembly architecture model.
+///
+WebAssemblyTargetMachine::WebAssemblyTargetMachine(
+ const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
+ const TargetOptions &Options, std::optional<Reloc::Model> RM,
+ std::optional<CodeModel::Model> CM, CodeGenOptLevel OL, bool JIT)
+ : LLVMTargetMachine(
+ T,
+ TT.isArch64Bit()
+ ? (TT.isOSEmscripten() ? "e-m:e-p:64:64-p10:8:8-p20:8:8-i64:64-"
+ "f128:64-n32:64-S128-ni:1:10:20"
+ : "e-m:e-p:64:64-p10:8:8-p20:8:8-i64:64-"
+ "n32:64-S128-ni:1:10:20")
+ : (TT.isOSEmscripten() ? "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-"
+ "f128:64-n32:64-S128-ni:1:10:20"
+ : "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-"
+ "n32:64-S128-ni:1:10:20"),
+ TT, CPU, FS, Options, getEffectiveRelocModel(RM, TT),
+ getEffectiveCodeModel(CM, CodeModel::Large), OL),
+ TLOF(new WebAssemblyTargetObjectFile()) {
+ // WebAssembly type-checks instructions, but a noreturn function with a return
+ // type that doesn't match the context will cause a check failure. So we lower
+ // LLVM 'unreachable' to ISD::TRAP and then lower that to WebAssembly's
+  // 'unreachable' instruction, which is meant for that case.
+ this->Options.TrapUnreachable = true;
+ this->Options.NoTrapAfterNoreturn = false;
+
+ // WebAssembly treats each function as an independent unit. Force
+ // -ffunction-sections, effectively, so that we can emit them independently.
+ this->Options.FunctionSections = true;
+ this->Options.DataSections = true;
+ this->Options.UniqueSectionNames = true;
+
+ initAsmInfo();
+
+  // Note that we don't use setRequiresStructuredCFG(true). It disables
+  // optimizations that we're OK with, and want, such as critical-edge
+  // splitting and tail merging.
+}
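+
+// In the data layout strings above, "ni:1:10:20" marks address spaces 1, 10,
+// and 20 as non-integral; 10 and 20 hold externref and funcref values (see
+// Utils/WasmAddressSpaces.h), with nominal 8-bit pointer sizes ("p10:8:8",
+// "p20:8:8") since such references have no byte-addressable representation.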
+
+WebAssemblyTargetMachine::~WebAssemblyTargetMachine() = default; // anchor.
+
+const WebAssemblySubtarget *WebAssemblyTargetMachine::getSubtargetImpl() const {
+ return getSubtargetImpl(std::string(getTargetCPU()),
+ std::string(getTargetFeatureString()));
+}
+
+const WebAssemblySubtarget *
+WebAssemblyTargetMachine::getSubtargetImpl(std::string CPU,
+ std::string FS) const {
+ auto &I = SubtargetMap[CPU + FS];
+ if (!I) {
+ I = std::make_unique<WebAssemblySubtarget>(TargetTriple, CPU, FS, *this);
+ }
+ return I.get();
+}
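+
+// Subtargets are cached keyed on the concatenated CPU and feature strings, so
+// functions with identical target-cpu/target-features attributes share one
+// WebAssemblySubtarget instance.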
+
+const WebAssemblySubtarget *
+WebAssemblyTargetMachine::getSubtargetImpl(const Function &F) const {
+ Attribute CPUAttr = F.getFnAttribute("target-cpu");
+ Attribute FSAttr = F.getFnAttribute("target-features");
+
+ std::string CPU =
+ CPUAttr.isValid() ? CPUAttr.getValueAsString().str() : TargetCPU;
+ std::string FS =
+ FSAttr.isValid() ? FSAttr.getValueAsString().str() : TargetFS;
+
+ // This needs to be done before we create a new subtarget since any
+ // creation will depend on the TM and the code generation flags on the
+ // function that reside in TargetOptions.
+ resetTargetOptions(F);
+
+ return getSubtargetImpl(CPU, FS);
+}
+
+namespace {
+
+class CoalesceFeaturesAndStripAtomics final : public ModulePass {
+ // Take the union of all features used in the module and use it for each
+ // function individually, since having multiple feature sets in one module
+ // currently does not make sense for WebAssembly. If atomics are not enabled,
+ // also strip atomic operations and thread local storage.
+ static char ID;
+ WebAssemblyTargetMachine *WasmTM;
+
+public:
+ CoalesceFeaturesAndStripAtomics(WebAssemblyTargetMachine *WasmTM)
+ : ModulePass(ID), WasmTM(WasmTM) {}
+
+ bool runOnModule(Module &M) override {
+ FeatureBitset Features = coalesceFeatures(M);
+
+ std::string FeatureStr = getFeatureString(Features);
+ WasmTM->setTargetFeatureString(FeatureStr);
+ for (auto &F : M)
+ replaceFeatures(F, FeatureStr);
+
+ bool StrippedAtomics = false;
+ bool StrippedTLS = false;
+
+ if (!Features[WebAssembly::FeatureAtomics]) {
+ StrippedAtomics = stripAtomics(M);
+ StrippedTLS = stripThreadLocals(M);
+ } else if (!Features[WebAssembly::FeatureBulkMemory]) {
+ StrippedTLS |= stripThreadLocals(M);
+ }
+
+ if (StrippedAtomics && !StrippedTLS)
+ stripThreadLocals(M);
+ else if (StrippedTLS && !StrippedAtomics)
+ stripAtomics(M);
+
+ recordFeatures(M, Features, StrippedAtomics || StrippedTLS);
+
+ // Conservatively assume we have made some change
+ return true;
+ }
+
+private:
+ FeatureBitset coalesceFeatures(const Module &M) {
+ FeatureBitset Features =
+ WasmTM
+ ->getSubtargetImpl(std::string(WasmTM->getTargetCPU()),
+ std::string(WasmTM->getTargetFeatureString()))
+ ->getFeatureBits();
+ for (auto &F : M)
+ Features |= WasmTM->getSubtargetImpl(F)->getFeatureBits();
+ return Features;
+ }
+
+ std::string getFeatureString(const FeatureBitset &Features) {
+ std::string Ret;
+ for (const SubtargetFeatureKV &KV : WebAssemblyFeatureKV) {
+ if (Features[KV.Value])
+ Ret += (StringRef("+") + KV.Key + ",").str();
+ }
+ return Ret;
+ }
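+
+  // For example, with atomics and bulk-memory enabled this returns
+  // "+atomics,+bulk-memory," (note the trailing separator, which the
+  // feature-string parser tolerates).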
+
+ void replaceFeatures(Function &F, const std::string &Features) {
+ F.removeFnAttr("target-features");
+ F.removeFnAttr("target-cpu");
+ F.addFnAttr("target-features", Features);
+ }
+
+ bool stripAtomics(Module &M) {
+ // Detect whether any atomics will be lowered, since there is no way to tell
+ // whether the LowerAtomic pass lowers e.g. stores.
+ bool Stripped = false;
+ for (auto &F : M) {
+ for (auto &B : F) {
+ for (auto &I : B) {
+ if (I.isAtomic()) {
+ Stripped = true;
+ goto done;
+ }
+ }
+ }
+ }
+
+ done:
+ if (!Stripped)
+ return false;
+
+ LowerAtomicPass Lowerer;
+ FunctionAnalysisManager FAM;
+ for (auto &F : M)
+ Lowerer.run(F, FAM);
+
+ return true;
+ }
+
+ bool stripThreadLocals(Module &M) {
+ bool Stripped = false;
+ for (auto &GV : M.globals()) {
+ if (GV.isThreadLocal()) {
+ Stripped = true;
+ GV.setThreadLocal(false);
+ }
+ }
+ return Stripped;
+ }
+
+ void recordFeatures(Module &M, const FeatureBitset &Features, bool Stripped) {
+ for (const SubtargetFeatureKV &KV : WebAssemblyFeatureKV) {
+ if (Features[KV.Value]) {
+ // Mark features as used
+ std::string MDKey = (StringRef("wasm-feature-") + KV.Key).str();
+ M.addModuleFlag(Module::ModFlagBehavior::Error, MDKey,
+ wasm::WASM_FEATURE_PREFIX_USED);
+ }
+ }
+ // Code compiled without atomics or bulk-memory may have had its atomics or
+ // thread-local data lowered to nonatomic operations or non-thread-local
+ // data. In that case, we mark the pseudo-feature "shared-mem" as disallowed
+    // to tell the linker that it would be unsafe to allow this code to be used
+ // in a module with shared memory.
+ if (Stripped) {
+ M.addModuleFlag(Module::ModFlagBehavior::Error, "wasm-feature-shared-mem",
+ wasm::WASM_FEATURE_PREFIX_DISALLOWED);
+ }
+ }
+};
+char CoalesceFeaturesAndStripAtomics::ID = 0;
+
+/// WebAssembly Code Generator Pass Configuration Options.
+class WebAssemblyPassConfig final : public TargetPassConfig {
+public:
+ WebAssemblyPassConfig(WebAssemblyTargetMachine &TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ WebAssemblyTargetMachine &getWebAssemblyTargetMachine() const {
+ return getTM<WebAssemblyTargetMachine>();
+ }
+
+ FunctionPass *createTargetRegisterAllocator(bool) override;
+
+ void addIRPasses() override;
+ void addISelPrepare() override;
+ bool addInstSelector() override;
+ void addOptimizedRegAlloc() override;
+ void addPostRegAlloc() override;
+ bool addGCPasses() override { return false; }
+ void addPreEmitPass() override;
+ bool addPreISel() override;
+
+ // No reg alloc
+ bool addRegAssignAndRewriteFast() override { return false; }
+
+ // No reg alloc
+ bool addRegAssignAndRewriteOptimized() override { return false; }
+};
+} // end anonymous namespace
+
+MachineFunctionInfo *WebAssemblyTargetMachine::createMachineFunctionInfo(
+ BumpPtrAllocator &Allocator, const Function &F,
+ const TargetSubtargetInfo *STI) const {
+ return WebAssemblyFunctionInfo::create<WebAssemblyFunctionInfo>(Allocator, F,
+ STI);
+}
+
+TargetTransformInfo
+WebAssemblyTargetMachine::getTargetTransformInfo(const Function &F) const {
+ return TargetTransformInfo(WebAssemblyTTIImpl(this, F));
+}
+
+TargetPassConfig *
+WebAssemblyTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new WebAssemblyPassConfig(*this, PM);
+}
+
+FunctionPass *WebAssemblyPassConfig::createTargetRegisterAllocator(bool) {
+ return nullptr; // No reg alloc
+}
+
+using WebAssembly::WasmEnableEH;
+using WebAssembly::WasmEnableEmEH;
+using WebAssembly::WasmEnableEmSjLj;
+using WebAssembly::WasmEnableSjLj;
+
+static void basicCheckForEHAndSjLj(TargetMachine *TM) {
+ // Before checking, we make sure TargetOptions.ExceptionModel is the same as
+ // MCAsmInfo.ExceptionsType. Normally these have to be the same, because clang
+ // stores the exception model info in LangOptions, which is later transferred
+ // to TargetOptions and MCAsmInfo. But when clang compiles bitcode directly,
+ // clang's LangOptions is not used and thus the exception model info is not
+ // correctly transferred to TargetOptions and MCAsmInfo, so we make sure we
+  // have the correct exception model in the WebAssemblyMCAsmInfo constructor.
+  // But in that case TargetOptions is still not updated, so we make sure the
+  // two agree here.
+ TM->Options.ExceptionModel = TM->getMCAsmInfo()->getExceptionHandlingType();
+
+ // Basic Correctness checking related to -exception-model
+ if (TM->Options.ExceptionModel != ExceptionHandling::None &&
+ TM->Options.ExceptionModel != ExceptionHandling::Wasm)
+ report_fatal_error("-exception-model should be either 'none' or 'wasm'");
+ if (WasmEnableEmEH && TM->Options.ExceptionModel == ExceptionHandling::Wasm)
+ report_fatal_error("-exception-model=wasm not allowed with "
+ "-enable-emscripten-cxx-exceptions");
+ if (WasmEnableEH && TM->Options.ExceptionModel != ExceptionHandling::Wasm)
+ report_fatal_error(
+ "-wasm-enable-eh only allowed with -exception-model=wasm");
+ if (WasmEnableSjLj && TM->Options.ExceptionModel != ExceptionHandling::Wasm)
+ report_fatal_error(
+ "-wasm-enable-sjlj only allowed with -exception-model=wasm");
+ if ((!WasmEnableEH && !WasmEnableSjLj) &&
+ TM->Options.ExceptionModel == ExceptionHandling::Wasm)
+ report_fatal_error(
+ "-exception-model=wasm only allowed with at least one of "
+ "-wasm-enable-eh or -wasm-enable-sjj");
+
+ // You can't enable two modes of EH at the same time
+ if (WasmEnableEmEH && WasmEnableEH)
+ report_fatal_error(
+ "-enable-emscripten-cxx-exceptions not allowed with -wasm-enable-eh");
+ // You can't enable two modes of SjLj at the same time
+ if (WasmEnableEmSjLj && WasmEnableSjLj)
+ report_fatal_error(
+ "-enable-emscripten-sjlj not allowed with -wasm-enable-sjlj");
+ // You can't mix Emscripten EH with Wasm SjLj.
+ if (WasmEnableEmEH && WasmEnableSjLj)
+ report_fatal_error(
+ "-enable-emscripten-cxx-exceptions not allowed with -wasm-enable-sjlj");
+ // Currently it is allowed to mix Wasm EH with Emscripten SjLj as an interim
+ // measure, but some code will error out at compile time in this combination.
+ // See WebAssemblyLowerEmscriptenEHSjLj pass for details.
+}
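+
+// Examples of how these checks interact (illustrative): "-wasm-enable-eh
+// -exception-model=wasm" and "-enable-emscripten-cxx-exceptions" (with the
+// default exception model) are both accepted, while combinations such as
+// "-exception-model=wasm -enable-emscripten-cxx-exceptions" or
+// "-wasm-enable-eh -enable-emscripten-cxx-exceptions" are rejected above.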
+
+//===----------------------------------------------------------------------===//
+// The following functions are called from lib/CodeGen/Passes.cpp to modify
+// the CodeGen pass sequence.
+//===----------------------------------------------------------------------===//
+
+void WebAssemblyPassConfig::addIRPasses() {
+ // Add signatures to prototype-less function declarations
+ addPass(createWebAssemblyAddMissingPrototypes());
+
+ // Lower .llvm.global_dtors into .llvm.global_ctors with __cxa_atexit calls.
+ addPass(createLowerGlobalDtorsLegacyPass());
+
+ // Fix function bitcasts, as WebAssembly requires caller and callee signatures
+ // to match.
+ addPass(createWebAssemblyFixFunctionBitcasts());
+
+ // Optimize "returned" function attributes.
+ if (getOptLevel() != CodeGenOptLevel::None)
+ addPass(createWebAssemblyOptimizeReturned());
+
+ basicCheckForEHAndSjLj(TM);
+
+ // If exception handling is not enabled and setjmp/longjmp handling is
+ // enabled, we lower invokes into calls and delete unreachable landingpad
+ // blocks. Lowering invokes when there is no EH support is done in
+ // TargetPassConfig::addPassesToHandleExceptions, but that runs after these IR
+ // passes and Emscripten SjLj handling expects all invokes to be lowered
+ // before.
+ if (!WasmEnableEmEH && !WasmEnableEH) {
+ addPass(createLowerInvokePass());
+ // The lower invoke pass may create unreachable code. Remove it in order not
+ // to process dead blocks in setjmp/longjmp handling.
+ addPass(createUnreachableBlockEliminationPass());
+ }
+
+ // Handle exceptions and setjmp/longjmp if enabled. Unlike Wasm EH preparation
+ // done in WasmEHPrepare pass, Wasm SjLj preparation shares libraries and
+ // transformation algorithms with Emscripten SjLj, so we run
+ // LowerEmscriptenEHSjLj pass also when Wasm SjLj is enabled.
+ if (WasmEnableEmEH || WasmEnableEmSjLj || WasmEnableSjLj)
+ addPass(createWebAssemblyLowerEmscriptenEHSjLj());
+
+ // Expand indirectbr instructions to switches.
+ addPass(createIndirectBrExpandPass());
+
+ TargetPassConfig::addIRPasses();
+}
+
+void WebAssemblyPassConfig::addISelPrepare() {
+ WebAssemblyTargetMachine *WasmTM =
+ static_cast<WebAssemblyTargetMachine *>(TM);
+ const WebAssemblySubtarget *Subtarget =
+ WasmTM->getSubtargetImpl(std::string(WasmTM->getTargetCPU()),
+ std::string(WasmTM->getTargetFeatureString()));
+ if (Subtarget->hasReferenceTypes()) {
+ // We need to remove allocas for reference types
+ addPass(createPromoteMemoryToRegisterPass(true));
+ }
+ // Lower atomics and TLS if necessary
+ addPass(new CoalesceFeaturesAndStripAtomics(&getWebAssemblyTargetMachine()));
+
+ // This is a no-op if atomics are not used in the module
+ addPass(createAtomicExpandPass());
+
+ TargetPassConfig::addISelPrepare();
+}
+
+bool WebAssemblyPassConfig::addInstSelector() {
+ (void)TargetPassConfig::addInstSelector();
+ addPass(
+ createWebAssemblyISelDag(getWebAssemblyTargetMachine(), getOptLevel()));
+ // Run the argument-move pass immediately after the ScheduleDAG scheduler
+ // so that we can fix up the ARGUMENT instructions before anything else
+ // sees them in the wrong place.
+ addPass(createWebAssemblyArgumentMove());
+ // Set the p2align operands. This information is present during ISel, however
+ // it's inconvenient to collect. Collect it now, and update the immediate
+ // operands.
+ addPass(createWebAssemblySetP2AlignOperands());
+
+ // Eliminate range checks and add default targets to br_table instructions.
+ addPass(createWebAssemblyFixBrTableDefaults());
+
+ return false;
+}
+
+void WebAssemblyPassConfig::addOptimizedRegAlloc() {
+ // Currently RegisterCoalesce degrades wasm debug info quality by a
+ // significant margin. As a quick fix, disable this for -O1, which is often
+ // used for debugging large applications. Disabling this increases code size
+ // of Emscripten core benchmarks by ~5%, which is acceptable for -O1, which is
+ // usually not used for production builds.
+ // TODO Investigate why RegisterCoalesce degrades debug info quality and fix
+ // it properly
+ if (getOptLevel() == CodeGenOptLevel::Less)
+ disablePass(&RegisterCoalescerID);
+ TargetPassConfig::addOptimizedRegAlloc();
+}
+
+void WebAssemblyPassConfig::addPostRegAlloc() {
+ // TODO: The following CodeGen passes don't currently support code containing
+ // virtual registers. Consider removing their restrictions and re-enabling
+ // them.
+
+ // These functions all require the NoVRegs property.
+ disablePass(&MachineLateInstrsCleanupID);
+ disablePass(&MachineCopyPropagationID);
+ disablePass(&PostRAMachineSinkingID);
+ disablePass(&PostRASchedulerID);
+ disablePass(&FuncletLayoutID);
+ disablePass(&StackMapLivenessID);
+ disablePass(&PatchableFunctionID);
+ disablePass(&ShrinkWrapID);
+
+ // This pass hurts code size for wasm because it can generate irreducible
+ // control flow.
+ disablePass(&MachineBlockPlacementID);
+
+ TargetPassConfig::addPostRegAlloc();
+}
+
+void WebAssemblyPassConfig::addPreEmitPass() {
+ TargetPassConfig::addPreEmitPass();
+
+ // Nullify DBG_VALUE_LISTs that we cannot handle.
+ addPass(createWebAssemblyNullifyDebugValueLists());
+
+ // Eliminate multiple-entry loops.
+ if (!WasmDisableFixIrreducibleControlFlowPass)
+ addPass(createWebAssemblyFixIrreducibleControlFlow());
+
+ // Do various transformations for exception handling.
+  // All CFG-changing optimizations should come before this pass.
+ if (TM->Options.ExceptionModel == ExceptionHandling::Wasm)
+ addPass(createWebAssemblyLateEHPrepare());
+
+ // Now that we have a prologue and epilogue and all frame indices are
+ // rewritten, eliminate SP and FP. This allows them to be stackified,
+ // colored, and numbered with the rest of the registers.
+ addPass(createWebAssemblyReplacePhysRegs());
+
+ // Preparations and optimizations related to register stackification.
+ if (getOptLevel() != CodeGenOptLevel::None) {
+ // Depend on LiveIntervals and perform some optimizations on it.
+ addPass(createWebAssemblyOptimizeLiveIntervals());
+
+ // Prepare memory intrinsic calls for register stackifying.
+ addPass(createWebAssemblyMemIntrinsicResults());
+
+ // Mark registers as representing wasm's value stack. This is a key
+ // code-compression technique in WebAssembly. We run this pass (and
+ // MemIntrinsicResults above) very late, so that it sees as much code as
+ // possible, including code emitted by PEI and expanded by late tail
+ // duplication.
+ addPass(createWebAssemblyRegStackify());
+
+ // Run the register coloring pass to reduce the total number of registers.
+ // This runs after stackification so that it doesn't consider registers
+ // that become stackified.
+ addPass(createWebAssemblyRegColoring());
+ }
+
+ // Sort the blocks of the CFG into topological order, a prerequisite for
+ // BLOCK and LOOP markers.
+ addPass(createWebAssemblyCFGSort());
+
+ // Insert BLOCK and LOOP markers.
+ addPass(createWebAssemblyCFGStackify());
+
+ // Insert explicit local.get and local.set operators.
+ if (!WasmDisableExplicitLocals)
+ addPass(createWebAssemblyExplicitLocals());
+
+ // Lower br_unless into br_if.
+ addPass(createWebAssemblyLowerBrUnless());
+
+ // Perform the very last peephole optimizations on the code.
+ if (getOptLevel() != CodeGenOptLevel::None)
+ addPass(createWebAssemblyPeephole());
+
+ // Create a mapping from LLVM CodeGen virtual registers to wasm registers.
+ addPass(createWebAssemblyRegNumbering());
+
+ // Fix debug_values whose defs have been stackified.
+ if (!WasmDisableExplicitLocals)
+ addPass(createWebAssemblyDebugFixup());
+
+ // Collect information to prepare for MC lowering / asm printing.
+ addPass(createWebAssemblyMCLowerPrePass());
+}
+
+bool WebAssemblyPassConfig::addPreISel() {
+ TargetPassConfig::addPreISel();
+ addPass(createWebAssemblyLowerRefTypesIntPtrConv());
+ return false;
+}
+
+yaml::MachineFunctionInfo *
+WebAssemblyTargetMachine::createDefaultFuncInfoYAML() const {
+ return new yaml::WebAssemblyFunctionInfo();
+}
+
+yaml::MachineFunctionInfo *WebAssemblyTargetMachine::convertFuncInfoToYAML(
+ const MachineFunction &MF) const {
+ const auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
+ return new yaml::WebAssemblyFunctionInfo(MF, *MFI);
+}
+
+bool WebAssemblyTargetMachine::parseMachineFunctionInfo(
+ const yaml::MachineFunctionInfo &MFI, PerFunctionMIParsingState &PFS,
+ SMDiagnostic &Error, SMRange &SourceRange) const {
+ const auto &YamlMFI = static_cast<const yaml::WebAssemblyFunctionInfo &>(MFI);
+ MachineFunction &MF = PFS.MF;
+ MF.getInfo<WebAssemblyFunctionInfo>()->initializeBaseYamlFields(MF, YamlMFI);
+ return false;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h
new file mode 100644
index 000000000000..2e8cd43840e3
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h
@@ -0,0 +1,69 @@
+// WebAssemblyTargetMachine.h - Define TargetMachine for WebAssembly -*- C++ -*-
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares the WebAssembly-specific subclass of
+/// TargetMachine.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYTARGETMACHINE_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYTARGETMACHINE_H
+
+#include "WebAssemblySubtarget.h"
+#include "llvm/Target/TargetMachine.h"
+#include <optional>
+
+namespace llvm {
+
+class WebAssemblyTargetMachine final : public LLVMTargetMachine {
+ std::unique_ptr<TargetLoweringObjectFile> TLOF;
+ mutable StringMap<std::unique_ptr<WebAssemblySubtarget>> SubtargetMap;
+
+public:
+ WebAssemblyTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ std::optional<Reloc::Model> RM,
+ std::optional<CodeModel::Model> CM,
+ CodeGenOptLevel OL, bool JIT);
+
+ ~WebAssemblyTargetMachine() override;
+
+ const WebAssemblySubtarget *getSubtargetImpl() const;
+ const WebAssemblySubtarget *getSubtargetImpl(std::string CPU,
+ std::string FS) const;
+ const WebAssemblySubtarget *
+ getSubtargetImpl(const Function &F) const override;
+
+ // Pass Pipeline Configuration
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+
+ TargetLoweringObjectFile *getObjFileLowering() const override {
+ return TLOF.get();
+ }
+
+ MachineFunctionInfo *
+ createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F,
+ const TargetSubtargetInfo *STI) const override;
+
+ TargetTransformInfo getTargetTransformInfo(const Function &F) const override;
+
+ bool usesPhysRegsForValues() const override { return false; }
+
+ yaml::MachineFunctionInfo *createDefaultFuncInfoYAML() const override;
+ yaml::MachineFunctionInfo *
+ convertFuncInfoToYAML(const MachineFunction &MF) const override;
+ bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &,
+ PerFunctionMIParsingState &PFS,
+ SMDiagnostic &Error,
+ SMRange &SourceRange) const override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp
new file mode 100644
index 000000000000..ad57c600db10
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp
@@ -0,0 +1,24 @@
+//===-- WebAssemblyTargetObjectFile.cpp - WebAssembly Object Info ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the functions of the WebAssembly-specific subclass
+/// of TargetLoweringObjectFile.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyTargetObjectFile.h"
+#include "WebAssemblyTargetMachine.h"
+
+using namespace llvm;
+
+void WebAssemblyTargetObjectFile::Initialize(MCContext &Ctx,
+ const TargetMachine &TM) {
+ TargetLoweringObjectFileWasm::Initialize(Ctx, TM);
+ InitializeWasm();
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h
new file mode 100644
index 000000000000..f46bb2040a7d
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h
@@ -0,0 +1,29 @@
+//===-- WebAssemblyTargetObjectFile.h - WebAssembly Object Info -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares the WebAssembly-specific subclass of
+/// TargetLoweringObjectFile.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYTARGETOBJECTFILE_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYTARGETOBJECTFILE_H
+
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+
+namespace llvm {
+
+class WebAssemblyTargetObjectFile final : public TargetLoweringObjectFileWasm {
+public:
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
new file mode 100644
index 000000000000..9a434d9b1db5
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
@@ -0,0 +1,144 @@
+//===-- WebAssemblyTargetTransformInfo.cpp - WebAssembly-specific TTI -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the WebAssembly-specific TargetTransformInfo
+/// implementation.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyTargetTransformInfo.h"
+#include "llvm/CodeGen/CostTable.h"
+#include "llvm/Support/Debug.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasmtti"
+
+TargetTransformInfo::PopcntSupportKind
+WebAssemblyTTIImpl::getPopcntSupport(unsigned TyWidth) const {
+ assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
+ return TargetTransformInfo::PSK_FastHardware;
+}
+
+unsigned WebAssemblyTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
+ unsigned Result = BaseT::getNumberOfRegisters(ClassID);
+
+ // For SIMD, use at least 16 registers, as a rough guess.
+ bool Vector = (ClassID == 1);
+ if (Vector)
+ Result = std::max(Result, 16u);
+
+ return Result;
+}
+
+TypeSize WebAssemblyTTIImpl::getRegisterBitWidth(
+ TargetTransformInfo::RegisterKind K) const {
+ switch (K) {
+ case TargetTransformInfo::RGK_Scalar:
+ return TypeSize::getFixed(64);
+ case TargetTransformInfo::RGK_FixedWidthVector:
+ return TypeSize::getFixed(getST()->hasSIMD128() ? 128 : 64);
+ case TargetTransformInfo::RGK_ScalableVector:
+ return TypeSize::getScalable(0);
+ }
+
+ llvm_unreachable("Unsupported register kind");
+}
+
+InstructionCost WebAssemblyTTIImpl::getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
+ TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
+ ArrayRef<const Value *> Args,
+ const Instruction *CxtI) {
+
+ InstructionCost Cost =
+ BasicTTIImplBase<WebAssemblyTTIImpl>::getArithmeticInstrCost(
+ Opcode, Ty, CostKind, Op1Info, Op2Info);
+
+ if (auto *VTy = dyn_cast<VectorType>(Ty)) {
+ switch (Opcode) {
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::Shl:
+ // SIMD128's shifts currently only accept a scalar shift count. For each
+ // element, we'll need to extract, op, insert. The following is a rough
+ // approximation.
+ if (!Op2Info.isUniform())
+ Cost =
+ cast<FixedVectorType>(VTy)->getNumElements() *
+ (TargetTransformInfo::TCC_Basic +
+ getArithmeticInstrCost(Opcode, VTy->getElementType(), CostKind) +
+ TargetTransformInfo::TCC_Basic);
+ break;
+ }
+ }
+ return Cost;
+}
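+
+// Worked example (illustrative): for a <4 x i32> shift whose shift amount is
+// not uniform, the adjustment above charges
+// 4 * (TCC_Basic + scalar-shift cost + TCC_Basic), i.e. one lane extract,
+// one scalar shift, and one lane insert per element.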
+
+InstructionCost
+WebAssemblyTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
+ TTI::TargetCostKind CostKind,
+ unsigned Index, Value *Op0, Value *Op1) {
+ InstructionCost Cost = BasicTTIImplBase::getVectorInstrCost(
+ Opcode, Val, CostKind, Index, Op0, Op1);
+
+ // SIMD128's insert/extract currently only take constant indices.
+ if (Index == -1u)
+ return Cost + 25 * TargetTransformInfo::TCC_Expensive;
+
+ return Cost;
+}
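+
+// The Index == -1u case above corresponds to a dynamic lane index. SIMD128
+// lane accesses require constant indices, so a variable index must be
+// emulated (e.g. through memory), which the flat 25 * TCC_Expensive penalty
+// reflects.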
+
+bool WebAssemblyTTIImpl::areInlineCompatible(const Function *Caller,
+ const Function *Callee) const {
+ // Allow inlining only when the Callee has a subset of the Caller's
+ // features. In principle, we should be able to inline regardless of any
+ // features because WebAssembly supports features at module granularity, not
+ // function granularity, but without this restriction it would be possible for
+ // a module to "forget" about features if all the functions that used them
+ // were inlined.
+ const TargetMachine &TM = getTLI()->getTargetMachine();
+
+ const FeatureBitset &CallerBits =
+ TM.getSubtargetImpl(*Caller)->getFeatureBits();
+ const FeatureBitset &CalleeBits =
+ TM.getSubtargetImpl(*Callee)->getFeatureBits();
+
+ return (CallerBits & CalleeBits) == CalleeBits;
+}
+
+void WebAssemblyTTIImpl::getUnrollingPreferences(
+ Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP,
+ OptimizationRemarkEmitter *ORE) const {
+ // Scan the loop: don't unroll loops with calls. This is a standard approach
+ // for most (all?) targets.
+ for (BasicBlock *BB : L->blocks())
+ for (Instruction &I : *BB)
+ if (isa<CallInst>(I) || isa<InvokeInst>(I))
+ if (const Function *F = cast<CallBase>(I).getCalledFunction())
+ if (isLoweredToCall(F))
+ return;
+
+ // The chosen threshold is within the range of 'LoopMicroOpBufferSize' of
+ // the various microarchitectures that use the BasicTTI implementation and
+ // has been selected through heuristics across multiple cores and runtimes.
+ UP.Partial = UP.Runtime = UP.UpperBound = true;
+ UP.PartialThreshold = 30;
+
+ // Avoid unrolling when optimizing for size.
+ UP.OptSizeThreshold = 0;
+ UP.PartialOptSizeThreshold = 0;
+
+  // Set the number of instructions optimized away when a "back edge" becomes
+  // a "fall through" to the default value of 2.
+ UP.BEInsns = 2;
+}
+
+bool WebAssemblyTTIImpl::supportsTailCalls() const {
+ return getST()->hasTailCall();
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
new file mode 100644
index 000000000000..a803fe5c1bbe
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
@@ -0,0 +1,83 @@
+//==- WebAssemblyTargetTransformInfo.h - WebAssembly-specific TTI -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines a TargetTransformInfo::Concept conforming object specific
+/// to the WebAssembly target machine.
+///
+/// It uses the target's detailed information to provide more precise answers to
+/// certain TTI queries, while letting the target independent and default TTI
+/// implementations handle the rest.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYTARGETTRANSFORMINFO_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYTARGETTRANSFORMINFO_H
+
+#include "WebAssemblyTargetMachine.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
+#include <algorithm>
+
+namespace llvm {
+
+class WebAssemblyTTIImpl final : public BasicTTIImplBase<WebAssemblyTTIImpl> {
+ typedef BasicTTIImplBase<WebAssemblyTTIImpl> BaseT;
+ typedef TargetTransformInfo TTI;
+ friend BaseT;
+
+ const WebAssemblySubtarget *ST;
+ const WebAssemblyTargetLowering *TLI;
+
+ const WebAssemblySubtarget *getST() const { return ST; }
+ const WebAssemblyTargetLowering *getTLI() const { return TLI; }
+
+public:
+ WebAssemblyTTIImpl(const WebAssemblyTargetMachine *TM, const Function &F)
+ : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
+ TLI(ST->getTargetLowering()) {}
+
+ /// \name Scalar TTI Implementations
+ /// @{
+
+ // TODO: Implement more Scalar TTI for WebAssembly
+
+ TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const;
+
+ void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
+ TTI::UnrollingPreferences &UP,
+ OptimizationRemarkEmitter *ORE) const;
+
+ /// @}
+
+ /// \name Vector TTI Implementations
+ /// @{
+
+ unsigned getNumberOfRegisters(unsigned ClassID) const;
+ TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;
+ InstructionCost getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
+ TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
+ TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
+ ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
+ const Instruction *CxtI = nullptr);
+ using BaseT::getVectorInstrCost;
+ InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
+ TTI::TargetCostKind CostKind,
+ unsigned Index, Value *Op0, Value *Op1);
+
+ /// @}
+
+ bool areInlineCompatible(const Function *Caller,
+ const Function *Callee) const;
+
+ bool supportsTailCalls() const;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp
new file mode 100644
index 000000000000..189111cef7d0
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp
@@ -0,0 +1,181 @@
+//===-- WebAssemblyUtilities.cpp - WebAssembly Utility Functions ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements several utility functions for WebAssembly.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyUtilities.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/MC/MCContext.h"
+using namespace llvm;
+
+// Function names in libc++abi and libunwind
+const char *const WebAssembly::CxaBeginCatchFn = "__cxa_begin_catch";
+const char *const WebAssembly::CxaRethrowFn = "__cxa_rethrow";
+const char *const WebAssembly::StdTerminateFn = "_ZSt9terminatev";
+const char *const WebAssembly::PersonalityWrapperFn =
+ "_Unwind_Wasm_CallPersonality";
+
+/// Test whether MI is a child of some other node in an expression tree.
+bool WebAssembly::isChild(const MachineInstr &MI,
+ const WebAssemblyFunctionInfo &MFI) {
+ if (MI.getNumOperands() == 0)
+ return false;
+ const MachineOperand &MO = MI.getOperand(0);
+ if (!MO.isReg() || MO.isImplicit() || !MO.isDef())
+ return false;
+ Register Reg = MO.getReg();
+ return Reg.isVirtual() && MFI.isVRegStackified(Reg);
+}
+
+bool WebAssembly::mayThrow(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::THROW:
+ case WebAssembly::THROW_S:
+ case WebAssembly::RETHROW:
+ case WebAssembly::RETHROW_S:
+ return true;
+ }
+ if (isCallIndirect(MI.getOpcode()))
+ return true;
+ if (!MI.isCall())
+ return false;
+
+ const MachineOperand &MO = getCalleeOp(MI);
+ assert(MO.isGlobal() || MO.isSymbol());
+
+ if (MO.isSymbol()) {
+ // Some intrinsics are lowered to calls to external symbols, which are then
+    // lowered to calls to library functions. Most libcalls don't throw, but
+    // we only list some of them here for now.
+ // TODO Consider adding 'nounwind' info in TargetLowering::CallLoweringInfo
+ // instead for more accurate info.
+ const char *Name = MO.getSymbolName();
+ if (strcmp(Name, "memcpy") == 0 || strcmp(Name, "memmove") == 0 ||
+ strcmp(Name, "memset") == 0)
+ return false;
+ return true;
+ }
+
+ const auto *F = dyn_cast<Function>(MO.getGlobal());
+ if (!F)
+ return true;
+ if (F->doesNotThrow())
+ return false;
+ // These functions never throw
+ if (F->getName() == CxaBeginCatchFn || F->getName() == PersonalityWrapperFn ||
+ F->getName() == StdTerminateFn)
+ return false;
+
+ // TODO Can we exclude call instructions that are marked as 'nounwind' in the
+  // original LLVM IR? (Even when the callee may throw.)
+ return true;
+}
+
+const MachineOperand &WebAssembly::getCalleeOp(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::CALL:
+ case WebAssembly::CALL_S:
+ case WebAssembly::RET_CALL:
+ case WebAssembly::RET_CALL_S:
+ return MI.getOperand(MI.getNumExplicitDefs());
+ case WebAssembly::CALL_INDIRECT:
+ case WebAssembly::CALL_INDIRECT_S:
+ case WebAssembly::RET_CALL_INDIRECT:
+ case WebAssembly::RET_CALL_INDIRECT_S:
+ return MI.getOperand(MI.getNumExplicitOperands() - 1);
+ default:
+ llvm_unreachable("Not a call instruction");
+ }
+}
+
+MCSymbolWasm *WebAssembly::getOrCreateFunctionTableSymbol(
+ MCContext &Ctx, const WebAssemblySubtarget *Subtarget) {
+ StringRef Name = "__indirect_function_table";
+ MCSymbolWasm *Sym = cast_or_null<MCSymbolWasm>(Ctx.lookupSymbol(Name));
+ if (Sym) {
+ if (!Sym->isFunctionTable())
+ Ctx.reportError(SMLoc(), "symbol is not a wasm funcref table");
+ } else {
+ Sym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(Name));
+ Sym->setFunctionTable();
+ // The default function table is synthesized by the linker.
+ Sym->setUndefined();
+ }
+ // MVP object files can't have symtab entries for tables.
+ if (!(Subtarget && Subtarget->hasReferenceTypes()))
+ Sym->setOmitFromLinkingSection();
+ return Sym;
+}
+
+MCSymbolWasm *WebAssembly::getOrCreateFuncrefCallTableSymbol(
+ MCContext &Ctx, const WebAssemblySubtarget *Subtarget) {
+ StringRef Name = "__funcref_call_table";
+ MCSymbolWasm *Sym = cast_or_null<MCSymbolWasm>(Ctx.lookupSymbol(Name));
+ if (Sym) {
+ if (!Sym->isFunctionTable())
+ Ctx.reportError(SMLoc(), "symbol is not a wasm funcref table");
+ } else {
+ Sym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(Name));
+
+    // Setting Weak ensures that only one table is left after linking when
+    // multiple modules define the table.
+ Sym->setWeak(true);
+
+ wasm::WasmLimits Limits = {0, 1, 1};
+ wasm::WasmTableType TableType = {wasm::WASM_TYPE_FUNCREF, Limits};
+ Sym->setType(wasm::WASM_SYMBOL_TYPE_TABLE);
+ Sym->setTableType(TableType);
+ }
+ // MVP object files can't have symtab entries for tables.
+ if (!(Subtarget && Subtarget->hasReferenceTypes()))
+ Sym->setOmitFromLinkingSection();
+ return Sym;
+}
+
+// Find a catch instruction from an EH pad.
+MachineInstr *WebAssembly::findCatch(MachineBasicBlock *EHPad) {
+ assert(EHPad->isEHPad());
+ auto Pos = EHPad->begin();
+ // Skip any label or debug instructions. Also skip 'end' marker instructions
+ // that may exist after marker placement in CFGStackify.
+ while (Pos != EHPad->end() &&
+ (Pos->isLabel() || Pos->isDebugInstr() || isMarker(Pos->getOpcode())))
+ Pos++;
+ if (Pos != EHPad->end() && WebAssembly::isCatch(Pos->getOpcode()))
+ return &*Pos;
+ return nullptr;
+}
+
+unsigned WebAssembly::getCopyOpcodeForRegClass(const TargetRegisterClass *RC) {
+ assert(RC != nullptr);
+ switch (RC->getID()) {
+ case WebAssembly::I32RegClassID:
+ return WebAssembly::COPY_I32;
+ case WebAssembly::I64RegClassID:
+ return WebAssembly::COPY_I64;
+ case WebAssembly::F32RegClassID:
+ return WebAssembly::COPY_F32;
+ case WebAssembly::F64RegClassID:
+ return WebAssembly::COPY_F64;
+ case WebAssembly::V128RegClassID:
+ return WebAssembly::COPY_V128;
+ case WebAssembly::FUNCREFRegClassID:
+ return WebAssembly::COPY_FUNCREF;
+ case WebAssembly::EXTERNREFRegClassID:
+ return WebAssembly::COPY_EXTERNREF;
+ default:
+ llvm_unreachable("Unexpected register class");
+ }
+}
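+
+// Hypothetical usage sketch (names assumed): expanding a generic COPY into
+// the matching WebAssembly copy instruction in a late machine pass:
+//
+//   unsigned Opc = WebAssembly::getCopyOpcodeForRegClass(MRI.getRegClass(Reg));
+//   BuildMI(MBB, InsertPt, MI.getDebugLoc(), TII.get(Opc), DestReg)
+//       .addReg(SrcReg);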
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h
new file mode 100644
index 000000000000..7f28fb1858a6
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h
@@ -0,0 +1,70 @@
+//===-- WebAssemblyUtilities - WebAssembly Utility Functions ---*- C++ -*-====//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the declaration of the WebAssembly-specific
+/// utility functions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_UTILS_WEBASSEMBLYUTILITIES_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_UTILS_WEBASSEMBLYUTILITIES_H
+
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineInstr;
+class MachineOperand;
+class MCContext;
+class MCSymbolWasm;
+class TargetRegisterClass;
+class WebAssemblyFunctionInfo;
+class WebAssemblySubtarget;
+
+namespace WebAssembly {
+
+bool isChild(const MachineInstr &MI, const WebAssemblyFunctionInfo &MFI);
+bool mayThrow(const MachineInstr &MI);
+
+// Exception-related function names
+extern const char *const ClangCallTerminateFn;
+extern const char *const CxaBeginCatchFn;
+extern const char *const CxaRethrowFn;
+extern const char *const StdTerminateFn;
+extern const char *const PersonalityWrapperFn;
+
+/// Returns the operand number of a callee, assuming the argument is a call
+/// instruction.
+const MachineOperand &getCalleeOp(const MachineInstr &MI);
+
+/// Returns the __indirect_function_table, for use in call_indirect and in
+/// function bitcasts.
+MCSymbolWasm *
+getOrCreateFunctionTableSymbol(MCContext &Ctx,
+ const WebAssemblySubtarget *Subtarget);
+
+/// Returns the __funcref_call_table, for use in funcref calls when lowered to
+/// table.set + call_indirect.
+MCSymbolWasm *
+getOrCreateFuncrefCallTableSymbol(MCContext &Ctx,
+ const WebAssemblySubtarget *Subtarget);
+
+/// Find a catch instruction from an EH pad. Returns null if no catch
+/// instruction is found or if the catch is in an invalid location.
+MachineInstr *findCatch(MachineBasicBlock *EHPad);
+
+/// Returns the appropriate copy opcode for the given register class.
+unsigned getCopyOpcodeForRegClass(const TargetRegisterClass *RC);
+
+} // end namespace WebAssembly
+
+} // end namespace llvm
+
+#endif