diff options
Diffstat (limited to 'contrib/llvm/lib/Target/WebAssembly')
92 files changed, 4569 insertions, 3355 deletions
diff --git a/contrib/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp b/contrib/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp index 0a5908f43790..09628e872dd5 100644 --- a/contrib/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp @@ -1,9 +1,8 @@ //==- WebAssemblyAsmParser.cpp - Assembler for WebAssembly -*- C++ -*-==// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -16,12 +15,15 @@ #include "MCTargetDesc/WebAssemblyMCTargetDesc.h" #include "MCTargetDesc/WebAssemblyTargetStreamer.h" +#include "TargetInfo/WebAssemblyTargetInfo.h" #include "WebAssembly.h" #include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCParser/MCParsedAsmOperand.h" #include "llvm/MC/MCParser/MCTargetAsmParser.h" +#include "llvm/MC/MCSectionWasm.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSubtargetInfo.h" #include "llvm/MC/MCSymbol.h" @@ -87,9 +89,8 @@ struct WebAssemblyOperand : public MCParsedAsmOperand { } bool isToken() const override { return Kind == Token; } - bool isImm() const override { - return Kind == Integer || Kind == Float || Kind == Symbol; - } + bool isImm() const override { return Kind == Integer || Kind == Symbol; } + bool isFPImm() const { return Kind == Float; } bool isMem() const override { return false; } bool isReg() const override { return false; } bool isBrList() const { return Kind == BrList; } @@ -116,12 +117,18 @@ struct WebAssemblyOperand : public 
MCParsedAsmOperand { assert(N == 1 && "Invalid number of operands!"); if (Kind == Integer) Inst.addOperand(MCOperand::createImm(Int.Val)); - else if (Kind == Float) - Inst.addOperand(MCOperand::createFPImm(Flt.Val)); else if (Kind == Symbol) Inst.addOperand(MCOperand::createExpr(Sym.Exp)); else - llvm_unreachable("Should be immediate or symbol!"); + llvm_unreachable("Should be integer immediate or symbol!"); + } + + void addFPImmOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + if (Kind == Float) + Inst.addOperand(MCOperand::createFPImm(Flt.Val)); + else + llvm_unreachable("Should be float immediate!"); } void addBrListOperands(MCInst &Inst, unsigned N) const { @@ -170,6 +177,8 @@ class WebAssemblyAsmParser final : public MCTargetAsmParser { FunctionStart, FunctionLocals, Instructions, + EndFunction, + DataSection, } CurrentState = FileStart; // For ensuring blocks are properly nested. @@ -187,6 +196,7 @@ class WebAssemblyAsmParser final : public MCTargetAsmParser { // We track this to see if a .functype following a label is the same, // as this is how we recognize the start of a function. 
MCSymbol *LastLabel = nullptr; + MCSymbol *LastFunctionLabel = nullptr; public: WebAssemblyAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser, @@ -250,13 +260,13 @@ public: } bool ensureEmptyNestingStack() { - auto err = !NestingStack.empty(); + auto Err = !NestingStack.empty(); while (!NestingStack.empty()) { error(Twine("Unmatched block construct(s) at function end: ") + nestingString(NestingStack.back()).first); NestingStack.pop_back(); } - return err; + return Err; } bool isNext(AsmToken::TokenKind Kind) { @@ -298,6 +308,8 @@ public: Type == "i32x4" || Type == "i64x2" || Type == "f32x4" || Type == "f64x2") return wasm::ValType::V128; + if (Type == "exnref") + return wasm::ValType::EXNREF; return Optional<wasm::ValType>(); } @@ -308,7 +320,7 @@ public: .Case("f32", WebAssembly::ExprType::F32) .Case("f64", WebAssembly::ExprType::F64) .Case("v128", WebAssembly::ExprType::V128) - .Case("except_ref", WebAssembly::ExprType::ExceptRef) + .Case("exnref", WebAssembly::ExprType::Exnref) .Case("void", WebAssembly::ExprType::Void) .Default(WebAssembly::ExprType::Invalid); } @@ -317,7 +329,7 @@ public: while (Lexer.is(AsmToken::Identifier)) { auto Type = parseType(Lexer.getTok().getString()); if (!Type) - return true; + return error("unknown type: ", Lexer.getTok()); Types.push_back(Type.getValue()); Parser.Lex(); if (!isNext(AsmToken::Comma)) @@ -337,27 +349,67 @@ public: Parser.Lex(); } - bool parseOperandStartingWithInteger(bool IsNegative, OperandVector &Operands, - StringRef InstName) { - parseSingleInteger(IsNegative, Operands); + bool parseSingleFloat(bool IsNegative, OperandVector &Operands) { + auto &Flt = Lexer.getTok(); + double Val; + if (Flt.getString().getAsDouble(Val, false)) + return error("Cannot parse real: ", Flt); + if (IsNegative) + Val = -Val; + Operands.push_back(make_unique<WebAssemblyOperand>( + WebAssemblyOperand::Float, Flt.getLoc(), Flt.getEndLoc(), + WebAssemblyOperand::FltOp{Val})); + Parser.Lex(); + return false; + } + + bool 
parseSpecialFloatMaybe(bool IsNegative, OperandVector &Operands) { + if (Lexer.isNot(AsmToken::Identifier)) + return true; + auto &Flt = Lexer.getTok(); + auto S = Flt.getString(); + double Val; + if (S.compare_lower("infinity") == 0) { + Val = std::numeric_limits<double>::infinity(); + } else if (S.compare_lower("nan") == 0) { + Val = std::numeric_limits<double>::quiet_NaN(); + } else { + return true; + } + if (IsNegative) + Val = -Val; + Operands.push_back(make_unique<WebAssemblyOperand>( + WebAssemblyOperand::Float, Flt.getLoc(), Flt.getEndLoc(), + WebAssemblyOperand::FltOp{Val})); + Parser.Lex(); + return false; + } + + bool checkForP2AlignIfLoadStore(OperandVector &Operands, StringRef InstName) { // FIXME: there is probably a cleaner way to do this. - auto IsLoadStore = InstName.startswith("load") || - InstName.startswith("store") || - InstName.startswith("atomic_load") || - InstName.startswith("atomic_store"); - if (IsLoadStore) { - // Parse load/store operands of the form: offset align - auto &Offset = Lexer.getTok(); - if (Offset.is(AsmToken::Integer)) { + auto IsLoadStore = InstName.find(".load") != StringRef::npos || + InstName.find(".store") != StringRef::npos; + auto IsAtomic = InstName.find("atomic.") != StringRef::npos; + if (IsLoadStore || IsAtomic) { + // Parse load/store operands of the form: offset:p2align=align + if (IsLoadStore && isNext(AsmToken::Colon)) { + auto Id = expectIdent(); + if (Id != "p2align") + return error("Expected p2align, instead got: " + Id); + if (expect(AsmToken::Equal, "=")) + return true; + if (!Lexer.is(AsmToken::Integer)) + return error("Expected integer constant"); parseSingleInteger(false, Operands); } else { - // Alignment not specified. - // FIXME: correctly derive a default from the instruction. + // Alignment not specified (or atomics, must use default alignment). // We can't just call WebAssembly::GetDefaultP2Align since we don't have - // an opcode until after the assembly matcher. 
+ // an opcode until after the assembly matcher, so set a default to fix + // up later. + auto Tok = Lexer.getTok(); Operands.push_back(make_unique<WebAssemblyOperand>( - WebAssemblyOperand::Integer, Offset.getLoc(), Offset.getEndLoc(), - WebAssemblyOperand::IntOp{0})); + WebAssemblyOperand::Integer, Tok.getLoc(), Tok.getEndLoc(), + WebAssemblyOperand::IntOp{-1})); } } return false; @@ -400,51 +452,45 @@ public: Operands.push_back(make_unique<WebAssemblyOperand>( WebAssemblyOperand::Token, NameLoc, SMLoc::getFromPointer(Name.end()), WebAssemblyOperand::TokOp{Name})); - auto NamePair = Name.split('.'); - // If no '.', there is no type prefix. - auto BaseName = NamePair.second.empty() ? NamePair.first : NamePair.second; // If this instruction is part of a control flow structure, ensure // proper nesting. bool ExpectBlockType = false; - if (BaseName == "block") { + if (Name == "block") { push(Block); ExpectBlockType = true; - } else if (BaseName == "loop") { + } else if (Name == "loop") { push(Loop); ExpectBlockType = true; - } else if (BaseName == "try") { + } else if (Name == "try") { push(Try); ExpectBlockType = true; - } else if (BaseName == "if") { + } else if (Name == "if") { push(If); ExpectBlockType = true; - } else if (BaseName == "else") { - if (pop(BaseName, If)) + } else if (Name == "else") { + if (pop(Name, If)) return true; push(Else); - } else if (BaseName == "catch") { - if (pop(BaseName, Try)) - return true; - push(Try); - } else if (BaseName == "catch_all") { - if (pop(BaseName, Try)) + } else if (Name == "catch") { + if (pop(Name, Try)) return true; push(Try); - } else if (BaseName == "end_if") { - if (pop(BaseName, If, Else)) + } else if (Name == "end_if") { + if (pop(Name, If, Else)) return true; - } else if (BaseName == "end_try") { - if (pop(BaseName, Try)) + } else if (Name == "end_try") { + if (pop(Name, Try)) return true; - } else if (BaseName == "end_loop") { - if (pop(BaseName, Loop)) + } else if (Name == "end_loop") { + if (pop(Name, 
Loop)) return true; - } else if (BaseName == "end_block") { - if (pop(BaseName, Block)) + } else if (Name == "end_block") { + if (pop(Name, Block)) return true; - } else if (BaseName == "end_function") { - if (pop(BaseName, Function) || ensureEmptyNestingStack()) + } else if (Name == "end_function") { + CurrentState = EndFunction; + if (pop(Name, Function) || ensureEmptyNestingStack()) return true; } @@ -452,6 +498,8 @@ public: auto &Tok = Lexer.getTok(); switch (Tok.getKind()) { case AsmToken::Identifier: { + if (!parseSpecialFloatMaybe(false, Operands)) + break; auto &Id = Lexer.getTok(); if (ExpectBlockType) { // Assume this identifier is a block_type. @@ -464,33 +512,39 @@ public: // Assume this identifier is a label. const MCExpr *Val; SMLoc End; - if (Parser.parsePrimaryExpr(Val, End)) + if (Parser.parseExpression(Val, End)) return error("Cannot parse symbol: ", Lexer.getTok()); Operands.push_back(make_unique<WebAssemblyOperand>( WebAssemblyOperand::Symbol, Id.getLoc(), Id.getEndLoc(), WebAssemblyOperand::SymOp{Val})); + if (checkForP2AlignIfLoadStore(Operands, Name)) + return true; } break; } case AsmToken::Minus: Parser.Lex(); - if (Lexer.isNot(AsmToken::Integer)) - return error("Expected integer instead got: ", Lexer.getTok()); - if (parseOperandStartingWithInteger(true, Operands, BaseName)) - return true; + if (Lexer.is(AsmToken::Integer)) { + parseSingleInteger(true, Operands); + if (checkForP2AlignIfLoadStore(Operands, Name)) + return true; + } else if(Lexer.is(AsmToken::Real)) { + if (parseSingleFloat(true, Operands)) + return true; + } else if (!parseSpecialFloatMaybe(true, Operands)) { + } else { + return error("Expected numeric constant instead got: ", + Lexer.getTok()); + } break; case AsmToken::Integer: - if (parseOperandStartingWithInteger(false, Operands, BaseName)) + parseSingleInteger(false, Operands); + if (checkForP2AlignIfLoadStore(Operands, Name)) return true; break; case AsmToken::Real: { - double Val; - if 
(Tok.getString().getAsDouble(Val, false)) - return error("Cannot parse real: ", Tok); - Operands.push_back(make_unique<WebAssemblyOperand>( - WebAssemblyOperand::Float, Tok.getLoc(), Tok.getEndLoc(), - WebAssemblyOperand::FltOp{Val})); - Parser.Lex(); + if (parseSingleFloat(false, Operands)) + return true; break; } case AsmToken::LCurly: { @@ -547,6 +601,17 @@ public: return false; } + bool CheckDataSection() { + if (CurrentState != DataSection) { + auto WS = cast<MCSectionWasm>(getStreamer().getCurrentSection().first); + if (WS && WS->getKind().isText()) + return error("data directive must occur in a data segment: ", + Lexer.getTok()); + } + CurrentState = DataSection; + return false; + } + // This function processes wasm-specific directives streamed to // WebAssemblyTargetStreamer, all others go to the generic parser // (see WasmAsmParser). @@ -561,6 +626,7 @@ public: auto &Out = getStreamer(); auto &TOut = reinterpret_cast<WebAssemblyTargetStreamer &>(*Out.getTargetStreamer()); + auto &Ctx = Out.getContext(); // TODO: any time we return an error, at least one token must have been // consumed, otherwise this will not signal an error to the caller. @@ -578,8 +644,7 @@ public: if (!Type) return error("Unknown type in .globaltype directive: ", TypeTok); // Now set this symbol with the correct type. - auto WasmSym = cast<MCSymbolWasm>( - TOut.getStreamer().getContext().getOrCreateSymbol(SymName)); + auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName)); WasmSym->setType(wasm::WASM_SYMBOL_TYPE_GLOBAL); WasmSym->setGlobalType( wasm::WasmGlobalType{uint8_t(Type.getValue()), true}); @@ -597,13 +662,13 @@ public: auto SymName = expectIdent(); if (SymName.empty()) return true; - auto WasmSym = cast<MCSymbolWasm>( - TOut.getStreamer().getContext().getOrCreateSymbol(SymName)); + auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName)); if (CurrentState == Label && WasmSym == LastLabel) { // This .functype indicates a start of a function. 
if (ensureEmptyNestingStack()) return true; CurrentState = FunctionStart; + LastFunctionLabel = LastLabel; push(Function); } auto Signature = make_unique<wasm::WasmSignature>(); @@ -621,8 +686,7 @@ public: auto SymName = expectIdent(); if (SymName.empty()) return true; - auto WasmSym = cast<MCSymbolWasm>( - TOut.getStreamer().getContext().getOrCreateSymbol(SymName)); + auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName)); auto Signature = make_unique<wasm::WasmSignature>(); if (parseRegTypeList(Signature->Params)) return true; @@ -646,6 +710,30 @@ public: return expect(AsmToken::EndOfStatement, "EOL"); } + if (DirectiveID.getString() == ".int8" || + DirectiveID.getString() == ".int16" || + DirectiveID.getString() == ".int32" || + DirectiveID.getString() == ".int64") { + if (CheckDataSection()) return true; + const MCExpr *Val; + SMLoc End; + if (Parser.parseExpression(Val, End)) + return error("Cannot parse .int expression: ", Lexer.getTok()); + size_t NumBits = 0; + DirectiveID.getString().drop_front(4).getAsInteger(10, NumBits); + Out.EmitValue(Val, NumBits / 8, End); + return expect(AsmToken::EndOfStatement, "EOL"); + } + + if (DirectiveID.getString() == ".asciz") { + if (CheckDataSection()) return true; + std::string S; + if (Parser.parseEscapedString(S)) + return error("Cannot parse string constant: ", Lexer.getTok()); + Out.EmitBytes(StringRef(S.c_str(), S.length() + 1)); + return expect(AsmToken::EndOfStatement, "EOL"); + } + return true; // We didn't process this directive. } @@ -667,8 +755,19 @@ public: *Out.getTargetStreamer()); TOut.emitLocal(SmallVector<wasm::ValType, 0>()); } - CurrentState = Instructions; + // Fix unknown p2align operands. 
+ auto Align = WebAssembly::GetDefaultP2AlignAny(Inst.getOpcode()); + if (Align != -1U) { + auto &Op0 = Inst.getOperand(0); + if (Op0.getImm() == -1) + Op0.setImm(Align); + } Out.EmitInstruction(Inst, getSTI()); + if (CurrentState == EndFunction) { + onEndOfFunction(); + } else { + CurrentState = Instructions; + } return false; } case Match_MissingFeature: @@ -694,6 +793,35 @@ public: llvm_unreachable("Implement any new match types added!"); } + void doBeforeLabelEmit(MCSymbol *Symbol) override { + // Start a new section for the next function automatically, since our + // object writer expects each function to have its own section. This way + // The user can't forget this "convention". + auto SymName = Symbol->getName(); + if (SymName.startswith(".L")) + return; // Local Symbol. + // Only create a new text section if we're already in one. + auto CWS = cast<MCSectionWasm>(getStreamer().getCurrentSection().first); + if (!CWS || !CWS->getKind().isText()) + return; + auto SecName = ".text." + SymName; + auto WS = getContext().getWasmSection(SecName, SectionKind::getText()); + getStreamer().SwitchSection(WS); + } + + void onEndOfFunction() { + // Automatically output a .size directive, so it becomes optional for the + // user. 
+ if (!LastFunctionLabel) return; + auto TempSym = getContext().createLinkerPrivateTempSymbol(); + getStreamer().EmitLabel(TempSym); + auto Start = MCSymbolRefExpr::create(LastFunctionLabel, getContext()); + auto End = MCSymbolRefExpr::create(TempSym, getContext()); + auto Expr = + MCBinaryExpr::create(MCBinaryExpr::Sub, End, Start, getContext()); + getStreamer().emitELFSize(LastFunctionLabel, Expr); + } + void onEndOfFile() override { ensureEmptyNestingStack(); } }; } // end anonymous namespace diff --git a/contrib/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp b/contrib/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp index 6acc9b20eed2..f9bf3f85d30f 100644 --- a/contrib/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp @@ -1,9 +1,8 @@ //==- WebAssemblyDisassembler.cpp - Disassembler for WebAssembly -*- C++ -*-==// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -15,7 +14,9 @@ /// //===----------------------------------------------------------------------===// +#include "MCTargetDesc/WebAssemblyInstPrinter.h" #include "MCTargetDesc/WebAssemblyMCTargetDesc.h" +#include "TargetInfo/WebAssemblyTargetInfo.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCDisassembler/MCDisassembler.h" #include "llvm/MC/MCFixedLenDisassembler.h" @@ -45,6 +46,10 @@ class WebAssemblyDisassembler final : public MCDisassembler { ArrayRef<uint8_t> Bytes, uint64_t Address, raw_ostream &VStream, raw_ostream &CStream) const override; + DecodeStatus onSymbolStart(StringRef Name, uint64_t &Size, + ArrayRef<uint8_t> Bytes, uint64_t Address, + raw_ostream &VStream, + raw_ostream &CStream) const override; public: WebAssemblyDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx, @@ -77,7 +82,7 @@ static int nextByte(ArrayRef<uint8_t> Bytes, uint64_t &Size) { } static bool nextLEB(int64_t &Val, ArrayRef<uint8_t> Bytes, uint64_t &Size, - bool Signed = false) { + bool Signed) { unsigned N = 0; const char *Error = nullptr; Val = Signed ? 
decodeSLEB128(Bytes.data() + Size, &N, @@ -104,9 +109,8 @@ template <typename T> bool parseImmediate(MCInst &MI, uint64_t &Size, ArrayRef<uint8_t> Bytes) { if (Size + sizeof(T) > Bytes.size()) return false; - T Val; - memcpy(&Val, Bytes.data() + Size, sizeof(T)); - support::endian::byte_swap<T, support::endianness::little>(Val); + T Val = support::endian::read<T, support::endianness::little, 1>( + Bytes.data() + Size); Size += sizeof(T); if (std::is_floating_point<T>::value) { MI.addOperand(MCOperand::createFPImm(static_cast<double>(Val))); @@ -116,6 +120,41 @@ bool parseImmediate(MCInst &MI, uint64_t &Size, ArrayRef<uint8_t> Bytes) { return true; } +MCDisassembler::DecodeStatus WebAssemblyDisassembler::onSymbolStart( + StringRef Name, uint64_t &Size, ArrayRef<uint8_t> Bytes, uint64_t Address, + raw_ostream &VStream, raw_ostream &CStream) const { + Size = 0; + if (Address == 0) { + // Start of a code section: we're parsing only the function count. + int64_t FunctionCount; + if (!nextLEB(FunctionCount, Bytes, Size, false)) + return MCDisassembler::Fail; + outs() << " # " << FunctionCount << " functions in section."; + } else { + // Parse the start of a single function. 
+ int64_t BodySize, LocalEntryCount; + if (!nextLEB(BodySize, Bytes, Size, false) || + !nextLEB(LocalEntryCount, Bytes, Size, false)) + return MCDisassembler::Fail; + if (LocalEntryCount) { + outs() << " .local "; + for (int64_t I = 0; I < LocalEntryCount; I++) { + int64_t Count, Type; + if (!nextLEB(Count, Bytes, Size, false) || + !nextLEB(Type, Bytes, Size, false)) + return MCDisassembler::Fail; + for (int64_t J = 0; J < Count; J++) { + if (I || J) + outs() << ", "; + outs() << WebAssembly::anyTypeToString(Type); + } + } + } + } + outs() << "\n"; + return MCDisassembler::Success; +} + MCDisassembler::DecodeStatus WebAssemblyDisassembler::getInstruction( MCInst &MI, uint64_t &Size, ArrayRef<uint8_t> Bytes, uint64_t /*Address*/, raw_ostream & /*OS*/, raw_ostream &CS) const { @@ -138,7 +177,7 @@ MCDisassembler::DecodeStatus WebAssemblyDisassembler::getInstruction( if (!WasmInst) return MCDisassembler::Fail; int64_t PrefixedOpc; - if (!nextLEB(PrefixedOpc, Bytes, Size)) + if (!nextLEB(PrefixedOpc, Bytes, Size, false)) return MCDisassembler::Fail; if (PrefixedOpc < 0 || PrefixedOpc >= WebAssemblyInstructionTableSize) return MCDisassembler::Fail; @@ -161,6 +200,7 @@ MCDisassembler::DecodeStatus WebAssemblyDisassembler::getInstruction( case WebAssembly::OPERAND_OFFSET32: case WebAssembly::OPERAND_P2ALIGN: case WebAssembly::OPERAND_TYPEINDEX: + case WebAssembly::OPERAND_EVENT: case MCOI::OPERAND_IMMEDIATE: { if (!parseLEBImmediate(MI, Size, Bytes, false)) return MCDisassembler::Fail; diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp index 0726dd481174..70b409cf4a90 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyAsmBackend.cpp - WebAssembly Assembler Backend ---------===// // -// The LLVM Compiler 
Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -36,7 +35,6 @@ class WebAssemblyAsmBackend final : public MCAsmBackend { public: explicit WebAssemblyAsmBackend(bool Is64Bit) : MCAsmBackend(support::little), Is64Bit(Is64Bit) {} - ~WebAssemblyAsmBackend() override {} unsigned getNumFixupKinds() const override { return WebAssembly::NumTargetFixupKinds; @@ -77,9 +75,9 @@ WebAssemblyAsmBackend::getFixupKindInfo(MCFixupKind Kind) const { // WebAssemblyFixupKinds.h. // // Name Offset (bits) Size (bits) Flags - {"fixup_code_sleb128_i32", 0, 5 * 8, 0}, - {"fixup_code_sleb128_i64", 0, 10 * 8, 0}, - {"fixup_code_uleb128_i32", 0, 5 * 8, 0}, + {"fixup_sleb128_i32", 0, 5 * 8, 0}, + {"fixup_sleb128_i64", 0, 10 * 8, 0}, + {"fixup_uleb128_i32", 0, 5 * 8, 0}, }; if (Kind < FirstTargetFixupKind) @@ -92,7 +90,7 @@ WebAssemblyAsmBackend::getFixupKindInfo(MCFixupKind Kind) const { bool WebAssemblyAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const { - for (uint64_t i = 0; i < Count; ++i) + for (uint64_t I = 0; I < Count; ++I) OS << char(WebAssembly::Nop); return true; @@ -119,8 +117,8 @@ void WebAssemblyAsmBackend::applyFixup(const MCAssembler &Asm, // For each byte of the fragment that the fixup touches, mask in the // bits from the fixup value. 
- for (unsigned i = 0; i != NumBytes; ++i) - Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff); + for (unsigned I = 0; I != NumBytes; ++I) + Data[Offset + I] |= uint8_t((Value >> (I * 8)) & 0xff); } std::unique_ptr<MCObjectTargetWriter> diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h index c2fac5f93a2f..33e8de282955 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h @@ -1,9 +1,8 @@ //=- WebAssemblyFixupKinds.h - WebAssembly Specific Fixup Entries -*- C++ -*-=// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// @@ -15,9 +14,9 @@ namespace llvm { namespace WebAssembly { enum Fixups { - fixup_code_sleb128_i32 = FirstTargetFixupKind, // 32-bit signed - fixup_code_sleb128_i64, // 64-bit signed - fixup_code_uleb128_i32, // 32-bit unsigned + fixup_sleb128_i32 = FirstTargetFixupKind, // 32-bit signed + fixup_sleb128_i64, // 64-bit signed + fixup_uleb128_i32, // 32-bit unsigned // Marker LastTargetFixupKind, diff --git a/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp index 15532d7ff1a6..b5d4d369b726 100644 --- a/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp @@ -1,9 +1,8 @@ //=- WebAssemblyInstPrinter.cpp - WebAssembly assembly instruction printing -=// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -12,7 +11,7 @@ /// //===----------------------------------------------------------------------===// -#include "InstPrinter/WebAssemblyInstPrinter.h" +#include "MCTargetDesc/WebAssemblyInstPrinter.h" #include "MCTargetDesc/WebAssemblyMCTargetDesc.h" #include "WebAssembly.h" #include "WebAssemblyMachineFunctionInfo.h" @@ -53,15 +52,15 @@ void WebAssemblyInstPrinter::printInst(const MCInst *MI, raw_ostream &OS, // Print any additional variadic operands. 
const MCInstrDesc &Desc = MII.get(MI->getOpcode()); if (Desc.isVariadic()) - for (auto i = Desc.getNumOperands(), e = MI->getNumOperands(); i < e; ++i) { + for (auto I = Desc.getNumOperands(), E = MI->getNumOperands(); I < E; ++I) { // FIXME: For CALL_INDIRECT_VOID, don't print a leading comma, because // we have an extra flags operand which is not currently printed, for // compatiblity reasons. - if (i != 0 && ((MI->getOpcode() != WebAssembly::CALL_INDIRECT_VOID && + if (I != 0 && ((MI->getOpcode() != WebAssembly::CALL_INDIRECT_VOID && MI->getOpcode() != WebAssembly::CALL_INDIRECT_VOID_S) || - i != Desc.getNumOperands())) + I != Desc.getNumOperands())) OS << ", "; - printOperand(MI, i, OS); + printOperand(MI, I, OS); } // Print any added annotation. @@ -123,61 +122,48 @@ void WebAssemblyInstPrinter::printInst(const MCInst *MI, raw_ostream &OS, } break; - case WebAssembly::CATCH_I32: - case WebAssembly::CATCH_I32_S: - case WebAssembly::CATCH_I64: - case WebAssembly::CATCH_I64_S: - case WebAssembly::CATCH_ALL: - case WebAssembly::CATCH_ALL_S: - // There can be multiple catch instructions for one try instruction, so we - // print a label only for the first 'catch' label. - if (LastSeenEHInst != CATCH) { - if (EHPadStack.empty()) { - printAnnotation(OS, "try-catch mismatch!"); - } else { - printAnnotation(OS, - "catch" + utostr(EHPadStack.pop_back_val()) + ':'); - } + case WebAssembly::CATCH: + case WebAssembly::CATCH_S: + if (EHPadStack.empty()) { + printAnnotation(OS, "try-catch mismatch!"); + } else { + printAnnotation(OS, "catch" + utostr(EHPadStack.pop_back_val()) + ':'); } - LastSeenEHInst = CATCH; break; } // Annotate any control flow label references. - unsigned NumFixedOperands = Desc.NumOperands; - SmallSet<uint64_t, 8> Printed; - for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) { - // See if this operand denotes a basic block target. - if (i < NumFixedOperands) { - // A non-variable_ops operand, check its type. 
- if (Desc.OpInfo[i].OperandType != WebAssembly::OPERAND_BASIC_BLOCK) - continue; + + // rethrow instruction does not take any depth argument and rethrows to the + // nearest enclosing catch scope, if any. If there's no enclosing catch + // scope, it throws up to the caller. + if (Opc == WebAssembly::RETHROW || Opc == WebAssembly::RETHROW_S) { + if (EHPadStack.empty()) { + printAnnotation(OS, "to caller"); } else { - // A variable_ops operand, which currently can be immediates (used in - // br_table) which are basic block targets, or for call instructions - // when using -wasm-keep-registers (in which case they are registers, - // and should not be processed). - if (!MI->getOperand(i).isImm()) - continue; + printAnnotation(OS, "down to catch" + utostr(EHPadStack.back())); } - uint64_t Depth = MI->getOperand(i).getImm(); - if (!Printed.insert(Depth).second) - continue; - if (Opc == WebAssembly::RETHROW || Opc == WebAssembly::RETHROW_S) { - if (Depth > EHPadStack.size()) { - printAnnotation(OS, "Invalid depth argument!"); - } else if (Depth == EHPadStack.size()) { - // This can happen when rethrow instruction breaks out of all nests - // and throws up to the current function's caller. - printAnnotation(OS, utostr(Depth) + ": " + "to caller"); + } else { + unsigned NumFixedOperands = Desc.NumOperands; + SmallSet<uint64_t, 8> Printed; + for (unsigned I = 0, E = MI->getNumOperands(); I < E; ++I) { + // See if this operand denotes a basic block target. + if (I < NumFixedOperands) { + // A non-variable_ops operand, check its type. 
+ if (Desc.OpInfo[I].OperandType != WebAssembly::OPERAND_BASIC_BLOCK) + continue; } else { - uint64_t CatchNo = EHPadStack.rbegin()[Depth]; - printAnnotation(OS, utostr(Depth) + ": " + "down to catch" + - utostr(CatchNo)); + // A variable_ops operand, which currently can be immediates (used in + // br_table) which are basic block targets, or for call instructions + // when using -wasm-keep-registers (in which case they are registers, + // and should not be processed). + if (!MI->getOperand(I).isImm()) + continue; } - - } else { + uint64_t Depth = MI->getOperand(I).getImm(); + if (!Printed.insert(Depth).second) + continue; if (Depth >= ControlFlowStack.size()) { printAnnotation(OS, "Invalid depth argument!"); } else { @@ -206,13 +192,13 @@ static std::string toString(const APFloat &FP) { // Use C99's hexadecimal floating-point representation. static const size_t BufBytes = 128; - char buf[BufBytes]; + char Buf[BufBytes]; auto Written = FP.convertToHexString( - buf, /*hexDigits=*/0, /*upperCase=*/false, APFloat::rmNearestTiesToEven); + Buf, /*HexDigits=*/0, /*UpperCase=*/false, APFloat::rmNearestTiesToEven); (void)Written; assert(Written != 0); assert(Written < BufBytes); - return buf; + return Buf; } void WebAssemblyInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, @@ -296,8 +282,8 @@ const char *llvm::WebAssembly::anyTypeToString(unsigned Ty) { return "funcref"; case wasm::WASM_TYPE_FUNC: return "func"; - case wasm::WASM_TYPE_EXCEPT_REF: - return "except_ref"; + case wasm::WASM_TYPE_EXNREF: + return "exnref"; case wasm::WASM_TYPE_NORESULT: return "void"; default: diff --git a/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.h index 5ad45c7d5c7f..b979de5028bf 100644 --- a/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.h @@ -1,9 +1,8 @@ // 
WebAssemblyInstPrinter.h - Print wasm MCInst to assembly syntax -*- C++ -*-// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp index 44fcc129c39e..8f6531563e1b 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyMCAsmInfo.cpp - WebAssembly asm properties -------------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -20,7 +19,7 @@ using namespace llvm; #define DEBUG_TYPE "wasm-mc-asm-info" -WebAssemblyMCAsmInfo::~WebAssemblyMCAsmInfo() {} +WebAssemblyMCAsmInfo::~WebAssemblyMCAsmInfo() = default; // anchor. WebAssemblyMCAsmInfo::WebAssemblyMCAsmInfo(const Triple &T) { CodePointerSize = CalleeSaveStackSlotSize = T.isArch64Bit() ? 
8 : 4; diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h index 8627a6e40c6a..9efbbf881f59 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h @@ -1,9 +1,8 @@ //===-- WebAssemblyMCAsmInfo.h - WebAssembly asm properties -----*- C++ -*-===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp index 065a4dc94ca6..44b6d6a968a9 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp @@ -1,9 +1,8 @@ //=- WebAssemblyMCCodeEmitter.cpp - Convert WebAssembly code to machine code -// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -49,7 +48,7 @@ class WebAssemblyMCCodeEmitter final : public MCCodeEmitter { const MCSubtargetInfo &STI) const override; public: - WebAssemblyMCCodeEmitter(const MCInstrInfo &mcii) : MCII(mcii) {} + WebAssemblyMCCodeEmitter(const MCInstrInfo &MCII) : MCII(MCII) {} }; } // end anonymous namespace @@ -82,14 +81,14 @@ void WebAssemblyMCCodeEmitter::encodeInstruction( encodeULEB128(MI.getNumOperands() - 2, OS); const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); - for (unsigned i = 0, e = MI.getNumOperands(); i < e; ++i) { - const MCOperand &MO = MI.getOperand(i); + for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) { + const MCOperand &MO = MI.getOperand(I); if (MO.isReg()) { /* nothing to encode */ } else if (MO.isImm()) { - if (i < Desc.getNumOperands()) { - const MCOperandInfo &Info = Desc.OpInfo[i]; + if (I < Desc.getNumOperands()) { + const MCOperandInfo &Info = Desc.OpInfo[I]; LLVM_DEBUG(dbgs() << "Encoding immediate: type=" << int(Info.OperandType) << "\n"); switch (Info.OperandType) { @@ -127,28 +126,28 @@ void WebAssemblyMCCodeEmitter::encodeInstruction( } } else if (MO.isFPImm()) { - const MCOperandInfo &Info = Desc.OpInfo[i]; + const MCOperandInfo &Info = Desc.OpInfo[I]; if (Info.OperandType == WebAssembly::OPERAND_F32IMM) { // TODO: MC converts all floating point immediate operands to double. // This is fine for numeric values, but may cause NaNs to change bits. 
- float f = float(MO.getFPImm()); - support::endian::write<float>(OS, f, support::little); + auto F = float(MO.getFPImm()); + support::endian::write<float>(OS, F, support::little); } else { assert(Info.OperandType == WebAssembly::OPERAND_F64IMM); - double d = MO.getFPImm(); - support::endian::write<double>(OS, d, support::little); + double D = MO.getFPImm(); + support::endian::write<double>(OS, D, support::little); } } else if (MO.isExpr()) { - const MCOperandInfo &Info = Desc.OpInfo[i]; + const MCOperandInfo &Info = Desc.OpInfo[I]; llvm::MCFixupKind FixupKind; size_t PaddedSize = 5; switch (Info.OperandType) { case WebAssembly::OPERAND_I32IMM: - FixupKind = MCFixupKind(WebAssembly::fixup_code_sleb128_i32); + FixupKind = MCFixupKind(WebAssembly::fixup_sleb128_i32); break; case WebAssembly::OPERAND_I64IMM: - FixupKind = MCFixupKind(WebAssembly::fixup_code_sleb128_i64); + FixupKind = MCFixupKind(WebAssembly::fixup_sleb128_i64); PaddedSize = 10; break; case WebAssembly::OPERAND_FUNCTION32: @@ -156,7 +155,7 @@ void WebAssemblyMCCodeEmitter::encodeInstruction( case WebAssembly::OPERAND_TYPEINDEX: case WebAssembly::OPERAND_GLOBAL: case WebAssembly::OPERAND_EVENT: - FixupKind = MCFixupKind(WebAssembly::fixup_code_uleb128_i32); + FixupKind = MCFixupKind(WebAssembly::fixup_uleb128_i32); break; default: llvm_unreachable("unexpected symbolic operand kind"); diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp index 390f367c2978..9c8ca1f13b18 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyMCTargetDesc.cpp - WebAssembly Target Descriptions -----===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. 
See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -12,10 +11,11 @@ /// //===----------------------------------------------------------------------===// -#include "WebAssemblyMCTargetDesc.h" -#include "InstPrinter/WebAssemblyInstPrinter.h" -#include "WebAssemblyMCAsmInfo.h" -#include "WebAssemblyTargetStreamer.h" +#include "MCTargetDesc/WebAssemblyMCTargetDesc.h" +#include "MCTargetDesc/WebAssemblyInstPrinter.h" +#include "MCTargetDesc/WebAssemblyMCAsmInfo.h" +#include "MCTargetDesc/WebAssemblyTargetStreamer.h" +#include "TargetInfo/WebAssemblyTargetInfo.h" #include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCRegisterInfo.h" #include "llvm/MC/MCSubtargetInfo.h" @@ -40,13 +40,13 @@ static MCAsmInfo *createMCAsmInfo(const MCRegisterInfo & /*MRI*/, } static MCInstrInfo *createMCInstrInfo() { - MCInstrInfo *X = new MCInstrInfo(); + auto *X = new MCInstrInfo(); InitWebAssemblyMCInstrInfo(X); return X; } static MCRegisterInfo *createMCRegisterInfo(const Triple & /*T*/) { - MCRegisterInfo *X = new MCRegisterInfo(); + auto *X = new MCRegisterInfo(); InitWebAssemblyMCRegisterInfo(X, 0); return X; } @@ -146,8 +146,8 @@ wasm::ValType WebAssembly::toValType(const MVT &Ty) { case MVT::v4f32: case MVT::v2f64: return wasm::ValType::V128; - case MVT::ExceptRef: - return wasm::ValType::EXCEPT_REF; + case MVT::exnref: + return wasm::ValType::EXNREF; default: llvm_unreachable("unexpected type"); } diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h index a01517fb90c3..7a9f59b1a4f2 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h +++ 
b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h @@ -1,9 +1,8 @@ //==- WebAssemblyMCTargetDesc.h - WebAssembly Target Descriptions -*- C++ -*-=// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -15,6 +14,7 @@ #ifndef LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYMCTARGETDESC_H #define LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYMCTARGETDESC_H +#include "../WebAssemblySubtarget.h" #include "llvm/BinaryFormat/Wasm.h" #include "llvm/MC/MCInstrDesc.h" #include "llvm/Support/DataTypes.h" @@ -33,9 +33,6 @@ class Target; class Triple; class raw_pwrite_stream; -Target &getTheWebAssemblyTarget32(); -Target &getTheWebAssemblyTarget64(); - MCCodeEmitter *createWebAssemblyMCCodeEmitter(const MCInstrInfo &MCII); MCAsmBackend *createWebAssemblyAsmBackend(const Triple &TT); @@ -90,12 +87,23 @@ namespace WebAssemblyII { enum TOF { MO_NO_FLAG = 0, - // Flags to indicate the type of the symbol being referenced - MO_SYMBOL_FUNCTION = 0x1, - MO_SYMBOL_GLOBAL = 0x2, - MO_SYMBOL_EVENT = 0x4, - MO_SYMBOL_MASK = 0x7, + // On a symbol operand this indicates that the immediate is a wasm global + // index. The value of the wasm global will be set to the symbol address at + // runtime. This adds a level of indirection similar to the GOT on native + // platforms. + MO_GOT, + + // On a symbol operand this indicates that the immediate is the symbol + // address relative the __memory_base wasm global. + // Only applicable to data symbols. 
+ MO_MEMORY_BASE_REL, + + // On a symbol operand this indicates that the immediate is the symbol + // address relative the __table_base wasm global. + // Only applicable to function symbols. + MO_TABLE_BASE_REL, }; + } // end namespace WebAssemblyII } // end namespace llvm @@ -111,15 +119,30 @@ enum TOF { #define GET_INSTRINFO_ENUM #include "WebAssemblyGenInstrInfo.inc" -#define GET_SUBTARGETINFO_ENUM -#include "WebAssemblyGenSubtargetInfo.inc" - namespace llvm { namespace WebAssembly { +/// This is used to indicate block signatures. +enum class ExprType : unsigned { + Void = 0x40, + I32 = 0x7F, + I64 = 0x7E, + F32 = 0x7D, + F64 = 0x7C, + V128 = 0x7B, + Exnref = 0x68, + Invalid = 0x00 +}; + +/// Instruction opcodes emitted via means other than CodeGen. +static const unsigned Nop = 0x01; +static const unsigned End = 0x0b; + +wasm::ValType toValType(const MVT &Ty); + /// Return the default p2align value for a load or store with the given opcode. -inline unsigned GetDefaultP2Align(unsigned Opcode) { - switch (Opcode) { +inline unsigned GetDefaultP2AlignAny(unsigned Opc) { + switch (Opc) { case WebAssembly::LOAD8_S_I32: case WebAssembly::LOAD8_S_I32_S: case WebAssembly::LOAD8_U_I32: @@ -328,35 +351,238 @@ inline unsigned GetDefaultP2Align(unsigned Opcode) { case WebAssembly::STORE_v2f64_S: return 4; default: + return -1; + } +} + +inline unsigned GetDefaultP2Align(unsigned Opc) { + auto Align = GetDefaultP2AlignAny(Opc); + if (Align == -1U) { llvm_unreachable("Only loads and stores have p2align values"); } + return Align; } -/// The operand number of the load or store address in load/store instructions. 
-static const unsigned LoadAddressOperandNo = 3; -static const unsigned StoreAddressOperandNo = 2; +inline bool isArgument(unsigned Opc) { + switch (Opc) { + case WebAssembly::ARGUMENT_i32: + case WebAssembly::ARGUMENT_i32_S: + case WebAssembly::ARGUMENT_i64: + case WebAssembly::ARGUMENT_i64_S: + case WebAssembly::ARGUMENT_f32: + case WebAssembly::ARGUMENT_f32_S: + case WebAssembly::ARGUMENT_f64: + case WebAssembly::ARGUMENT_f64_S: + case WebAssembly::ARGUMENT_v16i8: + case WebAssembly::ARGUMENT_v16i8_S: + case WebAssembly::ARGUMENT_v8i16: + case WebAssembly::ARGUMENT_v8i16_S: + case WebAssembly::ARGUMENT_v4i32: + case WebAssembly::ARGUMENT_v4i32_S: + case WebAssembly::ARGUMENT_v2i64: + case WebAssembly::ARGUMENT_v2i64_S: + case WebAssembly::ARGUMENT_v4f32: + case WebAssembly::ARGUMENT_v4f32_S: + case WebAssembly::ARGUMENT_v2f64: + case WebAssembly::ARGUMENT_v2f64_S: + case WebAssembly::ARGUMENT_exnref: + case WebAssembly::ARGUMENT_exnref_S: + return true; + default: + return false; + } +} -/// The operand number of the load or store p2align in load/store instructions. -static const unsigned LoadP2AlignOperandNo = 1; -static const unsigned StoreP2AlignOperandNo = 0; +inline bool isCopy(unsigned Opc) { + switch (Opc) { + case WebAssembly::COPY_I32: + case WebAssembly::COPY_I32_S: + case WebAssembly::COPY_I64: + case WebAssembly::COPY_I64_S: + case WebAssembly::COPY_F32: + case WebAssembly::COPY_F32_S: + case WebAssembly::COPY_F64: + case WebAssembly::COPY_F64_S: + case WebAssembly::COPY_V128: + case WebAssembly::COPY_V128_S: + case WebAssembly::COPY_EXNREF: + case WebAssembly::COPY_EXNREF_S: + return true; + default: + return false; + } +} -/// This is used to indicate block signatures. 
-enum class ExprType : unsigned { - Void = 0x40, - I32 = 0x7F, - I64 = 0x7E, - F32 = 0x7D, - F64 = 0x7C, - V128 = 0x7B, - ExceptRef = 0x68, - Invalid = 0x00 -}; +inline bool isTee(unsigned Opc) { + switch (Opc) { + case WebAssembly::TEE_I32: + case WebAssembly::TEE_I32_S: + case WebAssembly::TEE_I64: + case WebAssembly::TEE_I64_S: + case WebAssembly::TEE_F32: + case WebAssembly::TEE_F32_S: + case WebAssembly::TEE_F64: + case WebAssembly::TEE_F64_S: + case WebAssembly::TEE_V128: + case WebAssembly::TEE_V128_S: + case WebAssembly::TEE_EXNREF: + case WebAssembly::TEE_EXNREF_S: + return true; + default: + return false; + } +} -/// Instruction opcodes emitted via means other than CodeGen. -static const unsigned Nop = 0x01; -static const unsigned End = 0x0b; +inline bool isCallDirect(unsigned Opc) { + switch (Opc) { + case WebAssembly::CALL_VOID: + case WebAssembly::CALL_VOID_S: + case WebAssembly::CALL_i32: + case WebAssembly::CALL_i32_S: + case WebAssembly::CALL_i64: + case WebAssembly::CALL_i64_S: + case WebAssembly::CALL_f32: + case WebAssembly::CALL_f32_S: + case WebAssembly::CALL_f64: + case WebAssembly::CALL_f64_S: + case WebAssembly::CALL_v16i8: + case WebAssembly::CALL_v16i8_S: + case WebAssembly::CALL_v8i16: + case WebAssembly::CALL_v8i16_S: + case WebAssembly::CALL_v4i32: + case WebAssembly::CALL_v4i32_S: + case WebAssembly::CALL_v2i64: + case WebAssembly::CALL_v2i64_S: + case WebAssembly::CALL_v4f32: + case WebAssembly::CALL_v4f32_S: + case WebAssembly::CALL_v2f64: + case WebAssembly::CALL_v2f64_S: + case WebAssembly::CALL_exnref: + case WebAssembly::CALL_exnref_S: + case WebAssembly::RET_CALL: + case WebAssembly::RET_CALL_S: + return true; + default: + return false; + } +} -wasm::ValType toValType(const MVT &Ty); +inline bool isCallIndirect(unsigned Opc) { + switch (Opc) { + case WebAssembly::CALL_INDIRECT_VOID: + case WebAssembly::CALL_INDIRECT_VOID_S: + case WebAssembly::CALL_INDIRECT_i32: + case WebAssembly::CALL_INDIRECT_i32_S: + case 
WebAssembly::CALL_INDIRECT_i64: + case WebAssembly::CALL_INDIRECT_i64_S: + case WebAssembly::CALL_INDIRECT_f32: + case WebAssembly::CALL_INDIRECT_f32_S: + case WebAssembly::CALL_INDIRECT_f64: + case WebAssembly::CALL_INDIRECT_f64_S: + case WebAssembly::CALL_INDIRECT_v16i8: + case WebAssembly::CALL_INDIRECT_v16i8_S: + case WebAssembly::CALL_INDIRECT_v8i16: + case WebAssembly::CALL_INDIRECT_v8i16_S: + case WebAssembly::CALL_INDIRECT_v4i32: + case WebAssembly::CALL_INDIRECT_v4i32_S: + case WebAssembly::CALL_INDIRECT_v2i64: + case WebAssembly::CALL_INDIRECT_v2i64_S: + case WebAssembly::CALL_INDIRECT_v4f32: + case WebAssembly::CALL_INDIRECT_v4f32_S: + case WebAssembly::CALL_INDIRECT_v2f64: + case WebAssembly::CALL_INDIRECT_v2f64_S: + case WebAssembly::CALL_INDIRECT_exnref: + case WebAssembly::CALL_INDIRECT_exnref_S: + case WebAssembly::RET_CALL_INDIRECT: + case WebAssembly::RET_CALL_INDIRECT_S: + return true; + default: + return false; + } +} + +/// Returns the operand number of a callee, assuming the argument is a call +/// instruction. 
+inline unsigned getCalleeOpNo(unsigned Opc) { + switch (Opc) { + case WebAssembly::CALL_VOID: + case WebAssembly::CALL_VOID_S: + case WebAssembly::CALL_INDIRECT_VOID: + case WebAssembly::CALL_INDIRECT_VOID_S: + case WebAssembly::RET_CALL: + case WebAssembly::RET_CALL_S: + case WebAssembly::RET_CALL_INDIRECT: + case WebAssembly::RET_CALL_INDIRECT_S: + return 0; + case WebAssembly::CALL_i32: + case WebAssembly::CALL_i32_S: + case WebAssembly::CALL_i64: + case WebAssembly::CALL_i64_S: + case WebAssembly::CALL_f32: + case WebAssembly::CALL_f32_S: + case WebAssembly::CALL_f64: + case WebAssembly::CALL_f64_S: + case WebAssembly::CALL_v16i8: + case WebAssembly::CALL_v16i8_S: + case WebAssembly::CALL_v8i16: + case WebAssembly::CALL_v8i16_S: + case WebAssembly::CALL_v4i32: + case WebAssembly::CALL_v4i32_S: + case WebAssembly::CALL_v2i64: + case WebAssembly::CALL_v2i64_S: + case WebAssembly::CALL_v4f32: + case WebAssembly::CALL_v4f32_S: + case WebAssembly::CALL_v2f64: + case WebAssembly::CALL_v2f64_S: + case WebAssembly::CALL_exnref: + case WebAssembly::CALL_exnref_S: + case WebAssembly::CALL_INDIRECT_i32: + case WebAssembly::CALL_INDIRECT_i32_S: + case WebAssembly::CALL_INDIRECT_i64: + case WebAssembly::CALL_INDIRECT_i64_S: + case WebAssembly::CALL_INDIRECT_f32: + case WebAssembly::CALL_INDIRECT_f32_S: + case WebAssembly::CALL_INDIRECT_f64: + case WebAssembly::CALL_INDIRECT_f64_S: + case WebAssembly::CALL_INDIRECT_v16i8: + case WebAssembly::CALL_INDIRECT_v16i8_S: + case WebAssembly::CALL_INDIRECT_v8i16: + case WebAssembly::CALL_INDIRECT_v8i16_S: + case WebAssembly::CALL_INDIRECT_v4i32: + case WebAssembly::CALL_INDIRECT_v4i32_S: + case WebAssembly::CALL_INDIRECT_v2i64: + case WebAssembly::CALL_INDIRECT_v2i64_S: + case WebAssembly::CALL_INDIRECT_v4f32: + case WebAssembly::CALL_INDIRECT_v4f32_S: + case WebAssembly::CALL_INDIRECT_v2f64: + case WebAssembly::CALL_INDIRECT_v2f64_S: + case WebAssembly::CALL_INDIRECT_exnref: + case WebAssembly::CALL_INDIRECT_exnref_S: + return 1; + 
default: + llvm_unreachable("Not a call instruction"); + } +} + +inline bool isMarker(unsigned Opc) { + switch (Opc) { + case WebAssembly::BLOCK: + case WebAssembly::BLOCK_S: + case WebAssembly::END_BLOCK: + case WebAssembly::END_BLOCK_S: + case WebAssembly::LOOP: + case WebAssembly::LOOP_S: + case WebAssembly::END_LOOP: + case WebAssembly::END_LOOP_S: + case WebAssembly::TRY: + case WebAssembly::TRY_S: + case WebAssembly::END_TRY: + case WebAssembly::END_TRY_S: + return true; + default: + return false; + } +} } // end namespace WebAssembly } // end namespace llvm diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp index 7caeebb1a9aa..e05efef7201b 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp @@ -1,9 +1,8 @@ //==-- WebAssemblyTargetStreamer.cpp - WebAssembly Target Streamer Methods --=// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -13,9 +12,9 @@ /// //===----------------------------------------------------------------------===// -#include "WebAssemblyTargetStreamer.h" -#include "InstPrinter/WebAssemblyInstPrinter.h" -#include "WebAssemblyMCTargetDesc.h" +#include "MCTargetDesc/WebAssemblyTargetStreamer.h" +#include "MCTargetDesc/WebAssemblyInstPrinter.h" +#include "MCTargetDesc/WebAssemblyMCTargetDesc.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCSectionWasm.h" #include "llvm/MC/MCSubtargetInfo.h" diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h index 2ee9956c8e38..5ea62b179d22 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h @@ -1,9 +1,8 @@ //==-- WebAssemblyTargetStreamer.h - WebAssembly Target Streamer -*- C++ -*-==// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp index 763e30be8e02..a1cc3e268e8f 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyWasmObjectWriter.cpp - WebAssembly Wasm Writer ---------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -43,26 +42,7 @@ private: WebAssemblyWasmObjectWriter::WebAssemblyWasmObjectWriter(bool Is64Bit) : MCWasmObjectTargetWriter(Is64Bit) {} -// Test whether the given expression computes a function address. 
-static bool IsFunctionExpr(const MCExpr *Expr) { - if (auto SyExp = dyn_cast<MCSymbolRefExpr>(Expr)) - return cast<MCSymbolWasm>(SyExp->getSymbol()).isFunction(); - - if (auto BinOp = dyn_cast<MCBinaryExpr>(Expr)) - return IsFunctionExpr(BinOp->getLHS()) != IsFunctionExpr(BinOp->getRHS()); - - if (auto UnOp = dyn_cast<MCUnaryExpr>(Expr)) - return IsFunctionExpr(UnOp->getSubExpr()); - - return false; -} - -static bool IsFunctionType(const MCValue &Target) { - const MCSymbolRefExpr *RefA = Target.getSymA(); - return RefA && RefA->getKind() == MCSymbolRefExpr::VK_WebAssembly_TYPEINDEX; -} - -static const MCSection *GetFixupSection(const MCExpr *Expr) { +static const MCSection *getFixupSection(const MCExpr *Expr) { if (auto SyExp = dyn_cast<MCSymbolRefExpr>(Expr)) { if (SyExp->getSymbol().isInSection()) return &SyExp->getSymbol().getSection(); @@ -70,63 +50,66 @@ static const MCSection *GetFixupSection(const MCExpr *Expr) { } if (auto BinOp = dyn_cast<MCBinaryExpr>(Expr)) { - auto SectionLHS = GetFixupSection(BinOp->getLHS()); - auto SectionRHS = GetFixupSection(BinOp->getRHS()); + auto SectionLHS = getFixupSection(BinOp->getLHS()); + auto SectionRHS = getFixupSection(BinOp->getRHS()); return SectionLHS == SectionRHS ? nullptr : SectionLHS; } if (auto UnOp = dyn_cast<MCUnaryExpr>(Expr)) - return GetFixupSection(UnOp->getSubExpr()); + return getFixupSection(UnOp->getSubExpr()); return nullptr; } -static bool IsGlobalType(const MCValue &Target) { - const MCSymbolRefExpr *RefA = Target.getSymA(); - return RefA && RefA->getKind() == MCSymbolRefExpr::VK_WebAssembly_GLOBAL; -} - -static bool IsEventType(const MCValue &Target) { - const MCSymbolRefExpr *RefA = Target.getSymA(); - return RefA && RefA->getKind() == MCSymbolRefExpr::VK_WebAssembly_EVENT; -} - unsigned WebAssemblyWasmObjectWriter::getRelocType(const MCValue &Target, const MCFixup &Fixup) const { - // WebAssembly functions are not allocated in the data address space. 
To - // resolve a pointer to a function, we must use a special relocation type. - bool IsFunction = IsFunctionExpr(Fixup.getValue()); + const MCSymbolRefExpr *RefA = Target.getSymA(); + assert(RefA); + auto& SymA = cast<MCSymbolWasm>(RefA->getSymbol()); + + MCSymbolRefExpr::VariantKind Modifier = Target.getAccessVariant(); + + switch (Modifier) { + case MCSymbolRefExpr::VK_GOT: + return wasm::R_WASM_GLOBAL_INDEX_LEB; + case MCSymbolRefExpr::VK_WASM_TBREL: + assert(SymA.isFunction()); + return wasm::R_WASM_TABLE_INDEX_REL_SLEB; + case MCSymbolRefExpr::VK_WASM_MBREL: + assert(SymA.isData()); + return wasm::R_WASM_MEMORY_ADDR_REL_SLEB; + case MCSymbolRefExpr::VK_WASM_TYPEINDEX: + return wasm::R_WASM_TYPE_INDEX_LEB; + default: + break; + } switch (unsigned(Fixup.getKind())) { - case WebAssembly::fixup_code_sleb128_i32: - if (IsFunction) - return wasm::R_WEBASSEMBLY_TABLE_INDEX_SLEB; - return wasm::R_WEBASSEMBLY_MEMORY_ADDR_SLEB; - case WebAssembly::fixup_code_sleb128_i64: + case WebAssembly::fixup_sleb128_i32: + if (SymA.isFunction()) + return wasm::R_WASM_TABLE_INDEX_SLEB; + return wasm::R_WASM_MEMORY_ADDR_SLEB; + case WebAssembly::fixup_sleb128_i64: llvm_unreachable("fixup_sleb128_i64 not implemented yet"); - case WebAssembly::fixup_code_uleb128_i32: - if (IsGlobalType(Target)) - return wasm::R_WEBASSEMBLY_GLOBAL_INDEX_LEB; - if (IsFunctionType(Target)) - return wasm::R_WEBASSEMBLY_TYPE_INDEX_LEB; - if (IsFunction) - return wasm::R_WEBASSEMBLY_FUNCTION_INDEX_LEB; - if (IsEventType(Target)) - return wasm::R_WEBASSEMBLY_EVENT_INDEX_LEB; - return wasm::R_WEBASSEMBLY_MEMORY_ADDR_LEB; + case WebAssembly::fixup_uleb128_i32: + if (SymA.isGlobal()) + return wasm::R_WASM_GLOBAL_INDEX_LEB; + if (SymA.isFunction()) + return wasm::R_WASM_FUNCTION_INDEX_LEB; + if (SymA.isEvent()) + return wasm::R_WASM_EVENT_INDEX_LEB; + return wasm::R_WASM_MEMORY_ADDR_LEB; case FK_Data_4: - if (IsFunction) - return wasm::R_WEBASSEMBLY_TABLE_INDEX_I32; + if (SymA.isFunction()) + return 
wasm::R_WASM_TABLE_INDEX_I32; if (auto Section = static_cast<const MCSectionWasm *>( - GetFixupSection(Fixup.getValue()))) { + getFixupSection(Fixup.getValue()))) { if (Section->getKind().isText()) - return wasm::R_WEBASSEMBLY_FUNCTION_OFFSET_I32; + return wasm::R_WASM_FUNCTION_OFFSET_I32; else if (!Section->isWasmData()) - return wasm::R_WEBASSEMBLY_SECTION_OFFSET_I32; + return wasm::R_WASM_SECTION_OFFSET_I32; } - return wasm::R_WEBASSEMBLY_MEMORY_ADDR_I32; - case FK_Data_8: - llvm_unreachable("FK_Data_8 not implemented yet"); + return wasm::R_WASM_MEMORY_ADDR_I32; default: llvm_unreachable("unimplemented fixup kind"); } diff --git a/contrib/llvm/lib/Target/WebAssembly/README.txt b/contrib/llvm/lib/Target/WebAssembly/README.txt index a154b4bf7ea8..ef3f5aaf7d33 100644 --- a/contrib/llvm/lib/Target/WebAssembly/README.txt +++ b/contrib/llvm/lib/Target/WebAssembly/README.txt @@ -14,7 +14,7 @@ can run in browsers and other environments. For more information, see the Emscripten documentation in general, and this page in particular: * https://github.com/kripken/emscripten/wiki/New-WebAssembly-Backend - + Rust provides WebAssembly support integrated into Cargo. There are two main options: - wasm32-unknown-unknown, which provides a relatively minimal environment diff --git a/contrib/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp index f7a417c0ed49..e4afe2bb2830 100644 --- a/contrib/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyTargetInfo.cpp - WebAssembly Target Implementation -----===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -12,8 +11,7 @@ /// //===----------------------------------------------------------------------===// -#include "MCTargetDesc/WebAssemblyMCTargetDesc.h" -#include "llvm/ADT/Triple.h" +#include "TargetInfo/WebAssemblyTargetInfo.h" #include "llvm/Support/TargetRegistry.h" using namespace llvm; diff --git a/contrib/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.h b/contrib/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.h new file mode 100644 index 000000000000..a7427f78c72c --- /dev/null +++ b/contrib/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.h @@ -0,0 +1,26 @@ +//===-- WebAssemblyTargetInfo.h - WebAssembly Target Impl -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file registers the WebAssembly target. 
+/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_TARGETINFO_WEBASSEMBLYTARGETINFO_H +#define LLVM_LIB_TARGET_WEBASSEMBLY_TARGETINFO_WEBASSEMBLYTARGETINFO_H + +namespace llvm { + +class Target; + +Target &getTheWebAssemblyTarget32(); +Target &getTheWebAssemblyTarget64(); + +} // namespace llvm + +#endif // LLVM_LIB_TARGET_WEBASSEMBLY_TARGETINFO_WEBASSEMBLYTARGETINFO_H diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssembly.h b/contrib/llvm/lib/Target/WebAssembly/WebAssembly.h index 45145c0a6527..fcbd0a5082ff 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssembly.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssembly.h @@ -1,9 +1,8 @@ //===-- WebAssembly.h - Top-level interface for WebAssembly ----*- C++ -*-===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -39,18 +38,17 @@ FunctionPass *createWebAssemblyArgumentMove(); FunctionPass *createWebAssemblySetP2AlignOperands(); // Late passes. 
-FunctionPass *createWebAssemblyEHRestoreStackPointer(); FunctionPass *createWebAssemblyReplacePhysRegs(); FunctionPass *createWebAssemblyPrepareForLiveIntervals(); FunctionPass *createWebAssemblyOptimizeLiveIntervals(); FunctionPass *createWebAssemblyMemIntrinsicResults(); FunctionPass *createWebAssemblyRegStackify(); FunctionPass *createWebAssemblyRegColoring(); -FunctionPass *createWebAssemblyExplicitLocals(); FunctionPass *createWebAssemblyFixIrreducibleControlFlow(); FunctionPass *createWebAssemblyLateEHPrepare(); FunctionPass *createWebAssemblyCFGSort(); FunctionPass *createWebAssemblyCFGStackify(); +FunctionPass *createWebAssemblyExplicitLocals(); FunctionPass *createWebAssemblyLowerBrUnless(); FunctionPass *createWebAssemblyRegNumbering(); FunctionPass *createWebAssemblyPeephole(); @@ -64,19 +62,18 @@ void initializeFixFunctionBitcastsPass(PassRegistry &); void initializeOptimizeReturnedPass(PassRegistry &); void initializeWebAssemblyArgumentMovePass(PassRegistry &); void initializeWebAssemblySetP2AlignOperandsPass(PassRegistry &); -void initializeWebAssemblyEHRestoreStackPointerPass(PassRegistry &); void initializeWebAssemblyReplacePhysRegsPass(PassRegistry &); void initializeWebAssemblyPrepareForLiveIntervalsPass(PassRegistry &); void initializeWebAssemblyOptimizeLiveIntervalsPass(PassRegistry &); void initializeWebAssemblyMemIntrinsicResultsPass(PassRegistry &); void initializeWebAssemblyRegStackifyPass(PassRegistry &); void initializeWebAssemblyRegColoringPass(PassRegistry &); -void initializeWebAssemblyExplicitLocalsPass(PassRegistry &); void initializeWebAssemblyFixIrreducibleControlFlowPass(PassRegistry &); void initializeWebAssemblyLateEHPreparePass(PassRegistry &); void initializeWebAssemblyExceptionInfoPass(PassRegistry &); void initializeWebAssemblyCFGSortPass(PassRegistry &); void initializeWebAssemblyCFGStackifyPass(PassRegistry &); +void initializeWebAssemblyExplicitLocalsPass(PassRegistry &); void 
initializeWebAssemblyLowerBrUnlessPass(PassRegistry &); void initializeWebAssemblyRegNumberingPass(PassRegistry &); void initializeWebAssemblyPeepholePass(PassRegistry &); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssembly.td b/contrib/llvm/lib/Target/WebAssembly/WebAssembly.td index 6b218f8aa880..b0b8a9b996a3 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssembly.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssembly.td @@ -1,9 +1,8 @@ //- WebAssembly.td - Describe the WebAssembly Target Machine --*- tablegen -*-// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -34,6 +33,7 @@ def FeatureUnimplementedSIMD128 : def FeatureAtomics : SubtargetFeature<"atomics", "HasAtomics", "true", "Enable Atomics">; + def FeatureNontrappingFPToInt : SubtargetFeature<"nontrapping-fptoint", "HasNontrappingFPToInt", "true", @@ -44,10 +44,28 @@ def FeatureSignExt : "HasSignExt", "true", "Enable sign extension operators">; +def FeatureTailCall : + SubtargetFeature<"tail-call", + "HasTailCall", "true", + "Enable tail call instructions">; + def FeatureExceptionHandling : SubtargetFeature<"exception-handling", "HasExceptionHandling", "true", "Enable Wasm exception handling">; +def FeatureBulkMemory : + SubtargetFeature<"bulk-memory", "HasBulkMemory", "true", + "Enable bulk memory operations">; + +def FeatureMultivalue : + SubtargetFeature<"multivalue", + "HasMultivalue", "true", + "Enable multivalue blocks, instructions, and functions">; + +def FeatureMutableGlobals : + SubtargetFeature<"mutable-globals", "HasMutableGlobals", "true", + "Enable mutable globals">; + 
//===----------------------------------------------------------------------===// // Architectures. //===----------------------------------------------------------------------===// @@ -79,7 +97,8 @@ def : ProcessorModel<"generic", NoSchedModel, []>; // Latest and greatest experimental version of WebAssembly. Bugs included! def : ProcessorModel<"bleeding-edge", NoSchedModel, [FeatureSIMD128, FeatureAtomics, - FeatureNontrappingFPToInt, FeatureSignExt]>; + FeatureNontrappingFPToInt, FeatureSignExt, + FeatureMutableGlobals]>; //===----------------------------------------------------------------------===// // Target Declaration diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp index e49e2b67f435..b7a701f15782 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyAddMissingPrototypes.cpp - Fix prototypeless functions -===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -79,32 +78,33 @@ bool WebAssemblyAddMissingPrototypes::runOnModule(Module &M) { report_fatal_error( "Functions with 'no-prototype' attribute must take varargs: " + F.getName()); - if (F.getFunctionType()->getNumParams() != 0) - report_fatal_error( - "Functions with 'no-prototype' attribute should not have params: " + - F.getName()); + unsigned NumParams = F.getFunctionType()->getNumParams(); + if (NumParams != 0) { + if (!(NumParams == 1 && F.arg_begin()->hasStructRetAttr())) + report_fatal_error("Functions with 'no-prototype' attribute should " + "not have params: " + + F.getName()); + } // Create a function prototype based on the first call site (first bitcast) // that we find. FunctionType *NewType = nullptr; - Function *NewF = nullptr; for (Use &U : F.uses()) { LLVM_DEBUG(dbgs() << "prototype-less use: " << F.getName() << "\n"); + LLVM_DEBUG(dbgs() << *U.getUser() << "\n"); if (auto *BC = dyn_cast<BitCastOperator>(U.getUser())) { if (auto *DestType = dyn_cast<FunctionType>( BC->getDestTy()->getPointerElementType())) { if (!NewType) { // Create a new function with the correct type NewType = DestType; - NewF = Function::Create(NewType, F.getLinkage(), F.getName()); - NewF->setAttributes(F.getAttributes()); - NewF->removeFnAttr("no-prototype"); - } else { - if (NewType != DestType) { - report_fatal_error("Prototypeless function used with " - "conflicting signatures: " + - F.getName()); - } + LLVM_DEBUG(dbgs() << "found function type: " << *NewType << "\n"); + } else if (NewType != DestType) { + errs() << "warning: prototype-less function used with " + "conflicting signatures: " + << F.getName() << "\n"; + LLVM_DEBUG(dbgs() << " " << *DestType << "\n"); + LLVM_DEBUG(dbgs() << " "<< *NewType << "\n"); } } } @@ -114,47 +114,30 @@ bool WebAssemblyAddMissingPrototypes::runOnModule(Module &M) { LLVM_DEBUG( dbgs() << 
"could not derive a function prototype from usage: " + F.getName() + "\n"); - continue; + // We could not derive a type for this function. In this case strip + // the isVarArg and make it a simple zero-arg function. This has more + // chance of being correct. The current signature of (...) is illegal in + // C since it doesn't have any arguments before the "...", we this at + // least makes it possible for this symbol to be resolved by the linker. + NewType = FunctionType::get(F.getFunctionType()->getReturnType(), false); } - SmallVector<Instruction *, 4> DeadInsts; - - for (Use &US : F.uses()) { - User *U = US.getUser(); - if (auto *BC = dyn_cast<BitCastOperator>(U)) { - if (auto *Inst = dyn_cast<BitCastInst>(U)) { - // Replace with a new bitcast - IRBuilder<> Builder(Inst); - Value *NewCast = Builder.CreatePointerCast(NewF, BC->getDestTy()); - Inst->replaceAllUsesWith(NewCast); - DeadInsts.push_back(Inst); - } else if (auto *Const = dyn_cast<ConstantExpr>(U)) { - Constant *NewConst = - ConstantExpr::getPointerCast(NewF, BC->getDestTy()); - Const->replaceAllUsesWith(NewConst); - } else { - dbgs() << *U->getType() << "\n"; -#ifndef NDEBUG - U->dump(); -#endif - report_fatal_error("unexpected use of prototypeless function: " + - F.getName() + "\n"); - } - } - } - - for (auto I : DeadInsts) - I->eraseFromParent(); + Function *NewF = + Function::Create(NewType, F.getLinkage(), F.getName() + ".fixed_sig"); + NewF->setAttributes(F.getAttributes()); + NewF->removeFnAttr("no-prototype"); Replacements.emplace_back(&F, NewF); } - - // Finally replace the old function declarations with the new ones for (auto &Pair : Replacements) { - Function *Old = Pair.first; - Function *New = Pair.second; - Old->eraseFromParent(); - M.getFunctionList().push_back(New); + Function *OldF = Pair.first; + Function *NewF = Pair.second; + std::string Name = OldF->getName(); + M.getFunctionList().push_back(NewF); + OldF->replaceAllUsesWith( + ConstantExpr::getPointerBitCastOrAddrSpaceCast(NewF, 
OldF->getType())); + OldF->eraseFromParent(); + NewF->setName(Name); } return !Replacements.empty(); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp index 7c8a631cde8a..02f5cc6da77c 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyArgumentMove.cpp - Argument instruction moving ---------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -79,7 +78,7 @@ bool WebAssemblyArgumentMove::runOnMachineFunction(MachineFunction &MF) { // Look for the first NonArg instruction. for (MachineInstr &MI : EntryMBB) { - if (!WebAssembly::isArgument(MI)) { + if (!WebAssembly::isArgument(MI.getOpcode())) { InsertPt = MI; break; } @@ -88,7 +87,7 @@ bool WebAssemblyArgumentMove::runOnMachineFunction(MachineFunction &MF) { // Now move any argument instructions later in the block // to before our first NonArg instruction. 
for (MachineInstr &MI : llvm::make_range(InsertPt, EntryMBB.end())) { - if (WebAssembly::isArgument(MI)) { + if (WebAssembly::isArgument(MI.getOpcode())) { EntryMBB.insert(InsertPt, MI.removeFromParent()); Changed = true; } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp index b492d1146950..7f9d41da3978 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyAsmPrinter.cpp - WebAssembly LLVM assembly writer ------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -15,21 +14,27 @@ //===----------------------------------------------------------------------===// #include "WebAssemblyAsmPrinter.h" -#include "InstPrinter/WebAssemblyInstPrinter.h" +#include "MCTargetDesc/WebAssemblyInstPrinter.h" #include "MCTargetDesc/WebAssemblyMCTargetDesc.h" #include "MCTargetDesc/WebAssemblyTargetStreamer.h" +#include "TargetInfo/WebAssemblyTargetInfo.h" #include "WebAssembly.h" #include "WebAssemblyMCInstLower.h" #include "WebAssemblyMachineFunctionInfo.h" #include "WebAssemblyRegisterInfo.h" +#include "WebAssemblyTargetMachine.h" +#include "llvm/ADT/SmallSet.h" #include "llvm/ADT/StringExtras.h" +#include "llvm/BinaryFormat/Wasm.h" #include "llvm/CodeGen/Analysis.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/CodeGen/MachineConstantPool.h" #include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineModuleInfoImpls.h" #include "llvm/IR/DataLayout.h" 
+#include "llvm/IR/DebugInfoMetadata.h" #include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/Metadata.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCSectionWasm.h" #include "llvm/MC/MCStreamer.h" @@ -38,10 +43,13 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Support/raw_ostream.h" + using namespace llvm; #define DEBUG_TYPE "asm-printer" +extern cl::opt<bool> WasmKeepRegisters; + //===----------------------------------------------------------------------===// // Helpers. //===----------------------------------------------------------------------===// @@ -92,11 +100,11 @@ void WebAssemblyAsmPrinter::EmitEndOfAsmFile(Module &M) { if (F.isDeclarationForLinker() && !F.isIntrinsic()) { SmallVector<MVT, 4> Results; SmallVector<MVT, 4> Params; - ComputeSignatureVTs(F.getFunctionType(), F, TM, Params, Results); + computeSignatureVTs(F.getFunctionType(), F, TM, Params, Results); auto *Sym = cast<MCSymbolWasm>(getSymbol(&F)); Sym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION); if (!Sym->getSignature()) { - auto Signature = SignatureFromMVTs(Results, Params); + auto Signature = signatureFromMVTs(Results, Params); Sym->setSignature(Signature.get()); addSignature(std::move(Signature)); } @@ -136,7 +144,7 @@ void WebAssemblyAsmPrinter::EmitEndOfAsmFile(Module &M) { if (const NamedMDNode *Named = M.getNamedMetadata("wasm.custom_sections")) { for (const Metadata *MD : Named->operands()) { - const MDTuple *Tuple = dyn_cast<MDTuple>(MD); + const auto *Tuple = dyn_cast<MDTuple>(MD); if (!Tuple || Tuple->getNumOperands() != 2) continue; const MDString *Name = dyn_cast<MDString>(Tuple->getOperand(0)); @@ -146,13 +154,117 @@ void WebAssemblyAsmPrinter::EmitEndOfAsmFile(Module &M) { OutStreamer->PushSection(); std::string SectionName = (".custom_section." 
+ Name->getString()).str(); - MCSectionWasm *mySection = + MCSectionWasm *MySection = OutContext.getWasmSection(SectionName, SectionKind::getMetadata()); - OutStreamer->SwitchSection(mySection); + OutStreamer->SwitchSection(MySection); OutStreamer->EmitBytes(Contents->getString()); OutStreamer->PopSection(); } } + + EmitProducerInfo(M); + EmitTargetFeatures(M); +} + +void WebAssemblyAsmPrinter::EmitProducerInfo(Module &M) { + llvm::SmallVector<std::pair<std::string, std::string>, 4> Languages; + if (const NamedMDNode *Debug = M.getNamedMetadata("llvm.dbg.cu")) { + llvm::SmallSet<StringRef, 4> SeenLanguages; + for (size_t I = 0, E = Debug->getNumOperands(); I < E; ++I) { + const auto *CU = cast<DICompileUnit>(Debug->getOperand(I)); + StringRef Language = dwarf::LanguageString(CU->getSourceLanguage()); + Language.consume_front("DW_LANG_"); + if (SeenLanguages.insert(Language).second) + Languages.emplace_back(Language.str(), ""); + } + } + + llvm::SmallVector<std::pair<std::string, std::string>, 4> Tools; + if (const NamedMDNode *Ident = M.getNamedMetadata("llvm.ident")) { + llvm::SmallSet<StringRef, 4> SeenTools; + for (size_t I = 0, E = Ident->getNumOperands(); I < E; ++I) { + const auto *S = cast<MDString>(Ident->getOperand(I)->getOperand(0)); + std::pair<StringRef, StringRef> Field = S->getString().split("version"); + StringRef Name = Field.first.trim(); + StringRef Version = Field.second.trim(); + if (SeenTools.insert(Name).second) + Tools.emplace_back(Name.str(), Version.str()); + } + } + + int FieldCount = int(!Languages.empty()) + int(!Tools.empty()); + if (FieldCount != 0) { + MCSectionWasm *Producers = OutContext.getWasmSection( + ".custom_section.producers", SectionKind::getMetadata()); + OutStreamer->PushSection(); + OutStreamer->SwitchSection(Producers); + OutStreamer->EmitULEB128IntValue(FieldCount); + for (auto &Producers : {std::make_pair("language", &Languages), + std::make_pair("processed-by", &Tools)}) { + if (Producers.second->empty()) + continue; 
+ OutStreamer->EmitULEB128IntValue(strlen(Producers.first)); + OutStreamer->EmitBytes(Producers.first); + OutStreamer->EmitULEB128IntValue(Producers.second->size()); + for (auto &Producer : *Producers.second) { + OutStreamer->EmitULEB128IntValue(Producer.first.size()); + OutStreamer->EmitBytes(Producer.first); + OutStreamer->EmitULEB128IntValue(Producer.second.size()); + OutStreamer->EmitBytes(Producer.second); + } + } + OutStreamer->PopSection(); + } +} + +void WebAssemblyAsmPrinter::EmitTargetFeatures(Module &M) { + struct FeatureEntry { + uint8_t Prefix; + StringRef Name; + }; + + // Read target features and linkage policies from module metadata + SmallVector<FeatureEntry, 4> EmittedFeatures; + for (const SubtargetFeatureKV &KV : WebAssemblyFeatureKV) { + std::string MDKey = (StringRef("wasm-feature-") + KV.Key).str(); + Metadata *Policy = M.getModuleFlag(MDKey); + if (Policy == nullptr) + continue; + + FeatureEntry Entry; + Entry.Prefix = 0; + Entry.Name = KV.Key; + + if (auto *MD = cast<ConstantAsMetadata>(Policy)) + if (auto *I = cast<ConstantInt>(MD->getValue())) + Entry.Prefix = I->getZExtValue(); + + // Silently ignore invalid metadata + if (Entry.Prefix != wasm::WASM_FEATURE_PREFIX_USED && + Entry.Prefix != wasm::WASM_FEATURE_PREFIX_REQUIRED && + Entry.Prefix != wasm::WASM_FEATURE_PREFIX_DISALLOWED) + continue; + + EmittedFeatures.push_back(Entry); + } + + if (EmittedFeatures.size() == 0) + return; + + // Emit features and linkage policies into the "target_features" section + MCSectionWasm *FeaturesSection = OutContext.getWasmSection( + ".custom_section.target_features", SectionKind::getMetadata()); + OutStreamer->PushSection(); + OutStreamer->SwitchSection(FeaturesSection); + + OutStreamer->EmitULEB128IntValue(EmittedFeatures.size()); + for (auto &F : EmittedFeatures) { + OutStreamer->EmitIntValue(F.Prefix, 1); + OutStreamer->EmitULEB128IntValue(F.Name.size()); + OutStreamer->EmitBytes(F.Name); + } + + OutStreamer->PopSection(); } void 
WebAssemblyAsmPrinter::EmitConstantPool() { @@ -168,8 +280,8 @@ void WebAssemblyAsmPrinter::EmitFunctionBodyStart() { const Function &F = MF->getFunction(); SmallVector<MVT, 1> ResultVTs; SmallVector<MVT, 4> ParamVTs; - ComputeSignatureVTs(F.getFunctionType(), F, TM, ParamVTs, ResultVTs); - auto Signature = SignatureFromMVTs(ResultVTs, ParamVTs); + computeSignatureVTs(F.getFunctionType(), F, TM, ParamVTs, ResultVTs); + auto Signature = signatureFromMVTs(ResultVTs, ParamVTs); auto *WasmSym = cast<MCSymbolWasm>(CurrentFnSym); WasmSym->setSignature(Signature.get()); addSignature(std::move(Signature)); @@ -187,7 +299,7 @@ void WebAssemblyAsmPrinter::EmitFunctionBodyStart() { } SmallVector<wasm::ValType, 16> Locals; - ValTypesFromMVTs(MFI->getLocals(), Locals); + valTypesFromMVTs(MFI->getLocals(), Locals); getTargetStreamer()->emitLocal(Locals); AsmPrinter::EmitFunctionBodyStart(); @@ -257,34 +369,34 @@ void WebAssemblyAsmPrinter::EmitInstruction(const MachineInstr *MI) { OutStreamer->AddBlankLine(); } break; + case WebAssembly::COMPILER_FENCE: + // This is a compiler barrier that prevents instruction reordering during + // backend compilation, and should not be emitted. + break; + case WebAssembly::EXTRACT_EXCEPTION_I32: + case WebAssembly::EXTRACT_EXCEPTION_I32_S: + // These are pseudo instructions that simulates popping values from stack. + // We print these only when we have -wasm-keep-registers on for assembly + // readability. 
+ if (!WasmKeepRegisters) + break; + LLVM_FALLTHROUGH; default: { WebAssemblyMCInstLower MCInstLowering(OutContext, *this); MCInst TmpInst; - MCInstLowering.Lower(MI, TmpInst); + MCInstLowering.lower(MI, TmpInst); EmitToStreamer(*OutStreamer, TmpInst); break; } } } -const MCExpr *WebAssemblyAsmPrinter::lowerConstant(const Constant *CV) { - if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) - if (GV->getValueType()->isFunctionTy()) { - return MCSymbolRefExpr::create( - getSymbol(GV), MCSymbolRefExpr::VK_WebAssembly_FUNCTION, OutContext); - } - return AsmPrinter::lowerConstant(CV); -} - bool WebAssemblyAsmPrinter::PrintAsmOperand(const MachineInstr *MI, - unsigned OpNo, unsigned AsmVariant, + unsigned OpNo, const char *ExtraCode, raw_ostream &OS) { - if (AsmVariant != 0) - report_fatal_error("There are no defined alternate asm variants"); - // First try the generic code, which knows about modifiers like 'c' and 'n'. - if (!AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, OS)) + if (!AsmPrinter::PrintAsmOperand(MI, OpNo, ExtraCode, OS)) return false; if (!ExtraCode) { @@ -300,8 +412,7 @@ bool WebAssemblyAsmPrinter::PrintAsmOperand(const MachineInstr *MI, OS << regToString(MO); return false; case MachineOperand::MO_GlobalAddress: - getSymbol(MO.getGlobal())->print(OS, MAI); - printOffset(MO.getOffset(), OS); + PrintSymbolOperand(MO, OS); return false; case MachineOperand::MO_ExternalSymbol: GetExternalSymbolSymbol(MO.getSymbolName())->print(OS, MAI); @@ -320,19 +431,15 @@ bool WebAssemblyAsmPrinter::PrintAsmOperand(const MachineInstr *MI, bool WebAssemblyAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, - unsigned AsmVariant, const char *ExtraCode, raw_ostream &OS) { - if (AsmVariant != 0) - report_fatal_error("There are no defined alternate asm variants"); - // The current approach to inline asm is that "r" constraints are expressed // as local indices, rather than values on the operand stack. 
This simplifies // using "r" as it eliminates the need to push and pop the values in a // particular order, however it also makes it impossible to have an "m" // constraint. So we don't support it. - return AsmPrinter::PrintAsmMemoryOperand(MI, OpNo, AsmVariant, ExtraCode, OS); + return AsmPrinter::PrintAsmMemoryOperand(MI, OpNo, ExtraCode, OS); } // Force static initialization. diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.h index f6cb5610bad3..4e55c81dec38 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.h @@ -1,9 +1,8 @@ // WebAssemblyAsmPrinter.h - WebAssembly implementation of AsmPrinter-*- C++ -*- // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// @@ -59,17 +58,16 @@ public: //===------------------------------------------------------------------===// void EmitEndOfAsmFile(Module &M) override; + void EmitProducerInfo(Module &M); + void EmitTargetFeatures(Module &M); void EmitJumpTableInfo() override; void EmitConstantPool() override; void EmitFunctionBodyStart() override; void EmitInstruction(const MachineInstr *MI) override; - const MCExpr *lowerConstant(const Constant *CV) override; bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, - unsigned AsmVariant, const char *ExtraCode, - raw_ostream &OS) override; + const char *ExtraCode, raw_ostream &OS) override; bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, - unsigned AsmVariant, const char *ExtraCode, - raw_ostream &OS) override; + const char *ExtraCode, raw_ostream &OS) override; MVT getRegType(unsigned RegNo) const; std::string regToString(const MachineOperand &MO); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp index fc827e9d5780..4c5d0192fc28 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyCFGSort.cpp - CFG Sorting ------------------------------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -35,6 +34,14 @@ using namespace llvm; #define DEBUG_TYPE "wasm-cfg-sort" +// Option to disable EH pad first sorting. Only for testing unwind destination +// mismatches in CFGStackify. +static cl::opt<bool> WasmDisableEHPadSort( + "wasm-disable-ehpad-sort", cl::ReallyHidden, + cl::desc( + "WebAssembly: Disable EH pad-first sort order. Testing purpose only."), + cl::init(false)); + namespace { // Wrapper for loops and exceptions @@ -133,7 +140,7 @@ FunctionPass *llvm::createWebAssemblyCFGSort() { return new WebAssemblyCFGSort(); } -static void MaybeUpdateTerminator(MachineBasicBlock *MBB) { +static void maybeUpdateTerminator(MachineBasicBlock *MBB) { #ifndef NDEBUG bool AnyBarrier = false; #endif @@ -188,10 +195,12 @@ namespace { struct CompareBlockNumbers { bool operator()(const MachineBasicBlock *A, const MachineBasicBlock *B) const { - if (A->isEHPad() && !B->isEHPad()) - return false; - if (!A->isEHPad() && B->isEHPad()) - return true; + if (!WasmDisableEHPadSort) { + if (A->isEHPad() && !B->isEHPad()) + return false; + if (!A->isEHPad() && B->isEHPad()) + return true; + } return A->getNumber() > B->getNumber(); } @@ -200,11 +209,12 @@ struct CompareBlockNumbers { struct CompareBlockNumbersBackwards { bool operator()(const MachineBasicBlock *A, const MachineBasicBlock *B) const { - // We give a higher priority to an EH pad - if (A->isEHPad() && !B->isEHPad()) - return false; - if (!A->isEHPad() && B->isEHPad()) - return true; + if (!WasmDisableEHPadSort) { + if (A->isEHPad() && !B->isEHPad()) + return false; + if (!A->isEHPad() && B->isEHPad()) + return true; + } return A->getNumber() < B->getNumber(); } @@ -228,7 +238,7 @@ struct Entry { /// interrupted by blocks not dominated by their header. /// TODO: There are many opportunities for improving the heuristics here. /// Explore them. 
-static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI, +static void sortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI, const WebAssemblyExceptionInfo &WEI, const MachineDominatorTree &MDT) { // Prepare for a topological sort: Record the number of predecessors each @@ -260,10 +270,10 @@ static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI, CompareBlockNumbersBackwards> Ready; - RegionInfo SUI(MLI, WEI); + RegionInfo RI(MLI, WEI); SmallVector<Entry, 4> Entries; for (MachineBasicBlock *MBB = &MF.front();;) { - const Region *R = SUI.getRegionFor(MBB); + const Region *R = RI.getRegionFor(MBB); if (R) { // If MBB is a region header, add it to the active region list. We can't // put any blocks that it doesn't dominate until we see the end of the @@ -320,7 +330,7 @@ static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI, if (!Next) { // If there are no more blocks to process, we're done. if (Ready.empty()) { - MaybeUpdateTerminator(MBB); + maybeUpdateTerminator(MBB); break; } for (;;) { @@ -338,7 +348,7 @@ static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI, } // Move the next block into place and iterate. 
Next->moveAfter(MBB); - MaybeUpdateTerminator(MBB); + maybeUpdateTerminator(MBB); MBB = Next; } assert(Entries.empty() && "Active sort region list not finished"); @@ -354,7 +364,7 @@ static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI, for (auto &MBB : MF) { assert(MBB.getNumber() >= 0 && "Renumbered blocks should be non-negative."); - const Region *Region = SUI.getRegionFor(&MBB); + const Region *Region = RI.getRegionFor(&MBB); if (Region && &MBB == Region->getHeader()) { if (Region->isLoop()) { @@ -379,7 +389,7 @@ static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI, for (auto Pred : MBB.predecessors()) assert(Pred->getNumber() < MBB.getNumber() && "Non-loop-header predecessors should be topologically sorted"); - assert(OnStack.count(SUI.getRegionFor(&MBB)) && + assert(OnStack.count(RI.getRegionFor(&MBB)) && "Blocks must be nested in their regions"); } while (OnStack.size() > 1 && &MBB == WebAssembly::getBottom(OnStack.back())) @@ -404,7 +414,7 @@ bool WebAssemblyCFGSort::runOnMachineFunction(MachineFunction &MF) { MF.getRegInfo().invalidateLiveness(); // Sort the blocks, with contiguous sort regions. - SortBlocks(MF, MLI, WEI, MDT); + sortBlocks(MF, MLI, WEI, MDT); return true; } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp index f8f5f4040c86..e6bfc5226e2e 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyCFGStackify.cpp - CFG Stackification -------------------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -22,26 +21,21 @@ /// //===----------------------------------------------------------------------===// -#include "MCTargetDesc/WebAssemblyMCTargetDesc.h" #include "WebAssembly.h" #include "WebAssemblyExceptionInfo.h" #include "WebAssemblyMachineFunctionInfo.h" #include "WebAssemblySubtarget.h" #include "WebAssemblyUtilities.h" +#include "llvm/ADT/Statistic.h" #include "llvm/CodeGen/MachineDominators.h" -#include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/CodeGen/MachineLoopInfo.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/Passes.h" -#include "llvm/CodeGen/WasmEHFuncInfo.h" #include "llvm/MC/MCAsmInfo.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/raw_ostream.h" using namespace llvm; #define DEBUG_TYPE "wasm-cfg-stackify" +STATISTIC(NumUnwindMismatches, "Number of EH pad unwind mismatches found"); + namespace { class WebAssemblyCFGStackify final : public MachineFunctionPass { StringRef getPassName() const override { return "WebAssembly CFG Stackify"; } @@ -60,10 +54,13 @@ class WebAssemblyCFGStackify final : public MachineFunctionPass { // over scoped regions when walking blocks. SmallVector<MachineBasicBlock *, 8> ScopeTops; + // Placing markers. 
void placeMarkers(MachineFunction &MF); void placeBlockMarker(MachineBasicBlock &MBB); void placeLoopMarker(MachineBasicBlock &MBB); void placeTryMarker(MachineBasicBlock &MBB); + void removeUnnecessaryInstrs(MachineFunction &MF); + bool fixUnwindMismatches(MachineFunction &MF); void rewriteDepthImmediates(MachineFunction &MF); void fixEndsAtEndOfFunction(MachineFunction &MF); @@ -75,16 +72,28 @@ class WebAssemblyCFGStackify final : public MachineFunctionPass { DenseMap<const MachineInstr *, MachineBasicBlock *> TryToEHPad; // <EH pad, TRY marker> map DenseMap<const MachineBasicBlock *, MachineInstr *> EHPadToTry; - // <LOOP|TRY marker, Loop/exception bottom BB> map - DenseMap<const MachineInstr *, MachineBasicBlock *> BeginToBottom; - // Helper functions to register scope information created by marker - // instructions. + // There can be an appendix block at the end of each function, shared for: + // - creating a correct signature for fallthrough returns + // - target for rethrows that need to unwind to the caller, but are trapped + // inside another try/catch + MachineBasicBlock *AppendixBB = nullptr; + MachineBasicBlock *getAppendixBlock(MachineFunction &MF) { + if (!AppendixBB) { + AppendixBB = MF.CreateMachineBasicBlock(); + // Give it a fake predecessor so that AsmPrinter prints its label. + AppendixBB->addSuccessor(AppendixBB); + MF.push_back(AppendixBB); + } + return AppendixBB; + } + + // Helper functions to register / unregister scope information created by + // marker instructions. 
void registerScope(MachineInstr *Begin, MachineInstr *End); void registerTryScope(MachineInstr *Begin, MachineInstr *End, MachineBasicBlock *EHPad); - - MachineBasicBlock *getBottom(const MachineInstr *Begin); + void unregisterScope(MachineInstr *Begin); public: static char ID; // Pass identification, replacement for typeid @@ -96,7 +105,7 @@ public: char WebAssemblyCFGStackify::ID = 0; INITIALIZE_PASS(WebAssemblyCFGStackify, DEBUG_TYPE, - "Insert BLOCK and LOOP markers for WebAssembly scopes", false, + "Insert BLOCK/LOOP/TRY markers for WebAssembly scopes", false, false) FunctionPass *llvm::createWebAssemblyCFGStackify() { @@ -108,14 +117,12 @@ FunctionPass *llvm::createWebAssemblyCFGStackify() { /// code) for a branch instruction to both branch to a block and fallthrough /// to it, so we check the actual branch operands to see if there are any /// explicit mentions. -static bool ExplicitlyBranchesTo(MachineBasicBlock *Pred, +static bool explicitlyBranchesTo(MachineBasicBlock *Pred, MachineBasicBlock *MBB) { for (MachineInstr &MI : Pred->terminators()) - // Even if a rethrow takes a BB argument, it is not a branch - if (!WebAssembly::isRethrow(MI)) - for (MachineOperand &MO : MI.explicit_operands()) - if (MO.isMBB() && MO.getMBB() == MBB) - return true; + for (MachineOperand &MO : MI.explicit_operands()) + if (MO.isMBB() && MO.getMBB() == MBB) + return true; return false; } @@ -125,7 +132,7 @@ static bool ExplicitlyBranchesTo(MachineBasicBlock *Pred, // ones that should go after the marker. In this function, AfterSet is only // used for sanity checking. static MachineBasicBlock::iterator -GetEarliestInsertPos(MachineBasicBlock *MBB, +getEarliestInsertPos(MachineBasicBlock *MBB, const SmallPtrSet<const MachineInstr *, 4> &BeforeSet, const SmallPtrSet<const MachineInstr *, 4> &AfterSet) { auto InsertPos = MBB->end(); @@ -149,7 +156,7 @@ GetEarliestInsertPos(MachineBasicBlock *MBB, // ones that should go after the marker. 
In this function, BeforeSet is only // used for sanity checking. static MachineBasicBlock::iterator -GetLatestInsertPos(MachineBasicBlock *MBB, +getLatestInsertPos(MachineBasicBlock *MBB, const SmallPtrSet<const MachineInstr *, 4> &BeforeSet, const SmallPtrSet<const MachineInstr *, 4> &AfterSet) { auto InsertPos = MBB->begin(); @@ -181,33 +188,25 @@ void WebAssemblyCFGStackify::registerTryScope(MachineInstr *Begin, EHPadToTry[EHPad] = Begin; } -// Given a LOOP/TRY marker, returns its bottom BB. Use cached information if any -// to prevent recomputation. -MachineBasicBlock * -WebAssemblyCFGStackify::getBottom(const MachineInstr *Begin) { - const auto &MLI = getAnalysis<MachineLoopInfo>(); - const auto &WEI = getAnalysis<WebAssemblyExceptionInfo>(); - if (BeginToBottom.count(Begin)) - return BeginToBottom[Begin]; - if (Begin->getOpcode() == WebAssembly::LOOP) { - MachineLoop *L = MLI.getLoopFor(Begin->getParent()); - assert(L); - BeginToBottom[Begin] = WebAssembly::getBottom(L); - } else if (Begin->getOpcode() == WebAssembly::TRY) { - WebAssemblyException *WE = WEI.getExceptionFor(TryToEHPad[Begin]); - assert(WE); - BeginToBottom[Begin] = WebAssembly::getBottom(WE); - } else - assert(false); - return BeginToBottom[Begin]; +void WebAssemblyCFGStackify::unregisterScope(MachineInstr *Begin) { + assert(BeginToEnd.count(Begin)); + MachineInstr *End = BeginToEnd[Begin]; + assert(EndToBegin.count(End)); + BeginToEnd.erase(Begin); + EndToBegin.erase(End); + MachineBasicBlock *EHPad = TryToEHPad.lookup(Begin); + if (EHPad) { + assert(EHPadToTry.count(EHPad)); + TryToEHPad.erase(Begin); + EHPadToTry.erase(EHPad); + } } /// Insert a BLOCK marker for branches to MBB (if needed). +// TODO Consider a more generalized way of handling block (and also loop and +// try) signatures when we implement the multi-value proposal later. void WebAssemblyCFGStackify::placeBlockMarker(MachineBasicBlock &MBB) { - // This should have been handled in placeTryMarker. 
- if (MBB.isEHPad()) - return; - + assert(!MBB.isEHPad()); MachineFunction &MF = *MBB.getParent(); auto &MDT = getAnalysis<MachineDominatorTree>(); const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); @@ -218,12 +217,20 @@ void WebAssemblyCFGStackify::placeBlockMarker(MachineBasicBlock &MBB) { // which reduces overall stack height. MachineBasicBlock *Header = nullptr; bool IsBranchedTo = false; + bool IsBrOnExn = false; + MachineInstr *BrOnExn = nullptr; int MBBNumber = MBB.getNumber(); for (MachineBasicBlock *Pred : MBB.predecessors()) { if (Pred->getNumber() < MBBNumber) { Header = Header ? MDT.findNearestCommonDominator(Header, Pred) : Pred; - if (ExplicitlyBranchesTo(Pred, &MBB)) + if (explicitlyBranchesTo(Pred, &MBB)) { IsBranchedTo = true; + if (Pred->getFirstTerminator()->getOpcode() == WebAssembly::BR_ON_EXN) { + IsBrOnExn = true; + assert(!BrOnExn && "There should be only one br_on_exn per block"); + BrOnExn = &*Pred->getFirstTerminator(); + } + } } } if (!Header) @@ -232,7 +239,7 @@ void WebAssemblyCFGStackify::placeBlockMarker(MachineBasicBlock &MBB) { return; assert(&MBB != &MF.front() && "Header blocks shouldn't have predecessors"); - MachineBasicBlock *LayoutPred = &*std::prev(MachineFunction::iterator(&MBB)); + MachineBasicBlock *LayoutPred = MBB.getPrevNode(); // If the nearest common dominator is inside a more deeply nested context, // walk out to the nearest scope which isn't more deeply nested. @@ -240,7 +247,7 @@ void WebAssemblyCFGStackify::placeBlockMarker(MachineBasicBlock &MBB) { if (MachineBasicBlock *ScopeTop = ScopeTops[I->getNumber()]) { if (ScopeTop->getNumber() > Header->getNumber()) { // Skip over an intervening scope. - I = std::next(MachineFunction::iterator(ScopeTop)); + I = std::next(ScopeTop->getIterator()); } else { // We found a scope level at an appropriate depth. 
Header = ScopeTop; @@ -256,13 +263,12 @@ void WebAssemblyCFGStackify::placeBlockMarker(MachineBasicBlock &MBB) { // Instructions that should go after the BLOCK. SmallPtrSet<const MachineInstr *, 4> AfterSet; for (const auto &MI : *Header) { - // If there is a previously placed LOOP/TRY marker and the bottom block of - // the loop/exception is above MBB, it should be after the BLOCK, because - // the loop/exception is nested in this block. Otherwise it should be before - // the BLOCK. - if (MI.getOpcode() == WebAssembly::LOOP || - MI.getOpcode() == WebAssembly::TRY) { - if (MBB.getNumber() > getBottom(&MI)->getNumber()) + // If there is a previously placed LOOP marker and the bottom block of the + // loop is above MBB, it should be after the BLOCK, because the loop is + // nested in this BLOCK. Otherwise it should be before the BLOCK. + if (MI.getOpcode() == WebAssembly::LOOP) { + auto *LoopBottom = BeginToEnd[&MI]->getParent()->getPrevNode(); + if (MBB.getNumber() > LoopBottom->getNumber()) AfterSet.insert(&MI); #ifndef NDEBUG else @@ -270,9 +276,10 @@ void WebAssemblyCFGStackify::placeBlockMarker(MachineBasicBlock &MBB) { #endif } - // All previously inserted BLOCK markers should be after the BLOCK because - // they are all nested blocks. - if (MI.getOpcode() == WebAssembly::BLOCK) + // All previously inserted BLOCK/TRY markers should be after the BLOCK + // because they are all nested blocks. + if (MI.getOpcode() == WebAssembly::BLOCK || + MI.getOpcode() == WebAssembly::TRY) AfterSet.insert(&MI); #ifndef NDEBUG @@ -300,11 +307,27 @@ void WebAssemblyCFGStackify::placeBlockMarker(MachineBasicBlock &MBB) { } // Add the BLOCK. - auto InsertPos = GetLatestInsertPos(Header, BeforeSet, AfterSet); + + // 'br_on_exn' extracts exnref object and pushes variable number of values + // depending on its tag. 
For C++ exception, its a single i32 value, and the + // generated code will be in the form of: + // block i32 + // br_on_exn 0, $__cpp_exception + // rethrow + // end_block + WebAssembly::ExprType ReturnType = WebAssembly::ExprType::Void; + if (IsBrOnExn) { + const char *TagName = BrOnExn->getOperand(1).getSymbolName(); + if (std::strcmp(TagName, "__cpp_exception") != 0) + llvm_unreachable("Only C++ exception is supported"); + ReturnType = WebAssembly::ExprType::I32; + } + + auto InsertPos = getLatestInsertPos(Header, BeforeSet, AfterSet); MachineInstr *Begin = BuildMI(*Header, InsertPos, Header->findDebugLoc(InsertPos), TII.get(WebAssembly::BLOCK)) - .addImm(int64_t(WebAssembly::ExprType::Void)); + .addImm(int64_t(ReturnType)); // Decide where in Header to put the END_BLOCK. BeforeSet.clear(); @@ -333,7 +356,7 @@ void WebAssemblyCFGStackify::placeBlockMarker(MachineBasicBlock &MBB) { } // Mark the end of the block. - InsertPos = GetEarliestInsertPos(&MBB, BeforeSet, AfterSet); + InsertPos = getEarliestInsertPos(&MBB, BeforeSet, AfterSet); MachineInstr *End = BuildMI(MBB, InsertPos, MBB.findPrevDebugLoc(InsertPos), TII.get(WebAssembly::END_BLOCK)); registerScope(Begin, End); @@ -358,13 +381,10 @@ void WebAssemblyCFGStackify::placeLoopMarker(MachineBasicBlock &MBB) { // The operand of a LOOP is the first block after the loop. If the loop is the // bottom of the function, insert a dummy block at the end. MachineBasicBlock *Bottom = WebAssembly::getBottom(Loop); - auto Iter = std::next(MachineFunction::iterator(Bottom)); + auto Iter = std::next(Bottom->getIterator()); if (Iter == MF.end()) { - MachineBasicBlock *Label = MF.CreateMachineBasicBlock(); - // Give it a fake predecessor so that AsmPrinter prints its label. 
- Label->addSuccessor(Label); - MF.push_back(Label); - Iter = std::next(MachineFunction::iterator(Bottom)); + getAppendixBlock(MF); + Iter = std::next(Bottom->getIterator()); } MachineBasicBlock *AfterLoop = &*Iter; @@ -383,7 +403,7 @@ void WebAssemblyCFGStackify::placeLoopMarker(MachineBasicBlock &MBB) { } // Mark the beginning of the loop. - auto InsertPos = GetEarliestInsertPos(&MBB, BeforeSet, AfterSet); + auto InsertPos = getEarliestInsertPos(&MBB, BeforeSet, AfterSet); MachineInstr *Begin = BuildMI(MBB, InsertPos, MBB.findDebugLoc(InsertPos), TII.get(WebAssembly::LOOP)) .addImm(int64_t(WebAssembly::ExprType::Void)); @@ -400,8 +420,10 @@ void WebAssemblyCFGStackify::placeLoopMarker(MachineBasicBlock &MBB) { // Mark the end of the loop (using arbitrary debug location that branched to // the loop end as its location). - InsertPos = GetEarliestInsertPos(AfterLoop, BeforeSet, AfterSet); - DebugLoc EndDL = (*AfterLoop->pred_rbegin())->findBranchDebugLoc(); + InsertPos = getEarliestInsertPos(AfterLoop, BeforeSet, AfterSet); + DebugLoc EndDL = AfterLoop->pred_empty() + ? DebugLoc() + : (*AfterLoop->pred_rbegin())->findBranchDebugLoc(); MachineInstr *End = BuildMI(*AfterLoop, InsertPos, EndDL, TII.get(WebAssembly::END_LOOP)); registerScope(Begin, End); @@ -414,14 +436,7 @@ void WebAssemblyCFGStackify::placeLoopMarker(MachineBasicBlock &MBB) { } void WebAssemblyCFGStackify::placeTryMarker(MachineBasicBlock &MBB) { - if (!MBB.isEHPad()) - return; - - // catch_all terminate pad is grouped together with catch terminate pad and - // does not need a separate TRY and END_TRY marker. 
- if (WebAssembly::isCatchAllTerminatePad(MBB)) - return; - + assert(MBB.isEHPad()); MachineFunction &MF = *MBB.getParent(); auto &MDT = getAnalysis<MachineDominatorTree>(); const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); @@ -434,7 +449,7 @@ void WebAssemblyCFGStackify::placeTryMarker(MachineBasicBlock &MBB) { for (auto *Pred : MBB.predecessors()) { if (Pred->getNumber() < MBBNumber) { Header = Header ? MDT.findNearestCommonDominator(Header, Pred) : Pred; - assert(!ExplicitlyBranchesTo(Pred, &MBB) && + assert(!explicitlyBranchesTo(Pred, &MBB) && "Explicit branch to an EH pad!"); } } @@ -447,19 +462,15 @@ void WebAssemblyCFGStackify::placeTryMarker(MachineBasicBlock &MBB) { assert(WE); MachineBasicBlock *Bottom = WebAssembly::getBottom(WE); - auto Iter = std::next(MachineFunction::iterator(Bottom)); + auto Iter = std::next(Bottom->getIterator()); if (Iter == MF.end()) { - MachineBasicBlock *Label = MF.CreateMachineBasicBlock(); - // Give it a fake predecessor so that AsmPrinter prints its label. - Label->addSuccessor(Label); - MF.push_back(Label); - Iter = std::next(MachineFunction::iterator(Bottom)); + getAppendixBlock(MF); + Iter = std::next(Bottom->getIterator()); } - MachineBasicBlock *AfterTry = &*Iter; + MachineBasicBlock *Cont = &*Iter; - assert(AfterTry != &MF.front()); - MachineBasicBlock *LayoutPred = - &*std::prev(MachineFunction::iterator(AfterTry)); + assert(Cont != &MF.front()); + MachineBasicBlock *LayoutPred = Cont->getPrevNode(); // If the nearest common dominator is inside a more deeply nested context, // walk out to the nearest scope which isn't more deeply nested. @@ -467,7 +478,7 @@ void WebAssemblyCFGStackify::placeTryMarker(MachineBasicBlock &MBB) { if (MachineBasicBlock *ScopeTop = ScopeTops[I->getNumber()]) { if (ScopeTop->getNumber() > Header->getNumber()) { // Skip over an intervening scope. 
- I = std::next(MachineFunction::iterator(ScopeTop)); + I = std::next(ScopeTop->getIterator()); } else { // We found a scope level at an appropriate depth. Header = ScopeTop; @@ -478,16 +489,17 @@ void WebAssemblyCFGStackify::placeTryMarker(MachineBasicBlock &MBB) { // Decide where in Header to put the TRY. - // Instructions that should go before the BLOCK. + // Instructions that should go before the TRY. SmallPtrSet<const MachineInstr *, 4> BeforeSet; - // Instructions that should go after the BLOCK. + // Instructions that should go after the TRY. SmallPtrSet<const MachineInstr *, 4> AfterSet; for (const auto &MI : *Header) { - // If there is a previously placed LOOP marker and the bottom block of - // the loop is above MBB, the LOOP should be after the TRY, because the - // loop is nested in this try. Otherwise it should be before the TRY. + // If there is a previously placed LOOP marker and the bottom block of the + // loop is above MBB, it should be after the TRY, because the loop is nested + // in this TRY. Otherwise it should be before the TRY. if (MI.getOpcode() == WebAssembly::LOOP) { - if (MBB.getNumber() > Bottom->getNumber()) + auto *LoopBottom = BeginToEnd[&MI]->getParent()->getPrevNode(); + if (MBB.getNumber() > LoopBottom->getNumber()) AfterSet.insert(&MI); #ifndef NDEBUG else @@ -495,14 +507,16 @@ void WebAssemblyCFGStackify::placeTryMarker(MachineBasicBlock &MBB) { #endif } - // All previously inserted TRY markers should be after the TRY because they - // are all nested trys. - if (MI.getOpcode() == WebAssembly::TRY) + // All previously inserted BLOCK/TRY markers should be after the TRY because + // they are all nested trys. + if (MI.getOpcode() == WebAssembly::BLOCK || + MI.getOpcode() == WebAssembly::TRY) AfterSet.insert(&MI); #ifndef NDEBUG - // All END_(LOOP/TRY) markers should be before the TRY. - if (MI.getOpcode() == WebAssembly::END_LOOP || + // All END_(BLOCK/LOOP/TRY) markers should be before the TRY. 
+ if (MI.getOpcode() == WebAssembly::END_BLOCK || + MI.getOpcode() == WebAssembly::END_LOOP || MI.getOpcode() == WebAssembly::END_TRY) BeforeSet.insert(&MI); #endif @@ -530,10 +544,16 @@ void WebAssemblyCFGStackify::placeTryMarker(MachineBasicBlock &MBB) { // throw. if (MBB.isPredecessor(Header)) { auto TermPos = Header->getFirstTerminator(); - if (TermPos == Header->end() || !WebAssembly::isRethrow(*TermPos)) { + if (TermPos == Header->end() || + TermPos->getOpcode() != WebAssembly::RETHROW) { for (const auto &MI : reverse(*Header)) { if (MI.isCall()) { AfterSet.insert(&MI); + // Possibly throwing calls are usually wrapped by EH_LABEL + // instructions. We don't want to split them and the call. + if (MI.getIterator() != Header->begin() && + std::prev(MI.getIterator())->isEHLabel()) + AfterSet.insert(&*std::prev(MI.getIterator())); break; } } @@ -541,7 +561,7 @@ void WebAssemblyCFGStackify::placeTryMarker(MachineBasicBlock &MBB) { } // Add the TRY. - auto InsertPos = GetLatestInsertPos(Header, BeforeSet, AfterSet); + auto InsertPos = getLatestInsertPos(Header, BeforeSet, AfterSet); MachineInstr *Begin = BuildMI(*Header, InsertPos, Header->findDebugLoc(InsertPos), TII.get(WebAssembly::TRY)) @@ -550,10 +570,11 @@ void WebAssemblyCFGStackify::placeTryMarker(MachineBasicBlock &MBB) { // Decide where in Header to put the END_TRY. BeforeSet.clear(); AfterSet.clear(); - for (const auto &MI : *AfterTry) { + for (const auto &MI : *Cont) { #ifndef NDEBUG - // END_TRY should precede existing LOOP markers. - if (MI.getOpcode() == WebAssembly::LOOP) + // END_TRY should precede existing LOOP and BLOCK markers. + if (MI.getOpcode() == WebAssembly::LOOP || + MI.getOpcode() == WebAssembly::BLOCK) AfterSet.insert(&MI); // All END_TRY markers placed earlier belong to exceptions that contains @@ -567,31 +588,595 @@ void WebAssemblyCFGStackify::placeTryMarker(MachineBasicBlock &MBB) { // the END_TRY marker should go after that. 
Otherwise, the whole try-catch // is contained within this loop, so the END_TRY should go before that. if (MI.getOpcode() == WebAssembly::END_LOOP) { - if (EndToBegin[&MI]->getParent()->getNumber() >= Header->getNumber()) + // For a LOOP to be after TRY, LOOP's BB should be after TRY's BB; if they + // are in the same BB, LOOP is always before TRY. + if (EndToBegin[&MI]->getParent()->getNumber() > Header->getNumber()) BeforeSet.insert(&MI); #ifndef NDEBUG else AfterSet.insert(&MI); #endif } + + // It is not possible for an END_BLOCK to be already in this block. } // Mark the end of the TRY. - InsertPos = GetEarliestInsertPos(AfterTry, BeforeSet, AfterSet); + InsertPos = getEarliestInsertPos(Cont, BeforeSet, AfterSet); MachineInstr *End = - BuildMI(*AfterTry, InsertPos, Bottom->findBranchDebugLoc(), + BuildMI(*Cont, InsertPos, Bottom->findBranchDebugLoc(), TII.get(WebAssembly::END_TRY)); registerTryScope(Begin, End, &MBB); - // Track the farthest-spanning scope that ends at this point. - int Number = AfterTry->getNumber(); - if (!ScopeTops[Number] || - ScopeTops[Number]->getNumber() > Header->getNumber()) - ScopeTops[Number] = Header; + // Track the farthest-spanning scope that ends at this point. We create two + // mappings: (BB with 'end_try' -> BB with 'try') and (BB with 'catch' -> BB + // with 'try'). We need to create 'catch' -> 'try' mapping here too because + // markers should not span across 'catch'. 
For example, this should not + // happen: + // + // try + // block --| (X) + // catch | + // end_block --| + // end_try + for (int Number : {Cont->getNumber(), MBB.getNumber()}) { + if (!ScopeTops[Number] || + ScopeTops[Number]->getNumber() > Header->getNumber()) + ScopeTops[Number] = Header; + } +} + +void WebAssemblyCFGStackify::removeUnnecessaryInstrs(MachineFunction &MF) { + const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); + + // When there is an unconditional branch right before a catch instruction and + // it branches to the end of end_try marker, we don't need the branch, because + // it there is no exception, the control flow transfers to that point anyway. + // bb0: + // try + // ... + // br bb2 <- Not necessary + // bb1: + // catch + // ... + // bb2: + // end + for (auto &MBB : MF) { + if (!MBB.isEHPad()) + continue; + + MachineBasicBlock *TBB = nullptr, *FBB = nullptr; + SmallVector<MachineOperand, 4> Cond; + MachineBasicBlock *EHPadLayoutPred = MBB.getPrevNode(); + MachineBasicBlock *Cont = BeginToEnd[EHPadToTry[&MBB]]->getParent(); + bool Analyzable = !TII.analyzeBranch(*EHPadLayoutPred, TBB, FBB, Cond); + if (Analyzable && ((Cond.empty() && TBB && TBB == Cont) || + (!Cond.empty() && FBB && FBB == Cont))) + TII.removeBranch(*EHPadLayoutPred); + } + + // When there are block / end_block markers that overlap with try / end_try + // markers, and the block and try markers' return types are the same, the + // block /end_block markers are not necessary, because try / end_try markers + // also can serve as boundaries for branches. + // block <- Not necessary + // try + // ... + // catch + // ... 
+ // end + // end <- Not necessary + SmallVector<MachineInstr *, 32> ToDelete; + for (auto &MBB : MF) { + for (auto &MI : MBB) { + if (MI.getOpcode() != WebAssembly::TRY) + continue; + + MachineInstr *Try = &MI, *EndTry = BeginToEnd[Try]; + MachineBasicBlock *TryBB = Try->getParent(); + MachineBasicBlock *Cont = EndTry->getParent(); + int64_t RetType = Try->getOperand(0).getImm(); + for (auto B = Try->getIterator(), E = std::next(EndTry->getIterator()); + B != TryBB->begin() && E != Cont->end() && + std::prev(B)->getOpcode() == WebAssembly::BLOCK && + E->getOpcode() == WebAssembly::END_BLOCK && + std::prev(B)->getOperand(0).getImm() == RetType; + --B, ++E) { + ToDelete.push_back(&*std::prev(B)); + ToDelete.push_back(&*E); + } + } + } + for (auto *MI : ToDelete) { + if (MI->getOpcode() == WebAssembly::BLOCK) + unregisterScope(MI); + MI->eraseFromParent(); + } +} + +bool WebAssemblyCFGStackify::fixUnwindMismatches(MachineFunction &MF) { + const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); + MachineRegisterInfo &MRI = MF.getRegInfo(); + + // Linearizing the control flow by placing TRY / END_TRY markers can create + // mismatches in unwind destinations. There are two kinds of mismatches we + // try to solve here. + + // 1. When an instruction may throw, but the EH pad it will unwind to can be + // different from the original CFG. + // + // Example: we have the following CFG: + // bb0: + // call @foo (if it throws, unwind to bb2) + // bb1: + // call @bar (if it throws, unwind to bb3) + // bb2 (ehpad): + // catch + // ... + // bb3 (ehpad) + // catch + // handler body + // + // And the CFG is sorted in this order. Then after placing TRY markers, it + // will look like: (BB markers are omitted) + // try $label1 + // try + // call @foo + // call @bar (if it throws, unwind to bb3) + // catch <- ehpad (bb2) + // ... 
+ // end_try + // catch <- ehpad (bb3) + // handler body + // end_try + // + // Now if bar() throws, it is going to end up ip in bb2, not bb3, where it + // is supposed to end up. We solve this problem by + // a. Split the target unwind EH pad (here bb3) so that the handler body is + // right after 'end_try', which means we extract the handler body out of + // the catch block. We do this because this handler body should be + // somewhere branch-eable from the inner scope. + // b. Wrap the call that has an incorrect unwind destination ('call @bar' + // here) with a nested try/catch/end_try scope, and within the new catch + // block, branches to the handler body. + // c. Place a branch after the newly inserted nested end_try so it can bypass + // the handler body, which is now outside of a catch block. + // + // The result will like as follows. (new: a) means this instruction is newly + // created in the process of doing 'a' above. + // + // block $label0 (new: placeBlockMarker) + // try $label1 + // try + // call @foo + // try (new: b) + // call @bar + // catch (new: b) + // local.set n / drop (new: b) + // br $label1 (new: b) + // end_try (new: b) + // catch <- ehpad (bb2) + // end_try + // br $label0 (new: c) + // catch <- ehpad (bb3) + // end_try (hoisted: a) + // handler body + // end_block (new: placeBlockMarker) + // + // Note that the new wrapping block/end_block will be generated later in + // placeBlockMarker. + // + // TODO Currently local.set and local.gets are generated to move exnref value + // created by catches. That's because we don't support yielding values from a + // block in LLVM machine IR yet, even though it is supported by wasm. Delete + // unnecessary local.get/local.sets once yielding values from a block is + // supported. The full EH spec requires multi-value support to do this, but + // for C++ we don't yet need it because we only throw a single i32. + // + // --- + // 2. 
The same as 1, but in this case an instruction unwinds to a caller + // function and not another EH pad. + // + // Example: we have the following CFG: + // bb0: + // call @foo (if it throws, unwind to bb2) + // bb1: + // call @bar (if it throws, unwind to caller) + // bb2 (ehpad): + // catch + // ... + // + // And the CFG is sorted in this order. Then after placing TRY markers, it + // will look like: + // try + // call @foo + // call @bar (if it throws, unwind to caller) + // catch <- ehpad (bb2) + // ... + // end_try + // + // Now if bar() throws, it is going to end up ip in bb2, when it is supposed + // throw up to the caller. + // We solve this problem by + // a. Create a new 'appendix' BB at the end of the function and put a single + // 'rethrow' instruction (+ local.get) in there. + // b. Wrap the call that has an incorrect unwind destination ('call @bar' + // here) with a nested try/catch/end_try scope, and within the new catch + // block, branches to the new appendix block. + // + // block $label0 (new: placeBlockMarker) + // try + // call @foo + // try (new: b) + // call @bar + // catch (new: b) + // local.set n (new: b) + // br $label0 (new: b) + // end_try (new: b) + // catch <- ehpad (bb2) + // ... + // end_try + // ... + // end_block (new: placeBlockMarker) + // local.get n (new: a) <- appendix block + // rethrow (new: a) + // + // In case there are multiple calls in a BB that may throw to the caller, they + // can be wrapped together in one nested try scope. (In 1, this couldn't + // happen, because may-throwing instruction there had an unwind destination, + // i.e., it was an invoke before, and there could be only one invoke within a + // BB.) 
+ + SmallVector<const MachineBasicBlock *, 8> EHPadStack; + // Range of intructions to be wrapped in a new nested try/catch + using TryRange = std::pair<MachineInstr *, MachineInstr *>; + // In original CFG, <unwind destionation BB, a vector of try ranges> + DenseMap<MachineBasicBlock *, SmallVector<TryRange, 4>> UnwindDestToTryRanges; + // In new CFG, <destination to branch to, a vector of try ranges> + DenseMap<MachineBasicBlock *, SmallVector<TryRange, 4>> BrDestToTryRanges; + // In new CFG, <destination to branch to, register containing exnref> + DenseMap<MachineBasicBlock *, unsigned> BrDestToExnReg; + + // Gather possibly throwing calls (i.e., previously invokes) whose current + // unwind destination is not the same as the original CFG. + for (auto &MBB : reverse(MF)) { + bool SeenThrowableInstInBB = false; + for (auto &MI : reverse(MBB)) { + if (MI.getOpcode() == WebAssembly::TRY) + EHPadStack.pop_back(); + else if (MI.getOpcode() == WebAssembly::CATCH) + EHPadStack.push_back(MI.getParent()); + + // In this loop we only gather calls that have an EH pad to unwind. So + // there will be at most 1 such call (= invoke) in a BB, so after we've + // seen one, we can skip the rest of BB. Also if MBB has no EH pad + // successor or MI does not throw, this is not an invoke. + if (SeenThrowableInstInBB || !MBB.hasEHPadSuccessor() || + !WebAssembly::mayThrow(MI)) + continue; + SeenThrowableInstInBB = true; + + // If the EH pad on the stack top is where this instruction should unwind + // next, we're good. + MachineBasicBlock *UnwindDest = nullptr; + for (auto *Succ : MBB.successors()) { + if (Succ->isEHPad()) { + UnwindDest = Succ; + break; + } + } + if (EHPadStack.back() == UnwindDest) + continue; + + // If not, record the range. 
+ UnwindDestToTryRanges[UnwindDest].push_back(TryRange(&MI, &MI)); + } + } + + assert(EHPadStack.empty()); + + // Gather possibly throwing calls that are supposed to unwind up to the caller + // if they throw, but currently unwind to an incorrect destination. Unlike the + // loop above, there can be multiple calls within a BB that unwind to the + // caller, which we should group together in a range. + bool NeedAppendixBlock = false; + for (auto &MBB : reverse(MF)) { + MachineInstr *RangeBegin = nullptr, *RangeEnd = nullptr; // inclusive + for (auto &MI : reverse(MBB)) { + if (MI.getOpcode() == WebAssembly::TRY) + EHPadStack.pop_back(); + else if (MI.getOpcode() == WebAssembly::CATCH) + EHPadStack.push_back(MI.getParent()); + + // If MBB has an EH pad successor, this inst does not unwind to caller. + if (MBB.hasEHPadSuccessor()) + continue; + + // We wrap up the current range when we see a marker even if we haven't + // finished a BB. + if (RangeEnd && WebAssembly::isMarker(MI.getOpcode())) { + NeedAppendixBlock = true; + // Record the range. nullptr here means the unwind destination is the + // caller. + UnwindDestToTryRanges[nullptr].push_back( + TryRange(RangeBegin, RangeEnd)); + RangeBegin = RangeEnd = nullptr; // Reset range pointers + } + + // If EHPadStack is empty, that means it is correctly unwind to caller if + // it throws, so we're good. If MI does not throw, we're good too. + if (EHPadStack.empty() || !WebAssembly::mayThrow(MI)) + continue; + + // We found an instruction that unwinds to the caller but currently has an + // incorrect unwind destination. Create a new range or increment the + // currently existing range. + if (!RangeEnd) + RangeBegin = RangeEnd = &MI; + else + RangeBegin = &MI; + } + + if (RangeEnd) { + NeedAppendixBlock = true; + // Record the range. nullptr here means the unwind destination is the + // caller. 
+ UnwindDestToTryRanges[nullptr].push_back(TryRange(RangeBegin, RangeEnd)); + RangeBegin = RangeEnd = nullptr; // Reset range pointers + } + } + + assert(EHPadStack.empty()); + // We don't have any unwind destination mismatches to resolve. + if (UnwindDestToTryRanges.empty()) + return false; + + // If we found instructions that should unwind to the caller but currently + // have incorrect unwind destination, we create an appendix block at the end + // of the function with a local.get and a rethrow instruction. + if (NeedAppendixBlock) { + auto *AppendixBB = getAppendixBlock(MF); + unsigned ExnReg = MRI.createVirtualRegister(&WebAssembly::EXNREFRegClass); + BuildMI(AppendixBB, DebugLoc(), TII.get(WebAssembly::RETHROW)) + .addReg(ExnReg); + // These instruction ranges should branch to this appendix BB. + for (auto Range : UnwindDestToTryRanges[nullptr]) + BrDestToTryRanges[AppendixBB].push_back(Range); + BrDestToExnReg[AppendixBB] = ExnReg; + } + + // We loop through unwind destination EH pads that are targeted from some + // inner scopes. Because these EH pads are destination of more than one scope + // now, we split them so that the handler body is after 'end_try'. + // - Before + // ehpad: + // catch + // local.set n / drop + // handler body + // ... + // cont: + // end_try + // + // - After + // ehpad: + // catch + // local.set n / drop + // brdest: (new) + // end_try (hoisted from 'cont' BB) + // handler body (taken from 'ehpad') + // ... + // cont: + for (auto &P : UnwindDestToTryRanges) { + NumUnwindMismatches++; + + // This means the destination is the appendix BB, which was separately + // handled above. + if (!P.first) + continue; + + MachineBasicBlock *EHPad = P.first; + + // Find 'catch' and 'local.set' or 'drop' instruction that follows the + // 'catch'. 
If -wasm-disable-explicit-locals is not set, 'catch' should be + // always followed by either 'local.set' or a 'drop', because 'br_on_exn' is + // generated after 'catch' in LateEHPrepare and we don't support blocks + // taking values yet. + MachineInstr *Catch = nullptr; + unsigned ExnReg = 0; + for (auto &MI : *EHPad) { + switch (MI.getOpcode()) { + case WebAssembly::CATCH: + Catch = &MI; + ExnReg = Catch->getOperand(0).getReg(); + break; + } + } + assert(Catch && "EH pad does not have a catch"); + assert(ExnReg != 0 && "Invalid register"); + + auto SplitPos = std::next(Catch->getIterator()); + + // Create a new BB that's gonna be the destination for branches from the + // inner mismatched scope. + MachineInstr *BeginTry = EHPadToTry[EHPad]; + MachineInstr *EndTry = BeginToEnd[BeginTry]; + MachineBasicBlock *Cont = EndTry->getParent(); + auto *BrDest = MF.CreateMachineBasicBlock(); + MF.insert(std::next(EHPad->getIterator()), BrDest); + // Hoist up the existing 'end_try'. + BrDest->insert(BrDest->end(), EndTry->removeFromParent()); + // Take out the handler body from EH pad to the new branch destination BB. + BrDest->splice(BrDest->end(), EHPad, SplitPos, EHPad->end()); + // Fix predecessor-successor relationship. + BrDest->transferSuccessors(EHPad); + EHPad->addSuccessor(BrDest); + + // All try ranges that were supposed to unwind to this EH pad now have to + // branch to this new branch dest BB. + for (auto Range : UnwindDestToTryRanges[EHPad]) + BrDestToTryRanges[BrDest].push_back(Range); + BrDestToExnReg[BrDest] = ExnReg; + + // In case we fall through to the continuation BB after the catch block, we + // now have to add a branch to it. + // - Before + // try + // ... + // (falls through to 'cont') + // catch + // handler body + // end + // <-- cont + // + // - After + // try + // ... 
+ // br %cont (new) + // catch + // end + // handler body + // <-- cont + MachineBasicBlock *EHPadLayoutPred = &*std::prev(EHPad->getIterator()); + MachineBasicBlock *TBB = nullptr, *FBB = nullptr; + SmallVector<MachineOperand, 4> Cond; + bool Analyzable = !TII.analyzeBranch(*EHPadLayoutPred, TBB, FBB, Cond); + if (Analyzable && !TBB && !FBB) { + DebugLoc DL = EHPadLayoutPred->empty() + ? DebugLoc() + : EHPadLayoutPred->rbegin()->getDebugLoc(); + BuildMI(EHPadLayoutPred, DL, TII.get(WebAssembly::BR)).addMBB(Cont); + } + } + + // For possibly throwing calls whose unwind destinations are currently + // incorrect because of CFG linearization, we wrap them with a nested + // try/catch/end_try, and within the new catch block, we branch to the correct + // handler. + // - Before + // mbb: + // call @foo <- Unwind destination mismatch! + // ehpad: + // ... + // + // - After + // mbb: + // try (new) + // call @foo + // nested-ehpad: (new) + // catch (new) + // local.set n / drop (new) + // br %brdest (new) + // nested-end: (new) + // end_try (new) + // ehpad: + // ... + for (auto &P : BrDestToTryRanges) { + MachineBasicBlock *BrDest = P.first; + auto &TryRanges = P.second; + unsigned ExnReg = BrDestToExnReg[BrDest]; + + for (auto Range : TryRanges) { + MachineInstr *RangeBegin = nullptr, *RangeEnd = nullptr; + std::tie(RangeBegin, RangeEnd) = Range; + auto *MBB = RangeBegin->getParent(); + + // Include possible EH_LABELs in the range + if (RangeBegin->getIterator() != MBB->begin() && + std::prev(RangeBegin->getIterator())->isEHLabel()) + RangeBegin = &*std::prev(RangeBegin->getIterator()); + if (std::next(RangeEnd->getIterator()) != MBB->end() && + std::next(RangeEnd->getIterator())->isEHLabel()) + RangeEnd = &*std::next(RangeEnd->getIterator()); + + MachineBasicBlock *EHPad = nullptr; + for (auto *Succ : MBB->successors()) { + if (Succ->isEHPad()) { + EHPad = Succ; + break; + } + } + + // Create the nested try instruction. 
+ MachineInstr *NestedTry = + BuildMI(*MBB, *RangeBegin, RangeBegin->getDebugLoc(), + TII.get(WebAssembly::TRY)) + .addImm(int64_t(WebAssembly::ExprType::Void)); + + // Create the nested EH pad and fill instructions in. + MachineBasicBlock *NestedEHPad = MF.CreateMachineBasicBlock(); + MF.insert(std::next(MBB->getIterator()), NestedEHPad); + NestedEHPad->setIsEHPad(); + NestedEHPad->setIsEHScopeEntry(); + BuildMI(NestedEHPad, RangeEnd->getDebugLoc(), TII.get(WebAssembly::CATCH), + ExnReg); + BuildMI(NestedEHPad, RangeEnd->getDebugLoc(), TII.get(WebAssembly::BR)) + .addMBB(BrDest); + + // Create the nested continuation BB and end_try instruction. + MachineBasicBlock *NestedCont = MF.CreateMachineBasicBlock(); + MF.insert(std::next(NestedEHPad->getIterator()), NestedCont); + MachineInstr *NestedEndTry = + BuildMI(*NestedCont, NestedCont->begin(), RangeEnd->getDebugLoc(), + TII.get(WebAssembly::END_TRY)); + // In case MBB has more instructions after the try range, move them to the + // new nested continuation BB. + NestedCont->splice(NestedCont->end(), MBB, + std::next(RangeEnd->getIterator()), MBB->end()); + registerTryScope(NestedTry, NestedEndTry, NestedEHPad); + + // Fix predecessor-successor relationship. + NestedCont->transferSuccessors(MBB); + if (EHPad) + NestedCont->removeSuccessor(EHPad); + MBB->addSuccessor(NestedEHPad); + MBB->addSuccessor(NestedCont); + NestedEHPad->addSuccessor(BrDest); + } + } + + // Renumber BBs and recalculate ScopeTop info because new BBs might have been + // created and inserted above. 
+ MF.RenumberBlocks(); + ScopeTops.clear(); + ScopeTops.resize(MF.getNumBlockIDs()); + for (auto &MBB : reverse(MF)) { + for (auto &MI : reverse(MBB)) { + if (ScopeTops[MBB.getNumber()]) + break; + switch (MI.getOpcode()) { + case WebAssembly::END_BLOCK: + case WebAssembly::END_LOOP: + case WebAssembly::END_TRY: + ScopeTops[MBB.getNumber()] = EndToBegin[&MI]->getParent(); + break; + case WebAssembly::CATCH: + ScopeTops[MBB.getNumber()] = EHPadToTry[&MBB]->getParent(); + break; + } + } + } + + // Recompute the dominator tree. + getAnalysis<MachineDominatorTree>().runOnMachineFunction(MF); + + // Place block markers for newly added branches. + SmallVector <MachineBasicBlock *, 8> BrDests; + for (auto &P : BrDestToTryRanges) + BrDests.push_back(P.first); + llvm::sort(BrDests, + [&](const MachineBasicBlock *A, const MachineBasicBlock *B) { + auto ANum = A->getNumber(); + auto BNum = B->getNumber(); + return ANum < BNum; + }); + for (auto *Dest : BrDests) + placeBlockMarker(*Dest); + + return true; } static unsigned -GetDepth(const SmallVectorImpl<const MachineBasicBlock *> &Stack, +getDepth(const SmallVectorImpl<const MachineBasicBlock *> &Stack, const MachineBasicBlock *MBB) { unsigned Depth = 0; for (auto X : reverse(Stack)) { @@ -617,19 +1202,19 @@ void WebAssemblyCFGStackify::fixEndsAtEndOfFunction(MachineFunction &MF) { if (MFI.getResults().empty()) return; - WebAssembly::ExprType retType; + WebAssembly::ExprType RetType; switch (MFI.getResults().front().SimpleTy) { case MVT::i32: - retType = WebAssembly::ExprType::I32; + RetType = WebAssembly::ExprType::I32; break; case MVT::i64: - retType = WebAssembly::ExprType::I64; + RetType = WebAssembly::ExprType::I64; break; case MVT::f32: - retType = WebAssembly::ExprType::F32; + RetType = WebAssembly::ExprType::F32; break; case MVT::f64: - retType = WebAssembly::ExprType::F64; + RetType = WebAssembly::ExprType::F64; break; case MVT::v16i8: case MVT::v8i16: @@ -637,10 +1222,10 @@ void 
WebAssemblyCFGStackify::fixEndsAtEndOfFunction(MachineFunction &MF) { case MVT::v2i64: case MVT::v4f32: case MVT::v2f64: - retType = WebAssembly::ExprType::V128; + RetType = WebAssembly::ExprType::V128; break; - case MVT::ExceptRef: - retType = WebAssembly::ExprType::ExceptRef; + case MVT::exnref: + RetType = WebAssembly::ExprType::Exnref; break; default: llvm_unreachable("unexpected return type"); @@ -651,11 +1236,11 @@ void WebAssemblyCFGStackify::fixEndsAtEndOfFunction(MachineFunction &MF) { if (MI.isPosition() || MI.isDebugInstr()) continue; if (MI.getOpcode() == WebAssembly::END_BLOCK) { - EndToBegin[&MI]->getOperand(0).setImm(int32_t(retType)); + EndToBegin[&MI]->getOperand(0).setImm(int32_t(RetType)); continue; } if (MI.getOpcode() == WebAssembly::END_LOOP) { - EndToBegin[&MI]->getOperand(0).setImm(int32_t(retType)); + EndToBegin[&MI]->getOperand(0).setImm(int32_t(RetType)); continue; } // Something other than an `end`. We're done. @@ -666,7 +1251,7 @@ void WebAssemblyCFGStackify::fixEndsAtEndOfFunction(MachineFunction &MF) { // WebAssembly functions end with an end instruction, as if the function body // were a block. -static void AppendEndToFunction(MachineFunction &MF, +static void appendEndToFunction(MachineFunction &MF, const WebAssemblyInstrInfo &TII) { BuildMI(MF.back(), MF.back().end(), MF.back().findPrevDebugLoc(MF.back().end()), @@ -675,66 +1260,42 @@ static void AppendEndToFunction(MachineFunction &MF, /// Insert LOOP/TRY/BLOCK markers at appropriate places. void WebAssemblyCFGStackify::placeMarkers(MachineFunction &MF) { - const MCAsmInfo *MCAI = MF.getTarget().getMCAsmInfo(); // We allocate one more than the number of blocks in the function to // accommodate for the possible fake block we may insert at the end. ScopeTops.resize(MF.getNumBlockIDs() + 1); // Place the LOOP for MBB if MBB is the header of a loop. for (auto &MBB : MF) placeLoopMarker(MBB); - // Place the TRY for MBB if MBB is the EH pad of an exception. 
- if (MCAI->getExceptionHandlingType() == ExceptionHandling::Wasm && - MF.getFunction().hasPersonalityFn()) - for (auto &MBB : MF) - placeTryMarker(MBB); - // Place the BLOCK for MBB if MBB is branched to from above. - for (auto &MBB : MF) - placeBlockMarker(MBB); + + const MCAsmInfo *MCAI = MF.getTarget().getMCAsmInfo(); + for (auto &MBB : MF) { + if (MBB.isEHPad()) { + // Place the TRY for MBB if MBB is the EH pad of an exception. + if (MCAI->getExceptionHandlingType() == ExceptionHandling::Wasm && + MF.getFunction().hasPersonalityFn()) + placeTryMarker(MBB); + } else { + // Place the BLOCK for MBB if MBB is branched to from above. + placeBlockMarker(MBB); + } + } + // Fix mismatches in unwind destinations induced by linearizing the code. + fixUnwindMismatches(MF); } void WebAssemblyCFGStackify::rewriteDepthImmediates(MachineFunction &MF) { - const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); // Now rewrite references to basic blocks to be depth immediates. - // We need two stacks: one for normal scopes and the other for EH pad scopes. - // EH pad stack is used to rewrite depths in rethrow instructions. 
SmallVector<const MachineBasicBlock *, 8> Stack; - SmallVector<const MachineBasicBlock *, 8> EHPadStack; for (auto &MBB : reverse(MF)) { for (auto I = MBB.rbegin(), E = MBB.rend(); I != E; ++I) { MachineInstr &MI = *I; switch (MI.getOpcode()) { case WebAssembly::BLOCK: - assert(ScopeTops[Stack.back()->getNumber()]->getNumber() <= - MBB.getNumber() && - "Block/try should be balanced"); - Stack.pop_back(); - break; - case WebAssembly::TRY: assert(ScopeTops[Stack.back()->getNumber()]->getNumber() <= MBB.getNumber() && "Block/try marker should be balanced"); Stack.pop_back(); - EHPadStack.pop_back(); - break; - - case WebAssembly::CATCH_I32: - case WebAssembly::CATCH_I64: - case WebAssembly::CATCH_ALL: - // Currently the only case there are more than one catch for a try is - // for catch terminate pad, in the form of - // try - // catch - // call @__clang_call_terminate - // unreachable - // catch_all - // call @std::terminate - // unreachable - // end - // So we shouldn't push the current BB for the second catch_all block - // here. - if (!WebAssembly::isCatchAllTerminatePad(MBB)) - EHPadStack.push_back(&MBB); break; case WebAssembly::LOOP: @@ -751,23 +1312,6 @@ void WebAssemblyCFGStackify::rewriteDepthImmediates(MachineFunction &MF) { Stack.push_back(EndToBegin[&MI]->getParent()); break; - case WebAssembly::RETHROW: { - // Rewrite MBB operands to be depth immediates. - unsigned EHPadDepth = GetDepth(EHPadStack, MI.getOperand(0).getMBB()); - MI.RemoveOperand(0); - MI.addOperand(MF, MachineOperand::CreateImm(EHPadDepth)); - break; - } - - case WebAssembly::RETHROW_TO_CALLER: { - MachineInstr *Rethrow = - BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(WebAssembly::RETHROW)) - .addImm(EHPadStack.size()); - MI.eraseFromParent(); - I = MachineBasicBlock::reverse_iterator(Rethrow); - break; - } - default: if (MI.isTerminator()) { // Rewrite MBB operands to be depth immediates. 
@@ -776,7 +1320,7 @@ void WebAssemblyCFGStackify::rewriteDepthImmediates(MachineFunction &MF) { MI.RemoveOperand(MI.getNumOperands() - 1); for (auto MO : Ops) { if (MO.isMBB()) - MO = MachineOperand::CreateImm(GetDepth(Stack, MO.getMBB())); + MO = MachineOperand::CreateImm(getDepth(Stack, MO.getMBB())); MI.addOperand(MF, MO); } } @@ -793,13 +1337,14 @@ void WebAssemblyCFGStackify::releaseMemory() { EndToBegin.clear(); TryToEHPad.clear(); EHPadToTry.clear(); - BeginToBottom.clear(); + AppendixBB = nullptr; } bool WebAssemblyCFGStackify::runOnMachineFunction(MachineFunction &MF) { LLVM_DEBUG(dbgs() << "********** CFG Stackifying **********\n" "********** Function: " << MF.getName() << '\n'); + const MCAsmInfo *MCAI = MF.getTarget().getMCAsmInfo(); releaseMemory(); @@ -809,6 +1354,11 @@ bool WebAssemblyCFGStackify::runOnMachineFunction(MachineFunction &MF) { // Place the BLOCK/LOOP/TRY markers to indicate the beginnings of scopes. placeMarkers(MF); + // Remove unnecessary instructions possibly introduced by try/end_trys. + if (MCAI->getExceptionHandlingType() == ExceptionHandling::Wasm && + MF.getFunction().hasPersonalityFn()) + removeUnnecessaryInstrs(MF); + // Convert MBB operands in terminators to relative depth immediates. 
rewriteDepthImmediates(MF); @@ -821,7 +1371,8 @@ bool WebAssemblyCFGStackify::runOnMachineFunction(MachineFunction &MF) { if (!MF.getSubtarget<WebAssemblySubtarget>() .getTargetTriple() .isOSBinFormatELF()) - AppendEndToFunction(MF, TII); + appendEndToFunction(MF, TII); + MF.getInfo<WebAssemblyFunctionInfo>()->setCFGStackified(); return true; } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp index aaa6d286598f..2537e6042b1e 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyCallIndirectFixup.cpp - Fix call_indirects -------------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -61,19 +60,19 @@ FunctionPass *llvm::createWebAssemblyCallIndirectFixup() { return new WebAssemblyCallIndirectFixup(); } -static unsigned GetNonPseudoCallIndirectOpcode(const MachineInstr &MI) { +static unsigned getNonPseudoCallIndirectOpcode(const MachineInstr &MI) { switch (MI.getOpcode()) { using namespace WebAssembly; case PCALL_INDIRECT_VOID: return CALL_INDIRECT_VOID; - case PCALL_INDIRECT_I32: - return CALL_INDIRECT_I32; - case PCALL_INDIRECT_I64: - return CALL_INDIRECT_I64; - case PCALL_INDIRECT_F32: - return CALL_INDIRECT_F32; - case PCALL_INDIRECT_F64: - return CALL_INDIRECT_F64; + case PCALL_INDIRECT_i32: + return CALL_INDIRECT_i32; + case PCALL_INDIRECT_i64: + return CALL_INDIRECT_i64; + case PCALL_INDIRECT_f32: + return CALL_INDIRECT_f32; + case PCALL_INDIRECT_f64: + return CALL_INDIRECT_f64; case PCALL_INDIRECT_v16i8: return CALL_INDIRECT_v16i8; case PCALL_INDIRECT_v8i16: @@ -86,13 +85,17 @@ static unsigned GetNonPseudoCallIndirectOpcode(const MachineInstr &MI) { return CALL_INDIRECT_v4f32; case PCALL_INDIRECT_v2f64: return CALL_INDIRECT_v2f64; + case PCALL_INDIRECT_exnref: + return CALL_INDIRECT_exnref; + case PRET_CALL_INDIRECT: + return RET_CALL_INDIRECT; default: return INSTRUCTION_LIST_END; } } -static bool IsPseudoCallIndirect(const MachineInstr &MI) { - return GetNonPseudoCallIndirectOpcode(MI) != +static bool isPseudoCallIndirect(const MachineInstr &MI) { + return getNonPseudoCallIndirectOpcode(MI) != WebAssembly::INSTRUCTION_LIST_END; } @@ -106,11 +109,11 @@ bool WebAssemblyCallIndirectFixup::runOnMachineFunction(MachineFunction &MF) { for (MachineBasicBlock &MBB : MF) { for (MachineInstr &MI : MBB) { - if (IsPseudoCallIndirect(MI)) { + if (isPseudoCallIndirect(MI)) { LLVM_DEBUG(dbgs() << "Found call_indirect: " << MI << '\n'); // Rewrite pseudo to non-pseudo - const MCInstrDesc &Desc = 
TII->get(GetNonPseudoCallIndirectOpcode(MI)); + const MCInstrDesc &Desc = TII->get(getNonPseudoCallIndirectOpcode(MI)); MI.setDesc(Desc); // Rewrite argument order diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp index 8ecc159951ad..579377c9a5d7 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyDebugValueManager.cpp - WebAssembly DebugValue Manager -===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.h index 73f317214058..06e8805b5ad0 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyDebugValueManager.h @@ -1,9 +1,8 @@ // WebAssemblyDebugValueManager.h - WebAssembly DebugValue Manager -*- C++ -*-// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyEHRestoreStackPointer.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyEHRestoreStackPointer.cpp deleted file mode 100644 index c86260ba408c..000000000000 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyEHRestoreStackPointer.cpp +++ /dev/null @@ -1,87 +0,0 @@ -//===-- WebAssemblyEHRestoreStackPointer.cpp - __stack_pointer restoration ===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -/// -/// \file -/// After the stack is unwound due to a thrown exception, the __stack_pointer -/// global can point to an invalid address. This inserts instructions that -/// restore __stack_pointer global. 
-/// -//===----------------------------------------------------------------------===// - -#include "MCTargetDesc/WebAssemblyMCTargetDesc.h" -#include "WebAssembly.h" -#include "WebAssemblySubtarget.h" -#include "WebAssemblyUtilities.h" -#include "llvm/CodeGen/MachineFrameInfo.h" -#include "llvm/MC/MCAsmInfo.h" -using namespace llvm; - -#define DEBUG_TYPE "wasm-eh-restore-stack-pointer" - -namespace { -class WebAssemblyEHRestoreStackPointer final : public MachineFunctionPass { -public: - static char ID; // Pass identification, replacement for typeid - WebAssemblyEHRestoreStackPointer() : MachineFunctionPass(ID) {} - - StringRef getPassName() const override { - return "WebAssembly Restore Stack Pointer for Exception Handling"; - } - - void getAnalysisUsage(AnalysisUsage &AU) const override { - AU.setPreservesCFG(); - MachineFunctionPass::getAnalysisUsage(AU); - } - - bool runOnMachineFunction(MachineFunction &MF) override; -}; -} // end anonymous namespace - -char WebAssemblyEHRestoreStackPointer::ID = 0; -INITIALIZE_PASS(WebAssemblyEHRestoreStackPointer, DEBUG_TYPE, - "Restore Stack Pointer for Exception Handling", true, false) - -FunctionPass *llvm::createWebAssemblyEHRestoreStackPointer() { - return new WebAssemblyEHRestoreStackPointer(); -} - -bool WebAssemblyEHRestoreStackPointer::runOnMachineFunction( - MachineFunction &MF) { - LLVM_DEBUG(dbgs() << "********** EH Restore Stack Pointer **********\n" - "********** Function: " - << MF.getName() << '\n'); - - const auto *FrameLowering = static_cast<const WebAssemblyFrameLowering *>( - MF.getSubtarget().getFrameLowering()); - if (!FrameLowering->needsPrologForEH(MF)) - return false; - bool Changed = false; - - for (auto &MBB : MF) { - if (!MBB.isEHPad()) - continue; - Changed = true; - - // Insert __stack_pointer restoring instructions at the beginning of each EH - // pad, after the catch instruction. 
(Catch instructions may have been - // reordered, and catch_all instructions have not been inserted yet, but - // those cases are handled in LateEHPrepare). - // - // Here it is safe to assume that SP32 holds the latest value of - // __stack_pointer, because the only exception for this case is when a - // function uses the red zone, but that only happens with leaf functions, - // and we don't restore __stack_pointer in leaf functions anyway. - auto InsertPos = MBB.begin(); - if (WebAssembly::isCatch(*MBB.begin())) - InsertPos++; - FrameLowering->writeSPToGlobal(WebAssembly::SP32, MF, MBB, InsertPos, - MBB.begin()->getDebugLoc()); - } - return Changed; -} diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp index 6b3a3e765786..0387957b14c2 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp @@ -1,9 +1,8 @@ //===--- WebAssemblyExceptionInfo.cpp - Exception Infomation --------------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -51,10 +50,6 @@ void WebAssemblyExceptionInfo::recalculate( MachineBasicBlock *EHPad = DomNode->getBlock(); if (!EHPad->isEHPad()) continue; - // We group catch & catch-all terminate pads together, so skip the second - // one - if (WebAssembly::isCatchAllTerminatePad(*EHPad)) - continue; auto *WE = new WebAssemblyException(EHPad); discoverAndMapException(WE, MDT, MDF); Exceptions.push_back(WE); @@ -105,16 +100,6 @@ void WebAssemblyExceptionInfo::discoverAndMapException( // Map blocks that belong to a catchpad / cleanuppad MachineBasicBlock *EHPad = WE->getEHPad(); - - // We group catch & catch-all terminate pads together within an exception - if (WebAssembly::isCatchTerminatePad(*EHPad)) { - assert(EHPad->succ_size() == 1 && - "Catch terminate pad has more than one successors"); - changeExceptionFor(EHPad, WE); - changeExceptionFor(*(EHPad->succ_begin()), WE); - return; - } - SmallVector<MachineBasicBlock *, 8> WL; WL.push_back(EHPad); while (!WL.empty()) { diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.h index fcd7e2366e03..9a90d7df7d47 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.h @@ -1,9 +1,8 @@ //===-- WebAssemblyExceptionInfo.h - WebAssembly Exception Info -*- C++ -*-===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp index 27aabe6ba0bd..dbd62179f055 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyExplicitLocals.cpp - Make Locals Explicit --------------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -91,13 +90,13 @@ static unsigned getDropOpcode(const TargetRegisterClass *RC) { return WebAssembly::DROP_F64; if (RC == &WebAssembly::V128RegClass) return WebAssembly::DROP_V128; - if (RC == &WebAssembly::EXCEPT_REFRegClass) - return WebAssembly::DROP_EXCEPT_REF; + if (RC == &WebAssembly::EXNREFRegClass) + return WebAssembly::DROP_EXNREF; llvm_unreachable("Unexpected register class"); } /// Get the appropriate local.get opcode for the given register class. 
-static unsigned getGetLocalOpcode(const TargetRegisterClass *RC) { +static unsigned getLocalGetOpcode(const TargetRegisterClass *RC) { if (RC == &WebAssembly::I32RegClass) return WebAssembly::LOCAL_GET_I32; if (RC == &WebAssembly::I64RegClass) @@ -108,13 +107,13 @@ static unsigned getGetLocalOpcode(const TargetRegisterClass *RC) { return WebAssembly::LOCAL_GET_F64; if (RC == &WebAssembly::V128RegClass) return WebAssembly::LOCAL_GET_V128; - if (RC == &WebAssembly::EXCEPT_REFRegClass) - return WebAssembly::LOCAL_GET_EXCEPT_REF; + if (RC == &WebAssembly::EXNREFRegClass) + return WebAssembly::LOCAL_GET_EXNREF; llvm_unreachable("Unexpected register class"); } /// Get the appropriate local.set opcode for the given register class. -static unsigned getSetLocalOpcode(const TargetRegisterClass *RC) { +static unsigned getLocalSetOpcode(const TargetRegisterClass *RC) { if (RC == &WebAssembly::I32RegClass) return WebAssembly::LOCAL_SET_I32; if (RC == &WebAssembly::I64RegClass) @@ -125,13 +124,13 @@ static unsigned getSetLocalOpcode(const TargetRegisterClass *RC) { return WebAssembly::LOCAL_SET_F64; if (RC == &WebAssembly::V128RegClass) return WebAssembly::LOCAL_SET_V128; - if (RC == &WebAssembly::EXCEPT_REFRegClass) - return WebAssembly::LOCAL_SET_EXCEPT_REF; + if (RC == &WebAssembly::EXNREFRegClass) + return WebAssembly::LOCAL_SET_EXNREF; llvm_unreachable("Unexpected register class"); } /// Get the appropriate local.tee opcode for the given register class. 
-static unsigned getTeeLocalOpcode(const TargetRegisterClass *RC) { +static unsigned getLocalTeeOpcode(const TargetRegisterClass *RC) { if (RC == &WebAssembly::I32RegClass) return WebAssembly::LOCAL_TEE_I32; if (RC == &WebAssembly::I64RegClass) @@ -142,8 +141,8 @@ static unsigned getTeeLocalOpcode(const TargetRegisterClass *RC) { return WebAssembly::LOCAL_TEE_F64; if (RC == &WebAssembly::V128RegClass) return WebAssembly::LOCAL_TEE_V128; - if (RC == &WebAssembly::EXCEPT_REFRegClass) - return WebAssembly::LOCAL_TEE_EXCEPT_REF; + if (RC == &WebAssembly::EXNREFRegClass) + return WebAssembly::LOCAL_TEE_EXNREF; llvm_unreachable("Unexpected register class"); } @@ -159,8 +158,8 @@ static MVT typeForRegClass(const TargetRegisterClass *RC) { return MVT::f64; if (RC == &WebAssembly::V128RegClass) return MVT::v16i8; - if (RC == &WebAssembly::EXCEPT_REFRegClass) - return MVT::ExceptRef; + if (RC == &WebAssembly::EXNREFRegClass) + return MVT::exnref; llvm_unreachable("unrecognized register class"); } @@ -206,7 +205,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { E = MF.begin()->end(); I != E;) { MachineInstr &MI = *I++; - if (!WebAssembly::isArgument(MI)) + if (!WebAssembly::isArgument(MI.getOpcode())) break; unsigned Reg = MI.getOperand(0).getReg(); assert(!MFI.isVRegStackified(Reg)); @@ -228,7 +227,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { for (MachineBasicBlock &MBB : MF) { for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) { MachineInstr &MI = *I++; - assert(!WebAssembly::isArgument(MI)); + assert(!WebAssembly::isArgument(MI.getOpcode())); if (MI.isDebugInstr() || MI.isLabel()) continue; @@ -236,7 +235,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { // Replace tee instructions with local.tee. The difference is that tee // instructions have two defs, while local.tee instructions have one def // and an index of a local to write to. 
- if (WebAssembly::isTee(MI)) { + if (WebAssembly::isTee(MI.getOpcode())) { assert(MFI.isVRegStackified(MI.getOperand(0).getReg())); assert(!MFI.isVRegStackified(MI.getOperand(1).getReg())); unsigned OldReg = MI.getOperand(2).getReg(); @@ -246,7 +245,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { if (!MFI.isVRegStackified(OldReg)) { unsigned LocalId = getLocalId(Reg2Local, CurLocal, OldReg); unsigned NewReg = MRI.createVirtualRegister(RC); - unsigned Opc = getGetLocalOpcode(RC); + unsigned Opc = getLocalGetOpcode(RC); BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(Opc), NewReg) .addImm(LocalId); MI.getOperand(2).setReg(NewReg); @@ -256,7 +255,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { // Replace the TEE with a LOCAL_TEE. unsigned LocalId = getLocalId(Reg2Local, CurLocal, MI.getOperand(1).getReg()); - unsigned Opc = getTeeLocalOpcode(RC); + unsigned Opc = getLocalTeeOpcode(RC); BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(Opc), MI.getOperand(0).getReg()) .addImm(LocalId) @@ -275,7 +274,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { if (!MFI.isVRegStackified(OldReg)) { const TargetRegisterClass *RC = MRI.getRegClass(OldReg); unsigned NewReg = MRI.createVirtualRegister(RC); - auto InsertPt = std::next(MachineBasicBlock::iterator(&MI)); + auto InsertPt = std::next(MI.getIterator()); if (MI.getOpcode() == WebAssembly::IMPLICIT_DEF) { MI.eraseFromParent(); Changed = true; @@ -290,7 +289,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { Drop->getOperand(0).setIsKill(); } else { unsigned LocalId = getLocalId(Reg2Local, CurLocal, OldReg); - unsigned Opc = getSetLocalOpcode(RC); + unsigned Opc = getLocalSetOpcode(RC); BuildMI(MBB, InsertPt, MI.getDebugLoc(), TII->get(Opc)) .addImm(LocalId) .addReg(NewReg); @@ -317,7 +316,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { // with inline asm register operands is to 
provide local indices as // immediates. if (MO.isDef()) { - assert(MI.getOpcode() == TargetOpcode::INLINEASM); + assert(MI.isInlineAsm()); unsigned LocalId = getLocalId(Reg2Local, CurLocal, OldReg); // If this register operand is tied to another operand, we can't // change it to an immediate. Untie it first. @@ -335,7 +334,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { // Our contract with inline asm register operands is to provide local // indices as immediates. - if (MI.getOpcode() == TargetOpcode::INLINEASM) { + if (MI.isInlineAsm()) { unsigned LocalId = getLocalId(Reg2Local, CurLocal, OldReg); // Untie it first if this reg operand is tied to another operand. MI.untieRegOperand(MI.getOperandNo(&MO)); @@ -347,7 +346,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { unsigned LocalId = getLocalId(Reg2Local, CurLocal, OldReg); const TargetRegisterClass *RC = MRI.getRegClass(OldReg); unsigned NewReg = MRI.createVirtualRegister(RC); - unsigned Opc = getGetLocalOpcode(RC); + unsigned Opc = getLocalGetOpcode(RC); InsertPt = BuildMI(MBB, InsertPt, MI.getDebugLoc(), TII->get(Opc), NewReg) .addImm(LocalId); @@ -357,7 +356,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { } // Coalesce and eliminate COPY instructions. 
- if (WebAssembly::isCopy(MI)) { + if (WebAssembly::isCopy(MI.getOpcode())) { MRI.replaceRegWith(MI.getOperand(1).getReg(), MI.getOperand(0).getReg()); MI.eraseFromParent(); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp index 3856700cca94..2552e9150833 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyFastISel.cpp - WebAssembly FastISel implementation -----===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -50,22 +49,22 @@ class WebAssemblyFastISel final : public FastISel { // All possible address modes. class Address { public: - typedef enum { RegBase, FrameIndexBase } BaseKind; + using BaseKind = enum { RegBase, FrameIndexBase }; private: - BaseKind Kind; + BaseKind Kind = RegBase; union { unsigned Reg; int FI; } Base; - int64_t Offset; + int64_t Offset = 0; - const GlobalValue *GV; + const GlobalValue *GV = nullptr; public: // Innocuous defaults for our address. 
- Address() : Kind(RegBase), Offset(0), GV(0) { Base.Reg = 0; } + Address() { Base.Reg = 0; } void setKind(BaseKind K) { assert(!isSet() && "Can't change kind with non-zero base"); Kind = K; @@ -92,9 +91,9 @@ class WebAssemblyFastISel final : public FastISel { return Base.FI; } - void setOffset(int64_t Offset_) { - assert(Offset_ >= 0 && "Offsets must be non-negative"); - Offset = Offset_; + void setOffset(int64_t NewOffset) { + assert(NewOffset >= 0 && "Offsets must be non-negative"); + Offset = NewOffset; } int64_t getOffset() const { return Offset; } void setGlobalValue(const GlobalValue *G) { GV = G; } @@ -116,7 +115,7 @@ class WebAssemblyFastISel final : public FastISel { private: // Utility helper routines MVT::SimpleValueType getSimpleType(Type *Ty) { - EVT VT = TLI.getValueType(DL, Ty, /*HandleUnknown=*/true); + EVT VT = TLI.getValueType(DL, Ty, /*AllowUnknown=*/true); return VT.isSimple() ? VT.getSimpleVT().SimpleTy : MVT::INVALID_SIMPLE_VALUE_TYPE; } @@ -130,7 +129,7 @@ private: case MVT::i64: case MVT::f32: case MVT::f64: - case MVT::ExceptRef: + case MVT::exnref: return VT; case MVT::f16: return MVT::f32; @@ -208,10 +207,9 @@ public: } // end anonymous namespace bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) { - const User *U = nullptr; unsigned Opcode = Instruction::UserOp1; - if (const Instruction *I = dyn_cast<Instruction>(Obj)) { + if (const auto *I = dyn_cast<Instruction>(Obj)) { // Don't walk into other basic blocks unless the object is an alloca from // another block, otherwise it may not have a virtual register assigned. 
if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) || @@ -219,7 +217,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) { Opcode = I->getOpcode(); U = I; } - } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) { + } else if (const auto *C = dyn_cast<ConstantExpr>(Obj)) { Opcode = C->getOpcode(); U = C; } @@ -230,9 +228,13 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) { // address spaces. return false; - if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) { + if (const auto *GV = dyn_cast<GlobalValue>(Obj)) { + if (TLI.isPositionIndependent()) + return false; if (Addr.getGlobalValue()) return false; + if (GV->isThreadLocal()) + return false; Addr.setGlobalValue(GV); return true; } @@ -275,7 +277,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) { } else { uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType()); for (;;) { - if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) { + if (const auto *CI = dyn_cast<ConstantInt>(Op)) { // Constant-offset addressing. TmpOffset += CI->getSExtValue() * S; break; @@ -290,8 +292,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) { } if (canFoldAddIntoGEP(U, Op)) { // A compatible add with a constant operand. Fold the constant. - ConstantInt *CI = - cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1)); + auto *CI = cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1)); TmpOffset += CI->getSExtValue() * S; // Iterate on the other operand. 
Op = cast<AddOperator>(Op)->getOperand(0); @@ -315,7 +316,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) { break; } case Instruction::Alloca: { - const AllocaInst *AI = cast<AllocaInst>(Obj); + const auto *AI = cast<AllocaInst>(Obj); DenseMap<const AllocaInst *, int>::iterator SI = FuncInfo.StaticAllocaMap.find(AI); if (SI != FuncInfo.StaticAllocaMap.end()) { @@ -336,7 +337,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) { if (isa<ConstantInt>(LHS)) std::swap(LHS, RHS); - if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) { + if (const auto *CI = dyn_cast<ConstantInt>(RHS)) { uint64_t TmpOffset = Addr.getOffset() + CI->getSExtValue(); if (int64_t(TmpOffset) >= 0) { Addr.setOffset(TmpOffset); @@ -356,7 +357,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) { const Value *LHS = U->getOperand(0); const Value *RHS = U->getOperand(1); - if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) { + if (const auto *CI = dyn_cast<ConstantInt>(RHS)) { int64_t TmpOffset = Addr.getOffset() - CI->getSExtValue(); if (TmpOffset >= 0) { Addr.setOffset(TmpOffset); @@ -416,7 +417,7 @@ unsigned WebAssemblyFastISel::maskI1Value(unsigned Reg, const Value *V) { } unsigned WebAssemblyFastISel::getRegForI1Value(const Value *V, bool &Not) { - if (const ICmpInst *ICmp = dyn_cast<ICmpInst>(V)) + if (const auto *ICmp = dyn_cast<ICmpInst>(V)) if (const ConstantInt *C = dyn_cast<ConstantInt>(ICmp->getOperand(1))) if (ICmp->isEquality() && C->isZero() && C->getType()->isIntegerTy(32)) { Not = ICmp->isTrueWhenEqual(); @@ -524,7 +525,10 @@ unsigned WebAssemblyFastISel::zeroExtend(unsigned Reg, const Value *V, return Result; } - return zeroExtendToI32(Reg, V, From); + if (To == MVT::i32) + return zeroExtendToI32(Reg, V, From); + + return 0; } unsigned WebAssemblyFastISel::signExtend(unsigned Reg, const Value *V, @@ -543,7 +547,10 @@ unsigned WebAssemblyFastISel::signExtend(unsigned Reg, const Value *V, 
return Result; } - return signExtendToI32(Reg, V, From); + if (To == MVT::i32) + return signExtendToI32(Reg, V, From); + + return 0; } unsigned WebAssemblyFastISel::getRegForUnsignedValue(const Value *V) { @@ -607,6 +614,10 @@ unsigned WebAssemblyFastISel::fastMaterializeAlloca(const AllocaInst *AI) { unsigned WebAssemblyFastISel::fastMaterializeConstant(const Constant *C) { if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) { + if (TLI.isPositionIndependent()) + return 0; + if (GV->isThreadLocal()) + return 0; unsigned ResultReg = createResultReg(Subtarget->hasAddr64() ? &WebAssembly::I64RegClass : &WebAssembly::I32RegClass); @@ -629,14 +640,14 @@ bool WebAssemblyFastISel::fastLowerArguments() { if (F->isVarArg()) return false; - unsigned i = 0; + unsigned I = 0; for (auto const &Arg : F->args()) { const AttributeList &Attrs = F->getAttributes(); - if (Attrs.hasParamAttribute(i, Attribute::ByVal) || - Attrs.hasParamAttribute(i, Attribute::SwiftSelf) || - Attrs.hasParamAttribute(i, Attribute::SwiftError) || - Attrs.hasParamAttribute(i, Attribute::InAlloca) || - Attrs.hasParamAttribute(i, Attribute::Nest)) + if (Attrs.hasParamAttribute(I, Attribute::ByVal) || + Attrs.hasParamAttribute(I, Attribute::SwiftSelf) || + Attrs.hasParamAttribute(I, Attribute::SwiftError) || + Attrs.hasParamAttribute(I, Attribute::InAlloca) || + Attrs.hasParamAttribute(I, Attribute::Nest)) return false; Type *ArgTy = Arg.getType(); @@ -691,19 +702,19 @@ bool WebAssemblyFastISel::fastLowerArguments() { Opc = WebAssembly::ARGUMENT_v2f64; RC = &WebAssembly::V128RegClass; break; - case MVT::ExceptRef: - Opc = WebAssembly::ARGUMENT_ExceptRef; - RC = &WebAssembly::EXCEPT_REFRegClass; + case MVT::exnref: + Opc = WebAssembly::ARGUMENT_exnref; + RC = &WebAssembly::EXNREFRegClass; break; default: return false; } unsigned ResultReg = createResultReg(RC); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) - .addImm(i); + .addImm(I); updateValueMap(&Arg, ResultReg); - ++i; + 
++I; } MRI.addLiveIn(WebAssembly::ARGUMENTS); @@ -732,8 +743,9 @@ bool WebAssemblyFastISel::fastLowerArguments() { } bool WebAssemblyFastISel::selectCall(const Instruction *I) { - const CallInst *Call = cast<CallInst>(I); + const auto *Call = cast<CallInst>(I); + // TODO: Support tail calls in FastISel if (Call->isMustTailCall() || Call->isInlineAsm() || Call->getFunctionType()->isVarArg()) return false; @@ -762,19 +774,19 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) { case MVT::i8: case MVT::i16: case MVT::i32: - Opc = IsDirect ? WebAssembly::CALL_I32 : WebAssembly::PCALL_INDIRECT_I32; + Opc = IsDirect ? WebAssembly::CALL_i32 : WebAssembly::PCALL_INDIRECT_i32; ResultReg = createResultReg(&WebAssembly::I32RegClass); break; case MVT::i64: - Opc = IsDirect ? WebAssembly::CALL_I64 : WebAssembly::PCALL_INDIRECT_I64; + Opc = IsDirect ? WebAssembly::CALL_i64 : WebAssembly::PCALL_INDIRECT_i64; ResultReg = createResultReg(&WebAssembly::I64RegClass); break; case MVT::f32: - Opc = IsDirect ? WebAssembly::CALL_F32 : WebAssembly::PCALL_INDIRECT_F32; + Opc = IsDirect ? WebAssembly::CALL_f32 : WebAssembly::PCALL_INDIRECT_f32; ResultReg = createResultReg(&WebAssembly::F32RegClass); break; case MVT::f64: - Opc = IsDirect ? WebAssembly::CALL_F64 : WebAssembly::PCALL_INDIRECT_F64; + Opc = IsDirect ? WebAssembly::CALL_f64 : WebAssembly::PCALL_INDIRECT_f64; ResultReg = createResultReg(&WebAssembly::F64RegClass); break; case MVT::v16i8: @@ -807,10 +819,10 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) { : WebAssembly::PCALL_INDIRECT_v2f64; ResultReg = createResultReg(&WebAssembly::V128RegClass); break; - case MVT::ExceptRef: - Opc = IsDirect ? WebAssembly::CALL_EXCEPT_REF - : WebAssembly::PCALL_INDIRECT_EXCEPT_REF; - ResultReg = createResultReg(&WebAssembly::EXCEPT_REFRegClass); + case MVT::exnref: + Opc = IsDirect ? 
WebAssembly::CALL_exnref + : WebAssembly::PCALL_INDIRECT_exnref; + ResultReg = createResultReg(&WebAssembly::EXNREFRegClass); break; default: return false; @@ -818,25 +830,25 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) { } SmallVector<unsigned, 8> Args; - for (unsigned i = 0, e = Call->getNumArgOperands(); i < e; ++i) { - Value *V = Call->getArgOperand(i); + for (unsigned I = 0, E = Call->getNumArgOperands(); I < E; ++I) { + Value *V = Call->getArgOperand(I); MVT::SimpleValueType ArgTy = getSimpleType(V->getType()); if (ArgTy == MVT::INVALID_SIMPLE_VALUE_TYPE) return false; const AttributeList &Attrs = Call->getAttributes(); - if (Attrs.hasParamAttribute(i, Attribute::ByVal) || - Attrs.hasParamAttribute(i, Attribute::SwiftSelf) || - Attrs.hasParamAttribute(i, Attribute::SwiftError) || - Attrs.hasParamAttribute(i, Attribute::InAlloca) || - Attrs.hasParamAttribute(i, Attribute::Nest)) + if (Attrs.hasParamAttribute(I, Attribute::ByVal) || + Attrs.hasParamAttribute(I, Attribute::SwiftSelf) || + Attrs.hasParamAttribute(I, Attribute::SwiftError) || + Attrs.hasParamAttribute(I, Attribute::InAlloca) || + Attrs.hasParamAttribute(I, Attribute::Nest)) return false; unsigned Reg; - if (Attrs.hasParamAttribute(i, Attribute::SExt)) + if (Attrs.hasParamAttribute(I, Attribute::SExt)) Reg = getRegForSignedValue(V); - else if (Attrs.hasParamAttribute(i, Attribute::ZExt)) + else if (Attrs.hasParamAttribute(I, Attribute::ZExt)) Reg = getRegForUnsignedValue(V); else Reg = getRegForValue(V); @@ -847,6 +859,13 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) { Args.push_back(Reg); } + unsigned CalleeReg = 0; + if (!IsDirect) { + CalleeReg = getRegForValue(Call->getCalledValue()); + if (!CalleeReg) + return false; + } + auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)); if (!IsVoid) @@ -854,12 +873,8 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) { if (IsDirect) MIB.addGlobalAddress(Func); - else { - unsigned Reg = 
getRegForValue(Call->getCalledValue()); - if (Reg == 0) - return false; - MIB.addReg(Reg); - } + else + MIB.addReg(CalleeReg); for (unsigned ArgReg : Args) MIB.addReg(ArgReg); @@ -870,7 +885,7 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) { } bool WebAssemblyFastISel::selectSelect(const Instruction *I) { - const SelectInst *Select = cast<SelectInst>(I); + const auto *Select = cast<SelectInst>(I); bool Not; unsigned CondReg = getRegForI1Value(Select->getCondition(), Not); @@ -910,9 +925,9 @@ bool WebAssemblyFastISel::selectSelect(const Instruction *I) { Opc = WebAssembly::SELECT_F64; RC = &WebAssembly::F64RegClass; break; - case MVT::ExceptRef: - Opc = WebAssembly::SELECT_EXCEPT_REF; - RC = &WebAssembly::EXCEPT_REFRegClass; + case MVT::exnref: + Opc = WebAssembly::SELECT_EXNREF; + RC = &WebAssembly::EXNREFRegClass; break; default: return false; @@ -929,7 +944,7 @@ bool WebAssemblyFastISel::selectSelect(const Instruction *I) { } bool WebAssemblyFastISel::selectTrunc(const Instruction *I) { - const TruncInst *Trunc = cast<TruncInst>(I); + const auto *Trunc = cast<TruncInst>(I); unsigned Reg = getRegForValue(Trunc->getOperand(0)); if (Reg == 0) @@ -948,7 +963,7 @@ bool WebAssemblyFastISel::selectTrunc(const Instruction *I) { } bool WebAssemblyFastISel::selectZExt(const Instruction *I) { - const ZExtInst *ZExt = cast<ZExtInst>(I); + const auto *ZExt = cast<ZExtInst>(I); const Value *Op = ZExt->getOperand(0); MVT::SimpleValueType From = getSimpleType(Op->getType()); @@ -965,7 +980,7 @@ bool WebAssemblyFastISel::selectZExt(const Instruction *I) { } bool WebAssemblyFastISel::selectSExt(const Instruction *I) { - const SExtInst *SExt = cast<SExtInst>(I); + const auto *SExt = cast<SExtInst>(I); const Value *Op = SExt->getOperand(0); MVT::SimpleValueType From = getSimpleType(Op->getType()); @@ -982,11 +997,11 @@ bool WebAssemblyFastISel::selectSExt(const Instruction *I) { } bool WebAssemblyFastISel::selectICmp(const Instruction *I) { - const ICmpInst *ICmp = 
cast<ICmpInst>(I); + const auto *ICmp = cast<ICmpInst>(I); bool I32 = getSimpleType(ICmp->getOperand(0)->getType()) != MVT::i64; unsigned Opc; - bool isSigned = false; + bool IsSigned = false; switch (ICmp->getPredicate()) { case ICmpInst::ICMP_EQ: Opc = I32 ? WebAssembly::EQ_I32 : WebAssembly::EQ_I64; @@ -1008,29 +1023,29 @@ bool WebAssemblyFastISel::selectICmp(const Instruction *I) { break; case ICmpInst::ICMP_SGT: Opc = I32 ? WebAssembly::GT_S_I32 : WebAssembly::GT_S_I64; - isSigned = true; + IsSigned = true; break; case ICmpInst::ICMP_SGE: Opc = I32 ? WebAssembly::GE_S_I32 : WebAssembly::GE_S_I64; - isSigned = true; + IsSigned = true; break; case ICmpInst::ICMP_SLT: Opc = I32 ? WebAssembly::LT_S_I32 : WebAssembly::LT_S_I64; - isSigned = true; + IsSigned = true; break; case ICmpInst::ICMP_SLE: Opc = I32 ? WebAssembly::LE_S_I32 : WebAssembly::LE_S_I64; - isSigned = true; + IsSigned = true; break; default: return false; } - unsigned LHS = getRegForPromotedValue(ICmp->getOperand(0), isSigned); + unsigned LHS = getRegForPromotedValue(ICmp->getOperand(0), IsSigned); if (LHS == 0) return false; - unsigned RHS = getRegForPromotedValue(ICmp->getOperand(1), isSigned); + unsigned RHS = getRegForPromotedValue(ICmp->getOperand(1), IsSigned); if (RHS == 0) return false; @@ -1043,7 +1058,7 @@ bool WebAssemblyFastISel::selectICmp(const Instruction *I) { } bool WebAssemblyFastISel::selectFCmp(const Instruction *I) { - const FCmpInst *FCmp = cast<FCmpInst>(I); + const auto *FCmp = cast<FCmpInst>(I); unsigned LHS = getRegForValue(FCmp->getOperand(0)); if (LHS == 0) @@ -1139,7 +1154,7 @@ bool WebAssemblyFastISel::selectBitCast(const Instruction *I) { } bool WebAssemblyFastISel::selectLoad(const Instruction *I) { - const LoadInst *Load = cast<LoadInst>(I); + const auto *Load = cast<LoadInst>(I); if (Load->isAtomic()) return false; if (!Subtarget->hasSIMD128() && Load->getType()->isVectorTy()) @@ -1196,7 +1211,7 @@ bool WebAssemblyFastISel::selectLoad(const Instruction *I) { } bool 
WebAssemblyFastISel::selectStore(const Instruction *I) { - const StoreInst *Store = cast<StoreInst>(I); + const auto *Store = cast<StoreInst>(I); if (Store->isAtomic()) return false; if (!Subtarget->hasSIMD128() && @@ -1252,7 +1267,7 @@ bool WebAssemblyFastISel::selectStore(const Instruction *I) { } bool WebAssemblyFastISel::selectBr(const Instruction *I) { - const BranchInst *Br = cast<BranchInst>(I); + const auto *Br = cast<BranchInst>(I); if (Br->isUnconditional()) { MachineBasicBlock *MSucc = FuncInfo.MBBMap[Br->getSuccessor(0)]; fastEmitBranch(MSucc, Br->getDebugLoc()); @@ -1283,7 +1298,7 @@ bool WebAssemblyFastISel::selectRet(const Instruction *I) { if (!FuncInfo.CanLowerReturn) return false; - const ReturnInst *Ret = cast<ReturnInst>(I); + const auto *Ret = cast<ReturnInst>(I); if (Ret->getNumOperands() == 0) { BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, @@ -1330,8 +1345,8 @@ bool WebAssemblyFastISel::selectRet(const Instruction *I) { case MVT::v2f64: Opc = WebAssembly::RETURN_v2f64; break; - case MVT::ExceptRef: - Opc = WebAssembly::RETURN_EXCEPT_REF; + case MVT::exnref: + Opc = WebAssembly::RETURN_EXNREF; break; default: return false; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp index 13f37f611ed0..b7fc65401fc4 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyFixFunctionBitcasts.cpp - Fix function bitcasts --------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -65,12 +64,12 @@ ModulePass *llvm::createWebAssemblyFixFunctionBitcasts() { // Recursively descend the def-use lists from V to find non-bitcast users of // bitcasts of V. -static void FindUses(Value *V, Function &F, +static void findUses(Value *V, Function &F, SmallVectorImpl<std::pair<Use *, Function *>> &Uses, SmallPtrSetImpl<Constant *> &ConstantBCs) { for (Use &U : V->uses()) { - if (BitCastOperator *BC = dyn_cast<BitCastOperator>(U.getUser())) - FindUses(BC, F, Uses, ConstantBCs); + if (auto *BC = dyn_cast<BitCastOperator>(U.getUser())) + findUses(BC, F, Uses, ConstantBCs); else if (U.get()->getType() != F.getType()) { CallSite CS(U.getUser()); if (!CS) @@ -82,8 +81,8 @@ static void FindUses(Value *V, Function &F, continue; if (isa<Constant>(U.get())) { // Only add constant bitcasts to the list once; they get RAUW'd - auto c = ConstantBCs.insert(cast<Constant>(U.get())); - if (!c.second) + auto C = ConstantBCs.insert(cast<Constant>(U.get())); + if (!C.second) continue; } Uses.push_back(std::make_pair(&U, &F)); @@ -114,7 +113,7 @@ static void FindUses(Value *V, Function &F, // For bitcasts that involve struct types we don't know at this stage if they // would be equivalent at the wasm level and so we can't know if we need to // generate a wrapper. 
-static Function *CreateWrapper(Function *F, FunctionType *Ty) { +static Function *createWrapper(Function *F, FunctionType *Ty) { Module *M = F->getParent(); Function *Wrapper = Function::Create(Ty, Function::PrivateLinkage, @@ -152,11 +151,11 @@ static Function *CreateWrapper(Function *F, FunctionType *Ty) { BB->getInstList().push_back(PtrCast); Args.push_back(PtrCast); } else if (ArgType->isStructTy() || ParamType->isStructTy()) { - LLVM_DEBUG(dbgs() << "CreateWrapper: struct param type in bitcast: " + LLVM_DEBUG(dbgs() << "createWrapper: struct param type in bitcast: " << F->getName() << "\n"); WrapperNeeded = false; } else { - LLVM_DEBUG(dbgs() << "CreateWrapper: arg type mismatch calling: " + LLVM_DEBUG(dbgs() << "createWrapper: arg type mismatch calling: " << F->getName() << "\n"); LLVM_DEBUG(dbgs() << "Arg[" << Args.size() << "] Expected: " << *ParamType << " Got: " << *ArgType << "\n"); @@ -192,11 +191,11 @@ static Function *CreateWrapper(Function *F, FunctionType *Ty) { BB->getInstList().push_back(Cast); ReturnInst::Create(M->getContext(), Cast, BB); } else if (RtnType->isStructTy() || ExpectedRtnType->isStructTy()) { - LLVM_DEBUG(dbgs() << "CreateWrapper: struct return type in bitcast: " + LLVM_DEBUG(dbgs() << "createWrapper: struct return type in bitcast: " << F->getName() << "\n"); WrapperNeeded = false; } else { - LLVM_DEBUG(dbgs() << "CreateWrapper: return type mismatch calling: " + LLVM_DEBUG(dbgs() << "createWrapper: return type mismatch calling: " << F->getName() << "\n"); LLVM_DEBUG(dbgs() << "Expected: " << *ExpectedRtnType << " Got: " << *RtnType << "\n"); @@ -213,18 +212,18 @@ static Function *CreateWrapper(Function *F, FunctionType *Ty) { new UnreachableInst(M->getContext(), BB); Wrapper->setName(F->getName() + "_bitcast_invalid"); } else if (!WrapperNeeded) { - LLVM_DEBUG(dbgs() << "CreateWrapper: no wrapper needed: " << F->getName() + LLVM_DEBUG(dbgs() << "createWrapper: no wrapper needed: " << F->getName() << "\n"); 
Wrapper->eraseFromParent(); return nullptr; } - LLVM_DEBUG(dbgs() << "CreateWrapper: " << F->getName() << "\n"); + LLVM_DEBUG(dbgs() << "createWrapper: " << F->getName() << "\n"); return Wrapper; } // Test whether a main function with type FuncTy should be rewritten to have // type MainTy. -bool shouldFixMainFunction(FunctionType *FuncTy, FunctionType *MainTy) { +static bool shouldFixMainFunction(FunctionType *FuncTy, FunctionType *MainTy) { // Only fix the main function if it's the standard zero-arg form. That way, // the standard cases will work as expected, and users will see signature // mismatches from the linker for non-standard cases. @@ -243,7 +242,7 @@ bool FixFunctionBitcasts::runOnModule(Module &M) { // Collect all the places that need wrappers. for (Function &F : M) { - FindUses(&F, F, Uses, ConstantBCs); + findUses(&F, F, Uses, ConstantBCs); // If we have a "main" function, and its type isn't // "int main(int argc, char *argv[])", create an artificial call with it @@ -263,7 +262,7 @@ bool FixFunctionBitcasts::runOnModule(Module &M) { UndefValue::get(MainArgTys[1])}; Value *Casted = ConstantExpr::getBitCast(Main, PointerType::get(MainTy, 0)); - CallMain = CallInst::Create(Casted, Args, "call_main"); + CallMain = CallInst::Create(MainTy, Casted, Args, "call_main"); Use *UseMain = &CallMain->getOperandUse(2); Uses.push_back(std::make_pair(UseMain, &F)); } @@ -275,8 +274,8 @@ bool FixFunctionBitcasts::runOnModule(Module &M) { for (auto &UseFunc : Uses) { Use *U = UseFunc.first; Function *F = UseFunc.second; - PointerType *PTy = cast<PointerType>(U->get()->getType()); - FunctionType *Ty = dyn_cast<FunctionType>(PTy->getElementType()); + auto *PTy = cast<PointerType>(U->get()->getType()); + auto *Ty = dyn_cast<FunctionType>(PTy->getElementType()); // If the function is casted to something like i8* as a "generic pointer" // to be later casted to something else, we can't generate a wrapper for it. 
@@ -286,7 +285,7 @@ bool FixFunctionBitcasts::runOnModule(Module &M) { auto Pair = Wrappers.insert(std::make_pair(std::make_pair(F, Ty), nullptr)); if (Pair.second) - Pair.first->second = CreateWrapper(F, Ty); + Pair.first->second = createWrapper(F, Ty); Function *Wrapper = Pair.first->second; if (!Wrapper) @@ -302,7 +301,7 @@ bool FixFunctionBitcasts::runOnModule(Module &M) { // one that gets called from startup. if (CallMain) { Main->setName("__original_main"); - Function *MainWrapper = + auto *MainWrapper = cast<Function>(CallMain->getCalledValue()->stripPointerCasts()); delete CallMain; if (Main->isDeclaration()) { diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp index 108f2879a071..7d8e86d9b2c0 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp @@ -1,46 +1,48 @@ //=- WebAssemblyFixIrreducibleControlFlow.cpp - Fix irreducible control flow -// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// /// \file -/// This file implements a pass that transforms irreducible control flow into -/// reducible control flow. Irreducible control flow means multiple-entry -/// loops; they appear as CFG cycles that are not recorded in MachineLoopInfo -/// due to being unnatural. +/// This file implements a pass that removes irreducible control flow. 
+/// Irreducible control flow means multiple-entry loops, which this pass +/// transforms to have a single entry. /// /// Note that LLVM has a generic pass that lowers irreducible control flow, but /// it linearizes control flow, turning diamonds into two triangles, which is /// both unnecessary and undesirable for WebAssembly. /// -/// The big picture: Ignoring natural loops (seeing them monolithically), we -/// find all the blocks which can return to themselves ("loopers"). Loopers -/// reachable from the non-loopers are loop entries: if there are 2 or more, -/// then we have irreducible control flow. We fix that as follows: a new block -/// is created that can dispatch to each of the loop entries, based on the -/// value of a label "helper" variable, and we replace direct branches to the -/// entries with assignments to the label variable and a branch to the dispatch -/// block. Then the dispatch block is the single entry in a new natural loop. +/// The big picture: We recursively process each "region", defined as a group +/// of blocks with a single entry and no branches back to that entry. A region +/// may be the entire function body, or the inner part of a loop, i.e., the +/// loop's body without branches back to the loop entry. In each region we fix +/// up multi-entry loops by adding a new block that can dispatch to each of the +/// loop entries, based on the value of a label "helper" variable, and we +/// replace direct branches to the entries with assignments to the label +/// variable and a branch to the dispatch block. Then the dispatch block is the +/// single entry in the loop containing the previous multiple entries. After +/// ensuring all the loops in a region are reducible, we recurse into them. 
The +/// total time complexity of this pass is: +/// +/// O(NumBlocks * NumNestedLoops * NumIrreducibleLoops + +/// NumLoops * NumLoops) /// -/// This is similar to what the Relooper [1] does, both identify looping code -/// that requires multiple entries, and resolve it in a similar way. In -/// Relooper terminology, we implement a Multiple shape in a Loop shape. Note +/// This pass is similar to what the Relooper [1] does. Both identify looping +/// code that requires multiple entries, and resolve it in a similar way (in +/// Relooper terminology, we implement a Multiple shape in a Loop shape). Note /// also that like the Relooper, we implement a "minimal" intervention: we only /// use the "label" helper for the blocks we absolutely must and no others. We -/// also prioritize code size and do not perform node splitting (i.e. we don't -/// duplicate code in order to resolve irreducibility). +/// also prioritize code size and do not duplicate code in order to resolve +/// irreducibility. The graph algorithms for finding loops and entries and so +/// forth are also similar to the Relooper. The main differences between this +/// pass and the Relooper are: /// -/// The difference between this code and the Relooper is that the Relooper also -/// generates ifs and loops and works in a recursive manner, knowing at each -/// point what the entries are, and recursively breaks down the problem. Here -/// we just want to resolve irreducible control flow, and we also want to use -/// as much LLVM infrastructure as possible. So we use the MachineLoopInfo to -/// identify natural loops, etc., and we start with the whole CFG and must -/// identify both the looping code and its entries. +/// * We just care about irreducibility, so we just look at loops. +/// * The Relooper emits structured control flow (with ifs etc.), while we +/// emit a CFG. /// /// [1] Alon Zakai. 2011. Emscripten: an LLVM-to-JavaScript compiler. 
In /// Proceedings of the ACM international conference companion on Object oriented @@ -52,200 +54,277 @@ #include "MCTargetDesc/WebAssemblyMCTargetDesc.h" #include "WebAssembly.h" -#include "WebAssemblyMachineFunctionInfo.h" #include "WebAssemblySubtarget.h" -#include "llvm/ADT/PriorityQueue.h" -#include "llvm/ADT/SCCIterator.h" -#include "llvm/ADT/SetVector.h" -#include "llvm/CodeGen/MachineDominators.h" -#include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/CodeGen/MachineLoopInfo.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/Passes.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/raw_ostream.h" using namespace llvm; #define DEBUG_TYPE "wasm-fix-irreducible-control-flow" namespace { -class LoopFixer { +using BlockVector = SmallVector<MachineBasicBlock *, 4>; +using BlockSet = SmallPtrSet<MachineBasicBlock *, 4>; + +// Calculates reachability in a region. Ignores branches to blocks outside of +// the region, and ignores branches to the region entry (for the case where +// the region is the inner part of a loop). +class ReachabilityGraph { public: - LoopFixer(MachineFunction &MF, MachineLoopInfo &MLI, MachineLoop *Loop) - : MF(MF), MLI(MLI), Loop(Loop) {} + ReachabilityGraph(MachineBasicBlock *Entry, const BlockSet &Blocks) + : Entry(Entry), Blocks(Blocks) { +#ifndef NDEBUG + // The region must have a single entry. + for (auto *MBB : Blocks) { + if (MBB != Entry) { + for (auto *Pred : MBB->predecessors()) { + assert(inRegion(Pred)); + } + } + } +#endif + calculate(); + } + + bool canReach(MachineBasicBlock *From, MachineBasicBlock *To) const { + assert(inRegion(From) && inRegion(To)); + auto I = Reachable.find(From); + if (I == Reachable.end()) + return false; + return I->second.count(To); + } + + // "Loopers" are blocks that are in a loop. We detect these by finding blocks + // that can reach themselves. 
+ const BlockSet &getLoopers() const { return Loopers; } + + // Get all blocks that are loop entries. + const BlockSet &getLoopEntries() const { return LoopEntries; } - // Run the fixer on the given inputs. Returns whether changes were made. - bool run(); + // Get all blocks that enter a particular loop from outside. + const BlockSet &getLoopEnterers(MachineBasicBlock *LoopEntry) const { + assert(inRegion(LoopEntry)); + auto I = LoopEnterers.find(LoopEntry); + assert(I != LoopEnterers.end()); + return I->second; + } private: - MachineFunction &MF; - MachineLoopInfo &MLI; - MachineLoop *Loop; + MachineBasicBlock *Entry; + const BlockSet &Blocks; + + BlockSet Loopers, LoopEntries; + DenseMap<MachineBasicBlock *, BlockSet> LoopEnterers; - MachineBasicBlock *Header; - SmallPtrSet<MachineBasicBlock *, 4> LoopBlocks; + bool inRegion(MachineBasicBlock *MBB) const { return Blocks.count(MBB); } - using BlockSet = SmallPtrSet<MachineBasicBlock *, 4>; + // Maps a block to all the other blocks it can reach. DenseMap<MachineBasicBlock *, BlockSet> Reachable; - // The worklist contains pairs of recent additions, (a, b), where we just - // added a link a => b. - using BlockPair = std::pair<MachineBasicBlock *, MachineBasicBlock *>; - SmallVector<BlockPair, 4> WorkList; - - // Get a canonical block to represent a block or a loop: the block, or if in - // an inner loop, the loop header, of it in an outer loop scope, we can - // ignore it. We need to call this on all blocks we work on. - MachineBasicBlock *canonicalize(MachineBasicBlock *MBB) { - MachineLoop *InnerLoop = MLI.getLoopFor(MBB); - if (InnerLoop == Loop) { - return MBB; - } else { - // This is either in an outer or an inner loop, and not in ours. - if (!LoopBlocks.count(MBB)) { - // It's in outer code, ignore it. - return nullptr; + void calculate() { + // Reachability computation work list. Contains pairs of recent additions + // (A, B) where we just added a link A => B. 
+ using BlockPair = std::pair<MachineBasicBlock *, MachineBasicBlock *>; + SmallVector<BlockPair, 4> WorkList; + + // Add all relevant direct branches. + for (auto *MBB : Blocks) { + for (auto *Succ : MBB->successors()) { + if (Succ != Entry && inRegion(Succ)) { + Reachable[MBB].insert(Succ); + WorkList.emplace_back(MBB, Succ); + } } - assert(InnerLoop); - // It's in an inner loop, canonicalize it to the header of that loop. - return InnerLoop->getHeader(); } - } - // For a successor we can additionally ignore it if it's a branch back to a - // natural loop top, as when we are in the scope of a loop, we just care - // about internal irreducibility, and can ignore the loop we are in. We need - // to call this on all blocks in a context where they are a successor. - MachineBasicBlock *canonicalizeSuccessor(MachineBasicBlock *MBB) { - if (Loop && MBB == Loop->getHeader()) { - // Ignore branches going to the loop's natural header. - return nullptr; + while (!WorkList.empty()) { + MachineBasicBlock *MBB, *Succ; + std::tie(MBB, Succ) = WorkList.pop_back_val(); + assert(inRegion(MBB) && Succ != Entry && inRegion(Succ)); + if (MBB != Entry) { + // We recently added MBB => Succ, and that means we may have enabled + // Pred => MBB => Succ. + for (auto *Pred : MBB->predecessors()) { + if (Reachable[Pred].insert(Succ).second) { + WorkList.emplace_back(Pred, Succ); + } + } + } } - return canonicalize(MBB); - } - // Potentially insert a new reachable edge, and if so, note it as further - // work. - void maybeInsert(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { - assert(MBB == canonicalize(MBB)); - assert(Succ); - // Succ may not be interesting as a sucessor. - Succ = canonicalizeSuccessor(Succ); - if (!Succ) - return; - if (Reachable[MBB].insert(Succ).second) { - // For there to be further work, it means that we have - // X => MBB => Succ - // for some other X, and in that case X => Succ would be a new edge for - // us to discover later. 
However, if we don't care about MBB as a - // successor, then we don't care about that anyhow. - if (canonicalizeSuccessor(MBB)) { - WorkList.emplace_back(MBB, Succ); + // Blocks that can return to themselves are in a loop. + for (auto *MBB : Blocks) { + if (canReach(MBB, MBB)) { + Loopers.insert(MBB); + } + } + assert(!Loopers.count(Entry)); + + // Find the loop entries - loopers reachable from blocks not in that loop - + // and those outside blocks that reach them, the "loop enterers". + for (auto *Looper : Loopers) { + for (auto *Pred : Looper->predecessors()) { + // Pred can reach Looper. If Looper can reach Pred, it is in the loop; + // otherwise, it is a block that enters into the loop. + if (!canReach(Looper, Pred)) { + LoopEntries.insert(Looper); + LoopEnterers[Looper].insert(Pred); + } } } } }; -bool LoopFixer::run() { - Header = Loop ? Loop->getHeader() : &*MF.begin(); - - // Identify all the blocks in this loop scope. - if (Loop) { - for (auto *MBB : Loop->getBlocks()) { - LoopBlocks.insert(MBB); - } - } else { - for (auto &MBB : MF) { - LoopBlocks.insert(&MBB); - } +// Finds the blocks in a single-entry loop, given the loop entry and the +// list of blocks that enter the loop. +class LoopBlocks { +public: + LoopBlocks(MachineBasicBlock *Entry, const BlockSet &Enterers) + : Entry(Entry), Enterers(Enterers) { + calculate(); } - // Compute which (canonicalized) blocks each block can reach. - - // Add all the initial work. - for (auto *MBB : LoopBlocks) { - MachineLoop *InnerLoop = MLI.getLoopFor(MBB); + BlockSet &getBlocks() { return Blocks; } - if (InnerLoop == Loop) { - for (auto *Succ : MBB->successors()) { - maybeInsert(MBB, Succ); - } - } else { - // It can't be in an outer loop - we loop on LoopBlocks - and so it must - // be an inner loop. - assert(InnerLoop); - // Check if we are the canonical block for this loop. - if (canonicalize(MBB) != MBB) { - continue; - } - // The successors are those of the loop. 
- SmallVector<MachineBasicBlock *, 2> ExitBlocks; - InnerLoop->getExitBlocks(ExitBlocks); - for (auto *Succ : ExitBlocks) { - maybeInsert(MBB, Succ); +private: + MachineBasicBlock *Entry; + const BlockSet &Enterers; + + BlockSet Blocks; + + void calculate() { + // Going backwards from the loop entry, if we ignore the blocks entering + // from outside, we will traverse all the blocks in the loop. + BlockVector WorkList; + BlockSet AddedToWorkList; + Blocks.insert(Entry); + for (auto *Pred : Entry->predecessors()) { + if (!Enterers.count(Pred)) { + WorkList.push_back(Pred); + AddedToWorkList.insert(Pred); } } - } - // Do work until we are all done. - while (!WorkList.empty()) { - MachineBasicBlock *MBB; - MachineBasicBlock *Succ; - std::tie(MBB, Succ) = WorkList.pop_back_val(); - // The worklist item is an edge we just added, so it must have valid blocks - // (and not something canonicalized to nullptr). - assert(MBB); - assert(Succ); - // The successor in that pair must also be a valid successor. - assert(MBB == canonicalizeSuccessor(MBB)); - // We recently added MBB => Succ, and that means we may have enabled - // Pred => MBB => Succ. Check all the predecessors. Note that our loop here - // is correct for both a block and a block representing a loop, as the loop - // is natural and so the predecessors are all predecessors of the loop - // header, which is the block we have here. - for (auto *Pred : MBB->predecessors()) { - // Canonicalize, make sure it's relevant, and check it's not the same - // block (an update to the block itself doesn't help compute that same - // block). 
- Pred = canonicalize(Pred); - if (Pred && Pred != MBB) { - maybeInsert(Pred, Succ); + while (!WorkList.empty()) { + auto *MBB = WorkList.pop_back_val(); + assert(!Enterers.count(MBB)); + if (Blocks.insert(MBB).second) { + for (auto *Pred : MBB->predecessors()) { + if (!AddedToWorkList.count(Pred)) { + WorkList.push_back(Pred); + AddedToWorkList.insert(Pred); + } + } } } } +}; - // It's now trivial to identify the loopers. - SmallPtrSet<MachineBasicBlock *, 4> Loopers; - for (auto MBB : LoopBlocks) { - if (Reachable[MBB].count(MBB)) { - Loopers.insert(MBB); - } +class WebAssemblyFixIrreducibleControlFlow final : public MachineFunctionPass { + StringRef getPassName() const override { + return "WebAssembly Fix Irreducible Control Flow"; } - // The header cannot be a looper. At the toplevel, LLVM does not allow the - // entry to be in a loop, and in a natural loop we should ignore the header. - assert(Loopers.count(Header) == 0); - - // Find the entries, loopers reachable from non-loopers. - SmallPtrSet<MachineBasicBlock *, 4> Entries; - SmallVector<MachineBasicBlock *, 4> SortedEntries; - for (auto *Looper : Loopers) { - for (auto *Pred : Looper->predecessors()) { - Pred = canonicalize(Pred); - if (Pred && !Loopers.count(Pred)) { - Entries.insert(Looper); - SortedEntries.push_back(Looper); + + bool runOnMachineFunction(MachineFunction &MF) override; + + bool processRegion(MachineBasicBlock *Entry, BlockSet &Blocks, + MachineFunction &MF); + + void makeSingleEntryLoop(BlockSet &Entries, BlockSet &Blocks, + MachineFunction &MF, const ReachabilityGraph &Graph); + +public: + static char ID; // Pass identification, replacement for typeid + WebAssemblyFixIrreducibleControlFlow() : MachineFunctionPass(ID) {} +}; + +bool WebAssemblyFixIrreducibleControlFlow::processRegion( + MachineBasicBlock *Entry, BlockSet &Blocks, MachineFunction &MF) { + bool Changed = false; + + // Remove irreducibility before processing child loops, which may take + // multiple iterations. 
+ while (true) { + ReachabilityGraph Graph(Entry, Blocks); + + bool FoundIrreducibility = false; + + for (auto *LoopEntry : Graph.getLoopEntries()) { + // Find mutual entries - all entries which can reach this one, and + // are reached by it (that always includes LoopEntry itself). All mutual + // entries must be in the same loop, so if we have more than one, then we + // have irreducible control flow. + // + // Note that irreducibility may involve inner loops, e.g. imagine A + // starts one loop, and it has B inside it which starts an inner loop. + // If we add a branch from all the way on the outside to B, then in a + // sense B is no longer an "inner" loop, semantically speaking. We will + // fix that irreducibility by adding a block that dispatches to either + // A or B, so B will no longer be an inner loop in our output. + // (A fancier approach might try to keep it as such.) + // + // Note that we still need to recurse into inner loops later, to handle + // the case where the irreducibility is entirely nested - we would not + // be able to identify that at this point, since the enclosing loop is + // a group of blocks all of whom can reach each other. (We'll see the + // irreducibility after removing branches to the top of that enclosing + // loop.) + BlockSet MutualLoopEntries; + MutualLoopEntries.insert(LoopEntry); + for (auto *OtherLoopEntry : Graph.getLoopEntries()) { + if (OtherLoopEntry != LoopEntry && + Graph.canReach(LoopEntry, OtherLoopEntry) && + Graph.canReach(OtherLoopEntry, LoopEntry)) { + MutualLoopEntries.insert(OtherLoopEntry); + } + } + + if (MutualLoopEntries.size() > 1) { + makeSingleEntryLoop(MutualLoopEntries, Blocks, MF, Graph); + FoundIrreducibility = true; + Changed = true; break; } } + // Only go on to actually process the inner loops when we are done + // removing irreducible control flow and changing the graph.
Modifying + // the graph as we go is possible, and that might let us avoid looking at + // the already-fixed loops again if we are careful, but all that is + // complex and bug-prone. Since irreducible loops are rare, just starting + // another iteration is best. + if (FoundIrreducibility) { + continue; + } + + for (auto *LoopEntry : Graph.getLoopEntries()) { + LoopBlocks InnerBlocks(LoopEntry, Graph.getLoopEnterers(LoopEntry)); + // Each of these calls to processRegion may change the graph, but are + // guaranteed not to interfere with each other. The only changes we make + // to the graph are to add blocks on the way to a loop entry. As the + // loops are disjoint, that means we may only alter branches that exit + // another loop, which are ignored when recursing into that other loop + // anyhow. + if (processRegion(LoopEntry, InnerBlocks.getBlocks(), MF)) { + Changed = true; + } + } + + return Changed; } +} - // Check if we found irreducible control flow. - if (LLVM_LIKELY(Entries.size() <= 1)) - return false; +// Given a set of entries to a single loop, create a single entry for that +// loop by creating a dispatch block for them, routing control flow using +// a helper variable. Also updates Blocks with any new blocks created, so +// that we properly track all the blocks in the region. But this does not update +// ReachabilityGraph; this will be updated in the caller of this function as +// needed. +void WebAssemblyFixIrreducibleControlFlow::makeSingleEntryLoop( + BlockSet &Entries, BlockSet &Blocks, MachineFunction &MF, + const ReachabilityGraph &Graph) { + assert(Entries.size() >= 2); // Sort the entries to ensure a deterministic build. 
+ BlockVector SortedEntries(Entries.begin(), Entries.end()); llvm::sort(SortedEntries, [&](const MachineBasicBlock *A, const MachineBasicBlock *B) { auto ANum = A->getNumber(); @@ -257,8 +336,8 @@ bool LoopFixer::run() { for (auto Block : SortedEntries) assert(Block->getNumber() != -1); if (SortedEntries.size() > 1) { - for (auto I = SortedEntries.begin(), E = SortedEntries.end() - 1; - I != E; ++I) { + for (auto I = SortedEntries.begin(), E = SortedEntries.end() - 1; I != E; + ++I) { auto ANum = (*I)->getNumber(); auto BNum = (*(std::next(I)))->getNumber(); assert(ANum != BNum); @@ -269,12 +348,12 @@ bool LoopFixer::run() { // Create a dispatch block which will contain a jump table to the entries. MachineBasicBlock *Dispatch = MF.CreateMachineBasicBlock(); MF.insert(MF.end(), Dispatch); - MLI.changeLoopFor(Dispatch, Loop); + Blocks.insert(Dispatch); // Add the jump table. const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); - MachineInstrBuilder MIB = BuildMI(*Dispatch, Dispatch->end(), DebugLoc(), - TII.get(WebAssembly::BR_TABLE_I32)); + MachineInstrBuilder MIB = + BuildMI(Dispatch, DebugLoc(), TII.get(WebAssembly::BR_TABLE_I32)); // Add the register which will be used to tell the jump table which block to // jump to. @@ -285,112 +364,110 @@ bool LoopFixer::run() { // Compute the indices in the superheader, one for each bad block, and // add them as successors. DenseMap<MachineBasicBlock *, unsigned> Indices; - for (auto *MBB : SortedEntries) { - auto Pair = Indices.insert(std::make_pair(MBB, 0)); - if (!Pair.second) { - continue; - } + for (auto *Entry : SortedEntries) { + auto Pair = Indices.insert(std::make_pair(Entry, 0)); + assert(Pair.second); unsigned Index = MIB.getInstr()->getNumExplicitOperands() - 1; Pair.first->second = Index; - MIB.addMBB(MBB); - Dispatch->addSuccessor(MBB); + MIB.addMBB(Entry); + Dispatch->addSuccessor(Entry); } - // Rewrite the problematic successors for every block that wants to reach the - // bad blocks. 
For simplicity, we just introduce a new block for every edge - // we need to rewrite. (Fancier things are possible.) + // Rewrite the problematic successors for every block that wants to reach + // the bad blocks. For simplicity, we just introduce a new block for every + // edge we need to rewrite. (Fancier things are possible.) - SmallVector<MachineBasicBlock *, 4> AllPreds; - for (auto *MBB : SortedEntries) { - for (auto *Pred : MBB->predecessors()) { + BlockVector AllPreds; + for (auto *Entry : SortedEntries) { + for (auto *Pred : Entry->predecessors()) { if (Pred != Dispatch) { AllPreds.push_back(Pred); } } } - for (MachineBasicBlock *MBB : AllPreds) { - DenseMap<MachineBasicBlock *, MachineBasicBlock *> Map; - for (auto *Succ : MBB->successors()) { - if (!Entries.count(Succ)) { + // This set stores predecessors within this loop. + DenseSet<MachineBasicBlock *> InLoop; + for (auto *Pred : AllPreds) { + for (auto *Entry : Pred->successors()) { + if (!Entries.count(Entry)) continue; + if (Graph.canReach(Entry, Pred)) { + InLoop.insert(Pred); + break; } + } + } + + // Record if each entry has a layout predecessor. This map stores + // <<Predecessor is within the loop?, loop entry>, layout predecessor> + std::map<std::pair<bool, MachineBasicBlock *>, MachineBasicBlock *> + EntryToLayoutPred; + for (auto *Pred : AllPreds) + for (auto *Entry : Pred->successors()) + if (Entries.count(Entry) && Pred->isLayoutSuccessor(Entry)) + EntryToLayoutPred[std::make_pair(InLoop.count(Pred), Entry)] = Pred; + + // We need to create at most two routing blocks per entry: one for + // predecessors outside the loop and one for predecessors inside the loop. 
+ // This map stores + // <<Predecessor is within the loop?, loop entry>, routing block> + std::map<std::pair<bool, MachineBasicBlock *>, MachineBasicBlock *> Map; + for (auto *Pred : AllPreds) { + bool PredInLoop = InLoop.count(Pred); + for (auto *Entry : Pred->successors()) { + if (!Entries.count(Entry) || + Map.count(std::make_pair(InLoop.count(Pred), Entry))) + continue; + // If there exists a layout predecessor of this entry and this predecessor + // is not that, we rather create a routing block after that layout + // predecessor to save a branch. + if (EntryToLayoutPred.count(std::make_pair(PredInLoop, Entry)) && + EntryToLayoutPred[std::make_pair(PredInLoop, Entry)] != Pred) + continue; // This is a successor we need to rewrite. - MachineBasicBlock *Split = MF.CreateMachineBasicBlock(); - MF.insert(MBB->isLayoutSuccessor(Succ) ? MachineFunction::iterator(Succ) - : MF.end(), - Split); - MLI.changeLoopFor(Split, Loop); + MachineBasicBlock *Routing = MF.CreateMachineBasicBlock(); + MF.insert(Pred->isLayoutSuccessor(Entry) + ? MachineFunction::iterator(Entry) + : MF.end(), + Routing); + Blocks.insert(Routing); // Set the jump table's register of the index of the block we wish to // jump to, and jump to the jump table. - BuildMI(*Split, Split->end(), DebugLoc(), TII.get(WebAssembly::CONST_I32), - Reg) - .addImm(Indices[Succ]); - BuildMI(*Split, Split->end(), DebugLoc(), TII.get(WebAssembly::BR)) - .addMBB(Dispatch); - Split->addSuccessor(Dispatch); - Map[Succ] = Split; + BuildMI(Routing, DebugLoc(), TII.get(WebAssembly::CONST_I32), Reg) + .addImm(Indices[Entry]); + BuildMI(Routing, DebugLoc(), TII.get(WebAssembly::BR)).addMBB(Dispatch); + Routing->addSuccessor(Dispatch); + Map[std::make_pair(PredInLoop, Entry)] = Routing; } + } + + for (auto *Pred : AllPreds) { + bool PredInLoop = InLoop.count(Pred); // Remap the terminator operands and the successor list. 
- for (MachineInstr &Term : MBB->terminators()) + for (MachineInstr &Term : Pred->terminators()) for (auto &Op : Term.explicit_uses()) if (Op.isMBB() && Indices.count(Op.getMBB())) - Op.setMBB(Map[Op.getMBB()]); - for (auto Rewrite : Map) - MBB->replaceSuccessor(Rewrite.first, Rewrite.second); + Op.setMBB(Map[std::make_pair(PredInLoop, Op.getMBB())]); + + for (auto *Succ : Pred->successors()) { + if (!Entries.count(Succ)) + continue; + auto *Routing = Map[std::make_pair(PredInLoop, Succ)]; + Pred->replaceSuccessor(Succ, Routing); + } } // Create a fake default label, because br_table requires one. MIB.addMBB(MIB.getInstr() ->getOperand(MIB.getInstr()->getNumExplicitOperands() - 1) .getMBB()); - - return true; } -class WebAssemblyFixIrreducibleControlFlow final : public MachineFunctionPass { - StringRef getPassName() const override { - return "WebAssembly Fix Irreducible Control Flow"; - } - - void getAnalysisUsage(AnalysisUsage &AU) const override { - AU.setPreservesCFG(); - AU.addRequired<MachineDominatorTree>(); - AU.addPreserved<MachineDominatorTree>(); - AU.addRequired<MachineLoopInfo>(); - AU.addPreserved<MachineLoopInfo>(); - MachineFunctionPass::getAnalysisUsage(AU); - } - - bool runOnMachineFunction(MachineFunction &MF) override; - - bool runIteration(MachineFunction &MF, MachineLoopInfo &MLI) { - // Visit the function body, which is identified as a null loop. - if (LoopFixer(MF, MLI, nullptr).run()) { - return true; - } - - // Visit all the loops. 
- SmallVector<MachineLoop *, 8> Worklist(MLI.begin(), MLI.end()); - while (!Worklist.empty()) { - MachineLoop *Loop = Worklist.pop_back_val(); - Worklist.append(Loop->begin(), Loop->end()); - if (LoopFixer(MF, MLI, Loop).run()) { - return true; - } - } - - return false; - } - -public: - static char ID; // Pass identification, replacement for typeid - WebAssemblyFixIrreducibleControlFlow() : MachineFunctionPass(ID) {} -}; } // end anonymous namespace char WebAssemblyFixIrreducibleControlFlow::ID = 0; @@ -407,23 +484,18 @@ bool WebAssemblyFixIrreducibleControlFlow::runOnMachineFunction( "********** Function: " << MF.getName() << '\n'); - bool Changed = false; - auto &MLI = getAnalysis<MachineLoopInfo>(); - - // When we modify something, bail out and recompute MLI, then start again, as - // we create a new natural loop when we resolve irreducible control flow, and - // other loops may become nested in it, etc. In practice this is not an issue - // because irreducible control flow is rare, only very few cycles are needed - // here. - while (LLVM_UNLIKELY(runIteration(MF, MLI))) { - // We rewrote part of the function; recompute MLI and start again. - LLVM_DEBUG(dbgs() << "Recomputing loops.\n"); + // Start the recursive process on the entire function body. + BlockSet AllBlocks; + for (auto &MBB : MF) { + AllBlocks.insert(&MBB); + } + + if (LLVM_UNLIKELY(processRegion(&*MF.begin(), AllBlocks, MF))) { + // We rewrote part of the function; recompute relevant things. 
MF.getRegInfo().invalidateLiveness(); MF.RenumberBlocks(); - getAnalysis<MachineDominatorTree>().runOnMachineFunction(MF); - MLI.runOnMachineFunction(MF); - Changed = true; + return true; } - return Changed; + return false; } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp index 2d5aff28d27b..5299068efdd4 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyFrameLowering.cpp - WebAssembly Frame Lowering ----------==// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -131,7 +130,7 @@ void WebAssemblyFrameLowering::writeSPToGlobal( const char *ES = "__stack_pointer"; auto *SPSymbol = MF.createExternalSymbolName(ES); BuildMI(MBB, InsertStore, DL, TII->get(WebAssembly::GLOBAL_SET_I32)) - .addExternalSymbol(SPSymbol, WebAssemblyII::MO_SYMBOL_GLOBAL) + .addExternalSymbol(SPSymbol) .addReg(SrcReg); } @@ -165,7 +164,8 @@ void WebAssemblyFrameLowering::emitPrologue(MachineFunction &MF, auto &MRI = MF.getRegInfo(); auto InsertPt = MBB.begin(); - while (InsertPt != MBB.end() && WebAssembly::isArgument(*InsertPt)) + while (InsertPt != MBB.end() && + WebAssembly::isArgument(InsertPt->getOpcode())) ++InsertPt; DebugLoc DL; @@ -178,7 +178,7 @@ void WebAssemblyFrameLowering::emitPrologue(MachineFunction &MF, const char *ES = "__stack_pointer"; auto *SPSymbol = MF.createExternalSymbolName(ES); BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::GLOBAL_GET_I32), 
SPReg) - .addExternalSymbol(SPSymbol, WebAssemblyII::MO_SYMBOL_GLOBAL); + .addExternalSymbol(SPSymbol); bool HasBP = hasBP(MF); if (HasBP) { diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h index c6fa8261b03f..daddd4ca16ff 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h @@ -1,9 +1,8 @@ // WebAssemblyFrameLowering.h - TargetFrameLowering for WebAssembly -*- C++ -*-/ // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISD.def b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISD.def index e987d7f7f43a..77217f16a727 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISD.def +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISD.def @@ -1,9 +1,8 @@ //- WebAssemblyISD.def - WebAssembly ISD ---------------------------*- C++ -*-// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -16,9 +15,14 @@ HANDLE_NODETYPE(CALL1) HANDLE_NODETYPE(CALL0) +HANDLE_NODETYPE(RET_CALL) HANDLE_NODETYPE(RETURN) HANDLE_NODETYPE(ARGUMENT) +// A wrapper node for TargetExternalSymbol, TargetGlobalAddress, and MCSymbol HANDLE_NODETYPE(Wrapper) +// A special wrapper used in PIC code for __memory_base/__table_base relative +// access. +HANDLE_NODETYPE(WrapperPIC) HANDLE_NODETYPE(BR_IF) HANDLE_NODETYPE(BR_TABLE) HANDLE_NODETYPE(SHUFFLE) @@ -26,5 +30,7 @@ HANDLE_NODETYPE(VEC_SHL) HANDLE_NODETYPE(VEC_SHR_S) HANDLE_NODETYPE(VEC_SHR_U) HANDLE_NODETYPE(THROW) +HANDLE_NODETYPE(MEMORY_COPY) +HANDLE_NODETYPE(MEMORY_FILL) // add memory opcodes starting at ISD::FIRST_TARGET_MEMORY_OPCODE here... diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp index 0a7464cedc90..26339eaef37d 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp @@ -1,9 +1,8 @@ //- WebAssemblyISelDAGToDAG.cpp - A dag to dag inst selector for WebAssembly -// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -16,6 +15,7 @@ #include "WebAssembly.h" #include "WebAssemblyTargetMachine.h" #include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/Support/Debug.h" #include "llvm/Support/KnownBits.h" @@ -38,9 +38,9 @@ class WebAssemblyDAGToDAGISel final : public SelectionDAGISel { bool ForCodeSize; public: - WebAssemblyDAGToDAGISel(WebAssemblyTargetMachine &tm, + WebAssemblyDAGToDAGISel(WebAssemblyTargetMachine &TM, CodeGenOpt::Level OptLevel) - : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr), ForCodeSize(false) { + : SelectionDAGISel(TM, OptLevel), Subtarget(nullptr), ForCodeSize(false) { } StringRef getPassName() const override { @@ -52,8 +52,7 @@ public: "********** Function: " << MF.getName() << '\n'); - ForCodeSize = MF.getFunction().hasFnAttribute(Attribute::OptimizeForSize) || - MF.getFunction().hasFnAttribute(Attribute::MinSize); + ForCodeSize = MF.getFunction().hasOptSize(); Subtarget = &MF.getSubtarget<WebAssemblySubtarget>(); return SelectionDAGISel::runOnMachineFunction(MF); } @@ -79,14 +78,159 @@ void WebAssemblyDAGToDAGISel::Select(SDNode *Node) { return; } - // Few custom selection stuff. If we need WebAssembly-specific selection, - // uncomment this block add corresponding case statements. - /* + // Few custom selection stuff. + SDLoc DL(Node); + MachineFunction &MF = CurDAG->getMachineFunction(); switch (Node->getOpcode()) { + case ISD::ATOMIC_FENCE: { + if (!MF.getSubtarget<WebAssemblySubtarget>().hasAtomics()) + break; + + uint64_t SyncScopeID = + cast<ConstantSDNode>(Node->getOperand(2).getNode())->getZExtValue(); + switch (SyncScopeID) { + case SyncScope::SingleThread: { + // We lower a single-thread fence to a pseudo compiler barrier instruction + // preventing instruction reordering. This will not be emitted in final + // binary. 
+ MachineSDNode *Fence = + CurDAG->getMachineNode(WebAssembly::COMPILER_FENCE, + DL, // debug loc + MVT::Other, // outchain type + Node->getOperand(0) // inchain + ); + ReplaceNode(Node, Fence); + CurDAG->RemoveDeadNode(Node); + return; + } + + case SyncScope::System: { + // For non-emscripten systems, we have not decided on what we should + // translate fences to yet. + if (!Subtarget->getTargetTriple().isOSEmscripten()) + report_fatal_error( + "ATOMIC_FENCE is not yet supported in non-emscripten OSes"); + + // Wasm does not have a fence instruction, but because all atomic + // instructions in wasm are sequentially consistent, we translate a + // fence to an idempotent atomic RMW instruction to a linear memory + // address. All atomic instructions in wasm are sequentially consistent, + // but this is to ensure a fence also prevents reordering of non-atomic + // instructions in the VM. Even though LLVM IR's fence instruction does + // not say anything about its relationship with non-atomic instructions, + // we think this is more user-friendly. + // + // While any address can work, here we use a value stored in + // __stack_pointer wasm global because there's high chance that area is + // in cache. + // + // So the selected instructions will be in the form of: + // %addr = get_global $__stack_pointer + // %0 = i32.const 0 + // i32.atomic.rmw.or %addr, %0 + SDValue StackPtrSym = CurDAG->getTargetExternalSymbol( + "__stack_pointer", TLI->getPointerTy(CurDAG->getDataLayout())); + MachineSDNode *GetGlobal = + CurDAG->getMachineNode(WebAssembly::GLOBAL_GET_I32, // opcode + DL, // debug loc + MVT::i32, // result type + StackPtrSym // __stack_pointer symbol + ); + + SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32); + auto *MMO = MF.getMachineMemOperand( + MachinePointerInfo::getUnknownStack(MF), + // FIXME Volatile isn't really correct, but currently all LLVM + // atomic instructions are treated as volatiles in the backend, so + // we should be consistent.
+ MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad | + MachineMemOperand::MOStore, + 4, 4, AAMDNodes(), nullptr, SyncScope::System, + AtomicOrdering::SequentiallyConsistent); + MachineSDNode *Const0 = + CurDAG->getMachineNode(WebAssembly::CONST_I32, DL, MVT::i32, Zero); + MachineSDNode *AtomicRMW = CurDAG->getMachineNode( + WebAssembly::ATOMIC_RMW_OR_I32, // opcode + DL, // debug loc + MVT::i32, // result type + MVT::Other, // outchain type + { + Zero, // alignment + Zero, // offset + SDValue(GetGlobal, 0), // __stack_pointer + SDValue(Const0, 0), // OR with 0 to make it idempotent + Node->getOperand(0) // inchain + }); + + CurDAG->setNodeMemRefs(AtomicRMW, {MMO}); + ReplaceUses(SDValue(Node, 0), SDValue(AtomicRMW, 1)); + CurDAG->RemoveDeadNode(Node); + return; + } + default: + llvm_unreachable("Unknown scope!"); + } + } + + case ISD::GlobalTLSAddress: { + const auto *GA = cast<GlobalAddressSDNode>(Node); + + if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory()) + report_fatal_error("cannot use thread-local storage without bulk memory", + false); + + // Currently Emscripten does not support dynamic linking with threads. + // Therefore, if we have thread-local storage, only the local-exec model + // is possible. + // TODO: remove this and implement proper TLS models once Emscripten + // supports dynamic linking with threads. 
+ if (GA->getGlobal()->getThreadLocalMode() != + GlobalValue::LocalExecTLSModel && + !Subtarget->getTargetTriple().isOSEmscripten()) { + report_fatal_error("only -ftls-model=local-exec is supported for now on " + "non-Emscripten OSes: variable " + + GA->getGlobal()->getName(), + false); + } + + MVT PtrVT = TLI->getPointerTy(CurDAG->getDataLayout()); + assert(PtrVT == MVT::i32 && "only wasm32 is supported for now"); + + SDValue TLSBaseSym = CurDAG->getTargetExternalSymbol("__tls_base", PtrVT); + SDValue TLSOffsetSym = CurDAG->getTargetGlobalAddress( + GA->getGlobal(), DL, PtrVT, GA->getOffset(), 0); + + MachineSDNode *TLSBase = CurDAG->getMachineNode(WebAssembly::GLOBAL_GET_I32, + DL, MVT::i32, TLSBaseSym); + MachineSDNode *TLSOffset = CurDAG->getMachineNode( + WebAssembly::CONST_I32, DL, MVT::i32, TLSOffsetSym); + MachineSDNode *TLSAddress = + CurDAG->getMachineNode(WebAssembly::ADD_I32, DL, MVT::i32, + SDValue(TLSBase, 0), SDValue(TLSOffset, 0)); + ReplaceNode(Node, TLSAddress); + return; + } + + case ISD::INTRINSIC_WO_CHAIN: { + unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue(); + switch (IntNo) { + case Intrinsic::wasm_tls_size: { + MVT PtrVT = TLI->getPointerTy(CurDAG->getDataLayout()); + assert(PtrVT == MVT::i32 && "only wasm32 is supported for now"); + + MachineSDNode *TLSSize = CurDAG->getMachineNode( + WebAssembly::GLOBAL_GET_I32, DL, PtrVT, + CurDAG->getTargetExternalSymbol("__tls_size", MVT::i32)); + ReplaceNode(Node, TLSSize); + return; + } + } + break; + } + default: break; } - */ // Select the default instruction. 
SelectCode(Node); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp index f7f29d85cbb2..4064a983099c 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -1,9 +1,8 @@ //=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -46,9 +45,6 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering( setBooleanContents(ZeroOrOneBooleanContent); // Except in SIMD vectors setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); - // WebAssembly does not produce floating-point exceptions on normal floating - // point operations. - setHasFloatingPointExceptions(false); // We don't know the microarchitecture here, so just reduce register pressure. setSchedulingPreference(Sched::RegPressure); // Tell ISel that we have a stack pointer. 
@@ -64,10 +60,10 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering( addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass); addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass); addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass); - if (Subtarget->hasUnimplementedSIMD128()) { - addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass); - addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass); - } + } + if (Subtarget->hasUnimplementedSIMD128()) { + addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass); + addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass); } // Compute derived properties from the register classes. computeRegisterProperties(Subtarget->getRegisterInfo()); @@ -111,56 +107,62 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering( setTruncStoreAction(T, MVT::f16, Expand); } - // Support saturating add for i8x16 and i16x8 - if (Subtarget->hasSIMD128()) - for (auto T : {MVT::v16i8, MVT::v8i16}) - for (auto Op : {ISD::SADDSAT, ISD::UADDSAT}) - setOperationAction(Op, T, Legal); - // Expand unavailable integer operations. 
for (auto Op : {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU, ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) { - for (auto T : {MVT::i32, MVT::i64}) { + for (auto T : {MVT::i32, MVT::i64}) setOperationAction(Op, T, Expand); - } - if (Subtarget->hasSIMD128()) { - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) { + if (Subtarget->hasSIMD128()) + for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) setOperationAction(Op, T, Expand); - } - if (Subtarget->hasUnimplementedSIMD128()) { - setOperationAction(Op, MVT::v2i64, Expand); - } - } + if (Subtarget->hasUnimplementedSIMD128()) + setOperationAction(Op, MVT::v2i64, Expand); } - // There is no i64x2.mul instruction - setOperationAction(ISD::MUL, MVT::v2i64, Expand); - - // We have custom shuffle lowering to expose the shuffle mask + // SIMD-specific configuration if (Subtarget->hasSIMD128()) { - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}) { + // Support saturating add for i8x16 and i16x8 + for (auto Op : {ISD::SADDSAT, ISD::UADDSAT}) + for (auto T : {MVT::v16i8, MVT::v8i16}) + setOperationAction(Op, T, Legal); + + // Custom lower BUILD_VECTORs to minimize number of replace_lanes + for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}) + setOperationAction(ISD::BUILD_VECTOR, T, Custom); + if (Subtarget->hasUnimplementedSIMD128()) + for (auto T : {MVT::v2i64, MVT::v2f64}) + setOperationAction(ISD::BUILD_VECTOR, T, Custom); + + // We have custom shuffle lowering to expose the shuffle mask + for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}) setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom); - } - if (Subtarget->hasUnimplementedSIMD128()) { - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom); - } - } + if (Subtarget->hasUnimplementedSIMD128()) + for (auto T: {MVT::v2i64, MVT::v2f64}) + 
setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom); - // Custom lowering since wasm shifts must have a scalar shift amount - if (Subtarget->hasSIMD128()) { - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) - for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL}) + // Custom lowering since wasm shifts must have a scalar shift amount + for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL}) { + for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) setOperationAction(Op, T, Custom); - if (Subtarget->hasUnimplementedSIMD128()) - for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL}) + if (Subtarget->hasUnimplementedSIMD128()) setOperationAction(Op, MVT::v2i64, Custom); - } + } - // There are no select instructions for vectors - if (Subtarget->hasSIMD128()) + // Custom lower lane accesses to expand out variable indices + for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT}) { + for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}) + setOperationAction(Op, T, Custom); + if (Subtarget->hasUnimplementedSIMD128()) + for (auto T : {MVT::v2i64, MVT::v2f64}) + setOperationAction(Op, T, Custom); + } + + // There is no i64x2.mul instruction + setOperationAction(ISD::MUL, MVT::v2i64, Expand); + + // There are no vector select instructions for (auto Op : {ISD::VSELECT, ISD::SELECT_CC, ISD::SELECT}) { for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}) setOperationAction(Op, T, Expand); @@ -169,6 +171,31 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering( setOperationAction(Op, T, Expand); } + // Expand integer operations supported for scalars but not SIMD + for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV, + ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR}) { + for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) + setOperationAction(Op, T, Expand); + if (Subtarget->hasUnimplementedSIMD128()) + setOperationAction(Op, MVT::v2i64, Expand); + } + + // Expand float operations supported for scalars but not SIMD + for (auto Op : {ISD::FCEIL, ISD::FFLOOR, 
ISD::FTRUNC, ISD::FNEARBYINT, + ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10, + ISD::FEXP, ISD::FEXP2, ISD::FRINT}) { + setOperationAction(Op, MVT::v4f32, Expand); + if (Subtarget->hasUnimplementedSIMD128()) + setOperationAction(Op, MVT::v2f64, Expand); + } + + // Expand additional SIMD ops that V8 hasn't implemented yet + if (!Subtarget->hasUnimplementedSIMD128()) { + setOperationAction(ISD::FSQRT, MVT::v4f32, Expand); + setOperationAction(ISD::FDIV, MVT::v4f32, Expand); + } + } + // As a special case, these operators use the type to mean the type to // sign-extend from. setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); @@ -220,25 +247,8 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering( } } - // Expand additional SIMD ops that V8 hasn't implemented yet - if (Subtarget->hasSIMD128() && !Subtarget->hasUnimplementedSIMD128()) { - setOperationAction(ISD::FSQRT, MVT::v4f32, Expand); - setOperationAction(ISD::FDIV, MVT::v4f32, Expand); - } - - // Custom lower lane accesses to expand out variable indices - if (Subtarget->hasSIMD128()) { - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}) { - setOperationAction(ISD::EXTRACT_VECTOR_ELT, T, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, T, Custom); - } - if (Subtarget->hasUnimplementedSIMD128()) { - for (auto T : {MVT::v2i64, MVT::v2f64}) { - setOperationAction(ISD::EXTRACT_VECTOR_ELT, T, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, T, Custom); - } - } - } + // Don't do anything clever with build_pairs + setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); // Trap lowers to wasm unreachable setOperationAction(ISD::TRAP, MVT::Other, Legal); @@ -248,6 +258,31 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering( setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); setMaxAtomicSizeInBitsSupported(64); + + if (Subtarget->hasBulkMemory()) { + // Use memory.copy and friends over multiple loads and stores + MaxStoresPerMemcpy = 1; + MaxStoresPerMemcpyOptSize = 1; + 
MaxStoresPerMemmove = 1; + MaxStoresPerMemmoveOptSize = 1; + MaxStoresPerMemset = 1; + MaxStoresPerMemsetOptSize = 1; + } + + // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is + // consistent with the f64 and f128 names. + setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2"); + setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2"); + + // Define the emscripten name for return address helper. + // TODO: when implementing other WASM backends, make this generic or only do + // this on emscripten depending on what they end up doing. + setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address"); + + // Always convert switches to br_tables unless there is only one case, which + // is equivalent to a simple branch. This reduces code size for wasm, and we + // defer possible jump table optimizations to the VM. + setMinimumJumpTableEntries(2); } TargetLowering::AtomicExpansionKind @@ -272,12 +307,6 @@ FastISel *WebAssemblyTargetLowering::createFastISel( return WebAssembly::createFastISel(FuncInfo, LibInfo); } -bool WebAssemblyTargetLowering::isOffsetFoldingLegal( - const GlobalAddressSDNode * /*GA*/) const { - // All offsets can be folded. - return true; -} - MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/, EVT VT) const { unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1); @@ -324,11 +353,11 @@ static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL, auto &Context = BB->getParent()->getFunction().getContext(); Type *Ty = Float64 ? 
Type::getDoubleTy(Context) : Type::getFloatTy(Context); - const BasicBlock *LLVM_BB = BB->getBasicBlock(); + const BasicBlock *LLVMBB = BB->getBasicBlock(); MachineFunction *F = BB->getParent(); - MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVM_BB); - MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB); - MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB); + MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB); + MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB); MachineFunction::iterator It = ++BB->getIterator(); F->insert(It, FalseMBB); @@ -336,8 +365,7 @@ static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL, F->insert(It, DoneMBB); // Transfer the remainder of BB and its successor edges to DoneMBB. - DoneMBB->splice(DoneMBB->begin(), BB, - std::next(MachineBasicBlock::iterator(MI)), BB->end()); + DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end()); DoneMBB->transferSuccessorsAndUpdatePHIs(BB); BB->addSuccessor(TrueMBB); @@ -502,7 +530,8 @@ bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL, } bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses( - EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/, bool *Fast) const { + EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/, + MachineMemOperand::Flags /*Flags*/, bool *Fast) const { // WebAssembly supports unaligned accesses, though it should be declared // with the p2align attribute on loads and stores which do so, and there // may be a performance impact. 
We tell LLVM they're "fast" because @@ -578,14 +607,14 @@ bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, // Lowering Code //===----------------------------------------------------------------------===// -static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *msg) { +static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) { MachineFunction &MF = DAG.getMachineFunction(); DAG.getContext()->diagnose( - DiagnosticInfoUnsupported(MF.getFunction(), msg, DL.getDebugLoc())); + DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc())); } // Test whether the given calling convention is supported. -static bool CallingConvSupported(CallingConv::ID CallConv) { +static bool callingConvSupported(CallingConv::ID CallConv) { // We currently support the language-independent target-independent // conventions. We don't yet have a way to annotate calls with properties like // "cold", and we don't have any call-clobbered registers, so these are mostly @@ -608,20 +637,21 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI, auto Layout = MF.getDataLayout(); CallingConv::ID CallConv = CLI.CallConv; - if (!CallingConvSupported(CallConv)) + if (!callingConvSupported(CallConv)) fail(DL, DAG, "WebAssembly doesn't support language-specific or target-specific " "calling conventions yet"); if (CLI.IsPatchPoint) fail(DL, DAG, "WebAssembly doesn't support patch point yet"); - // WebAssembly doesn't currently support explicit tail calls. If they are - // required, fail. Otherwise, just disable them. 
- if ((CallConv == CallingConv::Fast && CLI.IsTailCall && - MF.getTarget().Options.GuaranteedTailCallOpt) || - (CLI.CS && CLI.CS.isMustTailCall())) - fail(DL, DAG, "WebAssembly doesn't support tail call yet"); - CLI.IsTailCall = false; + // Fail if tail calls are required but not enabled + if (!Subtarget->hasTailCall()) { + if ((CallConv == CallingConv::Fast && CLI.IsTailCall && + MF.getTarget().Options.GuaranteedTailCallOpt) || + (CLI.CS && CLI.CS.isMustTailCall())) + fail(DL, DAG, "WebAssembly 'tail-call' feature not enabled"); + CLI.IsTailCall = false; + } SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; if (Ins.size() > 1) @@ -630,9 +660,9 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI, SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; unsigned NumFixedArgs = 0; - for (unsigned i = 0; i < Outs.size(); ++i) { - const ISD::OutputArg &Out = Outs[i]; - SDValue &OutVal = OutVals[i]; + for (unsigned I = 0; I < Outs.size(); ++I) { + const ISD::OutputArg &Out = Outs[I]; + SDValue &OutVal = OutVals[I]; if (Out.Flags.isNest()) fail(DL, DAG, "WebAssembly hasn't implemented nest arguments"); if (Out.Flags.isInAlloca()) @@ -714,6 +744,18 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI, FINode = DAG.getIntPtrConstant(0, DL); } + if (Callee->getOpcode() == ISD::GlobalAddress) { + // If the callee is a GlobalAddress node (quite common, every direct call + // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress + // doesn't at MO_GOT which is not needed for direct calls. + GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee); + Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL, + getPointerTy(DAG.getDataLayout()), + GA->getOffset()); + Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL, + getPointerTy(DAG.getDataLayout()), Callee); + } + // Compute the operands for the CALLn node. 
SmallVector<SDValue, 16> Ops; Ops.push_back(Chain); @@ -742,6 +784,13 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI, // registers. InTys.push_back(In.VT); } + + if (CLI.IsTailCall) { + // ret_calls do not return values to the current frame + SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); + return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops); + } + InTys.push_back(MVT::Other); SDVTList InTyList = DAG.getVTList(InTys); SDValue Res = @@ -771,7 +820,7 @@ SDValue WebAssemblyTargetLowering::LowerReturn( const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL, SelectionDAG &DAG) const { assert(Outs.size() <= 1 && "WebAssembly can only return up to one value"); - if (!CallingConvSupported(CallConv)) + if (!callingConvSupported(CallConv)) fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions"); SmallVector<SDValue, 4> RetOps(1, Chain); @@ -798,7 +847,7 @@ SDValue WebAssemblyTargetLowering::LowerFormalArguments( SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { - if (!CallingConvSupported(CallConv)) + if (!callingConvSupported(CallConv)) fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions"); MachineFunction &MF = DAG.getMachineFunction(); @@ -845,7 +894,7 @@ SDValue WebAssemblyTargetLowering::LowerFormalArguments( // Record the number and types of arguments and results. 
SmallVector<MVT, 4> Params; SmallVector<MVT, 4> Results; - ComputeSignatureVTs(MF.getFunction().getFunctionType(), MF.getFunction(), + computeSignatureVTs(MF.getFunction().getFunctionType(), MF.getFunction(), DAG.getTarget(), Params, Results); for (MVT VT : Results) MFI->addResult(VT); @@ -858,6 +907,21 @@ SDValue WebAssemblyTargetLowering::LowerFormalArguments( return Chain; } +void WebAssemblyTargetLowering::ReplaceNodeResults( + SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const { + switch (N->getOpcode()) { + case ISD::SIGN_EXTEND_INREG: + // Do not add any results, signifying that N should not be custom lowered + // after all. This happens because simd128 turns on custom lowering for + // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an + // illegal type. + break; + default: + llvm_unreachable( + "ReplaceNodeResults not implemented for this op for WebAssembly!"); + } +} + //===----------------------------------------------------------------------===// // Custom lowering hooks. //===----------------------------------------------------------------------===// @@ -885,22 +949,23 @@ SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op, case ISD::BRIND: fail(DL, DAG, "WebAssembly hasn't implemented computed gotos"); return SDValue(); - case ISD::RETURNADDR: // Probably nothing meaningful can be returned here. 
- fail(DL, DAG, "WebAssembly hasn't implemented __builtin_return_address"); - return SDValue(); + case ISD::RETURNADDR: + return LowerRETURNADDR(Op, DAG); case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); case ISD::CopyToReg: return LowerCopyToReg(Op, DAG); - case ISD::INTRINSIC_WO_CHAIN: - return LowerINTRINSIC_WO_CHAIN(Op, DAG); case ISD::EXTRACT_VECTOR_ELT: case ISD::INSERT_VECTOR_ELT: return LowerAccessVectorElement(Op, DAG); case ISD::INTRINSIC_VOID: - return LowerINTRINSIC_VOID(Op, DAG); + case ISD::INTRINSIC_WO_CHAIN: + case ISD::INTRINSIC_W_CHAIN: + return LowerIntrinsic(Op, DAG); case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); + case ISD::BUILD_VECTOR: + return LowerBUILD_VECTOR(Op, DAG); case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); case ISD::SHL: @@ -942,6 +1007,26 @@ SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op, return DAG.getTargetFrameIndex(FI, Op.getValueType()); } +SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + + if (!Subtarget->getTargetTriple().isOSEmscripten()) { + fail(DL, DAG, + "Non-Emscripten WebAssembly hasn't implemented " + "__builtin_return_address"); + return SDValue(); + } + + if (verifyReturnAddressArgumentIsConstant(Op, DAG)) + return SDValue(); + + unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(), + {DAG.getConstant(Depth, DL, MVT::i32)}, false, DL) + .first; +} + SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { // Non-zero depths are not supported by WebAssembly currently. 
Use the @@ -966,9 +1051,40 @@ SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op, "Unexpected target flags on generic GlobalAddressSDNode"); if (GA->getAddressSpace() != 0) fail(DL, DAG, "WebAssembly only expects the 0 address space"); - return DAG.getNode( - WebAssemblyISD::Wrapper, DL, VT, - DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset())); + + unsigned OperandFlags = 0; + if (isPositionIndependent()) { + const GlobalValue *GV = GA->getGlobal(); + if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) { + MachineFunction &MF = DAG.getMachineFunction(); + MVT PtrVT = getPointerTy(MF.getDataLayout()); + const char *BaseName; + if (GV->getValueType()->isFunctionTy()) { + BaseName = MF.createExternalSymbolName("__table_base"); + OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL; + } + else { + BaseName = MF.createExternalSymbolName("__memory_base"); + OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL; + } + SDValue BaseAddr = + DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, + DAG.getTargetExternalSymbol(BaseName, PtrVT)); + + SDValue SymAddr = DAG.getNode( + WebAssemblyISD::WrapperPIC, DL, VT, + DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(), + OperandFlags)); + + return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr); + } else { + OperandFlags = WebAssemblyII::MO_GOT; + } + } + + return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT, + DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, + GA->getOffset(), OperandFlags)); } SDValue @@ -979,15 +1095,8 @@ WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op, EVT VT = Op.getValueType(); assert(ES->getTargetFlags() == 0 && "Unexpected target flags on generic ExternalSymbolSDNode"); - // Set the TargetFlags to 0x1 which indicates that this is a "function" - // symbol rather than a data symbol. 
We do this unconditionally even though - // we don't know anything about the symbol other than its name, because all - // external symbols used in target-independent SelectionDAG code are for - // functions. - return DAG.getNode( - WebAssemblyISD::Wrapper, DL, VT, - DAG.getTargetExternalSymbol(ES->getSymbol(), VT, - WebAssemblyII::MO_SYMBOL_FUNCTION)); + return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT, + DAG.getTargetExternalSymbol(ES->getSymbol(), VT)); } SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op, @@ -1041,17 +1150,28 @@ SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op, MachinePointerInfo(SV), 0); } -SDValue -WebAssemblyTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, - SelectionDAG &DAG) const { - unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); +SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op, + SelectionDAG &DAG) const { + MachineFunction &MF = DAG.getMachineFunction(); + unsigned IntNo; + switch (Op.getOpcode()) { + case ISD::INTRINSIC_VOID: + case ISD::INTRINSIC_W_CHAIN: + IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); + break; + case ISD::INTRINSIC_WO_CHAIN: + IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + break; + default: + llvm_unreachable("Invalid intrinsic"); + } SDLoc DL(Op); + switch (IntNo) { default: - return {}; // Don't custom lower most intrinsics. + return SDValue(); // Don't custom lower most intrinsics. 
case Intrinsic::wasm_lsda: { - MachineFunction &MF = DAG.getMachineFunction(); EVT VT = Op.getValueType(); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout()); @@ -1061,43 +1181,24 @@ WebAssemblyTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT, DAG.getMCSymbol(S, PtrVT)); } - } -} - -SDValue -WebAssemblyTargetLowering::LowerINTRINSIC_VOID(SDValue Op, - SelectionDAG &DAG) const { - MachineFunction &MF = DAG.getMachineFunction(); - unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); - SDLoc DL(Op); - - switch (IntNo) { - default: - return {}; // Don't custom lower most intrinsics. case Intrinsic::wasm_throw: { + // We only support C++ exceptions for now int Tag = cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue(); - switch (Tag) { - case CPP_EXCEPTION: { - const TargetLowering &TLI = DAG.getTargetLoweringInfo(); - MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout()); - const char *SymName = MF.createExternalSymbolName("__cpp_exception"); - SDValue SymNode = - DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, - DAG.getTargetExternalSymbol( - SymName, PtrVT, WebAssemblyII::MO_SYMBOL_EVENT)); - return DAG.getNode(WebAssemblyISD::THROW, DL, - MVT::Other, // outchain type - { - Op.getOperand(0), // inchain - SymNode, // exception symbol - Op.getOperand(3) // thrown value - }); - } - default: + if (Tag != CPP_EXCEPTION) llvm_unreachable("Invalid tag!"); - } - break; + const TargetLowering &TLI = DAG.getTargetLoweringInfo(); + MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout()); + const char *SymName = MF.createExternalSymbolName("__cpp_exception"); + SDValue SymNode = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, + DAG.getTargetExternalSymbol(SymName, PtrVT)); + return DAG.getNode(WebAssemblyISD::THROW, DL, + MVT::Other, // outchain type + { + Op.getOperand(0), // inchain + SymNode, // exception symbol + Op.getOperand(3) // thrown value 
+ }); } } } @@ -1105,6 +1206,7 @@ WebAssemblyTargetLowering::LowerINTRINSIC_VOID(SDValue Op, SDValue WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const { + SDLoc DL(Op); // If sign extension operations are disabled, allow sext_inreg only if operand // is a vector extract. SIMD does not depend on sign extension operations, but // allowing sext_inreg in this context lets us have simple patterns to select @@ -1112,12 +1214,136 @@ WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, // simpler in this file, but would necessitate large and brittle patterns to // undo the expansion and select extract_lane_s instructions. assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128()); - if (Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT) - return Op; + if (Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT) { + const SDValue &Extract = Op.getOperand(0); + MVT VecT = Extract.getOperand(0).getSimpleValueType(); + MVT ExtractedLaneT = static_cast<VTSDNode *>(Op.getOperand(1).getNode()) + ->getVT() + .getSimpleVT(); + MVT ExtractedVecT = + MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits()); + if (ExtractedVecT == VecT) + return Op; + // Bitcast vector to appropriate type to ensure ISel pattern coverage + const SDValue &Index = Extract.getOperand(1); + unsigned IndexVal = + static_cast<ConstantSDNode *>(Index.getNode())->getZExtValue(); + unsigned Scale = + ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements(); + assert(Scale > 1); + SDValue NewIndex = + DAG.getConstant(IndexVal * Scale, DL, Index.getValueType()); + SDValue NewExtract = DAG.getNode( + ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(), + DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex); + return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), + NewExtract, Op.getOperand(1)); + } // Otherwise expand return SDValue(); } +SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op, + 
SelectionDAG &DAG) const { + SDLoc DL(Op); + const EVT VecT = Op.getValueType(); + const EVT LaneT = Op.getOperand(0).getValueType(); + const size_t Lanes = Op.getNumOperands(); + auto IsConstant = [](const SDValue &V) { + return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP; + }; + + // Find the most common operand, which is approximately the best to splat + using Entry = std::pair<SDValue, size_t>; + SmallVector<Entry, 16> ValueCounts; + size_t NumConst = 0, NumDynamic = 0; + for (const SDValue &Lane : Op->op_values()) { + if (Lane.isUndef()) { + continue; + } else if (IsConstant(Lane)) { + NumConst++; + } else { + NumDynamic++; + } + auto CountIt = std::find_if(ValueCounts.begin(), ValueCounts.end(), + [&Lane](Entry A) { return A.first == Lane; }); + if (CountIt == ValueCounts.end()) { + ValueCounts.emplace_back(Lane, 1); + } else { + CountIt->second++; + } + } + auto CommonIt = + std::max_element(ValueCounts.begin(), ValueCounts.end(), + [](Entry A, Entry B) { return A.second < B.second; }); + assert(CommonIt != ValueCounts.end() && "Unexpected all-undef build_vector"); + SDValue SplatValue = CommonIt->first; + size_t NumCommon = CommonIt->second; + + // If v128.const is available, consider using it instead of a splat + if (Subtarget->hasUnimplementedSIMD128()) { + // {i32,i64,f32,f64}.const opcode, and value + const size_t ConstBytes = 1 + std::max(size_t(4), 16 / Lanes); + // SIMD prefix and opcode + const size_t SplatBytes = 2; + const size_t SplatConstBytes = SplatBytes + ConstBytes; + // SIMD prefix, opcode, and lane index + const size_t ReplaceBytes = 3; + const size_t ReplaceConstBytes = ReplaceBytes + ConstBytes; + // SIMD prefix, v128.const opcode, and 128-bit value + const size_t VecConstBytes = 18; + // Initial v128.const and a replace_lane for each non-const operand + const size_t ConstInitBytes = VecConstBytes + NumDynamic * ReplaceBytes; + // Initial splat and all necessary replace_lanes + const size_t SplatInitBytes = + 
IsConstant(SplatValue) + // Initial constant splat + ? (SplatConstBytes + + // Constant replace_lanes + (NumConst - NumCommon) * ReplaceConstBytes + + // Dynamic replace_lanes + (NumDynamic * ReplaceBytes)) + // Initial dynamic splat + : (SplatBytes + + // Constant replace_lanes + (NumConst * ReplaceConstBytes) + + // Dynamic replace_lanes + (NumDynamic - NumCommon) * ReplaceBytes); + if (ConstInitBytes < SplatInitBytes) { + // Create build_vector that will lower to initial v128.const + SmallVector<SDValue, 16> ConstLanes; + for (const SDValue &Lane : Op->op_values()) { + if (IsConstant(Lane)) { + ConstLanes.push_back(Lane); + } else if (LaneT.isFloatingPoint()) { + ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT)); + } else { + ConstLanes.push_back(DAG.getConstant(0, DL, LaneT)); + } + } + SDValue Result = DAG.getBuildVector(VecT, DL, ConstLanes); + // Add replace_lane instructions for non-const lanes + for (size_t I = 0; I < Lanes; ++I) { + const SDValue &Lane = Op->getOperand(I); + if (!Lane.isUndef() && !IsConstant(Lane)) + Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane, + DAG.getConstant(I, DL, MVT::i32)); + } + return Result; + } + } + // Use a splat for the initial vector + SDValue Result = DAG.getSplatBuildVector(VecT, DL, SplatValue); + // Add replace_lane instructions for other values + for (size_t I = 0; I < Lanes; ++I) { + const SDValue &Lane = Op->getOperand(I); + if (Lane != SplatValue) + Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane, + DAG.getConstant(I, DL, MVT::i32)); + } + return Result; +} + SDValue WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { @@ -1134,11 +1360,10 @@ WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, Ops[OpIdx++] = Op.getOperand(1); // Expand mask indices to byte indices and materialize them as operands - for (size_t I = 0, Lanes = Mask.size(); I < Lanes; ++I) { + for (int M : Mask) { for (size_t J = 0; J < LaneBytes; ++J) { // Lower 
undefs (represented by -1 in mask) to zero - uint64_t ByteIndex = - Mask[I] == -1 ? 0 : (uint64_t)Mask[I] * LaneBytes + J; + uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J; Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32); } } @@ -1158,7 +1383,7 @@ WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op, return SDValue(); } -static SDValue UnrollVectorShift(SDValue Op, SelectionDAG &DAG) { +static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) { EVT LaneT = Op.getSimpleValueType().getVectorElementType(); // 32-bit and 64-bit unrolled shifts will have proper semantics if (LaneT.bitsGE(MVT::i32)) @@ -1193,17 +1418,17 @@ SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op, // Expand all vector shifts until V8 fixes its implementation // TODO: remove this once V8 is fixed if (!Subtarget->hasUnimplementedSIMD128()) - return UnrollVectorShift(Op, DAG); + return unrollVectorShift(Op, DAG); // Unroll non-splat vector shifts BuildVectorSDNode *ShiftVec; SDValue SplatVal; if (!(ShiftVec = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) || !(SplatVal = ShiftVec->getSplatValue())) - return UnrollVectorShift(Op, DAG); + return unrollVectorShift(Op, DAG); // All splats except i64x2 const splats are handled by patterns - ConstantSDNode *SplatConst = dyn_cast<ConstantSDNode>(SplatVal); + auto *SplatConst = dyn_cast<ConstantSDNode>(SplatVal); if (!SplatConst || Op.getSimpleValueType() != MVT::v2i64) return Op; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h index 59f4230ed889..b3c7f3defd5f 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h @@ -1,9 +1,8 @@ //- WebAssemblyISelLowering.h - WebAssembly DAG Lowering Interface -*- C++ -*-// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open 
Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -47,7 +46,6 @@ private: AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override; FastISel *createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const override; - bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override; MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override; MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, @@ -62,6 +60,7 @@ private: unsigned AS, Instruction *I = nullptr) const override; bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace, unsigned Align, + MachineMemOperand::Flags Flags, bool *Fast) const override; bool isIntDivCheap(EVT VT, AttributeList Attr) const override; @@ -87,9 +86,17 @@ private: const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const override; + void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results, + SelectionDAG &DAG) const override; + + const char *getClearCacheBuiltinName() const override { + report_fatal_error("llvm.clear_cache is not supported on wasm"); + } + // Custom lowering hooks. 
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override; SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const; @@ -97,9 +104,9 @@ private: SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; SDValue LowerCopyToReg(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerIntrinsic(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; SDValue LowerAccessVectorElement(SDValue Op, SelectionDAG &DAG) const; SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td index 5fb8ef90bc43..e85aa57efc42 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td @@ -1,9 +1,8 @@ // WebAssemblyInstrAtomics.td-WebAssembly Atomic codegen support-*- tablegen -*- // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -12,20 +11,132 @@ /// //===----------------------------------------------------------------------===// +let UseNamedOperandTable = 1 in +multiclass ATOMIC_I<dag oops_r, dag iops_r, dag oops_s, dag iops_s, + list<dag> pattern_r, string asmstr_r = "", + string asmstr_s = "", bits<32> atomic_op = -1> { + defm "" : I<oops_r, iops_r, oops_s, iops_s, pattern_r, asmstr_r, asmstr_s, + !or(0xfe00, !and(0xff, atomic_op))>, + Requires<[HasAtomics]>; +} + +multiclass ATOMIC_NRI<dag oops, dag iops, list<dag> pattern, string asmstr = "", + bits<32> atomic_op = -1> { + defm "" : NRI<oops, iops, pattern, asmstr, + !or(0xfe00, !and(0xff, atomic_op))>, + Requires<[HasAtomics]>; +} + +//===----------------------------------------------------------------------===// +// Atomic wait / notify +//===----------------------------------------------------------------------===// + +let hasSideEffects = 1 in { +defm ATOMIC_NOTIFY : + ATOMIC_I<(outs I32:$dst), + (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I32:$count), + (outs), (ins P2Align:$p2align, offset32_op:$off), [], + "atomic.notify \t$dst, ${off}(${addr})${p2align}, $count", + "atomic.notify \t${off}${p2align}", 0x00>; +let mayLoad = 1 in { +defm ATOMIC_WAIT_I32 : + ATOMIC_I<(outs I32:$dst), + (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I32:$exp, + I64:$timeout), + (outs), (ins P2Align:$p2align, offset32_op:$off), [], + "i32.atomic.wait \t$dst, ${off}(${addr})${p2align}, $exp, $timeout", + "i32.atomic.wait \t${off}${p2align}", 0x01>; +defm ATOMIC_WAIT_I64 : + ATOMIC_I<(outs I32:$dst), + (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I64:$exp, + I64:$timeout), + (outs), (ins P2Align:$p2align, offset32_op:$off), [], + "i64.atomic.wait \t$dst, ${off}(${addr})${p2align}, $exp, $timeout", + "i64.atomic.wait \t${off}${p2align}", 0x02>; +} // mayLoad = 1 +} // hasSideEffects 
= 1 + +let Predicates = [HasAtomics] in { +// Select notifys with no constant offset. +def NotifyPatNoOffset : + Pat<(i32 (int_wasm_atomic_notify I32:$addr, I32:$count)), + (ATOMIC_NOTIFY 0, 0, I32:$addr, I32:$count)>; + +// Select notifys with a constant offset. + +// Pattern with address + immediate offset +class NotifyPatImmOff<PatFrag operand> : + Pat<(i32 (int_wasm_atomic_notify (operand I32:$addr, imm:$off), I32:$count)), + (ATOMIC_NOTIFY 0, imm:$off, I32:$addr, I32:$count)>; +def : NotifyPatImmOff<regPlusImm>; +def : NotifyPatImmOff<or_is_add>; + +def NotifyPatGlobalAddr : + Pat<(i32 (int_wasm_atomic_notify (regPlusGA I32:$addr, + (WebAssemblywrapper tglobaladdr:$off)), + I32:$count)), + (ATOMIC_NOTIFY 0, tglobaladdr:$off, I32:$addr, I32:$count)>; + +// Select notifys with just a constant offset. +def NotifyPatOffsetOnly : + Pat<(i32 (int_wasm_atomic_notify imm:$off, I32:$count)), + (ATOMIC_NOTIFY 0, imm:$off, (CONST_I32 0), I32:$count)>; + +def NotifyPatGlobalAddrOffOnly : + Pat<(i32 (int_wasm_atomic_notify (WebAssemblywrapper tglobaladdr:$off), + I32:$count)), + (ATOMIC_NOTIFY 0, tglobaladdr:$off, (CONST_I32 0), I32:$count)>; + +// Select waits with no constant offset. +class WaitPatNoOffset<ValueType ty, Intrinsic kind, NI inst> : + Pat<(i32 (kind I32:$addr, ty:$exp, I64:$timeout)), + (inst 0, 0, I32:$addr, ty:$exp, I64:$timeout)>; +def : WaitPatNoOffset<i32, int_wasm_atomic_wait_i32, ATOMIC_WAIT_I32>; +def : WaitPatNoOffset<i64, int_wasm_atomic_wait_i64, ATOMIC_WAIT_I64>; + +// Select waits with a constant offset. 
+ +// Pattern with address + immediate offset +class WaitPatImmOff<ValueType ty, Intrinsic kind, PatFrag operand, NI inst> : + Pat<(i32 (kind (operand I32:$addr, imm:$off), ty:$exp, I64:$timeout)), + (inst 0, imm:$off, I32:$addr, ty:$exp, I64:$timeout)>; +def : WaitPatImmOff<i32, int_wasm_atomic_wait_i32, regPlusImm, ATOMIC_WAIT_I32>; +def : WaitPatImmOff<i32, int_wasm_atomic_wait_i32, or_is_add, ATOMIC_WAIT_I32>; +def : WaitPatImmOff<i64, int_wasm_atomic_wait_i64, regPlusImm, ATOMIC_WAIT_I64>; +def : WaitPatImmOff<i64, int_wasm_atomic_wait_i64, or_is_add, ATOMIC_WAIT_I64>; + +class WaitPatGlobalAddr<ValueType ty, Intrinsic kind, NI inst> : + Pat<(i32 (kind (regPlusGA I32:$addr, (WebAssemblywrapper tglobaladdr:$off)), + ty:$exp, I64:$timeout)), + (inst 0, tglobaladdr:$off, I32:$addr, ty:$exp, I64:$timeout)>; +def : WaitPatGlobalAddr<i32, int_wasm_atomic_wait_i32, ATOMIC_WAIT_I32>; +def : WaitPatGlobalAddr<i64, int_wasm_atomic_wait_i64, ATOMIC_WAIT_I64>; + +// Select wait_i32, ATOMIC_WAIT_I32s with just a constant offset. 
+class WaitPatOffsetOnly<ValueType ty, Intrinsic kind, NI inst> : + Pat<(i32 (kind imm:$off, ty:$exp, I64:$timeout)), + (inst 0, imm:$off, (CONST_I32 0), ty:$exp, I64:$timeout)>; +def : WaitPatOffsetOnly<i32, int_wasm_atomic_wait_i32, ATOMIC_WAIT_I32>; +def : WaitPatOffsetOnly<i64, int_wasm_atomic_wait_i64, ATOMIC_WAIT_I64>; + +class WaitPatGlobalAddrOffOnly<ValueType ty, Intrinsic kind, NI inst> : + Pat<(i32 (kind (WebAssemblywrapper tglobaladdr:$off), ty:$exp, I64:$timeout)), + (inst 0, tglobaladdr:$off, (CONST_I32 0), ty:$exp, I64:$timeout)>; +def : WaitPatGlobalAddrOffOnly<i32, int_wasm_atomic_wait_i32, ATOMIC_WAIT_I32>; +def : WaitPatGlobalAddrOffOnly<i64, int_wasm_atomic_wait_i64, ATOMIC_WAIT_I64>; +} // Predicates = [HasAtomics] + //===----------------------------------------------------------------------===// // Atomic loads //===----------------------------------------------------------------------===// -multiclass ATOMIC_I<dag oops_r, dag iops_r, dag oops_s, dag iops_s, - list<dag> pattern_r, string asmstr_r = "", - string asmstr_s = "", bits<32> inst = -1> { - defm "" : I<oops_r, iops_r, oops_s, iops_s, pattern_r, asmstr_r, asmstr_s, - inst>, +multiclass AtomicLoad<WebAssemblyRegClass rc, string name, int atomic_op> { + defm "" : WebAssemblyLoad<rc, name, !or(0xfe00, !and(0xff, atomic_op))>, Requires<[HasAtomics]>; } -defm ATOMIC_LOAD_I32 : WebAssemblyLoad<I32, "i32.atomic.load", 0xfe10>; -defm ATOMIC_LOAD_I64 : WebAssemblyLoad<I64, "i64.atomic.load", 0xfe11>; +defm ATOMIC_LOAD_I32 : AtomicLoad<I32, "i32.atomic.load", 0x10>; +defm ATOMIC_LOAD_I64 : AtomicLoad<I64, "i64.atomic.load", 0x11>; // Select loads with no constant offset. 
let Predicates = [HasAtomics] in { @@ -43,9 +154,6 @@ def : LoadPatImmOff<i64, atomic_load_64, or_is_add, ATOMIC_LOAD_I64>; def : LoadPatGlobalAddr<i32, atomic_load_32, ATOMIC_LOAD_I32>; def : LoadPatGlobalAddr<i64, atomic_load_64, ATOMIC_LOAD_I64>; -def : LoadPatExternalSym<i32, atomic_load_32, ATOMIC_LOAD_I32>; -def : LoadPatExternalSym<i64, atomic_load_64, ATOMIC_LOAD_I64>; - // Select loads with just a constant offset. def : LoadPatOffsetOnly<i32, atomic_load_32, ATOMIC_LOAD_I32>; def : LoadPatOffsetOnly<i64, atomic_load_64, ATOMIC_LOAD_I64>; @@ -53,18 +161,15 @@ def : LoadPatOffsetOnly<i64, atomic_load_64, ATOMIC_LOAD_I64>; def : LoadPatGlobalAddrOffOnly<i32, atomic_load_32, ATOMIC_LOAD_I32>; def : LoadPatGlobalAddrOffOnly<i64, atomic_load_64, ATOMIC_LOAD_I64>; -def : LoadPatExternSymOffOnly<i32, atomic_load_32, ATOMIC_LOAD_I32>; -def : LoadPatExternSymOffOnly<i64, atomic_load_64, ATOMIC_LOAD_I64>; - } // Predicates = [HasAtomics] // Extending loads. Note that there are only zero-extending atomic loads, no // sign-extending loads. -defm ATOMIC_LOAD8_U_I32 : WebAssemblyLoad<I32, "i32.atomic.load8_u", 0xfe12>; -defm ATOMIC_LOAD16_U_I32 : WebAssemblyLoad<I32, "i32.atomic.load16_u", 0xfe13>; -defm ATOMIC_LOAD8_U_I64 : WebAssemblyLoad<I64, "i64.atomic.load8_u", 0xfe14>; -defm ATOMIC_LOAD16_U_I64 : WebAssemblyLoad<I64, "i64.atomic.load16_u", 0xfe15>; -defm ATOMIC_LOAD32_U_I64 : WebAssemblyLoad<I64, "i64.atomic.load32_u", 0xfe16>; +defm ATOMIC_LOAD8_U_I32 : AtomicLoad<I32, "i32.atomic.load8_u", 0x12>; +defm ATOMIC_LOAD16_U_I32 : AtomicLoad<I32, "i32.atomic.load16_u", 0x13>; +defm ATOMIC_LOAD8_U_I64 : AtomicLoad<I64, "i64.atomic.load8_u", 0x14>; +defm ATOMIC_LOAD16_U_I64 : AtomicLoad<I64, "i64.atomic.load16_u", 0x15>; +defm ATOMIC_LOAD32_U_I64 : AtomicLoad<I64, "i64.atomic.load32_u", 0x16>; // Fragments for extending loads. 
These are different from regular loads because // the SDNodes are derived from AtomicSDNode rather than LoadSDNode and @@ -149,16 +254,6 @@ def : LoadPatGlobalAddr<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>; def : LoadPatGlobalAddr<i64, sext_aload_8_64, ATOMIC_LOAD8_U_I64>; def : LoadPatGlobalAddr<i64, sext_aload_16_64, ATOMIC_LOAD16_U_I64>; -def : LoadPatExternalSym<i32, zext_aload_8_32, ATOMIC_LOAD8_U_I32>; -def : LoadPatExternalSym<i32, zext_aload_16_32, ATOMIC_LOAD16_U_I32>; -def : LoadPatExternalSym<i64, zext_aload_8_64, ATOMIC_LOAD8_U_I64>; -def : LoadPatExternalSym<i64, zext_aload_16_64, ATOMIC_LOAD16_U_I64>; -def : LoadPatExternalSym<i64, zext_aload_32_64, ATOMIC_LOAD32_U_I64>; -def : LoadPatExternalSym<i32, atomic_load_8, ATOMIC_LOAD8_U_I32>; -def : LoadPatExternalSym<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>; -def : LoadPatExternalSym<i64, sext_aload_8_64, ATOMIC_LOAD8_U_I64>; -def : LoadPatExternalSym<i64, sext_aload_16_64, ATOMIC_LOAD16_U_I64>; - // Extending loads with just a constant offset def : LoadPatOffsetOnly<i32, zext_aload_8_32, ATOMIC_LOAD8_U_I32>; def : LoadPatOffsetOnly<i32, zext_aload_16_32, ATOMIC_LOAD16_U_I32>; @@ -180,24 +275,19 @@ def : LoadPatGlobalAddrOffOnly<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>; def : LoadPatGlobalAddrOffOnly<i64, sext_aload_8_64, ATOMIC_LOAD8_U_I64>; def : LoadPatGlobalAddrOffOnly<i64, sext_aload_16_64, ATOMIC_LOAD16_U_I64>; -def : LoadPatExternSymOffOnly<i32, zext_aload_8_32, ATOMIC_LOAD8_U_I32>; -def : LoadPatExternSymOffOnly<i32, zext_aload_16_32, ATOMIC_LOAD16_U_I32>; -def : LoadPatExternSymOffOnly<i64, zext_aload_8_64, ATOMIC_LOAD8_U_I64>; -def : LoadPatExternSymOffOnly<i64, zext_aload_16_64, ATOMIC_LOAD16_U_I64>; -def : LoadPatExternSymOffOnly<i64, zext_aload_32_64, ATOMIC_LOAD32_U_I64>; -def : LoadPatExternSymOffOnly<i32, atomic_load_8, ATOMIC_LOAD8_U_I32>; -def : LoadPatExternSymOffOnly<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>; -def : LoadPatExternSymOffOnly<i64, sext_aload_8_64, ATOMIC_LOAD8_U_I64>; 
-def : LoadPatExternSymOffOnly<i64, sext_aload_16_64, ATOMIC_LOAD16_U_I64>; - } // Predicates = [HasAtomics] //===----------------------------------------------------------------------===// // Atomic stores //===----------------------------------------------------------------------===// -defm ATOMIC_STORE_I32 : WebAssemblyStore<I32, "i32.atomic.store", 0xfe17>; -defm ATOMIC_STORE_I64 : WebAssemblyStore<I64, "i64.atomic.store", 0xfe18>; +multiclass AtomicStore<WebAssemblyRegClass rc, string name, int atomic_op> { + defm "" : WebAssemblyStore<rc, name, !or(0xfe00, !and(0xff, atomic_op))>, + Requires<[HasAtomics]>; +} + +defm ATOMIC_STORE_I32 : AtomicStore<I32, "i32.atomic.store", 0x17>; +defm ATOMIC_STORE_I64 : AtomicStore<I64, "i64.atomic.store", 0x18>; // We need an 'atomic' version of store patterns because store and atomic_store // nodes have different operand orders: @@ -230,12 +320,6 @@ class AStorePatGlobalAddr<ValueType ty, PatFrag kind, NI inst> : def : AStorePatGlobalAddr<i32, atomic_store_32, ATOMIC_STORE_I32>; def : AStorePatGlobalAddr<i64, atomic_store_64, ATOMIC_STORE_I64>; -class AStorePatExternalSym<ValueType ty, PatFrag kind, NI inst> : - Pat<(kind (add I32:$addr, (WebAssemblywrapper texternalsym:$off)), ty:$val), - (inst 0, texternalsym:$off, I32:$addr, ty:$val)>; -def : AStorePatExternalSym<i32, atomic_store_32, ATOMIC_STORE_I32>; -def : AStorePatExternalSym<i64, atomic_store_64, ATOMIC_STORE_I64>; - // Select stores with just a constant offset. 
class AStorePatOffsetOnly<ValueType ty, PatFrag kind, NI inst> : Pat<(kind imm:$off, ty:$val), (inst 0, imm:$off, (CONST_I32 0), ty:$val)>; @@ -248,20 +332,14 @@ class AStorePatGlobalAddrOffOnly<ValueType ty, PatFrag kind, NI inst> : def : AStorePatGlobalAddrOffOnly<i32, atomic_store_32, ATOMIC_STORE_I32>; def : AStorePatGlobalAddrOffOnly<i64, atomic_store_64, ATOMIC_STORE_I64>; -class AStorePatExternSymOffOnly<ValueType ty, PatFrag kind, NI inst> : - Pat<(kind (WebAssemblywrapper texternalsym:$off), ty:$val), - (inst 0, texternalsym:$off, (CONST_I32 0), ty:$val)>; -def : AStorePatExternSymOffOnly<i32, atomic_store_32, ATOMIC_STORE_I32>; -def : AStorePatExternSymOffOnly<i64, atomic_store_64, ATOMIC_STORE_I64>; - } // Predicates = [HasAtomics] // Truncating stores. -defm ATOMIC_STORE8_I32 : WebAssemblyStore<I32, "i32.atomic.store8", 0xfe19>; -defm ATOMIC_STORE16_I32 : WebAssemblyStore<I32, "i32.atomic.store16", 0xfe1a>; -defm ATOMIC_STORE8_I64 : WebAssemblyStore<I64, "i64.atomic.store8", 0xfe1b>; -defm ATOMIC_STORE16_I64 : WebAssemblyStore<I64, "i64.atomic.store16", 0xfe1c>; -defm ATOMIC_STORE32_I64 : WebAssemblyStore<I64, "i64.atomic.store32", 0xfe1d>; +defm ATOMIC_STORE8_I32 : AtomicStore<I32, "i32.atomic.store8", 0x19>; +defm ATOMIC_STORE16_I32 : AtomicStore<I32, "i32.atomic.store16", 0x1a>; +defm ATOMIC_STORE8_I64 : AtomicStore<I64, "i64.atomic.store8", 0x1b>; +defm ATOMIC_STORE16_I64 : AtomicStore<I64, "i64.atomic.store16", 0x1c>; +defm ATOMIC_STORE32_I64 : AtomicStore<I64, "i64.atomic.store32", 0x1d>; // Fragments for truncating stores. 
@@ -302,12 +380,6 @@ def : AStorePatGlobalAddr<i64, trunc_astore_8_64, ATOMIC_STORE8_I64>; def : AStorePatGlobalAddr<i64, trunc_astore_16_64, ATOMIC_STORE16_I64>; def : AStorePatGlobalAddr<i64, trunc_astore_32_64, ATOMIC_STORE32_I64>; -def : AStorePatExternalSym<i32, atomic_store_8, ATOMIC_STORE8_I32>; -def : AStorePatExternalSym<i32, atomic_store_16, ATOMIC_STORE16_I32>; -def : AStorePatExternalSym<i64, trunc_astore_8_64, ATOMIC_STORE8_I64>; -def : AStorePatExternalSym<i64, trunc_astore_16_64, ATOMIC_STORE16_I64>; -def : AStorePatExternalSym<i64, trunc_astore_32_64, ATOMIC_STORE32_I64>; - // Truncating stores with just a constant offset def : AStorePatOffsetOnly<i32, atomic_store_8, ATOMIC_STORE8_I32>; def : AStorePatOffsetOnly<i32, atomic_store_16, ATOMIC_STORE16_I32>; @@ -321,105 +393,101 @@ def : AStorePatGlobalAddrOffOnly<i64, trunc_astore_8_64, ATOMIC_STORE8_I64>; def : AStorePatGlobalAddrOffOnly<i64, trunc_astore_16_64, ATOMIC_STORE16_I64>; def : AStorePatGlobalAddrOffOnly<i64, trunc_astore_32_64, ATOMIC_STORE32_I64>; -def : AStorePatExternSymOffOnly<i32, atomic_store_8, ATOMIC_STORE8_I32>; -def : AStorePatExternSymOffOnly<i32, atomic_store_16, ATOMIC_STORE16_I32>; -def : AStorePatExternSymOffOnly<i64, trunc_astore_8_64, ATOMIC_STORE8_I64>; -def : AStorePatExternSymOffOnly<i64, trunc_astore_16_64, ATOMIC_STORE16_I64>; -def : AStorePatExternSymOffOnly<i64, trunc_astore_32_64, ATOMIC_STORE32_I64>; - } // Predicates = [HasAtomics] //===----------------------------------------------------------------------===// // Atomic binary read-modify-writes //===----------------------------------------------------------------------===// -multiclass WebAssemblyBinRMW<WebAssemblyRegClass rc, string Name, int Opcode> { - defm "" : I<(outs rc:$dst), - (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$val), - (outs), (ins P2Align:$p2align, offset32_op:$off), [], - !strconcat(Name, "\t$dst, ${off}(${addr})${p2align}, $val"), - !strconcat(Name, "\t${off}, ${p2align}"), 
Opcode>; +multiclass WebAssemblyBinRMW<WebAssemblyRegClass rc, string name, + int atomic_op> { + defm "" : + ATOMIC_I<(outs rc:$dst), + (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$val), + (outs), (ins P2Align:$p2align, offset32_op:$off), [], + !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $val"), + !strconcat(name, "\t${off}${p2align}"), atomic_op>; } -defm ATOMIC_RMW_ADD_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.add", 0xfe1e>; -defm ATOMIC_RMW_ADD_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.add", 0xfe1f>; +defm ATOMIC_RMW_ADD_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.add", 0x1e>; +defm ATOMIC_RMW_ADD_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.add", 0x1f>; defm ATOMIC_RMW8_U_ADD_I32 : - WebAssemblyBinRMW<I32, "i32.atomic.rmw8.add_u", 0xfe20>; + WebAssemblyBinRMW<I32, "i32.atomic.rmw8.add_u", 0x20>; defm ATOMIC_RMW16_U_ADD_I32 : - WebAssemblyBinRMW<I32, "i32.atomic.rmw16.add_u", 0xfe21>; + WebAssemblyBinRMW<I32, "i32.atomic.rmw16.add_u", 0x21>; defm ATOMIC_RMW8_U_ADD_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw8.add_u", 0xfe22>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw8.add_u", 0x22>; defm ATOMIC_RMW16_U_ADD_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw16.add_u", 0xfe23>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw16.add_u", 0x23>; defm ATOMIC_RMW32_U_ADD_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw32.add_u", 0xfe24>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw32.add_u", 0x24>; -defm ATOMIC_RMW_SUB_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.sub", 0xfe25>; -defm ATOMIC_RMW_SUB_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.sub", 0xfe26>; +defm ATOMIC_RMW_SUB_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.sub", 0x25>; +defm ATOMIC_RMW_SUB_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.sub", 0x26>; defm ATOMIC_RMW8_U_SUB_I32 : - WebAssemblyBinRMW<I32, "i32.atomic.rmw8.sub_u", 0xfe27>; + WebAssemblyBinRMW<I32, "i32.atomic.rmw8.sub_u", 0x27>; defm ATOMIC_RMW16_U_SUB_I32 : - WebAssemblyBinRMW<I32, "i32.atomic.rmw16.sub_u", 0xfe28>; + 
WebAssemblyBinRMW<I32, "i32.atomic.rmw16.sub_u", 0x28>; defm ATOMIC_RMW8_U_SUB_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw8.sub_u", 0xfe29>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw8.sub_u", 0x29>; defm ATOMIC_RMW16_U_SUB_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw16.sub_u", 0xfe2a>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw16.sub_u", 0x2a>; defm ATOMIC_RMW32_U_SUB_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw32.sub_u", 0xfe2b>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw32.sub_u", 0x2b>; -defm ATOMIC_RMW_AND_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.and", 0xfe2c>; -defm ATOMIC_RMW_AND_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.and", 0xfe2d>; +defm ATOMIC_RMW_AND_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.and", 0x2c>; +defm ATOMIC_RMW_AND_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.and", 0x2d>; defm ATOMIC_RMW8_U_AND_I32 : - WebAssemblyBinRMW<I32, "i32.atomic.rmw8.and_u", 0xfe2e>; + WebAssemblyBinRMW<I32, "i32.atomic.rmw8.and_u", 0x2e>; defm ATOMIC_RMW16_U_AND_I32 : - WebAssemblyBinRMW<I32, "i32.atomic.rmw16.and_u", 0xfe2f>; + WebAssemblyBinRMW<I32, "i32.atomic.rmw16.and_u", 0x2f>; defm ATOMIC_RMW8_U_AND_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw8.and_u", 0xfe30>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw8.and_u", 0x30>; defm ATOMIC_RMW16_U_AND_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw16.and_u", 0xfe31>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw16.and_u", 0x31>; defm ATOMIC_RMW32_U_AND_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw32.and_u", 0xfe32>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw32.and_u", 0x32>; -defm ATOMIC_RMW_OR_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.or", 0xfe33>; -defm ATOMIC_RMW_OR_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.or", 0xfe34>; +defm ATOMIC_RMW_OR_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.or", 0x33>; +defm ATOMIC_RMW_OR_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.or", 0x34>; defm ATOMIC_RMW8_U_OR_I32 : - WebAssemblyBinRMW<I32, "i32.atomic.rmw8.or_u", 0xfe35>; + WebAssemblyBinRMW<I32, "i32.atomic.rmw8.or_u", 
0x35>; defm ATOMIC_RMW16_U_OR_I32 : - WebAssemblyBinRMW<I32, "i32.atomic.rmw16.or_u", 0xfe36>; + WebAssemblyBinRMW<I32, "i32.atomic.rmw16.or_u", 0x36>; defm ATOMIC_RMW8_U_OR_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw8.or_u", 0xfe37>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw8.or_u", 0x37>; defm ATOMIC_RMW16_U_OR_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw16.or_u", 0xfe38>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw16.or_u", 0x38>; defm ATOMIC_RMW32_U_OR_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw32.or_u", 0xfe39>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw32.or_u", 0x39>; -defm ATOMIC_RMW_XOR_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.xor", 0xfe3a>; -defm ATOMIC_RMW_XOR_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.xor", 0xfe3b>; +defm ATOMIC_RMW_XOR_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.xor", 0x3a>; +defm ATOMIC_RMW_XOR_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.xor", 0x3b>; defm ATOMIC_RMW8_U_XOR_I32 : - WebAssemblyBinRMW<I32, "i32.atomic.rmw8.xor_u", 0xfe3c>; + WebAssemblyBinRMW<I32, "i32.atomic.rmw8.xor_u", 0x3c>; defm ATOMIC_RMW16_U_XOR_I32 : - WebAssemblyBinRMW<I32, "i32.atomic.rmw16.xor_u", 0xfe3d>; + WebAssemblyBinRMW<I32, "i32.atomic.rmw16.xor_u", 0x3d>; defm ATOMIC_RMW8_U_XOR_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw8.xor_u", 0xfe3e>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw8.xor_u", 0x3e>; defm ATOMIC_RMW16_U_XOR_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw16.xor_u", 0xfe3f>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw16.xor_u", 0x3f>; defm ATOMIC_RMW32_U_XOR_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw32.xor_u", 0xfe40>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw32.xor_u", 0x40>; defm ATOMIC_RMW_XCHG_I32 : - WebAssemblyBinRMW<I32, "i32.atomic.rmw.xchg", 0xfe41>; + WebAssemblyBinRMW<I32, "i32.atomic.rmw.xchg", 0x41>; defm ATOMIC_RMW_XCHG_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw.xchg", 0xfe42>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw.xchg", 0x42>; defm ATOMIC_RMW8_U_XCHG_I32 : - WebAssemblyBinRMW<I32, "i32.atomic.rmw8.xchg_u", 
0xfe43>; + WebAssemblyBinRMW<I32, "i32.atomic.rmw8.xchg_u", 0x43>; defm ATOMIC_RMW16_U_XCHG_I32 : - WebAssemblyBinRMW<I32, "i32.atomic.rmw16.xchg_u", 0xfe44>; + WebAssemblyBinRMW<I32, "i32.atomic.rmw16.xchg_u", 0x44>; defm ATOMIC_RMW8_U_XCHG_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw8.xchg_u", 0xfe45>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw8.xchg_u", 0x45>; defm ATOMIC_RMW16_U_XCHG_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw16.xchg_u", 0xfe46>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw16.xchg_u", 0x46>; defm ATOMIC_RMW32_U_XCHG_I64 : - WebAssemblyBinRMW<I64, "i64.atomic.rmw32.xchg_u", 0xfe47>; + WebAssemblyBinRMW<I64, "i64.atomic.rmw32.xchg_u", 0x47>; // Select binary RMWs with no constant offset. class BinRMWPatNoOffset<ValueType ty, PatFrag kind, NI inst> : @@ -437,11 +505,6 @@ class BinRMWPatGlobalAddr<ValueType ty, PatFrag kind, NI inst> : ty:$val)), (inst 0, tglobaladdr:$off, I32:$addr, ty:$val)>; -class BinRMWPatExternalSym<ValueType ty, PatFrag kind, NI inst> : - Pat<(ty (kind (add I32:$addr, (WebAssemblywrapper texternalsym:$off)), - ty:$val)), - (inst 0, texternalsym:$off, I32:$addr, ty:$val)>; - // Select binary RMWs with just a constant offset. class BinRMWPatOffsetOnly<ValueType ty, PatFrag kind, NI inst> : Pat<(ty (kind imm:$off, ty:$val)), @@ -451,10 +514,6 @@ class BinRMWPatGlobalAddrOffOnly<ValueType ty, PatFrag kind, NI inst> : Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off), ty:$val)), (inst 0, tglobaladdr:$off, (CONST_I32 0), ty:$val)>; -class BinRMWPatExternSymOffOnly<ValueType ty, PatFrag kind, NI inst> : - Pat<(ty (kind (WebAssemblywrapper texternalsym:$off), ty:$val)), - (inst 0, texternalsym:$off, (CONST_I32 0), ty:$val)>; - // Patterns for various addressing modes. 
multiclass BinRMWPattern<PatFrag rmw_32, PatFrag rmw_64, NI inst_32, NI inst_64> { @@ -469,17 +528,11 @@ multiclass BinRMWPattern<PatFrag rmw_32, PatFrag rmw_64, NI inst_32, def : BinRMWPatGlobalAddr<i32, rmw_32, inst_32>; def : BinRMWPatGlobalAddr<i64, rmw_64, inst_64>; - def : BinRMWPatExternalSym<i32, rmw_32, inst_32>; - def : BinRMWPatExternalSym<i64, rmw_64, inst_64>; - def : BinRMWPatOffsetOnly<i32, rmw_32, inst_32>; def : BinRMWPatOffsetOnly<i64, rmw_64, inst_64>; def : BinRMWPatGlobalAddrOffOnly<i32, rmw_32, inst_32>; def : BinRMWPatGlobalAddrOffOnly<i64, rmw_64, inst_64>; - - def : BinRMWPatExternSymOffOnly<i32, rmw_32, inst_32>; - def : BinRMWPatExternSymOffOnly<i64, rmw_64, inst_64>; } let Predicates = [HasAtomics] in { @@ -580,17 +633,6 @@ multiclass BinRMWTruncExtPattern< def : BinRMWPatGlobalAddr<i64, sext_bin_rmw_8_64<rmw_8>, inst8_64>; def : BinRMWPatGlobalAddr<i64, sext_bin_rmw_16_64<rmw_16>, inst16_64>; - def : BinRMWPatExternalSym<i32, zext_bin_rmw_8_32<rmw_8>, inst8_32>; - def : BinRMWPatExternalSym<i32, zext_bin_rmw_16_32<rmw_16>, inst16_32>; - def : BinRMWPatExternalSym<i64, zext_bin_rmw_8_64<rmw_8>, inst8_64>; - def : BinRMWPatExternalSym<i64, zext_bin_rmw_16_64<rmw_16>, inst16_64>; - def : BinRMWPatExternalSym<i64, zext_bin_rmw_32_64<rmw_32>, inst32_64>; - - def : BinRMWPatExternalSym<i32, sext_bin_rmw_8_32<rmw_8>, inst8_32>; - def : BinRMWPatExternalSym<i32, sext_bin_rmw_16_32<rmw_16>, inst16_32>; - def : BinRMWPatExternalSym<i64, sext_bin_rmw_8_64<rmw_8>, inst8_64>; - def : BinRMWPatExternalSym<i64, sext_bin_rmw_16_64<rmw_16>, inst16_64>; - // Truncating-extending binary RMWs with just a constant offset def : BinRMWPatOffsetOnly<i32, zext_bin_rmw_8_32<rmw_8>, inst8_32>; def : BinRMWPatOffsetOnly<i32, zext_bin_rmw_16_32<rmw_16>, inst16_32>; @@ -613,17 +655,6 @@ multiclass BinRMWTruncExtPattern< def : BinRMWPatGlobalAddrOffOnly<i32, sext_bin_rmw_16_32<rmw_16>, inst16_32>; def : BinRMWPatGlobalAddrOffOnly<i64, sext_bin_rmw_8_64<rmw_8>, 
inst8_64>; def : BinRMWPatGlobalAddrOffOnly<i64, sext_bin_rmw_16_64<rmw_16>, inst16_64>; - - def : BinRMWPatExternSymOffOnly<i32, zext_bin_rmw_8_32<rmw_8>, inst8_32>; - def : BinRMWPatExternSymOffOnly<i32, zext_bin_rmw_16_32<rmw_16>, inst16_32>; - def : BinRMWPatExternSymOffOnly<i64, zext_bin_rmw_8_64<rmw_8>, inst8_64>; - def : BinRMWPatExternSymOffOnly<i64, zext_bin_rmw_16_64<rmw_16>, inst16_64>; - def : BinRMWPatExternSymOffOnly<i64, zext_bin_rmw_32_64<rmw_32>, inst32_64>; - - def : BinRMWPatExternSymOffOnly<i32, sext_bin_rmw_8_32<rmw_8>, inst8_32>; - def : BinRMWPatExternSymOffOnly<i32, sext_bin_rmw_16_32<rmw_16>, inst16_32>; - def : BinRMWPatExternSymOffOnly<i64, sext_bin_rmw_8_64<rmw_8>, inst8_64>; - def : BinRMWPatExternSymOffOnly<i64, sext_bin_rmw_16_64<rmw_16>, inst16_64>; } let Predicates = [HasAtomics] in { @@ -663,29 +694,31 @@ defm : BinRMWTruncExtPattern< // Consider adding a pass after instruction selection that optimizes this case // if it is frequent. -multiclass WebAssemblyTerRMW<WebAssemblyRegClass rc, string Name, int Opcode> { - defm "" : I<(outs rc:$dst), - (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$exp, - rc:$new), - (outs), (ins P2Align:$p2align, offset32_op:$off), [], - !strconcat(Name, "\t$dst, ${off}(${addr})${p2align}, $exp, $new"), - !strconcat(Name, "\t${off}, ${p2align}"), Opcode>; +multiclass WebAssemblyTerRMW<WebAssemblyRegClass rc, string name, + int atomic_op> { + defm "" : + ATOMIC_I<(outs rc:$dst), + (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$exp, + rc:$new_), + (outs), (ins P2Align:$p2align, offset32_op:$off), [], + !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $exp, $new_"), + !strconcat(name, "\t${off}${p2align}"), atomic_op>; } defm ATOMIC_RMW_CMPXCHG_I32 : - WebAssemblyTerRMW<I32, "i32.atomic.rmw.cmpxchg", 0xfe48>; + WebAssemblyTerRMW<I32, "i32.atomic.rmw.cmpxchg", 0x48>; defm ATOMIC_RMW_CMPXCHG_I64 : - WebAssemblyTerRMW<I64, "i64.atomic.rmw.cmpxchg", 0xfe49>; + WebAssemblyTerRMW<I64, 
"i64.atomic.rmw.cmpxchg", 0x49>; defm ATOMIC_RMW8_U_CMPXCHG_I32 : - WebAssemblyTerRMW<I32, "i32.atomic.rmw8.cmpxchg_u", 0xfe4a>; + WebAssemblyTerRMW<I32, "i32.atomic.rmw8.cmpxchg_u", 0x4a>; defm ATOMIC_RMW16_U_CMPXCHG_I32 : - WebAssemblyTerRMW<I32, "i32.atomic.rmw16.cmpxchg_u", 0xfe4b>; + WebAssemblyTerRMW<I32, "i32.atomic.rmw16.cmpxchg_u", 0x4b>; defm ATOMIC_RMW8_U_CMPXCHG_I64 : - WebAssemblyTerRMW<I64, "i64.atomic.rmw8.cmpxchg_u", 0xfe4c>; + WebAssemblyTerRMW<I64, "i64.atomic.rmw8.cmpxchg_u", 0x4c>; defm ATOMIC_RMW16_U_CMPXCHG_I64 : - WebAssemblyTerRMW<I64, "i64.atomic.rmw16.cmpxchg_u", 0xfe4d>; + WebAssemblyTerRMW<I64, "i64.atomic.rmw16.cmpxchg_u", 0x4d>; defm ATOMIC_RMW32_U_CMPXCHG_I64 : - WebAssemblyTerRMW<I64, "i64.atomic.rmw32.cmpxchg_u", 0xfe4e>; + WebAssemblyTerRMW<I64, "i64.atomic.rmw32.cmpxchg_u", 0x4e>; // Select ternary RMWs with no constant offset. class TerRMWPatNoOffset<ValueType ty, PatFrag kind, NI inst> : @@ -704,11 +737,6 @@ class TerRMWPatGlobalAddr<ValueType ty, PatFrag kind, NI inst> : ty:$exp, ty:$new)), (inst 0, tglobaladdr:$off, I32:$addr, ty:$exp, ty:$new)>; -class TerRMWPatExternalSym<ValueType ty, PatFrag kind, NI inst> : - Pat<(ty (kind (add I32:$addr, (WebAssemblywrapper texternalsym:$off)), - ty:$exp, ty:$new)), - (inst 0, texternalsym:$off, I32:$addr, ty:$exp, ty:$new)>; - // Select ternary RMWs with just a constant offset. class TerRMWPatOffsetOnly<ValueType ty, PatFrag kind, NI inst> : Pat<(ty (kind imm:$off, ty:$exp, ty:$new)), @@ -718,10 +746,6 @@ class TerRMWPatGlobalAddrOffOnly<ValueType ty, PatFrag kind, NI inst> : Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off), ty:$exp, ty:$new)), (inst 0, tglobaladdr:$off, (CONST_I32 0), ty:$exp, ty:$new)>; -class TerRMWPatExternSymOffOnly<ValueType ty, PatFrag kind, NI inst> : - Pat<(ty (kind (WebAssemblywrapper texternalsym:$off), ty:$exp, ty:$new)), - (inst 0, texternalsym:$off, (CONST_I32 0), ty:$exp, ty:$new)>; - // Patterns for various addressing modes. 
multiclass TerRMWPattern<PatFrag rmw_32, PatFrag rmw_64, NI inst_32, NI inst_64> { @@ -736,23 +760,16 @@ multiclass TerRMWPattern<PatFrag rmw_32, PatFrag rmw_64, NI inst_32, def : TerRMWPatGlobalAddr<i32, rmw_32, inst_32>; def : TerRMWPatGlobalAddr<i64, rmw_64, inst_64>; - def : TerRMWPatExternalSym<i32, rmw_32, inst_32>; - def : TerRMWPatExternalSym<i64, rmw_64, inst_64>; - def : TerRMWPatOffsetOnly<i32, rmw_32, inst_32>; def : TerRMWPatOffsetOnly<i64, rmw_64, inst_64>; def : TerRMWPatGlobalAddrOffOnly<i32, rmw_32, inst_32>; def : TerRMWPatGlobalAddrOffOnly<i64, rmw_64, inst_64>; - - def : TerRMWPatExternSymOffOnly<i32, rmw_32, inst_32>; - def : TerRMWPatExternSymOffOnly<i64, rmw_64, inst_64>; } -let Predicates = [HasAtomics] in { +let Predicates = [HasAtomics] in defm : TerRMWPattern<atomic_cmp_swap_32, atomic_cmp_swap_64, ATOMIC_RMW_CMPXCHG_I32, ATOMIC_RMW_CMPXCHG_I64>; -} // Predicates = [HasAtomics] // Truncating & zero-extending ternary RMW patterns. // DAG legalization & optimization before instruction selection may introduce @@ -840,17 +857,6 @@ multiclass TerRMWTruncExtPattern< def : TerRMWPatGlobalAddr<i64, sext_ter_rmw_8_64<rmw_8>, inst8_64>; def : TerRMWPatGlobalAddr<i64, sext_ter_rmw_16_64<rmw_16>, inst16_64>; - def : TerRMWPatExternalSym<i32, zext_ter_rmw_8_32<rmw_8>, inst8_32>; - def : TerRMWPatExternalSym<i32, zext_ter_rmw_16_32<rmw_16>, inst16_32>; - def : TerRMWPatExternalSym<i64, zext_ter_rmw_8_64<rmw_8>, inst8_64>; - def : TerRMWPatExternalSym<i64, zext_ter_rmw_16_64<rmw_16>, inst16_64>; - def : TerRMWPatExternalSym<i64, zext_ter_rmw_32_64<rmw_32>, inst32_64>; - - def : TerRMWPatExternalSym<i32, sext_ter_rmw_8_32<rmw_8>, inst8_32>; - def : TerRMWPatExternalSym<i32, sext_ter_rmw_16_32<rmw_16>, inst16_32>; - def : TerRMWPatExternalSym<i64, sext_ter_rmw_8_64<rmw_8>, inst8_64>; - def : TerRMWPatExternalSym<i64, sext_ter_rmw_16_64<rmw_16>, inst16_64>; - // Truncating-extending ternary RMWs with just a constant offset def : TerRMWPatOffsetOnly<i32, 
zext_ter_rmw_8_32<rmw_8>, inst8_32>; def : TerRMWPatOffsetOnly<i32, zext_ter_rmw_16_32<rmw_16>, inst16_32>; @@ -873,147 +879,21 @@ multiclass TerRMWTruncExtPattern< def : TerRMWPatGlobalAddrOffOnly<i32, sext_ter_rmw_16_32<rmw_16>, inst16_32>; def : TerRMWPatGlobalAddrOffOnly<i64, sext_ter_rmw_8_64<rmw_8>, inst8_64>; def : TerRMWPatGlobalAddrOffOnly<i64, sext_ter_rmw_16_64<rmw_16>, inst16_64>; - - def : TerRMWPatExternSymOffOnly<i32, zext_ter_rmw_8_32<rmw_8>, inst8_32>; - def : TerRMWPatExternSymOffOnly<i32, zext_ter_rmw_16_32<rmw_16>, inst16_32>; - def : TerRMWPatExternSymOffOnly<i64, zext_ter_rmw_8_64<rmw_8>, inst8_64>; - def : TerRMWPatExternSymOffOnly<i64, zext_ter_rmw_16_64<rmw_16>, inst16_64>; - def : TerRMWPatExternSymOffOnly<i64, zext_ter_rmw_32_64<rmw_32>, inst32_64>; - - def : TerRMWPatExternSymOffOnly<i32, sext_ter_rmw_8_32<rmw_8>, inst8_32>; - def : TerRMWPatExternSymOffOnly<i32, sext_ter_rmw_16_32<rmw_16>, inst16_32>; - def : TerRMWPatExternSymOffOnly<i64, sext_ter_rmw_8_64<rmw_8>, inst8_64>; - def : TerRMWPatExternSymOffOnly<i64, sext_ter_rmw_16_64<rmw_16>, inst16_64>; } -let Predicates = [HasAtomics] in { +let Predicates = [HasAtomics] in defm : TerRMWTruncExtPattern< atomic_cmp_swap_8, atomic_cmp_swap_16, atomic_cmp_swap_32, atomic_cmp_swap_64, ATOMIC_RMW8_U_CMPXCHG_I32, ATOMIC_RMW16_U_CMPXCHG_I32, ATOMIC_RMW8_U_CMPXCHG_I64, ATOMIC_RMW16_U_CMPXCHG_I64, ATOMIC_RMW32_U_CMPXCHG_I64>; -} //===----------------------------------------------------------------------===// -// Atomic wait / notify +// Atomic fences //===----------------------------------------------------------------------===// -let hasSideEffects = 1 in { -defm ATOMIC_NOTIFY : - I<(outs I32:$dst), - (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I32:$count), - (outs), (ins P2Align:$p2align, offset32_op:$off), [], - "atomic.notify \t$dst, ${off}(${addr})${p2align}, $count", - "atomic.notify \t${off}, ${p2align}", 0xfe00>; -let mayLoad = 1 in { -defm ATOMIC_WAIT_I32 : - I<(outs I32:$dst), 
- (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I32:$exp, I64:$timeout), - (outs), (ins P2Align:$p2align, offset32_op:$off), [], - "i32.atomic.wait \t$dst, ${off}(${addr})${p2align}, $exp, $timeout", - "i32.atomic.wait \t${off}, ${p2align}", 0xfe01>; -defm ATOMIC_WAIT_I64 : - I<(outs I32:$dst), - (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I64:$exp, I64:$timeout), - (outs), (ins P2Align:$p2align, offset32_op:$off), [], - "i64.atomic.wait \t$dst, ${off}(${addr})${p2align}, $exp, $timeout", - "i64.atomic.wait \t${off}, ${p2align}", 0xfe02>; -} // mayLoad = 1 -} // hasSideEffects = 1 - -let Predicates = [HasAtomics] in { -// Select notifys with no constant offset. -class NotifyPatNoOffset<Intrinsic kind> : - Pat<(i32 (kind I32:$addr, I32:$count)), - (ATOMIC_NOTIFY 0, 0, I32:$addr, I32:$count)>; -def : NotifyPatNoOffset<int_wasm_atomic_notify>; - -// Select notifys with a constant offset. - -// Pattern with address + immediate offset -class NotifyPatImmOff<Intrinsic kind, PatFrag operand> : - Pat<(i32 (kind (operand I32:$addr, imm:$off), I32:$count)), - (ATOMIC_NOTIFY 0, imm:$off, I32:$addr, I32:$count)>; -def : NotifyPatImmOff<int_wasm_atomic_notify, regPlusImm>; -def : NotifyPatImmOff<int_wasm_atomic_notify, or_is_add>; - -class NotifyPatGlobalAddr<Intrinsic kind> : - Pat<(i32 (kind (regPlusGA I32:$addr, (WebAssemblywrapper tglobaladdr:$off)), - I32:$count)), - (ATOMIC_NOTIFY 0, tglobaladdr:$off, I32:$addr, I32:$count)>; -def : NotifyPatGlobalAddr<int_wasm_atomic_notify>; - -class NotifyPatExternalSym<Intrinsic kind> : - Pat<(i32 (kind (add I32:$addr, (WebAssemblywrapper texternalsym:$off)), - I32:$count)), - (ATOMIC_NOTIFY 0, texternalsym:$off, I32:$addr, I32:$count)>; -def : NotifyPatExternalSym<int_wasm_atomic_notify>; - -// Select notifys with just a constant offset. 
-class NotifyPatOffsetOnly<Intrinsic kind> : - Pat<(i32 (kind imm:$off, I32:$count)), - (ATOMIC_NOTIFY 0, imm:$off, (CONST_I32 0), I32:$count)>; -def : NotifyPatOffsetOnly<int_wasm_atomic_notify>; - -class NotifyPatGlobalAddrOffOnly<Intrinsic kind> : - Pat<(i32 (kind (WebAssemblywrapper tglobaladdr:$off), I32:$count)), - (ATOMIC_NOTIFY 0, tglobaladdr:$off, (CONST_I32 0), I32:$count)>; -def : NotifyPatGlobalAddrOffOnly<int_wasm_atomic_notify>; - -class NotifyPatExternSymOffOnly<Intrinsic kind> : - Pat<(i32 (kind (WebAssemblywrapper texternalsym:$off), I32:$count)), - (ATOMIC_NOTIFY 0, texternalsym:$off, (CONST_I32 0), I32:$count)>; -def : NotifyPatExternSymOffOnly<int_wasm_atomic_notify>; - -// Select waits with no constant offset. -class WaitPatNoOffset<ValueType ty, Intrinsic kind, NI inst> : - Pat<(i32 (kind I32:$addr, ty:$exp, I64:$timeout)), - (inst 0, 0, I32:$addr, ty:$exp, I64:$timeout)>; -def : WaitPatNoOffset<i32, int_wasm_atomic_wait_i32, ATOMIC_WAIT_I32>; -def : WaitPatNoOffset<i64, int_wasm_atomic_wait_i64, ATOMIC_WAIT_I64>; - -// Select waits with a constant offset. 
- -// Pattern with address + immediate offset -class WaitPatImmOff<ValueType ty, Intrinsic kind, PatFrag operand, NI inst> : - Pat<(i32 (kind (operand I32:$addr, imm:$off), ty:$exp, I64:$timeout)), - (inst 0, imm:$off, I32:$addr, ty:$exp, I64:$timeout)>; -def : WaitPatImmOff<i32, int_wasm_atomic_wait_i32, regPlusImm, ATOMIC_WAIT_I32>; -def : WaitPatImmOff<i32, int_wasm_atomic_wait_i32, or_is_add, ATOMIC_WAIT_I32>; -def : WaitPatImmOff<i64, int_wasm_atomic_wait_i64, regPlusImm, ATOMIC_WAIT_I64>; -def : WaitPatImmOff<i64, int_wasm_atomic_wait_i64, or_is_add, ATOMIC_WAIT_I64>; - -class WaitPatGlobalAddr<ValueType ty, Intrinsic kind, NI inst> : - Pat<(i32 (kind (regPlusGA I32:$addr, (WebAssemblywrapper tglobaladdr:$off)), - ty:$exp, I64:$timeout)), - (inst 0, tglobaladdr:$off, I32:$addr, ty:$exp, I64:$timeout)>; -def : WaitPatGlobalAddr<i32, int_wasm_atomic_wait_i32, ATOMIC_WAIT_I32>; -def : WaitPatGlobalAddr<i64, int_wasm_atomic_wait_i64, ATOMIC_WAIT_I64>; - -class WaitPatExternalSym<ValueType ty, Intrinsic kind, NI inst> : - Pat<(i32 (kind (add I32:$addr, (WebAssemblywrapper texternalsym:$off)), - ty:$exp, I64:$timeout)), - (inst 0, texternalsym:$off, I32:$addr, ty:$exp, I64:$timeout)>; -def : WaitPatExternalSym<i32, int_wasm_atomic_wait_i32, ATOMIC_WAIT_I32>; -def : WaitPatExternalSym<i64, int_wasm_atomic_wait_i64, ATOMIC_WAIT_I64>; - -// Select wait_i32, ATOMIC_WAIT_I32s with just a constant offset. 
-class WaitPatOffsetOnly<ValueType ty, Intrinsic kind, NI inst> : - Pat<(i32 (kind imm:$off, ty:$exp, I64:$timeout)), - (inst 0, imm:$off, (CONST_I32 0), ty:$exp, I64:$timeout)>; -def : WaitPatOffsetOnly<i32, int_wasm_atomic_wait_i32, ATOMIC_WAIT_I32>; -def : WaitPatOffsetOnly<i64, int_wasm_atomic_wait_i64, ATOMIC_WAIT_I64>; - -class WaitPatGlobalAddrOffOnly<ValueType ty, Intrinsic kind, NI inst> : - Pat<(i32 (kind (WebAssemblywrapper tglobaladdr:$off), ty:$exp, I64:$timeout)), - (inst 0, tglobaladdr:$off, (CONST_I32 0), ty:$exp, I64:$timeout)>; -def : WaitPatGlobalAddrOffOnly<i32, int_wasm_atomic_wait_i32, ATOMIC_WAIT_I32>; -def : WaitPatGlobalAddrOffOnly<i64, int_wasm_atomic_wait_i64, ATOMIC_WAIT_I64>; - -class WaitPatExternSymOffOnly<ValueType ty, Intrinsic kind, NI inst> : - Pat<(i32 (kind (WebAssemblywrapper texternalsym:$off), ty:$exp, - I64:$timeout)), - (inst 0, texternalsym:$off, (CONST_I32 0), ty:$exp, I64:$timeout)>; -def : WaitPatExternSymOffOnly<i32, int_wasm_atomic_wait_i32, ATOMIC_WAIT_I32>; -def : WaitPatExternSymOffOnly<i64, int_wasm_atomic_wait_i64, ATOMIC_WAIT_I64>; -} // Predicates = [HasAtomics] +// A compiler fence instruction that prevents reordering of instructions. +let Defs = [ARGUMENTS] in { +let isPseudo = 1, hasSideEffects = 1 in +defm COMPILER_FENCE : ATOMIC_NRI<(outs), (ins), [], "compiler_fence">; +} // Defs = [ARGUMENTS] diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrBulkMemory.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrBulkMemory.td new file mode 100644 index 000000000000..f4352e3d12ec --- /dev/null +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrBulkMemory.td @@ -0,0 +1,71 @@ +// WebAssemblyInstrBulkMemory.td - bulk memory codegen support --*- tablegen -*- +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// WebAssembly bulk memory codegen constructs. +/// +//===----------------------------------------------------------------------===// + +// Instruction requiring HasBulkMemory and the bulk memory prefix byte +multiclass BULK_I<dag oops_r, dag iops_r, dag oops_s, dag iops_s, + list<dag> pattern_r, string asmstr_r = "", + string asmstr_s = "", bits<32> simdop = -1> { + defm "" : I<oops_r, iops_r, oops_s, iops_s, pattern_r, asmstr_r, asmstr_s, + !or(0xfc00, !and(0xff, simdop))>, + Requires<[HasBulkMemory]>; +} + +// Bespoke types and nodes for bulk memory ops +def wasm_memcpy_t : SDTypeProfile<0, 5, + [SDTCisInt<0>, SDTCisInt<1>, SDTCisPtrTy<2>, SDTCisPtrTy<3>, SDTCisInt<4>] +>; +def wasm_memcpy : SDNode<"WebAssemblyISD::MEMORY_COPY", wasm_memcpy_t, + [SDNPHasChain, SDNPMayLoad, SDNPMayStore]>; + +def wasm_memset_t : SDTypeProfile<0, 4, + [SDTCisInt<0>, SDTCisPtrTy<1>, SDTCisInt<2>, SDTCisInt<3>] +>; +def wasm_memset : SDNode<"WebAssemblyISD::MEMORY_FILL", wasm_memset_t, + [SDNPHasChain, SDNPMayStore]>; + +let mayStore = 1, hasSideEffects = 1 in +defm MEMORY_INIT : + BULK_I<(outs), + (ins i32imm_op:$seg, i32imm_op:$idx, I32:$dest, + I32:$offset, I32:$size), + (outs), (ins i32imm_op:$seg, i32imm_op:$idx), + [(int_wasm_memory_init (i32 imm:$seg), (i32 imm:$idx), I32:$dest, + I32:$offset, I32:$size + )], + "memory.init\t$seg, $idx, $dest, $offset, $size", + "memory.init\t$seg, $idx", 0x08>; + +let hasSideEffects = 1 in +defm DATA_DROP : + BULK_I<(outs), (ins i32imm_op:$seg), (outs), (ins i32imm_op:$seg), + [(int_wasm_data_drop (i32 imm:$seg))], + "data.drop\t$seg", "data.drop\t$seg", 0x09>; + +let mayLoad = 1, mayStore = 1 in +defm MEMORY_COPY : + BULK_I<(outs), (ins i32imm_op:$src_idx, i32imm_op:$dst_idx, + I32:$dst, I32:$src, I32:$len), + (outs), (ins i32imm_op:$src_idx, i32imm_op:$dst_idx), + [(wasm_memcpy 
(i32 imm:$src_idx), (i32 imm:$dst_idx), + I32:$dst, I32:$src, I32:$len + )], + "memory.copy\t$src_idx, $dst_idx, $dst, $src, $len", + "memory.copy\t$src_idx, $dst_idx", 0x0a>; + +let mayStore = 1 in +defm MEMORY_FILL : + BULK_I<(outs), (ins i32imm_op:$idx, I32:$dst, I32:$value, I32:$size), + (outs), (ins i32imm_op:$idx), + [(wasm_memset (i32 imm:$idx), I32:$dst, I32:$value, I32:$size)], + "memory.fill\t$idx, $dst, $value, $size", + "memory.fill\t$idx", 0x0b>; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td index 07839b790114..703c15d58c93 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td @@ -1,9 +1,8 @@ //===- WebAssemblyInstrCall.td-WebAssembly Call codegen support -*- tablegen -*- // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -22,109 +21,112 @@ defm ADJCALLSTACKDOWN : NRI<(outs), (ins i32imm:$amt, i32imm:$amt2), [(WebAssemblycallseq_start timm:$amt, timm:$amt2)]>; defm ADJCALLSTACKUP : NRI<(outs), (ins i32imm:$amt, i32imm:$amt2), [(WebAssemblycallseq_end timm:$amt, timm:$amt2)]>; -} // isCodeGenOnly = 1 +} // Uses = [SP32, SP64], Defs = [SP32, SP64], isCodeGenOnly = 1 -multiclass CALL<WebAssemblyRegClass vt, string prefix> { - defm CALL_#vt : I<(outs vt:$dst), (ins function32_op:$callee, variable_ops), - (outs), (ins function32_op:$callee), - [(set vt:$dst, (WebAssemblycall1 (i32 imm:$callee)))], - !strconcat(prefix, "call\t$dst, $callee"), - !strconcat(prefix, "call\t$callee"), - 0x10>; +multiclass CALL<ValueType vt, WebAssemblyRegClass rt, string prefix, + list<Predicate> preds = []> { + defm CALL_#vt : + I<(outs rt:$dst), (ins function32_op:$callee, variable_ops), + (outs), (ins function32_op:$callee), + [(set (vt rt:$dst), (WebAssemblycall1 (i32 imm:$callee)))], + !strconcat(prefix, "call\t$dst, $callee"), + !strconcat(prefix, "call\t$callee"), + 0x10>, + Requires<preds>; - let isCodeGenOnly = 1 in { - defm PCALL_INDIRECT_#vt : I<(outs vt:$dst), (ins I32:$callee, variable_ops), - (outs), (ins I32:$callee), - [(set vt:$dst, (WebAssemblycall1 I32:$callee))], - "PSEUDO CALL INDIRECT\t$callee", - "PSEUDO CALL INDIRECT\t$callee">; - } // isCodeGenOnly = 1 + let isCodeGenOnly = 1 in + defm PCALL_INDIRECT_#vt : + I<(outs rt:$dst), (ins I32:$callee, variable_ops), + (outs), (ins I32:$callee), + [(set (vt rt:$dst), (WebAssemblycall1 I32:$callee))], + "PSEUDO CALL INDIRECT\t$callee", + "PSEUDO CALL INDIRECT\t$callee">, + Requires<preds>; - defm CALL_INDIRECT_#vt : I<(outs vt:$dst), - (ins TypeIndex:$type, i32imm:$flags, variable_ops), - (outs), (ins TypeIndex:$type, i32imm:$flags), - [], - !strconcat(prefix, "call_indirect\t$dst"), - 
!strconcat(prefix, "call_indirect\t$type"), - 0x11>; + defm CALL_INDIRECT_#vt : + I<(outs rt:$dst), + (ins TypeIndex:$type, i32imm:$flags, variable_ops), + (outs), (ins TypeIndex:$type, i32imm:$flags), + [], + !strconcat(prefix, "call_indirect\t$dst"), + !strconcat(prefix, "call_indirect\t$type"), + 0x11>, + Requires<preds>; } -multiclass SIMD_CALL<ValueType vt, string prefix> { +let Uses = [SP32, SP64], isCall = 1 in { +defm "" : CALL<i32, I32, "i32.">; +defm "" : CALL<i64, I64, "i64.">; +defm "" : CALL<f32, F32, "f32.">; +defm "" : CALL<f64, F64, "f64.">; +defm "" : CALL<exnref, EXNREF, "exnref.", [HasExceptionHandling]>; +defm "" : CALL<v16i8, V128, "v128.", [HasSIMD128]>; +defm "" : CALL<v8i16, V128, "v128.", [HasSIMD128]>; +defm "" : CALL<v4i32, V128, "v128.", [HasSIMD128]>; +defm "" : CALL<v2i64, V128, "v128.", [HasSIMD128]>; +defm "" : CALL<v4f32, V128, "v128.", [HasSIMD128]>; +defm "" : CALL<v2f64, V128, "v128.", [HasSIMD128]>; - defm CALL_#vt : I<(outs V128:$dst), (ins function32_op:$callee, variable_ops), - (outs), (ins function32_op:$callee), - [(set (vt V128:$dst), - (WebAssemblycall1 (i32 imm:$callee)))], - !strconcat(prefix, "call\t$dst, $callee"), - !strconcat(prefix, "call\t$callee"), - 0x10>, - Requires<[HasSIMD128]>; +let IsCanonical = 1 in { +defm CALL_VOID : + I<(outs), (ins function32_op:$callee, variable_ops), + (outs), (ins function32_op:$callee), + [(WebAssemblycall0 (i32 imm:$callee))], + "call \t$callee", "call\t$callee", 0x10>; - let isCodeGenOnly = 1 in { - defm PCALL_INDIRECT_#vt : I<(outs V128:$dst), - (ins I32:$callee, variable_ops), - (outs), (ins I32:$callee), - [(set (vt V128:$dst), - (WebAssemblycall1 I32:$callee))], - "PSEUDO CALL INDIRECT\t$callee", - "PSEUDO CALL INDIRECT\t$callee">, - Requires<[HasSIMD128]>; - } // isCodeGenOnly = 1 +let isReturn = 1 in +defm RET_CALL : + I<(outs), (ins function32_op:$callee, variable_ops), + (outs), (ins function32_op:$callee), + [(WebAssemblyretcall (i32 imm:$callee))], + "return_call 
\t$callee", "return_call\t$callee", 0x12>, + Requires<[HasTailCall]>; - defm CALL_INDIRECT_#vt : I<(outs V128:$dst), - (ins TypeIndex:$type, i32imm:$flags, variable_ops), - (outs), (ins TypeIndex:$type, i32imm:$flags), - [], - !strconcat(prefix, "call_indirect\t$dst"), - !strconcat(prefix, "call_indirect\t$type"), - 0x11>, - Requires<[HasSIMD128]>; -} +let isCodeGenOnly = 1 in +defm PCALL_INDIRECT_VOID : + I<(outs), (ins I32:$callee, variable_ops), + (outs), (ins I32:$callee), + [(WebAssemblycall0 I32:$callee)], + "PSEUDO CALL INDIRECT\t$callee", + "PSEUDO CALL INDIRECT\t$callee">; -let Uses = [SP32, SP64], isCall = 1 in { - defm "" : CALL<I32, "i32.">; - defm "" : CALL<I64, "i64.">; - defm "" : CALL<F32, "f32.">; - defm "" : CALL<F64, "f64.">; - defm "" : CALL<EXCEPT_REF, "except_ref.">; - defm "" : SIMD_CALL<v16i8, "v128.">; - defm "" : SIMD_CALL<v8i16, "v128.">; - defm "" : SIMD_CALL<v4i32, "v128.">; - defm "" : SIMD_CALL<v2i64, "v128.">; - defm "" : SIMD_CALL<v4f32, "v128.">; - defm "" : SIMD_CALL<v2f64, "v128.">; +defm CALL_INDIRECT_VOID : + I<(outs), (ins TypeIndex:$type, i32imm:$flags, variable_ops), + (outs), (ins TypeIndex:$type, i32imm:$flags), + [], + "call_indirect\t", "call_indirect\t$type", + 0x11>; - defm CALL_VOID : I<(outs), (ins function32_op:$callee, variable_ops), - (outs), (ins function32_op:$callee), - [(WebAssemblycall0 (i32 imm:$callee))], - "call \t$callee", "call\t$callee", 0x10>; +let isReturn = 1 in +defm RET_CALL_INDIRECT : + I<(outs), (ins TypeIndex:$type, i32imm:$flags, variable_ops), + (outs), (ins TypeIndex:$type, i32imm:$flags), + [], + "return_call_indirect\t", "return_call_indirect\t$type", + 0x13>, + Requires<[HasTailCall]>; - let isCodeGenOnly = 1 in { - defm PCALL_INDIRECT_VOID : I<(outs), (ins I32:$callee, variable_ops), - (outs), (ins I32:$callee), - [(WebAssemblycall0 I32:$callee)], - "PSEUDO CALL INDIRECT\t$callee", - "PSEUDO CALL INDIRECT\t$callee">; - } // isCodeGenOnly = 1 +let isCodeGenOnly = 1, isReturn = 1 in +defm 
PRET_CALL_INDIRECT: + I<(outs), (ins I32:$callee, variable_ops), + (outs), (ins I32:$callee), + [(WebAssemblyretcall I32:$callee)], + "PSEUDO RET_CALL INDIRECT\t$callee", + "PSEUDO RET_CALL INDIRECT\t$callee">, + Requires<[HasTailCall]>; - defm CALL_INDIRECT_VOID : I<(outs), - (ins TypeIndex:$type, i32imm:$flags, - variable_ops), - (outs), (ins TypeIndex:$type, i32imm:$flags), - [], - "call_indirect\t", "call_indirect\t$type", - 0x11>; +} // IsCanonical = 1 } // Uses = [SP32,SP64], isCall = 1 // Patterns for matching a direct call to a global address. def : Pat<(i32 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))), - (CALL_I32 tglobaladdr:$callee)>; + (CALL_i32 tglobaladdr:$callee)>; def : Pat<(i64 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))), - (CALL_I64 tglobaladdr:$callee)>; + (CALL_i64 tglobaladdr:$callee)>; def : Pat<(f32 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))), - (CALL_F32 tglobaladdr:$callee)>; + (CALL_f32 tglobaladdr:$callee)>; def : Pat<(f64 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))), - (CALL_F64 tglobaladdr:$callee)>; + (CALL_f64 tglobaladdr:$callee)>; def : Pat<(v16i8 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))), (CALL_v16i8 tglobaladdr:$callee)>, Requires<[HasSIMD128]>; def : Pat<(v8i16 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))), @@ -137,21 +139,23 @@ def : Pat<(v4f32 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))), (CALL_v4f32 tglobaladdr:$callee)>, Requires<[HasSIMD128]>; def : Pat<(v2f64 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))), (CALL_v2f64 tglobaladdr:$callee)>, Requires<[HasSIMD128]>; -def : Pat<(ExceptRef - (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))), - (CALL_EXCEPT_REF tglobaladdr:$callee)>; +def : Pat<(exnref (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))), + (CALL_exnref tglobaladdr:$callee)>, + Requires<[HasExceptionHandling]>; def : Pat<(WebAssemblycall0 (WebAssemblywrapper 
tglobaladdr:$callee)), (CALL_VOID tglobaladdr:$callee)>; +def : Pat<(WebAssemblyretcall (WebAssemblywrapper tglobaladdr:$callee)), + (RET_CALL tglobaladdr:$callee)>, Requires<[HasTailCall]>; // Patterns for matching a direct call to an external symbol. def : Pat<(i32 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))), - (CALL_I32 texternalsym:$callee)>; + (CALL_i32 texternalsym:$callee)>; def : Pat<(i64 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))), - (CALL_I64 texternalsym:$callee)>; + (CALL_i64 texternalsym:$callee)>; def : Pat<(f32 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))), - (CALL_F32 texternalsym:$callee)>; + (CALL_f32 texternalsym:$callee)>; def : Pat<(f64 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))), - (CALL_F64 texternalsym:$callee)>; + (CALL_f64 texternalsym:$callee)>; def : Pat<(v16i8 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))), (CALL_v16i8 texternalsym:$callee)>, Requires<[HasSIMD128]>; def : Pat<(v8i16 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))), @@ -164,8 +168,10 @@ def : Pat<(v4f32 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))), (CALL_v4f32 texternalsym:$callee)>, Requires<[HasSIMD128]>; def : Pat<(v2f64 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))), (CALL_v2f64 texternalsym:$callee)>, Requires<[HasSIMD128]>; -def : Pat<(ExceptRef - (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))), - (CALL_EXCEPT_REF texternalsym:$callee)>; +def : Pat<(exnref (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))), + (CALL_exnref texternalsym:$callee)>, + Requires<[HasExceptionHandling]>; def : Pat<(WebAssemblycall0 (WebAssemblywrapper texternalsym:$callee)), (CALL_VOID texternalsym:$callee)>; +def : Pat<(WebAssemblyretcall (WebAssemblywrapper texternalsym:$callee)), + (RET_CALL texternalsym:$callee)>, Requires<[HasTailCall]>; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td 
b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td index 7eb6cbf4d249..1870c5bc34b0 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td @@ -1,9 +1,8 @@ //===- WebAssemblyInstrControl.td-WebAssembly control-flow ------*- tablegen -*- // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -21,11 +20,10 @@ defm BR_IF : I<(outs), (ins bb_op:$dst, I32:$cond), let isCodeGenOnly = 1 in defm BR_UNLESS : I<(outs), (ins bb_op:$dst, I32:$cond), (outs), (ins bb_op:$dst), []>; -let isBarrier = 1 in { +let isBarrier = 1 in defm BR : NRI<(outs), (ins bb_op:$dst), [(br bb:$dst)], "br \t$dst", 0x0c>; -} // isBarrier = 1 } // isBranch = 1, isTerminator = 1, hasCtrlDep = 1 def : Pat<(brcond (i32 (setne I32:$cond, 0)), bb:$dst), @@ -36,14 +34,11 @@ def : Pat<(brcond (i32 (seteq I32:$cond, 0)), bb:$dst), // A list of branch targets enclosed in {} and separated by comma. // Used by br_table only. 
def BrListAsmOperand : AsmOperandClass { let Name = "BrList"; } -let OperandNamespace = "WebAssembly" in { -let OperandType = "OPERAND_BRLIST" in { +let OperandNamespace = "WebAssembly", OperandType = "OPERAND_BRLIST" in def brlist : Operand<i32> { let ParserMatchClass = BrListAsmOperand; let PrintMethod = "printBrList"; } -} // OPERAND_BRLIST -} // OperandNamespace = "WebAssembly" // TODO: SelectionDAG's lowering insists on using a pointer as the index for // jump tables, so in practice we don't ever use BR_TABLE_I64 in wasm32 mode @@ -82,6 +77,9 @@ defm ELSE : NRI<(outs), (ins), [], "else", 0x05>; defm END_BLOCK : NRI<(outs), (ins), [], "end_block", 0x0b>; defm END_LOOP : NRI<(outs), (ins), [], "end_loop", 0x0b>; defm END_IF : NRI<(outs), (ins), [], "end_if", 0x0b>; +// Generic instruction, for disassembler. +let IsCanonical = 1 in +defm END : NRI<(outs), (ins), [], "end", 0x0b>; let isTerminator = 1, isBarrier = 1 in defm END_FUNCTION : NRI<(outs), (ins), [], "end_function", 0x0b>; } // Uses = [VALUE_STACK], Defs = [VALUE_STACK] @@ -106,7 +104,7 @@ multiclass SIMD_RETURN<ValueType vt> { let isCodeGenOnly = 1 in defm FALLTHROUGH_RETURN_#vt : I<(outs), (ins V128:$val), (outs), (ins), []>, - Requires<[HasSIMD128]>; + Requires<[HasSIMD128]>; } let isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in { @@ -116,7 +114,7 @@ let isReturn = 1 in { defm "": RETURN<I64>; defm "": RETURN<F32>; defm "": RETURN<F64>; - defm "": RETURN<EXCEPT_REF>; + defm "": RETURN<EXNREF>; defm "": SIMD_RETURN<v16i8>; defm "": SIMD_RETURN<v8i16>; defm "": SIMD_RETURN<v4i32>; @@ -142,23 +140,17 @@ let Predicates = [HasExceptionHandling] in { // Throwing an exception: throw / rethrow let isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in { -defm THROW_I32 : I<(outs), (ins event_op:$tag, I32:$val), - (outs), (ins event_op:$tag), - [(WebAssemblythrow (WebAssemblywrapper texternalsym:$tag), - I32:$val)], - "throw \t$tag, $val", "throw \t$tag", - 0x08>; -defm THROW_I64 : I<(outs), (ins 
event_op:$tag, I64:$val), - (outs), (ins event_op:$tag), - [(WebAssemblythrow (WebAssemblywrapper texternalsym:$tag), - I64:$val)], - "throw \t$tag, $val", "throw \t$tag", - 0x08>; -defm RETHROW : NRI<(outs), (ins bb_op:$dst), [], "rethrow \t$dst", 0x09>; -let isCodeGenOnly = 1 in -// This is used when the destination for rethrow is the caller function. This -// will be converted to a rethrow in CFGStackify. -defm RETHROW_TO_CALLER : NRI<(outs), (ins), [], "rethrow">; +defm THROW : I<(outs), (ins event_op:$tag, variable_ops), + (outs), (ins event_op:$tag), + [(WebAssemblythrow (WebAssemblywrapper texternalsym:$tag))], + "throw \t$tag", "throw \t$tag", 0x08>; +defm RETHROW : I<(outs), (ins EXNREF:$exn), (outs), (ins), [], + "rethrow \t$exn", "rethrow", 0x09>; +// Pseudo instruction to be the lowering target of int_wasm_rethrow_in_catch +// intrinsic. Will be converted to the real rethrow instruction later. +let isPseudo = 1 in +defm RETHROW_IN_CATCH : NRI<(outs), (ins), [(int_wasm_rethrow_in_catch)], + "rethrow_in_catch", 0>; } // isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 // Region within which an exception is caught: try / end_try @@ -167,24 +159,33 @@ defm TRY : NRI<(outs), (ins Signature:$sig), [], "try \t$sig", 0x06>; defm END_TRY : NRI<(outs), (ins), [], "end_try", 0x0b>; } // Uses = [VALUE_STACK], Defs = [VALUE_STACK] -// Catching an exception: catch / catch_all -let hasCtrlDep = 1, hasSideEffects = 1 in { -defm CATCH_I32 : I<(outs I32:$dst), (ins i32imm:$tag), - (outs), (ins i32imm:$tag), - [(set I32:$dst, (int_wasm_catch imm:$tag))], - "i32.catch \t$dst, $tag", "i32.catch \t$tag", 0x07>; -defm CATCH_I64 : I<(outs I64:$dst), (ins i32imm:$tag), - (outs), (ins i32imm:$tag), - [(set I64:$dst, (int_wasm_catch imm:$tag))], - "i64.catch \t$dst, $tag", "i64.catch \t$tag", 0x07>; -defm CATCH_ALL : NRI<(outs), (ins), [], "catch_all", 0x05>; -} +// Catching an exception: catch / extract_exception +let hasCtrlDep = 1, hasSideEffects = 1 in +defm CATCH : I<(outs 
EXNREF:$dst), (ins), (outs), (ins), [], + "catch \t$dst", "catch", 0x07>; + +// Querying / extracing exception: br_on_exn +// br_on_exn queries an exnref to see if it matches the corresponding exception +// tag index. If true it branches to the given label and pushes the +// corresponding argument values of the exception onto the stack. +let isBranch = 1, isTerminator = 1, hasCtrlDep = 1 in +defm BR_ON_EXN : I<(outs), (ins bb_op:$dst, event_op:$tag, EXNREF:$exn), + (outs), (ins bb_op:$dst, event_op:$tag), [], + "br_on_exn \t$dst, $tag, $exn", "br_on_exn \t$dst, $tag", + 0x0a>; +// This is a pseudo instruction that simulates popping a value from stack, which +// has been pushed by br_on_exn +let isCodeGenOnly = 1, hasSideEffects = 1 in +defm EXTRACT_EXCEPTION_I32 : NRI<(outs I32:$dst), (ins), + [(set I32:$dst, (int_wasm_extract_exception))], + "extract_exception\t$dst">; // Pseudo instructions: cleanupret / catchret let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1, - isCodeGenOnly = 1, isEHScopeReturn = 1 in { - defm CLEANUPRET : NRI<(outs), (ins), [(cleanupret)], "", 0>; + isPseudo = 1, isEHScopeReturn = 1 in { + defm CLEANUPRET : NRI<(outs), (ins), [(cleanupret)], "cleanupret", 0>; defm CATCHRET : NRI<(outs), (ins bb_op:$dst, bb_op:$from), - [(catchret bb:$dst, bb:$from)], "", 0>; -} -} + [(catchret bb:$dst, bb:$from)], "catchret", 0>; +} // isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1, + // isPseudo = 1, isEHScopeReturn = 1 +} // Predicates = [HasExceptionHandling] diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td index e128656a142c..661fee2715ba 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td @@ -1,9 +1,8 @@ //===-- WebAssemblyInstrConv.td-WebAssembly Conversion support -*- tablegen -*-= // -// The LLVM Compiler Infrastructure -// -// This file 
is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrExceptRef.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrExceptRef.td deleted file mode 100644 index a251d60b89ee..000000000000 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrExceptRef.td +++ /dev/null @@ -1,27 +0,0 @@ -// WebAssemblyInstrExceptRef.td-WebAssembly except_ref codegen --*- tablegen -*- -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -/// -/// \file -/// WebAssembly except_ref operand code-gen constructs. 
-/// -//===----------------------------------------------------------------------===// - -defm SELECT_EXCEPT_REF : I<(outs EXCEPT_REF:$dst), - (ins EXCEPT_REF:$lhs, EXCEPT_REF:$rhs, I32:$cond), - (outs), (ins), - [(set EXCEPT_REF:$dst, - (select I32:$cond, EXCEPT_REF:$lhs, - EXCEPT_REF:$rhs))], - "except_ref.select\t$dst, $lhs, $rhs, $cond", - "except_ref.select", 0x1b>; - -def : Pat<(select (i32 (setne I32:$cond, 0)), EXCEPT_REF:$lhs, EXCEPT_REF:$rhs), - (SELECT_EXCEPT_REF EXCEPT_REF:$lhs, EXCEPT_REF:$rhs, I32:$cond)>; -def : Pat<(select (i32 (seteq I32:$cond, 0)), EXCEPT_REF:$lhs, EXCEPT_REF:$rhs), - (SELECT_EXCEPT_REF EXCEPT_REF:$rhs, EXCEPT_REF:$lhs, I32:$cond)>; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td index c5290f00b431..5c9b34f44734 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td @@ -1,9 +1,8 @@ // WebAssemblyInstrFloat.td-WebAssembly Float codegen support ---*- tablegen -*- // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td index 15a9714a55a1..aff4d20d8d82 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td @@ -1,9 +1,8 @@ //=- WebAssemblyInstrFormats.td - WebAssembly Instr. 
Formats -*- tablegen -*-=// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -23,6 +22,9 @@ class WebAssemblyInst<bits<32> inst, string asmstr, string stack> : StackRel, let Namespace = "WebAssembly"; let Pattern = []; let AsmString = asmstr; + // When there are multiple instructions that map to the same encoding (in + // e.g. the disassembler use case) prefer the one where IsCanonical == 1. + bit IsCanonical = 0; } // Normal instructions. Default instantiation of a WebAssemblyInst. diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp index 5efff32d6167..a86c9af28f0d 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyInstrInfo.cpp - WebAssembly Instruction Information ----===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -28,6 +27,10 @@ using namespace llvm; #define GET_INSTRINFO_CTOR_DTOR #include "WebAssemblyGenInstrInfo.inc" +// defines WebAssembly::getNamedOperandIdx +#define GET_INSTRINFO_NAMED_OPS +#include "WebAssemblyGenInstrInfo.inc" + WebAssemblyInstrInfo::WebAssemblyInstrInfo(const WebAssemblySubtarget &STI) : WebAssemblyGenInstrInfo(WebAssembly::ADJCALLSTACKDOWN, WebAssembly::ADJCALLSTACKUP, @@ -72,6 +75,8 @@ void WebAssemblyInstrInfo::copyPhysReg(MachineBasicBlock &MBB, CopyOpcode = WebAssembly::COPY_F64; else if (RC == &WebAssembly::V128RegClass) CopyOpcode = WebAssembly::COPY_V128; + else if (RC == &WebAssembly::EXNREFRegClass) + CopyOpcode = WebAssembly::COPY_EXNREF; else llvm_unreachable("Unexpected register class"); @@ -98,6 +103,13 @@ bool WebAssemblyInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&FBB, SmallVectorImpl<MachineOperand> &Cond, bool /*AllowModify*/) const { + const auto &MFI = *MBB.getParent()->getInfo<WebAssemblyFunctionInfo>(); + // WebAssembly has control flow that doesn't have explicit branches or direct + // fallthrough (e.g. try/catch), which can't be modeled by analyzeBranch. It + // is created after CFGStackify. + if (MFI.isCFGStackified()) + return true; + bool HaveCond = false; for (MachineInstr &MI : MBB.terminators()) { switch (MI.getOpcode()) { @@ -107,9 +119,6 @@ bool WebAssemblyInstrInfo::analyzeBranch(MachineBasicBlock &MBB, case WebAssembly::BR_IF: if (HaveCond) return true; - // If we're running after CFGStackify, we can't optimize further. 
- if (!MI.getOperand(0).isMBB()) - return true; Cond.push_back(MachineOperand::CreateImm(true)); Cond.push_back(MI.getOperand(1)); TBB = MI.getOperand(0).getMBB(); @@ -118,23 +127,25 @@ bool WebAssemblyInstrInfo::analyzeBranch(MachineBasicBlock &MBB, case WebAssembly::BR_UNLESS: if (HaveCond) return true; - // If we're running after CFGStackify, we can't optimize further. - if (!MI.getOperand(0).isMBB()) - return true; Cond.push_back(MachineOperand::CreateImm(false)); Cond.push_back(MI.getOperand(1)); TBB = MI.getOperand(0).getMBB(); HaveCond = true; break; case WebAssembly::BR: - // If we're running after CFGStackify, we can't optimize further. - if (!MI.getOperand(0).isMBB()) - return true; if (!HaveCond) TBB = MI.getOperand(0).getMBB(); else FBB = MI.getOperand(0).getMBB(); break; + case WebAssembly::BR_ON_EXN: + if (HaveCond) + return true; + Cond.push_back(MachineOperand::CreateImm(true)); + Cond.push_back(MI.getOperand(2)); + TBB = MI.getOperand(0).getMBB(); + HaveCond = true; + break; } if (MI.isBarrier()) break; @@ -180,9 +191,22 @@ unsigned WebAssemblyInstrInfo::insertBranch( assert(Cond.size() == 2 && "Expected a flag and a successor block"); + MachineFunction &MF = *MBB.getParent(); + auto &MRI = MF.getRegInfo(); + bool IsBrOnExn = Cond[1].isReg() && MRI.getRegClass(Cond[1].getReg()) == + &WebAssembly::EXNREFRegClass; + if (Cond[0].getImm()) { - BuildMI(&MBB, DL, get(WebAssembly::BR_IF)).addMBB(TBB).add(Cond[1]); + if (IsBrOnExn) { + const char *CPPExnSymbol = MF.createExternalSymbolName("__cpp_exception"); + BuildMI(&MBB, DL, get(WebAssembly::BR_ON_EXN)) + .addMBB(TBB) + .addExternalSymbol(CPPExnSymbol) + .add(Cond[1]); + } else + BuildMI(&MBB, DL, get(WebAssembly::BR_IF)).addMBB(TBB).add(Cond[1]); } else { + assert(!IsBrOnExn && "br_on_exn does not have a reversed condition"); BuildMI(&MBB, DL, get(WebAssembly::BR_UNLESS)).addMBB(TBB).add(Cond[1]); } if (!FBB) @@ -194,7 +218,15 @@ unsigned WebAssemblyInstrInfo::insertBranch( bool 
WebAssemblyInstrInfo::reverseBranchCondition( SmallVectorImpl<MachineOperand> &Cond) const { - assert(Cond.size() == 2 && "Expected a flag and a successor block"); + assert(Cond.size() == 2 && "Expected a flag and a condition expression"); + + // br_on_exn's condition cannot be reversed + MachineFunction &MF = *Cond[1].getParent()->getParent()->getParent(); + auto &MRI = MF.getRegInfo(); + if (Cond[1].isReg() && + MRI.getRegClass(Cond[1].getReg()) == &WebAssembly::EXNREFRegClass) + return true; + Cond.front() = MachineOperand::CreateImm(!Cond.front().getImm()); return false; } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h index 4a3763c345b0..df1051b4f42c 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h @@ -1,9 +1,8 @@ //=- WebAssemblyInstrInfo.h - WebAssembly Instruction Information -*- C++ -*-=// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -22,8 +21,17 @@ #define GET_INSTRINFO_HEADER #include "WebAssemblyGenInstrInfo.inc" +#define GET_INSTRINFO_OPERAND_ENUM +#include "WebAssemblyGenInstrInfo.inc" + namespace llvm { +namespace WebAssembly { + +int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex); + +} + class WebAssemblySubtarget; class WebAssemblyInstrInfo final : public WebAssemblyGenInstrInfo { diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td index e3d795f2aab1..73ddbe85d551 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td @@ -1,9 +1,8 @@ // WebAssemblyInstrInfo.td-Describe the WebAssembly Instructions-*- tablegen -*- // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -16,41 +15,52 @@ // WebAssembly Instruction Predicate Definitions. 
//===----------------------------------------------------------------------===// +def IsPIC : Predicate<"TM.isPositionIndependent()">; +def IsNotPIC : Predicate<"!TM.isPositionIndependent()">; + def HasAddr32 : Predicate<"!Subtarget->hasAddr64()">; + def HasAddr64 : Predicate<"Subtarget->hasAddr64()">; -def HasSIMD128 : Predicate<"Subtarget->hasSIMD128()">, - AssemblerPredicate<"FeatureSIMD128", "simd128">; + +def HasSIMD128 : + Predicate<"Subtarget->hasSIMD128()">, + AssemblerPredicate<"FeatureSIMD128", "simd128">; + def HasUnimplementedSIMD128 : Predicate<"Subtarget->hasUnimplementedSIMD128()">, AssemblerPredicate<"FeatureUnimplementedSIMD128", "unimplemented-simd128">; -def HasAtomics : Predicate<"Subtarget->hasAtomics()">, - AssemblerPredicate<"FeatureAtomics", "atomics">; + +def HasAtomics : + Predicate<"Subtarget->hasAtomics()">, + AssemblerPredicate<"FeatureAtomics", "atomics">; + +def HasMultivalue : + Predicate<"Subtarget->hasMultivalue()">, + AssemblerPredicate<"FeatureMultivalue", "multivalue">; + def HasNontrappingFPToInt : Predicate<"Subtarget->hasNontrappingFPToInt()">, - AssemblerPredicate<"FeatureNontrappingFPToInt", - "nontrapping-fptoint">; + AssemblerPredicate<"FeatureNontrappingFPToInt", "nontrapping-fptoint">; + def NotHasNontrappingFPToInt : Predicate<"!Subtarget->hasNontrappingFPToInt()">, - AssemblerPredicate<"!FeatureNontrappingFPToInt", - "nontrapping-fptoint">; + AssemblerPredicate<"!FeatureNontrappingFPToInt", "nontrapping-fptoint">; + def HasSignExt : Predicate<"Subtarget->hasSignExt()">, - AssemblerPredicate<"FeatureSignExt", - "sign-ext">; -def NotHasSignExt : - Predicate<"!Subtarget->hasSignExt()">, - AssemblerPredicate<"!FeatureSignExt", - "sign-ext">; + AssemblerPredicate<"FeatureSignExt", "sign-ext">; + +def HasTailCall : + Predicate<"Subtarget->hasTailCall()">, + AssemblerPredicate<"FeatureTailCall", "tail-call">; def HasExceptionHandling : Predicate<"Subtarget->hasExceptionHandling()">, - 
AssemblerPredicate<"FeatureExceptionHandling", - "exception-handling">; + AssemblerPredicate<"FeatureExceptionHandling", "exception-handling">; -def NotHasExceptionHandling : - Predicate<"!Subtarget->hasExceptionHandling()">, - AssemblerPredicate<"!FeatureExceptionHandling", - "exception-handling">; +def HasBulkMemory : + Predicate<"Subtarget->hasBulkMemory()">, + AssemblerPredicate<"FeatureBulkMemory", "bulk-memory">; //===----------------------------------------------------------------------===// // WebAssembly-specific DAG Node Types. @@ -60,14 +70,16 @@ def SDT_WebAssemblyCallSeqStart : SDCallSeqStart<[SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>; def SDT_WebAssemblyCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>; -def SDT_WebAssemblyCall0 : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>; -def SDT_WebAssemblyCall1 : SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>; -def SDT_WebAssemblyBrTable : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>; -def SDT_WebAssemblyArgument : SDTypeProfile<1, 1, [SDTCisVT<1, i32>]>; -def SDT_WebAssemblyReturn : SDTypeProfile<0, -1, []>; -def SDT_WebAssemblyWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, - SDTCisPtrTy<0>]>; -def SDT_WebAssemblyThrow : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>; +def SDT_WebAssemblyCall0 : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>; +def SDT_WebAssemblyCall1 : SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>; +def SDT_WebAssemblyBrTable : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>; +def SDT_WebAssemblyArgument : SDTypeProfile<1, 1, [SDTCisVT<1, i32>]>; +def SDT_WebAssemblyReturn : SDTypeProfile<0, -1, []>; +def SDT_WebAssemblyWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, + SDTCisPtrTy<0>]>; +def SDT_WebAssemblyWrapperPIC : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, + SDTCisPtrTy<0>]>; +def SDT_WebAssemblyThrow : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>; //===----------------------------------------------------------------------===// // WebAssembly-specific DAG Nodes. 
@@ -85,6 +97,9 @@ def WebAssemblycall0 : SDNode<"WebAssemblyISD::CALL0", def WebAssemblycall1 : SDNode<"WebAssemblyISD::CALL1", SDT_WebAssemblyCall1, [SDNPHasChain, SDNPVariadic]>; +def WebAssemblyretcall : SDNode<"WebAssemblyISD::RET_CALL", + SDT_WebAssemblyCall0, + [SDNPHasChain, SDNPVariadic]>; def WebAssemblybr_table : SDNode<"WebAssemblyISD::BR_TABLE", SDT_WebAssemblyBrTable, [SDNPHasChain, SDNPVariadic]>; @@ -94,13 +109,26 @@ def WebAssemblyreturn : SDNode<"WebAssemblyISD::RETURN", SDT_WebAssemblyReturn, [SDNPHasChain]>; def WebAssemblywrapper : SDNode<"WebAssemblyISD::Wrapper", SDT_WebAssemblyWrapper>; +def WebAssemblywrapperPIC : SDNode<"WebAssemblyISD::WrapperPIC", + SDT_WebAssemblyWrapperPIC>; def WebAssemblythrow : SDNode<"WebAssemblyISD::THROW", SDT_WebAssemblyThrow, - [SDNPHasChain]>; + [SDNPHasChain, SDNPVariadic]>; //===----------------------------------------------------------------------===// // WebAssembly-specific Operands. //===----------------------------------------------------------------------===// +// Default Operand has AsmOperandClass "Imm" which is for integers (and +// symbols), so specialize one for floats: +def FPImmAsmOperand : AsmOperandClass { + let Name = "FPImm"; + let PredicateMethod = "isFPImm"; +} + +class FPOperand<ValueType ty> : Operand<ty> { + AsmOperandClass ParserMatchClass = FPImmAsmOperand; +} + let OperandNamespace = "WebAssembly" in { let OperandType = "OPERAND_BASIC_BLOCK" in @@ -119,10 +147,10 @@ let OperandType = "OPERAND_I64IMM" in def i64imm_op : Operand<i64>; let OperandType = "OPERAND_F32IMM" in -def f32imm_op : Operand<f32>; +def f32imm_op : FPOperand<f32>; let OperandType = "OPERAND_F64IMM" in -def f64imm_op : Operand<f64>; +def f64imm_op : FPOperand<f64>; let OperandType = "OPERAND_VEC_I8IMM" in def vec_i8imm_op : Operand<i32>; @@ -152,11 +180,10 @@ def event_op : Operand<i32>; } // OperandType = "OPERAND_P2ALIGN" -let OperandType = "OPERAND_SIGNATURE" in { +let OperandType = "OPERAND_SIGNATURE" in def 
Signature : Operand<i32> { let PrintMethod = "printWebAssemblySignatureOperand"; } -} // OperandType = "OPERAND_SIGNATURE" let OperandType = "OPERAND_TYPEINDEX" in def TypeIndex : Operand<i32>; @@ -187,8 +214,8 @@ include "WebAssemblyInstrFormats.td" //===----------------------------------------------------------------------===// multiclass ARGUMENT<WebAssemblyRegClass reg, ValueType vt> { - let hasSideEffects = 1, isCodeGenOnly = 1, - Defs = []<Register>, Uses = [ARGUMENTS] in + let hasSideEffects = 1, isCodeGenOnly = 1, Defs = []<Register>, + Uses = [ARGUMENTS] in defm ARGUMENT_#vt : I<(outs reg:$res), (ins i32imm:$argno), (outs), (ins i32imm:$argno), [(set (vt reg:$res), (WebAssemblyargument timm:$argno))]>; @@ -197,12 +224,12 @@ defm "": ARGUMENT<I32, i32>; defm "": ARGUMENT<I64, i64>; defm "": ARGUMENT<F32, f32>; defm "": ARGUMENT<F64, f64>; -defm "": ARGUMENT<EXCEPT_REF, ExceptRef>; +defm "": ARGUMENT<EXNREF, exnref>; // local.get and local.set are not generated by instruction selection; they // are implied by virtual register uses and defs. 
multiclass LOCAL<WebAssemblyRegClass vt> { -let hasSideEffects = 0 in { + let hasSideEffects = 0 in { // COPY is not an actual instruction in wasm, but since we allow local.get and // local.set to be implicit during most of codegen, we can have a COPY which // is actually a no-op because all the work is done in the implied local.get @@ -267,7 +294,7 @@ defm "" : LOCAL<I64>; defm "" : LOCAL<F32>; defm "" : LOCAL<F64>; defm "" : LOCAL<V128>, Requires<[HasSIMD128]>; -defm "" : LOCAL<EXCEPT_REF>, Requires<[HasExceptionHandling]>; +defm "" : LOCAL<EXNREF>, Requires<[HasExceptionHandling]>; let isMoveImm = 1, isAsCheapAsAMove = 1, isReMaterializable = 1 in { defm CONST_I32 : I<(outs I32:$res), (ins i32imm_op:$imm), @@ -289,9 +316,20 @@ defm CONST_F64 : I<(outs F64:$res), (ins f64imm_op:$imm), } // isMoveImm = 1, isAsCheapAsAMove = 1, isReMaterializable = 1 def : Pat<(i32 (WebAssemblywrapper tglobaladdr:$addr)), - (CONST_I32 tglobaladdr:$addr)>; + (CONST_I32 tglobaladdr:$addr)>, Requires<[IsNotPIC]>; + +def : Pat<(i32 (WebAssemblywrapper tglobaladdr:$addr)), + (GLOBAL_GET_I32 tglobaladdr:$addr)>, Requires<[IsPIC]>; + +def : Pat<(i32 (WebAssemblywrapperPIC tglobaladdr:$addr)), + (CONST_I32 tglobaladdr:$addr)>, Requires<[IsPIC]>; + def : Pat<(i32 (WebAssemblywrapper texternalsym:$addr)), - (CONST_I32 texternalsym:$addr)>; + (GLOBAL_GET_I32 texternalsym:$addr)>, Requires<[IsPIC]>; + +def : Pat<(i32 (WebAssemblywrapper texternalsym:$addr)), + (CONST_I32 texternalsym:$addr)>, Requires<[IsNotPIC]>; + def : Pat<(i32 (WebAssemblywrapper mcsym:$sym)), (CONST_I32 mcsym:$sym)>; def : Pat<(i64 (WebAssemblywrapper mcsym:$sym)), (CONST_I64 mcsym:$sym)>; @@ -307,4 +345,5 @@ include "WebAssemblyInstrConv.td" include "WebAssemblyInstrFloat.td" include "WebAssemblyInstrAtomics.td" include "WebAssemblyInstrSIMD.td" -include "WebAssemblyInstrExceptRef.td" +include "WebAssemblyInstrRef.td" +include "WebAssemblyInstrBulkMemory.td" diff --git 
a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td index bd41f46214a3..18250cf8ef85 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td @@ -1,9 +1,8 @@ // WebAssemblyInstrInteger.td-WebAssembly Integer codegen -------*- tablegen -*- // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td index 518f81c61dc4..6916b165f970 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td @@ -1,9 +1,8 @@ // WebAssemblyInstrMemory.td-WebAssembly Memory codegen support -*- tablegen -*- // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -53,7 +52,7 @@ def regPlusGA : PatFrag<(ops node:$addr, node:$off), // Defines atomic and non-atomic loads, regular and extending. 
multiclass WebAssemblyLoad<WebAssemblyRegClass rc, string Name, int Opcode> { - let mayLoad = 1 in + let mayLoad = 1, UseNamedOperandTable = 1 in defm "": I<(outs rc:$dst), (ins P2Align:$p2align, offset32_op:$off, I32:$addr), (outs), (ins P2Align:$p2align, offset32_op:$off), @@ -96,22 +95,13 @@ def : LoadPatImmOff<f64, load, or_is_add, LOAD_F64>; class LoadPatGlobalAddr<ValueType ty, PatFrag kind, NI inst> : Pat<(ty (kind (regPlusGA I32:$addr, (WebAssemblywrapper tglobaladdr:$off)))), - (inst 0, tglobaladdr:$off, I32:$addr)>; + (inst 0, tglobaladdr:$off, I32:$addr)>, Requires<[IsNotPIC]>; def : LoadPatGlobalAddr<i32, load, LOAD_I32>; def : LoadPatGlobalAddr<i64, load, LOAD_I64>; def : LoadPatGlobalAddr<f32, load, LOAD_F32>; def : LoadPatGlobalAddr<f64, load, LOAD_F64>; -class LoadPatExternalSym<ValueType ty, PatFrag kind, NI inst> : - Pat<(ty (kind (add I32:$addr, (WebAssemblywrapper texternalsym:$off)))), - (inst 0, texternalsym:$off, I32:$addr)>; -def : LoadPatExternalSym<i32, load, LOAD_I32>; -def : LoadPatExternalSym<i64, load, LOAD_I64>; -def : LoadPatExternalSym<f32, load, LOAD_F32>; -def : LoadPatExternalSym<f64, load, LOAD_F64>; - - // Select loads with just a constant offset. 
class LoadPatOffsetOnly<ValueType ty, PatFrag kind, NI inst> : Pat<(ty (kind imm:$off)), (inst 0, imm:$off, (CONST_I32 0))>; @@ -123,21 +113,13 @@ def : LoadPatOffsetOnly<f64, load, LOAD_F64>; class LoadPatGlobalAddrOffOnly<ValueType ty, PatFrag kind, NI inst> : Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off))), - (inst 0, tglobaladdr:$off, (CONST_I32 0))>; + (inst 0, tglobaladdr:$off, (CONST_I32 0))>, Requires<[IsNotPIC]>; def : LoadPatGlobalAddrOffOnly<i32, load, LOAD_I32>; def : LoadPatGlobalAddrOffOnly<i64, load, LOAD_I64>; def : LoadPatGlobalAddrOffOnly<f32, load, LOAD_F32>; def : LoadPatGlobalAddrOffOnly<f64, load, LOAD_F64>; -class LoadPatExternSymOffOnly<ValueType ty, PatFrag kind, NI inst> : - Pat<(ty (kind (WebAssemblywrapper texternalsym:$off))), - (inst 0, texternalsym:$off, (CONST_I32 0))>; -def : LoadPatExternSymOffOnly<i32, load, LOAD_I32>; -def : LoadPatExternSymOffOnly<i64, load, LOAD_I64>; -def : LoadPatExternSymOffOnly<f32, load, LOAD_F32>; -def : LoadPatExternSymOffOnly<f64, load, LOAD_F64>; - // Extending load. 
defm LOAD8_S_I32 : WebAssemblyLoad<I32, "i32.load8_s", 0x2c>; defm LOAD8_U_I32 : WebAssemblyLoad<I32, "i32.load8_u", 0x2d>; @@ -197,18 +179,6 @@ def : LoadPatGlobalAddr<i64, zextloadi16, LOAD16_U_I64>; def : LoadPatGlobalAddr<i64, sextloadi32, LOAD32_S_I64>; def : LoadPatGlobalAddr<i64, zextloadi32, LOAD32_U_I64>; -def : LoadPatExternalSym<i32, sextloadi8, LOAD8_S_I32>; -def : LoadPatExternalSym<i32, zextloadi8, LOAD8_U_I32>; -def : LoadPatExternalSym<i32, sextloadi16, LOAD16_S_I32>; -def : LoadPatExternalSym<i32, zextloadi16, LOAD16_U_I32>; -def : LoadPatExternalSym<i64, sextloadi8, LOAD8_S_I64>; -def : LoadPatExternalSym<i64, zextloadi8, LOAD8_U_I64>; -def : LoadPatExternalSym<i64, sextloadi16, LOAD16_S_I64>; -def : LoadPatExternalSym<i64, zextloadi16, LOAD16_U_I64>; -def : LoadPatExternalSym<i64, sextloadi32, LOAD32_S_I64>; -def : LoadPatExternalSym<i64, zextloadi32, LOAD32_U_I64>; - - // Select extending loads with just a constant offset. def : LoadPatOffsetOnly<i32, sextloadi8, LOAD8_S_I32>; def : LoadPatOffsetOnly<i32, zextloadi8, LOAD8_U_I32>; @@ -233,17 +203,6 @@ def : LoadPatGlobalAddrOffOnly<i64, zextloadi16, LOAD16_U_I64>; def : LoadPatGlobalAddrOffOnly<i64, sextloadi32, LOAD32_S_I64>; def : LoadPatGlobalAddrOffOnly<i64, zextloadi32, LOAD32_U_I64>; -def : LoadPatExternSymOffOnly<i32, sextloadi8, LOAD8_S_I32>; -def : LoadPatExternSymOffOnly<i32, zextloadi8, LOAD8_U_I32>; -def : LoadPatExternSymOffOnly<i32, sextloadi16, LOAD16_S_I32>; -def : LoadPatExternSymOffOnly<i32, zextloadi16, LOAD16_U_I32>; -def : LoadPatExternSymOffOnly<i64, sextloadi8, LOAD8_S_I64>; -def : LoadPatExternSymOffOnly<i64, zextloadi8, LOAD8_U_I64>; -def : LoadPatExternSymOffOnly<i64, sextloadi16, LOAD16_S_I64>; -def : LoadPatExternSymOffOnly<i64, zextloadi16, LOAD16_U_I64>; -def : LoadPatExternSymOffOnly<i64, sextloadi32, LOAD32_S_I64>; -def : LoadPatExternSymOffOnly<i64, zextloadi32, LOAD32_U_I64>; - // Resolve "don't care" extending loads to zero-extending loads. 
This is // somewhat arbitrary, but zero-extending is conceptually simpler. @@ -270,11 +229,6 @@ def : LoadPatGlobalAddr<i32, extloadi16, LOAD16_U_I32>; def : LoadPatGlobalAddr<i64, extloadi8, LOAD8_U_I64>; def : LoadPatGlobalAddr<i64, extloadi16, LOAD16_U_I64>; def : LoadPatGlobalAddr<i64, extloadi32, LOAD32_U_I64>; -def : LoadPatExternalSym<i32, extloadi8, LOAD8_U_I32>; -def : LoadPatExternalSym<i32, extloadi16, LOAD16_U_I32>; -def : LoadPatExternalSym<i64, extloadi8, LOAD8_U_I64>; -def : LoadPatExternalSym<i64, extloadi16, LOAD16_U_I64>; -def : LoadPatExternalSym<i64, extloadi32, LOAD32_U_I64>; // Select "don't care" extending loads with just a constant offset. def : LoadPatOffsetOnly<i32, extloadi8, LOAD8_U_I32>; @@ -287,15 +241,10 @@ def : LoadPatGlobalAddrOffOnly<i32, extloadi16, LOAD16_U_I32>; def : LoadPatGlobalAddrOffOnly<i64, extloadi8, LOAD8_U_I64>; def : LoadPatGlobalAddrOffOnly<i64, extloadi16, LOAD16_U_I64>; def : LoadPatGlobalAddrOffOnly<i64, extloadi32, LOAD32_U_I64>; -def : LoadPatExternSymOffOnly<i32, extloadi8, LOAD8_U_I32>; -def : LoadPatExternSymOffOnly<i32, extloadi16, LOAD16_U_I32>; -def : LoadPatExternSymOffOnly<i64, extloadi8, LOAD8_U_I64>; -def : LoadPatExternSymOffOnly<i64, extloadi16, LOAD16_U_I64>; -def : LoadPatExternSymOffOnly<i64, extloadi32, LOAD32_U_I64>; // Defines atomic and non-atomic stores, regular and truncating multiclass WebAssemblyStore<WebAssemblyRegClass rc, string Name, int Opcode> { - let mayStore = 1 in + let mayStore = 1, UseNamedOperandTable = 1 in defm "" : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$val), (outs), @@ -336,20 +285,12 @@ def : StorePatImmOff<f64, store, or_is_add, STORE_F64>; class StorePatGlobalAddr<ValueType ty, PatFrag kind, NI inst> : Pat<(kind ty:$val, (regPlusGA I32:$addr, (WebAssemblywrapper tglobaladdr:$off))), - (inst 0, tglobaladdr:$off, I32:$addr, ty:$val)>; + (inst 0, tglobaladdr:$off, I32:$addr, ty:$val)>, Requires<[IsNotPIC]>; def : StorePatGlobalAddr<i32, store, 
STORE_I32>; def : StorePatGlobalAddr<i64, store, STORE_I64>; def : StorePatGlobalAddr<f32, store, STORE_F32>; def : StorePatGlobalAddr<f64, store, STORE_F64>; -class StorePatExternalSym<ValueType ty, PatFrag kind, NI inst> : - Pat<(kind ty:$val, (add I32:$addr, (WebAssemblywrapper texternalsym:$off))), - (inst 0, texternalsym:$off, I32:$addr, ty:$val)>; -def : StorePatExternalSym<i32, store, STORE_I32>; -def : StorePatExternalSym<i64, store, STORE_I64>; -def : StorePatExternalSym<f32, store, STORE_F32>; -def : StorePatExternalSym<f64, store, STORE_F64>; - // Select stores with just a constant offset. class StorePatOffsetOnly<ValueType ty, PatFrag kind, NI inst> : Pat<(kind ty:$val, imm:$off), (inst 0, imm:$off, (CONST_I32 0), ty:$val)>; @@ -360,20 +301,12 @@ def : StorePatOffsetOnly<f64, store, STORE_F64>; class StorePatGlobalAddrOffOnly<ValueType ty, PatFrag kind, NI inst> : Pat<(kind ty:$val, (WebAssemblywrapper tglobaladdr:$off)), - (inst 0, tglobaladdr:$off, (CONST_I32 0), ty:$val)>; + (inst 0, tglobaladdr:$off, (CONST_I32 0), ty:$val)>, Requires<[IsNotPIC]>; def : StorePatGlobalAddrOffOnly<i32, store, STORE_I32>; def : StorePatGlobalAddrOffOnly<i64, store, STORE_I64>; def : StorePatGlobalAddrOffOnly<f32, store, STORE_F32>; def : StorePatGlobalAddrOffOnly<f64, store, STORE_F64>; -class StorePatExternSymOffOnly<ValueType ty, PatFrag kind, NI inst> : - Pat<(kind ty:$val, (WebAssemblywrapper texternalsym:$off)), - (inst 0, texternalsym:$off, (CONST_I32 0), ty:$val)>; -def : StorePatExternSymOffOnly<i32, store, STORE_I32>; -def : StorePatExternSymOffOnly<i64, store, STORE_I64>; -def : StorePatExternSymOffOnly<f32, store, STORE_F32>; -def : StorePatExternSymOffOnly<f64, store, STORE_F64>; - // Truncating store. 
defm STORE8_I32 : WebAssemblyStore<I32, "i32.store8", 0x3a>; defm STORE16_I32 : WebAssemblyStore<I32, "i32.store16", 0x3b>; @@ -405,11 +338,6 @@ def : StorePatGlobalAddr<i32, truncstorei16, STORE16_I32>; def : StorePatGlobalAddr<i64, truncstorei8, STORE8_I64>; def : StorePatGlobalAddr<i64, truncstorei16, STORE16_I64>; def : StorePatGlobalAddr<i64, truncstorei32, STORE32_I64>; -def : StorePatExternalSym<i32, truncstorei8, STORE8_I32>; -def : StorePatExternalSym<i32, truncstorei16, STORE16_I32>; -def : StorePatExternalSym<i64, truncstorei8, STORE8_I64>; -def : StorePatExternalSym<i64, truncstorei16, STORE16_I64>; -def : StorePatExternalSym<i64, truncstorei32, STORE32_I64>; // Select truncating stores with just a constant offset. def : StorePatOffsetOnly<i32, truncstorei8, STORE8_I32>; @@ -422,11 +350,6 @@ def : StorePatGlobalAddrOffOnly<i32, truncstorei16, STORE16_I32>; def : StorePatGlobalAddrOffOnly<i64, truncstorei8, STORE8_I64>; def : StorePatGlobalAddrOffOnly<i64, truncstorei16, STORE16_I64>; def : StorePatGlobalAddrOffOnly<i64, truncstorei32, STORE32_I64>; -def : StorePatExternSymOffOnly<i32, truncstorei8, STORE8_I32>; -def : StorePatExternSymOffOnly<i32, truncstorei16, STORE16_I32>; -def : StorePatExternSymOffOnly<i64, truncstorei8, STORE8_I64>; -def : StorePatExternSymOffOnly<i64, truncstorei16, STORE16_I64>; -def : StorePatExternSymOffOnly<i64, truncstorei32, STORE32_I64>; // Current memory size. defm MEMORY_SIZE_I32 : I<(outs I32:$dst), (ins i32imm:$flags), diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrRef.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrRef.td new file mode 100644 index 000000000000..afe89de60b36 --- /dev/null +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrRef.td @@ -0,0 +1,25 @@ +// WebAssemblyInstrRef.td - WebAssembly reference type codegen --*- tablegen -*- +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// WebAssembly refence type operand codegen constructs. +/// +//===----------------------------------------------------------------------===// + +defm SELECT_EXNREF : I<(outs EXNREF:$dst), + (ins EXNREF:$lhs, EXNREF:$rhs, I32:$cond), + (outs), (ins), + [(set EXNREF:$dst, + (select I32:$cond, EXNREF:$lhs, EXNREF:$rhs))], + "exnref.select\t$dst, $lhs, $rhs, $cond", + "exnref.select", 0x1b>; + +def : Pat<(select (i32 (setne I32:$cond, 0)), EXNREF:$lhs, EXNREF:$rhs), + (SELECT_EXNREF EXNREF:$lhs, EXNREF:$rhs, I32:$cond)>; +def : Pat<(select (i32 (seteq I32:$cond, 0)), EXNREF:$lhs, EXNREF:$rhs), + (SELECT_EXNREF EXNREF:$rhs, EXNREF:$lhs, I32:$cond)>; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td index 587515c5b299..dd8930f079b0 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td @@ -1,9 +1,8 @@ // WebAssemblyInstrSIMD.td - WebAssembly SIMD codegen support -*- tablegen -*-// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -31,7 +30,7 @@ defm "" : ARGUMENT<V128, v2f64>; // Constrained immediate argument types foreach SIZE = [8, 16] in def ImmI#SIZE : ImmLeaf<i32, - "return ((uint64_t)Imm & ((1UL << "#SIZE#") - 1)) == (uint64_t)Imm;" + "return -(1 << ("#SIZE#" - 1)) <= Imm && Imm < (1 << ("#SIZE#" - 1));" >; foreach SIZE = [2, 4, 8, 16, 32] in def LaneIdx#SIZE : ImmLeaf<i32, "return 0 <= Imm && Imm < "#SIZE#";">; @@ -42,12 +41,12 @@ def LaneIdx#SIZE : ImmLeaf<i32, "return 0 <= Imm && Imm < "#SIZE#";">; // Load: v128.load multiclass SIMDLoad<ValueType vec_t> { - let mayLoad = 1 in + let mayLoad = 1, UseNamedOperandTable = 1 in defm LOAD_#vec_t : - SIMD_I<(outs V128:$dst), (ins P2Align:$align, offset32_op:$off, I32:$addr), - (outs), (ins P2Align:$align, offset32_op:$off), [], - "v128.load\t$dst, ${off}(${addr})$align", - "v128.load\t$off$align", 0>; + SIMD_I<(outs V128:$dst), (ins P2Align:$p2align, offset32_op:$off, I32:$addr), + (outs), (ins P2Align:$p2align, offset32_op:$off), [], + "v128.load\t$dst, ${off}(${addr})$p2align", + "v128.load\t$off$p2align", 0>; } foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in { @@ -58,20 +57,18 @@ def : LoadPatNoOffset<vec_t, load, !cast<NI>("LOAD_"#vec_t)>; def : LoadPatImmOff<vec_t, load, regPlusImm, !cast<NI>("LOAD_"#vec_t)>; def : LoadPatImmOff<vec_t, load, or_is_add, !cast<NI>("LOAD_"#vec_t)>; def : LoadPatGlobalAddr<vec_t, load, !cast<NI>("LOAD_"#vec_t)>; -def : LoadPatExternalSym<vec_t, load, !cast<NI>("LOAD_"#vec_t)>; def : LoadPatOffsetOnly<vec_t, load, !cast<NI>("LOAD_"#vec_t)>; def : LoadPatGlobalAddrOffOnly<vec_t, load, !cast<NI>("LOAD_"#vec_t)>; -def : LoadPatExternSymOffOnly<vec_t, load, !cast<NI>("LOAD_"#vec_t)>; } // Store: v128.store multiclass SIMDStore<ValueType vec_t> { - let mayStore = 1 in + let mayStore = 1, UseNamedOperandTable = 1 in defm STORE_#vec_t : - 
SIMD_I<(outs), (ins P2Align:$align, offset32_op:$off, I32:$addr, V128:$vec), - (outs), (ins P2Align:$align, offset32_op:$off), [], - "v128.store\t${off}(${addr})$align, $vec", - "v128.store\t$off$align", 1>; + SIMD_I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, V128:$vec), + (outs), (ins P2Align:$p2align, offset32_op:$off), [], + "v128.store\t${off}(${addr})$p2align, $vec", + "v128.store\t$off$p2align", 1>; } foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in { @@ -82,10 +79,8 @@ def : StorePatNoOffset<vec_t, store, !cast<NI>("STORE_"#vec_t)>; def : StorePatImmOff<vec_t, store, regPlusImm, !cast<NI>("STORE_"#vec_t)>; def : StorePatImmOff<vec_t, store, or_is_add, !cast<NI>("STORE_"#vec_t)>; def : StorePatGlobalAddr<vec_t, store, !cast<NI>("STORE_"#vec_t)>; -def : StorePatExternalSym<vec_t, store, !cast<NI>("STORE_"#vec_t)>; def : StorePatOffsetOnly<vec_t, store, !cast<NI>("STORE_"#vec_t)>; def : StorePatGlobalAddrOffOnly<vec_t, store, !cast<NI>("STORE_"#vec_t)>; -def : StorePatExternSymOffOnly<vec_t, store, !cast<NI>("STORE_"#vec_t)>; } //===----------------------------------------------------------------------===// @@ -95,7 +90,7 @@ def : StorePatExternSymOffOnly<vec_t, store, !cast<NI>("STORE_"#vec_t)>; // Constant: v128.const multiclass ConstVec<ValueType vec_t, dag ops, dag pat, string args> { let isMoveImm = 1, isReMaterializable = 1, - Predicates = [HasSIMD128, HasUnimplementedSIMD128] in + Predicates = [HasSIMD128, HasUnimplementedSIMD128] in defm CONST_V128_#vec_t : SIMD_I<(outs V128:$dst), ops, (outs), ops, [(set V128:$dst, (vec_t pat))], "v128.const\t$dst, "#args, @@ -126,6 +121,7 @@ defm "" : ConstVec<v8i16, ImmI16:$i0, ImmI16:$i1, ImmI16:$i2, ImmI16:$i3, ImmI16:$i4, ImmI16:$i5, ImmI16:$i6, ImmI16:$i7), "$i0, $i1, $i2, $i3, $i4, $i5, $i6, $i7">; +let IsCanonical = 1 in defm "" : ConstVec<v4i32, (ins vec_i32imm_op:$i0, vec_i32imm_op:$i1, vec_i32imm_op:$i2, vec_i32imm_op:$i3), @@ -231,6 +227,19 @@ defm "" : Splat<v2i64, "i64x2", 
I64, splat2, 15>; defm "" : Splat<v4f32, "f32x4", F32, splat4, 18>; defm "" : Splat<v2f64, "f64x2", F64, splat2, 21>; +// scalar_to_vector leaves high lanes undefined, so can be a splat +class ScalarSplatPat<ValueType vec_t, ValueType lane_t, + WebAssemblyRegClass reg_t> : + Pat<(vec_t (scalar_to_vector (lane_t reg_t:$x))), + (!cast<Instruction>("SPLAT_"#vec_t) reg_t:$x)>; + +def : ScalarSplatPat<v16i8, i32, I32>; +def : ScalarSplatPat<v8i16, i32, I32>; +def : ScalarSplatPat<v4i32, i32, I32>; +def : ScalarSplatPat<v2i64, i64, I64>; +def : ScalarSplatPat<v4f32, f32, F32>; +def : ScalarSplatPat<v2f64, f64, F64>; + //===----------------------------------------------------------------------===// // Accessing lanes //===----------------------------------------------------------------------===// @@ -347,118 +356,6 @@ def : Pat<(vector_insert (v4f32 V128:$vec), F32:$x, undef), def : Pat<(vector_insert (v2f64 V128:$vec), F64:$x, undef), (REPLACE_LANE_v2f64 V128:$vec, 0, F64:$x)>; -// Arbitrary other BUILD_VECTOR patterns -def : Pat<(v16i8 (build_vector - (i32 I32:$x0), (i32 I32:$x1), (i32 I32:$x2), (i32 I32:$x3), - (i32 I32:$x4), (i32 I32:$x5), (i32 I32:$x6), (i32 I32:$x7), - (i32 I32:$x8), (i32 I32:$x9), (i32 I32:$x10), (i32 I32:$x11), - (i32 I32:$x12), (i32 I32:$x13), (i32 I32:$x14), (i32 I32:$x15) - )), - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (SPLAT_v16i8 (i32 I32:$x0))), - 1, I32:$x1 - )), - 2, I32:$x2 - )), - 3, I32:$x3 - )), - 4, I32:$x4 - )), - 5, I32:$x5 - )), - 6, I32:$x6 - )), - 7, I32:$x7 - )), - 8, I32:$x8 - )), - 9, I32:$x9 - )), - 10, I32:$x10 - )), - 11, 
I32:$x11 - )), - 12, I32:$x12 - )), - 13, I32:$x13 - )), - 14, I32:$x14 - )), - 15, I32:$x15 - ))>; -def : Pat<(v8i16 (build_vector - (i32 I32:$x0), (i32 I32:$x1), (i32 I32:$x2), (i32 I32:$x3), - (i32 I32:$x4), (i32 I32:$x5), (i32 I32:$x6), (i32 I32:$x7) - )), - (v8i16 (REPLACE_LANE_v8i16 - (v8i16 (REPLACE_LANE_v8i16 - (v8i16 (REPLACE_LANE_v8i16 - (v8i16 (REPLACE_LANE_v8i16 - (v8i16 (REPLACE_LANE_v8i16 - (v8i16 (REPLACE_LANE_v8i16 - (v8i16 (REPLACE_LANE_v8i16 - (v8i16 (SPLAT_v8i16 (i32 I32:$x0))), - 1, I32:$x1 - )), - 2, I32:$x2 - )), - 3, I32:$x3 - )), - 4, I32:$x4 - )), - 5, I32:$x5 - )), - 6, I32:$x6 - )), - 7, I32:$x7 - ))>; -def : Pat<(v4i32 (build_vector - (i32 I32:$x0), (i32 I32:$x1), (i32 I32:$x2), (i32 I32:$x3) - )), - (v4i32 (REPLACE_LANE_v4i32 - (v4i32 (REPLACE_LANE_v4i32 - (v4i32 (REPLACE_LANE_v4i32 - (v4i32 (SPLAT_v4i32 (i32 I32:$x0))), - 1, I32:$x1 - )), - 2, I32:$x2 - )), - 3, I32:$x3 - ))>; -def : Pat<(v2i64 (build_vector (i64 I64:$x0), (i64 I64:$x1))), - (v2i64 (REPLACE_LANE_v2i64 - (v2i64 (SPLAT_v2i64 (i64 I64:$x0))), 1, I64:$x1))>; -def : Pat<(v4f32 (build_vector - (f32 F32:$x0), (f32 F32:$x1), (f32 F32:$x2), (f32 F32:$x3) - )), - (v4f32 (REPLACE_LANE_v4f32 - (v4f32 (REPLACE_LANE_v4f32 - (v4f32 (REPLACE_LANE_v4f32 - (v4f32 (SPLAT_v4f32 (f32 F32:$x0))), - 1, F32:$x1 - )), - 2, F32:$x2 - )), - 3, F32:$x3 - ))>; -def : Pat<(v2f64 (build_vector (f64 F64:$x0), (f64 F64:$x1))), - (v2f64 (REPLACE_LANE_v2f64 - (v2f64 (SPLAT_v2f64 (f64 F64:$x0))), 1, F64:$x1))>; - //===----------------------------------------------------------------------===// // Comparisons //===----------------------------------------------------------------------===// @@ -520,16 +417,18 @@ defm GE_U : SIMDConditionInt<"ge_u", SETUGE, 33>; defm GE : SIMDConditionFP<"ge", SETOGE, 69>; // Lower float comparisons that don't care about NaN to standard WebAssembly -// float comparisons. 
These instructions are generated in the target-independent -// expansion of unordered comparisons and ordered ne. -def : Pat<(v4i32 (seteq (v4f32 V128:$lhs), (v4f32 V128:$rhs))), - (v4i32 (EQ_v4f32 (v4f32 V128:$lhs), (v4f32 V128:$rhs)))>; -def : Pat<(v4i32 (setne (v4f32 V128:$lhs), (v4f32 V128:$rhs))), - (v4i32 (NE_v4f32 (v4f32 V128:$lhs), (v4f32 V128:$rhs)))>; -def : Pat<(v2i64 (seteq (v2f64 V128:$lhs), (v2f64 V128:$rhs))), - (v2i64 (EQ_v2f64 (v2f64 V128:$lhs), (v2f64 V128:$rhs)))>; -def : Pat<(v2i64 (setne (v2f64 V128:$lhs), (v2f64 V128:$rhs))), - (v2i64 (NE_v2f64 (v2f64 V128:$lhs), (v2f64 V128:$rhs)))>; +// float comparisons. These instructions are generated with nnan and in the +// target-independent expansion of unordered comparisons and ordered ne. +foreach nodes = [[seteq, EQ_v4f32], [setne, NE_v4f32], [setlt, LT_v4f32], + [setgt, GT_v4f32], [setle, LE_v4f32], [setge, GE_v4f32]] in +def : Pat<(v4i32 (nodes[0] (v4f32 V128:$lhs), (v4f32 V128:$rhs))), + (v4i32 (nodes[1] (v4f32 V128:$lhs), (v4f32 V128:$rhs)))>; + +foreach nodes = [[seteq, EQ_v2f64], [setne, NE_v2f64], [setlt, LT_v2f64], + [setgt, GT_v2f64], [setle, LE_v2f64], [setge, GE_v2f64]] in +def : Pat<(v2i64 (nodes[0] (v2f64 V128:$lhs), (v2f64 V128:$rhs))), + (v2i64 (nodes[1] (v2f64 V128:$lhs), (v2f64 V128:$rhs)))>; + //===----------------------------------------------------------------------===// // Bitwise operations @@ -628,6 +527,28 @@ defm ANYTRUE : SIMDReduce<int_wasm_anytrue, "any_true", 82>; // All lanes true: all_true defm ALLTRUE : SIMDReduce<int_wasm_alltrue, "all_true", 83>; +// Reductions already return 0 or 1, so and 1, setne 0, and seteq 1 +// can be folded out +foreach reduction = + [["int_wasm_anytrue", "ANYTRUE"], ["int_wasm_alltrue", "ALLTRUE"]] in +foreach ty = [v16i8, v8i16, v4i32, v2i64] in { +def : Pat<(i32 (and + (i32 (!cast<Intrinsic>(reduction[0]) (ty V128:$x))), + (i32 1) + )), + (i32 (!cast<NI>(reduction[1]#"_"#ty) (ty V128:$x)))>; +def : Pat<(i32 (setne + (i32 
(!cast<Intrinsic>(reduction[0]) (ty V128:$x))), + (i32 0) + )), + (i32 (!cast<NI>(reduction[1]#"_"#ty) (ty V128:$x)))>; +def : Pat<(i32 (seteq + (i32 (!cast<Intrinsic>(reduction[0]) (ty V128:$x))), + (i32 1) + )), + (i32 (!cast<NI>(reduction[1]#"_"#ty) (ty V128:$x)))>; +} + //===----------------------------------------------------------------------===// // Bit shifts //===----------------------------------------------------------------------===// @@ -658,10 +579,16 @@ defm SHL : SIMDShiftInt<shl, "shl", 84>; defm SHR_S : SIMDShiftInt<sra, "shr_s", 85>; defm SHR_U : SIMDShiftInt<srl, "shr_u", 86>; -// Truncate i64 shift operands to i32s -foreach shifts = [[shl, SHL_v2i64], [sra, SHR_S_v2i64], [srl, SHR_U_v2i64]] in +// Truncate i64 shift operands to i32s, except if they are already i32s +foreach shifts = [[shl, SHL_v2i64], [sra, SHR_S_v2i64], [srl, SHR_U_v2i64]] in { +def : Pat<(v2i64 (shifts[0] + (v2i64 V128:$vec), + (v2i64 (splat2 (i64 (sext I32:$x)))) + )), + (v2i64 (shifts[1] (v2i64 V128:$vec), (i32 I32:$x)))>; def : Pat<(v2i64 (shifts[0] (v2i64 V128:$vec), (v2i64 (splat2 I64:$x)))), (v2i64 (shifts[1] (v2i64 V128:$vec), (I32_WRAP_I64 I64:$x)))>; +} // 2xi64 shifts with constant shift amounts are custom lowered to avoid wrapping def wasm_shift_t : SDTypeProfile<1, 2, diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp index ad838dfb574a..e92b34430272 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp @@ -1,9 +1,8 @@ //=== WebAssemblyLateEHPrepare.cpp - WebAssembly Exception Preparation -===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -16,29 +15,26 @@ #include "WebAssembly.h" #include "WebAssemblySubtarget.h" #include "WebAssemblyUtilities.h" +#include "llvm/ADT/SmallSet.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/WasmEHFuncInfo.h" #include "llvm/MC/MCAsmInfo.h" using namespace llvm; -#define DEBUG_TYPE "wasm-exception-prepare" +#define DEBUG_TYPE "wasm-late-eh-prepare" namespace { class WebAssemblyLateEHPrepare final : public MachineFunctionPass { StringRef getPassName() const override { - return "WebAssembly Prepare Exception"; + return "WebAssembly Late Prepare Exception"; } bool runOnMachineFunction(MachineFunction &MF) override; - - bool removeUnnecessaryUnreachables(MachineFunction &MF); + bool addCatches(MachineFunction &MF); bool replaceFuncletReturns(MachineFunction &MF); - bool hoistCatches(MachineFunction &MF); - bool addCatchAlls(MachineFunction &MF); - bool addRethrows(MachineFunction &MF); - bool ensureSingleBBTermPads(MachineFunction &MF); - bool mergeTerminatePads(MachineFunction &MF); - bool addCatchAllTerminatePads(MachineFunction &MF); + bool removeUnnecessaryUnreachables(MachineFunction &MF); + bool addExceptionExtraction(MachineFunction &MF); + bool restoreStackPointer(MachineFunction &MF); public: static char ID; // Pass identification, replacement for typeid @@ -112,48 +108,40 @@ bool WebAssemblyLateEHPrepare::runOnMachineFunction(MachineFunction &MF) { return false; bool Changed = false; + if (MF.getFunction().hasPersonalityFn()) { + Changed |= addCatches(MF); + Changed |= replaceFuncletReturns(MF); + } Changed |= removeUnnecessaryUnreachables(MF); - Changed |= addRethrows(MF); - if (!MF.getFunction().hasPersonalityFn()) - return Changed; - Changed |= replaceFuncletReturns(MF); - Changed |= hoistCatches(MF); - Changed |= 
addCatchAlls(MF); - Changed |= ensureSingleBBTermPads(MF); - Changed |= mergeTerminatePads(MF); - Changed |= addCatchAllTerminatePads(MF); + if (MF.getFunction().hasPersonalityFn()) { + Changed |= addExceptionExtraction(MF); + Changed |= restoreStackPointer(MF); + } return Changed; } -bool WebAssemblyLateEHPrepare::removeUnnecessaryUnreachables( - MachineFunction &MF) { +// Add catch instruction to beginning of catchpads and cleanuppads. +bool WebAssemblyLateEHPrepare::addCatches(MachineFunction &MF) { bool Changed = false; + const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); + MachineRegisterInfo &MRI = MF.getRegInfo(); for (auto &MBB : MF) { - for (auto &MI : MBB) { - if (!WebAssembly::isThrow(MI)) - continue; + if (MBB.isEHPad()) { Changed = true; - - // The instruction after the throw should be an unreachable or a branch to - // another BB that should eventually lead to an unreachable. Delete it - // because throw itself is a terminator, and also delete successors if - // any. 
- MBB.erase(std::next(MachineBasicBlock::iterator(MI)), MBB.end()); - SmallVector<MachineBasicBlock *, 8> Succs(MBB.succ_begin(), - MBB.succ_end()); - for (auto *Succ : Succs) - MBB.removeSuccessor(Succ); - eraseDeadBBsAndChildren(Succs); + auto InsertPos = MBB.begin(); + if (InsertPos->isEHLabel()) // EH pad starts with an EH label + ++InsertPos; + unsigned DstReg = MRI.createVirtualRegister(&WebAssembly::EXNREFRegClass); + BuildMI(MBB, InsertPos, MBB.begin()->getDebugLoc(), + TII.get(WebAssembly::CATCH), DstReg); } } - return Changed; } bool WebAssemblyLateEHPrepare::replaceFuncletReturns(MachineFunction &MF) { bool Changed = false; const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); - auto *EHInfo = MF.getWasmEHFuncInfo(); for (auto &MBB : MF) { auto Pos = MBB.getFirstTerminator(); @@ -172,15 +160,17 @@ bool WebAssemblyLateEHPrepare::replaceFuncletReturns(MachineFunction &MF) { Changed = true; break; } - case WebAssembly::CLEANUPRET: { - // Replace a cleanupret with a rethrow - if (EHInfo->hasThrowUnwindDest(&MBB)) - BuildMI(MBB, TI, TI->getDebugLoc(), TII.get(WebAssembly::RETHROW)) - .addMBB(EHInfo->getThrowUnwindDest(&MBB)); - else - BuildMI(MBB, TI, TI->getDebugLoc(), - TII.get(WebAssembly::RETHROW_TO_CALLER)); - + case WebAssembly::CLEANUPRET: + case WebAssembly::RETHROW_IN_CATCH: { + // Replace a cleanupret/rethrow_in_catch with a rethrow + auto *EHPad = getMatchingEHPad(TI); + auto CatchPos = EHPad->begin(); + if (CatchPos->isEHLabel()) // EH pad starts with an EH label + ++CatchPos; + MachineInstr *Catch = &*CatchPos; + unsigned ExnReg = Catch->getOperand(0).getReg(); + BuildMI(MBB, TI, TI->getDebugLoc(), TII.get(WebAssembly::RETHROW)) + .addReg(ExnReg); TI->eraseFromParent(); Changed = true; break; @@ -190,233 +180,208 @@ bool WebAssemblyLateEHPrepare::replaceFuncletReturns(MachineFunction &MF) { return Changed; } -// Hoist catch instructions to the beginning of their matching EH pad BBs in -// case, -// (1) catch instruction is 
not the first instruction in EH pad. -// ehpad: -// some_other_instruction -// ... -// %exn = catch 0 -// (2) catch instruction is in a non-EH pad BB. For example, -// ehpad: -// br bb0 -// bb0: -// %exn = catch 0 -bool WebAssemblyLateEHPrepare::hoistCatches(MachineFunction &MF) { - bool Changed = false; - SmallVector<MachineInstr *, 16> Catches; - for (auto &MBB : MF) - for (auto &MI : MBB) - if (WebAssembly::isCatch(MI)) - Catches.push_back(&MI); - - for (auto *Catch : Catches) { - MachineBasicBlock *EHPad = getMatchingEHPad(Catch); - assert(EHPad && "No matching EH pad for catch"); - if (EHPad->begin() == Catch) - continue; - Changed = true; - EHPad->insert(EHPad->begin(), Catch->removeFromParent()); - } - return Changed; -} - -// Add catch_all to beginning of cleanup pads. -bool WebAssemblyLateEHPrepare::addCatchAlls(MachineFunction &MF) { +bool WebAssemblyLateEHPrepare::removeUnnecessaryUnreachables( + MachineFunction &MF) { bool Changed = false; - const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); - for (auto &MBB : MF) { - if (!MBB.isEHPad()) - continue; - // This runs after hoistCatches(), so we assume that if there is a catch, - // that should be the first instruction in an EH pad. 
- if (!WebAssembly::isCatch(*MBB.begin())) { - Changed = true; - BuildMI(MBB, MBB.begin(), MBB.begin()->getDebugLoc(), - TII.get(WebAssembly::CATCH_ALL)); - } - } - return Changed; -} - -// Add a 'rethrow' instruction after __cxa_rethrow() call -bool WebAssemblyLateEHPrepare::addRethrows(MachineFunction &MF) { - bool Changed = false; - const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); - auto *EHInfo = MF.getWasmEHFuncInfo(); - - for (auto &MBB : MF) for (auto &MI : MBB) { - // Check if it is a call to __cxa_rethrow() - if (!MI.isCall()) + if (MI.getOpcode() != WebAssembly::THROW && + MI.getOpcode() != WebAssembly::RETHROW) continue; - MachineOperand &CalleeOp = MI.getOperand(0); - if (!CalleeOp.isGlobal() || - CalleeOp.getGlobal()->getName() != WebAssembly::CxaRethrowFn) - continue; - - // Now we have __cxa_rethrow() call Changed = true; - auto InsertPt = std::next(MachineBasicBlock::iterator(MI)); - while (InsertPt != MBB.end() && InsertPt->isLabel()) // Skip EH_LABELs - ++InsertPt; - MachineInstr *Rethrow = nullptr; - if (EHInfo->hasThrowUnwindDest(&MBB)) - Rethrow = BuildMI(MBB, InsertPt, MI.getDebugLoc(), - TII.get(WebAssembly::RETHROW)) - .addMBB(EHInfo->getThrowUnwindDest(&MBB)); - else - Rethrow = BuildMI(MBB, InsertPt, MI.getDebugLoc(), - TII.get(WebAssembly::RETHROW_TO_CALLER)); - // Because __cxa_rethrow does not return, the instruction after the - // rethrow should be an unreachable or a branch to another BB that should - // eventually lead to an unreachable. Delete it because rethrow itself is - // a terminator, and also delete non-EH pad successors if any. - MBB.erase(std::next(MachineBasicBlock::iterator(Rethrow)), MBB.end()); - SmallVector<MachineBasicBlock *, 8> NonPadSuccessors; - for (auto *Succ : MBB.successors()) + // The instruction after the throw should be an unreachable or a branch to + // another BB that should eventually lead to an unreachable. 
Delete it + // because throw itself is a terminator, and also delete successors if + // any. + MBB.erase(std::next(MI.getIterator()), MBB.end()); + SmallVector<MachineBasicBlock *, 8> Succs(MBB.succ_begin(), + MBB.succ_end()); + for (auto *Succ : Succs) if (!Succ->isEHPad()) - NonPadSuccessors.push_back(Succ); - for (auto *Succ : NonPadSuccessors) - MBB.removeSuccessor(Succ); - eraseDeadBBsAndChildren(NonPadSuccessors); + MBB.removeSuccessor(Succ); + eraseDeadBBsAndChildren(Succs); } + } + return Changed; } -// Terminate pads are an single-BB EH pad in the form of -// termpad: -// %exn = catch 0 -// call @__clang_call_terminate(%exn) -// unreachable -// (There can be local.set and local.gets before the call if we didn't run -// RegStackify) -// But code transformations can change or add more control flow, so the call to -// __clang_call_terminate() function may not be in the original EH pad anymore. -// This ensures every terminate pad is a single BB in the form illustrated -// above. -bool WebAssemblyLateEHPrepare::ensureSingleBBTermPads(MachineFunction &MF) { +// Wasm uses 'br_on_exn' instruction to check the tag of an exception. It takes +// exnref type object returned by 'catch', and branches to the destination if it +// matches a given tag. We currently use __cpp_exception symbol to represent the +// tag for all C++ exceptions. +// +// block $l (result i32) +// ... +// ;; exnref $e is on the stack at this point +// br_on_exn $l $e ;; branch to $l with $e's arguments +// ... +// end +// ;; Here we expect the extracted values are on top of the wasm value stack +// ... Handle exception using values ... +// +// br_on_exn takes an exnref object and branches if it matches the given tag. +// There can be multiple br_on_exn instructions if we want to match for another +// tag, but for now we only test for __cpp_exception tag, and if it does not +// match, i.e., it is a foreign exception, we rethrow it. 
+// +// In the destination BB that's the target of br_on_exn, extracted exception +// values (in C++'s case a single i32, which represents an exception pointer) +// are placed on top of the wasm stack. Because we can't model wasm stack in +// LLVM instruction, we use 'extract_exception' pseudo instruction to retrieve +// it. The pseudo instruction will be deleted later. +bool WebAssemblyLateEHPrepare::addExceptionExtraction(MachineFunction &MF) { const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); + auto *EHInfo = MF.getWasmEHFuncInfo(); + SmallVector<MachineInstr *, 16> ExtractInstrs; + SmallVector<MachineInstr *, 8> ToDelete; + for (auto &MBB : MF) { + for (auto &MI : MBB) { + if (MI.getOpcode() == WebAssembly::EXTRACT_EXCEPTION_I32) { + if (MI.getOperand(0).isDead()) + ToDelete.push_back(&MI); + else + ExtractInstrs.push_back(&MI); + } + } + } + bool Changed = !ToDelete.empty() || !ExtractInstrs.empty(); + for (auto *MI : ToDelete) + MI->eraseFromParent(); + if (ExtractInstrs.empty()) + return Changed; - // Find calls to __clang_call_terminate() - SmallVector<MachineInstr *, 8> ClangCallTerminateCalls; - for (auto &MBB : MF) - for (auto &MI : MBB) + // Find terminate pads. 
+ SmallSet<MachineBasicBlock *, 8> TerminatePads; + for (auto &MBB : MF) { + for (auto &MI : MBB) { if (MI.isCall()) { const MachineOperand &CalleeOp = MI.getOperand(0); if (CalleeOp.isGlobal() && CalleeOp.getGlobal()->getName() == WebAssembly::ClangCallTerminateFn) - ClangCallTerminateCalls.push_back(&MI); + TerminatePads.insert(getMatchingEHPad(&MI)); } - - bool Changed = false; - for (auto *Call : ClangCallTerminateCalls) { - MachineBasicBlock *EHPad = getMatchingEHPad(Call); - assert(EHPad && "No matching EH pad for catch"); - - // If it is already the form we want, skip it - if (Call->getParent() == EHPad && - Call->getNextNode()->getOpcode() == WebAssembly::UNREACHABLE) - continue; - - // In case the __clang_call_terminate() call is not in its matching EH pad, - // move the call to the end of EH pad and add an unreachable instruction - // after that. Delete all successors and their children if any, because here - // the program terminates. - Changed = true; - MachineInstr *Catch = &*EHPad->begin(); - // This runs after hoistCatches(), so catch instruction should be at the top - assert(WebAssembly::isCatch(*Catch)); - // Takes the result register of the catch instruction as argument. There may - // have been some other local.set/local.gets in between, but at this point - // we don't care. - Call->getOperand(1).setReg(Catch->getOperand(0).getReg()); - auto InsertPos = std::next(MachineBasicBlock::iterator(Catch)); - EHPad->insert(InsertPos, Call->removeFromParent()); - BuildMI(*EHPad, InsertPos, Call->getDebugLoc(), - TII.get(WebAssembly::UNREACHABLE)); - EHPad->erase(InsertPos, EHPad->end()); - SmallVector<MachineBasicBlock *, 8> Succs(EHPad->succ_begin(), - EHPad->succ_end()); - for (auto *Succ : Succs) - EHPad->removeSuccessor(Succ); - eraseDeadBBsAndChildren(Succs); + } } - return Changed; -} -// In case there are multiple terminate pads, merge them into one for code size. 
-// This runs after ensureSingleBBTermPads() and assumes every terminate pad is a -// single BB. -// In principle this violates EH scope relationship because it can merge -// multiple inner EH scopes, each of which is in different outer EH scope. But -// getEHScopeMembership() function will not be called after this, so it is fine. -bool WebAssemblyLateEHPrepare::mergeTerminatePads(MachineFunction &MF) { - SmallVector<MachineBasicBlock *, 8> TermPads; - for (auto &MBB : MF) - if (WebAssembly::isCatchTerminatePad(MBB)) - TermPads.push_back(&MBB); - if (TermPads.empty()) - return false; - - MachineBasicBlock *UniqueTermPad = TermPads.front(); - for (auto *TermPad : - llvm::make_range(std::next(TermPads.begin()), TermPads.end())) { - SmallVector<MachineBasicBlock *, 2> Preds(TermPad->pred_begin(), - TermPad->pred_end()); - for (auto *Pred : Preds) - Pred->replaceSuccessor(TermPad, UniqueTermPad); - TermPad->eraseFromParent(); + for (auto *Extract : ExtractInstrs) { + MachineBasicBlock *EHPad = getMatchingEHPad(Extract); + assert(EHPad && "No matching EH pad for extract_exception"); + auto CatchPos = EHPad->begin(); + if (CatchPos->isEHLabel()) // EH pad starts with an EH label + ++CatchPos; + MachineInstr *Catch = &*CatchPos; + + if (Catch->getNextNode() != Extract) + EHPad->insert(Catch->getNextNode(), Extract->removeFromParent()); + + // - Before: + // ehpad: + // %exnref:exnref = catch + // %exn:i32 = extract_exception + // ... use exn ... + // + // - After: + // ehpad: + // %exnref:exnref = catch + // br_on_exn %thenbb, $__cpp_exception, %exnref + // br %elsebb + // elsebb: + // rethrow + // thenbb: + // %exn:i32 = extract_exception + // ... use exn ... 
+ unsigned ExnReg = Catch->getOperand(0).getReg(); + auto *ThenMBB = MF.CreateMachineBasicBlock(); + auto *ElseMBB = MF.CreateMachineBasicBlock(); + MF.insert(std::next(MachineFunction::iterator(EHPad)), ElseMBB); + MF.insert(std::next(MachineFunction::iterator(ElseMBB)), ThenMBB); + ThenMBB->splice(ThenMBB->end(), EHPad, Extract, EHPad->end()); + ThenMBB->transferSuccessors(EHPad); + EHPad->addSuccessor(ThenMBB); + EHPad->addSuccessor(ElseMBB); + + DebugLoc DL = Extract->getDebugLoc(); + const char *CPPExnSymbol = MF.createExternalSymbolName("__cpp_exception"); + BuildMI(EHPad, DL, TII.get(WebAssembly::BR_ON_EXN)) + .addMBB(ThenMBB) + .addExternalSymbol(CPPExnSymbol) + .addReg(ExnReg); + BuildMI(EHPad, DL, TII.get(WebAssembly::BR)).addMBB(ElseMBB); + + // When this is a terminate pad with __clang_call_terminate() call, we don't + // rethrow it anymore and call __clang_call_terminate() with a nullptr + // argument, which will call std::terminate(). + // + // - Before: + // ehpad: + // %exnref:exnref = catch + // %exn:i32 = extract_exception + // call @__clang_call_terminate(%exn) + // unreachable + // + // - After: + // ehpad: + // %exnref:exnref = catch + // br_on_exn %thenbb, $__cpp_exception, %exnref + // br %elsebb + // elsebb: + // call @__clang_call_terminate(0) + // unreachable + // thenbb: + // %exn:i32 = extract_exception + // call @__clang_call_terminate(%exn) + // unreachable + if (TerminatePads.count(EHPad)) { + Function *ClangCallTerminateFn = + MF.getFunction().getParent()->getFunction( + WebAssembly::ClangCallTerminateFn); + assert(ClangCallTerminateFn && + "There is no __clang_call_terminate() function"); + BuildMI(ElseMBB, DL, TII.get(WebAssembly::CALL_VOID)) + .addGlobalAddress(ClangCallTerminateFn) + .addImm(0); + BuildMI(ElseMBB, DL, TII.get(WebAssembly::UNREACHABLE)); + + } else { + BuildMI(ElseMBB, DL, TII.get(WebAssembly::RETHROW)).addReg(ExnReg); + if (EHInfo->hasEHPadUnwindDest(EHPad)) + 
ElseMBB->addSuccessor(EHInfo->getEHPadUnwindDest(EHPad)); + } } + return true; } -// Terminate pads are cleanup pads, so they should start with a 'catch_all' -// instruction. But in the Itanium model, when we have a C++ exception object, -// we pass them to __clang_call_terminate function, which calls __cxa_end_catch -// with the passed exception pointer and then std::terminate. This is the reason -// that terminate pads are generated with not a catch_all but a catch -// instruction in clang and earlier llvm passes. Here we append a terminate pad -// with a catch_all after each existing terminate pad so we can also catch -// foreign exceptions. For every terminate pad: -// %exn = catch 0 -// call @__clang_call_terminate(%exn) -// unreachable -// We append this BB right after that: -// catch_all -// call @std::terminate() -// unreachable -bool WebAssemblyLateEHPrepare::addCatchAllTerminatePads(MachineFunction &MF) { - const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); - SmallVector<MachineBasicBlock *, 8> TermPads; - for (auto &MBB : MF) - if (WebAssembly::isCatchTerminatePad(MBB)) - TermPads.push_back(&MBB); - if (TermPads.empty()) +// After the stack is unwound due to a thrown exception, the __stack_pointer +// global can point to an invalid address. This inserts instructions that +// restore __stack_pointer global. 
+bool WebAssemblyLateEHPrepare::restoreStackPointer(MachineFunction &MF) { + const auto *FrameLowering = static_cast<const WebAssemblyFrameLowering *>( + MF.getSubtarget().getFrameLowering()); + if (!FrameLowering->needsPrologForEH(MF)) return false; + bool Changed = false; - Function *StdTerminateFn = - MF.getFunction().getParent()->getFunction(WebAssembly::StdTerminateFn); - assert(StdTerminateFn && "There is no std::terminate() function"); - for (auto *CatchTermPad : TermPads) { - DebugLoc DL = CatchTermPad->findDebugLoc(CatchTermPad->begin()); - auto *CatchAllTermPad = MF.CreateMachineBasicBlock(); - MF.insert(std::next(MachineFunction::iterator(CatchTermPad)), - CatchAllTermPad); - CatchAllTermPad->setIsEHPad(); - BuildMI(CatchAllTermPad, DL, TII.get(WebAssembly::CATCH_ALL)); - BuildMI(CatchAllTermPad, DL, TII.get(WebAssembly::CALL_VOID)) - .addGlobalAddress(StdTerminateFn); - BuildMI(CatchAllTermPad, DL, TII.get(WebAssembly::UNREACHABLE)); + for (auto &MBB : MF) { + if (!MBB.isEHPad()) + continue; + Changed = true; - // Actually this CatchAllTermPad (new terminate pad with a catch_all) is not - // a successor of an existing terminate pad. CatchAllTermPad should have all - // predecessors CatchTermPad has instead. This is a hack to force - // CatchAllTermPad be always sorted right after CatchTermPad; the correct - // predecessor-successor relationships will be restored in CFGStackify pass. - CatchTermPad->addSuccessor(CatchAllTermPad); + // Insert __stack_pointer restoring instructions at the beginning of each EH + // pad, after the catch instruction. Here it is safe to assume that SP32 + // holds the latest value of __stack_pointer, because the only exception for + // this case is when a function uses the red zone, but that only happens + // with leaf functions, and we don't restore __stack_pointer in leaf + // functions anyway. 
+ auto InsertPos = MBB.begin(); + if (InsertPos->isEHLabel()) // EH pad starts with an EH label + ++InsertPos; + if (InsertPos->getOpcode() == WebAssembly::CATCH) + ++InsertPos; + FrameLowering->writeSPToGlobal(WebAssembly::SP32, MF, MBB, InsertPos, + MBB.begin()->getDebugLoc()); } - return true; + return Changed; } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp index c9a3527d3fbd..34a8195ac4b4 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyLowerBrUnless.cpp - Lower br_unless --------------------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp index 0491f71cea7f..960d5134f6e9 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp @@ -1,9 +1,8 @@ //=== WebAssemblyLowerEmscriptenEHSjLj.cpp - Lower exceptions for Emscripten =// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -240,16 +239,16 @@ class WebAssemblyLowerEmscriptenEHSjLj final : public ModulePass { bool EnableEH; // Enable exception handling bool EnableSjLj; // Enable setjmp/longjmp handling - GlobalVariable *ThrewGV; - GlobalVariable *ThrewValueGV; - Function *GetTempRet0Func; - Function *SetTempRet0Func; - Function *ResumeF; - Function *EHTypeIDF; - Function *EmLongjmpF; - Function *EmLongjmpJmpbufF; - Function *SaveSetjmpF; - Function *TestSetjmpF; + GlobalVariable *ThrewGV = nullptr; + GlobalVariable *ThrewValueGV = nullptr; + Function *GetTempRet0Func = nullptr; + Function *SetTempRet0Func = nullptr; + Function *ResumeF = nullptr; + Function *EHTypeIDF = nullptr; + Function *EmLongjmpF = nullptr; + Function *EmLongjmpJmpbufF = nullptr; + Function *SaveSetjmpF = nullptr; + Function *TestSetjmpF = nullptr; // __cxa_find_matching_catch_N functions. // Indexed by the number of clauses in an original landingpad instruction. @@ -282,11 +281,7 @@ public: static char ID; WebAssemblyLowerEmscriptenEHSjLj(bool EnableEH = true, bool EnableSjLj = true) - : ModulePass(ID), EnableEH(EnableEH), EnableSjLj(EnableSjLj), - ThrewGV(nullptr), ThrewValueGV(nullptr), GetTempRet0Func(nullptr), - SetTempRet0Func(nullptr), ResumeF(nullptr), EHTypeIDF(nullptr), - EmLongjmpF(nullptr), EmLongjmpJmpbufF(nullptr), SaveSetjmpF(nullptr), - TestSetjmpF(nullptr) { + : ModulePass(ID), EnableEH(EnableEH), EnableSjLj(EnableSjLj) { EHWhitelistSet.insert(EHWhitelist.begin(), EHWhitelist.end()); } bool runOnModule(Module &M) override; @@ -339,11 +334,12 @@ static bool canThrow(const Value *V) { // which will generate an import and asssumes that it will exist at link time. 
static GlobalVariable *getGlobalVariableI32(Module &M, IRBuilder<> &IRB, const char *Name) { - if (M.getNamedGlobal(Name)) - report_fatal_error(Twine("variable name is reserved: ") + Name); - return new GlobalVariable(M, IRB.getInt32Ty(), false, - GlobalValue::ExternalLinkage, nullptr, Name); + auto* GV = dyn_cast<GlobalVariable>(M.getOrInsertGlobal(Name, IRB.getInt32Ty())); + if (!GV) + report_fatal_error(Twine("unable to create global: ") + Name); + + return GV; } // Simple function name mangler. @@ -433,8 +429,8 @@ Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallOrInvoke *CI) { // No attributes for the callee pointer. ArgAttributes.push_back(AttributeSet()); // Copy the argument attributes from the original - for (unsigned i = 0, e = CI->getNumArgOperands(); i < e; ++i) - ArgAttributes.push_back(InvokeAL.getParamAttributes(i)); + for (unsigned I = 0, E = CI->getNumArgOperands(); I < E; ++I) + ArgAttributes.push_back(InvokeAL.getParamAttributes(I)); // Reconstruct the AttributesList based on the vector we constructed. AttributeList NewCallAL = @@ -446,7 +442,8 @@ Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallOrInvoke *CI) { // Post-invoke // %__THREW__.val = __THREW__; __THREW__ = 0; - Value *Threw = IRB.CreateLoad(ThrewGV, ThrewGV->getName() + ".val"); + Value *Threw = + IRB.CreateLoad(IRB.getInt32Ty(), ThrewGV, ThrewGV->getName() + ".val"); IRB.CreateStore(IRB.getInt32(0), ThrewGV); return Threw; } @@ -488,6 +485,13 @@ bool WebAssemblyLowerEmscriptenEHSjLj::canLongjmp(Module &M, if (CalleeF->isIntrinsic()) return false; + // Attempting to transform inline assembly will result in something like: + // call void @__invoke_void(void ()* asm ...) + // which is invalid because inline assembly blocks do not have addresses + // and can't be passed by pointer. The result is a crash with illegal IR. 
+ if (isa<InlineAsm>(Callee)) + return false; + // The reason we include malloc/free here is to exclude the malloc/free // calls generated in setjmp prep / cleanup routines. Function *SetjmpF = M.getFunction("setjmp"); @@ -549,8 +553,8 @@ void WebAssemblyLowerEmscriptenEHSjLj::wrapTestSetjmp( BasicBlock *ElseBB1 = BasicBlock::Create(C, "if.else1", F); BasicBlock *EndBB1 = BasicBlock::Create(C, "if.end", F); Value *ThrewCmp = IRB.CreateICmpNE(Threw, IRB.getInt32(0)); - Value *ThrewValue = - IRB.CreateLoad(ThrewValueGV, ThrewValueGV->getName() + ".val"); + Value *ThrewValue = IRB.CreateLoad(IRB.getInt32Ty(), ThrewValueGV, + ThrewValueGV->getName() + ".val"); Value *ThrewValueCmp = IRB.CreateICmpNE(ThrewValue, IRB.getInt32(0)); Value *Cmp1 = IRB.CreateAnd(ThrewCmp, ThrewValueCmp, "cmp1"); IRB.CreateCondBr(Cmp1, ThenBB1, ElseBB1); @@ -562,8 +566,8 @@ void WebAssemblyLowerEmscriptenEHSjLj::wrapTestSetjmp( BasicBlock *EndBB2 = BasicBlock::Create(C, "if.end2", F); Value *ThrewInt = IRB.CreateIntToPtr(Threw, Type::getInt32PtrTy(C), Threw->getName() + ".i32p"); - Value *LoadedThrew = - IRB.CreateLoad(ThrewInt, ThrewInt->getName() + ".loaded"); + Value *LoadedThrew = IRB.CreateLoad(IRB.getInt32Ty(), ThrewInt, + ThrewInt->getName() + ".loaded"); Value *ThenLabel = IRB.CreateCall( TestSetjmpF, {LoadedThrew, SetjmpTable, SetjmpTableSize}, "label"); Value *Cmp2 = IRB.CreateICmpEQ(ThenLabel, IRB.getInt32(0)); @@ -606,11 +610,11 @@ void WebAssemblyLowerEmscriptenEHSjLj::rebuildSSA(Function &F) { ++UI; SSA.Initialize(I.getType(), I.getName()); SSA.AddAvailableValue(&BB, &I); - Instruction *User = cast<Instruction>(U.getUser()); + auto *User = cast<Instruction>(U.getUser()); if (User->getParent() == &BB) continue; - if (PHINode *UserPN = dyn_cast<PHINode>(User)) + if (auto *UserPN = dyn_cast<PHINode>(User)) if (UserPN->getIncomingBlock(U) == &BB) continue; @@ -769,7 +773,8 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runEHOnFunction(Function &F) { // This can't throw, and we don't need 
this invoke, just replace it with a // call+branch SmallVector<Value *, 16> Args(II->arg_begin(), II->arg_end()); - CallInst *NewCall = IRB.CreateCall(II->getCalledValue(), Args); + CallInst *NewCall = + IRB.CreateCall(II->getFunctionType(), II->getCalledValue(), Args); NewCall->takeName(II); NewCall->setCallingConv(II->getCallingConv()); NewCall->setDebugLoc(II->getDebugLoc()); @@ -836,15 +841,15 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runEHOnFunction(Function &F) { for (LandingPadInst *LPI : LandingPads) { IRB.SetInsertPoint(LPI); SmallVector<Value *, 16> FMCArgs; - for (unsigned i = 0, e = LPI->getNumClauses(); i < e; ++i) { - Constant *Clause = LPI->getClause(i); + for (unsigned I = 0, E = LPI->getNumClauses(); I < E; ++I) { + Constant *Clause = LPI->getClause(I); // As a temporary workaround for the lack of aggregate varargs support // in the interface between JS and wasm, break out filter operands into // their component elements. - if (LPI->isFilter(i)) { + if (LPI->isFilter(I)) { auto *ATy = cast<ArrayType>(Clause->getType()); - for (unsigned j = 0, e = ATy->getNumElements(); j < e; ++j) { - Value *EV = IRB.CreateExtractValue(Clause, makeArrayRef(j), "filter"); + for (unsigned J = 0, E = ATy->getNumElements(); J < E; ++J) { + Value *EV = IRB.CreateExtractValue(Clause, makeArrayRef(J), "filter"); FMCArgs.push_back(EV); } } else @@ -954,8 +959,8 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) { BBs.push_back(&BB); // BBs.size() will change within the loop, so we query it every time - for (unsigned i = 0; i < BBs.size(); i++) { - BasicBlock *BB = BBs[i]; + for (unsigned I = 0; I < BBs.size(); I++) { + BasicBlock *BB = BBs[I]; for (Instruction &I : *BB) { assert(!isa<InvokeInst>(&I)); auto *CI = dyn_cast<CallInst>(&I); @@ -1028,9 +1033,9 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) { // switch case). 0 means a longjmp that is not ours to handle, needs a // rethrow. 
Otherwise the index is the same as the index in P+1 (to avoid // 0). - for (unsigned i = 0; i < SetjmpRetPHIs.size(); i++) { - SI->addCase(IRB.getInt32(i + 1), SetjmpRetPHIs[i]->getParent()); - SetjmpRetPHIs[i]->addIncoming(LongjmpResult, EndBB); + for (unsigned I = 0; I < SetjmpRetPHIs.size(); I++) { + SI->addCase(IRB.getInt32(I + 1), SetjmpRetPHIs[I]->getParent()); + SetjmpRetPHIs[I]->addIncoming(LongjmpResult, EndBB); } // We are splitting the block here, and must continue to find other calls @@ -1077,7 +1082,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) { Use &U = *UI; // Increment the iterator before removing the use from the list. ++UI; - if (Instruction *I = dyn_cast<Instruction>(U.getUser())) + if (auto *I = dyn_cast<Instruction>(U.getUser())) if (I->getParent() != &EntryBB) SetjmpTableSSA.RewriteUse(U); } @@ -1085,7 +1090,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) { UI != UE;) { Use &U = *UI; ++UI; - if (Instruction *I = dyn_cast<Instruction>(U.getUser())) + if (auto *I = dyn_cast<Instruction>(U.getUser())) if (I->getParent() != &EntryBB) SetjmpTableSizeSSA.RewriteUse(U); } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp index 84c877cb8d02..494d3fadbc8c 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyLowerGlobalDtors.cpp - Lower @llvm.global_dtors --------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -62,7 +61,7 @@ bool LowerGlobalDtors::runOnModule(Module &M) { LLVM_DEBUG(dbgs() << "********** Lower Global Destructors **********\n"); GlobalVariable *GV = M.getGlobalVariable("llvm.global_dtors"); - if (!GV) + if (!GV || !GV->hasInitializer()) return false; const ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer()); @@ -70,7 +69,7 @@ bool LowerGlobalDtors::runOnModule(Module &M) { return false; // Sanity-check @llvm.global_dtor's type. - StructType *ETy = dyn_cast<StructType>(InitList->getType()->getElementType()); + auto *ETy = dyn_cast<StructType>(InitList->getType()->getElementType()); if (!ETy || ETy->getNumElements() != 3 || !ETy->getTypeAtIndex(0U)->isIntegerTy() || !ETy->getTypeAtIndex(1U)->isPointerTy() || @@ -81,11 +80,11 @@ bool LowerGlobalDtors::runOnModule(Module &M) { // associated symbol. std::map<uint16_t, MapVector<Constant *, std::vector<Constant *>>> DtorFuncs; for (Value *O : InitList->operands()) { - ConstantStruct *CS = dyn_cast<ConstantStruct>(O); + auto *CS = dyn_cast<ConstantStruct>(O); if (!CS) continue; // Malformed. - ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0)); + auto *Priority = dyn_cast<ConstantInt>(CS->getOperand(0)); if (!Priority) continue; // Malformed. 
uint16_t PriorityValue = Priority->getLimitedValue(UINT16_MAX); @@ -110,10 +109,11 @@ bool LowerGlobalDtors::runOnModule(Module &M) { FunctionType::get(Type::getVoidTy(C), AtExitFuncArgs, /*isVarArg=*/false); - Type *AtExitArgs[] = {PointerType::get(AtExitFuncTy, 0), VoidStar, VoidStar}; - FunctionType *AtExitTy = FunctionType::get(Type::getInt32Ty(C), AtExitArgs, - /*isVarArg=*/false); - Constant *AtExit = M.getOrInsertFunction("__cxa_atexit", AtExitTy); + FunctionCallee AtExit = M.getOrInsertFunction( + "__cxa_atexit", + FunctionType::get(Type::getInt32Ty(C), + {PointerType::get(AtExitFuncTy, 0), VoidStar, VoidStar}, + /*isVarArg=*/false)); // Declare __dso_local. Constant *DsoHandle = M.getNamedValue("__dso_handle"); @@ -143,13 +143,13 @@ bool LowerGlobalDtors::runOnModule(Module &M) { : Twine()), &M); BasicBlock *BB = BasicBlock::Create(C, "body", CallDtors); + FunctionType *VoidVoid = FunctionType::get(Type::getVoidTy(C), + /*isVarArg=*/false); for (auto Dtor : AssociatedAndMore.second) - CallInst::Create(Dtor, "", BB); + CallInst::Create(VoidVoid, Dtor, "", BB); ReturnInst::Create(C, BB); - FunctionType *VoidVoid = FunctionType::get(Type::getVoidTy(C), - /*isVarArg=*/false); Function *RegisterCallDtors = Function::Create( VoidVoid, Function::PrivateLinkage, "register_call_dtors" + diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp index fa862fbaa634..288b991ae2c5 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp @@ -1,9 +1,8 @@ // WebAssemblyMCInstLower.cpp - Convert WebAssembly MachineInstr to an MCInst // // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -17,7 +16,7 @@ #include "WebAssemblyAsmPrinter.h" #include "WebAssemblyMachineFunctionInfo.h" #include "WebAssemblyRuntimeLibcallSignatures.h" -#include "WebAssemblyUtilities.h" +#include "MCTargetDesc/WebAssemblyMCTargetDesc.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/IR/Constants.h" @@ -37,7 +36,7 @@ using namespace llvm; // This disables the removal of registers when lowering into MC, as required // by some current tests. -static cl::opt<bool> +cl::opt<bool> WasmKeepRegisters("wasm-keep-registers", cl::Hidden, cl::desc("WebAssembly: output stack registers in" " instruction output for test purposes only."), @@ -48,7 +47,7 @@ static void removeRegisterOperands(const MachineInstr *MI, MCInst &OutMI); MCSymbol * WebAssemblyMCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const { const GlobalValue *Global = MO.getGlobal(); - MCSymbolWasm *WasmSym = cast<MCSymbolWasm>(Printer.getSymbol(Global)); + auto *WasmSym = cast<MCSymbolWasm>(Printer.getSymbol(Global)); if (const auto *FuncTy = dyn_cast<FunctionType>(Global->getValueType())) { const MachineFunction &MF = *MO.getParent()->getParent()->getParent(); @@ -57,9 +56,9 @@ WebAssemblyMCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const { SmallVector<MVT, 1> ResultMVTs; SmallVector<MVT, 4> ParamMVTs; - ComputeSignatureVTs(FuncTy, CurrentFunc, TM, ParamMVTs, ResultMVTs); + computeSignatureVTs(FuncTy, CurrentFunc, TM, ParamMVTs, ResultMVTs); - auto Signature = SignatureFromMVTs(ResultMVTs, ParamMVTs); + auto Signature = signatureFromMVTs(ResultMVTs, ParamMVTs); WasmSym->setSignature(Signature.get()); Printer.addSignature(std::move(Signature)); WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION); @@ -71,20 +70,23 @@ 
WebAssemblyMCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const { MCSymbol *WebAssemblyMCInstLower::GetExternalSymbolSymbol( const MachineOperand &MO) const { const char *Name = MO.getSymbolName(); - MCSymbolWasm *WasmSym = - cast<MCSymbolWasm>(Printer.GetExternalSymbolSymbol(Name)); + auto *WasmSym = cast<MCSymbolWasm>(Printer.GetExternalSymbolSymbol(Name)); const WebAssemblySubtarget &Subtarget = Printer.getSubtarget(); - // Except for the two exceptions (__stack_pointer and __cpp_exception), all - // other external symbols used by CodeGen are functions. It's OK to hardcode - // knowledge of specific symbols here; this method is precisely there for - // fetching the signatures of known Clang-provided symbols. - if (strcmp(Name, "__stack_pointer") == 0) { + // Except for certain known symbols, all symbols used by CodeGen are + // functions. It's OK to hardcode knowledge of specific symbols here; this + // method is precisely there for fetching the signatures of known + // Clang-provided symbols. + if (strcmp(Name, "__stack_pointer") == 0 || strcmp(Name, "__tls_base") == 0 || + strcmp(Name, "__memory_base") == 0 || strcmp(Name, "__table_base") == 0 || + strcmp(Name, "__tls_size") == 0) { + bool Mutable = + strcmp(Name, "__stack_pointer") == 0 || strcmp(Name, "__tls_base") == 0; WasmSym->setType(wasm::WASM_SYMBOL_TYPE_GLOBAL); WasmSym->setGlobalType(wasm::WasmGlobalType{ uint8_t(Subtarget.hasAddr64() ? 
wasm::WASM_TYPE_I64 : wasm::WASM_TYPE_I32), - true}); + Mutable}); return WasmSym; } @@ -110,7 +112,7 @@ MCSymbol *WebAssemblyMCInstLower::GetExternalSymbolSymbol( : wasm::ValType::I32); } else { // Function symbols WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION); - GetLibcallSignature(Subtarget, Name, Returns, Params); + getLibcallSignature(Subtarget, Name, Returns, Params); } auto Signature = make_unique<wasm::WasmSignature>(std::move(Returns), std::move(Params)); @@ -120,27 +122,42 @@ MCSymbol *WebAssemblyMCInstLower::GetExternalSymbolSymbol( return WasmSym; } -MCOperand WebAssemblyMCInstLower::LowerSymbolOperand(MCSymbol *Sym, - int64_t Offset, - bool IsFunc, bool IsGlob, - bool IsEvent) const { - MCSymbolRefExpr::VariantKind VK = - IsFunc ? MCSymbolRefExpr::VK_WebAssembly_FUNCTION - : IsGlob ? MCSymbolRefExpr::VK_WebAssembly_GLOBAL - : IsEvent ? MCSymbolRefExpr::VK_WebAssembly_EVENT - : MCSymbolRefExpr::VK_None; +MCOperand WebAssemblyMCInstLower::lowerSymbolOperand(const MachineOperand &MO, + MCSymbol *Sym) const { + MCSymbolRefExpr::VariantKind Kind = MCSymbolRefExpr::VK_None; + unsigned TargetFlags = MO.getTargetFlags(); + + switch (TargetFlags) { + case WebAssemblyII::MO_NO_FLAG: + break; + case WebAssemblyII::MO_GOT: + Kind = MCSymbolRefExpr::VK_GOT; + break; + case WebAssemblyII::MO_MEMORY_BASE_REL: + Kind = MCSymbolRefExpr::VK_WASM_MBREL; + break; + case WebAssemblyII::MO_TABLE_BASE_REL: + Kind = MCSymbolRefExpr::VK_WASM_TBREL; + break; + default: + llvm_unreachable("Unknown target flag on GV operand"); + } - const MCExpr *Expr = MCSymbolRefExpr::create(Sym, VK, Ctx); + const MCExpr *Expr = MCSymbolRefExpr::create(Sym, Kind, Ctx); - if (Offset != 0) { - if (IsFunc) + if (MO.getOffset() != 0) { + const auto *WasmSym = cast<MCSymbolWasm>(Sym); + if (TargetFlags == WebAssemblyII::MO_GOT) + report_fatal_error("GOT symbol references do not support offsets"); + if (WasmSym->isFunction()) report_fatal_error("Function addresses with offsets not supported"); - 
if (IsGlob) + if (WasmSym->isGlobal()) report_fatal_error("Global indexes with offsets not supported"); - if (IsEvent) + if (WasmSym->isEvent()) report_fatal_error("Event indexes with offsets not supported"); - Expr = - MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(Offset, Ctx), Ctx); + + Expr = MCBinaryExpr::createAdd( + Expr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx); } return MCOperand::createExpr(Expr); @@ -161,13 +178,13 @@ static wasm::ValType getType(const TargetRegisterClass *RC) { llvm_unreachable("Unexpected register class"); } -void WebAssemblyMCInstLower::Lower(const MachineInstr *MI, +void WebAssemblyMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const { OutMI.setOpcode(MI->getOpcode()); const MCInstrDesc &Desc = MI->getDesc(); - for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { - const MachineOperand &MO = MI->getOperand(i); + for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) { + const MachineOperand &MO = MI->getOperand(I); MCOperand MCOp; switch (MO.getType()) { @@ -188,8 +205,8 @@ void WebAssemblyMCInstLower::Lower(const MachineInstr *MI, break; } case MachineOperand::MO_Immediate: - if (i < Desc.NumOperands) { - const MCOperandInfo &Info = Desc.OpInfo[i]; + if (I < Desc.NumOperands) { + const MCOperandInfo &Info = Desc.OpInfo[I]; if (Info.OperandType == WebAssembly::OPERAND_TYPEINDEX) { MCSymbol *Sym = Printer.createTempSymbol("typeindex"); @@ -206,10 +223,10 @@ void WebAssemblyMCInstLower::Lower(const MachineInstr *MI, // call_indirect instructions have a callee operand at the end which // doesn't count as a param. 
- if (WebAssembly::isCallIndirect(*MI)) + if (WebAssembly::isCallIndirect(MI->getOpcode())) Params.pop_back(); - MCSymbolWasm *WasmSym = cast<MCSymbolWasm>(Sym); + auto *WasmSym = cast<MCSymbolWasm>(Sym); auto Signature = make_unique<wasm::WasmSignature>(std::move(Returns), std::move(Params)); WasmSym->setSignature(Signature.get()); @@ -217,7 +234,7 @@ void WebAssemblyMCInstLower::Lower(const MachineInstr *MI, WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION); const MCExpr *Expr = MCSymbolRefExpr::create( - WasmSym, MCSymbolRefExpr::VK_WebAssembly_TYPEINDEX, Ctx); + WasmSym, MCSymbolRefExpr::VK_WASM_TYPEINDEX, Ctx); MCOp = MCOperand::createExpr(Expr); break; } @@ -237,30 +254,21 @@ void WebAssemblyMCInstLower::Lower(const MachineInstr *MI, break; } case MachineOperand::MO_GlobalAddress: - assert(MO.getTargetFlags() == WebAssemblyII::MO_NO_FLAG && - "WebAssembly does not use target flags on GlobalAddresses"); - MCOp = LowerSymbolOperand(GetGlobalAddressSymbol(MO), MO.getOffset(), - MO.getGlobal()->getValueType()->isFunctionTy(), - false, false); + MCOp = lowerSymbolOperand(MO, GetGlobalAddressSymbol(MO)); break; case MachineOperand::MO_ExternalSymbol: // The target flag indicates whether this is a symbol for a // variable or a function. - assert((MO.getTargetFlags() & ~WebAssemblyII::MO_SYMBOL_MASK) == 0 && + assert(MO.getTargetFlags() == 0 && "WebAssembly uses only symbol flags on ExternalSymbols"); - MCOp = LowerSymbolOperand( - GetExternalSymbolSymbol(MO), /*Offset=*/0, - (MO.getTargetFlags() & WebAssemblyII::MO_SYMBOL_FUNCTION) != 0, - (MO.getTargetFlags() & WebAssemblyII::MO_SYMBOL_GLOBAL) != 0, - (MO.getTargetFlags() & WebAssemblyII::MO_SYMBOL_EVENT) != 0); + MCOp = lowerSymbolOperand(MO, GetExternalSymbolSymbol(MO)); break; case MachineOperand::MO_MCSymbol: // This is currently used only for LSDA symbols (GCC_except_table), // because global addresses or other external symbols are handled above. 
assert(MO.getTargetFlags() == 0 && "WebAssembly does not use target flags on MCSymbol"); - MCOp = LowerSymbolOperand(MO.getMCSymbol(), /*Offset=*/0, false, false, - false); + MCOp = lowerSymbolOperand(MO, MO.getMCSymbol()); break; } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h index fa7a0ea61b3b..2c375a01a7f5 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h @@ -1,9 +1,8 @@ //===-- WebAssemblyMCInstLower.h - Lower MachineInstr to MCInst -*- C++ -*-===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -33,13 +32,12 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyMCInstLower { MCSymbol *GetGlobalAddressSymbol(const MachineOperand &MO) const; MCSymbol *GetExternalSymbolSymbol(const MachineOperand &MO) const; - MCOperand LowerSymbolOperand(MCSymbol *Sym, int64_t Offset, bool IsFunc, - bool IsGlob, bool IsEvent) const; + MCOperand lowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const; public: WebAssemblyMCInstLower(MCContext &ctx, WebAssemblyAsmPrinter &printer) : Ctx(ctx), Printer(printer) {} - void Lower(const MachineInstr *MI, MCInst &OutMI) const; + void lower(const MachineInstr *MI, MCInst &OutMI) const; }; } // end namespace llvm diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp index 0157af0f8510..d31c1226bfdb 100644 --- 
a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp @@ -1,9 +1,8 @@ //=- WebAssemblyMachineFunctionInfo.cpp - WebAssembly Machine Function Info -=// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -19,7 +18,7 @@ #include "llvm/CodeGen/Analysis.h" using namespace llvm; -WebAssemblyFunctionInfo::~WebAssemblyFunctionInfo() {} +WebAssemblyFunctionInfo::~WebAssemblyFunctionInfo() = default; // anchor. void WebAssemblyFunctionInfo::initWARegs() { assert(WARegs.empty()); @@ -27,7 +26,7 @@ void WebAssemblyFunctionInfo::initWARegs() { WARegs.resize(MF.getRegInfo().getNumVirtRegs(), Reg); } -void llvm::ComputeLegalValueVTs(const Function &F, const TargetMachine &TM, +void llvm::computeLegalValueVTs(const Function &F, const TargetMachine &TM, Type *Ty, SmallVectorImpl<MVT> &ValueVTs) { const DataLayout &DL(F.getParent()->getDataLayout()); const WebAssemblyTargetLowering &TLI = @@ -38,16 +37,16 @@ void llvm::ComputeLegalValueVTs(const Function &F, const TargetMachine &TM, for (EVT VT : VTs) { unsigned NumRegs = TLI.getNumRegisters(F.getContext(), VT); MVT RegisterVT = TLI.getRegisterType(F.getContext(), VT); - for (unsigned i = 0; i != NumRegs; ++i) + for (unsigned I = 0; I != NumRegs; ++I) ValueVTs.push_back(RegisterVT); } } -void llvm::ComputeSignatureVTs(const FunctionType *Ty, const Function &F, +void llvm::computeSignatureVTs(const FunctionType *Ty, const Function &F, const TargetMachine &TM, SmallVectorImpl<MVT> &Params, SmallVectorImpl<MVT> &Results) { - 
ComputeLegalValueVTs(F, TM, Ty->getReturnType(), Results); + computeLegalValueVTs(F, TM, Ty->getReturnType(), Results); MVT PtrVT = MVT::getIntegerVT(TM.createDataLayout().getPointerSizeInBits()); if (Results.size() > 1) { @@ -59,22 +58,35 @@ void llvm::ComputeSignatureVTs(const FunctionType *Ty, const Function &F, } for (auto *Param : Ty->params()) - ComputeLegalValueVTs(F, TM, Param, Params); + computeLegalValueVTs(F, TM, Param, Params); if (Ty->isVarArg()) Params.push_back(PtrVT); } -void llvm::ValTypesFromMVTs(const ArrayRef<MVT> &In, +void llvm::valTypesFromMVTs(const ArrayRef<MVT> &In, SmallVectorImpl<wasm::ValType> &Out) { for (MVT Ty : In) Out.push_back(WebAssembly::toValType(Ty)); } std::unique_ptr<wasm::WasmSignature> -llvm::SignatureFromMVTs(const SmallVectorImpl<MVT> &Results, +llvm::signatureFromMVTs(const SmallVectorImpl<MVT> &Results, const SmallVectorImpl<MVT> &Params) { auto Sig = make_unique<wasm::WasmSignature>(); - ValTypesFromMVTs(Results, Sig->Returns); - ValTypesFromMVTs(Params, Sig->Params); + valTypesFromMVTs(Results, Sig->Returns); + valTypesFromMVTs(Params, Sig->Params); return Sig; } + +yaml::WebAssemblyFunctionInfo::WebAssemblyFunctionInfo( + const llvm::WebAssemblyFunctionInfo &MFI) + : CFGStackified(MFI.isCFGStackified()) {} + +void yaml::WebAssemblyFunctionInfo::mappingImpl(yaml::IO &YamlIO) { + MappingTraits<WebAssemblyFunctionInfo>::mapping(YamlIO, *this); +} + +void WebAssemblyFunctionInfo::initializeBaseYamlFields( + const yaml::WebAssemblyFunctionInfo &YamlMFI) { + CFGStackified = YamlMFI.CFGStackified; +} diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h index 4be4beb85d04..4b9ba491dee6 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h @@ -1,9 +1,8 @@ // WebAssemblyMachineFunctionInfo.h-WebAssembly machine function 
info-*- C++ -*- // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -18,11 +17,16 @@ #include "MCTargetDesc/WebAssemblyMCTargetDesc.h" #include "llvm/BinaryFormat/Wasm.h" +#include "llvm/CodeGen/MIRYamlMapping.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/MC/MCSymbolWasm.h" namespace llvm { +namespace yaml { +struct WebAssemblyFunctionInfo; +} + /// This class is derived from MachineFunctionInfo and contains private /// WebAssembly-specific information for each MachineFunction. class WebAssemblyFunctionInfo final : public MachineFunctionInfo { @@ -52,9 +56,13 @@ class WebAssemblyFunctionInfo final : public MachineFunctionInfo { // overaligned values on the user stack. unsigned BasePtrVreg = -1U; + // Function properties. + bool CFGStackified = false; + public: explicit WebAssemblyFunctionInfo(MachineFunction &MF) : MF(MF) {} ~WebAssemblyFunctionInfo() override; + void initializeBaseYamlFields(const yaml::WebAssemblyFunctionInfo &YamlMFI); void addParam(MVT VT) { Params.push_back(VT); } const std::vector<MVT> &getParams() const { return Params; } @@ -118,24 +126,47 @@ public: assert(Reg & INT32_MIN); return Reg & INT32_MAX; } + + bool isCFGStackified() const { return CFGStackified; } + void setCFGStackified(bool Value = true) { CFGStackified = Value; } }; -void ComputeLegalValueVTs(const Function &F, const TargetMachine &TM, Type *Ty, +void computeLegalValueVTs(const Function &F, const TargetMachine &TM, Type *Ty, SmallVectorImpl<MVT> &ValueVTs); // Compute the signature for a given FunctionType (Ty). 
Note that it's not the // signature for F (F is just used to get varous context) -void ComputeSignatureVTs(const FunctionType *Ty, const Function &F, +void computeSignatureVTs(const FunctionType *Ty, const Function &F, const TargetMachine &TM, SmallVectorImpl<MVT> &Params, SmallVectorImpl<MVT> &Results); -void ValTypesFromMVTs(const ArrayRef<MVT> &In, +void valTypesFromMVTs(const ArrayRef<MVT> &In, SmallVectorImpl<wasm::ValType> &Out); std::unique_ptr<wasm::WasmSignature> -SignatureFromMVTs(const SmallVectorImpl<MVT> &Results, +signatureFromMVTs(const SmallVectorImpl<MVT> &Results, const SmallVectorImpl<MVT> &Params); +namespace yaml { + +struct WebAssemblyFunctionInfo final : public yaml::MachineFunctionInfo { + bool CFGStackified = false; + + WebAssemblyFunctionInfo() = default; + WebAssemblyFunctionInfo(const llvm::WebAssemblyFunctionInfo &MFI); + + void mappingImpl(yaml::IO &YamlIO) override; + ~WebAssemblyFunctionInfo() = default; +}; + +template <> struct MappingTraits<WebAssemblyFunctionInfo> { + static void mapping(IO &YamlIO, WebAssemblyFunctionInfo &MFI) { + YamlIO.mapOptional("isCFGStackified", MFI.CFGStackified, false); + } +}; + +} // end namespace yaml + } // end namespace llvm #endif diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp index c4b5e96db0c7..7ac0511c28b0 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp @@ -1,9 +1,8 @@ //== WebAssemblyMemIntrinsicResults.cpp - Optimize memory intrinsic results ==// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -82,7 +81,7 @@ FunctionPass *llvm::createWebAssemblyMemIntrinsicResults() { } // Replace uses of FromReg with ToReg if they are dominated by MI. -static bool ReplaceDominatedUses(MachineBasicBlock &MBB, MachineInstr &MI, +static bool replaceDominatedUses(MachineBasicBlock &MBB, MachineInstr &MI, unsigned FromReg, unsigned ToReg, const MachineRegisterInfo &MRI, MachineDominatorTree &MDT, @@ -157,10 +156,10 @@ static bool optimizeCall(MachineBasicBlock &MBB, MachineInstr &MI, return false; StringRef Name(Op1.getSymbolName()); - bool callReturnsInput = Name == TLI.getLibcallName(RTLIB::MEMCPY) || + bool CallReturnsInput = Name == TLI.getLibcallName(RTLIB::MEMCPY) || Name == TLI.getLibcallName(RTLIB::MEMMOVE) || Name == TLI.getLibcallName(RTLIB::MEMSET); - if (!callReturnsInput) + if (!CallReturnsInput) return false; LibFunc Func; @@ -172,7 +171,7 @@ static bool optimizeCall(MachineBasicBlock &MBB, MachineInstr &MI, if (MRI.getRegClass(FromReg) != MRI.getRegClass(ToReg)) report_fatal_error("Memory Intrinsic results: call to builtin function " "with wrong signature, from/to mismatch"); - return ReplaceDominatedUses(MBB, MI, FromReg, ToReg, MRI, MDT, LIS); + return replaceDominatedUses(MBB, MI, FromReg, ToReg, MRI, MDT, LIS); } bool WebAssemblyMemIntrinsicResults::runOnMachineFunction(MachineFunction &MF) { @@ -182,11 +181,11 @@ bool WebAssemblyMemIntrinsicResults::runOnMachineFunction(MachineFunction &MF) { }); MachineRegisterInfo &MRI = MF.getRegInfo(); - MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>(); + auto &MDT = getAnalysis<MachineDominatorTree>(); const WebAssemblyTargetLowering &TLI = *MF.getSubtarget<WebAssemblySubtarget>().getTargetLowering(); const auto &LibInfo = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); - LiveIntervals &LIS = getAnalysis<LiveIntervals>(); + auto &LIS = 
getAnalysis<LiveIntervals>(); bool Changed = false; // We don't preserve SSA form. @@ -201,8 +200,8 @@ bool WebAssemblyMemIntrinsicResults::runOnMachineFunction(MachineFunction &MF) { switch (MI.getOpcode()) { default: break; - case WebAssembly::CALL_I32: - case WebAssembly::CALL_I64: + case WebAssembly::CALL_i32: + case WebAssembly::CALL_i64: Changed |= optimizeCall(MBB, MI, MRI, MDT, LIS, TLI, LibInfo); break; } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp index 3d0a15244ee0..8c7c3305c201 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp @@ -1,9 +1,8 @@ //===--- WebAssemblyOptimizeLiveIntervals.cpp - LiveInterval processing ---===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -72,7 +71,7 @@ bool WebAssemblyOptimizeLiveIntervals::runOnMachineFunction( << MF.getName() << '\n'); MachineRegisterInfo &MRI = MF.getRegInfo(); - LiveIntervals &LIS = getAnalysis<LiveIntervals>(); + auto &LIS = getAnalysis<LiveIntervals>(); // We don't preserve SSA form. MRI.leaveSSA(); @@ -81,8 +80,8 @@ bool WebAssemblyOptimizeLiveIntervals::runOnMachineFunction( // Split multiple-VN LiveIntervals into multiple LiveIntervals. 
SmallVector<LiveInterval *, 4> SplitLIs; - for (unsigned i = 0, e = MRI.getNumVirtRegs(); i < e; ++i) { - unsigned Reg = TargetRegisterInfo::index2VirtReg(i); + for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I) { + unsigned Reg = TargetRegisterInfo::index2VirtReg(I); if (MRI.reg_nodbg_empty(Reg)) continue; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp index 2c018d0785a7..d20352259e07 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyOptimizeReturned.cpp - Optimize "returned" attributes --===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -37,11 +36,11 @@ class OptimizeReturned final : public FunctionPass, bool runOnFunction(Function &F) override; - DominatorTree *DT; + DominatorTree *DT = nullptr; public: static char ID; - OptimizeReturned() : FunctionPass(ID), DT(nullptr) {} + OptimizeReturned() : FunctionPass(ID) {} void visitCallSite(CallSite CS); }; @@ -57,10 +56,10 @@ FunctionPass *llvm::createWebAssemblyOptimizeReturned() { } void OptimizeReturned::visitCallSite(CallSite CS) { - for (unsigned i = 0, e = CS.getNumArgOperands(); i < e; ++i) - if (CS.paramHasAttr(i, Attribute::Returned)) { + for (unsigned I = 0, E = CS.getNumArgOperands(); I < E; ++I) + if (CS.paramHasAttr(I, Attribute::Returned)) { Instruction *Inst = CS.getInstruction(); - Value *Arg = CS.getArgOperand(i); + Value *Arg = CS.getArgOperand(I); // Ignore constants, globals, undef, etc. if (isa<Constant>(Arg)) continue; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp index 2dfd85953f14..e11cdeaa0e79 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyPeephole.cpp - WebAssembly Peephole Optimiztions -------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -58,7 +57,7 @@ FunctionPass *llvm::createWebAssemblyPeephole() { } /// If desirable, rewrite NewReg to a drop register. 
-static bool MaybeRewriteToDrop(unsigned OldReg, unsigned NewReg, +static bool maybeRewriteToDrop(unsigned OldReg, unsigned NewReg, MachineOperand &MO, WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI) { bool Changed = false; @@ -72,7 +71,7 @@ static bool MaybeRewriteToDrop(unsigned OldReg, unsigned NewReg, return Changed; } -static bool MaybeRewriteToFallthrough(MachineInstr &MI, MachineBasicBlock &MBB, +static bool maybeRewriteToFallthrough(MachineInstr &MI, MachineBasicBlock &MBB, const MachineFunction &MF, WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI, @@ -129,8 +128,8 @@ bool WebAssemblyPeephole::runOnMachineFunction(MachineFunction &MF) { switch (MI.getOpcode()) { default: break; - case WebAssembly::CALL_I32: - case WebAssembly::CALL_I64: { + case WebAssembly::CALL_i32: + case WebAssembly::CALL_i64: { MachineOperand &Op1 = MI.getOperand(1); if (Op1.isSymbol()) { StringRef Name(Op1.getSymbolName()); @@ -150,7 +149,7 @@ bool WebAssemblyPeephole::runOnMachineFunction(MachineFunction &MF) { if (MRI.getRegClass(NewReg) != MRI.getRegClass(OldReg)) report_fatal_error("Peephole: call to builtin function with " "wrong signature, from/to mismatch"); - Changed |= MaybeRewriteToDrop(OldReg, NewReg, MO, MFI, MRI); + Changed |= maybeRewriteToDrop(OldReg, NewReg, MO, MFI, MRI); } } } @@ -158,57 +157,57 @@ bool WebAssemblyPeephole::runOnMachineFunction(MachineFunction &MF) { } // Optimize away an explicit void return at the end of the function. 
case WebAssembly::RETURN_I32: - Changed |= MaybeRewriteToFallthrough( + Changed |= maybeRewriteToFallthrough( MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_I32, WebAssembly::COPY_I32); break; case WebAssembly::RETURN_I64: - Changed |= MaybeRewriteToFallthrough( + Changed |= maybeRewriteToFallthrough( MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_I64, WebAssembly::COPY_I64); break; case WebAssembly::RETURN_F32: - Changed |= MaybeRewriteToFallthrough( + Changed |= maybeRewriteToFallthrough( MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_F32, WebAssembly::COPY_F32); break; case WebAssembly::RETURN_F64: - Changed |= MaybeRewriteToFallthrough( + Changed |= maybeRewriteToFallthrough( MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_F64, WebAssembly::COPY_F64); break; case WebAssembly::RETURN_v16i8: - Changed |= MaybeRewriteToFallthrough( + Changed |= maybeRewriteToFallthrough( MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v16i8, WebAssembly::COPY_V128); break; case WebAssembly::RETURN_v8i16: - Changed |= MaybeRewriteToFallthrough( + Changed |= maybeRewriteToFallthrough( MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v8i16, WebAssembly::COPY_V128); break; case WebAssembly::RETURN_v4i32: - Changed |= MaybeRewriteToFallthrough( + Changed |= maybeRewriteToFallthrough( MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v4i32, WebAssembly::COPY_V128); break; case WebAssembly::RETURN_v2i64: - Changed |= MaybeRewriteToFallthrough( + Changed |= maybeRewriteToFallthrough( MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v2i64, WebAssembly::COPY_V128); break; case WebAssembly::RETURN_v4f32: - Changed |= MaybeRewriteToFallthrough( + Changed |= maybeRewriteToFallthrough( MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v4f32, WebAssembly::COPY_V128); break; case WebAssembly::RETURN_v2f64: - Changed |= MaybeRewriteToFallthrough( + Changed |= maybeRewriteToFallthrough( MI, MBB, 
MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v2f64, WebAssembly::COPY_V128); break; case WebAssembly::RETURN_VOID: - Changed |= MaybeRewriteToFallthrough( + Changed |= maybeRewriteToFallthrough( MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_VOID, WebAssembly::INSTRUCTION_LIST_END); break; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp index 0be0ba657830..3bfbf607344d 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp @@ -1,9 +1,8 @@ //===- WebAssemblyPrepareForLiveIntervals.cpp - Prepare for LiveIntervals -===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -63,9 +62,9 @@ FunctionPass *llvm::createWebAssemblyPrepareForLiveIntervals() { } // Test whether the given register has an ARGUMENT def. -static bool HasArgumentDef(unsigned Reg, const MachineRegisterInfo &MRI) { +static bool hasArgumentDef(unsigned Reg, const MachineRegisterInfo &MRI) { for (const auto &Def : MRI.def_instructions(Reg)) - if (WebAssembly::isArgument(Def)) + if (WebAssembly::isArgument(Def.getOpcode())) return true; return false; } @@ -95,15 +94,15 @@ bool WebAssemblyPrepareForLiveIntervals::runOnMachineFunction( // // TODO: This is fairly heavy-handed; find a better approach. 
// - for (unsigned i = 0, e = MRI.getNumVirtRegs(); i < e; ++i) { - unsigned Reg = TargetRegisterInfo::index2VirtReg(i); + for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I) { + unsigned Reg = TargetRegisterInfo::index2VirtReg(I); // Skip unused registers. if (MRI.use_nodbg_empty(Reg)) continue; // Skip registers that have an ARGUMENT definition. - if (HasArgumentDef(Reg, MRI)) + if (hasArgumentDef(Reg, MRI)) continue; BuildMI(Entry, Entry.begin(), DebugLoc(), @@ -115,7 +114,7 @@ bool WebAssemblyPrepareForLiveIntervals::runOnMachineFunction( // liveness reflects the fact that these really are live-in values. for (auto MII = Entry.begin(), MIE = Entry.end(); MII != MIE;) { MachineInstr &MI = *MII++; - if (WebAssembly::isArgument(MI)) { + if (WebAssembly::isArgument(MI.getOpcode())) { MI.removeFromParent(); Entry.insert(Entry.begin(), &MI); } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp index d97b13a8d699..6f09c45b6642 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyRegColoring.cpp - Register coloring --------------------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -66,11 +65,11 @@ FunctionPass *llvm::createWebAssemblyRegColoring() { static float computeWeight(const MachineRegisterInfo *MRI, const MachineBlockFrequencyInfo *MBFI, unsigned VReg) { - float weight = 0.0f; + float Weight = 0.0f; for (MachineOperand &MO : MRI->reg_nodbg_operands(VReg)) - weight += LiveIntervals::getSpillWeight(MO.isDef(), MO.isUse(), MBFI, + Weight += LiveIntervals::getSpillWeight(MO.isDef(), MO.isUse(), MBFI, *MO.getParent()); - return weight; + return Weight; } bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) { @@ -98,8 +97,8 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) { SortedIntervals.reserve(NumVRegs); LLVM_DEBUG(dbgs() << "Interesting register intervals:\n"); - for (unsigned i = 0; i < NumVRegs; ++i) { - unsigned VReg = TargetRegisterInfo::index2VirtReg(i); + for (unsigned I = 0; I < NumVRegs; ++I) { + unsigned VReg = TargetRegisterInfo::index2VirtReg(I); if (MFI.isVRegStackified(VReg)) continue; // Skip unused registers, which can use $drop. @@ -134,10 +133,10 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) { SortedIntervals.size()); BitVector UsedColors(SortedIntervals.size()); bool Changed = false; - for (size_t i = 0, e = SortedIntervals.size(); i < e; ++i) { - LiveInterval *LI = SortedIntervals[i]; + for (size_t I = 0, E = SortedIntervals.size(); I < E; ++I) { + LiveInterval *LI = SortedIntervals[I]; unsigned Old = LI->reg; - size_t Color = i; + size_t Color = I; const TargetRegisterClass *RC = MRI->getRegClass(Old); // Check if it's possible to reuse any of the used colors. 
@@ -154,7 +153,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) { } unsigned New = SortedIntervals[Color]->reg; - SlotMapping[i] = New; + SlotMapping[I] = New; Changed |= Old != New; UsedColors.set(Color); Assignments[Color].push_back(LI); @@ -166,9 +165,9 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) { return false; // Rewrite register operands. - for (size_t i = 0, e = SortedIntervals.size(); i < e; ++i) { - unsigned Old = SortedIntervals[i]->reg; - unsigned New = SlotMapping[i]; + for (size_t I = 0, E = SortedIntervals.size(); I < E; ++I) { + unsigned Old = SortedIntervals[I]->reg; + unsigned New = SlotMapping[I]; if (Old != New) MRI->replaceRegWith(Old, New); } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp index 1e2a248f097e..cdca23f55b29 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyRegNumbering.cpp - Register Numbering ------------------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -73,7 +72,7 @@ bool WebAssemblyRegNumbering::runOnMachineFunction(MachineFunction &MF) { // variables. Assign the numbers for them first. 
MachineBasicBlock &EntryMBB = MF.front(); for (MachineInstr &MI : EntryMBB) { - if (!WebAssembly::isArgument(MI)) + if (!WebAssembly::isArgument(MI.getOpcode())) break; int64_t Imm = MI.getOperand(1).getImm(); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp index 1eb32ed64494..a120a6471014 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyRegStackify.cpp - Register Stackification --------------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -80,7 +79,7 @@ FunctionPass *llvm::createWebAssemblyRegStackify() { // Decorate the given instruction with implicit operands that enforce the // expression stack ordering constraints for an instruction which is on // the expression stack. -static void ImposeStackOrdering(MachineInstr *MI) { +static void imposeStackOrdering(MachineInstr *MI) { // Write the opaque VALUE_STACK register. if (!MI->definesRegister(WebAssembly::VALUE_STACK)) MI->addOperand(MachineOperand::CreateReg(WebAssembly::VALUE_STACK, @@ -96,7 +95,7 @@ static void ImposeStackOrdering(MachineInstr *MI) { // Convert an IMPLICIT_DEF instruction into an instruction which defines // a constant zero value. 
-static void ConvertImplicitDefToConstZero(MachineInstr *MI, +static void convertImplicitDefToConstZero(MachineInstr *MI, MachineRegisterInfo &MRI, const TargetInstrInfo *TII, MachineFunction &MF, @@ -112,12 +111,12 @@ static void ConvertImplicitDefToConstZero(MachineInstr *MI, MI->addOperand(MachineOperand::CreateImm(0)); } else if (RegClass == &WebAssembly::F32RegClass) { MI->setDesc(TII->get(WebAssembly::CONST_F32)); - ConstantFP *Val = cast<ConstantFP>(Constant::getNullValue( + auto *Val = cast<ConstantFP>(Constant::getNullValue( Type::getFloatTy(MF.getFunction().getContext()))); MI->addOperand(MachineOperand::CreateFPImm(Val)); } else if (RegClass == &WebAssembly::F64RegClass) { MI->setDesc(TII->get(WebAssembly::CONST_F64)); - ConstantFP *Val = cast<ConstantFP>(Constant::getNullValue( + auto *Val = cast<ConstantFP>(Constant::getNullValue( Type::getDoubleTy(MF.getFunction().getContext()))); MI->addOperand(MachineOperand::CreateFPImm(Val)); } else if (RegClass == &WebAssembly::V128RegClass) { @@ -136,7 +135,7 @@ static void ConvertImplicitDefToConstZero(MachineInstr *MI, // Determine whether a call to the callee referenced by // MI->getOperand(CalleeOpNo) reads memory, writes memory, and/or has side // effects. -static void QueryCallee(const MachineInstr &MI, unsigned CalleeOpNo, bool &Read, +static void queryCallee(const MachineInstr &MI, unsigned CalleeOpNo, bool &Read, bool &Write, bool &Effects, bool &StackPointer) { // All calls can use the stack pointer. 
StackPointer = true; @@ -144,11 +143,11 @@ static void QueryCallee(const MachineInstr &MI, unsigned CalleeOpNo, bool &Read, const MachineOperand &MO = MI.getOperand(CalleeOpNo); if (MO.isGlobal()) { const Constant *GV = MO.getGlobal(); - if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) + if (const auto *GA = dyn_cast<GlobalAlias>(GV)) if (!GA->isInterposable()) GV = GA->getAliasee(); - if (const Function *F = dyn_cast<Function>(GV)) { + if (const auto *F = dyn_cast<Function>(GV)) { if (!F->doesNotThrow()) Effects = true; if (F->doesNotAccessMemory()) @@ -168,7 +167,7 @@ static void QueryCallee(const MachineInstr &MI, unsigned CalleeOpNo, bool &Read, // Determine whether MI reads memory, writes memory, has side effects, // and/or uses the stack pointer value. -static void Query(const MachineInstr &MI, AliasAnalysis &AA, bool &Read, +static void query(const MachineInstr &MI, AliasAnalysis &AA, bool &Read, bool &Write, bool &Effects, bool &StackPointer) { assert(!MI.isTerminator()); @@ -253,13 +252,13 @@ static void Query(const MachineInstr &MI, AliasAnalysis &AA, bool &Read, // Analyze calls. if (MI.isCall()) { - unsigned CalleeOpNo = WebAssembly::getCalleeOpNo(MI); - QueryCallee(MI, CalleeOpNo, Read, Write, Effects, StackPointer); + unsigned CalleeOpNo = WebAssembly::getCalleeOpNo(MI.getOpcode()); + queryCallee(MI, CalleeOpNo, Read, Write, Effects, StackPointer); } } // Test whether Def is safe and profitable to rematerialize. -static bool ShouldRematerialize(const MachineInstr &Def, AliasAnalysis &AA, +static bool shouldRematerialize(const MachineInstr &Def, AliasAnalysis &AA, const WebAssemblyInstrInfo *TII) { return Def.isAsCheapAsAMove() && TII->isTriviallyReMaterializable(Def, &AA); } @@ -267,7 +266,7 @@ static bool ShouldRematerialize(const MachineInstr &Def, AliasAnalysis &AA, // Identify the definition for this register at this point. 
This is a // generalization of MachineRegisterInfo::getUniqueVRegDef that uses // LiveIntervals to handle complex cases. -static MachineInstr *GetVRegDef(unsigned Reg, const MachineInstr *Insert, +static MachineInstr *getVRegDef(unsigned Reg, const MachineInstr *Insert, const MachineRegisterInfo &MRI, const LiveIntervals &LIS) { // Most registers are in SSA form here so we try a quick MRI query first. @@ -285,7 +284,7 @@ static MachineInstr *GetVRegDef(unsigned Reg, const MachineInstr *Insert, // Test whether Reg, as defined at Def, has exactly one use. This is a // generalization of MachineRegisterInfo::hasOneUse that uses LiveIntervals // to handle complex cases. -static bool HasOneUse(unsigned Reg, MachineInstr *Def, MachineRegisterInfo &MRI, +static bool hasOneUse(unsigned Reg, MachineInstr *Def, MachineRegisterInfo &MRI, MachineDominatorTree &MDT, LiveIntervals &LIS) { // Most registers are in SSA form here so we try a quick MRI query first. if (MRI.hasOneUse(Reg)) @@ -314,10 +313,22 @@ static bool HasOneUse(unsigned Reg, MachineInstr *Def, MachineRegisterInfo &MRI, // walking the block. // TODO: Compute memory dependencies in a way that uses AliasAnalysis to be // more precise. -static bool IsSafeToMove(const MachineInstr *Def, const MachineInstr *Insert, +static bool isSafeToMove(const MachineInstr *Def, const MachineInstr *Insert, AliasAnalysis &AA, const MachineRegisterInfo &MRI) { assert(Def->getParent() == Insert->getParent()); + // 'catch' and 'extract_exception' should be the first instruction of a BB and + // cannot move. + if (Def->getOpcode() == WebAssembly::CATCH || + Def->getOpcode() == WebAssembly::EXTRACT_EXCEPTION_I32) { + const MachineBasicBlock *MBB = Def->getParent(); + auto NextI = std::next(MachineBasicBlock::const_iterator(Def)); + for (auto E = MBB->end(); NextI != E && NextI->isDebugInstr(); ++NextI) + ; + if (NextI != Insert) + return false; + } + // Check for register dependencies. 
SmallVector<unsigned, 4> MutableRegisters; for (const MachineOperand &MO : Def->operands()) { @@ -350,7 +361,7 @@ static bool IsSafeToMove(const MachineInstr *Def, const MachineInstr *Insert, } bool Read = false, Write = false, Effects = false, StackPointer = false; - Query(*Def, AA, Read, Write, Effects, StackPointer); + query(*Def, AA, Read, Write, Effects, StackPointer); // If the instruction does not access memory and has no side effects, it has // no additional dependencies. @@ -365,7 +376,7 @@ static bool IsSafeToMove(const MachineInstr *Def, const MachineInstr *Insert, bool InterveningWrite = false; bool InterveningEffects = false; bool InterveningStackPointer = false; - Query(*I, AA, InterveningRead, InterveningWrite, InterveningEffects, + query(*I, AA, InterveningRead, InterveningWrite, InterveningEffects, InterveningStackPointer); if (Effects && InterveningEffects) return false; @@ -386,7 +397,7 @@ static bool IsSafeToMove(const MachineInstr *Def, const MachineInstr *Insert, } /// Test whether OneUse, a use of Reg, dominates all of Reg's other uses. -static bool OneUseDominatesOtherUses(unsigned Reg, const MachineOperand &OneUse, +static bool oneUseDominatesOtherUses(unsigned Reg, const MachineOperand &OneUse, const MachineBasicBlock &MBB, const MachineRegisterInfo &MRI, const MachineDominatorTree &MDT, @@ -445,7 +456,7 @@ static bool OneUseDominatesOtherUses(unsigned Reg, const MachineOperand &OneUse, } /// Get the appropriate tee opcode for the given register class. -static unsigned GetTeeOpcode(const TargetRegisterClass *RC) { +static unsigned getTeeOpcode(const TargetRegisterClass *RC) { if (RC == &WebAssembly::I32RegClass) return WebAssembly::TEE_I32; if (RC == &WebAssembly::I64RegClass) @@ -460,7 +471,7 @@ static unsigned GetTeeOpcode(const TargetRegisterClass *RC) { } // Shrink LI to its uses, cleaning up LI. 
-static void ShrinkToUses(LiveInterval &LI, LiveIntervals &LIS) { +static void shrinkToUses(LiveInterval &LI, LiveIntervals &LIS) { if (LIS.shrinkToUses(&LI)) { SmallVector<LiveInterval *, 4> SplitLIs; LIS.splitSeparateComponents(LI, SplitLIs); @@ -469,7 +480,7 @@ static void ShrinkToUses(LiveInterval &LI, LiveIntervals &LIS) { /// A single-use def in the same block with no intervening memory or register /// dependencies; move the def down and nest it with the current instruction. -static MachineInstr *MoveForSingleUse(unsigned Reg, MachineOperand &Op, +static MachineInstr *moveForSingleUse(unsigned Reg, MachineOperand &Op, MachineInstr *Def, MachineBasicBlock &MBB, MachineInstr *Insert, LiveIntervals &LIS, WebAssemblyFunctionInfo &MFI, @@ -508,13 +519,13 @@ static MachineInstr *MoveForSingleUse(unsigned Reg, MachineOperand &Op, LLVM_DEBUG(dbgs() << " - Replaced register: "; Def->dump()); } - ImposeStackOrdering(Def); + imposeStackOrdering(Def); return Def; } /// A trivially cloneable instruction; clone it and nest the new copy with the /// current instruction. 
-static MachineInstr *RematerializeCheapDef( +static MachineInstr *rematerializeCheapDef( unsigned Reg, MachineOperand &Op, MachineInstr &Def, MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator Insert, LiveIntervals &LIS, WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI, @@ -531,7 +542,7 @@ static MachineInstr *RematerializeCheapDef( LIS.InsertMachineInstrInMaps(*Clone); LIS.createAndComputeVirtRegInterval(NewReg); MFI.stackifyVReg(NewReg); - ImposeStackOrdering(Clone); + imposeStackOrdering(Clone); LLVM_DEBUG(dbgs() << " - Cloned to "; Clone->dump()); @@ -539,7 +550,7 @@ static MachineInstr *RematerializeCheapDef( bool IsDead = MRI.use_empty(Reg); if (!IsDead) { LiveInterval &LI = LIS.getInterval(Reg); - ShrinkToUses(LI, LIS); + shrinkToUses(LI, LIS); IsDead = !LI.liveAt(LIS.getInstructionIndex(Def).getDeadSlot()); } @@ -582,7 +593,7 @@ static MachineInstr *RematerializeCheapDef( /// /// with DefReg and TeeReg stackified. This eliminates a local.get from the /// resulting code. -static MachineInstr *MoveAndTeeForMultiUse( +static MachineInstr *moveAndTeeForMultiUse( unsigned Reg, MachineOperand &Op, MachineInstr *Def, MachineBasicBlock &MBB, MachineInstr *Insert, LiveIntervals &LIS, WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI, const WebAssemblyInstrInfo *TII) { @@ -600,7 +611,7 @@ static MachineInstr *MoveAndTeeForMultiUse( unsigned DefReg = MRI.createVirtualRegister(RegClass); MachineOperand &DefMO = Def->getOperand(0); MachineInstr *Tee = BuildMI(MBB, Insert, Insert->getDebugLoc(), - TII->get(GetTeeOpcode(RegClass)), TeeReg) + TII->get(getTeeOpcode(RegClass)), TeeReg) .addReg(Reg, RegState::Define) .addReg(DefReg, getUndefRegState(DefMO.isDead())); Op.setReg(TeeReg); @@ -616,15 +627,15 @@ static MachineInstr *MoveAndTeeForMultiUse( VNInfo *ValNo = LI.getVNInfoAt(DefIdx); I->start = TeeIdx; ValNo->def = TeeIdx; - ShrinkToUses(LI, LIS); + shrinkToUses(LI, LIS); // Finish stackifying the new regs. 
LIS.createAndComputeVirtRegInterval(TeeReg); LIS.createAndComputeVirtRegInterval(DefReg); MFI.stackifyVReg(DefReg); MFI.stackifyVReg(TeeReg); - ImposeStackOrdering(Def); - ImposeStackOrdering(Tee); + imposeStackOrdering(Def); + imposeStackOrdering(Tee); DefDIs.clone(Tee, DefReg); DefDIs.clone(Insert, TeeReg); @@ -638,9 +649,9 @@ namespace { /// A stack for walking the tree of instructions being built, visiting the /// MachineOperands in DFS order. class TreeWalkerState { - typedef MachineInstr::mop_iterator mop_iterator; - typedef std::reverse_iterator<mop_iterator> mop_reverse_iterator; - typedef iterator_range<mop_reverse_iterator> RangeTy; + using mop_iterator = MachineInstr::mop_iterator; + using mop_reverse_iterator = std::reverse_iterator<mop_iterator>; + using RangeTy = iterator_range<mop_reverse_iterator>; SmallVector<RangeTy, 4> Worklist; public: @@ -650,9 +661,9 @@ public: Worklist.push_back(reverse(Range)); } - bool Done() const { return Worklist.empty(); } + bool done() const { return Worklist.empty(); } - MachineOperand &Pop() { + MachineOperand &pop() { RangeTy &Range = Worklist.back(); MachineOperand &Op = *Range.begin(); Range = drop_begin(Range, 1); @@ -665,7 +676,7 @@ public: } /// Push Instr's operands onto the stack to be visited. - void PushOperands(MachineInstr *Instr) { + void pushOperands(MachineInstr *Instr) { const iterator_range<mop_iterator> &Range(Instr->explicit_uses()); if (Range.begin() != Range.end()) Worklist.push_back(reverse(Range)); @@ -673,8 +684,8 @@ public: /// Some of Instr's operands are on the top of the stack; remove them and /// re-insert them starting from the beginning (because we've commuted them). 
- void ResetTopOperands(MachineInstr *Instr) { - assert(HasRemainingOperands(Instr) && + void resetTopOperands(MachineInstr *Instr) { + assert(hasRemainingOperands(Instr) && "Reseting operands should only be done when the instruction has " "an operand still on the stack"); Worklist.back() = reverse(Instr->explicit_uses()); @@ -682,7 +693,7 @@ public: /// Test whether Instr has operands remaining to be visited at the top of /// the stack. - bool HasRemainingOperands(const MachineInstr *Instr) const { + bool hasRemainingOperands(const MachineInstr *Instr) const { if (Worklist.empty()) return false; const RangeTy &Range = Worklist.back(); @@ -695,7 +706,7 @@ public: /// /// This is needed as a consequence of using implicit local.gets for /// uses and implicit local.sets for defs. - bool IsOnStack(unsigned Reg) const { + bool isOnStack(unsigned Reg) const { for (const RangeTy &Range : Worklist) for (const MachineOperand &MO : Range) if (MO.isReg() && MO.getReg() == Reg) @@ -712,20 +723,18 @@ class CommutingState { /// state where we've commuted the operands of the current instruction and are /// revisiting it, and the declined state where we've reverted the operands /// back to their original order and will no longer commute it further. - bool TentativelyCommuting; - bool Declined; + bool TentativelyCommuting = false; + bool Declined = false; /// During the tentative state, these hold the operand indices of the commuted /// operands. unsigned Operand0, Operand1; public: - CommutingState() : TentativelyCommuting(false), Declined(false) {} - /// Stackification for an operand was not successful due to ordering /// constraints. If possible, and if we haven't already tried it and declined /// it, commute Insert's operands and prepare to revisit it. 
- void MaybeCommute(MachineInstr *Insert, TreeWalkerState &TreeWalker, + void maybeCommute(MachineInstr *Insert, TreeWalkerState &TreeWalker, const WebAssemblyInstrInfo *TII) { if (TentativelyCommuting) { assert(!Declined && @@ -734,13 +743,13 @@ public: TII->commuteInstruction(*Insert, /*NewMI=*/false, Operand0, Operand1); TentativelyCommuting = false; Declined = true; - } else if (!Declined && TreeWalker.HasRemainingOperands(Insert)) { + } else if (!Declined && TreeWalker.hasRemainingOperands(Insert)) { Operand0 = TargetInstrInfo::CommuteAnyOperandIndex; Operand1 = TargetInstrInfo::CommuteAnyOperandIndex; if (TII->findCommutedOpIndices(*Insert, Operand0, Operand1)) { // Tentatively commute the operands and try again. TII->commuteInstruction(*Insert, /*NewMI=*/false, Operand0, Operand1); - TreeWalker.ResetTopOperands(Insert); + TreeWalker.resetTopOperands(Insert); TentativelyCommuting = true; Declined = false; } @@ -749,7 +758,7 @@ public: /// Stackification for some operand was successful. Reset to the default /// state. - void Reset() { + void reset() { TentativelyCommuting = false; Declined = false; } @@ -767,8 +776,8 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) { const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); const auto *TRI = MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo(); AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); - MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>(); - LiveIntervals &LIS = getAnalysis<LiveIntervals>(); + auto &MDT = getAnalysis<MachineDominatorTree>(); + auto &LIS = getAnalysis<LiveIntervals>(); // Walk the instructions from the bottom up. 
Currently we don't look past // block boundaries, and the blocks aren't ordered so the block visitation @@ -780,19 +789,19 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) { MachineInstr *Insert = &*MII; // Don't nest anything inside an inline asm, because we don't have // constraints for $push inputs. - if (Insert->getOpcode() == TargetOpcode::INLINEASM) + if (Insert->isInlineAsm()) continue; // Ignore debugging intrinsics. - if (Insert->getOpcode() == TargetOpcode::DBG_VALUE) + if (Insert->isDebugValue()) continue; // Iterate through the inputs in reverse order, since we'll be pulling // operands off the stack in LIFO order. CommutingState Commuting; TreeWalkerState TreeWalker(Insert); - while (!TreeWalker.Done()) { - MachineOperand &Op = TreeWalker.Pop(); + while (!TreeWalker.done()) { + MachineOperand &Op = TreeWalker.pop(); // We're only interested in explicit virtual register operands. if (!Op.isReg()) @@ -806,18 +815,36 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) { continue; // Identify the definition for this register at this point. - MachineInstr *Def = GetVRegDef(Reg, Insert, MRI, LIS); + MachineInstr *Def = getVRegDef(Reg, Insert, MRI, LIS); if (!Def) continue; // Don't nest an INLINE_ASM def into anything, because we don't have // constraints for $pop outputs. - if (Def->getOpcode() == TargetOpcode::INLINEASM) + if (Def->isInlineAsm()) continue; // Argument instructions represent live-in registers and not real // instructions. - if (WebAssembly::isArgument(*Def)) + if (WebAssembly::isArgument(Def->getOpcode())) + continue; + + // Currently catch's return value register cannot be stackified, because + // the wasm LLVM backend currently does not support live-in values + // entering blocks, which is a part of multi-value proposal. 
+ // + // Once we support live-in values of wasm blocks, this can be: + // catch ; push exnref value onto stack + // block exnref -> i32 + // br_on_exn $__cpp_exception ; pop the exnref value + // end_block + // + // But because we don't support it yet, the catch instruction's dst + // register should be assigned to a local to be propagated across + // 'block' boundary now. + // + // TODO Fix this once we support the multi-value proposal. + if (Def->getOpcode() == WebAssembly::CATCH) continue; // Decide which strategy to take. Prefer to move a single-use value @@ -827,23 +854,23 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) { // supports intra-block moves) and it's MachineSink's job to catch all // the sinking opportunities anyway. bool SameBlock = Def->getParent() == &MBB; - bool CanMove = SameBlock && IsSafeToMove(Def, Insert, AA, MRI) && - !TreeWalker.IsOnStack(Reg); - if (CanMove && HasOneUse(Reg, Def, MRI, MDT, LIS)) { - Insert = MoveForSingleUse(Reg, Op, Def, MBB, Insert, LIS, MFI, MRI); - } else if (ShouldRematerialize(*Def, AA, TII)) { + bool CanMove = SameBlock && isSafeToMove(Def, Insert, AA, MRI) && + !TreeWalker.isOnStack(Reg); + if (CanMove && hasOneUse(Reg, Def, MRI, MDT, LIS)) { + Insert = moveForSingleUse(Reg, Op, Def, MBB, Insert, LIS, MFI, MRI); + } else if (shouldRematerialize(*Def, AA, TII)) { Insert = - RematerializeCheapDef(Reg, Op, *Def, MBB, Insert->getIterator(), + rematerializeCheapDef(Reg, Op, *Def, MBB, Insert->getIterator(), LIS, MFI, MRI, TII, TRI); } else if (CanMove && - OneUseDominatesOtherUses(Reg, Op, MBB, MRI, MDT, LIS, MFI)) { - Insert = MoveAndTeeForMultiUse(Reg, Op, Def, MBB, Insert, LIS, MFI, + oneUseDominatesOtherUses(Reg, Op, MBB, MRI, MDT, LIS, MFI)) { + Insert = moveAndTeeForMultiUse(Reg, Op, Def, MBB, Insert, LIS, MFI, MRI, TII); } else { // We failed to stackify the operand. If the problem was ordering // constraints, Commuting may be able to help. 
if (!CanMove && SameBlock) - Commuting.MaybeCommute(Insert, TreeWalker, TII); + Commuting.maybeCommute(Insert, TreeWalker, TII); // Proceed to the next operand. continue; } @@ -852,18 +879,18 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) { // to a constant 0 so that the def is explicit, and the push/pop // correspondence is maintained. if (Insert->getOpcode() == TargetOpcode::IMPLICIT_DEF) - ConvertImplicitDefToConstZero(Insert, MRI, TII, MF, LIS); + convertImplicitDefToConstZero(Insert, MRI, TII, MF, LIS); // We stackified an operand. Add the defining instruction's operands to // the worklist stack now to continue to build an ever deeper tree. - Commuting.Reset(); - TreeWalker.PushOperands(Insert); + Commuting.reset(); + TreeWalker.pushOperands(Insert); } // If we stackified any operands, skip over the tree to start looking for // the next instruction we can build a tree on. if (Insert != &*MII) { - ImposeStackOrdering(&*MII); + imposeStackOrdering(&*MII); MII = MachineBasicBlock::iterator(Insert).getReverse(); Changed = true; } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp index 1f0870865b06..ea9cfc00adfd 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyRegisterInfo.cpp - WebAssembly Register Information ----===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -67,19 +66,22 @@ void WebAssemblyRegisterInfo::eliminateFrameIndex( assert(MFI.getObjectSize(FrameIndex) != 0 && "We assume that variable-sized objects have already been lowered, " "and don't use FrameIndex operands."); - unsigned FrameRegister = getFrameRegister(MF); + Register FrameRegister = getFrameRegister(MF); // If this is the address operand of a load or store, make it relative to SP // and fold the frame offset directly in. - if ((MI.mayLoad() && FIOperandNum == WebAssembly::LoadAddressOperandNo) || - (MI.mayStore() && FIOperandNum == WebAssembly::StoreAddressOperandNo)) { - assert(FrameOffset >= 0 && MI.getOperand(FIOperandNum - 1).getImm() >= 0); - int64_t Offset = MI.getOperand(FIOperandNum - 1).getImm() + FrameOffset; + unsigned AddrOperandNum = WebAssembly::getNamedOperandIdx( + MI.getOpcode(), WebAssembly::OpName::addr); + if (AddrOperandNum == FIOperandNum) { + unsigned OffsetOperandNum = WebAssembly::getNamedOperandIdx( + MI.getOpcode(), WebAssembly::OpName::off); + assert(FrameOffset >= 0 && MI.getOperand(OffsetOperandNum).getImm() >= 0); + int64_t Offset = MI.getOperand(OffsetOperandNum).getImm() + FrameOffset; if (static_cast<uint64_t>(Offset) <= std::numeric_limits<uint32_t>::max()) { - MI.getOperand(FIOperandNum - 1).setImm(Offset); + MI.getOperand(OffsetOperandNum).setImm(Offset); MI.getOperand(FIOperandNum) - .ChangeToRegister(FrameRegister, /*IsDef=*/false); + .ChangeToRegister(FrameRegister, /*isDef=*/false); return; } } @@ -100,7 +102,7 @@ void WebAssemblyRegisterInfo::eliminateFrameIndex( MachineOperand &ImmMO = Def->getOperand(1); ImmMO.setImm(ImmMO.getImm() + uint32_t(FrameOffset)); MI.getOperand(FIOperandNum) - .ChangeToRegister(FrameRegister, /*IsDef=*/false); + .ChangeToRegister(FrameRegister, /*isDef=*/false); return; } } @@ -125,10 +127,10 @@ void 
WebAssemblyRegisterInfo::eliminateFrameIndex( .addReg(FrameRegister) .addReg(OffsetOp); } - MI.getOperand(FIOperandNum).ChangeToRegister(FIRegOperand, /*IsDef=*/false); + MI.getOperand(FIOperandNum).ChangeToRegister(FIRegOperand, /*isDef=*/false); } -unsigned +Register WebAssemblyRegisterInfo::getFrameRegister(const MachineFunction &MF) const { static const unsigned Regs[2][2] = { /* !isArch64Bit isArch64Bit */ diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h index 2a73dfd4b065..7880eb217dbf 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h @@ -1,9 +1,8 @@ // WebAssemblyRegisterInfo.h - WebAssembly Register Information Impl -*- C++ -*- // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -40,7 +39,7 @@ public: RegScavenger *RS = nullptr) const override; // Debug information queries. 
- unsigned getFrameRegister(const MachineFunction &MF) const override; + Register getFrameRegister(const MachineFunction &MF) const override; const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td index a7c3d177724d..6d3d6c723277 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td @@ -1,9 +1,8 @@ //WebAssemblyRegisterInfo.td-Describe the WebAssembly Registers -*- tablegen -*- // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -44,7 +43,7 @@ def F64_0 : WebAssemblyReg<"%f64.0">; def V128_0: WebAssemblyReg<"%v128">; -def EXCEPT_REF_0 : WebAssemblyReg<"%except_ref.0">; +def EXNREF_0 : WebAssemblyReg<"%exnref.0">; // The value stack "register". This is an opaque entity which serves to order // uses and defs that must remain in LIFO order. 
@@ -65,4 +64,4 @@ def F32 : WebAssemblyRegClass<[f32], 32, (add F32_0)>; def F64 : WebAssemblyRegClass<[f64], 64, (add F64_0)>; def V128 : WebAssemblyRegClass<[v4f32, v2f64, v2i64, v4i32, v16i8, v8i16], 128, (add V128_0)>; -def EXCEPT_REF : WebAssemblyRegClass<[ExceptRef], 0, (add EXCEPT_REF_0)>; +def EXNREF : WebAssemblyRegClass<[exnref], 0, (add EXNREF_0)>; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp index e5a3e47a3bcd..5eafd6c54e78 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyReplacePhysRegs.cpp - Replace phys regs with virt regs -===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp index 6cf81a9d77b3..7b9ae90326f0 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp @@ -1,9 +1,8 @@ // CodeGen/RuntimeLibcallSignatures.cpp - R.T. Lib. Call Signatures -*- C++ -*-- // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -52,6 +51,8 @@ enum RuntimeLibcallSignature { f64_func_f64_i32, f64_func_i64_i64, i16_func_f32, + i16_func_f64, + i16_func_i64_i64, i8_func_i8_i8, func_f32_iPTR_iPTR, func_f64_iPTR_iPTR, @@ -85,6 +86,9 @@ enum RuntimeLibcallSignature { func_iPTR_i64_i64_i64_i64_i64_i64, i32_func_i64_i64, i32_func_i64_i64_i64_i64, + iPTR_func_f32, + iPTR_func_f64, + iPTR_func_i64_i64, unsupported }; @@ -215,6 +219,18 @@ struct RuntimeLibcallSignatureTable { Table[RTLIB::ROUND_F32] = f32_func_f32; Table[RTLIB::ROUND_F64] = f64_func_f64; Table[RTLIB::ROUND_F128] = func_iPTR_i64_i64; + Table[RTLIB::LROUND_F32] = iPTR_func_f32; + Table[RTLIB::LROUND_F64] = iPTR_func_f64; + Table[RTLIB::LROUND_F128] = iPTR_func_i64_i64; + Table[RTLIB::LLROUND_F32] = i64_func_f32; + Table[RTLIB::LLROUND_F64] = i64_func_f64; + Table[RTLIB::LLROUND_F128] = i64_func_i64_i64; + Table[RTLIB::LRINT_F32] = iPTR_func_f32; + Table[RTLIB::LRINT_F64] = iPTR_func_f64; + Table[RTLIB::LRINT_F128] = iPTR_func_i64_i64; + Table[RTLIB::LLRINT_F32] = i64_func_f32; + Table[RTLIB::LLRINT_F64] = i64_func_f64; + Table[RTLIB::LLRINT_F128] = i64_func_i64_i64; Table[RTLIB::FLOOR_F32] = f32_func_f32; Table[RTLIB::FLOOR_F64] = f64_func_f64; Table[RTLIB::FLOOR_F128] = func_iPTR_i64_i64; @@ -229,13 +245,15 @@ struct RuntimeLibcallSignatureTable { Table[RTLIB::FMAX_F128] = func_iPTR_i64_i64_i64_i64; // Conversion - // All F80 and PPCF128 routines are unspported. + // All F80 and PPCF128 routines are unsupported. 
Table[RTLIB::FPEXT_F64_F128] = func_iPTR_f64; Table[RTLIB::FPEXT_F32_F128] = func_iPTR_f32; Table[RTLIB::FPEXT_F32_F64] = f64_func_f32; Table[RTLIB::FPEXT_F16_F32] = f32_func_i16; Table[RTLIB::FPROUND_F32_F16] = i16_func_f32; + Table[RTLIB::FPROUND_F64_F16] = i16_func_f64; Table[RTLIB::FPROUND_F64_F32] = f32_func_f64; + Table[RTLIB::FPROUND_F128_F16] = i16_func_i64_i64; Table[RTLIB::FPROUND_F128_F32] = f32_func_i64_i64; Table[RTLIB::FPROUND_F128_F64] = f64_func_i64_i64; Table[RTLIB::FPTOSINT_F32_I32] = i32_func_f32; @@ -310,6 +328,12 @@ struct RuntimeLibcallSignatureTable { Table[RTLIB::MEMSET] = iPTR_func_iPTR_i32_iPTR; Table[RTLIB::MEMMOVE] = iPTR_func_iPTR_iPTR_iPTR; + // __stack_chk_fail + Table[RTLIB::STACKPROTECTOR_CHECK_FAIL] = func; + + // Return address handling + Table[RTLIB::RETURN_ADDRESS] = i32_func_i32; + // Element-wise Atomic memory // TODO: Fix these when we implement atomic support Table[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_1] = unsupported; @@ -480,19 +504,25 @@ struct StaticLibcallNameMap { Map[NameLibcall.first] = NameLibcall.second; } } + // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is + // consistent with the f64 and f128 names. + Map["__extendhfsf2"] = RTLIB::FPEXT_F16_F32; + Map["__truncsfhf2"] = RTLIB::FPROUND_F32_F16; + + Map["emscripten_return_address"] = RTLIB::RETURN_ADDRESS; } }; } // end anonymous namespace -void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, +void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget, RTLIB::Libcall LC, SmallVectorImpl<wasm::ValType> &Rets, SmallVectorImpl<wasm::ValType> &Params) { assert(Rets.empty()); assert(Params.empty()); - wasm::ValType iPTR = + wasm::ValType PtrTy = Subtarget.hasAddr64() ? 
wasm::ValType::I64 : wasm::ValType::I32; auto &Table = RuntimeLibcallSignatures->Table; @@ -593,6 +623,15 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I32); Params.push_back(wasm::ValType::F32); break; + case i16_func_f64: + Rets.push_back(wasm::ValType::I32); + Params.push_back(wasm::ValType::F64); + break; + case i16_func_i64_i64: + Rets.push_back(wasm::ValType::I32); + Params.push_back(wasm::ValType::I64); + Params.push_back(wasm::ValType::I64); + break; case i8_func_i8_i8: Rets.push_back(wasm::ValType::I32); Params.push_back(wasm::ValType::I32); @@ -600,13 +639,13 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, break; case func_f32_iPTR_iPTR: Params.push_back(wasm::ValType::F32); - Params.push_back(iPTR); - Params.push_back(iPTR); + Params.push_back(PtrTy); + Params.push_back(PtrTy); break; case func_f64_iPTR_iPTR: Params.push_back(wasm::ValType::F64); - Params.push_back(iPTR); - Params.push_back(iPTR); + Params.push_back(PtrTy); + Params.push_back(PtrTy); break; case i16_func_i16_i16: Rets.push_back(wasm::ValType::I32); @@ -632,7 +671,7 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I32); Params.push_back(wasm::ValType::I32); Params.push_back(wasm::ValType::I32); - Params.push_back(iPTR); + Params.push_back(PtrTy); break; case i64_func_i64_i64: Rets.push_back(wasm::ValType::I64); @@ -643,14 +682,14 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); - Params.push_back(iPTR); + Params.push_back(PtrTy); break; case i64_i64_func_f32: #if 0 // TODO: Enable this when wasm gets multiple-return-value support. 
Rets.push_back(wasm::ValType::I64); Rets.push_back(wasm::ValType::I64); #else - Params.push_back(iPTR); + Params.push_back(PtrTy); #endif Params.push_back(wasm::ValType::F32); break; @@ -659,7 +698,7 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I64); Rets.push_back(wasm::ValType::I64); #else - Params.push_back(iPTR); + Params.push_back(PtrTy); #endif Params.push_back(wasm::ValType::F64); break; @@ -668,7 +707,7 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I32); Rets.push_back(wasm::ValType::I32); #else - Params.push_back(iPTR); + Params.push_back(PtrTy); #endif Params.push_back(wasm::ValType::I32); Params.push_back(wasm::ValType::I32); @@ -678,7 +717,7 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I32); Rets.push_back(wasm::ValType::I32); #else - Params.push_back(iPTR); + Params.push_back(PtrTy); #endif Params.push_back(wasm::ValType::I32); Params.push_back(wasm::ValType::I32); @@ -688,7 +727,7 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I64); Rets.push_back(wasm::ValType::I64); #else - Params.push_back(iPTR); + Params.push_back(PtrTy); #endif Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); @@ -698,7 +737,7 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I64); Rets.push_back(wasm::ValType::I64); #else - Params.push_back(iPTR); + Params.push_back(PtrTy); #endif Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); @@ -710,13 +749,13 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I64); Rets.push_back(wasm::ValType::I64); #else - Params.push_back(iPTR); + Params.push_back(PtrTy); #endif Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); 
Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); - Params.push_back(iPTR); + Params.push_back(PtrTy); break; case i64_i64_i64_i64_func_i64_i64_i64_i64: #if 0 // TODO: Enable this when wasm gets multiple-return-value support. @@ -725,7 +764,7 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I64); Rets.push_back(wasm::ValType::I64); #else - Params.push_back(iPTR); + Params.push_back(PtrTy); #endif Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); @@ -739,23 +778,23 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I64); Rets.push_back(wasm::ValType::I64); #else - Params.push_back(iPTR); + Params.push_back(PtrTy); #endif Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I32); break; case iPTR_func_iPTR_i32_iPTR: - Rets.push_back(iPTR); - Params.push_back(iPTR); + Rets.push_back(PtrTy); + Params.push_back(PtrTy); Params.push_back(wasm::ValType::I32); - Params.push_back(iPTR); + Params.push_back(PtrTy); break; case iPTR_func_iPTR_iPTR_iPTR: - Rets.push_back(iPTR); - Params.push_back(iPTR); - Params.push_back(iPTR); - Params.push_back(iPTR); + Rets.push_back(PtrTy); + Params.push_back(PtrTy); + Params.push_back(PtrTy); + Params.push_back(PtrTy); break; case f32_func_f32_f32_f32: Rets.push_back(wasm::ValType::F32); @@ -772,39 +811,39 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, case func_i64_i64_iPTR_iPTR: Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); - Params.push_back(iPTR); - Params.push_back(iPTR); + Params.push_back(PtrTy); + Params.push_back(PtrTy); break; case func_iPTR_f32: - Params.push_back(iPTR); + Params.push_back(PtrTy); Params.push_back(wasm::ValType::F32); break; case func_iPTR_f64: - Params.push_back(iPTR); + Params.push_back(PtrTy); Params.push_back(wasm::ValType::F64); break; case 
func_iPTR_i32: - Params.push_back(iPTR); + Params.push_back(PtrTy); Params.push_back(wasm::ValType::I32); break; case func_iPTR_i64: - Params.push_back(iPTR); + Params.push_back(PtrTy); Params.push_back(wasm::ValType::I64); break; case func_iPTR_i64_i64: - Params.push_back(iPTR); + Params.push_back(PtrTy); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); break; case func_iPTR_i64_i64_i64_i64: - Params.push_back(iPTR); + Params.push_back(PtrTy); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); break; case func_iPTR_i64_i64_i64_i64_i64_i64: - Params.push_back(iPTR); + Params.push_back(PtrTy); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); @@ -824,6 +863,19 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); break; + case iPTR_func_f32: + Rets.push_back(PtrTy); + Params.push_back(wasm::ValType::F32); + break; + case iPTR_func_f64: + Rets.push_back(PtrTy); + Params.push_back(wasm::ValType::F64); + break; + case iPTR_func_i64_i64: + Rets.push_back(PtrTy); + Params.push_back(wasm::ValType::I64); + Params.push_back(wasm::ValType::I64); + break; case unsupported: llvm_unreachable("unsupported runtime library signature"); } @@ -832,12 +884,17 @@ void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, static ManagedStatic<StaticLibcallNameMap> LibcallNameMap; // TODO: If the RTLIB::Libcall-taking flavor of GetSignature remains unsed // other than here, just roll its logic into this version. 
-void llvm::GetLibcallSignature(const WebAssemblySubtarget &Subtarget, +void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget, const char *Name, SmallVectorImpl<wasm::ValType> &Rets, SmallVectorImpl<wasm::ValType> &Params) { auto &Map = LibcallNameMap->Map; - auto val = Map.find(Name); - assert(val != Map.end() && "unexpected runtime library name"); - return GetLibcallSignature(Subtarget, val->second, Rets, Params); + auto Val = Map.find(Name); +#ifndef NDEBUG + if (Val == Map.end()) { + auto message = std::string("unexpected runtime library name: ") + Name; + llvm_unreachable(message.c_str()); + } +#endif + return getLibcallSignature(Subtarget, Val->second, Rets, Params); } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h index 7fa70bea96de..6ae8aaaba59c 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h @@ -1,9 +1,8 @@ // CodeGen/RuntimeLibcallSignatures.h - R.T. Lib. Call Signatures -*- C++ -*--// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -23,12 +22,12 @@ namespace llvm { class WebAssemblySubtarget; -extern void GetLibcallSignature(const WebAssemblySubtarget &Subtarget, +extern void getLibcallSignature(const WebAssemblySubtarget &Subtarget, RTLIB::Libcall LC, SmallVectorImpl<wasm::ValType> &Rets, SmallVectorImpl<wasm::ValType> &Params); -extern void GetLibcallSignature(const WebAssemblySubtarget &Subtarget, +extern void getLibcallSignature(const WebAssemblySubtarget &Subtarget, const char *Name, SmallVectorImpl<wasm::ValType> &Rets, SmallVectorImpl<wasm::ValType> &Params); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp index bec72049258a..890e4b8e4e2a 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblySelectionDAGInfo.cpp - WebAssembly SelectionDAG Info ---===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -17,4 +16,44 @@ using namespace llvm; #define DEBUG_TYPE "wasm-selectiondag-info" -WebAssemblySelectionDAGInfo::~WebAssemblySelectionDAGInfo() {} +WebAssemblySelectionDAGInfo::~WebAssemblySelectionDAGInfo() = default; // anchor + +SDValue WebAssemblySelectionDAGInfo::EmitTargetCodeForMemcpy( + SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dst, SDValue Src, + SDValue Size, unsigned Align, bool IsVolatile, bool AlwaysInline, + MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const { + if (!DAG.getMachineFunction() + .getSubtarget<WebAssemblySubtarget>() + .hasBulkMemory()) + return SDValue(); + + SDValue MemIdx = DAG.getConstant(0, DL, MVT::i32); + return DAG.getNode(WebAssemblyISD::MEMORY_COPY, DL, MVT::Other, + {Chain, MemIdx, MemIdx, Dst, Src, + DAG.getZExtOrTrunc(Size, DL, MVT::i32)}); +} + +SDValue WebAssemblySelectionDAGInfo::EmitTargetCodeForMemmove( + SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Op1, SDValue Op2, + SDValue Op3, unsigned Align, bool IsVolatile, + MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const { + return EmitTargetCodeForMemcpy(DAG, DL, Chain, Op1, Op2, Op3, Align, + IsVolatile, false, DstPtrInfo, + SrcPtrInfo); +} + +SDValue WebAssemblySelectionDAGInfo::EmitTargetCodeForMemset( + SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dst, SDValue Val, + SDValue Size, unsigned Align, bool IsVolatile, + MachinePointerInfo DstPtrInfo) const { + if (!DAG.getMachineFunction() + .getSubtarget<WebAssemblySubtarget>() + .hasBulkMemory()) + return SDValue(); + + SDValue MemIdx = DAG.getConstant(0, DL, MVT::i32); + // Only low byte matters for val argument, so anyext the i8 + return DAG.getNode(WebAssemblyISD::MEMORY_FILL, DL, MVT::Other, Chain, MemIdx, + Dst, DAG.getAnyExtOrTrunc(Val, DL, MVT::i32), + DAG.getZExtOrTrunc(Size, DL, 
MVT::i32)); +} diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h index 31d150eded67..0b90ece27dff 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h @@ -1,9 +1,8 @@ //=- WebAssemblySelectionDAGInfo.h - WebAssembly SelectionDAG Info -*- C++ -*-// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -23,6 +22,21 @@ namespace llvm { class WebAssemblySelectionDAGInfo final : public SelectionDAGTargetInfo { public: ~WebAssemblySelectionDAGInfo() override; + SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl, + SDValue Chain, SDValue Op1, SDValue Op2, + SDValue Op3, unsigned Align, bool isVolatile, + bool AlwaysInline, + MachinePointerInfo DstPtrInfo, + MachinePointerInfo SrcPtrInfo) const override; + SDValue EmitTargetCodeForMemmove(SelectionDAG &DAG, const SDLoc &dl, + SDValue Chain, SDValue Op1, SDValue Op2, + SDValue Op3, unsigned Align, bool isVolatile, + MachinePointerInfo DstPtrInfo, + MachinePointerInfo SrcPtrInfo) const override; + SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &DL, + SDValue Chain, SDValue Op1, SDValue Op2, + SDValue Op3, unsigned Align, bool IsVolatile, + MachinePointerInfo DstPtrInfo) const override; }; } // end namespace llvm diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp index c95af88c6f43..a249ccf17638 100644 --- 
a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp @@ -1,9 +1,8 @@ //=- WebAssemblySetP2AlignOperands.cpp - Set alignments on loads and stores -=// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -14,6 +13,7 @@ #include "MCTargetDesc/WebAssemblyMCTargetDesc.h" #include "WebAssembly.h" +#include "WebAssemblyInstrInfo.h" #include "WebAssemblyMachineFunctionInfo.h" #include "llvm/CodeGen/MachineBlockFrequencyInfo.h" #include "llvm/CodeGen/MachineMemOperand.h" @@ -54,7 +54,7 @@ FunctionPass *llvm::createWebAssemblySetP2AlignOperands() { return new WebAssemblySetP2AlignOperands(); } -static void RewriteP2Align(MachineInstr &MI, unsigned OperandNo) { +static void rewriteP2Align(MachineInstr &MI, unsigned OperandNo) { assert(MI.getOperand(OperandNo).getImm() == 0 && "ISel should set p2align operands to 0"); assert(MI.hasOneMemOperand() && @@ -84,114 +84,11 @@ bool WebAssemblySetP2AlignOperands::runOnMachineFunction(MachineFunction &MF) { for (auto &MBB : MF) { for (auto &MI : MBB) { - switch (MI.getOpcode()) { - case WebAssembly::LOAD_I32: - case WebAssembly::LOAD_I64: - case WebAssembly::LOAD_F32: - case WebAssembly::LOAD_F64: - case WebAssembly::LOAD_v16i8: - case WebAssembly::LOAD_v8i16: - case WebAssembly::LOAD_v4i32: - case WebAssembly::LOAD_v2i64: - case WebAssembly::LOAD_v4f32: - case WebAssembly::LOAD_v2f64: - case WebAssembly::LOAD8_S_I32: - case WebAssembly::LOAD8_U_I32: - case WebAssembly::LOAD16_S_I32: - case WebAssembly::LOAD16_U_I32: - case 
WebAssembly::LOAD8_S_I64: - case WebAssembly::LOAD8_U_I64: - case WebAssembly::LOAD16_S_I64: - case WebAssembly::LOAD16_U_I64: - case WebAssembly::LOAD32_S_I64: - case WebAssembly::LOAD32_U_I64: - case WebAssembly::ATOMIC_LOAD_I32: - case WebAssembly::ATOMIC_LOAD8_U_I32: - case WebAssembly::ATOMIC_LOAD16_U_I32: - case WebAssembly::ATOMIC_LOAD_I64: - case WebAssembly::ATOMIC_LOAD8_U_I64: - case WebAssembly::ATOMIC_LOAD16_U_I64: - case WebAssembly::ATOMIC_LOAD32_U_I64: - case WebAssembly::ATOMIC_RMW8_U_ADD_I32: - case WebAssembly::ATOMIC_RMW8_U_ADD_I64: - case WebAssembly::ATOMIC_RMW8_U_SUB_I32: - case WebAssembly::ATOMIC_RMW8_U_SUB_I64: - case WebAssembly::ATOMIC_RMW8_U_AND_I32: - case WebAssembly::ATOMIC_RMW8_U_AND_I64: - case WebAssembly::ATOMIC_RMW8_U_OR_I32: - case WebAssembly::ATOMIC_RMW8_U_OR_I64: - case WebAssembly::ATOMIC_RMW8_U_XOR_I32: - case WebAssembly::ATOMIC_RMW8_U_XOR_I64: - case WebAssembly::ATOMIC_RMW8_U_XCHG_I32: - case WebAssembly::ATOMIC_RMW8_U_XCHG_I64: - case WebAssembly::ATOMIC_RMW8_U_CMPXCHG_I32: - case WebAssembly::ATOMIC_RMW8_U_CMPXCHG_I64: - case WebAssembly::ATOMIC_RMW16_U_ADD_I32: - case WebAssembly::ATOMIC_RMW16_U_ADD_I64: - case WebAssembly::ATOMIC_RMW16_U_SUB_I32: - case WebAssembly::ATOMIC_RMW16_U_SUB_I64: - case WebAssembly::ATOMIC_RMW16_U_AND_I32: - case WebAssembly::ATOMIC_RMW16_U_AND_I64: - case WebAssembly::ATOMIC_RMW16_U_OR_I32: - case WebAssembly::ATOMIC_RMW16_U_OR_I64: - case WebAssembly::ATOMIC_RMW16_U_XOR_I32: - case WebAssembly::ATOMIC_RMW16_U_XOR_I64: - case WebAssembly::ATOMIC_RMW16_U_XCHG_I32: - case WebAssembly::ATOMIC_RMW16_U_XCHG_I64: - case WebAssembly::ATOMIC_RMW16_U_CMPXCHG_I32: - case WebAssembly::ATOMIC_RMW16_U_CMPXCHG_I64: - case WebAssembly::ATOMIC_RMW_ADD_I32: - case WebAssembly::ATOMIC_RMW32_U_ADD_I64: - case WebAssembly::ATOMIC_RMW_SUB_I32: - case WebAssembly::ATOMIC_RMW32_U_SUB_I64: - case WebAssembly::ATOMIC_RMW_AND_I32: - case WebAssembly::ATOMIC_RMW32_U_AND_I64: - case WebAssembly::ATOMIC_RMW_OR_I32: - 
case WebAssembly::ATOMIC_RMW32_U_OR_I64: - case WebAssembly::ATOMIC_RMW_XOR_I32: - case WebAssembly::ATOMIC_RMW32_U_XOR_I64: - case WebAssembly::ATOMIC_RMW_XCHG_I32: - case WebAssembly::ATOMIC_RMW32_U_XCHG_I64: - case WebAssembly::ATOMIC_RMW_CMPXCHG_I32: - case WebAssembly::ATOMIC_RMW32_U_CMPXCHG_I64: - case WebAssembly::ATOMIC_RMW_ADD_I64: - case WebAssembly::ATOMIC_RMW_SUB_I64: - case WebAssembly::ATOMIC_RMW_AND_I64: - case WebAssembly::ATOMIC_RMW_OR_I64: - case WebAssembly::ATOMIC_RMW_XOR_I64: - case WebAssembly::ATOMIC_RMW_XCHG_I64: - case WebAssembly::ATOMIC_RMW_CMPXCHG_I64: - case WebAssembly::ATOMIC_NOTIFY: - case WebAssembly::ATOMIC_WAIT_I32: - case WebAssembly::ATOMIC_WAIT_I64: - RewriteP2Align(MI, WebAssembly::LoadP2AlignOperandNo); - break; - case WebAssembly::STORE_I32: - case WebAssembly::STORE_I64: - case WebAssembly::STORE_F32: - case WebAssembly::STORE_F64: - case WebAssembly::STORE_v16i8: - case WebAssembly::STORE_v8i16: - case WebAssembly::STORE_v4i32: - case WebAssembly::STORE_v2i64: - case WebAssembly::STORE_v4f32: - case WebAssembly::STORE_v2f64: - case WebAssembly::STORE8_I32: - case WebAssembly::STORE16_I32: - case WebAssembly::STORE8_I64: - case WebAssembly::STORE16_I64: - case WebAssembly::STORE32_I64: - case WebAssembly::ATOMIC_STORE_I32: - case WebAssembly::ATOMIC_STORE8_I32: - case WebAssembly::ATOMIC_STORE16_I32: - case WebAssembly::ATOMIC_STORE_I64: - case WebAssembly::ATOMIC_STORE8_I64: - case WebAssembly::ATOMIC_STORE16_I64: - case WebAssembly::ATOMIC_STORE32_I64: - RewriteP2Align(MI, WebAssembly::StoreP2AlignOperandNo); - break; - default: - break; + int16_t P2AlignOpNum = WebAssembly::getNamedOperandIdx( + MI.getOpcode(), WebAssembly::OpName::p2align); + if (P2AlignOpNum != -1) { + rewriteP2Align(MI, P2AlignOpNum); + Changed = true; } } } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp index 98133e2153a0..196a74565285 100644 --- 
a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblySubtarget.cpp - WebAssembly Subtarget Information ------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -45,6 +44,11 @@ WebAssemblySubtarget::WebAssemblySubtarget(const Triple &TT, InstrInfo(initializeSubtargetDependencies(FS)), TSInfo(), TLInfo(TM, *this) {} +bool WebAssemblySubtarget::enableAtomicExpand() const { + // If atomics are disabled, atomic ops are lowered instead of expanded + return hasAtomics(); +} + bool WebAssemblySubtarget::enableMachineScheduler() const { // Disable the MachineScheduler for now. Even with ShouldTrackPressure set and // enableMachineSchedDefaultSched overridden, it appears to have an overall diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h index 0a0c04609ac4..8db2120f9834 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h @@ -1,9 +1,8 @@ //=- WebAssemblySubtarget.h - Define Subtarget for the WebAssembly -*- C++ -*-// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -23,11 +22,16 @@ #include "llvm/CodeGen/TargetSubtargetInfo.h" #include <string> +#define GET_SUBTARGETINFO_ENUM #define GET_SUBTARGETINFO_HEADER #include "WebAssemblyGenSubtargetInfo.inc" namespace llvm { +// Defined in WebAssemblyGenSubtargetInfo.inc. +extern const SubtargetFeatureKV + WebAssemblyFeatureKV[WebAssembly::NumSubtargetFeatures]; + class WebAssemblySubtarget final : public WebAssemblyGenSubtargetInfo { enum SIMDEnum { NoSIMD, @@ -39,6 +43,10 @@ class WebAssemblySubtarget final : public WebAssemblyGenSubtargetInfo { bool HasNontrappingFPToInt = false; bool HasSignExt = false; bool HasExceptionHandling = false; + bool HasBulkMemory = false; + bool HasMultivalue = false; + bool HasMutableGlobals = false; + bool HasTailCall = false; /// String name of used CPU. std::string CPUString; @@ -77,6 +85,8 @@ public: return &getInstrInfo()->getRegisterInfo(); } const Triple &getTargetTriple() const { return TargetTriple; } + bool enableAtomicExpand() const override; + bool enableIndirectBrExpand() const override { return true; } bool enableMachineScheduler() const override; bool useAA() const override; @@ -90,6 +100,10 @@ public: bool hasNontrappingFPToInt() const { return HasNontrappingFPToInt; } bool hasSignExt() const { return HasSignExt; } bool hasExceptionHandling() const { return HasExceptionHandling; } + bool hasBulkMemory() const { return HasBulkMemory; } + bool hasMultivalue() const { return HasMultivalue; } + bool hasMutableGlobals() const { return HasMutableGlobals; } + bool hasTailCall() const { return HasTailCall; } /// Parses features string setting specified subtarget options. Definition of /// function is auto generated by tblgen. 
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp index 3bf8dd40892c..7e65368e671a 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp @@ -1,9 +1,8 @@ //===- WebAssemblyTargetMachine.cpp - Define TargetMachine for WebAssembly -==// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -14,9 +13,12 @@ #include "WebAssemblyTargetMachine.h" #include "MCTargetDesc/WebAssemblyMCTargetDesc.h" +#include "TargetInfo/WebAssemblyTargetInfo.h" #include "WebAssembly.h" +#include "WebAssemblyMachineFunctionInfo.h" #include "WebAssemblyTargetObjectFile.h" #include "WebAssemblyTargetTransformInfo.h" +#include "llvm/CodeGen/MIRParser/MIParser.h" #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/RegAllocRegistry.h" @@ -25,6 +27,7 @@ #include "llvm/Support/TargetRegistry.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/LowerAtomic.h" #include "llvm/Transforms/Utils.h" using namespace llvm; @@ -58,19 +61,18 @@ extern "C" void LLVMInitializeWebAssemblyTarget() { initializeOptimizeReturnedPass(PR); initializeWebAssemblyArgumentMovePass(PR); initializeWebAssemblySetP2AlignOperandsPass(PR); - initializeWebAssemblyEHRestoreStackPointerPass(PR); initializeWebAssemblyReplacePhysRegsPass(PR); initializeWebAssemblyPrepareForLiveIntervalsPass(PR); initializeWebAssemblyOptimizeLiveIntervalsPass(PR); 
initializeWebAssemblyMemIntrinsicResultsPass(PR); initializeWebAssemblyRegStackifyPass(PR); initializeWebAssemblyRegColoringPass(PR); - initializeWebAssemblyExplicitLocalsPass(PR); initializeWebAssemblyFixIrreducibleControlFlowPass(PR); initializeWebAssemblyLateEHPreparePass(PR); initializeWebAssemblyExceptionInfoPass(PR); initializeWebAssemblyCFGSortPass(PR); initializeWebAssemblyCFGStackifyPass(PR); + initializeWebAssemblyExplicitLocalsPass(PR); initializeWebAssemblyLowerBrUnlessPass(PR); initializeWebAssemblyRegNumberingPass(PR); initializeWebAssemblyPeepholePass(PR); @@ -81,13 +83,22 @@ extern "C" void LLVMInitializeWebAssemblyTarget() { // WebAssembly Lowering public interface. //===----------------------------------------------------------------------===// -static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) { +static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM, + const Triple &TT) { if (!RM.hasValue()) { // Default to static relocation model. This should always be more optimial // than PIC since the static linker can determine all global addresses and // assume direct function calls. return Reloc::Static; } + + if (!TT.isOSEmscripten()) { + // Relocation modes other than static are currently implemented in a way + // that only works for Emscripten, so disable them if we aren't targeting + // Emscripten. + return Reloc::Static; + } + return *RM; } @@ -100,7 +111,7 @@ WebAssemblyTargetMachine::WebAssemblyTargetMachine( : LLVMTargetMachine(T, TT.isArch64Bit() ? "e-m:e-p:64:64-i64:64-n32:64-S128" : "e-m:e-p:32:32-i64:64-n32:64-S128", - TT, CPU, FS, Options, getEffectiveRelocModel(RM), + TT, CPU, FS, Options, getEffectiveRelocModel(RM, TT), getEffectiveCodeModel(CM, CodeModel::Large), OL), TLOF(new WebAssemblyTargetObjectFile()) { // WebAssembly type-checks instructions, but a noreturn function with a return @@ -122,7 +133,17 @@ WebAssemblyTargetMachine::WebAssemblyTargetMachine( // splitting and tail merging. 
} -WebAssemblyTargetMachine::~WebAssemblyTargetMachine() {} +WebAssemblyTargetMachine::~WebAssemblyTargetMachine() = default; // anchor. + +const WebAssemblySubtarget * +WebAssemblyTargetMachine::getSubtargetImpl(std::string CPU, + std::string FS) const { + auto &I = SubtargetMap[CPU + FS]; + if (!I) { + I = llvm::make_unique<WebAssemblySubtarget>(TargetTriple, CPU, FS, *this); + } + return I.get(); +} const WebAssemblySubtarget * WebAssemblyTargetMachine::getSubtargetImpl(const Function &F) const { @@ -136,33 +157,141 @@ WebAssemblyTargetMachine::getSubtargetImpl(const Function &F) const { ? FSAttr.getValueAsString().str() : TargetFS; - auto &I = SubtargetMap[CPU + FS]; - if (!I) { - // This needs to be done before we create a new subtarget since any - // creation will depend on the TM and the code generation flags on the - // function that reside in TargetOptions. - resetTargetOptions(F); - I = llvm::make_unique<WebAssemblySubtarget>(TargetTriple, CPU, FS, *this); - } - return I.get(); + // This needs to be done before we create a new subtarget since any + // creation will depend on the TM and the code generation flags on the + // function that reside in TargetOptions. + resetTargetOptions(F); + + return getSubtargetImpl(CPU, FS); } namespace { -class StripThreadLocal final : public ModulePass { - // The default thread model for wasm is single, where thread-local variables - // are identical to regular globals and should be treated the same. So this - // pass just converts all GlobalVariables to NotThreadLocal + +class CoalesceFeaturesAndStripAtomics final : public ModulePass { + // Take the union of all features used in the module and use it for each + // function individually, since having multiple feature sets in one module + // currently does not make sense for WebAssembly. If atomics are not enabled, + // also strip atomic operations and thread local storage. 
static char ID; + WebAssemblyTargetMachine *WasmTM; public: - StripThreadLocal() : ModulePass(ID) {} + CoalesceFeaturesAndStripAtomics(WebAssemblyTargetMachine *WasmTM) + : ModulePass(ID), WasmTM(WasmTM) {} + bool runOnModule(Module &M) override { - for (auto &GV : M.globals()) - GV.setThreadLocalMode(GlobalValue::ThreadLocalMode::NotThreadLocal); + FeatureBitset Features = coalesceFeatures(M); + + std::string FeatureStr = getFeatureString(Features); + for (auto &F : M) + replaceFeatures(F, FeatureStr); + + bool StrippedAtomics = false; + bool StrippedTLS = false; + + if (!Features[WebAssembly::FeatureAtomics]) + StrippedAtomics = stripAtomics(M); + + if (!Features[WebAssembly::FeatureBulkMemory]) + StrippedTLS = stripThreadLocals(M); + + if (StrippedAtomics && !StrippedTLS) + stripThreadLocals(M); + else if (StrippedTLS && !StrippedAtomics) + stripAtomics(M); + + recordFeatures(M, Features, StrippedAtomics || StrippedTLS); + + // Conservatively assume we have made some change + return true; + } + +private: + FeatureBitset coalesceFeatures(const Module &M) { + FeatureBitset Features = + WasmTM + ->getSubtargetImpl(WasmTM->getTargetCPU(), + WasmTM->getTargetFeatureString()) + ->getFeatureBits(); + for (auto &F : M) + Features |= WasmTM->getSubtargetImpl(F)->getFeatureBits(); + return Features; + } + + std::string getFeatureString(const FeatureBitset &Features) { + std::string Ret; + for (const SubtargetFeatureKV &KV : WebAssemblyFeatureKV) { + if (Features[KV.Value]) + Ret += (StringRef("+") + KV.Key + ",").str(); + } + return Ret; + } + + void replaceFeatures(Function &F, const std::string &Features) { + F.removeFnAttr("target-features"); + F.removeFnAttr("target-cpu"); + F.addFnAttr("target-features", Features); + } + + bool stripAtomics(Module &M) { + // Detect whether any atomics will be lowered, since there is no way to tell + // whether the LowerAtomic pass lowers e.g. stores. 
+ bool Stripped = false; + for (auto &F : M) { + for (auto &B : F) { + for (auto &I : B) { + if (I.isAtomic()) { + Stripped = true; + goto done; + } + } + } + } + + done: + if (!Stripped) + return false; + + LowerAtomicPass Lowerer; + FunctionAnalysisManager FAM; + for (auto &F : M) + Lowerer.run(F, FAM); + return true; } + + bool stripThreadLocals(Module &M) { + bool Stripped = false; + for (auto &GV : M.globals()) { + if (GV.getThreadLocalMode() != + GlobalValue::ThreadLocalMode::NotThreadLocal) { + Stripped = true; + GV.setThreadLocalMode(GlobalValue::ThreadLocalMode::NotThreadLocal); + } + } + return Stripped; + } + + void recordFeatures(Module &M, const FeatureBitset &Features, bool Stripped) { + for (const SubtargetFeatureKV &KV : WebAssemblyFeatureKV) { + std::string MDKey = (StringRef("wasm-feature-") + KV.Key).str(); + if (KV.Value == WebAssembly::FeatureAtomics && Stripped) { + // "atomics" is special: code compiled without atomics may have had its + // atomics lowered to nonatomic operations. In that case, atomics is + // disallowed to prevent unsafe linking with atomics-enabled objects. + assert(!Features[WebAssembly::FeatureAtomics] || + !Features[WebAssembly::FeatureBulkMemory]); + M.addModuleFlag(Module::ModFlagBehavior::Error, MDKey, + wasm::WASM_FEATURE_PREFIX_DISALLOWED); + } else if (Features[KV.Value]) { + // Otherwise features are marked Used or not mentioned + M.addModuleFlag(Module::ModFlagBehavior::Error, MDKey, + wasm::WASM_FEATURE_PREFIX_USED); + } + } + } }; -char StripThreadLocal::ID = 0; +char CoalesceFeaturesAndStripAtomics::ID = 0; /// WebAssembly Code Generator Pass Configuration Options. 
class WebAssemblyPassConfig final : public TargetPassConfig { @@ -181,6 +310,12 @@ public: void addPostRegAlloc() override; bool addGCPasses() override { return false; } void addPreEmitPass() override; + + // No reg alloc + bool addRegAssignmentFast() override { return false; } + + // No reg alloc + bool addRegAssignmentOptimized() override { return false; } }; } // end anonymous namespace @@ -204,15 +339,11 @@ FunctionPass *WebAssemblyPassConfig::createTargetRegisterAllocator(bool) { //===----------------------------------------------------------------------===// void WebAssemblyPassConfig::addIRPasses() { - if (TM->Options.ThreadModel == ThreadModel::Single) { - // In "single" mode, atomics get lowered to non-atomics. - addPass(createLowerAtomicPass()); - addPass(new StripThreadLocal()); - } else { - // Expand some atomic operations. WebAssemblyTargetLowering has hooks which - // control specifically what gets lowered. - addPass(createAtomicExpandPass()); - } + // Runs LowerAtomicPass if necessary + addPass(new CoalesceFeaturesAndStripAtomics(&getWebAssemblyTargetMachine())); + + // This is a no-op if atomics are not used in the module + addPass(createAtomicExpandPass()); // Add signatures to prototype-less function declarations addPass(createWebAssemblyAddMissingPrototypes()); @@ -246,6 +377,9 @@ void WebAssemblyPassConfig::addIRPasses() { addPass(createWebAssemblyLowerEmscriptenEHSjLj(EnableEmException, EnableEmSjLj)); + // Expand indirectbr instructions to switches. + addPass(createIndirectBrExpandPass()); + TargetPassConfig::addIRPasses(); } @@ -279,20 +413,16 @@ void WebAssemblyPassConfig::addPostRegAlloc() { disablePass(&PatchableFunctionID); disablePass(&ShrinkWrapID); + // This pass hurts code size for wasm because it can generate irreducible + // control flow. 
+ disablePass(&MachineBlockPlacementID); + TargetPassConfig::addPostRegAlloc(); } void WebAssemblyPassConfig::addPreEmitPass() { TargetPassConfig::addPreEmitPass(); - // Restore __stack_pointer global after an exception is thrown. - addPass(createWebAssemblyEHRestoreStackPointer()); - - // Now that we have a prologue and epilogue and all frame indices are - // rewritten, eliminate SP and FP. This allows them to be stackified, - // colored, and numbered with the rest of the registers. - addPass(createWebAssemblyReplacePhysRegs()); - // Rewrite pseudo call_indirect instructions as real instructions. // This needs to run before register stackification, because we change the // order of the arguments. @@ -302,8 +432,15 @@ void WebAssemblyPassConfig::addPreEmitPass() { addPass(createWebAssemblyFixIrreducibleControlFlow()); // Do various transformations for exception handling. + // Every CFG-changing optimizations should come before this. addPass(createWebAssemblyLateEHPrepare()); + // Now that we have a prologue and epilogue and all frame indices are + // rewritten, eliminate SP and FP. This allows them to be stackified, + // colored, and numbered with the rest of the registers. + addPass(createWebAssemblyReplacePhysRegs()); + + // Preparations and optimizations related to register stackification. if (getOptLevel() != CodeGenOpt::None) { // LiveIntervals isn't commonly run this late. Re-establish preconditions. addPass(createWebAssemblyPrepareForLiveIntervals()); @@ -327,9 +464,6 @@ void WebAssemblyPassConfig::addPreEmitPass() { addPass(createWebAssemblyRegColoring()); } - // Insert explicit local.get and local.set operators. - addPass(createWebAssemblyExplicitLocals()); - // Sort the blocks of the CFG into topological order, a prerequisite for // BLOCK and LOOP markers. addPass(createWebAssemblyCFGSort()); @@ -337,6 +471,9 @@ void WebAssemblyPassConfig::addPreEmitPass() { // Insert BLOCK and LOOP markers. 
addPass(createWebAssemblyCFGStackify()); + // Insert explicit local.get and local.set operators. + addPass(createWebAssemblyExplicitLocals()); + // Lower br_unless into br_if. addPass(createWebAssemblyLowerBrUnless()); @@ -347,3 +484,24 @@ void WebAssemblyPassConfig::addPreEmitPass() { // Create a mapping from LLVM CodeGen virtual registers to wasm registers. addPass(createWebAssemblyRegNumbering()); } + +yaml::MachineFunctionInfo * +WebAssemblyTargetMachine::createDefaultFuncInfoYAML() const { + return new yaml::WebAssemblyFunctionInfo(); +} + +yaml::MachineFunctionInfo *WebAssemblyTargetMachine::convertFuncInfoToYAML( + const MachineFunction &MF) const { + const auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>(); + return new yaml::WebAssemblyFunctionInfo(*MFI); +} + +bool WebAssemblyTargetMachine::parseMachineFunctionInfo( + const yaml::MachineFunctionInfo &MFI, PerFunctionMIParsingState &PFS, + SMDiagnostic &Error, SMRange &SourceRange) const { + const auto &YamlMFI = + reinterpret_cast<const yaml::WebAssemblyFunctionInfo &>(MFI); + MachineFunction &MF = PFS.MF; + MF.getInfo<WebAssemblyFunctionInfo>()->initializeBaseYamlFields(YamlMFI); + return false; +} diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h index 41001e7a0cc7..850e6b9a9e9e 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h @@ -1,9 +1,8 @@ // WebAssemblyTargetMachine.h - Define TargetMachine for WebAssembly -*- C++ -*- // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -33,6 +32,9 @@ public: bool JIT); ~WebAssemblyTargetMachine() override; + + const WebAssemblySubtarget *getSubtargetImpl(std::string CPU, + std::string FS) const; const WebAssemblySubtarget * getSubtargetImpl(const Function &F) const override; @@ -46,6 +48,14 @@ public: TargetTransformInfo getTargetTransformInfo(const Function &F) override; bool usesPhysRegsForPEI() const override { return false; } + + yaml::MachineFunctionInfo *createDefaultFuncInfoYAML() const override; + yaml::MachineFunctionInfo * + convertFuncInfoToYAML(const MachineFunction &MF) const override; + bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, + PerFunctionMIParsingState &PFS, + SMDiagnostic &Error, + SMRange &SourceRange) const override; }; } // end namespace llvm diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp index 0459bfca418d..ad57c600db10 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyTargetObjectFile.cpp - WebAssembly Object Info ---------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h index ce744ba8b8e8..f46bb2040a7d 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h @@ -1,9 +1,8 @@ //===-- WebAssemblyTargetObjectFile.h - WebAssembly Object Info -*- C++ -*-===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp index 4a2777cc3a9f..46ef765ce0f4 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyTargetTransformInfo.cpp - WebAssembly-specific TTI -----===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -51,7 +50,7 @@ unsigned WebAssemblyTTIImpl::getArithmeticInstrCost( unsigned Cost = BasicTTIImplBase<WebAssemblyTTIImpl>::getArithmeticInstrCost( Opcode, Ty, Opd1Info, Opd2Info, Opd1PropInfo, Opd2PropInfo); - if (VectorType *VTy = dyn_cast<VectorType>(Ty)) { + if (auto *VTy = dyn_cast<VectorType>(Ty)) { switch (Opcode) { case Instruction::LShr: case Instruction::AShr: diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h index 4300ca3defbf..1b11b4b631eb 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h @@ -1,9 +1,8 @@ //==- WebAssemblyTargetTransformInfo.h - WebAssembly-specific TTI -*- C++ -*-=// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp index ada6fb9a96d7..e9d88d4818a5 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp @@ -1,9 +1,8 @@ //===-- WebAssemblyUtilities.cpp - WebAssembly Utility Functions ----------===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -25,70 +24,6 @@ const char *const WebAssembly::StdTerminateFn = "_ZSt9terminatev"; const char *const WebAssembly::PersonalityWrapperFn = "_Unwind_Wasm_CallPersonality"; -bool WebAssembly::isArgument(const MachineInstr &MI) { - switch (MI.getOpcode()) { - case WebAssembly::ARGUMENT_i32: - case WebAssembly::ARGUMENT_i32_S: - case WebAssembly::ARGUMENT_i64: - case WebAssembly::ARGUMENT_i64_S: - case WebAssembly::ARGUMENT_f32: - case WebAssembly::ARGUMENT_f32_S: - case WebAssembly::ARGUMENT_f64: - case WebAssembly::ARGUMENT_f64_S: - case WebAssembly::ARGUMENT_v16i8: - case WebAssembly::ARGUMENT_v16i8_S: - case WebAssembly::ARGUMENT_v8i16: - case WebAssembly::ARGUMENT_v8i16_S: - case WebAssembly::ARGUMENT_v4i32: - case WebAssembly::ARGUMENT_v4i32_S: - case WebAssembly::ARGUMENT_v2i64: - case WebAssembly::ARGUMENT_v2i64_S: - case WebAssembly::ARGUMENT_v4f32: - case WebAssembly::ARGUMENT_v4f32_S: - case WebAssembly::ARGUMENT_v2f64: - case WebAssembly::ARGUMENT_v2f64_S: - return true; - default: - return false; - } -} - -bool WebAssembly::isCopy(const MachineInstr &MI) { - switch (MI.getOpcode()) { - case WebAssembly::COPY_I32: - case WebAssembly::COPY_I32_S: - case WebAssembly::COPY_I64: - case WebAssembly::COPY_I64_S: - case WebAssembly::COPY_F32: - case WebAssembly::COPY_F32_S: - case WebAssembly::COPY_F64: - case WebAssembly::COPY_F64_S: - case WebAssembly::COPY_V128: - case WebAssembly::COPY_V128_S: - return true; - default: - return false; - } -} - -bool WebAssembly::isTee(const MachineInstr &MI) { - switch (MI.getOpcode()) { - case WebAssembly::TEE_I32: - case WebAssembly::TEE_I32_S: - case WebAssembly::TEE_I64: - case WebAssembly::TEE_I64_S: - case WebAssembly::TEE_F32: - 
case WebAssembly::TEE_F32_S: - case WebAssembly::TEE_F64: - case WebAssembly::TEE_F64_S: - case WebAssembly::TEE_V128: - case WebAssembly::TEE_V128_S: - return true; - default: - return false; - } -} - /// Test whether MI is a child of some other node in an expression tree. bool WebAssembly::isChild(const MachineInstr &MI, const WebAssemblyFunctionInfo &MFI) { @@ -102,201 +37,20 @@ bool WebAssembly::isChild(const MachineInstr &MI, MFI.isVRegStackified(Reg); } -bool WebAssembly::isCallDirect(const MachineInstr &MI) { - switch (MI.getOpcode()) { - case WebAssembly::CALL_VOID: - case WebAssembly::CALL_VOID_S: - case WebAssembly::CALL_I32: - case WebAssembly::CALL_I32_S: - case WebAssembly::CALL_I64: - case WebAssembly::CALL_I64_S: - case WebAssembly::CALL_F32: - case WebAssembly::CALL_F32_S: - case WebAssembly::CALL_F64: - case WebAssembly::CALL_F64_S: - case WebAssembly::CALL_v16i8: - case WebAssembly::CALL_v16i8_S: - case WebAssembly::CALL_v8i16: - case WebAssembly::CALL_v8i16_S: - case WebAssembly::CALL_v4i32: - case WebAssembly::CALL_v4i32_S: - case WebAssembly::CALL_v2i64: - case WebAssembly::CALL_v2i64_S: - case WebAssembly::CALL_v4f32: - case WebAssembly::CALL_v4f32_S: - case WebAssembly::CALL_v2f64: - case WebAssembly::CALL_v2f64_S: - case WebAssembly::CALL_EXCEPT_REF: - case WebAssembly::CALL_EXCEPT_REF_S: - return true; - default: - return false; - } -} - -bool WebAssembly::isCallIndirect(const MachineInstr &MI) { - switch (MI.getOpcode()) { - case WebAssembly::CALL_INDIRECT_VOID: - case WebAssembly::CALL_INDIRECT_VOID_S: - case WebAssembly::CALL_INDIRECT_I32: - case WebAssembly::CALL_INDIRECT_I32_S: - case WebAssembly::CALL_INDIRECT_I64: - case WebAssembly::CALL_INDIRECT_I64_S: - case WebAssembly::CALL_INDIRECT_F32: - case WebAssembly::CALL_INDIRECT_F32_S: - case WebAssembly::CALL_INDIRECT_F64: - case WebAssembly::CALL_INDIRECT_F64_S: - case WebAssembly::CALL_INDIRECT_v16i8: - case WebAssembly::CALL_INDIRECT_v16i8_S: - case WebAssembly::CALL_INDIRECT_v8i16: 
- case WebAssembly::CALL_INDIRECT_v8i16_S: - case WebAssembly::CALL_INDIRECT_v4i32: - case WebAssembly::CALL_INDIRECT_v4i32_S: - case WebAssembly::CALL_INDIRECT_v2i64: - case WebAssembly::CALL_INDIRECT_v2i64_S: - case WebAssembly::CALL_INDIRECT_v4f32: - case WebAssembly::CALL_INDIRECT_v4f32_S: - case WebAssembly::CALL_INDIRECT_v2f64: - case WebAssembly::CALL_INDIRECT_v2f64_S: - case WebAssembly::CALL_INDIRECT_EXCEPT_REF: - case WebAssembly::CALL_INDIRECT_EXCEPT_REF_S: - return true; - default: - return false; - } -} - -unsigned WebAssembly::getCalleeOpNo(const MachineInstr &MI) { - switch (MI.getOpcode()) { - case WebAssembly::CALL_VOID: - case WebAssembly::CALL_VOID_S: - case WebAssembly::CALL_INDIRECT_VOID: - case WebAssembly::CALL_INDIRECT_VOID_S: - return 0; - case WebAssembly::CALL_I32: - case WebAssembly::CALL_I32_S: - case WebAssembly::CALL_I64: - case WebAssembly::CALL_I64_S: - case WebAssembly::CALL_F32: - case WebAssembly::CALL_F32_S: - case WebAssembly::CALL_F64: - case WebAssembly::CALL_F64_S: - case WebAssembly::CALL_v16i8: - case WebAssembly::CALL_v16i8_S: - case WebAssembly::CALL_v8i16: - case WebAssembly::CALL_v8i16_S: - case WebAssembly::CALL_v4i32: - case WebAssembly::CALL_v4i32_S: - case WebAssembly::CALL_v2i64: - case WebAssembly::CALL_v2i64_S: - case WebAssembly::CALL_v4f32: - case WebAssembly::CALL_v4f32_S: - case WebAssembly::CALL_v2f64: - case WebAssembly::CALL_v2f64_S: - case WebAssembly::CALL_EXCEPT_REF: - case WebAssembly::CALL_EXCEPT_REF_S: - case WebAssembly::CALL_INDIRECT_I32: - case WebAssembly::CALL_INDIRECT_I32_S: - case WebAssembly::CALL_INDIRECT_I64: - case WebAssembly::CALL_INDIRECT_I64_S: - case WebAssembly::CALL_INDIRECT_F32: - case WebAssembly::CALL_INDIRECT_F32_S: - case WebAssembly::CALL_INDIRECT_F64: - case WebAssembly::CALL_INDIRECT_F64_S: - case WebAssembly::CALL_INDIRECT_v16i8: - case WebAssembly::CALL_INDIRECT_v16i8_S: - case WebAssembly::CALL_INDIRECT_v8i16: - case WebAssembly::CALL_INDIRECT_v8i16_S: - case 
WebAssembly::CALL_INDIRECT_v4i32: - case WebAssembly::CALL_INDIRECT_v4i32_S: - case WebAssembly::CALL_INDIRECT_v2i64: - case WebAssembly::CALL_INDIRECT_v2i64_S: - case WebAssembly::CALL_INDIRECT_v4f32: - case WebAssembly::CALL_INDIRECT_v4f32_S: - case WebAssembly::CALL_INDIRECT_v2f64: - case WebAssembly::CALL_INDIRECT_v2f64_S: - case WebAssembly::CALL_INDIRECT_EXCEPT_REF: - case WebAssembly::CALL_INDIRECT_EXCEPT_REF_S: - return 1; - default: - llvm_unreachable("Not a call instruction"); - } -} - -bool WebAssembly::isMarker(const MachineInstr &MI) { - switch (MI.getOpcode()) { - case WebAssembly::BLOCK: - case WebAssembly::BLOCK_S: - case WebAssembly::END_BLOCK: - case WebAssembly::END_BLOCK_S: - case WebAssembly::LOOP: - case WebAssembly::LOOP_S: - case WebAssembly::END_LOOP: - case WebAssembly::END_LOOP_S: - case WebAssembly::TRY: - case WebAssembly::TRY_S: - case WebAssembly::END_TRY: - case WebAssembly::END_TRY_S: - return true; - default: - return false; - } -} - -bool WebAssembly::isThrow(const MachineInstr &MI) { - switch (MI.getOpcode()) { - case WebAssembly::THROW_I32: - case WebAssembly::THROW_I32_S: - case WebAssembly::THROW_I64: - case WebAssembly::THROW_I64_S: - return true; - default: - return false; - } -} - -bool WebAssembly::isRethrow(const MachineInstr &MI) { - switch (MI.getOpcode()) { - case WebAssembly::RETHROW: - case WebAssembly::RETHROW_S: - case WebAssembly::RETHROW_TO_CALLER: - case WebAssembly::RETHROW_TO_CALLER_S: - return true; - default: - return false; - } -} - -bool WebAssembly::isCatch(const MachineInstr &MI) { - switch (MI.getOpcode()) { - case WebAssembly::CATCH_I32: - case WebAssembly::CATCH_I32_S: - case WebAssembly::CATCH_I64: - case WebAssembly::CATCH_I64_S: - case WebAssembly::CATCH_ALL: - case WebAssembly::CATCH_ALL_S: - return true; - default: - return false; - } -} - bool WebAssembly::mayThrow(const MachineInstr &MI) { switch (MI.getOpcode()) { - case WebAssembly::THROW_I32: - case WebAssembly::THROW_I32_S: - case 
WebAssembly::THROW_I64: - case WebAssembly::THROW_I64_S: + case WebAssembly::THROW: + case WebAssembly::THROW_S: case WebAssembly::RETHROW: case WebAssembly::RETHROW_S: return true; } - if (isCallIndirect(MI)) + if (isCallIndirect(MI.getOpcode())) return true; if (!MI.isCall()) return false; - const MachineOperand &MO = MI.getOperand(getCalleeOpNo(MI)); + const MachineOperand &MO = MI.getOperand(getCalleeOpNo(MI.getOpcode())); assert(MO.isGlobal()); const auto *F = dyn_cast<Function>(MO.getGlobal()); if (!F) @@ -307,43 +61,8 @@ bool WebAssembly::mayThrow(const MachineInstr &MI) { if (F->getName() == CxaBeginCatchFn || F->getName() == PersonalityWrapperFn || F->getName() == ClangCallTerminateFn || F->getName() == StdTerminateFn) return false; - return true; -} - -bool WebAssembly::isCatchTerminatePad(const MachineBasicBlock &MBB) { - if (!MBB.isEHPad()) - return false; - bool SeenCatch = false; - for (auto &MI : MBB) { - if (MI.getOpcode() == WebAssembly::CATCH_I32 || - MI.getOpcode() == WebAssembly::CATCH_I64 || - MI.getOpcode() == WebAssembly::CATCH_I32_S || - MI.getOpcode() == WebAssembly::CATCH_I64_S) - SeenCatch = true; - if (SeenCatch && MI.isCall()) { - const MachineOperand &CalleeOp = MI.getOperand(getCalleeOpNo(MI)); - if (CalleeOp.isGlobal() && - CalleeOp.getGlobal()->getName() == ClangCallTerminateFn) - return true; - } - } - return false; -} -bool WebAssembly::isCatchAllTerminatePad(const MachineBasicBlock &MBB) { - if (!MBB.isEHPad()) - return false; - bool SeenCatchAll = false; - for (auto &MI : MBB) { - if (MI.getOpcode() == WebAssembly::CATCH_ALL || - MI.getOpcode() == WebAssembly::CATCH_ALL_S) - SeenCatchAll = true; - if (SeenCatchAll && MI.isCall()) { - const MachineOperand &CalleeOp = MI.getOperand(getCalleeOpNo(MI)); - if (CalleeOp.isGlobal() && - CalleeOp.getGlobal()->getName() == StdTerminateFn) - return true; - } - } - return false; + // TODO Can we exclude call instructions that are marked as 'nounwind' in the + // original LLVm IR? 
(Even when the callee may throw) + return true; } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h index cdb7873e9013..26cf84de89b9 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h @@ -1,9 +1,8 @@ //===-- WebAssemblyUtilities - WebAssembly Utility Functions ---*- C++ -*-====// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -24,29 +23,9 @@ class WebAssemblyFunctionInfo; namespace WebAssembly { -bool isArgument(const MachineInstr &MI); -bool isCopy(const MachineInstr &MI); -bool isTee(const MachineInstr &MI); bool isChild(const MachineInstr &MI, const WebAssemblyFunctionInfo &MFI); -bool isCallDirect(const MachineInstr &MI); -bool isCallIndirect(const MachineInstr &MI); -bool isMarker(const MachineInstr &MI); -bool isThrow(const MachineInstr &MI); -bool isRethrow(const MachineInstr &MI); -bool isCatch(const MachineInstr &MI); bool mayThrow(const MachineInstr &MI); -/// Returns the operand number of a callee, assuming the argument is a call -/// instruction. -unsigned getCalleeOpNo(const MachineInstr &MI); - -/// Returns if the given BB is a single BB terminate pad which starts with a -/// 'catch' instruction. -bool isCatchTerminatePad(const MachineBasicBlock &MBB); -/// Returns if the given BB is a single BB terminate pad which starts with a -/// 'catch_all' insrtruction. 
-bool isCatchAllTerminatePad(const MachineBasicBlock &MBB); - // Exception-related function names extern const char *const ClangCallTerminateFn; extern const char *const CxaBeginCatchFn; diff --git a/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt b/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt index 364c871f61b0..701b347bcbd7 100644 --- a/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt +++ b/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt @@ -6,21 +6,13 @@ # error). The format is # <name> <attributes> # comment -# Computed gotos are not supported (Cannot select BlockAddress/BRIND) -20071220-1.c +# blockaddress without an indirectbr still can't be supported +20071220-1.c O2 # Relocation against a BB address 20071220-2.c -20040302-1.c -20041214-1.c O0 -20071210-1.c -920501-4.c -920501-5.c -comp-goto-1.c -980526-1.c 990208-1.c label13.C O0 label13a.C O0 label3.C -pr42462.C O0 # WebAssembly hasn't implemented (will never?) __builtin_return_address 20010122-1.c @@ -75,7 +67,6 @@ pr41935.c 920501-3.c 920728-1.c pr28865.c -widechar-2.c attr-alias-1.C attr-alias-2.C attr-ifunc-1.C @@ -86,7 +77,6 @@ complit12.C va-arg-pack-1.C va-arg-pack-len-1.C builtin-line1.C -builtin-location.C devirt-6.C # bad main signature devirt-13.C # bad main signature devirt-14.C # bad main signature @@ -94,11 +84,22 @@ devirt-21.C # bad main signature devirt-23.C # bad main signature lifetime2.C # violates C++ DR1696 +# WASI doesn't have stdjmp.h yet +pr56982.c +simd-2.C + +# WASI doesn't have pthread.h yet +thread_local3.C +thread_local3g.C +thread_local4.C +thread_local4g.C +thread_local5.C +thread_local5g.C + # Untriaged C++ failures spec5.C addr1.C ef_test.C -friend18.C member2.C new39.C new40.C |