aboutsummaryrefslogtreecommitdiff
path: root/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
diff options
context:
space:
mode:
authorDimitry Andric <dim@FreeBSD.org>2021-11-19 20:06:13 +0000
committerDimitry Andric <dim@FreeBSD.org>2021-11-19 20:06:13 +0000
commitc0981da47d5696fe36474fcf86b4ce03ae3ff818 (patch)
treef42add1021b9f2ac6a69ac7cf6c4499962739a45 /llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
parent344a3780b2e33f6ca763666c380202b18aab72a3 (diff)
downloadsrc-c0981da47d5696fe36474fcf86b4ce03ae3ff818.tar.gz
src-c0981da47d5696fe36474fcf86b4ce03ae3ff818.zip
Diffstat (limited to 'llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp')
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp562
1 files changed, 354 insertions, 208 deletions
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 62c53c0051ae..0df8f3e0e09c 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -25,7 +25,6 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
-#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
@@ -33,6 +32,7 @@
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
@@ -88,7 +88,9 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
}
}
if (Subtarget->hasReferenceTypes()) {
- for (auto T : {MVT::externref, MVT::funcref}) {
+  // We need custom load and store lowering for externref, funcref, and
+  // Other. The MVT::Other here represents tables of reference types.
+ for (auto T : {MVT::externref, MVT::funcref, MVT::Other}) {
setOperationAction(ISD::LOAD, T, Custom);
setOperationAction(ISD::STORE, T, Custom);
}
@@ -213,8 +215,8 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
setOperationAction(ISD::SELECT_CC, T, Expand);
// Expand integer operations supported for scalars but not SIMD
- for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV,
- ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
+ for (auto Op :
+ {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
setOperationAction(Op, T, Expand);
@@ -223,8 +225,15 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
setOperationAction(Op, T, Legal);
- // And we have popcnt for i8x16
+ // And we have popcnt for i8x16. It can be used to expand ctlz/cttz.
setOperationAction(ISD::CTPOP, MVT::v16i8, Legal);
+ setOperationAction(ISD::CTLZ, MVT::v16i8, Expand);
+ setOperationAction(ISD::CTTZ, MVT::v16i8, Expand);
+
+ // Custom lower bit counting operations for other types to scalarize them.
+ for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP})
+ for (auto T : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
+ setOperationAction(Op, T, Custom);
// Expand float operations supported for scalars but not SIMD
for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
@@ -303,9 +312,7 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
}
- // And some truncating stores are legal as well
- setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
- setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Legal);
}
// Don't do anything clever with build_pairs
@@ -338,6 +345,24 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
setMinimumJumpTableEntries(2);
}
+MVT WebAssemblyTargetLowering::getPointerTy(const DataLayout &DL,
+ uint32_t AS) const {
+ if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
+ return MVT::externref;
+ if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
+ return MVT::funcref;
+ return TargetLowering::getPointerTy(DL, AS);
+}
+
+MVT WebAssemblyTargetLowering::getPointerMemTy(const DataLayout &DL,
+ uint32_t AS) const {
+ if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
+ return MVT::externref;
+ if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
+ return MVT::funcref;
+ return TargetLowering::getPointerMemTy(DL, AS);
+}
+
TargetLowering::AtomicExpansionKind
WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
// We have wasm instructions for these
@@ -551,7 +576,21 @@ LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
if (IsIndirect) {
auto FnPtr = CallParams.getOperand(0);
CallParams.RemoveOperand(0);
- CallParams.addOperand(FnPtr);
+
+  // For funcrefs, call_indirect is done through __funcref_call_table and the
+  // funcref is always installed in slot 0 of the table. Therefore, instead of
+  // having the function pointer added at the end of the params list, a zero
+  // (the index into __funcref_call_table) is added.
+ if (IsFuncrefCall) {
+ Register RegZero =
+ MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
+ MachineInstrBuilder MIBC0 =
+ BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);
+
+ BB->insert(CallResults.getIterator(), MIBC0);
+ MachineInstrBuilder(MF, CallParams).addReg(RegZero);
+ } else
+ CallParams.addOperand(FnPtr);
}
for (auto Def : CallResults.defs())
@@ -770,6 +809,13 @@ bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
(ExtT == MVT::v2i64 && MemT == MVT::v2i32);
}
+bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
+ const GlobalAddressSDNode *GA) const {
+ // Wasm doesn't support function addresses with offsets
+ const GlobalValue *GV = GA->getGlobal();
+ return isa<Function>(GV) ? false : TargetLowering::isOffsetFoldingLegal(GA);
+}
+
EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
LLVMContext &C,
EVT VT) const {
@@ -823,6 +869,45 @@ bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
}
}
+void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
+ const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
+ const SelectionDAG &DAG, unsigned Depth) const {
+ switch (Op.getOpcode()) {
+ default:
+ break;
+ case ISD::INTRINSIC_WO_CHAIN: {
+ unsigned IntNo = Op.getConstantOperandVal(0);
+ switch (IntNo) {
+ default:
+ break;
+ case Intrinsic::wasm_bitmask: {
+ unsigned BitWidth = Known.getBitWidth();
+ EVT VT = Op.getOperand(1).getSimpleValueType();
+ unsigned PossibleBits = VT.getVectorNumElements();
+ APInt ZeroMask = APInt::getHighBitsSet(BitWidth, BitWidth - PossibleBits);
+ Known.Zero |= ZeroMask;
+ break;
+ }
+ }
+ }
+ }
+}
+
+TargetLoweringBase::LegalizeTypeAction
+WebAssemblyTargetLowering::getPreferredVectorAction(MVT VT) const {
+ if (VT.isFixedLengthVector()) {
+ MVT EltVT = VT.getVectorElementType();
+ // We have legal vector types with these lane types, so widening the
+ // vector would let us use some of the lanes directly without having to
+ // extend or truncate values.
+ if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
+ EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
+ return TypeWidenVector;
+ }
+
+ return TargetLoweringBase::getPreferredVectorAction(VT);
+}
+
//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//
@@ -1088,7 +1173,8 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Lastly, if this is a call to a funcref we need to add an instruction
// table.set to the chain and transform the call.
- if (CLI.CB && isFuncrefType(CLI.CB->getCalledOperand()->getType())) {
+ if (CLI.CB &&
+ WebAssembly::isFuncrefType(CLI.CB->getCalledOperand()->getType())) {
// In the absence of function references proposal where a funcref call is
// lowered to call_ref, using reference types we generate a table.set to set
// the funcref to a special table used solely for this purpose, followed by
@@ -1106,7 +1192,8 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
WebAssemblyISD::TABLE_SET, DL, DAG.getVTList(MVT::Other), TableSetOps,
MVT::funcref,
// Machine Mem Operand args
- MachinePointerInfo(WasmAddressSpace::FUNCREF),
+ MachinePointerInfo(
+ WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF),
CLI.CB->getCalledOperand()->getPointerAlignment(DAG.getDataLayout()),
MachineMemOperand::MOStore);
@@ -1325,6 +1412,10 @@ SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
return LowerLoad(Op, DAG);
case ISD::STORE:
return LowerStore(Op, DAG);
+ case ISD::CTPOP:
+ case ISD::CTLZ:
+ case ISD::CTTZ:
+ return DAG.UnrollVectorOp(Op.getNode());
}
}
@@ -1344,14 +1435,78 @@ static Optional<unsigned> IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG) {
return WebAssemblyFrameLowering::getLocalForStackObject(MF, FI->getIndex());
}
-bool WebAssemblyTargetLowering::isFuncrefType(const Type *Ty) {
- return isa<PointerType>(Ty) &&
- Ty->getPointerAddressSpace() == WasmAddressSpace::FUNCREF;
+static bool IsWebAssemblyTable(SDValue Op) {
+ const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
+ if (GA && WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace())) {
+ const GlobalValue *Value = GA->getGlobal();
+ const Type *Ty = Value->getValueType();
+
+ if (Ty->isArrayTy() && WebAssembly::isRefType(Ty->getArrayElementType()))
+ return true;
+ }
+ return false;
+}
+
+// This function will accept as Op any access to a table, so Op can
+// be the actual table or an offset into the table.
+static bool IsWebAssemblyTableWithOffset(SDValue Op) {
+ if (Op->getOpcode() == ISD::ADD && Op->getNumOperands() == 2)
+ return (Op->getOperand(1).getSimpleValueType() == MVT::i32 &&
+ IsWebAssemblyTableWithOffset(Op->getOperand(0))) ||
+ (Op->getOperand(0).getSimpleValueType() == MVT::i32 &&
+ IsWebAssemblyTableWithOffset(Op->getOperand(1)));
+
+ return IsWebAssemblyTable(Op);
}
-bool WebAssemblyTargetLowering::isExternrefType(const Type *Ty) {
- return isa<PointerType>(Ty) &&
- Ty->getPointerAddressSpace() == WasmAddressSpace::EXTERNREF;
+// Helper for table pattern matching used in LowerStore and LowerLoad
+bool WebAssemblyTargetLowering::MatchTableForLowering(SelectionDAG &DAG,
+ const SDLoc &DL,
+ const SDValue &Base,
+ GlobalAddressSDNode *&GA,
+ SDValue &Idx) const {
+ // We expect the following graph for a load of the form:
+ // table[<var> + <constant offset>]
+ //
+ // Case 1:
+ // externref = load t1
+ // t1: i32 = add t2, i32:<constant offset>
+ // t2: i32 = add tX, table
+ //
+ // This is in some cases simplified to just:
+ // Case 2:
+ // externref = load t1
+ // t1: i32 = add t2, i32:tX
+ //
+ // So, unfortunately we need to check for both cases and if we are in the
+ // first case extract the table GlobalAddressNode and build a new node tY
+ // that's tY: i32 = add i32:<constant offset>, i32:tX
+ //
+ if (IsWebAssemblyTable(Base)) {
+ GA = cast<GlobalAddressSDNode>(Base);
+ Idx = DAG.getConstant(0, DL, MVT::i32);
+ } else {
+ GA = dyn_cast<GlobalAddressSDNode>(Base->getOperand(0));
+ if (GA) {
+ // We are in Case 2 above.
+ Idx = Base->getOperand(1);
+ if (!Idx || GA->getNumValues() != 1 || Idx->getNumValues() != 1)
+ return false;
+ } else {
+ // This might be Case 1 above (or an error)
+ SDValue V = Base->getOperand(0);
+ GA = dyn_cast<GlobalAddressSDNode>(V->getOperand(1));
+
+ if (V->getOpcode() != ISD::ADD || V->getNumOperands() != 2 || !GA)
+ return false;
+
+ SDValue IdxV = DAG.getNode(ISD::ADD, DL, MVT::i32, Base->getOperand(1),
+ V->getOperand(0));
+ Idx = IdxV;
+ }
+ }
+
+ return true;
}
SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
@@ -1362,6 +1517,26 @@ SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
const SDValue &Base = SN->getBasePtr();
const SDValue &Offset = SN->getOffset();
+ if (IsWebAssemblyTableWithOffset(Base)) {
+ if (!Offset->isUndef())
+ report_fatal_error(
+ "unexpected offset when loading from webassembly table", false);
+
+ SDValue Idx;
+ GlobalAddressSDNode *GA;
+
+ if (!MatchTableForLowering(DAG, DL, Base, GA, Idx))
+ report_fatal_error("failed pattern matching for lowering table store",
+ false);
+
+ SDVTList Tys = DAG.getVTList(MVT::Other);
+ SDValue TableSetOps[] = {SN->getChain(), SDValue(GA, 0), Idx, Value};
+ SDValue TableSet =
+ DAG.getMemIntrinsicNode(WebAssemblyISD::TABLE_SET, DL, Tys, TableSetOps,
+ SN->getMemoryVT(), SN->getMemOperand());
+ return TableSet;
+ }
+
if (IsWebAssemblyGlobal(Base)) {
if (!Offset->isUndef())
report_fatal_error("unexpected offset when storing to webassembly global",
@@ -1394,6 +1569,26 @@ SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
const SDValue &Base = LN->getBasePtr();
const SDValue &Offset = LN->getOffset();
+ if (IsWebAssemblyTableWithOffset(Base)) {
+ if (!Offset->isUndef())
+ report_fatal_error(
+ "unexpected offset when loading from webassembly table", false);
+
+ GlobalAddressSDNode *GA;
+ SDValue Idx;
+
+ if (!MatchTableForLowering(DAG, DL, Base, GA, Idx))
+ report_fatal_error("failed pattern matching for lowering table load",
+ false);
+
+ SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
+ SDValue TableGetOps[] = {LN->getChain(), SDValue(GA, 0), Idx};
+ SDValue TableGet =
+ DAG.getMemIntrinsicNode(WebAssemblyISD::TABLE_GET, DL, Tys, TableGetOps,
+ LN->getMemoryVT(), LN->getMemOperand());
+ return TableGet;
+ }
+
if (IsWebAssemblyGlobal(Base)) {
if (!Offset->isUndef())
report_fatal_error(
@@ -1468,7 +1663,7 @@ SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
if (verifyReturnAddressArgumentIsConstant(Op, DAG))
return SDValue();
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
MakeLibCallOptions CallOptions;
return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
{DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
@@ -1495,7 +1690,6 @@ WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
const auto *GA = cast<GlobalAddressSDNode>(Op);
- MVT PtrVT = getPointerTy(DAG.getDataLayout());
MachineFunction &MF = DAG.getMachineFunction();
if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
@@ -1517,20 +1711,43 @@ WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
false);
}
- auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
- : WebAssembly::GLOBAL_GET_I32;
- const char *BaseName = MF.createExternalSymbolName("__tls_base");
+ auto model = GV->getThreadLocalMode();
- SDValue BaseAddr(
- DAG.getMachineNode(GlobalGet, DL, PtrVT,
- DAG.getTargetExternalSymbol(BaseName, PtrVT)),
- 0);
+ // Unsupported TLS modes
+ assert(model != GlobalValue::NotThreadLocal);
+ assert(model != GlobalValue::InitialExecTLSModel);
+
+ if (model == GlobalValue::LocalExecTLSModel ||
+ model == GlobalValue::LocalDynamicTLSModel ||
+ (model == GlobalValue::GeneralDynamicTLSModel &&
+ getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))) {
+ // For DSO-local TLS variables we use offset from __tls_base
+
+ MVT PtrVT = getPointerTy(DAG.getDataLayout());
+ auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
+ : WebAssembly::GLOBAL_GET_I32;
+ const char *BaseName = MF.createExternalSymbolName("__tls_base");
+
+ SDValue BaseAddr(
+ DAG.getMachineNode(GlobalGet, DL, PtrVT,
+ DAG.getTargetExternalSymbol(BaseName, PtrVT)),
+ 0);
+
+ SDValue TLSOffset = DAG.getTargetGlobalAddress(
+ GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
+ SDValue SymOffset =
+ DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, TLSOffset);
+
+ return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymOffset);
+ }
- SDValue TLSOffset = DAG.getTargetGlobalAddress(
- GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
- SDValue SymAddr = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, TLSOffset);
+ assert(model == GlobalValue::GeneralDynamicTLSModel);
- return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
+ EVT VT = Op.getValueType();
+ return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
+ DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
+ GA->getOffset(),
+ WebAssemblyII::MO_GOT_TLS));
}
SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
@@ -1563,14 +1780,13 @@ SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
DAG.getTargetExternalSymbol(BaseName, PtrVT));
SDValue SymAddr = DAG.getNode(
- WebAssemblyISD::WrapperPIC, DL, VT,
+ WebAssemblyISD::WrapperREL, DL, VT,
DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
OperandFlags));
return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
- } else {
- OperandFlags = WebAssemblyII::MO_GOT;
}
+ OperandFlags = WebAssemblyII::MO_GOT;
}
return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
@@ -1640,21 +1856,6 @@ SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
MachinePointerInfo(SV));
}
-static SDValue getCppExceptionSymNode(SDValue Op, unsigned TagIndex,
- SelectionDAG &DAG) {
- // We only support C++ exceptions for now
- int Tag =
- cast<ConstantSDNode>(Op.getOperand(TagIndex).getNode())->getZExtValue();
- if (Tag != WebAssembly::CPP_EXCEPTION)
- llvm_unreachable("Invalid tag: We only support C++ exceptions for now");
- auto &MF = DAG.getMachineFunction();
- const auto &TLI = DAG.getTargetLoweringInfo();
- MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
- const char *SymName = MF.createExternalSymbolName("__cpp_exception");
- return DAG.getNode(WebAssemblyISD::Wrapper, SDLoc(Op), PtrVT,
- DAG.getTargetExternalSymbol(SymName, PtrVT));
-}
-
SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
@@ -1662,10 +1863,10 @@ SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
switch (Op.getOpcode()) {
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN:
- IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ IntNo = Op.getConstantOperandVal(1);
break;
case ISD::INTRINSIC_WO_CHAIN:
- IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ IntNo = Op.getConstantOperandVal(0);
break;
default:
llvm_unreachable("Invalid intrinsic");
@@ -1677,38 +1878,22 @@ SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
return SDValue(); // Don't custom lower most intrinsics.
case Intrinsic::wasm_lsda: {
- EVT VT = Op.getValueType();
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
- auto &Context = MF.getMMI().getContext();
- MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
- Twine(MF.getFunctionNumber()));
- return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
- DAG.getMCSymbol(S, PtrVT));
- }
-
- case Intrinsic::wasm_throw: {
- SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
- return DAG.getNode(WebAssemblyISD::THROW, DL,
- MVT::Other, // outchain type
- {
- Op.getOperand(0), // inchain
- SymNode, // exception symbol
- Op.getOperand(3) // thrown value
- });
- }
-
- case Intrinsic::wasm_catch: {
- SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
- return DAG.getNode(WebAssemblyISD::CATCH, DL,
- {
- MVT::i32, // outchain type
- MVT::Other // return value
- },
- {
- Op.getOperand(0), // inchain
- SymNode // exception symbol
- });
+ auto PtrVT = getPointerTy(MF.getDataLayout());
+ const char *SymName = MF.createExternalSymbolName(
+ "GCC_except_table" + std::to_string(MF.getFunctionNumber()));
+ if (isPositionIndependent()) {
+ SDValue Node = DAG.getTargetExternalSymbol(
+ SymName, PtrVT, WebAssemblyII::MO_MEMORY_BASE_REL);
+ const char *BaseName = MF.createExternalSymbolName("__memory_base");
+ SDValue BaseAddr =
+ DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
+ DAG.getTargetExternalSymbol(BaseName, PtrVT));
+ SDValue SymAddr =
+ DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, Node);
+ return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
+ }
+ SDValue Node = DAG.getTargetExternalSymbol(SymName, PtrVT);
+ return DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, Node);
}
case Intrinsic::wasm_shuffle: {
@@ -1774,8 +1959,76 @@ WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
Op.getOperand(1));
}
+static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG) {
+ SDLoc DL(Op);
+ if (Op.getValueType() != MVT::v2f64)
+ return SDValue();
+
+ auto GetConvertedLane = [](SDValue Op, unsigned &Opcode, SDValue &SrcVec,
+ unsigned &Index) -> bool {
+ switch (Op.getOpcode()) {
+ case ISD::SINT_TO_FP:
+ Opcode = WebAssemblyISD::CONVERT_LOW_S;
+ break;
+ case ISD::UINT_TO_FP:
+ Opcode = WebAssemblyISD::CONVERT_LOW_U;
+ break;
+ case ISD::FP_EXTEND:
+ Opcode = WebAssemblyISD::PROMOTE_LOW;
+ break;
+ default:
+ return false;
+ }
+
+ auto ExtractVector = Op.getOperand(0);
+ if (ExtractVector.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+ return false;
+
+ if (!isa<ConstantSDNode>(ExtractVector.getOperand(1).getNode()))
+ return false;
+
+ SrcVec = ExtractVector.getOperand(0);
+ Index = ExtractVector.getConstantOperandVal(1);
+ return true;
+ };
+
+ unsigned LHSOpcode, RHSOpcode, LHSIndex, RHSIndex;
+ SDValue LHSSrcVec, RHSSrcVec;
+ if (!GetConvertedLane(Op.getOperand(0), LHSOpcode, LHSSrcVec, LHSIndex) ||
+ !GetConvertedLane(Op.getOperand(1), RHSOpcode, RHSSrcVec, RHSIndex))
+ return SDValue();
+
+ if (LHSOpcode != RHSOpcode)
+ return SDValue();
+
+ MVT ExpectedSrcVT;
+ switch (LHSOpcode) {
+ case WebAssemblyISD::CONVERT_LOW_S:
+ case WebAssemblyISD::CONVERT_LOW_U:
+ ExpectedSrcVT = MVT::v4i32;
+ break;
+ case WebAssemblyISD::PROMOTE_LOW:
+ ExpectedSrcVT = MVT::v4f32;
+ break;
+ }
+ if (LHSSrcVec.getValueType() != ExpectedSrcVT)
+ return SDValue();
+
+ auto Src = LHSSrcVec;
+ if (LHSIndex != 0 || RHSIndex != 1 || LHSSrcVec != RHSSrcVec) {
+ // Shuffle the source vector so that the converted lanes are the low lanes.
+ Src = DAG.getVectorShuffle(
+ ExpectedSrcVT, DL, LHSSrcVec, RHSSrcVec,
+ {static_cast<int>(LHSIndex), static_cast<int>(RHSIndex) + 4, -1, -1});
+ }
+ return DAG.getNode(LHSOpcode, DL, MVT::v2f64, Src);
+}
+
SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
+ if (auto ConvertLow = LowerConvertLow(Op, DAG))
+ return ConvertLow;
+
SDLoc DL(Op);
const EVT VecT = Op.getValueType();
const EVT LaneT = Op.getOperand(0).getValueType();
@@ -1901,12 +2154,8 @@ SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
size_t NumShuffleLanes = 0;
if (ShuffleCounts.size()) {
std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
- ShuffleCounts.erase(std::remove_if(ShuffleCounts.begin(),
- ShuffleCounts.end(),
- [&](const auto &Pair) {
- return Pair.first == ShuffleSrc1;
- }),
- ShuffleCounts.end());
+ llvm::erase_if(ShuffleCounts,
+ [&](const auto &Pair) { return Pair.first == ShuffleSrc1; });
}
if (ShuffleCounts.size()) {
size_t AdditionalShuffleLanes;
@@ -1974,7 +2223,23 @@ SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
SmallVector<SDValue, 16> ConstLanes;
for (const SDValue &Lane : Op->op_values()) {
if (IsConstant(Lane)) {
- ConstLanes.push_back(Lane);
+ // Values may need to be fixed so that they will sign extend to be
+ // within the expected range during ISel. Check whether the value is in
+ // bounds based on the lane bit width and if it is out of bounds, lop
+ // off the extra bits and subtract 2^n to reflect giving the high bit
+ // value -2^(n-1) rather than +2^(n-1). Skip the i64 case because it
+ // cannot possibly be out of range.
+ auto *Const = dyn_cast<ConstantSDNode>(Lane.getNode());
+ int64_t Val = Const ? Const->getSExtValue() : 0;
+ uint64_t LaneBits = 128 / Lanes;
+ assert((LaneBits == 64 || Val >= -(1ll << (LaneBits - 1))) &&
+ "Unexpected out of bounds negative value");
+ if (Const && LaneBits != 64 && Val > (1ll << (LaneBits - 1)) - 1) {
+ auto NewVal = ((uint64_t)Val % (1ll << LaneBits)) - (1ll << LaneBits);
+ ConstLanes.push_back(DAG.getConstant(NewVal, SDLoc(Lane), LaneT));
+ } else {
+ ConstLanes.push_back(Lane);
+ }
} else if (LaneT.isFloatingPoint()) {
ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
} else {
@@ -2227,120 +2492,6 @@ performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
}
static SDValue
-performVectorConvertLowCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- auto &DAG = DCI.DAG;
-
- EVT ResVT = N->getValueType(0);
- if (ResVT != MVT::v2f64)
- return SDValue();
-
- auto GetWasmConversionOp = [](unsigned Op) {
- switch (Op) {
- case ISD::SINT_TO_FP:
- return WebAssemblyISD::CONVERT_LOW_S;
- case ISD::UINT_TO_FP:
- return WebAssemblyISD::CONVERT_LOW_U;
- case ISD::FP_EXTEND:
- return WebAssemblyISD::PROMOTE_LOW;
- }
- llvm_unreachable("unexpected op");
- };
-
- if (N->getOpcode() == ISD::EXTRACT_SUBVECTOR) {
- // Combine this:
- //
- // (v2f64 (extract_subvector
- // (v4f64 ({s,u}int_to_fp (v4i32 $x))), 0))
- //
- // into (f64x2.convert_low_i32x4_{s,u} $x).
- //
- // Or this:
- //
- // (v2f64 (extract_subvector
- // (v4f64 (fp_extend (v4f32 $x))), 0))
- //
- // into (f64x2.promote_low_f32x4 $x).
- auto Conversion = N->getOperand(0);
- auto ConversionOp = Conversion.getOpcode();
- MVT ExpectedSourceType;
- switch (ConversionOp) {
- case ISD::SINT_TO_FP:
- case ISD::UINT_TO_FP:
- ExpectedSourceType = MVT::v4i32;
- break;
- case ISD::FP_EXTEND:
- ExpectedSourceType = MVT::v4f32;
- break;
- default:
- return SDValue();
- }
-
- if (Conversion.getValueType() != MVT::v4f64)
- return SDValue();
-
- auto Source = Conversion.getOperand(0);
- if (Source.getValueType() != ExpectedSourceType)
- return SDValue();
-
- auto IndexNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
- if (IndexNode == nullptr || IndexNode->getZExtValue() != 0)
- return SDValue();
-
- auto Op = GetWasmConversionOp(ConversionOp);
- return DAG.getNode(Op, SDLoc(N), ResVT, Source);
- }
-
- // Combine this:
- //
- // (v2f64 ({s,u}int_to_fp
- // (v2i32 (extract_subvector (v4i32 $x), 0))))
- //
- // into (f64x2.convert_low_i32x4_{s,u} $x).
- //
- // Or this:
- //
- // (v2f64 (fp_extend
- // (v2f32 (extract_subvector (v4f32 $x), 0))))
- //
- // into (f64x2.promote_low_f32x4 $x).
- auto ConversionOp = N->getOpcode();
- MVT ExpectedExtractType;
- MVT ExpectedSourceType;
- switch (ConversionOp) {
- case ISD::SINT_TO_FP:
- case ISD::UINT_TO_FP:
- ExpectedExtractType = MVT::v2i32;
- ExpectedSourceType = MVT::v4i32;
- break;
- case ISD::FP_EXTEND:
- ExpectedExtractType = MVT::v2f32;
- ExpectedSourceType = MVT::v4f32;
- break;
- default:
- llvm_unreachable("unexpected opcode");
- }
-
- auto Extract = N->getOperand(0);
- if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
- return SDValue();
-
- if (Extract.getValueType() != ExpectedExtractType)
- return SDValue();
-
- auto Source = Extract.getOperand(0);
- if (Source.getValueType() != ExpectedSourceType)
- return SDValue();
-
- auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
- if (IndexNode == nullptr || IndexNode->getZExtValue() != 0)
- return SDValue();
-
- unsigned Op = GetWasmConversionOp(ConversionOp);
- return DAG.getNode(Op, SDLoc(N), ResVT, Source);
-}
-
-static SDValue
performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
auto &DAG = DCI.DAG;
@@ -2470,11 +2621,6 @@ WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
return performVectorExtendCombine(N, DCI);
- case ISD::SINT_TO_FP:
- case ISD::UINT_TO_FP:
- case ISD::FP_EXTEND:
- case ISD::EXTRACT_SUBVECTOR:
- return performVectorConvertLowCombine(N, DCI);
case ISD::FP_TO_SINT_SAT:
case ISD::FP_TO_UINT_SAT:
case ISD::FP_ROUND: