Diffstat (limited to 'lib/Target/WebAssembly/WebAssemblyInstrSIMD.td')
-rw-r--r--  lib/Target/WebAssembly/WebAssemblyInstrSIMD.td  221
1 file changed, 187 insertions(+), 34 deletions(-)
diff --git a/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
index dd8930f079b0..fc5d73dac52e 100644
--- a/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
+++ b/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
@@ -40,47 +40,124 @@ def LaneIdx#SIZE : ImmLeaf<i32, "return 0 <= Imm && Imm < "#SIZE#";">;
//===----------------------------------------------------------------------===//
// Load: v128.load
-multiclass SIMDLoad<ValueType vec_t> {
- let mayLoad = 1, UseNamedOperandTable = 1 in
- defm LOAD_#vec_t :
+let mayLoad = 1, UseNamedOperandTable = 1 in
+defm LOAD_V128 :
+ SIMD_I<(outs V128:$dst), (ins P2Align:$p2align, offset32_op:$off, I32:$addr),
+ (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+ "v128.load\t$dst, ${off}(${addr})$p2align",
+ "v128.load\t$off$p2align", 0>;
+
+// Def load and store patterns from WebAssemblyInstrMemory.td for vector types
+foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in {
+def : LoadPatNoOffset<vec_t, load, LOAD_V128>;
+def : LoadPatImmOff<vec_t, load, regPlusImm, LOAD_V128>;
+def : LoadPatImmOff<vec_t, load, or_is_add, LOAD_V128>;
+def : LoadPatOffsetOnly<vec_t, load, LOAD_V128>;
+def : LoadPatGlobalAddrOffOnly<vec_t, load, LOAD_V128>;
+}
+
+// vNxM.load_splat
+multiclass SIMDLoadSplat<string vec, bits<32> simdop> {
+ let mayLoad = 1, UseNamedOperandTable = 1,
+ Predicates = [HasUnimplementedSIMD128] in
+ defm LOAD_SPLAT_#vec :
SIMD_I<(outs V128:$dst), (ins P2Align:$p2align, offset32_op:$off, I32:$addr),
(outs), (ins P2Align:$p2align, offset32_op:$off), [],
- "v128.load\t$dst, ${off}(${addr})$p2align",
- "v128.load\t$off$p2align", 0>;
+ vec#".load_splat\t$dst, ${off}(${addr})$p2align",
+ vec#".load_splat\t$off$p2align", simdop>;
}
-foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in {
-defm "" : SIMDLoad<vec_t>;
-
-// Def load and store patterns from WebAssemblyInstrMemory.td for vector types
-def : LoadPatNoOffset<vec_t, load, !cast<NI>("LOAD_"#vec_t)>;
-def : LoadPatImmOff<vec_t, load, regPlusImm, !cast<NI>("LOAD_"#vec_t)>;
-def : LoadPatImmOff<vec_t, load, or_is_add, !cast<NI>("LOAD_"#vec_t)>;
-def : LoadPatGlobalAddr<vec_t, load, !cast<NI>("LOAD_"#vec_t)>;
-def : LoadPatOffsetOnly<vec_t, load, !cast<NI>("LOAD_"#vec_t)>;
-def : LoadPatGlobalAddrOffOnly<vec_t, load, !cast<NI>("LOAD_"#vec_t)>;
+defm "" : SIMDLoadSplat<"v8x16", 194>;
+defm "" : SIMDLoadSplat<"v16x8", 195>;
+defm "" : SIMDLoadSplat<"v32x4", 196>;
+defm "" : SIMDLoadSplat<"v64x2", 197>;
+
+def wasm_load_splat_t : SDTypeProfile<1, 1, []>;
+def wasm_load_splat : SDNode<"WebAssemblyISD::LOAD_SPLAT", wasm_load_splat_t>;
+
+foreach args = [["v16i8", "i32", "extloadi8"], ["v8i16", "i32", "extloadi16"],
+ ["v4i32", "i32", "load"], ["v2i64", "i64", "load"],
+ ["v4f32", "f32", "load"], ["v2f64", "f64", "load"]] in
+def load_splat_#args[0] :
+ PatFrag<(ops node:$addr), (wasm_load_splat
+ (!cast<ValueType>(args[1]) (!cast<PatFrag>(args[2]) node:$addr)))>;
+
+let Predicates = [HasUnimplementedSIMD128] in
+foreach args = [["v16i8", "v8x16"], ["v8i16", "v16x8"], ["v4i32", "v32x4"],
+ ["v2i64", "v64x2"], ["v4f32", "v32x4"], ["v2f64", "v64x2"]] in {
+def : LoadPatNoOffset<!cast<ValueType>(args[0]),
+ !cast<PatFrag>("load_splat_"#args[0]),
+ !cast<NI>("LOAD_SPLAT_"#args[1])>;
+def : LoadPatImmOff<!cast<ValueType>(args[0]),
+ !cast<PatFrag>("load_splat_"#args[0]),
+ regPlusImm,
+ !cast<NI>("LOAD_SPLAT_"#args[1])>;
+def : LoadPatImmOff<!cast<ValueType>(args[0]),
+ !cast<PatFrag>("load_splat_"#args[0]),
+ or_is_add,
+ !cast<NI>("LOAD_SPLAT_"#args[1])>;
+def : LoadPatOffsetOnly<!cast<ValueType>(args[0]),
+ !cast<PatFrag>("load_splat_"#args[0]),
+ !cast<NI>("LOAD_SPLAT_"#args[1])>;
+def : LoadPatGlobalAddrOffOnly<!cast<ValueType>(args[0]),
+ !cast<PatFrag>("load_splat_"#args[0]),
+ !cast<NI>("LOAD_SPLAT_"#args[1])>;
}
-// Store: v128.store
-multiclass SIMDStore<ValueType vec_t> {
- let mayStore = 1, UseNamedOperandTable = 1 in
- defm STORE_#vec_t :
- SIMD_I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, V128:$vec),
+// Load and extend
+multiclass SIMDLoadExtend<ValueType vec_t, string name, bits<32> simdop> {
+ let mayLoad = 1, UseNamedOperandTable = 1,
+ Predicates = [HasUnimplementedSIMD128] in {
+ defm LOAD_EXTEND_S_#vec_t :
+ SIMD_I<(outs V128:$dst), (ins P2Align:$p2align, offset32_op:$off, I32:$addr),
+ (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+ name#"_s\t$dst, ${off}(${addr})$p2align",
+ name#"_s\t$off$p2align", simdop>;
+ defm LOAD_EXTEND_U_#vec_t :
+ SIMD_I<(outs V128:$dst), (ins P2Align:$p2align, offset32_op:$off, I32:$addr),
(outs), (ins P2Align:$p2align, offset32_op:$off), [],
- "v128.store\t${off}(${addr})$p2align, $vec",
- "v128.store\t$off$p2align", 1>;
+ name#"_u\t$dst, ${off}(${addr})$p2align",
+ name#"_u\t$off$p2align", !add(simdop, 1)>;
+ }
}
-foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in {
-defm "" : SIMDStore<vec_t>;
+defm "" : SIMDLoadExtend<v8i16, "i16x8.load8x8", 210>;
+defm "" : SIMDLoadExtend<v4i32, "i32x4.load16x4", 212>;
+defm "" : SIMDLoadExtend<v2i64, "i64x2.load32x2", 214>;
+
+let Predicates = [HasUnimplementedSIMD128] in
+foreach types = [[v8i16, i8], [v4i32, i16], [v2i64, i32]] in
+foreach exts = [["sextloadv", "_S"],
+ ["zextloadv", "_U"],
+ ["extloadv", "_U"]] in {
+def : LoadPatNoOffset<types[0], !cast<PatFrag>(exts[0]#types[1]),
+ !cast<NI>("LOAD_EXTEND"#exts[1]#"_"#types[0])>;
+def : LoadPatImmOff<types[0], !cast<PatFrag>(exts[0]#types[1]), regPlusImm,
+ !cast<NI>("LOAD_EXTEND"#exts[1]#"_"#types[0])>;
+def : LoadPatImmOff<types[0], !cast<PatFrag>(exts[0]#types[1]), or_is_add,
+ !cast<NI>("LOAD_EXTEND"#exts[1]#"_"#types[0])>;
+def : LoadPatOffsetOnly<types[0], !cast<PatFrag>(exts[0]#types[1]),
+ !cast<NI>("LOAD_EXTEND"#exts[1]#"_"#types[0])>;
+def : LoadPatGlobalAddrOffOnly<types[0], !cast<PatFrag>(exts[0]#types[1]),
+ !cast<NI>("LOAD_EXTEND"#exts[1]#"_"#types[0])>;
+}
+
+
+// Store: v128.store
+let mayStore = 1, UseNamedOperandTable = 1 in
+defm STORE_V128 :
+ SIMD_I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, V128:$vec),
+ (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+ "v128.store\t${off}(${addr})$p2align, $vec",
+ "v128.store\t$off$p2align", 1>;
+foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in {
// Def load and store patterns from WebAssemblyInstrMemory.td for vector types
-def : StorePatNoOffset<vec_t, store, !cast<NI>("STORE_"#vec_t)>;
-def : StorePatImmOff<vec_t, store, regPlusImm, !cast<NI>("STORE_"#vec_t)>;
-def : StorePatImmOff<vec_t, store, or_is_add, !cast<NI>("STORE_"#vec_t)>;
-def : StorePatGlobalAddr<vec_t, store, !cast<NI>("STORE_"#vec_t)>;
-def : StorePatOffsetOnly<vec_t, store, !cast<NI>("STORE_"#vec_t)>;
-def : StorePatGlobalAddrOffOnly<vec_t, store, !cast<NI>("STORE_"#vec_t)>;
+def : StorePatNoOffset<vec_t, store, STORE_V128>;
+def : StorePatImmOff<vec_t, store, regPlusImm, STORE_V128>;
+def : StorePatImmOff<vec_t, store, or_is_add, STORE_V128>;
+def : StorePatOffsetOnly<vec_t, store, STORE_V128>;
+def : StorePatGlobalAddrOffOnly<vec_t, store, STORE_V128>;
}
//===----------------------------------------------------------------------===//
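The LoadPat*/StorePat* classes instantiated throughout the hunk above are defined in WebAssemblyInstrMemory.td, not in this file. As a rough, hand-written sketch (not verbatim from that file), the no-offset and immediate-offset load variants look something like:

// Sketch only: select a load whose address has no foldable constant offset,
// leaving the instruction's alignment and offset fields at zero.
class LoadPatNoOffset<ValueType ty, PatFrag kind, NI inst> :
  Pat<(ty (kind I32:$addr)), (inst 0, 0, I32:$addr)>;

// Sketch only: fold a reg+imm address (regPlusImm or or_is_add above) into
// the instruction's offset field.
class LoadPatImmOff<ValueType ty, PatFrag kind, PatFrag operand, NI inst> :
  Pat<(ty (kind (operand I32:$addr, imm:$off))), (inst 0, imm:$off, I32:$addr)>;

The v128.load, load_splat, and load-and-extend patterns above all reuse the same classes; only the PatFrag (load, load_splat_*, or the *extload* fragments) and the selected instruction differ.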
@@ -90,7 +167,7 @@ def : StorePatGlobalAddrOffOnly<vec_t, store, !cast<NI>("STORE_"#vec_t)>;
// Constant: v128.const
multiclass ConstVec<ValueType vec_t, dag ops, dag pat, string args> {
let isMoveImm = 1, isReMaterializable = 1,
- Predicates = [HasSIMD128, HasUnimplementedSIMD128] in
+ Predicates = [HasUnimplementedSIMD128] in
defm CONST_V128_#vec_t : SIMD_I<(outs V128:$dst), ops, (outs), ops,
[(set V128:$dst, (vec_t pat))],
"v128.const\t$dst, "#args,
@@ -198,6 +275,19 @@ def : Pat<(vec_t (wasm_shuffle (vec_t V128:$x), (vec_t V128:$y),
(i32 LaneIdx32:$mE), (i32 LaneIdx32:$mF)))>;
}
+// Swizzle lanes: v8x16.swizzle
+def wasm_swizzle_t : SDTypeProfile<1, 2, []>;
+def wasm_swizzle : SDNode<"WebAssemblyISD::SWIZZLE", wasm_swizzle_t>;
+let Predicates = [HasUnimplementedSIMD128] in
+defm SWIZZLE :
+ SIMD_I<(outs V128:$dst), (ins V128:$src, V128:$mask), (outs), (ins),
+ [(set (v16i8 V128:$dst),
+ (wasm_swizzle (v16i8 V128:$src), (v16i8 V128:$mask)))],
+ "v8x16.swizzle\t$dst, $src, $mask", "v8x16.swizzle", 192>;
+
+def : Pat<(int_wasm_swizzle (v16i8 V128:$src), (v16i8 V128:$mask)),
+ (SWIZZLE V128:$src, V128:$mask)>;
+
// Create vector with identical lanes: splat
def splat2 : PatFrag<(ops node:$x), (build_vector node:$x, node:$x)>;
def splat4 : PatFrag<(ops node:$x), (build_vector
@@ -286,7 +376,7 @@ multiclass ExtractLaneExtended<string sign, bits<32> baseInst> {
}
defm "" : ExtractLaneExtended<"_s", 5>;
-let Predicates = [HasSIMD128, HasUnimplementedSIMD128] in
+let Predicates = [HasUnimplementedSIMD128] in
defm "" : ExtractLaneExtended<"_u", 6>;
defm "" : ExtractLane<v4i32, "i32x4", LaneIdx4, I32, 13>;
defm "" : ExtractLane<v2i64, "i64x2", LaneIdx2, I64, 16>;
@@ -472,6 +562,11 @@ defm OR : SIMDBitwise<or, "or", 78>;
defm XOR : SIMDBitwise<xor, "xor", 79>;
} // isCommutable = 1
+// Bitwise logic: v128.andnot
+def andnot : PatFrag<(ops node:$left, node:$right), (and $left, (vnot $right))>;
+let Predicates = [HasUnimplementedSIMD128] in
+defm ANDNOT : SIMDBitwise<andnot, "andnot", 216>;
+
// Bitwise select: v128.bitselect
foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in
defm BITSELECT_#vec_t :
@@ -655,7 +750,7 @@ defm ABS : SIMDUnaryFP<fabs, "abs", 149>;
defm NEG : SIMDUnaryFP<fneg, "neg", 150>;
// Square root: sqrt
-let Predicates = [HasSIMD128, HasUnimplementedSIMD128] in
+let Predicates = [HasUnimplementedSIMD128] in
defm SQRT : SIMDUnaryFP<fsqrt, "sqrt", 151>;
//===----------------------------------------------------------------------===//
@@ -679,7 +774,7 @@ let isCommutable = 1 in
defm MUL : SIMDBinaryFP<fmul, "mul", 156>;
// Division: div
-let Predicates = [HasSIMD128, HasUnimplementedSIMD128] in
+let Predicates = [HasUnimplementedSIMD128] in
defm DIV : SIMDBinaryFP<fdiv, "div", 157>;
// NaN-propagating minimum: min
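Several hunks in this diff shorten Predicates = [HasSIMD128, HasUnimplementedSIMD128] to just [HasUnimplementedSIMD128]. If the predicate is defined along these lines in WebAssemblyInstrInfo.td (a sketch, not verbatim), the extra HasSIMD128 entry is redundant, presumably because the subtarget treats unimplemented-simd128 as a strict superset of simd128:

// Sketch only: gates instructions from not-yet-standardized parts of the
// SIMD proposal behind -mattr=+unimplemented-simd128.
def HasUnimplementedSIMD128 :
    Predicate<"Subtarget->hasUnimplementedSIMD128()">,
    AssemblerPredicate<"FeatureUnimplementedSIMD128", "unimplemented-simd128">;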
@@ -712,6 +807,42 @@ defm "" : SIMDConvert<v4i32, v4f32, fp_to_uint, "i32x4.trunc_sat_f32x4_u", 172>;
defm "" : SIMDConvert<v2i64, v2f64, fp_to_sint, "i64x2.trunc_sat_f64x2_s", 173>;
defm "" : SIMDConvert<v2i64, v2f64, fp_to_uint, "i64x2.trunc_sat_f64x2_u", 174>;
+// Widening operations
+multiclass SIMDWiden<ValueType vec_t, string vec, ValueType arg_t, string arg,
+ bits<32> baseInst> {
+ defm "" : SIMDConvert<vec_t, arg_t, int_wasm_widen_low_signed,
+ vec#".widen_low_"#arg#"_s", baseInst>;
+ defm "" : SIMDConvert<vec_t, arg_t, int_wasm_widen_high_signed,
+ vec#".widen_high_"#arg#"_s", !add(baseInst, 1)>;
+ defm "" : SIMDConvert<vec_t, arg_t, int_wasm_widen_low_unsigned,
+ vec#".widen_low_"#arg#"_u", !add(baseInst, 2)>;
+ defm "" : SIMDConvert<vec_t, arg_t, int_wasm_widen_high_unsigned,
+ vec#".widen_high_"#arg#"_u", !add(baseInst, 3)>;
+}
+
+defm "" : SIMDWiden<v8i16, "i16x8", v16i8, "i8x16", 202>;
+defm "" : SIMDWiden<v4i32, "i32x4", v8i16, "i16x8", 206>;
+
+// Narrowing operations
+multiclass SIMDNarrow<ValueType vec_t, string vec, ValueType arg_t, string arg,
+ bits<32> baseInst> {
+ defm NARROW_S_#vec_t :
+ SIMD_I<(outs V128:$dst), (ins V128:$low, V128:$high), (outs), (ins),
+ [(set (vec_t V128:$dst), (vec_t (int_wasm_narrow_signed
+ (arg_t V128:$low), (arg_t V128:$high))))],
+ vec#".narrow_"#arg#"_s\t$dst, $low, $high", vec#".narrow_"#arg#"_s",
+ baseInst>;
+ defm NARROW_U_#vec_t :
+ SIMD_I<(outs V128:$dst), (ins V128:$low, V128:$high), (outs), (ins),
+ [(set (vec_t V128:$dst), (vec_t (int_wasm_narrow_unsigned
+ (arg_t V128:$low), (arg_t V128:$high))))],
+ vec#".narrow_"#arg#"_u\t$dst, $low, $high", vec#".narrow_"#arg#"_u",
+ !add(baseInst, 1)>;
+}
+
+defm "" : SIMDNarrow<v16i8, "i8x16", v8i16, "i16x8", 198>;
+defm "" : SIMDNarrow<v8i16, "i16x8", v4i32, "i32x4", 200>;
+
// Lower llvm.wasm.trunc.saturate.* to saturating instructions
def : Pat<(v4i32 (int_wasm_trunc_saturate_signed (v4f32 V128:$src))),
(fp_to_sint_v4i32_v4f32 (v4f32 V128:$src))>;
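To make the !add-based opcode pairing above concrete, hand-expanding the first SIMDNarrow instantiation (derived directly from the multiclass body, not quoted from generated records) gives:

// defm "" : SIMDNarrow<v16i8, "i8x16", v8i16, "i16x8", 198> produces:
//   i8x16.narrow_i16x8_s  (opcode 198), matching
//     (set (v16i8 V128:$dst),
//          (int_wasm_narrow_signed (v8i16 V128:$low), (v8i16 V128:$high)))
//   i8x16.narrow_i16x8_u  (opcode 199), matching the same dag with
//     int_wasm_narrow_unsigned instead.

The SIMDWiden instantiations follow the same scheme, covering four opcodes per base value (low/high crossed with signed/unsigned).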
@@ -732,3 +863,25 @@ foreach t2 = !foldl(
)
) in
def : Pat<(t1 (bitconvert (t2 V128:$v))), (t1 V128:$v)>;
+
+//===----------------------------------------------------------------------===//
+// Quasi-Fused Multiply-Add and Subtract (QFMA/QFMS)
+//===----------------------------------------------------------------------===//
+
+multiclass SIMDQFM<ValueType vec_t, string vec, bits<32> baseInst> {
+ defm QFMA_#vec_t :
+ SIMD_I<(outs V128:$dst), (ins V128:$a, V128:$b, V128:$c),
+ (outs), (ins),
+ [(set (vec_t V128:$dst),
+ (int_wasm_qfma (vec_t V128:$a), (vec_t V128:$b), (vec_t V128:$c)))],
+ vec#".qfma\t$dst, $a, $b, $c", vec#".qfma", baseInst>;
+ defm QFMS_#vec_t :
+ SIMD_I<(outs V128:$dst), (ins V128:$a, V128:$b, V128:$c),
+ (outs), (ins),
+ [(set (vec_t V128:$dst),
+ (int_wasm_qfms (vec_t V128:$a), (vec_t V128:$b), (vec_t V128:$c)))],
+ vec#".qfms\t$dst, $a, $b, $c", vec#".qfms", !add(baseInst, 1)>;
+}
+
+defm "" : SIMDQFM<v4f32, "f32x4", 0x98>;
+defm "" : SIMDQFM<v2f64, "f64x2", 0xa3>;