path: root/test/CodeGen/WebAssembly
author    Dimitry Andric <dim@FreeBSD.org>  2017-12-18 20:10:56 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2017-12-18 20:10:56 +0000
commit    044eb2f6afba375a914ac9d8024f8f5142bb912e (patch)
tree      1475247dc9f9fe5be155ebd4c9069c75aadf8c20 /test/CodeGen/WebAssembly
parent    eb70dddbd77e120e5d490bd8fbe7ff3f8fa81c6b (diff)
Diffstat (limited to 'test/CodeGen/WebAssembly')
-rw-r--r--  test/CodeGen/WebAssembly/call.ll                        25
-rw-r--r--  test/CodeGen/WebAssembly/cfg-stackify.ll                30
-rw-r--r--  test/CodeGen/WebAssembly/comdat.ll                       5
-rw-r--r--  test/CodeGen/WebAssembly/conv-trap.ll                  167
-rw-r--r--  test/CodeGen/WebAssembly/conv.ll                        18
-rw-r--r--  test/CodeGen/WebAssembly/dbgvalue.ll                     8
-rw-r--r--  test/CodeGen/WebAssembly/fast-isel-noreg.ll             54
-rw-r--r--  test/CodeGen/WebAssembly/function-bitcasts-varargs.ll   31
-rw-r--r--  test/CodeGen/WebAssembly/function-bitcasts.ll          166
-rw-r--r--  test/CodeGen/WebAssembly/global.ll                       7
-rw-r--r--  test/CodeGen/WebAssembly/globl.ll                       13
-rw-r--r--  test/CodeGen/WebAssembly/i32-load-store-alignment.ll    28
-rw-r--r--  test/CodeGen/WebAssembly/i64-load-store-alignment.ll    25
-rw-r--r--  test/CodeGen/WebAssembly/inline-asm-m.ll                13
-rw-r--r--  test/CodeGen/WebAssembly/inline-asm.ll                  56
-rw-r--r--  test/CodeGen/WebAssembly/load-ext-atomic.ll            102
-rw-r--r--  test/CodeGen/WebAssembly/lower-global-dtors.ll         139
-rw-r--r--  test/CodeGen/WebAssembly/main-declaration.ll            19
-rw-r--r--  test/CodeGen/WebAssembly/main.ll                        18
-rw-r--r--  test/CodeGen/WebAssembly/offset-atomics.ll             307
-rw-r--r--  test/CodeGen/WebAssembly/reg-stackify.ll                 4
-rw-r--r--  test/CodeGen/WebAssembly/signext-arg.ll                 22
-rw-r--r--  test/CodeGen/WebAssembly/signext-inreg.ll               71
-rw-r--r--  test/CodeGen/WebAssembly/umulo-i64.ll                   13
24 files changed, 1237 insertions, 104 deletions
diff --git a/test/CodeGen/WebAssembly/call.ll b/test/CodeGen/WebAssembly/call.ll
index 1cf42242a6cc..8a5e8d8c480e 100644
--- a/test/CodeGen/WebAssembly/call.ll
+++ b/test/CodeGen/WebAssembly/call.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -fast-isel -fast-isel-abort=1 | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-temporary-workarounds=false | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -fast-isel -fast-isel-abort=1 -wasm-temporary-workarounds=false | FileCheck %s
; Test that basic call operations assemble as expected.
@@ -150,6 +150,27 @@ define void @coldcc_tail_call_void_nullary() {
ret void
}
+; CHECK-LABEL: call_constexpr:
+; CHECK-NEXT: i32.const $push[[L0:[0-9]+]]=, 2{{$}}
+; CHECK-NEXT: i32.const $push[[L1:[0-9]+]]=, 3{{$}}
+; CHECK-NEXT: call .Lbitcast@FUNCTION, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: call other_void_nullary@FUNCTION{{$}}
+; CHECK-NEXT: call void_nullary@FUNCTION{{$}}
+; CHECK-NEXT: return{{$}}
+declare void @vararg_func(...)
+declare void @other_void_nullary()
+define void @call_constexpr() {
+bb0:
+ call void bitcast (void (...)* @vararg_func to void (i32, i32)*)(i32 2, i32 3)
+ br label %bb1
+bb1:
+ call void select (i1 0, void ()* @void_nullary, void ()* @other_void_nullary)()
+ br label %bb2
+bb2:
+ call void inttoptr (i32 ptrtoint (void ()* @void_nullary to i32) to void ()*)()
+ ret void
+}
+
; TODO: test the following:
; - More argument combinations.
; - Tail call.
diff --git a/test/CodeGen/WebAssembly/cfg-stackify.ll b/test/CodeGen/WebAssembly/cfg-stackify.ll
index ae6dd7a34ef8..1a5bce5a5d4e 100644
--- a/test/CodeGen/WebAssembly/cfg-stackify.ll
+++ b/test/CodeGen/WebAssembly/cfg-stackify.ll
@@ -892,19 +892,20 @@ end:
; CHECK: .LBB19_4:
; CHECK-NEXT: loop {{$}}
; CHECK-NOT: block
-; CHECK: br_if 3, {{[^,]+}}{{$}}
-; CHECK: block {{$}}
-; CHECK: br_table {{[^,]+}}, 1, 0, 4, 2, 3, 1{{$}}
-; CHECK-NEXT: .LBB19_6:
-; CHECK-NEXT: end_block{{$}}
+; CHECK: br_if 0, {{[^,]+}}{{$}}
; CHECK-NEXT: end_loop{{$}}
+; CHECK: br_if 1, {{[^,]+}}{{$}}
+; CHECK-NOT: block
+; CHECK: br_if 0, {{[^,]+}}{{$}}
; CHECK-NEXT: end_loop{{$}}
+; CHECK-NOT: block
+; CHECK: br_if 1, {{[^,]+}}{{$}}
; CHECK-NEXT: return{{$}}
-; CHECK-NEXT: .LBB19_7:
+; CHECK-NEXT: .LBB19_9:
; CHECK-NEXT: end_block{{$}}
; CHECK-NOT: block
; CHECK: br 0{{$}}
-; CHECK-NEXT: .LBB19_8:
+; CHECK-NEXT: .LBB19_10:
; OPT-LABEL: test10:
; OPT: .LBB19_1:
; OPT-NEXT: loop {{$}}
@@ -917,19 +918,20 @@ end:
; OPT: .LBB19_4:
; OPT-NEXT: loop {{$}}
; OPT-NOT: block
-; OPT: br_if 3, {{[^,]+}}{{$}}
-; OPT: block
-; OPT: br_table {{[^,]+}}, 1, 0, 4, 2, 3, 1{{$}}
-; OPT-NEXT: .LBB19_6:
-; OPT-NEXT: end_block{{$}}
+; OPT: br_if 0, {{[^,]+}}{{$}}
; OPT-NEXT: end_loop{{$}}
+; OPT: br_if 1, {{[^,]+}}{{$}}
+; OPT-NOT: block
+; OPT: br_if 0, {{[^,]+}}{{$}}
; OPT-NEXT: end_loop{{$}}
+; OPT-NOT: block
+; OPT: br_if 1, {{[^,]+}}{{$}}
; OPT-NEXT: return{{$}}
-; OPT-NEXT: .LBB19_7:
+; OPT-NEXT: .LBB19_9:
; OPT-NEXT: end_block{{$}}
; OPT-NOT: block
; OPT: br 0{{$}}
-; OPT-NEXT: .LBB19_8:
+; OPT-NEXT: .LBB19_10:
define void @test10() {
bb0:
br label %bb1
diff --git a/test/CodeGen/WebAssembly/comdat.ll b/test/CodeGen/WebAssembly/comdat.ll
new file mode 100644
index 000000000000..8aa1af8667ae
--- /dev/null
+++ b/test/CodeGen/WebAssembly/comdat.ll
@@ -0,0 +1,5 @@
+; RUN: not llc < %s -mtriple wasm32-unknown-unknown-wasm 2>&1 | FileCheck %s
+
+$f = comdat any
+@f = global i32 0, comdat
+; CHECK: LLVM ERROR: WebAssembly doesn't support COMDATs, 'f' cannot be lowered.
diff --git a/test/CodeGen/WebAssembly/conv-trap.ll b/test/CodeGen/WebAssembly/conv-trap.ll
new file mode 100644
index 000000000000..e20ed0a45271
--- /dev/null
+++ b/test/CodeGen/WebAssembly/conv-trap.ll
@@ -0,0 +1,167 @@
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -mattr=-nontrapping-fptoint | FileCheck %s
+
+; Test that basic conversion operations assemble as expected using
+; the trapping opcodes and explicit code to suppress the trapping.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+; CHECK-LABEL: i32_trunc_s_f32:
+; CHECK-NEXT: .param f32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: block
+; CHECK-NEXT: f32.abs $push[[ABS:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: f32.const $push[[LIMIT:[0-9]+]]=, 0x1p31{{$}}
+; CHECK-NEXT: f32.lt $push[[LT:[0-9]+]]=, $pop[[ABS]], $pop[[LIMIT]]{{$}}
+; CHECK-NEXT: br_if 0, $pop[[LT]]{{$}}
+; CHECK-NEXT: i32.const $push[[ALT:[0-9]+]]=, -2147483648{{$}}
+; CHECK-NEXT: return $pop[[ALT]]{{$}}
+; CHECK-NEXT: BB
+; CHECK-NEXT: end_block
+; CHECK-NEXT: i32.trunc_s/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i32 @i32_trunc_s_f32(float %x) {
+ %a = fptosi float %x to i32
+ ret i32 %a
+}
+
+; CHECK-LABEL: i32_trunc_u_f32:
+; CHECK-NEXT: .param f32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: block
+; CHECK-NEXT: f32.const $push[[LIMIT:[0-9]+]]=, 0x1p32{{$}}
+; CHECK-NEXT: f32.lt $push[[LT:[0-9]+]]=, $0, $pop[[LIMIT]]{{$}}
+; CHECK-NEXT: f32.const $push[[ZERO:[0-9]+]]=, 0x0p0{{$}}
+; CHECK-NEXT: f32.ge $push[[GE:[0-9]+]]=, $0, $pop[[ZERO]]{{$}}
+; CHECK-NEXT: i32.and $push[[AND:[0-9]+]]=, $pop[[LT]], $pop[[GE]]{{$}}
+; CHECK-NEXT: br_if 0, $pop[[AND]]{{$}}
+; CHECK-NEXT: i32.const $push[[ALT:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: return $pop[[ALT]]{{$}}
+; CHECK-NEXT: BB
+; CHECK-NEXT: end_block
+; CHECK-NEXT: i32.trunc_u/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i32 @i32_trunc_u_f32(float %x) {
+ %a = fptoui float %x to i32
+ ret i32 %a
+}
+
+; CHECK-LABEL: i32_trunc_s_f64:
+; CHECK-NEXT: .param f64{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: block
+; CHECK-NEXT: f64.abs $push[[ABS:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: f64.const $push[[LIMIT:[0-9]+]]=, 0x1p31{{$}}
+; CHECK-NEXT: f64.lt $push[[LT:[0-9]+]]=, $pop[[ABS]], $pop[[LIMIT]]{{$}}
+; CHECK-NEXT: br_if 0, $pop[[LT]]{{$}}
+; CHECK-NEXT: i32.const $push[[ALT:[0-9]+]]=, -2147483648{{$}}
+; CHECK-NEXT: return $pop[[ALT]]{{$}}
+; CHECK-NEXT: BB
+; CHECK-NEXT: end_block
+; CHECK-NEXT: i32.trunc_s/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i32 @i32_trunc_s_f64(double %x) {
+ %a = fptosi double %x to i32
+ ret i32 %a
+}
+
+; CHECK-LABEL: i32_trunc_u_f64:
+; CHECK-NEXT: .param f64{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: block
+; CHECK-NEXT: f64.const $push[[LIMIT:[0-9]+]]=, 0x1p32{{$}}
+; CHECK-NEXT: f64.lt $push[[LT:[0-9]+]]=, $0, $pop[[LIMIT]]{{$}}
+; CHECK-NEXT: f64.const $push[[ZERO:[0-9]+]]=, 0x0p0{{$}}
+; CHECK-NEXT: f64.ge $push[[GE:[0-9]+]]=, $0, $pop[[ZERO]]{{$}}
+; CHECK-NEXT: i32.and $push[[AND:[0-9]+]]=, $pop[[LT]], $pop[[GE]]{{$}}
+; CHECK-NEXT: br_if 0, $pop[[AND]]{{$}}
+; CHECK-NEXT: i32.const $push[[ALT:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: return $pop[[ALT]]{{$}}
+; CHECK-NEXT: BB
+; CHECK-NEXT: end_block
+; CHECK-NEXT: i32.trunc_u/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i32 @i32_trunc_u_f64(double %x) {
+ %a = fptoui double %x to i32
+ ret i32 %a
+}
+
+; CHECK-LABEL: i64_trunc_s_f32:
+; CHECK-NEXT: .param f32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: block
+; CHECK-NEXT: f32.abs $push[[ABS:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: f32.const $push[[LIMIT:[0-9]+]]=, 0x1p63{{$}}
+; CHECK-NEXT: f32.lt $push[[LT:[0-9]+]]=, $pop[[ABS]], $pop[[LIMIT]]{{$}}
+; CHECK-NEXT: br_if 0, $pop[[LT]]{{$}}
+; CHECK-NEXT: i64.const $push[[ALT:[0-9]+]]=, -9223372036854775808{{$}}
+; CHECK-NEXT: return $pop[[ALT]]{{$}}
+; CHECK-NEXT: BB
+; CHECK-NEXT: end_block
+; CHECK-NEXT: i64.trunc_s/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @i64_trunc_s_f32(float %x) {
+ %a = fptosi float %x to i64
+ ret i64 %a
+}
+
+; CHECK-LABEL: i64_trunc_u_f32:
+; CHECK-NEXT: .param f32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: block
+; CHECK-NEXT: f32.const $push[[LIMIT:[0-9]+]]=, 0x1p64{{$}}
+; CHECK-NEXT: f32.lt $push[[LT:[0-9]+]]=, $0, $pop[[LIMIT]]{{$}}
+; CHECK-NEXT: f32.const $push[[ZERO:[0-9]+]]=, 0x0p0{{$}}
+; CHECK-NEXT: f32.ge $push[[GE:[0-9]+]]=, $0, $pop[[ZERO]]{{$}}
+; CHECK-NEXT: i32.and $push[[AND:[0-9]+]]=, $pop[[LT]], $pop[[GE]]{{$}}
+; CHECK-NEXT: br_if 0, $pop[[AND]]{{$}}
+; CHECK-NEXT: i64.const $push[[ALT:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: return $pop[[ALT]]{{$}}
+; CHECK-NEXT: BB
+; CHECK-NEXT: end_block
+; CHECK-NEXT: i64.trunc_u/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @i64_trunc_u_f32(float %x) {
+ %a = fptoui float %x to i64
+ ret i64 %a
+}
+
+; CHECK-LABEL: i64_trunc_s_f64:
+; CHECK-NEXT: .param f64{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: block
+; CHECK-NEXT: f64.abs $push[[ABS:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: f64.const $push[[LIMIT:[0-9]+]]=, 0x1p63{{$}}
+; CHECK-NEXT: f64.lt $push[[LT:[0-9]+]]=, $pop[[ABS]], $pop[[LIMIT]]{{$}}
+; CHECK-NEXT: br_if 0, $pop[[LT]]{{$}}
+; CHECK-NEXT: i64.const $push[[ALT:[0-9]+]]=, -9223372036854775808{{$}}
+; CHECK-NEXT: return $pop[[ALT]]{{$}}
+; CHECK-NEXT: BB
+; CHECK-NEXT: end_block
+; CHECK-NEXT: i64.trunc_s/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @i64_trunc_s_f64(double %x) {
+ %a = fptosi double %x to i64
+ ret i64 %a
+}
+
+; CHECK-LABEL: i64_trunc_u_f64:
+; CHECK-NEXT: .param f64{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: block
+; CHECK-NEXT: f64.const $push[[LIMIT:[0-9]+]]=, 0x1p64{{$}}
+; CHECK-NEXT: f64.lt $push[[LT:[0-9]+]]=, $0, $pop[[LIMIT]]{{$}}
+; CHECK-NEXT: f64.const $push[[ZERO:[0-9]+]]=, 0x0p0{{$}}
+; CHECK-NEXT: f64.ge $push[[GE:[0-9]+]]=, $0, $pop[[ZERO]]{{$}}
+; CHECK-NEXT: i32.and $push[[AND:[0-9]+]]=, $pop[[LT]], $pop[[GE]]{{$}}
+; CHECK-NEXT: br_if 0, $pop[[AND]]{{$}}
+; CHECK-NEXT: i64.const $push[[ALT:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: return $pop[[ALT]]{{$}}
+; CHECK-NEXT: BB
+; CHECK-NEXT: end_block
+; CHECK-NEXT: i64.trunc_u/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @i64_trunc_u_f64(double %x) {
+ %a = fptoui double %x to i64
+ ret i64 %a
+}
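
The guard sequence checked in conv-trap.ll corresponds roughly to the hand-written IR below (a sketch only, not part of the patch; the backend builds the equivalent control flow at the machine level). Inputs whose magnitude is outside the i32 range produce INT_MIN rather than reaching the trapping i32.trunc_s/f32 opcode:

; Illustrative IR for the signed f32->i32 case; 0x41E0000000000000 is 2^31
; written as an LLVM hexadecimal float constant.
define i32 @guarded_fptosi_f32(float %x) {
  %abs = call float @llvm.fabs.f32(float %x)
  %inrange = fcmp olt float %abs, 0x41E0000000000000
  br i1 %inrange, label %convert, label %saturate
convert:
  %t = fptosi float %x to i32              ; selected as i32.trunc_s/f32
  ret i32 %t
saturate:
  ret i32 -2147483648                      ; the i32.const -2147483648 in the CHECK lines
}
declare float @llvm.fabs.f32(float)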
diff --git a/test/CodeGen/WebAssembly/conv.ll b/test/CodeGen/WebAssembly/conv.ll
index 913c4b0b19ea..7633f9703c75 100644
--- a/test/CodeGen/WebAssembly/conv.ll
+++ b/test/CodeGen/WebAssembly/conv.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -mattr=+nontrapping-fptoint | FileCheck %s
; Test that basic conversion operations assemble as expected.
@@ -38,7 +38,7 @@ define i64 @i64_extend_u_i32(i32 %x) {
; CHECK-LABEL: i32_trunc_s_f32:
; CHECK-NEXT: .param f32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.trunc_s/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.trunc_s:sat/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @i32_trunc_s_f32(float %x) {
%a = fptosi float %x to i32
@@ -48,7 +48,7 @@ define i32 @i32_trunc_s_f32(float %x) {
; CHECK-LABEL: i32_trunc_u_f32:
; CHECK-NEXT: .param f32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.trunc_u/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.trunc_u:sat/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @i32_trunc_u_f32(float %x) {
%a = fptoui float %x to i32
@@ -58,7 +58,7 @@ define i32 @i32_trunc_u_f32(float %x) {
; CHECK-LABEL: i32_trunc_s_f64:
; CHECK-NEXT: .param f64{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.trunc_s/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.trunc_s:sat/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @i32_trunc_s_f64(double %x) {
%a = fptosi double %x to i32
@@ -68,7 +68,7 @@ define i32 @i32_trunc_s_f64(double %x) {
; CHECK-LABEL: i32_trunc_u_f64:
; CHECK-NEXT: .param f64{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.trunc_u/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i32.trunc_u:sat/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @i32_trunc_u_f64(double %x) {
%a = fptoui double %x to i32
@@ -78,7 +78,7 @@ define i32 @i32_trunc_u_f64(double %x) {
; CHECK-LABEL: i64_trunc_s_f32:
; CHECK-NEXT: .param f32{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.trunc_s/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.trunc_s:sat/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @i64_trunc_s_f32(float %x) {
%a = fptosi float %x to i64
@@ -88,7 +88,7 @@ define i64 @i64_trunc_s_f32(float %x) {
; CHECK-LABEL: i64_trunc_u_f32:
; CHECK-NEXT: .param f32{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.trunc_u/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.trunc_u:sat/f32 $push[[NUM:[0-9]+]]=, $0{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @i64_trunc_u_f32(float %x) {
%a = fptoui float %x to i64
@@ -98,7 +98,7 @@ define i64 @i64_trunc_u_f32(float %x) {
; CHECK-LABEL: i64_trunc_s_f64:
; CHECK-NEXT: .param f64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.trunc_s/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.trunc_s:sat/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @i64_trunc_s_f64(double %x) {
%a = fptosi double %x to i64
@@ -108,7 +108,7 @@ define i64 @i64_trunc_s_f64(double %x) {
; CHECK-LABEL: i64_trunc_u_f64:
; CHECK-NEXT: .param f64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.trunc_u/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.trunc_u:sat/f64 $push[[NUM:[0-9]+]]=, $0{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @i64_trunc_u_f64(double %x) {
%a = fptoui double %x to i64
diff --git a/test/CodeGen/WebAssembly/dbgvalue.ll b/test/CodeGen/WebAssembly/dbgvalue.ll
index eb39c6da1c99..438bea33282b 100644
--- a/test/CodeGen/WebAssembly/dbgvalue.ll
+++ b/test/CodeGen/WebAssembly/dbgvalue.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=wasm32-unknown-unknown-wasm | FileCheck %s
-; CHECK: BB#0
-; CHECK: #DEBUG_VALUE: usage:self <- %vreg4
-; CHECK: BB#1
+; CHECK: %bb.0
+; CHECK: #DEBUG_VALUE: usage:self <- %4
+; CHECK: %bb.1
; CHECK: DW_TAG_variable
source_filename = "test/CodeGen/WebAssembly/dbgvalue.ll"
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
@@ -45,7 +45,7 @@ attributes #0 = { nounwind readnone }
!1 = !DIFile(filename: "crash.c", directory: "wasm/tests")
!2 = !{}
!3 = !{!4}
-!4 = !DIGlobalVariableExpression(var: !5)
+!4 = !DIGlobalVariableExpression(var: !5, expr: !DIExpression())
!5 = !DIGlobalVariable(name: "key", scope: !0, file: !1, line: 7, type: !6, isLocal: false, isDefinition: true)
!6 = !DICompositeType(tag: DW_TAG_array_type, baseType: !7, size: 120, align: 8, elements: !10)
!7 = !DIDerivedType(tag: DW_TAG_typedef, name: "uint8_t", file: !8, line: 185, baseType: !9)
diff --git a/test/CodeGen/WebAssembly/fast-isel-noreg.ll b/test/CodeGen/WebAssembly/fast-isel-noreg.ll
index 229651d093f0..d0f60f22a27e 100644
--- a/test/CodeGen/WebAssembly/fast-isel-noreg.ll
+++ b/test/CodeGen/WebAssembly/fast-isel-noreg.ll
@@ -32,4 +32,58 @@ entry:
ret i32 0
}
+; CHECK: i32.const {{.*}}, addr@FUNCTION
+; CHECK: i32.const {{.*}}, 24
+; CHECK: i32.shl
+; CHECK: i32.const {{.*}}, 24
+; CHECK: i32.shr_s
+; CHECK: i32.const {{.*}}, 64
+; CHECK: br_if 0, $pop0
+define hidden i32 @d() #0 {
+entry:
+ %t = icmp slt i8 ptrtoint (void ()* @addr to i8), 64
+ br i1 %t, label %a, label %b
+a:
+ unreachable
+b:
+ ret i32 0
+}
+
+; CHECK: i32.const {{.*}}, addr@FUNCTION
+; CHECK: i32.const {{.*}}, 255
+; CHECK: i32.and
+; CHECK: i32.const {{.*}}, 64
+; CHECK: br_if 0, $pop0
+define hidden i32 @e() #0 {
+entry:
+ %t = icmp ult i8 ptrtoint (void ()* @addr to i8), 64
+ br i1 %t, label %a, label %b
+a:
+ unreachable
+b:
+ ret i32 0
+}
+
+; CHECK: i32.const {{.*}}, addr@FUNCTION
+; CHECK: i32.const {{.*}}, 24
+; CHECK: i32.shl
+; CHECK: i32.const {{.*}}, 24
+; CHECK: i32.shr_s
+define hidden i32 @f() #0 {
+entry:
+ %t = sext i8 ptrtoint (void ()* @addr to i8) to i32
+ ret i32 %t
+}
+
+; CHECK: i32.const {{.*}}, addr@FUNCTION
+; CHECK: i32.const {{.*}}, 255
+; CHECK: i32.and
+define hidden i32 @g() #0 {
+entry:
+ %t = zext i8 ptrtoint (void ()* @addr to i8) to i32
+ ret i32 %t
+}
+
+declare void @addr()
+
attributes #0 = { noinline optnone }
diff --git a/test/CodeGen/WebAssembly/function-bitcasts-varargs.ll b/test/CodeGen/WebAssembly/function-bitcasts-varargs.ll
new file mode 100644
index 000000000000..b5f3d2f64e99
--- /dev/null
+++ b/test/CodeGen/WebAssembly/function-bitcasts-varargs.ll
@@ -0,0 +1,31 @@
+; RUN: llc < %s -asm-verbose=false -wasm-temporary-workarounds=false | FileCheck %s
+
+; Test that function pointer casts casting away varargs are replaced with
+; wrappers.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+define void @callWithArgs() {
+entry:
+ call void bitcast (void (...)* @underspecified to void (i32, i32)*)(i32 0, i32 1)
+ call void(...) bitcast (void (i32, i32)* @specified to void (...)*)(i32 0, i32 1)
+ ret void
+}
+
+declare void @underspecified(...)
+declare void @specified(i32, i32)
+
+; CHECK: callWithArgs:
+; CHECK: i32.const $push1=, 0
+; CHECK-NEXT: i32.const $push0=, 1
+; CHECK-NEXT: call .Lbitcast@FUNCTION, $pop1, $pop0
+; CHECK: call .Lbitcast.1@FUNCTION, $pop{{[0-9]+$}}
+
+; CHECK: .Lbitcast:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK: call underspecified@FUNCTION, $pop{{[0-9]+$}}
+
+; CHECK: .Lbitcast.1:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK: call specified@FUNCTION, $pop{{[0-9]+}}, $pop{{[0-9]+$}}
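+
+The .Lbitcast wrapper matched above presumably has roughly this shape at the IR level (an approximation based on the CHECK lines; the wrapper name and the argument forwarding are assumptions, not taken from the patch):
+
+; Sketch: a fixed-signature wrapper that forwards its arguments to the
+; variadic declaration, so the direct call site keeps a matching type.
+declare void @underspecified(...)
+define internal void @underspecified_fixed_wrapper(i32 %a, i32 %b) {
+  call void (...) @underspecified(i32 %a, i32 %b)
+  ret void
+}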
diff --git a/test/CodeGen/WebAssembly/function-bitcasts.ll b/test/CodeGen/WebAssembly/function-bitcasts.ll
index 3f20aef08115..ab03716ef74d 100644
--- a/test/CodeGen/WebAssembly/function-bitcasts.ll
+++ b/test/CodeGen/WebAssembly/function-bitcasts.ll
@@ -1,64 +1,36 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-explicit-locals | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-explicit-locals -enable-emscripten-cxx-exceptions -wasm-temporary-workarounds=false | FileCheck %s
; Test that function pointer casts are replaced with wrappers.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown-wasm"
+declare void @has_i32_arg(i32)
+declare i32 @has_i32_ret()
+declare void @vararg(...)
+declare void @plain(i32)
+
+declare void @foo0()
+declare void @foo1()
+declare void @foo2()
+declare void @foo3()
+
; CHECK-LABEL: test:
; CHECK-NEXT: call .Lbitcast@FUNCTION{{$}}
; CHECK-NEXT: call .Lbitcast@FUNCTION{{$}}
; CHECK-NEXT: call .Lbitcast.1@FUNCTION{{$}}
; CHECK-NEXT: i32.const $push[[L0:[0-9]+]]=, 0
-; CHECK-NEXT: call .Lbitcast.2@FUNCTION, $pop[[L0]]{{$}}
+; CHECK-NEXT: call .Lbitcast.4@FUNCTION, $pop[[L0]]{{$}}
; CHECK-NEXT: i32.const $push[[L1:[0-9]+]]=, 0
-; CHECK-NEXT: call .Lbitcast.2@FUNCTION, $pop[[L1]]{{$}}
+; CHECK-NEXT: call .Lbitcast.4@FUNCTION, $pop[[L1]]{{$}}
; CHECK-NEXT: i32.const $push[[L2:[0-9]+]]=, 0
-; CHECK-NEXT: call .Lbitcast.2@FUNCTION, $pop[[L2]]{{$}}
+; CHECK-NEXT: call .Lbitcast.4@FUNCTION, $pop[[L2]]{{$}}
; CHECK-NEXT: call foo0@FUNCTION
-; CHECK-NEXT: i32.call $drop=, .Lbitcast.3@FUNCTION{{$}}
+; CHECK-NEXT: i32.call $drop=, .Lbitcast.5@FUNCTION{{$}}
; CHECK-NEXT: call foo2@FUNCTION{{$}}
; CHECK-NEXT: call foo1@FUNCTION{{$}}
; CHECK-NEXT: call foo3@FUNCTION{{$}}
; CHECK-NEXT: end_function
-
-; CHECK-LABEL: test_varargs:
-; CHECK: set_global
-; CHECK: i32.const $push[[L3:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: call vararg@FUNCTION, $pop[[L3]]{{$}}
-; CHECK-NEXT: i32.const $push[[L4:[0-9]+]]=, 0{{$}}
-; CHECK-NEXT: i32.store 0($[[L5:[0-9]+]]), $pop[[L4]]{{$}}
-; CHECK-NEXT: call plain@FUNCTION, $[[L5]]{{$}}
-
-; CHECK-LABEL: .Lbitcast:
-; CHECK-NEXT: call has_i32_arg@FUNCTION, $0{{$}}
-; CHECK-NEXT: end_function
-
-; CHECK-LABEL: .Lbitcast.1:
-; CHECK-NEXT: call $drop=, has_i32_ret@FUNCTION{{$}}
-; CHECK-NEXT: end_function
-
-; CHECK-LABEL: .Lbitcast.2:
-; CHECK-NEXT: .param i32
-; CHECK-NEXT: call foo0@FUNCTION{{$}}
-; CHECK-NEXT: end_function
-
-; CHECK-LABEL: .Lbitcast.3:
-; CHECK-NEXT: .result i32
-; CHECK-NEXT: call foo1@FUNCTION{{$}}
-; CHECK-NEXT: copy_local $push0=, $0
-; CHECK-NEXT: end_function
-
-declare void @has_i32_arg(i32)
-declare i32 @has_i32_ret()
-declare void @vararg(...)
-declare void @plain(i32)
-
-declare void @foo0()
-declare void @foo1()
-declare void @foo2()
-declare void @foo3()
-
define void @test() {
entry:
call void bitcast (void (i32)* @has_i32_arg to void ()*)()
@@ -79,8 +51,116 @@ entry:
ret void
}
+; CHECK-LABEL: test_varargs:
+; CHECK: set_global
+; CHECK: i32.const $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: call .Lbitcast.2@FUNCTION, $pop[[L3]]{{$}}
+; CHECK-NEXT: i32.const $push[[L4:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i32.store 0($[[L5:[0-9]+]]), $pop[[L4]]{{$}}
+; CHECK-NEXT: call .Lbitcast.3@FUNCTION, $[[L5]]{{$}}
define void @test_varargs() {
call void bitcast (void (...)* @vararg to void (i32)*)(i32 0)
call void (...) bitcast (void (i32)* @plain to void (...)*)(i32 0)
ret void
}
+
+; Don't use wrappers when the value is stored in memory
+
+@global_func = hidden local_unnamed_addr global void ()* null
+
+; CHECK-LABEL: test_store:
+; CHECK-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i32.const $push[[L1:[0-9]+]]=, has_i32_ret@FUNCTION{{$}}
+; CHECK-NEXT: i32.store global_func($pop[[L0]]), $pop[[L1]]{{$}}
+define void @test_store() {
+ %1 = bitcast i32 ()* @has_i32_ret to void ()*
+ store void ()* %1, void ()** @global_func
+ ret void
+}
+
+; CHECK-LABEL: test_load:
+; CHECK-NEXT: result i32{{$}}
+; CHECK-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i32.load $push[[L1:[0-9]+]]=, global_func($pop[[L0]]){{$}}
+; CHECK-NEXT: i32.call_indirect $push{{[0-9]+}}=, $pop[[L1]]{{$}}
+define i32 @test_load() {
+ %1 = load i32 ()*, i32 ()** bitcast (void ()** @global_func to i32 ()**)
+ %2 = call i32 %1()
+ ret i32 %2
+}
+
+; Don't use wrappers when the value is passed to a function call
+
+declare void @call_func(i32 ()*)
+
+; CHECK-LABEL: test_argument:
+; CHECK-NEXT: i32.const $push[[L0:[0-9]+]]=, has_i32_ret@FUNCTION{{$}}
+; CHECK-NEXT: call call_func@FUNCTION, $pop[[L0]]{{$}}
+; CHECK-NEXT: i32.const $push[[L1:[0-9]+]]=, has_i32_arg@FUNCTION{{$}}
+; CHECK-NEXT: call call_func@FUNCTION, $pop[[L1]]{{$}}
+define void @test_argument() {
+ call void @call_func(i32 ()* @has_i32_ret)
+ call void @call_func(i32 ()* bitcast (void (i32)* @has_i32_arg to i32 ()*))
+ ret void
+}
+
+; Invokes should be treated like calls
+
+; CHECK-LABEL: test_invoke:
+; CHECK: i32.const $push[[L1:[0-9]+]]=, call_func@FUNCTION{{$}}
+; CHECK-NEXT: i32.const $push[[L0:[0-9]+]]=, has_i32_ret@FUNCTION{{$}}
+; CHECK-NEXT: call "__invoke_void_i32()*"@FUNCTION, $pop[[L1]], $pop[[L0]]{{$}}
+; CHECK: i32.const $push[[L3:[0-9]+]]=, call_func@FUNCTION{{$}}
+; CHECK-NEXT: i32.const $push[[L2:[0-9]+]]=, has_i32_arg@FUNCTION{{$}}
+; CHECK-NEXT: call "__invoke_void_i32()*"@FUNCTION, $pop[[L3]], $pop[[L2]]{{$}}
+; CHECK: i32.const $push[[L4:[0-9]+]]=, .Lbitcast@FUNCTION{{$}}
+; CHECK-NEXT: call __invoke_void@FUNCTION, $pop[[L4]]{{$}}
+declare i32 @personality(...)
+define void @test_invoke() personality i32 (...)* @personality {
+entry:
+ invoke void @call_func(i32 ()* @has_i32_ret)
+ to label %cont unwind label %lpad
+
+cont:
+ invoke void @call_func(i32 ()* bitcast (void (i32)* @has_i32_arg to i32 ()*))
+ to label %cont2 unwind label %lpad
+
+cont2:
+ invoke void bitcast (void (i32)* @has_i32_arg to void ()*)()
+ to label %end unwind label %lpad
+
+lpad:
+ %0 = landingpad { i8*, i32 }
+ catch i8* null
+ br label %end
+
+end:
+ ret void
+}
+
+; CHECK-LABEL: .Lbitcast:
+; CHECK-NEXT: call has_i32_arg@FUNCTION, $0{{$}}
+; CHECK-NEXT: end_function
+
+; CHECK-LABEL: .Lbitcast.1:
+; CHECK-NEXT: call $drop=, has_i32_ret@FUNCTION{{$}}
+; CHECK-NEXT: end_function
+
+; CHECK-LABEL: .Lbitcast.2:
+; CHECK: call vararg@FUNCTION, $1{{$}}
+; CHECK: end_function
+
+; CHECK-LABEL: .Lbitcast.3:
+; CHECK: call plain@FUNCTION, $1{{$}}
+; CHECK: end_function
+
+; CHECK-LABEL: .Lbitcast.4:
+; CHECK-NEXT: .param i32
+; CHECK-NEXT: call foo0@FUNCTION{{$}}
+; CHECK-NEXT: end_function
+
+; CHECK-LABEL: .Lbitcast.5:
+; CHECK-NEXT: .result i32
+; CHECK-NEXT: call foo1@FUNCTION{{$}}
+; CHECK-NEXT: copy_local $push0=, $0
+; CHECK-NEXT: end_function
diff --git a/test/CodeGen/WebAssembly/global.ll b/test/CodeGen/WebAssembly/global.ll
index 599eb53b431b..bb942bee560b 100644
--- a/test/CodeGen/WebAssembly/global.ll
+++ b/test/CodeGen/WebAssembly/global.ll
@@ -213,3 +213,10 @@ define i8* @call_memcpy(i8* %p, i8* nocapture readonly %q, i32 %n) {
; CHECK-NEXT: .size pointer_to_array, 4
@array = internal constant [8 x i8] zeroinitializer, align 1
@pointer_to_array = constant i8* getelementptr inbounds ([8 x i8], [8 x i8]* @array, i32 0, i32 4), align 4
+
+; Handle external objects with opaque type.
+%struct.ASTRUCT = type opaque
+@g_struct = external global %struct.ASTRUCT, align 1
+define i32 @address_of_opaque() {
+ ret i32 ptrtoint (%struct.ASTRUCT* @g_struct to i32)
+}
diff --git a/test/CodeGen/WebAssembly/globl.ll b/test/CodeGen/WebAssembly/globl.ll
index ba9f6659d7d7..c3126d558636 100644
--- a/test/CodeGen/WebAssembly/globl.ll
+++ b/test/CodeGen/WebAssembly/globl.ll
@@ -4,11 +4,14 @@ target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown-wasm"
; CHECK: .globl foo
+; CHECK: .type foo,@function
; CHECK-LABEL: foo:
-define void @foo() {
- ret void
+; CHECK: .size foo,
+define i32* @foo() {
+ ret i32* @bar
}
-; Check import directives - must be at the end of the file
-; CHECK: .import_global bar{{$}}
-@bar = external global i32
+; CHECK: .type bar,@object
+; CHECK: .globl bar
+; CHECK: .size bar, 4
+@bar = global i32 2
diff --git a/test/CodeGen/WebAssembly/i32-load-store-alignment.ll b/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
index 661d1b7bfc3e..1296632cca3a 100644
--- a/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
+++ b/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
+; RUN: llc < %s -mattr=+atomics -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test loads and stores with custom alignment values.
@@ -210,3 +210,29 @@ define void @sti16_a4(i16 *%p, i16 %v) {
store i16 %v, i16* %p, align 4
ret void
}
+
+; Atomics.
+; Wasm atomics have the alignment field, but it must always have the
+; type's natural alignment.
+
+; CHECK-LABEL: ldi32_atomic_a4:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: i32.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i32 @ldi32_atomic_a4(i32 *%p) {
+ %v = load atomic i32, i32* %p seq_cst, align 4
+ ret i32 %v
+}
+
+; 8 is greater than the default alignment so it is rounded down to 4
+
+; CHECK-LABEL: ldi32_atomic_a8:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: i32.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i32 @ldi32_atomic_a8(i32 *%p) {
+ %v = load atomic i32, i32* %p seq_cst, align 8
+ ret i32 %v
+}
diff --git a/test/CodeGen/WebAssembly/i64-load-store-alignment.ll b/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
index 1ccb74cb9d28..757f785cfd67 100644
--- a/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
+++ b/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
+; RUN: llc < %s -mattr=+atomics -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test loads and stores with custom alignment values.
@@ -323,3 +323,26 @@ define void @sti32_a8(i32 *%p, i64 %w) {
store i32 %v, i32* %p, align 8
ret void
}
+
+; Atomics.
+; CHECK-LABEL: ldi64_atomic_a8:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi64_atomic_a8(i64 *%p) {
+ %v = load atomic i64, i64* %p seq_cst, align 8
+ ret i64 %v
+}
+
+; 16 is greater than the default alignment so it is ignored.
+
+; CHECK-LABEL: ldi64_atomic_a16:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi64_atomic_a16(i64 *%p) {
+ %v = load atomic i64, i64* %p seq_cst, align 16
+ ret i64 %v
+}
diff --git a/test/CodeGen/WebAssembly/inline-asm-m.ll b/test/CodeGen/WebAssembly/inline-asm-m.ll
new file mode 100644
index 000000000000..8d514a528fd9
--- /dev/null
+++ b/test/CodeGen/WebAssembly/inline-asm-m.ll
@@ -0,0 +1,13 @@
+; RUN: not llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -no-integrated-as
+
+; Test basic inline assembly "m" operands, which are unsupported. Pass
+; -no-integrated-as since these aren't actually valid assembly syntax.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+define void @bar(i32* %r, i32* %s) {
+entry:
+ tail call void asm sideeffect "# $0 = bbb($1)", "=*m,*m"(i32* %s, i32* %r) #0, !srcloc !1
+ ret void
+}
diff --git a/test/CodeGen/WebAssembly/inline-asm.ll b/test/CodeGen/WebAssembly/inline-asm.ll
index 56576305d9e2..760b0ad0de60 100644
--- a/test/CodeGen/WebAssembly/inline-asm.ll
+++ b/test/CodeGen/WebAssembly/inline-asm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -no-integrated-as | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -no-integrated-as | FileCheck %s
; Test basic inline assembly. Pass -no-integrated-as since these aren't
; actually valid assembly syntax.
@@ -10,33 +10,24 @@ target triple = "wasm32-unknown-unknown-wasm"
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i32{{$}}
; CHECK-NEXT: #APP{{$}}
-; CHECK-NEXT: # $0 = aaa($0){{$}}
+; CHECK-NEXT: # 0 = aaa(0){{$}}
; CHECK-NEXT: #NO_APP{{$}}
-; CHECK-NEXT: return $0{{$}}
+; CHECK-NEXT: get_local $push0=, 0{{$}}
+; CHECK-NEXT: return $pop0{{$}}
define i32 @foo(i32 %r) {
entry:
%0 = tail call i32 asm sideeffect "# $0 = aaa($1)", "=r,r"(i32 %r) #0, !srcloc !0
ret i32 %0
}
-; CHECK-LABEL: bar:
-; CHECK-NEXT: .param i32, i32{{$}}
-; CHECK-NEXT: #APP{{$}}
-; CHECK-NEXT: # 0($1) = bbb(0($0)){{$}}
-; CHECK-NEXT: #NO_APP{{$}}
-; CHECK-NEXT: return{{$}}
-define void @bar(i32* %r, i32* %s) {
-entry:
- tail call void asm sideeffect "# $0 = bbb($1)", "=*m,*m"(i32* %s, i32* %r) #0, !srcloc !1
- ret void
-}
-
; CHECK-LABEL: imm:
; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: .local i32{{$}}
; CHECK-NEXT: #APP{{$}}
-; CHECK-NEXT: # $0 = ccc(42){{$}}
+; CHECK-NEXT: # 0 = ccc(42){{$}}
; CHECK-NEXT: #NO_APP{{$}}
-; CHECK-NEXT: return $0{{$}}
+; CHECK-NEXT: get_local $push0=, 0{{$}}
+; CHECK-NEXT: return $pop0{{$}}
define i32 @imm() {
entry:
%0 = tail call i32 asm sideeffect "# $0 = ccc($1)", "=r,i"(i32 42) #0, !srcloc !2
@@ -47,9 +38,10 @@ entry:
; CHECK-NEXT: .param i64{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: #APP{{$}}
-; CHECK-NEXT: # $0 = aaa($0){{$}}
+; CHECK-NEXT: # 0 = aaa(0){{$}}
; CHECK-NEXT: #NO_APP{{$}}
-; CHECK-NEXT: return $0{{$}}
+; CHECK-NEXT: get_local $push0=, 0{{$}}
+; CHECK-NEXT: return $pop0{{$}}
define i64 @foo_i64(i64 %r) {
entry:
%0 = tail call i64 asm sideeffect "# $0 = aaa($1)", "=r,r"(i64 %r) #0, !srcloc !0
@@ -57,16 +49,20 @@ entry:
}
; CHECK-LABEL: X_i16:
-; CHECK: foo $1{{$}}
-; CHECK: i32.store16 0($0), $1{{$}}
+; CHECK: foo 1{{$}}
+; CHECK: get_local $push[[S0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[S1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.store16 0($pop[[S0]]), $pop[[S1]]{{$}}
define void @X_i16(i16 * %t) {
call void asm sideeffect "foo $0", "=*X,~{dirflag},~{fpsr},~{flags},~{memory}"(i16* %t)
ret void
}
; CHECK-LABEL: X_ptr:
-; CHECK: foo $1{{$}}
-; CHECK: i32.store 0($0), $1{{$}}
+; CHECK: foo 1{{$}}
+; CHECK: get_local $push[[S0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[S1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.store 0($pop[[S0]]), $pop[[S1]]{{$}}
define void @X_ptr(i16 ** %t) {
call void asm sideeffect "foo $0", "=*X,~{dirflag},~{fpsr},~{flags},~{memory}"(i16** %t)
ret void
@@ -87,6 +83,20 @@ define void @varname() {
ret void
}
+; CHECK-LABEL: r_constraint
+; CHECK: i32.const $push[[S0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: set_local [[L0:[0-9]+]], $pop[[S0]]{{$}}
+; CHECK-NEXT: i32.const $push[[S1:[0-9]+]]=, 37{{$}}
+; CHECK-NEXT: set_local [[L1:[0-9]+]], $pop[[S1]]{{$}}
+; CHECK: foo [[L2:[0-9]+]], 1, [[L0]], [[L1]]{{$}}
+; CHECK: get_local $push{{[0-9]+}}=, [[L2]]{{$}}
+define hidden i32 @r_constraint(i32 %a, i32 %y) {
+entry:
+ %z = bitcast i32 0 to i32
+ %t0 = tail call i32 asm "foo $0, $1, $2, $3", "=r,r,r,r"(i32 %y, i32 %z, i32 37) #0, !srcloc !0
+ ret i32 %t0
+}
+
attributes #0 = { nounwind }
!0 = !{i32 47}
diff --git a/test/CodeGen/WebAssembly/load-ext-atomic.ll b/test/CodeGen/WebAssembly/load-ext-atomic.ll
new file mode 100644
index 000000000000..0c4552dc9afb
--- /dev/null
+++ b/test/CodeGen/WebAssembly/load-ext-atomic.ll
@@ -0,0 +1,102 @@
+; RUN: llc < %s -mattr=+atomics -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
+
+; Test that extending loads are assembled properly.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+; CHECK-LABEL: sext_i8_i32:
+; CHECK: i32.atomic.load8_u $push0=, 0($0){{$}}
+; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
+; CHECK-NEXT: return $pop1{{$}}
+define i32 @sext_i8_i32(i8 *%p) {
+ %v = load atomic i8, i8* %p seq_cst, align 1
+ %e = sext i8 %v to i32
+ ret i32 %e
+}
+
+; CHECK-LABEL: zext_i8_i32:
+; CHECK: i32.atomic.load8_u $push0=, 0($0){{$}}
+; CHECK-NEXT: return $pop0{{$}}
+define i32 @zext_i8_i32(i8 *%p) {
+e1:
+ %v = load atomic i8, i8* %p seq_cst, align 1
+ %e = zext i8 %v to i32
+ ret i32 %e
+}
+
+; CHECK-LABEL: sext_i16_i32:
+; CHECK: i32.atomic.load16_u $push0=, 0($0){{$}}
+; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
+; CHECK-NEXT: return $pop1{{$}}
+define i32 @sext_i16_i32(i16 *%p) {
+ %v = load atomic i16, i16* %p seq_cst, align 2
+ %e = sext i16 %v to i32
+ ret i32 %e
+}
+
+; CHECK-LABEL: zext_i16_i32:
+; CHECK: i32.atomic.load16_u $push0=, 0($0){{$}}
+; CHECK-NEXT: return $pop0{{$}}
+define i32 @zext_i16_i32(i16 *%p) {
+ %v = load atomic i16, i16* %p seq_cst, align 2
+ %e = zext i16 %v to i32
+ ret i32 %e
+}
+
+; CHECK-LABEL: sext_i8_i64:
+; CHECK: i64.atomic.load8_u $push0=, 0($0){{$}}
+; CHECK: i64.extend8_s $push1=, $pop0{{$}}
+; CHECK-NEXT: return $pop1{{$}}
+define i64 @sext_i8_i64(i8 *%p) {
+ %v = load atomic i8, i8* %p seq_cst, align 1
+ %e = sext i8 %v to i64
+ ret i64 %e
+}
+
+; CHECK-LABEL: zext_i8_i64:
+; CHECK: i64.atomic.load8_u $push0=, 0($0){{$}}
+; CHECK-NEXT: return $pop0{{$}}
+define i64 @zext_i8_i64(i8 *%p) {
+ %v = load atomic i8, i8* %p seq_cst, align 1
+ %e = zext i8 %v to i64
+ ret i64 %e
+}
+
+; CHECK-LABEL: sext_i16_i64:
+; CHECK: i64.atomic.load16_u $push0=, 0($0){{$}}
+; CHECK: i64.extend16_s $push1=, $pop0{{$}}
+; CHECK-NEXT: return $pop1{{$}}
+define i64 @sext_i16_i64(i16 *%p) {
+ %v = load atomic i16, i16* %p seq_cst, align 2
+ %e = sext i16 %v to i64
+ ret i64 %e
+}
+
+; CHECK-LABEL: zext_i16_i64:
+; CHECK: i64.atomic.load16_u $push0=, 0($0){{$}}
+; CHECK-NEXT: return $pop0{{$}}
+define i64 @zext_i16_i64(i16 *%p) {
+ %v = load atomic i16, i16* %p seq_cst, align 2
+ %e = zext i16 %v to i64
+ ret i64 %e
+}
+
+; CHECK-LABEL: sext_i32_i64:
+; CHECK: i32.atomic.load $push0=, 0($0){{$}}
+; CHECK: i64.extend_s/i32 $push1=, $pop0{{$}}
+; CHECK-NEXT: return $pop1{{$}}
+define i64 @sext_i32_i64(i32 *%p) {
+ %v = load atomic i32, i32* %p seq_cst, align 4
+ %e = sext i32 %v to i64
+ ret i64 %e
+}
+
+; CHECK-LABEL: zext_i32_i64:
+; CHECK: i64.atomic.load32_u $push0=, 0($0){{$}}
+; CHECK: return $pop0{{$}}
+define i64 @zext_i32_i64(i32 *%p) {
+ %v = load atomic i32, i32* %p seq_cst, align 4
+ %e = zext i32 %v to i64
+ ret i64 %e
+}
diff --git a/test/CodeGen/WebAssembly/lower-global-dtors.ll b/test/CodeGen/WebAssembly/lower-global-dtors.ll
new file mode 100644
index 000000000000..c3d654091a1c
--- /dev/null
+++ b/test/CodeGen/WebAssembly/lower-global-dtors.ll
@@ -0,0 +1,139 @@
+; RUN: llc < %s -asm-verbose=false | FileCheck --check-prefix=CHECK --check-prefix=FINI --check-prefix=NULL %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+; Test that @llvm.global_dtors is properly lowered into @llvm.global_ctors,
+; grouping dtor calls by priority and associated symbol.
+
+declare void @orig_ctor()
+declare void @orig_dtor0()
+declare void @orig_dtor1a()
+declare void @orig_dtor1b()
+declare void @orig_dtor1c0()
+declare void @orig_dtor1c1a()
+declare void @orig_dtor1c1b()
+declare void @orig_dtor65536()
+declare void @after_the_null()
+
+@associated1c0 = external global i8
+@associated1c1 = external global i8
+
+@llvm.global_ctors = appending global
+[1 x { i32, void ()*, i8* }]
+[
+ { i32, void ()*, i8* } { i32 200, void ()* @orig_ctor, i8* null }
+]
+
+@llvm.global_dtors = appending global
+[9 x { i32, void ()*, i8* }]
+[
+ { i32, void ()*, i8* } { i32 0, void ()* @orig_dtor0, i8* null },
+ { i32, void ()*, i8* } { i32 1, void ()* @orig_dtor1a, i8* null },
+ { i32, void ()*, i8* } { i32 1, void ()* @orig_dtor1b, i8* null },
+ { i32, void ()*, i8* } { i32 1, void ()* @orig_dtor1c0, i8* @associated1c0 },
+ { i32, void ()*, i8* } { i32 1, void ()* @orig_dtor1c1a, i8* @associated1c1 },
+ { i32, void ()*, i8* } { i32 1, void ()* @orig_dtor1c1b, i8* @associated1c1 },
+ { i32, void ()*, i8* } { i32 65535, void ()* @orig_dtor65536, i8* null },
+ { i32, void ()*, i8* } { i32 65535, void ()* null, i8* null },
+ { i32, void ()*, i8* } { i32 65535, void ()* @after_the_null, i8* null }
+]
+
+; CHECK-LABEL: .Lcall_dtors.0:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: call orig_dtor0@FUNCTION{{$}}
+
+; CHECK-LABEL: .Lregister_call_dtors.0:
+; CHECK-NEXT: block
+; CHECK-NEXT: i32.const $push2=, .Lcall_dtors.0@FUNCTION{{$}}
+; CHECK-NEXT: i32.const $push1=, 0
+; CHECK-NEXT: i32.const $push0=, __dso_handle
+; CHECK-NEXT: i32.call $push3=, __cxa_atexit@FUNCTION, $pop2, $pop1, $pop0{{$}}
+; CHECK-NEXT: br_if 0, $pop3
+; CHECK-NEXT: return
+; CHECK: end_block
+; CHECK-NEXT: unreachable
+
+; CHECK-LABEL: .Lcall_dtors.1:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: call orig_dtor1a@FUNCTION{{$}}
+; CHECK-NEXT: call orig_dtor1b@FUNCTION{{$}}
+
+; CHECK-LABEL: .Lregister_call_dtors.1:
+; CHECK-NEXT: block
+; CHECK-NEXT: i32.const $push2=, .Lcall_dtors.1@FUNCTION{{$}}
+; CHECK-NEXT: i32.const $push1=, 0
+; CHECK-NEXT: i32.const $push0=, __dso_handle
+; CHECK-NEXT: i32.call $push3=, __cxa_atexit@FUNCTION, $pop2, $pop1, $pop0{{$}}
+; CHECK-NEXT: br_if 0, $pop3
+; CHECK-NEXT: return
+; CHECK: end_block
+; CHECK-NEXT: unreachable
+
+; CHECK-LABEL: .Lcall_dtors.1.associated1c0:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: call orig_dtor1c0@FUNCTION{{$}}
+
+; CHECK-LABEL: .Lregister_call_dtors.1.associated1c0:
+; CHECK-NEXT: block
+; CHECK-NEXT: i32.const $push2=, .Lcall_dtors.1.associated1c0@FUNCTION{{$}}
+; CHECK-NEXT: i32.const $push1=, 0
+; CHECK-NEXT: i32.const $push0=, __dso_handle
+; CHECK-NEXT: i32.call $push3=, __cxa_atexit@FUNCTION, $pop2, $pop1, $pop0{{$}}
+; CHECK-NEXT: br_if 0, $pop3
+; CHECK-NEXT: return
+; CHECK: end_block
+; CHECK-NEXT: unreachable
+
+; CHECK-LABEL: .Lcall_dtors.1.associated1c1:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: call orig_dtor1c1a@FUNCTION{{$}}
+; CHECK-NEXT: call orig_dtor1c1b@FUNCTION{{$}}
+
+; CHECK-LABEL: .Lregister_call_dtors.1.associated1c1:
+; CHECK-NEXT: block
+; CHECK-NEXT: i32.const $push2=, .Lcall_dtors.1.associated1c1@FUNCTION{{$}}
+; CHECK-NEXT: i32.const $push1=, 0
+; CHECK-NEXT: i32.const $push0=, __dso_handle
+; CHECK-NEXT: i32.call $push3=, __cxa_atexit@FUNCTION, $pop2, $pop1, $pop0{{$}}
+; CHECK-NEXT: br_if 0, $pop3
+; CHECK-NEXT: return
+; CHECK: end_block
+; CHECK-NEXT: unreachable
+
+; CHECK-LABEL: .Lcall_dtors:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: call orig_dtor65536@FUNCTION{{$}}
+
+; CHECK-LABEL: .Lregister_call_dtors:
+; CHECK-NEXT: block
+; CHECK-NEXT: i32.const $push2=, .Lcall_dtors@FUNCTION{{$}}
+; CHECK-NEXT: i32.const $push1=, 0
+; CHECK-NEXT: i32.const $push0=, __dso_handle
+; CHECK-NEXT: i32.call $push3=, __cxa_atexit@FUNCTION, $pop2, $pop1, $pop0{{$}}
+; CHECK-NEXT: br_if 0, $pop3
+; CHECK-NEXT: return
+; CHECK: end_block
+; CHECK-NEXT: unreachable
+
+; CHECK-LABEL: .section .init_array.0,"",@
+; CHECK: .int32 .Lregister_call_dtors.0@FUNCTION{{$}}
+; CHECK-LABEL: .section .init_array.1,"",@
+; CHECK: .int32 .Lregister_call_dtors.1@FUNCTION{{$}}
+; CHECK-LABEL: .section .init_array.200,"",@
+; CHECK: .int32 orig_ctor@FUNCTION{{$}}
+; CHECK-LABEL: .section .init_array,"",@
+; CHECK: .int32 .Lregister_call_dtors@FUNCTION{{$}}
+
+; CHECK-LABEL: .weak __dso_handle
+
+; CHECK-LABEL: .functype __cxa_atexit, i32, i32, i32, i32{{$}}
+
+; We shouldn't make use of a .fini_array section.
+
+; FINI-NOT: fini_array
+
+; This function is listed after the null terminator, so it should
+; be excluded.
+
+; NULL-NOT: after_the_null
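
One (priority, associated symbol) group from the CHECK lines above corresponds roughly to the following IR; the symbol names mirror .Lcall_dtors.1 and .Lregister_call_dtors.1 but are illustrative, not taken from the pass output:

declare i32 @__cxa_atexit(void (i8*)*, i8*, i8*)
@__dso_handle = external global i8
declare void @orig_dtor1a()
declare void @orig_dtor1b()

; Body that runs the group's destructors in registration order.
define internal void @call_dtors.1(i8* %arg) {
  call void @orig_dtor1a()
  call void @orig_dtor1b()
  ret void
}

; Registration thunk referenced from .init_array.1; a nonzero return from
; __cxa_atexit is treated as fatal (the unreachable in the CHECK lines).
define internal void @register_call_dtors.1() {
  %r = call i32 @__cxa_atexit(void (i8*)* @call_dtors.1, i8* null, i8* @__dso_handle)
  %failed = icmp ne i32 %r, 0
  br i1 %failed, label %fail, label %done
fail:
  unreachable
done:
  ret void
}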
diff --git a/test/CodeGen/WebAssembly/main-declaration.ll b/test/CodeGen/WebAssembly/main-declaration.ll
new file mode 100644
index 000000000000..4e337850b81a
--- /dev/null
+++ b/test/CodeGen/WebAssembly/main-declaration.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -asm-verbose=false -wasm-temporary-workarounds=false | FileCheck %s
+
+; Test main functions with alternate signatures.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+declare void @main()
+
+define void @foo() {
+ call void @main()
+ ret void
+}
+
+; CHECK-NOT: __original_main
+; CHECK-LABEL: foo:
+; CHECK-NEXT: call main@FUNCTION
+; CHECK-NEXT: end_function
+; CHECK-NOT: __original_main
diff --git a/test/CodeGen/WebAssembly/main.ll b/test/CodeGen/WebAssembly/main.ll
new file mode 100644
index 000000000000..c77db8467d80
--- /dev/null
+++ b/test/CodeGen/WebAssembly/main.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -asm-verbose=false -wasm-temporary-workarounds=false | FileCheck %s
+
+; Test main functions with alternate signatures.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+define void @main() {
+ ret void
+}
+
+; CHECK-LABEL: .L__original_main:
+; CHECK-NEXT: end_function
+
+; CHECK-LABEL: main:
+; CHECK-NEXT: .param i32, i32
+; CHECK-NEXT: .result i32
+; CHECK: call .L__original_main@FUNCTION
diff --git a/test/CodeGen/WebAssembly/offset-atomics.ll b/test/CodeGen/WebAssembly/offset-atomics.ll
new file mode 100644
index 000000000000..24727fc2608d
--- /dev/null
+++ b/test/CodeGen/WebAssembly/offset-atomics.ll
@@ -0,0 +1,307 @@
+; RUN: not llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -mattr=+atomics | FileCheck %s
+
+; Test that atomic loads are assembled properly.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+; CHECK-LABEL: load_i32_no_offset:
+; CHECK: i32.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i32 @load_i32_no_offset(i32 *%p) {
+ %v = load atomic i32, i32* %p seq_cst, align 4
+ ret i32 %v
+}
+
+; With an nuw add, we can fold an offset.
+
+; CHECK-LABEL: load_i32_with_folded_offset:
+; CHECK: i32.atomic.load $push0=, 24($0){{$}}
+define i32 @load_i32_with_folded_offset(i32* %p) {
+ %q = ptrtoint i32* %p to i32
+ %r = add nuw i32 %q, 24
+ %s = inttoptr i32 %r to i32*
+ %t = load atomic i32, i32* %s seq_cst, align 4
+ ret i32 %t
+}
+
+; With an inbounds gep, we can fold an offset.
+
+; CHECK-LABEL: load_i32_with_folded_gep_offset:
+; CHECK: i32.atomic.load $push0=, 24($0){{$}}
+define i32 @load_i32_with_folded_gep_offset(i32* %p) {
+ %s = getelementptr inbounds i32, i32* %p, i32 6
+ %t = load atomic i32, i32* %s seq_cst, align 4
+ ret i32 %t
+}
+
+; We can't fold a negative offset though, even with an inbounds gep.
+
+; CHECK-LABEL: load_i32_with_unfolded_gep_negative_offset:
+; CHECK: i32.const $push0=, -24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i32.atomic.load $push2=, 0($pop1){{$}}
+define i32 @load_i32_with_unfolded_gep_negative_offset(i32* %p) {
+ %s = getelementptr inbounds i32, i32* %p, i32 -6
+ %t = load atomic i32, i32* %s seq_cst, align 4
+ ret i32 %t
+}
+
+; Without nuw, and even with nsw, we can't fold an offset.
+
+; CHECK-LABEL: load_i32_with_unfolded_offset:
+; CHECK: i32.const $push0=, 24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i32.atomic.load $push2=, 0($pop1){{$}}
+define i32 @load_i32_with_unfolded_offset(i32* %p) {
+ %q = ptrtoint i32* %p to i32
+ %r = add nsw i32 %q, 24
+ %s = inttoptr i32 %r to i32*
+ %t = load atomic i32, i32* %s seq_cst, align 4
+ ret i32 %t
+}
+
+; Without inbounds, we can't fold a gep offset.
+
+; CHECK-LABEL: load_i32_with_unfolded_gep_offset:
+; CHECK: i32.const $push0=, 24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i32.atomic.load $push2=, 0($pop1){{$}}
+define i32 @load_i32_with_unfolded_gep_offset(i32* %p) {
+ %s = getelementptr i32, i32* %p, i32 6
+ %t = load atomic i32, i32* %s seq_cst, align 4
+ ret i32 %t
+}
+
+; CHECK-LABEL: load_i64_no_offset:
+; CHECK: i64.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @load_i64_no_offset(i64 *%p) {
+ %v = load atomic i64, i64* %p seq_cst, align 8
+ ret i64 %v
+}
+
+; Same as above but with i64.
+
+; CHECK-LABEL: load_i64_with_folded_offset:
+; CHECK: i64.atomic.load $push0=, 24($0){{$}}
+define i64 @load_i64_with_folded_offset(i64* %p) {
+ %q = ptrtoint i64* %p to i32
+ %r = add nuw i32 %q, 24
+ %s = inttoptr i32 %r to i64*
+ %t = load atomic i64, i64* %s seq_cst, align 8
+ ret i64 %t
+}
+
+; Same as above but with i64.
+
+; CHECK-LABEL: load_i64_with_folded_gep_offset:
+; CHECK: i64.atomic.load $push0=, 24($0){{$}}
+define i64 @load_i64_with_folded_gep_offset(i64* %p) {
+ %s = getelementptr inbounds i64, i64* %p, i32 3
+ %t = load atomic i64, i64* %s seq_cst, align 8
+ ret i64 %t
+}
+
+; Same as above but with i64.
+
+; CHECK-LABEL: load_i64_with_unfolded_gep_negative_offset:
+; CHECK: i32.const $push0=, -24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i64.atomic.load $push2=, 0($pop1){{$}}
+define i64 @load_i64_with_unfolded_gep_negative_offset(i64* %p) {
+ %s = getelementptr inbounds i64, i64* %p, i32 -3
+ %t = load atomic i64, i64* %s seq_cst, align 8
+ ret i64 %t
+}
+
+; Same as above but with i64.
+
+; CHECK-LABEL: load_i64_with_unfolded_offset:
+; CHECK: i32.const $push0=, 24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i64.atomic.load $push2=, 0($pop1){{$}}
+define i64 @load_i64_with_unfolded_offset(i64* %p) {
+ %q = ptrtoint i64* %p to i32
+ %r = add nsw i32 %q, 24
+ %s = inttoptr i32 %r to i64*
+ %t = load atomic i64, i64* %s seq_cst, align 8
+ ret i64 %t
+}
+
+; Same as above but with i64.
+
+; CHECK-LABEL: load_i64_with_unfolded_gep_offset:
+; CHECK: i32.const $push0=, 24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i64.atomic.load $push2=, 0($pop1){{$}}
+define i64 @load_i64_with_unfolded_gep_offset(i64* %p) {
+ %s = getelementptr i64, i64* %p, i32 3
+ %t = load atomic i64, i64* %s seq_cst, align 8
+ ret i64 %t
+}
+
+; CHECK-LABEL: load_i32_with_folded_or_offset:
+; CHECK: i32.atomic.load8_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}){{$}}
+; CHECK-NEXT: i32.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
+define i32 @load_i32_with_folded_or_offset(i32 %x) {
+ %and = and i32 %x, -4
+ %t0 = inttoptr i32 %and to i8*
+ %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
+ %t1 = load atomic i8, i8* %arrayidx seq_cst, align 8
+ %conv = sext i8 %t1 to i32
+ ret i32 %conv
+}
+
+; When loading from a fixed address, materialize a zero.
+
+; CHECK-LABEL: load_i32_from_numeric_address
+; CHECK: i32.const $push0=, 0{{$}}
+; CHECK: i32.atomic.load $push1=, 42($pop0){{$}}
+define i32 @load_i32_from_numeric_address() {
+ %s = inttoptr i32 42 to i32*
+ %t = load atomic i32, i32* %s seq_cst, align 4
+ ret i32 %t
+}
+
+
+; CHECK-LABEL: load_i32_from_global_address
+; CHECK: i32.const $push0=, 0{{$}}
+; CHECK: i32.atomic.load $push1=, gv($pop0){{$}}
+@gv = global i32 0
+define i32 @load_i32_from_global_address() {
+ %t = load atomic i32, i32* @gv seq_cst, align 4
+ ret i32 %t
+}
+
+; Fold an offset into a sign-extending load.
+
+; CHECK-LABEL: load_i8_s_with_folded_offset:
+; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
+; CHECK-NEXT: i32.extend8_s $push1=, $pop0
+define i32 @load_i8_s_with_folded_offset(i8* %p) {
+ %q = ptrtoint i8* %p to i32
+ %r = add nuw i32 %q, 24
+ %s = inttoptr i32 %r to i8*
+ %t = load atomic i8, i8* %s seq_cst, align 1
+ %u = sext i8 %t to i32
+ ret i32 %u
+}
+
+; Fold a gep offset into a sign-extending load.
+
+; CHECK-LABEL: load_i8_s_with_folded_gep_offset:
+; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
+; CHECK-NEXT: i32.extend8_s $push1=, $pop0
+define i32 @load_i8_s_with_folded_gep_offset(i8* %p) {
+ %s = getelementptr inbounds i8, i8* %p, i32 24
+ %t = load atomic i8, i8* %s seq_cst, align 1
+ %u = sext i8 %t to i32
+ ret i32 %u
+}
+
+; CHECK-LABEL: load_i16_s_i64_with_folded_gep_offset:
+; CHECK: i64.atomic.load16_u $push0=, 6($0){{$}}
+define i64 @load_i16_s_i64_with_folded_gep_offset(i16* %p) {
+ %s = getelementptr inbounds i16, i16* %p, i32 3
+ %t = load atomic i16, i16* %s seq_cst, align 2
+ %u = zext i16 %t to i64
+ ret i64 %u
+}
+
+; CHECK-LABEL: load_i64_with_folded_or_offset:
+; CHECK: i64.atomic.load8_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}){{$}}
+; CHECK-NEXT: i64.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
+define i64 @load_i64_with_folded_or_offset(i32 %x) {
+ %and = and i32 %x, -4
+ %t0 = inttoptr i32 %and to i8*
+ %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
+ %t1 = load atomic i8, i8* %arrayidx seq_cst, align 8
+ %conv = sext i8 %t1 to i64
+ ret i64 %conv
+}
+
+
+; Fold an offset into a zero-extending load.
+
+; CHECK-LABEL: load_i16_u_with_folded_offset:
+; CHECK: i32.atomic.load16_u $push0=, 24($0){{$}}
+define i32 @load_i16_u_with_folded_offset(i8* %p) {
+ %q = ptrtoint i8* %p to i32
+ %r = add nuw i32 %q, 24
+ %s = inttoptr i32 %r to i16*
+ %t = load atomic i16, i16* %s seq_cst, align 2
+ %u = zext i16 %t to i32
+ ret i32 %u
+}
+
+; Fold a gep offset into a zero-extending load.
+
+; CHECK-LABEL: load_i8_u_with_folded_gep_offset:
+; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
+define i32 @load_i8_u_with_folded_gep_offset(i8* %p) {
+ %s = getelementptr inbounds i8, i8* %p, i32 24
+ %t = load atomic i8, i8* %s seq_cst, align 1
+ %u = zext i8 %t to i32
+ ret i32 %u
+}
+
+
+; When loading from a fixed address, materialize a zero.
+; As above but with extending load.
+
+; CHECK-LABEL: load_zext_i32_from_numeric_address
+; CHECK: i32.const $push0=, 0{{$}}
+; CHECK: i32.atomic.load16_u $push1=, 42($pop0){{$}}
+define i32 @load_zext_i32_from_numeric_address() {
+ %s = inttoptr i32 42 to i16*
+ %t = load atomic i16, i16* %s seq_cst, align 2
+ %u = zext i16 %t to i32
+ ret i32 %u
+}
+
+; CHECK-LABEL: load_sext_i32_from_global_address
+; CHECK: i32.const $push0=, 0{{$}}
+; CHECK: i32.atomic.load8_u $push1=, gv8($pop0){{$}}
+; CHECK-NEXT: i32.extend8_s $push2=, $pop1{{$}}
+@gv8 = global i8 0
+define i32 @load_sext_i32_from_global_address() {
+ %t = load atomic i8, i8* @gv8 seq_cst, align 1
+ %u = sext i8 %t to i32
+ ret i32 %u
+}
+
+; Fold an offset into a sign-extending load.
+; As above but 32 extended to 64 bit.
+; CHECK-LABEL: load_i32_i64_s_with_folded_offset:
+; CHECK: i32.atomic.load $push0=, 24($0){{$}}
+; CHECK-NEXT: i64.extend_s/i32 $push1=, $pop0{{$}}
+define i64 @load_i32_i64_s_with_folded_offset(i32* %p) {
+ %q = ptrtoint i32* %p to i32
+ %r = add nuw i32 %q, 24
+ %s = inttoptr i32 %r to i32*
+ %t = load atomic i32, i32* %s seq_cst, align 4
+ %u = sext i32 %t to i64
+ ret i64 %u
+}
+
+; Fold a gep offset into a zero-extending load.
+; As above but 32 extended to 64 bit.
+; CHECK-LABEL: load_i32_i64_u_with_folded_gep_offset:
+; CHECK: i64.atomic.load32_u $push0=, 96($0){{$}}
+define i64 @load_i32_i64_u_with_folded_gep_offset(i32* %p) {
+ %s = getelementptr inbounds i32, i32* %p, i32 24
+ %t = load atomic i32, i32* %s seq_cst, align 4
+ %u = zext i32 %t to i64
+ ret i64 %u
+}
+
+; i8 return value should test anyext loads
+; CHECK-LABEL: ldi8_a1:
+; CHECK: i32.atomic.load8_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i8 @ldi8_a1(i8 *%p) {
+ %v = load atomic i8, i8* %p seq_cst, align 1
+ ret i8 %v
+}
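
The folding rules exercised above follow from how WebAssembly computes effective addresses: the constant offset field is added to the base without 32-bit wraparound, so folding is only sound when the IR address arithmetic is itself non-wrapping (an nuw add or an inbounds gep). A minimal sketch of the negative case with a plain add (names illustrative, not from the patch):

; Plain add: the address may wrap modulo 2^32, so the 24 cannot be folded
; into the load's offset field and stays as an explicit i32.add.
define i32 @unfoldable_plain_add(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add i32 %q, 24
  %s = inttoptr i32 %r to i32*
  %t = load atomic i32, i32* %s seq_cst, align 4
  ret i32 %t
}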
diff --git a/test/CodeGen/WebAssembly/reg-stackify.ll b/test/CodeGen/WebAssembly/reg-stackify.ll
index ebda5373c600..c6602d81d4b8 100644
--- a/test/CodeGen/WebAssembly/reg-stackify.ll
+++ b/test/CodeGen/WebAssembly/reg-stackify.ll
@@ -357,7 +357,9 @@ bb17: ; preds = %bb13, %bb8
bb21: ; preds = %bb17, %bb5
%tmp22 = phi double [ %tmp, %bb5 ], [ %tmp9, %bb17 ]
%tmp23 = fadd double %tmp6, 1.000000e+00
- br label %bb5
+ br i1 %arg4, label %exit, label %bb5
+exit:
+ ret void
}
; Don't move calls past loads
diff --git a/test/CodeGen/WebAssembly/signext-arg.ll b/test/CodeGen/WebAssembly/signext-arg.ll
new file mode 100644
index 000000000000..32d74a20b755
--- /dev/null
+++ b/test/CodeGen/WebAssembly/signext-arg.ll
@@ -0,0 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -march=wasm32 | FileCheck %s
+
+declare i32 @get_int(i16 %arg)
+
+define i32 @func_1(i16 %arg1 , i32 %arg2) #0 {
+; CHECK-LABEL: func_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: i32.const $push1=, 16
+; CHECK-NEXT: i32.shl $push2=, $0, $pop1
+; CHECK-NEXT: i32.const $push4=, 16
+; CHECK-NEXT: i32.shr_s $push3=, $pop2, $pop4
+; CHECK-NEXT: i32.call $push0=, get_int@FUNCTION, $pop3
+; CHECK-NEXT: # fallthrough-return: $pop0
+; CHECK-NEXT: .endfunc
+entry:
+ %retval = call i32 @get_int(i16 signext %arg1)
+ ret i32 %retval
+}
+
+attributes #0 = {noinline nounwind optnone}
+
diff --git a/test/CodeGen/WebAssembly/signext-inreg.ll b/test/CodeGen/WebAssembly/signext-inreg.ll
new file mode 100644
index 000000000000..c97a1bf1b0e8
--- /dev/null
+++ b/test/CodeGen/WebAssembly/signext-inreg.ll
@@ -0,0 +1,71 @@
+; RUN: llc < %s -mattr=+atomics -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s --check-prefix=NOATOMIC
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+; CHECK-LABEL: i32_extend8_s:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: i32.extend8_s $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+
+; NOATOMIC-LABEL: i32_extend8_s
+; NOATOMIC-NOT: i32.extend8_s
+define i32 @i32_extend8_s(i8 %x) {
+ %a = sext i8 %x to i32
+ ret i32 %a
+}
+
+; CHECK-LABEL: i32_extend16_s:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: i32.extend16_s $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+
+; NOATOMIC-LABEL: i32_extend16_s
+; NOATOMIC-NOT: i32.extend16_s
+define i32 @i32_extend16_s(i16 %x) {
+ %a = sext i16 %x to i32
+ ret i32 %a
+}
+
+; CHECK-LABEL: i64_extend8_s:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.extend_u/i32 $push[[NUM1:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.extend8_s $push[[NUM2:[0-9]+]]=, $pop[[NUM1]]{{$}}
+; CHECK-NEXT: return $pop[[NUM2]]{{$}}
+
+; NOATOMIC-LABEL: i64_extend8_s
+; NOATOMIC-NOT: i64.extend8_s
+define i64 @i64_extend8_s(i8 %x) {
+ %a = sext i8 %x to i64
+ ret i64 %a
+}
+
+; CHECK-LABEL: i64_extend16_s:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.extend_u/i32 $push[[NUM1:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: i64.extend16_s $push[[NUM2:[0-9]+]]=, $pop[[NUM1]]{{$}}
+; CHECK-NEXT: return $pop[[NUM2]]{{$}}
+
+; NOATOMIC-LABEL: i64_extend16_s
+; NOATOMIC-NOT: i16.extend16_s
+define i64 @i64_extend16_s(i16 %x) {
+ %a = sext i16 %x to i64
+ ret i64 %a
+}
+
+; No SIGN_EXTEND_INREG is needed for 32->64 extension.
+; CHECK-LABEL: i64_extend32_s:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.extend_s/i32 $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @i64_extend32_s(i32 %x) {
+ %a = sext i32 %x to i64
+ ret i64 %a
+}
+
diff --git a/test/CodeGen/WebAssembly/umulo-i64.ll b/test/CodeGen/WebAssembly/umulo-i64.ll
index e47c8aa0bb3a..75c2d507e4a4 100644
--- a/test/CodeGen/WebAssembly/umulo-i64.ll
+++ b/test/CodeGen/WebAssembly/umulo-i64.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -asm-verbose=false | FileCheck %s
; Test that UMULO works correctly on 64-bit operands.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-emscripten"
+target triple = "wasm32-unknown-unknown"
; CHECK-LABEL: _ZN4core3num21_$LT$impl$u20$u64$GT$15overflowing_mul17h07be88b4cbac028fE:
; CHECK: __multi3
@@ -19,3 +19,14 @@ declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64) #1
attributes #0 = { inlinehint }
attributes #1 = { nounwind readnone speculatable }
+
+; CHECK-LABEL: wut:
+; CHECK: call __multi3@FUNCTION, $2, $0, $pop0, $1, $pop10
+; CHECK: i64.load $0=, 8($2)
+define i1 @wut(i64, i64) {
+start:
+ %2 = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %0, i64 %1)
+ %3 = extractvalue { i64, i1 } %2, 1
+ ret i1 %3
+}
+
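
The new check exercises the lowering of llvm.umul.with.overflow.i64: on wasm32 the multiply is widened to 128 bits, performed by the __multi3 libcall, and overflow is detected by testing the high half. Conceptually it behaves like the sketch below (not the backend's literal expansion):

define i1 @umulo_i64_concept(i64 %a, i64 %b) {
  %a128 = zext i64 %a to i128
  %b128 = zext i64 %b to i128
  %full = mul i128 %a128, %b128        ; becomes the __multi3 call on wasm32
  %hi = lshr i128 %full, 64            ; the high half loaded by "i64.load $0=, 8($2)"
  %overflow = icmp ne i128 %hi, 0
  ret i1 %overflow
}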