Diffstat (limited to 'test')
| -rw-r--r-- | test/CodeGen/AArch64/aarch64_win64cc_vararg.ll | 4 |
| -rw-r--r-- | test/CodeGen/AArch64/misched-fusion-aes.ll | 130 |
| -rw-r--r-- | test/CodeGen/AArch64/tbi.ll | 11 |
| -rw-r--r-- | test/CodeGen/AArch64/win64_vararg.ll | 111 |
| -rw-r--r-- | test/CodeGen/Mips/cconv/pr33883.ll | 12 |
| -rw-r--r-- | test/CodeGen/Mips/pr33978.ll | 20 |
| -rw-r--r-- | test/CodeGen/X86/conditional-tailcall-samedest.mir | 139 |
| -rw-r--r-- | test/CodeGen/X86/pause.ll | 15 |
| -rw-r--r-- | test/CodeGen/X86/tail-call-mutable-memarg.ll | 42 |
| -rw-r--r-- | test/DllTool/coff-weak-exports.def | 18 |
| -rw-r--r-- | test/Instrumentation/AddressSanitizer/force-dynamic-shadow.ll | 22 |
| -rw-r--r-- | test/Instrumentation/AddressSanitizer/stack-poisoning-byval-args.ll | 4 |
| -rw-r--r-- | test/MC/AArch64/arm64-crypto.s | 1 |
| -rw-r--r-- | test/Transforms/ArgumentPromotion/byval.ll | 39 |
| -rw-r--r-- | test/Transforms/InstSimplify/pr33957.ll | 29 |
| -rw-r--r-- | test/Transforms/SCCP/definite-initializer.ll | 11 |
16 files changed, 499 insertions, 109 deletions
diff --git a/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll b/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
index 89efe335e329..43b821fa37c8 100644
--- a/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
+++ b/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
@@ -2,14 +2,14 @@
 define win64cc void @pass_va(i32 %count, ...) nounwind {
 entry:
-; CHECK: sub sp, sp, #80
+; CHECK: str x30, [sp, #-80]!
 ; CHECK: add x8, sp, #24
 ; CHECK: add x0, sp, #24
 ; CHECK: stp x6, x7, [sp, #64]
 ; CHECK: stp x4, x5, [sp, #48]
 ; CHECK: stp x2, x3, [sp, #32]
 ; CHECK: str x1, [sp, #24]
-; CHECK: stp x30, x8, [sp]
+; CHECK: str x8, [sp, #8]
 ; CHECK: bl other_func
 ; CHECK: ldr x30, [sp], #80
 ; CHECK: ret
diff --git a/test/CodeGen/AArch64/misched-fusion-aes.ll b/test/CodeGen/AArch64/misched-fusion-aes.ll
index 8ee4dbcee52b..9c3af6dae300 100644
--- a/test/CodeGen/AArch64/misched-fusion-aes.ll
+++ b/test/CodeGen/AArch64/misched-fusion-aes.ll
@@ -1,10 +1,10 @@
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mattr=+fuse-aes,+crypto | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=generic -mattr=+crypto | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a53 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a57 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a72 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a73 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKM1
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mattr=+fuse-aes,+crypto | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=generic -mattr=+crypto | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a53 | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a57 | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a72 | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a73 | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m1 | FileCheck %s
 declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d, <16 x i8> %k)
 declare <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %d)
@@ -76,41 +76,23 @@ define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
 ret void
 ; CHECK-LABEL: aesea:
-; CHECKFUSEALLPAIRS: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VA]]
-; CHECKFUSEALLPAIRS: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VB]]
-; CHECKFUSEALLPAIRS: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VC]]
-; CHECKFUSEALLPAIRS: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VD]]
-; CHECKFUSEALLPAIRS: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VE]]
-; CHECKFUSEALLPAIRS: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VF]]
-; CHECKFUSEALLPAIRS: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VG]]
-; CHECKFUSEALLPAIRS: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VH]]
-; CHECKFUSEALLPAIRS-NOT: aesmc
-
-; CHECKM1: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VA]]
-; CHECKM1: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VB]]
-; CHECKM1: aese {{v[0-7].16b}}, {{v[0-7].16b}}
-; CHECKM1: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VC]]
-; CHECKM1: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VD]]
-; CHECKM1: aesmc {{v[0-7].16b}}, [[VH]]
-; CHECKM1: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VE]]
-; CHECKM1: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VF]]
-; CHECKM1: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VG]]
+; CHECK: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VA]], [[VA]]
+; CHECK: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VB]], [[VB]]
+; CHECK: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VC]], [[VC]]
+; CHECK: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VD]], [[VD]]
+; CHECK: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VE]], [[VE]]
+; CHECK: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VF]], [[VF]]
+; CHECK: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VG]], [[VG]]
+; CHECK: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VH]], [[VH]]
+; CHECK-NOT: aesmc
 }
 define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
@@ -178,41 +160,23 @@ define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
 ret void
 ; CHECK-LABEL: aesda:
-; CHECKFUSEALLPAIRS: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VA]]
-; CHECKFUSEALLPAIRS: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VB]]
-; CHECKFUSEALLPAIRS: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VC]]
-; CHECKFUSEALLPAIRS: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VD]]
-; CHECKFUSEALLPAIRS: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VE]]
-; CHECKFUSEALLPAIRS: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VF]]
-; CHECKFUSEALLPAIRS: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VG]]
-; CHECKFUSEALLPAIRS: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VH]]
-; CHECKFUSEALLPAIRS-NOT: aesimc
-
-; CHECKM1: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VA]]
-; CHECKM1: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VB]]
-; CHECKM1: aesd {{v[0-7].16b}}, {{v[0-7].16b}}
-; CHECKM1: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VC]]
-; CHECKM1: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VD]]
-; CHECKM1: aesimc {{v[0-7].16b}}, [[VH]]
-; CHECKM1: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VE]]
-; CHECKM1: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VF]]
-; CHECKM1: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VG]]
+; CHECK: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VA]], [[VA]]
+; CHECK: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VB]], [[VB]]
+; CHECK: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VC]], [[VC]]
+; CHECK: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VD]], [[VD]]
+; CHECK: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VE]], [[VE]]
+; CHECK: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VF]], [[VF]]
+; CHECK: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VG]], [[VG]]
+; CHECK: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VH]], [[VH]]
+; CHECK-NOT: aesimc
 }
 define void @aes_load_store(<16 x i8> *%p1, <16 x i8> *%p2 , <16 x i8> *%p3) {
@@ -225,20 +189,20 @@ entry:
 %in1 = load <16 x i8>, <16 x i8>* %p1, align 16
 store <16 x i8> %in1, <16 x i8>* %x1, align 16
 %aese1 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in1) #2
- store <16 x i8> %aese1, <16 x i8>* %x2, align 16
 %in2 = load <16 x i8>, <16 x i8>* %p2, align 16
 %aesmc1= call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese1) #2
- store <16 x i8> %aesmc1, <16 x i8>* %x3, align 16
 %aese2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in2) #2
- store <16 x i8> %aese2, <16 x i8>* %x4, align 16
+ store <16 x i8> %aesmc1, <16 x i8>* %x3, align 16
+ %in3 = load <16 x i8>, <16 x i8>* %p3, align 16
 %aesmc2= call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese2) #2
- store <16 x i8> %aesmc2, <16 x i8>* %x5, align 16
+ %aese3 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %aesmc2, <16 x i8> %in3) #2
+ store <16 x i8> %aese3, <16 x i8>* %x5, align 16
 ret void
 ; CHECK-LABEL: aes_load_store:
 ; CHECK: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECK-NEXT: aesmc {{v[0-7].16b}}, [[VA]]
+; CHECK-NEXT: aesmc [[VA]], [[VA]]
 ; CHECK: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECK-NEXT: aesmc {{v[0-7].16b}}, [[VB]]
+; CHECK-NEXT: aesmc [[VB]], [[VB]]
 ; CHECK-NOT: aesmc
 }
diff --git a/test/CodeGen/AArch64/tbi.ll b/test/CodeGen/AArch64/tbi.ll
index ab2d31b7cacc..153bd4e6438d 100644
--- a/test/CodeGen/AArch64/tbi.ll
+++ b/test/CodeGen/AArch64/tbi.ll
@@ -100,3 +100,14 @@ define i32 @ld_and32_narrower(i64 %p) {
 %load = load i32, i32* %cast
 ret i32 %load
 }
+
+; BOTH-LABEL:ld_and8:
+; BOTH: and x
+define i32 @ld_and8(i64 %base, i8 %off) {
+ %off_masked = and i8 %off, 63
+ %off_64 = zext i8 %off_masked to i64
+ %p = add i64 %base, %off_64
+ %cast = inttoptr i64 %p to i32*
+ %load = load i32, i32* %cast
+ ret i32 %load
+}
diff --git a/test/CodeGen/AArch64/win64_vararg.ll b/test/CodeGen/AArch64/win64_vararg.ll
index 3acc7e520c96..7e28c9f79ec8 100644
--- a/test/CodeGen/AArch64/win64_vararg.ll
+++ b/test/CodeGen/AArch64/win64_vararg.ll
@@ -2,14 +2,14 @@
 define void @pass_va(i32 %count, ...) nounwind {
 entry:
-; CHECK: sub sp, sp, #80
+; CHECK: str x30, [sp, #-80]!
 ; CHECK: add x8, sp, #24
 ; CHECK: add x0, sp, #24
 ; CHECK: stp x6, x7, [sp, #64]
 ; CHECK: stp x4, x5, [sp, #48]
 ; CHECK: stp x2, x3, [sp, #32]
 ; CHECK: str x1, [sp, #24]
-; CHECK: stp x30, x8, [sp]
+; CHECK: str x8, [sp, #8]
 ; CHECK: bl other_func
 ; CHECK: ldr x30, [sp], #80
 ; CHECK: ret
@@ -102,6 +102,113 @@ declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
 declare i32 @__stdio_common_vsprintf(i64, i8*, i64, i8*, i8*, i8*) local_unnamed_addr #3
 declare i64* @__local_stdio_printf_options() local_unnamed_addr #4
+; CHECK-LABEL: fp
+; CHECK: str x21, [sp, #-96]!
+; CHECK: stp x20, x19, [sp, #16]
+; CHECK: stp x29, x30, [sp, #32]
+; CHECK: add x29, sp, #32
+; CHECK: add x8, x29, #24
+; CHECK: mov x19, x2
+; CHECK: mov x20, x1
+; CHECK: mov x21, x0
+; CHECK: stp x6, x7, [x29, #48]
+; CHECK: stp x4, x5, [x29, #32]
+; CHECK: str x3, [x29, #24]
+; CHECK: str x8, [sp, #8]
+; CHECK: bl __local_stdio_printf_options
+; CHECK: ldr x8, [x0]
+; CHECK: add x5, x29, #24
+; CHECK: mov x1, x21
+; CHECK: mov x2, x20
+; CHECK: orr x0, x8, #0x2
+; CHECK: mov x3, x19
+; CHECK: mov x4, xzr
+; CHECK: bl __stdio_common_vsprintf
+; CHECK: ldp x29, x30, [sp, #32]
+; CHECK: ldp x20, x19, [sp, #16]
+; CHECK: cmp w0, #0
+; CHECK: csinv w0, w0, wzr, ge
+; CHECK: ldr x21, [sp], #96
+; CHECK: ret
+define i32 @fp(i8*, i64, i8*, ...) local_unnamed_addr #6 {
+ %4 = alloca i8*, align 8
+ %5 = bitcast i8** %4 to i8*
+ call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %5) #2
+ call void @llvm.va_start(i8* nonnull %5)
+ %6 = load i8*, i8** %4, align 8
+ %7 = call i64* @__local_stdio_printf_options() #2
+ %8 = load i64, i64* %7, align 8
+ %9 = or i64 %8, 2
+ %10 = call i32 @__stdio_common_vsprintf(i64 %9, i8* %0, i64 %1, i8* %2, i8* null, i8* %6) #2
+ %11 = icmp sgt i32 %10, -1
+ %12 = select i1 %11, i32 %10, i32 -1
+ call void @llvm.va_end(i8* nonnull %5)
+ call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %5) #2
+ ret i32 %12
+}
+
+attributes #6 = { "no-frame-pointer-elim"="true" }
+
+; CHECK-LABEL: vla
+; CHECK: str x23, [sp, #-112]!
+; CHECK: stp x22, x21, [sp, #16]
+; CHECK: stp x20, x19, [sp, #32]
+; CHECK: stp x29, x30, [sp, #48]
+; CHECK: add x29, sp, #48
+; CHECK: add x8, x29, #16
+; CHECK: stur x8, [x29, #-40]
+; CHECK: mov w8, w0
+; CHECK: add x8, x8, #15
+; CHECK: mov x9, sp
+; CHECK: and x8, x8, #0x1fffffff0
+; CHECK: sub x20, x9, x8
+; CHECK: mov x19, x1
+; CHECK: mov x23, sp
+; CHECK: stp x6, x7, [x29, #48]
+; CHECK: stp x4, x5, [x29, #32]
+; CHECK: stp x2, x3, [x29, #16]
+; CHECK: mov sp, x20
+; CHECK: ldur x21, [x29, #-40]
+; CHECK: sxtw x22, w0
+; CHECK: bl __local_stdio_printf_options
+; CHECK: ldr x8, [x0]
+; CHECK: mov x1, x20
+; CHECK: mov x2, x22
+; CHECK: mov x3, x19
+; CHECK: orr x0, x8, #0x2
+; CHECK: mov x4, xzr
+; CHECK: mov x5, x21
+; CHECK: bl __stdio_common_vsprintf
+; CHECK: mov sp, x23
+; CHECK: sub sp, x29, #48
+; CHECK: ldp x29, x30, [sp, #48]
+; CHECK: ldp x20, x19, [sp, #32]
+; CHECK: ldp x22, x21, [sp, #16]
+; CHECK: ldr x23, [sp], #112
+; CHECK: ret
+define void @vla(i32, i8*, ...) local_unnamed_addr {
+ %3 = alloca i8*, align 8
+ %4 = bitcast i8** %3 to i8*
+ call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %4) #5
+ call void @llvm.va_start(i8* nonnull %4)
+ %5 = zext i32 %0 to i64
+ %6 = call i8* @llvm.stacksave()
+ %7 = alloca i8, i64 %5, align 1
+ %8 = load i8*, i8** %3, align 8
+ %9 = sext i32 %0 to i64
+ %10 = call i64* @__local_stdio_printf_options()
+ %11 = load i64, i64* %10, align 8
+ %12 = or i64 %11, 2
+ %13 = call i32 @__stdio_common_vsprintf(i64 %12, i8* nonnull %7, i64 %9, i8* %1, i8* null, i8* %8)
+ call void @llvm.va_end(i8* nonnull %4)
+ call void @llvm.stackrestore(i8* %6)
+ call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %4) #5
+ ret void
+}
+
+declare i8* @llvm.stacksave()
+declare void @llvm.stackrestore(i8*)
+
 ; CHECK-LABEL: snprintf
 ; CHECK: sub sp, sp, #96
 ; CHECK: stp x21, x20, [sp, #16]
diff --git a/test/CodeGen/Mips/cconv/pr33883.ll b/test/CodeGen/Mips/cconv/pr33883.ll
new file mode 100644
index 000000000000..54d7286ab8ff
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/pr33883.ll
@@ -0,0 +1,12 @@
+; RUN: llc -march=mips -mcpu=mips32 < %s -o /dev/null
+
+; Test that calls to vector intrinsics do not crash SelectionDAGBuilder.
+
+define <4 x float> @_ZN4simd3foo17hebb969c5fb39a194E(<4 x float>) {
+start:
+  %1 = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %0)
+
+  ret <4 x float> %1
+}
+
+declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
diff --git a/test/CodeGen/Mips/pr33978.ll b/test/CodeGen/Mips/pr33978.ll
new file mode 100644
index 000000000000..19fa1715baab
--- /dev/null
+++ b/test/CodeGen/Mips/pr33978.ll
@@ -0,0 +1,20 @@
+; RUN: llc -march=mips -mcpu=mips32r2 < %s -o /dev/null
+
+; Test that SelectionDAG does not crash during DAGCombine when two pointers
+; to the stack match with differing bases and offsets when expanding memcpy.
+; This could result in one of the pointers being considered dereferenceable
+; and other not.
+
+define void @foo(i8*) {
+start:
+  %a = alloca [22 x i8]
+  %b = alloca [22 x i8]
+  %c = bitcast [22 x i8]* %a to i8*
+  %d = getelementptr inbounds [22 x i8], [22 x i8]* %b, i32 0, i32 2
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %c, i8* %d, i32 20, i32 1, i1 false)
+  %e = getelementptr inbounds [22 x i8], [22 x i8]* %b, i32 0, i32 6
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %e, i32 12, i32 1, i1 false)
+  ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
diff --git a/test/CodeGen/X86/conditional-tailcall-samedest.mir b/test/CodeGen/X86/conditional-tailcall-samedest.mir
new file mode 100644
index 000000000000..c18a98be53f3
--- /dev/null
+++ b/test/CodeGen/X86/conditional-tailcall-samedest.mir
@@ -0,0 +1,139 @@
+# RUN: llc -run-pass=branch-folder %s -o - | FileCheck %s
+
+# PR33980
+
+# Don't form conditional tail calls when the original conditional branch has
+# the same true and false destination. Otherwise, when we remove the tail call
+# successor we will also remove the fallthrough successor from the CFG.
+
+# CHECK: body: |
+# CHECK: bb.0.entry:
+# CHECK: successors: %bb.1.sw.bb(0x40000000)
+# CHECK: liveins: %edi
+# CHECK: CMP32ri8 killed %edi, 2, implicit-def %eflags
+# CHECK: TCRETURNdi64cc @mergeable_conditional_tailcall
+
+# This was the unconditional branch to a dead MBB that we left behind before
+# this bug was fixed.
+# CHECK-NOT: JMP_1 %bb.-1
+
+--- |
+  ; ModuleID = 't.ll'
+  source_filename = "t.ll"
+  target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+  target triple = "x86_64--linux"
+
+  @static_local_guard = external global i64, align 8
+
+  ; Function Attrs: optsize
+  define void @f(i32 %arg) #0 {
+  entry:
+    switch i32 %arg, label %sw.epilog [
+      i32 0, label %sw.bb
+      i32 1, label %sw.bb
+      i32 2, label %sw.bb2
+    ]
+
+  sw.bb: ; preds = %entry, %entry
+    %tmp = load atomic i8, i8* bitcast (i64* @static_local_guard to i8*) acquire, align 8
+    %guard.uninitialized.i = icmp eq i8 %tmp, 0
+    br i1 %guard.uninitialized.i, label %init.check.i, label %return, !prof !0
+
+  init.check.i: ; preds = %sw.bb
+    tail call void @initialize_static_local(i64* nonnull @static_local_guard)
+    ret void
+
+  sw.bb2: ; preds = %entry
+    tail call void @mergeable_conditional_tailcall()
+    ret void
+
+  sw.epilog: ; preds = %entry
+    tail call void @mergeable_conditional_tailcall()
+    ret void
+
+  return: ; preds = %sw.bb
+    ret void
+  }
+
+  declare void @mergeable_conditional_tailcall()
+
+  declare void @initialize_static_local(i64*)
+
+  ; Function Attrs: nounwind
+  declare void @llvm.stackprotector(i8*, i8**) #1
+
+  attributes #0 = { optsize }
+  attributes #1 = { nounwind }
+
+  !0 = !{!"branch_weights", i32 1, i32 1048575}
+
+...
+---
+name: f
+alignment: 0
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+liveins:
+  - { reg: '%edi', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap: false
+  hasPatchPoint: false
+  stackSize: 0
+  offsetAdjustment: 0
+  maxAlignment: 0
+  adjustsStack: false
+  hasCalls: false
+  stackProtector: ''
+  maxCallFrameSize: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart: false
+  hasMustTailInVarArgFunc: false
+  savePoint: ''
+  restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+  bb.0.entry:
+    successors: %bb.2.sw.bb(0x40000000), %bb.1.entry(0x40000000)
+    liveins: %edi
+
+    CMP32ri8 killed %edi, 2, implicit-def %eflags
+    JB_1 %bb.2.sw.bb, implicit %eflags
+    JMP_1 %bb.1.entry
+
+  bb.1.entry:
+    successors: %bb.4.sw.bb2(0x40000000), %bb.5.sw.epilog(0x40000000)
+    liveins: %eflags
+
+    JE_1 %bb.4.sw.bb2, implicit killed %eflags
+    JMP_1 %bb.5.sw.epilog
+
+  bb.2.sw.bb:
+    successors: %bb.3.init.check.i(0x00000800), %bb.6.return(0x7ffff800)
+
+    %al = ACQUIRE_MOV8rm %rip, 1, _, @static_local_guard, _ :: (volatile load acquire 1 from `i8* bitcast (i64* @static_local_guard to i8*)`, align 8)
+    TEST8rr killed %al, %al, implicit-def %eflags
+    JNE_1 %bb.6.return, implicit killed %eflags
+    JMP_1 %bb.3.init.check.i
+
+  bb.3.init.check.i:
+    dead %edi = MOV32ri64 @static_local_guard, implicit-def %rdi
+    TCRETURNdi64 @initialize_static_local, 0, csr_64, implicit %rsp, implicit %rdi
+
+  bb.4.sw.bb2:
+    TCRETURNdi64 @mergeable_conditional_tailcall, 0, csr_64, implicit %rsp
+
+  bb.5.sw.epilog:
+    TCRETURNdi64 @mergeable_conditional_tailcall, 0, csr_64, implicit %rsp
+
+  bb.6.return:
+    RET 0
+
+...
diff --git a/test/CodeGen/X86/pause.ll b/test/CodeGen/X86/pause.ll
new file mode 100644
index 000000000000..70ac79f78f6e
--- /dev/null
+++ b/test/CodeGen/X86/pause.ll
@@ -0,0 +1,15 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=-sse -show-mc-encoding | FileCheck %s
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=-avx,+sse2 -show-mc-encoding | FileCheck %s
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s
+
+define void @test_x86_sse2_pause() {
+; CHECK-LABEL: test_x86_sse2_pause:
+; CHECK: ## BB#0:
+; CHECK-NEXT: pause ## encoding: [0xf3,0x90]
+; CHECK-NEXT: retl ## encoding: [0xc3]
+  tail call void @llvm.x86.sse2.pause()
+  ret void
+}
+declare void @llvm.x86.sse2.pause() nounwind
diff --git a/test/CodeGen/X86/tail-call-mutable-memarg.ll b/test/CodeGen/X86/tail-call-mutable-memarg.ll
new file mode 100644
index 000000000000..491bbba8c2fc
--- /dev/null
+++ b/test/CodeGen/X86/tail-call-mutable-memarg.ll
@@ -0,0 +1,42 @@
+; RUN: llc < %s | FileCheck %s
+
+; Make sure we check that forwarded memory arguments are not modified when tail
+; calling. inalloca and copy arg elimination make argument slots mutable.
+
+target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
+target triple = "i386-pc-windows-msvc19.0.24215"
+
+declare x86_stdcallcc void @tail_std(i32)
+declare void @capture(i32*)
+
+define x86_thiscallcc void @inalloca(i32* %this, i32* inalloca %args) {
+entry:
+  %val = load i32, i32* %args
+  store i32 0, i32* %args
+  tail call x86_stdcallcc void @tail_std(i32 %val)
+  ret void
+}
+
+; CHECK-LABEL: _inalloca: # @inalloca
+; CHECK: movl 4(%esp), %[[reg:[^ ]*]]
+; CHECK: movl $0, 4(%esp)
+; CHECK: pushl %[[reg]]
+; CHECK: calll _tail_std@4
+; CHECK: retl $4
+
+define x86_stdcallcc void @copy_elide(i32 %arg) {
+entry:
+  %arg.ptr = alloca i32
+  store i32 %arg, i32* %arg.ptr
+  call void @capture(i32* %arg.ptr)
+  tail call x86_stdcallcc void @tail_std(i32 %arg)
+  ret void
+}
+
+; CHECK-LABEL: _copy_elide@4: # @copy_elide
+; CHECK: leal {{[0-9]+}}(%esp), %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _capture
+; ...
+; CHECK: calll _tail_std@4
+; CHECK: retl $4
diff --git a/test/DllTool/coff-weak-exports.def b/test/DllTool/coff-weak-exports.def
index 511d947d8395..b4709e972645 100644
--- a/test/DllTool/coff-weak-exports.def
+++ b/test/DllTool/coff-weak-exports.def
@@ -1,19 +1,11 @@
 ; RUN: llvm-dlltool -m i386:x86-64 --input-def %s --output-lib %t.a
-; RUN: llvm-readobj -coff-exports %t.a | FileCheck %s
+; RUN: llvm-nm %t.a | FileCheck %s
 LIBRARY test.dll
 EXPORTS
 TestFunction==AltTestFunction
-; CHECK: File: test.dll
-; CHECK: Format: COFF-x86-64
-; CHECK: Arch: x86_64
-; CHECK: AddressSize: 64bit
-; CHECK: File: test.dll
-; CHECK: Format: COFF-x86-64
-; CHECK: Arch: x86_64
-; CHECK: AddressSize: 64bit
-; CHECK: File: test.dll
-; CHECK: Format: COFF-x86-64
-; CHECK: Arch: x86_64
-; CHECK: AddressSize: 64bit
+; CHECK: U AltTestFunction
+; CHECK-NEXT: w TestFunction
+; CHECK: U __imp_AltTestFunction
+; CHECK-NEXT: w __imp_TestFunction
diff --git a/test/Instrumentation/AddressSanitizer/force-dynamic-shadow.ll b/test/Instrumentation/AddressSanitizer/force-dynamic-shadow.ll
new file mode 100644
index 000000000000..d0152d278adc
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/force-dynamic-shadow.ll
@@ -0,0 +1,22 @@
+; Test -asan-force-dynamic-shadow flag.
+;
+; RUN: opt -asan -asan-module -S -asan-force-dynamic-shadow=1 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-FDS
+; RUN: opt -asan -asan-module -S -asan-force-dynamic-shadow=0 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-NDS
+
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @test_load(i32* %a) sanitize_address {
+; First instrumentation in the function must be to load the dynamic shadow
+; address into a local variable.
+; CHECK-LABEL: @test_load
+; CHECK: entry:
+; CHECK-FDS-NEXT: %[[SHADOW:[^ ]*]] = load i64, i64* @__asan_shadow_memory_dynamic_address
+; CHECK-NDS-NOT: __asan_shadow_memory_dynamic_address
+
+; Shadow address is loaded and added into the whole offset computation.
+; CHECK-FDS add i64 %{{.*}}, %[[SHADOW]]
+
+entry:
+  %tmp1 = load i32, i32* %a, align 4
+  ret i32 %tmp1
+}
diff --git a/test/Instrumentation/AddressSanitizer/stack-poisoning-byval-args.ll b/test/Instrumentation/AddressSanitizer/stack-poisoning-byval-args.ll
index 32610ce3b815..8531cb963248 100644
--- a/test/Instrumentation/AddressSanitizer/stack-poisoning-byval-args.ll
+++ b/test/Instrumentation/AddressSanitizer/stack-poisoning-byval-args.ll
@@ -1,5 +1,7 @@
 ; This check verifies that arguments passed by value get redzones.
 ; RUN: opt < %s -asan -asan-realign-stack=32 -S | FileCheck %s
+; RUN: opt < %s -asan -asan-realign-stack=32 -asan-force-dynamic-shadow -S | FileCheck %s --check-prefixes=CHECK-FDS
+
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-unknown-linux-gnu"
@@ -8,6 +10,8 @@ target triple = "x86_64-unknown-linux-gnu"
 declare i32 @bar(%struct.A*)
+; CHECK-FDS-NOT: {{\.byval}}
+
 ; Test behavior for named argument with explicit alignment. The memcpy and
 ; alloca alignments should match the explicit alignment of 64.
 define void @foo(%struct.A* byval align 64 %a) sanitize_address {
diff --git a/test/MC/AArch64/arm64-crypto.s b/test/MC/AArch64/arm64-crypto.s
index 51efd2132a78..468a720276c0 100644
--- a/test/MC/AArch64/arm64-crypto.s
+++ b/test/MC/AArch64/arm64-crypto.s
@@ -1,4 +1,5 @@
 ; RUN: llvm-mc -triple arm64-apple-darwin -mattr=crypto -show-encoding -output-asm-variant=1 < %s | FileCheck %s
+; RUN: llvm-mc -triple arm64-apple-darwin -mattr='+crypto,+fuse-aes' -show-encoding -output-asm-variant=1 < %s | FileCheck %s
 
 foo:
   aese.16b v0, v1
diff --git a/test/Transforms/ArgumentPromotion/byval.ll b/test/Transforms/ArgumentPromotion/byval.ll
index 58475fc89607..00542e3ec244 100644
--- a/test/Transforms/ArgumentPromotion/byval.ll
+++ b/test/Transforms/ArgumentPromotion/byval.ll
@@ -6,24 +6,45 @@ target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:1
 %struct.ss = type { i32, i64 }
 
 define internal void @f(%struct.ss* byval %b) nounwind {
+entry:
+  %tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0
+  %tmp1 = load i32, i32* %tmp, align 4
+  %tmp2 = add i32 %tmp1, 1
+  store i32 %tmp2, i32* %tmp, align 4
+  ret void
+}
+
 ; CHECK-LABEL: define internal void @f(i32 %b.0, i64 %b.1)
+; CHECK: alloca %struct.ss{{$}}
+; CHECK: store i32 %b.0
+; CHECK: store i64 %b.1
+
+define internal void @g(%struct.ss* byval align 32 %b) nounwind {
 entry:
-  %tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0 ; <i32*> [#uses=2]
-  %tmp1 = load i32, i32* %tmp, align 4 ; <i32> [#uses=1]
-  %tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=1]
+  %tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0
+  %tmp1 = load i32, i32* %tmp, align 4
+  %tmp2 = add i32 %tmp1, 1
   store i32 %tmp2, i32* %tmp, align 4
   ret void
 }
 
+; CHECK-LABEL: define internal void @g(i32 %b.0, i64 %b.1)
+; CHECK: alloca %struct.ss, align 32
+; CHECK: store i32 %b.0
+; CHECK: store i64 %b.1
+
 define i32 @main() nounwind {
-; CHECK-LABEL: define i32 @main
 entry:
-  %S = alloca %struct.ss ; <%struct.ss*> [#uses=4]
-  %tmp1 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 0 ; <i32*> [#uses=1]
+  %S = alloca %struct.ss
+  %tmp1 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 0
   store i32 1, i32* %tmp1, align 8
-  %tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
+  %tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1
   store i64 2, i64* %tmp4, align 4
-  call void @f( %struct.ss* byval %S ) nounwind
-; CHECK: call void @f(i32 %{{.*}}, i64 %{{.*}})
+  call void @f(%struct.ss* byval %S) nounwind
+  call void @g(%struct.ss* byval %S) nounwind
   ret i32 0
 }
+
+; CHECK-LABEL: define i32 @main
+; CHECK: call void @f(i32 %{{.*}}, i64 %{{.*}})
+; CHECK: call void @g(i32 %{{.*}}, i64 %{{.*}})
diff --git a/test/Transforms/InstSimplify/pr33957.ll b/test/Transforms/InstSimplify/pr33957.ll
new file mode 100644
index 000000000000..256bb89e7861
--- /dev/null
+++ b/test/Transforms/InstSimplify/pr33957.ll
@@ -0,0 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -loop-unroll -S %s | FileCheck %s
+
+%struct.bar = type { i32 }
+
+@global = external constant [78 x %struct.bar], align 4
+
+define void @patatino(i32 %x) {
+; CHECK-LABEL: @patatino(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: br i1 true, label [[BB1_PREHEADER:%.*]], label [[BB3:%.*]]
+; CHECK: bb1.preheader:
+; CHECK-NEXT: br label [[BB1:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[BB3]]
+; CHECK: bb3:
+; CHECK-NEXT: ret void
+;
+bb:
+  br i1 true, label %bb1, label %bb3
+
+bb1:
+  %tmp = getelementptr inbounds [78 x %struct.bar], [78 x %struct.bar]* @global, i32 0, <4 x i32> undef
+  %tmp2 = getelementptr inbounds %struct.bar, <4 x %struct.bar*> %tmp, i32 1
+  br i1 true, label %bb3, label %bb1
+
+bb3:
+  ret void
+}
diff --git a/test/Transforms/SCCP/definite-initializer.ll b/test/Transforms/SCCP/definite-initializer.ll
new file mode 100644
index 000000000000..a2c4521e07ca
--- /dev/null
+++ b/test/Transforms/SCCP/definite-initializer.ll
@@ -0,0 +1,11 @@
+; RUN: opt -S -ipsccp < %s | FileCheck %s
+@d = internal externally_initialized global i32 0, section ".openbsd.randomdata", align 4
+
+; CHECK-LABEL: @test1(
+define i32 @test1() {
+entry:
+  %load = load i32, i32* @d, align 4
+  ret i32 %load
+; CHECK: %[[load:.*]] = load i32, i32* @d, align 4
+; CHECK: ret i32 %[[load]]
+}
