Diffstat (limited to 'test/CodeGen')
 test/CodeGen/AMDGPU/amdgpu-alias-analysis.ll        | 24
 test/CodeGen/AMDGPU/constant-address-space-32bit.ll | 39
 test/CodeGen/ARM/ldrex-frame-size.ll                | 36
 test/CodeGen/ARM/ldstrex.ll                         | 85
 test/CodeGen/X86/eip-addressing-i386.ll             |  4
 5 files changed, 172 insertions(+), 16 deletions(-)
diff --git a/test/CodeGen/AMDGPU/amdgpu-alias-analysis.ll b/test/CodeGen/AMDGPU/amdgpu-alias-analysis.ll
index 51d96498c53e..35614634e763 100644
--- a/test/CodeGen/AMDGPU/amdgpu-alias-analysis.ll
+++ b/test/CodeGen/AMDGPU/amdgpu-alias-analysis.ll
@@ -7,3 +7,27 @@ define void @test(i8 addrspace(5)* %p, i8 addrspace(1)* %p1) {
   ret void
 }
 
+; CHECK: MayAlias: i8 addrspace(1)* %p1, i8 addrspace(4)* %p
+
+define void @test_constant_vs_global(i8 addrspace(4)* %p, i8 addrspace(1)* %p1) {
+  ret void
+}
+
+; CHECK: MayAlias: i8 addrspace(1)* %p, i8 addrspace(4)* %p1
+
+define void @test_global_vs_constant(i8 addrspace(1)* %p, i8 addrspace(4)* %p1) {
+  ret void
+}
+
+; CHECK: MayAlias: i8 addrspace(1)* %p1, i8 addrspace(6)* %p
+
+define void @test_constant_32bit_vs_global(i8 addrspace(6)* %p, i8 addrspace(1)* %p1) {
+  ret void
+}
+
+; CHECK: MayAlias: i8 addrspace(4)* %p1, i8 addrspace(6)* %p
+
+define void @test_constant_32bit_vs_constant(i8 addrspace(6)* %p, i8 addrspace(4)* %p1) {
+  ret void
+}
+
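The four functions above pin down that AMDGPU's alias analysis reports MayAlias between the constant address spaces (4, and its 32-bit variant 6) and the global space (1), and between the two constant spaces themselves: constant memory is carved out of global memory, so disjointness cannot be proven. A minimal way to exercise one of these pairs in isolation is sketched below; the RUN line is an assumption about how such -aa-eval tests are typically driven, not something visible in the hunk:

  ; RUN: opt -mtriple=amdgcn-- -aa-eval -print-all-alias-modref-info -disable-output < %s 2>&1 | FileCheck %s
  ; -aa-eval prints one result per pointer pair; unprovable disjointness shows up as MayAlias.
  ; CHECK: MayAlias: i8 addrspace(1)* %p1, i8 addrspace(4)* %p
  define void @test_constant_vs_global(i8 addrspace(4)* %p, i8 addrspace(1)* %p1) {
    ret void
  }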
diff --git a/test/CodeGen/AMDGPU/constant-address-space-32bit.ll b/test/CodeGen/AMDGPU/constant-address-space-32bit.ll
index 4522497a85e8..040bcbc01827 100644
--- a/test/CodeGen/AMDGPU/constant-address-space-32bit.ll
+++ b/test/CodeGen/AMDGPU/constant-address-space-32bit.ll
@@ -12,7 +12,7 @@
 ; VIGFX9-DAG: s_load_dword s{{[0-9]}}, s[0:1], 0x0
 ; VIGFX9-DAG: s_load_dword s{{[0-9]}}, s[2:3], 0x8
 define amdgpu_vs float @load_i32(i32 addrspace(6)* inreg %p0, i32 addrspace(6)* inreg %p1) #0 {
-  %gep1 = getelementptr i32, i32 addrspace(6)* %p1, i64 2
+  %gep1 = getelementptr inbounds i32, i32 addrspace(6)* %p1, i32 2
   %r0 = load i32, i32 addrspace(6)* %p0
   %r1 = load i32, i32 addrspace(6)* %gep1
   %r = add i32 %r0, %r1
@@ -29,7 +29,7 @@ define amdgpu_vs float @load_i32(i32 addrspace(6)* inreg %p0, i32 addrspace(6)*
 ; VIGFX9-DAG: s_load_dwordx2 s[{{.*}}], s[0:1], 0x0
 ; VIGFX9-DAG: s_load_dwordx2 s[{{.*}}], s[2:3], 0x10
 define amdgpu_vs <2 x float> @load_v2i32(<2 x i32> addrspace(6)* inreg %p0, <2 x i32> addrspace(6)* inreg %p1) #0 {
-  %gep1 = getelementptr <2 x i32>, <2 x i32> addrspace(6)* %p1, i64 2
+  %gep1 = getelementptr inbounds <2 x i32>, <2 x i32> addrspace(6)* %p1, i32 2
   %r0 = load <2 x i32>, <2 x i32> addrspace(6)* %p0
   %r1 = load <2 x i32>, <2 x i32> addrspace(6)* %gep1
   %r = add <2 x i32> %r0, %r1
@@ -46,7 +46,7 @@ define amdgpu_vs <2 x float> @load_v2i32(<2 x i32> addrspace(6)* inreg %p0, <2 x
 ; VIGFX9-DAG: s_load_dwordx4 s[{{.*}}], s[0:1], 0x0
 ; VIGFX9-DAG: s_load_dwordx4 s[{{.*}}], s[2:3], 0x20
 define amdgpu_vs <4 x float> @load_v4i32(<4 x i32> addrspace(6)* inreg %p0, <4 x i32> addrspace(6)* inreg %p1) #0 {
-  %gep1 = getelementptr <4 x i32>, <4 x i32> addrspace(6)* %p1, i64 2
+  %gep1 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(6)* %p1, i32 2
   %r0 = load <4 x i32>, <4 x i32> addrspace(6)* %p0
   %r1 = load <4 x i32>, <4 x i32> addrspace(6)* %gep1
   %r = add <4 x i32> %r0, %r1
@@ -63,7 +63,7 @@ define amdgpu_vs <4 x float> @load_v4i32(<4 x i32> addrspace(6)* inreg %p0, <4 x
 ; VIGFX9-DAG: s_load_dwordx8 s[{{.*}}], s[0:1], 0x0
 ; VIGFX9-DAG: s_load_dwordx8 s[{{.*}}], s[2:3], 0x40
 define amdgpu_vs <8 x float> @load_v8i32(<8 x i32> addrspace(6)* inreg %p0, <8 x i32> addrspace(6)* inreg %p1) #0 {
-  %gep1 = getelementptr <8 x i32>, <8 x i32> addrspace(6)* %p1, i64 2
+  %gep1 = getelementptr inbounds <8 x i32>, <8 x i32> addrspace(6)* %p1, i32 2
   %r0 = load <8 x i32>, <8 x i32> addrspace(6)* %p0
   %r1 = load <8 x i32>, <8 x i32> addrspace(6)* %gep1
   %r = add <8 x i32> %r0, %r1
@@ -80,7 +80,7 @@ define amdgpu_vs <8 x float> @load_v8i32(<8 x i32> addrspace(6)* inreg %p0, <8 x
 ; VIGFX9-DAG: s_load_dwordx16 s[{{.*}}], s[0:1], 0x0
 ; VIGFX9-DAG: s_load_dwordx16 s[{{.*}}], s[2:3], 0x80
 define amdgpu_vs <16 x float> @load_v16i32(<16 x i32> addrspace(6)* inreg %p0, <16 x i32> addrspace(6)* inreg %p1) #0 {
-  %gep1 = getelementptr <16 x i32>, <16 x i32> addrspace(6)* %p1, i64 2
+  %gep1 = getelementptr inbounds <16 x i32>, <16 x i32> addrspace(6)* %p1, i32 2
   %r0 = load <16 x i32>, <16 x i32> addrspace(6)* %p0
   %r1 = load <16 x i32>, <16 x i32> addrspace(6)* %gep1
   %r = add <16 x i32> %r0, %r1
@@ -97,7 +97,7 @@ define amdgpu_vs <16 x float> @load_v16i32(<16 x i32> addrspace(6)* inreg %p0, <
 ; VIGFX9-DAG: s_load_dword s{{[0-9]}}, s[0:1], 0x0
 ; VIGFX9-DAG: s_load_dword s{{[0-9]}}, s[2:3], 0x8
 define amdgpu_vs float @load_float(float addrspace(6)* inreg %p0, float addrspace(6)* inreg %p1) #0 {
-  %gep1 = getelementptr float, float addrspace(6)* %p1, i64 2
+  %gep1 = getelementptr inbounds float, float addrspace(6)* %p1, i32 2
   %r0 = load float, float addrspace(6)* %p0
   %r1 = load float, float addrspace(6)* %gep1
   %r = fadd float %r0, %r1
@@ -113,7 +113,7 @@ define amdgpu_vs float @load_float(float addrspace(6)* inreg %p0, float addrspac
 ; VIGFX9-DAG: s_load_dwordx2 s[{{.*}}], s[0:1], 0x0
 ; VIGFX9-DAG: s_load_dwordx2 s[{{.*}}], s[2:3], 0x10
 define amdgpu_vs <2 x float> @load_v2float(<2 x float> addrspace(6)* inreg %p0, <2 x float> addrspace(6)* inreg %p1) #0 {
-  %gep1 = getelementptr <2 x float>, <2 x float> addrspace(6)* %p1, i64 2
+  %gep1 = getelementptr inbounds <2 x float>, <2 x float> addrspace(6)* %p1, i32 2
   %r0 = load <2 x float>, <2 x float> addrspace(6)* %p0
   %r1 = load <2 x float>, <2 x float> addrspace(6)* %gep1
   %r = fadd <2 x float> %r0, %r1
@@ -129,7 +129,7 @@ define amdgpu_vs <2 x float> @load_v2float(<2 x float> addrspace(6)* inreg %p0,
 ; VIGFX9-DAG: s_load_dwordx4 s[{{.*}}], s[0:1], 0x0
 ; VIGFX9-DAG: s_load_dwordx4 s[{{.*}}], s[2:3], 0x20
 define amdgpu_vs <4 x float> @load_v4float(<4 x float> addrspace(6)* inreg %p0, <4 x float> addrspace(6)* inreg %p1) #0 {
-  %gep1 = getelementptr <4 x float>, <4 x float> addrspace(6)* %p1, i64 2
+  %gep1 = getelementptr inbounds <4 x float>, <4 x float> addrspace(6)* %p1, i32 2
   %r0 = load <4 x float>, <4 x float> addrspace(6)* %p0
   %r1 = load <4 x float>, <4 x float> addrspace(6)* %gep1
   %r = fadd <4 x float> %r0, %r1
@@ -145,7 +145,7 @@ define amdgpu_vs <4 x float> @load_v4float(<4 x float> addrspace(6)* inreg %p0,
 ; VIGFX9-DAG: s_load_dwordx8 s[{{.*}}], s[0:1], 0x0
 ; VIGFX9-DAG: s_load_dwordx8 s[{{.*}}], s[2:3], 0x40
 define amdgpu_vs <8 x float> @load_v8float(<8 x float> addrspace(6)* inreg %p0, <8 x float> addrspace(6)* inreg %p1) #0 {
-  %gep1 = getelementptr <8 x float>, <8 x float> addrspace(6)* %p1, i64 2
+  %gep1 = getelementptr inbounds <8 x float>, <8 x float> addrspace(6)* %p1, i32 2
   %r0 = load <8 x float>, <8 x float> addrspace(6)* %p0
   %r1 = load <8 x float>, <8 x float> addrspace(6)* %gep1
   %r = fadd <8 x float> %r0, %r1
@@ -161,7 +161,7 @@ define amdgpu_vs <8 x float> @load_v8float(<8 x float> addrspace(6)* inreg %p0,
 ; VIGFX9-DAG: s_load_dwordx16 s[{{.*}}], s[0:1], 0x0
 ; VIGFX9-DAG: s_load_dwordx16 s[{{.*}}], s[2:3], 0x80
 define amdgpu_vs <16 x float> @load_v16float(<16 x float> addrspace(6)* inreg %p0, <16 x float> addrspace(6)* inreg %p1) #0 {
-  %gep1 = getelementptr <16 x float>, <16 x float> addrspace(6)* %p1, i64 2
+  %gep1 = getelementptr inbounds <16 x float>, <16 x float> addrspace(6)* %p1, i32 2
   %r0 = load <16 x float>, <16 x float> addrspace(6)* %p0
   %r1 = load <16 x float>, <16 x float> addrspace(6)* %gep1
   %r = fadd <16 x float> %r0, %r1
@@ -212,12 +212,12 @@ main_body:
   %22 = call nsz float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %5) #8
   %23 = bitcast float %22 to i32
   %24 = shl i32 %23, 1
-  %25 = getelementptr [0 x <8 x i32>], [0 x <8 x i32>] addrspace(6)* %1, i32 0, i32 %24, !amdgpu.uniform !0
+  %25 = getelementptr inbounds [0 x <8 x i32>], [0 x <8 x i32>] addrspace(6)* %1, i32 0, i32 %24, !amdgpu.uniform !0
   %26 = load <8 x i32>, <8 x i32> addrspace(6)* %25, align 32, !invariant.load !0
   %27 = shl i32 %23, 2
   %28 = or i32 %27, 3
   %29 = bitcast [0 x <8 x i32>] addrspace(6)* %1 to [0 x <4 x i32>] addrspace(6)*
-  %30 = getelementptr [0 x <4 x i32>], [0 x <4 x i32>] addrspace(6)* %29, i32 0, i32 %28, !amdgpu.uniform !0
+  %30 = getelementptr inbounds [0 x <4 x i32>], [0 x <4 x i32>] addrspace(6)* %29, i32 0, i32 %28, !amdgpu.uniform !0
   %31 = load <4 x i32>, <4 x i32> addrspace(6)* %30, align 16, !invariant.load !0
   %32 = call nsz <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32 15, float 0.0, <8 x i32> %26, <4 x i32> %31, i1 0, i32 0, i32 0) #8
   %33 = extractelement <4 x float> %32, i32 0
@@ -246,12 +246,12 @@ main_body:
   %22 = call nsz float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %5) #8
   %23 = bitcast float %22 to i32
   %24 = shl i32 %23, 1
-  %25 = getelementptr [0 x <8 x i32>], [0 x <8 x i32>] addrspace(6)* %1, i32 0, i32 %24
+  %25 = getelementptr inbounds [0 x <8 x i32>], [0 x <8 x i32>] addrspace(6)* %1, i32 0, i32 %24
   %26 = load <8 x i32>, <8 x i32> addrspace(6)* %25, align 32, !invariant.load !0
   %27 = shl i32 %23, 2
   %28 = or i32 %27, 3
   %29 = bitcast [0 x <8 x i32>] addrspace(6)* %1 to [0 x <4 x i32>] addrspace(6)*
-  %30 = getelementptr [0 x <4 x i32>], [0 x <4 x i32>] addrspace(6)* %29, i32 0, i32 %28
+  %30 = getelementptr inbounds [0 x <4 x i32>], [0 x <4 x i32>] addrspace(6)* %29, i32 0, i32 %28
   %31 = load <4 x i32>, <4 x i32> addrspace(6)* %30, align 16, !invariant.load !0
   %32 = call nsz <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32 15, float 0.0, <8 x i32> %26, <4 x i32> %31, i1 0, i32 0, i32 0) #8
   %33 = extractelement <4 x float> %32, i32 0
@@ -268,6 +268,17 @@ main_body:
   ret <{ i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %43
 }
 
+; GCN-LABEL: {{^}}load_addr_no_fold:
+; GCN-DAG: s_add_i32 s0, s0, 4
+; GCN-DAG: s_mov_b32 s1, 0
+; GCN: s_load_dword s{{[0-9]}}, s[0:1], 0x0
+define amdgpu_vs float @load_addr_no_fold(i32 addrspace(6)* inreg noalias %p0) #0 {
+  %gep1 = getelementptr i32, i32 addrspace(6)* %p0, i32 1
+  %r1 = load i32, i32 addrspace(6)* %gep1
+  %r2 = bitcast i32 %r1 to float
+  ret float %r2
+}
+
 ; Function Attrs: nounwind readnone speculatable
 declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #6
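The pattern in these hunks: marking each GEP inbounds (and using an i32 index, matching the 32-bit address space) is what lets the backend fold the constant offset into the s_load immediate. The new load_addr_no_fold test shows the flip side: with a plain GEP the addition may wrap in the 32-bit space, so the address is materialized with s_add_i32 instead of being folded. The offsets in the CHECK lines follow directly from the GEP arithmetic; a worked example for load_v8i32 from the hunk above:

  ; element index 2, with <8 x i32> occupying 32 bytes per element:
  ;   %gep1 = getelementptr inbounds <8 x i32>, <8 x i32> addrspace(6)* %p1, i32 2
  ; byte offset = 2 * 32 = 64 = 0x40, matching:
  ;   s_load_dwordx8 s[{{.*}}], s[2:3], 0x40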
diff --git a/test/CodeGen/ARM/ldrex-frame-size.ll b/test/CodeGen/ARM/ldrex-frame-size.ll
new file mode 100644
index 000000000000..595540578a00
--- /dev/null
+++ b/test/CodeGen/ARM/ldrex-frame-size.ll
@@ -0,0 +1,36 @@
+; RUN: llc -mtriple=thumbv7-linux-gnueabi -o - %s | FileCheck %s
+
+; This alloca is just large enough that FrameLowering decides it needs a frame
+; to guarantee access, based on the range of ldrex.
+
+; The actual alloca size is a bit of black magic, unfortunately: the real
+; maximum accessible is 1020, but FrameLowering adds 16 bytes to its estimated
+; stack size as a safety margin, so the alloca is not actually what the limit
+; gets compared against. The important point is that we don't go up to ~4096,
+; which is the default when no range-limited instructions are involved.
+define void @test_large_frame() {
+; CHECK-LABEL: test_large_frame:
+; CHECK: push
+; CHECK: sub.w sp, sp, #1004
+
+  %ptr = alloca i32, i32 251
+
+  %addr = getelementptr i32, i32* %ptr, i32 1
+  call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
+  ret void
+}
+
+; This alloca is just on the other side of the limit, so no frame is needed.
+define void @test_small_frame() {
+; CHECK-LABEL: test_small_frame:
+; CHECK-NOT: push
+; CHECK: sub.w sp, sp, #1000
+
+  %ptr = alloca i32, i32 250
+
+  %addr = getelementptr i32, i32* %ptr, i32 1
+  call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
+  ret void
+}
+
+declare i32 @llvm.arm.ldrex.p0i32(i32*)
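The two alloca sizes above work out as follows; the 16-byte padding is the estimate fudge the comment describes, and the exact comparison FrameLowering performs is an assumption here rather than something the test asserts:

  ; test_large_frame: 251 * 4 = 1004 bytes; 1004 + 16 = 1020 reaches the ldrex
  ;                   limit, so a frame register is set up (hence the push).
  ; test_small_frame: 250 * 4 = 1000 bytes; 1000 + 16 = 1016 stays inside the
  ;                   limit, so plain sp-relative addressing suffices (no push).

The ldstrex.ll additions below probe the same encoding from the instruction-selection side: a Thumb-2 ldrex/strex immediate offset must be a non-negative multiple of 4 no larger than 1020, so conforming offsets fold into [sp, #imm] while out-of-range (1024), misaligned (2), or negative (-4) offsets force the address into a register first.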
diff --git a/test/CodeGen/ARM/ldstrex.ll b/test/CodeGen/ARM/ldstrex.ll
index 59349f72a8fe..73afa0e27469 100644
--- a/test/CodeGen/ARM/ldstrex.ll
+++ b/test/CodeGen/ARM/ldstrex.ll
@@ -142,6 +142,91 @@ define void @excl_addrmode() {
   ret void
 }
 
+define void @test_excl_addrmode_folded() {
+; CHECK-LABEL: test_excl_addrmode_folded:
+  %local = alloca i8, i32 4096
+
+  %local.0 = getelementptr i8, i8* %local, i32 4
+  %local32.0 = bitcast i8* %local.0 to i32*
+  call i32 @llvm.arm.ldrex.p0i32(i32* %local32.0)
+  call i32 @llvm.arm.strex.p0i32(i32 0, i32* %local32.0)
+; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [sp, #4]
+; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [sp, #4]
+
+  %local.1 = getelementptr i8, i8* %local, i32 1020
+  %local32.1 = bitcast i8* %local.1 to i32*
+  call i32 @llvm.arm.ldrex.p0i32(i32* %local32.1)
+  call i32 @llvm.arm.strex.p0i32(i32 0, i32* %local32.1)
+; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [sp, #1020]
+; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [sp, #1020]
+
+  ret void
+}
+
+define void @test_excl_addrmode_range() {
+; CHECK-LABEL: test_excl_addrmode_range:
+  %local = alloca i8, i32 4096
+
+  %local.0 = getelementptr i8, i8* %local, i32 1024
+  %local32.0 = bitcast i8* %local.0 to i32*
+  call i32 @llvm.arm.ldrex.p0i32(i32* %local32.0)
+  call i32 @llvm.arm.strex.p0i32(i32 0, i32* %local32.0)
+; CHECK-T2ADDRMODE: mov r[[TMP:[0-9]+]], sp
+; CHECK-T2ADDRMODE: add.w r[[ADDR:[0-9]+]], r[[TMP]], #1024
+; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [r[[ADDR]]]
+; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
+
+  ret void
+}
+
+define void @test_excl_addrmode_align() {
+; CHECK-LABEL: test_excl_addrmode_align:
+  %local = alloca i8, i32 4096
+
+  %local.0 = getelementptr i8, i8* %local, i32 2
+  %local32.0 = bitcast i8* %local.0 to i32*
+  call i32 @llvm.arm.ldrex.p0i32(i32* %local32.0)
+  call i32 @llvm.arm.strex.p0i32(i32 0, i32* %local32.0)
+; CHECK-T2ADDRMODE: mov r[[ADDR:[0-9]+]], sp
+; CHECK-T2ADDRMODE: adds r[[ADDR:[0-9]+]], #2
+; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [r[[ADDR]]]
+; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
+
+  ret void
+}
+
+define void @test_excl_addrmode_sign() {
+; CHECK-LABEL: test_excl_addrmode_sign:
+  %local = alloca i8, i32 4096
+
+  %local.0 = getelementptr i8, i8* %local, i32 -4
+  %local32.0 = bitcast i8* %local.0 to i32*
+  call i32 @llvm.arm.ldrex.p0i32(i32* %local32.0)
+  call i32 @llvm.arm.strex.p0i32(i32 0, i32* %local32.0)
+; CHECK-T2ADDRMODE: mov r[[ADDR:[0-9]+]], sp
+; CHECK-T2ADDRMODE: subs r[[ADDR:[0-9]+]], #4
+; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [r[[ADDR]]]
+; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
+
+  ret void
+}
+
+define void @test_excl_addrmode_combination() {
+; CHECK-LABEL: test_excl_addrmode_combination:
+  %local = alloca i8, i32 4096
+  %unused = alloca i8, i32 64
+
+  %local.0 = getelementptr i8, i8* %local, i32 4
+  %local32.0 = bitcast i8* %local.0 to i32*
+  call i32 @llvm.arm.ldrex.p0i32(i32* %local32.0)
+  call i32 @llvm.arm.strex.p0i32(i32 0, i32* %local32.0)
+; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [sp, #68]
+; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [sp, #68]
+
+  ret void
+}
+
+
 ; LLVM should know, even across basic blocks, that ldrex is setting the high
 ; bits of its i32 to 0. There should be no zero-extend operation.
 define zeroext i8 @test_cross_block_zext_i8(i1 %tst, i8* %addr) {
diff --git a/test/CodeGen/X86/eip-addressing-i386.ll b/test/CodeGen/X86/eip-addressing-i386.ll
index ddb7c782c204..b686be5727a1 100644
--- a/test/CodeGen/X86/eip-addressing-i386.ll
+++ b/test/CodeGen/X86/eip-addressing-i386.ll
@@ -1,8 +1,8 @@
 ; RUN: not llc -mtriple i386-apple-- -o /dev/null < %s 2>&1| FileCheck %s
-; CHECK: <inline asm>:1:13: error: register %eip is only available in 64-bit mode
+; CHECK: <inline asm>:1:13: error: IP-relative addressing requires 64-bit mode
 ; CHECK-NEXT: jmpl *_foo(%eip)
 
-; Make sure that we emit an error if we encounter RIP-relative instructions in
+; Make sure that we emit an error if we encounter IP-relative instructions in
 ; 32-bit mode.
 
 define i32 @foo() { ret i32 0 }
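For contrast with the error case above, the same addressing form is legal when targeting x86-64. A sketch of the 64-bit counterpart; the triple and the RIP-relative operand are illustrative assumptions, not part of the test:

  ; RUN: llc -mtriple x86_64-apple-- -o - < %s | FileCheck %s
  ; CHECK: jmpq *_foo(%rip)
  define i32 @foo() {
    call void asm sideeffect "jmpq *_foo(%rip)", ""()
    ret i32 0
  }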
