author    Dimitry Andric <dim@FreeBSD.org>    2012-12-02 13:10:19 +0000
committer Dimitry Andric <dim@FreeBSD.org>    2012-12-02 13:10:19 +0000
commit    522600a229b950314b5f4af84eba4f3e8a0ffea1 (patch)
tree      32b4679ab4b8f28e5228daafc65e9dc436935353 /test/Transforms/SROA
parent    902a7b529820e6a0aa85f98f21afaeb1805a22f8 (diff)
Diffstat (limited to 'test/Transforms/SROA')
-rw-r--r--  test/Transforms/SROA/alignment.ll         171
-rw-r--r--  test/Transforms/SROA/basictest.ll        1136
-rw-r--r--  test/Transforms/SROA/big-endian.ll        119
-rw-r--r--  test/Transforms/SROA/fca.ll                49
-rw-r--r--  test/Transforms/SROA/lit.local.cfg          1
-rw-r--r--  test/Transforms/SROA/phi-and-select.ll    427
-rw-r--r--  test/Transforms/SROA/vector-promotion.ll  267
7 files changed, 2170 insertions, 0 deletions
diff --git a/test/Transforms/SROA/alignment.ll b/test/Transforms/SROA/alignment.ll
new file mode 100644
index 0000000000000..ad5fb6c4a5d8c
--- /dev/null
+++ b/test/Transforms/SROA/alignment.ll
@@ -0,0 +1,171 @@
+; RUN: opt < %s -sroa -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
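+; A decoding of the datalayout string, added as an editorial aid (see LLVM's
+; datalayout documentation): "e" means little-endian, "p:64:64:64" means
+; 64-bit pointers with 64-bit ABI and preferred alignment, and entries like
+; "f64:64:64" give each type its ABI and preferred alignment, which is what
+; SROA consults when recomputing the alignment of rewritten accesses.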
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
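+; Note that in this era of LLVM the memory intrinsics carry an explicit
+; alignment operand: the arguments are (dest, src, length, alignment,
+; isvolatile), so the "i32 16" in the calls below asserts 16-byte alignment.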
+
+define void @test1({ i8, i8 }* %a, { i8, i8 }* %b) {
+; CHECK: @test1
+; CHECK: %[[gep_a0:.*]] = getelementptr inbounds { i8, i8 }* %a, i64 0, i32 0
+; CHECK: %[[a0:.*]] = load i8* %[[gep_a0]], align 16
+; CHECK: %[[gep_a1:.*]] = getelementptr inbounds { i8, i8 }* %a, i64 0, i32 1
+; CHECK: %[[a1:.*]] = load i8* %[[gep_a1]], align 1
+; CHECK: %[[gep_b0:.*]] = getelementptr inbounds { i8, i8 }* %b, i64 0, i32 0
+; CHECK: store i8 %[[a0]], i8* %[[gep_b0]], align 16
+; CHECK: %[[gep_b1:.*]] = getelementptr inbounds { i8, i8 }* %b, i64 0, i32 1
+; CHECK: store i8 %[[a1]], i8* %[[gep_b1]], align 1
+; CHECK: ret void
+
+entry:
+ %alloca = alloca { i8, i8 }, align 16
+ %gep_a = getelementptr { i8, i8 }* %a, i32 0, i32 0
+ %gep_alloca = getelementptr { i8, i8 }* %alloca, i32 0, i32 0
+ %gep_b = getelementptr { i8, i8 }* %b, i32 0, i32 0
+
+ store i8 420, i8* %gep_alloca, align 16
+
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %gep_alloca, i8* %gep_a, i32 2, i32 16, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %gep_b, i8* %gep_alloca, i32 2, i32 16, i1 false)
+ ret void
+}
+
+define void @test2() {
+; CHECK: @test2
+; CHECK: alloca i16
+; CHECK: load i8* %{{.*}}
+; CHECK: store i8 42, i8* %{{.*}}
+; CHECK: ret void
+
+entry:
+ %a = alloca { i8, i8, i8, i8 }, align 2
+ %gep1 = getelementptr { i8, i8, i8, i8 }* %a, i32 0, i32 1
+ %cast1 = bitcast i8* %gep1 to i16*
+ store volatile i16 0, i16* %cast1
+ %gep2 = getelementptr { i8, i8, i8, i8 }* %a, i32 0, i32 2
+ %result = load i8* %gep2
+ store i8 42, i8* %gep2
+ ret void
+}
+
+define void @PR13920(<2 x i64>* %a, i16* %b) {
+; Test that alignments on memcpy intrinsics get propagated to loads and stores.
+; CHECK: @PR13920
+; CHECK: load <2 x i64>* %a, align 2
+; CHECK: store <2 x i64> {{.*}}, <2 x i64>* {{.*}}, align 2
+; CHECK: ret void
+
+entry:
+ %aa = alloca <2 x i64>, align 16
+ %aptr = bitcast <2 x i64>* %a to i8*
+ %aaptr = bitcast <2 x i64>* %aa to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %aaptr, i8* %aptr, i32 16, i32 2, i1 false)
+ %bptr = bitcast i16* %b to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %bptr, i8* %aaptr, i32 16, i32 2, i1 false)
+ ret void
+}
+
+define void @test3(i8* %x) {
+; Test that when we promote an alloca to a type with lower ABI alignment, we
+; provide the needed explicit alignment that code using the alloca may be
+; expecting. However, also check that any offset within an alloca can in turn
+; reduce the alignment.
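+; (Concretely: %a's slice starts at offset 0 of an 8-byte-aligned alloca, so
+; its [22 x i8] partition keeps align 8; %b's slice starts at byte offset 6,
+; and an offset of 6 from an 8-aligned base is only guaranteed 2-aligned.)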
+; CHECK: @test3
+; CHECK: alloca [22 x i8], align 8
+; CHECK: alloca [18 x i8], align 2
+; CHECK: ret void
+
+entry:
+ %a = alloca { i8*, i8*, i8* }
+ %b = alloca { i8*, i8*, i8* }
+ %a_raw = bitcast { i8*, i8*, i8* }* %a to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a_raw, i8* %x, i32 22, i32 8, i1 false)
+ %b_raw = bitcast { i8*, i8*, i8* }* %b to i8*
+ %b_gep = getelementptr i8* %b_raw, i32 6
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b_gep, i8* %x, i32 18, i32 2, i1 false)
+ ret void
+}
+
+define void @test5() {
+; Test that we preserve underaligned loads and stores when splitting.
+; CHECK: @test5
+; CHECK: alloca [9 x i8]
+; CHECK: alloca [9 x i8]
+; CHECK: store volatile double 0.0{{.*}}, double* %{{.*}}, align 1
+; CHECK: load i16* %{{.*}}, align 1
+; CHECK: load double* %{{.*}}, align 1
+; CHECK: store volatile double %{{.*}}, double* %{{.*}}, align 1
+; CHECK: load i16* %{{.*}}, align 1
+; CHECK: ret void
+
+entry:
+ %a = alloca [18 x i8]
+ %raw1 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 0
+ %ptr1 = bitcast i8* %raw1 to double*
+ store volatile double 0.0, double* %ptr1, align 1
+ %weird_gep1 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 7
+ %weird_cast1 = bitcast i8* %weird_gep1 to i16*
+ %weird_load1 = load i16* %weird_cast1, align 1
+
+ %raw2 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 9
+ %ptr2 = bitcast i8* %raw2 to double*
+ %d1 = load double* %ptr1, align 1
+ store volatile double %d1, double* %ptr2, align 1
+ %weird_gep2 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 16
+ %weird_cast2 = bitcast i8* %weird_gep2 to i16*
+ %weird_load2 = load i16* %weird_cast2, align 1
+
+ ret void
+}
+
+define void @test6() {
+; Test that we promote alignment when the underlying alloca switches to one
+; that innately provides it.
+; CHECK: @test6
+; CHECK: alloca double
+; CHECK: alloca double
+; CHECK-NOT: align
+; CHECK: ret void
+
+entry:
+ %a = alloca [16 x i8]
+ %raw1 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 0
+ %ptr1 = bitcast i8* %raw1 to double*
+ store volatile double 0.0, double* %ptr1, align 1
+
+ %raw2 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 8
+ %ptr2 = bitcast i8* %raw2 to double*
+ %val = load double* %ptr1, align 1
+ store volatile double %val, double* %ptr2, align 1
+
+ ret void
+}
+
+define void @test7(i8* %out) {
+; Test that we properly compute the destination alignment when rewriting
+; memcpys as direct loads or stores.
+; CHECK: @test7
+; CHECK-NOT: alloca
+
+entry:
+ %a = alloca [16 x i8]
+ %raw1 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 0
+ %ptr1 = bitcast i8* %raw1 to double*
+ %raw2 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 8
+ %ptr2 = bitcast i8* %raw2 to double*
+
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %raw1, i8* %out, i32 16, i32 0, i1 false)
+; CHECK: %[[val2:.*]] = load double* %{{.*}}, align 1
+; CHECK: %[[val1:.*]] = load double* %{{.*}}, align 1
+
+ %val1 = load double* %ptr2, align 1
+ %val2 = load double* %ptr1, align 1
+
+ store double %val1, double* %ptr1, align 1
+ store double %val2, double* %ptr2, align 1
+
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %out, i8* %raw1, i32 16, i32 0, i1 false)
+; CHECK: store double %[[val1]], double* %{{.*}}, align 1
+; CHECK: store double %[[val2]], double* %{{.*}}, align 1
+
+ ret void
+; CHECK: ret void
+}
diff --git a/test/Transforms/SROA/basictest.ll b/test/Transforms/SROA/basictest.ll
new file mode 100644
index 0000000000000..b363eefb3f9d9
--- /dev/null
+++ b/test/Transforms/SROA/basictest.ll
@@ -0,0 +1,1136 @@
+; RUN: opt < %s -sroa -S | FileCheck %s
+; RUN: opt < %s -sroa -force-ssa-updater -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
+
+declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.end(i64, i8* nocapture)
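+; The lifetime intrinsics mark the pointed-to memory as dead outside the
+; bracketed region; SROA treats them as droppable uses, so they do not block
+; promotion of %a1 and %a2 below.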
+
+define i32 @test0() {
+; CHECK: @test0
+; CHECK-NOT: alloca
+; CHECK: ret i32
+
+entry:
+ %a1 = alloca i32
+ %a2 = alloca float
+
+ %a1.i8 = bitcast i32* %a1 to i8*
+ call void @llvm.lifetime.start(i64 4, i8* %a1.i8)
+
+ store i32 0, i32* %a1
+ %v1 = load i32* %a1
+
+ call void @llvm.lifetime.end(i64 4, i8* %a1.i8)
+
+ %a2.i8 = bitcast float* %a2 to i8*
+ call void @llvm.lifetime.start(i64 4, i8* %a2.i8)
+
+ store float 0.0, float* %a2
+ %v2 = load float* %a2
+ %v2.int = bitcast float %v2 to i32
+ %sum1 = add i32 %v1, %v2.int
+
+ call void @llvm.lifetime.end(i64 4, i8* %a2.i8)
+
+ ret i32 %sum1
+}
+
+define i32 @test1() {
+; CHECK: @test1
+; CHECK-NOT: alloca
+; CHECK: ret i32 0
+
+entry:
+ %X = alloca { i32, float }
+ %Y = getelementptr { i32, float }* %X, i64 0, i32 0
+ store i32 0, i32* %Y
+ %Z = load i32* %Y
+ ret i32 %Z
+}
+
+define i64 @test2(i64 %X) {
+; CHECK: @test2
+; CHECK-NOT: alloca
+; CHECK: ret i64 %X
+
+entry:
+ %A = alloca [8 x i8]
+ %B = bitcast [8 x i8]* %A to i64*
+ store i64 %X, i64* %B
+ br label %L2
+
+L2:
+ %Z = load i64* %B
+ ret i64 %Z
+}
+
+define void @test3(i8* %dst, i8* %src) {
+; CHECK: @test3
+
+entry:
+ %a = alloca [300 x i8]
+; CHECK-NOT: alloca
+; CHECK: %[[test3_a1:.*]] = alloca [42 x i8]
+; CHECK-NEXT: %[[test3_a2:.*]] = alloca [99 x i8]
+; CHECK-NEXT: %[[test3_a3:.*]] = alloca [16 x i8]
+; CHECK-NEXT: %[[test3_a4:.*]] = alloca [42 x i8]
+; CHECK-NEXT: %[[test3_a5:.*]] = alloca [7 x i8]
+; CHECK-NEXT: %[[test3_a6:.*]] = alloca [7 x i8]
+; CHECK-NEXT: %[[test3_a7:.*]] = alloca [85 x i8]
+
+ %b = getelementptr [300 x i8]* %a, i64 0, i64 0
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b, i8* %src, i32 300, i32 1, i1 false)
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8]* %[[test3_a1]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %src, i32 42
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 42
+; CHECK-NEXT: %[[test3_r1:.*]] = load i8* %[[gep]]
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 43
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [99 x i8]* %[[test3_a2]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 99
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 142
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 16
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 158
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8]* %[[test3_a4]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 42
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 200
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 207
+; CHECK-NEXT: %[[test3_r2:.*]] = load i8* %[[gep]]
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 208
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 215
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [85 x i8]* %[[test3_a7]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 85
+
+ ; Clobber a single element of the array; this should be promotable.
+ %c = getelementptr [300 x i8]* %a, i64 0, i64 42
+ store i8 0, i8* %c
+
+ ; Make a sequence of overlapping stores to the array. These overlap both in
+ ; forward strides and in shrinking accesses.
+ %overlap.1.i8 = getelementptr [300 x i8]* %a, i64 0, i64 142
+ %overlap.2.i8 = getelementptr [300 x i8]* %a, i64 0, i64 143
+ %overlap.3.i8 = getelementptr [300 x i8]* %a, i64 0, i64 144
+ %overlap.4.i8 = getelementptr [300 x i8]* %a, i64 0, i64 145
+ %overlap.5.i8 = getelementptr [300 x i8]* %a, i64 0, i64 146
+ %overlap.6.i8 = getelementptr [300 x i8]* %a, i64 0, i64 147
+ %overlap.7.i8 = getelementptr [300 x i8]* %a, i64 0, i64 148
+ %overlap.8.i8 = getelementptr [300 x i8]* %a, i64 0, i64 149
+ %overlap.9.i8 = getelementptr [300 x i8]* %a, i64 0, i64 150
+ %overlap.1.i16 = bitcast i8* %overlap.1.i8 to i16*
+ %overlap.1.i32 = bitcast i8* %overlap.1.i8 to i32*
+ %overlap.1.i64 = bitcast i8* %overlap.1.i8 to i64*
+ %overlap.2.i64 = bitcast i8* %overlap.2.i8 to i64*
+ %overlap.3.i64 = bitcast i8* %overlap.3.i8 to i64*
+ %overlap.4.i64 = bitcast i8* %overlap.4.i8 to i64*
+ %overlap.5.i64 = bitcast i8* %overlap.5.i8 to i64*
+ %overlap.6.i64 = bitcast i8* %overlap.6.i8 to i64*
+ %overlap.7.i64 = bitcast i8* %overlap.7.i8 to i64*
+ %overlap.8.i64 = bitcast i8* %overlap.8.i8 to i64*
+ %overlap.9.i64 = bitcast i8* %overlap.9.i8 to i64*
+ store i8 1, i8* %overlap.1.i8
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 0
+; CHECK-NEXT: store i8 1, i8* %[[gep]]
+ store i16 1, i16* %overlap.1.i16
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast [16 x i8]* %[[test3_a3]] to i16*
+; CHECK-NEXT: store i16 1, i16* %[[bitcast]]
+ store i32 1, i32* %overlap.1.i32
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast [16 x i8]* %[[test3_a3]] to i32*
+; CHECK-NEXT: store i32 1, i32* %[[bitcast]]
+ store i64 1, i64* %overlap.1.i64
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast [16 x i8]* %[[test3_a3]] to i64*
+; CHECK-NEXT: store i64 1, i64* %[[bitcast]]
+ store i64 2, i64* %overlap.2.i64
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 1
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
+; CHECK-NEXT: store i64 2, i64* %[[bitcast]]
+ store i64 3, i64* %overlap.3.i64
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 2
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
+; CHECK-NEXT: store i64 3, i64* %[[bitcast]]
+ store i64 4, i64* %overlap.4.i64
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 3
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
+; CHECK-NEXT: store i64 4, i64* %[[bitcast]]
+ store i64 5, i64* %overlap.5.i64
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 4
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
+; CHECK-NEXT: store i64 5, i64* %[[bitcast]]
+ store i64 6, i64* %overlap.6.i64
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 5
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
+; CHECK-NEXT: store i64 6, i64* %[[bitcast]]
+ store i64 7, i64* %overlap.7.i64
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 6
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
+; CHECK-NEXT: store i64 7, i64* %[[bitcast]]
+ store i64 8, i64* %overlap.8.i64
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 7
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
+; CHECK-NEXT: store i64 8, i64* %[[bitcast]]
+ store i64 9, i64* %overlap.9.i64
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 8
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
+; CHECK-NEXT: store i64 9, i64* %[[bitcast]]
+
+ ; Make two sequences of overlapping stores with more gaps and irregularities.
+ %overlap2.1.0.i8 = getelementptr [300 x i8]* %a, i64 0, i64 200
+ %overlap2.1.1.i8 = getelementptr [300 x i8]* %a, i64 0, i64 201
+ %overlap2.1.2.i8 = getelementptr [300 x i8]* %a, i64 0, i64 202
+ %overlap2.1.3.i8 = getelementptr [300 x i8]* %a, i64 0, i64 203
+
+ %overlap2.2.0.i8 = getelementptr [300 x i8]* %a, i64 0, i64 208
+ %overlap2.2.1.i8 = getelementptr [300 x i8]* %a, i64 0, i64 209
+ %overlap2.2.2.i8 = getelementptr [300 x i8]* %a, i64 0, i64 210
+ %overlap2.2.3.i8 = getelementptr [300 x i8]* %a, i64 0, i64 211
+
+ %overlap2.1.0.i16 = bitcast i8* %overlap2.1.0.i8 to i16*
+ %overlap2.1.0.i32 = bitcast i8* %overlap2.1.0.i8 to i32*
+ %overlap2.1.1.i32 = bitcast i8* %overlap2.1.1.i8 to i32*
+ %overlap2.1.2.i32 = bitcast i8* %overlap2.1.2.i8 to i32*
+ %overlap2.1.3.i32 = bitcast i8* %overlap2.1.3.i8 to i32*
+ store i8 1, i8* %overlap2.1.0.i8
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 0
+; CHECK-NEXT: store i8 1, i8* %[[gep]]
+ store i16 1, i16* %overlap2.1.0.i16
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast [7 x i8]* %[[test3_a5]] to i16*
+; CHECK-NEXT: store i16 1, i16* %[[bitcast]]
+ store i32 1, i32* %overlap2.1.0.i32
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast [7 x i8]* %[[test3_a5]] to i32*
+; CHECK-NEXT: store i32 1, i32* %[[bitcast]]
+ store i32 2, i32* %overlap2.1.1.i32
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 1
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
+; CHECK-NEXT: store i32 2, i32* %[[bitcast]]
+ store i32 3, i32* %overlap2.1.2.i32
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 2
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
+; CHECK-NEXT: store i32 3, i32* %[[bitcast]]
+ store i32 4, i32* %overlap2.1.3.i32
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 3
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
+; CHECK-NEXT: store i32 4, i32* %[[bitcast]]
+
+ %overlap2.2.0.i32 = bitcast i8* %overlap2.2.0.i8 to i32*
+ %overlap2.2.1.i16 = bitcast i8* %overlap2.2.1.i8 to i16*
+ %overlap2.2.1.i32 = bitcast i8* %overlap2.2.1.i8 to i32*
+ %overlap2.2.2.i32 = bitcast i8* %overlap2.2.2.i8 to i32*
+ %overlap2.2.3.i32 = bitcast i8* %overlap2.2.3.i8 to i32*
+ store i32 1, i32* %overlap2.2.0.i32
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast [7 x i8]* %[[test3_a6]] to i32*
+; CHECK-NEXT: store i32 1, i32* %[[bitcast]]
+ store i8 1, i8* %overlap2.2.1.i8
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 1
+; CHECK-NEXT: store i8 1, i8* %[[gep]]
+ store i16 1, i16* %overlap2.2.1.i16
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 1
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
+; CHECK-NEXT: store i16 1, i16* %[[bitcast]]
+ store i32 1, i32* %overlap2.2.1.i32
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 1
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
+; CHECK-NEXT: store i32 1, i32* %[[bitcast]]
+ store i32 3, i32* %overlap2.2.2.i32
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 2
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
+; CHECK-NEXT: store i32 3, i32* %[[bitcast]]
+ store i32 4, i32* %overlap2.2.3.i32
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 3
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
+; CHECK-NEXT: store i32 4, i32* %[[bitcast]]
+
+ %overlap2.prefix = getelementptr i8* %overlap2.1.1.i8, i64 -4
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.prefix, i8* %src, i32 8, i32 1, i1 false)
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8]* %[[test3_a4]], i64 0, i64 39
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %src, i32 3
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 3
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 5
+
+ ; Bridge between the overlapping areas
+ call void @llvm.memset.p0i8.i32(i8* %overlap2.1.2.i8, i8 42, i32 8, i32 1, i1 false)
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 2
+; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* %[[gep]], i8 42, i32 5
+; ...promoted i8 store...
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* %[[gep]], i8 42, i32 2
+
+ ; Entirely within the second overlap.
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.2.1.i8, i8* %src, i32 5, i32 1, i1 false)
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 1
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep]], i8* %src, i32 5
+
+ ; Trailing past the second overlap.
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.2.2.i8, i8* %src, i32 8, i32 1, i1 false)
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 2
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep]], i8* %src, i32 5
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 5
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [85 x i8]* %[[test3_a7]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 3
+
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %b, i32 300, i32 1, i1 false)
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [42 x i8]* %[[test3_a1]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[gep]], i32 42
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 42
+; CHECK-NEXT: store i8 0, i8* %[[gep]]
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 43
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [99 x i8]* %[[test3_a2]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 99
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 142
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 16
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 158
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [42 x i8]* %[[test3_a4]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 42
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 200
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 207
+; CHECK-NEXT: store i8 42, i8* %[[gep]]
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 208
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 215
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [85 x i8]* %[[test3_a7]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 85
+
+ ret void
+}
+
+define void @test4(i8* %dst, i8* %src) {
+; CHECK: @test4
+
+entry:
+ %a = alloca [100 x i8]
+; CHECK-NOT: alloca
+; CHECK: %[[test4_a1:.*]] = alloca [20 x i8]
+; CHECK-NEXT: %[[test4_a2:.*]] = alloca [7 x i8]
+; CHECK-NEXT: %[[test4_a3:.*]] = alloca [10 x i8]
+; CHECK-NEXT: %[[test4_a4:.*]] = alloca [7 x i8]
+; CHECK-NEXT: %[[test4_a5:.*]] = alloca [7 x i8]
+; CHECK-NEXT: %[[test4_a6:.*]] = alloca [40 x i8]
+
+ %b = getelementptr [100 x i8]* %a, i64 0, i64 0
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b, i8* %src, i32 100, i32 1, i1 false)
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [20 x i8]* %[[test4_a1]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep]], i8* %src, i32 20
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 20
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
+; CHECK-NEXT: %[[test4_r1:.*]] = load i16* %[[bitcast]]
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 22
+; CHECK-NEXT: %[[test4_r2:.*]] = load i8* %[[gep]]
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 23
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a2]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 30
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [10 x i8]* %[[test4_a3]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 10
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 40
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
+; CHECK-NEXT: %[[test4_r3:.*]] = load i16* %[[bitcast]]
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 42
+; CHECK-NEXT: %[[test4_r4:.*]] = load i8* %[[gep]]
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 43
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a4]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 50
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
+; CHECK-NEXT: %[[test4_r5:.*]] = load i16* %[[bitcast]]
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 52
+; CHECK-NEXT: %[[test4_r6:.*]] = load i8* %[[gep]]
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 53
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a5]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 60
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [40 x i8]* %[[test4_a6]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 40
+
+ %a.src.1 = getelementptr [100 x i8]* %a, i64 0, i64 20
+ %a.dst.1 = getelementptr [100 x i8]* %a, i64 0, i64 40
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.dst.1, i8* %a.src.1, i32 10, i32 1, i1 false)
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a4]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a2]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
+
+ ; Clobber a single element of the array; this should be promotable and be deleted.
+ %c = getelementptr [100 x i8]* %a, i64 0, i64 42
+ store i8 0, i8* %c
+
+ %a.src.2 = getelementptr [100 x i8]* %a, i64 0, i64 50
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* %a.dst.1, i8* %a.src.2, i32 10, i32 1, i1 false)
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a4]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a5]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
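+; (Note: the original call above is a memmove, but once SROA has rewritten it
+; onto disjoint partitions the source and destination can no longer overlap,
+; so it is safely turned into the memcpy matched here.)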
+
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %b, i32 100, i32 1, i1 false)
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [20 x i8]* %[[test4_a1]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[gep]], i32 20
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 20
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
+; CHECK-NEXT: store i16 %[[test4_r1]], i16* %[[bitcast]]
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 22
+; CHECK-NEXT: store i8 %[[test4_r2]], i8* %[[gep]]
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 23
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a2]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 30
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [10 x i8]* %[[test4_a3]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 10
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 40
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
+; CHECK-NEXT: store i16 %[[test4_r5]], i16* %[[bitcast]]
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 42
+; CHECK-NEXT: store i8 %[[test4_r6]], i8* %[[gep]]
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 43
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a4]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 50
+; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
+; CHECK-NEXT: store i16 %[[test4_r5]], i16* %[[bitcast]]
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 52
+; CHECK-NEXT: store i8 %[[test4_r6]], i8* %[[gep]]
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 53
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a5]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 60
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [40 x i8]* %[[test4_a6]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 40
+
+ ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
+
+define i16 @test5() {
+; CHECK: @test5
+; CHECK-NOT: alloca float
+; CHECK: %[[cast:.*]] = bitcast float 0.0{{.*}} to i32
+; CHECK-NEXT: %[[shr:.*]] = lshr i32 %[[cast]], 16
+; CHECK-NEXT: %[[trunc:.*]] = trunc i32 %[[shr]] to i16
+; CHECK-NEXT: ret i16 %[[trunc]]
+
+entry:
+ %a = alloca [4 x i8]
+ %fptr = bitcast [4 x i8]* %a to float*
+ store float 0.0, float* %fptr
+ %ptr = getelementptr [4 x i8]* %a, i32 0, i32 2
+ %iptr = bitcast i8* %ptr to i16*
+ %val = load i16* %iptr
+ ret i16 %val
+}
+
+define i32 @test6() {
+; CHECK: @test6
+; CHECK: alloca i32
+; CHECK-NEXT: store volatile i32
+; CHECK-NEXT: load i32*
+; CHECK-NEXT: ret i32
+
+entry:
+ %a = alloca [4 x i8]
+ %ptr = getelementptr [4 x i8]* %a, i32 0, i32 0
+ call void @llvm.memset.p0i8.i32(i8* %ptr, i8 42, i32 4, i32 1, i1 true)
+ %iptr = bitcast i8* %ptr to i32*
+ %val = load i32* %iptr
+ ret i32 %val
+}
+
+define void @test7(i8* %src, i8* %dst) {
+; CHECK: @test7
+; CHECK: alloca i32
+; CHECK-NEXT: bitcast i8* %src to i32*
+; CHECK-NEXT: load volatile i32*
+; CHECK-NEXT: store volatile i32
+; CHECK-NEXT: bitcast i8* %dst to i32*
+; CHECK-NEXT: load volatile i32*
+; CHECK-NEXT: store volatile i32
+; CHECK-NEXT: ret
+
+entry:
+ %a = alloca [4 x i8]
+ %ptr = getelementptr [4 x i8]* %a, i32 0, i32 0
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i32 1, i1 true)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i32 1, i1 true)
+ ret void
+}
+
+
+%S1 = type { i32, i32, [16 x i8] }
+%S2 = type { %S1*, %S2* }
+
+define %S2 @test8(%S2* %s2) {
+; CHECK: @test8
+entry:
+ %new = alloca %S2
+; CHECK-NOT: alloca
+
+ %s2.next.ptr = getelementptr %S2* %s2, i64 0, i32 1
+ %s2.next = load %S2** %s2.next.ptr
+; CHECK: %[[gep:.*]] = getelementptr %S2* %s2, i64 0, i32 1
+; CHECK-NEXT: %[[next:.*]] = load %S2** %[[gep]]
+
+ %s2.next.s1.ptr = getelementptr %S2* %s2.next, i64 0, i32 0
+ %s2.next.s1 = load %S1** %s2.next.s1.ptr
+ %new.s1.ptr = getelementptr %S2* %new, i64 0, i32 0
+ store %S1* %s2.next.s1, %S1** %new.s1.ptr
+ %s2.next.next.ptr = getelementptr %S2* %s2.next, i64 0, i32 1
+ %s2.next.next = load %S2** %s2.next.next.ptr
+ %new.next.ptr = getelementptr %S2* %new, i64 0, i32 1
+ store %S2* %s2.next.next, %S2** %new.next.ptr
+; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2* %[[next]], i64 0, i32 0
+; CHECK-NEXT: %[[next_s1:.*]] = load %S1** %[[gep]]
+; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2* %[[next]], i64 0, i32 1
+; CHECK-NEXT: %[[next_next:.*]] = load %S2** %[[gep]]
+
+ %new.s1 = load %S1** %new.s1.ptr
+ %result1 = insertvalue %S2 undef, %S1* %new.s1, 0
+; CHECK-NEXT: %[[result1:.*]] = insertvalue %S2 undef, %S1* %[[next_s1]], 0
+ %new.next = load %S2** %new.next.ptr
+ %result2 = insertvalue %S2 %result1, %S2* %new.next, 1
+; CHECK-NEXT: %[[result2:.*]] = insertvalue %S2 %[[result1]], %S2* %[[next_next]], 1
+ ret %S2 %result2
+; CHECK-NEXT: ret %S2 %[[result2]]
+}
+
+define i64 @test9() {
+; Ensure we can handle loads off the end of an alloca even when wrapped in
+; weird bit casts and types. The result is undef, but this shouldn't crash
+; anything.
+; CHECK: @test9
+; CHECK-NOT: alloca
+; CHECK: ret i64 undef
+
+entry:
+ %a = alloca { [3 x i8] }
+ %gep1 = getelementptr inbounds { [3 x i8] }* %a, i32 0, i32 0, i32 0
+ store i8 0, i8* %gep1, align 1
+ %gep2 = getelementptr inbounds { [3 x i8] }* %a, i32 0, i32 0, i32 1
+ store i8 0, i8* %gep2, align 1
+ %gep3 = getelementptr inbounds { [3 x i8] }* %a, i32 0, i32 0, i32 2
+ store i8 26, i8* %gep3, align 1
+ %cast = bitcast { [3 x i8] }* %a to { i64 }*
+ %elt = getelementptr inbounds { i64 }* %cast, i32 0, i32 0
+ %result = load i64* %elt
+ ret i64 %result
+}
+
+define %S2* @test10() {
+; CHECK: @test10
+; CHECK-NOT: alloca %S2*
+; CHECK: ret %S2* null
+
+entry:
+ %a = alloca [8 x i8]
+ %ptr = getelementptr [8 x i8]* %a, i32 0, i32 0
+ call void @llvm.memset.p0i8.i32(i8* %ptr, i8 0, i32 8, i32 1, i1 false)
+ %s2ptrptr = bitcast i8* %ptr to %S2**
+ %s2ptr = load %S2** %s2ptrptr
+ ret %S2* %s2ptr
+}
+
+define i32 @test11() {
+; CHECK: @test11
+; CHECK-NOT: alloca
+; CHECK: ret i32 0
+
+entry:
+ %X = alloca i32
+ br i1 undef, label %good, label %bad
+
+good:
+ %Y = getelementptr i32* %X, i64 0
+ store i32 0, i32* %Y
+ %Z = load i32* %Y
+ ret i32 %Z
+
+bad:
+ %Y2 = getelementptr i32* %X, i64 1
+ store i32 0, i32* %Y2
+ %Z2 = load i32* %Y2
+ ret i32 %Z2
+}
+
+define i8 @test12() {
+; We fully promote these to the i24 load or store size, resulting in just masks
+; and other operations that instcombine will fold, but no alloca.
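+; As a worked example of the little-endian byte-insert pattern matched below:
+; writing byte 1 of an i24 clears bits 8-15 by AND-ing with -65281 (0xFF00FF)
+; and then ORs in the new byte shifted left by 8.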
+;
+; CHECK: @test12
+
+entry:
+ %a = alloca [3 x i8]
+ %b = alloca [3 x i8]
+; CHECK-NOT: alloca
+
+ %a0ptr = getelementptr [3 x i8]* %a, i64 0, i32 0
+ store i8 0, i8* %a0ptr
+ %a1ptr = getelementptr [3 x i8]* %a, i64 0, i32 1
+ store i8 0, i8* %a1ptr
+ %a2ptr = getelementptr [3 x i8]* %a, i64 0, i32 2
+ store i8 0, i8* %a2ptr
+ %aiptr = bitcast [3 x i8]* %a to i24*
+ %ai = load i24* %aiptr
+; CHECK-NOT: store
+; CHECK-NOT: load
+; CHECK: %[[ext2:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[shift2:.*]] = shl i24 %[[ext2]], 16
+; CHECK-NEXT: %[[mask2:.*]] = and i24 undef, 65535
+; CHECK-NEXT: %[[insert2:.*]] = or i24 %[[mask2]], %[[shift2]]
+; CHECK-NEXT: %[[ext1:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[shift1:.*]] = shl i24 %[[ext1]], 8
+; CHECK-NEXT: %[[mask1:.*]] = and i24 %[[insert2]], -65281
+; CHECK-NEXT: %[[insert1:.*]] = or i24 %[[mask1]], %[[shift1]]
+; CHECK-NEXT: %[[ext0:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[mask0:.*]] = and i24 %[[insert1]], -256
+; CHECK-NEXT: %[[insert0:.*]] = or i24 %[[mask0]], %[[ext0]]
+
+ %biptr = bitcast [3 x i8]* %b to i24*
+ store i24 %ai, i24* %biptr
+ %b0ptr = getelementptr [3 x i8]* %b, i64 0, i32 0
+ %b0 = load i8* %b0ptr
+ %b1ptr = getelementptr [3 x i8]* %b, i64 0, i32 1
+ %b1 = load i8* %b1ptr
+ %b2ptr = getelementptr [3 x i8]* %b, i64 0, i32 2
+ %b2 = load i8* %b2ptr
+; CHECK-NOT: store
+; CHECK-NOT: load
+; CHECK: %[[trunc0:.*]] = trunc i24 %[[insert0]] to i8
+; CHECK-NEXT: %[[shift1:.*]] = lshr i24 %[[insert0]], 8
+; CHECK-NEXT: %[[trunc1:.*]] = trunc i24 %[[shift1]] to i8
+; CHECK-NEXT: %[[shift2:.*]] = lshr i24 %[[insert0]], 16
+; CHECK-NEXT: %[[trunc2:.*]] = trunc i24 %[[shift2]] to i8
+
+ %bsum0 = add i8 %b0, %b1
+ %bsum1 = add i8 %bsum0, %b2
+ ret i8 %bsum1
+; CHECK: %[[sum0:.*]] = add i8 %[[trunc0]], %[[trunc1]]
+; CHECK-NEXT: %[[sum1:.*]] = add i8 %[[sum0]], %[[trunc2]]
+; CHECK-NEXT: ret i8 %[[sum1]]
+}
+
+define i32 @test13() {
+; Ensure we don't crash, and that we handle undefined loads that straddle the
+; end of the allocation.
+; CHECK: @test13
+; CHECK: %[[ret:.*]] = zext i16 undef to i32
+; CHECK: ret i32 %[[ret]]
+
+entry:
+ %a = alloca [3 x i8]
+ %b0ptr = getelementptr [3 x i8]* %a, i64 0, i32 0
+ store i8 0, i8* %b0ptr
+ %b1ptr = getelementptr [3 x i8]* %a, i64 0, i32 1
+ store i8 0, i8* %b1ptr
+ %b2ptr = getelementptr [3 x i8]* %a, i64 0, i32 2
+ store i8 0, i8* %b2ptr
+ %iptrcast = bitcast [3 x i8]* %a to i16*
+ %iptrgep = getelementptr i16* %iptrcast, i64 1
+ %i = load i16* %iptrgep
+ %ret = zext i16 %i to i32
+ ret i32 %ret
+}
+
+%test14.struct = type { [3 x i32] }
+
+define void @test14(...) nounwind uwtable {
+; This is a strange case where we split allocas into promotable partitions, but
+; also gain enough data to prove they must be dead allocas due to GEPs that walk
+; across two adjacent allocas. Test that we don't try to promote or otherwise
+; do bad things to these dead allocas; they should just be removed.
+; CHECK: @test14
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret void
+
+entry:
+ %a = alloca %test14.struct
+ %p = alloca %test14.struct*
+ %0 = bitcast %test14.struct* %a to i8*
+ %1 = getelementptr i8* %0, i64 12
+ %2 = bitcast i8* %1 to %test14.struct*
+ %3 = getelementptr inbounds %test14.struct* %2, i32 0, i32 0
+ %4 = getelementptr inbounds %test14.struct* %a, i32 0, i32 0
+ %5 = bitcast [3 x i32]* %3 to i32*
+ %6 = bitcast [3 x i32]* %4 to i32*
+ %7 = load i32* %6, align 4
+ store i32 %7, i32* %5, align 4
+ %8 = getelementptr inbounds i32* %5, i32 1
+ %9 = getelementptr inbounds i32* %6, i32 1
+ %10 = load i32* %9, align 4
+ store i32 %10, i32* %8, align 4
+ %11 = getelementptr inbounds i32* %5, i32 2
+ %12 = getelementptr inbounds i32* %6, i32 2
+ %13 = load i32* %12, align 4
+ store i32 %13, i32* %11, align 4
+ ret void
+}
+
+define i32 @test15(i1 %flag) nounwind uwtable {
+; Ensure that when there are dead instructions using an alloca that are not
+; loads or stores, we still delete them during partitioning and rewriting.
+; Otherwise we'll go on to promote them while they still have unpromotable uses.
+; CHECK: @test15
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label %loop
+; CHECK: loop:
+; CHECK-NEXT: br label %loop
+
+entry:
+ %l0 = alloca i64
+ %l1 = alloca i64
+ %l2 = alloca i64
+ %l3 = alloca i64
+ br label %loop
+
+loop:
+ %dead3 = phi i8* [ %gep3, %loop ], [ null, %entry ]
+
+ store i64 1879048192, i64* %l0, align 8
+ %bc0 = bitcast i64* %l0 to i8*
+ %gep0 = getelementptr i8* %bc0, i64 3
+ %dead0 = bitcast i8* %gep0 to i64*
+
+ store i64 1879048192, i64* %l1, align 8
+ %bc1 = bitcast i64* %l1 to i8*
+ %gep1 = getelementptr i8* %bc1, i64 3
+ %dead1 = getelementptr i8* %gep1, i64 1
+
+ store i64 1879048192, i64* %l2, align 8
+ %bc2 = bitcast i64* %l2 to i8*
+ %gep2.1 = getelementptr i8* %bc2, i64 1
+ %gep2.2 = getelementptr i8* %bc2, i64 3
+ ; Note that this select should get visited multiple times due to using two
+ ; different GEPs off the same alloca. We should only delete it once.
+ %dead2 = select i1 %flag, i8* %gep2.1, i8* %gep2.2
+
+ store i64 1879048192, i64* %l3, align 8
+ %bc3 = bitcast i64* %l3 to i8*
+ %gep3 = getelementptr i8* %bc3, i64 3
+
+ br label %loop
+}
+
+define void @test16(i8* %src, i8* %dst) {
+; Ensure that we can promote an alloca of [3 x i8] to an i24 SSA value.
+; CHECK: @test16
+; CHECK-NOT: alloca
+; CHECK: %[[srccast:.*]] = bitcast i8* %src to i24*
+; CHECK-NEXT: load i24* %[[srccast]]
+; CHECK-NEXT: %[[dstcast:.*]] = bitcast i8* %dst to i24*
+; CHECK-NEXT: store i24 0, i24* %[[dstcast]]
+; CHECK-NEXT: ret void
+
+entry:
+ %a = alloca [3 x i8]
+ %ptr = getelementptr [3 x i8]* %a, i32 0, i32 0
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i32 1, i1 false)
+ %cast = bitcast i8* %ptr to i24*
+ store i24 0, i24* %cast
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i32 1, i1 false)
+ ret void
+}
+
+define void @test17(i8* %src, i8* %dst) {
+; Ensure that we can rewrite unpromotable memcpys which extend past the end of
+; the alloca.
+; CHECK: @test17
+; CHECK: %[[a:.*]] = alloca [3 x i8]
+; CHECK-NEXT: %[[ptr:.*]] = getelementptr [3 x i8]* %[[a]], i32 0, i32 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[ptr]], i8* %src,
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[ptr]],
+; CHECK-NEXT: ret void
+
+entry:
+ %a = alloca [3 x i8]
+ %ptr = getelementptr [3 x i8]* %a, i32 0, i32 0
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i32 1, i1 true)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i32 1, i1 true)
+ ret void
+}
+
+define void @test18(i8* %src, i8* %dst, i32 %size) {
+; Preserve transfer intrinsics with a variable size, even if they overlap with
+; fixed-size operations. Further, continue to split and promote allocas preceding
+; the variable-sized intrinsic.
+; CHECK: @test18
+; CHECK: %[[a:.*]] = alloca [34 x i8]
+; CHECK: %[[srcgep1:.*]] = getelementptr inbounds i8* %src, i64 4
+; CHECK-NEXT: %[[srccast1:.*]] = bitcast i8* %[[srcgep1]] to i32*
+; CHECK-NEXT: %[[srcload:.*]] = load i32* %[[srccast1]]
+; CHECK-NEXT: %[[agep1:.*]] = getelementptr inbounds [34 x i8]* %[[a]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[agep1]], i8* %src, i32 %size,
+; CHECK-NEXT: %[[agep2:.*]] = getelementptr inbounds [34 x i8]* %[[a]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* %[[agep2]], i8 42, i32 %size,
+; CHECK-NEXT: %[[dstcast1:.*]] = bitcast i8* %dst to i32*
+; CHECK-NEXT: store i32 42, i32* %[[dstcast1]]
+; CHECK-NEXT: %[[dstgep1:.*]] = getelementptr inbounds i8* %dst, i64 4
+; CHECK-NEXT: %[[dstcast2:.*]] = bitcast i8* %[[dstgep1]] to i32*
+; CHECK-NEXT: store i32 %[[srcload]], i32* %[[dstcast2]]
+; CHECK-NEXT: %[[agep3:.*]] = getelementptr inbounds [34 x i8]* %[[a]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[agep3]], i32 %size,
+; CHECK-NEXT: ret void
+
+entry:
+ %a = alloca [42 x i8]
+ %ptr = getelementptr [42 x i8]* %a, i32 0, i32 0
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 8, i32 1, i1 false)
+ %ptr2 = getelementptr [42 x i8]* %a, i32 0, i32 8
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr2, i8* %src, i32 %size, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* %ptr2, i8 42, i32 %size, i32 1, i1 false)
+ %cast = bitcast i8* %ptr to i32*
+ store i32 42, i32* %cast
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 8, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr2, i32 %size, i32 1, i1 false)
+ ret void
+}
+
+%opaque = type opaque
+
+define i32 @test19(%opaque* %x) {
+; This input will cause us to try to compute a natural GEP when rewriting
+; pointers in such a way that we try to GEP through the opaque type. Previously,
+; a check for an unsized type was missing and this crashed. Ensure it behaves
+; reasonably now.
+; CHECK: @test19
+; CHECK-NOT: alloca
+; CHECK: ret i32 undef
+
+entry:
+ %a = alloca { i64, i8* }
+ %cast1 = bitcast %opaque* %x to i8*
+ %cast2 = bitcast { i64, i8* }* %a to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast2, i8* %cast1, i32 16, i32 1, i1 false)
+ %gep = getelementptr inbounds { i64, i8* }* %a, i32 0, i32 0
+ %val = load i64* %gep
+ ret i32 undef
+}
+
+define i32 @test20() {
+; Ensure we can track negative offsets (before the beginning of the alloca) and
+; negative relative offsets from offsets starting past the end of the alloca.
+; CHECK: @test20
+; CHECK-NOT: alloca
+; CHECK: %[[sum1:.*]] = add i32 1, 2
+; CHECK: %[[sum2:.*]] = add i32 %[[sum1]], 3
+; CHECK: ret i32 %[[sum2]]
+
+entry:
+ %a = alloca [3 x i32]
+ %gep1 = getelementptr [3 x i32]* %a, i32 0, i32 0
+ store i32 1, i32* %gep1
+ %gep2.1 = getelementptr [3 x i32]* %a, i32 0, i32 -2
+ %gep2.2 = getelementptr i32* %gep2.1, i32 3
+ store i32 2, i32* %gep2.2
+ %gep3.1 = getelementptr [3 x i32]* %a, i32 0, i32 14
+ %gep3.2 = getelementptr i32* %gep3.1, i32 -12
+ store i32 3, i32* %gep3.2
+
+ %load1 = load i32* %gep1
+ %load2 = load i32* %gep2.2
+ %load3 = load i32* %gep3.2
+ %sum1 = add i32 %load1, %load2
+ %sum2 = add i32 %sum1, %load3
+ ret i32 %sum2
+}
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+
+define i8 @test21() {
+; Test allocations and offsets which border on overflow of the int64_t used
+; internally. This is awkward to test thoroughly, as LLVM doesn't cleanly
+; support such extreme constructs.
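+; (For reference: the alloca is 2^61 - 1 bytes, the GEP offsets sit at or near
+; INT64_MIN and INT64_MAX, and the memset length is UINT64_MAX, so the offset
+; arithmetic is exercised right at its overflow edges.)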
+; CHECK: @test21
+; CHECK-NOT: alloca
+; CHECK: or i8 -1, -1
+
+entry:
+ %a = alloca [2305843009213693951 x i8]
+ %gep0 = getelementptr [2305843009213693951 x i8]* %a, i64 0, i64 2305843009213693949
+ store i8 255, i8* %gep0
+ %gep1 = getelementptr [2305843009213693951 x i8]* %a, i64 0, i64 -9223372036854775807
+ %gep2 = getelementptr i8* %gep1, i64 -1
+ call void @llvm.memset.p0i8.i64(i8* %gep2, i8 0, i64 18446744073709551615, i32 1, i1 false)
+ %gep3 = getelementptr i8* %gep1, i64 9223372036854775807
+ %gep4 = getelementptr i8* %gep3, i64 9223372036854775807
+ %gep5 = getelementptr i8* %gep4, i64 -6917529027641081857
+ store i8 255, i8* %gep5
+ %cast1 = bitcast i8* %gep4 to i32*
+ store i32 0, i32* %cast1
+ %load = load i8* %gep0
+ %gep6 = getelementptr i8* %gep0, i32 1
+ %load2 = load i8* %gep6
+ %result = or i8 %load, %load2
+ ret i8 %result
+}
+
+%PR13916.struct = type { i8 }
+
+define void @PR13916.1() {
+; Ensure that we handle overlapping memcpy intrinsics correctly, especially in
+; the case where there is a directly identical value for both source and dest.
+; CHECK: @PR13916.1
+; CHECK-NOT: alloca
+; CHECK: ret void
+
+entry:
+ %a = alloca i8
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %a, i32 1, i32 1, i1 false)
+ %tmp2 = load i8* %a
+ ret void
+}
+
+define void @PR13916.2() {
+; Check whether we continue to handle them correctly when they start off with
+; different pointer value chains, but during rewriting we coalesce them into the
+; same value.
+; CHECK: @PR13916.2
+; CHECK-NOT: alloca
+; CHECK: ret void
+
+entry:
+ %a = alloca %PR13916.struct, align 1
+ br i1 undef, label %if.then, label %if.end
+
+if.then:
+ %tmp0 = bitcast %PR13916.struct* %a to i8*
+ %tmp1 = bitcast %PR13916.struct* %a to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp0, i8* %tmp1, i32 1, i32 1, i1 false)
+ br label %if.end
+
+if.end:
+ %gep = getelementptr %PR13916.struct* %a, i32 0, i32 0
+ %tmp2 = load i8* %gep
+ ret void
+}
+
+define void @PR13990() {
+; Ensure we can handle cases where processing one alloca causes the other
+; alloca to become dead and get deleted. This might crash or fail under
+; Valgrind if we regress.
+; CHECK: @PR13990
+; CHECK-NOT: alloca
+; CHECK: unreachable
+; CHECK: unreachable
+
+entry:
+ %tmp1 = alloca i8*
+ %tmp2 = alloca i8*
+ br i1 undef, label %bb1, label %bb2
+
+bb1:
+ store i8* undef, i8** %tmp2
+ br i1 undef, label %bb2, label %bb3
+
+bb2:
+ %tmp50 = select i1 undef, i8** %tmp2, i8** %tmp1
+ br i1 undef, label %bb3, label %bb4
+
+bb3:
+ unreachable
+
+bb4:
+ unreachable
+}
+
+define double @PR13969(double %x) {
+; Check that we detect when promotion will un-escape an alloca and iterate to
+; re-try running SROA over that alloca. Without that, the two allocas that are
+; stored into a dead alloca don't get rewritten and promoted.
+; CHECK: @PR13969
+
+entry:
+ %a = alloca double
+ %b = alloca double*
+ %c = alloca double
+; CHECK-NOT: alloca
+
+ store double %x, double* %a
+ store double* %c, double** %b
+ store double* %a, double** %b
+ store double %x, double* %c
+ %ret = load double* %a
+; CHECK-NOT: store
+; CHECK-NOT: load
+
+ ret double %ret
+; CHECK: ret double %x
+}
+
+%PR14034.struct = type { { {} }, i32, %PR14034.list }
+%PR14034.list = type { %PR14034.list*, %PR14034.list* }
+
+define void @PR14034() {
+; This test case forms GEPs into the empty leading struct members; it crashed
+; (under Valgrind) before we fixed the PR. The important thing is to handle
+; empty structs gracefully.
+; CHECK: @PR14034
+
+entry:
+ %a = alloca %PR14034.struct
+ %list = getelementptr %PR14034.struct* %a, i32 0, i32 2
+ %prev = getelementptr %PR14034.list* %list, i32 0, i32 1
+ store %PR14034.list* undef, %PR14034.list** %prev
+ %cast0 = bitcast %PR14034.struct* undef to i8*
+ %cast1 = bitcast %PR14034.struct* %a to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast0, i8* %cast1, i32 12, i32 0, i1 false)
+ ret void
+}
+
+define i32 @test22(i32 %x) {
+; Test that SROA and promotion are not confused by a grab-bag mixture of pointer
+; types involving wrapper aggregates and zero-length aggregate members.
+; CHECK: @test22
+
+entry:
+ %a1 = alloca { { [1 x { i32 }] } }
+ %a2 = alloca { {}, { float }, [0 x i8] }
+ %a3 = alloca { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }
+; CHECK-NOT: alloca
+
+ %wrap1 = insertvalue [1 x { i32 }] undef, i32 %x, 0, 0
+ %gep1 = getelementptr { { [1 x { i32 }] } }* %a1, i32 0, i32 0, i32 0
+ store [1 x { i32 }] %wrap1, [1 x { i32 }]* %gep1
+
+ %gep2 = getelementptr { { [1 x { i32 }] } }* %a1, i32 0, i32 0
+ %ptrcast1 = bitcast { [1 x { i32 }] }* %gep2 to { [1 x { float }] }*
+ %load1 = load { [1 x { float }] }* %ptrcast1
+ %unwrap1 = extractvalue { [1 x { float }] } %load1, 0, 0
+
+ %wrap2 = insertvalue { {}, { float }, [0 x i8] } undef, { float } %unwrap1, 1
+ store { {}, { float }, [0 x i8] } %wrap2, { {}, { float }, [0 x i8] }* %a2
+
+ %gep3 = getelementptr { {}, { float }, [0 x i8] }* %a2, i32 0, i32 1, i32 0
+ %ptrcast2 = bitcast float* %gep3 to <4 x i8>*
+ %load3 = load <4 x i8>* %ptrcast2
+ %valcast1 = bitcast <4 x i8> %load3 to i32
+
+ %wrap3 = insertvalue [1 x [1 x i32]] undef, i32 %valcast1, 0, 0
+ %wrap4 = insertvalue { [1 x [1 x i32]], {} } undef, [1 x [1 x i32]] %wrap3, 0
+ %gep4 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1
+ %ptrcast3 = bitcast { [0 x double], [1 x [1 x <4 x i8>]], {} }* %gep4 to { [1 x [1 x i32]], {} }*
+ store { [1 x [1 x i32]], {} } %wrap4, { [1 x [1 x i32]], {} }* %ptrcast3
+
+ %gep5 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1, i32 1, i32 0
+ %ptrcast4 = bitcast [1 x <4 x i8>]* %gep5 to { {}, float, {} }*
+ %load4 = load { {}, float, {} }* %ptrcast4
+ %unwrap2 = extractvalue { {}, float, {} } %load4, 1
+ %valcast2 = bitcast float %unwrap2 to i32
+
+ ret i32 %valcast2
+; CHECK: ret i32
+}
+
+define void @PR14059.1(double* %d) {
+; In PR14059 a peculiar construct was identified as something that is used
+; pervasively in ARM's ABI-calling-convention lowering: the passing of a struct
+; of doubles via an array of i32 in order to place the data into integer
+; registers. This in turn was missed as an optimization by SROA due to the
+; partial loads and stores of integers to the double alloca we were trying to
+; form and promote. The solution is to widen the integer operations to be
+; whole-alloca operations, and perform the appropriate bitcasting on the
+; *values* rather than the pointers. When this works, partial reads and writes
+; via integers can be promoted away.
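+; (For reference: 1072693248 is 0x3FF00000, the upper 32 bits of the IEEE-754
+; encoding of 1.0. After the memsets zero the slot, the final 32-bit store
+; leaves the double holding exactly 1.0, so the math below computes *%d + 1.0.)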
+; CHECK: @PR14059.1
+; CHECK-NOT: alloca
+; CHECK: ret void
+
+entry:
+ %X.sroa.0.i = alloca double, align 8
+ %0 = bitcast double* %X.sroa.0.i to i8*
+ call void @llvm.lifetime.start(i64 -1, i8* %0)
+
+ ; Store to the low 32-bits...
+ %X.sroa.0.0.cast2.i = bitcast double* %X.sroa.0.i to i32*
+ store i32 0, i32* %X.sroa.0.0.cast2.i, align 8
+
+ ; Also use a memset to the middle 32-bits for fun.
+ %X.sroa.0.2.raw_idx2.i = getelementptr inbounds i8* %0, i32 2
+ call void @llvm.memset.p0i8.i64(i8* %X.sroa.0.2.raw_idx2.i, i8 0, i64 4, i32 1, i1 false)
+
+ ; Or a memset of the whole thing.
+ call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 8, i32 1, i1 false)
+
+ ; Write to the high 32-bits with a memcpy.
+ %X.sroa.0.4.raw_idx4.i = getelementptr inbounds i8* %0, i32 4
+ %d.raw = bitcast double* %d to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %X.sroa.0.4.raw_idx4.i, i8* %d.raw, i32 4, i32 1, i1 false)
+
+ ; Store to the high 32-bits...
+ %X.sroa.0.4.cast5.i = bitcast i8* %X.sroa.0.4.raw_idx4.i to i32*
+ store i32 1072693248, i32* %X.sroa.0.4.cast5.i, align 4
+
+ ; Do the actual math...
+ %X.sroa.0.0.load1.i = load double* %X.sroa.0.i, align 8
+ %accum.real.i = load double* %d, align 8
+ %add.r.i = fadd double %accum.real.i, %X.sroa.0.0.load1.i
+ store double %add.r.i, double* %d, align 8
+ call void @llvm.lifetime.end(i64 -1, i8* %0)
+ ret void
+}
+
+define i64 @PR14059.2({ float, float }* %phi) {
+; Check that SROA can split up alloca-wide integer loads and stores where the
+; underlying alloca has smaller components that are accessed independently. This
+; shows up particularly with ABI lowering patterns coming out of Clang that rely
+; on the particular register placement of a single large integer return value.
+; CHECK: @PR14059.2
+
+entry:
+ %retval = alloca { float, float }, align 4
+ ; CHECK-NOT: alloca
+
+ %0 = bitcast { float, float }* %retval to i64*
+ store i64 0, i64* %0
+ ; CHECK-NOT: store
+
+ %phi.realp = getelementptr inbounds { float, float }* %phi, i32 0, i32 0
+ %phi.real = load float* %phi.realp
+ %phi.imagp = getelementptr inbounds { float, float }* %phi, i32 0, i32 1
+ %phi.imag = load float* %phi.imagp
+ ; CHECK: %[[realp:.*]] = getelementptr inbounds { float, float }* %phi, i32 0, i32 0
+ ; CHECK-NEXT: %[[real:.*]] = load float* %[[realp]]
+ ; CHECK-NEXT: %[[imagp:.*]] = getelementptr inbounds { float, float }* %phi, i32 0, i32 1
+ ; CHECK-NEXT: %[[imag:.*]] = load float* %[[imagp]]
+
+ %real = getelementptr inbounds { float, float }* %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { float, float }* %retval, i32 0, i32 1
+ store float %phi.real, float* %real
+ store float %phi.imag, float* %imag
+ ; CHECK-NEXT: %[[real_convert:.*]] = bitcast float %[[real]] to i32
+ ; CHECK-NEXT: %[[imag_convert:.*]] = bitcast float %[[imag]] to i32
+ ; CHECK-NEXT: %[[imag_ext:.*]] = zext i32 %[[imag_convert]] to i64
+ ; CHECK-NEXT: %[[imag_shift:.*]] = shl i64 %[[imag_ext]], 32
+ ; CHECK-NEXT: %[[imag_mask:.*]] = and i64 undef, 4294967295
+ ; CHECK-NEXT: %[[imag_insert:.*]] = or i64 %[[imag_mask]], %[[imag_shift]]
+ ; CHECK-NEXT: %[[real_ext:.*]] = zext i32 %[[real_convert]] to i64
+ ; CHECK-NEXT: %[[real_mask:.*]] = and i64 %[[imag_insert]], -4294967296
+ ; CHECK-NEXT: %[[real_insert:.*]] = or i64 %[[real_mask]], %[[real_ext]]
+
+ %1 = load i64* %0, align 1
+ ret i64 %1
+ ; CHECK-NEXT: ret i64 %[[real_insert]]
+}
+
+define void @PR14105({ [16 x i8] }* %ptr) {
+; Ensure that when rewriting the GEP index '-1' for this alloca we preserve its
+; sign as negative. We use a volatile memcpy to ensure promotion never actually
+; occurs.
+; CHECK: @PR14105
+
+entry:
+ %a = alloca { [16 x i8] }, align 8
+; CHECK: alloca [16 x i8], align 8
+
+ %gep = getelementptr inbounds { [16 x i8] }* %ptr, i64 -1
+; CHECK-NEXT: getelementptr inbounds { [16 x i8] }* %ptr, i64 -1, i32 0, i64 0
+
+ %cast1 = bitcast { [16 x i8] }* %gep to i8*
+ %cast2 = bitcast { [16 x i8] }* %a to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast1, i8* %cast2, i32 16, i32 8, i1 true)
+ ret void
+; CHECK: ret
+}
diff --git a/test/Transforms/SROA/big-endian.ll b/test/Transforms/SROA/big-endian.ll
new file mode 100644
index 0000000000000..ce82d1f30b57c
--- /dev/null
+++ b/test/Transforms/SROA/big-endian.ll
@@ -0,0 +1,119 @@
+; RUN: opt < %s -sroa -S | FileCheck %s
+; RUN: opt < %s -sroa -force-ssa-updater -S | FileCheck %s
+
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
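+; Note the leading "E" in this datalayout: these tests run big-endian, in
+; contrast to the little-endian "e" layout used by basictest.ll.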
+
+define i8 @test1() {
+; We fully promote these to the i24 load or store size, resulting in just masks
+; and other operations that instcombine will fold, but no alloca. Note this is
+; the same as test12 in basictest.ll, but here we assert big-endian byte
+; ordering.
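+; In big-endian order byte 0 is the most significant, so below it is the
+; byte-0 value that gets shifted left by 16, mirroring the little-endian
+; shifts in basictest.ll's test12.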
+;
+; CHECK: @test1
+
+entry:
+ %a = alloca [3 x i8]
+ %b = alloca [3 x i8]
+; CHECK-NOT: alloca
+
+ %a0ptr = getelementptr [3 x i8]* %a, i64 0, i32 0
+ store i8 0, i8* %a0ptr
+ %a1ptr = getelementptr [3 x i8]* %a, i64 0, i32 1
+ store i8 0, i8* %a1ptr
+ %a2ptr = getelementptr [3 x i8]* %a, i64 0, i32 2
+ store i8 0, i8* %a2ptr
+ %aiptr = bitcast [3 x i8]* %a to i24*
+ %ai = load i24* %aiptr
+; CHECK-NOT: store
+; CHECK-NOT: load
+; CHECK: %[[ext2:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[mask2:.*]] = and i24 undef, -256
+; CHECK-NEXT: %[[insert2:.*]] = or i24 %[[mask2]], %[[ext2]]
+; CHECK-NEXT: %[[ext1:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[shift1:.*]] = shl i24 %[[ext1]], 8
+; CHECK-NEXT: %[[mask1:.*]] = and i24 %[[insert2]], -65281
+; CHECK-NEXT: %[[insert1:.*]] = or i24 %[[mask1]], %[[shift1]]
+; CHECK-NEXT: %[[ext0:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[shift0:.*]] = shl i24 %[[ext0]], 16
+; CHECK-NEXT: %[[mask0:.*]] = and i24 %[[insert1]], 65535
+; CHECK-NEXT: %[[insert0:.*]] = or i24 %[[mask0]], %[[shift0]]
+
+ %biptr = bitcast [3 x i8]* %b to i24*
+ store i24 %ai, i24* %biptr
+ %b0ptr = getelementptr [3 x i8]* %b, i64 0, i32 0
+ %b0 = load i8* %b0ptr
+ %b1ptr = getelementptr [3 x i8]* %b, i64 0, i32 1
+ %b1 = load i8* %b1ptr
+ %b2ptr = getelementptr [3 x i8]* %b, i64 0, i32 2
+ %b2 = load i8* %b2ptr
+; CHECK-NOT: store
+; CHECK-NOT: load
+; CHECK: %[[shift0:.*]] = lshr i24 %[[insert0]], 16
+; CHECK-NEXT: %[[trunc0:.*]] = trunc i24 %[[shift0]] to i8
+; CHECK-NEXT: %[[shift1:.*]] = lshr i24 %[[insert0]], 8
+; CHECK-NEXT: %[[trunc1:.*]] = trunc i24 %[[shift1]] to i8
+; CHECK-NEXT: %[[trunc2:.*]] = trunc i24 %[[insert0]] to i8
+
+ %bsum0 = add i8 %b0, %b1
+ %bsum1 = add i8 %bsum0, %b2
+ ret i8 %bsum1
+; CHECK: %[[sum0:.*]] = add i8 %[[trunc0]], %[[trunc1]]
+; CHECK-NEXT: %[[sum1:.*]] = add i8 %[[sum0]], %[[trunc2]]
+; CHECK-NEXT: ret i8 %[[sum1]]
+}
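+
+; For reference, on this big-endian target byte k of the [3 x i8] lands at
+; bits 8*(2-k) and up of the i24, so the mask constants above decompose as:
+;   -256   = 0xFFFF00 (clears byte 2, bits 0-7)
+;   -65281 = 0xFF00FF (clears byte 1, bits 8-15)
+;   65535  = 0x00FFFF (clears byte 0, bits 16-23)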
+
+define i64 @test2() {
+; Test for various mixed sizes of integer loads and stores all getting
+; promoted.
+;
+; CHECK: @test2
+
+entry:
+ %a = alloca [7 x i8]
+; CHECK-NOT: alloca
+
+ %a0ptr = getelementptr [7 x i8]* %a, i64 0, i32 0
+ %a1ptr = getelementptr [7 x i8]* %a, i64 0, i32 1
+ %a2ptr = getelementptr [7 x i8]* %a, i64 0, i32 2
+ %a3ptr = getelementptr [7 x i8]* %a, i64 0, i32 3
+
+; CHECK-NOT: store
+; CHECK-NOT: load
+
+ %a0i16ptr = bitcast i8* %a0ptr to i16*
+ store i16 1, i16* %a0i16ptr
+; CHECK: %[[mask0:.*]] = and i16 1, -16
+
+ %a1i4ptr = bitcast i8* %a1ptr to i4*
+ store i4 1, i4* %a1i4ptr
+; CHECK-NEXT: %[[insert0:.*]] = or i16 %[[mask0]], 1
+
+ store i8 1, i8* %a2ptr
+; CHECK-NEXT: %[[mask1:.*]] = and i40 undef, 4294967295
+; CHECK-NEXT: %[[insert1:.*]] = or i40 %[[mask1]], 4294967296
+
+ %a3i24ptr = bitcast i8* %a3ptr to i24*
+ store i24 1, i24* %a3i24ptr
+; CHECK-NEXT: %[[mask2:.*]] = and i40 %[[insert1]], -4294967041
+; CHECK-NEXT: %[[insert2:.*]] = or i40 %[[mask2]], 256
+
+ %a2i40ptr = bitcast i8* %a2ptr to i40*
+ store i40 1, i40* %a2i40ptr
+; CHECK-NEXT: %[[ext3:.*]] = zext i40 1 to i56
+; CHECK-NEXT: %[[mask3:.*]] = and i56 undef, -1099511627776
+; CHECK-NEXT: %[[insert3:.*]] = or i56 %[[mask3]], %[[ext3]]
+
+; CHECK-NOT: store
+; CHECK-NOT: load
+
+ %aiptr = bitcast [7 x i8]* %a to i56*
+ %ai = load i56* %aiptr
+ %ret = zext i56 %ai to i64
+ ret i64 %ret
+; CHECK-NEXT: %[[ext4:.*]] = zext i16 %[[insert0]] to i56
+; CHECK-NEXT: %[[shift4:.*]] = shl i56 %[[ext4]], 40
+; CHECK-NEXT: %[[mask4:.*]] = and i56 %[[insert3]], 1099511627775
+; CHECK-NEXT: %[[insert4:.*]] = or i56 %[[mask4]], %[[shift4]]
+; CHECK-NEXT: %[[ret:.*]] = zext i56 %[[insert4]] to i64
+; CHECK-NEXT: ret i64 %[[ret]]
+}
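+
+; For reference, the wide constants above follow the same big-endian layout,
+; with byte k of an N-byte slice at bits 8*(N-1-k) and up:
+;   4294967295     = 2^32 - 1  (the low four bytes of the i40 slice)
+;   4294967296     = 2^32      (i8 1 placed in the top byte of the i40 slice)
+;   -1099511627776 = -(2^40)   (keeps only the top two bytes of the i56)
+;   1099511627775  = 2^40 - 1  (keeps only the low five bytes of the i56)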
diff --git a/test/Transforms/SROA/fca.ll b/test/Transforms/SROA/fca.ll
new file mode 100644
index 0000000000000..c30a5cc974fc7
--- /dev/null
+++ b/test/Transforms/SROA/fca.ll
@@ -0,0 +1,49 @@
+; RUN: opt < %s -sroa -S | FileCheck %s
+; RUN: opt < %s -sroa -force-ssa-updater -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
+
+define { i32, i32 } @test0(i32 %x, i32 %y) {
+; CHECK: @test0
+; CHECK-NOT: alloca
+; CHECK: insertvalue { i32, i32 }
+; CHECK: insertvalue { i32, i32 }
+; CHECK: ret { i32, i32 }
+
+entry:
+ %a = alloca { i32, i32 }
+
+ store { i32, i32 } undef, { i32, i32 }* %a
+
+ %gep1 = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 0
+ store i32 %x, i32* %gep1
+ %gep2 = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 1
+ store i32 %y, i32* %gep2
+
+ %result = load { i32, i32 }* %a
+ ret { i32, i32 } %result
+}
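+
+; A rough sketch of the promoted form matched above (the value names here are
+; illustrative, not checked):
+;   %fca.0 = insertvalue { i32, i32 } undef, i32 %x, 0
+;   %fca.1 = insertvalue { i32, i32 } %fca.0, i32 %y, 1
+;   ret { i32, i32 } %fca.1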
+
+define { i32, i32 } @test1(i32 %x, i32 %y) {
+; FIXME: This may be too conservative. Duncan argues that we are allowed to
+; split the volatile load and store here but must produce volatile scalar loads
+; and stores from them.
+; CHECK: @test1
+; CHECK: alloca
+; CHECK: alloca
+; CHECK: load volatile { i32, i32 }*
+; CHECK: store volatile { i32, i32 }
+; CHECK: ret { i32, i32 }
+
+entry:
+ %a = alloca { i32, i32 }
+ %b = alloca { i32, i32 }
+
+ %gep1 = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 0
+ store i32 %x, i32* %gep1
+ %gep2 = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 1
+ store i32 %y, i32* %gep2
+
+ %result = load volatile { i32, i32 }* %a
+ store volatile { i32, i32 } %result, { i32, i32 }* %b
+ ret { i32, i32 } %result
+}
diff --git a/test/Transforms/SROA/lit.local.cfg b/test/Transforms/SROA/lit.local.cfg
new file mode 100644
index 0000000000000..c6106e4746f2d
--- /dev/null
+++ b/test/Transforms/SROA/lit.local.cfg
@@ -0,0 +1 @@
+config.suffixes = ['.ll']
diff --git a/test/Transforms/SROA/phi-and-select.ll b/test/Transforms/SROA/phi-and-select.ll
new file mode 100644
index 0000000000000..921016a9c24b3
--- /dev/null
+++ b/test/Transforms/SROA/phi-and-select.ll
@@ -0,0 +1,427 @@
+; RUN: opt < %s -sroa -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
+
+define i32 @test1() {
+; CHECK: @test1
+entry:
+ %a = alloca [2 x i32]
+; CHECK-NOT: alloca
+
+ %a0 = getelementptr [2 x i32]* %a, i64 0, i32 0
+ %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
+ store i32 0, i32* %a0
+ store i32 1, i32* %a1
+ %v0 = load i32* %a0
+ %v1 = load i32* %a1
+; CHECK-NOT: store
+; CHECK-NOT: load
+
+ %cond = icmp sle i32 %v0, %v1
+ br i1 %cond, label %then, label %exit
+
+then:
+ br label %exit
+
+exit:
+ %phi = phi i32* [ %a1, %then ], [ %a0, %entry ]
+; CHECK: phi i32 [ 1, %{{.*}} ], [ 0, %{{.*}} ]
+
+ %result = load i32* %phi
+ ret i32 %result
+}
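+
+; Both slices of %a are promoted to the constants stored into them, and the
+; load through the pointer PHI is speculated away by phi-ing the loaded values
+; instead, roughly (names illustrative):
+;   %phi = phi i32 [ 1, %then ], [ 0, %entry ]
+;   ret i32 %phi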
+
+define i32 @test2() {
+; CHECK: @test2
+entry:
+ %a = alloca [2 x i32]
+; CHECK-NOT: alloca
+
+ %a0 = getelementptr [2 x i32]* %a, i64 0, i32 0
+ %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
+ store i32 0, i32* %a0
+ store i32 1, i32* %a1
+ %v0 = load i32* %a0
+ %v1 = load i32* %a1
+; CHECK-NOT: store
+; CHECK-NOT: load
+
+ %cond = icmp sle i32 %v0, %v1
+ %select = select i1 %cond, i32* %a1, i32* %a0
+; CHECK: select i1 %{{.*}}, i32 1, i32 0
+
+ %result = load i32* %select
+ ret i32 %result
+}
+
+define i32 @test3(i32 %x) {
+; CHECK: @test3
+entry:
+ %a = alloca [2 x i32]
+; CHECK-NOT: alloca
+
+ ; Note that we build redundant GEPs here to ensure that having different GEPs
+  ; into the same alloca partition continues to work with PHI speculation. This
+ ; was the underlying cause of PR13926.
+ %a0 = getelementptr [2 x i32]* %a, i64 0, i32 0
+ %a0b = getelementptr [2 x i32]* %a, i64 0, i32 0
+ %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
+ %a1b = getelementptr [2 x i32]* %a, i64 0, i32 1
+ store i32 0, i32* %a0
+ store i32 1, i32* %a1
+; CHECK-NOT: store
+
+ switch i32 %x, label %bb0 [ i32 1, label %bb1
+ i32 2, label %bb2
+ i32 3, label %bb3
+ i32 4, label %bb4
+ i32 5, label %bb5
+ i32 6, label %bb6
+ i32 7, label %bb7 ]
+
+bb0:
+ br label %exit
+bb1:
+ br label %exit
+bb2:
+ br label %exit
+bb3:
+ br label %exit
+bb4:
+ br label %exit
+bb5:
+ br label %exit
+bb6:
+ br label %exit
+bb7:
+ br label %exit
+
+exit:
+ %phi = phi i32* [ %a1, %bb0 ], [ %a0, %bb1 ], [ %a0, %bb2 ], [ %a1, %bb3 ],
+ [ %a1b, %bb4 ], [ %a0b, %bb5 ], [ %a0b, %bb6 ], [ %a1b, %bb7 ]
+; CHECK: phi i32 [ 1, %{{.*}} ], [ 0, %{{.*}} ], [ 0, %{{.*}} ], [ 1, %{{.*}} ], [ 1, %{{.*}} ], [ 0, %{{.*}} ], [ 0, %{{.*}} ], [ 1, %{{.*}} ]
+
+ %result = load i32* %phi
+ ret i32 %result
+}
+
+define i32 @test4() {
+; CHECK: @test4
+entry:
+ %a = alloca [2 x i32]
+; CHECK-NOT: alloca
+
+ %a0 = getelementptr [2 x i32]* %a, i64 0, i32 0
+ %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
+ store i32 0, i32* %a0
+ store i32 1, i32* %a1
+ %v0 = load i32* %a0
+ %v1 = load i32* %a1
+; CHECK-NOT: store
+; CHECK-NOT: load
+
+ %cond = icmp sle i32 %v0, %v1
+ %select = select i1 %cond, i32* %a0, i32* %a0
+; CHECK-NOT: select
+
+ %result = load i32* %select
+ ret i32 %result
+; CHECK: ret i32 0
+}
+
+define i32 @test5(i32* %b) {
+; CHECK: @test5
+entry:
+ %a = alloca [2 x i32]
+; CHECK-NOT: alloca
+
+ %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
+ store i32 1, i32* %a1
+; CHECK-NOT: store
+
+ %select = select i1 true, i32* %a1, i32* %b
+; CHECK-NOT: select
+
+ %result = load i32* %select
+; CHECK-NOT: load
+
+ ret i32 %result
+; CHECK: ret i32 1
+}
+
+declare void @f(i32*, i32*)
+
+define i32 @test6(i32* %b) {
+; CHECK: @test6
+entry:
+ %a = alloca [2 x i32]
+ %c = alloca i32
+; CHECK-NOT: alloca
+
+ %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
+ store i32 1, i32* %a1
+
+ %select = select i1 true, i32* %a1, i32* %b
+ %select2 = select i1 false, i32* %a1, i32* %b
+ %select3 = select i1 false, i32* %c, i32* %b
+; CHECK: %[[select2:.*]] = select i1 false, i32* undef, i32* %b
+; CHECK: %[[select3:.*]] = select i1 false, i32* undef, i32* %b
+
+ ; Note, this would potentially escape the alloca pointer except for the
+ ; constant folding of the select.
+ call void @f(i32* %select2, i32* %select3)
+; CHECK: call void @f(i32* %[[select2]], i32* %[[select3]])
+
+ %result = load i32* %select
+; CHECK-NOT: load
+
+ %dead = load i32* %c
+
+ ret i32 %result
+; CHECK: ret i32 1
+}
+
+define i32 @test7() {
+; CHECK: @test7
+; CHECK-NOT: alloca
+
+entry:
+ %X = alloca i32
+ br i1 undef, label %good, label %bad
+
+good:
+ %Y1 = getelementptr i32* %X, i64 0
+ store i32 0, i32* %Y1
+ br label %exit
+
+bad:
+ %Y2 = getelementptr i32* %X, i64 1
+ store i32 0, i32* %Y2
+ br label %exit
+
+exit:
+ %P = phi i32* [ %Y1, %good ], [ %Y2, %bad ]
+; CHECK: %[[phi:.*]] = phi i32 [ 0, %good ],
+ %Z2 = load i32* %P
+ ret i32 %Z2
+; CHECK: ret i32 %[[phi]]
+}
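+
+; Only the %good incoming value is pinned above: %Y2 points one i32 past the
+; end of the alloca, so the store through it is out of bounds and the %bad
+; incoming value folds to undef.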
+
+define i32 @test8(i32 %b, i32* %ptr) {
+; Ensure that we rewrite allocas to the used type when that use is hidden by
+; a PHI that can be speculated.
+; CHECK: @test8
+; CHECK-NOT: alloca
+; CHECK-NOT: load
+; CHECK: %[[value:.*]] = load i32* %ptr
+; CHECK-NOT: load
+; CHECK: %[[result:.*]] = phi i32 [ undef, %else ], [ %[[value]], %then ]
+; CHECK-NEXT: ret i32 %[[result]]
+
+entry:
+ %f = alloca float
+ %test = icmp ne i32 %b, 0
+ br i1 %test, label %then, label %else
+
+then:
+ br label %exit
+
+else:
+ %bitcast = bitcast float* %f to i32*
+ br label %exit
+
+exit:
+ %phi = phi i32* [ %bitcast, %else ], [ %ptr, %then ]
+ %loaded = load i32* %phi, align 4
+ ret i32 %loaded
+}
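+
+; Speculation sketch: the load through %phi is hoisted into the predecessors,
+; where the promoted (never-stored) float alloca yields undef on the %else
+; path and a plain load of %ptr feeds the %then path.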
+
+define i32 @test9(i32 %b, i32* %ptr) {
+; Same as @test8 but for a select rather than a PHI node.
+; CHECK: @test9
+; CHECK-NOT: alloca
+; CHECK-NOT: load
+; CHECK: %[[value:.*]] = load i32* %ptr
+; CHECK-NOT: load
+; CHECK: %[[result:.*]] = select i1 %{{.*}}, i32 undef, i32 %[[value]]
+; CHECK-NEXT: ret i32 %[[result]]
+
+entry:
+ %f = alloca float
+ store i32 0, i32* %ptr
+ %test = icmp ne i32 %b, 0
+ %bitcast = bitcast float* %f to i32*
+ %select = select i1 %test, i32* %bitcast, i32* %ptr
+ %loaded = load i32* %select, align 4
+ ret i32 %loaded
+}
+
+define float @test10(i32 %b, float* %ptr) {
+; Don't try to promote allocas which are not eligible for it even after
+; rewriting due to the necessity of inserting bitcasts when speculating a PHI
+; node.
+; CHECK: @test10
+; CHECK: %[[alloca:.*]] = alloca
+; CHECK: %[[argvalue:.*]] = load float* %ptr
+; CHECK: %[[cast:.*]] = bitcast double* %[[alloca]] to float*
+; CHECK: %[[allocavalue:.*]] = load float* %[[cast]]
+; CHECK: %[[result:.*]] = phi float [ %[[allocavalue]], %else ], [ %[[argvalue]], %then ]
+; CHECK-NEXT: ret float %[[result]]
+
+entry:
+ %f = alloca double
+ store double 0.0, double* %f
+ %test = icmp ne i32 %b, 0
+ br i1 %test, label %then, label %else
+
+then:
+ br label %exit
+
+else:
+ %bitcast = bitcast double* %f to float*
+ br label %exit
+
+exit:
+ %phi = phi float* [ %bitcast, %else ], [ %ptr, %then ]
+ %loaded = load float* %phi, align 4
+ ret float %loaded
+}
+
+define float @test11(i32 %b, float* %ptr) {
+; Same as @test10 but for a select rather than a PHI node.
+; CHECK: @test11
+; CHECK: %[[alloca:.*]] = alloca
+; CHECK: %[[cast:.*]] = bitcast double* %[[alloca]] to float*
+; CHECK: %[[allocavalue:.*]] = load float* %[[cast]]
+; CHECK: %[[argvalue:.*]] = load float* %ptr
+; CHECK: %[[result:.*]] = select i1 %{{.*}}, float %[[allocavalue]], float %[[argvalue]]
+; CHECK-NEXT: ret float %[[result]]
+
+entry:
+ %f = alloca double
+ store double 0.0, double* %f
+ store float 0.0, float* %ptr
+ %test = icmp ne i32 %b, 0
+ %bitcast = bitcast double* %f to float*
+ %select = select i1 %test, float* %bitcast, float* %ptr
+ %loaded = load float* %select, align 4
+ ret float %loaded
+}
+
+define i32 @test12(i32 %x, i32* %p) {
+; Ensure we don't crash or fail to nuke dead selects of allocas if no load is
+; ever found.
+; CHECK: @test12
+; CHECK-NOT: alloca
+; CHECK-NOT: select
+; CHECK: ret i32 %x
+
+entry:
+ %a = alloca i32
+ store i32 %x, i32* %a
+ %dead = select i1 undef, i32* %a, i32* %p
+ %load = load i32* %a
+ ret i32 %load
+}
+
+define i32 @test13(i32 %x, i32* %p) {
+; Ensure we don't crash or fail to nuke dead phis of allocas if no load is ever
+; found.
+; CHECK: @test13
+; CHECK-NOT: alloca
+; CHECK-NOT: phi
+; CHECK: ret i32 %x
+
+entry:
+ %a = alloca i32
+ store i32 %x, i32* %a
+ br label %loop
+
+loop:
+ %phi = phi i32* [ %p, %entry ], [ %a, %loop ]
+ br i1 undef, label %loop, label %exit
+
+exit:
+ %load = load i32* %a
+ ret i32 %load
+}
+
+define i32 @PR13905() {
+; Check a pattern where we have a chain of dead phi nodes to ensure they are
+; deleted and promotion can proceed.
+; CHECK: @PR13905
+; CHECK-NOT: alloca i32
+; CHECK: ret i32 undef
+
+entry:
+ %h = alloca i32
+ store i32 0, i32* %h
+ br i1 undef, label %loop1, label %exit
+
+loop1:
+ %phi1 = phi i32* [ null, %entry ], [ %h, %loop1 ], [ %h, %loop2 ]
+ br i1 undef, label %loop1, label %loop2
+
+loop2:
+ br i1 undef, label %loop1, label %exit
+
+exit:
+ %phi2 = phi i32* [ %phi1, %loop2 ], [ null, %entry ]
+ ret i32 undef
+}
+
+define i32 @PR13906() {
+; Another pattern which can lead to crashes due to failing to clear out dead
+; PHI nodes or select nodes. This triggers subtly differently from the above
+; cases because the PHI node is (recursively) alive, but the select is dead.
+; CHECK: @PR13906
+; CHECK-NOT: alloca
+
+entry:
+ %c = alloca i32
+ store i32 0, i32* %c
+ br label %for.cond
+
+for.cond:
+ %d.0 = phi i32* [ undef, %entry ], [ %c, %if.then ], [ %d.0, %for.cond ]
+ br i1 undef, label %if.then, label %for.cond
+
+if.then:
+ %tmpcast.d.0 = select i1 undef, i32* %c, i32* %d.0
+ br label %for.cond
+}
+
+define i64 @PR14132(i1 %flag) {
+; CHECK: @PR14132
+; Here we form a PHI-node by promoting the pointer alloca first, and then in
+; order to promote the other two allocas, we speculate the load of the
+; now-phi-node-pointer. In doing so we end up loading a 64-bit value from an i8
+; alloca, which is completely bogus. However, we were asserting on trying to
+; rewrite it. Now it is replaced with undef. Eventually we may replace it with
+; unreachable, at which point even the CFG here will go away.
+entry:
+ %a = alloca i64
+ %b = alloca i8
+ %ptr = alloca i64*
+; CHECK-NOT: alloca
+
+ %ptr.cast = bitcast i64** %ptr to i8**
+ store i64 0, i64* %a
+ store i8 1, i8* %b
+ store i64* %a, i64** %ptr
+ br i1 %flag, label %if.then, label %if.end
+
+if.then:
+ store i8* %b, i8** %ptr.cast
+ br label %if.end
+
+if.end:
+ %tmp = load i64** %ptr
+ %result = load i64* %tmp
+; CHECK-NOT: store
+; CHECK-NOT: load
+; CHECK: %[[result:.*]] = phi i64 [ undef, %if.then ], [ 0, %entry ]
+
+ ret i64 %result
+; CHECK-NEXT: ret i64 %[[result]]
+}
diff --git a/test/Transforms/SROA/vector-promotion.ll b/test/Transforms/SROA/vector-promotion.ll
new file mode 100644
index 0000000000000..ea28f5d1a647a
--- /dev/null
+++ b/test/Transforms/SROA/vector-promotion.ll
@@ -0,0 +1,267 @@
+; RUN: opt < %s -sroa -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
+
+%S1 = type { i64, [42 x float] }
+
+define i32 @test1(<4 x i32> %x, <4 x i32> %y) {
+; CHECK: @test1
+entry:
+ %a = alloca [2 x <4 x i32>]
+; CHECK-NOT: alloca
+
+ %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+ store <4 x i32> %x, <4 x i32>* %a.x
+ %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+ store <4 x i32> %y, <4 x i32>* %a.y
+; CHECK-NOT: store
+
+ %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+ %tmp1 = load i32* %a.tmp1
+ %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+ %tmp2 = load i32* %a.tmp2
+ %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+ %tmp3 = load i32* %a.tmp3
+; CHECK-NOT: load
+; CHECK: extractelement <4 x i32> %x, i32 2
+; CHECK-NEXT: extractelement <4 x i32> %y, i32 3
+; CHECK-NEXT: extractelement <4 x i32> %y, i32 0
+
+ %tmp4 = add i32 %tmp1, %tmp2
+ %tmp5 = add i32 %tmp3, %tmp4
+ ret i32 %tmp5
+; CHECK-NEXT: add
+; CHECK-NEXT: add
+; CHECK-NEXT: ret
+}
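+
+; The whole-vector stores are promoted directly to the %x and %y SSA values,
+; so each scalar element load above becomes a single extractelement, e.g.
+; (illustratively) %tmp1 = extractelement <4 x i32> %x, i32 2.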
+
+define i32 @test2(<4 x i32> %x, <4 x i32> %y) {
+; CHECK: @test2
+; FIXME: This should be handled!
+entry:
+ %a = alloca [2 x <4 x i32>]
+; CHECK: alloca <4 x i32>
+
+ %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+ store <4 x i32> %x, <4 x i32>* %a.x
+ %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+ store <4 x i32> %y, <4 x i32>* %a.y
+
+ %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+ %tmp1 = load i32* %a.tmp1
+ %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+ %tmp2 = load i32* %a.tmp2
+ %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+ %a.tmp3.cast = bitcast i32* %a.tmp3 to <2 x i32>*
+ %tmp3.vec = load <2 x i32>* %a.tmp3.cast
+ %tmp3 = extractelement <2 x i32> %tmp3.vec, i32 0
+
+ %tmp4 = add i32 %tmp1, %tmp2
+ %tmp5 = add i32 %tmp3, %tmp4
+ ret i32 %tmp5
+}
+
+define i32 @test3(<4 x i32> %x, <4 x i32> %y) {
+; CHECK: @test3
+entry:
+ %a = alloca [2 x <4 x i32>]
+; CHECK-NOT: alloca
+
+ %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+ store <4 x i32> %x, <4 x i32>* %a.x
+ %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+ store <4 x i32> %y, <4 x i32>* %a.y
+; CHECK-NOT: store
+
+ %a.y.cast = bitcast <4 x i32>* %a.y to i8*
+ call void @llvm.memset.p0i8.i32(i8* %a.y.cast, i8 0, i32 16, i32 1, i1 false)
+; CHECK-NOT: memset
+
+ %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+ %a.tmp1.cast = bitcast i32* %a.tmp1 to i8*
+ call void @llvm.memset.p0i8.i32(i8* %a.tmp1.cast, i8 -1, i32 4, i32 1, i1 false)
+ %tmp1 = load i32* %a.tmp1
+ %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+ %tmp2 = load i32* %a.tmp2
+ %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+ %tmp3 = load i32* %a.tmp3
+; CHECK-NOT: load
+; CHECK: %[[insert:.*]] = insertelement <4 x i32> %x, i32 -1, i32 2
+; CHECK-NEXT: extractelement <4 x i32> %[[insert]], i32 2
+; CHECK-NEXT: extractelement <4 x i32> zeroinitializer, i32 3
+; CHECK-NEXT: extractelement <4 x i32> zeroinitializer, i32 0
+
+ %tmp4 = add i32 %tmp1, %tmp2
+ %tmp5 = add i32 %tmp3, %tmp4
+ ret i32 %tmp5
+; CHECK-NEXT: add
+; CHECK-NEXT: add
+; CHECK-NEXT: ret
+}
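+
+; The four-byte memset of 0xFF folds to an insertelement of i32 -1 into %x,
+; and the full-vector memset of zero simply replaces %y with zeroinitializer.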
+
+define i32 @test4(<4 x i32> %x, <4 x i32> %y, <4 x i32>* %z) {
+; CHECK: @test4
+entry:
+ %a = alloca [2 x <4 x i32>]
+; CHECK-NOT: alloca
+
+ %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+ store <4 x i32> %x, <4 x i32>* %a.x
+ %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+ store <4 x i32> %y, <4 x i32>* %a.y
+; CHECK-NOT: store
+
+ %a.y.cast = bitcast <4 x i32>* %a.y to i8*
+ %z.cast = bitcast <4 x i32>* %z to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.y.cast, i8* %z.cast, i32 16, i32 1, i1 false)
+; CHECK-NOT: memcpy
+
+ %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+ %a.tmp1.cast = bitcast i32* %a.tmp1 to i8*
+ %z.tmp1 = getelementptr inbounds <4 x i32>* %z, i64 0, i64 2
+ %z.tmp1.cast = bitcast i32* %z.tmp1 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.tmp1.cast, i8* %z.tmp1.cast, i32 4, i32 1, i1 false)
+ %tmp1 = load i32* %a.tmp1
+ %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+ %tmp2 = load i32* %a.tmp2
+ %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+ %tmp3 = load i32* %a.tmp3
+; CHECK-NOT: memcpy
+; CHECK: %[[load:.*]] = load <4 x i32>* %z
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds <4 x i32>* %z, i64 0, i64 2
+; CHECK-NEXT: %[[element_load:.*]] = load i32* %[[gep]]
+; CHECK-NEXT: %[[insert:.*]] = insertelement <4 x i32> %x, i32 %[[element_load]], i32 2
+; CHECK-NEXT: extractelement <4 x i32> %[[insert]], i32 2
+; CHECK-NEXT: extractelement <4 x i32> %[[load]], i32 3
+; CHECK-NEXT: extractelement <4 x i32> %[[load]], i32 0
+
+ %tmp4 = add i32 %tmp1, %tmp2
+ %tmp5 = add i32 %tmp3, %tmp4
+ ret i32 %tmp5
+; CHECK-NEXT: add
+; CHECK-NEXT: add
+; CHECK-NEXT: ret
+}
+
+define i32 @test5(<4 x i32> %x, <4 x i32> %y, <4 x i32>* %z) {
+; CHECK: @test5
+; The same as the above, but with reversed source and destination for the
+; element memcpy, and a self copy.
+entry:
+ %a = alloca [2 x <4 x i32>]
+; CHECK-NOT: alloca
+
+ %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+ store <4 x i32> %x, <4 x i32>* %a.x
+ %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+ store <4 x i32> %y, <4 x i32>* %a.y
+; CHECK-NOT: store
+
+ %a.y.cast = bitcast <4 x i32>* %a.y to i8*
+ %a.x.cast = bitcast <4 x i32>* %a.x to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.x.cast, i8* %a.y.cast, i32 16, i32 1, i1 false)
+; CHECK-NOT: memcpy
+
+ %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+ %a.tmp1.cast = bitcast i32* %a.tmp1 to i8*
+ %z.tmp1 = getelementptr inbounds <4 x i32>* %z, i64 0, i64 2
+ %z.tmp1.cast = bitcast i32* %z.tmp1 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %z.tmp1.cast, i8* %a.tmp1.cast, i32 4, i32 1, i1 false)
+ %tmp1 = load i32* %a.tmp1
+ %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+ %tmp2 = load i32* %a.tmp2
+ %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+ %tmp3 = load i32* %a.tmp3
+; CHECK-NOT: memcpy
+; CHECK: %[[gep:.*]] = getelementptr inbounds <4 x i32>* %z, i64 0, i64 2
+; CHECK-NEXT: %[[extract:.*]] = extractelement <4 x i32> %y, i32 2
+; CHECK-NEXT: store i32 %[[extract]], i32* %[[gep]]
+; CHECK-NEXT: extractelement <4 x i32> %y, i32 2
+; CHECK-NEXT: extractelement <4 x i32> %y, i32 3
+; CHECK-NEXT: extractelement <4 x i32> %y, i32 0
+
+ %tmp4 = add i32 %tmp1, %tmp2
+ %tmp5 = add i32 %tmp3, %tmp4
+ ret i32 %tmp5
+; CHECK-NEXT: add
+; CHECK-NEXT: add
+; CHECK-NEXT: ret
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
+
+define i64 @test6(<4 x i64> %x, <4 x i64> %y, i64 %n) {
+; CHECK: @test6
+; The old scalarrepl pass would wrongly drop the store to the second alloca.
+; PR13254
+ %tmp = alloca { <4 x i64>, <4 x i64> }
+ %p0 = getelementptr inbounds { <4 x i64>, <4 x i64> }* %tmp, i32 0, i32 0
+ store <4 x i64> %x, <4 x i64>* %p0
+; CHECK: store <4 x i64> %x,
+ %p1 = getelementptr inbounds { <4 x i64>, <4 x i64> }* %tmp, i32 0, i32 1
+ store <4 x i64> %y, <4 x i64>* %p1
+; CHECK: store <4 x i64> %y,
+ %addr = getelementptr inbounds { <4 x i64>, <4 x i64> }* %tmp, i32 0, i32 0, i64 %n
+ %res = load i64* %addr, align 4
+ ret i64 %res
+}
+
+define i32 @PR14212() {
+; CHECK: @PR14212
+; This caused a crash when "splitting" the load of the i32 in order to promote
+; the store of <3 x i8> properly. Heavily reduced from an OpenCL test case.
+entry:
+ %retval = alloca <3 x i8>, align 4
+; CHECK-NOT: alloca
+
+ store <3 x i8> undef, <3 x i8>* %retval, align 4
+ %cast = bitcast <3 x i8>* %retval to i32*
+ %load = load i32* %cast, align 4
+ ret i32 %load
+; CHECK: ret i32
+}
+
+define <2 x i8> @PR14349.1(i32 %x) {
+; CHECK: @PR14349.1
+; The first testcase for broken SROA rewriting of split integer loads and
+; stores due to smaller vector loads and stores. This particular test ensures
+; that we can rewrite a split store of an integer to a store of a vector.
+entry:
+ %a = alloca i32
+; CHECK-NOT: alloca
+
+ store i32 %x, i32* %a
+; CHECK-NOT: store
+
+ %cast = bitcast i32* %a to <2 x i8>*
+ %vec = load <2 x i8>* %cast
+; CHECK-NOT: load
+
+ ret <2 x i8> %vec
+; CHECK: %[[trunc:.*]] = trunc i32 %x to i16
+; CHECK: %[[cast:.*]] = bitcast i16 %[[trunc]] to <2 x i8>
+; CHECK: ret <2 x i8> %[[cast]]
+}
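+
+; The <2 x i8> slice covers only the low 16 bits of the stored i32 on this
+; little-endian target, hence the rewrite above: truncate %x to i16, then
+; bitcast the i16 to <2 x i8>.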
+
+define i32 @PR14349.2(<2 x i8> %x) {
+; CHECK: @PR14349.2
+; The second testcase for broken SROA rewriting of split integer loads and
+; stores due to smaller vector loads and stores. This particular test ensures
+; that we can rewrite a split load of an integer to a load of a vector.
+entry:
+ %a = alloca i32
+; CHECK-NOT: alloca
+
+ %cast = bitcast i32* %a to <2 x i8>*
+ store <2 x i8> %x, <2 x i8>* %cast
+; CHECK-NOT: store
+
+ %int = load i32* %a
+; CHECK-NOT: load
+
+ ret i32 %int
+; CHECK: %[[cast:.*]] = bitcast <2 x i8> %x to i16
+; CHECK: %[[ext:.*]] = zext i16 %[[cast]] to i32
+; CHECK: %[[insert:.*]] = or i32 %{{.*}}, %[[ext]]
+; CHECK: ret i32 %[[insert]]
+}
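+
+; This is the inverse rewrite: the <2 x i8> is bitcast to i16, zero-extended
+; to i32, and merged into the remaining (undefined) high bits with an 'or',
+; which is why only the low 16 bits of the result are pinned down.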