diff options
Diffstat (limited to 'test/Instrumentation/MemorySanitizer')
-rw-r--r-- | test/Instrumentation/MemorySanitizer/X86/vararg.ll | 15 | ||||
-rw-r--r-- | test/Instrumentation/MemorySanitizer/atomics.ll | 189 | ||||
-rw-r--r-- | test/Instrumentation/MemorySanitizer/lit.local.cfg | 1 | ||||
-rw-r--r-- | test/Instrumentation/MemorySanitizer/msan_basic.ll | 190 | ||||
-rw-r--r-- | test/Instrumentation/MemorySanitizer/return_from_main.ll | 18 | ||||
-rw-r--r-- | test/Instrumentation/MemorySanitizer/vector_cvt.ll | 66 | ||||
-rw-r--r-- | test/Instrumentation/MemorySanitizer/wrap_indirect_calls.ll | 34 |
7 files changed, 506 insertions, 7 deletions
diff --git a/test/Instrumentation/MemorySanitizer/X86/vararg.ll b/test/Instrumentation/MemorySanitizer/X86/vararg.ll new file mode 100644 index 0000000000000..518c3dbf41d36 --- /dev/null +++ b/test/Instrumentation/MemorySanitizer/X86/vararg.ll @@ -0,0 +1,15 @@ +; RUN: opt < %s -msan -msan-check-access-address=0 -S +; Test that code using va_start can be compiled on i386. + +target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128" +target triple = "i386-unknown-linux-gnu" + +define void @VaStart(i8* %s, ...) { +entry: + %vl = alloca i8*, align 4 + %vl1 = bitcast i8** %vl to i8* + call void @llvm.va_start(i8* %vl1) + ret void +} + +declare void @llvm.va_start(i8*) diff --git a/test/Instrumentation/MemorySanitizer/atomics.ll b/test/Instrumentation/MemorySanitizer/atomics.ll new file mode 100644 index 0000000000000..ff0245262cb32 --- /dev/null +++ b/test/Instrumentation/MemorySanitizer/atomics.ll @@ -0,0 +1,189 @@ +; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +; atomicrmw xchg: store clean shadow, return clean shadow + +define i32 @AtomicRmwXchg(i32* %p, i32 %x) sanitize_memory { +entry: + %0 = atomicrmw xchg i32* %p, i32 %x seq_cst + ret i32 %0 +} + +; CHECK: @AtomicRmwXchg +; CHECK: store i32 0, +; CHECK: atomicrmw xchg {{.*}} seq_cst +; CHECK: store i32 0, {{.*}} @__msan_retval_tls +; CHECK: ret i32 + + +; atomicrmw max: exactly the same as above + +define i32 @AtomicRmwMax(i32* %p, i32 %x) sanitize_memory { +entry: + %0 = atomicrmw max i32* %p, i32 %x seq_cst + ret i32 %0 +} + +; CHECK: @AtomicRmwMax +; CHECK: store i32 0, +; CHECK: atomicrmw max {{.*}} seq_cst +; CHECK: store i32 0, {{.*}} @__msan_retval_tls +; CHECK: ret i32 + + +; 
cmpxchg: the same as above, but also check %a shadow + +define i32 @Cmpxchg(i32* %p, i32 %a, i32 %b) sanitize_memory { +entry: + %0 = cmpxchg i32* %p, i32 %a, i32 %b seq_cst + ret i32 %0 +} + +; CHECK: @Cmpxchg +; CHECK: store i32 0, +; CHECK: icmp +; CHECK: br +; CHECK: @__msan_warning +; CHECK: cmpxchg {{.*}} seq_cst +; CHECK: store i32 0, {{.*}} @__msan_retval_tls +; CHECK: ret i32 + + +; relaxed cmpxchg: bump up to "release" + +define i32 @CmpxchgMonotonic(i32* %p, i32 %a, i32 %b) sanitize_memory { +entry: + %0 = cmpxchg i32* %p, i32 %a, i32 %b monotonic + ret i32 %0 +} + +; CHECK: @CmpxchgMonotonic +; CHECK: store i32 0, +; CHECK: icmp +; CHECK: br +; CHECK: @__msan_warning +; CHECK: cmpxchg {{.*}} release +; CHECK: store i32 0, {{.*}} @__msan_retval_tls +; CHECK: ret i32 + + +; atomic load: preserve alignment, load shadow value after app value + +define i32 @AtomicLoad(i32* %p) sanitize_memory { +entry: + %0 = load atomic i32* %p seq_cst, align 16 + ret i32 %0 +} + +; CHECK: @AtomicLoad +; CHECK: load atomic i32* {{.*}} seq_cst, align 16 +; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16 +; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls +; CHECK: ret i32 + + +; atomic load: preserve alignment, load shadow value after app value + +define i32 @AtomicLoadAcquire(i32* %p) sanitize_memory { +entry: + %0 = load atomic i32* %p acquire, align 16 + ret i32 %0 +} + +; CHECK: @AtomicLoadAcquire +; CHECK: load atomic i32* {{.*}} acquire, align 16 +; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16 +; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls +; CHECK: ret i32 + + +; atomic load monotonic: bump up to load acquire + +define i32 @AtomicLoadMonotonic(i32* %p) sanitize_memory { +entry: + %0 = load atomic i32* %p monotonic, align 16 + ret i32 %0 +} + +; CHECK: @AtomicLoadMonotonic +; CHECK: load atomic i32* {{.*}} acquire, align 16 +; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16 +; CHECK: store i32 
{{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls +; CHECK: ret i32 + + +; atomic load unordered: bump up to load acquire + +define i32 @AtomicLoadUnordered(i32* %p) sanitize_memory { +entry: + %0 = load atomic i32* %p unordered, align 16 + ret i32 %0 +} + +; CHECK: @AtomicLoadUnordered +; CHECK: load atomic i32* {{.*}} acquire, align 16 +; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16 +; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls +; CHECK: ret i32 + + +; atomic store: preserve alignment, store clean shadow value before app value + +define void @AtomicStore(i32* %p, i32 %x) sanitize_memory { +entry: + store atomic i32 %x, i32* %p seq_cst, align 16 + ret void +} + +; CHECK: @AtomicStore +; CHECK-NOT: @__msan_param_tls +; CHECK: store i32 0, i32* {{.*}}, align 16 +; CHECK: store atomic i32 %x, i32* %p seq_cst, align 16 +; CHECK: ret void + + +; atomic store: preserve alignment, store clean shadow value before app value + +define void @AtomicStoreRelease(i32* %p, i32 %x) sanitize_memory { +entry: + store atomic i32 %x, i32* %p release, align 16 + ret void +} + +; CHECK: @AtomicStoreRelease +; CHECK-NOT: @__msan_param_tls +; CHECK: store i32 0, i32* {{.*}}, align 16 +; CHECK: store atomic i32 %x, i32* %p release, align 16 +; CHECK: ret void + + +; atomic store monotonic: bumped up to store release + +define void @AtomicStoreMonotonic(i32* %p, i32 %x) sanitize_memory { +entry: + store atomic i32 %x, i32* %p monotonic, align 16 + ret void +} + +; CHECK: @AtomicStoreMonotonic +; CHECK-NOT: @__msan_param_tls +; CHECK: store i32 0, i32* {{.*}}, align 16 +; CHECK: store atomic i32 %x, i32* %p release, align 16 +; CHECK: ret void + + +; atomic store unordered: bumped up to store release + +define void @AtomicStoreUnordered(i32* %p, i32 %x) sanitize_memory { +entry: + store atomic i32 %x, i32* %p unordered, align 16 + ret void +} + +; CHECK: @AtomicStoreUnordered +; CHECK-NOT: @__msan_param_tls +; CHECK: store i32 0, i32* {{.*}}, align 16 +; CHECK: 
store atomic i32 %x, i32* %p release, align 16 +; CHECK: ret void diff --git a/test/Instrumentation/MemorySanitizer/lit.local.cfg b/test/Instrumentation/MemorySanitizer/lit.local.cfg deleted file mode 100644 index 19eebc0ac7ac3..0000000000000 --- a/test/Instrumentation/MemorySanitizer/lit.local.cfg +++ /dev/null @@ -1 +0,0 @@ -config.suffixes = ['.ll', '.c', '.cpp'] diff --git a/test/Instrumentation/MemorySanitizer/msan_basic.ll b/test/Instrumentation/MemorySanitizer/msan_basic.ll index 1e7a31793deae..72a992dd5901b 100644 --- a/test/Instrumentation/MemorySanitizer/msan_basic.ll +++ b/test/Instrumentation/MemorySanitizer/msan_basic.ll @@ -1,12 +1,25 @@ ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK-ORIGINS %s +; RUN: opt < %s -msan -msan-check-access-address=1 -S | FileCheck %s -check-prefix=CHECK-AA + target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" ; Check the presence of __msan_init ; CHECK: @llvm.global_ctors {{.*}} @__msan_init -; Check the presence and the linkage type of __msan_track_origins -; CHECK: @__msan_track_origins = weak_odr constant i32 0 +; Check the presence and the linkage type of __msan_track_origins and +; other interface symbols. 
+; CHECK-NOT: @__msan_track_origins +; CHECK-ORIGINS: @__msan_track_origins = weak_odr constant i32 1 +; CHECK-NOT: @__msan_keep_going = weak_odr constant i32 0 +; CHECK: @__msan_retval_tls = external thread_local(initialexec) global [{{.*}}] +; CHECK: @__msan_retval_origin_tls = external thread_local(initialexec) global i32 +; CHECK: @__msan_param_tls = external thread_local(initialexec) global [{{.*}}] +; CHECK: @__msan_param_origin_tls = external thread_local(initialexec) global [{{.*}}] +; CHECK: @__msan_va_arg_tls = external thread_local(initialexec) global [{{.*}}] +; CHECK: @__msan_va_arg_overflow_size_tls = external thread_local(initialexec) global i64 +; CHECK: @__msan_origin_tls = external thread_local(initialexec) global i32 ; Check instrumentation of stores @@ -247,6 +260,8 @@ entry: ; CHECK: @Select ; CHECK: select +; CHECK-NEXT: sext i1 {{.*}} to i32 +; CHECK-NEXT: or i32 ; CHECK-NEXT: select ; CHECK: ret i32 @@ -261,6 +276,13 @@ entry: ret <8 x i16> %cond } +; CHECK: @SelectVector +; CHECK: select <8 x i1> +; CHECK-NEXT: sext <8 x i1> {{.*}} to <8 x i16> +; CHECK-NEXT: or <8 x i16> +; CHECK-NEXT: select <8 x i1> +; CHECK: ret <8 x i16> + ; CHECK-ORIGINS: @SelectVector ; CHECK-ORIGINS: bitcast <8 x i1> {{.*}} to i8 ; CHECK-ORIGINS: icmp ne i8 @@ -268,6 +290,38 @@ entry: ; CHECK-ORIGINS: ret <8 x i16> +; Check that we propagate origin for "select" with scalar condition and vector +; arguments. Select condition shadow is sign-extended to the vector type and +; mixed into the result shadow. 
+ +define <8 x i16> @SelectVector2(<8 x i16> %a, <8 x i16> %b, i1 %c) nounwind uwtable readnone sanitize_memory { +entry: + %cond = select i1 %c, <8 x i16> %a, <8 x i16> %b + ret <8 x i16> %cond +} + +; CHECK: @SelectVector2 +; CHECK: select i1 +; CHECK: sext i1 {{.*}} to i128 +; CHECK: bitcast i128 {{.*}} to <8 x i16> +; CHECK: or <8 x i16> +; CHECK: select i1 +; CHECK: ret <8 x i16> + + +define { i64, i64 } @SelectStruct(i1 zeroext %x, { i64, i64 } %a, { i64, i64 } %b) readnone sanitize_memory { +entry: + %c = select i1 %x, { i64, i64 } %a, { i64, i64 } %b + ret { i64, i64 } %c +} + +; CHECK: @SelectStruct +; CHECK: select i1 {{.*}}, { i64, i64 } +; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } +; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } +; CHECK: ret { i64, i64 } + + define i8* @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory { entry: %0 = inttoptr i64 %x to i8* @@ -407,8 +461,8 @@ define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory { } ; CHECK: @ShadowLoadAlignmentLarge -; CHECK: load i32* {{.*}} align 64 ; CHECK: load volatile i32* {{.*}} align 64 +; CHECK: load i32* {{.*}} align 64 ; CHECK: ret i32 define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory { @@ -418,14 +472,14 @@ define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory { } ; CHECK: @ShadowLoadAlignmentSmall -; CHECK: load i32* {{.*}} align 2 ; CHECK: load volatile i32* {{.*}} align 2 +; CHECK: load i32* {{.*}} align 2 ; CHECK: ret i32 ; CHECK-ORIGINS: @ShadowLoadAlignmentSmall +; CHECK-ORIGINS: load volatile i32* {{.*}} align 2 ; CHECK-ORIGINS: load i32* {{.*}} align 2 ; CHECK-ORIGINS: load i32* {{.*}} align 4 -; CHECK-ORIGINS: load volatile i32* {{.*}} align 2 ; CHECK-ORIGINS: ret i32 @@ -565,8 +619,8 @@ define <8 x i8*> @VectorOfPointers(<8 x i8*>* %p) nounwind uwtable sanitize_memo } ; CHECK: @VectorOfPointers -; CHECK: load <8 x i64>* ; CHECK: load <8 x i8*>* +; CHECK: load <8 x i64>* ; CHECK: store 
<8 x i64> {{.*}} @__msan_retval_tls ; CHECK: ret <8 x i8*> @@ -584,6 +638,31 @@ define void @VACopy(i8* %p1, i8* %p2) nounwind uwtable sanitize_memory { ; CHECK: ret void +; Test that va_start instrumentation does not use va_arg_tls*. +; It should work with a local stack copy instead. + +%struct.__va_list_tag = type { i32, i32, i8*, i8* } +declare void @llvm.va_start(i8*) nounwind + +; Function Attrs: nounwind uwtable +define void @VAStart(i32 %x, ...) { +entry: + %x.addr = alloca i32, align 4 + %va = alloca [1 x %struct.__va_list_tag], align 16 + store i32 %x, i32* %x.addr, align 4 + %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag]* %va, i32 0, i32 0 + %arraydecay1 = bitcast %struct.__va_list_tag* %arraydecay to i8* + call void @llvm.va_start(i8* %arraydecay1) + ret void +} + +; CHECK: @VAStart +; CHECK: call void @llvm.va_start +; CHECK-NOT: @__msan_va_arg_tls +; CHECK-NOT: @__msan_va_arg_overflow_size_tls +; CHECK: ret void + + ; Test handling of volatile stores. ; Check that MemorySanitizer does not add a check of the value being stored. 
@@ -623,3 +702,102 @@ declare void @bar() ; CHECK: store {{.*}} @__msan_retval_tls ; CHECK-NOT: @__msan_warning ; CHECK: ret i32 + + +; Test that stack allocations are unpoisoned in functions missing +; sanitize_memory attribute + +define i32 @NoSanitizeMemoryAlloca() { +entry: + %p = alloca i32, align 4 + %x = call i32 @NoSanitizeMemoryAllocaHelper(i32* %p) + ret i32 %x +} + +declare i32 @NoSanitizeMemoryAllocaHelper(i32* %p) + +; CHECK: @NoSanitizeMemoryAlloca +; CHECK: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 0, i64 4, i32 4, i1 false) +; CHECK: call i32 @NoSanitizeMemoryAllocaHelper(i32* +; CHECK: ret i32 + + +; Test that undef is unpoisoned in functions missing +; sanitize_memory attribute + +define i32 @NoSanitizeMemoryUndef() { +entry: + %x = call i32 @NoSanitizeMemoryUndefHelper(i32 undef) + ret i32 %x +} + +declare i32 @NoSanitizeMemoryUndefHelper(i32 %x) + +; CHECK: @NoSanitizeMemoryUndef +; CHECK: store i32 0, i32* {{.*}} @__msan_param_tls +; CHECK: call i32 @NoSanitizeMemoryUndefHelper(i32 undef) +; CHECK: ret i32 + + +; Test argument shadow alignment + +define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory { +entry: + ret <2 x i64> %b +} + +; CHECK: @ArgumentShadowAlignment +; CHECK: load <2 x i64>* {{.*}} @__msan_param_tls {{.*}}, align 8 +; CHECK: store <2 x i64> {{.*}} @__msan_retval_tls {{.*}}, align 8 +; CHECK: ret <2 x i64> + + +; Test byval argument shadow alignment + +define <2 x i64> @ByValArgumentShadowLargeAlignment(<2 x i64>* byval %p) sanitize_memory { +entry: + %x = load <2 x i64>* %p + ret <2 x i64> %x +} + +; CHECK-AA: @ByValArgumentShadowLargeAlignment +; CHECK-AA: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 16, i32 8, i1 false) +; CHECK-AA: ret <2 x i64> + + +define i16 @ByValArgumentShadowSmallAlignment(i16* byval %p) sanitize_memory { +entry: + %x = load i16* %p + ret i16 %x +} + +; CHECK-AA: @ByValArgumentShadowSmallAlignment +; CHECK-AA: call void @llvm.memcpy.p0i8.p0i8.i64(i8* 
{{.*}}, i8* {{.*}}, i64 2, i32 2, i1 false) +; CHECK-AA: ret i16 + + +; Test origin propagation for insertvalue + +define { i64, i32 } @make_pair_64_32(i64 %x, i32 %y) sanitize_memory { +entry: + %a = insertvalue { i64, i32 } undef, i64 %x, 0 + %b = insertvalue { i64, i32 } %a, i32 %y, 1 + ret { i64, i32 } %b +} + +; CHECK-ORIGINS: @make_pair_64_32 +; First element shadow +; CHECK-ORIGINS: insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 {{.*}}, 0 +; First element origin +; CHECK-ORIGINS: icmp ne i64 +; CHECK-ORIGINS: select i1 +; First element app value +; CHECK-ORIGINS: insertvalue { i64, i32 } undef, i64 {{.*}}, 0 +; Second element shadow +; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1 +; Second element origin +; CHECK-ORIGINS: icmp ne i32 +; CHECK-ORIGINS: select i1 +; Second element app value +; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1 +; CHECK-ORIGINS: ret { i64, i32 } diff --git a/test/Instrumentation/MemorySanitizer/return_from_main.ll b/test/Instrumentation/MemorySanitizer/return_from_main.ll new file mode 100644 index 0000000000000..81dc88834db1c --- /dev/null +++ b/test/Instrumentation/MemorySanitizer/return_from_main.ll @@ -0,0 +1,18 @@ +; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +define i32 @main() sanitize_memory { +entry: + %call = tail call i32 @f() + ret i32 %call +} + +declare i32 @f() sanitize_memory + +; CHECK: @main +; CHECK: call i32 @f() +; CHECK: store i32 0, {{.*}} @__msan_retval_tls +; CHECK: br i1 +; CHECK: call void @__msan_warning_noreturn() +; CHECK: ret i32 diff --git a/test/Instrumentation/MemorySanitizer/vector_cvt.ll b/test/Instrumentation/MemorySanitizer/vector_cvt.ll new file mode 100644 index 0000000000000..9425e25bde5bc --- /dev/null +++ 
b/test/Instrumentation/MemorySanitizer/vector_cvt.ll @@ -0,0 +1,66 @@ +; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone +declare <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double>, i32) nounwind readnone +declare x86_mmx @llvm.x86.sse.cvtps2pi(<4 x float>) nounwind readnone + +; Single argument vector conversion. + +define i32 @test_cvtsd2si(<2 x double> %value) sanitize_memory { +entry: + %0 = tail call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %value) + ret i32 %0 +} + +; CHECK: @test_cvtsd2si +; CHECK: [[S:%[_01-9a-z]+]] = extractelement <2 x i64> {{.*}}, i32 0 +; CHECK: icmp ne {{.*}}[[S]], 0 +; CHECK: br +; CHECK: call void @__msan_warning_noreturn +; CHECK: call i32 @llvm.x86.sse2.cvtsd2si +; CHECK: store i32 0, {{.*}} @__msan_retval_tls +; CHECK: ret i32 + +; Two-argument vector conversion. + +define <2 x double> @test_cvtsi2sd(i32 %a, double %b) sanitize_memory { +entry: + %vec = insertelement <2 x double> undef, double %b, i32 1 + %0 = tail call <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double> %vec, i32 %a) + ret <2 x double> %0 +} + +; CHECK: @test_cvtsi2sd +; CHECK: [[Sa:%[_01-9a-z]+]] = load i32* {{.*}} @__msan_param_tls +; CHECK: [[Sout0:%[_01-9a-z]+]] = insertelement <2 x i64> <i64 -1, i64 -1>, i64 {{.*}}, i32 1 +; Clear low half of result shadow +; CHECK: [[Sout:%[_01-9a-z]+]] = insertelement <2 x i64> {{.*}}[[Sout0]], i64 0, i32 0 +; Trap on %a shadow. +; CHECK: icmp ne {{.*}}[[Sa]], 0 +; CHECK: br +; CHECK: call void @__msan_warning_noreturn +; CHECK: call <2 x double> @llvm.x86.sse2.cvtsi2sd +; CHECK: store <2 x i64> {{.*}}[[Sout]], {{.*}} @__msan_retval_tls +; CHECK: ret <2 x double> + +; x86_mmx packed vector conversion. 
+ +define x86_mmx @test_cvtps2pi(<4 x float> %value) sanitize_memory { +entry: + %0 = tail call x86_mmx @llvm.x86.sse.cvtps2pi(<4 x float> %value) + ret x86_mmx %0 +} + +; CHECK: @test_cvtps2pi +; CHECK: extractelement <4 x i32> {{.*}}, i32 0 +; CHECK: extractelement <4 x i32> {{.*}}, i32 1 +; CHECK: [[S:%[_01-9a-z]+]] = or i32 +; CHECK: icmp ne {{.*}}[[S]], 0 +; CHECK: br +; CHECK: call void @__msan_warning_noreturn +; CHECK: call x86_mmx @llvm.x86.sse.cvtps2pi +; CHECK: store i64 0, {{.*}} @__msan_retval_tls +; CHECK: ret x86_mmx diff --git a/test/Instrumentation/MemorySanitizer/wrap_indirect_calls.ll b/test/Instrumentation/MemorySanitizer/wrap_indirect_calls.ll new file mode 100644 index 0000000000000..555695d258457 --- /dev/null +++ b/test/Instrumentation/MemorySanitizer/wrap_indirect_calls.ll @@ -0,0 +1,34 @@ +; RUN: opt < %s -msan -msan-check-access-address=0 -msan-wrap-indirect-calls=zzz -msan-wrap-indirect-calls-fast=0 -S | FileCheck %s +; RUN: opt < %s -msan -msan-check-access-address=0 -msan-wrap-indirect-calls=zzz -msan-wrap-indirect-calls-fast=1 -S | FileCheck -check-prefix=CHECK-FAST %s +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +; Test for -msan-wrap-indirect-calls functionality. +; Replaces indirect call to %f with a call to whatever is returned from the +; wrapper function. + +; This does not depend on the sanitize_memory attribute. 
+define i32 @func(i32 (i32, i32)* nocapture %f, i32 %x, i32 %y) { +entry: + %call = tail call i32 %f(i32 %x, i32 %y) + ret i32 %call +} + +; CHECK: @func +; CHECK: bitcast i32 (i32, i32)* %f to void ()* +; CHECK: call void ()* (void ()*)* @zzz(void ()* +; CHECK: [[A:%[01-9a-z_.]+]] = bitcast void ()* {{.*}} to i32 (i32, i32)* +; CHECK: call i32 {{.*}}[[A]](i32 {{.*}}, i32 {{.*}}) +; CHECK: ret i32 + +; CHECK-FAST: @func +; CHECK-FAST: bitcast i32 (i32, i32)* %f to void ()* +; CHECK-FAST-DAG: icmp ult void ()* {{.*}}, bitcast (i32* @__executable_start to void ()*) +; CHECK-FAST-DAG: icmp uge void ()* {{.*}}, bitcast (i32* @_end to void ()*) +; CHECK-FAST: or i1 +; CHECK-FAST: br i1 +; CHECK-FAST: call void ()* (void ()*)* @zzz(void ()* +; CHECK-FAST: br label +; CHECK-FAST: [[A:%[01-9a-z_.]+]] = phi i32 (i32, i32)* [ %f, %entry ], [ {{.*}} ] +; CHECK-FAST: call i32 {{.*}}[[A]](i32 {{.*}}, i32 {{.*}}) +; CHECK-FAST: ret i32 |