Diffstat (limited to 'test/Instrumentation/AddressSanitizer/stack-poisoning-byval-args.ll')
-rw-r--r--  test/Instrumentation/AddressSanitizer/stack-poisoning-byval-args.ll | 48
1 file changed, 48 insertions, 0 deletions
diff --git a/test/Instrumentation/AddressSanitizer/stack-poisoning-byval-args.ll b/test/Instrumentation/AddressSanitizer/stack-poisoning-byval-args.ll
new file mode 100644
index 0000000000000..32610ce3b815c
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/stack-poisoning-byval-args.ll
@@ -0,0 +1,48 @@
+; This check verifies that arguments passed by value get redzones.
+; RUN: opt < %s -asan -asan-realign-stack=32 -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.A = type { [8 x i32] }
+
+declare i32 @bar(%struct.A*)
+
+; Test behavior for named argument with explicit alignment. The memcpy and
+; alloca alignments should match the explicit alignment of 64.
+define void @foo(%struct.A* byval align 64 %a) sanitize_address {
+entry:
+; CHECK-LABEL: foo
+; CHECK: call i64 @__asan_stack_malloc
+; CHECK: alloca i8, i64 {{.*}} align 64
+; CHECK: [[copyPtr:%[^ \t]+]] = inttoptr i64 %{{[^ \t]+}} to %struct.A*
+; CHECK: [[copyBytePtr:%[^ \t]+]] = bitcast %struct.A* [[copyPtr]]
+; CHECK: [[aBytePtr:%[^ \t]+]] = bitcast %struct.A* %a
+; CHECK: call void @llvm.memcpy{{[^%]+}}[[copyBytePtr]]{{[^%]+}}[[aBytePtr]],{{[^,]+}}, i32 64
+; CHECK: call i32 @bar(%struct.A* [[copyPtr]])
+; CHECK: ret void
+
+ %call = call i32 @bar(%struct.A* %a)
+ ret void
+}
+
+; Test behavior for unnamed argument without explicit alignment. In this case,
+; the first argument is referenced by the identifier %0 and the ABI requires a
+; minimum alignment of 4 bytes since struct.A contains i32s which have 4-byte
+; alignment. However, the alloca alignment will be 32 since that is the value
+; passed via the -asan-realign-stack option, which is greater than 4.
+define void @baz(%struct.A* byval) sanitize_address {
+entry:
+; CHECK-LABEL: baz
+; CHECK: call i64 @__asan_stack_malloc
+; CHECK: alloca i8, i64 {{.*}} align 32
+; CHECK: [[copyPtr:%[^ \t]+]] = inttoptr i64 %{{[^ \t]+}} to %struct.A*
+; CHECK: [[copyBytePtr:%[^ \t]+]] = bitcast %struct.A* [[copyPtr]]
+; CHECK: [[aBytePtr:%[^ \t]+]] = bitcast %struct.A* %0
+; CHECK: call void @llvm.memcpy{{[^%]+}}[[copyBytePtr]]{{[^%]+}}[[aBytePtr]],{{[^,]+}}, i32 4
+; CHECK: call i32 @bar(%struct.A* [[copyPtr]])
+; CHECK: ret void
+
+ %call = call i32 @bar(%struct.A* %0)
+ ret void
+}
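
For reference, a minimal C sketch of the kind of source that exercises this instrumentation; it is an assumption for illustration, not part of this commit. On x86-64, a 32-byte struct passed by value is typically lowered by Clang to a byval pointer parameter, and with -fsanitize=address the ASan pass copies the argument into a redzone-protected stack slot, which is what the CHECK lines above verify.

    /* Hypothetical C counterpart of %struct.A and the test functions above
     * (illustration only, not part of the commit). Compiled with something like
     *   clang -O1 -fsanitize=address -S -emit-llvm byval.c
     * the 32-byte struct argument is passed in memory (byval) on x86-64, and
     * the ASan pass copies it into an instrumented, redzone-surrounded alloca. */
    struct A {
      int v[8];            /* 32 bytes, so it is passed in memory, not in registers */
    };

    int bar(struct A *p);

    void foo(struct A a) { /* roughly corresponds to @foo/@baz in the test above */
      bar(&a);             /* &a ends up pointing at the instrumented stack copy */
    }

Marking struct A with an explicit alignment attribute (for example __attribute__((aligned(64)))) would be expected to produce the byval align 64 form checked in @foo, while the plain struct corresponds to the default-aligned case checked in @baz.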