Diffstat (limited to 'test/Analysis/CostModel/AArch64/store.ll')
-rw-r--r--  test/Analysis/CostModel/AArch64/store.ll | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 50 insertions(+), 8 deletions(-)
diff --git a/test/Analysis/CostModel/AArch64/store.ll b/test/Analysis/CostModel/AArch64/store.ll
index 58750721cb977..085863554f007 100644
--- a/test/Analysis/CostModel/AArch64/store.ll
+++ b/test/Analysis/CostModel/AArch64/store.ll
@@ -1,17 +1,59 @@
-; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-apple-ios | FileCheck %s
-; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-apple-ios -mattr=slow-misaligned-128store | FileCheck %s --check-prefix=SLOW_MISALIGNED_128_STORE
+; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-unknown | FileCheck %s
+; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-unknown -mattr=slow-misaligned-128store | FileCheck %s --check-prefix=SLOW_MISALIGNED_128_STORE
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
; CHECK-LABEL: getMemoryOpCost
; SLOW_MISALIGNED_128_STORE-LABEL: getMemoryOpCost
define void @getMemoryOpCost() {
- ; If FeatureSlowMisaligned128Store is set, we penalize <2 x i64> stores. On
- ; Cyclone, for example, such stores should be expensive because we don't
- ; split them and misaligned 16b stores have bad performance.
- ;
- ; CHECK: cost of 1 {{.*}} store
- ; SLOW_MISALIGNED_128_STORE: cost of 12 {{.*}} store
+ ; If FeatureSlowMisaligned128Store is set, we penalize 128-bit stores.
+ ; The 256-bit stores are not legal; they are legalized into two 128-bit
+ ; stores, so they are penalized twice as much.
+
+ ; CHECK: cost of 2 for {{.*}} store <4 x i64>
+ ; SLOW_MISALIGNED_128_STORE: cost of 24 for {{.*}} store <4 x i64>
+ store <4 x i64> undef, <4 x i64> * undef
+ ; CHECK-NEXT: cost of 2 for {{.*}} store <8 x i32>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <8 x i32>
+ store <8 x i32> undef, <8 x i32> * undef
+ ; CHECK-NEXT: cost of 2 for {{.*}} store <16 x i16>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <16 x i16>
+ store <16 x i16> undef, <16 x i16> * undef
+ ; CHECK-NEXT: cost of 2 for {{.*}} store <32 x i8>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <32 x i8>
+ store <32 x i8> undef, <32 x i8> * undef
+
+ ; CHECK-NEXT: cost of 2 for {{.*}} store <4 x double>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <4 x double>
+ store <4 x double> undef, <4 x double> * undef
+ ; CHECK-NEXT: cost of 2 for {{.*}} store <8 x float>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <8 x float>
+ store <8 x float> undef, <8 x float> * undef
+ ; CHECK-NEXT: cost of 2 for {{.*}} store <16 x half>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <16 x half>
+ store <16 x half> undef, <16 x half> * undef
+
+ ; CHECK-NEXT: cost of 1 for {{.*}} store <2 x i64>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <2 x i64>
store <2 x i64> undef, <2 x i64> * undef
+ ; CHECK-NEXT: cost of 1 for {{.*}} store <4 x i32>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <4 x i32>
+ store <4 x i32> undef, <4 x i32> * undef
+ ; CHECK-NEXT: cost of 1 for {{.*}} store <8 x i16>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <8 x i16>
+ store <8 x i16> undef, <8 x i16> * undef
+ ; CHECK-NEXT: cost of 1 for {{.*}} store <16 x i8>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <16 x i8>
+ store <16 x i8> undef, <16 x i8> * undef
+
+ ; CHECK-NEXT: cost of 1 for {{.*}} store <2 x double>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <2 x double>
+ store <2 x double> undef, <2 x double> * undef
+ ; CHECK-NEXT: cost of 1 for {{.*}} store <4 x float>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <4 x float>
+ store <4 x float> undef, <4 x float> * undef
+ ; CHECK-NEXT: cost of 1 for {{.*}} store <8 x half>
+ ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <8 x half>
+ store <8 x half> undef, <8 x half> * undef
; We scalarize the loads/stores because there is no vector register name for
; these types (they get extended to v.4h/v.2s).
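
For context on where the checked numbers plausibly come from: the SLOW_MISALIGNED_128_STORE values are consistent with the cost model charging a fixed penalty of 12 per legalized 128-bit store, so a 256-bit store that legalizes into two parts costs 24. The sketch below is a minimal reconstruction of that arithmetic; the function names and the penalty constant are assumptions inferred from the CHECK lines above, not the actual AArch64TTIImpl::getMemoryOpCost implementation.

```cpp
// Minimal sketch of the cost arithmetic the CHECK lines above imply.
// Assumption: the per-store penalty of 12 and the "split into 128-bit
// parts" model are inferred from the test values, not copied from LLVM.
#include <cassert>

// Number of 128-bit parts a wide vector store legalizes into
// (e.g. <4 x i64> -> 2 parts, <2 x i64> -> 1 part).
unsigned legalized128BitParts(unsigned VectorBits) {
  return VectorBits / 128;
}

unsigned storeCost(unsigned VectorBits, bool SlowMisaligned128Store) {
  unsigned Parts = legalized128BitParts(VectorBits);
  // Hypothetical penalty constant: large enough that vectorization only
  // pays off when several stores can amortize it (12 per 128-bit store).
  const unsigned Penalty = 12;
  return SlowMisaligned128Store ? Parts * Penalty : Parts;
}

int main() {
  assert(storeCost(128, false) == 1);  // CHECK: cost of 1 ... <2 x i64>
  assert(storeCost(128, true) == 12);  // SLOW_...: cost of 12 ... <2 x i64>
  assert(storeCost(256, false) == 2);  // CHECK: cost of 2 ... <4 x i64>
  assert(storeCost(256, true) == 24);  // SLOW_...: cost of 24 ... <4 x i64>
  return 0;
}
```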
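The trailing comment concerns vector types with no matching AArch64 vector register; the hunk is truncated before those stores appear, so their costs are not shown here. As a hedged illustration of the usual accounting for such scalarized stores, with every name and constant below an assumption rather than LLVM's actual code: each lane pays for one element extract plus one scalar store.

```cpp
// Hedged sketch of scalarized-store accounting; illustrative only, not
// the actual LLVM cost-model code.
#include <cassert>

// Assumed shape: a store of a vector type with no register is broken into
// one extractelement plus one scalar store per lane.
unsigned scalarizedStoreCost(unsigned NumElts, unsigned ExtractCost,
                             unsigned ScalarStoreCost) {
  return NumElts * (ExtractCost + ScalarStoreCost);
}

int main() {
  // E.g. a <4 x i8> store with assumed unit costs per extract and store.
  assert(scalarizedStoreCost(4, 1, 1) == 8);
  return 0;
}
```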