Diffstat (limited to 'test/CodeGen/PowerPC')
-rw-r--r--  test/CodeGen/PowerPC/PR33636.ll                            | 702
-rw-r--r--  test/CodeGen/PowerPC/atomics-regression.ll                 | 528
-rw-r--r--  test/CodeGen/PowerPC/bitreverse.ll                         |  23
-rw-r--r--  test/CodeGen/PowerPC/build-vector-tests.ll                 |   4
-rw-r--r--  test/CodeGen/PowerPC/ppc-ctr-dead-code.ll                  |  38
-rw-r--r--  test/CodeGen/PowerPC/ppc-redzone-alignment-bug.ll          |  32
-rw-r--r--  test/CodeGen/PowerPC/ppc64le-smallarg.ll                   |   4
-rw-r--r--  test/CodeGen/PowerPC/pr33093.ll                            | 165
-rw-r--r--  test/CodeGen/PowerPC/select-addrRegRegOnly.ll              |  37
-rw-r--r--  test/CodeGen/PowerPC/svr4-redzone.ll                       |   6
-rw-r--r--  test/CodeGen/PowerPC/tailcall1-64.ll                       |   7
-rw-r--r--  test/CodeGen/PowerPC/testBitReverse.ll                     | 105
-rw-r--r--  test/CodeGen/PowerPC/vec_extract_p9.ll                     | 167
-rw-r--r--  test/CodeGen/PowerPC/vec_int_ext.ll                        | 253
-rw-r--r--  test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll  |  16
15 files changed, 1755 insertions(+), 332 deletions(-)
diff --git a/test/CodeGen/PowerPC/PR33636.ll b/test/CodeGen/PowerPC/PR33636.ll
new file mode 100644
index 000000000000..4a1216dd4c11
--- /dev/null
+++ b/test/CodeGen/PowerPC/PR33636.ll
@@ -0,0 +1,702 @@
+; Just a test case for a crash reported in
+; https://bugs.llvm.org/show_bug.cgi?id=33636
+; RUN: llc -mtriple=powerpc64le-unknown-unknown -mcpu=pwr8 < %s | FileCheck %s
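+;
+; The function below is a reduced control-flow maze: almost every basic block
+; ends in a conditional branch on undef. The test only checks that llc makes
+; it to the end of the function (the final CHECK on blr) without crashing.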
+@g_225 = external unnamed_addr global i16, align 2
+@g_756 = external global [6 x i32], align 4
+@g_3456 = external global i32, align 4
+@g_3708 = external global [9 x i32], align 4
+@g_1252 = external global i8*, align 8
+@g_3043 = external global float*, align 8
+
+; Function Attrs: nounwind
+define void @main() {
+ br i1 undef, label %1, label %4
+
+; <label>:1: ; preds = %0
+ br i1 undef, label %2, label %3
+
+; <label>:2: ; preds = %1
+ br label %3
+
+; <label>:3: ; preds = %2, %1
+ br label %4
+
+; <label>:4: ; preds = %3, %0
+ br label %5
+
+; <label>:5: ; preds = %5, %4
+ br i1 undef, label %6, label %5
+
+; <label>:6: ; preds = %5
+ br i1 undef, label %7, label %8
+
+; <label>:7: ; preds = %6
+ br i1 undef, label %70, label %69
+
+; <label>:8: ; preds = %6
+ br i1 undef, label %9, label %50
+
+; <label>:9: ; preds = %8
+ br label %11
+
+; <label>:10: ; preds = %28
+ br i1 undef, label %11, label %12
+
+; <label>:11: ; preds = %10, %9
+ br label %13
+
+; <label>:12: ; preds = %10
+ br label %30
+
+; <label>:13: ; preds = %23, %11
+ br i1 undef, label %17, label %14
+
+; <label>:14: ; preds = %13
+ br i1 undef, label %16, label %15
+
+; <label>:15: ; preds = %14
+ br label %22
+
+; <label>:16: ; preds = %14
+ br label %17
+
+; <label>:17: ; preds = %16, %13
+ br i1 undef, label %18, label %19
+
+; <label>:18: ; preds = %17
+ br label %19
+
+; <label>:19: ; preds = %18, %17
+ br i1 undef, label %48, label %20
+
+; <label>:20: ; preds = %19
+ br i1 undef, label %48, label %21
+
+; <label>:21: ; preds = %20
+ br label %22
+
+; <label>:22: ; preds = %21, %15
+ br i1 undef, label %23, label %24
+
+; <label>:23: ; preds = %22
+ br label %13
+
+; <label>:24: ; preds = %22
+ br i1 undef, label %28, label %25
+
+; <label>:25: ; preds = %24
+ br label %26
+
+; <label>:26: ; preds = %26, %25
+ br i1 undef, label %26, label %27
+
+; <label>:27: ; preds = %26
+ br label %48
+
+; <label>:28: ; preds = %24
+ br i1 undef, label %29, label %10
+
+; <label>:29: ; preds = %28
+ br label %48
+
+; <label>:30: ; preds = %33, %12
+ br i1 undef, label %32, label %33
+
+; <label>:31: ; preds = %33
+ br label %34
+
+; <label>:32: ; preds = %30
+ br label %33
+
+; <label>:33: ; preds = %32, %30
+ br i1 undef, label %30, label %31
+
+; <label>:34: ; preds = %47, %31
+ br i1 undef, label %35, label %36
+
+; <label>:35: ; preds = %34
+ br label %36
+
+; <label>:36: ; preds = %35, %34
+ br label %37
+
+; <label>:37: ; preds = %45, %36
+ br i1 undef, label %40, label %38
+
+; <label>:38: ; preds = %37
+ br i1 undef, label %39, label %46
+
+; <label>:39: ; preds = %38
+ br label %41
+
+; <label>:40: ; preds = %37
+ br label %41
+
+; <label>:41: ; preds = %40, %39
+ br label %42
+
+; <label>:42: ; preds = %44, %41
+ br i1 undef, label %43, label %44
+
+; <label>:43: ; preds = %42
+ br label %44
+
+; <label>:44: ; preds = %43, %42
+ br i1 undef, label %42, label %45
+
+; <label>:45: ; preds = %44
+ br i1 undef, label %37, label %47
+
+; <label>:46: ; preds = %38
+ br label %48
+
+; <label>:47: ; preds = %45
+ br i1 undef, label %34, label %49
+
+; <label>:48: ; preds = %46, %29, %27, %20, %19
+ br label %65
+
+; <label>:49: ; preds = %47
+ br label %58
+
+; <label>:50: ; preds = %8
+ br i1 undef, label %52, label %51
+
+; <label>:51: ; preds = %50
+ br label %57
+
+; <label>:52: ; preds = %50
+ br label %53
+
+; <label>:53: ; preds = %56, %52
+ br i1 undef, label %54, label %59
+
+; <label>:54: ; preds = %53
+ br i1 undef, label %60, label %59
+
+; <label>:55: ; preds = %64
+ br label %56
+
+; <label>:56: ; preds = %64, %55
+ br i1 undef, label %57, label %53
+
+; <label>:57: ; preds = %56, %51
+ br label %58
+
+; <label>:58: ; preds = %57, %49
+ br label %65
+
+; <label>:59: ; preds = %63, %62, %61, %60, %54, %53
+ br label %65
+
+; <label>:60: ; preds = %54
+ br i1 undef, label %61, label %59
+
+; <label>:61: ; preds = %60
+ br i1 undef, label %62, label %59
+
+; <label>:62: ; preds = %61
+ br i1 undef, label %63, label %59
+
+; <label>:63: ; preds = %62
+ br i1 undef, label %64, label %59
+
+; <label>:64: ; preds = %63
+ br i1 undef, label %55, label %56
+
+; <label>:65: ; preds = %59, %58, %48
+ br i1 undef, label %66, label %67
+
+; <label>:66: ; preds = %65
+ br label %67
+
+; <label>:67: ; preds = %66, %65
+ br i1 undef, label %68, label %92
+
+; <label>:68: ; preds = %67
+ br label %92
+
+; <label>:69: ; preds = %7
+ br label %70
+
+; <label>:70: ; preds = %69, %7
+ br i1 undef, label %72, label %71
+
+; <label>:71: ; preds = %70
+ br label %72
+
+; <label>:72: ; preds = %71, %70
+ br i1 undef, label %73, label %74
+
+; <label>:73: ; preds = %72
+ br label %74
+
+; <label>:74: ; preds = %73, %72
+ br i1 undef, label %85, label %75
+
+; <label>:75: ; preds = %74
+ br i1 undef, label %84, label %76
+
+; <label>:76: ; preds = %75
+ br i1 undef, label %78, label %77
+
+; <label>:77: ; preds = %77, %76
+ br i1 undef, label %84, label %77
+
+; <label>:78: ; preds = %76
+ br label %79
+
+; <label>:79: ; preds = %83, %78
+ br i1 undef, label %83, label %80
+
+; <label>:80: ; preds = %79
+ br i1 undef, label %81, label %82
+
+; <label>:81: ; preds = %80
+ br label %83
+
+; <label>:82: ; preds = %80
+ br label %83
+
+; <label>:83: ; preds = %82, %81, %79
+ br i1 undef, label %90, label %79
+
+; <label>:84: ; preds = %77, %75
+ br label %92
+
+; <label>:85: ; preds = %74
+ br i1 undef, label %86, label %88
+
+; <label>:86: ; preds = %85
+ br i1 undef, label %89, label %87
+
+; <label>:87: ; preds = %86
+ br i1 undef, label %89, label %88
+
+; <label>:88: ; preds = %87, %85
+ br label %89
+
+; <label>:89: ; preds = %88, %87, %86
+ br label %92
+
+; <label>:90: ; preds = %83
+ br i1 undef, label %92, label %91
+
+; <label>:91: ; preds = %90
+ br label %92
+
+; <label>:92: ; preds = %91, %90, %89, %84, %68, %67
+ br label %93
+
+; <label>:93: ; preds = %100, %92
+ br label %94
+
+; <label>:94: ; preds = %98, %93
+ br label %95
+
+; <label>:95: ; preds = %97, %94
+ br i1 undef, label %96, label %97
+
+; <label>:96: ; preds = %95
+ br label %97
+
+; <label>:97: ; preds = %96, %95
+ br i1 undef, label %95, label %98
+
+; <label>:98: ; preds = %97
+ store i32 7, i32* getelementptr inbounds ([9 x i32], [9 x i32]* @g_3708, i64 0, i64 7), align 4
+ %99 = load volatile i32, i32* @g_3456, align 4
+ br i1 undef, label %94, label %100
+
+; <label>:100: ; preds = %98
+ br i1 undef, label %93, label %101
+
+; <label>:101: ; preds = %100
+ br label %102
+
+; <label>:102: ; preds = %117, %101
+ br label %103
+
+; <label>:103: ; preds = %109, %102
+ store i8** @g_1252, i8*** undef, align 8
+ br i1 undef, label %105, label %104
+
+; <label>:104: ; preds = %103
+ br label %105
+
+; <label>:105: ; preds = %104, %103
+ %106 = icmp eq i32 0, 0
+ br i1 %106, label %107, label %116
+
+; <label>:107: ; preds = %105
+ br i1 icmp ne (i32* getelementptr inbounds ([6 x i32], [6 x i32]* @g_756, i64 0, i64 0), i32* getelementptr inbounds ([9 x i32], [9 x i32]* @g_3708, i64 0, i64 4)), label %109, label %108
+
+; <label>:108: ; preds = %107
+ br label %109
+
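+; The phi below keeps a constant-expression sdiv (whose divisor is the zext of
+; a pointer-equality icmp between @g_756 and @g_3708); the result is then
+; truncated, tested, and stored to @g_225.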
+; <label>:109: ; preds = %108, %107
+ %110 = phi i32 [ sdiv (i32 32, i32 zext (i1 icmp eq (i32* getelementptr inbounds ([6 x i32], [6 x i32]* @g_756, i64 0, i64 0), i32* getelementptr inbounds ([9 x i32], [9 x i32]* @g_3708, i64 0, i64 4)) to i32)), %108 ], [ 32, %107 ]
+ %111 = trunc i32 %110 to i8
+ %112 = icmp ne i8 %111, 0
+ %113 = and i1 %112, icmp eq (i32* getelementptr inbounds ([6 x i32], [6 x i32]* @g_756, i64 0, i64 0), i32* getelementptr inbounds ([9 x i32], [9 x i32]* @g_3708, i64 0, i64 4))
+ %114 = zext i1 %113 to i16
+ store i16 %114, i16* @g_225, align 2
+ %115 = load volatile float*, float** @g_3043, align 8
+ br i1 undef, label %103, label %117
+
+; <label>:116: ; preds = %105
+ br label %119
+
+; <label>:117: ; preds = %109
+ br i1 undef, label %102, label %118
+
+; <label>:118: ; preds = %117
+ br label %119
+
+; <label>:119: ; preds = %118, %116
+ br i1 undef, label %120, label %231
+
+; <label>:120: ; preds = %119
+ br label %232
+
+; <label>:121: ; preds = %230
+ br label %122
+
+; <label>:122: ; preds = %230, %121
+ br i1 undef, label %124, label %123
+
+; <label>:123: ; preds = %122
+ br label %124
+
+; <label>:124: ; preds = %123, %122
+ br i1 undef, label %228, label %225
+
+; <label>:125: ; preds = %218
+ br label %127
+
+; <label>:126: ; preds = %218
+ br label %127
+
+; <label>:127: ; preds = %216, %126, %125
+ br i1 undef, label %204, label %128
+
+; <label>:128: ; preds = %127
+ br label %205
+
+; <label>:129: ; preds = %216
+ br i1 undef, label %131, label %130
+
+; <label>:130: ; preds = %129
+ br label %131
+
+; <label>:131: ; preds = %130, %129
+ br i1 undef, label %133, label %132
+
+; <label>:132: ; preds = %131
+ br label %133
+
+; <label>:133: ; preds = %132, %131
+ br label %134
+
+; <label>:134: ; preds = %203, %133
+ br i1 undef, label %193, label %135
+
+; <label>:135: ; preds = %134
+ br label %194
+
+; <label>:136: ; preds = %203
+ br i1 undef, label %138, label %137
+
+; <label>:137: ; preds = %136
+ br label %138
+
+; <label>:138: ; preds = %137, %136
+ br i1 undef, label %192, label %139
+
+; <label>:139: ; preds = %138
+ br label %191
+
+; <label>:140: ; preds = %191, %190
+ br i1 undef, label %180, label %141
+
+; <label>:141: ; preds = %140
+ br label %181
+
+; <label>:142: ; preds = %190
+ br i1 undef, label %143, label %178
+
+; <label>:143: ; preds = %142
+ br label %179
+
+; <label>:144: ; preds = %179
+ br label %176
+
+; <label>:145: ; preds = %179
+ br label %176
+
+; <label>:146: ; preds = %177, %175, %174
+ br i1 undef, label %165, label %147
+
+; <label>:147: ; preds = %146
+ br label %166
+
+; <label>:148: ; preds = %174
+ br label %149
+
+; <label>:149: ; preds = %164, %148
+ br i1 undef, label %154, label %150
+
+; <label>:150: ; preds = %149
+ br label %155
+
+; <label>:151: ; preds = %164
+ br i1 undef, label %153, label %152
+
+; <label>:152: ; preds = %151
+ br label %153
+
+; <label>:153: ; preds = %152, %151
+ ret void
+
+; <label>:154: ; preds = %149
+ br label %155
+
+; <label>:155: ; preds = %154, %150
+ br i1 undef, label %157, label %156
+
+; <label>:156: ; preds = %155
+ br label %158
+
+; <label>:157: ; preds = %155
+ br label %158
+
+; <label>:158: ; preds = %157, %156
+ br i1 undef, label %160, label %159
+
+; <label>:159: ; preds = %158
+ br label %161
+
+; <label>:160: ; preds = %158
+ br label %161
+
+; <label>:161: ; preds = %160, %159
+ br i1 undef, label %163, label %162
+
+; <label>:162: ; preds = %161
+ br label %164
+
+; <label>:163: ; preds = %161
+ br label %164
+
+; <label>:164: ; preds = %163, %162
+ br i1 undef, label %151, label %149
+
+; <label>:165: ; preds = %146
+ br label %166
+
+; <label>:166: ; preds = %165, %147
+ br i1 undef, label %168, label %167
+
+; <label>:167: ; preds = %166
+ br label %169
+
+; <label>:168: ; preds = %166
+ br label %169
+
+; <label>:169: ; preds = %168, %167
+ br i1 undef, label %171, label %170
+
+; <label>:170: ; preds = %169
+ br label %172
+
+; <label>:171: ; preds = %169
+ br label %172
+
+; <label>:172: ; preds = %171, %170
+ br i1 undef, label %174, label %173
+
+; <label>:173: ; preds = %172
+ br label %174
+
+; <label>:174: ; preds = %173, %172
+ br i1 undef, label %148, label %146
+
+; <label>:175: ; preds = %176
+ br label %146
+
+; <label>:176: ; preds = %145, %144
+ br i1 undef, label %177, label %175
+
+; <label>:177: ; preds = %176
+ br label %146
+
+; <label>:178: ; preds = %142
+ br label %179
+
+; <label>:179: ; preds = %178, %143
+ br i1 undef, label %145, label %144
+
+; <label>:180: ; preds = %140
+ br label %181
+
+; <label>:181: ; preds = %180, %141
+ br i1 undef, label %183, label %182
+
+; <label>:182: ; preds = %181
+ br label %184
+
+; <label>:183: ; preds = %181
+ br label %184
+
+; <label>:184: ; preds = %183, %182
+ br i1 undef, label %186, label %185
+
+; <label>:185: ; preds = %184
+ br label %187
+
+; <label>:186: ; preds = %184
+ br label %187
+
+; <label>:187: ; preds = %186, %185
+ br i1 undef, label %189, label %188
+
+; <label>:188: ; preds = %187
+ br label %190
+
+; <label>:189: ; preds = %187
+ br label %190
+
+; <label>:190: ; preds = %189, %188
+ br i1 undef, label %142, label %140
+
+; <label>:191: ; preds = %192, %139
+ br label %140
+
+; <label>:192: ; preds = %138
+ br label %191
+
+; <label>:193: ; preds = %134
+ br label %194
+
+; <label>:194: ; preds = %193, %135
+ br i1 undef, label %196, label %195
+
+; <label>:195: ; preds = %194
+ br label %197
+
+; <label>:196: ; preds = %194
+ br label %197
+
+; <label>:197: ; preds = %196, %195
+ br i1 undef, label %199, label %198
+
+; <label>:198: ; preds = %197
+ br label %200
+
+; <label>:199: ; preds = %197
+ br label %200
+
+; <label>:200: ; preds = %199, %198
+ br i1 undef, label %202, label %201
+
+; <label>:201: ; preds = %200
+ br label %203
+
+; <label>:202: ; preds = %200
+ br label %203
+
+; <label>:203: ; preds = %202, %201
+ br i1 undef, label %136, label %134
+
+; <label>:204: ; preds = %127
+ br label %205
+
+; <label>:205: ; preds = %204, %128
+ br i1 undef, label %207, label %206
+
+; <label>:206: ; preds = %205
+ br label %208
+
+; <label>:207: ; preds = %205
+ br label %208
+
+; <label>:208: ; preds = %207, %206
+ br i1 undef, label %210, label %209
+
+; <label>:209: ; preds = %208
+ br label %211
+
+; <label>:210: ; preds = %208
+ br label %211
+
+; <label>:211: ; preds = %210, %209
+ br i1 undef, label %213, label %212
+
+; <label>:212: ; preds = %211
+ br label %214
+
+; <label>:213: ; preds = %211
+ br label %214
+
+; <label>:214: ; preds = %213, %212
+ br i1 undef, label %216, label %215
+
+; <label>:215: ; preds = %214
+ br label %216
+
+; <label>:216: ; preds = %215, %214
+ br i1 undef, label %129, label %127
+
+; <label>:217: ; preds = %220
+ br label %218
+
+; <label>:218: ; preds = %221, %217
+ br i1 undef, label %126, label %125
+
+; <label>:219: ; preds = %223
+ br label %220
+
+; <label>:220: ; preds = %224, %219
+ br i1 undef, label %221, label %217
+
+; <label>:221: ; preds = %220
+ br label %218
+
+; <label>:222: ; preds = %226
+ br label %223
+
+; <label>:223: ; preds = %227, %222
+ br i1 undef, label %224, label %219
+
+; <label>:224: ; preds = %223
+ br label %220
+
+; <label>:225: ; preds = %124
+ br label %226
+
+; <label>:226: ; preds = %228, %225
+ br i1 undef, label %227, label %222
+
+; <label>:227: ; preds = %226
+ br label %223
+
+; <label>:228: ; preds = %124
+ br label %226
+
+; <label>:229: ; preds = %232
+ br label %230
+
+; <label>:230: ; preds = %233, %229
+ br i1 undef, label %122, label %121
+
+; <label>:231: ; preds = %119
+ br label %232
+
+; <label>:232: ; preds = %231, %120
+ br i1 undef, label %233, label %229
+
+; <label>:233: ; preds = %232
+ br label %230
+
+; CHECK: blr
+}
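The atomics-regression.ll hunks that follow are a mechanical syntax migration, not a behavioral change: the bare singlethread keyword on fence, cmpxchg, and atomicrmw was respelled as syncscope("singlethread") when LLVM generalized synchronization scopes. A minimal sketch of the new spelling (the function @f and its operands are illustrative, not taken from the patch; each operation previously read "... singlethread <ordering>"):

define i32 @f(i32* %p, i32 %c, i32 %v) {
  fence syncscope("singlethread") acquire
  ; cmpxchg: success ordering acq_rel, failure ordering monotonic
  %pair = cmpxchg i32* %p, i32 %c, i32 %v syncscope("singlethread") acq_rel monotonic
  ; atomicrmw returns the old value
  %old = atomicrmw add i32* %p, i32 %v syncscope("singlethread") seq_cst
  ret i32 %old
}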
diff --git a/test/CodeGen/PowerPC/atomics-regression.ll b/test/CodeGen/PowerPC/atomics-regression.ll
index d57b3a203791..0c7a31d16b19 100644
--- a/test/CodeGen/PowerPC/atomics-regression.ll
+++ b/test/CodeGen/PowerPC/atomics-regression.ll
@@ -370,7 +370,7 @@ define void @test36() {
; PPC64LE: # BB#0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- fence singlethread acquire
+ fence syncscope("singlethread") acquire
ret void
}
@@ -379,7 +379,7 @@ define void @test37() {
; PPC64LE: # BB#0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- fence singlethread release
+ fence syncscope("singlethread") release
ret void
}
@@ -388,7 +388,7 @@ define void @test38() {
; PPC64LE: # BB#0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- fence singlethread acq_rel
+ fence syncscope("singlethread") acq_rel
ret void
}
@@ -397,7 +397,7 @@ define void @test39() {
; PPC64LE: # BB#0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: blr
- fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
ret void
}
@@ -1273,7 +1273,7 @@ define void @test80(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread monotonic monotonic
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") monotonic monotonic
ret void
}
@@ -1294,7 +1294,7 @@ define void @test81(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acquire monotonic
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") acquire monotonic
ret void
}
@@ -1315,7 +1315,7 @@ define void @test82(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acquire acquire
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") acquire acquire
ret void
}
@@ -1336,7 +1336,7 @@ define void @test83(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread release monotonic
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") release monotonic
ret void
}
@@ -1357,7 +1357,7 @@ define void @test84(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread release acquire
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") release acquire
ret void
}
@@ -1379,7 +1379,7 @@ define void @test85(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acq_rel monotonic
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") acq_rel monotonic
ret void
}
@@ -1401,7 +1401,7 @@ define void @test86(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acq_rel acquire
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") acq_rel acquire
ret void
}
@@ -1423,7 +1423,7 @@ define void @test87(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread seq_cst monotonic
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") seq_cst monotonic
ret void
}
@@ -1445,7 +1445,7 @@ define void @test88(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread seq_cst acquire
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") seq_cst acquire
ret void
}
@@ -1467,7 +1467,7 @@ define void @test89(i8* %ptr, i8 %cmp, i8 %val) {
; PPC64LE-NEXT: stbcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread seq_cst seq_cst
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") seq_cst seq_cst
ret void
}
@@ -1487,7 +1487,7 @@ define void @test90(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread monotonic monotonic
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") monotonic monotonic
ret void
}
@@ -1508,7 +1508,7 @@ define void @test91(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acquire monotonic
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") acquire monotonic
ret void
}
@@ -1529,7 +1529,7 @@ define void @test92(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acquire acquire
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") acquire acquire
ret void
}
@@ -1550,7 +1550,7 @@ define void @test93(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread release monotonic
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") release monotonic
ret void
}
@@ -1571,7 +1571,7 @@ define void @test94(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread release acquire
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") release acquire
ret void
}
@@ -1593,7 +1593,7 @@ define void @test95(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acq_rel monotonic
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") acq_rel monotonic
ret void
}
@@ -1615,7 +1615,7 @@ define void @test96(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acq_rel acquire
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") acq_rel acquire
ret void
}
@@ -1637,7 +1637,7 @@ define void @test97(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread seq_cst monotonic
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") seq_cst monotonic
ret void
}
@@ -1659,7 +1659,7 @@ define void @test98(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread seq_cst acquire
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") seq_cst acquire
ret void
}
@@ -1681,7 +1681,7 @@ define void @test99(i16* %ptr, i16 %cmp, i16 %val) {
; PPC64LE-NEXT: sthcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread seq_cst seq_cst
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") seq_cst seq_cst
ret void
}
@@ -1701,7 +1701,7 @@ define void @test100(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread monotonic monotonic
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") monotonic monotonic
ret void
}
@@ -1722,7 +1722,7 @@ define void @test101(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acquire monotonic
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") acquire monotonic
ret void
}
@@ -1743,7 +1743,7 @@ define void @test102(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acquire acquire
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") acquire acquire
ret void
}
@@ -1764,7 +1764,7 @@ define void @test103(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread release monotonic
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") release monotonic
ret void
}
@@ -1785,7 +1785,7 @@ define void @test104(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread release acquire
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") release acquire
ret void
}
@@ -1807,7 +1807,7 @@ define void @test105(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acq_rel monotonic
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") acq_rel monotonic
ret void
}
@@ -1829,7 +1829,7 @@ define void @test106(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acq_rel acquire
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") acq_rel acquire
ret void
}
@@ -1851,7 +1851,7 @@ define void @test107(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread seq_cst monotonic
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") seq_cst monotonic
ret void
}
@@ -1873,7 +1873,7 @@ define void @test108(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread seq_cst acquire
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") seq_cst acquire
ret void
}
@@ -1895,7 +1895,7 @@ define void @test109(i32* %ptr, i32 %cmp, i32 %val) {
; PPC64LE-NEXT: stwcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread seq_cst seq_cst
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") seq_cst seq_cst
ret void
}
@@ -1915,7 +1915,7 @@ define void @test110(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread monotonic monotonic
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") monotonic monotonic
ret void
}
@@ -1936,7 +1936,7 @@ define void @test111(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acquire monotonic
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") acquire monotonic
ret void
}
@@ -1957,7 +1957,7 @@ define void @test112(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acquire acquire
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") acquire acquire
ret void
}
@@ -1978,7 +1978,7 @@ define void @test113(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread release monotonic
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") release monotonic
ret void
}
@@ -1999,7 +1999,7 @@ define void @test114(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: # BB#3:
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread release acquire
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") release acquire
ret void
}
@@ -2021,7 +2021,7 @@ define void @test115(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acq_rel monotonic
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") acq_rel monotonic
ret void
}
@@ -2043,7 +2043,7 @@ define void @test116(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acq_rel acquire
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") acq_rel acquire
ret void
}
@@ -2065,7 +2065,7 @@ define void @test117(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread seq_cst monotonic
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") seq_cst monotonic
ret void
}
@@ -2087,7 +2087,7 @@ define void @test118(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread seq_cst acquire
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") seq_cst acquire
ret void
}
@@ -2109,7 +2109,7 @@ define void @test119(i64* %ptr, i64 %cmp, i64 %val) {
; PPC64LE-NEXT: stdcx. 6, 0, 3
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread seq_cst seq_cst
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") seq_cst seq_cst
ret void
}
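Each of these cmpxchg tests expands on PowerPC to a load-reserve/store-conditional loop, which is what the CHECK lines are matching. Schematically, for the i32 case (labels and register numbers illustrative):

.Lloop:
        lwarx 5, 0, 3        # load-reserved old value from the pointer in r3
        cmpw 5, 4            # compare against the expected value in r4
        bne 0, .Lexit        # mismatch: fail the exchange
        stwcx. 6, 0, 3       # store-conditional the new value in r6
        bne 0, .Lloop        # reservation lost: retry
.Lexit:

The lwsync/sync instructions visible around the loops in the CHECK lines implement the acquire/release/seq_cst orderings; the monotonic variants omit them.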
@@ -5847,7 +5847,7 @@ define i8 @test340(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -5862,7 +5862,7 @@ define i8 @test341(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -5877,7 +5877,7 @@ define i8 @test342(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -5893,7 +5893,7 @@ define i8 @test343(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -5909,7 +5909,7 @@ define i8 @test344(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -5923,7 +5923,7 @@ define i16 @test345(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -5938,7 +5938,7 @@ define i16 @test346(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -5953,7 +5953,7 @@ define i16 @test347(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -5969,7 +5969,7 @@ define i16 @test348(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -5985,7 +5985,7 @@ define i16 @test349(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -5999,7 +5999,7 @@ define i32 @test350(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -6014,7 +6014,7 @@ define i32 @test351(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -6029,7 +6029,7 @@ define i32 @test352(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -6045,7 +6045,7 @@ define i32 @test353(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -6061,7 +6061,7 @@ define i32 @test354(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -6075,7 +6075,7 @@ define i64 @test355(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -6090,7 +6090,7 @@ define i64 @test356(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -6105,7 +6105,7 @@ define i64 @test357(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -6121,7 +6121,7 @@ define i64 @test358(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -6137,7 +6137,7 @@ define i64 @test359(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
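The atomicrmw tests below follow the same load-reserve/store-conditional pattern, except that the new value is computed inside the loop; for add on i32 this is roughly (again illustrative):

.Lloop:
        lwarx 5, 0, 3        # old value
        add 6, 4, 5          # new value = %val + old
        stwcx. 6, 0, 3       # try to commit the new value
        bne 0, .Lloop        # reservation lost: retry
        mr 3, 5              # return the old value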
@@ -6152,7 +6152,7 @@ define i8 @test360(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -6168,7 +6168,7 @@ define i8 @test361(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -6184,7 +6184,7 @@ define i8 @test362(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -6201,7 +6201,7 @@ define i8 @test363(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -6218,7 +6218,7 @@ define i8 @test364(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -6233,7 +6233,7 @@ define i16 @test365(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -6249,7 +6249,7 @@ define i16 @test366(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -6265,7 +6265,7 @@ define i16 @test367(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -6282,7 +6282,7 @@ define i16 @test368(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -6299,7 +6299,7 @@ define i16 @test369(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -6314,7 +6314,7 @@ define i32 @test370(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -6330,7 +6330,7 @@ define i32 @test371(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -6346,7 +6346,7 @@ define i32 @test372(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -6363,7 +6363,7 @@ define i32 @test373(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -6380,7 +6380,7 @@ define i32 @test374(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -6395,7 +6395,7 @@ define i64 @test375(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -6411,7 +6411,7 @@ define i64 @test376(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -6427,7 +6427,7 @@ define i64 @test377(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -6444,7 +6444,7 @@ define i64 @test378(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -6461,7 +6461,7 @@ define i64 @test379(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -6476,7 +6476,7 @@ define i8 @test380(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -6492,7 +6492,7 @@ define i8 @test381(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -6508,7 +6508,7 @@ define i8 @test382(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -6525,7 +6525,7 @@ define i8 @test383(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -6542,7 +6542,7 @@ define i8 @test384(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -6557,7 +6557,7 @@ define i16 @test385(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -6573,7 +6573,7 @@ define i16 @test386(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -6589,7 +6589,7 @@ define i16 @test387(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -6606,7 +6606,7 @@ define i16 @test388(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -6623,7 +6623,7 @@ define i16 @test389(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -6638,7 +6638,7 @@ define i32 @test390(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -6654,7 +6654,7 @@ define i32 @test391(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -6670,7 +6670,7 @@ define i32 @test392(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -6687,7 +6687,7 @@ define i32 @test393(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -6704,7 +6704,7 @@ define i32 @test394(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -6719,7 +6719,7 @@ define i64 @test395(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -6735,7 +6735,7 @@ define i64 @test396(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -6751,7 +6751,7 @@ define i64 @test397(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -6768,7 +6768,7 @@ define i64 @test398(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -6785,7 +6785,7 @@ define i64 @test399(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -6800,7 +6800,7 @@ define i8 @test400(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -6816,7 +6816,7 @@ define i8 @test401(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -6832,7 +6832,7 @@ define i8 @test402(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -6849,7 +6849,7 @@ define i8 @test403(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -6866,7 +6866,7 @@ define i8 @test404(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -6881,7 +6881,7 @@ define i16 @test405(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -6897,7 +6897,7 @@ define i16 @test406(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -6913,7 +6913,7 @@ define i16 @test407(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -6930,7 +6930,7 @@ define i16 @test408(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -6947,7 +6947,7 @@ define i16 @test409(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -6962,7 +6962,7 @@ define i32 @test410(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -6978,7 +6978,7 @@ define i32 @test411(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -6994,7 +6994,7 @@ define i32 @test412(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -7011,7 +7011,7 @@ define i32 @test413(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -7028,7 +7028,7 @@ define i32 @test414(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -7043,7 +7043,7 @@ define i64 @test415(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -7059,7 +7059,7 @@ define i64 @test416(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -7075,7 +7075,7 @@ define i64 @test417(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -7092,7 +7092,7 @@ define i64 @test418(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -7109,7 +7109,7 @@ define i64 @test419(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -7124,7 +7124,7 @@ define i8 @test420(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -7140,7 +7140,7 @@ define i8 @test421(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -7156,7 +7156,7 @@ define i8 @test422(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -7173,7 +7173,7 @@ define i8 @test423(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -7190,7 +7190,7 @@ define i8 @test424(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -7205,7 +7205,7 @@ define i16 @test425(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -7221,7 +7221,7 @@ define i16 @test426(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -7237,7 +7237,7 @@ define i16 @test427(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -7254,7 +7254,7 @@ define i16 @test428(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -7271,7 +7271,7 @@ define i16 @test429(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -7286,7 +7286,7 @@ define i32 @test430(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -7302,7 +7302,7 @@ define i32 @test431(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -7318,7 +7318,7 @@ define i32 @test432(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -7335,7 +7335,7 @@ define i32 @test433(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -7352,7 +7352,7 @@ define i32 @test434(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -7367,7 +7367,7 @@ define i64 @test435(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -7383,7 +7383,7 @@ define i64 @test436(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -7399,7 +7399,7 @@ define i64 @test437(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -7416,7 +7416,7 @@ define i64 @test438(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -7433,7 +7433,7 @@ define i64 @test439(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -7448,7 +7448,7 @@ define i8 @test440(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -7464,7 +7464,7 @@ define i8 @test441(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -7480,7 +7480,7 @@ define i8 @test442(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -7497,7 +7497,7 @@ define i8 @test443(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -7514,7 +7514,7 @@ define i8 @test444(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -7529,7 +7529,7 @@ define i16 @test445(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -7545,7 +7545,7 @@ define i16 @test446(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -7561,7 +7561,7 @@ define i16 @test447(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -7578,7 +7578,7 @@ define i16 @test448(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -7595,7 +7595,7 @@ define i16 @test449(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -7610,7 +7610,7 @@ define i32 @test450(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -7626,7 +7626,7 @@ define i32 @test451(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -7642,7 +7642,7 @@ define i32 @test452(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -7659,7 +7659,7 @@ define i32 @test453(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -7676,7 +7676,7 @@ define i32 @test454(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -7691,7 +7691,7 @@ define i64 @test455(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -7707,7 +7707,7 @@ define i64 @test456(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -7723,7 +7723,7 @@ define i64 @test457(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -7740,7 +7740,7 @@ define i64 @test458(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -7757,7 +7757,7 @@ define i64 @test459(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -7772,7 +7772,7 @@ define i8 @test460(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -7788,7 +7788,7 @@ define i8 @test461(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -7804,7 +7804,7 @@ define i8 @test462(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -7821,7 +7821,7 @@ define i8 @test463(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -7838,7 +7838,7 @@ define i8 @test464(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -7853,7 +7853,7 @@ define i16 @test465(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -7869,7 +7869,7 @@ define i16 @test466(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -7885,7 +7885,7 @@ define i16 @test467(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -7902,7 +7902,7 @@ define i16 @test468(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -7919,7 +7919,7 @@ define i16 @test469(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -7934,7 +7934,7 @@ define i32 @test470(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -7950,7 +7950,7 @@ define i32 @test471(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -7966,7 +7966,7 @@ define i32 @test472(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -7983,7 +7983,7 @@ define i32 @test473(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -8000,7 +8000,7 @@ define i32 @test474(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -8015,7 +8015,7 @@ define i64 @test475(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -8031,7 +8031,7 @@ define i64 @test476(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -8047,7 +8047,7 @@ define i64 @test477(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: # BB#2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -8064,7 +8064,7 @@ define i64 @test478(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -8081,7 +8081,7 @@ define i64 @test479(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -8099,7 +8099,7 @@ define i8 @test480(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB480_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw max i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -8118,7 +8118,7 @@ define i8 @test481(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB481_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw max i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -8137,7 +8137,7 @@ define i8 @test482(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB482_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw max i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -8157,7 +8157,7 @@ define i8 @test483(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw max i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -8177,7 +8177,7 @@ define i8 @test484(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw max i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -8195,7 +8195,7 @@ define i16 @test485(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB485_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw max i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -8214,7 +8214,7 @@ define i16 @test486(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB486_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw max i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -8233,7 +8233,7 @@ define i16 @test487(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB487_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw max i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -8253,7 +8253,7 @@ define i16 @test488(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw max i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -8273,7 +8273,7 @@ define i16 @test489(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw max i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -8290,7 +8290,7 @@ define i32 @test490(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB490_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw max i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -8308,7 +8308,7 @@ define i32 @test491(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB491_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw max i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -8326,7 +8326,7 @@ define i32 @test492(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB492_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw max i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -8345,7 +8345,7 @@ define i32 @test493(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw max i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -8364,7 +8364,7 @@ define i32 @test494(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw max i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -8381,7 +8381,7 @@ define i64 @test495(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB495_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw max i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -8399,7 +8399,7 @@ define i64 @test496(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB496_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw max i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -8417,7 +8417,7 @@ define i64 @test497(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB497_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw max i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -8436,7 +8436,7 @@ define i64 @test498(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw max i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -8455,7 +8455,7 @@ define i64 @test499(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw max i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -8473,7 +8473,7 @@ define i8 @test500(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB500_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw min i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -8492,7 +8492,7 @@ define i8 @test501(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB501_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw min i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -8511,7 +8511,7 @@ define i8 @test502(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB502_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw min i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -8531,7 +8531,7 @@ define i8 @test503(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw min i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -8551,7 +8551,7 @@ define i8 @test504(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw min i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -8569,7 +8569,7 @@ define i16 @test505(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB505_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw min i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -8588,7 +8588,7 @@ define i16 @test506(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB506_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw min i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -8607,7 +8607,7 @@ define i16 @test507(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB507_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw min i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -8627,7 +8627,7 @@ define i16 @test508(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw min i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -8647,7 +8647,7 @@ define i16 @test509(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw min i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -8664,7 +8664,7 @@ define i32 @test510(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB510_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw min i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -8682,7 +8682,7 @@ define i32 @test511(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB511_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw min i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -8700,7 +8700,7 @@ define i32 @test512(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB512_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw min i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -8719,7 +8719,7 @@ define i32 @test513(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw min i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -8738,7 +8738,7 @@ define i32 @test514(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw min i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -8755,7 +8755,7 @@ define i64 @test515(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB515_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw min i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -8773,7 +8773,7 @@ define i64 @test516(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB516_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw min i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -8791,7 +8791,7 @@ define i64 @test517(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB517_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw min i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -8810,7 +8810,7 @@ define i64 @test518(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw min i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -8829,7 +8829,7 @@ define i64 @test519(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw min i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -8846,7 +8846,7 @@ define i8 @test520(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB520_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw umax i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -8864,7 +8864,7 @@ define i8 @test521(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB521_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw umax i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -8882,7 +8882,7 @@ define i8 @test522(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB522_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw umax i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -8901,7 +8901,7 @@ define i8 @test523(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw umax i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -8920,7 +8920,7 @@ define i8 @test524(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw umax i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -8937,7 +8937,7 @@ define i16 @test525(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB525_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw umax i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -8955,7 +8955,7 @@ define i16 @test526(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB526_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw umax i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -8973,7 +8973,7 @@ define i16 @test527(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB527_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw umax i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -8992,7 +8992,7 @@ define i16 @test528(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw umax i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -9011,7 +9011,7 @@ define i16 @test529(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw umax i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -9028,7 +9028,7 @@ define i32 @test530(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB530_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw umax i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -9046,7 +9046,7 @@ define i32 @test531(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB531_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw umax i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -9064,7 +9064,7 @@ define i32 @test532(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB532_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw umax i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -9083,7 +9083,7 @@ define i32 @test533(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw umax i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -9102,7 +9102,7 @@ define i32 @test534(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw umax i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -9119,7 +9119,7 @@ define i64 @test535(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB535_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw umax i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -9137,7 +9137,7 @@ define i64 @test536(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB536_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw umax i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -9155,7 +9155,7 @@ define i64 @test537(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB537_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw umax i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -9174,7 +9174,7 @@ define i64 @test538(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw umax i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -9193,7 +9193,7 @@ define i64 @test539(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw umax i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
@@ -9210,7 +9210,7 @@ define i8 @test540(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB540_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val singlethread monotonic
+ %ret = atomicrmw umin i8* %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
@@ -9228,7 +9228,7 @@ define i8 @test541(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB541_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val singlethread acquire
+ %ret = atomicrmw umin i8* %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
@@ -9246,7 +9246,7 @@ define i8 @test542(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB542_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val singlethread release
+ %ret = atomicrmw umin i8* %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
@@ -9265,7 +9265,7 @@ define i8 @test543(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val singlethread acq_rel
+ %ret = atomicrmw umin i8* %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
@@ -9284,7 +9284,7 @@ define i8 @test544(i8* %ptr, i8 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val singlethread seq_cst
+ %ret = atomicrmw umin i8* %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
@@ -9301,7 +9301,7 @@ define i16 @test545(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB545_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val singlethread monotonic
+ %ret = atomicrmw umin i16* %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
@@ -9319,7 +9319,7 @@ define i16 @test546(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB546_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val singlethread acquire
+ %ret = atomicrmw umin i16* %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
@@ -9337,7 +9337,7 @@ define i16 @test547(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB547_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val singlethread release
+ %ret = atomicrmw umin i16* %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
@@ -9356,7 +9356,7 @@ define i16 @test548(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val singlethread acq_rel
+ %ret = atomicrmw umin i16* %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
@@ -9375,7 +9375,7 @@ define i16 @test549(i16* %ptr, i16 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val singlethread seq_cst
+ %ret = atomicrmw umin i16* %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
@@ -9392,7 +9392,7 @@ define i32 @test550(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB550_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val singlethread monotonic
+ %ret = atomicrmw umin i32* %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
@@ -9410,7 +9410,7 @@ define i32 @test551(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB551_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val singlethread acquire
+ %ret = atomicrmw umin i32* %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
@@ -9428,7 +9428,7 @@ define i32 @test552(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB552_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val singlethread release
+ %ret = atomicrmw umin i32* %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
@@ -9447,7 +9447,7 @@ define i32 @test553(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val singlethread acq_rel
+ %ret = atomicrmw umin i32* %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
@@ -9466,7 +9466,7 @@ define i32 @test554(i32* %ptr, i32 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val singlethread seq_cst
+ %ret = atomicrmw umin i32* %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
@@ -9483,7 +9483,7 @@ define i64 @test555(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB555_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val singlethread monotonic
+ %ret = atomicrmw umin i64* %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
@@ -9501,7 +9501,7 @@ define i64 @test556(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB556_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val singlethread acquire
+ %ret = atomicrmw umin i64* %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
@@ -9519,7 +9519,7 @@ define i64 @test557(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB557_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val singlethread release
+ %ret = atomicrmw umin i64* %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
@@ -9538,7 +9538,7 @@ define i64 @test558(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val singlethread acq_rel
+ %ret = atomicrmw umin i64* %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
@@ -9557,7 +9557,7 @@ define i64 @test559(i64* %ptr, i64 %val) {
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val singlethread seq_cst
+ %ret = atomicrmw umin i64* %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
diff --git a/test/CodeGen/PowerPC/bitreverse.ll b/test/CodeGen/PowerPC/bitreverse.ll
deleted file mode 100644
index dca7340d035d..000000000000
--- a/test/CodeGen/PowerPC/bitreverse.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc -verify-machineinstrs -march=ppc64 %s -o - | FileCheck %s
-
-; These tests just check that the plumbing is in place for @llvm.bitreverse. The
-; actual output is massive at the moment as llvm.bitreverse is not yet legal.
-
-declare <2 x i16> @llvm.bitreverse.v2i16(<2 x i16>) readnone
-
-define <2 x i16> @f(<2 x i16> %a) {
-; CHECK-LABEL: f:
-; CHECK: rlwinm
- %b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %a)
- ret <2 x i16> %b
-}
-
-declare i8 @llvm.bitreverse.i8(i8) readnone
-
-define i8 @g(i8 %a) {
-; CHECK-LABEL: g:
-; CHECK: rlwinm
-; CHECK: rlwimi
- %b = call i8 @llvm.bitreverse.i8(i8 %a)
- ret i8 %b
-}
diff --git a/test/CodeGen/PowerPC/build-vector-tests.ll b/test/CodeGen/PowerPC/build-vector-tests.ll
index c42f677d17ab..60bec4d18f12 100644
--- a/test/CodeGen/PowerPC/build-vector-tests.ll
+++ b/test/CodeGen/PowerPC/build-vector-tests.ll
@@ -1028,7 +1028,7 @@ entry:
; P9LE: vperm
; P9LE: blr
; P8BE: sldi {{r[0-9]+}}, r4, 2
-; P8BE-DAG: lxvw4x {{v[0-9]+}}, r3,
+; P8BE-DAG: lxvw4x {{v[0-9]+}}, 0, r3
; P8BE-DAG: lxvw4x
; P8BE: vperm
; P8BE: blr
@@ -2187,7 +2187,7 @@ entry:
; P9LE: vperm
; P9LE: blr
; P8BE-DAG: sldi {{r[0-9]+}}, r4, 2
-; P8BE-DAG: lxvw4x {{v[0-9]+}}, r3
+; P8BE-DAG: lxvw4x {{v[0-9]+}}, 0, r3
; P8BE-DAG: lxvw4x
; P8BE: vperm
; P8BE: blr
diff --git a/test/CodeGen/PowerPC/ppc-ctr-dead-code.ll b/test/CodeGen/PowerPC/ppc-ctr-dead-code.ll
new file mode 100644
index 000000000000..71755f722cb2
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc-ctr-dead-code.ll
@@ -0,0 +1,38 @@
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr9 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -verify-machineinstrs | FileCheck %s
+
+; Function Attrs: norecurse nounwind readonly
+define signext i32 @limit_loop(i32 signext %iters, i32* nocapture readonly %vec, i32 signext %limit) local_unnamed_addr {
+entry:
+ %cmp5 = icmp sgt i32 %iters, 0
+ br i1 %cmp5, label %for.body.preheader, label %cleanup
+
+for.body.preheader: ; preds = %entry
+ %0 = sext i32 %iters to i64
+ br label %for.body
+
+for.cond: ; preds = %for.body
+ %cmp = icmp slt i64 %indvars.iv.next, %0
+ br i1 %cmp, label %for.body, label %cleanup
+
+for.body: ; preds = %for.body.preheader, %for.cond
+ %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.cond ]
+ %arrayidx = getelementptr inbounds i32, i32* %vec, i64 %indvars.iv
+ %1 = load i32, i32* %arrayidx, align 4
+ %cmp1 = icmp slt i32 %1, %limit
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ br i1 %cmp1, label %for.cond, label %cleanup
+
+cleanup: ; preds = %for.body, %for.cond, %entry
+ %2 = phi i32 [ 0, %entry ], [ 0, %for.cond ], [ 1, %for.body ]
+ ret i32 %2
+; CHECK-LABEL: limit_loop
+; CHECK: mtctr
+; CHECK-NOT: addi {{[0-9]+}}, {{[0-9]+}}, 1
+; CHECK: bdnz
+; CHECK: blr
+}
+
+
diff --git a/test/CodeGen/PowerPC/ppc-redzone-alignment-bug.ll b/test/CodeGen/PowerPC/ppc-redzone-alignment-bug.ll
new file mode 100644
index 000000000000..87b45beeab7e
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc-redzone-alignment-bug.ll
@@ -0,0 +1,32 @@
+; Note: the alignment formula for a negative number x should be y = x & ~(n-1), not y = (x + (n-1)) & ~(n-1).
+; After the patch https://reviews.llvm.org/D34337, this saves 16 bytes in the best case.
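+; As an editorial illustration of the two formulas (values chosen here, not taken
+; from this test): with n = 16 and x = -520, the correct formula gives
+;   y = -520 & ~15 = -528, rounding toward -inf as a downward-growing stack needs,
+; while the positive-number formula gives
+;   y = (-520 + 15) & ~15 = -505 & ~15 = -512, rounding toward zero and leaving
+; the offset under-aligned in the downward direction.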
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s -check-prefix=CHECK-BE
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s -check-prefix=CHECK-LE
+
+define signext i32 @bar(i32 signext %ii) {
+entry:
+ %0 = tail call i32 asm sideeffect "add $0, $1, $2\0A", "=r,r,r,~{f14},~{r15},~{v20}"(i32 %ii, i32 10)
+ ret i32 %0
+; Before the fix by patch D34337:
+; stdu 1, -544(1)
+; std 15, 264(1)
+; stfd 14, 400(1)
+; stdu 1, -560(1)
+; std 15, 280(1)
+; stfd 14, 416(1)
+
+; After the fix by patch D34337:
+; CHECK-LE: stdu 1, -528(1)
+; CHECK-LE: std 15, 248(1)
+; CHECK-LE: stfd 14, 384(1)
+; CHECK-BE: stdu 1, -544(1)
+; CHECK-BE: std 15, 264(1)
+; CHECK-BE: stfd 14, 400(1)
+}
+
+define signext i32 @foo() {
+entry:
+ %call = tail call signext i32 @bar(i32 signext 5)
+ ret i32 %call
+}
+
diff --git a/test/CodeGen/PowerPC/ppc64le-smallarg.ll b/test/CodeGen/PowerPC/ppc64le-smallarg.ll
index 0e871c358869..3a425406d043 100644
--- a/test/CodeGen/PowerPC/ppc64le-smallarg.ll
+++ b/test/CodeGen/PowerPC/ppc64le-smallarg.ll
@@ -53,8 +53,8 @@ entry:
ret void
}
; CHECK: @caller2
-; CHECK: li [[TOCOFF:[0-9]+]], 136
-; CHECK: stxsspx {{[0-9]+}}, 1, [[TOCOFF]]
+; CHECK: addi [[TOCOFF:[0-9]+]], {{[0-9]+}}, 136
+; CHECK: stxsspx {{[0-9]+}}, 0, [[TOCOFF]]
; CHECK: bl test2
declare float @test2(float, float, float, float, float, float, float, float, float, float, float, float, float, float)
diff --git a/test/CodeGen/PowerPC/pr33093.ll b/test/CodeGen/PowerPC/pr33093.ll
new file mode 100644
index 000000000000..5212973f8317
--- /dev/null
+++ b/test/CodeGen/PowerPC/pr33093.ll
@@ -0,0 +1,165 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+
+define zeroext i32 @ReverseBits(i32 zeroext %n) {
+; CHECK-LABEL: ReverseBits:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: lis 4, -21846
+; CHECK-NEXT: lis 5, 21845
+; CHECK-NEXT: slwi 6, 3, 1
+; CHECK-NEXT: srwi 3, 3, 1
+; CHECK-NEXT: lis 7, -13108
+; CHECK-NEXT: lis 8, 13107
+; CHECK-NEXT: ori 4, 4, 43690
+; CHECK-NEXT: ori 5, 5, 21845
+; CHECK-NEXT: lis 10, -3856
+; CHECK-NEXT: lis 11, 3855
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: and 4, 6, 4
+; CHECK-NEXT: ori 5, 8, 13107
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: ori 4, 7, 52428
+; CHECK-NEXT: slwi 9, 3, 2
+; CHECK-NEXT: srwi 3, 3, 2
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: and 4, 9, 4
+; CHECK-NEXT: ori 5, 11, 3855
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: ori 4, 10, 61680
+; CHECK-NEXT: slwi 12, 3, 4
+; CHECK-NEXT: srwi 3, 3, 4
+; CHECK-NEXT: and 4, 12, 4
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: rotlwi 4, 3, 24
+; CHECK-NEXT: rlwimi 4, 3, 8, 8, 15
+; CHECK-NEXT: rlwimi 4, 3, 8, 24, 31
+; CHECK-NEXT: rldicl 3, 4, 0, 32
+; CHECK-NEXT: clrldi 3, 3, 32
+; CHECK-NEXT: blr
+entry:
+ %shr = lshr i32 %n, 1
+ %and = and i32 %shr, 1431655765
+ %and1 = shl i32 %n, 1
+ %shl = and i32 %and1, -1431655766
+ %or = or i32 %and, %shl
+ %shr2 = lshr i32 %or, 2
+ %and3 = and i32 %shr2, 858993459
+ %and4 = shl i32 %or, 2
+ %shl5 = and i32 %and4, -858993460
+ %or6 = or i32 %and3, %shl5
+ %shr7 = lshr i32 %or6, 4
+ %and8 = and i32 %shr7, 252645135
+ %and9 = shl i32 %or6, 4
+ %shl10 = and i32 %and9, -252645136
+ %or11 = or i32 %and8, %shl10
+ %shr13 = lshr i32 %or11, 24
+ %and14 = lshr i32 %or11, 8
+ %shr15 = and i32 %and14, 65280
+ %and17 = shl i32 %or11, 8
+ %shl18 = and i32 %and17, 16711680
+ %shl21 = shl i32 %or11, 24
+ %or16 = or i32 %shl21, %shr13
+ %or19 = or i32 %or16, %shr15
+ %or22 = or i32 %or19, %shl18
+ ret i32 %or22
+}
+
+define i64 @ReverseBits64(i64 %n) {
+; CHECK-LABEL: ReverseBits64:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: lis 4, -21846
+; CHECK-NEXT: lis 5, 21845
+; CHECK-NEXT: lis 6, -13108
+; CHECK-NEXT: lis 7, 13107
+; CHECK-NEXT: sldi 8, 3, 1
+; CHECK-NEXT: rldicl 3, 3, 63, 1
+; CHECK-NEXT: ori 4, 4, 43690
+; CHECK-NEXT: ori 5, 5, 21845
+; CHECK-NEXT: ori 6, 6, 52428
+; CHECK-NEXT: ori 7, 7, 13107
+; CHECK-NEXT: sldi 4, 4, 32
+; CHECK-NEXT: sldi 5, 5, 32
+; CHECK-NEXT: oris 4, 4, 43690
+; CHECK-NEXT: oris 5, 5, 21845
+; CHECK-NEXT: ori 4, 4, 43690
+; CHECK-NEXT: ori 5, 5, 21845
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: sldi 5, 6, 32
+; CHECK-NEXT: sldi 6, 7, 32
+; CHECK-NEXT: and 4, 8, 4
+; CHECK-NEXT: lis 7, 3855
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: oris 12, 5, 52428
+; CHECK-NEXT: oris 9, 6, 13107
+; CHECK-NEXT: lis 6, -3856
+; CHECK-NEXT: ori 7, 7, 3855
+; CHECK-NEXT: sldi 8, 3, 2
+; CHECK-NEXT: ori 4, 12, 52428
+; CHECK-NEXT: rldicl 3, 3, 62, 2
+; CHECK-NEXT: ori 5, 9, 13107
+; CHECK-NEXT: ori 6, 6, 61680
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: sldi 5, 6, 32
+; CHECK-NEXT: and 4, 8, 4
+; CHECK-NEXT: sldi 6, 7, 32
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: oris 10, 5, 61680
+; CHECK-NEXT: oris 11, 6, 3855
+; CHECK-NEXT: sldi 6, 3, 4
+; CHECK-NEXT: ori 4, 10, 61680
+; CHECK-NEXT: rldicl 3, 3, 60, 4
+; CHECK-NEXT: ori 5, 11, 3855
+; CHECK-NEXT: and 4, 6, 4
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: rldicl 4, 3, 32, 32
+; CHECK-NEXT: rlwinm 6, 3, 24, 0, 31
+; CHECK-NEXT: rlwinm 5, 4, 24, 0, 31
+; CHECK-NEXT: rlwimi 6, 3, 8, 8, 15
+; CHECK-NEXT: rlwimi 5, 4, 8, 8, 15
+; CHECK-NEXT: rlwimi 6, 3, 8, 24, 31
+; CHECK-NEXT: rlwimi 5, 4, 8, 24, 31
+; CHECK-NEXT: sldi 12, 5, 32
+; CHECK-NEXT: or 3, 12, 6
+; CHECK-NEXT: blr
+entry:
+ %shr = lshr i64 %n, 1
+ %and = and i64 %shr, 6148914691236517205
+ %and1 = shl i64 %n, 1
+ %shl = and i64 %and1, -6148914691236517206
+ %or = or i64 %and, %shl
+ %shr2 = lshr i64 %or, 2
+ %and3 = and i64 %shr2, 3689348814741910323
+ %and4 = shl i64 %or, 2
+ %shl5 = and i64 %and4, -3689348814741910324
+ %or6 = or i64 %and3, %shl5
+ %shr7 = lshr i64 %or6, 4
+ %and8 = and i64 %shr7, 1085102592571150095
+ %and9 = shl i64 %or6, 4
+ %shl10 = and i64 %and9, -1085102592571150096
+ %or11 = or i64 %and8, %shl10
+ %shr13 = lshr i64 %or11, 56
+ %and14 = lshr i64 %or11, 40
+ %shr15 = and i64 %and14, 65280
+ %and17 = lshr i64 %or11, 24
+ %shr18 = and i64 %and17, 16711680
+ %and20 = lshr i64 %or11, 8
+ %shr21 = and i64 %and20, 4278190080
+ %and23 = shl i64 %or11, 8
+ %shl24 = and i64 %and23, 1095216660480
+ %and26 = shl i64 %or11, 24
+ %shl27 = and i64 %and26, 280375465082880
+ %and29 = shl i64 %or11, 40
+ %shl30 = and i64 %and29, 71776119061217280
+ %shl33 = shl i64 %or11, 56
+ %or16 = or i64 %shl33, %shr13
+ %or19 = or i64 %or16, %shr15
+ %or22 = or i64 %or19, %shr18
+ %or25 = or i64 %or22, %shr21
+ %or28 = or i64 %or25, %shl24
+ %or31 = or i64 %or28, %shl27
+ %or34 = or i64 %or31, %shl30
+ ret i64 %or34
+}
diff --git a/test/CodeGen/PowerPC/select-addrRegRegOnly.ll b/test/CodeGen/PowerPC/select-addrRegRegOnly.ll
new file mode 100644
index 000000000000..f880d1faf9d9
--- /dev/null
+++ b/test/CodeGen/PowerPC/select-addrRegRegOnly.ll
@@ -0,0 +1,37 @@
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64-unknown-unknown -verify-machineinstrs < %s | FileCheck %s
+
+; Function Attrs: norecurse nounwind readonly
+define float @testSingleAccess(i32* nocapture readonly %arr) local_unnamed_addr #0 {
+; CHECK-LABEL: testSingleAccess:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addi 3, 3, 8
+; CHECK-NEXT: lxsiwax 0, 0, 3
+; CHECK-NEXT: xscvsxdsp 1, 0
+; CHECK-NEXT: blr
+entry:
+ %arrayidx = getelementptr inbounds i32, i32* %arr, i64 2
+ %0 = load i32, i32* %arrayidx, align 4
+ %conv = sitofp i32 %0 to float
+ ret float %conv
+}
+
+; Function Attrs: norecurse nounwind readonly
+define float @testMultipleAccess(i32* nocapture readonly %arr) local_unnamed_addr #0 {
+; CHECK-LABEL: testMultipleAccess:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: lwz 4, 8(3)
+; CHECK-NEXT: lwz 12, 12(3)
+; CHECK-NEXT: add 3, 12, 4
+; CHECK-NEXT: mtvsrwa 0, 3
+; CHECK-NEXT: xscvsxdsp 1, 0
+; CHECK-NEXT: blr
+entry:
+ %arrayidx = getelementptr inbounds i32, i32* %arr, i64 2
+ %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, i32* %arr, i64 3
+ %1 = load i32, i32* %arrayidx1, align 4
+ %add = add nsw i32 %1, %0
+ %conv = sitofp i32 %add to float
+ ret float %conv
+}
diff --git a/test/CodeGen/PowerPC/svr4-redzone.ll b/test/CodeGen/PowerPC/svr4-redzone.ll
index 7bb6cc180c96..26c4410ded6d 100644
--- a/test/CodeGen/PowerPC/svr4-redzone.ll
+++ b/test/CodeGen/PowerPC/svr4-redzone.ll
@@ -29,11 +29,11 @@ entry:
define i8* @bigstack() nounwind {
entry:
- %0 = alloca i8, i32 230
+ %0 = alloca i8, i32 290
ret i8* %0
}
; PPC32-LABEL: bigstack:
-; PPC32: stwu 1, -240(1)
+; PPC32: stwu 1, -304(1)
; PPC64-LABEL: bigstack:
-; PPC64: stdu 1, -288(1)
+; PPC64: stdu 1, -352(1)
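+; (Editorial note on the arithmetic, assuming the usual linkage areas of 8 bytes
+; for 32-bit SVR4 and 48 bytes for 64-bit ELFv1: 290 + 8 = 298 rounds up to 304,
+; and 290 + 48 = 338 rounds up to 352, at 16-byte stack alignment.)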
diff --git a/test/CodeGen/PowerPC/tailcall1-64.ll b/test/CodeGen/PowerPC/tailcall1-64.ll
index 3dc2672556ea..58ab0bce309c 100644
--- a/test/CodeGen/PowerPC/tailcall1-64.ll
+++ b/test/CodeGen/PowerPC/tailcall1-64.ll
@@ -1,4 +1,5 @@
-; RUN: llc -relocation-model=static -verify-machineinstrs < %s -march=ppc64 -tailcallopt | grep TC_RETURNd8
+; RUN: llc -relocation-model=static -verify-machineinstrs < %s -mtriple=ppc64-- -tailcallopt | grep TC_RETURNd8
+; RUN: llc -relocation-model=static -verify-machineinstrs -mtriple=ppc64-- < %s | FileCheck %s
define fastcc i32 @tailcallee(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
entry:
ret i32 %a3
@@ -6,6 +7,8 @@ entry:
define fastcc i32 @tailcaller(i32 %in1, i32 %in2) {
entry:
- %tmp11 = tail call fastcc i32 @tailcallee( i32 %in1, i32 %in2, i32 %in1, i32 %in2 ) ; <i32> [#uses=1]
+ %tmp11 = tail call fastcc i32 @tailcallee( i32 %in1, i32 %in2, i32 %in1, i32 %in2 )
ret i32 %tmp11
+; CHECK-LABEL: tailcaller
+; CHECK-NOT: stdu
}
diff --git a/test/CodeGen/PowerPC/testBitReverse.ll b/test/CodeGen/PowerPC/testBitReverse.ll
new file mode 100644
index 000000000000..6993d17ad8f3
--- /dev/null
+++ b/test/CodeGen/PowerPC/testBitReverse.ll
@@ -0,0 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+declare i32 @llvm.bitreverse.i32(i32)
+define i32 @testBitReverseIntrinsicI32(i32 %arg) {
+; CHECK-LABEL: testBitReverseIntrinsicI32:
+; CHECK: # BB#0:
+; CHECK-NEXT: lis 4, -21846
+; CHECK-NEXT: lis 5, 21845
+; CHECK-NEXT: slwi 6, 3, 1
+; CHECK-NEXT: srwi 3, 3, 1
+; CHECK-NEXT: lis 7, -13108
+; CHECK-NEXT: lis 8, 13107
+; CHECK-NEXT: ori 4, 4, 43690
+; CHECK-NEXT: ori 5, 5, 21845
+; CHECK-NEXT: lis 10, -3856
+; CHECK-NEXT: lis 11, 3855
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: and 4, 6, 4
+; CHECK-NEXT: ori 5, 8, 13107
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: ori 4, 7, 52428
+; CHECK-NEXT: slwi 9, 3, 2
+; CHECK-NEXT: srwi 3, 3, 2
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: and 4, 9, 4
+; CHECK-NEXT: ori 5, 11, 3855
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: ori 4, 10, 61680
+; CHECK-NEXT: slwi 12, 3, 4
+; CHECK-NEXT: srwi 3, 3, 4
+; CHECK-NEXT: and 4, 12, 4
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: rotlwi 4, 3, 24
+; CHECK-NEXT: rlwimi 4, 3, 8, 8, 15
+; CHECK-NEXT: rlwimi 4, 3, 8, 24, 31
+; CHECK-NEXT: rldicl 3, 4, 0, 32
+; CHECK-NEXT: blr
+ %res = call i32 @llvm.bitreverse.i32(i32 %arg)
+ ret i32 %res
+}
+
+declare i64 @llvm.bitreverse.i64(i64)
+define i64 @testBitReverseIntrinsicI64(i64 %arg) {
+; CHECK-LABEL: testBitReverseIntrinsicI64:
+; CHECK: # BB#0:
+; CHECK-NEXT: lis 4, -21846
+; CHECK-NEXT: lis 5, 21845
+; CHECK-NEXT: lis 6, -13108
+; CHECK-NEXT: lis 7, 13107
+; CHECK-NEXT: sldi 8, 3, 1
+; CHECK-NEXT: rldicl 3, 3, 63, 1
+; CHECK-NEXT: ori 4, 4, 43690
+; CHECK-NEXT: ori 5, 5, 21845
+; CHECK-NEXT: ori 6, 6, 52428
+; CHECK-NEXT: ori 7, 7, 13107
+; CHECK-NEXT: sldi 4, 4, 32
+; CHECK-NEXT: sldi 5, 5, 32
+; CHECK-NEXT: oris 4, 4, 43690
+; CHECK-NEXT: oris 5, 5, 21845
+; CHECK-NEXT: ori 4, 4, 43690
+; CHECK-NEXT: ori 5, 5, 21845
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: sldi 5, 6, 32
+; CHECK-NEXT: sldi 6, 7, 32
+; CHECK-NEXT: and 4, 8, 4
+; CHECK-NEXT: lis 7, 3855
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: oris 12, 5, 52428
+; CHECK-NEXT: oris 9, 6, 13107
+; CHECK-NEXT: lis 6, -3856
+; CHECK-NEXT: ori 7, 7, 3855
+; CHECK-NEXT: sldi 8, 3, 2
+; CHECK-NEXT: ori 4, 12, 52428
+; CHECK-NEXT: rldicl 3, 3, 62, 2
+; CHECK-NEXT: ori 5, 9, 13107
+; CHECK-NEXT: ori 6, 6, 61680
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: sldi 5, 6, 32
+; CHECK-NEXT: and 4, 8, 4
+; CHECK-NEXT: sldi 6, 7, 32
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: oris 10, 5, 61680
+; CHECK-NEXT: oris 11, 6, 3855
+; CHECK-NEXT: sldi 6, 3, 4
+; CHECK-NEXT: ori 4, 10, 61680
+; CHECK-NEXT: rldicl 3, 3, 60, 4
+; CHECK-NEXT: ori 5, 11, 3855
+; CHECK-NEXT: and 4, 6, 4
+; CHECK-NEXT: and 3, 3, 5
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: rldicl 4, 3, 32, 32
+; CHECK-NEXT: rlwinm 6, 3, 24, 0, 31
+; CHECK-NEXT: rlwinm 5, 4, 24, 0, 31
+; CHECK-NEXT: rlwimi 6, 3, 8, 8, 15
+; CHECK-NEXT: rlwimi 5, 4, 8, 8, 15
+; CHECK-NEXT: rlwimi 6, 3, 8, 24, 31
+; CHECK-NEXT: rlwimi 5, 4, 8, 24, 31
+; CHECK-NEXT: sldi 12, 5, 32
+; CHECK-NEXT: or 3, 12, 6
+; CHECK-NEXT: blr
+ %res = call i64 @llvm.bitreverse.i64(i64 %arg)
+ ret i64 %res
+}
diff --git a/test/CodeGen/PowerPC/vec_extract_p9.ll b/test/CodeGen/PowerPC/vec_extract_p9.ll
new file mode 100644
index 000000000000..241209a0e6b7
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_extract_p9.ll
@@ -0,0 +1,167 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-gnu-linux -mcpu=pwr9 < %s | FileCheck %s -check-prefix=CHECK-LE
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-gnu-linux -mcpu=pwr9 < %s | FileCheck %s -check-prefix=CHECK-BE
+
+define zeroext i8 @test1(<16 x i8> %a, i32 signext %index) {
+; CHECK-LE-LABEL: test1:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vextubrx 3, 5, 2
+; CHECK-LE-NEXT: clrldi 3, 3, 56
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test1:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: vextublx 3, 5, 2
+; CHECK-BE-NEXT: clrldi 3, 3, 56
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <16 x i8> %a, i32 %index
+ ret i8 %vecext
+}
+
+define signext i8 @test2(<16 x i8> %a, i32 signext %index) {
+; CHECK-LE-LABEL: test2:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vextubrx 3, 5, 2
+; CHECK-LE-NEXT: extsb 3, 3
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test2:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: vextublx 3, 5, 2
+; CHECK-BE-NEXT: extsb 3, 3
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <16 x i8> %a, i32 %index
+ ret i8 %vecext
+}
+
+define zeroext i16 @test3(<8 x i16> %a, i32 signext %index) {
+; CHECK-LE-LABEL: test3:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: rlwinm 3, 5, 1, 28, 30
+; CHECK-LE-NEXT: vextuhrx 3, 3, 2
+; CHECK-LE-NEXT: clrldi 3, 3, 48
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test3:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: rlwinm 3, 5, 1, 28, 30
+; CHECK-BE-NEXT: vextuhlx 3, 3, 2
+; CHECK-BE-NEXT: clrldi 3, 3, 48
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <8 x i16> %a, i32 %index
+ ret i16 %vecext
+}
+
+define signext i16 @test4(<8 x i16> %a, i32 signext %index) {
+; CHECK-LE-LABEL: test4:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: rlwinm 3, 5, 1, 28, 30
+; CHECK-LE-NEXT: vextuhrx 3, 3, 2
+; CHECK-LE-NEXT: extsh 3, 3
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test4:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: rlwinm 3, 5, 1, 28, 30
+; CHECK-BE-NEXT: vextuhlx 3, 3, 2
+; CHECK-BE-NEXT: extsh 3, 3
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <8 x i16> %a, i32 %index
+ ret i16 %vecext
+}
+
+define zeroext i32 @test5(<4 x i32> %a, i32 signext %index) {
+; CHECK-LE-LABEL: test5:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: rlwinm 3, 5, 2, 28, 29
+; CHECK-LE-NEXT: vextuwrx 3, 3, 2
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test5:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: rlwinm 3, 5, 2, 28, 29
+; CHECK-BE-NEXT: vextuwlx 3, 3, 2
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <4 x i32> %a, i32 %index
+ ret i32 %vecext
+}
+
+define signext i32 @test6(<4 x i32> %a, i32 signext %index) {
+; CHECK-LE-LABEL: test6:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: rlwinm 3, 5, 2, 28, 29
+; CHECK-LE-NEXT: vextuwrx 3, 3, 2
+; CHECK-LE-NEXT: extsw 3, 3
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test6:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: rlwinm 3, 5, 2, 28, 29
+; CHECK-BE-NEXT: vextuwlx 3, 3, 2
+; CHECK-BE-NEXT: extsw 3, 3
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <4 x i32> %a, i32 %index
+ ret i32 %vecext
+}
+
+; Test with an immediate index: the byte offset is materialized directly with li.
+define zeroext i8 @test7(<16 x i8> %a) {
+; CHECK-LE-LABEL: test7:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: li 3, 1
+; CHECK-LE-NEXT: vextubrx 3, 3, 2
+; CHECK-LE-NEXT: clrldi 3, 3, 56
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test7:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: li 3, 1
+; CHECK-BE-NEXT: vextublx 3, 3, 2
+; CHECK-BE-NEXT: clrldi 3, 3, 56
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <16 x i8> %a, i32 1
+ ret i8 %vecext
+}
+
+define zeroext i16 @test8(<8 x i16> %a) {
+; CHECK-LE-LABEL: test8:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: li 3, 2
+; CHECK-LE-NEXT: vextuhrx 3, 3, 2
+; CHECK-LE-NEXT: clrldi 3, 3, 48
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test8:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: li 3, 2
+; CHECK-BE-NEXT: vextuhlx 3, 3, 2
+; CHECK-BE-NEXT: clrldi 3, 3, 48
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <8 x i16> %a, i32 1
+ ret i16 %vecext
+}
+
+define zeroext i32 @test9(<4 x i32> %a) {
+; CHECK-LE-LABEL: test9:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: li 3, 4
+; CHECK-LE-NEXT: vextuwrx 3, 3, 2
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: test9:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: li 3, 4
+; CHECK-BE-NEXT: vextuwlx 3, 3, 2
+; CHECK-BE-NEXT: blr
+
+entry:
+ %vecext = extractelement <4 x i32> %a, i32 1
+ ret i32 %vecext
+}
diff --git a/test/CodeGen/PowerPC/vec_int_ext.ll b/test/CodeGen/PowerPC/vec_int_ext.ll
index 9e1218c423b7..d7bed503318e 100644
--- a/test/CodeGen/PowerPC/vec_int_ext.ll
+++ b/test/CodeGen/PowerPC/vec_int_ext.ll
@@ -1,12 +1,18 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -verify-machineinstrs -mcpu=pwr9 < %s | FileCheck %s -check-prefix=PWR9
-target triple = "powerpc64le-unknown-linux-gnu"
-
-define <4 x i32> @vextsb2w(<16 x i8> %a) {
-; PWR9-LABEL: vextsb2w:
-; PWR9: # BB#0: # %entry
-; PWR9-NEXT: vextsb2w 2, 2
-; PWR9-NEXT: blr
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-gnu-linux -mcpu=pwr9 < %s | FileCheck %s -check-prefix=CHECK-LE
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-gnu-linux -mcpu=pwr9 < %s | FileCheck %s -check-prefix=CHECK-BE
+
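+; The vexts* sign-extension patterns are endian-sensitive: each function below
+; encodes either the little-endian or the big-endian element pattern, so the
+; "wrong" target needs an extra shuffle (vperm or vsldoi) to move the source
+; elements into the expected lanes before the vexts* instruction can be used.
+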
+define <4 x i32> @vextsb2wLE(<16 x i8> %a) {
+; CHECK-LE-LABEL: vextsb2wLE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vextsb2w 2, 2
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: vextsb2wLE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: vperm 2, 2, 2, 3
+; CHECK-BE-NEXT: vextsb2w 2, 2
+; CHECK-BE-NEXT: blr
+
entry:
%vecext = extractelement <16 x i8> %a, i32 0
%conv = sext i8 %vecext to i32
@@ -23,11 +29,17 @@ entry:
ret <4 x i32> %vecinit9
}
-define <2 x i64> @vextsb2d(<16 x i8> %a) {
-; PWR9-LABEL: vextsb2d:
-; PWR9: # BB#0: # %entry
-; PWR9-NEXT: vextsb2d 2, 2
-; PWR9-NEXT: blr
+define <2 x i64> @vextsb2dLE(<16 x i8> %a) {
+; CHECK-LE-LABEL: vextsb2dLE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vextsb2d 2, 2
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: vextsb2dLE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: vperm 2, 2, 2, 3
+; CHECK-BE-NEXT: vextsb2d 2, 2
+; CHECK-BE-NEXT: blr
+
entry:
%vecext = extractelement <16 x i8> %a, i32 0
%conv = sext i8 %vecext to i64
@@ -38,11 +50,17 @@ entry:
ret <2 x i64> %vecinit3
}
-define <4 x i32> @vextsh2w(<8 x i16> %a) {
-; PWR9-LABEL: vextsh2w:
-; PWR9: # BB#0: # %entry
-; PWR9-NEXT: vextsh2w 2, 2
-; PWR9-NEXT: blr
+define <4 x i32> @vextsh2wLE(<8 x i16> %a) {
+; CHECK-LE-LABEL: vextsh2wLE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vextsh2w 2, 2
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: vextsh2wLE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: vperm 2, 2, 2, 3
+; CHECK-BE-NEXT: vextsh2w 2, 2
+; CHECK-BE-NEXT: blr
+
entry:
%vecext = extractelement <8 x i16> %a, i32 0
%conv = sext i16 %vecext to i32
@@ -59,11 +77,17 @@ entry:
ret <4 x i32> %vecinit9
}
-define <2 x i64> @vextsh2d(<8 x i16> %a) {
-; PWR9-LABEL: vextsh2d:
-; PWR9: # BB#0: # %entry
-; PWR9-NEXT: vextsh2d 2, 2
-; PWR9-NEXT: blr
+define <2 x i64> @vextsh2dLE(<8 x i16> %a) {
+; CHECK-LE-LABEL: vextsh2dLE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vextsh2d 2, 2
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: vextsh2dLE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: vperm 2, 2, 2, 3
+; CHECK-BE-NEXT: vextsh2d 2, 2
+; CHECK-BE-NEXT: blr
+
entry:
%vecext = extractelement <8 x i16> %a, i32 0
%conv = sext i16 %vecext to i64
@@ -74,11 +98,17 @@ entry:
ret <2 x i64> %vecinit3
}
-define <2 x i64> @vextsw2d(<4 x i32> %a) {
-; PWR9-LABEL: vextsw2d:
-; PWR9: # BB#0: # %entry
-; PWR9-NEXT: vextsw2d 2, 2
-; PWR9-NEXT: blr
+define <2 x i64> @vextsw2dLE(<4 x i32> %a) {
+; CHECK-LE-LABEL: vextsw2dLE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vextsw2d 2, 2
+; CHECK-LE-NEXT: blr
+; CHECK-BE-LABEL: vextsw2dLE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE: vmrgew
+; CHECK-BE-NEXT: vextsw2d 2, 2
+; CHECK-BE-NEXT: blr
+
entry:
%vecext = extractelement <4 x i32> %a, i32 0
%conv = sext i32 %vecext to i64
@@ -88,3 +118,170 @@ entry:
%vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
ret <2 x i64> %vecinit3
}
+
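+; The following *BE functions encode the big-endian element pattern; on
+; little-endian targets they need a vsldoi rotate before the vexts*
+; instruction.
+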
+define <4 x i32> @vextsb2wBE(<16 x i8> %a) {
+; CHECK-BE-LABEL: vextsb2wBE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: vextsb2w 2, 2
+; CHECK-BE-NEXT: blr
+; CHECK-LE-LABEL: vextsb2wBE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vsldoi 2, 2, 2, 13
+; CHECK-LE-NEXT: vextsb2w 2, 2
+; CHECK-LE-NEXT: blr
+entry:
+ %vecext = extractelement <16 x i8> %a, i32 3
+ %conv = sext i8 %vecext to i32
+ %vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
+ %vecext1 = extractelement <16 x i8> %a, i32 7
+ %conv2 = sext i8 %vecext1 to i32
+ %vecinit3 = insertelement <4 x i32> %vecinit, i32 %conv2, i32 1
+ %vecext4 = extractelement <16 x i8> %a, i32 11
+ %conv5 = sext i8 %vecext4 to i32
+ %vecinit6 = insertelement <4 x i32> %vecinit3, i32 %conv5, i32 2
+ %vecext7 = extractelement <16 x i8> %a, i32 15
+ %conv8 = sext i8 %vecext7 to i32
+ %vecinit9 = insertelement <4 x i32> %vecinit6, i32 %conv8, i32 3
+ ret <4 x i32> %vecinit9
+}
+
+define <2 x i64> @vextsb2dBE(<16 x i8> %a) {
+; CHECK-BE-LABEL: vextsb2dBE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: vextsb2d 2, 2
+; CHECK-BE-NEXT: blr
+; CHECK-LE-LABEL: vextsb2dBE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vsldoi 2, 2, 2, 9
+; CHECK-LE-NEXT: vextsb2d 2, 2
+; CHECK-LE-NEXT: blr
+entry:
+ %vecext = extractelement <16 x i8> %a, i32 7
+ %conv = sext i8 %vecext to i64
+ %vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
+ %vecext1 = extractelement <16 x i8> %a, i32 15
+ %conv2 = sext i8 %vecext1 to i64
+ %vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
+ ret <2 x i64> %vecinit3
+}
+
+define <4 x i32> @vextsh2wBE(<8 x i16> %a) {
+; CHECK-BE-LABEL: vextsh2wBE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: vextsh2w 2, 2
+; CHECK-BE-NEXT: blr
+; CHECK-LE-LABEL: vextsh2wBE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vsldoi 2, 2, 2, 14
+; CHECK-LE-NEXT: vextsh2w 2, 2
+; CHECK-LE-NEXT: blr
+entry:
+ %vecext = extractelement <8 x i16> %a, i32 1
+ %conv = sext i16 %vecext to i32
+ %vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
+ %vecext1 = extractelement <8 x i16> %a, i32 3
+ %conv2 = sext i16 %vecext1 to i32
+ %vecinit3 = insertelement <4 x i32> %vecinit, i32 %conv2, i32 1
+ %vecext4 = extractelement <8 x i16> %a, i32 5
+ %conv5 = sext i16 %vecext4 to i32
+ %vecinit6 = insertelement <4 x i32> %vecinit3, i32 %conv5, i32 2
+ %vecext7 = extractelement <8 x i16> %a, i32 7
+ %conv8 = sext i16 %vecext7 to i32
+ %vecinit9 = insertelement <4 x i32> %vecinit6, i32 %conv8, i32 3
+ ret <4 x i32> %vecinit9
+}
+
+define <2 x i64> @vextsh2dBE(<8 x i16> %a) {
+; CHECK-BE-LABEL: vextsh2dBE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: vextsh2d 2, 2
+; CHECK-BE-NEXT: blr
+; CHECK-LE-LABEL: vextsh2dBE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vsldoi 2, 2, 2, 10
+; CHECK-LE-NEXT: vextsh2d 2, 2
+; CHECK-LE-NEXT: blr
+entry:
+ %vecext = extractelement <8 x i16> %a, i32 3
+ %conv = sext i16 %vecext to i64
+ %vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
+ %vecext1 = extractelement <8 x i16> %a, i32 7
+ %conv2 = sext i16 %vecext1 to i64
+ %vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
+ ret <2 x i64> %vecinit3
+}
+
+define <2 x i64> @vextsw2dBE(<4 x i32> %a) {
+; CHECK-BE-LABEL: vextsw2dBE:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NEXT: vextsw2d 2, 2
+; CHECK-BE-NEXT: blr
+; CHECK-LE-LABEL: vextsw2dBE:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NEXT: vsldoi 2, 2, 2, 12
+; CHECK-LE-NEXT: vextsw2d 2, 2
+; CHECK-LE-NEXT: blr
+entry:
+ %vecext = extractelement <4 x i32> %a, i32 1
+ %conv = sext i32 %vecext to i64
+ %vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
+ %vecext1 = extractelement <4 x i32> %a, i32 3
+ %conv2 = sext i32 %vecext1 to i64
+ %vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
+ ret <2 x i64> %vecinit3
+}
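+; The last two tests are negative: an extract that mixes two source vectors,
+; or an i8-to-i16 extend for which no vexts* instruction exists, must not be
+; matched to the sign-extension instructions.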
+
+define <2 x i64> @vextDiffVectors(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LE-LABEL: vextDiffVectors:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NOT: vextsw2d
+
+; CHECK-BE-LABEL: vextDiffVectors:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NOT: vextsw2d
+entry:
+ %vecext = extractelement <4 x i32> %a, i32 0
+ %conv = sext i32 %vecext to i64
+ %vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
+ %vecext1 = extractelement <4 x i32> %b, i32 2
+ %conv2 = sext i32 %vecext1 to i64
+ %vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
+ ret <2 x i64> %vecinit3
+}
+
+define <8 x i16> @testInvalidExtend(<16 x i8> %a) {
+entry:
+; CHECK-LE-LABEL: testInvalidExtend:
+; CHECK-LE: # BB#0: # %entry
+; CHECK-LE-NOT: vexts
+
+; CHECK-BE-LABEL: testInvalidExtend:
+; CHECK-BE: # BB#0: # %entry
+; CHECK-BE-NOT: vexts
+
+ %vecext = extractelement <16 x i8> %a, i32 0
+ %conv = sext i8 %vecext to i16
+ %vecinit = insertelement <8 x i16> undef, i16 %conv, i32 0
+ %vecext1 = extractelement <16 x i8> %a, i32 2
+ %conv2 = sext i8 %vecext1 to i16
+ %vecinit3 = insertelement <8 x i16> %vecinit, i16 %conv2, i32 1
+ %vecext4 = extractelement <16 x i8> %a, i32 4
+ %conv5 = sext i8 %vecext4 to i16
+ %vecinit6 = insertelement <8 x i16> %vecinit3, i16 %conv5, i32 2
+ %vecext7 = extractelement <16 x i8> %a, i32 6
+ %conv8 = sext i8 %vecext7 to i16
+ %vecinit9 = insertelement <8 x i16> %vecinit6, i16 %conv8, i32 3
+ %vecext10 = extractelement <16 x i8> %a, i32 8
+ %conv11 = sext i8 %vecext10 to i16
+ %vecinit12 = insertelement <8 x i16> %vecinit9, i16 %conv11, i32 4
+ %vecext13 = extractelement <16 x i8> %a, i32 10
+ %conv14 = sext i8 %vecext13 to i16
+ %vecinit15 = insertelement <8 x i16> %vecinit12, i16 %conv14, i32 5
+ %vecext16 = extractelement <16 x i8> %a, i32 12
+ %conv17 = sext i8 %vecext16 to i16
+ %vecinit18 = insertelement <8 x i16> %vecinit15, i16 %conv17, i32 6
+ %vecext19 = extractelement <16 x i8> %a, i32 14
+ %conv20 = sext i8 %vecext19 to i16
+ %vecinit21 = insertelement <8 x i16> %vecinit18, i16 %conv20, i32 7
+ ret <8 x i16> %vecinit21
+}
diff --git a/test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll b/test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll
index 67146e40db0e..5346d8a429fb 100644
--- a/test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll
+++ b/test/CodeGen/PowerPC/vsx-partword-int-loads-and-stores.ll
@@ -321,8 +321,8 @@ entry:
; CHECK: lxsibzx 34, 0, 3
; CHECK-NEXT: vspltb 2, 2, 7
; CHECK-BE-LABEL: vecucus
-; CHECK-BE: li [[OFFSET:[0-9]+]], 1
-; CHECK-BE-NEXT: lxsibzx 34, 3, [[OFFSET]]
+; CHECK-BE: addi [[OFFSET:[0-9]+]], [[OFFSET]], 1
+; CHECK-BE-NEXT: lxsibzx 34, 0, [[OFFSET]]
; CHECK-BE-NEXT: vspltb 2, 2, 7
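+; The updated BE sequence folds the constant element offset into the base
+; pointer with addi and passes a zero index to lxsibzx, rather than
+; materializing the offset in a separate index register with li.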
}
@@ -385,8 +385,8 @@ entry:
; CHECK: lxsibzx 34, 0, 3
; CHECK-NEXT: vspltb 2, 2, 7
; CHECK-BE-LABEL: vecscus
-; CHECK-BE: li [[OFFSET:[0-9]+]], 1
-; CHECK-BE-NEXT: lxsibzx 34, 3, [[OFFSET]]
+; CHECK-BE: addi [[OFFSET:[0-9]+]], [[OFFSET]], 1
+; CHECK-BE-NEXT: lxsibzx 34, 0, [[OFFSET]]
; CHECK-BE-NEXT: vspltb 2, 2, 7
}
@@ -487,8 +487,8 @@ entry:
; CHECK: lxsibzx 34, 0, 3
; CHECK-NEXT: vspltb 2, 2, 7
; CHECK-BE-LABEL: vecucss
-; CHECK-BE: li [[OFFSET:[0-9]+]], 1
-; CHECK-BE-NEXT: lxsibzx 34, 3, [[OFFSET]]
+; CHECK-BE: addi [[OFFSET:[0-9]+]], [[OFFSET]], 1
+; CHECK-BE-NEXT: lxsibzx 34, 0, [[OFFSET]]
; CHECK-BE-NEXT: vspltb 2, 2, 7
}
@@ -540,8 +540,8 @@ entry:
; CHECK: lxsibzx 34, 0, 3
; CHECK-NEXT: vspltb 2, 2, 7
; CHECK-BE-LABEL: vecscss
-; CHECK-BE: li [[OFFSET:[0-9]+]], 1
-; CHECK-BE-NEXT: lxsibzx 34, 3, [[OFFSET]]
+; CHECK-BE: addi [[OFFSET:[0-9]+]], [[OFFSET]], 1
+; CHECK-BE-NEXT: lxsibzx 34, 0, [[OFFSET]]
; CHECK-BE-NEXT: vspltb 2, 2, 7
}