@@ -5005,7 +5005,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_offset(ptr %out, i64 %in, i64 %old
 ; GFX12-NEXT: s_endpgm
 entry:
   %gep = getelementptr i64, ptr %out, i64 4
-  %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
+  %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -5061,7 +5061,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_soffset(ptr %out, i64 %in, i64 %ol
 ; GFX12-NEXT: s_endpgm
 entry:
   %gep = getelementptr i64, ptr %out, i64 9000
-  %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
+  %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -5121,7 +5121,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_offset(ptr %out, ptr %out2, i6
 ; GFX12-NEXT: s_endpgm
 entry:
   %gep = getelementptr i64, ptr %out, i64 4
-  %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
+  %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst, !noalias.addrspace !0
   %extract0 = extractvalue { i64, i1 } %val, 0
   store i64 %extract0, ptr %out2
   ret void
@@ -5184,7 +5184,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_addr64_offset(ptr %out, i64 %in, i
 entry:
   %ptr = getelementptr i64, ptr %out, i64 %index
   %gep = getelementptr i64, ptr %ptr, i64 4
-  %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
+  %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -5257,7 +5257,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64_offset(ptr %out, ptr %o
 entry:
   %ptr = getelementptr i64, ptr %out, i64 %index
   %gep = getelementptr i64, ptr %ptr, i64 4
-  %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
+  %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst, !noalias.addrspace !0
   %extract0 = extractvalue { i64, i1 } %val, 0
   store i64 %extract0, ptr %out2
   ret void
@@ -5310,7 +5310,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64(ptr %out, i64 %in, i64 %old) {
 ; GFX12-NEXT: global_inv scope:SCOPE_DEV
 ; GFX12-NEXT: s_endpgm
 entry:
-  %val = cmpxchg volatile ptr %out, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
+  %val = cmpxchg volatile ptr %out, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -5365,7 +5365,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret(ptr %out, ptr %out2, i64 %in,
 ; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT: s_endpgm
 entry:
-  %val = cmpxchg volatile ptr %out, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
+  %val = cmpxchg volatile ptr %out, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst, !noalias.addrspace !0
   %extract0 = extractvalue { i64, i1 } %val, 0
   store i64 %extract0, ptr %out2
   ret void
@@ -5423,7 +5423,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_addr64(ptr %out, i64 %in, i64 %ind
 ; GFX12-NEXT: s_endpgm
 entry:
   %ptr = getelementptr i64, ptr %out, i64 %index
-  %val = cmpxchg volatile ptr %ptr, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
+  %val = cmpxchg volatile ptr %ptr, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst, !noalias.addrspace !0
   ret void
 }
@@ -5491,7 +5491,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64(ptr %out, ptr %out2, i6
 ; GFX12-NEXT: s_endpgm
 entry:
   %ptr = getelementptr i64, ptr %out, i64 %index
-  %val = cmpxchg volatile ptr %ptr, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
+  %val = cmpxchg volatile ptr %ptr, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst, !noalias.addrspace !0
   %extract0 = extractvalue { i64, i1 } %val, 0
   store i64 %extract0, ptr %out2
   ret void
@@ -5543,7 +5543,7 @@ define amdgpu_kernel void @atomic_load_f64_offset(ptr %in, ptr %out) {
 ; GFX12-NEXT: s_endpgm
 entry:
   %gep = getelementptr double, ptr %in, i64 4
-  %val = load atomic double, ptr %gep seq_cst, align 8
+  %val = load atomic double, ptr %gep seq_cst, align 8, !noalias.addrspace !0
   store double %val, ptr %out
   ret void
 }
@@ -5589,7 +5589,7 @@ define amdgpu_kernel void @atomic_load_f64(ptr %in, ptr %out) {
 ; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
 ; GFX12-NEXT: s_endpgm
 entry:
-  %val = load atomic double, ptr %in syncscope("agent") seq_cst, align 8
+  %val = load atomic double, ptr %in syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
   store double %val, ptr %out
   ret void
 }
@@ -5654,7 +5654,7 @@ define amdgpu_kernel void @atomic_load_f64_addr64_offset(ptr %in, ptr %out, i64
 entry:
   %ptr = getelementptr double, ptr %in, i64 %index
   %gep = getelementptr double, ptr %ptr, i64 4
-  %val = load atomic double, ptr %gep seq_cst, align 8
+  %val = load atomic double, ptr %gep seq_cst, align 8, !noalias.addrspace !0
   store double %val, ptr %out
   ret void
 }
@@ -5714,7 +5714,7 @@ define amdgpu_kernel void @atomic_load_f64_addr64(ptr %in, ptr %out, i64 %index)
 ; GFX12-NEXT: s_endpgm
 entry:
   %ptr = getelementptr double, ptr %in, i64 %index
-  %val = load atomic double, ptr %ptr seq_cst, align 8
+  %val = load atomic double, ptr %ptr seq_cst, align 8, !noalias.addrspace !0
   store double %val, ptr %out
   ret void
 }
@@ -5757,7 +5757,7 @@ define amdgpu_kernel void @atomic_store_f64_offset(double %in, ptr %out) {
 ; GFX12-NEXT: s_endpgm
 entry:
   %gep = getelementptr double, ptr %out, i64 4
-  store atomic double %in, ptr %gep seq_cst, align 8
+  store atomic double %in, ptr %gep seq_cst, align 8, !noalias.addrspace !0
   ret void
 }
@@ -5794,7 +5794,7 @@ define amdgpu_kernel void @atomic_store_f64(double %in, ptr %out) {
 ; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1] scope:SCOPE_SYS
 ; GFX12-NEXT: s_endpgm
 entry:
-  store atomic double %in, ptr %out seq_cst, align 8
+  store atomic double %in, ptr %out seq_cst, align 8, !noalias.addrspace !0
   ret void
 }
@@ -5850,7 +5850,7 @@ define amdgpu_kernel void @atomic_store_f64_addr64_offset(double %in, ptr %out,
 entry:
   %ptr = getelementptr double, ptr %out, i64 %index
   %gep = getelementptr double, ptr %ptr, i64 4
-  store atomic double %in, ptr %gep seq_cst, align 8
+  store atomic double %in, ptr %gep seq_cst, align 8, !noalias.addrspace !0
   ret void
 }
@@ -5901,7 +5901,7 @@ define amdgpu_kernel void @atomic_store_f64_addr64(double %in, ptr %out, i64 %in
 ; GFX12-NEXT: s_endpgm
 entry:
   %ptr = getelementptr double, ptr %out, i64 %index
-  store atomic double %in, ptr %ptr seq_cst, align 8
+  store atomic double %in, ptr %ptr seq_cst, align 8, !noalias.addrspace !0
   ret void
 }
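A note on the change itself: `!noalias.addrspace` metadata asserts that the pointer operand of the annotated atomic operation cannot point into the listed address-space ranges. The `!0` node referenced in these hunks is defined elsewhere in the test file and is not shown here; in AMDGPU tests of this kind it is typically a single range excluding the private (scratch) address space, which lets the backend lower the flat atomic without a private-memory expansion path. A minimal sketch of that definition, assuming the file follows the usual convention:

; Assumed definition of !0 (the actual node lives outside the hunks shown).
; The pair (5, 6) denotes the half-open range [5, 6) of address spaces the
; pointer is guaranteed not to alias, i.e. AMDGPU address space 5 (private).
!0 = !{i32 5, i32 6}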