; a[i] *= x;
; }
; }
+ ;
+ ; FIXME: Unrolling is currently not applied because the new Loop ID created
+ ; after vectorization does not directly contain the unroll metadata; the
+ ; original Loop ID is nested inside it instead (see the sketch after the diff).
define void @f(ptr noundef captures(none) %a, float noundef %x) {
; CHECK-LABEL: define void @f(
; CHECK-SAME: ptr noundef captures(none) [[A:%.*]], float noundef [[X:%.*]]) {
@@ -21,47 +25,14 @@ define void @f(ptr noundef captures(none) %a, float noundef %x) {
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
- ; CHECK-NEXT: [[INDEX_NEXT_6:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+ ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+ ; CHECK-NEXT: [[INDEX_NEXT_6:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX_NEXT_6]]
- ; CHECK-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x float>, ptr [[TMP14]], align 4
+ ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP14]], i32 0
+ ; CHECK-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP15:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD_7]]
- ; CHECK-NEXT: store <4 x float> [[TMP15]], ptr [[TMP14]], align 4
- ; CHECK-NEXT: [[INDEX_NEXT1:%.*]] = add nuw nsw i64 [[INDEX_NEXT_6]], 4
- ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX_NEXT1]]
- ; CHECK-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
- ; CHECK-NEXT: [[TMP3:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD_1]]
- ; CHECK-NEXT: store <4 x float> [[TMP3]], ptr [[TMP2]], align 4
- ; CHECK-NEXT: [[INDEX_NEXT_1:%.*]] = add nuw nsw i64 [[INDEX_NEXT_6]], 8
- ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX_NEXT_1]]
- ; CHECK-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x float>, ptr [[TMP16]], align 4
- ; CHECK-NEXT: [[TMP5:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD_2]]
- ; CHECK-NEXT: store <4 x float> [[TMP5]], ptr [[TMP16]], align 4
- ; CHECK-NEXT: [[INDEX_NEXT_2:%.*]] = add nuw nsw i64 [[INDEX_NEXT_6]], 12
- ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX_NEXT_2]]
- ; CHECK-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x float>, ptr [[TMP6]], align 4
- ; CHECK-NEXT: [[TMP7:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD_3]]
- ; CHECK-NEXT: store <4 x float> [[TMP7]], ptr [[TMP6]], align 4
- ; CHECK-NEXT: [[INDEX_NEXT_3:%.*]] = add nuw nsw i64 [[INDEX_NEXT_6]], 16
- ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX_NEXT_3]]
- ; CHECK-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x float>, ptr [[TMP8]], align 4
- ; CHECK-NEXT: [[TMP9:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD_4]]
- ; CHECK-NEXT: store <4 x float> [[TMP9]], ptr [[TMP8]], align 4
- ; CHECK-NEXT: [[INDEX_NEXT_4:%.*]] = add nuw nsw i64 [[INDEX_NEXT_6]], 20
- ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX_NEXT_4]]
- ; CHECK-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x float>, ptr [[TMP10]], align 4
- ; CHECK-NEXT: [[TMP11:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD_5]]
- ; CHECK-NEXT: store <4 x float> [[TMP11]], ptr [[TMP10]], align 4
- ; CHECK-NEXT: [[INDEX_NEXT_5:%.*]] = add nuw nsw i64 [[INDEX_NEXT_6]], 24
- ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX_NEXT_5]]
- ; CHECK-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x float>, ptr [[TMP12]], align 4
- ; CHECK-NEXT: [[TMP13:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD_6]]
- ; CHECK-NEXT: store <4 x float> [[TMP13]], ptr [[TMP12]], align 4
- ; CHECK-NEXT: [[INDEX_NEXT_7:%.*]] = add nuw nsw i64 [[INDEX_NEXT_6]], 28
- ; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX_NEXT_7]]
- ; CHECK-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x float>, ptr [[TMP17]], align 4
- ; CHECK-NEXT: [[TMP18:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD_8]]
- ; CHECK-NEXT: store <4 x float> [[TMP18]], ptr [[TMP17]], align 4
- ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw nsw i64 [[INDEX_NEXT_6]], 32
+ ; CHECK-NEXT: store <4 x float> [[TMP15]], ptr [[TMP2]], align 4
+ ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -70,49 +41,14 @@ define void @f(ptr noundef captures(none) %a, float noundef %x) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
- ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT_7:%.*]], %[[FOR_BODY]] ]
+ ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[LOAD:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[MUL:%.*]] = fmul float [[X]], [[LOAD]]
; CHECK-NEXT: store float [[MUL]], ptr [[ARRAYIDX]], align 4
- ; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1
- ; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT]]
- ; CHECK-NEXT: [[LOAD_1:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
- ; CHECK-NEXT: [[MUL_1:%.*]] = fmul float [[X]], [[LOAD_1]]
- ; CHECK-NEXT: store float [[MUL_1]], ptr [[ARRAYIDX_1]], align 4
- ; CHECK-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2
- ; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT_1]]
- ; CHECK-NEXT: [[LOAD_2:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
- ; CHECK-NEXT: [[MUL_2:%.*]] = fmul float [[X]], [[LOAD_2]]
- ; CHECK-NEXT: store float [[MUL_2]], ptr [[ARRAYIDX_2]], align 4
- ; CHECK-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3
- ; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT_2]]
- ; CHECK-NEXT: [[LOAD_3:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
- ; CHECK-NEXT: [[MUL_3:%.*]] = fmul float [[X]], [[LOAD_3]]
- ; CHECK-NEXT: store float [[MUL_3]], ptr [[ARRAYIDX_3]], align 4
- ; CHECK-NEXT: [[IV_NEXT_3:%.*]] = add nuw nsw i64 [[IV]], 4
- ; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT_3]]
- ; CHECK-NEXT: [[LOAD_4:%.*]] = load float, ptr [[ARRAYIDX_4]], align 4
- ; CHECK-NEXT: [[MUL_4:%.*]] = fmul float [[X]], [[LOAD_4]]
- ; CHECK-NEXT: store float [[MUL_4]], ptr [[ARRAYIDX_4]], align 4
- ; CHECK-NEXT: [[IV_NEXT_4:%.*]] = add nuw nsw i64 [[IV]], 5
- ; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT_4]]
- ; CHECK-NEXT: [[LOAD_5:%.*]] = load float, ptr [[ARRAYIDX_5]], align 4
- ; CHECK-NEXT: [[MUL_5:%.*]] = fmul float [[X]], [[LOAD_5]]
- ; CHECK-NEXT: store float [[MUL_5]], ptr [[ARRAYIDX_5]], align 4
- ; CHECK-NEXT: [[IV_NEXT_5:%.*]] = add nuw nsw i64 [[IV]], 6
- ; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT_5]]
- ; CHECK-NEXT: [[LOAD_6:%.*]] = load float, ptr [[ARRAYIDX_6]], align 4
- ; CHECK-NEXT: [[MUL_6:%.*]] = fmul float [[X]], [[LOAD_6]]
- ; CHECK-NEXT: store float [[MUL_6]], ptr [[ARRAYIDX_6]], align 4
- ; CHECK-NEXT: [[IV_NEXT_6:%.*]] = add nuw nsw i64 [[IV]], 7
- ; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT_6]]
- ; CHECK-NEXT: [[LOAD_7:%.*]] = load float, ptr [[ARRAYIDX_7]], align 4
- ; CHECK-NEXT: [[MUL_7:%.*]] = fmul float [[X]], [[LOAD_7]]
- ; CHECK-NEXT: store float [[MUL_7]], ptr [[ARRAYIDX_7]], align 4
- ; CHECK-NEXT: [[IV_NEXT_7]] = add nuw nsw i64 [[IV]], 8
- ; CHECK-NEXT: [[COMP_7:%.*]] = icmp eq i64 [[IV_NEXT_7]], 1024
- ; CHECK-NEXT: br i1 [[COMP_7]], label %[[EXIT_LOOPEXIT:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+ ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+ ; CHECK-NEXT: [[COMP:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+ ; CHECK-NEXT: br i1 [[COMP]], label %[[EXIT_LOOPEXIT:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[EXIT_LOOPEXIT]]:
; CHECK-NEXT: br label %[[EXIT]]
; CHECK: [[EXIT]]:
@@ -142,8 +78,10 @@ exit:
!4 = !{!"llvm.loop.isvectorized"}
!5 = !{!"llvm.loop.unroll.count", i32 8}
;.
- ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
- ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized"}
- ; CHECK: [[META2]] = !{!"llvm.loop.unroll.disable"}
- ; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
+ ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META4:![0-9]+]]}
+ ; CHECK: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
+ ; CHECK: [[META2]] = !{!"llvm.loop.isvectorized"}
+ ; CHECK: [[META3]] = !{!"llvm.loop.unroll.count", i32 8}
+ ; CHECK: [[META4]] = !{!"llvm.loop.unroll.runtime.disable"}
+ ; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]]}
;.
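
For reference, here is a minimal sketch of the loop-metadata shape that the updated CHECK lines capture, written out as plain LLVM IR; the !N numbering is hypothetical and corresponds to [[LOOP0]], [[META1]]-[[META4]], and [[LOOP5]] above. Loop properties are only recognized when they are direct operands of a loop's Loop ID, so the llvm.loop.unroll.count that is reachable only through the nested node is not seen by the unroller, which is the situation the FIXME describes.

; Hypothetical !N numbering; the shape mirrors the CHECK lines above.
!0 = distinct !{!0, !1, !4}                  ; vector loop ID ([[LOOP0]]): nests !1 instead of listing its operands
!1 = distinct !{!1, !2, !3}                  ; original Loop ID ([[META1]]), kept as an operand
!2 = !{!"llvm.loop.isvectorized"}            ; [[META2]]
!3 = !{!"llvm.loop.unroll.count", i32 8}     ; [[META3]]: one level too deep for the unroller to find
!4 = !{!"llvm.loop.unroll.runtime.disable"}  ; [[META4]]
!5 = distinct !{!5, !1}                      ; scalar remainder loop ID ([[LOOP5]])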