@@ -123,12 +123,26 @@ static void release_task_mempolicy(struct proc_maps_private *priv)
 }
 #endif
 
+static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
+						loff_t *ppos)
+{
+	struct vm_area_struct *vma = vma_next(&priv->iter);
+
+	if (vma) {
+		*ppos = vma->vm_start;
+	} else {
+		*ppos = -2UL;
+		vma = get_gate_vma(priv->mm);
+	}
+
+	return vma;
+}
+
 static void *m_start(struct seq_file *m, loff_t *ppos)
 {
 	struct proc_maps_private *priv = m->private;
 	unsigned long last_addr = *ppos;
 	struct mm_struct *mm;
-	struct vm_area_struct *vma;
 
 	/* See m_next(). Zero at the start or after lseek. */
 	if (last_addr == -1UL)
@@ -152,31 +166,21 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
 		return ERR_PTR(-EINTR);
 	}
 
+	vma_iter_init(&priv->iter, mm, last_addr);
 	hold_task_mempolicy(priv);
-	priv->tail_vma = get_gate_vma(mm);
-
-	vma = find_vma(mm, last_addr);
-	if (vma)
-		return vma;
+	if (last_addr == -2UL)
+		return get_gate_vma(mm);
 
-	return priv->tail_vma;
+	return proc_get_vma(priv, ppos);
 }
 
 static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
 {
-	struct proc_maps_private *priv = m->private;
-	struct vm_area_struct *next, *vma = v;
-
-	if (vma == priv->tail_vma)
-		next = NULL;
-	else if (vma->vm_next)
-		next = vma->vm_next;
-	else
-		next = priv->tail_vma;
-
-	*ppos = next ? next->vm_start : -1UL;
-
-	return next;
+	if (*ppos == -2UL) {
+		*ppos = -1UL;
+		return NULL;
+	}
+	return proc_get_vma(m->private, ppos);
 }
 
 static void m_stop(struct seq_file *m, void *v)
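
With this hunk, m_start()/m_next() resume the walk from *ppos through the vma iterator kept in proc_maps_private instead of chasing vm_next and a cached tail_vma; *ppos set to -2UL marks the gate VMA and -1UL marks the end of the sequence. As a minimal sketch of the underlying iterator pattern (not part of this patch; it assumes the VMA_ITERATOR/for_each_vma helpers from the same maple-tree series and a valid mm):

	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);	/* start the walk at address 0 */

	mmap_read_lock(mm);
	for_each_vma(vmi, vma)		/* each step is vma_next(&vmi) */
		pr_info("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
	mmap_read_unlock(mm);
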
@@ -876,16 +880,16 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 {
 	struct proc_maps_private *priv = m->private;
 	struct mem_size_stats mss;
-	struct mm_struct *mm;
+	struct mm_struct *mm = priv->mm;
 	struct vm_area_struct *vma;
-	unsigned long last_vma_end = 0;
+	unsigned long vma_start = 0, last_vma_end = 0;
 	int ret = 0;
+	MA_STATE(mas, &mm->mm_mt, 0, 0);
 
 	priv->task = get_proc_task(priv->inode);
 	if (!priv->task)
 		return -ESRCH;
 
-	mm = priv->mm;
 	if (!mm || !mmget_not_zero(mm)) {
 		ret = -ESRCH;
 		goto out_put_task;
@@ -898,8 +902,13 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 		goto out_put_mm;
 
 	hold_task_mempolicy(priv);
+	vma = mas_find(&mas, ULONG_MAX);
+
+	if (unlikely(!vma))
+		goto empty_set;
 
-	for (vma = priv->mm->mmap; vma;) {
+	vma_start = vma->vm_start;
+	do {
 		smap_gather_stats(vma, &mss, 0);
 		last_vma_end = vma->vm_end;
 
@@ -908,6 +917,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 		 * access it for write request.
 		 */
 		if (mmap_lock_is_contended(mm)) {
+			mas_pause(&mas);
 			mmap_read_unlock(mm);
 			ret = mmap_read_lock_killable(mm);
 			if (ret) {
@@ -951,7 +961,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 			 * contains last_vma_end.
 			 * Iterate VMA' from last_vma_end.
 			 */
-			vma = find_vma(mm, last_vma_end - 1);
+			vma = mas_find(&mas, ULONG_MAX);
 			/* Case 3 above */
 			if (!vma)
 				break;
@@ -965,11 +975,10 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 			smap_gather_stats(vma, &mss, last_vma_end);
 		}
 		/* Case 2 above */
-		vma = vma->vm_next;
-	}
+	} while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
 
-	show_vma_header_prefix(m, priv->mm->mmap->vm_start,
-			       last_vma_end, 0, 0, 0, 0);
+empty_set:
+	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
 	seq_pad(m, ' ');
 	seq_puts(m, "[rollup]\n");
 
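
In show_smaps_rollup() the loop now pulls VMAs straight out of the maple tree with mas_find(), and mas_pause() is called before mmap_lock is dropped so the iterator forgets its cached tree position and re-walks from the last index once the lock is retaken. A rough, hypothetical helper illustrating that pause/resume idiom (the function name and the per-VMA work are placeholders, not part of the patch):

	static int walk_vmas_droppable(struct mm_struct *mm)	/* illustrative name */
	{
		MA_STATE(mas, &mm->mm_mt, 0, 0);
		struct vm_area_struct *vma;
		int ret;

		ret = mmap_read_lock_killable(mm);
		if (ret)
			return ret;

		mas_for_each(&mas, vma, ULONG_MAX) {
			/* ... gather per-VMA state here ... */

			if (!mmap_lock_is_contended(mm))
				continue;

			mas_pause(&mas);	/* drop the cached tree position */
			mmap_read_unlock(mm);
			ret = mmap_read_lock_killable(mm);
			if (ret)
				return ret;	/* lock is not held on this path */
			/* the next mas_find() resumes from mas.index */
		}

		mmap_read_unlock(mm);
		return 0;
	}
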
@@ -1262,6 +1271,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		return -ESRCH;
 	mm = get_task_mm(task);
 	if (mm) {
+		MA_STATE(mas, &mm->mm_mt, 0, 0);
 		struct mmu_notifier_range range;
 		struct clear_refs_private cp = {
 			.type = type,
@@ -1281,7 +1291,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		}
 
 		if (type == CLEAR_REFS_SOFT_DIRTY) {
-			for (vma = mm->mmap; vma; vma = vma->vm_next) {
+			mas_for_each(&mas, vma, ULONG_MAX) {
 				if (!(vma->vm_flags & VM_SOFTDIRTY))
 					continue;
 				vma->vm_flags &= ~VM_SOFTDIRTY;
@@ -1293,8 +1303,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 						0, NULL, mm, 0, -1UL);
 			mmu_notifier_invalidate_range_start(&range);
 		}
-		walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
-				&cp);
+		walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
 		if (type == CLEAR_REFS_SOFT_DIRTY) {
 			mmu_notifier_invalidate_range_end(&range);
 			flush_tlb_mm(mm);
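
With the VMA linked list gone, mm->highest_vm_end no longer exists either, so the soft-dirty walk simply covers the whole address space (end = -1, i.e. ULONG_MAX) and clear_refs_write() iterates VMAs with mas_for_each(). A stripped-down sketch of that flag-clearing loop (hypothetical helper name; assumes mmap_lock is taken for write while vm_flags are changed, as in the caller here):

	static void clear_soft_dirty_vma_flags(struct mm_struct *mm)	/* illustrative */
	{
		MA_STATE(mas, &mm->mm_mt, 0, 0);
		struct vm_area_struct *vma;

		mmap_write_lock(mm);
		mas_for_each(&mas, vma, ULONG_MAX) {
			if (!(vma->vm_flags & VM_SOFTDIRTY))
				continue;
			vma->vm_flags &= ~VM_SOFTDIRTY;
			vma_set_page_prot(vma);
		}
		mmap_write_unlock(mm);
	}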