Commit c4c84f0
Matthew Wilcox (Oracle) authored and akpm00 committed
fs/proc/task_mmu: stop using linked list and highest_vm_end
Remove references to the mm_struct linked list and highest_vm_end, in
preparation for their removal.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Liam R. Howlett <[email protected]>
Tested-by: Yu Zhao <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: David Howells <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: SeongJae Park <[email protected]>
Cc: Sven Schnelle <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Parent: 5f14b92
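The recurring change in this series is swapping linked-list traversal of VMAs for maple tree iteration. As a rough before/after sketch (illustrative only, not code from this commit: do_something() is a hypothetical per-VMA action, and mm is assumed to be a valid mm_struct with the mmap lock held for reading):

	struct vm_area_struct *vma;
	MA_STATE(mas, &mm->mm_mt, 0, 0);	/* maple state over mm's VMA tree */

	/* Before: follow the singly linked list rooted at mm->mmap. */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		do_something(vma);

	/* After: walk the maple tree; ULONG_MAX means no upper bound. */
	mas_for_each(&mas, vma, ULONG_MAX)
		do_something(vma);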

2 files changed: +42 −33 lines

fs/proc/internal.h (+1 −1)

@@ -285,7 +285,7 @@ struct proc_maps_private {
 	struct task_struct *task;
 	struct mm_struct *mm;
 #ifdef CONFIG_MMU
-	struct vm_area_struct *tail_vma;
+	struct vma_iterator iter;
 #endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *task_mempolicy;
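Here tail_vma, which cached the gate VMA so the seq_file walk could append it after the list ran out, is replaced by a vma_iterator, the cursor type for maple tree VMA walks. A minimal sketch of the pattern this field enables (assumptions: mm and addr come from the caller, the mmap lock is held for reading, and do_something() is hypothetical):

	struct vma_iterator iter;
	struct vm_area_struct *vma;

	vma_iter_init(&iter, mm, addr);		/* position the cursor at addr */
	while ((vma = vma_next(&iter)) != NULL)	/* VMAs at or after addr, in order */
		do_something(vma);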

fs/proc/task_mmu.c (+41 −32)
@@ -123,12 +123,26 @@ static void release_task_mempolicy(struct proc_maps_private *priv)
 }
 #endif
 
+static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
+						loff_t *ppos)
+{
+	struct vm_area_struct *vma = vma_next(&priv->iter);
+
+	if (vma) {
+		*ppos = vma->vm_start;
+	} else {
+		*ppos = -2UL;
+		vma = get_gate_vma(priv->mm);
+	}
+
+	return vma;
+}
+
 static void *m_start(struct seq_file *m, loff_t *ppos)
 {
 	struct proc_maps_private *priv = m->private;
 	unsigned long last_addr = *ppos;
 	struct mm_struct *mm;
-	struct vm_area_struct *vma;
 
 	/* See m_next(). Zero at the start or after lseek. */
 	if (last_addr == -1UL)
@@ -152,31 +166,21 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
 		return ERR_PTR(-EINTR);
 	}
 
+	vma_iter_init(&priv->iter, mm, last_addr);
 	hold_task_mempolicy(priv);
-	priv->tail_vma = get_gate_vma(mm);
-
-	vma = find_vma(mm, last_addr);
-	if (vma)
-		return vma;
+	if (last_addr == -2UL)
+		return get_gate_vma(mm);
 
-	return priv->tail_vma;
+	return proc_get_vma(priv, ppos);
 }
 
 static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
 {
-	struct proc_maps_private *priv = m->private;
-	struct vm_area_struct *next, *vma = v;
-
-	if (vma == priv->tail_vma)
-		next = NULL;
-	else if (vma->vm_next)
-		next = vma->vm_next;
-	else
-		next = priv->tail_vma;
-
-	*ppos = next ? next->vm_start : -1UL;
-
-	return next;
+	if (*ppos == -2UL) {
+		*ppos = -1UL;
+		return NULL;
+	}
+	return proc_get_vma(m->private, ppos);
 }
 
 static void m_stop(struct seq_file *m, void *v)
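With the iterator doing the bookkeeping, the seq_file position is the only state m_next() needs; two sentinels extend it past the end of the tree. A descriptive summary (not code from the commit):

	/*
	 * *ppos encoding used by m_start()/m_next()/proc_get_vma():
	 *
	 *	vma->vm_start	resume the VMA tree walk at this address
	 *	-2UL		tree exhausted; report the gate VMA next
	 *	-1UL		iteration finished (m_next() returns NULL)
	 */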
@@ -876,16 +880,16 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 {
 	struct proc_maps_private *priv = m->private;
 	struct mem_size_stats mss;
-	struct mm_struct *mm;
+	struct mm_struct *mm = priv->mm;
 	struct vm_area_struct *vma;
-	unsigned long last_vma_end = 0;
+	unsigned long vma_start = 0, last_vma_end = 0;
 	int ret = 0;
+	MA_STATE(mas, &mm->mm_mt, 0, 0);
 
 	priv->task = get_proc_task(priv->inode);
 	if (!priv->task)
 		return -ESRCH;
 
-	mm = priv->mm;
 	if (!mm || !mmget_not_zero(mm)) {
 		ret = -ESRCH;
 		goto out_put_task;
@@ -898,8 +902,13 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 		goto out_put_mm;
 
 	hold_task_mempolicy(priv);
+	vma = mas_find(&mas, 0);
+
+	if (unlikely(!vma))
+		goto empty_set;
 
-	for (vma = priv->mm->mmap; vma;) {
+	vma_start = vma->vm_start;
+	do {
 		smap_gather_stats(vma, &mss, 0);
 		last_vma_end = vma->vm_end;
 
@@ -908,6 +917,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 		 * access it for write request.
 		 */
 		if (mmap_lock_is_contended(mm)) {
+			mas_pause(&mas);
 			mmap_read_unlock(mm);
 			ret = mmap_read_lock_killable(mm);
 			if (ret) {
@@ -951,7 +961,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 			 * contains last_vma_end.
 			 * Iterate VMA' from last_vma_end.
 			 */
-			vma = find_vma(mm, last_vma_end - 1);
+			vma = mas_find(&mas, ULONG_MAX);
 			/* Case 3 above */
 			if (!vma)
 				break;
@@ -965,11 +975,10 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 			smap_gather_stats(vma, &mss, last_vma_end);
 		}
 		/* Case 2 above */
-		vma = vma->vm_next;
-	}
+	} while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
 
-	show_vma_header_prefix(m, priv->mm->mmap->vm_start,
-			       last_vma_end, 0, 0, 0, 0);
+empty_set:
+	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
 	seq_pad(m, ' ');
 	seq_puts(m, "[rollup]\n");
 
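Note the mas_pause() added before the lock drop above: a maple state caches tree nodes, so it must be paused before mmap_lock is released; the next mas_find() then re-walks from the saved index instead of touching possibly stale nodes. A minimal sketch of that pattern, under the same assumptions as earlier (do_something() is hypothetical, and error handling is omitted):

	MA_STATE(mas, &mm->mm_mt, 0, 0);
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	mas_for_each(&mas, vma, ULONG_MAX) {
		do_something(vma);

		if (mmap_lock_is_contended(mm)) {
			mas_pause(&mas);	/* drop cached node pointers */
			mmap_read_unlock(mm);	/* let a waiting writer in */
			mmap_read_lock(mm);	/* resume from saved index */
		}
	}
	mmap_read_unlock(mm);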
@@ -1262,6 +1271,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		return -ESRCH;
 	mm = get_task_mm(task);
 	if (mm) {
+		MA_STATE(mas, &mm->mm_mt, 0, 0);
 		struct mmu_notifier_range range;
 		struct clear_refs_private cp = {
 			.type = type,
@@ -1281,7 +1291,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		}
 
 		if (type == CLEAR_REFS_SOFT_DIRTY) {
-			for (vma = mm->mmap; vma; vma = vma->vm_next) {
+			mas_for_each(&mas, vma, ULONG_MAX) {
 				if (!(vma->vm_flags & VM_SOFTDIRTY))
 					continue;
 				vma->vm_flags &= ~VM_SOFTDIRTY;
@@ -1293,8 +1303,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 					0, NULL, mm, 0, -1UL);
 			mmu_notifier_invalidate_range_start(&range);
 		}
-		walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
-				&cp);
+		walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
 		if (type == CLEAR_REFS_SOFT_DIRTY) {
 			mmu_notifier_invalidate_range_end(&range);
 			flush_tlb_mm(mm);
