Skip to content

Commit b9bbfbe

Browse files
aagit authored and torvalds committed
thp: memcg huge memory
Add memcg charge/uncharge to hugepage faults in huge_memory.c.

Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 152c9cc commit b9bbfbe

File tree

1 file changed

+31
-5
lines changed

1 file changed

+31
-5
lines changed

mm/huge_memory.c

Lines changed: 31 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -233,6 +233,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
233233
VM_BUG_ON(!PageCompound(page));
234234
pgtable = pte_alloc_one(mm, haddr);
235235
if (unlikely(!pgtable)) {
236+
mem_cgroup_uncharge_page(page);
236237
put_page(page);
237238
return VM_FAULT_OOM;
238239
}
@@ -243,6 +244,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
243244
spin_lock(&mm->page_table_lock);
244245
if (unlikely(!pmd_none(*pmd))) {
245246
spin_unlock(&mm->page_table_lock);
247+
mem_cgroup_uncharge_page(page);
246248
put_page(page);
247249
pte_free(mm, pgtable);
248250
} else {
@@ -286,6 +288,10 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
286288
page = alloc_hugepage(transparent_hugepage_defrag(vma));
287289
if (unlikely(!page))
288290
goto out;
291+
if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
292+
put_page(page);
293+
goto out;
294+
}
289295

290296
return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
291297
}
@@ -402,9 +408,17 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
402408
for (i = 0; i < HPAGE_PMD_NR; i++) {
403409
pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
404410
vma, address);
405-
if (unlikely(!pages[i])) {
406-
while (--i >= 0)
411+
if (unlikely(!pages[i] ||
412+
mem_cgroup_newpage_charge(pages[i], mm,
413+
GFP_KERNEL))) {
414+
if (pages[i])
407415
put_page(pages[i]);
416+
mem_cgroup_uncharge_start();
417+
while (--i >= 0) {
418+
mem_cgroup_uncharge_page(pages[i]);
419+
put_page(pages[i]);
420+
}
421+
mem_cgroup_uncharge_end();
408422
kfree(pages);
409423
ret |= VM_FAULT_OOM;
410424
goto out;
@@ -455,8 +469,12 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
455469

456470
out_free_pages:
457471
spin_unlock(&mm->page_table_lock);
458-
for (i = 0; i < HPAGE_PMD_NR; i++)
472+
mem_cgroup_uncharge_start();
473+
for (i = 0; i < HPAGE_PMD_NR; i++) {
474+
mem_cgroup_uncharge_page(pages[i]);
459475
put_page(pages[i]);
476+
}
477+
mem_cgroup_uncharge_end();
460478
kfree(pages);
461479
goto out;
462480
}
@@ -501,14 +519,22 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
501519
goto out;
502520
}
503521

522+
if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
523+
put_page(new_page);
524+
put_page(page);
525+
ret |= VM_FAULT_OOM;
526+
goto out;
527+
}
528+
504529
copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
505530
__SetPageUptodate(new_page);
506531

507532
spin_lock(&mm->page_table_lock);
508533
put_page(page);
509-
if (unlikely(!pmd_same(*pmd, orig_pmd)))
534+
if (unlikely(!pmd_same(*pmd, orig_pmd))) {
535+
mem_cgroup_uncharge_page(new_page);
510536
put_page(new_page);
511-
else {
537+
} else {
512538
pmd_t entry;
513539
VM_BUG_ON(!PageHead(page));
514540
entry = mk_pmd(new_page, vma->vm_page_prot);

0 commit comments

Comments
 (0)