@@ -1027,6 +1027,10 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
 {
 	struct page_cgroup *pc;
 	struct mem_cgroup_per_zone *mz;
+	int page_size = PAGE_SIZE;
+
+	if (PageTransHuge(page))
+		page_size <<= compound_order(page);
 
 	if (mem_cgroup_disabled())
 		return NULL;
@@ -1887,12 +1891,14 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
  * oom-killer can be invoked.
  */
 static int __mem_cgroup_try_charge(struct mm_struct *mm,
-		gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
+		gfp_t gfp_mask,
+		struct mem_cgroup **memcg, bool oom,
+		int page_size)
 {
 	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
 	struct mem_cgroup *mem = NULL;
 	int ret;
-	int csize = CHARGE_SIZE;
+	int csize = max(CHARGE_SIZE, (unsigned long) page_size);
 
 	/*
 	 * Unlike global-vm's OOM-kill, we're not in memory shortage
@@ -1917,7 +1923,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 		VM_BUG_ON(css_is_removed(&mem->css));
 		if (mem_cgroup_is_root(mem))
 			goto done;
-		if (consume_stock(mem))
+		if (page_size == PAGE_SIZE && consume_stock(mem))
 			goto done;
 		css_get(&mem->css);
 	} else {
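The page_size == PAGE_SIZE guard here and in the next hunk keeps huge pages away from the per-cpu charge stock, which caches pre-charged memory in single-page units; oversized requests fall through to the res_counter. A toy model of that decision, with toy_consume_stock() as a hypothetical stand-in for consume_stock():

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed: 4KB base pages */

/* hypothetical per-cpu stock: pre-charged bytes in PAGE_SIZE units */
static unsigned long cached_bytes = 8 * PAGE_SIZE;

/* returns 1 when the stock can cover the request */
static int toy_consume_stock(unsigned long page_size)
{
	if (page_size != PAGE_SIZE)	/* huge pages bypass the stock */
		return 0;
	if (cached_bytes < PAGE_SIZE)
		return 0;
	cached_bytes -= PAGE_SIZE;
	return 1;
}

int main(void)
{
	printf("4KB page: %d\n", toy_consume_stock(PAGE_SIZE));	     /* 1 */
	printf("2MB page: %d\n", toy_consume_stock(PAGE_SIZE << 9)); /* 0 */
	return 0;
}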
@@ -1940,7 +1946,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 			rcu_read_unlock();
 			goto done;
 		}
-		if (consume_stock(mem)) {
+		if (page_size == PAGE_SIZE && consume_stock(mem)) {
 			/*
 			 * It seems dangerous to access memcg without css_get().
 			 * But considering how consume_stock works, it's not
@@ -1981,7 +1987,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 		case CHARGE_OK:
 			break;
 		case CHARGE_RETRY: /* not in OOM situation but retry */
-			csize = PAGE_SIZE;
+			csize = page_size;
 			css_put(&mem->css);
 			mem = NULL;
 			goto again;
@@ -2002,8 +2008,8 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 		}
 	} while (ret != CHARGE_OK);
 
-	if (csize > PAGE_SIZE)
-		refill_stock(mem, csize - PAGE_SIZE);
+	if (csize > page_size)
+		refill_stock(mem, csize - page_size);
 	css_put(&mem->css);
 done:
 	*memcg = mem;
@@ -2031,9 +2037,10 @@ static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
 	}
 }
 
-static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
+static void mem_cgroup_cancel_charge(struct mem_cgroup *mem,
+				     int page_size)
 {
-	__mem_cgroup_cancel_charge(mem, 1);
+	__mem_cgroup_cancel_charge(mem, page_size >> PAGE_SHIFT);
 }
 
 /*
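__mem_cgroup_cancel_charge() counts in pages, so the new wrapper converts the byte size with >> PAGE_SHIFT; a 2MB THP therefore cancels 512 page charges at once. A one-line check of that conversion (assumed x86-64 constants):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: x86-64 */

int main(void)
{
	unsigned long page_size = 1UL << (PAGE_SHIFT + 9);	/* 2MB THP */

	/* the conversion mem_cgroup_cancel_charge() performs */
	printf("%lu pages\n", page_size >> PAGE_SHIFT);		/* 512 */
	return 0;
}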
@@ -2089,8 +2096,9 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
  */
 
 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
-				       struct page_cgroup *pc,
-				       enum charge_type ctype)
+				       struct page_cgroup *pc,
+				       enum charge_type ctype,
+				       int page_size)
 {
 	/* try_charge() can return NULL to *memcg, taking care of it. */
 	if (!mem)
@@ -2099,7 +2107,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
 		unlock_page_cgroup(pc);
-		mem_cgroup_cancel_charge(mem);
+		mem_cgroup_cancel_charge(mem, page_size);
 		return;
 	}
 
@@ -2173,7 +2181,7 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc,
 	mem_cgroup_charge_statistics(from, pc, false);
 	if (uncharge)
 		/* This is not "cancel", but cancel_charge does all we need. */
-		mem_cgroup_cancel_charge(from);
+		mem_cgroup_cancel_charge(from, PAGE_SIZE);
 
 	/* caller should have done css_get */
 	pc->mem_cgroup = to;
@@ -2234,13 +2242,14 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
 		goto put;
 
 	parent = mem_cgroup_from_cont(pcg);
-	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
+	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false,
+				      PAGE_SIZE);
 	if (ret || !parent)
 		goto put_back;
 
 	ret = mem_cgroup_move_account(pc, child, parent, true);
 	if (ret)
-		mem_cgroup_cancel_charge(parent);
+		mem_cgroup_cancel_charge(parent, PAGE_SIZE);
 put_back:
 	putback_lru_page(page);
 put:
@@ -2261,18 +2270,22 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	struct mem_cgroup *mem = NULL;
 	struct page_cgroup *pc;
 	int ret;
+	int page_size = PAGE_SIZE;
+
+	if (PageTransHuge(page))
+		page_size <<= compound_order(page);
 
 	pc = lookup_page_cgroup(page);
 	/* can happen at boot */
 	if (unlikely(!pc))
 		return 0;
 	prefetchw(pc);
 
-	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
+	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page_size);
 	if (ret || !mem)
 		return ret;
 
-	__mem_cgroup_commit_charge(mem, pc, ctype);
+	__mem_cgroup_commit_charge(mem, pc, ctype, page_size);
 	return 0;
 }
 
@@ -2281,8 +2294,6 @@ int mem_cgroup_newpage_charge(struct page *page,
 {
 	if (mem_cgroup_disabled())
 		return 0;
-	if (PageCompound(page))
-		return 0;
 	/*
 	 * If already mapped, we don't have to account.
 	 * If page cache, page->mapping has address_space.
@@ -2388,13 +2399,13 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
 	if (!mem)
 		goto charge_cur_mm;
 	*ptr = mem;
-	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
+	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, PAGE_SIZE);
 	css_put(&mem->css);
 	return ret;
 charge_cur_mm:
 	if (unlikely(!mm))
 		mm = &init_mm;
-	return __mem_cgroup_try_charge(mm, mask, ptr, true);
+	return __mem_cgroup_try_charge(mm, mask, ptr, true, PAGE_SIZE);
 }
 
 static void
@@ -2410,7 +2421,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
 	cgroup_exclude_rmdir(&ptr->css);
 	pc = lookup_page_cgroup(page);
 	mem_cgroup_lru_del_before_commit_swapcache(page);
-	__mem_cgroup_commit_charge(ptr, pc, ctype);
+	__mem_cgroup_commit_charge(ptr, pc, ctype, PAGE_SIZE);
 	mem_cgroup_lru_add_after_commit_swapcache(page);
 	/*
 	 * Now swap is on-memory. This means this page may be
@@ -2459,11 +2470,12 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
 		return;
 	if (!mem)
 		return;
-	mem_cgroup_cancel_charge(mem);
+	mem_cgroup_cancel_charge(mem, PAGE_SIZE);
 }
 
 static void
-__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
+__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype,
+	      int page_size)
 {
 	struct memcg_batch_info *batch = NULL;
 	bool uncharge_memsw = true;
@@ -2490,6 +2502,9 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
 	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
 		goto direct_uncharge;
 
+	if (page_size != PAGE_SIZE)
+		goto direct_uncharge;
+
 	/*
 	 * In typical case, batch->memcg == mem. This means we can
 	 * merge a series of uncharges to an uncharge of res_counter.
@@ -2503,9 +2518,9 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
 	batch->memsw_bytes += PAGE_SIZE;
 	return;
 direct_uncharge:
-	res_counter_uncharge(&mem->res, PAGE_SIZE);
+	res_counter_uncharge(&mem->res, page_size);
 	if (uncharge_memsw)
-		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+		res_counter_uncharge(&mem->memsw, page_size);
 	if (unlikely(batch->memcg != mem))
 		memcg_oom_recover(mem);
 	return;
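The batch path accumulates PAGE_SIZE per call into batch->bytes, so it can only express runs of single pages; the page_size != PAGE_SIZE test added in the previous hunk routes huge pages straight to direct_uncharge. A toy model of that split (the toy_* names are hypothetical, not kernel symbols):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed: 4KB base pages */

static unsigned long res_bytes = 1UL << 30;	/* toy res_counter: 1GB charged */
static unsigned long batch_bytes;		/* toy batch accumulator */

/* batch 4KB pages for a later merged uncharge; huge pages go direct */
static void toy_uncharge(unsigned long page_size)
{
	if (page_size != PAGE_SIZE) {
		res_bytes -= page_size;	/* direct_uncharge */
		return;
	}
	batch_bytes += PAGE_SIZE;	/* folded into one res_counter op later */
}

int main(void)
{
	toy_uncharge(PAGE_SIZE);	/* batched */
	toy_uncharge(PAGE_SIZE << 9);	/* 2MB THP: direct */
	printf("res=%lu batched=%lu\n", res_bytes, batch_bytes);
	return 0;
}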
@@ -2519,13 +2534,17 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 {
 	struct page_cgroup *pc;
 	struct mem_cgroup *mem = NULL;
+	int page_size = PAGE_SIZE;
 
 	if (mem_cgroup_disabled())
 		return NULL;
 
 	if (PageSwapCache(page))
 		return NULL;
 
+	if (PageTransHuge(page))
+		page_size <<= compound_order(page);
+
 	/*
 	 * Check if our page_cgroup is valid
 	 */
@@ -2579,7 +2598,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 		mem_cgroup_get(mem);
 	}
 	if (!mem_cgroup_is_root(mem))
-		__do_uncharge(mem, ctype);
+		__do_uncharge(mem, ctype, page_size);
 
 	return mem;
 
@@ -2774,6 +2793,7 @@ int mem_cgroup_prepare_migration(struct page *page,
 	enum charge_type ctype;
 	int ret = 0;
 
+	VM_BUG_ON(PageTransHuge(page));
 	if (mem_cgroup_disabled())
 		return 0;
 
@@ -2823,7 +2843,7 @@ int mem_cgroup_prepare_migration(struct page *page,
 		return 0;
 
 	*ptr = mem;
-	ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
+	ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false, PAGE_SIZE);
 	css_put(&mem->css);	/* drop extra refcnt */
 	if (ret || *ptr == NULL) {
 		if (PageAnon(page)) {
@@ -2850,7 +2870,7 @@ int mem_cgroup_prepare_migration(struct page *page,
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(mem, pc, ctype);
+	__mem_cgroup_commit_charge(mem, pc, ctype, PAGE_SIZE);
 	return ret;
 }
 
@@ -4461,7 +4481,8 @@ static int mem_cgroup_do_precharge(unsigned long count)
 			batch_count = PRECHARGE_COUNT_AT_ONCE;
 			cond_resched();
 		}
-		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
+					      PAGE_SIZE);
 		if (ret || !mem)
 			/* mem_cgroup_clear_mc() will do uncharge later */
 			return -ENOMEM;
@@ -4623,6 +4644,7 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 	pte_t *pte;
 	spinlock_t *ptl;
 
+	VM_BUG_ON(pmd_trans_huge(*pmd));
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE)
 		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
@@ -4789,6 +4811,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 	spinlock_t *ptl;
 
 retry:
+	VM_BUG_ON(pmd_trans_huge(*pmd));
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; addr += PAGE_SIZE) {
 		pte_t ptent = *(pte++);
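Both walkers step through the range one pte (PAGE_SIZE) at a time, so the new VM_BUG_ON(pmd_trans_huge(*pmd)) documents the assumption that no huge pmd ever reaches these loops. A toy sketch of a walk guarded by that invariant (assumed 4KB pages; toy_pmd is a hypothetical stand-in, not a kernel type):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed: 4KB base pages */

/* toy pmd: a flag standing in for "maps a 2MB huge page" */
struct toy_pmd { int trans_huge; };

/* count ptes in [addr, end), as the precharge walker does */
static int count_range(const struct toy_pmd *pmd, unsigned long addr,
		       unsigned long end)
{
	int pages = 0;

	assert(!pmd->trans_huge);	/* caller must have split it first */
	for (; addr != end; addr += PAGE_SIZE)
		pages++;
	return pages;
}

int main(void)
{
	struct toy_pmd pmd = { 0 };

	printf("%d ptes\n", count_range(&pmd, 0, 512 * PAGE_SIZE)); /* 512 */
	return 0;
}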