@@ -74,7 +74,13 @@ atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
 
 static bool hw_memory_failure __read_mostly = false;
 
-static bool __page_handle_poison(struct page *page)
+/*
+ * Return values:
+ *   1: the page is dissolved (if needed) and taken off from buddy,
+ *   0: the page is dissolved (if needed) and not taken off from buddy,
+ * < 0: failed to dissolve.
+ */
+static int __page_handle_poison(struct page *page)
 {
 	int ret;
 
@@ -84,7 +90,7 @@ static bool __page_handle_poison(struct page *page)
 	ret = take_page_off_buddy(page);
 	zone_pcp_enable(page_zone(page));
 
-	return ret > 0;
+	return ret;
 }
 
 static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
@@ -94,7 +100,7 @@ static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, boo
 	 * Doing this check for free pages is also fine since dissolve_free_huge_page
 	 * returns 0 for non-hugetlb pages as well.
 	 */
-	if (!__page_handle_poison(page))
+	if (__page_handle_poison(page) <= 0)
 		/*
 		 * We could fail to take off the target page from buddy
 		 * for example due to racy page allocation, but that's
@@ -1086,7 +1092,7 @@ static int me_huge_page(struct page_state *ps, struct page *p)
 		 * subpages.
 		 */
 		put_page(hpage);
-		if (__page_handle_poison(p)) {
+		if (__page_handle_poison(p) > 0) {
 			page_ref_inc(p);
 			res = MF_RECOVERED;
 		}
@@ -1867,7 +1873,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
 	if (res == 0) {
 		unlock_page(head);
 		res = MF_FAILED;
-		if (__page_handle_poison(p)) {
+		if (__page_handle_poison(p) > 0) {
 			page_ref_inc(p);
 			res = MF_RECOVERED;
 		}
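
For context, the change turns __page_handle_poison() from a bool into a three-way int: callers that only care about the page actually being taken off buddy now test for > 0 (as in me_huge_page() and try_memory_failure_hugetlb()), while page_handle_poison() treats both "dissolved but still in buddy" (0) and "failed to dissolve" (< 0) as failure via <= 0. The snippet below is a standalone userspace sketch of that contract, not kernel code; fake_page_handle_poison() and the simulated return values are hypothetical stand-ins.

/*
 * Minimal sketch of the new three-way return contract:
 *   > 0: dissolved (if needed) and taken off the buddy allocator,
 *     0: dissolved (if needed) but not taken off the buddy allocator,
 *   < 0: failed to dissolve.
 */
#include <stdio.h>

static int fake_page_handle_poison(int simulated)
{
	/* stands in for dissolve_free_huge_page() + take_page_off_buddy() */
	return simulated;
}

int main(void)
{
	int samples[] = { 1, 0, -16 };	/* -16 mimics -EBUSY */

	for (int i = 0; i < 3; i++) {
		int ret = fake_page_handle_poison(samples[i]);

		if (ret > 0)	/* me_huge_page()/try_memory_failure_hugetlb() style check */
			printf("ret=%d: taken off buddy -> MF_RECOVERED path\n", ret);
		else		/* page_handle_poison() style check: ret <= 0 is failure */
			printf("ret=%d: not fully handled -> failure path\n", ret);
	}
	return 0;
}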