diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3fb49aec13fd4e6b39247d8ee7ffe092b405d1ed..9cc02a7e503b47b0338b03927f3441c8b3992716 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -892,11 +892,7 @@ static inline void set_compound_page_dtor(struct page *page,
 	page[1].compound_dtor = compound_dtor;
 }
 
-static inline void destroy_compound_page(struct page *page)
-{
-	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
-	compound_page_dtors[page[1].compound_dtor](page);
-}
+void destroy_large_folio(struct folio *folio);
 
 static inline int head_compound_pincount(struct page *head)
 {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 248469134962c320b42ad6ae3e492d7e92f433b2..52fd92b2c1fe3f83bb26b3b27416aca123f4556f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -744,6 +744,14 @@ void prep_compound_page(struct page *page, unsigned int order)
 	prep_compound_head(page, order);
 }
 
+void destroy_large_folio(struct folio *folio)
+{
+	enum compound_dtor_id dtor = folio_page(folio, 1)->compound_dtor;
+
+	VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
+	compound_page_dtors[dtor](&folio->page);
+}
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 unsigned int _debug_guardpage_minorder;
 
diff --git a/mm/swap.c b/mm/swap.c
index 5f6caa6515992745ac3c83dcb1b134cf84d92fef..1f563d857768587810e6000bbb73749bc4c834c2 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -115,7 +115,7 @@ static void __folio_put_large(struct folio *folio)
 	 */
 	if (!folio_test_hugetlb(folio))
 		__page_cache_release(folio);
-	destroy_compound_page(&folio->page);
+	destroy_large_folio(folio);
 }
 
 void __folio_put(struct folio *folio)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e7d3db64a4e08bb730ca88dbbff4a5b8cda79905..e660d7205f47323593d3d5659a7606ec5200bbcc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1979,7 +1979,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 		 * appear not as the counts should be low
 		 */
 		if (unlikely(folio_test_large(folio)))
-			destroy_compound_page(&folio->page);
+			destroy_large_folio(folio);
 		else
 			list_add(&folio->lru, &free_pages);
 		continue;
@@ -2348,7 +2348,7 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
 
 		if (unlikely(folio_test_large(folio))) {
			spin_unlock_irq(&lruvec->lru_lock);
-			destroy_compound_page(&folio->page);
+			destroy_large_folio(folio);
			spin_lock_irq(&lruvec->lru_lock);
 		} else
			list_add(&folio->lru, &folios_to_free);
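
A minimal sketch, not part of the patch itself, of how a call site migrates;
the folio local here stands in for whatever large folio a caller already
holds:

	/* before: the caller converted back to the head page by hand */
	destroy_compound_page(&folio->page);

	/*
	 * after: the folio is passed directly; destroy_large_folio()
	 * performs the folio_page(folio, 1) destructor lookup and the
	 * &folio->page conversion internally
	 */
	destroy_large_folio(folio);

Pushing the conversion down into destroy_large_folio() also moves the
compound_page_dtors[] dispatch out of include/linux/mm.h, so the header
keeps only a declaration and the VM_BUG_ON_PAGE check becomes a
VM_BUG_ON_FOLIO inside mm/page_alloc.c.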