diff --git a/mm/gup.c b/mm/gup.c
index ce217ce2a584d1e85f144c50380e99a77d1cbb7b..c64e0c22ceb2b5c80fa41178275304e310600bba 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1981,7 +1981,7 @@ static unsigned long collect_longterm_unpinnable_pages(
 			continue;
 		}
 
-		if (drain_allow) {
+		if (!folio_test_lru(folio) && drain_allow) {
 			lru_add_drain_all();
 			drain_allow = false;
 		}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ef8a375377aec4693ccfdaca712d528881d0568d..d438beb856b2b57073af3a4b7d1c2cc8cc899fe7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2801,20 +2801,10 @@ struct page *rmqueue(struct zone *preferred_zone,
 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
 
 	if (likely(pcp_allowed_order(order))) {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
-		    order != pageblock_order) {
-			page = rmqueue_pcplist(preferred_zone, zone, order,
-					       migratetype, alloc_flags);
-			if (likely(page))
-				goto out;
-		}
-#else
 		page = rmqueue_pcplist(preferred_zone, zone, order,
 				       migratetype, alloc_flags);
 		if (likely(page))
 			goto out;
-#endif
 	}
 
 	page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
diff --git a/mm/swap.c b/mm/swap.c
index e5380d732c0dfbbb1bf684e3f3052b7891565289..24d0cdef6694a465b14ef70782c9ee3866c4b176 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -209,10 +209,6 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 	for (i = 0; i < folio_batch_count(fbatch); i++) {
 		struct folio *folio = fbatch->folios[i];
 
-		/* block memcg migration while the folio moves between lru */
-		if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
-			continue;
-
 		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 		move_fn(lruvec, folio);
 
@@ -254,11 +250,16 @@ static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
 void folio_rotate_reclaimable(struct folio *folio)
 {
 	if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
-	    !folio_test_unevictable(folio) && folio_test_lru(folio)) {
+	    !folio_test_unevictable(folio)) {
 		struct folio_batch *fbatch;
 		unsigned long flags;
 
 		folio_get(folio);
+		if (!folio_test_clear_lru(folio)) {
+			folio_put(folio);
+			return;
+		}
+
 		local_lock_irqsave(&lru_rotate.lock, flags);
 		fbatch = this_cpu_ptr(&lru_rotate.fbatch);
 		folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
@@ -351,11 +352,15 @@ static void folio_activate_drain(int cpu)
 
 void folio_activate(struct folio *folio)
 {
-	if (folio_test_lru(folio) && !folio_test_active(folio) &&
-	    !folio_test_unevictable(folio)) {
+	if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
 		struct folio_batch *fbatch;
 
 		folio_get(folio);
+		if (!folio_test_clear_lru(folio)) {
+			folio_put(folio);
+			return;
+		}
+
 		local_lock(&cpu_fbatches.lock);
 		fbatch = this_cpu_ptr(&cpu_fbatches.activate);
 		folio_batch_add_and_move(fbatch, folio, folio_activate_fn);
@@ -696,6 +701,11 @@ void deactivate_file_folio(struct folio *folio)
 		return;
 
 	folio_get(folio);
+	if (!folio_test_clear_lru(folio)) {
+		folio_put(folio);
+		return;
+	}
+
 	local_lock(&cpu_fbatches.lock);
 	fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file);
 	folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
@@ -712,11 +722,16 @@ void deactivate_file_folio(struct folio *folio)
  */
 void folio_deactivate(struct folio *folio)
 {
-	if (folio_test_lru(folio) && !folio_test_unevictable(folio) &&
-	    (folio_test_active(folio) || lru_gen_enabled())) {
+	if (!folio_test_unevictable(folio) && (folio_test_active(folio) ||
+	    lru_gen_enabled())) {
 		struct folio_batch *fbatch;
 
 		folio_get(folio);
+		if (!folio_test_clear_lru(folio)) {
+			folio_put(folio);
+			return;
+		}
+
 		local_lock(&cpu_fbatches.lock);
 		fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
 		folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
@@ -733,12 +748,16 @@ void folio_deactivate(struct folio *folio)
  */
 void folio_mark_lazyfree(struct folio *folio)
 {
-	if (folio_test_lru(folio) && folio_test_anon(folio) &&
-	    folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
-	    !folio_test_unevictable(folio)) {
+	if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
+	    !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
 		struct folio_batch *fbatch;
 
 		folio_get(folio);
+		if (!folio_test_clear_lru(folio)) {
+			folio_put(folio);
+			return;
+		}
+
 		local_lock(&cpu_fbatches.lock);
 		fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
 		folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 28ffb52160f52bb08c78df37417878e819e74dcb..fd5c2e9e7b29095b0281b314dd0b244d18ea58a4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -5030,7 +5030,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
 	}
 
 	/* ineligible */
-	if (zone > sc->reclaim_idx) {
+	if (!folio_test_lru(folio) || zone > sc->reclaim_idx) {
 		gen = folio_inc_gen(lruvec, folio, false);
 		list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
 		return true;
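Note on the pattern the mm/swap.c hunks converge on: each caller now takes a
folio reference first, then atomically test-and-clears the LRU flag, and only
adds the folio to a per-CPU batch if it won the flag. Exactly one CPU can own
the flag at a time, so a folio can sit on at most one batch, and
folio_batch_move_lru() no longer needs its own test-and-clear (deleted in the
first mm/swap.c hunk). The mm/gup.c and mm/vmscan.c hunks then account for
folios whose LRU flag is temporarily clear while they wait in a batch. Below
is a minimal user-space sketch of that ownership handshake using C11 atomics;
obj, flag "lru", obj_get/obj_put and batch_add are hypothetical stand-ins for
illustration, not the kernel's folio API.

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int refcount;
	atomic_bool lru;	/* stand-in for the folio's LRU page flag */
};

static void obj_get(struct obj *o) { atomic_fetch_add(&o->refcount, 1); }
static void obj_put(struct obj *o) { atomic_fetch_sub(&o->refcount, 1); }
static void batch_add(struct obj *o) { (void)o; /* add to a per-CPU batch */ }

static bool queue_for_move(struct obj *o)
{
	obj_get(o);
	/*
	 * Atomic test-and-clear: only one caller sees the flag set, so
	 * the object lands on at most one batch. A loser drops its
	 * reference and backs off, mirroring the patch's
	 * folio_test_clear_lru()/folio_put() sequence.
	 */
	if (!atomic_exchange(&o->lru, false)) {
		obj_put(o);
		return false;
	}
	batch_add(o);	/* batch now owns the reference and the flag */
	return true;
}

The same reasoning presumably motivates the mm/vmscan.c change: sort_folio()
treats a folio with a clear LRU flag as ineligible rather than assuming every
folio it walks is still on an LRU list.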