author    Matthew Wilcox (Oracle) <willy@infradead.org>    2022-05-12 20:23:03 -0700
committer Andrew Morton <akpm@linux-foundation.org>        2022-05-13 07:20:15 -0700
commit    64daa5d818ae3430f0785206b0af13ef528cb9ef
tree      94f29f501413ea0994fc58b8e1ed781f1812b89f
parent    0a36111c8c20b2edc7c83f084bdba2be9d42c1e9
vmscan: convert lazy freeing to folios
Remove a hidden call to compound_head(), and account nr_pages instead of
a single page.  This matches the code in lru_lazyfree_fn() that accounts
nr_pages to PGLAZYFREE.

Link: https://lkml.kernel.org/r/20220504182857.4013401-12-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
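For context, the point of accounting nr_pages is that a large folio spanning
many base pages should bump PGLAZYFREED by its full page count in one call,
rather than by 1.  Below is a minimal, hypothetical userspace C sketch of that
accounting idea only; the struct and counter names are illustrative stand-ins,
not the kernel's actual definitions.

#include <stdio.h>

/* Illustrative stand-in for a folio; the kernel's struct folio is far richer. */
struct folio {
	unsigned long nr_pages;		/* base pages spanned by this folio */
};

/* Stand-in for the PGLAZYFREED vm event counter. */
static unsigned long pglazyfreed;

/*
 * Account the whole folio in one step, mirroring what the patch does with
 * count_vm_events(PGLAZYFREED, nr_pages).
 */
static void account_lazyfreed(const struct folio *folio)
{
	pglazyfreed += folio->nr_pages;
}

int main(void)
{
	struct folio order0 = { .nr_pages = 1 };
	struct folio pmd_sized = { .nr_pages = 512 };	/* e.g. 2MB of 4KB pages */

	account_lazyfreed(&order0);
	account_lazyfreed(&pmd_sized);

	/* Prints 513: the large folio counts as 512 pages, not as 1. */
	printf("PGLAZYFREED: %lu\n", pglazyfreed);
	return 0;
}

The other half of the change is the switch to folio_test_anon(),
folio_test_swapbacked() and folio_ref_freeze(), which operate on the folio
directly; that is where the hidden compound_head() lookup performed by the
page-based helpers goes away.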
-rw-r--r--	include/linux/memcontrol.h	14
-rw-r--r--	mm/vmscan.c	18
2 files changed, 23 insertions(+), 9 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index fe580cb96683..8ea4b541c31e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1057,6 +1057,15 @@ static inline void count_memcg_page_event(struct page *page,
count_memcg_events(memcg, idx, 1);
}
+static inline void count_memcg_folio_events(struct folio *folio,
+ enum vm_event_item idx, unsigned long nr)
+{
+ struct mem_cgroup *memcg = folio_memcg(folio);
+
+ if (memcg)
+ count_memcg_events(memcg, idx, nr);
+}
+
static inline void count_memcg_event_mm(struct mm_struct *mm,
enum vm_event_item idx)
{
@@ -1494,6 +1503,11 @@ static inline void count_memcg_page_event(struct page *page,
{
}
+static inline void count_memcg_folio_events(struct folio *folio,
+ enum vm_event_item idx, unsigned long nr)
+{
+}
+
static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 00b985caf40d..972557e2fb92 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1902,20 +1902,20 @@ retry:
}
}
- if (PageAnon(page) && !PageSwapBacked(page)) {
+ if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
/* follow __remove_mapping for reference */
- if (!page_ref_freeze(page, 1))
+ if (!folio_ref_freeze(folio, 1))
goto keep_locked;
/*
- * The page has only one reference left, which is
+ * The folio has only one reference left, which is
* from the isolation. After the caller puts the
- * page back on lru and drops the reference, the
- * page will be freed anyway. It doesn't matter
- * which lru it goes. So we don't bother checking
- * PageDirty here.
+ * folio back on the lru and drops the reference, the
+ * folio will be freed anyway. It doesn't matter
+ * which lru it goes on. So we don't bother checking
+ * the dirty flag here.
*/
- count_vm_event(PGLAZYFREED);
- count_memcg_page_event(page, PGLAZYFREED);
+ count_vm_events(PGLAZYFREED, nr_pages);
+ count_memcg_folio_events(folio, PGLAZYFREED, nr_pages);
} else if (!mapping || !__remove_mapping(mapping, folio, true,
sc->target_mem_cgroup))
goto keep_locked;