kernel: bump 6.1 to 6.1.55

lean 2023-09-30 16:32:49 +08:00
parent f35d972136
commit 9f1e059f89
16 changed files with 69 additions and 266 deletions


@@ -1,2 +1,2 @@
-LINUX_VERSION-6.1 = .52
-LINUX_KERNEL_HASH-6.1.52 = 567737990dbc9265966a0786392821a9fa559fd346494fd1eff050dbeb383a52
+LINUX_VERSION-6.1 = .55
+LINUX_KERNEL_HASH-6.1.55 = a87e241ec15d53452c4efe219713a3769d88cc436b5b98cf6efb262c4aff15c0


@@ -256,9 +256,9 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 - struct lru_gen_struct *lrugen = &lruvec->lrugen;
 + struct lru_gen_folio *lrugen = &lruvec->lrugen;
 restart:
 spin_lock_irq(&lruvec->lru_lock);
-@@ -4389,7 +4389,7 @@ static bool try_to_inc_max_seq(struct lr
+@@ -4387,7 +4387,7 @@ static bool try_to_inc_max_seq(struct lr
 bool success;
 struct lru_gen_mm_walk *walk;
 struct mm_struct *mm = NULL;
@@ -267,7 +267,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
-@@ -4452,7 +4452,7 @@ static bool should_run_aging(struct lruv
+@@ -4454,7 +4454,7 @@ static bool should_run_aging(struct lruv
 unsigned long old = 0;
 unsigned long young = 0;
 unsigned long total = 0;
@@ -276,7 +276,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 for (type = !can_swap; type < ANON_AND_FILE; type++) {
-@@ -4737,7 +4737,7 @@ static bool sort_folio(struct lruvec *lr
+@@ -4740,7 +4740,7 @@ static bool sort_folio(struct lruvec *lr
 int delta = folio_nr_pages(folio);
 int refs = folio_lru_refs(folio);
 int tier = lru_tier_from_refs(refs);
@@ -285,7 +285,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio);
-@@ -4837,7 +4837,7 @@ static int scan_folios(struct lruvec *lr
+@@ -4848,7 +4848,7 @@ static int scan_folios(struct lruvec *lr
 int scanned = 0;
 int isolated = 0;
 int remaining = MAX_LRU_BATCH;
@@ -294,7 +294,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 VM_WARN_ON_ONCE(!list_empty(list));
-@@ -5237,7 +5237,7 @@ done:
+@@ -5249,7 +5249,7 @@ done:
 static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
 {
@@ -303,7 +303,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 if (lrugen->enabled) {
 enum lru_list lru;
-@@ -5519,7 +5519,7 @@ static void lru_gen_seq_show_full(struct
+@@ -5531,7 +5531,7 @@ static void lru_gen_seq_show_full(struct
 int i;
 int type, tier;
 int hist = lru_hist_from_seq(seq);
@@ -312,7 +312,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 for (tier = 0; tier < MAX_NR_TIERS; tier++) {
 seq_printf(m, " %10d", tier);
-@@ -5569,7 +5569,7 @@ static int lru_gen_seq_show(struct seq_f
+@@ -5581,7 +5581,7 @@ static int lru_gen_seq_show(struct seq_f
 unsigned long seq;
 bool full = !debugfs_real_fops(m->file)->write;
 struct lruvec *lruvec = v;
@@ -321,7 +321,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 int nid = lruvec_pgdat(lruvec)->node_id;
 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 DEFINE_MAX_SEQ(lruvec);
-@@ -5823,7 +5823,7 @@ void lru_gen_init_lruvec(struct lruvec *
+@@ -5835,7 +5835,7 @@ void lru_gen_init_lruvec(struct lruvec *
 {
 int i;
 int gen, type, zone;


@@ -1,197 +0,0 @@
From 656287d55d9cfc72a4bcd4d9bd098570f12ce409 Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:19:00 -0700
Subject: [PATCH 02/19] UPSTREAM: mm: multi-gen LRU: rename lrugen->lists[] to
lrugen->folios[]

lru_gen_folio will be chained into per-node lists by the coming
lrugen->list.

Link: https://lkml.kernel.org/r/20221222041905.2431096-3-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Bug: 274865848
(cherry picked from commit 6df1b2212950aae2b2188c6645ea18e2a9e3fdd5)
Change-Id: I09f53e0fb2cd6b8b3adbb8a80b15dc5efbeae857
Signed-off-by: T.J. Mercier <tjmercier@google.com>
---
Documentation/mm/multigen_lru.rst | 8 ++++----
include/linux/mm_inline.h | 4 ++--
include/linux/mmzone.h | 8 ++++----
mm/vmscan.c | 20 ++++++++++----------
4 files changed, 20 insertions(+), 20 deletions(-)
--- a/Documentation/mm/multigen_lru.rst
+++ b/Documentation/mm/multigen_lru.rst
@@ -89,15 +89,15 @@ variables are monotonically increasing.
Generation numbers are truncated into ``order_base_2(MAX_NR_GENS+1)``
bits in order to fit into the gen counter in ``folio->flags``. Each
-truncated generation number is an index to ``lrugen->lists[]``. The
+truncated generation number is an index to ``lrugen->folios[]``. The
sliding window technique is used to track at least ``MIN_NR_GENS`` and
at most ``MAX_NR_GENS`` generations. The gen counter stores a value
within ``[1, MAX_NR_GENS]`` while a page is on one of
-``lrugen->lists[]``; otherwise it stores zero.
+``lrugen->folios[]``; otherwise it stores zero.
Each generation is divided into multiple tiers. A page accessed ``N``
times through file descriptors is in tier ``order_base_2(N)``. Unlike
-generations, tiers do not have dedicated ``lrugen->lists[]``. In
+generations, tiers do not have dedicated ``lrugen->folios[]``. In
contrast to moving across generations, which requires the LRU lock,
moving across tiers only involves atomic operations on
``folio->flags`` and therefore has a negligible cost. A feedback loop
@@ -127,7 +127,7 @@ page mapped by this PTE to ``(max_seq%MA
Eviction
--------
The eviction consumes old generations. Given an ``lruvec``, it
-increments ``min_seq`` when ``lrugen->lists[]`` indexed by
+increments ``min_seq`` when ``lrugen->folios[]`` indexed by
``min_seq%MAX_NR_GENS`` becomes empty. To select a type and a tier to
evict from, it first compares ``min_seq[]`` to select the older type.
If both types are equally old, it selects the one whose first tier has
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -256,9 +256,9 @@ static inline bool lru_gen_add_folio(str
lru_gen_update_size(lruvec, folio, -1, gen);
/* for folio_rotate_reclaimable() */
if (reclaiming)
- list_add_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
+ list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
else
- list_add(&folio->lru, &lrugen->lists[gen][type][zone]);
+ list_add(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
}
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -312,7 +312,7 @@ enum lruvec_flags {
* They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
* offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
* corresponding generation. The gen counter in folio->flags stores gen+1 while
- * a page is on one of lrugen->lists[]. Otherwise it stores 0.
+ * a page is on one of lrugen->folios[]. Otherwise it stores 0.
*
* A page is added to the youngest generation on faulting. The aging needs to
* check the accessed bit at least twice before handing this page over to the
@@ -324,8 +324,8 @@ enum lruvec_flags {
* rest of generations, if they exist, are considered inactive. See
* lru_gen_is_active().
*
- * PG_active is always cleared while a page is on one of lrugen->lists[] so that
- * the aging needs not to worry about it. And it's set again when a page
+ * PG_active is always cleared while a page is on one of lrugen->folios[] so
+ * that the aging needs not to worry about it. And it's set again when a page
* considered active is isolated for non-reclaiming purposes, e.g., migration.
* See lru_gen_add_folio() and lru_gen_del_folio().
*
@@ -412,7 +412,7 @@ struct lru_gen_folio {
/* the birth time of each generation in jiffies */
unsigned long timestamps[MAX_NR_GENS];
/* the multi-gen LRU lists, lazily sorted on eviction */
- struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
/* the multi-gen LRU sizes, eventually consistent */
long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
/* the exponential moving average of refaulted */
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4258,7 +4258,7 @@ static bool inc_min_seq(struct lruvec *l
/* prevent cold/hot inversion if force_scan is true */
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
- struct list_head *head = &lrugen->lists[old_gen][type][zone];
+ struct list_head *head = &lrugen->folios[old_gen][type][zone];
while (!list_empty(head)) {
struct folio *folio = lru_to_folio(head);
@@ -4269,7 +4269,7 @@ static bool inc_min_seq(struct lruvec *l
VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
new_gen = folio_inc_gen(lruvec, folio, false);
- list_move_tail(&folio->lru, &lrugen->lists[new_gen][type][zone]);
+ list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]);
if (!--remaining)
return false;
@@ -4297,7 +4297,7 @@ static bool try_to_inc_min_seq(struct lr
gen = lru_gen_from_seq(min_seq[type]);
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
- if (!list_empty(&lrugen->lists[gen][type][zone]))
+ if (!list_empty(&lrugen->folios[gen][type][zone]))
goto next;
}
@@ -4762,7 +4762,7 @@ static bool sort_folio(struct lruvec *lr
/* promoted */
if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
- list_move(&folio->lru, &lrugen->lists[gen][type][zone]);
+ list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
}
@@ -4771,7 +4771,7 @@ static bool sort_folio(struct lruvec *lr
int hist = lru_hist_from_seq(lrugen->min_seq[type]);
gen = folio_inc_gen(lruvec, folio, false);
- list_move_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
+ list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
lrugen->protected[hist][type][tier - 1] + delta);
@@ -4783,7 +4783,7 @@ static bool sort_folio(struct lruvec *lr
if (folio_test_locked(folio) || folio_test_writeback(folio) ||
(type == LRU_GEN_FILE && folio_test_dirty(folio))) {
gen = folio_inc_gen(lruvec, folio, true);
- list_move(&folio->lru, &lrugen->lists[gen][type][zone]);
+ list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
}
@@ -4850,7 +4850,7 @@ static int scan_folios(struct lruvec *lr
for (zone = sc->reclaim_idx; zone >= 0; zone--) {
LIST_HEAD(moved);
int skipped = 0;
- struct list_head *head = &lrugen->lists[gen][type][zone];
+ struct list_head *head = &lrugen->folios[gen][type][zone];
while (!list_empty(head)) {
struct folio *folio = lru_to_folio(head);
@@ -5250,7 +5250,7 @@ static bool __maybe_unused state_is_vali
int gen, type, zone;
for_each_gen_type_zone(gen, type, zone) {
- if (!list_empty(&lrugen->lists[gen][type][zone]))
+ if (!list_empty(&lrugen->folios[gen][type][zone]))
return false;
}
}
@@ -5295,7 +5295,7 @@ static bool drain_evictable(struct lruve
int remaining = MAX_LRU_BATCH;
for_each_gen_type_zone(gen, type, zone) {
- struct list_head *head = &lruvec->lrugen.lists[gen][type][zone];
+ struct list_head *head = &lruvec->lrugen.folios[gen][type][zone];
while (!list_empty(head)) {
bool success;
@@ -5832,7 +5832,7 @@ void lru_gen_init_lruvec(struct lruvec *
lrugen->timestamps[i] = jiffies;
for_each_gen_type_zone(gen, type, zone)
- INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
+ INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]);
lruvec->mm_state.seq = MIN_NR_GENS;
init_waitqueue_head(&lruvec->mm_state.wait);
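
As an aside, the multigen_lru.rst and mmzone.h hunks in the patch above spell out the arithmetic behind the renamed array: a sequence number is truncated into an index into lrugen->folios[], and a page accessed N times through file descriptors lands in tier order_base_2(N). The standalone C sketch below illustrates only that indexing; MAX_NR_GENS, gen_from_seq() and tier_from_refs() are simplified stand-ins for the kernel's lru_gen_from_seq() and lru_tier_from_refs(), not the kernel implementation itself.

#include <stdio.h>

/* Simplified stand-ins for the kernel constants referenced above. */
#define MAX_NR_GENS  4  /* generations in the sliding window */
#define MAX_NR_TIERS 4  /* tiers within each generation */

/* A sequence number is truncated to an index into lrugen->folios[];
 * the kernel's lru_gen_from_seq() reduces to seq % MAX_NR_GENS. */
static unsigned int gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

/* order_base_2(N): a page accessed N times through file descriptors
 * is in tier ceil(log2(N)), capped here at MAX_NR_TIERS - 1. */
static unsigned int tier_from_refs(unsigned int refs)
{
	unsigned int tier = 0;

	while ((1u << tier) < refs && tier < MAX_NR_TIERS - 1)
		tier++;
	return tier;
}

int main(void)
{
	unsigned long min_seq = 4, max_seq = 7;

	/* The eviction drains folios[min_seq % MAX_NR_GENS] first;
	 * faulting pages are added at the gen indexed by max_seq. */
	printf("oldest gen index:   %u\n", gen_from_seq(min_seq)); /* 0 */
	printf("youngest gen index: %u\n", gen_from_seq(max_seq)); /* 3 */
	printf("refs=4 -> tier %u\n", tier_from_refs(4));          /* 2 */
	return 0;
}

Because the gen counter stores gen+1 while a page sits on one of lrugen->folios[] and 0 otherwise, order_base_2(MAX_NR_GENS + 1) bits in folio->flags suffice, exactly as the documentation hunk above states.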


@@ -66,7 +66,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 static bool writeback_throttling_sane(struct scan_control *sc)
 {
 return true;
-@@ -4993,8 +5003,7 @@ static int isolate_folios(struct lruvec
+@@ -5005,8 +5015,7 @@ static int isolate_folios(struct lruvec
 return scanned;
 }
@@ -76,7 +76,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 {
 int type;
 int scanned;
-@@ -5083,9 +5092,6 @@ retry:
+@@ -5095,9 +5104,6 @@ retry:
 goto retry;
 }
@@ -86,7 +86,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 return scanned;
 }
-@@ -5124,67 +5130,26 @@ done:
+@@ -5136,67 +5142,26 @@ done:
 return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
 }
@@ -163,7 +163,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 lru_add_drain();
-@@ -5208,7 +5173,7 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5220,7 +5185,7 @@ static void lru_gen_shrink_lruvec(struct
 if (!nr_to_scan)
 goto done;
@@ -172,7 +172,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 if (!delta)
 goto done;
-@@ -5216,7 +5181,7 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5228,7 +5193,7 @@ static void lru_gen_shrink_lruvec(struct
 if (scanned >= nr_to_scan)
 break;
@@ -181,7 +181,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 break;
 cond_resched();
-@@ -5666,7 +5631,7 @@ static int run_eviction(struct lruvec *l
+@@ -5678,7 +5643,7 @@ static int run_eviction(struct lruvec *l
 if (sc->nr_reclaimed >= nr_to_reclaim)
 return 0;


@@ -52,7 +52,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 unsigned long last_reclaimed;
 #endif
-@@ -4455,7 +4454,7 @@ done:
+@@ -4457,7 +4456,7 @@ done:
 return true;
 }
@@ -61,7 +61,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
 {
 int gen, type, zone;
-@@ -4464,6 +4463,13 @@ static bool should_run_aging(struct lruv
+@@ -4466,6 +4465,13 @@ static bool should_run_aging(struct lruv
 unsigned long total = 0;
 struct lru_gen_folio *lrugen = &lruvec->lrugen;
 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
@@ -75,7 +75,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 for (type = !can_swap; type < ANON_AND_FILE; type++) {
 unsigned long seq;
-@@ -4492,8 +4498,6 @@ static bool should_run_aging(struct lruv
+@@ -4494,8 +4500,6 @@ static bool should_run_aging(struct lruv
 * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
 * ideal number of generations is MIN_NR_GENS+1.
 */
@@ -84,7 +84,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
 return false;
-@@ -4512,40 +4516,54 @@ static bool should_run_aging(struct lruv
+@@ -4514,40 +4518,54 @@ static bool should_run_aging(struct lruv
 return false;
 }
@@ -160,7 +160,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 }
 /* to protect the working set of the last N jiffies */
-@@ -4554,46 +4572,32 @@ static unsigned long lru_gen_min_ttl __r
+@@ -4556,46 +4574,32 @@ static unsigned long lru_gen_min_ttl __r
 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
 {
 struct mem_cgroup *memcg;
@@ -214,7 +214,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 */
 if (mutex_trylock(&oom_lock)) {
 struct oom_control oc = {
-@@ -5101,33 +5105,27 @@ retry:
+@@ -5113,33 +5117,27 @@ retry:
 * reclaim.
 */
 static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
@@ -254,7 +254,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 }
 static unsigned long get_nr_to_reclaim(struct scan_control *sc)
-@@ -5146,9 +5144,7 @@ static unsigned long get_nr_to_reclaim(s
+@@ -5158,9 +5156,7 @@ static unsigned long get_nr_to_reclaim(s
 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
 struct blk_plug plug;
@@ -264,7 +264,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
 lru_add_drain();
-@@ -5169,13 +5165,13 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5181,13 +5177,13 @@ static void lru_gen_shrink_lruvec(struct
 else
 swappiness = 0;
@@ -281,7 +281,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 scanned += delta;
 if (scanned >= nr_to_scan)
-@@ -5187,10 +5183,6 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5199,10 +5195,6 @@ static void lru_gen_shrink_lruvec(struct
 cond_resched();
 }


@@ -26,7 +26,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 --- a/mm/vmscan.c
 +++ b/mm/vmscan.c
-@@ -4454,68 +4454,6 @@ done:
+@@ -4456,68 +4456,6 @@ done:
 return true;
 }
@@ -95,7 +95,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
 {
 int gen, type, zone;
-@@ -5099,6 +5037,68 @@ retry:
+@@ -5111,6 +5049,68 @@ retry:
 return scanned;
 }


@@ -76,7 +76,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 --- a/include/linux/memcontrol.h
 +++ b/include/linux/memcontrol.h
-@@ -790,6 +790,11 @@ static inline void obj_cgroup_put(struct
+@@ -795,6 +795,11 @@ static inline void obj_cgroup_put(struct
 percpu_ref_put(&objcg->refcnt);
 }
@@ -88,7 +88,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
 {
 if (memcg)
-@@ -1290,6 +1295,11 @@ static inline void obj_cgroup_put(struct
+@@ -1295,6 +1300,11 @@ static inline void obj_cgroup_put(struct
 {
 }
@@ -402,7 +402,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
 {
 struct pglist_data *pgdat = NODE_DATA(nid);
-@@ -4440,8 +4440,7 @@ done:
+@@ -4442,8 +4442,7 @@ done:
 if (sc->priority <= DEF_PRIORITY - 2)
 wait_event_killable(lruvec->mm_state.wait,
 max_seq < READ_ONCE(lrugen->max_seq));
@@ -412,7 +412,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 }
 VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq));
-@@ -4514,8 +4513,6 @@ static void lru_gen_age_node(struct pgli
+@@ -4516,8 +4515,6 @@ static void lru_gen_age_node(struct pgli
 VM_WARN_ON_ONCE(!current_is_kswapd());
@@ -421,7 +421,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 /* check the order to exclude compaction-induced reclaim */
 if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY)
 return;
-@@ -5104,8 +5101,7 @@ static bool should_run_aging(struct lruv
+@@ -5116,8 +5113,7 @@ static bool should_run_aging(struct lruv
 * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
 * reclaim.
 */
@@ -431,7 +431,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 {
 unsigned long nr_to_scan;
 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
-@@ -5122,10 +5118,8 @@ static unsigned long get_nr_to_scan(stru
+@@ -5134,10 +5130,8 @@ static unsigned long get_nr_to_scan(stru
 if (sc->priority == DEF_PRIORITY)
 return nr_to_scan;
@@ -443,7 +443,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 }
 static unsigned long get_nr_to_reclaim(struct scan_control *sc)
-@@ -5134,29 +5128,18 @@ static unsigned long get_nr_to_reclaim(s
+@@ -5146,29 +5140,18 @@ static unsigned long get_nr_to_reclaim(s
 if (!global_reclaim(sc))
 return -1;
@@ -475,7 +475,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 if (sc->may_swap)
 swappiness = get_swappiness(lruvec, sc);
-@@ -5166,7 +5149,7 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5178,7 +5161,7 @@ static void lru_gen_shrink_lruvec(struct
 swappiness = 0;
 nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
@@ -484,7 +484,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 break;
 delta = evict_folios(lruvec, sc, swappiness);
-@@ -5183,10 +5166,251 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5195,10 +5178,251 @@ static void lru_gen_shrink_lruvec(struct
 cond_resched();
 }
@@ -736,7 +736,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 /******************************************************************************
 * state change
-@@ -5644,11 +5868,11 @@ static int run_cmd(char cmd, int memcg_i
+@@ -5656,11 +5880,11 @@ static int run_cmd(char cmd, int memcg_i
 if (!mem_cgroup_disabled()) {
 rcu_read_lock();
@@ -751,7 +751,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 rcu_read_unlock();
 if (!memcg)
-@@ -5796,6 +6020,19 @@ void lru_gen_init_lruvec(struct lruvec *
+@@ -5808,6 +6032,19 @@ void lru_gen_init_lruvec(struct lruvec *
 }
 #ifdef CONFIG_MEMCG
@@ -771,7 +771,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 void lru_gen_init_memcg(struct mem_cgroup *memcg)
 {
 INIT_LIST_HEAD(&memcg->mm_list.fifo);
-@@ -5819,7 +6056,69 @@ void lru_gen_exit_memcg(struct mem_cgrou
+@@ -5831,7 +6068,69 @@ void lru_gen_exit_memcg(struct mem_cgrou
 }
 }
 }
@@ -842,7 +842,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 static int __init init_lru_gen(void)
 {
-@@ -5846,6 +6145,10 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5858,6 +6157,10 @@ static void lru_gen_shrink_lruvec(struct
 {
 }
@@ -853,7 +853,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 #endif /* CONFIG_LRU_GEN */
 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
-@@ -5859,7 +6162,7 @@ static void shrink_lruvec(struct lruvec
+@@ -5871,7 +6174,7 @@ static void shrink_lruvec(struct lruvec
 bool proportional_reclaim;
 struct blk_plug plug;
@@ -862,7 +862,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 lru_gen_shrink_lruvec(lruvec, sc);
 return;
 }
-@@ -6102,6 +6405,11 @@ static void shrink_node(pg_data_t *pgdat
+@@ -6114,6 +6417,11 @@ static void shrink_node(pg_data_t *pgdat
 struct lruvec *target_lruvec;
 bool reclaimable = false;


@@ -69,7 +69,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 VM_WARN_ON_ONCE(current_is_kswapd());
 walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
-@@ -4417,7 +4420,7 @@ static bool try_to_inc_max_seq(struct lr
+@@ -4419,7 +4422,7 @@ static bool try_to_inc_max_seq(struct lr
 goto done;
 }
@@ -78,7 +78,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 if (!walk) {
 success = iterate_mm_list_nowalk(lruvec, max_seq);
 goto done;
-@@ -4486,8 +4489,6 @@ static bool lruvec_is_reclaimable(struct
+@@ -4488,8 +4491,6 @@ static bool lruvec_is_reclaimable(struct
 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 DEFINE_MIN_SEQ(lruvec);
@@ -87,7 +87,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 /* see the comment on lru_gen_folio */
 gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
 birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
-@@ -4743,12 +4744,8 @@ static bool isolate_folio(struct lruvec
+@@ -4753,12 +4754,8 @@ static bool isolate_folio(struct lruvec
 {
 bool success;
@@ -101,7 +101,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 (folio_test_dirty(folio) ||
 (folio_test_anon(folio) && !folio_test_swapcache(folio))))
 return false;
-@@ -4845,9 +4842,8 @@ static int scan_folios(struct lruvec *lr
+@@ -4857,9 +4854,8 @@ static int scan_folios(struct lruvec *lr
 __count_vm_events(PGSCAN_ANON + type, isolated);
 /*
@@ -113,7 +113,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 */
 return isolated || !remaining ? scanned : 0;
 }
-@@ -5107,8 +5103,7 @@ static long get_nr_to_scan(struct lruvec
+@@ -5119,8 +5115,7 @@ static long get_nr_to_scan(struct lruvec
 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 DEFINE_MAX_SEQ(lruvec);
@@ -123,7 +123,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 return 0;
 if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan))
-@@ -5136,17 +5131,14 @@ static bool try_to_shrink_lruvec(struct
+@@ -5148,17 +5143,14 @@ static bool try_to_shrink_lruvec(struct
 long nr_to_scan;
 unsigned long scanned = 0;
 unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
@@ -146,7 +146,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
 if (nr_to_scan <= 0)
-@@ -5277,12 +5269,13 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5289,12 +5281,13 @@ static void lru_gen_shrink_lruvec(struct
 struct blk_plug plug;
 VM_WARN_ON_ONCE(global_reclaim(sc));
@@ -161,7 +161,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 if (try_to_shrink_lruvec(lruvec, sc))
 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG);
-@@ -5338,11 +5331,19 @@ static void lru_gen_shrink_node(struct p
+@@ -5350,11 +5343,19 @@ static void lru_gen_shrink_node(struct p
 VM_WARN_ON_ONCE(!global_reclaim(sc));
@@ -182,7 +182,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 set_initial_priority(pgdat, sc);
-@@ -5360,7 +5361,7 @@ static void lru_gen_shrink_node(struct p
+@@ -5372,7 +5373,7 @@ static void lru_gen_shrink_node(struct p
 clear_mm_walk();
 blk_finish_plug(&plug);
@@ -191,7 +191,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 /* kswapd should never fail */
 pgdat->kswapd_failures = 0;
 }
-@@ -5932,7 +5933,7 @@ static ssize_t lru_gen_seq_write(struct
+@@ -5944,7 +5945,7 @@ static ssize_t lru_gen_seq_write(struct
 set_task_reclaim_state(current, &sc.reclaim_state);
 flags = memalloc_noreclaim_save();
 blk_start_plug(&plug);


@@ -27,7 +27,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 --- a/mm/vmscan.c
 +++ b/mm/vmscan.c
-@@ -4415,7 +4415,7 @@ static bool try_to_inc_max_seq(struct lr
+@@ -4417,7 +4417,7 @@ static bool try_to_inc_max_seq(struct lr
 * handful of PTEs. Spreading the work out over a period of time usually
 * is less efficient, but it avoids bursty page faults.
 */


@@ -29,7 +29,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 --- a/mm/vmscan.c
 +++ b/mm/vmscan.c
-@@ -5206,18 +5206,20 @@ static int shrink_one(struct lruvec *lru
+@@ -5218,18 +5218,20 @@ static int shrink_one(struct lruvec *lru
 static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
 {
@@ -52,7 +52,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
 rcu_read_lock();
-@@ -5241,14 +5243,22 @@ restart:
+@@ -5253,14 +5255,22 @@ restart:
 op = shrink_one(lruvec, sc);
@@ -78,7 +78,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 /* restart if raced with lru_gen_rotate_memcg() */
 if (gen != get_nulls_value(pos))
 goto restart;
-@@ -5257,11 +5267,6 @@ restart:
+@@ -5269,11 +5279,6 @@ restart:
 bin = get_memcg_bin(bin + 1);
 if (bin != first_bin)
 goto restart;


@@ -54,7 +54,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 The multi-gen LRU can be disassembled into the following parts:
 --- a/mm/vmscan.c
 +++ b/mm/vmscan.c
-@@ -4459,6 +4459,10 @@ done:
+@@ -4461,6 +4461,10 @@ done:
 return true;
 }


@@ -44,7 +44,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 The multi-gen LRU can be disassembled into the following parts:
 --- a/mm/vmscan.c
 +++ b/mm/vmscan.c
-@@ -4553,6 +4553,10 @@ static void lru_gen_age_node(struct pgli
+@@ -4555,6 +4555,10 @@ static void lru_gen_age_node(struct pgli
 }
 }


@@ -154,7 +154,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 --- a/mm/vmscan.c
 +++ b/mm/vmscan.c
-@@ -4690,6 +4690,148 @@ void lru_gen_look_around(struct page_vma
+@@ -4692,6 +4692,148 @@ void lru_gen_look_around(struct page_vma
 }
 /******************************************************************************
@@ -303,7 +303,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 * the eviction
 ******************************************************************************/
-@@ -5386,53 +5528,6 @@ done:
+@@ -5398,53 +5540,6 @@ done:
 pgdat->kswapd_failures = 0;
 }
@@ -357,7 +357,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 /******************************************************************************
 * state change
 ******************************************************************************/
-@@ -6078,67 +6173,6 @@ void lru_gen_exit_memcg(struct mem_cgrou
+@@ -6090,67 +6185,6 @@ void lru_gen_exit_memcg(struct mem_cgrou
 }
 }


@@ -20,7 +20,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 --- a/mm/vmscan.c
 +++ b/mm/vmscan.c
-@@ -6160,12 +6160,17 @@ void lru_gen_exit_memcg(struct mem_cgrou
+@@ -6172,12 +6172,17 @@ void lru_gen_exit_memcg(struct mem_cgrou
 int i;
 int nid;


@@ -26,7 +26,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 --- a/mm/vmscan.c
 +++ b/mm/vmscan.c
-@@ -4571,13 +4571,12 @@ static void lru_gen_age_node(struct pgli
+@@ -4573,13 +4573,12 @@ static void lru_gen_age_node(struct pgli
 void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 {
 int i;
@@ -42,7 +42,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 struct folio *folio = pfn_folio(pvmw->pfn);
 struct mem_cgroup *memcg = folio_memcg(folio);
 struct pglist_data *pgdat = folio_pgdat(folio);
-@@ -4594,25 +4593,28 @@ void lru_gen_look_around(struct page_vma
+@@ -4596,25 +4595,28 @@ void lru_gen_look_around(struct page_vma
 /* avoid taking the LRU lock under the PTL when possible */
 walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
@@ -79,7 +79,7 @@ Signed-off-by: T.J. Mercier <tjmercier@google.com>
 for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
 unsigned long pfn;
-@@ -4637,56 +4639,27 @@ void lru_gen_look_around(struct page_vma
+@@ -4639,56 +4641,27 @@ void lru_gen_look_around(struct page_vma
 !folio_test_swapcache(folio)))
 folio_mark_dirty(folio);


@@ -226,7 +226,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 /* folio_update_gen() requires stable folio_memcg() */
 if (!mem_cgroup_trylock_pages(memcg))
 break;
-@@ -4442,25 +4425,12 @@ static bool try_to_inc_max_seq(struct lr
+@@ -4444,25 +4427,12 @@ static bool try_to_inc_max_seq(struct lr
 success = iterate_mm_list(lruvec, walk, &mm);
 if (mm)
 walk_mm(lruvec, mm, walk);
@@ -255,7 +255,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 }
 /******************************************************************************
-@@ -6105,7 +6075,6 @@ void lru_gen_init_lruvec(struct lruvec *
+@@ -6117,7 +6087,6 @@ void lru_gen_init_lruvec(struct lruvec *
 INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]);
 lruvec->mm_state.seq = MIN_NR_GENS;
@@ -263,7 +263,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 }
 #ifdef CONFIG_MEMCG
-@@ -6138,7 +6107,6 @@ void lru_gen_exit_memcg(struct mem_cgrou
+@@ -6150,7 +6119,6 @@ void lru_gen_exit_memcg(struct mem_cgrou
 for_each_node(nid) {
 struct lruvec *lruvec = get_lruvec(memcg, nid);