[rh7,v3,1/7] ms/mm: vmscan: pass memcg to get_scan_count()

Submitted by Andrey Ryabinin on Feb. 27, 2019, 4:39 p.m.

Details

Message ID 20190227164005.26009-1-aryabinin@virtuozzo.com
State New
Series "Series without cover letter"

Commit Message

From: Vladimir Davydov <vdavydov@virtuozzo.com>

memcg will come in handy in get_scan_count().  For a start, it can be
used to look up swappiness directly in get_scan_count() instead of
passing it around in struct scan_control.  The following patches will
add more memcg-related values to be used there.

Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

https://pmc.acronis.com/browse/VSTOR-19037

(cherry picked from commit 3337767850b490eec5ca822f871241c981664737)
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
 mm/vmscan.c | 32 ++++++++++++++------------------
 1 file changed, 14 insertions(+), 18 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8ede70b78f6f..27aff034ce40 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -103,9 +103,6 @@ struct scan_control {
 	/* Reclaim only slab */
 	bool slab_only;
 
-	/* anon vs. file LRUs scanning "ratio" */
-	int swappiness;
-
 	/*
 	 * The memory cgroup that hit its limit and as a result is the
 	 * primary target of this reclaim invocation.
@@ -2119,9 +2116,12 @@ enum scan_balance {
  * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
  * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
  */
-static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
-			   unsigned long *nr, unsigned long *lru_pages)
+
+static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
+			   struct scan_control *sc, unsigned long *nr,
+			   unsigned long *lru_pages)
 {
+	int swappiness = mem_cgroup_swappiness(memcg);
 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 	u64 fraction[2];
 	u64 denominator = 0;	/* gcc */
@@ -2163,7 +2163,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	 * using the memory controller's swap limit feature would be
 	 * too expensive.
 	 */
-	if (!global_reclaim(sc) && !sc->swappiness) {
+	if (!global_reclaim(sc) && !swappiness) {
 		scan_balance = SCAN_FILE;
 		goto out;
 	}
@@ -2173,7 +2173,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	 * system is close to OOM, scan both anon and file equally
 	 * (unless the swappiness setting disagrees with swapping).
 	 */
-	if (!sc->priority && sc->swappiness) {
+	if (!sc->priority && swappiness) {
 		scan_balance = SCAN_EQUAL;
 		goto out;
 	}
@@ -2220,7 +2220,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	 * With swappiness at 100, anonymous and file have the same priority.
 	 * This scanning priority is essentially the inverse of IO cost.
 	 */
-	anon_prio = sc->swappiness;
+	anon_prio = swappiness;
 	file_prio = 200 - anon_prio;
 
 	/*
@@ -2322,9 +2322,10 @@ static inline void init_tlb_ubc(void)
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
-static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc,
-			  unsigned long *lru_pages)
+static void shrink_zone_memcg(struct zone *zone, struct mem_cgroup *memcg,
+			      struct scan_control *sc, unsigned long *lru_pages)
 {
+	struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 	unsigned long nr[NR_LRU_LISTS];
 	unsigned long targets[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
@@ -2334,7 +2335,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc,
 	struct blk_plug plug;
 	bool scan_adjusted;
 
-	get_scan_count(lruvec, sc, nr, lru_pages);
+	get_scan_count(lruvec, memcg, sc, nr, lru_pages);
 
 	/* Record the original scan target for proportional adjustments later */
 	memcpy(targets, nr, sizeof(nr));
@@ -2545,7 +2546,6 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc,
 		memcg = mem_cgroup_iter(root, NULL, &reclaim);
 		do {
 			unsigned long lru_pages, scanned;
-			struct lruvec *lruvec;
 
 			if (!sc->may_thrash && mem_cgroup_low(root, memcg))
 				continue;
@@ -2553,9 +2553,7 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc,
 			scanned = sc->nr_scanned;
 
 			if (!slab_only) {
-				lruvec = mem_cgroup_zone_lruvec(zone, memcg);
-				sc->swappiness = mem_cgroup_swappiness(memcg);
-				shrink_lruvec(lruvec, sc, &lru_pages);
+				shrink_zone_memcg(zone, memcg, sc, &lru_pages);
 				zone_lru_pages += lru_pages;
 			}
 
@@ -3128,11 +3126,9 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
 		.may_swap = !noswap,
 		.order = 0,
 		.priority = 0,
-		.swappiness = mem_cgroup_swappiness(memcg),
 		.target_mem_cgroup = memcg,
 		.stat = &stat,
 	};
-	struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 	unsigned long lru_pages;
 
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
@@ -3149,7 +3145,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
 	 * will pick up pages from other mem cgroup's as well. We hack
 	 * the priority and make it zero.
 	 */
-	shrink_lruvec(lruvec, &sc, &lru_pages);
+	shrink_zone_memcg(zone, memcg, &sc, &lru_pages);
 
 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
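
For orientation, the reclaim call path after this patch (a simplified
sketch drawn from the hunks above, not literal kernel code):

/*
 * shrink_zone(zone, sc, ...)
 *   mem_cgroup_iter() walks the memcg hierarchy; for each memcg:
 *     shrink_zone_memcg(zone, memcg, sc, &lru_pages)
 *       lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 *       get_scan_count(lruvec, memcg, sc, nr, lru_pages)
 *         int swappiness = mem_cgroup_swappiness(memcg);
 */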