[rh7,2/2] mm/memcg: reclaim memory.cache.limit_in_bytes from background

Submitted by Andrey Ryabinin on July 5, 2019, 2:17 p.m.

Details

Message ID 20190705141749.25740-2-aryabinin@virtuozzo.com
State New
Series "Series without cover letter"
Headers show

Commit Message

Andrey Ryabinin July 5, 2019, 2:17 p.m.
Reclaiming memory above memory.cache.limit_in_bytes always in direct
reclaim mode adds too much of a cost for vstorage. Instead of using
direct reclaim, allow memory.cache.limit_in_bytes to be temporarily
exceeded and launch the reclaim in a background task.

https://pmc.acronis.com/browse/VSTOR-24395
https://jira.sw.ru/browse/PSBM-94761
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
 mm/memcontrol.c | 37 ++++++++++++++-----------------------
 1 file changed, 14 insertions(+), 23 deletions(-)

Patch hide | download patch | download mbox

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cc8cf887c205..a28a66bff002 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3002,11 +3002,15 @@  static void reclaim_high(struct mem_cgroup *memcg,
 			 unsigned int nr_pages,
 			 gfp_t gfp_mask)
 {
+
 	do {
-		if (page_counter_read(&memcg->memory) <= memcg->high)
-			continue;
+		if (page_counter_read(&memcg->memory) > memcg->high)
+			try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, 0);
+
+		if (page_counter_read(&memcg->cache) > memcg->cache.limit)
+			try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask,
+						MEM_CGROUP_RECLAIM_NOSWAP);
 
-		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, 0);
 	} while ((memcg = parent_mem_cgroup(memcg)));
 }
 
@@ -3067,11 +3071,6 @@  static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 			goto charge;
 		}
 
-		if (cache_charge && page_counter_try_charge(
-				&memcg->cache, nr_pages, &counter)) {
-			refill_stock(memcg, nr_pages);
-			goto charge;
-		}
 		goto done;
 	}
 
@@ -3097,19 +3096,6 @@  static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 		}
 	}
 
-	if (!mem_over_limit && cache_charge) {
-		if (!page_counter_try_charge(&memcg->cache, nr_pages, &counter))
-			goto done_restock;
-
-		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
-		mem_over_limit = mem_cgroup_from_counter(counter, cache);
-		page_counter_uncharge(&memcg->memory, batch);
-		if (do_swap_account)
-			page_counter_uncharge(&memcg->memsw, batch);
-		if (kmem_charge)
-			page_counter_uncharge(&memcg->kmem, batch);
-	}
-
 	if (!mem_over_limit)
 		goto done_restock;
 
@@ -3222,8 +3208,6 @@  static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 		page_counter_uncharge(&memcg->memory, batch);
 		if (do_swap_account)
 			page_counter_uncharge(&memcg->memsw, batch);
-		if (cache_charge)
-			page_counter_uncharge(&memcg->cache, nr_pages);
 		if (kmem_charge) {
 			WARN_ON_ONCE(1);
 			page_counter_uncharge(&memcg->kmem, nr_pages);
@@ -3235,6 +3219,9 @@  static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 	if (batch > nr_pages)
 		refill_stock(memcg, batch - nr_pages);
 done:
+	if (cache_charge)
+		page_counter_charge(&memcg->cache, nr_pages);
+
 	/*
 	 * If the hierarchy is above the normal consumption range, schedule
 	 * reclaim on returning to userland.  We can perform reclaim here
@@ -3254,7 +3241,11 @@  static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 			current->memcg_nr_pages_over_high += batch;
 			set_notify_resume(current);
 			break;
+		} else if (page_counter_read(&memcg->cache) > memcg->cache.limit) {
+			if (!work_pending(&memcg->high_work))
+				schedule_work(&memcg->high_work);
 		}
+
 	} while ((memcg = parent_mem_cgroup(memcg)));
 
 	return 0;