[vz8,1/2] mm/memcg: reclaim memory.cache.limit_in_bytes from background

Submitted by Andrey Ryabinin on Oct. 2, 2020, 4:08 p.m.

Details

Message ID 20201002160843.28666-1-aryabinin@virtuozzo.com
State New
Series "Series without cover letter"
Headers show

Commit Message

Andrey Ryabinin Oct. 2, 2020, 4:08 p.m.
Reclaiming memory above memory.cache.limit_in_bytes always in direct
reclaim mode adds too much of a cost for vstorage. Instead of direct
reclaim, allow memory.cache.limit_in_bytes to be overflowed and launch
the reclaim in a background task.

https://pmc.acronis.com/browse/VSTOR-24395
https://jira.sw.ru/browse/PSBM-94761
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
 mm/memcontrol.c | 42 ++++++++++++++++++------------------------
 1 file changed, 18 insertions(+), 24 deletions(-)

Patch hide | download patch | download mbox

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 68242a72be4d..c30150b8732d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2211,11 +2211,16 @@  static void reclaim_high(struct mem_cgroup *memcg,
 			 unsigned int nr_pages,
 			 gfp_t gfp_mask)
 {
+
 	do {
-		if (page_counter_read(&memcg->memory) <= memcg->high)
-			continue;
-		memcg_memory_event(memcg, MEMCG_HIGH);
-		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
+
+		if (page_counter_read(&memcg->memory) > memcg->high) {
+			memcg_memory_event(memcg, MEMCG_HIGH);
+			try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
+		}
+
+		if (page_counter_read(&memcg->cache) > memcg->cache.max)
+			try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, false);
 	} while ((memcg = parent_mem_cgroup(memcg)));
 }
 
@@ -2270,13 +2275,8 @@  static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 			refill_stock(memcg, nr_pages);
 			goto charge;
 		}
-
-		if (cache_charge && !page_counter_try_charge(
-				&memcg->cache, nr_pages, &counter)) {
-			refill_stock(memcg, nr_pages);
-			goto charge;
-		}
-		return 0;
+		css_get_many(&memcg->css, batch);
+		goto done;
 	}
 
 charge:
@@ -2301,19 +2301,6 @@  static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 		}
 	}
 
-	if (!mem_over_limit && cache_charge) {
-		if (page_counter_try_charge(&memcg->cache, nr_pages, &counter))
-			goto done_restock;
-
-		may_swap = false;
-		mem_over_limit = mem_cgroup_from_counter(counter, cache);
-		page_counter_uncharge(&memcg->memory, batch);
-		if (do_memsw_account())
-			page_counter_uncharge(&memcg->memsw, batch);
-		if (kmem_charge)
-			page_counter_uncharge(&memcg->kmem, nr_pages);
-	}
-
 	if (!mem_over_limit)
 		goto done_restock;
 
@@ -2437,6 +2424,9 @@  static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 	css_get_many(&memcg->css, batch);
 	if (batch > nr_pages)
 		refill_stock(memcg, batch - nr_pages);
+done:
+	if (cache_charge)
+		page_counter_charge(&memcg->cache, nr_pages);
 
 	/*
 	 * If the hierarchy is above the normal consumption range, schedule
@@ -2457,7 +2447,11 @@  static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 			current->memcg_nr_pages_over_high += batch;
 			set_notify_resume(current);
 			break;
+		} else if (page_counter_read(&memcg->cache) > memcg->cache.max) {
+			if (!work_pending(&memcg->high_work))
+				schedule_work(&memcg->high_work);
 		}
+
 	} while ((memcg = parent_mem_cgroup(memcg)));
 
 	return 0;