[Devel,RHEL7,COMMIT] mm/memcontrol: fix shmem accounting

Submitted by Konstantin Khorenko on Feb. 6, 2017, 12:52 p.m.

Details

Message ID 201702061252.v16CqV5o032256@finist_cl7.x64_64.work.ct
State New
Series "mm/memcontrol: fix shmem accounting"
Headers show

Commit Message

Konstantin Khorenko Feb. 6, 2017, 12:52 p.m.
The commit is pushed to "branch-rh7-3.10.0-514.6.1.vz7.28.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-514.6.1.vz7.28.4
------>
commit 3927df723b4d52034bbb62272a01b5599c587a8f
Author: Andrey Ryabinin <aryabinin@virtuozzo.com>
Date:   Mon Feb 6 16:52:31 2017 +0400

    mm/memcontrol: fix shmem accounting
    
    uncharge_list() doesn't know anything about shmem pages.
    As a result shmem stat is constantly growing.
    Teach uncharge_list() to deal with shmem pages to fix that.
    
    https://jira.sw.ru/browse/PSBM-58984
    
    Fixes: 35e719e7f63d ("ms/mm: memcontrol: use page lists for uncharge batching")
    Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
 mm/memcontrol.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

Patch hide | download patch | download mbox

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 233f542..49b69f7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6923,7 +6923,7 @@  static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 			   unsigned long nr_mem, unsigned long nr_memsw,
 			   unsigned long nr_anon, unsigned long nr_file,
 			   unsigned long nr_huge, unsigned long nr_kmem,
-			   struct page *dummy_page)
+			   unsigned long nr_shmem, struct page *dummy_page)
 {
 	unsigned long flags;
 
@@ -6940,6 +6940,7 @@  static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
+	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_SHMEM], nr_shmem);
 	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
 	__this_cpu_add(memcg->stat->nr_page_events, nr_anon + nr_file);
 	memcg_check_events(memcg, dummy_page);
@@ -6956,6 +6957,7 @@  static void uncharge_list(struct list_head *page_list)
 	unsigned long nr_kmem = 0;
 	unsigned long pgpgout = 0;
 	unsigned long nr_mem = 0;
+	unsigned long nr_shmem = 0;
 	struct list_head *next;
 	struct page *page;
 
@@ -6983,9 +6985,10 @@  static void uncharge_list(struct list_head *page_list)
 		if (memcg != pc->mem_cgroup) {
 			if (memcg) {
 				uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw,
-					nr_anon, nr_file, nr_huge, nr_kmem, page);
+					nr_anon, nr_file, nr_huge, nr_kmem,
+					nr_shmem, page);
 				pgpgout = nr_mem = nr_memsw = nr_kmem = 0;
-				nr_anon = nr_file = nr_huge = 0;
+				nr_anon = nr_file = nr_huge = nr_shmem = 0;
 			}
 			memcg = pc->mem_cgroup;
 		}
@@ -6998,8 +7001,11 @@  static void uncharge_list(struct list_head *page_list)
 			}
 			if (PageAnon(page))
 				nr_anon += nr_pages;
-			else
+			else {
+				if (PageSwapBacked(page))
+					nr_shmem += nr_pages;
 				nr_file += nr_pages;
+			}
 			pgpgout++;
 		} else {
 			nr_kmem += 1 << compound_order(page);
@@ -7016,8 +7022,8 @@  static void uncharge_list(struct list_head *page_list)
 	} while (next != page_list);
 
 	if (memcg)
-		uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw,
-			       nr_anon, nr_file, nr_huge, nr_kmem, page);
+		uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw, nr_anon,
+				nr_file, nr_huge, nr_kmem, nr_shmem, page);
 }
 
 /**