[rh7,2/2] mm/vmstat: Add more vmstats in containers /proc/vmstat

Submitted by Andrey Ryabinin on Sept. 26, 2018, 10:46 a.m.

Details

Message ID 20180926104652.2680-2-aryabinin@virtuozzo.com
State New
Series "Series without cover letter"
Headers show

Commit Message

Andrey Ryabinin Sept. 26, 2018, 10:46 a.m.
Add more counters in containers /proc/vmstat, such as nr_free_pages
nr_inactive_anon, nr_active_anon, nr_inactive_file, nr_active_file,
nr_unevictable, nr_anon_pages, nr_mapped, nr_file_pages,
nr_slab_reclaimable, nr_slab_unreclaimable, pswpin, pswpout, pgfault,
pgmajfault.

https://jira.sw.ru/browse/PSBM-88323
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
 include/linux/memcontrol.h |  7 +++++++
 kernel/bc/vm_pages.c       |  9 ++++++++-
 mm/memcontrol.c            | 50 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 65 insertions(+), 1 deletion(-)

Patch hide | download patch | download mbox

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0fafaa8c285f..640a5802e398 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -215,6 +215,8 @@  static inline void mem_cgroup_dec_page_stat(struct page *page,
 	mem_cgroup_update_page_stat(page, idx, -1);
 }
 
+void mem_cgroup_fill_vmstat(struct mem_cgroup *memcg, unsigned long *stats);
+
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 						gfp_t gfp_mask,
 						unsigned long *total_scanned);
@@ -438,6 +440,11 @@  static inline void mem_cgroup_dec_page_stat(struct page *page,
 {
 }
 
+static inline void mem_cgroup_fill_vmstat(struct mem_cgroup *memcg,
+					unsigned long *stats)
+{
+}
+
 static inline
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 					    gfp_t gfp_mask,
diff --git a/kernel/bc/vm_pages.c b/kernel/bc/vm_pages.c
index 0bc5dda98c29..4f798a385801 100644
--- a/kernel/bc/vm_pages.c
+++ b/kernel/bc/vm_pages.c
@@ -229,7 +229,14 @@  static int bc_fill_meminfo(struct user_beancounter *ub,
 
 static int bc_fill_vmstat(struct user_beancounter *ub, unsigned long *stat)
 {
-	/* FIXME: show swapin/swapout? */
+	struct cgroup_subsys_state *css;
+
+	if (ub == get_ub0())
+		return NOTIFY_OK;
+
+	css = ub_get_mem_css(ub);
+	mem_cgroup_fill_vmstat(mem_cgroup_from_cont(css->cgroup), stat);
+	css_put(css);
 	return NOTIFY_OK;
 }
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a1baff607b68..423bcf8adfd0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1161,6 +1161,56 @@  static void mem_cgroup_inc_failcnt(struct mem_cgroup *memcg,
 
 	if (do_swap_account && margin < nr_pages)
 		atomic_long_inc(&memcg->swap_failcnt);
+
+}
+
+static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
+			unsigned int lru_mask);
+
+void mem_cgroup_fill_vmstat(struct mem_cgroup *memcg, unsigned long *stats)
+{
+	int cpu, i;
+	unsigned long limit = READ_ONCE(memcg->memory.limit);
+	unsigned long memory = page_counter_read(&memcg->memory);
+	unsigned long *zone_stats = stats;
+	unsigned long *vm_stats = stats + NR_VM_ZONE_STAT_ITEMS +
+	        NR_VM_WRITEBACK_STAT_ITEMS;
+
+
+
+	zone_stats[NR_FREE_PAGES] = memory > limit ? 0 : limit - memory;
+	for (i = LRU_BASE; i < NR_LRU_LISTS; i++)
+		zone_stats[NR_LRU_BASE + i] = mem_cgroup_nr_lru_pages(memcg, BIT(i));
+
+	zone_stats[NR_ANON_PAGES] = zone_stats[NR_ACTIVE_ANON] +
+		zone_stats[NR_INACTIVE_ANON];
+	zone_stats[NR_FILE_PAGES] = zone_stats[NR_ACTIVE_FILE] +
+		zone_stats[NR_INACTIVE_FILE];
+	/* file dirty and writeback counters should be taken from the ub */
+	zone_stats[NR_SLAB_RECLAIMABLE] = mem_cgroup_read_stat2_fast(memcg,
+						MEM_CGROUP_STAT_SLAB_RECLAIMABLE);
+
+
+
+	for_each_possible_cpu(cpu) {
+		zone_stats[NR_FILE_MAPPED] += per_cpu(
+			memcg->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], cpu);
+		zone_stats[NR_SLAB_UNRECLAIMABLE] += per_cpu(
+			memcg->stat->count[MEM_CGROUP_STAT_SLAB_UNRECLAIMABLE], cpu);
+		zone_stats[NR_SHMEM] += per_cpu(
+			memcg->stat->count[MEM_CGROUP_STAT_SHMEM], cpu);
+
+#ifdef CONFIG_VM_EVENT_COUNTERS
+		vm_stats[PSWPIN] += per_cpu(
+			memcg->stat->events[MEM_CGROUP_EVENTS_PSWPIN], cpu);
+		vm_stats[PSWPOUT] += per_cpu(
+			memcg->stat->events[MEM_CGROUP_EVENTS_PSWPOUT], cpu);
+		vm_stats[PGFAULT] += per_cpu(
+			memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT], cpu);
+		vm_stats[PGMAJFAULT] += per_cpu(
+			memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], cpu);
+#endif
+	}
 }
 
 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,