[rh7,6/7] mm: Add and use batched version of __tlb_remove_table()

Submitted by Andrey Ryabinin on July 13, 2020, 1:09 p.m.

Message ID: 20200713130938.23850-6-aryabinin@virtuozzo.com

Commit Message

tlb_remove_table_rcu() currently removes tables one by one via
__tlb_remove_table() -> free_page_and_swap_cache(table). Use the batched
free_pages_and_swap_cache_nodrain() instead to remove all tables in one
go. This helps to reduce contention on the memcg counters, since they
are decremented once per batch rather than once for each page
individually.
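
Purely as an illustration of the batching idea (not part of the patch): a
minimal userspace C sketch in which a shared atomic counter stands in for
the memcg page counters. All names here (charge_counter, release_one,
release_batch) are made up for the example; the real change is the kernel
diff below.

/* Illustration only: batching counter updates to cut contention.
 * One atomic RMW per batch replaces one atomic RMW per item. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long charge_counter = 100;

/* Old pattern: one contended update for every item released. */
static void release_one(void)
{
	atomic_fetch_sub(&charge_counter, 1);
}

/* New pattern: a single contended update covers the whole batch. */
static void release_batch(long nr)
{
	atomic_fetch_sub(&charge_counter, nr);
}

int main(void)
{
	long i, nr = 8;

	for (i = 0; i < nr; i++)	/* per-item updates: nr atomic ops */
		release_one();

	release_batch(nr);		/* batched update: one atomic op */

	printf("counter = %ld\n", atomic_load(&charge_counter));
	return 0;
}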

https://jira.sw.ru/browse/PSBM-101300
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
 arch/x86/include/asm/tlb.h |  5 +++++
 include/linux/swap.h       |  1 +
 mm/memory.c                |  4 +---
 mm/swap_state.c            | 16 ++++++++++++++--
 4 files changed, 21 insertions(+), 5 deletions(-)


diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 79a4ca6a9606..64924a1bb146 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -29,4 +29,9 @@  static inline void __tlb_remove_table(void *table)
 	free_page_and_swap_cache(table);
 }
 
+static inline void __tlb_remove_tables(void **tables, int nr)
+{
+	free_pages_and_swap_cache_nodrain((struct page **)tables, nr);
+}
+
 #endif /* _ASM_X86_TLB_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 9c48d29a6e05..c07cabc814e6 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -481,6 +481,7 @@  extern void __delete_from_swap_cache(struct page *);
 extern void delete_from_swap_cache(struct page *);
 extern void free_page_and_swap_cache(struct page *);
 extern void free_pages_and_swap_cache(struct page **, int);
+extern void free_pages_and_swap_cache_nodrain(struct page **, int);
 extern struct page *lookup_swap_cache(swp_entry_t entry,
 				      struct vm_area_struct *vma,
 				      unsigned long addr);
diff --git a/mm/memory.c b/mm/memory.c
index 4370dd400822..1f43e46a1f93 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -379,12 +379,10 @@  static void tlb_remove_table_one(void *table)
 static void tlb_remove_table_rcu(struct rcu_head *head)
 {
 	struct mmu_table_batch *batch;
-	int i;
 
 	batch = container_of(head, struct mmu_table_batch, rcu);
 
-	for (i = 0; i < batch->nr; i++)
-		__tlb_remove_table(batch->tables[i]);
+	__tlb_remove_tables(batch->tables, batch->nr);
 
 	free_page((unsigned long)batch);
 }
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 5292312e1bd9..58af0af81cc1 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -295,11 +295,13 @@  void free_page_and_swap_cache(struct page *page)
  * Passed an array of pages, drop them all from swapcache and then release
  * them.  They are removed from the LRU and freed if this is their last use.
  */
-void free_pages_and_swap_cache(struct page **pages, int nr)
+static void __free_pages_and_swap_cache(struct page **pages, int nr, bool drain)
 {
 	struct page **pagep = pages;
 
-	lru_add_drain();
+	if (drain)
+		lru_add_drain();
+
 	while (nr) {
 		int todo = min(nr, PAGEVEC_SIZE);
 		int i;
@@ -319,6 +321,16 @@  void free_pages_and_swap_cache(struct page **pages, int nr)
 	}
 }
 
+void free_pages_and_swap_cache(struct page **pages, int nr)
+{
+	__free_pages_and_swap_cache(pages, nr, true);
+}
+
+void free_pages_and_swap_cache_nodrain(struct page **pages, int nr)
+{
+	__free_pages_and_swap_cache(pages, nr, false);
+}
+
 /*
  * Lookup a swap entry in the swap cache. A found page will be returned
  * unlocked and with its refcount incremented - we rely on the kernel