--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -29,4 +29,9 @@ static inline void __tlb_remove_table(void *table)
 	free_page_and_swap_cache(table);
 }
 
+static inline void __tlb_remove_tables(void **tables, int nr)
+{
+	free_pages_and_swap_cache_nodrain((struct page **)tables, nr);
+}
+
 #endif /* _ASM_X86_TLB_H */
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -481,6 +481,7 @@ extern void __delete_from_swap_cache(struct page *);
 extern void delete_from_swap_cache(struct page *);
 extern void free_page_and_swap_cache(struct page *);
 extern void free_pages_and_swap_cache(struct page **, int);
+extern void free_pages_and_swap_cache_nodrain(struct page **, int);
 extern struct page *lookup_swap_cache(swp_entry_t entry,
 				      struct vm_area_struct *vma,
 				      unsigned long addr);
@@ -379,12 +379,10 @@ static void tlb_remove_table_one(void *table)
static void tlb_remove_table_rcu(struct rcu_head *head)
{
struct mmu_table_batch *batch;
- int i;
batch = container_of(head, struct mmu_table_batch, rcu);
- for (i = 0; i < batch->nr; i++)
- __tlb_remove_table(batch->tables[i]);
+ __tlb_remove_tables(batch->tables, batch->nr);
free_page((unsigned long)batch);
}
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -295,11 +295,13 @@ void free_page_and_swap_cache(struct page *page)
  * Passed an array of pages, drop them all from swapcache and then release
  * them. They are removed from the LRU and freed if this is their last use.
  */
-void free_pages_and_swap_cache(struct page **pages, int nr)
+static void __free_pages_and_swap_cache(struct page **pages, int nr, bool drain)
 {
 	struct page **pagep = pages;
 
-	lru_add_drain();
+	if (drain)
+		lru_add_drain();
+
 	while (nr) {
 		int todo = min(nr, PAGEVEC_SIZE);
 		int i;
@@ -319,6 +321,16 @@ void free_pages_and_swap_cache(struct page **pages, int nr)
 	}
 }
 
+void free_pages_and_swap_cache(struct page **pages, int nr)
+{
+	__free_pages_and_swap_cache(pages, nr, true);
+}
+
+void free_pages_and_swap_cache_nodrain(struct page **pages, int nr)
+{
+	__free_pages_and_swap_cache(pages, nr, false);
+}
+
 /*
  * Lookup a swap entry in the swap cache. A found page will be returned
  * unlocked and with its refcount incremented - we rely on the kernel
The commit is pushed to "branch-rh7-3.10.0-1127.10.1.vz7.162.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-1127.10.1.vz7.162.13

------>
commit 9a3ca2497cdbf4c80f822b2076e8b707b1297b22
Author: Andrey Ryabinin <aryabinin@virtuozzo.com>
Date:   Tue Jul 21 17:59:36 2020 +0300

    mm: Add and use batched version of __tlb_remove_table()

    tlb_remove_table_rcu() removes tables one by one via
    __tlb_remove_table() -> free_page_and_swap_cache(table). Use the
    batched free_pages_and_swap_cache_nodrain() instead to remove all
    tables in one go. This helps to reduce contention on the memcg
    counters, since we decrease them only once per batch instead of
    decrementing them for each page individually.

    https://jira.sw.ru/browse/PSBM-101300

    Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
 arch/x86/include/asm/tlb.h |  5 +++++
 include/linux/swap.h       |  1 +
 mm/memory.c                |  4 +---
 mm/swap_state.c            | 16 ++++++++++++++--
 4 files changed, 21 insertions(+), 5 deletions(-)
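For reference, a minimal sketch of the call path after the patch is applied, stitched together from the hunks above (illustrative only, not part of the diff; declarations, includes, and the unchanged mmu_table_batch definition are elided):

/* arch/x86/include/asm/tlb.h: batched counterpart of __tlb_remove_table() */
static inline void __tlb_remove_tables(void **tables, int nr)
{
	free_pages_and_swap_cache_nodrain((struct page **)tables, nr);
}

/* mm/memory.c: the RCU callback now frees the whole batch in one call
 * instead of looping over batch->tables one entry at a time. */
static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;

	batch = container_of(head, struct mmu_table_batch, rcu);

	__tlb_remove_tables(batch->tables, batch->nr);

	free_page((unsigned long)batch);
}

As the mm/swap_state.c hunks show, free_pages_and_swap_cache_nodrain() walks the array in PAGEVEC_SIZE chunks, so the pages are released in batches rather than one at a time, which is the memcg-counter contention reduction the commit message describes; the _nodrain variant also skips lru_add_drain(), which the plain free_pages_and_swap_cache() still performs.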