[Devel,rh7,9/9] tcache: Move add/sub out of pni->lock

Submitted by Kirill Tkhai on Aug. 15, 2017, 11:24 a.m.


Message ID 150279627890.12806.3753737510757259804.stgit@localhost.localdomain
State New
Series "Manage LRU lists under per-filesystem lock"

Commit Message

Kirill Tkhai Aug. 15, 2017, 11:24 a.m.
This minimizes the number of operations performed under pni->lock.
Note that ni->nr_pages is incremented before the page is linked to
the list, so a parallel shrink cannot drive nr_pages negative.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 mm/tcache.c |   15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
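
To illustrate the ordering the patch relies on, here is a minimal
userspace sketch of the pattern, not the kernel code itself: C11
atomics and a pthread mutex stand in for the kernel's atomic_long_t
and pni->lock, and the names (pool_node, node_nr_pages, lru_add,
lru_del) are hypothetical. The shared per-node counter is raised
before the page becomes visible on the LRU and lowered only after
the page has really been unlinked, so a concurrent shrinker, which
decrements only for pages it found on the LRU, can never drive the
counter negative.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *next;
};

struct pool_node {
	pthread_mutex_t lock;	/* plays the role of pni->lock */
	struct node *lru;	/* singly-linked stand-in for pni->lru */
	long nr_pages;		/* per-pool count, protected by lock */
};

/* Shared per-node counter, like ni->nr_pages: updated outside the lock. */
static atomic_long node_nr_pages;

static void lru_add(struct pool_node *pni, struct node *page)
{
	/*
	 * Increment BEFORE linking the page: once the page is on the
	 * LRU, a parallel shrinker may isolate it and decrement, so
	 * the decrement must never be able to run ahead of this
	 * increment.
	 */
	atomic_fetch_add(&node_nr_pages, 1);

	pthread_mutex_lock(&pni->lock);
	pni->nr_pages++;
	page->next = pni->lru;	/* push to head; the kernel uses list_add_tail */
	pni->lru = page;
	pthread_mutex_unlock(&pni->lock);
}

static void lru_del(struct pool_node *pni, struct node *page)
{
	struct node **pp;
	bool deleted = false;

	pthread_mutex_lock(&pni->lock);
	for (pp = &pni->lru; *pp; pp = &(*pp)->next) {
		if (*pp == page) {
			*pp = page->next;
			pni->nr_pages--;
			deleted = true;
			break;
		}
	}
	pthread_mutex_unlock(&pni->lock);

	/*
	 * Decrement AFTER dropping the lock, and only if this call
	 * actually unlinked the page, mirroring the 'deleted' flag
	 * the patch adds to tcache_lru_del().
	 */
	if (deleted)
		atomic_fetch_sub(&node_nr_pages, 1);
}

int main(void)
{
	struct pool_node pni = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct node page = { NULL };

	lru_add(&pni, &page);
	lru_del(&pni, &page);
	printf("node counter: %ld\n", atomic_load(&node_nr_pages));
	return 0;
}

The same ordering argument is why, in the patch below, tcache_lru_add()
does atomic_long_inc() first and tcache_lru_del() does atomic_long_dec()
last.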


diff --git a/mm/tcache.c b/mm/tcache.c
index 6fad864a37f..fa9a7c026c3 100644
--- a/mm/tcache.c
+++ b/mm/tcache.c
@@ -274,6 +274,8 @@ static void tcache_lru_add(struct tcache_pool *pool, struct page *page)
 	struct tcache_nodeinfo *ni = &tcache_nodeinfo[nid];
 	struct tcache_pool_nodeinfo *pni = &pool->nodeinfo[nid];
 
+	atomic_long_inc(&ni->nr_pages);
+
 	spin_lock(&pni->lock);
 	pni->nr_pages++;
 	list_add_tail(&page->lru, &pni->lru);
@@ -284,8 +286,6 @@ static void tcache_lru_add(struct tcache_pool *pool, struct page *page)
 		pni->recent_puts /= 2;
 	}
 
-	atomic_long_inc(&ni->nr_pages);
-
 	if (tcache_check_events(ni, pni) || RB_EMPTY_NODE(&pni->reclaim_node)) {
 		spin_lock(&ni->lock);
 		if (!RB_EMPTY_NODE(&pni->reclaim_node))
@@ -294,7 +294,6 @@ static void tcache_lru_add(struct tcache_pool *pool, struct page *page)
 		__tcache_insert_reclaim_node(ni, pni);
 		spin_unlock(&ni->lock);
 	}
-
 	spin_unlock(&pni->lock);
 }
 
@@ -315,6 +314,7 @@ static void tcache_lru_del(struct tcache_pool *pool, struct page *page,
 	int nid = page_to_nid(page);
 	struct tcache_nodeinfo *ni = &tcache_nodeinfo[nid];
 	struct tcache_pool_nodeinfo *pni = &pool->nodeinfo[nid];
+	bool deleted = false;
 
 	spin_lock(&pni->lock);
 
@@ -323,12 +323,11 @@ static void tcache_lru_del(struct tcache_pool *pool, struct page *page,
 		goto out;
 
 	__tcache_lru_del(pni, page);
+	deleted = true;
 
 	if (reused)
 		pni->recent_gets++;
 
-	atomic_long_dec(&ni->nr_pages);
-
 	if (tcache_check_events(ni, pni)) {
 		spin_lock(&ni->lock);
 		if (!RB_EMPTY_NODE(&pni->reclaim_node))
@@ -339,6 +338,8 @@ static void tcache_lru_del(struct tcache_pool *pool, struct page *page,
 	}
 out:
 	spin_unlock(&pni->lock);
+	if (deleted)
+		atomic_long_dec(&ni->nr_pages);
 }
 
 static int tcache_create_pool(void)
@@ -1075,8 +1076,6 @@ tcache_lru_isolate(int nid, struct page **pages, int nr_to_isolate)
 	if (!nr)
 		goto unlock;
 
-	atomic_long_sub(nr, &ni->nr_pages);
-
 	if (!RB_EMPTY_NODE(rbn) || !list_empty(&pni->lru)) {
 		spin_lock(&ni->lock);
 		if (!RB_EMPTY_NODE(rbn))
@@ -1091,6 +1090,8 @@ tcache_lru_isolate(int nid, struct page **pages, int nr_to_isolate)
 	spin_unlock(&pni->lock);
 	tcache_put_pool(pni->pool);
 out:
+	if (nr_isolated)
+		atomic_long_sub(nr_isolated, &ni->nr_pages);
 	return nr_isolated;
 }
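
The tcache_lru_isolate() hunks apply the same idea in batched form.
Continuing the hypothetical userspace sketch above (same pool_node
and node_nr_pages definitions), pages are unlinked under pni->lock,
but the shared per-node counter is adjusted once per batch, after
the lock is dropped, and only by the number of pages actually
isolated:

static int lru_isolate(struct pool_node *pni, struct node **out,
		       int nr_to_isolate)
{
	int nr_isolated = 0;

	pthread_mutex_lock(&pni->lock);
	while (nr_isolated < nr_to_isolate && pni->lru) {
		out[nr_isolated++] = pni->lru;
		pni->lru = pni->lru->next;
		pni->nr_pages--;
	}
	pthread_mutex_unlock(&pni->lock);

	/* One atomic update for the whole batch, outside the lock. */
	if (nr_isolated)
		atomic_fetch_sub(&node_nr_pages, nr_isolated);

	return nr_isolated;
}

A single atomic update per batch keeps the contended per-node
counter out of the pni->lock hold time, which matches the commit
message's goal of minimizing the work done under that lock.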