[Devel,rh7,8/9] tcache: Use ni->lock only for inserting and erasing from rbtree.

Submitted by Kirill Tkhai on Aug. 15, 2017, 11:24 a.m.

Details

Message ID 150279626705.12806.14324518807848770006.stgit@localhost.localdomain
State New
Series "Manage LRU lists under per-filesystem lock"

Commit Message

This patch completes the splitting of ni->lock into ni->lock and pni->lock.
Now the global ni->lock is used only for inserting into and erasing from
tcache_nodeinfo::reclaim_tree, which happens roughly once per 1024 page
insertions or deletions. All other LRU operations use pni->lock, which is
per-filesystem (i.e., per-container) and therefore does not affect other
containers.
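
For reference, a condensed sketch of the locking pattern this patch
establishes on the add path (an illustration only, not the literal hunk
below; it assumes the existing tcache structures and helpers from
mm/tcache.c):

	static void lru_add_sketch(struct tcache_nodeinfo *ni,
				   struct tcache_pool_nodeinfo *pni,
				   struct page *page)
	{
		spin_lock(&pni->lock);		/* per-container LRU lock */
		list_add_tail(&page->lru, &pni->lru);
		pni->nr_pages++;

		/* Rare path (~once per 1024 ops): touch the global reclaim_tree. */
		if (tcache_check_events(ni, pni)) {
			spin_lock(&ni->lock);	/* global lock, rbtree only */
			if (!RB_EMPTY_NODE(&pni->reclaim_node))
				rb_erase(&pni->reclaim_node, &ni->reclaim_tree);
			update_reclaim_weight(ni, pni);
			__tcache_insert_reclaim_node(ni, pni);
			spin_unlock(&ni->lock);
		}

		spin_unlock(&pni->lock);
	}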

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 mm/tcache.c |   35 ++++++++++++++++-------------------
 1 file changed, 16 insertions(+), 19 deletions(-)


diff --git a/mm/tcache.c b/mm/tcache.c
index 53fc08de447..6fad864a37f 100644
--- a/mm/tcache.c
+++ b/mm/tcache.c
@@ -274,7 +274,6 @@  static void tcache_lru_add(struct tcache_pool *pool, struct page *page)
 	struct tcache_nodeinfo *ni = &tcache_nodeinfo[nid];
 	struct tcache_pool_nodeinfo *pni = &pool->nodeinfo[nid];
 
-	spin_lock(&ni->lock);
 	spin_lock(&pni->lock);
 	pni->nr_pages++;
 	list_add_tail(&page->lru, &pni->lru);
@@ -288,14 +287,15 @@  static void tcache_lru_add(struct tcache_pool *pool, struct page *page)
 	atomic_long_inc(&ni->nr_pages);
 
 	if (tcache_check_events(ni, pni) || RB_EMPTY_NODE(&pni->reclaim_node)) {
+		spin_lock(&ni->lock);
 		if (!RB_EMPTY_NODE(&pni->reclaim_node))
 			rb_erase(&pni->reclaim_node, &ni->reclaim_tree);
 		update_reclaim_weight(ni, pni);
 		__tcache_insert_reclaim_node(ni, pni);
+		spin_unlock(&ni->lock);
 	}
 
 	spin_unlock(&pni->lock);
-	spin_unlock(&ni->lock);
 }
 
 static void __tcache_lru_del(struct tcache_pool_nodeinfo *pni,
@@ -316,7 +316,6 @@  static void tcache_lru_del(struct tcache_pool *pool, struct page *page,
 	struct tcache_nodeinfo *ni = &tcache_nodeinfo[nid];
 	struct tcache_pool_nodeinfo *pni = &pool->nodeinfo[nid];
 
-	spin_lock(&ni->lock);
 	spin_lock(&pni->lock);
 
 	/* Raced with reclaimer? */
@@ -331,14 +330,15 @@  static void tcache_lru_del(struct tcache_pool *pool, struct page *page,
 	atomic_long_dec(&ni->nr_pages);
 
 	if (tcache_check_events(ni, pni)) {
+		spin_lock(&ni->lock);
 		if (!RB_EMPTY_NODE(&pni->reclaim_node))
 			rb_erase(&pni->reclaim_node, &ni->reclaim_tree);
 		update_reclaim_weight(ni, pni);
 		__tcache_insert_reclaim_node(ni, pni);
+		spin_unlock(&ni->lock);
 	}
 out:
 	spin_unlock(&pni->lock);
-	spin_unlock(&ni->lock);
 }
 
 static int tcache_create_pool(void)
@@ -1069,29 +1069,26 @@  tcache_lru_isolate(int nid, struct page **pages, int nr_to_isolate)
 	}
 	rcu_read_unlock();
 
-	spin_lock_irq(&ni->lock);
 	spin_lock(&pni->lock);
 	nr = __tcache_lru_isolate(pni, pages, nr_to_isolate);
 	nr_isolated += nr;
-
-	if (!nr) {
-		spin_unlock(&pni->lock);
-		spin_unlock_irq(&ni->lock);
-		goto out_put;
-	}
+	if (!nr)
+		goto unlock;
 
 	atomic_long_sub(nr, &ni->nr_pages);
 
-	if (!RB_EMPTY_NODE(rbn)) {
-		rb_erase(rbn, &ni->reclaim_tree);
-		RB_CLEAR_NODE(rbn);
+	if (!RB_EMPTY_NODE(rbn) || !list_empty(&pni->lru)) {
+		spin_lock(&ni->lock);
+		if (!RB_EMPTY_NODE(rbn))
+			rb_erase(rbn, &ni->reclaim_tree);
+		if (!list_empty(&pni->lru))
+			__tcache_insert_reclaim_node(ni, pni);
+		else
+			RB_CLEAR_NODE(rbn);
+		spin_unlock(&ni->lock);
 	}
-	if (!list_empty(&pni->lru))
-		__tcache_insert_reclaim_node(ni, pni);
-
+unlock:
 	spin_unlock(&pni->lock);
-	spin_unlock_irq(&ni->lock);
-out_put:
 	tcache_put_pool(pni->pool);
 out:
 	return nr_isolated;