[Devel,RHEL7,COMMIT] tcache: Make tcache_lru_isolate() keep ni->lock less

Submitted by Konstantin Khorenko on Aug. 31, 2017, 3:18 p.m.

Details

Message ID 201708311518.v7VFIKtD017611@finist_ce7.work
State New
Series "tcache: Manage LRU lists under per-filesystem lock"
Headers show

Commit Message

Konstantin Khorenko Aug. 31, 2017, 3:18 p.m.
The commit is pushed to "branch-rh7-3.10.0-514.26.1.vz7.35.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-514.26.1.vz7.35.5
------>
commit 05e159ab7e5981fc76950e8e999d5f855d9313f7
Author: Kirill Tkhai <ktkhai@virtuozzo.com>
Date:   Thu Aug 31 18:18:20 2017 +0300

    tcache: Make tcache_lru_isolate() keep ni->lock less
    
    Grab the pool using RCU techniques, and do not use ni->lock.
    This refactors the function and will be built upon in further patches.
    
    v2: Use tcache_nodeinfo::rb_first
    
    Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
    Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
 mm/tcache.c | 40 ++++++++++++++++++++++++++++------------
 1 file changed, 28 insertions(+), 12 deletions(-)

Patch hide | download patch | download mbox

diff --git a/mm/tcache.c b/mm/tcache.c
index 40608ec..3d9c5ac 100644
--- a/mm/tcache.c
+++ b/mm/tcache.c
@@ -1044,33 +1044,49 @@  tcache_lru_isolate(int nid, struct page **pages, int nr_to_isolate)
 	int nr_isolated = 0;
 	struct rb_node *rbn;
 
-	spin_lock_irq(&ni->lock);
+	rcu_read_lock();
 again:
-	rbn = rb_first(&ni->reclaim_tree);
-	if (!rbn)
+	rbn = rcu_dereference(ni->rb_first);
+	if (!rbn) {
+		rcu_read_unlock();
 		goto out;
-
-	rb_erase(rbn, &ni->reclaim_tree);
-	RB_CLEAR_NODE(rbn);
-	update_ni_rb_first(ni);
+	}
 
 	pni = rb_entry(rbn, struct tcache_pool_nodeinfo, reclaim_node);
-	if (!tcache_grab_pool(pni->pool))
+	if (!tcache_grab_pool(pni->pool)) {
+		spin_lock_irq(&ni->lock);
+		if (!RB_EMPTY_NODE(rbn) && list_empty(&pni->lru)) {
+			rb_erase(rbn, &ni->reclaim_tree);
+			RB_CLEAR_NODE(rbn);
+			update_ni_rb_first(ni);
+		}
+		spin_unlock_irq(&ni->lock);
 		goto again;
+	}
+	rcu_read_unlock();
 
+	spin_lock_irq(&ni->lock);
 	spin_lock(&pni->lock);
 	nr_isolated = __tcache_lru_isolate(pni, pages, nr_to_isolate);
+
+	if (!nr_isolated)
+		goto unlock;
+
 	ni->nr_pages -= nr_isolated;
 
-	if (!list_empty(&pni->lru)) {
-		__tcache_insert_reclaim_node(ni, pni);
-		update_ni_rb_first(ni);
+	if (!RB_EMPTY_NODE(rbn)) {
+		rb_erase(rbn, &ni->reclaim_tree);
+		RB_CLEAR_NODE(rbn);
 	}
+	if (!list_empty(&pni->lru))
+		__tcache_insert_reclaim_node(ni, pni);
+	update_ni_rb_first(ni);
 
+unlock:
 	spin_unlock(&pni->lock);
+	spin_unlock_irq(&ni->lock);
 	tcache_put_pool(pni->pool);
 out:
-	spin_unlock_irq(&ni->lock);
 	return nr_isolated;
 }