[Devel,rh7,v2,05/10] tcache: Cache rb_first() of reclaim tree in tcache_nodeinfo::rb_first

Submitted by Kirill Tkhai on Aug. 16, 2017, 11:52 a.m.

Details

Message ID: 150288436212.16671.5735678677150879838.stgit@localhost.localdomain
State: New
Series: "tcache: Manage LRU lists under per-filesystem lock"

Commit Message

Kirill Tkhai Aug. 16, 2017, 11:52 a.m.
Set rb_first via RCU and thus allow lockless access to it.

v2: New
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 mm/tcache.c |   16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)


diff --git a/mm/tcache.c b/mm/tcache.c
index ed04f810b55..fe69bd5ceff 100644
--- a/mm/tcache.c
+++ b/mm/tcache.c
@@ -157,6 +157,7 @@  struct tcache_nodeinfo {
 
 	/* tree of pools, sorted by reclaim prio */
 	struct rb_root reclaim_tree;
+	struct rb_node __rcu *rb_first;
 
 	/* total number of pages on all LRU lists corresponding to this node */
 	unsigned long nr_pages;
@@ -205,6 +206,13 @@  node_tree_from_key(struct tcache_pool *pool,
 	return &pool->node_tree[key_hash(key) & (num_node_trees - 1)];
 }
 
+static struct rb_node *update_ni_rb_first(struct tcache_nodeinfo *ni)
+{
+	struct rb_node *first = rb_first(&ni->reclaim_tree);
+	rcu_assign_pointer(ni->rb_first, first);
+	return first;
+}
+
 static void __tcache_insert_reclaim_node(struct tcache_nodeinfo *ni,
 					 struct tcache_pool_nodeinfo *pni);
 
@@ -242,6 +250,7 @@  static inline void __tcache_check_events(struct tcache_nodeinfo *ni,
 
 	rb_erase(&pni->reclaim_node, &ni->reclaim_tree);
 	__tcache_insert_reclaim_node(ni, pni);
+	update_ni_rb_first(ni);
 }
 
 /*
@@ -933,6 +942,7 @@  tcache_remove_from_reclaim_trees(struct tcache_pool *pool)
 		spin_lock_irq(&ni->lock);
 		if (!RB_EMPTY_NODE(&pni->reclaim_node)) {
 			rb_erase(&pni->reclaim_node, &ni->reclaim_tree);
+			update_ni_rb_first(ni);
 			/*
 			 * Clear the node for __tcache_check_events() not to
 			 * reinsert the pool back into the tree.
@@ -1039,6 +1049,7 @@  tcache_lru_isolate(int nid, struct page **pages, int nr_to_isolate)
 
 	rb_erase(rbn, &ni->reclaim_tree);
 	RB_CLEAR_NODE(rbn);
+	update_ni_rb_first(ni);
 
 	pni = rb_entry(rbn, struct tcache_pool_nodeinfo, reclaim_node);
 	if (!tcache_grab_pool(pni->pool))
@@ -1048,8 +1059,10 @@  tcache_lru_isolate(int nid, struct page **pages, int nr_to_isolate)
 	nr_isolated = __tcache_lru_isolate(pni, pages, nr_to_isolate);
 	ni->nr_pages -= nr_isolated;
 
-	if (!list_empty(&pni->lru))
+	if (!list_empty(&pni->lru)) {
 		__tcache_insert_reclaim_node(ni, pni);
+		update_ni_rb_first(ni);
+	}
 
 	spin_unlock(&pni->lock);
 	tcache_put_pool(pni->pool);
@@ -1348,6 +1361,7 @@  static int __init tcache_nodeinfo_init(void)
 		ni = &tcache_nodeinfo[i];
 		spin_lock_init(&ni->lock);
 		ni->reclaim_tree = RB_ROOT;
+		update_ni_rb_first(ni);
 	}
 	return 0;
 }
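
For context, the point of publishing rb_first with rcu_assign_pointer() in update_ni_rb_first() is presumably to let a later patch in the series read the head of the reclaim tree without taking ni->lock. A minimal sketch of such a lockless reader, assuming the structures shown in the hunks above (the helper name and its exact use are illustrative, not part of this patch), could look like:

/*
 * Illustrative only, not part of the patch: with ni->rb_first published
 * via rcu_assign_pointer() in update_ni_rb_first(), a reader can peek at
 * the head of the reclaim tree without taking ni->lock.  The helper name
 * is hypothetical.
 */
static struct tcache_pool_nodeinfo *
tcache_peek_reclaim_pool(struct tcache_nodeinfo *ni)
{
	struct rb_node *rbn;
	struct tcache_pool_nodeinfo *pni = NULL;

	rcu_read_lock();
	rbn = rcu_dereference(ni->rb_first);	/* pairs with rcu_assign_pointer() */
	if (rbn)
		pni = rb_entry(rbn, struct tcache_pool_nodeinfo, reclaim_node);
	/*
	 * A real caller would have to pin the pool here (e.g. with
	 * tcache_grab_pool()) before dropping the RCU read-side lock.
	 */
	rcu_read_unlock();

	return pni;
}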

Comments

Andrey Ryabinin Aug. 16, 2017, 1:36 p.m.
On 08/16/2017 02:52 PM, Kirill Tkhai wrote:

>  
> @@ -242,6 +250,7 @@ static inline void __tcache_check_events(struct tcache_nodeinfo *ni,
>  
>  	rb_erase(&pni->reclaim_node, &ni->reclaim_tree);
>  	__tcache_insert_reclaim_node(ni, pni);
> +	update_ni_rb_first(ni);


Either add an update_ni_rb_first() call in tcache_lru_add() after the __tcache_insert_reclaim_node() call,
or add a single update_ni_rb_first() to __tcache_insert_reclaim_node() itself and remove it from here
and ...

> @@ -1048,8 +1059,10 @@ tcache_lru_isolate(int nid, struct page **pages, int nr_to_isolate)
>  	nr_isolated = __tcache_lru_isolate(pni, pages, nr_to_isolate);
>  	ni->nr_pages -= nr_isolated;
>  
> -	if (!list_empty(&pni->lru))
> +	if (!list_empty(&pni->lru)) {
>  		__tcache_insert_reclaim_node(ni, pni);
> +		update_ni_rb_first(ni);

... from here.
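
The second variant suggested above would look roughly like the sketch below. The body of __tcache_insert_reclaim_node() is not part of this patch, so the tree walk is elided here; only the placement of the update_ni_rb_first() call is the point.

static void __tcache_insert_reclaim_node(struct tcache_nodeinfo *ni,
					 struct tcache_pool_nodeinfo *pni)
{
	struct rb_node **link = &ni->reclaim_tree.rb_node;
	struct rb_node *parent = NULL;

	/* ... walk the tree by reclaim priority to pick link/parent ... */

	rb_link_node(&pni->reclaim_node, parent, link);
	rb_insert_color(&pni->reclaim_node, &ni->reclaim_tree);
	update_ni_rb_first(ni);		/* publish the new leftmost node here */
}

With that, the calls added after __tcache_insert_reclaim_node() in __tcache_check_events() and tcache_lru_isolate() could be dropped; the rb_erase() paths would still need their own update_ni_rb_first().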