[Devel,rh7,5/9] tcache: Move erase-insert logic out of tcache_check_events()

Submitted by Kirill Tkhai on Aug. 15, 2017, 11:23 a.m.

Details

Message ID 150279623976.12806.11204431774622043589.stgit@localhost.localdomain
State New
Series "Manage LRU lists under per-filesystem lock"
Headers show

Commit Message

Kirill Tkhai Aug. 15, 2017, 11:23 a.m.
Make the function return true when an erase-insert (requeue)
should be executed, and move the erase-insert logic out of the function.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 mm/tcache.c |   28 ++++++++++++++++------------
 1 file changed, 16 insertions(+), 12 deletions(-)

Patch hide | download patch | download mbox

diff --git a/mm/tcache.c b/mm/tcache.c
index cecaf02b365..f70de72c3c1 100644
--- a/mm/tcache.c
+++ b/mm/tcache.c
@@ -208,8 +208,8 @@  node_tree_from_key(struct tcache_pool *pool,
 static void __tcache_insert_reclaim_node(struct tcache_nodeinfo *ni,
 					 struct tcache_pool_nodeinfo *pni);
 
-static inline void __tcache_check_events(struct tcache_nodeinfo *ni,
-					 struct tcache_pool_nodeinfo *pni)
+static inline bool tcache_check_events(struct tcache_nodeinfo *ni,
+				       struct tcache_pool_nodeinfo *pni)
 {
 	/*
 	 * We don't want to rebalance reclaim_tree on each get/put, because it
@@ -220,7 +220,7 @@  static inline void __tcache_check_events(struct tcache_nodeinfo *ni,
 	 */
 	pni->events++;
 	if (likely(pni->events < 1024))
-		return;
+		return false;
 
 	pni->events = 0;
 
@@ -230,7 +230,7 @@  static inline void __tcache_check_events(struct tcache_nodeinfo *ni,
 	 * it will be done by the shrinker once it tries to scan it.
 	 */
 	if (unlikely(list_empty(&pni->lru)))
-		return;
+		return false;
 
 	/*
 	 * This can only happen if the node was removed from the tree on pool
@@ -238,10 +238,9 @@  static inline void __tcache_check_events(struct tcache_nodeinfo *ni,
 	 * then.
 	 */
 	if (unlikely(RB_EMPTY_NODE(&pni->reclaim_node)))
-		return;
+		return false;
 
-	rb_erase(&pni->reclaim_node, &ni->reclaim_tree);
-	__tcache_insert_reclaim_node(ni, pni);
+	return true;
 }
 
 /*
@@ -268,10 +267,11 @@  static void tcache_lru_add(struct tcache_pool *pool, struct page *page)
 		pni->recent_puts /= 2;
 	}
 
-	__tcache_check_events(ni, pni);
-
-	if (unlikely(RB_EMPTY_NODE(&pni->reclaim_node)))
+	if (tcache_check_events(ni, pni) || RB_EMPTY_NODE(&pni->reclaim_node)) {
+		if (!RB_EMPTY_NODE(&pni->reclaim_node))
+			rb_erase(&pni->reclaim_node, &ni->reclaim_tree);
 		__tcache_insert_reclaim_node(ni, pni);
+	}
 
 	spin_unlock(&pni->lock);
 	spin_unlock(&ni->lock);
@@ -308,7 +308,11 @@  static void tcache_lru_del(struct tcache_pool *pool, struct page *page,
 	if (reused)
 		pni->recent_gets++;
 
-	__tcache_check_events(ni, pni);
+	if (tcache_check_events(ni, pni)) {
+		if (!RB_EMPTY_NODE(&pni->reclaim_node))
+			rb_erase(&pni->reclaim_node, &ni->reclaim_tree);
+		__tcache_insert_reclaim_node(ni, pni);
+	}
 out:
 	spin_unlock(&pni->lock);
 	spin_unlock(&ni->lock);
@@ -934,7 +938,7 @@  tcache_remove_from_reclaim_trees(struct tcache_pool *pool)
 		if (!RB_EMPTY_NODE(&pni->reclaim_node)) {
 			rb_erase(&pni->reclaim_node, &ni->reclaim_tree);
 			/*
-			 * Clear the node for __tcache_check_events() not to
+			 * Clear the node for tcache_check_events() not to
 			 * reinsert the pool back into the tree.
 			 */
 			RB_CLEAR_NODE(&pni->reclaim_node);