[Devel,rh7,2/2] tcache: speed up tcache operations on an empty tcache

Submitted by Andrey Ryabinin on Dec. 12, 2016, 11:28 a.m.

Details

Message ID 1481542086-5887-2-git-send-email-aryabinin@virtuozzo.com
State New
Series "Series without cover letter"

Commit Message

Andrey Ryabinin Dec. 12, 2016, 11:28 a.m.
If the tcache is empty, we can bail out immediately from
tcache_cleancache_{get_page,invalidate_page,invalidate_inode}().
As a fast way of identifying an empty tcache, this patch adds a global
atomic counter of tcache nodes.

https://jira.sw.ru/browse/PSBM-56475

Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
 mm/tcache.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)
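
The whole change reduces to one pattern: keep a global count of live
nodes, and let the hot cleancache entry points return early when that
count is zero. Below is a minimal stand-alone sketch of that pattern,
written against C11 <stdatomic.h> rather than the kernel's atomic_t
API; the function names are illustrative, not the kernel symbols:

#include <stdatomic.h>

static atomic_int nr_nodes;	/* global count of live cache nodes */

void node_create(void)
{
	/* ... allocate the node and insert it into the tree ... */
	atomic_fetch_add(&nr_nodes, 1);
}

void node_destroy(void)
{
	atomic_fetch_sub(&nr_nodes, 1);
	/* ... remove the node from the tree and free it ... */
}

int cache_get_page(void)
{
	/*
	 * Fast path: with no nodes anywhere, skip the pool and node
	 * lookup (and the locking it entails) entirely.
	 */
	if (atomic_load(&nr_nodes) == 0)
		return -1;

	/* ... slow path: find pool, node, and page as before ... */
	return 0;
}

The unsynchronized read is deliberate: cleancache hooks for a given
page are called under the page lock, so put and invalidate for that
page do not race with each other, and a stale zero merely turns a
lookup into a cache miss, which a best-effort cache must tolerate
anyway.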


diff --git a/mm/tcache.c b/mm/tcache.c
index cec5a4e..00e15d6 100644
--- a/mm/tcache.c
+++ b/mm/tcache.c
@@ -98,6 +98,8 @@ struct tcache_pool {
 	struct tcache_pool_nodeinfo	nodeinfo[0];
 };
 
+static atomic_t nr_tcache_nodes;
+
 /*
  * Tcache nodes correspond to inodes. A node is created automatically when a
  * new page is added to the cache (cleancache_put_page) and destroyed either
@@ -561,6 +563,7 @@ retry:
 		node->pool = pool;
 		node->key = *key;
 		atomic_long_inc(&pool->nr_nodes);
+		atomic_inc(&nr_tcache_nodes);
 		__tcache_insert_node(&tree->root, node, rb_link, rb_parent);
 	}
 	spin_unlock_irqrestore(&tree->lock, flags);
@@ -590,6 +593,7 @@ static void tcache_node_release_fn(struct kref *kref)
 	__tcache_delete_node(&tree->root, node);
 	spin_unlock(&tree->lock);
 
+	atomic_dec(&nr_tcache_nodes);
 	atomic_long_dec(&node->pool->nr_nodes);
 	kfree(node);
 }
@@ -1168,6 +1172,9 @@ static int tcache_cleancache_get_page(int pool_id,
 	struct tcache_node *node;
 	struct page *cache_page = NULL;
 
+	if (!atomic_read(&nr_tcache_nodes))
+		return -1;
+
 	node = tcache_get_node_and_pool(pool_id, &key, false);
 	if (node) {
 		cache_page = tcache_detach_page(node, index, true);
@@ -1192,6 +1199,9 @@ static void tcache_cleancache_invalidate_page(int pool_id,
 	struct tcache_node *node;
 	struct page *page;
 
+	if (!atomic_read(&nr_tcache_nodes))
+		return;
+
 	node = tcache_get_node_and_pool(pool_id, &key, false);
 	if (node) {
 		page = tcache_detach_page(node, index, false);
@@ -1206,6 +1216,9 @@ static void tcache_cleancache_invalidate_inode(int pool_id,
 {
 	struct tcache_pool *pool;
 
+	if (!atomic_read(&nr_tcache_nodes))
+		return;
+
 	pool = tcache_get_pool(pool_id);
 	if (pool) {
 		tcache_invalidate_node(pool, &key);

Comments

Kirill Tkhai Dec. 13, 2016, 12:10 p.m.
On 12.12.2016 14:28, Andrey Ryabinin wrote:
> If the tcache is empty, we can bail out immediately from
> tcache_cleancache_{get_page,invalidate_page,invalidate_inode}().
> As a fast way of identifying an empty tcache, this patch adds a global
> atomic counter of tcache nodes.
> 
> https://jira.sw.ru/browse/PSBM-56475
> 
> Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>

Reviewed-by: Kirill Tkhai <ktkhai@virtuozzo.com>
