tcache: Close race between tcache_invalidate_node() and tcache_attach_page()

Submitted by Kirill Tkhai on Jan. 15, 2018, 11:29 a.m.

Details

Message ID 151601569696.31757.7658356486657939177.stgit@localhost.localdomain
State New
Series "tcache: Close race between tcache_invalidate_node() and tcache_attach_page()"
Headers show

Commit Message

Kirill Tkhai Jan. 15, 2018, 11:29 a.m.
tcache_attach_page()		tcache_invalidate_node()
..				__tcache_lookup_node()
..				__tcache_delete_node()
Check node->invalidated		..
tcache_page_tree_insert()	..
tcache_lru_add()		..
..				tcache_invalidate_node_pages()
..				  node->invalidated = true

Check nr_pages to determine whether there is a race, and repeat
the node page iterations if so.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 mm/tcache.c |   12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

Patch hide | download patch | download mbox

diff --git a/mm/tcache.c b/mm/tcache.c
index a45af63fbd1b..06919efdd357 100644
--- a/mm/tcache.c
+++ b/mm/tcache.c
@@ -676,6 +676,7 @@  static void tcache_invalidate_node(struct tcache_pool *pool,
 	node = __tcache_lookup_node(&tree->root, key, &rb_link, &rb_parent);
 	if (node) {
 		tcache_hold_node(node);
+		node->invalidated = true;
 		__tcache_delete_node(&tree->root, node);
 	}
 	spin_unlock_irq(&tree->lock);
@@ -699,6 +700,7 @@  tcache_invalidate_node_tree(struct tcache_node_tree *tree)
 		node = rb_entry(rb_first(&tree->root),
 				struct tcache_node, tree_node);
 
+		node->invalidated = true;
 		/* Remaining nodes must be held solely by their pages */
 		WARN_ON(atomic_read(&node->kref.refcount) != 1);
 		WARN_ON(node->nr_pages == 0);
@@ -921,7 +923,6 @@  tcache_invalidate_node_pages(struct tcache_node *node)
 	/*
 	 * First forbid new page insertions - see tcache_page_tree_replace.
 	 */
-	node->invalidated = true;
 again:
 	repeat = false;
 	while ((nr_pages = tcache_lookup(pages, node, index,
@@ -948,12 +949,17 @@  tcache_invalidate_node_pages(struct tcache_node *node)
 		index++;
 	}
 
+	/* We may race with parallel page_ref_freeze() and tcache_attach_page() */
+	if (!repeat) {
+		spin_lock_irq(&node->tree_lock);
+		repeat = (node->nr_pages != 0);
+		spin_unlock_irq(&node->tree_lock);
+	}
 	if (repeat) {
+		schedule_timeout_interruptible(1);
 		index = 0;
 		goto again;
 	}
-
-	WARN_ON(node->nr_pages != 0);
 }
 
 static noinline_for_stack void

Comments

Kirill Tkhai Jan. 15, 2018, 11:29 a.m.
https://jira.sw.ru/browse/PSBM-80561

On 15.01.2018 14:29, Kirill Tkhai wrote:
> tcache_attach_page()		tcache_invalidate_node()
> ..				__tcache_lookup_node()
> ..				__tcache_delete_node()
> Check node->invalidated		..
> tcache_page_tree_insert()	..
> tcache_lru_add()		..
> ..				tcache_invalidate_node_pages()
> ..				  node->invalidated = true
> 
> Check nr_pages to determine whether there is a race, and repeat
> the node page iterations if so.
> 
> Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
> ---
>  mm/tcache.c |   12 +++++++++---
>  1 file changed, 9 insertions(+), 3 deletions(-)
> 
> diff --git a/mm/tcache.c b/mm/tcache.c
> index a45af63fbd1b..06919efdd357 100644
> --- a/mm/tcache.c
> +++ b/mm/tcache.c
> @@ -676,6 +676,7 @@ static void tcache_invalidate_node(struct tcache_pool *pool,
>  	node = __tcache_lookup_node(&tree->root, key, &rb_link, &rb_parent);
>  	if (node) {
>  		tcache_hold_node(node);
> +		node->invalidated = true;
>  		__tcache_delete_node(&tree->root, node);
>  	}
>  	spin_unlock_irq(&tree->lock);
> @@ -699,6 +700,7 @@ tcache_invalidate_node_tree(struct tcache_node_tree *tree)
>  		node = rb_entry(rb_first(&tree->root),
>  				struct tcache_node, tree_node);
>  
> +		node->invalidated = true;
>  		/* Remaining nodes must be held solely by their pages */
>  		WARN_ON(atomic_read(&node->kref.refcount) != 1);
>  		WARN_ON(node->nr_pages == 0);
> @@ -921,7 +923,6 @@ tcache_invalidate_node_pages(struct tcache_node *node)
>  	/*
>  	 * First forbid new page insertions - see tcache_page_tree_replace.
>  	 */
> -	node->invalidated = true;
>  again:
>  	repeat = false;
>  	while ((nr_pages = tcache_lookup(pages, node, index,
> @@ -948,12 +949,17 @@ tcache_invalidate_node_pages(struct tcache_node *node)
>  		index++;
>  	}
>  
> +	/* We may race with parallel page_ref_freeze() and tcache_attach_page() */
> +	if (!repeat) {
> +		spin_lock_irq(&node->tree_lock);
> +		repeat = (node->nr_pages != 0);
> +		spin_unlock_irq(&node->tree_lock);
> +	}
>  	if (repeat) {
> +		schedule_timeout_interruptible(1);
>  		index = 0;
>  		goto again;
>  	}
> -
> -	WARN_ON(node->nr_pages != 0);
>  }
>  
>  static noinline_for_stack void
>