[rh7,2/2] mm/mempool: Use kvmalloc to allocate array of element pointers

Submitted by Andrey Ryabinin on Sept. 12, 2018, 10:13 a.m.

Details

Message ID 20180912101333.12824-2-aryabinin@virtuozzo.com
State New
Series "Series without cover letter"
Headers show

Commit Message

Andrey Ryabinin Sept. 12, 2018, 10:13 a.m.
The minimum number of mempool elements passed to
mempool_create()/mempool_resize() can be large enough to
trigger a high-order allocation in kmalloc_node(). High-order
allocations are costly and/or may fail if memory is fragmented.
Since we don't need the array of element pointers to be physically
contiguous, use kvmalloc_node() to allocate it.

https://jira.sw.ru/browse/HCI-132
https://pmc.acronis.com/browse/VSTOR-14758
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
 mm/mempool.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

Patch hide | download patch | download mbox

diff --git a/mm/mempool.c b/mm/mempool.c
index 864b0779adb7..d2535b6e25a9 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -153,7 +153,7 @@  void mempool_destroy(mempool_t *pool)
 		void *element = remove_element(pool, GFP_KERNEL);
 		pool->free(element, pool->pool_data);
 	}
-	kfree(pool->elements);
+	kvfree(pool->elements);
 	kfree(pool);
 }
 EXPORT_SYMBOL(mempool_destroy);
@@ -188,8 +188,8 @@  mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
 	pool = kmalloc_node(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
 	if (!pool)
 		return NULL;
-	pool->elements = kmalloc_node(min_nr * sizeof(void *),
-				      gfp_mask, node_id);
+	pool->elements = kvmalloc_node(min_nr * sizeof(void *),
+				       gfp_mask, node_id);
 	if (!pool->elements) {
 		kfree(pool);
 		return NULL;
@@ -239,6 +239,7 @@  int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
 	void *element;
 	void **new_elements;
 	unsigned long flags;
+	void *old_elements = NULL;
 
 	BUG_ON(new_min_nr <= 0);
 
@@ -256,7 +257,7 @@  int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	/* Grow the pool */
-	new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
+	new_elements = kvmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
 	if (!new_elements)
 		return -ENOMEM;
 
@@ -264,12 +265,12 @@  int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
 	if (unlikely(new_min_nr <= pool->min_nr)) {
 		/* Raced, other resize will do our work */
 		spin_unlock_irqrestore(&pool->lock, flags);
-		kfree(new_elements);
+		kvfree(new_elements);
 		goto out;
 	}
 	memcpy(new_elements, pool->elements,
 			pool->curr_nr * sizeof(*new_elements));
-	kfree(pool->elements);
+	old_elements = pool->elements;
 	pool->elements = new_elements;
 	pool->min_nr = new_min_nr;
 
@@ -290,6 +291,7 @@  int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
 out_unlock:
 	spin_unlock_irqrestore(&pool->lock, flags);
 out:
+	kvfree(old_elements);
 	return 0;
 }
 EXPORT_SYMBOL(mempool_resize);

Comments

Kirill Tkhai Sept. 12, 2018, 2:45 p.m.
On 12.09.2018 13:13, Andrey Ryabinin wrote:
> The minimum number of mempool elements passed to
> mempool_create()/mempool_resize() can be large enough to
> trigger a high-order allocation in kmalloc_node(). High-order
> allocations are costly and/or may fail if memory is fragmented.
> Since we don't need the array of element pointers to be physically
> contiguous, use kvmalloc_node() to allocate it.
> 
> https://jira.sw.ru/browse/HCI-132
> https://pmc.acronis.com/browse/VSTOR-14758
> Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>

Reviewed-by: Kirill Tkhai <ktkhai@virtuozzo.com>

> ---
>  mm/mempool.c | 14 ++++++++------
>  1 file changed, 8 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/mempool.c b/mm/mempool.c
> index 864b0779adb7..d2535b6e25a9 100644
> --- a/mm/mempool.c
> +++ b/mm/mempool.c
> @@ -153,7 +153,7 @@ void mempool_destroy(mempool_t *pool)
>  		void *element = remove_element(pool, GFP_KERNEL);
>  		pool->free(element, pool->pool_data);
>  	}
> -	kfree(pool->elements);
> +	kvfree(pool->elements);
>  	kfree(pool);
>  }
>  EXPORT_SYMBOL(mempool_destroy);
> @@ -188,8 +188,8 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
>  	pool = kmalloc_node(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
>  	if (!pool)
>  		return NULL;
> -	pool->elements = kmalloc_node(min_nr * sizeof(void *),
> -				      gfp_mask, node_id);
> +	pool->elements = kvmalloc_node(min_nr * sizeof(void *),
> +				       gfp_mask, node_id);
>  	if (!pool->elements) {
>  		kfree(pool);
>  		return NULL;
> @@ -239,6 +239,7 @@ int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
>  	void *element;
>  	void **new_elements;
>  	unsigned long flags;
> +	void *old_elements = NULL;
>  
>  	BUG_ON(new_min_nr <= 0);
>  
> @@ -256,7 +257,7 @@ int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
>  	spin_unlock_irqrestore(&pool->lock, flags);
>  
>  	/* Grow the pool */
> -	new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
> +	new_elements = kvmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
>  	if (!new_elements)
>  		return -ENOMEM;
>  
> @@ -264,12 +265,12 @@ int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
>  	if (unlikely(new_min_nr <= pool->min_nr)) {
>  		/* Raced, other resize will do our work */
>  		spin_unlock_irqrestore(&pool->lock, flags);
> -		kfree(new_elements);
> +		kvfree(new_elements);
>  		goto out;
>  	}
>  	memcpy(new_elements, pool->elements,
>  			pool->curr_nr * sizeof(*new_elements));
> -	kfree(pool->elements);
> +	old_elements = pool->elements;
>  	pool->elements = new_elements;
>  	pool->min_nr = new_min_nr;
>  
> @@ -290,6 +291,7 @@ int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
>  out_unlock:
>  	spin_unlock_irqrestore(&pool->lock, flags);
>  out:
> +	kvfree(old_elements);
>  	return 0;
>  }
>  EXPORT_SYMBOL(mempool_resize);
>