[v3] tcache/tswap: don't shrink tcache/tswap until they are initialized

Submitted by Konstantin Khorenko on Dec. 11, 2018, 9:19 p.m.

Details

Message ID 20181211211946.15283-1-khorenko@virtuozzo.com
State New
Series "tcache/tswap: don't shrink tcache/tswap until they are initialized"
Headers show

Commit Message

Konstantin Khorenko Dec. 11, 2018, 9:19 p.m.
kswapd could run before tcache_init()/tswap_init() have finished,
i.e. before tcache_nodeinfo/tswap_lru_node are allocated,
which could lead to a kernel panic in tcache_shrink_count()/tswap_shrink_count().

v2: add smp_{wmb,rmb} to guarantee proper load ordering.
v3: fixed the check of whether tcache/tswap has been initialized

https://pmc.acronis.com/browse/VSTOR-18694

Found-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Konstantin Khorenko <khorenko@virtuozzo.com>
---
 mm/internal.h | 22 ++++++++++++++++++----
 mm/tcache.c   |  5 +++++
 mm/tswap.c    |  6 ++++++
 3 files changed, 29 insertions(+), 4 deletions(-)

Patch hide | download patch | download mbox

diff --git a/mm/internal.h b/mm/internal.h
index b34b03dc37f2..18a2a69552f0 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -393,11 +393,18 @@  unsigned long tswap_shrink_count(struct shrinker *shrink,
 static inline unsigned long tswap_shrink(struct shrink_control *sc)
 {
 	unsigned long ret;
-	extern bool tswap_enabled;
+	extern struct static_key tswap_inited;
 
-	if (!READ_ONCE(tswap_enabled))
+	if (!static_key_false(&tswap_inited))
 		return 0;
 
+	/* Pairs with smp_wmb() in tswap_init().
+	 *
+	 * Guarantees that if see tswap_inited == true, then
+	 * tswap_lru_node is already properly initialized.
+	 */
+	smp_rmb();
+
 	ret = tswap_shrink_count(NULL, sc);
 	if (!ret)
 		return ret;
@@ -421,11 +428,18 @@  unsigned long tcache_shrink_count(struct shrinker *shrink,
 static inline unsigned long tcache_shrink(struct shrink_control *sc)
 {
 	unsigned long ret;
-	extern bool tcache_enabled;
+	extern struct static_key tcache_inited;
 
-	if (!READ_ONCE(tcache_enabled))
+	if (!static_key_false(&tcache_inited))
 		return 0;
 
+	/* Pairs with smp_wmb() in tcache_init().
+	 *
+	 * Guarantees that if see tcache_inited == true, then
+	 * tcache_nodeinfo is already properly initialized.
+	 */
+	smp_rmb();
+
 	ret = tcache_shrink_count(NULL, sc);
 	if (!ret)
 		return ret;
diff --git a/mm/tcache.c b/mm/tcache.c
index a6afb0c510aa..8be027f41dff 100644
--- a/mm/tcache.c
+++ b/mm/tcache.c
@@ -179,6 +179,8 @@  static struct tcache_nodeinfo *tcache_nodeinfo;
 /* Enable/disable tcache backend (set at boot time) */
 bool tcache_enabled __read_mostly = true;
 module_param_named(enabled, tcache_enabled, bool, 0444);
+/* To avoid shrink attempt before tcache is inited */
+struct static_key tcache_inited = STATIC_KEY_INIT_FALSE;
 
 /* Enable/disable populating the cache */
 static bool tcache_active __read_mostly = true;
@@ -1460,6 +1462,9 @@  static int __init tcache_init(void)
 	if (err)
 		goto out_unregister_shrinker;
 
+	/* pairs with smp_rmb() in tcache_shrink() */
+	smp_wmb();
+	static_key_slow_inc(&tcache_inited);
 	pr_info("tcache loaded\n");
 	return 0;
 
diff --git a/mm/tswap.c b/mm/tswap.c
index bf7644d5f033..112a13d223d6 100644
--- a/mm/tswap.c
+++ b/mm/tswap.c
@@ -37,6 +37,8 @@  static struct tswap_lru *tswap_lru_node;
 /* Enable/disable tswap backend (set at boot time) */
 bool tswap_enabled __read_mostly = true;
 module_param_named(enabled, tswap_enabled, bool, 0444);
+/* To avoid shrink attempt before tswap is inited */
+struct static_key tswap_inited = STATIC_KEY_INIT_FALSE;
 
 /* Enable/disable populating the cache */
 static bool tswap_active __read_mostly = true;
@@ -433,6 +435,10 @@  static int __init tswap_init(void)
 	frontswap_tmem_exclusive_gets(true);
 
 	old_ops = frontswap_register_ops(&tswap_frontswap_ops);
+
+	/* pairs with smp_rmb() in tswap_shrink() */
+	smp_wmb();
+	static_key_slow_inc(&tswap_inited);
 	pr_info("tswap loaded\n");
 	if (old_ops)
 		pr_warn("tswap: frontswap_ops %p overridden\n", old_ops);

Comments

Kirill Tkhai Dec. 12, 2018, 9 a.m.
On 12.12.2018 00:19, Konstantin Khorenko wrote:
> kswapd could run before tcache_init()/tswap_init() finished,
> thus before tcache_nodeinfo/tswap_lru_node are allocated,
> thus lead to a kernel panic in tcache_shrink_count()/tswap_shrink_count().
> 
> v2: add smp_{wmb,rmb} to guarantee proper load ordering.
> v3: fixed check if tcache/tswap inited
> 
> https://pmc.acronis.com/browse/VSTOR-18694
> 
> Found-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
> Signed-off-by: Konstantin Khorenko <khorenko@virtuozzo.com>

Reviewed-by: Kirill Tkhai <ktkhai@virtuozzo.com>

> ---
>  mm/internal.h | 22 ++++++++++++++++++----
>  mm/tcache.c   |  5 +++++
>  mm/tswap.c    |  6 ++++++
>  3 files changed, 29 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/internal.h b/mm/internal.h
> index b34b03dc37f2..18a2a69552f0 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -393,11 +393,18 @@ unsigned long tswap_shrink_count(struct shrinker *shrink,
>  static inline unsigned long tswap_shrink(struct shrink_control *sc)
>  {
>  	unsigned long ret;
> -	extern bool tswap_enabled;
> +	extern struct static_key tswap_inited;
>  
> -	if (!READ_ONCE(tswap_enabled))
> +	if (!static_key_false(&tswap_inited))
>  		return 0;
>  
> +	/* Pairs with smp_wmb() in tswap_init().
> +	 *
> +	 * Guarantees that if see tswap_inited == true, then
> +	 * tswap_lru_node is already properly initialized.
> +	 */
> +	smp_rmb();
> +
>  	ret = tswap_shrink_count(NULL, sc);
>  	if (!ret)
>  		return ret;
> @@ -421,11 +428,18 @@ unsigned long tcache_shrink_count(struct shrinker *shrink,
>  static inline unsigned long tcache_shrink(struct shrink_control *sc)
>  {
>  	unsigned long ret;
> -	extern bool tcache_enabled;
> +	extern struct static_key tcache_inited;
>  
> -	if (!READ_ONCE(tcache_enabled))
> +	if (!static_key_false(&tcache_inited))
>  		return 0;
>  
> +	/* Pairs with smp_wmb() in tcache_init().
> +	 *
> +	 * Guarantees that if see tcache_inited == true, then
> +	 * tcache_nodeinfo is already properly initialized.
> +	 */
> +	smp_rmb();
> +
>  	ret = tcache_shrink_count(NULL, sc);
>  	if (!ret)
>  		return ret;
> diff --git a/mm/tcache.c b/mm/tcache.c
> index a6afb0c510aa..8be027f41dff 100644
> --- a/mm/tcache.c
> +++ b/mm/tcache.c
> @@ -179,6 +179,8 @@ static struct tcache_nodeinfo *tcache_nodeinfo;
>  /* Enable/disable tcache backend (set at boot time) */
>  bool tcache_enabled __read_mostly = true;
>  module_param_named(enabled, tcache_enabled, bool, 0444);
> +/* To avoid shrink attempt before tcache is inited */
> +struct static_key tcache_inited = STATIC_KEY_INIT_FALSE;
>  
>  /* Enable/disable populating the cache */
>  static bool tcache_active __read_mostly = true;
> @@ -1460,6 +1462,9 @@ static int __init tcache_init(void)
>  	if (err)
>  		goto out_unregister_shrinker;
>  
> +	/* pairs with smp_rmb() in tcache_shrink() */
> +	smp_wmb();
> +	static_key_slow_inc(&tcache_inited);
>  	pr_info("tcache loaded\n");
>  	return 0;
>  
> diff --git a/mm/tswap.c b/mm/tswap.c
> index bf7644d5f033..112a13d223d6 100644
> --- a/mm/tswap.c
> +++ b/mm/tswap.c
> @@ -37,6 +37,8 @@ static struct tswap_lru *tswap_lru_node;
>  /* Enable/disable tswap backend (set at boot time) */
>  bool tswap_enabled __read_mostly = true;
>  module_param_named(enabled, tswap_enabled, bool, 0444);
> +/* To avoid shrink attempt before tswap is inited */
> +struct static_key tswap_inited = STATIC_KEY_INIT_FALSE;
>  
>  /* Enable/disable populating the cache */
>  static bool tswap_active __read_mostly = true;
> @@ -433,6 +435,10 @@ static int __init tswap_init(void)
>  	frontswap_tmem_exclusive_gets(true);
>  
>  	old_ops = frontswap_register_ops(&tswap_frontswap_ops);
> +
> +	/* pairs with smp_rmb() in tswap_shrink() */
> +	smp_wmb();
> +	static_key_slow_inc(&tswap_inited);
>  	pr_info("tswap loaded\n");
>  	if (old_ops)
>  		pr_warn("tswap: frontswap_ops %p overridden\n", old_ops);
>