[Devel,rh7] sched: Fix race on toggling cfs_bandwidth_used

Submitted by Andrey Ryabinin on Aug. 30, 2016, 2:58 p.m.

Details

Message ID 1472569102-16787-1-git-send-email-aryabinin@virtuozzo.com
State New
Series "sched: Fix race on toggling cfs_bandwidth_used"

Commit Message

Andrey Ryabinin Aug. 30, 2016, 2:58 p.m.
From: Ben Segall <bsegall@google.com>

commit 1ee14e6c8cddeeb8a490d7b54cd9016e4bb900b4 upstream.

When we transition cfs_bandwidth_used to false, any currently
throttled groups will incorrectly return false from cfs_rq_throttled.
While tg_set_cfs_bandwidth will unthrottle them eventually, currently
running code (including at least dequeue_task_fair and
distribute_cfs_runtime) will cause errors.

Fix this by turning off cfs_bandwidth_used only after unthrottling all
cfs_rqs.
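
For context on why a stale key is dangerous: cfs_rq_throttled() is gated
on the static key, so in the fair.c of this era it reads essentially as
below (abridged, not the verbatim source). Once __cfs_bandwidth_used
drops to zero, a still-throttled cfs_rq reports itself as unthrottled,
and paths such as dequeue_task_fair operate on it as if it were runnable.

static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
	/*
	 * If the key is flipped off while cfs_rq->throttled is still
	 * set, this returns 0 for a genuinely throttled group -- the
	 * inconsistency this patch closes.
	 */
	return cfs_bandwidth_used() && cfs_rq->throttled;
}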

Tested: toggle bandwidth back and forth on a loaded cgroup. Caused
crashes in minutes without the patch, hasn't crashed with it.
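
A minimal reproducer sketch for that test, under stated assumptions: it
flips the group's quota between a finite value and RUNTIME_INF (-1) in a
loop while the group is loaded. The cgroup-v1 mount point and the group
name "test" are hypothetical, CPU hogs must already be running inside
the group, and the loop must run as root.

/* Hypothetical reproducer: toggles CFS bandwidth on a loaded group. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed cgroup-v1 layout; adjust to the actual mount/group. */
	const char *quota = "/sys/fs/cgroup/cpu/test/cpu.cfs_quota_us";
	const char *vals[] = { "50000", "-1" };	/* on, off (RUNTIME_INF) */
	int i;

	for (i = 0; ; i ^= 1) {
		int fd = open(quota, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, vals[i], strlen(vals[i])) < 0)
			perror("write");
		close(fd);
		usleep(10000);	/* let throttling engage before flipping back */
	}
}

Without the fix this loop matches the crash scenario above; with it, it
should run indefinitely.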

Signed-off-by: Ben Segall <bsegall@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: pjt@google.com
Link: http://lkml.kernel.org/r/20131016181611.22647.80365.stgit@sword-of-the-dawn.mtv.corp.google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

https://jira.sw.ru/browse/PSBM-51056

Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
 kernel/sched/core.c  |  9 ++++++++-
 kernel/sched/fair.c  | 16 +++++++++-------
 kernel/sched/sched.h |  3 ++-
 3 files changed, 19 insertions(+), 9 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 657b8e4..ccc826a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8470,7 +8470,12 @@  static int __tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 
 	runtime_enabled = quota != RUNTIME_INF;
 	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
-	account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
+	/*
+	 * If we need to toggle cfs_bandwidth_used, off->on must occur
+	 * before making related changes, and on->off must occur afterwards
+	 */
+	if (runtime_enabled && !runtime_was_enabled)
+		cfs_bandwidth_usage_inc();
 	raw_spin_lock_irq(&cfs_b->lock);
 	cfs_b->period = ns_to_ktime(period);
 	cfs_b->quota = quota;
@@ -8498,6 +8503,8 @@  static int __tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 	}
 	if (runtime_enabled != runtime_was_enabled)
 		tg_limit_toggled(tg);
+	if (runtime_was_enabled && !runtime_enabled)
+		cfs_bandwidth_usage_dec();
 	return ret;
 }
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a419f39..b4da77d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -422,13 +422,14 @@  static inline bool cfs_bandwidth_used(void)
 	return static_key_false(&__cfs_bandwidth_used);
 }
 
-void account_cfs_bandwidth_used(int enabled, int was_enabled)
+void cfs_bandwidth_usage_inc(void)
 {
-	/* only need to count groups transitioning between enabled/!enabled */
-	if (enabled && !was_enabled)
-		static_key_slow_inc(&__cfs_bandwidth_used);
-	else if (!enabled && was_enabled)
-		static_key_slow_dec(&__cfs_bandwidth_used);
+	static_key_slow_inc(&__cfs_bandwidth_used);
+}
+
+void cfs_bandwidth_usage_dec(void)
+{
+	static_key_slow_dec(&__cfs_bandwidth_used);
 }
 #else /* HAVE_JUMP_LABEL */
 static bool cfs_bandwidth_used(void)
@@ -436,7 +437,8 @@  static bool cfs_bandwidth_used(void)
 	return true;
 }
 
-void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
+void cfs_bandwidth_usage_inc(void) {}
+void cfs_bandwidth_usage_dec(void) {}
 #endif /* HAVE_JUMP_LABEL */
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d9fe825..894260d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1444,7 +1444,8 @@  extern void print_rt_stats(struct seq_file *m, int cpu);
 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
 extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
 
-extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
+extern void cfs_bandwidth_usage_inc(void);
+extern void cfs_bandwidth_usage_dec(void);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
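
Condensed, the ordering the patch establishes in __tg_set_cfs_bandwidth()
is sketched below (a simplified view of the patched flow, with the per-rq
locking elided; the hunks above are authoritative). Off->on raises the key
before any bandwidth state changes; on->off waits until every cfs_rq has
been unthrottled, so cfs_rq_throttled() stays truthful throughout. Both
key operations also sit outside cfs_b->lock, since static_key_slow_inc()
and static_key_slow_dec() may sleep.

/* Simplified sketch of __tg_set_cfs_bandwidth() after this patch */
if (runtime_enabled && !runtime_was_enabled)
	cfs_bandwidth_usage_inc();	/* off->on: key goes up first */

raw_spin_lock_irq(&cfs_b->lock);
cfs_b->period = ns_to_ktime(period);
cfs_b->quota = quota;
raw_spin_unlock_irq(&cfs_b->lock);

for_each_online_cpu(i) {
	/* the key is still on here, so this check still sees the truth */
	if (cfs_rq_throttled(tg->cfs_rq[i]))
		unthrottle_cfs_rq(tg->cfs_rq[i]);
}

if (runtime_was_enabled && !runtime_enabled)
	cfs_bandwidth_usage_dec();	/* on->off: key goes down last */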

Comments

Kirill Tkhai Aug. 31, 2016, 9:44 a.m.
On 30.08.2016 17:58, Andrey Ryabinin wrote:
> From: Ben Segall <bsegall@google.com>
> 
> commit 1ee14e6c8cddeeb8a490d7b54cd9016e4bb900b4 upstream.
> 
> When we transition cfs_bandwidth_used to false, any currently
> throttled groups will incorrectly return false from cfs_rq_throttled.
> While tg_set_cfs_bandwidth will unthrottle them eventually, currently
> running code (including at least dequeue_task_fair and
> distribute_cfs_runtime) will cause errors.
> 
> Fix this by turning off cfs_bandwidth_used only after unthrottling all
> cfs_rqs.
> 
> Tested: toggle bandwidth back and forth on a loaded cgroup. Caused
> crashes in minutes without the patch, hasn't crashed with it.
> 
> Signed-off-by: Ben Segall <bsegall@google.com>
> Signed-off-by: Peter Zijlstra <peterz@infradead.org>
> Cc: pjt@google.com
> Link: http://lkml.kernel.org/r/20131016181611.22647.80365.stgit@sword-of-the-dawn.mtv.corp.google.com
> Signed-off-by: Ingo Molnar <mingo@kernel.org>
> 
> https://jira.sw.ru/browse/PSBM-51056
> 
> Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>

Acked-by: Kirill Tkhai <ktkhai@virtuozzo.com>
