[RHEL7,v21,13/14] ve/cgroup: cleanup per_cgroot_data

Submitted by Valeriy Vdovin on July 28, 2020, 5:53 p.m.

Details

Message ID 1595958806-338946-14-git-send-email-valeriy.vdovin@virtuozzo.com
State New
Series "Make release_agent per-cgroup property. Run release_agent in proper ve."
Headers show

Commit Message

Valeriy Vdovin July 28, 2020, 5:53 p.m.
Signed-off-by: Valeriy Vdovin <valeriy.vdovin@virtuozzo.com>
---
 include/linux/ve.h |  2 ++
 kernel/cgroup.c    |  1 +
 kernel/ve/ve.c     | 30 +++++++++++++++++++++---------
 3 files changed, 24 insertions(+), 9 deletions(-)

Patch hide | download patch | download mbox

diff --git a/include/linux/ve.h b/include/linux/ve.h
index 5bf275f..2dcd7bb 100644
--- a/include/linux/ve.h
+++ b/include/linux/ve.h
@@ -220,6 +220,8 @@  int ve_set_release_agent_path(struct cgroup *cgroot,
 
 const char *ve_get_release_agent_path(struct cgroup *cgrp_root);
 
+void ve_cleanup_per_cgroot_data(struct ve_struct *ve, struct cgroup *cgrp);
+
 extern struct ve_struct *get_ve(struct ve_struct *ve);
 extern void put_ve(struct ve_struct *ve);
 
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index bb77804..402973a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1591,6 +1591,7 @@  static void cgroup_drop_root(struct cgroupfs_root *root)
 {
 	if (!root)
 		return;
+	ve_cleanup_per_cgroot_data(NULL, &root->top_cgroup);
 
 	BUG_ON(!root->hierarchy_id);
 	spin_lock(&hierarchy_id_lock);
diff --git a/kernel/ve/ve.c b/kernel/ve/ve.c
index 8d78270..db26cbd4 100644
--- a/kernel/ve/ve.c
+++ b/kernel/ve/ve.c
@@ -745,21 +745,33 @@  err_list:
 	return err;
 }
 
-static void ve_per_cgroot_free(struct ve_struct *ve)
+static inline void per_cgroot_data_free(struct per_cgroot_data *data)
+{
+	struct cgroup_rcu_string *release_agent = data->release_agent_path;
+
+	RCU_INIT_POINTER(data->release_agent_path, NULL);
+	if (release_agent)
+		kfree_rcu(release_agent, rcu_head);
+	kfree(data);
+}
+
+void ve_cleanup_per_cgroot_data(struct ve_struct *ve, struct cgroup *cgrp)
 {
 	struct per_cgroot_data *data, *saved;
-	struct cgroup_rcu_string *release_agent;
 
+	BUG_ON(!ve && !cgrp);
+	rcu_read_lock();
+	if (!ve)
+		ve = cgroup_get_ve_owner(cgrp);
 	raw_spin_lock(&ve->per_cgroot_list_lock);
 	list_for_each_entry_safe(data, saved, &ve->per_cgroot_list, list) {
-		release_agent = data->release_agent_path;
-		RCU_INIT_POINTER(data->release_agent_path, NULL);
-		if (release_agent)
-			kfree_rcu(release_agent, rcu_head);
-		list_del_init(&data->list);
-		kfree(data);
+		if (!cgrp || data->cgroot == cgrp) {
+			list_del_init(&data->list);
+			per_cgroot_data_free(data);
+		}
 	}
 	raw_spin_unlock(&ve->per_cgroot_list_lock);
+	rcu_read_unlock();
 }
 
 void ve_stop_ns(struct pid_namespace *pid_ns)
@@ -812,7 +824,7 @@  void ve_exit_ns(struct pid_namespace *pid_ns)
 
 	ve_workqueue_stop(ve);
 
-	ve_per_cgroot_free(ve);
+	ve_cleanup_per_cgroot_data(ve, NULL);
 
 	/*
 	 * At this point all userspace tasks in container are dead.

Comments

Kirill Tkhai July 31, 2020, 8:26 a.m.
On 28.07.2020 20:53, Valeriy Vdovin wrote:

Can we have a better description here?

> Signed-off-by: Valeriy Vdovin <valeriy.vdovin@virtuozzo.com>
> ---
>  include/linux/ve.h |  2 ++
>  kernel/cgroup.c    |  1 +
>  kernel/ve/ve.c     | 30 +++++++++++++++++++++---------
>  3 files changed, 24 insertions(+), 9 deletions(-)
> 
> diff --git a/include/linux/ve.h b/include/linux/ve.h
> index 5bf275f..2dcd7bb 100644
> --- a/include/linux/ve.h
> +++ b/include/linux/ve.h
> @@ -220,6 +220,8 @@ int ve_set_release_agent_path(struct cgroup *cgroot,
>  
>  const char *ve_get_release_agent_path(struct cgroup *cgrp_root);
>  
> +void ve_cleanup_per_cgroot_data(struct ve_struct *ve, struct cgroup *cgrp);
> +
>  extern struct ve_struct *get_ve(struct ve_struct *ve);
>  extern void put_ve(struct ve_struct *ve);
>  
> diff --git a/kernel/cgroup.c b/kernel/cgroup.c
> index bb77804..402973a 100644
> --- a/kernel/cgroup.c
> +++ b/kernel/cgroup.c
> @@ -1591,6 +1591,7 @@ static void cgroup_drop_root(struct cgroupfs_root *root)
>  {
>  	if (!root)
>  		return;
> +	ve_cleanup_per_cgroot_data(NULL, &root->top_cgroup);
>  
>  	BUG_ON(!root->hierarchy_id);
>  	spin_lock(&hierarchy_id_lock);
> diff --git a/kernel/ve/ve.c b/kernel/ve/ve.c
> index 8d78270..db26cbd4 100644
> --- a/kernel/ve/ve.c
> +++ b/kernel/ve/ve.c
> @@ -745,21 +745,33 @@ err_list:
>  	return err;
>  }
>  
> -static void ve_per_cgroot_free(struct ve_struct *ve)
> +static inline void per_cgroot_data_free(struct per_cgroot_data *data)
> +{
> +	struct cgroup_rcu_string *release_agent = data->release_agent_path;
> +
> +	RCU_INIT_POINTER(data->release_agent_path, NULL);
> +	if (release_agent)
> +		kfree_rcu(release_agent, rcu_head);
> +	kfree(data);
> +}
> +
> +void ve_cleanup_per_cgroot_data(struct ve_struct *ve, struct cgroup *cgrp)
>  {
>  	struct per_cgroot_data *data, *saved;
> -	struct cgroup_rcu_string *release_agent;
>  
> +	BUG_ON(!ve && !cgrp);
> +	rcu_read_lock();
> +	if (!ve)
> +		ve = cgroup_get_ve_owner(cgrp);
>  	raw_spin_lock(&ve->per_cgroot_list_lock);
>  	list_for_each_entry_safe(data, saved, &ve->per_cgroot_list, list) {
> -		release_agent = data->release_agent_path;
> -		RCU_INIT_POINTER(data->release_agent_path, NULL);
> -		if (release_agent)
> -			kfree_rcu(release_agent, rcu_head);
> -		list_del_init(&data->list);
> -		kfree(data);
> +		if (!cgrp || data->cgroot == cgrp) {
> +			list_del_init(&data->list);
> +			per_cgroot_data_free(data);
> +		}
>  	}
>  	raw_spin_unlock(&ve->per_cgroot_list_lock);
> +	rcu_read_unlock();
>  }
>  
>  void ve_stop_ns(struct pid_namespace *pid_ns)
> @@ -812,7 +824,7 @@ void ve_exit_ns(struct pid_namespace *pid_ns)
>  
>  	ve_workqueue_stop(ve);
>  
> -	ve_per_cgroot_free(ve);
> +	ve_cleanup_per_cgroot_data(ve, NULL);
>  
>  	/*
>  	 * At this point all userspace tasks in container are dead.
>