[5/7] ve/cgroup: private per-cgroup-root data container

Submitted by Valeriy Vdovin on April 1, 2020, 3:41 p.m.

Details

Message ID 1585755688-20348-6-git-send-email-valeriy.vdovin@virtuozzo.com
State New
Series "Make release_agent per-cgroup property. Run release_agent in proper ve."

Commit Message

Valeriy Vdovin April 1, 2020, 3:41 p.m.
Since each ve is internally attached to a particular css_set via its
init_task, it is convenient to have a container for parameters that
are common to each cgroup subsystem hierarchy rooted at the ve's
virtual root.
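
A later patch in the series is expected to hang per-cgroup-root state
(such as the release_agent path) off this container. For illustration
only, a caller could look the data up, creating it on first use,
roughly as follows. per_cgroot_get() is a hypothetical name, not part
of this patch, and the node is preallocated here because
kzalloc(GFP_KERNEL) may sleep and so must not be called under the raw
spinlock:

static struct per_cgroot_data *per_cgroot_get(struct ve_struct *ve,
	struct cgroup *cgroot)
{
	struct per_cgroot_data *data, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	raw_spin_lock(&ve->per_cgroot_list_lock);
	data = per_cgroot_data_find_locked(&ve->per_cgroot_list, cgroot);
	if (!data) {
		/* First lookup for this root: publish the new node. */
		new->cgroot = cgroot;
		list_add(&new->list, &ve->per_cgroot_list);
		data = new;
		new = NULL;
	}
	raw_spin_unlock(&ve->per_cgroot_list_lock);

	/* An entry already existed: drop the unused preallocation. */
	kfree(new);
	return data;
}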

Signed-off-by: Valeriy Vdovin <valeriy.vdovin@virtuozzo.com>
---
 include/linux/ve.h |  7 +++++++
 kernel/ve/ve.c     | 60 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 66 insertions(+), 1 deletion(-)


diff --git a/include/linux/ve.h b/include/linux/ve.h
index fcdb25a..12b5873 100644
--- a/include/linux/ve.h
+++ b/include/linux/ve.h
@@ -137,6 +137,13 @@ struct ve_struct {
 	struct work_struct	release_agent_work;
 
 	/*
+	 * List of private data, one entry per
+	 * root cgroup in the ve's css_set.
+	 */
+	struct list_head	per_cgroot_list;
+	struct raw_spinlock	per_cgroot_list_lock;
+
+	/*
 	 * All tasks, that belong to this ve, live
 	 * in cgroups, that are children to cgroups
 	 * that form this css_set.
diff --git a/kernel/ve/ve.c b/kernel/ve/ve.c
index 7eff0e7..0136bf2 100644
--- a/kernel/ve/ve.c
+++ b/kernel/ve/ve.c
@@ -45,6 +45,14 @@ 
 #include <linux/vziptable_defs.h>
 #include <net/rtnetlink.h>
 
+struct per_cgroot_data {
+	struct list_head list;
+	/*
+	 * The root cgroup this data belongs to.
+	 */
+	struct cgroup *cgroot;
+};
+
 extern struct kmapset_set sysfs_ve_perms_set;
 
 static struct kmem_cache *ve_cachep;
@@ -91,6 +99,8 @@ struct ve_struct ve0 = {
 	.release_list		= LIST_HEAD_INIT(ve0.release_list),
 	.release_agent_work	= __WORK_INITIALIZER(ve0.release_agent_work,
 					cgroup_release_agent),
+	.per_cgroot_list	= LIST_HEAD_INIT(ve0.per_cgroot_list),
+	.per_cgroot_list_lock	= __RAW_SPIN_LOCK_UNLOCKED(ve0.per_cgroot_list_lock),
 };
 EXPORT_SYMBOL(ve0);
 
@@ -117,6 +127,31 @@ void put_ve(struct ve_struct *ve)
 }
 EXPORT_SYMBOL(put_ve);
 
+static struct per_cgroot_data *per_cgroot_data_find_locked(
+	struct list_head *per_cgroot_list, struct cgroup *cgroot)
+{
+	struct per_cgroot_data *data;
+
+	list_for_each_entry(data, per_cgroot_list, list) {
+		if (data->cgroot == cgroot)
+			return data;
+	}
+	return NULL;
+}
+
+static struct per_cgroot_data *per_cgroot_data_new_locked(
+	struct list_head *per_cgroot_list, struct cgroup *cgroot)
+{
+	struct per_cgroot_data *data;
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+
+	data->cgroot = cgroot;
+	list_add(&data->list, per_cgroot_list);
+	return data;
+}
+
 struct cgroup_subsys_state *ve_get_init_css(struct ve_struct *ve, int subsys_id)
 {
 	struct cgroup_subsys_state *css, *tmp;
@@ -635,7 +670,26 @@ err_list:
 	return err;
 }
 
-extern void cgroup_unbind_roots_from_ve(struct ve_struct *ve);
+static void per_cgroot_free_all_locked(struct list_head *per_cgroot_list)
+{
+	struct per_cgroot_data *data, *saved;
+	struct cgroup_rcu_string *release_agent;
+	list_for_each_entry_safe(data, saved, per_cgroot_list, list) {
+		release_agent = data->release_agent_path;
+		RCU_INIT_POINTER(data->release_agent_path, NULL);
+		if (release_agent)
+			kfree_rcu(release_agent, rcu_head);
+		list_del_init(&data->list);
+		kfree(data);
+	}
+}
+
+static void ve_per_cgroot_free(struct ve_struct *ve)
+{
+	raw_spin_lock(&ve->per_cgroot_list_lock);
+	per_cgroot_free_all_locked(&ve->per_cgroot_list);
+	raw_spin_unlock(&ve->per_cgroot_list_lock);
+}
 
 void ve_stop_ns(struct pid_namespace *pid_ns)
 {
@@ -685,6 +739,8 @@ void ve_stop_ns(struct pid_namespace *pid_ns)
 
 	down_write(&ve->op_sem);
 
+	ve_per_cgroot_free(ve);
+
 	/*
 	 * Neither it can be in pseudosuper state
 	 * anymore, setup it again if needed.
@@ -783,6 +839,7 @@ static struct cgroup_subsys_state *ve_create(struct cgroup *cg)
 
 	INIT_WORK(&ve->release_agent_work, cgroup_release_agent);
 	raw_spin_lock_init(&ve->release_list_lock);
+	raw_spin_lock_init(&ve->per_cgroot_list_lock);
 
 	ve->_randomize_va_space = ve0._randomize_va_space;
 
@@ -819,6 +876,7 @@ do_init:
 	INIT_LIST_HEAD(&ve->ve_list);
 	INIT_LIST_HEAD(&ve->devmnt_list);
 	INIT_LIST_HEAD(&ve->release_list);
+	INIT_LIST_HEAD(&ve->per_cgroot_list);
 	mutex_init(&ve->devmnt_mutex);
 
 #ifdef CONFIG_AIO

Comments

Pavel Tikhomirov April 2, 2020, 10:22 a.m.
On 4/1/20 6:41 PM, Valeriy Vdovin wrote:
> [...]
> +static void per_cgroot_free_all_locked(struct list_head *per_cgroot_list)
> +{
> +	struct per_cgroot_data *data, *saved;
> +	struct cgroup_rcu_string *release_agent;
> +	list_for_each_entry_safe(data, saved, per_cgroot_list, list) {
> +		release_agent = data->release_agent_path;
> +		RCU_INIT_POINTER(data->release_agent_path, NULL);

There is no release_agent_path field in struct per_cgroot_data yet, so
this patch does not build on its own.

> +		if (release_agent)
> +			kfree_rcu(release_agent, rcu_head);
> +		list_del_init(&data->list);
> +		kfree(data);
> +	}
> +}
> [...]
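
The free path quoted above tears down data->release_agent_path, a
field this patch never declares. Judging from that use, a later patch
in the series presumably extends the struct roughly like this (an
inference, not code quoted from the series):

struct per_cgroot_data {
	struct list_head list;
	/*
	 * The root cgroup this data belongs to.
	 */
	struct cgroup *cgroot;
	/* Written under per_cgroot_list_lock, freed via kfree_rcu(). */
	struct cgroup_rcu_string __rcu *release_agent_path;
};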