[v8,1/9] Implemented per-ve workqueue.

Submitted by Valeriy Vdovin on April 16, 2020, 10:06 a.m.

Details

Message ID 1587031593-780388-2-git-send-email-valeriy.vdovin@virtuozzo.com
State New
Series "Make release_agent per-cgroup property. Run release_agent in proper ve."
Headers show

Commit Message

Valeriy Vdovin April 16, 2020, 10:06 a.m.
Signed-off-by: Valeriy Vdovin <valeriy.vdovin@virtuozzo.com>
---
 include/linux/ve.h |  2 ++
 kernel/ve/ve.c     | 25 +++++++++++++++++++++++++
 2 files changed, 27 insertions(+)

Patch hide | download patch | download mbox

diff --git a/include/linux/ve.h b/include/linux/ve.h
index 9d60838..362dae1 100644
--- a/include/linux/ve.h
+++ b/include/linux/ve.h
@@ -125,6 +125,8 @@  struct ve_struct {
 	struct cn_private	*cn;
 #endif
 	struct kmapset_key	sysfs_perms_key;
+
+	struct workqueue_struct	*wq;
 };
 
 struct ve_devmnt {
diff --git a/kernel/ve/ve.c b/kernel/ve/ve.c
index ad3a698..ba547e1 100644
--- a/kernel/ve/ve.c
+++ b/kernel/ve/ve.c
@@ -481,6 +481,21 @@  static const struct timespec zero_time = { };
 
 extern void cgroup_mark_ve_root(struct ve_struct *ve);
 
+static int ve_workqueue_start(struct ve_struct *ve)
+{
+	ve->wq = alloc_workqueue("ve_wq_%s",
+		WQ_SYSFS|WQ_FREEZABLE|WQ_UNBOUND, 8, ve->ve_name);
+
+	if (!ve->wq)
+		return -ENOMEM;
+	return 0;
+}
+
+static void ve_workqueue_stop(struct ve_struct *ve)
+{
+	destroy_workqueue(ve->wq);
+}
+
 /* under ve->op_sem write-lock */
 static int ve_start_container(struct ve_struct *ve)
 {
@@ -530,6 +545,10 @@  static int ve_start_container(struct ve_struct *ve)
 
 	cgroup_mark_ve_root(ve);
 
+	err = ve_workqueue_start(ve);
+	if (err)
+		goto err_mark_ve;
+
 	ve->is_running = 1;
 
 	printk(KERN_INFO "CT: %s: started\n", ve_name(ve));
@@ -538,6 +557,8 @@  static int ve_start_container(struct ve_struct *ve)
 
 	return 0;
 
+err_mark_ve:
+	ve_hook_iterate_fini(VE_SS_CHAIN, ve);
 err_iterate:
 	ve_stop_umh(ve);
 err_umh:
@@ -595,6 +616,8 @@  void ve_exit_ns(struct pid_namespace *pid_ns)
 	if (!ve->ve_ns || ve->ve_ns->pid_ns != pid_ns)
 		return;
 
+	ve_workqueue_stop(ve);
+
 	/*
 	 * At this point all userspace tasks in container are dead.
 	 */
@@ -1648,6 +1671,8 @@  static int __init ve_subsys_init(void)
 {
 	ve_cachep = KMEM_CACHE(ve_struct, SLAB_PANIC);
 	list_add(&ve0.ve_list, &ve_list_head);
+	ve0.wq = alloc_workqueue("ve0_wq", WQ_FREEZABLE|WQ_UNBOUND, 8);
+	BUG_ON(!ve0.wq);
 	return 0;
 }
 late_initcall(ve_subsys_init);

Comments

Kirill Tkhai April 16, 2020, 10:43 a.m.
On 16.04.2020 13:06, Valeriy Vdovin wrote:
> Signed-off-by: Valeriy Vdovin <valeriy.vdovin@virtuozzo.com>
> ---
>  include/linux/ve.h |  2 ++
>  kernel/ve/ve.c     | 25 +++++++++++++++++++++++++
>  2 files changed, 27 insertions(+)
> 
> diff --git a/include/linux/ve.h b/include/linux/ve.h
> index 9d60838..362dae1 100644
> --- a/include/linux/ve.h
> +++ b/include/linux/ve.h
> @@ -125,6 +125,8 @@ struct ve_struct {
>  	struct cn_private	*cn;
>  #endif
>  	struct kmapset_key	sysfs_perms_key;
> +
> +	struct workqueue_struct	*wq;
>  };
>  
>  struct ve_devmnt {
> diff --git a/kernel/ve/ve.c b/kernel/ve/ve.c
> index ad3a698..ba547e1 100644
> --- a/kernel/ve/ve.c
> +++ b/kernel/ve/ve.c
> @@ -481,6 +481,21 @@ static const struct timespec zero_time = { };
>  
>  extern void cgroup_mark_ve_root(struct ve_struct *ve);
>  
> +static int ve_workqueue_start(struct ve_struct *ve)
> +{
> +	ve->wq = alloc_workqueue("ve_wq_%s",
> +		WQ_SYSFS|WQ_FREEZABLE|WQ_UNBOUND, 8, ve->ve_name);
> +
> +	if (!ve->wq)
> +		return -ENOMEM;
> +	return 0;
> +}
> +
> +static void ve_workqueue_stop(struct ve_struct *ve)
> +{
> +	destroy_workqueue(ve->wq);
> +}
> +
>  /* under ve->op_sem write-lock */
>  static int ve_start_container(struct ve_struct *ve)
>  {
> @@ -530,6 +545,10 @@ static int ve_start_container(struct ve_struct *ve)
>  
>  	cgroup_mark_ve_root(ve);
>  
> +	err = ve_workqueue_start(ve);
> +	if (err)
> +		goto err_mark_ve;

This looks like it has to be in the reverse order:

	err = ve_workqueue_start(ve);
	if (err)
		goto ...;

	cgroup_mark_ve_root(ve);

One of the next patches adds a ve_owner assignment, and ve_owner will remain uncleared
in case ve_workqueue_start() fails.

> +
>  	ve->is_running = 1;
>  
>  	printk(KERN_INFO "CT: %s: started\n", ve_name(ve));
> @@ -538,6 +557,8 @@ static int ve_start_container(struct ve_struct *ve)
>  
>  	return 0;
>  
> +err_mark_ve:
> +	ve_hook_iterate_fini(VE_SS_CHAIN, ve);
>  err_iterate:
>  	ve_stop_umh(ve);
>  err_umh:
> @@ -595,6 +616,8 @@ void ve_exit_ns(struct pid_namespace *pid_ns)
>  	if (!ve->ve_ns || ve->ve_ns->pid_ns != pid_ns)
>  		return;
>  
> +	ve_workqueue_stop(ve);
> +
>  	/*
>  	 * At this point all userspace tasks in container are dead.
>  	 */
> @@ -1648,6 +1671,8 @@ static int __init ve_subsys_init(void)
>  {
>  	ve_cachep = KMEM_CACHE(ve_struct, SLAB_PANIC);
>  	list_add(&ve0.ve_list, &ve_list_head);
> +	ve0.wq = alloc_workqueue("ve0_wq", WQ_FREEZABLE|WQ_UNBOUND, 8);
> +	BUG_ON(!ve0.wq);
>  	return 0;
>  }
>  late_initcall(ve_subsys_init);
>