[6/6] mem: Cleanup dumping pages controls

Submitted by Pavel Emelianov on Sept. 19, 2016, 11:18 a.m.

Details

Message ID 57DFC97E.50406@virtuozzo.com
State Rejected
Series "Try to sanitize mem dump and pages xfer mess a bit"

Commit Message

Pavel Emelianov Sept. 19, 2016, 11:18 a.m.
This makes the API cleaner -- now we tell parasite_dump_pages_seized
only two things: whether this is a pre-dump and, if not, whether it
is a lazy dump.

All the other decisions the function now makes itself.

Signed-off-by: Pavel Emelyanov <xemul@virtuozzo.com>
---
 criu/cr-dump.c     |  4 ++--
 criu/include/mem.h |  2 +-
 criu/mem.c         | 37 ++++++++++++++++++++++++-------------
 3 files changed, 27 insertions(+), 16 deletions(-)
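
For reference, here is a minimal C sketch (not part of the patch; the
helper names are made up for illustration) of the decision table that
__parasite_dump_pages_seized() derives from the two flags in this patch:

	#include <stdbool.h>

	struct mem_dump_ctl {
		bool	pre_dump;	/* this is a pre-dump pass */
		bool	lazy;		/* this is a lazy dump */
	};

	/* PP_CHUNK_MODE pushes pages portion by portion, so it is only
	 * usable when nobody needs the pp after the call returns. */
	static bool use_chunk_mode(const struct mem_dump_ctl *mdc)
	{
		return !(mdc->pre_dump || mdc->lazy);
	}

	/* Only pre-dump leaves the page transfer to the caller. */
	static bool open_xfer_here(const struct mem_dump_ctl *mdc)
	{
		return !mdc->pre_dump;
	}

	/* Both pre-dump and lazy dump pick the pipe up later via
	 * ctl->mem_pp, so on success it must survive the call. */
	static bool keep_page_pipe(const struct mem_dump_ctl *mdc)
	{
		return mdc->pre_dump || mdc->lazy;
	}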

Patch

diff --git a/criu/cr-dump.c b/criu/cr-dump.c
index d845ebd..d30dec8 100644
--- a/criu/cr-dump.c
+++ b/criu/cr-dump.c
@@ -1176,7 +1176,7 @@  static int pre_dump_one_task(struct pstree_item *item)
 
 	parasite_ctl->pid.virt = item->pid.virt = misc.pid;
 
-	mdc.delayed_dump = true;
+	mdc.pre_dump = true;
 	mdc.lazy = false;
 
 	ret = parasite_dump_pages_seized(parasite_ctl, &vmas, &mdc);
@@ -1336,7 +1336,7 @@  static int dump_one_task(struct pstree_item *item)
 		}
 	}
 
-	mdc.delayed_dump = opts.lazy_pages;
+	mdc.pre_dump = false;
 	mdc.lazy = opts.lazy_pages;
 
 	ret = parasite_dump_pages_seized(parasite_ctl, &vmas, &mdc);
diff --git a/criu/include/mem.h b/criu/include/mem.h
index 4b18af1..f1e6b78 100644
--- a/criu/include/mem.h
+++ b/criu/include/mem.h
@@ -11,7 +11,7 @@  struct page_pipe;
 struct pstree_item;
 
 struct mem_dump_ctl {
-	bool	delayed_dump;
+	bool	pre_dump;
 	bool	lazy;
 };
 
diff --git a/criu/mem.c b/criu/mem.c
index 09a225d..96083ab 100644
--- a/criu/mem.c
+++ b/criu/mem.c
@@ -199,7 +199,7 @@  static int generate_iovs(struct vma_area *vma, struct page_pipe *pp, u64 *map, u
 }
 
 static struct parasite_dump_pages_args *prep_dump_pages_args(struct parasite_ctl *ctl,
-		struct vm_area_list *vma_area_list, bool delayed_dump)
+		struct vm_area_list *vma_area_list, bool skip_non_trackable)
 {
 	struct parasite_dump_pages_args *args;
 	struct parasite_vma_entry *p_vma;
@@ -217,7 +217,7 @@  static struct parasite_dump_pages_args *prep_dump_pages_args(struct parasite_ctl
 		 * Kernel write to aio ring is not soft-dirty tracked,
 		 * so we ignore them at pre-dump.
 		 */
-		if (vma_entry_is(vma->e, VMA_AREA_AIORING) && delayed_dump)
+		if (vma_entry_is(vma->e, VMA_AREA_AIORING) && skip_non_trackable)
 			continue;
 		if (vma->e->prot & PROT_READ)
 			continue;
@@ -292,7 +292,6 @@  static int __parasite_dump_pages_seized(struct parasite_ctl *ctl,
 	int ret = -1;
 	unsigned cpp_flags = 0;
 	unsigned long pmc_size;
-	bool should_xfer = (!mdc->delayed_dump || mdc->lazy);
 
 	pr_info("\n");
 	pr_info("Dumping pages (type: %d pid: %d)\n", CR_FD_PAGES, ctl->pid.real);
@@ -314,17 +313,27 @@  static int __parasite_dump_pages_seized(struct parasite_ctl *ctl,
 		return -1;
 
 	ret = -1;
-	if (!mdc->delayed_dump)
+	if (!(mdc->pre_dump || mdc->lazy))
+		/*
+		 * Chunk mode pushes pages portion by portion. This mode
+		 * only works when we don't need to keep pp for later
+		 * use, i.e. on non-lazy non-predump.
+		 */
 		cpp_flags |= PP_CHUNK_MODE;
 	if (!seized_native(ctl))
 		cpp_flags |= PP_COMPAT;
-	ctl->mem_pp = pp = create_page_pipe(vma_area_list->priv_size,
+	pp = create_page_pipe(vma_area_list->priv_size,
 					    mdc->lazy ? NULL : pargs_iovs(args),
 					    cpp_flags);
 	if (!pp)
 		goto out;
 
-	if (should_xfer) {
+	if (!mdc->pre_dump) {
+		/*
+		 * Regular dump -- create xfer object and send pages to it
+		 * right here. For pre-dumps the pp will be taken by the
+		 * caller and handled later.
+		 */
 		ret = open_page_xfer(&xfer, CR_FD_PAGEMAP, ctl->pid.virt);
 		if (ret < 0)
 			goto out_pp;
@@ -350,7 +359,7 @@  static int __parasite_dump_pages_seized(struct parasite_ctl *ctl,
 				!vma_area_is(vma_area, VMA_ANON_SHARED))
 			continue;
 		if (vma_entry_is(vma_area->e, VMA_AREA_AIORING)) {
-			if (mdc->delayed_dump)
+			if (mdc->pre_dump)
 				continue;
 			has_parent = false;
 		}
@@ -365,11 +374,11 @@  again:
 			ret = generate_iovs(vma_area, pp, map, &off,
 				has_parent);
 			if (ret == -EAGAIN) {
-				BUG_ON(mdc->delayed_dump);
+				BUG_ON((pp->flags & PP_CHUNK_MODE) || mdc->pre_dump);
 
 				ret = drain_pages(pp, ctl, args);
 				if (!ret)
-					ret = xfer_pages(pp, &xfer, false);
+					ret = xfer_pages(pp, &xfer, mdc->lazy /* false actually */);
 				if (!ret) {
 					page_pipe_reinit(pp);
 					goto again;
@@ -384,7 +393,7 @@  again:
 		memcpy(pargs_iovs(args), pp->iovs,
 		       sizeof(struct iovec) * pp->nr_iovs);
 	ret = drain_pages(pp, ctl, args);
-	if (!ret && should_xfer)
+	if (!ret && !mdc->pre_dump)
 		ret = xfer_pages(pp, &xfer, mdc->lazy);
 	if (ret)
 		goto out_xfer;
@@ -397,11 +406,13 @@  again:
 
 	ret = task_reset_dirty_track(ctl->pid.real);
 out_xfer:
-	if (should_xfer)
+	if (!mdc->pre_dump)
 		xfer.close(&xfer);
 out_pp:
-	if (ret || !mdc->delayed_dump)
+	if (ret || !(mdc->pre_dump || mdc->lazy))
 		destroy_page_pipe(pp);
+	else
+		ctl->mem_pp = pp;
 out:
 	pmc_fini(&pmc);
 	pr_info("----------------------------------------\n");
@@ -415,7 +426,7 @@  int parasite_dump_pages_seized(struct parasite_ctl *ctl,
 	int ret;
 	struct parasite_dump_pages_args *pargs;
 
-	pargs = prep_dump_pages_args(ctl, vma_area_list, mdc->delayed_dump);
+	pargs = prep_dump_pages_args(ctl, vma_area_list, mdc->pre_dump);
 
 	/*
 	 * Add PROT_READ protection for all VMAs we're about to
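
To make the chunk-mode comment in the patch concrete, here is a
self-contained toy model of the drain-and-retry protocol that the
again: loop implements (not CRIU code; toy_pipe, PIPE_CAP and the
helper names are invented for illustration): fill a bounded pipe, and
on -EAGAIN transfer the accumulated portion, reinit the pipe and retry
from where we stopped.

	#include <errno.h>
	#include <stdio.h>

	#define PIPE_CAP 4

	struct toy_pipe { int pages[PIPE_CAP]; int nr; };

	static int pipe_add(struct toy_pipe *pp, int page)
	{
		if (pp->nr == PIPE_CAP)
			return -EAGAIN;	/* full: caller must drain a portion */
		pp->pages[pp->nr++] = page;
		return 0;
	}

	static void pipe_xfer_and_reinit(struct toy_pipe *pp)
	{
		for (int i = 0; i < pp->nr; i++)
			printf("xfer page %d\n", pp->pages[i]);
		pp->nr = 0;	/* like page_pipe_reinit() */
	}

	int main(void)
	{
		struct toy_pipe pp = { .nr = 0 };
		int page = 0, nr_pages = 10;

		while (page < nr_pages) {
	again:
			if (pipe_add(&pp, page) == -EAGAIN) {
				pipe_xfer_and_reinit(&pp);	/* push this portion */
				goto again;			/* retry the same page */
			}
			page++;
		}
		pipe_xfer_and_reinit(&pp);	/* final, possibly short, portion */
		return 0;
	}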

Comments

Mike Rapoport Sept. 20, 2016, 6:34 a.m.
On Mon, Sep 19, 2016 at 02:18:22PM +0300, Pavel Emelyanov wrote:
> [...]
> @@ -365,11 +374,11 @@ again:
>  			ret = generate_iovs(vma_area, pp, map, &off,
>  				has_parent);
>  			if (ret == -EAGAIN) {
> -				BUG_ON(mdc->delayed_dump);
> +				BUG_ON((pp->flags & PP_CHUNK_MODE) || mdc->pre_dump);

I think it should be BUG_ON(!(pp->flags & PP_CHUNK_MODE))...
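
For reference: if, as the suggestion implies, generate_iovs()
propagates -EAGAIN only when the pipe was created with PP_CHUNK_MODE
and has filled up, the assertion should fire when chunk mode is
absent, not present. A sketch of the corrected hunk (illustrative,
not a tested patch):

			if (ret == -EAGAIN) {
				/* -EAGAIN is the pipe's "full, drain me"
				 * signal, reported only in chunk mode */
				BUG_ON(!(pp->flags & PP_CHUNK_MODE));
				/* ... drain, xfer, reinit, goto again ... */
			}

And since the patch sets PP_CHUNK_MODE only when
!(mdc->pre_dump || mdc->lazy), chunk mode already implies
!mdc->pre_dump, so a separate check of mdc->pre_dump would be
redundant.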
