[2/2] mem: Don't assume guard page is returned in procfs with new kernels

Submitted by Cyrill Gorcunov on June 20, 2017, 2:22 p.m.

Details

Message ID 1497968567-9213-3-git-send-email-gorcunov@openvz.org
State New
Series "mem: Address dropped guard pages from procfs report"
Headers show

Commit Message

Cyrill Gorcunov June 20, 2017, 2:22 p.m.
If the guard page is not reported in show_map_vma, we should
not adjust the vma address, nor should we call unmap_guard_pages
in the restorer.

https://github.com/xemul/criu/issues/322

Signed-off-by: Cyrill Gorcunov <gorcunov@openvz.org>
---
 criu/include/mem.h |  2 ++
 criu/mem.c         | 16 ++++++++++++----
 criu/proc_parse.c  |  4 +++-
 3 files changed, 17 insertions(+), 5 deletions(-)

Patch hide | download patch | download mbox

diff --git a/criu/include/mem.h b/criu/include/mem.h
index 6791bfd0a647..814fb272b144 100644
--- a/criu/include/mem.h
+++ b/criu/include/mem.h
@@ -7,6 +7,7 @@ 
 
 struct parasite_ctl;
 struct vm_area_list;
+struct vma_area;
 struct page_pipe;
 struct pstree_item;
 
@@ -15,6 +16,7 @@  struct mem_dump_ctl {
 	bool	lazy;
 };
 
+extern bool vma_has_guard(struct vma_area *vma);
 extern bool page_is_zero(u64 pme);
 extern bool page_in_parent(bool dirty);
 extern int prepare_mm_pid(struct pstree_item *i);
diff --git a/criu/mem.c b/criu/mem.c
index 3c7186ade2c0..27f876dd659c 100644
--- a/criu/mem.c
+++ b/criu/mem.c
@@ -32,6 +32,11 @@ 
 #include "protobuf.h"
 #include "images/pagemap.pb-c.h"
 
+bool vma_has_guard(struct vma_area *vma)
+{
+	return kdat.mm_guard_page_maps && (vma->e->flags & MAP_GROWSDOWN);
+}
+
 static int task_reset_dirty_track(int pid)
 {
 	int ret;
@@ -530,7 +535,7 @@  int prepare_mm_pid(struct pstree_item *i)
 
 		if (vma_area_is_private(vma, kdat.task_size)) {
 			ri->vmas.priv_size += vma_area_len(vma);
-			if (vma->e->flags & MAP_GROWSDOWN)
+			if (vma_has_guard(vma))
 				ri->vmas.priv_size += PAGE_SIZE;
 		}
 
@@ -665,7 +670,7 @@  static int premap_private_vma(struct pstree_item *t, struct vma_area *vma, void
 	 * A grow-down VMA has a guard page, which protect a VMA below it.
 	 * So one more page is mapped here to restore content of the first page
 	 */
-	if (vma->e->flags & MAP_GROWSDOWN)
+	if (vma_has_guard(vma))
 		vma->e->start -= PAGE_SIZE;
 
 	size = vma_entry_len(vma->e);
@@ -717,7 +722,7 @@  static int premap_private_vma(struct pstree_item *t, struct vma_area *vma, void
 		 */
 
 		paddr = decode_pointer(vma->pvma->premmaped_addr);
-		if (vma->e->flags & MAP_GROWSDOWN)
+		if (vma_has_guard(vma))
 			paddr -= PAGE_SIZE;
 
 		addr = mremap(paddr, size, size,
@@ -733,7 +738,7 @@  static int premap_private_vma(struct pstree_item *t, struct vma_area *vma, void
 	pr_debug("\tpremap %#016"PRIx64"-%#016"PRIx64" -> %016lx\n",
 		vma->e->start, vma->e->end, (unsigned long)addr);
 
-	if (vma->e->flags & MAP_GROWSDOWN) { /* Skip gurad page */
+	if (vma_has_guard(vma)) { /* Skip gurad page */
 		vma->e->start += PAGE_SIZE;
 		vma->premmaped_addr += PAGE_SIZE;
 	}
@@ -1104,6 +1109,9 @@  int unmap_guard_pages(struct pstree_item *t)
 	struct vma_area *vma;
 	struct list_head *vmas = &rsti(t)->vmas.h;
 
+	if (!kdat.mm_guard_page_maps)
+		return 0;
+
 	list_for_each_entry(vma, vmas, list) {
 		if (!vma_area_is(vma, VMA_PREMMAPED))
 			continue;
diff --git a/criu/proc_parse.c b/criu/proc_parse.c
index 041d4512413d..252d6d1716c8 100644
--- a/criu/proc_parse.c
+++ b/criu/proc_parse.c
@@ -25,6 +25,7 @@ 
 #include "kerndat.h"
 #include "vdso.h"
 #include "vma.h"
+#include "mem.h"
 #include "bfd.h"
 #include "proc_parse.h"
 #include "fdinfo.h"
@@ -637,9 +638,10 @@  static int vma_list_add(struct vma_area *vma_area,
 	}
 
 	/* Add a guard page only if here is enough space for it */
-	if ((vma_area->e->flags & MAP_GROWSDOWN) &&
+	if (vma_has_guard(vma_area) &&
 	    *prev_end < vma_area->e->start)
 		vma_area->e->start -= PAGE_SIZE; /* Guard page */
+
 	*prev_end = vma_area->e->end;
 
 	list_add_tail(&vma_area->list, &vma_area_list->h);