When moving pages to the new process in phx_restart, the reverse mapping is not set up correctly, resulting in the anon_vma field of vm_area_struct being a null pointer.
In the move_page_range function, this causes an early return at line 1508:
|
move_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) |
|
{ |
|
pgd_t *src_pgd, *dst_pgd; |
|
unsigned long next; |
|
unsigned long addr = src_vma->vm_start; |
|
unsigned long end = src_vma->vm_end; |
|
struct mm_struct *dst_mm = dst_vma->vm_mm; |
|
struct mm_struct *src_mm = src_vma->vm_mm; |
|
struct mmu_notifier_range range; |
|
int ret; |
|
|
|
/* |
|
* Don't copy ptes where a page fault will fill them correctly. |
|
* Fork becomes much lighter when there are big shared or private |
|
* readonly mappings. The tradeoff is that copy_page_range is more |
|
* efficient than faulting. |
|
*/ |
|
if (!(src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) && |
|
!src_vma->anon_vma) |
|
return 0; |
The current temporary workaround is to call anon_vma_fork after moving the pages:
|
ret = 0; |
|
dst_pgd = pgd_offset(dst_mm, addr); |
|
src_pgd = pgd_offset(src_mm, addr); |
|
do { |
|
next = pgd_addr_end(addr, end); |
|
if (pgd_none_or_clear_bad(src_pgd)) |
|
continue; |
|
if (unlikely(move_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd, |
|
addr, next))) { |
|
ret = -ENOMEM; |
|
break; |
|
} |
|
} while (dst_pgd++, src_pgd++, addr = next, addr != end); |
|
|
|
/* TODO: add the following line may help multiple recoveries? */ |
|
if (anon_vma_fork(dst_vma, src_vma)) { |
|
printk("anon_vma_fork failed, but why?"); |
|
ret = -ENOMEM; |
|
} |
|
|
This may result in additional memory overhead, since we do not need all of the reverse mappings that a fork system call sets up.
When moving pages to the new process in phx_restart, the reverse mapping is not set up correctly, resulting in the anon_vma field of vm_area_struct being a null pointer.
In the move_page_range function, this causes an early return at line 1508:
phoenix-kernel/mm/memory.c
Lines 1489 to 1508 in b4e8179
The current temporary workaround is to call anon_vma_fork after moving the pages:
phoenix-kernel/mm/memory.c
Lines 1562 to 1581 in 1e76d17
This may result in additional memory overhead, since we do not need all of the reverse mappings that a fork system call sets up.