--- x/include/linux/mm.h
+++ y/include/linux/mm.h
@@ -2626,6 +2626,9 @@ static inline void zap_vma_pages(struct
 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
 		struct vm_area_struct *start_vma, unsigned long start,
 		unsigned long end, unsigned long tree_end);
+void lock_unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
+		struct vm_area_struct *start_vma, unsigned long start,
+		unsigned long end, unsigned long tree_end);
 
 struct mmu_notifier_range;
 
--- x/mm/memory.c
+++ y/mm/memory.c
@@ -2104,7 +2104,41 @@ void unmap_vmas(struct mmu_gather *tlb,
 	} while (vma && likely(!xa_is_zero(vma)));
 	mmu_notifier_invalidate_range_end(&range);
 }
+
+/*
+ * lock_unmap_vmas - unmap_vmas() variant that write-locks each VMA first.
+ *
+ * Same walk as unmap_vmas(), but calls vma_start_write() on every VMA
+ * before zapping it, so the caller must hold mmap_lock for write across
+ * the whole walk (vma_start_write() requires the write lock).
+ */
+void lock_unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
+		struct vm_area_struct *vma, unsigned long start_addr,
+		unsigned long end_addr, unsigned long tree_end)
+{
+	struct mmu_notifier_range range;
+	struct zap_details details = {
+		.zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
+		/* Careful - we need to zap private pages too! */
+		.even_cows = true,
+	};
+
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
+				start_addr, end_addr);
+	mmu_notifier_invalidate_range_start(&range);
+	do {
+		unsigned long start = start_addr;
+		unsigned long end = end_addr;
+
+		hugetlb_zap_begin(vma, &start, &end);
+		vma_start_write(vma);
+		unmap_single_vma(tlb, vma, start, end, &details);
+		hugetlb_zap_end(vma, &details);
+		vma = mas_find(mas, tree_end - 1);
+	} while (vma && likely(!xa_is_zero(vma)));
+	mmu_notifier_invalidate_range_end(&range);
+}
 
 /**
  * zap_page_range_single_batched - remove user pages in a given range
  * @tlb: pointer to the caller's struct mmu_gather
--- x/mm/vma.c
+++ y/mm/vma.c
@@ -1228,7 +1228,8 @@ static inline void vms_clear_ptes(struct
 	mas_set(mas_detach, 1);
 	tlb_gather_mmu(&tlb, vms->vma->vm_mm);
 	update_hiwater_rss(vms->vma->vm_mm);
-	unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
-		   vms->vma_count);
+	/* Write-locks each detached VMA; mmap_lock is still held for write. */
+	lock_unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
+			vms->vma_count);
 
 	mas_set(mas_detach, 1);
@@ -1271,8 +1272,10 @@ static void vms_complete_munmap_vmas(str
 	mm = current->mm;
 	mm->map_count -= vms->vma_count;
 	mm->locked_vm -= vms->locked_vm;
-	if (vms->unlock)
-		mmap_write_downgrade(mm);
 
-	if (!vms->nr_pages)
+	if (!vms->nr_pages) {
+		/* Nothing to zap, but the write lock must still be dropped. */
+		if (vms->unlock)
+			mmap_write_unlock(mm);
 		return;
+	}
@@ -1298,7 +1301,7 @@ static void vms_complete_munmap_vmas(str
 	vm_unacct_memory(vms->nr_accounted);
 	validate_mm(mm);
 	if (vms->unlock)
-		mmap_read_unlock(mm);
+		mmap_write_unlock(mm);
 
 	__mt_destroy(mas_detach->tree);
 }