--- x/fs/fs_context.c
+++ y/fs/fs_context.c
@@ -282,6 +282,8 @@ static struct fs_context *alloc_fs_conte
 	struct fs_context *fc;
 	int ret = -ENOMEM;
 
+	if (current->flags & PF_EXITING)
+		return ERR_PTR(-ENOMEM);
 	fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL_ACCOUNT);
 	if (!fc)
 		return ERR_PTR(-ENOMEM);
--- x/arch/arm64/include/asm/pgtable.h
+++ y/arch/arm64/include/asm/pgtable.h
@@ -64,7 +64,11 @@ static inline void queue_pte_barriers(vo
 {
 	unsigned long flags;
 
-	VM_WARN_ON(in_interrupt());
+	if (in_interrupt()) {
+		emit_pte_barriers();
+		return;
+	}
+
 	flags = read_thread_flags();
 
 	if (flags & BIT(TIF_LAZY_MMU)) {
@@ -79,20 +83,38 @@ static inline void queue_pte_barriers(vo
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 static inline void arch_enter_lazy_mmu_mode(void)
 {
-	VM_WARN_ON(in_interrupt());
-	VM_WARN_ON(test_thread_flag(TIF_LAZY_MMU));
+	/*
+	 * lazy_mmu_mode is not supposed to permit nesting. But in practice this
+	 * does happen with CONFIG_DEBUG_PAGEALLOC, where a page allocation
+	 * inside a lazy_mmu_mode section (such as zap_pte_range()) will change
+	 * permissions on the linear map with apply_to_page_range(), which
+	 * re-enters lazy_mmu_mode. So we tolerate nesting in our
+	 * implementation. The first call to arch_leave_lazy_mmu_mode() will
+	 * flush and clear the flag such that the remainder of the work in the
+	 * outer nest behaves as if outside of lazy mmu mode. This is safe and
+	 * keeps tracking simple.
+	 */
+
+	if (in_interrupt())
+		return;
 
 	set_thread_flag(TIF_LAZY_MMU);
 }
 
 static inline void arch_flush_lazy_mmu_mode(void)
 {
+	if (in_interrupt())
+		return;
+
 	if (test_and_clear_thread_flag(TIF_LAZY_MMU_PENDING))
 		emit_pte_barriers();
 }
 
 static inline void arch_leave_lazy_mmu_mode(void)
 {
+	if (in_interrupt())
+		return;
+
 	arch_flush_lazy_mmu_mode();
 	clear_thread_flag(TIF_LAZY_MMU);
 }
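
Not part of the patch: a minimal stand-alone sketch of the nesting behaviour
described in the arch_enter_lazy_mmu_mode() comment above, assuming only what
that comment states. A single flag is set on enter, barriers queued while it is
set are deferred, and the first leave flushes the pending work and clears the
flag, so the remainder of the outer nest behaves as if outside lazy mmu mode.
All names below (enter_lazy, leave_lazy, queue_barriers, ...) are invented for
illustration and do not exist in the kernel.

	#include <stdbool.h>
	#include <stdio.h>

	static bool lazy;	/* models TIF_LAZY_MMU */
	static bool pending;	/* models TIF_LAZY_MMU_PENDING */

	static void emit_barriers(void) { puts("dsb; isb"); }

	static void queue_barriers(void)
	{
		if (lazy)
			pending = true;		/* defer while in lazy mode */
		else
			emit_barriers();	/* emit immediately otherwise */
	}

	static void enter_lazy(void) { lazy = true; }	/* nested enter is a no-op */

	static void leave_lazy(void)
	{
		if (pending) {			/* first leave flushes ... */
			pending = false;
			emit_barriers();
		}
		lazy = false;			/* ... and clears the flag for the whole nest */
	}

	int main(void)
	{
		enter_lazy();
		queue_barriers();	/* deferred */
		enter_lazy();		/* nested, e.g. the DEBUG_PAGEALLOC path */
		queue_barriers();	/* still deferred */
		leave_lazy();		/* emits once, drops lazy mode */
		queue_barriers();	/* outer nest now emits immediately */
		leave_lazy();		/* nothing left pending */
		return 0;
	}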