INFO: task syz-executor:5827 blocked for more than 143 seconds.
      Not tainted syzkaller #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz-executor state:D stack:20032 pid:5827 tgid:5827 ppid:1 task_flags:0x400140 flags:0x00080002
Call Trace:
 context_switch kernel/sched/core.c:5256 [inline]
 __schedule+0x14bc/0x5000 kernel/sched/core.c:6863
 __schedule_loop kernel/sched/core.c:6945 [inline]
 schedule+0x165/0x360 kernel/sched/core.c:6960
 io_schedule+0x80/0xd0 kernel/sched/core.c:7792
 folio_wait_bit_common+0x6b0/0xb80 mm/filemap.c:1317
 __folio_lock mm/filemap.c:1693 [inline]
 folio_lock include/linux/pagemap.h:1151 [inline]
 __filemap_get_folio+0x139/0xaf0 mm/filemap.c:1947
 truncate_inode_pages_range+0x3ed/0xd90 mm/truncate.c:413
 evict+0x607/0xae0 fs/inode.c:839
 dispose_list fs/inode.c:879 [inline]
 evict_inodes+0x753/0x7e0 fs/inode.c:933
 generic_shutdown_super+0x9a/0x2c0 fs/super.c:628
 kill_block_super+0x44/0x90 fs/super.c:1730
 deactivate_locked_super+0xbc/0x130 fs/super.c:474
 cleanup_mnt+0x425/0x4c0 fs/namespace.c:1318
 task_work_run+0x1d4/0x260 kernel/task_work.c:233
 resume_user_mode_work include/linux/resume_user_mode.h:50 [inline]
 __exit_to_user_mode_loop kernel/entry/common.c:44 [inline]
 exit_to_user_mode_loop+0xff/0x4f0 kernel/entry/common.c:75
 __exit_to_user_mode_prepare include/linux/irq-entry-common.h:226 [inline]
 syscall_exit_to_user_mode_prepare include/linux/irq-entry-common.h:256 [inline]
 syscall_exit_to_user_mode_work include/linux/entry-common.h:159 [inline]
 syscall_exit_to_user_mode include/linux/entry-common.h:194 [inline]
 do_syscall_64+0x2e3/0xf80 arch/x86/entry/syscall_64.c:100
 entry_SYSCALL_64_after_hwframe+0x77/0x7f
RIP: 0033:0x7f3aa2390a77
RSP: 002b:00007ffeab8af4f8 EFLAGS: 00000246 ORIG_RAX: 00000000000000a6
RAX: 0000000000000000 RBX: 00007f3aa2413d7d RCX: 00007f3aa2390a77
RDX: 0000000000000000 RSI: 0000000000000009 RDI: 00007ffeab8af5b0
RBP: 00007ffeab8af5b0 R08: 0000000000000000 R09: 0000000000000000
R10: 00000000ffffffff R11: 0000000000000246 R12: 00007ffeab8b0640
R13: 00007f3aa2413d7d R14: 0000000000017804 R15: 00007ffeab8b0680

Showing all locks held in the system:
1 lock held by khungtaskd/31:
 #0: ffffffff8df41cc0 (rcu_read_lock){....}-{1:3}, at: rcu_lock_acquire include/linux/rcupdate.h:331 [inline]
 #0: ffffffff8df41cc0 (rcu_read_lock){....}-{1:3}, at: rcu_read_lock include/linux/rcupdate.h:867 [inline]
 #0: ffffffff8df41cc0 (rcu_read_lock){....}-{1:3}, at: debug_show_all_locks+0x2e/0x180 kernel/locking/lockdep.c:6775
3 locks held by kworker/u8:9/1135:
 #0: ffff88801a069948 ((wq_completion)events_unbound#2){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3232 [inline]
 #0: ffff88801a069948 ((wq_completion)events_unbound#2){+.+.}-{0:0}, at: process_scheduled_works+0x9b4/0x1770 kernel/workqueue.c:3340
 #1: ffffc9000414fb80 ((linkwatch_work).work){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3233 [inline]
 #1: ffffc9000414fb80 ((linkwatch_work).work){+.+.}-{0:0}, at: process_scheduled_works+0x9ef/0x1770 kernel/workqueue.c:3340
 #2: ffffffff8f302688 (rtnl_mutex){+.+.}-{4:4}, at: linkwatch_event+0xe/0x60 net/core/link_watch.c:303
1 lock held by jbd2/sda1-8/5160:
1 lock held by dhcpcd/5493:
 #0: ffffffff8f302688 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_net_lock include/linux/rtnetlink.h:130 [inline]
 #0: ffffffff8f302688 (rtnl_mutex){+.+.}-{4:4}, at: devinet_ioctl+0x323/0x1b20 net/ipv4/devinet.c:1120
2 locks held by getty/5586:
 #0: ffff88814dccd0a0 (&tty->ldisc_sem){++++}-{0:0}, at: tty_ldisc_ref_wait+0x25/0x70 drivers/tty/tty_ldisc.c:243
 #1: ffffc9000332b2f0 (&ldata->atomic_read_lock){+.+.}-{4:4}, at: n_tty_read+0x43e/0x1400 drivers/tty/n_tty.c:2222
1 lock held by syz-executor/5827:
 #0: ffff8880334b20e0 (&type->s_umount_key#56){+.+.}-{4:4}, at: __super_lock fs/super.c:57 [inline]
 #0: ffff8880334b20e0 (&type->s_umount_key#56){+.+.}-{4:4}, at: __super_lock_excl fs/super.c:72 [inline]
 #0: ffff8880334b20e0 (&type->s_umount_key#56){+.+.}-{4:4}, at: deactivate_super+0xa9/0xe0 fs/super.c:506
5 locks held by kworker/0:8/6034:
2 locks held by kworker/u8:17/6134:
 #0: ffff88801a069948 ((wq_completion)events_unbound#2){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3232 [inline]
 #0: ffff88801a069948 ((wq_completion)events_unbound#2){+.+.}-{0:0}, at: process_scheduled_works+0x9b4/0x1770 kernel/workqueue.c:3340
 #1: ffffc900056b7b80 (connector_reaper_work){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3233 [inline]
 #1: ffffc900056b7b80 (connector_reaper_work){+.+.}-{0:0}, at: process_scheduled_works+0x9ef/0x1770 kernel/workqueue.c:3340
5 locks held by kworker/u8:19/6772:
 #0: ffff88801aed7148 ((wq_completion)netns){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3232 [inline]
 #0: ffff88801aed7148 ((wq_completion)netns){+.+.}-{0:0}, at: process_scheduled_works+0x9b4/0x1770 kernel/workqueue.c:3340
 #1: ffffc900049ffb80 (net_cleanup_work){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3233 [inline]
 #1: ffffc900049ffb80 (net_cleanup_work){+.+.}-{0:0}, at: process_scheduled_works+0x9ef/0x1770 kernel/workqueue.c:3340
 #2: ffffffff8f2f55f0 (pernet_ops_rwsem){++++}-{4:4}, at: cleanup_net+0xf7/0x7a0 net/core/net_namespace.c:670
 #3: ffffffff8f302688 (rtnl_mutex){+.+.}-{4:4}, at: default_device_exit_batch+0xdc/0x9e0 net/core/dev.c:13022
 #4: ffffffff8df477f8 (rcu_state.exp_mutex){+.+.}-{4:4}, at: exp_funnel_lock kernel/rcu/tree_exp.h:311 [inline]
 #4: ffffffff8df477f8 (rcu_state.exp_mutex){+.+.}-{4:4}, at: synchronize_rcu_expedited+0x2f6/0x730 kernel/rcu/tree_exp.h:956
2 locks held by kworker/u8:23/9032:
 #0: ffff88801a069948 ((wq_completion)events_unbound#2){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3232 [inline]
 #0: ffff88801a069948 ((wq_completion)events_unbound#2){+.+.}-{0:0}, at: process_scheduled_works+0x9b4/0x1770 kernel/workqueue.c:3340
 #1: ffffc90003a17b80 ((reaper_work).work){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3233 [inline]
 #1: ffffc90003a17b80 ((reaper_work).work){+.+.}-{0:0}, at: process_scheduled_works+0x9ef/0x1770 kernel/workqueue.c:3340
2 locks held by kworker/0:10/10141:
2 locks held by syz-executor/10179:
 #0: ffffffff8ea93ea8 (&ops->srcu#2){.+.+}-{0:0}, at: rcu_lock_acquire include/linux/rcupdate.h:331 [inline]
 #0: ffffffff8ea93ea8 (&ops->srcu#2){.+.+}-{0:0}, at: rcu_read_lock include/linux/rcupdate.h:867 [inline]
 #0: ffffffff8ea93ea8 (&ops->srcu#2){.+.+}-{0:0}, at: rtnl_link_ops_get+0x23/0x250 net/core/rtnetlink.c:570
 #1: ffffffff8f302688 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_lock net/core/rtnetlink.c:80 [inline]
 #1: ffffffff8f302688 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_nets_lock net/core/rtnetlink.c:341 [inline]
 #1: ffffffff8f302688 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_newlink+0x8ec/0x1c90 net/core/rtnetlink.c:4071
2 locks held by syz.4.1823/10375:
 #0: ffffffff8f816f60 (&ops->srcu#2){.+.+}-{0:0}, at: rcu_lock_acquire include/linux/rcupdate.h:331 [inline]
 #0: ffffffff8f816f60 (&ops->srcu#2){.+.+}-{0:0}, at: rcu_read_lock include/linux/rcupdate.h:867 [inline]
 #0: ffffffff8f816f60 (&ops->srcu#2){.+.+}-{0:0}, at: rtnl_link_ops_get+0x23/0x250 net/core/rtnetlink.c:570
 #1: ffffffff8f302688 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_lock net/core/rtnetlink.c:80 [inline]
 #1: ffffffff8f302688 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_nets_lock net/core/rtnetlink.c:341 [inline]
 #1: ffffffff8f302688 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_newlink+0x8ec/0x1c90 net/core/rtnetlink.c:4071
1 lock held by syz.0.1830/10393:
 #0: ffffffff8f302688 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_net_lock include/linux/rtnetlink.h:130 [inline]
 #0: ffffffff8f302688 (rtnl_mutex){+.+.}-{4:4}, at: inet_rtm_newroute+0x109/0x210 net/ipv4/fib_frontend.c:922
2 locks held by syz.7.1840/10420:
 #0: ffff88807ede7508 (&sb->s_type->i_mutex_key#12){+.+.}-{4:4}, at: inode_lock include/linux/fs.h:1027 [inline]
 #0: ffff88807ede7508 (&sb->s_type->i_mutex_key#12){+.+.}-{4:4}, at: __sock_release net/socket.c:652 [inline]
 #0: ffff88807ede7508 (&sb->s_type->i_mutex_key#12){+.+.}-{4:4}, at: sock_close+0x9b/0x240 net/socket.c:1446
 #1: ffffffff8f302688 (rtnl_mutex){+.+.}-{4:4}, at: raw_release+0x1bc/0xa10 net/can/raw.c:418
1 lock held by syz.6.1843/10429:
 #0: ffffffff8f302688 (rtnl_mutex){+.+.}-{4:4}, at: dev_ethtool+0x1d0/0x19c0 net/ethtool/ioctl.c:3553
2 locks held by syz.3.1844/10431:
 #0: ffffffff8f36a230 (cb_lock){++++}-{4:4}, at: genl_rcv+0x19/0x40 net/netlink/genetlink.c:1218
 #1: ffffffff8f302688 (rtnl_mutex){+.+.}-{4:4}, at: nl80211_pre_doit+0x5f/0x930 net/wireless/nl80211.c:17932

=============================================

NMI backtrace for cpu 1
CPU: 1 UID: 0 PID: 31 Comm: khungtaskd Not tainted syzkaller #0 PREEMPT(full)
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/25/2025
Call Trace:
 dump_stack_lvl+0x189/0x250 lib/dump_stack.c:120
 nmi_cpu_backtrace+0x39e/0x3d0 lib/nmi_backtrace.c:113
 nmi_trigger_cpumask_backtrace+0x17a/0x300 lib/nmi_backtrace.c:62
 trigger_all_cpu_backtrace include/linux/nmi.h:160 [inline]
 check_hung_uninterruptible_tasks kernel/hung_task.c:332 [inline]
 watchdog+0xf3c/0xf80 kernel/hung_task.c:495
 kthread+0x711/0x8a0 kernel/kthread.c:463
 ret_from_fork+0x599/0xb30 arch/x86/kernel/process.c:158
 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:246
Sending NMI from CPU 1 to CPUs 0:
NMI backtrace for cpu 0
CPU: 0 UID: 0 PID: 11 Comm: kworker/0:0H Not tainted syzkaller #0 PREEMPT(full)
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/25/2025
Workqueue: kblockd blk_mq_timeout_work
RIP: 0010:arch_atomic_read arch/x86/include/asm/atomic.h:23 [inline]
RIP: 0010:raw_atomic_read include/linux/atomic/atomic-arch-fallback.h:457 [inline]
RIP: 0010:atomic_read include/linux/atomic/atomic-instrumented.h:33 [inline]
RIP: 0010:queued_spin_is_locked include/asm-generic/qspinlock.h:57 [inline]
RIP: 0010:debug_spin_unlock kernel/locking/spinlock_debug.c:101 [inline]
RIP: 0010:do_raw_spin_unlock+0x5b/0x240 kernel/locking/spinlock_debug.c:141
Code: 00 00 41 81 3e ad 4e ad de 0f 85 f3 00 00 00 48 89 df be 04 00 00 00 e8 f3 73 88 00 48 89 d8 48 c1 e8 03 42 0f b6 04 20 84 c0 <0f> 85 74 01 00 00 83 3b 00 0f 84 ea 00 00 00 4c 8d 73 10 4d 89 f5
RSP: 0018:ffffc90000007848 EFLAGS: 00000046
RAX: 0000000000000000 RBX: ffff888026a4d2f8 RCX: ffffffff819ed20d
RDX: 0000000000000000 RSI: 0000000000000004 RDI: ffff888026a4d2f8
RBP: ffffc90000007908 R08: ffff888026a4d2fb R09: 1ffff11004d49a5f
R10: dffffc0000000000 R11: ffffed1004d49a60 R12: dffffc0000000000
R13: ffff88814d7562b0 R14: ffff888026a4d2fc R15: 1ffff92000000f10
FS:  0000000000000000(0000) GS:ffff88812608e000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000001b31013ff8 CR3: 000000000dd3a000 CR4: 0000000000350ef0
Call Trace:
 __raw_spin_unlock_irqrestore include/linux/spinlock_api_smp.h:150 [inline]
 _raw_spin_unlock_irqrestore+0x78/0x110 kernel/locking/spinlock.c:194
 ath9k_hif_usb_reg_in_cb+0x4c8/0x6f0 drivers/net/wireless/ath/ath9k/hif_usb.c:789
 __usb_hcd_giveback_urb+0x376/0x540 drivers/usb/core/hcd.c:1661
 dummy_timer+0x85f/0x44c0 drivers/usb/gadget/udc/dummy_hcd.c:1995
 __run_hrtimer kernel/time/hrtimer.c:1777 [inline]
 __hrtimer_run_queues+0x51c/0xc30 kernel/time/hrtimer.c:1841
 hrtimer_run_softirq+0x187/0x2b0 kernel/time/hrtimer.c:1858
 handle_softirqs+0x27d/0x850 kernel/softirq.c:622
 __do_softirq kernel/softirq.c:656 [inline]
 invoke_softirq kernel/softirq.c:496 [inline]
 __irq_exit_rcu+0xca/0x1f0 kernel/softirq.c:723
 irq_exit_rcu+0x9/0x30 kernel/softirq.c:739
 instr_sysvec_apic_timer_interrupt arch/x86/kernel/apic/apic.c:1056 [inline]
 sysvec_apic_timer_interrupt+0xa6/0xc0 arch/x86/kernel/apic/apic.c:1056
 asm_sysvec_apic_timer_interrupt+0x1a/0x20 arch/x86/include/asm/idtentry.h:697
RIP: 0010:lock_acquire+0x16c/0x340 kernel/locking/lockdep.c:5872
Code: 00 00 00 00 9c 8f 44 24 30 f7 44 24 30 00 02 00 00 0f 85 cd 00 00 00 f7 44 24 08 00 02 00 00 74 01 fb 65 48 8b 05 14 f5 da 10 <48> 3b 44 24 58 0f 85 e5 00 00 00 48 83 c4 60 5b 41 5c 41 5d 41 5e
RSP: 0018:ffffc900001079a0 EFLAGS: 00000206
RAX: 8ad6c6c332cdec00 RBX: 0000000000000000 RCX: 8ad6c6c332cdec00
RDX: 000000005f5d5300 RSI: ffffffff8d96aaff RDI: ffffffff8bbffbe0
RBP: ffffffff818bb104 R08: ffffffff818bb104 R09: ffff88801b2e0548
R10: dffffc0000000000 R11: fffffbfff1f025af R12: 0000000000000000
R13: ffff88801b2e0548 R14: 0000000000000001 R15: 0000000000000246
 process_one_work kernel/workqueue.c:3232 [inline]
 process_scheduled_works+0x9c9/0x1770 kernel/workqueue.c:3340
 worker_thread+0x8a0/0xda0 kernel/workqueue.c:3421
 kthread+0x711/0x8a0 kernel/kthread.c:463
 ret_from_fork+0x599/0xb30 arch/x86/kernel/process.c:158
 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:246
sched: DL replenish lagged too much