diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 7974e91ffe13..78f749b6887b 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -545,6 +545,7 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
 		return 0;
 
 	iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
+	pr_info("pid=%d comm=%s lock idx=%d\n", current->pid, current->comm, iter_info.srcu_idx);
 
 	iter_info.marks[FSNOTIFY_ITER_TYPE_SB] =
 		fsnotify_first_mark(&sb->s_fsnotify_marks);
@@ -577,6 +578,7 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
 	}
 	ret = 0;
 out:
+	pr_info("pid=%d comm=%s unlock idx=%d\n", current->pid, current->comm, iter_info.srcu_idx);
 	srcu_read_unlock(&fsnotify_mark_srcu, iter_info.srcu_idx);
 
 	return ret;
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index c74ef947447d..91ad7046522f 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -205,6 +205,7 @@ static void fsnotify_connector_destroy_workfn(struct work_struct *work)
 	connector_destroy_list = NULL;
 	spin_unlock(&destroy_lock);
 
+	pr_info("pid=%d comm=%s synchronize\n", current->pid, current->comm);
 	synchronize_srcu(&fsnotify_mark_srcu);
 	while (conn) {
 		free = conn;
@@ -409,11 +410,13 @@ bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
 	 * lists, we can drop SRCU lock, and safely resume the list iteration
 	 * once userspace returns.
 	 */
+	pr_info("pid=%d comm=%s unlock idx=%d\n", current->pid, current->comm, iter_info->srcu_idx);
 	srcu_read_unlock(&fsnotify_mark_srcu, iter_info->srcu_idx);
 
 	return true;
 
 fail:
+	pr_info("pid=%d comm=%s skip idx=%d\n", current->pid, current->comm, iter_info->srcu_idx);
 	for (type--; type >= 0; type--)
 		fsnotify_put_mark_wake(iter_info->marks[type]);
 	return false;
@@ -425,6 +428,7 @@ void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info)
 	int type;
 
 	iter_info->srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
+	pr_info("pid=%d comm=%s lock idx=%d\n", current->pid, current->comm, iter_info->srcu_idx);
 	fsnotify_foreach_iter_type(type)
 		fsnotify_put_mark_wake(iter_info->marks[type]);
 }
@@ -586,16 +590,19 @@ static struct fsnotify_mark_connector *fsnotify_grab_connector(
 	int idx;
 
 	idx = srcu_read_lock(&fsnotify_mark_srcu);
+	pr_info("pid=%d comm=%s lock idx=%d\n", current->pid, current->comm, idx);
 	conn = srcu_dereference(*connp, &fsnotify_mark_srcu);
 	if (!conn)
 		goto out;
 	spin_lock(&conn->lock);
 	if (conn->type == FSNOTIFY_OBJ_TYPE_DETACHED) {
 		spin_unlock(&conn->lock);
+		pr_info("pid=%d comm=%s unlock idx=%d\n", current->pid, current->comm, idx);
 		srcu_read_unlock(&fsnotify_mark_srcu, idx);
 		return NULL;
 	}
 out:
+	pr_info("pid=%d comm=%s unlock idx=%d\n", current->pid, current->comm, idx);
 	srcu_read_unlock(&fsnotify_mark_srcu, idx);
 	return conn;
 }
@@ -895,6 +902,7 @@ static void fsnotify_mark_destroy_workfn(struct work_struct *work)
 	list_replace_init(&destroy_list, &private_destroy_list);
 	spin_unlock(&destroy_lock);
 
+	pr_info("pid=%d comm=%s synchronize\n", current->pid, current->comm);
 	synchronize_srcu(&fsnotify_mark_srcu);
 
 	list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index dbb409d77d4f..0567d5ce5b4a 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -50,6 +50,7 @@ extern int debug_locks_off(void);
 #ifdef CONFIG_LOCKDEP
 extern void debug_show_all_locks(void);
 extern void debug_show_held_locks(struct task_struct *task);
+extern void debug_show_all_lock_holders(void);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
 extern void debug_check_no_locks_held(void);
 #else
@@ -61,6 +62,10 @@ static inline void debug_show_held_locks(struct task_struct *task)
 {
 }
 
+static inline void debug_show_all_lock_holders(void)
+{
+}
+
 static inline void
 debug_check_no_locks_freed(const void *from, unsigned long len)
 {
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 9a24574988d2..3dfb6ed3e981 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -215,7 +215,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
  unlock:
 	rcu_read_unlock();
 	if (hung_task_show_lock)
-		debug_show_all_locks();
+		debug_show_all_lock_holders();
 
 	if (hung_task_show_all_bt) {
 		hung_task_show_all_bt = false;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index dcd1d5bfc1e0..0d98ba19b214 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -32,6 +32,7 @@
 #include <linux/sched/clock.h>
 #include <linux/sched/task.h>
 #include <linux/sched/mm.h>
+#include <linux/sched/debug.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/proc_fs.h>
@@ -6562,6 +6563,38 @@ void debug_show_all_locks(void)
 	pr_warn("=============================================\n\n");
 }
 EXPORT_SYMBOL_GPL(debug_show_all_locks);
+
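+/*
+ * Like debug_show_all_locks(), but also dump each lock holder's stack with
+ * sched_show_task() so the report shows where the holders are blocked.
+ */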
+void debug_show_all_lock_holders(void)
+{
+	struct task_struct *g, *p;
+
+	if (unlikely(!debug_locks)) {
+		pr_warn("INFO: lockdep is turned off.\n");
+		return;
+	}
+	pr_warn("\nShowing all threads with locks held in the system:\n");
+
+	rcu_read_lock();
+	for_each_process_thread(g, p) {
+		if (!p->lockdep_depth)
+			continue;
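+		/* Skip current if its only held lock is the rcu_read_lock above. */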
+		if (p == current && p->lockdep_depth == 1)
+			continue;
+		sched_show_task(p);
+		lockdep_print_held_locks(p);
+		touch_nmi_watchdog();
+		touch_all_softlockup_watchdogs();
+	}
+	rcu_read_unlock();
+
+	pr_warn("\n");
+	pr_warn("=============================================\n\n");
+}
 #endif
 
 /*