syzbot |
sign-in | mailing list | source | docs | 🏰 |
| ID | Workflow | Result | Correct | Bug | Created | Started | Finished | Revision | Error |
|---|---|---|---|---|---|---|---|---|---|
| 27ff3d90-6a2a-41bb-96b3-22d5777a1838 | patching | 🏃 | BUG: corrupted list in flow_block_cb_setup_simple | 2026/01/23 14:58 | 2026/01/23 14:59 | 7ee46ad36cdaae818f74dea493b5ec30df3fe31b |
non-slab/vmalloc memory list_del corruption. prev->next should be ffff888028878200, but was ffffffff8e940fc0. (prev=ffffffff8e940fc0) ------------[ cut here ]------------ kernel BUG at lib/list_debug.c:64! Oops: invalid opcode: 0000 [#1] SMP KASAN PTI CPU: 1 UID: 0 PID: 6308 Comm: syz.3.231 Not tainted syzkaller #0 PREEMPT(full) Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 08/18/2025 RIP: 0010:__list_del_entry_valid_or_report+0x15a/0x190 lib/list_debug.c:62 Code: e8 1b 29 74 fd 43 80 3c 2c 00 74 08 4c 89 ff e8 9c 30 95 fd 49 8b 17 48 c7 c7 80 c9 9e 8b 48 89 de 4c 89 f9 e8 67 95 99 fc 90 <0f> 0b 4c 89 f7 e8 ec 28 74 fd 43 80 3c 2c 00 74 08 4c 89 ff e8 6d RSP: 0018:ffffc90003e6e740 EFLAGS: 00010246 RAX: 000000000000006d RBX: ffff888028878200 RCX: b532cdd832cb2600 RDX: 0000000000000000 RSI: 0000000080000000 RDI: 0000000000000000 RBP: 1ffff920007cdd3a R08: 0000000000000003 R09: 0000000000000004 R10: dffffc0000000000 R11: fffffbfff1b7a118 R12: 1ffffffff1d281f8 R13: dffffc0000000000 R14: ffffffff8e940fc0 R15: ffffffff8e940fc0 FS: 000055556c292500(0000) GS:ffff88812649b000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000001b30a63fff CR3: 0000000077f04000 CR4: 00000000003526f0 Call Trace: <TASK> __list_del_entry_valid include/linux/list.h:124 [inline] __list_del_entry include/linux/list.h:215 [inline] list_del include/linux/list.h:229 [inline] flow_block_cb_setup_simple+0x62d/0x740 net/core/flow_offload.c:369 nft_block_offload_cmd net/netfilter/nf_tables_offload.c:397 [inline] nft_chain_offload_cmd+0x293/0x660 net/netfilter/nf_tables_offload.c:451 nft_flow_block_chain net/netfilter/nf_tables_offload.c:471 [inline] nft_flow_offload_chain net/netfilter/nf_tables_offload.c:513 [inline] nft_flow_rule_offload_commit+0x40d/0x1b60 net/netfilter/nf_tables_offload.c:592 nf_tables_commit+0x675/0x8710 net/netfilter/nf_tables_api.c:10925 nfnetlink_rcv_batch net/netfilter/nfnetlink.c:576 [inline] nfnetlink_rcv_skb_batch net/netfilter/nfnetlink.c:649 [inline] nfnetlink_rcv+0x1ac9/0x2590 net/netfilter/nfnetlink.c:667 netlink_unicast_kernel net/netlink/af_netlink.c:1320 [inline] netlink_unicast+0x82c/0x9e0 net/netlink/af_netlink.c:1346 netlink_sendmsg+0x805/0xb30 net/netlink/af_netlink.c:1896 sock_sendmsg_nosec net/socket.c:727 [inline] __sock_sendmsg+0x219/0x270 net/socket.c:742 ____sys_sendmsg+0x505/0x830 net/socket.c:2630 ___sys_sendmsg+0x21f/0x2a0 net/socket.c:2684 __sys_sendmsg net/socket.c:2716 [inline] __do_sys_sendmsg net/socket.c:2721 [inline] __se_sys_sendmsg net/socket.c:2719 [inline] __x64_sys_sendmsg+0x19b/0x260 net/socket.c:2719 do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline] do_syscall_64+0xfa/0x3b0 arch/x86/entry/syscall_64.c:94 entry_SYSCALL_64_after_hwframe+0x77/0x7f RIP: 0033:0x7f13c218eec9 Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 a8 ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007ffe2cb63208 EFLAGS: 00000246 ORIG_RAX: 000000000000002e RAX: ffffffffffffffda RBX: 00007f13c23e5fa0 RCX: 00007f13c218eec9 RDX: 000000000000c050 RSI: 0000200000000cc0 RDI: 0000000000000003 RBP: 00007f13c2211f91 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000 R13: 00007f13c23e5fa0 R14: 00007f13c23e5fa0 R15: 0000000000000003 </TASK> Modules linked in: ---[ end trace 0000000000000000 ]--- RIP: 0010:__list_del_entry_valid_or_report+0x15a/0x190 lib/list_debug.c:62 Code: e8 1b 29 74 fd 43 80 3c 2c 00 74 08 4c 89 ff e8 9c 30 95 fd 49 8b 17 48 c7 c7 80 c9 9e 8b 48 89 de 4c 89 f9 e8 67 95 99 fc 90 <0f> 0b 4c 89 f7 e8 ec 28 74 fd 43 80 3c 2c 00 74 08 4c 89 ff e8 6d RSP: 0018:ffffc90003e6e740 EFLAGS: 00010246 RAX: 000000000000006d RBX: ffff888028878200 RCX: b532cdd832cb2600 RDX: 0000000000000000 RSI: 0000000080000000 RDI: 0000000000000000 RBP: 1ffff920007cdd3a R08: 0000000000000003 R09: 0000000000000004 R10: dffffc0000000000 R11: fffffbfff1b7a118 R12: 1ffffffff1d281f8 R13: dffffc0000000000 R14: ffffffff8e940fc0 R15: ffffffff8e940fc0 FS: 000055556c292500(0000) GS:ffff88812649b000(0000) knlGS:0000000000000000
| Seq | Timestamp | Type | Name | Duration |
|---|---|---|---|---|
| 0/0 | 2026/01/23 14:59 | flow | patching |
|
| 1/1 | 2026/01/23 14:59 | action | base-commit-picker |
0mResults:map[KernelCommit:v6.19-rc6 KernelRepo:git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git] |
| 2/1 | 2026/01/23 14:59 | action | kernel-checkouter |
0mResults:map[KernelSrc:/usr/local/google/home/dvyukov/syzkaller/agent/workdir/cache/src/56c37daf054285e608d3681723509574c876d55b] |
| 3/1 | 2026/01/23 14:59 | action | kernel-builder |
5mResults:map[KernelObj:/usr/local/google/home/dvyukov/syzkaller/agent/workdir/cache/build/4804218e87c6a4ed7fdbd2eb53690bac4ce22a04] |
| 4/1 | 2026/01/23 15:05 | action | crash-reproducer |
2mResults:map[CrashReport:list_del corruption. prev->next should be ffff88804848b400, but was ffffffff8eddb420. (prev=ffffffff8eddb420) ------------[ cut here ]------------ kernel BUG at lib/list_debug.c:64! Oops: invalid opcode: 0000 [#1] SMP KASAN NOPTI CPU: 1 UID: 0 PID: 17347 Comm: syz-executor157 Not tainted syzkaller #1 PREEMPT(full) Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/2014 RIP: 0010:__list_del_entry_valid_or_report+0x15a/0x190 lib/list_debug.c:62 Code: e8 ab 89 6c fd 43 80 3c 2c 00 74 08 4c 89 ff e8 bc a7 8e fd 49 8b 17 48 c7 c7 20 b1 c7 8b 48 89 de 4c 89 f9 e8 e7 e6 97 fc 90 <0f> 0b 4c 89 f7 e8 7c 89 6c fd 43 80 3c 2c 00 74 08 4c 89 ff e8 8d RSP: 0018:ffffc9000c666700 EFLAGS: 00010246 RAX: 000000000000006d RBX: ffff88804848b400 RCX: 03ecdcd850c3e600 RDX: 0000000000000000 RSI: 0000000080000000 RDI: 0000000000000000 RBP: 1ffff920018ccd32 R08: ffffc9000c666467 R09: 1ffff920018ccc8c R10: dffffc0000000000 R11: fffff520018ccc8d R12: 1ffffffff1dbb684 R13: dffffc0000000000 R14: ffffffff8eddb420 R15: ffffffff8eddb420 FS: 00005555716cb3c0(0000) GS:ffff8880ec3bd000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000200000000380 CR3: 0000000066dfd000 CR4: 0000000000750ef0 PKRU: 55555554 Call Trace: <TASK> __list_del_entry_valid include/linux/list.h:132 [inline] __list_del_entry include/linux/list.h:223 [inline] list_del include/linux/list.h:237 [inline] flow_block_cb_setup_simple+0x62d/0x740 net/core/flow_offload.c:369 nft_block_offload_cmd net/netfilter/nf_tables_offload.c:397 [inline] nft_chain_offload_cmd+0x293/0x680 net/netfilter/nf_tables_offload.c:451 nft_flow_block_chain net/netfilter/nf_tables_offload.c:471 [inline] nft_flow_offload_chain net/netfilter/nf_tables_offload.c:513 [inline] nft_flow_rule_offload_commit+0x40d/0x1b60 net/netfilter/nf_tables_offload.c:592 nf_tables_commit+0x7ad/0xa1c0 net/netfilter/nf_tables_api.c:10985 nfnetlink_rcv_batch net/netfilter/nfnetlink.c:576 [inline] nfnetlink_rcv_skb_batch net/netfilter/nfnetlink.c:649 [inline] nfnetlink_rcv+0x1a88/0x2590 net/netfilter/nfnetlink.c:667 netlink_unicast_kernel net/netlink/af_netlink.c:1318 [inline] netlink_unicast+0x82f/0x9e0 net/netlink/af_netlink.c:1344 netlink_sendmsg+0x805/0xb30 net/netlink/af_netlink.c:1894 sock_sendmsg_nosec net/socket.c:727 [inline] __sock_sendmsg+0x21c/0x270 net/socket.c:742 ____sys_sendmsg+0x507/0x820 net/socket.c:2592 ___sys_sendmsg+0x21f/0x2a0 net/socket.c:2646 __sys_sendmsg net/socket.c:2678 [inline] __do_sys_sendmsg net/socket.c:2683 [inline] __se_sys_sendmsg net/socket.c:2681 [inline] __x64_sys_sendmsg+0x19b/0x260 net/socket.c:2681 do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline] do_syscall_64+0xe8/0xf80 arch/x86/entry/syscall_64.c:94 entry_SYSCALL_64_after_hwframe+0x77/0x7f RIP: 0033:0x7fcf7e128919 Code: c0 79 93 eb d5 48 8d 7c 1d 00 eb 99 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 d8 ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007ffcf157c608 EFLAGS: 00000246 ORIG_RAX: 000000000000002e RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 00007fcf7e128919 RDX: 000000000000c050 RSI: 0000200000000cc0 RDI: 0000000000000003 RBP: 0000000000000000 R08: 0000000000000000 R09: 00007fcf7e194222 R10: 0000000000000000 R11: 0000000000000246 R12: 00007ffcf157c62c R13: 00007ffcf157c660 R14: 00007ffcf157c640 R15: 00000000000008a9 </TASK> Modules linked in: ---[ end trace 0000000000000000 ]--- RIP: 0010:__list_del_entry_valid_or_report+0x15a/0x190 lib/list_debug.c:62 Code: e8 ab 89 6c fd 43 80 3c 2c 00 74 08 4c 89 ff e8 bc a7 8e fd 49 8b 17 48 c7 c7 20 b1 c7 8b 48 89 de 4c 89 f9 e8 e7 e6 97 fc 90 <0f> 0b 4c 89 f7 e8 7c 89 6c fd 43 80 3c 2c 00 74 08 4c 89 ff e8 8d RSP: 0018:ffffc9000c666700 EFLAGS: 00010246 RAX: 000000000000006d RBX: ffff88804848b400 RCX: 03ecdcd850c3e600 RDX: 0000000000000000 RSI: 0000000080000000 RDI: 0000000000000000 RBP: 1ffff920018ccd32 R08: ffffc9000c666467 R09: 1ffff920018ccc8c R10: dffffc0000000000 R11: fffff520018ccc8d R12: 1ffffffff1dbb684 R13: dffffc0000000000 R14: ffffffff8eddb420 R15: ffffffff8eddb420 FS: 00005555716cb3c0(0000) GS:ffff8880ec3bd000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fcf7e16b170 CR3: 0000000066dfd000 CR4: 0000000000750ef0 PKRU: 55555554 ] |
| 5/1 | 2026/01/23 15:08 | action | codesearch-prepare |
4mResults:map[Index:codesearch-index] |
| 6/1 | 2026/01/23 15:12 | agent | debugger |
Model:gemini-3-pro-preview Instruction: You are an experienced Linux kernel developer tasked with debugging a kernel crash root cause. You need to provide a detailed explanation of the root cause for another developer to be able to write a fix for the bug based on your explanation. Your final reply must contain only the explanation. Call some codesearch tools first. Prefer calling several tools at the same time to save round-trips. Prompt: The crash is: list_del corruption. prev->next should be ffff88804848b400, but was ffffffff8eddb420. (prev=ffffffff8eddb420) ------------[ cut here ]------------ kernel BUG at lib/list_debug.c:64! Oops: invalid opcode: 0000 [#1] SMP KASAN NOPTI CPU: 1 UID: 0 PID: 17347 Comm: syz-executor157 Not tainted syzkaller #1 PREEMPT(full) Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/2014 RIP: 0010:__list_del_entry_valid_or_report+0x15a/0x190 lib/list_debug.c:62 Code: e8 ab 89 6c fd 43 80 3c 2c 00 74 08 4c 89 ff e8 bc a7 8e fd 49 8b 17 48 c7 c7 20 b1 c7 8b 48 89 de 4c 89 f9 e8 e7 e6 97 fc 90 <0f> 0b 4c 89 f7 e8 7c 89 6c fd 43 80 3c 2c 00 74 08 4c 89 ff e8 8d RSP: 0018:ffffc9000c666700 EFLAGS: 00010246 RAX: 000000000000006d RBX: ffff88804848b400 RCX: 03ecdcd850c3e600 RDX: 0000000000000000 RSI: 0000000080000000 RDI: 0000000000000000 RBP: 1ffff920018ccd32 R08: ffffc9000c666467 R09: 1ffff920018ccc8c R10: dffffc0000000000 R11: fffff520018ccc8d R12: 1ffffffff1dbb684 R13: dffffc0000000000 R14: ffffffff8eddb420 R15: ffffffff8eddb420 FS: 00005555716cb3c0(0000) GS:ffff8880ec3bd000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000200000000380 CR3: 0000000066dfd000 CR4: 0000000000750ef0 PKRU: 55555554 Call Trace: <TASK> __list_del_entry_valid include/linux/list.h:132 [inline] __list_del_entry include/linux/list.h:223 [inline] list_del include/linux/list.h:237 [inline] flow_block_cb_setup_simple+0x62d/0x740 net/core/flow_offload.c:369 nft_block_offload_cmd net/netfilter/nf_tables_offload.c:397 [inline] nft_chain_offload_cmd+0x293/0x680 net/netfilter/nf_tables_offload.c:451 nft_flow_block_chain net/netfilter/nf_tables_offload.c:471 [inline] nft_flow_offload_chain net/netfilter/nf_tables_offload.c:513 [inline] nft_flow_rule_offload_commit+0x40d/0x1b60 net/netfilter/nf_tables_offload.c:592 nf_tables_commit+0x7ad/0xa1c0 net/netfilter/nf_tables_api.c:10985 nfnetlink_rcv_batch net/netfilter/nfnetlink.c:576 [inline] nfnetlink_rcv_skb_batch net/netfilter/nfnetlink.c:649 [inline] nfnetlink_rcv+0x1a88/0x2590 net/netfilter/nfnetlink.c:667 netlink_unicast_kernel net/netlink/af_netlink.c:1318 [inline] netlink_unicast+0x82f/0x9e0 net/netlink/af_netlink.c:1344 netlink_sendmsg+0x805/0xb30 net/netlink/af_netlink.c:1894 sock_sendmsg_nosec net/socket.c:727 [inline] __sock_sendmsg+0x21c/0x270 net/socket.c:742 ____sys_sendmsg+0x507/0x820 net/socket.c:2592 ___sys_sendmsg+0x21f/0x2a0 net/socket.c:2646 __sys_sendmsg net/socket.c:2678 [inline] __do_sys_sendmsg net/socket.c:2683 [inline] __se_sys_sendmsg net/socket.c:2681 [inline] __x64_sys_sendmsg+0x19b/0x260 net/socket.c:2681 do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline] do_syscall_64+0xe8/0xf80 arch/x86/entry/syscall_64.c:94 entry_SYSCALL_64_after_hwframe+0x77/0x7f RIP: 0033:0x7fcf7e128919 Code: c0 79 93 eb d5 48 8d 7c 1d 00 eb 99 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 d8 ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007ffcf157c608 EFLAGS: 00000246 ORIG_RAX: 000000000000002e RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 00007fcf7e128919 RDX: 000000000000c050 RSI: 0000200000000cc0 RDI: 0000000000000003 RBP: 0000000000000000 R08: 0000000000000000 R09: 00007fcf7e194222 R10: 0000000000000000 R11: 0000000000000246 R12: 00007ffcf157c62c R13: 00007ffcf157c660 R14: 00007ffcf157c640 R15: 00000000000008a9 </TASK> Modules linked in: ---[ end trace 0000000000000000 ]--- RIP: 0010:__list_del_entry_valid_or_report+0x15a/0x190 lib/list_debug.c:62 Code: e8 ab 89 6c fd 43 80 3c 2c 00 74 08 4c 89 ff e8 bc a7 8e fd 49 8b 17 48 c7 c7 20 b1 c7 8b 48 89 de 4c 89 f9 e8 e7 e6 97 fc 90 <0f> 0b 4c 89 f7 e8 7c 89 6c fd 43 80 3c 2c 00 74 08 4c 89 ff e8 8d RSP: 0018:ffffc9000c666700 EFLAGS: 00010246 RAX: 000000000000006d RBX: ffff88804848b400 RCX: 03ecdcd850c3e600 RDX: 0000000000000000 RSI: 0000000080000000 RDI: 0000000000000000 RBP: 1ffff920018ccd32 R08: ffffc9000c666467 R09: 1ffff920018ccc8c R10: dffffc0000000000 R11: fffff520018ccc8d R12: 1ffffffff1dbb684 R13: dffffc0000000000 R14: ffffffff8eddb420 R15: ffffffff8eddb420 FS: 00005555716cb3c0(0000) GS:ffff8880ec3bd000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fcf7e16b170 CR3: 0000000066dfd000 CR4: 0000000000750ef0 PKRU: 55555554 |
| 7/2 | 2026/01/23 15:12 | llm | debugger |
7mModel:gemini-3-pro-preview Thoughts: **Root Cause Analysis of Kernel Crash** Okay, so the user wants me to figure out this kernel crash, specifically a `list_del corruption` error within `flow_block_cb_setup_simple`. That function is called from `nft_block_offload_cmd` and, indirectly, through `nft_flow_rule_offload_commit`. I need to understand what's going wrong with the linked list. The error message is pretty clear: "list_del corruption. prev->next should be X, but was Y". In this specific case, `prev->next` should be `ffff88804848b400`, but it's actually `ffffffff8eddb420`. And wait, that's important, `prev` itself is `ffffffff8eddb420`. So `prev->next` is actually pointing back to `prev`! That's a classic symptom. It means the list is corrupted during the delete operation. Something's gone terribly wrong with the pointers in this doubly linked list. Typically, this points to a few possibilities. Either `prev` is actually a list head that's been incorrectly used, or the memory has been corrupted, maybe a use-after-free situation, or potentially a double-delete. I need to figure out which. The call stack gives me some context. It's `flow_block_cb_setup_simple` calling `list_del`, which is triggered by `nft_block_offload_cmd` and ultimately related to flow rule offloading via `nft_flow_rule_offload_commit`. This touches the `nf_tables` subsystem, so I'll need to look there as well. My next step is to examine `net/core/flow_offload.c`, specifically `flow_block_cb_setup_simple` and its surroundings. I'll also check `net/netfilter/nf_tables_offload.c` and trace the execution path. I'll start with `net/core/flow_offload.c` around line 369 to see the list manipulation. This crash will need to be thoroughly examined, starting at that list manipulation location. |
| 8/2 | 2026/01/23 15:20 | tool | read-file |
0mArgs:map[File:net/core/flow_offload.c] Results: map[Contents:/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>
struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
struct flow_rule *rule;
int i;
rule = kzalloc(struct_size(rule, action.entries, num_actions),
GFP_KERNEL);
if (!rule)
return NULL;
rule->action.num_entries = num_actions;
/* Pre-fill each action hw_stats with DONT_CARE.
* Caller can override this if it wants stats for a given action.
*/
for (i = 0; i < num_actions; i++)
rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
struct flow_offload_action *offload_action_alloc(unsigned int num_actions)
{
struct flow_offload_action *fl_action;
int i;
fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),
GFP_KERNEL);
if (!fl_action)
return NULL;
fl_action->action.num_entries = num_actions;
/* Pre-fill each action hw_stats with DONT_CARE.
* Caller can override this if it wants stats for a given action.
*/
for (i = 0; i < num_actions; i++)
fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
return fl_action;
}
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \
const struct flow_match *__m = &(__rule)->match; \
struct flow_dissector *__d = (__m)->dissector; \
\
(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \
(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \
void flow_rule_match_meta(const struct flow_rule *rule,
struct flow_match_meta *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);
void flow_rule_match_basic(const struct flow_rule *rule,
struct flow_match_basic *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);
void flow_rule_match_control(const struct flow_rule *rule,
struct flow_match_control *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
struct flow_match_eth_addrs *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);
void flow_rule_match_vlan(const struct flow_rule *rule,
struct flow_match_vlan *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);
void flow_rule_match_cvlan(const struct flow_rule *rule,
struct flow_match_vlan *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);
void flow_rule_match_arp(const struct flow_rule *rule,
struct flow_match_arp *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ARP, out);
}
EXPORT_SYMBOL(flow_rule_match_arp);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
struct flow_match_ipv4_addrs *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
struct flow_match_ipv6_addrs *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);
void flow_rule_match_ip(const struct flow_rule *rule,
struct flow_match_ip *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);
void flow_rule_match_ports(const struct flow_rule *rule,
struct flow_match_ports *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);
void flow_rule_match_ports_range(const struct flow_rule *rule,
struct flow_match_ports_range *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE, out);
}
EXPORT_SYMBOL(flow_rule_match_ports_range);
void flow_rule_match_tcp(const struct flow_rule *rule,
struct flow_match_tcp *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);
void flow_rule_match_ipsec(const struct flow_rule *rule,
struct flow_match_ipsec *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPSEC, out);
}
EXPORT_SYMBOL(flow_rule_match_ipsec);
void flow_rule_match_icmp(const struct flow_rule *rule,
struct flow_match_icmp *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);
void flow_rule_match_mpls(const struct flow_rule *rule,
struct flow_match_mpls *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);
void flow_rule_match_enc_control(const struct flow_rule *rule,
struct flow_match_control *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
struct flow_match_ipv4_addrs *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
struct flow_match_ipv6_addrs *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);
void flow_rule_match_enc_ip(const struct flow_rule *rule,
struct flow_match_ip *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);
void flow_rule_match_enc_ports(const struct flow_rule *rule,
struct flow_match_ports *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
struct flow_match_enc_keyid *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
struct flow_match_enc_opts *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);
struct flow_action_cookie *flow_action_cookie_create(void *data,
unsigned int len,
gfp_t gfp)
{
struct flow_action_cookie *cookie;
cookie = kmalloc(sizeof(*cookie) + len, gfp);
if (!cookie)
return NULL;
cookie->cookie_len = len;
memcpy(cookie->cookie, data, len);
return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);
void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);
void flow_rule_match_ct(const struct flow_rule *rule,
struct flow_match_ct *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);
void flow_rule_match_pppoe(const struct flow_rule *rule,
struct flow_match_pppoe *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PPPOE, out);
}
EXPORT_SYMBOL(flow_rule_match_pppoe);
void flow_rule_match_l2tpv3(const struct flow_rule *rule,
struct flow_match_l2tpv3 *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_L2TPV3, out);
}
EXPORT_SYMBOL(flow_rule_match_l2tpv3);
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
void (*release)(void *cb_priv))
{
struct flow_block_cb *block_cb;
block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
if (!block_cb)
return ERR_PTR(-ENOMEM);
block_cb->cb = cb;
block_cb->cb_ident = cb_ident;
block_cb->cb_priv = cb_priv;
block_cb->release = release;
return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);
void flow_block_cb_free(struct flow_block_cb *block_cb)
{
if (block_cb->release)
block_cb->release(block_cb->cb_priv);
kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);
struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
flow_setup_cb_t *cb, void *cb_ident)
{
struct flow_block_cb *block_cb;
list_for_each_entry(block_cb, &block->cb_list, list) {
if (block_cb->cb == cb &&
block_cb->cb_ident == cb_ident)
return block_cb;
}
return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);
void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);
void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);
bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
struct list_head *driver_block_list)
{
struct flow_block_cb *block_cb;
list_for_each_entry(block_cb, driver_block_list, driver_list) {
if (block_cb->cb == cb &&
block_cb->cb_ident == cb_ident)
return true;
}
return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);
int flow_block_cb_setup_simple(struct flow_block_offload *f,
struct list_head *driver_block_list,
flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
bool ingress_only)
{
struct flow_block_cb *block_cb;
if (ingress_only &&
f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
f->driver_block_list = driver_block_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
return -EBUSY;
block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
flow_block_cb_add(block_cb, f);
list_add_tail(&block_cb->driver_list, driver_block_list);
return 0;
case FLOW_BLOCK_UNBIND:
block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
if (!block_cb)
return -ENOENT;
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
return 0;
default:
return -EOPNOTSUPP;
}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);
static LIST_HEAD(flow_indir_dev_list);
struct flow_indr_dev {
struct list_head list;
flow_indr_block_bind_cb_t *cb;
void *cb_priv;
refcount_t refcnt;
};
static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
void *cb_priv)
{
struct flow_indr_dev *indr_dev;
indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
if (!indr_dev)
return NULL;
indr_dev->cb = cb;
indr_dev->cb_priv = cb_priv;
refcount_set(&indr_dev->refcnt, 1);
return indr_dev;
}
struct flow_indir_dev_info {
void *data;
struct net_device *dev;
struct Qdisc *sch;
enum tc_setup_type type;
void (*cleanup)(struct flow_block_cb *block_cb);
struct list_head list;
enum flow_block_command command;
enum flow_block_binder_type binder_type;
struct list_head *cb_list;
};
static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
struct flow_block_offload bo;
struct flow_indir_dev_info *cur;
list_for_each_entry(cur, &flow_indir_dev_list, list) {
memset(&bo, 0, sizeof(bo));
bo.command = cur->command;
bo.binder_type = cur->binder_type;
INIT_LIST_HEAD(&bo.cb_list);
cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
list_splice(&bo.cb_list, cur->cb_list);
}
}
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
struct flow_indr_dev *indr_dev;
mutex_lock(&flow_indr_block_lock);
list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
if (indr_dev->cb == cb &&
indr_dev->cb_priv == cb_priv) {
refcount_inc(&indr_dev->refcnt);
mutex_unlock(&flow_indr_block_lock);
return 0;
}
}
indr_dev = flow_indr_dev_alloc(cb, cb_priv);
if (!indr_dev) {
mutex_unlock(&flow_indr_block_lock);
return -ENOMEM;
}
list_add(&indr_dev->list, &flow_block_indr_dev_list);
existing_qdiscs_register(cb, cb_priv);
mutex_unlock(&flow_indr_block_lock);
tcf_action_reoffload_cb(cb, cb_priv, true);
return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);
static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
void *cb_priv,
struct list_head *cleanup_list)
{
struct flow_block_cb *this, *next;
list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
if (this->release == release &&
this->indr.cb_priv == cb_priv)
list_move(&this->indr.list, cleanup_list);
}
}
static void flow_block_indr_notify(struct list_head *cleanup_list)
{
struct flow_block_cb *this, *next;
list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
list_del(&this->indr.list);
this->indr.cleanup(this);
}
}
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
void (*release)(void *cb_priv))
{
struct flow_indr_dev *this, *next, *indr_dev = NULL;
LIST_HEAD(cleanup_list);
mutex_lock(&flow_indr_block_lock);
list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
if (this->cb == cb &&
this->cb_priv == cb_priv &&
refcount_dec_and_test(&this->refcnt)) {
indr_dev = this;
list_del(&indr_dev->list);
break;
}
}
if (!indr_dev) {
mutex_unlock(&flow_indr_block_lock);
return;
}
__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
mutex_unlock(&flow_indr_block_lock);
tcf_action_reoffload_cb(cb, cb_priv, false);
flow_block_indr_notify(&cleanup_list);
kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);
static void flow_block_indr_init(struct flow_block_cb *flow_block,
struct flow_block_offload *bo,
struct net_device *dev, struct Qdisc *sch, void *data,
void *cb_priv,
void (*cleanup)(struct flow_block_cb *block_cb))
{
flow_block->indr.binder_type = bo->binder_type;
flow_block->indr.data = data;
flow_block->indr.cb_priv = cb_priv;
flow_block->indr.dev = dev;
flow_block->indr.sch = sch;
flow_block->indr.cleanup = cleanup;
}
struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
void (*release)(void *cb_priv),
struct flow_block_offload *bo,
struct net_device *dev,
struct Qdisc *sch, void *data,
void *indr_cb_priv,
void (*cleanup)(struct flow_block_cb *block_cb))
{
struct flow_block_cb *block_cb;
block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
if (IS_ERR(block_cb))
goto out;
flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
list_add(&block_cb->indr.list, &flow_block_indr_list);
out:
return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);
static struct flow_indir_dev_info *find_indir_dev(void *data)
{
struct flow_indir_dev_info *cur;
list_for_each_entry(cur, &flow_indir_dev_list, list) {
if (cur->data == data)
return cur;
}
return NULL;
}
static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
struct flow_block_offload *bo)
{
struct flow_indir_dev_info *info;
info = find_indir_dev(data);
if (info)
return -EEXIST;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->data = data;
info->dev = dev;
info->sch = sch;
info->type = type;
info->cleanup = cleanup;
info->command = bo->command;
info->binder_type = bo->binder_type;
info->cb_list = bo->cb_list_head;
list_add(&info->list, &flow_indir_dev_list);
return 0;
}
static int indir_dev_remove(void *data)
{
struct flow_indir_dev_info *info;
info = find_indir_dev(data);
if (!info)
return -ENOENT;
list_del(&info->list);
kfree(info);
return 0;
}
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
enum tc_setup_type type, void *data,
struct flow_block_offload *bo,
void (*cleanup)(struct flow_block_cb *block_cb))
{
struct flow_indr_dev *this;
u32 count = 0;
int err;
mutex_lock(&flow_indr_block_lock);
if (bo) {
if (bo->command == FLOW_BLOCK_BIND)
indir_dev_add(data, dev, sch, type, cleanup, bo);
else if (bo->command == FLOW_BLOCK_UNBIND)
indir_dev_remove(data);
}
list_for_each_entry(this, &flow_block_indr_dev_list, list) {
err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
if (!err)
count++;
}
mutex_unlock(&flow_indr_block_lock);
return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);
bool flow_indr_dev_exists(void)
{
return !list_empty(&flow_block_indr_dev_list);
}
EXPORT_SYMBOL(flow_indr_dev_exists);
] |
| 9/2 | 2026/01/23 15:20 | tool | read-file |
0mArgs:map[File:net/netfilter/nf_tables_offload.c] Results: map[Contents:/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/pkt_cls.h>
static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
{
struct nft_flow_rule *flow;
flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
if (!flow)
return NULL;
flow->rule = flow_rule_alloc(num_actions);
if (!flow->rule) {
kfree(flow);
return NULL;
}
flow->rule->match.dissector = &flow->match.dissector;
flow->rule->match.mask = &flow->match.mask;
flow->rule->match.key = &flow->match.key;
return flow;
}
void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
enum flow_dissector_key_id addr_type)
{
struct nft_flow_match *match = &flow->match;
struct nft_flow_key *mask = &match->mask;
struct nft_flow_key *key = &match->key;
if (match->dissector.used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL))
return;
key->control.addr_type = addr_type;
mask->control.addr_type = 0xffff;
match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] =
offsetof(struct nft_flow_key, control);
}
struct nft_offload_ethertype {
__be16 value;
__be16 mask;
};
static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow)
{
struct nft_flow_match *match = &flow->match;
struct nft_offload_ethertype ethertype = {
.value = match->key.basic.n_proto,
.mask = match->mask.basic.n_proto,
};
if (match->dissector.used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) &&
(match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
match->key.vlan.vlan_tpid = ethertype.value;
match->mask.vlan.vlan_tpid = ethertype.mask;
match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
offsetof(struct nft_flow_key, cvlan);
match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN);
} else if (match->dissector.used_keys &
BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) &&
(match->key.basic.n_proto == htons(ETH_P_8021Q) ||
match->key.basic.n_proto == htons(ETH_P_8021AD))) {
match->key.basic.n_proto = match->key.vlan.vlan_tpid;
match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
match->key.vlan.vlan_tpid = ethertype.value;
match->mask.vlan.vlan_tpid = ethertype.mask;
match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
offsetof(struct nft_flow_key, vlan);
match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_VLAN);
}
}
struct nft_flow_rule *nft_flow_rule_create(struct net *net,
const struct nft_rule *rule)
{
struct nft_offload_ctx *ctx;
struct nft_flow_rule *flow;
int num_actions = 0, err;
struct nft_expr *expr;
expr = nft_expr_first(rule);
while (nft_expr_more(rule, expr)) {
if (expr->ops->offload_action &&
expr->ops->offload_action(expr))
num_actions++;
expr = nft_expr_next(expr);
}
if (num_actions == 0)
return ERR_PTR(-EOPNOTSUPP);
flow = nft_flow_rule_alloc(num_actions);
if (!flow)
return ERR_PTR(-ENOMEM);
expr = nft_expr_first(rule);
ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
if (!ctx) {
err = -ENOMEM;
goto err_out;
}
ctx->net = net;
ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
while (nft_expr_more(rule, expr)) {
if (!expr->ops->offload) {
err = -EOPNOTSUPP;
goto err_out;
}
err = expr->ops->offload(ctx, flow, expr);
if (err < 0)
goto err_out;
expr = nft_expr_next(expr);
}
nft_flow_rule_transfer_vlan(ctx, flow);
flow->proto = ctx->dep.l3num;
kfree(ctx);
return flow;
err_out:
kfree(ctx);
nft_flow_rule_destroy(flow);
return ERR_PTR(err);
}
void nft_flow_rule_destroy(struct nft_flow_rule *flow)
{
struct flow_action_entry *entry;
int i;
flow_action_for_each(i, entry, &flow->rule->action) {
switch (entry->id) {
case FLOW_ACTION_REDIRECT:
case FLOW_ACTION_MIRRED:
dev_put(entry->dev);
break;
default:
break;
}
}
kfree(flow->rule);
kfree(flow);
}
void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
enum nft_offload_dep_type type)
{
ctx->dep.type = type;
}
void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
const void *data, u32 len)
{
switch (ctx->dep.type) {
case NFT_OFFLOAD_DEP_NETWORK:
WARN_ON(len != sizeof(__u16));
memcpy(&ctx->dep.l3num, data, sizeof(__u16));
break;
case NFT_OFFLOAD_DEP_TRANSPORT:
WARN_ON(len != sizeof(__u8));
memcpy(&ctx->dep.protonum, data, sizeof(__u8));
break;
default:
break;
}
ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
}
static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
__be16 proto, int priority,
struct netlink_ext_ack *extack)
{
common->protocol = proto;
common->prio = priority;
common->extack = extack;
}
static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
struct list_head *cb_list)
{
struct flow_block_cb *block_cb;
int err;
list_for_each_entry(block_cb, cb_list, list) {
err = block_cb->cb(type, type_data, block_cb->cb_priv);
if (err < 0)
return err;
}
return 0;
}
static int nft_chain_offload_priority(const struct nft_base_chain *basechain)
{
if (basechain->ops.priority <= 0 ||
basechain->ops.priority > USHRT_MAX)
return -1;
return 0;
}
bool nft_chain_offload_support(const struct nft_base_chain *basechain)
{
struct nf_hook_ops *ops;
struct net_device *dev;
struct nft_hook *hook;
if (nft_chain_offload_priority(basechain) < 0)
return false;
list_for_each_entry(hook, &basechain->hook_list, list) {
list_for_each_entry(ops, &hook->ops_list, list) {
if (ops->pf != NFPROTO_NETDEV ||
ops->hooknum != NF_NETDEV_INGRESS)
return false;
dev = ops->dev;
if (!dev->netdev_ops->ndo_setup_tc &&
!flow_indr_dev_exists())
return false;
}
}
return true;
}
static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
const struct nft_base_chain *basechain,
const struct nft_rule *rule,
const struct nft_flow_rule *flow,
struct netlink_ext_ack *extack,
enum flow_cls_command command)
{
__be16 proto = ETH_P_ALL;
memset(cls_flow, 0, sizeof(*cls_flow));
if (flow)
proto = flow->proto;
nft_flow_offload_common_init(&cls_flow->common, proto,
basechain->ops.priority, extack);
cls_flow->command = command;
cls_flow->cookie = (unsigned long) rule;
if (flow)
cls_flow->rule = flow->rule;
}
static int nft_flow_offload_cmd(const struct nft_chain *chain,
const struct nft_rule *rule,
struct nft_flow_rule *flow,
enum flow_cls_command command,
struct flow_cls_offload *cls_flow)
{
struct netlink_ext_ack extack = {};
struct nft_base_chain *basechain;
if (!nft_is_base_chain(chain))
return -EOPNOTSUPP;
basechain = nft_base_chain(chain);
nft_flow_cls_offload_setup(cls_flow, basechain, rule, flow, &extack,
command);
return nft_setup_cb_call(TC_SETUP_CLSFLOWER, cls_flow,
&basechain->flow_block.cb_list);
}
static int nft_flow_offload_rule(const struct nft_chain *chain,
struct nft_rule *rule,
struct nft_flow_rule *flow,
enum flow_cls_command command)
{
struct flow_cls_offload cls_flow;
return nft_flow_offload_cmd(chain, rule, flow, command, &cls_flow);
}
int nft_flow_rule_stats(const struct nft_chain *chain,
const struct nft_rule *rule)
{
struct flow_cls_offload cls_flow = {};
struct nft_expr *expr, *next;
int err;
err = nft_flow_offload_cmd(chain, rule, NULL, FLOW_CLS_STATS,
&cls_flow);
if (err < 0)
return err;
nft_rule_for_each_expr(expr, next, rule) {
if (expr->ops->offload_stats)
expr->ops->offload_stats(expr, &cls_flow.stats);
}
return 0;
}
static int nft_flow_offload_bind(struct flow_block_offload *bo,
struct nft_base_chain *basechain)
{
list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
return 0;
}
static int nft_flow_offload_unbind(struct flow_block_offload *bo,
struct nft_base_chain *basechain)
{
struct flow_block_cb *block_cb, *next;
struct flow_cls_offload cls_flow;
struct netlink_ext_ack extack;
struct nft_chain *chain;
struct nft_rule *rule;
chain = &basechain->chain;
list_for_each_entry(rule, &chain->rules, list) {
memset(&extack, 0, sizeof(extack));
nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
&extack, FLOW_CLS_DESTROY);
nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
}
list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
list_del(&block_cb->list);
flow_block_cb_free(block_cb);
}
return 0;
}
static int nft_block_setup(struct nft_base_chain *basechain,
struct flow_block_offload *bo,
enum flow_block_command cmd)
{
int err;
switch (cmd) {
case FLOW_BLOCK_BIND:
err = nft_flow_offload_bind(bo, basechain);
break;
case FLOW_BLOCK_UNBIND:
err = nft_flow_offload_unbind(bo, basechain);
break;
default:
WARN_ON_ONCE(1);
err = -EOPNOTSUPP;
}
return err;
}
static void nft_flow_block_offload_init(struct flow_block_offload *bo,
struct net *net,
enum flow_block_command cmd,
struct nft_base_chain *basechain,
struct netlink_ext_ack *extack)
{
memset(bo, 0, sizeof(*bo));
bo->net = net;
bo->block = &basechain->flow_block;
bo->command = cmd;
bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
bo->extack = extack;
bo->cb_list_head = &basechain->flow_block.cb_list;
INIT_LIST_HEAD(&bo->cb_list);
}
static int nft_block_offload_cmd(struct nft_base_chain *chain,
struct net_device *dev,
enum flow_block_command cmd)
{
struct netlink_ext_ack extack = {};
struct flow_block_offload bo;
int err;
nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
if (err < 0)
return err;
return nft_block_setup(chain, &bo, cmd);
}
static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
{
struct nft_base_chain *basechain = block_cb->indr.data;
struct net_device *dev = block_cb->indr.dev;
struct netlink_ext_ack extack = {};
struct nftables_pernet *nft_net;
struct net *net = dev_net(dev);
struct flow_block_offload bo;
nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
basechain, &extack);
nft_net = nft_pernet(net);
mutex_lock(&nft_net->commit_mutex);
list_del(&block_cb->driver_list);
list_move(&block_cb->list, &bo.cb_list);
nft_flow_offload_unbind(&bo, basechain);
mutex_unlock(&nft_net->commit_mutex);
}
static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
struct net_device *dev,
enum flow_block_command cmd)
{
struct netlink_ext_ack extack = {};
struct flow_block_offload bo;
int err;
nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);
err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
nft_indr_block_cleanup);
if (err < 0)
return err;
if (list_empty(&bo.cb_list))
return -EOPNOTSUPP;
return nft_block_setup(basechain, &bo, cmd);
}
static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
struct net_device *dev,
enum flow_block_command cmd)
{
int err;
if (dev->netdev_ops->ndo_setup_tc)
err = nft_block_offload_cmd(basechain, dev, cmd);
else
err = nft_indr_block_offload_cmd(basechain, dev, cmd);
return err;
}
static int nft_flow_block_chain(struct nft_base_chain *basechain,
const struct net_device *this_dev,
enum flow_block_command cmd)
{
struct nf_hook_ops *ops;
struct nft_hook *hook;
int err, i = 0;
list_for_each_entry(hook, &basechain->hook_list, list) {
list_for_each_entry(ops, &hook->ops_list, list) {
if (this_dev && this_dev != ops->dev)
continue;
err = nft_chain_offload_cmd(basechain, ops->dev, cmd);
if (err < 0 && cmd == FLOW_BLOCK_BIND) {
if (!this_dev)
goto err_flow_block;
return err;
}
i++;
}
}
return 0;
err_flow_block:
list_for_each_entry(hook, &basechain->hook_list, list) {
list_for_each_entry(ops, &hook->ops_list, list) {
if (i-- <= 0)
break;
nft_chain_offload_cmd(basechain, ops->dev,
FLOW_BLOCK_UNBIND);
}
}
return err;
}
static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
enum flow_block_command cmd)
{
struct nft_base_chain *basechain;
u8 policy;
if (!nft_is_base_chain(chain))
return -EOPNOTSUPP;
basechain = nft_base_chain(chain);
policy = ppolicy ? *ppolicy : basechain->policy;
/* Only default policy to accept is supported for now. */
if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
return -EOPNOTSUPP;
return nft_flow_block_chain(basechain, NULL, cmd);
}
static void nft_flow_rule_offload_abort(struct net *net,
struct nft_trans *trans)
{
struct nftables_pernet *nft_net = nft_pernet(net);
int err = 0;
list_for_each_entry_continue_reverse(trans, &nft_net->commit_list, list) {
if (trans->table->family != NFPROTO_NETDEV)
continue;
switch (trans->msg_type) {
case NFT_MSG_NEWCHAIN:
if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) ||
nft_trans_chain_update(trans))
continue;
err = nft_flow_offload_chain(nft_trans_chain(trans), NULL,
FLOW_BLOCK_UNBIND);
break;
case NFT_MSG_DELCHAIN:
if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
err = nft_flow_offload_chain(nft_trans_chain(trans), NULL,
FLOW_BLOCK_BIND);
break;
case NFT_MSG_NEWRULE:
if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
nft_trans_rule(trans),
NULL, FLOW_CLS_DESTROY);
break;
case NFT_MSG_DELRULE:
if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
nft_trans_rule(trans),
nft_trans_flow_rule(trans),
FLOW_CLS_REPLACE);
break;
}
if (WARN_ON_ONCE(err))
break;
}
}
int nft_flow_rule_offload_commit(struct net *net)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct nft_trans *trans;
int err = 0;
u8 policy;
list_for_each_entry(trans, &nft_net->commit_list, list) {
if (trans->table->family != NFPROTO_NETDEV)
continue;
switch (trans->msg_type) {
case NFT_MSG_NEWCHAIN:
if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) ||
nft_trans_chain_update(trans))
continue;
policy = nft_trans_chain_policy(trans);
err = nft_flow_offload_chain(nft_trans_chain(trans), &policy,
FLOW_BLOCK_BIND);
break;
case NFT_MSG_DELCHAIN:
if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
policy = nft_trans_chain_policy(trans);
err = nft_flow_offload_chain(nft_trans_chain(trans), &policy,
FLOW_BLOCK_UNBIND);
break;
case NFT_MSG_NEWRULE:
if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
if (trans->flags & NLM_F_REPLACE ||
!(trans->flags & NLM_F_APPEND)) {
err = -EOPNOTSUPP;
break;
}
err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
nft_trans_rule(trans),
nft_trans_flow_rule(trans),
FLOW_CLS_REPLACE);
break;
case NFT_MSG_DELRULE:
if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
nft_trans_rule(trans),
NULL, FLOW_CLS_DESTROY);
break;
}
if (err) {
nft_flow_rule_offload_abort(net, trans);
break;
}
}
return err;
}
static struct nft_chain *__nft_offload_get_chain(const struct nftables_pernet *nft_net,
struct net_device *dev)
{
struct nft_base_chain *basechain;
struct nft_hook *hook, *found;
const struct nft_table *table;
struct nft_chain *chain;
list_for_each_entry(table, &nft_net->tables, list) {
if (table->family != NFPROTO_NETDEV)
continue;
list_for_each_entry(chain, &table->chains, list) {
if (!nft_is_base_chain(chain) ||
!(chain->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
found = NULL;
basechain = nft_base_chain(chain);
list_for_each_entry(hook, &basechain->hook_list, list) {
if (!nft_hook_find_ops(hook, dev))
continue;
found = hook;
break;
}
if (!found)
continue;
return chain;
}
}
return NULL;
}
static int nft_offload_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct nftables_pernet *nft_net;
struct net *net = dev_net(dev);
struct nft_chain *chain;
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
nft_net = nft_pernet(net);
mutex_lock(&nft_net->commit_mutex);
chain = __nft_offload_get_chain(nft_net, dev);
if (chain)
nft_flow_block_chain(nft_base_chain(chain), dev,
FLOW_BLOCK_UNBIND);
mutex_unlock(&nft_net->commit_mutex);
return NOTIFY_DONE;
}
static struct notifier_block nft_offload_netdev_notifier = {
.notifier_call = nft_offload_netdev_event,
};
int nft_offload_init(void)
{
return register_netdevice_notifier(&nft_offload_netdev_notifier);
}
void nft_offload_exit(void)
{
unregister_netdevice_notifier(&nft_offload_netdev_notifier);
}
] |
| 10/2 | 2026/01/23 15:20 | llm | debugger |
Model:gemini-3-pro-preview |
| Total Calls | Total Tokens | Avg Tokens | Total Duration (Seconds) | Avg Duration (Seconds) |
|---|
| Total Calls | Total Duration (Seconds) | Avg Duration (Seconds) |
|---|