@@ -52,6 +52,12 @@ struct vxlanhdr {
 	u32 vni;
 };
 
+// Only the first byte of the Geneve header is needed (to read opt_len);
+// mirror the kernel's bitfield layout, which depends on target endianness.
+struct genevehdr {
+#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	u8 opt_len:6;
+	u8 ver:2;
+#else
+	u8 ver:2;
+	u8 opt_len:6;
+#endif
+};
+
 struct skb_meta {
 	u32 netns;
 	u32 mark;
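For context on the one-byte struct above: in the Geneve header (RFC 8926) the first byte carries a 2-bit version followed by a 6-bit option length, counted in 4-byte multiples on top of the 8-byte fixed header. A minimal user-space sketch of the same parse using explicit bit operations (the helper names are illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: extract the fields of the first Geneve header byte.
 * On the wire the top two bits are Ver, the low six bits are Opt Len. */
static inline uint8_t geneve_ver(uint8_t b)     { return b >> 6; }
static inline uint8_t geneve_opt_len(uint8_t b) { return b & 0x3f; }

/* Total tunnel header length: 8-byte fixed header + opt_len 4-byte words. */
static inline uint16_t geneve_hdr_len(uint8_t b)
{
	return 8 + (uint16_t)geneve_opt_len(b) * 4;
}

int main(void)
{
	uint8_t first_byte = 0x02; /* Ver 0, Opt Len 2 -> 8 bytes of options */
	printf("ver=%u hdr_len=%u\n", geneve_ver(first_byte), geneve_hdr_len(first_byte));
	return 0;
}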
@@ -283,6 +289,36 @@ filter_pcap_ebpf_tunnel_l3(void *_skb, void *__skb, void *___skb, void *data, vo
 	return data != data_end && _skb == __skb && __skb == ___skb;
 }
 
+#define VXLAN_PORT1_NET 6177  // htons(8472), the Linux default VXLAN port.
+#define VXLAN_PORT2_NET 46354 // htons(4789), the IANA-assigned VXLAN port.
+#define GENEVE_PORT_NET 49431 // htons(6081), the IANA-assigned Geneve port.
+
+// Return the length of the tunnel header that starts at l4_data_off (right
+// after the outer UDP header), or -1 if the packet is not recognized as
+// VXLAN or Geneve.
+static __always_inline s16
+__tunnel_hdr_len(void *skb_head, u16 sport, u16 dport, u16 l4_data_off) {
+	s16 tunnel_hdr_len;
+	if (dport == GENEVE_PORT_NET || sport == GENEVE_PORT_NET) {
+		struct genevehdr gh;
+		if (bpf_probe_read_kernel(&gh, sizeof(struct genevehdr), skb_head + l4_data_off) != 0) {
+			return -1;
+		}
+		// Geneve: 8-byte fixed header plus opt_len 4-byte words of options.
+		tunnel_hdr_len = gh.opt_len * 4 + 8;
+	} else if (dport == VXLAN_PORT1_NET || sport == VXLAN_PORT1_NET ||
+		   dport == VXLAN_PORT2_NET || sport == VXLAN_PORT2_NET) {
+		struct vxlanhdr vxh = {};
+		if (bpf_probe_read_kernel(&vxh, sizeof(struct vxlanhdr), skb_head + l4_data_off) != 0) {
+			return -1;
+		}
+		tunnel_hdr_len = sizeof(struct vxlanhdr);
+		// A valid VXLAN header has exactly the I (VNI valid) flag set.
+		if (vxh.flags != 0x8) {
+			return -1;
+		}
+	} else {
+		return -1;
+	}
+	return tunnel_hdr_len;
+}
+
 static __always_inline bool
 filter_pcap_tunnel_l2(struct sk_buff *skb)
 {
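The port constants introduced above are pre-swapped into network byte order so the comparison against the UDP source/dest fields (which the kernel stores in network order) needs no per-packet bpf_htons(). A quick user-space sanity check of the arithmetic, assuming a little-endian host (not part of the patch):

#include <arpa/inet.h>
#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* On a little-endian host htons() swaps the two bytes, so these
	 * match the numeric constants used in the BPF code. */
	assert(htons(8472) == 6177);  /* VXLAN_PORT1_NET: Linux default VXLAN port */
	assert(htons(4789) == 46354); /* VXLAN_PORT2_NET: IANA-assigned VXLAN port */
	assert(htons(6081) == 49431); /* GENEVE_PORT_NET: IANA-assigned Geneve port */
	printf("port constants match network byte order\n");
	return 0;
}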
@@ -296,17 +332,19 @@ filter_pcap_tunnel_l2(struct sk_buff *skb)
 	if (BPF_CORE_READ(ip4, protocol) != IPPROTO_UDP) {
 		return true;
 	}
+
+	struct udphdr *udp = (struct udphdr *) (data + l4_off);
 
-	struct vxlanhdr vxh = {};
-	if (bpf_probe_read_kernel(&vxh, sizeof(struct vxlanhdr), data + l4_off + 8) != 0) {
-		return true;
-	}
-
-	if (vxh.flags != 8) {
+	s16 tunnel_hdr_len = __tunnel_hdr_len(
+		skb_head,
+		BPF_CORE_READ(udp, source),
+		BPF_CORE_READ(udp, dest),
+		l4_off + sizeof(struct udphdr));
+
+	if (tunnel_hdr_len == -1)
 		return true;
-	}
 
-	data = (void *) (data + l4_off + 8 + 8);
+	data = (void *) (data + l4_off + sizeof(struct udphdr) + tunnel_hdr_len);
 	struct ethhdr *eth = (struct ethhdr *) data;
 	if (BPF_CORE_READ(eth, h_proto) != bpf_htons(ETH_P_IP))
 		return false;
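To make the offset arithmetic in the hunk above concrete: after the outer UDP header the filter skips the tunnel header returned by __tunnel_hdr_len() and lands on the inner Ethernet header. A small user-space sketch of the same walk (the function name is illustrative; the example offset assumes l4_off is measured from an untagged Ethernet header followed by an IPv4 header without options):

#include <stdint.h>
#include <stdio.h>

/* Offset of the inner Ethernet header relative to the same base as l4_off:
 * outer UDP header (8 bytes) + tunnel header length as returned by
 * __tunnel_hdr_len() (8 for VXLAN, 8 + opt_len * 4 for Geneve). */
static inline uint32_t inner_eth_off(uint16_t l4_off, int16_t tunnel_hdr_len)
{
	return (uint32_t)l4_off + 8 + (uint32_t)tunnel_hdr_len;
}

int main(void)
{
	/* e.g. l4_off = 14 (Ethernet) + 20 (IPv4, no options) = 34 */
	printf("vxlan : inner eth at %u\n", inner_eth_off(34, 8));         /* 50 */
	printf("geneve: inner eth at %u\n", inner_eth_off(34, 8 + 2 * 4)); /* 58 */
	return 0;
}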
@@ -427,17 +465,32 @@ set_tuple(struct sk_buff *skb, struct tuple *tpl, struct tuple *tunnel_tpl, stru
 }
 
 static __always_inline void
-set_tunnel(struct sk_buff *skb, struct tuple *tunnel_tpl, struct l2tuple *l2_tuple, u16 l4_data_off) {
+set_tunnel(struct sk_buff *skb, struct tuple *tpl, struct tuple *tunnel_tpl, struct l2tuple *l2_tuple, u16 l4_data_off) {
 	void *skb_head = BPF_CORE_READ(skb, head);
-	struct vxlanhdr vxh = {};
-	if (bpf_probe_read_kernel(&vxh, sizeof(struct vxlanhdr), skb_head + l4_data_off) != 0) {
-		return;
-	}
 
-	if (vxh.flags != 0x8)
+	s16 tunnel_hdr_len = __tunnel_hdr_len(skb_head, tpl->sport, tpl->dport, l4_data_off);
+	if (tunnel_hdr_len == -1)
 		return;
 
-	struct ethhdr *inner = (struct ethhdr *) (skb_head + l4_data_off + sizeof(struct vxlanhdr));
+	struct ethhdr *inner = (struct ethhdr *) (skb_head + l4_data_off + tunnel_hdr_len);
 	BPF_CORE_READ_INTO(&l2_tuple->src, inner, h_source);
 	BPF_CORE_READ_INTO(&l2_tuple->dest, inner, h_dest);
 
@@ -550,7 +603,7 @@ set_output(void *ctx, struct sk_buff *skb, struct event_t *event) {
 	if (cfg->output_tuple) {
 		s16 l4_off = set_tuple(skb, &event->tuple, &event->tunnel_tuple, &event->l2_tuple, cfg->output_tunnel);
 		if (cfg->output_tunnel && l4_off != -1 && event->tuple.l4_proto == IPPROTO_UDP) {
-			set_tunnel(skb, &event->tunnel_tuple, &event->l2_tuple, l4_off);
+			set_tunnel(skb, &event->tuple, &event->tunnel_tuple, &event->l2_tuple, l4_off);
 		}
 	}
 