@@ -264,7 +264,7 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
         if (unlikely(mlx5e_page_alloc_mapped(rq, di)))
                 return -ENOMEM;
 
-        wqe->data.addr = cpu_to_be64(di->addr + MLX5_RX_HEADROOM);
+        wqe->data.addr = cpu_to_be64(di->addr + rq->rx_headroom);
         return 0;
 }
 
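The fixed MLX5_RX_HEADROOM constant is replaced by a per-RQ rq->rx_headroom, so the space reserved in front of each receive buffer can be larger when an XDP program is attached and may grow the packet at the front. A minimal sketch of how such a headroom could be chosen at ring setup time, assuming the generic XDP_PACKET_HEADROOM reservation; the helper name below is illustrative and not part of this patch:

    /* Illustrative only (not from this patch): reserve the larger
     * XDP_PACKET_HEADROOM when an XDP program is attached, so the
     * program has room to push data in front of the frame. */
    static u16 mlx5e_pick_rx_headroom(const struct bpf_prog *xdp_prog)
    {
            return xdp_prog ? XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
    }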
@@ -646,8 +646,7 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
 
 static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
                                         struct mlx5e_dma_info *di,
-                                        unsigned int data_offset,
-                                        int len)
+                                        const struct xdp_buff *xdp)
 {
         struct mlx5e_sq          *sq   = &rq->channel->xdp_sq;
         struct mlx5_wq_cyc       *wq   = &sq->wq;
@@ -659,9 +658,16 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
         struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
         struct mlx5_wqe_data_seg *dseg;
 
+        ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
         dma_addr_t dma_addr  = di->addr + data_offset + MLX5E_XDP_MIN_INLINE;
-        unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE;
-        void *data           = page_address(di->page) + data_offset;
+        unsigned int dma_len = xdp->data_end - xdp->data;
+
+        if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||
+                     MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) {
+                rq->stats.xdp_drop++;
+                mlx5e_page_release(rq, di, true);
+                return;
+        }
 
         if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
                 if (sq->db.xdp.doorbell) {
@@ -674,13 +680,14 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
                 return;
         }
 
+        dma_len -= MLX5E_XDP_MIN_INLINE;
         dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
                                    PCI_DMA_TODEVICE);
 
         memset(wqe, 0, sizeof(*wqe));
 
         /* copy the inline part */
-        memcpy(eseg->inline_hdr_start, data, MLX5E_XDP_MIN_INLINE);
+        memcpy(eseg->inline_hdr_start, xdp->data, MLX5E_XDP_MIN_INLINE);
         eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
 
         dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1);
@@ -703,25 +710,29 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 }
 
 /* returns true if packet was consumed by xdp */
-static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
-                                    const struct bpf_prog *prog,
-                                    struct mlx5e_dma_info *di,
-                                    void *data, u16 len)
+static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
+                                   struct mlx5e_dma_info *di,
+                                   void *va, u16 *rx_headroom, u32 *len)
 {
+        const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
         struct xdp_buff xdp;
         u32 act;
 
         if (!prog)
                 return false;
 
-        xdp.data = data;
-        xdp.data_end = xdp.data + len;
+        xdp.data = va + *rx_headroom;
+        xdp.data_end = xdp.data + *len;
+        xdp.data_hard_start = va;
+
         act = bpf_prog_run_xdp(prog, &xdp);
         switch (act) {
         case XDP_PASS:
+                *rx_headroom = xdp.data - xdp.data_hard_start;
+                *len = xdp.data_end - xdp.data;
                 return false;
         case XDP_TX:
-                mlx5e_xmit_xdp_frame(rq, di, MLX5_RX_HEADROOM, len);
+                mlx5e_xmit_xdp_frame(rq, di, &xdp);
                 return true;
         default:
                 bpf_warn_invalid_xdp_action(act);
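Setting xdp.data_hard_start and writing the adjusted data/data_end back through *rx_headroom and *len on XDP_PASS is what allows a program to move the start of the packet, e.g. with the bpf_xdp_adjust_head() helper. A minimal sketch of such a program, assuming libbpf-style SEC() macros; the 4-byte tag and program name are purely illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_push_tag(struct xdp_md *ctx)
    {
            /* Grow the packet at the front by 4 bytes; returns non-zero
             * if the driver did not reserve enough headroom. */
            if (bpf_xdp_adjust_head(ctx, -4))
                    return XDP_DROP;

            void *data     = (void *)(long)ctx->data;
            void *data_end = (void *)(long)ctx->data_end;

            /* Bounds check required by the verifier before writing. */
            if (data + 4 > data_end)
                    return XDP_DROP;

            __builtin_memset(data, 0, 4);   /* hypothetical 4-byte tag */
            return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";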
@@ -740,15 +751,16 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
         struct mlx5e_dma_info *di;
         struct sk_buff *skb;
         void *va, *data;
+        u16 rx_headroom = rq->rx_headroom;
         bool consumed;
 
         di             = &rq->dma_info[wqe_counter];
         va             = page_address(di->page);
-        data           = va + MLX5_RX_HEADROOM;
+        data           = va + rx_headroom;
 
         dma_sync_single_range_for_cpu(rq->pdev,
                                       di->addr,
-                                      MLX5_RX_HEADROOM,
+                                      rx_headroom,
                                       rq->buff.wqe_sz,
                                       DMA_FROM_DEVICE);
         prefetch(data);
@@ -760,8 +772,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
         }
 
         rcu_read_lock();
-        consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data,
-                                    cqe_bcnt);
+        consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt);
         rcu_read_unlock();
         if (consumed)
                 return NULL; /* page/packet was consumed by XDP */
@@ -777,7 +788,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
         page_ref_inc(di->page);
         mlx5e_page_release(rq, di, true);
 
-        skb_reserve(skb, MLX5_RX_HEADROOM);
+        skb_reserve(skb, rx_headroom);
         skb_put(skb, cqe_bcnt);
 
         return skb;