@@ -48,6 +48,7 @@ struct mlx5e_sq_param {
 	u32                        sqc[MLX5_ST_SZ_DW(sqc)];
 	struct mlx5_wq_param       wq;
 	u16                        max_inline;
+	bool                       icosq;
 };
 
 struct mlx5e_cq_param {
@@ -59,8 +60,10 @@ struct mlx5e_cq_param {
 struct mlx5e_channel_param {
 	struct mlx5e_rq_param      rq;
 	struct mlx5e_sq_param      sq;
+	struct mlx5e_sq_param      icosq;
 	struct mlx5e_cq_param      rx_cq;
 	struct mlx5e_cq_param      tx_cq;
+	struct mlx5e_cq_param      icosq_cq;
 };
 
 static void mlx5e_update_carrier(struct mlx5e_priv *priv)
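
Note: the bookkeeping behind the new icosq flag lives in the en.h half of this series, which is not shown in this file. From the way sq->ico_wqe_info[pi] is filled in further down, the header side presumably looks roughly like the sketch below; the field names are inferred from the usage here, not copied from the patch.

/* Sketch only: per-WQE bookkeeping the icosq relies on (companion en.h change). */
struct mlx5e_ico_wqe_info {
	u8 opcode;	/* e.g. MLX5_OPCODE_NOP */
	u8 num_wqebbs;	/* WQEBBs to release when the completion arrives */
};
/* struct mlx5e_sq is assumed to gain:
 *	struct mlx5e_ico_wqe_info *ico_wqe_info;   (stays NULL for data SQs)
 * and struct mlx5e_channel an icosq/icosq.cq pair used by the RX path.
 */
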
@@ -502,6 +505,8 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
 			  struct mlx5e_rq_param *param,
 			  struct mlx5e_rq *rq)
 {
+	struct mlx5e_sq *sq = &c->icosq;
+	u16 pi = sq->pc & sq->wq.sz_m1;
 	int err;
 
 	err = mlx5e_create_rq(c, param, rq);
@@ -517,7 +522,10 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
 		goto err_disable_rq;
 
 	set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
-	mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */
+
+	sq->ico_wqe_info[pi].opcode     = MLX5_OPCODE_NOP;
+	sq->ico_wqe_info[pi].num_wqebbs = 1;
+	mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
 
 	return 0;
 
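
Note: mlx5e_open_rq() no longer kicks RX buffer posting through the first data SQ. It records a NOP in the next free icosq slot (pi is the producer counter masked by the cyclic WQ size) and rings the doorbell with mlx5e_send_nop(); the NOP's completion on the icosq CQ is what drives mlx5e_post_rx_wqes(). A rough sketch of how the completion side is expected to consume that bookkeeping follows; the real handler belongs to the en_rx/en_txrx half of the series and the function name here is illustrative only.

/* Illustrative only: consume one icosq completion and act on its opcode. */
static void mlx5e_poll_ico_cqe_sketch(struct mlx5e_sq *sq, u16 wqe_counter)
{
	u16 ci = wqe_counter & sq->wq.sz_m1;	/* wrap like the producer side */
	struct mlx5e_ico_wqe_info *wi = &sq->ico_wqe_info[ci];

	sq->cc += wi->num_wqebbs;		/* release the consumed WQEBBs */

	if (wi->opcode == MLX5_OPCODE_NOP)	/* the kick posted in mlx5e_open_rq() */
		mlx5e_post_rx_wqes(&sq->channel->rq);
}
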
@@ -583,7 +591,6 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 
 	void *sqc = param->sqc;
 	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
-	int txq_ix;
 	int err;
 
 	err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
@@ -611,8 +618,24 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	if (err)
 		goto err_sq_wq_destroy;
 
-	txq_ix = c->ix + tc * priv->params.num_channels;
-	sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
+	if (param->icosq) {
+		u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+
+		sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) *
+						wq_sz,
+						GFP_KERNEL,
+						cpu_to_node(c->cpu));
+		if (!sq->ico_wqe_info) {
+			err = -ENOMEM;
+			goto err_free_sq_db;
+		}
+	} else {
+		int txq_ix;
+
+		txq_ix = c->ix + tc * priv->params.num_channels;
+		sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
+		priv->txq_to_sq_map[txq_ix] = sq;
+	}
 
 	sq->pdev      = c->pdev;
 	sq->tstamp    = &priv->tstamp;
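
Note: for an icosq, mlx5e_create_sq() sizes one bookkeeping entry per WQEBB of the cyclic WQ and skips the netdev txq wiring, which only data SQs need. The open-coded multiply is equivalent to a kcalloc_node(); a small equivalence sketch, not what the patch itself uses:

/* Equivalent allocation: one entry per WQEBB, on the channel's NUMA node. */
sq->ico_wqe_info = kcalloc_node(wq_sz, sizeof(*sq->ico_wqe_info),
				GFP_KERNEL, cpu_to_node(c->cpu));
if (!sq->ico_wqe_info)
	return -ENOMEM;	/* the patch jumps to err_free_sq_db instead */
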
@@ -621,10 +644,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	sq->tc        = tc;
 	sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
 	sq->bf_budget = MLX5E_SQ_BF_BUDGET;
-	priv->txq_to_sq_map[txq_ix] = sq;
 
 	return 0;
 
+err_free_sq_db:
+	mlx5e_free_sq_db(sq);
+
 err_sq_wq_destroy:
 	mlx5_wq_destroy(&sq->wq_ctrl);
 
@@ -639,6 +664,7 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
 	struct mlx5e_channel *c = sq->channel;
 	struct mlx5e_priv *priv = c->priv;
 
+	kfree(sq->ico_wqe_info);
 	mlx5e_free_sq_db(sq);
 	mlx5_wq_destroy(&sq->wq_ctrl);
 	mlx5_unmap_free_uar(priv->mdev, &sq->uar);
@@ -667,10 +693,10 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
 
 	memcpy(sqc, param->sqc, sizeof(param->sqc));
 
-	MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
-	MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
+	MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
+	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
 	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
-	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
+	MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 0 : 1);
 	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
 
 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
@@ -745,9 +771,11 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
 	if (err)
 		goto err_disable_sq;
 
-	set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
-	netdev_tx_reset_queue(sq->txq);
-	netif_tx_start_queue(sq->txq);
+	if (sq->txq) {
+		set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+		netdev_tx_reset_queue(sq->txq);
+		netif_tx_start_queue(sq->txq);
+	}
 
 	return 0;
 
@@ -768,15 +796,19 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
 
 static void mlx5e_close_sq(struct mlx5e_sq *sq)
 {
-	clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
-	napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
-	netif_tx_disable_queue(sq->txq);
+	if (sq->txq) {
+		clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+		/* prevent netif_tx_wake_queue */
+		napi_synchronize(&sq->channel->napi);
+		netif_tx_disable_queue(sq->txq);
 
-	/* ensure hw is notified of all pending wqes */
-	if (mlx5e_sq_has_room_for(sq, 1))
-		mlx5e_send_nop(sq, true);
+		/* ensure hw is notified of all pending wqes */
+		if (mlx5e_sq_has_room_for(sq, 1))
+			mlx5e_send_nop(sq, true);
+
+		mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+	}
 
-	mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
 	while (sq->cc != sq->pc) /* wait till sq is empty */
 		msleep(20);
 
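
Note: because the channel structure is zero-allocated, an SQ created with param->icosq set never gets a txq, so sq->txq == NULL becomes the runtime marker that separates the internal control SQ from data SQs in mlx5e_open_sq() and mlx5e_close_sq(). A hypothetical helper that merely names that invariant (the patch open-codes the check):

/* Hypothetical: not added by this patch, shown only to state the invariant. */
static inline bool mlx5e_sq_is_ico(const struct mlx5e_sq *sq)
{
	return !sq->txq;	/* an icosq carries no netdev TX queue */
}
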
@@ -1030,10 +1062,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 
 	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
 
-	err = mlx5e_open_tx_cqs(c, cparam);
+	err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0);
 	if (err)
 		goto err_napi_del;
 
+	err = mlx5e_open_tx_cqs(c, cparam);
+	if (err)
+		goto err_close_icosq_cq;
+
 	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
 			    priv->params.rx_cq_moderation_usec,
 			    priv->params.rx_cq_moderation_pkts);
@@ -1042,10 +1078,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 
 	napi_enable(&c->napi);
 
-	err = mlx5e_open_sqs(c, cparam);
+	err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
 	if (err)
 		goto err_disable_napi;
 
+	err = mlx5e_open_sqs(c, cparam);
+	if (err)
+		goto err_close_icosq;
+
 	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
 	if (err)
 		goto err_close_sqs;
@@ -1058,13 +1098,19 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 err_close_sqs:
 	mlx5e_close_sqs(c);
 
+err_close_icosq:
+	mlx5e_close_sq(&c->icosq);
+
 err_disable_napi:
 	napi_disable(&c->napi);
 	mlx5e_close_cq(&c->rq.cq);
 
 err_close_tx_cqs:
 	mlx5e_close_tx_cqs(c);
 
+err_close_icosq_cq:
+	mlx5e_close_cq(&c->icosq.cq);
+
 err_napi_del:
 	netif_napi_del(&c->napi);
 	napi_hash_del(&c->napi);
@@ -1077,9 +1123,11 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
 {
 	mlx5e_close_rq(&c->rq);
 	mlx5e_close_sqs(c);
+	mlx5e_close_sq(&c->icosq);
 	napi_disable(&c->napi);
 	mlx5e_close_cq(&c->rq.cq);
 	mlx5e_close_tx_cqs(c);
+	mlx5e_close_cq(&c->icosq.cq);
 	netif_napi_del(&c->napi);
 
 	napi_hash_del(&c->napi);
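
Note: channel bring-up and teardown now bracket the data path with the icosq resources: its CQ is opened before the TX CQs, its SQ before the data SQs, and the error/close paths release them in mirror order. Condensed open order with error handling elided (a summary of the hunks above, not patch code):

/* Summary sketch of mlx5e_open_channel() ordering after this patch. */
err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0);	/* icosq CQ, no moderation */
err = mlx5e_open_tx_cqs(c, cparam);				/* then TX CQs */
err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq, ...);		/* then the RX CQ */
napi_enable(&c->napi);
err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);		/* icosq before data SQs */
err = mlx5e_open_sqs(c, cparam);
err = mlx5e_open_rq(c, &cparam->rq, &c->rq);			/* RQ last: posts the icosq NOP */
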
@@ -1125,17 +1173,27 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
 	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
 }
 
-static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
-				 struct mlx5e_sq_param *param)
+static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
+					struct mlx5e_sq_param *param)
 {
 	void *sqc = param->sqc;
 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
 
-	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
 	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
 	MLX5_SET(wq, wq, pd, priv->pdn);
 
 	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+}
+
+static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
+				 struct mlx5e_sq_param *param)
+{
+	void *sqc = param->sqc;
+	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+	mlx5e_build_sq_param_common(priv, param);
+	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
+
 	param->max_inline = priv->params.tx_max_inline;
 }
 
@@ -1172,20 +1230,49 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
 {
 	void *cqc = param->cqc;
 
-	MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_sq_size);
+	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
 
 	mlx5e_build_common_cq_param(priv, param);
 }
 
+static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
+				     struct mlx5e_cq_param *param,
+				     u8 log_wq_size)
+{
+	void *cqc = param->cqc;
+
+	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
+
+	mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
+				    struct mlx5e_sq_param *param,
+				    u8 log_wq_size)
+{
+	void *sqc = param->sqc;
+	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+	mlx5e_build_sq_param_common(priv, param);
+
+	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
+
+	param->icosq = true;
+}
+
 static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
 				      struct mlx5e_channel_param *cparam)
 {
+	u8 icosq_log_wq_sz = 0;
+
 	memset(cparam, 0, sizeof(*cparam));
 
 	mlx5e_build_rq_param(priv, &cparam->rq);
 	mlx5e_build_sq_param(priv, &cparam->sq);
+	mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
 	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
 	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
+	mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
 }
 
 static int mlx5e_open_channels(struct mlx5e_priv *priv)
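
Note: the icosq and its CQ are sized from the same log2 value, and this revision passes 0, i.e. 2^0 = 1 WQEBB. A worked sizing example, assuming the usual power-of-two cyclic WQ layout used elsewhere in this driver:

/* Sizing arithmetic for the values passed in mlx5e_build_channel_param(). */
u8  icosq_log_wq_sz = 0;			/* as in the hunk above */
u16 icosq_wq_sz     = 1 << icosq_log_wq_sz;	/* 1 WQEBB in the icosq */
u32 icosq_cqe_cnt   = 1 << icosq_log_wq_sz;	/* log_cq_size gets the same value */
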