@@ -96,6 +96,38 @@ void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr,
 			 mp_opt->rcvr_key, mp_opt->data_len);
 		break;
 
+	case MPTCPOPT_MP_JOIN:
+		mp_opt->mp_join = 1;
+		if (opsize == TCPOLEN_MPTCP_MPJ_SYN) {
+			mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
+			mp_opt->join_id = *ptr++;
+			mp_opt->token = get_unaligned_be32(ptr);
+			ptr += 4;
+			mp_opt->nonce = get_unaligned_be32(ptr);
+			ptr += 4;
+			pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
+				 mp_opt->backup, mp_opt->join_id,
+				 mp_opt->token, mp_opt->nonce);
+		} else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
+			mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
+			mp_opt->join_id = *ptr++;
+			mp_opt->thmac = get_unaligned_be64(ptr);
+			ptr += 8;
+			mp_opt->nonce = get_unaligned_be32(ptr);
+			ptr += 4;
+			pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
+				 mp_opt->backup, mp_opt->join_id,
+				 mp_opt->thmac, mp_opt->nonce);
+		} else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
+			ptr += 2;
+			memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
+			pr_debug("MP_JOIN hmac");
+		} else {
+			pr_warn("MP_JOIN bad option size");
+			mp_opt->mp_join = 0;
+		}
+		break;
+
 	case MPTCPOPT_DSS:
 		pr_debug("DSS");
 		ptr++;
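
Side note, not part of the patch: the three opsize checks above match the fixed MP_JOIN lengths from RFC 8684 -- 12 bytes on the SYN, 16 on the SYN/ACK and 24 on the third ACK. A rough userspace sketch of the on-the-wire layouts being parsed (struct and field names are illustrative, not kernel definitions):

#include <stdint.h>

struct mp_join_syn {			/* opsize 12 (TCPOLEN_MPTCP_MPJ_SYN) */
	uint8_t  kind, len;		/* kind = 30 (MPTCP), len = 12 */
	uint8_t  sub_flags;		/* subtype (4 bits) | rsvd (3) | B backup bit */
	uint8_t  addr_id;		/* join_id above */
	uint32_t rcv_token;		/* peer's connection token, network order */
	uint32_t snd_nonce;		/* sender's random nonce, network order */
} __attribute__((packed));

struct mp_join_synack {			/* opsize 16 (TCPOLEN_MPTCP_MPJ_SYNACK) */
	uint8_t  kind, len;
	uint8_t  sub_flags;
	uint8_t  addr_id;
	uint64_t thmac;			/* truncated HMAC, network order */
	uint32_t snd_nonce;
} __attribute__((packed));

struct mp_join_ack {			/* opsize 24 (TCPOLEN_MPTCP_MPJ_ACK) */
	uint8_t  kind, len;
	uint8_t  rsvd[2];		/* subtype + reserved bits, skipped via ptr += 2 */
	uint8_t  hmac[20];		/* full-length HMAC (MPTCPOPT_HMAC_LEN) */
} __attribute__((packed));
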
@@ -572,37 +604,80 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
 		pr_debug("subflow_req=%p, local_key=%llu",
 			 subflow_req, subflow_req->local_key);
 		return true;
+	} else if (subflow_req->mp_join) {
+		opts->suboptions = OPTION_MPTCP_MPJ_SYNACK;
+		opts->backup = subflow_req->backup;
+		opts->join_id = subflow_req->local_id;
+		opts->thmac = subflow_req->thmac;
+		opts->nonce = subflow_req->local_nonce;
+		pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
+			 subflow_req, opts->backup, opts->join_id,
+			 opts->thmac, opts->nonce);
+		*size = TCPOLEN_MPTCP_MPJ_SYNACK;
+		return true;
 	}
 	return false;
 }
 
-static bool check_fully_established(struct mptcp_subflow_context *subflow,
+static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
+				    struct mptcp_subflow_context *subflow,
 				    struct sk_buff *skb,
 				    struct mptcp_options_received *mp_opt)
 {
 	/* here we can process OoO, in-window pkts, only in-sequence 4th ack
-	 * are relevant
+	 * will make the subflow fully established
 	 */
-	if (likely(subflow->fully_established ||
-		   TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1))
-		return true;
+	if (likely(subflow->fully_established)) {
+		/* on passive sockets, check for 3rd ack retransmission
+		 * note that msk is always set by subflow_syn_recv_sock()
+		 * for mp_join subflows
+		 */
+		if (TCP_SKB_CB(skb)->seq == subflow->ssn_offset + 1 &&
+		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&
+		    subflow->mp_join && mp_opt->mp_join &&
+		    READ_ONCE(msk->pm.server_side))
+			tcp_send_ack(sk);
+		goto fully_established;
+	}
+
+	/* we should process OoO packets before the first subflow is fully
+	 * established, but not expected for MP_JOIN subflows
+	 */
+	if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1)
+		return subflow->mp_capable;
 
-	if (mp_opt->use_ack)
+	if (mp_opt->use_ack) {
+		/* subflows are fully established as soon as we get any
+		 * additional ack.
+		 */
 		subflow->fully_established = 1;
+		goto fully_established;
+	}
 
-	if (subflow->can_ack)
-		return true;
+	WARN_ON_ONCE(subflow->can_ack);
 
 	/* If the first established packet does not contain MP_CAPABLE + data
 	 * then fallback to TCP
 	 */
 	if (!mp_opt->mp_capable) {
 		subflow->mp_capable = 0;
-		tcp_sk(mptcp_subflow_tcp_sock(subflow))->is_mptcp = 0;
+		tcp_sk(sk)->is_mptcp = 0;
 		return false;
 	}
+
+	subflow->fully_established = 1;
 	subflow->remote_key = mp_opt->sndr_key;
 	subflow->can_ack = 1;
+
+fully_established:
+	if (likely(subflow->pm_notified))
+		return true;
+
+	subflow->pm_notified = 1;
+	if (subflow->mp_join)
+		mptcp_pm_subflow_established(msk, subflow);
+	else
+		mptcp_pm_fully_established(msk);
 	return true;
 }
 
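
Side note, not part of the patch: the new retransmission branch fires only for a bare third ACK on an already-established passive MP_JOIN subflow -- a segment carrying no payload (seq == end_seq) that sits right after the SYN (ssn_offset + 1). The same test, pulled out as a hypothetical helper for readability:

/* true for a pure ACK that re-acknowledges the end of the 3-way handshake */
static inline bool is_3rdack_retransmission(struct sk_buff *skb,
					    struct mptcp_subflow_context *subflow)
{
	return TCP_SKB_CB(skb)->seq == subflow->ssn_offset + 1 &&
	       TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq;
}
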
@@ -641,7 +716,7 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
 	struct mptcp_ext *mpext;
 
 	mp_opt = &opt_rx->mptcp;
-	if (!check_fully_established(subflow, skb, mp_opt))
+	if (!check_fully_established(msk, sk, subflow, skb, mp_opt))
 		return;
 
 	if (mp_opt->add_addr && add_addr_hmac_valid(msk, mp_opt)) {
@@ -700,8 +775,6 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
 	}
 
 	mpext->data_fin = mp_opt->data_fin;
-
-	mptcp_pm_fully_established(msk);
 }
 
 void mptcp_write_options(__be32 *ptr, struct mptcp_out_options *opts)
void mptcp_write_options (__be32 * ptr , struct mptcp_out_options * opts )
@@ -787,6 +860,16 @@ void mptcp_write_options(__be32 *ptr, struct mptcp_out_options *opts)
 					      0, opts->rm_id);
 	}
 
+	if (OPTION_MPTCP_MPJ_SYNACK & opts->suboptions) {
+		*ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
+				      TCPOLEN_MPTCP_MPJ_SYNACK,
+				      opts->backup, opts->join_id);
+		put_unaligned_be64(opts->thmac, ptr);
+		ptr += 2;
+		put_unaligned_be32(opts->nonce, ptr);
+		ptr += 1;
+	}
+
 	if (opts->ext_copy.use_ack || opts->ext_copy.use_map) {
 		struct mptcp_ext *mpext = &opts->ext_copy;
 		u8 len = TCPOLEN_MPTCP_DSS_BASE;
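
Side note, not part of the patch: the MPJ_SYNACK branch above emits TCPOLEN_MPTCP_MPJ_SYNACK (16) bytes as four 32-bit words, which is why the __be32 cursor moves by 2 after the 64-bit thmac and by 1 after the nonce. A self-contained userspace re-creation of the same layout, assuming option kind 30 and MP_JOIN subtype 1 (helper name is illustrative):

#include <stdint.h>

static void build_mpj_synack(uint8_t out[16], uint8_t backup, uint8_t join_id,
			     uint64_t thmac, uint32_t nonce)
{
	out[0] = 30;				/* TCP option kind: MPTCP */
	out[1] = 16;				/* option length */
	out[2] = (1 << 4) | (backup & 0x1);	/* subtype MP_JOIN, B flag */
	out[3] = join_id;			/* address ID of the new subflow */
	for (int i = 0; i < 8; i++)		/* truncated HMAC, big endian */
		out[4 + i] = (uint8_t)(thmac >> (56 - 8 * i));
	for (int i = 0; i < 4; i++)		/* nonce, big endian */
		out[12 + i] = (uint8_t)(nonce >> (24 - 8 * i));
}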