7 #include <ipxe/timer.h>
8 #include <ipxe/iobuf.h>
9 #include <ipxe/malloc.h>
10 #include <ipxe/init.h>
11 #include <ipxe/retry.h>
12 #include <ipxe/refcnt.h>
13 #include <ipxe/pending.h>
14 #include <ipxe/xfer.h>
15 #include <ipxe/open.h>
17 #include <ipxe/netdevice.h>
18 #include <ipxe/profile.h>
19 #include <ipxe/process.h>
20 #include <ipxe/tcpip.h>
29 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
31 /** A TCP connection */
32 struct tcp_connection {
/* NOTE(review): this extract has dropped lines (stale numeric prefixes are
 * non-contiguous).  Several member declarations documented below — refcnt,
 * mss, snd_seq, snd_sent, snd_win, rcv_ack, rcv_win, ts_val, ts_recent and
 * the struct's closing brace — are missing and must be recovered from the
 * upstream file before this will compile. */
33 /** Reference counter */
/* NOTE(review): refcnt member declaration (original line 34) missing */
35 /** List of TCP connections */
36 struct list_head list;
41 /** Data transfer interface */
42 struct interface xfer;
44 /** Remote socket address */
45 struct sockaddr_tcpip peer;
/* Local port, in host-endian order */
47 unsigned int local_port;
48 /** Maximum segment size */
/* NOTE(review): mss member declaration (original lines 49-50) missing */
51 /** Current TCP state */
52 unsigned int tcp_state;
53 /** Previous TCP state
55 * Maintained only for debug messages
57 unsigned int prev_tcp_state;
58 /** Current sequence number
60 * Equivalent to SND.UNA in RFC 793 terminology.
/* NOTE(review): snd_seq declaration missing here */
63 /** Unacknowledged sequence count
65 * Equivalent to (SND.NXT-SND.UNA) in RFC 793 terminology.
/* NOTE(review): snd_sent declaration missing here */
70 * Equivalent to SND.WND in RFC 793 terminology
/* NOTE(review): snd_win declaration missing here */
73 /** Current acknowledgement number
75 * Equivalent to RCV.NXT in RFC 793 terminology.
/* NOTE(review): rcv_ack declaration missing here */
80 * Equivalent to RCV.WND in RFC 793 terminology.
/* NOTE(review): rcv_win declaration missing here */
83 /** Received timestamp value
85 * Updated when a packet is received; copied to ts_recent when
86 * the window is advanced.
/* NOTE(review): ts_val declaration missing here */
89 /** Most recent received timestamp that advanced the window
91 * Equivalent to TS.Recent in RFC 1323 terminology.
/* NOTE(review): ts_recent declaration and the "Send window scale" doc
 * opener missing here */
96 * Equivalent to Snd.Wind.Scale in RFC 1323 terminology
98 uint8_t snd_win_scale;
99 /** Receive window scale
101 * Equivalent to Rcv.Wind.Scale in RFC 1323 terminology
103 uint8_t rcv_win_scale;
105 /** Selective acknowledgement list (in host-endian order) */
106 struct tcp_sack_block sack[TCP_SACK_MAX];
108 /** Transmit queue */
109 struct list_head tx_queue;
/** Receive queue */
111 struct list_head rx_queue;
112 /** Transmission process */
113 struct process process;
114 /** Retransmission timer */
115 struct retry_timer timer;
116 /** Shutdown (TIME_WAIT) timer */
117 struct retry_timer wait;
119 /** Pending operations for SYN and FIN */
120 struct pending_operation pending_flags;
121 /** Pending operations for transmit queue */
122 struct pending_operation pending_data;
/* NOTE(review): the "TCP flags" doc comment, the `enum tcp_flags {` opener
 * (original lines 123-126) and the closing `};` (line 135) are missing from
 * this extract.  These bit flags are stored in the connection's flags
 * word (see TCP_XFER_CLOSED / TCP_ACK_PENDING usage below). */
127 /** TCP data transfer interface has been closed */
128 TCP_XFER_CLOSED = 0x0001,
129 /** TCP timestamps are enabled */
130 TCP_TS_ENABLED = 0x0002,
131 /** TCP acknowledgement is pending */
132 TCP_ACK_PENDING = 0x0004,
133 /** TCP selective acknowledgement is enabled */
134 TCP_SACK_ENABLED = 0x0008,
137 /** TCP internal header
139 * This is the header that replaces the TCP header for packets
140 * enqueued on the receive queue.
142 struct tcp_rx_queued_header {
143 /** SEQ value, in host-endian order
145 * This represents the SEQ value at the time the packet is
146 * enqueued, and so excludes the SYN, if present.
/* NOTE(review): the seq member declaration (original line 148) missing */
149 /** Next SEQ value, in host-endian order */
/* NOTE(review): the nxt member declaration and the flags doc opener
 * (original lines 150-152) missing */
153 * Only FIN is valid within this flags byte; all other flags
154 * have already been processed by the time the packet is
/* NOTE(review): the flags member declaration and the closing `};`
 * (original lines 155-158) missing — tcp_rx_enqueue() below writes
 * tcpqhdr->flags, so this struct must be restored before compiling. */
163 * List of registered TCP connections
165 static LIST_HEAD ( tcp_conns );
167 /** Transmit profiler */
168 static struct profiler tcp_tx_profiler __profiler = { .name = "tcp.tx" };
170 /** Receive profiler */
171 static struct profiler tcp_rx_profiler __profiler = { .name = "tcp.rx" };
173 /** Data transfer profiler */
174 static struct profiler tcp_xfer_profiler __profiler = { .name = "tcp.xfer" };
176 /* Forward declarations */
177 static struct process_descriptor tcp_process_desc;
178 static struct interface_descriptor tcp_xfer_desc;
179 static void tcp_expired ( struct retry_timer *timer, int over );
180 static void tcp_wait_expired ( struct retry_timer *timer, int over );
181 static struct tcp_connection * tcp_demux ( unsigned int local_port );
182 static int tcp_rx_ack ( struct tcp_connection *tcp, uint32_t ack,
/* NOTE(review): the continuation of the tcp_rx_ack() prototype (second
 * parameter line, original line 183) is missing from this extract. */
189 * @ret name Name of TCP state
191 static inline __attribute__ (( always_inline )) const char *
192 tcp_state ( int state ) {
194 case TCP_CLOSED: return "CLOSED";
195 case TCP_LISTEN: return "LISTEN";
196 case TCP_SYN_SENT: return "SYN_SENT";
197 case TCP_SYN_RCVD: return "SYN_RCVD";
198 case TCP_ESTABLISHED: return "ESTABLISHED";
199 case TCP_FIN_WAIT_1: return "FIN_WAIT_1";
200 case TCP_FIN_WAIT_2: return "FIN_WAIT_2";
201 case TCP_CLOSING_OR_LAST_ACK: return "CLOSING/LAST_ACK";
202 case TCP_TIME_WAIT: return "TIME_WAIT";
203 case TCP_CLOSE_WAIT: return "CLOSE_WAIT";
204 default: return "INVALID";
209 * Dump TCP state transition
211 * @v tcp TCP connection
213 static inline __attribute__ (( always_inline )) void
214 tcp_dump_state ( struct tcp_connection *tcp ) {
216 if ( tcp->tcp_state != tcp->prev_tcp_state ) {
217 DBGC ( tcp, "TCP %p transitioned from %s to %s\n", tcp,
218 tcp_state ( tcp->prev_tcp_state ),
219 tcp_state ( tcp->tcp_state ) );
221 tcp->prev_tcp_state = tcp->tcp_state;
229 static inline __attribute__ (( always_inline )) void
230 tcp_dump_flags ( struct tcp_connection *tcp, unsigned int flags ) {
231 if ( flags & TCP_RST )
232 DBGC2 ( tcp, " RST" );
233 if ( flags & TCP_SYN )
234 DBGC2 ( tcp, " SYN" );
235 if ( flags & TCP_PSH )
236 DBGC2 ( tcp, " PSH" );
237 if ( flags & TCP_FIN )
238 DBGC2 ( tcp, " FIN" );
239 if ( flags & TCP_ACK )
240 DBGC2 ( tcp, " ACK" );
243 /***************************************************************************
247 ***************************************************************************
251 * Check if local TCP port is available
253 * @v port Local port number
254 * @ret port Local port number, or negative error
256 static int tcp_port_available ( int port ) {
258 return ( tcp_demux ( port ) ? -EADDRINUSE : port );
/* NOTE(review): this extract is missing this function's local variable
 * declarations (mtu, port, rc), every error-handling path (allocation
 * failure after zalloc, no-route after tcpip_mtu, bind failure), the
 * success `return 0;` and the error labels/cleanup at the end.  Recover
 * from upstream before compiling. */
262 * Open a TCP connection
264 * @v xfer Data transfer interface
265 * @v peer Peer socket address
266 * @v local Local socket address, or NULL
267 * @ret rc Return status code
269 static int tcp_open ( struct interface *xfer, struct sockaddr *peer,
270 struct sockaddr *local ) {
271 struct sockaddr_tcpip *st_peer = ( struct sockaddr_tcpip * ) peer;
272 struct sockaddr_tcpip *st_local = ( struct sockaddr_tcpip * ) local;
273 struct tcp_connection *tcp;
278 /* Allocate and initialise structure */
279 tcp = zalloc ( sizeof ( *tcp ) );
/* NOTE(review): NULL check on zalloc() result (original lines 280-281)
 * missing here */
282 DBGC ( tcp, "TCP %p allocated\n", tcp );
283 ref_init ( &tcp->refcnt, NULL );
284 intf_init ( &tcp->xfer, &tcp_xfer_desc, &tcp->refcnt );
285 process_init_stopped ( &tcp->process, &tcp_process_desc, &tcp->refcnt );
286 timer_init ( &tcp->timer, tcp_expired, &tcp->refcnt );
287 timer_init ( &tcp->wait, tcp_wait_expired, &tcp->refcnt );
288 tcp->prev_tcp_state = TCP_CLOSED;
289 tcp->tcp_state = TCP_STATE_SENT ( TCP_SYN );
290 tcp_dump_state ( tcp );
/* Initial sequence number is randomised */
291 tcp->snd_seq = random();
292 INIT_LIST_HEAD ( &tcp->tx_queue );
293 INIT_LIST_HEAD ( &tcp->rx_queue );
294 memcpy ( &tcp->peer, st_peer, sizeof ( tcp->peer ) );
/* Calculate MSS from the path MTU */
297 mtu = tcpip_mtu ( &tcp->peer );
/* NOTE(review): the no-route check guarding this DBGC and its error
 * return are missing here */
299 DBGC ( tcp, "TCP %p has no route to %s\n",
300 tcp, sock_ntoa ( peer ) );
304 tcp->mss = ( mtu - sizeof ( struct tcp_header ) );
306 /* Bind to local port */
307 port = tcpip_bind ( st_local, tcp_port_available );
/* NOTE(review): the negative-port check guarding this DBGC and its
 * error return are missing here */
310 DBGC ( tcp, "TCP %p could not bind: %s\n",
311 tcp, strerror ( rc ) );
314 tcp->local_port = port;
315 DBGC ( tcp, "TCP %p bound to port %d\n", tcp, tcp->local_port );
317 /* Start timer to initiate SYN */
318 start_timer_nodelay ( &tcp->timer );
320 /* Add a pending operation for the SYN */
321 pending_get ( &tcp->pending_flags );
323 /* Attach parent interface, transfer reference to connection
326 intf_plug_plug ( &tcp->xfer, xfer );
327 list_add ( &tcp->list, &tcp_conns );
/* NOTE(review): the `return 0;` and the error labels preceding this
 * cleanup ref_put are missing — this line belongs to the elided error
 * path, not to normal flow. */
331 ref_put ( &tcp->refcnt );
/* NOTE(review): this extract drops several lines from this function —
 * free_iob() calls inside both list_for_each_entry_safe loops, closing
 * braces, and the early `return` after the connection is deleted.
 * Recover from upstream before compiling. */
336 * Close TCP connection
338 * @v tcp TCP connection
339 * @v rc Reason for close
341 * Closes the data transfer interface. If the TCP state machine is in
342 * a suitable state, the connection will be deleted.
344 static void tcp_close ( struct tcp_connection *tcp, int rc ) {
345 struct io_buffer *iobuf;
346 struct io_buffer *tmp;
348 /* Close data transfer interface */
349 intf_shutdown ( &tcp->xfer, rc );
350 tcp->flags |= TCP_XFER_CLOSED;
352 /* If we are in CLOSED, or have otherwise not yet received a
353 * SYN (i.e. we are in LISTEN or SYN_SENT), just delete the
356 if ( ! ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) ) {
358 /* Transition to CLOSED for the sake of debugging messages */
359 tcp->tcp_state = TCP_CLOSED;
360 tcp_dump_state ( tcp );
362 /* Free any unprocessed I/O buffers */
363 list_for_each_entry_safe ( iobuf, tmp, &tcp->rx_queue, list ) {
364 list_del ( &iobuf->list );
/* NOTE(review): free_iob ( iobuf ) missing here */
368 /* Free any unsent I/O buffers */
369 list_for_each_entry_safe ( iobuf, tmp, &tcp->tx_queue, list ) {
370 list_del ( &iobuf->list );
/* NOTE(review): free_iob ( iobuf ) missing here */
372 pending_put ( &tcp->pending_data );
374 assert ( ! is_pending ( &tcp->pending_data ) );
376 /* Remove pending operations for SYN and FIN, if applicable */
/* Two puts: one for the SYN, one for the (possibly pending) FIN */
377 pending_put ( &tcp->pending_flags );
378 pending_put ( &tcp->pending_flags );
380 /* Remove from list and drop reference */
381 process_del ( &tcp->process );
382 stop_timer ( &tcp->timer );
383 stop_timer ( &tcp->wait );
384 list_del ( &tcp->list );
385 ref_put ( &tcp->refcnt );
386 DBGC ( tcp, "TCP %p connection deleted\n", tcp );
/* NOTE(review): `return;` closing this branch missing here */
390 /* If we have not had our SYN acknowledged (i.e. we are in
391 * SYN_RCVD), pretend that it has been acknowledged so that we
392 * can send a FIN without breaking things.
394 if ( ! ( tcp->tcp_state & TCP_STATE_ACKED ( TCP_SYN ) ) )
395 tcp_rx_ack ( tcp, ( tcp->snd_seq + 1 ), 0 );
397 /* If we have no data remaining to send, start sending FIN */
398 if ( list_empty ( &tcp->tx_queue ) &&
399 ! ( tcp->tcp_state & TCP_STATE_SENT ( TCP_FIN ) ) ) {
401 tcp->tcp_state |= TCP_STATE_SENT ( TCP_FIN );
402 tcp_dump_state ( tcp );
403 process_add ( &tcp->process );
405 /* Add a pending operation for the FIN */
406 pending_get ( &tcp->pending_flags );
410 /***************************************************************************
414 ***************************************************************************
418 * Calculate transmission window
420 * @v tcp TCP connection
421 * @ret len Maximum length that can be sent in a single packet
423 static size_t tcp_xmit_win ( struct tcp_connection *tcp ) {
426 /* Not ready if we're not in a suitable connection state */
427 if ( ! TCP_CAN_SEND_DATA ( tcp->tcp_state ) )
430 /* Length is the minimum of the receiver's window and the path MTU */
432 if ( len > TCP_PATH_MTU )
439 * Check data-transfer flow control window
441 * @v tcp TCP connection
442 * @ret len Length of window
444 static size_t tcp_xfer_window ( struct tcp_connection *tcp ) {
446 /* Not ready if data queue is non-empty. This imposes a limit
447 * of only one unACKed packet in the TX queue at any time; we
448 * do this to conserve memory usage.
450 if ( ! list_empty ( &tcp->tx_queue ) )
453 /* Return TCP window length */
454 return tcp_xmit_win ( tcp );
/* NOTE(review): this extract is missing the loop's `break`/left-assignment
 * and closing braces, the `return 0;` on failure, and the sack->left /
 * sack->right population before the final return.  Recover from upstream
 * before compiling. */
458 * Find selective acknowledgement block
460 * @v tcp TCP connection
461 * @v seq SEQ value in SACK block (in host-endian order)
462 * @v sack SACK block to fill in (in host-endian order)
463 * @ret len Length of SACK block
465 static uint32_t tcp_sack_block ( struct tcp_connection *tcp, uint32_t seq,
466 struct tcp_sack_block *sack ) {
467 struct io_buffer *iobuf;
468 struct tcp_rx_queued_header *tcpqhdr;
/* Candidate block starts as the empty range [rcv_ack,rcv_ack) */
469 uint32_t left = tcp->rcv_ack;
470 uint32_t right = left;
472 /* Find highest block which does not start after SEQ */
473 list_for_each_entry ( iobuf, &tcp->rx_queue, list ) {
474 tcpqhdr = iobuf->data;
475 if ( tcp_cmp ( tcpqhdr->seq, right ) > 0 ) {
476 if ( tcp_cmp ( tcpqhdr->seq, seq ) > 0 )
/* NOTE(review): `break;` and `left = tcpqhdr->seq;` missing here */
480 if ( tcp_cmp ( tcpqhdr->nxt, right ) > 0 )
481 right = tcpqhdr->nxt;
484 /* Fail if this block does not contain SEQ */
485 if ( tcp_cmp ( right, seq ) < 0 )
/* NOTE(review): `return 0;` missing here */
488 /* Populate SACK block */
/* NOTE(review): sack->left / sack->right assignments missing here */
491 return ( right - left );
/* NOTE(review): this extract is missing the len/i declarations, the
 * new-block-count increments, the duplicate-elimination body, and the
 * final `return new;`.  Recover from upstream before compiling. */
495 * Update TCP selective acknowledgement list
497 * @v tcp TCP connection
498 * @v seq SEQ value in first SACK block (in host-endian order)
499 * @ret count Number of SACK blocks
501 static unsigned int tcp_sack ( struct tcp_connection *tcp, uint32_t seq ) {
/* Scratch list built in host-endian order, then copied back */
502 struct tcp_sack_block sack[TCP_SACK_MAX];
503 unsigned int old = 0;
504 unsigned int new = 0;
508 /* Populate first new SACK block */
509 len = tcp_sack_block ( tcp, seq, &sack[0] );
513 /* Populate remaining new SACK blocks based on old SACK blocks */
514 for ( old = 0 ; old < TCP_SACK_MAX ; old++ ) {
516 /* Stop if we run out of space in the new list */
517 if ( new == TCP_SACK_MAX )
520 /* Skip empty old SACK blocks */
521 if ( tcp->sack[old].left == tcp->sack[old].right )
524 /* Populate new SACK block */
525 len = tcp_sack_block ( tcp, tcp->sack[old].left, &sack[new] );
529 /* Eliminate duplicates */
530 for ( i = 0 ; i < new ; i++ ) {
531 if ( sack[i].left == sack[new].left ) {
539 /* Update SACK list */
540 memset ( tcp->sack, 0, sizeof ( tcp->sack ) );
541 memcpy ( tcp->sack, sack, ( new * sizeof ( tcp->sack[0] ) ) );
/* NOTE(review): this extract is missing the len/frag_len declarations,
 * the loop's length accounting, the NULL check on dest before the memcpy,
 * the `remove` guard around the dequeue, free_iob(), and the final
 * `return len;`.  Recover from upstream before compiling. */
546 * Process TCP transmit queue
548 * @v tcp TCP connection
549 * @v max_len Maximum length to process
550 * @v dest I/O buffer to fill with data, or NULL
551 * @v remove Remove data from queue
552 * @ret len Length of data processed
554 * This processes at most @c max_len bytes from the TCP connection's
555 * transmit queue. Data will be copied into the @c dest I/O buffer
556 * (if provided) and, if @c remove is true, removed from the transmit
559 static size_t tcp_process_tx_queue ( struct tcp_connection *tcp, size_t max_len,
560 struct io_buffer *dest, int remove ) {
561 struct io_buffer *iobuf;
562 struct io_buffer *tmp;
566 list_for_each_entry_safe ( iobuf, tmp, &tcp->tx_queue, list ) {
/* Clamp each fragment to the remaining budget */
567 frag_len = iob_len ( iobuf );
568 if ( frag_len > max_len )
571 memcpy ( iob_put ( dest, frag_len ), iobuf->data,
/* NOTE(review): remove-branch guard missing here */
575 iob_pull ( iobuf, frag_len );
576 if ( ! iob_len ( iobuf ) ) {
577 list_del ( &iobuf->list );
/* Fully-consumed buffer releases its pending-data operation */
579 pending_put ( &tcp->pending_data );
/* NOTE(review): this extract is missing many lines from this function —
 * local declarations (flags, len, seq_len, sack_len, payload, i, rc),
 * several closing braces, early `return`s, the allocation-failure return,
 * and the free_iob() on transmit failure.  Recover from upstream before
 * compiling. */
589 * Transmit any outstanding data (with selective acknowledgement)
591 * @v tcp TCP connection
592 * @v sack_seq SEQ for first selective acknowledgement (if any)
594 * Transmits any outstanding data on the connection.
596 * Note that even if an error is returned, the retransmission timer
597 * will have been started if necessary, and so the stack will
598 * eventually attempt to retransmit the failed packet.
600 static void tcp_xmit_sack ( struct tcp_connection *tcp, uint32_t sack_seq ) {
601 struct io_buffer *iobuf;
602 struct tcp_header *tcphdr;
603 struct tcp_mss_option *mssopt;
604 struct tcp_window_scale_padded_option *wsopt;
605 struct tcp_timestamp_padded_option *tsopt;
606 struct tcp_sack_permitted_padded_option *spopt;
607 struct tcp_sack_padded_option *sackopt;
608 struct tcp_sack_block *sack;
611 unsigned int sack_count;
616 uint32_t max_rcv_win;
617 uint32_t max_representable_win;
620 /* Start profiling */
621 profile_start ( &tcp_tx_profiler );
623 /* If retransmission timer is already running, do nothing */
624 if ( timer_running ( &tcp->timer ) )
627 /* Calculate both the actual (payload) and sequence space
628 * lengths that we wish to transmit.
630 if ( TCP_CAN_SEND_DATA ( tcp->tcp_state ) ) {
631 len = tcp_process_tx_queue ( tcp, tcp_xmit_win ( tcp ),
635 flags = TCP_FLAGS_SENDING ( tcp->tcp_state );
636 if ( flags & ( TCP_SYN | TCP_FIN ) ) {
637 /* SYN or FIN consume one byte, and we can never send both */
638 assert ( ! ( ( flags & TCP_SYN ) && ( flags & TCP_FIN ) ) );
641 tcp->snd_sent = seq_len;
643 /* If we have nothing to transmit, stop now */
644 if ( ( seq_len == 0 ) && ! ( tcp->flags & TCP_ACK_PENDING ) )
647 /* If we are transmitting anything that requires
648 * acknowledgement (i.e. consumes sequence space), start the
649 * retransmission timer. Do this before attempting to
650 * allocate the I/O buffer, in case allocation itself fails.
653 start_timer ( &tcp->timer );
655 /* Allocate I/O buffer */
656 iobuf = alloc_iob ( len + TCP_MAX_HEADER_LEN );
/* NOTE(review): NULL check on alloc_iob() result missing here */
658 DBGC ( tcp, "TCP %p could not allocate iobuf for %08x..%08x "
659 "%08x\n", tcp, tcp->snd_seq, ( tcp->snd_seq + seq_len ),
663 iob_reserve ( iobuf, TCP_MAX_HEADER_LEN );
665 /* Fill data payload from transmit queue */
666 tcp_process_tx_queue ( tcp, len, iobuf, 0 );
668 /* Expand receive window if possible */
669 max_rcv_win = xfer_window ( &tcp->xfer );
670 if ( max_rcv_win > TCP_MAX_WINDOW_SIZE )
671 max_rcv_win = TCP_MAX_WINDOW_SIZE;
672 max_representable_win = ( 0xffff << tcp->rcv_win_scale );
673 if ( max_rcv_win > max_representable_win )
674 max_rcv_win = max_representable_win;
675 max_rcv_win &= ~0x03; /* Keep everything dword-aligned */
676 if ( tcp->rcv_win < max_rcv_win )
677 tcp->rcv_win = max_rcv_win;
679 /* Fill up the TCP header */
680 payload = iobuf->data;
681 if ( flags & TCP_SYN ) {
682 mssopt = iob_push ( iobuf, sizeof ( *mssopt ) );
683 mssopt->kind = TCP_OPTION_MSS;
684 mssopt->length = sizeof ( *mssopt );
685 mssopt->mss = htons ( tcp->mss );
686 wsopt = iob_push ( iobuf, sizeof ( *wsopt ) );
687 wsopt->nop = TCP_OPTION_NOP;
688 wsopt->wsopt.kind = TCP_OPTION_WS;
689 wsopt->wsopt.length = sizeof ( wsopt->wsopt );
690 wsopt->wsopt.scale = TCP_RX_WINDOW_SCALE;
691 spopt = iob_push ( iobuf, sizeof ( *spopt ) );
/* NOTE(review): `sizeof ( spopt )` is the size of the POINTER, not of
 * the nop padding array; upstream uses sizeof ( spopt->nop ) — likely a
 * bug introduced in this copy.  Verify against upstream. */
692 memset ( spopt->nop, TCP_OPTION_NOP, sizeof ( spopt ) );
693 spopt->spopt.kind = TCP_OPTION_SACK_PERMITTED;
694 spopt->spopt.length = sizeof ( spopt->spopt );
696 if ( ( flags & TCP_SYN ) || ( tcp->flags & TCP_TS_ENABLED ) ) {
697 tsopt = iob_push ( iobuf, sizeof ( *tsopt ) );
698 memset ( tsopt->nop, TCP_OPTION_NOP, sizeof ( tsopt->nop ) );
699 tsopt->tsopt.kind = TCP_OPTION_TS;
700 tsopt->tsopt.length = sizeof ( tsopt->tsopt );
701 tsopt->tsopt.tsval = htonl ( currticks() );
702 tsopt->tsopt.tsecr = htonl ( tcp->ts_recent );
704 if ( ( tcp->flags & TCP_SACK_ENABLED ) &&
705 ( ! list_empty ( &tcp->rx_queue ) ) &&
706 ( ( sack_count = tcp_sack ( tcp, sack_seq ) ) != 0 ) ) {
707 sack_len = ( sack_count * sizeof ( *sack ) );
708 sackopt = iob_push ( iobuf, ( sizeof ( *sackopt ) + sack_len ));
709 memset ( sackopt->nop, TCP_OPTION_NOP, sizeof ( sackopt->nop ));
710 sackopt->sackopt.kind = TCP_OPTION_SACK;
711 sackopt->sackopt.length =
712 ( sizeof ( sackopt->sackopt ) + sack_len );
713 sack = ( ( ( void * ) sackopt ) + sizeof ( *sackopt ) );
714 for ( i = 0 ; i < sack_count ; i++, sack++ ) {
715 sack->left = htonl ( tcp->sack[i].left );
716 sack->right = htonl ( tcp->sack[i].right );
721 tcphdr = iob_push ( iobuf, sizeof ( *tcphdr ) );
722 memset ( tcphdr, 0, sizeof ( *tcphdr ) );
723 tcphdr->src = htons ( tcp->local_port );
724 tcphdr->dest = tcp->peer.st_port;
725 tcphdr->seq = htonl ( tcp->snd_seq );
726 tcphdr->ack = htonl ( tcp->rcv_ack );
/* Header length in 32-bit words, stored in the top nibble */
727 tcphdr->hlen = ( ( payload - iobuf->data ) << 2 );
728 tcphdr->flags = flags;
729 tcphdr->win = htons ( tcp->rcv_win >> tcp->rcv_win_scale );
730 tcphdr->csum = tcpip_chksum ( iobuf->data, iob_len ( iobuf ) );
733 DBGC2 ( tcp, "TCP %p TX %d->%d %08x..%08x %08x %4zd",
734 tcp, ntohs ( tcphdr->src ), ntohs ( tcphdr->dest ),
735 ntohl ( tcphdr->seq ), ( ntohl ( tcphdr->seq ) + seq_len ),
736 ntohl ( tcphdr->ack ), len );
737 tcp_dump_flags ( tcp, tcphdr->flags );
740 /* Transmit packet */
741 if ( ( rc = tcpip_tx ( iobuf, &tcp_protocol, NULL, &tcp->peer, NULL,
742 &tcphdr->csum ) ) != 0 ) {
743 DBGC ( tcp, "TCP %p could not transmit %08x..%08x %08x: %s\n",
744 tcp, tcp->snd_seq, ( tcp->snd_seq + tcp->snd_sent ),
745 tcp->rcv_ack, strerror ( rc ) );
749 /* Clear ACK-pending flag */
750 tcp->flags &= ~TCP_ACK_PENDING;
752 profile_stop ( &tcp_tx_profiler );
756 * Transmit any outstanding data
758 * @v tcp TCP connection
760 static void tcp_xmit ( struct tcp_connection *tcp ) {
762 /* Transmit without an explicit first SACK */
763 tcp_xmit_sack ( tcp, tcp->rcv_ack );
766 /** TCP process descriptor */
767 static struct process_descriptor tcp_process_desc =
768 PROC_DESC_ONCE ( struct tcp_connection, process, tcp_xmit );
/* NOTE(review): this extract is missing the give-up condition guarding
 * the terminate branch, the else-branch that actually retransmits, and
 * the closing braces.  Recover from upstream before compiling. */
771 * Retransmission timer expired
773 * @v timer Retransmission timer
774 * @v over Failure indicator
776 static void tcp_expired ( struct retry_timer *timer, int over ) {
777 struct tcp_connection *tcp =
778 container_of ( timer, struct tcp_connection, timer );
780 DBGC ( tcp, "TCP %p timer %s in %s for %08x..%08x %08x\n", tcp,
781 ( over ? "expired" : "fired" ), tcp_state ( tcp->tcp_state ),
782 tcp->snd_seq, ( tcp->snd_seq + tcp->snd_sent ), tcp->rcv_ack );
/* Timer should only be running in states that can still transmit */
784 assert ( ( tcp->tcp_state == TCP_SYN_SENT ) ||
785 ( tcp->tcp_state == TCP_SYN_RCVD ) ||
786 ( tcp->tcp_state == TCP_ESTABLISHED ) ||
787 ( tcp->tcp_state == TCP_FIN_WAIT_1 ) ||
788 ( tcp->tcp_state == TCP_CLOSE_WAIT ) ||
789 ( tcp->tcp_state == TCP_CLOSING_OR_LAST_ACK ) );
792 /* If we have finally timed out and given up,
793 * terminate the connection
795 tcp->tcp_state = TCP_CLOSED;
796 tcp_dump_state ( tcp );
797 tcp_close ( tcp, -ETIMEDOUT );
799 /* Otherwise, retransmit the packet */
/* NOTE(review): retransmission branch body missing here */
805 * Shutdown timer expired
807 * @v timer Shutdown timer
808 * @v over Failure indicator
810 static void tcp_wait_expired ( struct retry_timer *timer, int over __unused ) {
811 struct tcp_connection *tcp =
812 container_of ( timer, struct tcp_connection, wait );
814 assert ( tcp->tcp_state == TCP_TIME_WAIT );
816 DBGC ( tcp, "TCP %p wait complete in %s for %08x..%08x %08x\n", tcp,
817 tcp_state ( tcp->tcp_state ), tcp->snd_seq,
818 ( tcp->snd_seq + tcp->snd_sent ), tcp->rcv_ack );
820 tcp->tcp_state = TCP_CLOSED;
821 tcp_dump_state ( tcp );
822 tcp_close ( tcp, 0 );
/* NOTE(review): this extract is missing the rc declaration, the NULL
 * check and -ENOMEM return after alloc_iob(), the error return after the
 * transmit-failure DBGC, the final `return 0;` and the closing brace.
 * Recover from upstream before compiling. */
826 * Send RST response to incoming packet
828 * @v in_tcphdr TCP header of incoming packet
829 * @ret rc Return status code
831 static int tcp_xmit_reset ( struct tcp_connection *tcp,
832 struct sockaddr_tcpip *st_dest,
833 struct tcp_header *in_tcphdr ) {
834 struct io_buffer *iobuf;
835 struct tcp_header *tcphdr;
838 /* Allocate space for dataless TX buffer */
839 iobuf = alloc_iob ( TCP_MAX_HEADER_LEN );
/* NOTE(review): NULL check on alloc_iob() result missing here */
841 DBGC ( tcp, "TCP %p could not allocate iobuf for RST "
842 "%08x..%08x %08x\n", tcp, ntohl ( in_tcphdr->ack ),
843 ntohl ( in_tcphdr->ack ), ntohl ( in_tcphdr->seq ) );
846 iob_reserve ( iobuf, TCP_MAX_HEADER_LEN );
848 /* Construct RST response */
849 tcphdr = iob_push ( iobuf, sizeof ( *tcphdr ) );
850 memset ( tcphdr, 0, sizeof ( *tcphdr ) );
/* Mirror the incoming packet's addressing and sequence numbers */
851 tcphdr->src = in_tcphdr->dest;
852 tcphdr->dest = in_tcphdr->src;
853 tcphdr->seq = in_tcphdr->ack;
854 tcphdr->ack = in_tcphdr->seq;
855 tcphdr->hlen = ( ( sizeof ( *tcphdr ) / 4 ) << 4 );
856 tcphdr->flags = ( TCP_RST | TCP_ACK );
857 tcphdr->win = htons ( 0 );
858 tcphdr->csum = tcpip_chksum ( iobuf->data, iob_len ( iobuf ) );
861 DBGC2 ( tcp, "TCP %p TX %d->%d %08x..%08x %08x %4d",
862 tcp, ntohs ( tcphdr->src ), ntohs ( tcphdr->dest ),
863 ntohl ( tcphdr->seq ), ( ntohl ( tcphdr->seq ) ),
864 ntohl ( tcphdr->ack ), 0 );
865 tcp_dump_flags ( tcp, tcphdr->flags );
868 /* Transmit packet */
869 if ( ( rc = tcpip_tx ( iobuf, &tcp_protocol, NULL, st_dest,
870 NULL, &tcphdr->csum ) ) != 0 ) {
871 DBGC ( tcp, "TCP %p could not transmit RST %08x..%08x %08x: "
872 "%s\n", tcp, ntohl ( in_tcphdr->ack ),
873 ntohl ( in_tcphdr->ack ), ntohl ( in_tcphdr->seq ),
881 /***************************************************************************
885 ***************************************************************************
889 * Identify TCP connection by local port number
891 * @v local_port Local port
892 * @ret tcp TCP connection, or NULL
894 static struct tcp_connection * tcp_demux ( unsigned int local_port ) {
895 struct tcp_connection *tcp;
897 list_for_each_entry ( tcp, &tcp_conns, list ) {
898 if ( tcp->local_port == local_port )
/* NOTE(review): this extract is missing the option-kind extraction, the
 * single-byte advance for NOP/END, the length sanity checks, the switch
 * statement skeleton around the per-option cases, and the closing braces.
 * Recover from upstream before compiling. */
905 * Parse TCP received options
907 * @v tcp TCP connection
908 * @v data Raw options data
909 * @v len Raw options length
910 * @v options Options structure to fill in
912 static void tcp_rx_opts ( struct tcp_connection *tcp, const void *data,
913 size_t len, struct tcp_options *options ) {
914 const void *end = ( data + len );
915 const struct tcp_option *option;
918 memset ( options, 0, sizeof ( *options ) );
919 while ( data < end ) {
/* END terminates option parsing; NOP is a single padding byte */
922 if ( kind == TCP_OPTION_END )
924 if ( kind == TCP_OPTION_NOP ) {
930 options->mssopt = data;
933 options->wsopt = data;
935 case TCP_OPTION_SACK_PERMITTED:
936 options->spopt = data;
938 case TCP_OPTION_SACK:
939 /* Ignore received SACKs */
942 options->tsopt = data;
945 DBGC ( tcp, "TCP %p received unknown option %d\n",
/* Advance by the option's self-described length */
949 data += option->length;
/* NOTE(review): this extract is missing the sack index declaration, the
 * else-branch that zeroes rcv_win when seq_len exceeds it, and closing
 * braces.  Recover from upstream before compiling. */
954 * Consume received sequence space
956 * @v tcp TCP connection
957 * @v seq_len Sequence space length to consume
959 static void tcp_rx_seq ( struct tcp_connection *tcp, uint32_t seq_len ) {
963 assert ( seq_len > 0 );
965 /* Update acknowledgement number */
966 tcp->rcv_ack += seq_len;
/* Shrink the advertised window by the consumed amount */
969 if ( tcp->rcv_win > seq_len ) {
970 tcp->rcv_win -= seq_len;
/* NOTE(review): else-branch setting rcv_win to zero missing here */
975 /* Update timestamp */
976 tcp->ts_recent = tcp->ts_val;
978 /* Update SACK list */
/* Clamp any SACK block edges that the new RCV.NXT has overtaken */
979 for ( sack = 0 ; sack < TCP_SACK_MAX ; sack++ ) {
980 if ( tcp->sack[sack].left == tcp->sack[sack].right )
982 if ( tcp_cmp ( tcp->sack[sack].left, tcp->rcv_ack ) < 0 )
983 tcp->sack[sack].left = tcp->rcv_ack;
984 if ( tcp_cmp ( tcp->sack[sack].right, tcp->rcv_ack ) < 0 )
985 tcp->sack[sack].right = tcp->rcv_ack;
988 /* Mark ACK as pending */
989 tcp->flags |= TCP_ACK_PENDING;
/* NOTE(review): this extract is missing the `tcp->rcv_ack = seq;`
 * synchronisation line inside the first-SYN branch, the early
 * `return 0;` for duplicate SYNs, the final `return 0;` and closing
 * braces.  Recover from upstream before compiling. */
993 * Handle TCP received SYN
995 * @v tcp TCP connection
996 * @v seq SEQ value (in host-endian order)
997 * @v options TCP options
998 * @ret rc Return status code
1000 static int tcp_rx_syn ( struct tcp_connection *tcp, uint32_t seq,
1001 struct tcp_options *options ) {
1003 /* Synchronise sequence numbers on first SYN */
1004 if ( ! ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) ) {
/* NOTE(review): rcv_ack synchronisation missing here */
/* Peer options negotiated only on the first SYN */
1006 if ( options->tsopt )
1007 tcp->flags |= TCP_TS_ENABLED;
1008 if ( options->spopt )
1009 tcp->flags |= TCP_SACK_ENABLED;
1010 if ( options->wsopt ) {
1011 tcp->snd_win_scale = options->wsopt->scale;
1012 tcp->rcv_win_scale = TCP_RX_WINDOW_SCALE;
1016 /* Ignore duplicate SYN */
1017 if ( seq != tcp->rcv_ack )
1020 /* Acknowledge SYN */
1021 tcp_rx_seq ( tcp, 1 );
1023 /* Mark SYN as received and start sending ACKs with each packet */
1024 tcp->tcp_state |= ( TCP_STATE_SENT ( TCP_ACK ) |
1025 TCP_STATE_RCVD ( TCP_SYN ) );
/* NOTE(review): this extract is missing the second parameter of the
 * signature (win), the len declaration, the RST transmission for
 * out-of-range ACKs on unestablished connections, the window-size
 * update, the zero-ack_len early return, the SEQ/sent counter updates,
 * the final `return 0;` and closing braces.  Recover from upstream
 * before compiling. */
1031 * Handle TCP received ACK
1033 * @v tcp TCP connection
1034 * @v ack ACK value (in host-endian order)
1035 * @v win WIN value (in host-endian order)
1036 * @ret rc Return status code
1038 static int tcp_rx_ack ( struct tcp_connection *tcp, uint32_t ack,
/* Unsigned wraparound arithmetic: ack_len is valid modulo 2^32 */
1040 uint32_t ack_len = ( ack - tcp->snd_seq );
1042 unsigned int acked_flags;
1044 /* Check for out-of-range or old duplicate ACKs */
1045 if ( ack_len > tcp->snd_sent ) {
1046 DBGC ( tcp, "TCP %p received ACK for %08x..%08x, "
1047 "sent only %08x..%08x\n", tcp, tcp->snd_seq,
1048 ( tcp->snd_seq + ack_len ), tcp->snd_seq,
1049 ( tcp->snd_seq + tcp->snd_sent ) );
1051 if ( TCP_HAS_BEEN_ESTABLISHED ( tcp->tcp_state ) ) {
1052 /* Just ignore what might be old duplicate ACKs */
1055 /* Send RST if an out-of-range ACK is received
1056 * on a not-yet-established connection, as per
1063 /* Update window size */
/* NOTE(review): snd_win assignment missing here */
1066 /* Ignore ACKs that don't actually acknowledge any new data.
1067 * (In particular, do not stop the retransmission timer; this
1068 * avoids creating a sorceror's apprentice syndrome when a
1069 * duplicate ACK is received and we still have data in our
1075 /* Stop the retransmission timer */
1076 stop_timer ( &tcp->timer );
1078 /* Determine acknowledged flags and data length */
1080 acked_flags = ( TCP_FLAGS_SENDING ( tcp->tcp_state ) &
1081 ( TCP_SYN | TCP_FIN ) );
1082 if ( acked_flags ) {
1084 pending_put ( &tcp->pending_flags );
1087 /* Update SEQ and sent counters */
/* NOTE(review): snd_seq / snd_sent updates missing here */
1091 /* Remove any acknowledged data from transmit queue */
1092 tcp_process_tx_queue ( tcp, len, NULL, 1 );
1094 /* Mark SYN/FIN as acknowledged if applicable. */
1096 tcp->tcp_state |= TCP_STATE_ACKED ( acked_flags );
1098 /* Start sending FIN if we've had all possible data ACKed */
1099 if ( list_empty ( &tcp->tx_queue ) &&
1100 ( tcp->flags & TCP_XFER_CLOSED ) &&
1101 ! ( tcp->tcp_state & TCP_STATE_SENT ( TCP_FIN ) ) ) {
1102 tcp->tcp_state |= TCP_STATE_SENT ( TCP_FIN );
1103 pending_get ( &tcp->pending_flags );
/* NOTE(review): this extract is missing the len/rc declarations, the
 * free_iob() and `return 0;` inside the duplicate branch, the error
 * return after the delivery-failure DBGC, the final `return 0;` and
 * closing brace.  Recover from upstream before compiling. */
1110 * Handle TCP received data
1112 * @v tcp TCP connection
1113 * @v seq SEQ value (in host-endian order)
1114 * @v iobuf I/O buffer
1115 * @ret rc Return status code
1117 * This function takes ownership of the I/O buffer.
1119 static int tcp_rx_data ( struct tcp_connection *tcp, uint32_t seq,
1120 struct io_buffer *iobuf ) {
1121 uint32_t already_rcvd;
1125 /* Ignore duplicate or out-of-order data */
1126 already_rcvd = ( tcp->rcv_ack - seq );
1127 len = iob_len ( iobuf );
1128 if ( already_rcvd >= len ) {
/* NOTE(review): discard branch body missing here */
/* Strip any leading bytes we have already received */
1132 iob_pull ( iobuf, already_rcvd );
1133 len -= already_rcvd;
1135 /* Acknowledge new data */
1136 tcp_rx_seq ( tcp, len );
1138 /* Deliver data to application */
1139 profile_start ( &tcp_xfer_profiler );
1140 if ( ( rc = xfer_deliver_iob ( &tcp->xfer, iobuf ) ) != 0 ) {
1141 DBGC ( tcp, "TCP %p could not deliver %08x..%08x: %s\n",
1142 tcp, seq, ( seq + len ), strerror ( rc ) );
1145 profile_stop ( &tcp_xfer_profiler );
1151 * Handle TCP received FIN
1153 * @v tcp TCP connection
1154 * @v seq SEQ value (in host-endian order)
1155 * @ret rc Return status code
1157 static int tcp_rx_fin ( struct tcp_connection *tcp, uint32_t seq ) {
1159 /* Ignore duplicate or out-of-order FIN */
1160 if ( seq != tcp->rcv_ack )
1163 /* Acknowledge FIN */
1164 tcp_rx_seq ( tcp, 1 );
1166 /* Mark FIN as received */
1167 tcp->tcp_state |= TCP_STATE_RCVD ( TCP_FIN );
1169 /* Close connection */
1170 tcp_close ( tcp, 0 );
1176 * Handle TCP received RST
1178 * @v tcp TCP connection
1179 * @v seq SEQ value (in host-endian order)
1180 * @ret rc Return status code
1182 static int tcp_rx_rst ( struct tcp_connection *tcp, uint32_t seq ) {
1184 /* Accept RST only if it falls within the window. If we have
1185 * not yet received a SYN, then we have no window to test
1186 * against, so fall back to checking that our SYN has been
1189 if ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) {
1190 if ( ! tcp_in_window ( seq, tcp->rcv_ack, tcp->rcv_win ) )
1193 if ( ! ( tcp->tcp_state & TCP_STATE_ACKED ( TCP_SYN ) ) )
1197 /* Abort connection */
1198 tcp->tcp_state = TCP_CLOSED;
1199 tcp_dump_state ( tcp );
1200 tcp_close ( tcp, -ECONNRESET );
1202 DBGC ( tcp, "TCP %p connection reset by peer\n", tcp );
/* NOTE(review): this extract is missing the seq/seq_len/nxt/len
 * declarations, the free_iob() discard path, the tcpqhdr->seq and
 * tcpqhdr->nxt assignments, the `break` inside the insertion search, and
 * closing braces.  Recover from upstream before compiling. */
1207 * Enqueue received TCP packet
1209 * @v tcp TCP connection
1210 * @v seq SEQ value (in host-endian order)
1211 * @v flags TCP flags
1212 * @v iobuf I/O buffer
1214 static void tcp_rx_enqueue ( struct tcp_connection *tcp, uint32_t seq,
1215 uint8_t flags, struct io_buffer *iobuf ) {
1216 struct tcp_rx_queued_header *tcpqhdr;
1217 struct io_buffer *queued;
1222 /* Calculate remaining flags and sequence length. Note that
1223 * SYN, if present, has already been processed by this point.
/* Only a FIN can remain, so any set flag consumes one sequence octet */
1226 len = iob_len ( iobuf );
1227 seq_len = ( len + ( flags ? 1 : 0 ) );
1228 nxt = ( seq + seq_len );
1230 /* Discard immediately (to save memory) if:
1232 * a) we have not yet received a SYN (and so have no defined
1233 * receive window), or
1234 * b) the packet lies entirely outside the receive window, or
1235 * c) there is no further content to process.
1237 if ( ( ! ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) ) ||
1238 ( tcp_cmp ( seq, tcp->rcv_ack + tcp->rcv_win ) >= 0 ) ||
1239 ( tcp_cmp ( nxt, tcp->rcv_ack ) < 0 ) ||
1240 ( seq_len == 0 ) ) {
/* NOTE(review): free_iob() discard body missing here */
1245 /* Add internal header */
1246 tcpqhdr = iob_push ( iobuf, sizeof ( *tcpqhdr ) );
/* NOTE(review): tcpqhdr->seq and tcpqhdr->nxt assignments missing here */
1249 tcpqhdr->flags = flags;
1251 /* Add to RX queue */
/* Insert in SEQ order, before the first queued packet with a later SEQ */
1252 list_for_each_entry ( queued, &tcp->rx_queue, list ) {
1253 tcpqhdr = queued->data;
1254 if ( tcp_cmp ( seq, tcpqhdr->seq ) < 0 )
1257 list_add_tail ( &iobuf->list, &queued->list );
/* NOTE(review): this extract is missing the seq/flags/len declarations,
 * the list_first_entry() continuation and loop condition, the `break` on
 * the first gap, the seq capture before the header is stripped, and the
 * closing braces.  Recover from upstream before compiling. */
1261 * Process receive queue
1263 * @v tcp TCP connection
1265 static void tcp_process_rx_queue ( struct tcp_connection *tcp ) {
1266 struct io_buffer *iobuf;
1267 struct tcp_rx_queued_header *tcpqhdr;
1272 /* Process all applicable received buffers. Note that we
1273 * cannot use list_for_each_entry() to iterate over the RX
1274 * queue, since tcp_discard() may remove packets from the RX
1275 * queue while we are processing.
1277 while ( ( iobuf = list_first_entry ( &tcp->rx_queue, struct io_buffer,
1280 /* Stop processing when we hit the first gap */
1281 tcpqhdr = iobuf->data;
1282 if ( tcp_cmp ( tcpqhdr->seq, tcp->rcv_ack ) > 0 )
1285 /* Strip internal header and remove from RX queue */
1286 list_del ( &iobuf->list );
/* NOTE(review): seq capture from tcpqhdr missing here; flags/seq must
 * be read before iob_pull() discards the internal header */
1288 flags = tcpqhdr->flags;
1289 iob_pull ( iobuf, sizeof ( *tcpqhdr ) );
1290 len = iob_len ( iobuf );
1292 /* Handle new data, if any */
/* tcp_rx_data() takes ownership of the buffer */
1293 tcp_rx_data ( tcp, seq, iob_disown ( iobuf ) );
1296 /* Handle FIN, if present */
1297 if ( flags & TCP_FIN ) {
1298 tcp_rx_fin ( tcp, seq );
1305 * Process received packet
1307 * @v iobuf I/O buffer
1308 * @v netdev Network device
1309 * @v st_src Partially-filled source address
1310 * @v st_dest Partially-filled destination address
1311 * @v pshdr_csum Pseudo-header checksum
1312 * @ret rc Return status code
1314 static int tcp_rx ( struct io_buffer *iobuf,
1315 struct net_device *netdev __unused,
1316 struct sockaddr_tcpip *st_src,
1317 struct sockaddr_tcpip *st_dest __unused,
1318 uint16_t pshdr_csum ) {
1319 struct tcp_header *tcphdr = iobuf->data;
1320 struct tcp_connection *tcp;
1321 struct tcp_options options;
1331 size_t old_xfer_window;
1334 /* Start profiling */
1335 profile_start ( &tcp_rx_profiler );
/* Validate length, header length field, and checksum before
 * trusting any header field.
 */
1337 /* Sanity check packet */
1338 if ( iob_len ( iobuf ) < sizeof ( *tcphdr ) ) {
1339 DBG ( "TCP packet too short at %zd bytes (min %zd bytes)\n",
1340 iob_len ( iobuf ), sizeof ( *tcphdr ) );
/* Data offset lives in the upper nibble of hlen, counted in
 * 32-bit words; convert to a byte count.
 */
1344 hlen = ( ( tcphdr->hlen & TCP_MASK_HLEN ) / 16 ) * 4;
1345 if ( hlen < sizeof ( *tcphdr ) ) {
1346 DBG ( "TCP header too short at %zd bytes (min %zd bytes)\n",
1347 hlen, sizeof ( *tcphdr ) );
1351 if ( hlen > iob_len ( iobuf ) ) {
1352 DBG ( "TCP header too long at %zd bytes (max %zd bytes)\n",
1353 hlen, iob_len ( iobuf ) );
/* Continue the pseudo-header checksum over the TCP segment; a
 * correct packet folds to zero since the checksum field itself
 * is included.
 */
1357 csum = tcpip_continue_chksum ( pshdr_csum, iobuf->data,
1358 iob_len ( iobuf ) );
1360 DBG ( "TCP checksum incorrect (is %04x including checksum "
1361 "field, should be 0000)\n", csum );
/* NOTE(review): tcp may legitimately be NULL after tcp_demux();
 * subsequent option parsing and debug output tolerate this, and
 * the packet is dropped further below if no connection matches.
 */
1366 /* Parse parameters from header and strip header */
1367 tcp = tcp_demux ( ntohs ( tcphdr->dest ) );
1368 seq = ntohl ( tcphdr->seq );
1369 ack = ntohl ( tcphdr->ack );
1370 raw_win = ntohs ( tcphdr->win );
1371 flags = tcphdr->flags;
1372 tcp_rx_opts ( tcp, ( ( ( void * ) tcphdr ) + sizeof ( *tcphdr ) ),
1373 ( hlen - sizeof ( *tcphdr ) ), &options );
/* Record peer's timestamp (RFC 1323 TSopt); copied to ts_recent
 * only when the window advances (see struct tcp_connection).
 */
1374 if ( tcp && options.tsopt )
1375 tcp->ts_val = ntohl ( options.tsopt->tsval );
1376 iob_pull ( iobuf, hlen );
1377 len = iob_len ( iobuf );
/* SYN and FIN each consume one sequence number in addition to
 * the data octets.
 */
1378 seq_len = ( len + ( ( flags & TCP_SYN ) ? 1 : 0 ) +
1379 ( ( flags & TCP_FIN ) ? 1 : 0 ) );
1382 DBGC2 ( tcp, "TCP %p RX %d<-%d %08x %08x..%08x %4zd",
1383 tcp, ntohs ( tcphdr->dest ), ntohs ( tcphdr->src ),
1384 ntohl ( tcphdr->ack ), ntohl ( tcphdr->seq ),
1385 ( ntohl ( tcphdr->seq ) + seq_len ), len );
1386 tcp_dump_flags ( tcp, tcphdr->flags );
1387 DBGC2 ( tcp, "\n" );
1389 /* If no connection was found, silently drop packet */
/* Snapshot the window so we can notify the application only on
 * an actual change (see end of function).
 */
1395 /* Record old data-transfer window */
1396 old_xfer_window = tcp_xfer_window ( tcp );
/* Apply the peer's advertised window, scaled per RFC 1323; an
 * unacceptable ACK elicits a RST.
 */
1398 /* Handle ACK, if present */
1399 if ( flags & TCP_ACK ) {
1400 win = ( raw_win << tcp->snd_win_scale );
1401 if ( ( rc = tcp_rx_ack ( tcp, ack, win ) ) != 0 ) {
1402 tcp_xmit_reset ( tcp, st_src, tcphdr );
1407 /* Force an ACK if this packet is out of order */
1408 if ( ( tcp->tcp_state & TCP_STATE_RCVD ( TCP_SYN ) ) &&
1409 ( seq != tcp->rcv_ack ) ) {
1410 tcp->flags |= TCP_ACK_PENDING;
1413 /* Handle SYN, if present */
1414 if ( flags & TCP_SYN ) {
1415 tcp_rx_syn ( tcp, seq, &options );
1419 /* Handle RST, if present */
1420 if ( flags & TCP_RST ) {
1421 if ( ( rc = tcp_rx_rst ( tcp, seq ) ) != 0 )
/* Ownership of iobuf passes to tcp_rx_enqueue() here */
1425 /* Enqueue received data */
1426 tcp_rx_enqueue ( tcp, seq, flags, iob_disown ( iobuf ) );
1428 /* Process receive queue */
1429 tcp_process_rx_queue ( tcp );
1431 /* Dump out any state change as a result of the received packet */
1432 tcp_dump_state ( tcp );
1434 /* Schedule transmission of ACK (and any pending data). If we
1435 * have received any out-of-order packets (i.e. if the receive
1436 * queue remains non-empty after processing) then send the ACK
1437 * immediately in order to trigger Fast Retransmission.
1439 if ( list_empty ( &tcp->rx_queue ) ) {
1440 process_add ( &tcp->process );
1442 tcp_xmit_sack ( tcp, seq );
/* 2*MSL wait before freeing, matching TIME-WAIT semantics */
1445 /* If this packet was the last we expect to receive, set up
1446 * timer to expire and cause the connection to be freed.
1448 if ( TCP_CLOSED_GRACEFULLY ( tcp->tcp_state ) ) {
1449 stop_timer ( &tcp->wait );
1450 start_timer_fixed ( &tcp->wait, ( 2 * TCP_MSL ) );
1453 /* Notify application if window has changed */
1454 if ( tcp_xfer_window ( tcp ) != old_xfer_window )
1455 xfer_window_changed ( &tcp->xfer );
1457 profile_stop ( &tcp_rx_profiler );
1461 /* Free received packet */
/** TCP protocol, registered with the TCP/IP stack for IP protocol
 * number IP_TCP so that received TCP segments are demultiplexed to
 * tcp_rx().
 */
1467 struct tcpip_protocol tcp_protocol __tcpip_protocol = {
1470 .tcpip_proto = IP_TCP,
1474 * Discard some cached TCP data
1476 * @ret discarded Number of cached items discarded
1478 static unsigned int tcp_discard ( void ) {
1479 struct tcp_connection *tcp;
1480 struct io_buffer *iobuf;
1481 unsigned int discarded = 0;
/* Under memory pressure, reclaim queued out-of-order RX buffers;
 * iterating in reverse drops the highest-SEQ (least useful)
 * packet from each connection first.
 */
1483 /* Try to drop one queued RX packet from each connection */
1484 list_for_each_entry ( tcp, &tcp_conns, list ) {
1485 list_for_each_entry_reverse ( iobuf, &tcp->rx_queue, list ) {
1487 /* Remove packet from queue */
1488 list_del ( &iobuf->list );
1491 /* Report discard */
1500 /** TCP cache discarder */
/* Registered at CACHE_NORMAL priority; invoked by the memory
 * allocator to free cached data when allocations fail.
 */
1501 struct cache_discarder tcp_discarder __cache_discarder ( CACHE_NORMAL ) = {
1502 .discard = tcp_discard,
1506 * Find first TCP connection that has not yet been closed
1508 * @ret tcp First unclosed connection, or NULL
1510 static struct tcp_connection * tcp_first_unclosed ( void ) {
1511 struct tcp_connection *tcp;
/* "Closed" here means the data-transfer interface has been
 * closed (TCP_XFER_CLOSED), not that the TCP state machine has
 * finished; used by tcp_shutdown() to drive graceful close.
 */
1513 /* Find first connection which has not yet been closed */
1514 list_for_each_entry ( tcp, &tcp_conns, list ) {
1515 if ( ! ( tcp->flags & TCP_XFER_CLOSED ) )
1522 * Find first TCP connection that has not yet finished all operations
1524 * @ret tcp First unfinished connection, or NULL
1526 static struct tcp_connection * tcp_first_unfinished ( void ) {
1527 struct tcp_connection *tcp;
/* A connection is "finished" only once it has closed gracefully
 * AND its transmit process has nothing left to send.
 */
1529 /* Find first connection which has not yet closed gracefully,
1530 * or which still has a pending transmission (e.g. to ACK the
1533 list_for_each_entry ( tcp, &tcp_conns, list ) {
1534 if ( ( ! TCP_CLOSED_GRACEFULLY ( tcp->tcp_state ) ) ||
1535 process_running ( &tcp->process ) ) {
1543 * Shut down all TCP connections
1546 static void tcp_shutdown ( int booting __unused ) {
1547 struct tcp_connection *tcp;
1548 unsigned long start;
1549 unsigned long elapsed;
/* Phase 1: ask every open connection to close; re-fetch the
 * first unclosed connection each time because tcp_close() may
 * modify the connection list.
 */
1551 /* Initiate a graceful close of all connections, allowing for
1552 * the fact that the connection list may change as we do so.
1554 while ( ( tcp = tcp_first_unclosed() ) ) {
1555 DBGC ( tcp, "TCP %p closing for shutdown\n", tcp );
1556 tcp_close ( tcp, -ECANCELED );
/* Phase 2: poll until all connections finish, bounded by
 * TCP_FINISH_TIMEOUT ticks measured from now.
 */
1559 /* Wait for all connections to finish closing gracefully */
1560 start = currticks();
1561 while ( ( tcp = tcp_first_unfinished() ) &&
1562 ( ( elapsed = ( currticks() - start ) ) < TCP_FINISH_TIMEOUT )){
/* Phase 3: hard-close anything that did not finish in time */
1566 /* Forcibly close any remaining connections */
1567 while ( ( tcp = list_first_entry ( &tcp_conns, struct tcp_connection,
1568 list ) ) != NULL ) {
1569 tcp->tcp_state = TCP_CLOSED;
1570 tcp_dump_state ( tcp );
1571 tcp_close ( tcp, -ECANCELED );
1575 /** TCP shutdown function */
/* Runs at STARTUP_LATE so that TCP connections are torn down
 * after higher-level users during shutdown.
 */
1576 struct startup_fn tcp_startup_fn __startup_fn ( STARTUP_LATE ) = {
1577 .shutdown = tcp_shutdown,
1580 /***************************************************************************
1582 * Data transfer interface
1584 ***************************************************************************
1590 * @v tcp TCP connection
1591 * @v rc Reason for close
1593 static void tcp_xfer_close ( struct tcp_connection *tcp, int rc ) {
/* Marks the data-transfer interface closed and begins graceful
 * TCP teardown (FIN transmission follows, if the state allows).
 */
1595 /* Close data transfer interface */
1596 tcp_close ( tcp, rc );
1598 /* Transmit FIN, if possible */
1603 * Deliver datagram as I/O buffer
1605 * @v tcp TCP connection
1606 * @v iobuf Datagram I/O buffer
1607 * @v meta Data transfer metadata
1608 * @ret rc Return status code
1610 static int tcp_xfer_deliver ( struct tcp_connection *tcp,
1611 struct io_buffer *iobuf,
1612 struct xfer_metadata *meta __unused ) {
/* Takes ownership of iobuf: it stays on tx_queue until the
 * transmit path consumes it.
 */
1614 /* Enqueue packet */
1615 list_add_tail ( &iobuf->list, &tcp->tx_queue );
/* Balancing pending_put() is presumably done once the data is
 * acknowledged/transmitted — that code is outside this block.
 */
1617 /* Each enqueued packet is a pending operation */
1618 pending_get ( &tcp->pending_data );
1620 /* Transmit data, if possible */
1626 /** TCP data transfer interface operations */
/* Maps generic interface operations onto their TCP handlers:
 * deliver queued data, report window size, and close.
 */
1627 static struct interface_operation tcp_xfer_operations[] = {
1628 INTF_OP ( xfer_deliver, struct tcp_connection *, tcp_xfer_deliver ),
1629 INTF_OP ( xfer_window, struct tcp_connection *, tcp_xfer_window ),
1630 INTF_OP ( intf_close, struct tcp_connection *, tcp_xfer_close ),
1633 /** TCP data transfer interface descriptor */
1634 static struct interface_descriptor tcp_xfer_desc =
1635 INTF_DESC ( struct tcp_connection, xfer, tcp_xfer_operations );
1637 /***************************************************************************
1641 ***************************************************************************
1644 /** TCP IPv4 socket opener */
1645 struct socket_opener tcp_ipv4_socket_opener __socket_opener = {
1646 .semantics = TCP_SOCK_STREAM,
1651 /** TCP IPv6 socket opener */
1652 struct socket_opener tcp_ipv6_socket_opener __socket_opener = {
1653 .semantics = TCP_SOCK_STREAM,
1659 int tcp_sock_stream = TCP_SOCK_STREAM;
1664 * @v xfer Data transfer interface
1666 * @ret rc Return status code
1668 static int tcp_open_uri ( struct interface *xfer, struct uri *uri ) {
1669 struct sockaddr_tcpip peer;
/* Build a socket address carrying only the port; the host name
 * is resolved by the named-socket opener below.  A default port
 * of 0 is used when the URI specifies none.
 */
1675 memset ( &peer, 0, sizeof ( peer ) );
1676 peer.st_port = htons ( uri_port ( uri, 0 ) );
1677 return xfer_open_named_socket ( xfer, SOCK_STREAM,
1678 ( struct sockaddr * ) &peer,
1682 /** TCP URI opener */
1683 struct uri_opener tcp_uri_opener __uri_opener = {
1685 .open = tcp_open_uri,