These changes are the raw update to linux-4.4.6-rt14. Kernel sources
[kvmfornfv.git] kernel/net/ipv4/tcp_timer.c
index 8c65dc1..193ba1f 100644
@@ -83,7 +83,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
 }
 
 /* Calculate maximal number of retries on an orphaned socket. */
-static int tcp_orphan_retries(struct sock *sk, int alive)
+static int tcp_orphan_retries(struct sock *sk, bool alive)
 {
        int retries = sysctl_tcp_orphan_retries; /* May be zero. */
 
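For context, this is roughly how tcp_orphan_retries() reads once the parameter becomes a bool; only the signature comes from the hunk above, the body is reconstructed from the surrounding 4.4 sources and may differ in detail:

    static int tcp_orphan_retries(struct sock *sk, bool alive)
    {
    	int retries = sysctl_tcp_orphan_retries; /* May be zero. */

    	/* We know from an ICMP error that something is lost. */
    	if (sk->sk_err_soft && !alive)
    		retries = 0;

    	/* However, if the socket sent something recently, select some
    	 * safe number of retries. 8 corresponds to >100 seconds with
    	 * a minimal RTO of 200 msec.
    	 */
    	if (retries == 0 && alive)
    		retries = 8;
    	return retries;
    }

The bool parameter matches the call sites below, which already compute a boolean condition, so the implicit int conversion goes away.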
@@ -168,7 +168,7 @@ static int tcp_write_timeout(struct sock *sk)
                        dst_negative_advice(sk);
                        if (tp->syn_fastopen || tp->syn_data)
                                tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
-                       if (tp->syn_data)
+                       if (tp->syn_data && icsk->icsk_retransmits == 1)
                                NET_INC_STATS_BH(sock_net(sk),
                                                 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
                }
@@ -176,6 +176,18 @@ static int tcp_write_timeout(struct sock *sk)
                syn_set = true;
        } else {
                if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
+                       /* Some middle-boxes may black-hole Fast Open _after_
+                        * the handshake. Therefore we conservatively disable
+                        * Fast Open on this path on recurring timeouts with
+                        * few or zero bytes acked after Fast Open.
+                        */
+                       if (tp->syn_data_acked &&
+                           tp->bytes_acked <= tp->rx_opt.mss_clamp) {
+                               tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
+                               if (icsk->icsk_retransmits == sysctl_tcp_retries1)
+                                       NET_INC_STATS_BH(sock_net(sk),
+                                                        LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+                       }
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);
 
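Read in isolation, the heuristic added above amounts to the following check; this is a minimal sketch for illustration, and the helper name tcp_fastopen_seems_blackholed() is invented here, not part of the patch:

    /* True when the connection used Fast Open (the SYN data was acked)
     * yet no more than one MSS worth of payload has ever been acked,
     * i.e. nothing beyond the SYN data appears to get through.
     */
    static bool tcp_fastopen_seems_blackholed(const struct tcp_sock *tp)
    {
    	return tp->syn_data_acked &&
    	       tp->bytes_acked <= tp->rx_opt.mss_clamp;
    }

When this holds on a retransmission timeout, the per-destination Fast Open state is cleared via tcp_fastopen_cache_set(), and LINUX_MIB_TCPFASTOPENACTIVEFAIL is bumped only on the timeout where icsk_retransmits equals sysctl_tcp_retries1, so repeated timeouts on the same connection are not counted more than once; the icsk_retransmits == 1 guard added in the SYN-timeout hunk above follows the same reasoning.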
@@ -184,7 +196,7 @@ static int tcp_write_timeout(struct sock *sk)
 
                retry_until = sysctl_tcp_retries2;
                if (sock_flag(sk, SOCK_DEAD)) {
-                       const int alive = icsk->icsk_rto < TCP_RTO_MAX;
+                       const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
 
                        retry_until = tcp_orphan_retries(sk, alive);
                        do_reset = alive ||
@@ -247,7 +259,7 @@ void tcp_delack_timer_handler(struct sock *sk)
        }
 
 out:
-       if (sk_under_memory_pressure(sk))
+       if (tcp_under_memory_pressure(sk))
                sk_mem_reclaim(sk);
 }
 
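tcp_under_memory_pressure() is the TCP-specific variant of the generic socket helper: it reads tcp_memory_pressure (or the cgroup's pressure flag when memcg socket accounting is active) without going through sk->sk_prot. A rough sketch of how it is typically defined in this kernel generation, reconstructed rather than quoted from this tree:

    static inline bool tcp_under_memory_pressure(const struct sock *sk)
    {
    	/* With memcg socket accounting, report the cgroup's pressure. */
    	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
    		return !!sk->sk_cgrp->memory_pressure;

    	/* Otherwise fall back to the global TCP pressure flag. */
    	return tcp_memory_pressure;
    }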
@@ -298,7 +310,7 @@ static void tcp_probe_timer(struct sock *sk)
 
        max_probes = sysctl_tcp_retries2;
        if (sock_flag(sk, SOCK_DEAD)) {
-               const int alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
+               const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
 
                max_probes = tcp_orphan_retries(sk, alive);
                if (!alive && icsk->icsk_backoff >= max_probes)
@@ -616,7 +628,7 @@ static void tcp_keepalive_timer (unsigned long data)
                        tcp_write_err(sk);
                        goto out;
                }
-               if (tcp_write_wakeup(sk) <= 0) {
+               if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
                        icsk->icsk_probes_out++;
                        elapsed = keepalive_intvl_when(tp);
                } else {
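tcp_write_wakeup() now takes the SNMP counter that the resulting probe should be charged to, so the keepalive path above passes LINUX_MIB_TCPKEEPALIVE while the zero-window probe timer presumably keeps using LINUX_MIB_TCPWINPROBE. A hedged sketch of the prototype this hunk relies on (the tcp_output.c side is assumed, not part of this diff):

    /* include/net/tcp.h (assumed): mib names the SNMP counter to credit
     * for the probe that gets sent, e.g. LINUX_MIB_TCPKEEPALIVE here or
     * LINUX_MIB_TCPWINPROBE from the zero-window probe timer.
     */
    int tcp_write_wakeup(struct sock *sk, int mib);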
@@ -649,4 +661,3 @@ void tcp_init_xmit_timers(struct sock *sk)
        inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
                                  &tcp_keepalive_timer);
 }
-EXPORT_SYMBOL(tcp_init_xmit_timers);