kernel/net/ipv4/tcp.c (kvmfornfv.git, kernel 4.4.50-rt62)
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *              Alan Cox        :       Numerous verify_area() calls
 *              Alan Cox        :       Set the ACK bit on a reset
 *              Alan Cox        :       Stopped it crashing if it closed while
 *                                      sk->inuse=1 and was trying to connect
 *                                      (tcp_err()).
 *              Alan Cox        :       All icmp error handling was broken;
 *                                      pointers passed were wrong and the
 *                                      socket was looked up backwards. Nobody
 *                                      tested any icmp error code, obviously.
 *              Alan Cox        :       tcp_err() now handled properly. It
 *                                      wakes people on errors. poll
 *                                      behaves and the icmp error race
 *                                      has gone by moving it into sock.c
 *              Alan Cox        :       tcp_send_reset() fixed to work for
 *                                      everything not just packets for
 *                                      unknown sockets.
 *              Alan Cox        :       tcp option processing.
 *              Alan Cox        :       Reset tweaked (still not 100%) [Had
 *                                      syn rule wrong]
 *              Herp Rosmanith  :       More reset fixes
 *              Alan Cox        :       No longer acks invalid rst frames.
 *                                      Acking any kind of RST is right out.
 *              Alan Cox        :       Sets an ignore me flag on an rst
 *                                      receive otherwise odd bits of prattle
 *                                      escape still
 *              Alan Cox        :       Fixed another acking RST frame bug.
 *                                      Should stop LAN workplace lockups.
 *              Alan Cox        :       Some tidyups using the new skb list
 *                                      facilities
 *              Alan Cox        :       sk->keepopen now seems to work
 *              Alan Cox        :       Pulls options out correctly on accepts
 *              Alan Cox        :       Fixed assorted sk->rqueue->next errors
 *              Alan Cox        :       PSH doesn't end a TCP read. Switched a
 *                                      bit to skb ops.
 *              Alan Cox        :       Tidied tcp_data to avoid a potential
 *                                      nasty.
 *              Alan Cox        :       Added some better commenting, as the
 *                                      tcp is hard to follow
 *              Alan Cox        :       Removed incorrect check for 20 * psh
 *      Michael O'Reilly        :       ack < copied bug fix.
 *      Johannes Stille         :       Misc tcp fixes (not all in yet).
 *              Alan Cox        :       FIN with no memory -> CRASH
 *              Alan Cox        :       Added socket option proto entries.
 *                                      Also added awareness of them to accept.
 *              Alan Cox        :       Added TCP options (SOL_TCP)
 *              Alan Cox        :       Switched wakeup calls to callbacks,
 *                                      so the kernel can layer network
 *                                      sockets.
 *              Alan Cox        :       Use ip_tos/ip_ttl settings.
 *              Alan Cox        :       Handle FIN (more) properly (we hope).
 *              Alan Cox        :       RST frames sent on unsynchronised
 *                                      state ack error.
 *              Alan Cox        :       Put in missing check for SYN bit.
 *              Alan Cox        :       Added tcp_select_window() aka NET2E
 *                                      window non shrink trick.
 *              Alan Cox        :       Added a couple of small NET2E timer
 *                                      fixes
 *              Charles Hedrick :       TCP fixes
 *              Toomas Tamm     :       TCP window fixes
 *              Alan Cox        :       Small URG fix to rlogin ^C ack fight
 *              Charles Hedrick :       Rewrote most of it to actually work
 *              Linus           :       Rewrote tcp_read() and URG handling
 *                                      completely
 *              Gerhard Koerting:       Fixed some missing timer handling
 *              Matthew Dillon  :       Reworked TCP machine states as per RFC
 *              Gerhard Koerting:       PC/TCP workarounds
 *              Adam Caldwell   :       Assorted timer/timing errors
 *              Matthew Dillon  :       Fixed another RST bug
 *              Alan Cox        :       Move to kernel side addressing changes.
 *              Alan Cox        :       Beginning work on TCP fastpathing
 *                                      (not yet usable)
 *              Arnt Gulbrandsen:       Turbocharged tcp_check() routine.
 *              Alan Cox        :       TCP fast path debugging
 *              Alan Cox        :       Window clamping
 *              Michael Riepe   :       Bug in tcp_check()
 *              Matt Dillon     :       More TCP improvements and RST bug fixes
 *              Matt Dillon     :       Yet more small nasties removed from the
 *                                      TCP code (Be very nice to this man if
 *                                      tcp finally works 100%) 8)
 *              Alan Cox        :       BSD accept semantics.
 *              Alan Cox        :       Reset on closedown bug.
 *      Peter De Schrijver      :       ENOTCONN check missing in tcp_sendto().
 *              Michael Pall    :       Handle poll() after URG properly in
 *                                      all cases.
 *              Michael Pall    :       Undo the last fix in tcp_read_urg()
 *                                      (multi URG PUSH broke rlogin).
 *              Michael Pall    :       Fix the multi URG PUSH problem in
 *                                      tcp_readable(), poll() after URG
 *                                      works now.
 *              Michael Pall    :       recv(...,MSG_OOB) never blocks in the
 *                                      BSD api.
 *              Alan Cox        :       Changed the semantics of sk->socket to
 *                                      fix a race and a signal problem with
 *                                      accept() and async I/O.
 *              Alan Cox        :       Relaxed the rules on tcp_sendto().
 *              Yury Shevchuk   :       Really fixed accept() blocking problem.
 *              Craig I. Hagan  :       Allow for BSD compatible TIME_WAIT for
 *                                      clients/servers which listen in on
 *                                      fixed ports.
 *              Alan Cox        :       Cleaned the above up and shrank it to
 *                                      a sensible code size.
 *              Alan Cox        :       Self connect lockup fix.
 *              Alan Cox        :       No connect to multicast.
 *              Ross Biro       :       Close unaccepted children on master
 *                                      socket close.
 *              Alan Cox        :       Reset tracing code.
 *              Alan Cox        :       Spurious resets on shutdown.
 *              Alan Cox        :       Giant 15 minute/60 second timer error
 *              Alan Cox        :       Small whoops in polling before an
 *                                      accept.
 *              Alan Cox        :       Kept the state trace facility since
 *                                      it's handy for debugging.
 *              Alan Cox        :       More reset handler fixes.
 *              Alan Cox        :       Started rewriting the code based on
 *                                      the RFC's; for other useful protocol
 *                                      references see: Comer, KA9Q NOS, and
 *                                      for a reference on the difference
 *                                      between specifications and how BSD
 *                                      works see the 4.4lite source.
 *              A.N.Kuznetsov   :       Don't time wait on completion of tidy
 *                                      close.
 *              Linus Torvalds  :       Fin/Shutdown & copied_seq changes.
 *              Linus Torvalds  :       Fixed BSD port reuse to work first syn
 *              Alan Cox        :       Reimplemented timers as per the RFC
 *                                      and using multiple timers for sanity.
 *              Alan Cox        :       Small bug fixes, and a lot of new
 *                                      comments.
 *              Alan Cox        :       Fixed dual reader crash by locking
 *                                      the buffers (much like datagram.c)
 *              Alan Cox        :       Fixed stuck sockets in probe. A probe
 *                                      now gets fed up of retrying without
 *                                      (even a no space) answer.
 *              Alan Cox        :       Extracted closing code better
 *              Alan Cox        :       Fixed the closing state machine to
 *                                      resemble the RFC.
 *              Alan Cox        :       More 'per spec' fixes.
 *              Jorge Cwik      :       Even faster checksumming.
 *              Alan Cox        :       tcp_data() doesn't ack illegal PSH
 *                                      only frames. At least one pc tcp stack
 *                                      generates them.
 *              Alan Cox        :       Cache last socket.
 *              Alan Cox        :       Per route irtt.
 *              Matt Day        :       poll()->select() match BSD precisely on error
 *              Alan Cox        :       New buffers
 *              Marc Tamsky     :       Various sk->prot->retransmits and
 *                                      sk->retransmits misupdating fixed.
 *                                      Fixed tcp_write_timeout: stuck close,
 *                                      and TCP syn retries gets used now.
 *              Mark Yarvis     :       In tcp_read_wakeup(), don't send an
 *                                      ack if state is TCP_CLOSED.
 *              Alan Cox        :       Look up device on a retransmit - routes may
 *                                      change. Doesn't yet cope with MSS shrink right
 *                                      but it's a start!
 *              Marc Tamsky     :       Closing in closing fixes.
 *              Mike Shaver     :       RFC1122 verifications.
 *              Alan Cox        :       rcv_saddr errors.
 *              Alan Cox        :       Block double connect().
 *              Alan Cox        :       Small hooks for enSKIP.
 *              Alexey Kuznetsov:       Path MTU discovery.
 *              Alan Cox        :       Support soft errors.
 *              Alan Cox        :       Fix MTU discovery pathological case
 *                                      when the remote claims no mtu!
 *              Marc Tamsky     :       TCP_CLOSE fix.
 *              Colin (G3TNE)   :       Send a reset on syn ack replies in
 *                                      window but wrong (fixes NT lpd problems)
 *              Pedro Roque     :       Better TCP window handling, delayed ack.
 *              Joerg Reuter    :       No modification of locked buffers in
 *                                      tcp_do_retransmit()
 *              Eric Schenk     :       Changed receiver side silly window
 *                                      avoidance algorithm to BSD style
 *                                      algorithm. This doubles throughput
 *                                      against machines running Solaris,
 *                                      and seems to result in general
 *                                      improvement.
 *      Stefan Magdalinski      :       adjusted tcp_readable() to fix FIONREAD
 *      Willy Konynenberg       :       Transparent proxying support.
 *      Mike McLagan            :       Routing by source
 *              Keith Owens     :       Do proper merging with partial SKB's in
 *                                      tcp_do_sendmsg to avoid burstiness.
 *              Eric Schenk     :       Fix fast close down bug with
 *                                      shutdown() followed by close().
 *              Andi Kleen      :       Make poll agree with SIGIO
 *      Salvatore Sanfilippo    :       Support SO_LINGER with linger == 1 and
 *                                      lingertime == 0 (RFC 793 ABORT Call)
 *      Hirokazu Takahashi      :       Use copy_from_user() instead of
 *                                      csum_and_copy_from_user() if possible.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *      TCP_SYN_SENT            sent a connection request, waiting for ack
 *
 *      TCP_SYN_RECV            received a connection request, sent ack,
 *                              waiting for final ack in three-way handshake.
 *
 *      TCP_ESTABLISHED         connection established
 *
 *      TCP_FIN_WAIT1           our side has shutdown, waiting to complete
 *                              transmission of remaining buffered data
 *
 *      TCP_FIN_WAIT2           all buffered data sent, waiting for remote
 *                              to shutdown
 *
 *      TCP_CLOSING             both sides have shutdown but we still have
 *                              data we have to finish sending
 *
 *      TCP_TIME_WAIT           timeout to catch resent junk before entering
 *                              closed, can only be entered from FIN_WAIT2
 *                              or CLOSING.  Required because the other end
 *                              may not have gotten our last ACK causing it
 *                              to retransmit the data packet (which we ignore)
 *
 *      TCP_CLOSE_WAIT          remote side has shutdown and is waiting for
 *                              us to finish writing our data and to shutdown
 *                              (we have to close() to move on to LAST_ACK)
 *
 *      TCP_LAST_ACK            our side has shutdown after remote has
 *                              shutdown.  There may still be data in our
 *                              buffer that we have to finish sending
 *
 *      TCP_CLOSE               socket is finished
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/time.h>
#include <linux/slab.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/unaligned.h>
#include <net/busy_poll.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

int sysctl_tcp_min_tso_segs __read_mostly = 2;

int sysctl_tcp_autocorking __read_mostly = 1;

struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_long_t tcp_memory_allocated;     /* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
        struct pipe_inode_info *pipe;
        size_t len;
        unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
        if (!tcp_memory_pressure) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
                tcp_memory_pressure = 1;
        }
}
EXPORT_SYMBOL(tcp_enter_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
        u8 res = 0;

        if (seconds > 0) {
                int period = timeout;

                res = 1;
                while (seconds > period && res < 255) {
                        res++;
                        timeout <<= 1;
                        if (timeout > rto_max)
                                timeout = rto_max;
                        period += timeout;
                }
        }
        return res;
}
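
/* Worked example (illustrative, assuming an initial timeout of 1 second
 * and a 120 second ceiling): secs_to_retrans(15, 1, 120) walks the
 * exponential backoff series 1 + 2 + 4 + 8 = 15 and returns 4, i.e. four
 * retransmissions fit in a 15 second budget.  retrans_to_secs() below is
 * the inverse mapping: retrans_to_secs(4, 1, 120) == 15.
 */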

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
        int period = 0;

        if (retrans > 0) {
                period = timeout;
                while (--retrans) {
                        timeout <<= 1;
                        if (timeout > rto_max)
                                timeout = rto_max;
                        period += timeout;
                }
        }
        return period;
}

/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        __skb_queue_head_init(&tp->out_of_order_queue);
        tcp_init_xmit_timers(sk);
        tcp_prequeue_init(tp);
        INIT_LIST_HEAD(&tp->tsq_node);

        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
        tp->rtt_min[0].rtt = ~0U;

        /* So many TCP implementations out there (incorrectly) count the
         * initial SYN frame in their delayed-ACK and congestion control
         * algorithms that we must have the following bandaid to talk
         * efficiently to them.  -DaveM
         */
        tp->snd_cwnd = TCP_INIT_CWND;

        /* See draft-stevens-tcpca-spec-01 for discussion of the
         * initialization of these values.
         */
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        tp->snd_cwnd_clamp = ~0;
        tp->mss_cache = TCP_MSS_DEFAULT;
        u64_stats_init(&tp->syncp);

        tp->reordering = sysctl_tcp_reordering;
        tcp_enable_early_retrans(tp);
        tcp_assign_congestion_control(sk);

        tp->tsoffset = 0;

        sk->sk_state = TCP_CLOSE;

        sk->sk_write_space = sk_stream_write_space;
        sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

        icsk->icsk_sync_mss = tcp_sync_mss;

        sk->sk_sndbuf = sysctl_tcp_wmem[1];
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];

        local_bh_disable();
        sock_update_memcg(sk);
        sk_sockets_allocated_inc(sk);
        local_bh_enable();
}
EXPORT_SYMBOL(tcp_init_sock);

/* Request a transmit timestamp for the last byte of this skb if the
 * socket has timestamping enabled (SO_TIMESTAMPING).
 */
static void tcp_tx_timestamp(struct sock *sk, struct sk_buff *skb)
{
        if (sk->sk_tsflags) {
                struct skb_shared_info *shinfo = skb_shinfo(skb);

                sock_tx_timestamp(sk, &shinfo->tx_flags);
                if (shinfo->tx_flags & SKBTX_ANY_TSTAMP)
                        shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
        }
}

/*
 *      Wait for a TCP event.
 *
 *      Note that we don't need to lock the socket, as the upper poll layers
 *      take care of normal races (between the test and the event) and we don't
 *      go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
        unsigned int mask;
        struct sock *sk = sock->sk;
        const struct tcp_sock *tp = tcp_sk(sk);
        int state;

        sock_rps_record_flow(sk);

        sock_poll_wait(file, sk_sleep(sk), wait);

        state = sk_state_load(sk);
        if (state == TCP_LISTEN)
                return inet_csk_listen_poll(sk);

        /* Socket is not locked. We are protected from async events
         * by poll logic and correct handling of state changes
         * made by other threads is impossible in any case.
         */

        mask = 0;

        /*
         * POLLHUP is certainly not done right. But poll() doesn't
         * have a notion of HUP in just one direction, and for a
         * socket the read side is more interesting.
         *
         * Some poll() documentation says that POLLHUP is incompatible
         * with the POLLOUT/POLLWR flags, so somebody should check this
         * all. But careful, it tends to be safer to return too many
         * bits than too few, and you can easily break real applications
         * if you don't tell them that something has hung up!
         *
         * Check-me.
         *
         * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
         * our fs/select.c). It means that after we received EOF,
         * poll always returns immediately, making it impossible to
         * poll() for write in state CLOSE_WAIT. One solution is evident
         * --- to set POLLHUP if and only if shutdown has been made in
         * both directions. Actually, it is interesting to look how
         * Solaris and DUX solve this dilemma. I would prefer, if POLLHUP
         * were maskable, then we could set it on SND_SHUTDOWN. BTW the
         * examples given in Stevens' books assume exactly this behaviour,
         * it explains why POLLHUP is incompatible with POLLOUT.   --ANK
         *
         * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
         * blocking on fresh not-connected or disconnected socket. --ANK
         */
        if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
                mask |= POLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLIN | POLLRDNORM | POLLRDHUP;

        /* Connected or passive Fast Open socket? */
        if (state != TCP_SYN_SENT &&
            (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
                int target = sock_rcvlowat(sk, 0, INT_MAX);

                if (tp->urg_seq == tp->copied_seq &&
                    !sock_flag(sk, SOCK_URGINLINE) &&
                    tp->urg_data)
                        target++;

                if (tp->rcv_nxt - tp->copied_seq >= target)
                        mask |= POLLIN | POLLRDNORM;

                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                        if (sk_stream_is_writeable(sk)) {
                                mask |= POLLOUT | POLLWRNORM;
                        } else {  /* send SIGIO later */
                                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

                                /* Race breaker. If space is freed after
                                 * wspace test but before the flags are set,
                                 * IO signal will be lost. Memory barrier
                                 * pairs with the input side.
                                 */
                                smp_mb__after_atomic();
                                if (sk_stream_is_writeable(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
                } else
                        mask |= POLLOUT | POLLWRNORM;

                if (tp->urg_data & TCP_URG_VALID)
                        mask |= POLLPRI;
        }
        /* This barrier is coupled with smp_wmb() in tcp_reset() */
        smp_rmb();
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;

        return mask;
}
EXPORT_SYMBOL(tcp_poll);
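
/* Illustrative userspace counterpart of tcp_poll() (tcp_fd is a
 * hypothetical connected TCP socket):
 *
 *      struct pollfd pfd = { .fd = tcp_fd, .events = POLLIN | POLLOUT };
 *      poll(&pfd, 1, timeout_ms);
 *
 * POLLIN is reported once at least SO_RCVLOWAT bytes (one by default)
 * are queued, POLLOUT once the write queue has space, and POLLHUP only
 * when both directions are shut down or the socket is closed.
 */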

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int answ;
        bool slow;

        switch (cmd) {
        case SIOCINQ:
                if (sk->sk_state == TCP_LISTEN)
                        return -EINVAL;

                slow = lock_sock_fast(sk);
                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else if (sock_flag(sk, SOCK_URGINLINE) ||
                         !tp->urg_data ||
                         before(tp->urg_seq, tp->copied_seq) ||
                         !before(tp->urg_seq, tp->rcv_nxt)) {

                        answ = tp->rcv_nxt - tp->copied_seq;

                        /* Subtract 1, if FIN was received */
                        if (answ && sock_flag(sk, SOCK_DONE))
                                answ--;
                } else
                        answ = tp->urg_seq - tp->copied_seq;
                unlock_sock_fast(sk, slow);
                break;
        case SIOCATMARK:
                answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
                break;
        case SIOCOUTQ:
                if (sk->sk_state == TCP_LISTEN)
                        return -EINVAL;

                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else
                        answ = tp->write_seq - tp->snd_una;
                break;
        case SIOCOUTQNSD:
                if (sk->sk_state == TCP_LISTEN)
                        return -EINVAL;

                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else
                        answ = tp->write_seq - tp->snd_nxt;
                break;
        default:
                return -ENOIOCTLCMD;
        }

        return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);
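
/* Illustrative userspace use of the ioctls handled above (tcp_fd is a
 * hypothetical connected TCP socket):
 *
 *      int inq, outq;
 *      ioctl(tcp_fd, SIOCINQ, &inq);   - unread bytes in the receive queue
 *      ioctl(tcp_fd, SIOCOUTQ, &outq); - bytes written but not yet acked
 *
 * SIOCOUTQNSD counts only bytes not yet sent (write_seq - snd_nxt), and
 * SIOCATMARK tests whether the read pointer sits at the urgent mark.
 */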

static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
        TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
        tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
        return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static void skb_entail(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

        skb->csum    = 0;
        tcb->seq     = tcb->end_seq = tp->write_seq;
        tcb->tcp_flags = TCPHDR_ACK;
        tcb->sacked  = 0;
        __skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
        sk->sk_wmem_queued += skb->truesize;
        sk_mem_charge(sk, skb->truesize);
        if (tp->nonagle & TCP_NAGLE_PUSH)
                tp->nonagle &= ~TCP_NAGLE_PUSH;

        tcp_slow_start_after_idle_check(sk);
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
        if (flags & MSG_OOB)
                tp->snd_up = tp->write_seq;
}

/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues:
 * because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade-off.
 * As packets containing data payload have a bigger truesize
 * than pure ack (dataless) packets, the last check prevents
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed the ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
                                int size_goal)
{
        return skb->len < size_goal &&
               sysctl_tcp_autocorking &&
               skb != tcp_write_queue_head(sk) &&
               atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
}

static void tcp_push(struct sock *sk, int flags, int mss_now,
                     int nonagle, int size_goal)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;

        if (!tcp_send_head(sk))
                return;

        skb = tcp_write_queue_tail(sk);
        if (!(flags & MSG_MORE) || forced_push(tp))
                tcp_mark_push(tp, skb);

        tcp_mark_urg(tp, flags);

        if (tcp_should_autocork(sk, skb, size_goal)) {

                /* avoid atomic op if TSQ_THROTTLED bit is already set */
                if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) {
                        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
                        set_bit(TSQ_THROTTLED, &tp->tsq_flags);
                }
                /* It is possible TX completion already happened
                 * before we set TSQ_THROTTLED.
                 */
                if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
                        return;
        }

        if (flags & MSG_MORE)
                nonagle = TCP_NAGLE_CORK;

        __tcp_push_pending_frames(sk, mss_now, nonagle);
}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
                                unsigned int offset, size_t len)
{
        struct tcp_splice_state *tss = rd_desc->arg.data;
        int ret;

        ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
                              min(rd_desc->count, len), tss->flags,
                              skb_socket_splice);
        if (ret > 0)
                rd_desc->count -= ret;
        return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
        /* Store TCP splice context information in read_descriptor_t. */
        read_descriptor_t rd_desc = {
                .arg.data = tss,
                .count    = tss->len,
        };

        return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:       socket to splice from
 * @ppos:       position (not valid)
 * @pipe:       pipe to splice to
 * @len:        number of bytes to splice
 * @flags:      splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
                        struct pipe_inode_info *pipe, size_t len,
                        unsigned int flags)
{
        struct sock *sk = sock->sk;
        struct tcp_splice_state tss = {
                .pipe = pipe,
                .len = len,
                .flags = flags,
        };
        long timeo;
        ssize_t spliced;
        int ret;

        sock_rps_record_flow(sk);
        /*
         * We can't seek on a socket input
         */
        if (unlikely(*ppos))
                return -ESPIPE;

        ret = spliced = 0;

        lock_sock(sk);

        timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
        while (tss.len) {
                ret = __tcp_splice_read(sk, &tss);
                if (ret < 0)
                        break;
                else if (!ret) {
                        if (spliced)
                                break;
                        if (sock_flag(sk, SOCK_DONE))
                                break;
                        if (sk->sk_err) {
                                ret = sock_error(sk);
                                break;
                        }
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                break;
                        if (sk->sk_state == TCP_CLOSE) {
                                /*
                                 * This occurs when user tries to read
                                 * from a never connected socket.
                                 */
                                if (!sock_flag(sk, SOCK_DONE))
                                        ret = -ENOTCONN;
                                break;
                        }
                        if (!timeo) {
                                ret = -EAGAIN;
                                break;
                        }
                        /* if __tcp_splice_read() got nothing while we have
                         * an skb in receive queue, we do not want to loop.
                         * This might happen with URG data.
                         */
                        if (!skb_queue_empty(&sk->sk_receive_queue))
                                break;
                        sk_wait_data(sk, &timeo, NULL);
                        if (signal_pending(current)) {
                                ret = sock_intr_errno(timeo);
                                break;
                        }
                        continue;
                }
                tss.len -= ret;
                spliced += ret;

                if (!timeo)
                        break;
                release_sock(sk);
                lock_sock(sk);

                if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
                    (sk->sk_shutdown & RCV_SHUTDOWN) ||
                    signal_pending(current))
                        break;
        }

        release_sock(sk);

        if (spliced)
                return spliced;

        return ret;
}
EXPORT_SYMBOL(tcp_splice_read);
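
/* Illustrative userspace path into tcp_splice_read() (hypothetical
 * descriptors):
 *
 *      splice(tcp_fd, NULL, pipe_fd[1], NULL, 65536, SPLICE_F_MOVE);
 *
 * This moves received payload into a pipe without copying it through
 * userspace; the socket offset must be NULL since sockets are not
 * seekable (see the -ESPIPE check above).
 */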

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
                                    bool force_schedule)
{
        struct sk_buff *skb;

        /* The TCP header must be at least 32-bit aligned.  */
        size = ALIGN(size, 4);

        if (unlikely(tcp_under_memory_pressure(sk)))
                sk_mem_reclaim_partial(sk);

        skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
        if (likely(skb)) {
                bool mem_scheduled;

                if (force_schedule) {
                        mem_scheduled = true;
                        sk_forced_mem_schedule(sk, skb->truesize);
                } else {
                        mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
                }
                if (likely(mem_scheduled)) {
                        skb_reserve(skb, sk->sk_prot->max_header);
                        /*
                         * Make sure that we have exactly size bytes
                         * available to the caller, no more, no less.
                         */
                        skb->reserved_tailroom = skb->end - skb->tail - size;
                        return skb;
                }
                __kfree_skb(skb);
        } else {
                sk->sk_prot->enter_memory_pressure(sk);
                sk_stream_moderate_sndbuf(sk);
        }
        return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
                                       int large_allowed)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 new_size_goal, size_goal;

        if (!large_allowed || !sk_can_gso(sk))
                return mss_now;

        /* Note : tcp_tso_autosize() will eventually split this later */
        new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
        new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);

        /* We try hard to avoid divides here */
        size_goal = tp->gso_segs * mss_now;
        if (unlikely(new_size_goal < size_goal ||
                     new_size_goal >= size_goal + mss_now)) {
                tp->gso_segs = min_t(u16, new_size_goal / mss_now,
                                     sk->sk_gso_max_segs);
                size_goal = tp->gso_segs * mss_now;
        }

        return max(size_goal, mss_now);
}

static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
        int mss_now;

        mss_now = tcp_current_mss(sk);
        *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

        return mss_now;
}

static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
                                size_t size, int flags)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int mss_now, size_goal;
        int err;
        ssize_t copied;
        long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

        /* Wait for a connection to finish. One exception is TCP Fast Open
         * (passive side) where data is allowed to be sent before a connection
         * is fully established.
         */
        if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
            !tcp_passive_fastopen(sk)) {
                err = sk_stream_wait_connect(sk, &timeo);
                if (err != 0)
                        goto out_err;
        }

        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        mss_now = tcp_send_mss(sk, &size_goal, flags);
        copied = 0;

        err = -EPIPE;
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                goto out_err;

        while (size > 0) {
                struct sk_buff *skb = tcp_write_queue_tail(sk);
                int copy, i;
                bool can_coalesce;

                if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
                        if (!sk_stream_memory_free(sk))
                                goto wait_for_sndbuf;

                        skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
                                                  skb_queue_empty(&sk->sk_write_queue));
                        if (!skb)
                                goto wait_for_memory;

                        skb_entail(sk, skb);
                        copy = size_goal;
                }

                if (copy > size)
                        copy = size;

                i = skb_shinfo(skb)->nr_frags;
                can_coalesce = skb_can_coalesce(skb, i, page, offset);
                if (!can_coalesce && i >= sysctl_max_skb_frags) {
                        tcp_mark_push(tp, skb);
                        goto new_segment;
                }
                if (!sk_wmem_schedule(sk, copy))
                        goto wait_for_memory;

                if (can_coalesce) {
                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                } else {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, copy);
                }
                skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;

                skb->len += copy;
                skb->data_len += copy;
                skb->truesize += copy;
                sk->sk_wmem_queued += copy;
                sk_mem_charge(sk, copy);
                skb->ip_summed = CHECKSUM_PARTIAL;
                tp->write_seq += copy;
                TCP_SKB_CB(skb)->end_seq += copy;
                tcp_skb_pcount_set(skb, 0);

                if (!copied)
                        TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

                copied += copy;
                offset += copy;
                size -= copy;
                if (!size) {
                        tcp_tx_timestamp(sk, skb);
                        goto out;
                }

                if (skb->len < size_goal || (flags & MSG_OOB))
                        continue;

                if (forced_push(tp)) {
                        tcp_mark_push(tp, skb);
                        __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
                } else if (skb == tcp_send_head(sk))
                        tcp_push_one(sk, mss_now);
                continue;

wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                tcp_push(sk, flags & ~MSG_MORE, mss_now,
                         TCP_NAGLE_PUSH, size_goal);

                err = sk_stream_wait_memory(sk, &timeo);
                if (err != 0)
                        goto do_error;

                mss_now = tcp_send_mss(sk, &size_goal, flags);
        }

out:
        if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
                tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
        return copied;

do_error:
        if (copied)
                goto out;
out_err:
        /* make sure we wake any epoll edge trigger waiter */
        if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
                sk->sk_write_space(sk);
        return sk_stream_error(sk, flags, err);
}

int tcp_sendpage(struct sock *sk, struct page *page, int offset,
                 size_t size, int flags)
{
        ssize_t res;

        if (!(sk->sk_route_caps & NETIF_F_SG) ||
            !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
                return sock_no_sendpage(sk->sk_socket, page, offset, size,
                                        flags);

        lock_sock(sk);
        res = do_tcp_sendpages(sk, page, offset, size, flags);
        release_sock(sk);
        return res;
}
EXPORT_SYMBOL(tcp_sendpage);
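
/* tcp_sendpage() is typically reached from sendfile(2) or splice(2) to a
 * TCP socket via the protocol's ->sendpage() op.  When the route lacks
 * scatter-gather or checksum offload it falls back to sock_no_sendpage(),
 * which degrades to an ordinary copying sendmsg.
 */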

static inline int select_size(const struct sock *sk, bool sg)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        int tmp = tp->mss_cache;

        if (sg) {
                if (sk_can_gso(sk)) {
                        /* Small frames won't use a full page:
                         * Payload will immediately follow the tcp header.
                         */
                        tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
                } else {
                        int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

                        if (tmp >= pgbreak &&
                            tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
                                tmp = pgbreak;
                }
        }

        return tmp;
}

void tcp_free_fastopen_req(struct tcp_sock *tp)
{
        if (tp->fastopen_req) {
                kfree(tp->fastopen_req);
                tp->fastopen_req = NULL;
        }
}

static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
                                int *copied, size_t size)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int err, flags;

        if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
                return -EOPNOTSUPP;
        if (tp->fastopen_req)
                return -EALREADY; /* Another Fast Open is in progress */

        tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
                                   sk->sk_allocation);
        if (unlikely(!tp->fastopen_req))
                return -ENOBUFS;
        tp->fastopen_req->data = msg;
        tp->fastopen_req->size = size;

        flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
        err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
                                    msg->msg_namelen, flags);
        *copied = tp->fastopen_req->copied;
        tcp_free_fastopen_req(tp);
        return err;
}
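
/* Illustrative client-side trigger for the Fast Open path above
 * (hypothetical fd/addr variables):
 *
 *      sendto(tcp_fd, buf, len, MSG_FASTOPEN,
 *             (struct sockaddr *)&addr, sizeof(addr));
 *
 * This combines connect() with the first write; as much of the data as
 * fits is carried in the SYN, the rest is sent once the handshake
 * completes.
 */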
1095
1096 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1097 {
1098         struct tcp_sock *tp = tcp_sk(sk);
1099         struct sk_buff *skb;
1100         int flags, err, copied = 0;
1101         int mss_now = 0, size_goal, copied_syn = 0;
1102         bool sg;
1103         long timeo;
1104
1105         lock_sock(sk);
1106
1107         flags = msg->msg_flags;
1108         if (flags & MSG_FASTOPEN) {
1109                 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
1110                 if (err == -EINPROGRESS && copied_syn > 0)
1111                         goto out;
1112                 else if (err)
1113                         goto out_err;
1114         }
1115
1116         timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1117
1118         /* Wait for a connection to finish. One exception is TCP Fast Open
1119          * (passive side) where data is allowed to be sent before a connection
1120          * is fully established.
1121          */
1122         if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
1123             !tcp_passive_fastopen(sk)) {
1124                 err = sk_stream_wait_connect(sk, &timeo);
1125                 if (err != 0)
1126                         goto do_error;
1127         }
1128
1129         if (unlikely(tp->repair)) {
1130                 if (tp->repair_queue == TCP_RECV_QUEUE) {
1131                         copied = tcp_send_rcvq(sk, msg, size);
1132                         goto out_nopush;
1133                 }
1134
1135                 err = -EINVAL;
1136                 if (tp->repair_queue == TCP_NO_QUEUE)
1137                         goto out_err;
1138
1139                 /* 'common' sending to sendq */
1140         }
1141
1142         /* This should be in poll */
1143         sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1144
1145         mss_now = tcp_send_mss(sk, &size_goal, flags);
1146
1147         /* Ok commence sending. */
1148         copied = 0;
1149
1150         err = -EPIPE;
1151         if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
1152                 goto out_err;
1153
1154         sg = !!(sk->sk_route_caps & NETIF_F_SG);
1155
1156         while (msg_data_left(msg)) {
1157                 int copy = 0;
1158                 int max = size_goal;
1159
1160                 skb = tcp_write_queue_tail(sk);
1161                 if (tcp_send_head(sk)) {
1162                         if (skb->ip_summed == CHECKSUM_NONE)
1163                                 max = mss_now;
1164                         copy = max - skb->len;
1165                 }
1166
1167                 if (copy <= 0) {
1168 new_segment:
1169                         /* Allocate new segment. If the interface is SG,
1170                          * allocate skb fitting to single page.
1171                          */
1172                         if (!sk_stream_memory_free(sk))
1173                                 goto wait_for_sndbuf;
1174
1175                         skb = sk_stream_alloc_skb(sk,
1176                                                   select_size(sk, sg),
1177                                                   sk->sk_allocation,
1178                                                   skb_queue_empty(&sk->sk_write_queue));
1179                         if (!skb)
1180                                 goto wait_for_memory;
1181
1182                         /*
1183                          * Check whether we can use HW checksum.
1184                          */
1185                         if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
1186                                 skb->ip_summed = CHECKSUM_PARTIAL;
1187
1188                         skb_entail(sk, skb);
1189                         copy = size_goal;
1190                         max = size_goal;
1191
1192                         /* All packets are restored as if they have
1193                          * already been sent. skb_mstamp isn't set to
1194                          * avoid wrong rtt estimation.
1195                          */
1196                         if (tp->repair)
1197                                 TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
1198                 }
1199
1200                 /* Try to append data to the end of skb. */
1201                 if (copy > msg_data_left(msg))
1202                         copy = msg_data_left(msg);
1203
1204                 /* Where to copy to? */
1205                 if (skb_availroom(skb) > 0) {
1206                         /* We have some space in skb head. Superb! */
1207                         copy = min_t(int, copy, skb_availroom(skb));
1208                         err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
1209                         if (err)
1210                                 goto do_fault;
1211                 } else {
1212                         bool merge = true;
1213                         int i = skb_shinfo(skb)->nr_frags;
1214                         struct page_frag *pfrag = sk_page_frag(sk);
1215
1216                         if (!sk_page_frag_refill(sk, pfrag))
1217                                 goto wait_for_memory;
1218
1219                         if (!skb_can_coalesce(skb, i, pfrag->page,
1220                                               pfrag->offset)) {
1221                                 if (i >= sysctl_max_skb_frags || !sg) {
1222                                         tcp_mark_push(tp, skb);
1223                                         goto new_segment;
1224                                 }
1225                                 merge = false;
1226                         }
1227
1228                         copy = min_t(int, copy, pfrag->size - pfrag->offset);
1229
1230                         if (!sk_wmem_schedule(sk, copy))
1231                                 goto wait_for_memory;
1232
1233                         err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
1234                                                        pfrag->page,
1235                                                        pfrag->offset,
1236                                                        copy);
1237                         if (err)
1238                                 goto do_error;
1239
1240                         /* Update the skb. */
1241                         if (merge) {
1242                                 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1243                         } else {
1244                                 skb_fill_page_desc(skb, i, pfrag->page,
1245                                                    pfrag->offset, copy);
1246                                 get_page(pfrag->page);
1247                         }
1248                         pfrag->offset += copy;
1249                 }
1250
1251                 if (!copied)
1252                         TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1253
1254                 tp->write_seq += copy;
1255                 TCP_SKB_CB(skb)->end_seq += copy;
1256                 tcp_skb_pcount_set(skb, 0);
1257
1258                 copied += copy;
1259                 if (!msg_data_left(msg)) {
1260                         tcp_tx_timestamp(sk, skb);
1261                         goto out;
1262                 }
1263
1264                 if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
1265                         continue;
1266
1267                 if (forced_push(tp)) {
1268                         tcp_mark_push(tp, skb);
1269                         __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1270                 } else if (skb == tcp_send_head(sk))
1271                         tcp_push_one(sk, mss_now);
1272                 continue;
1273
1274 wait_for_sndbuf:
1275                 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1276 wait_for_memory:
1277                 if (copied)
1278                         tcp_push(sk, flags & ~MSG_MORE, mss_now,
1279                                  TCP_NAGLE_PUSH, size_goal);
1280
1281                 err = sk_stream_wait_memory(sk, &timeo);
1282                 if (err != 0)
1283                         goto do_error;
1284
1285                 mss_now = tcp_send_mss(sk, &size_goal, flags);
1286         }
1287
1288 out:
1289         if (copied)
1290                 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1291 out_nopush:
1292         release_sock(sk);
1293         return copied + copied_syn;
1294
1295 do_fault:
1296         if (!skb->len) {
1297                 tcp_unlink_write_queue(skb, sk);
1298                 /* It is the one place in all of TCP, except connection
1299                  * reset, where we can be unlinking the send_head.
1300                  */
1301                 tcp_check_send_head(sk, skb);
1302                 sk_wmem_free_skb(sk, skb);
1303         }
1304
1305 do_error:
1306         if (copied + copied_syn)
1307                 goto out;
1308 out_err:
1309         err = sk_stream_error(sk, flags, err);
1310         /* make sure we wake any epoll edge trigger waiter */
1311         if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
1312                 sk->sk_write_space(sk);
1313         release_sock(sk);
1314         return err;
1315 }
1316 EXPORT_SYMBOL(tcp_sendmsg);
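
/*
 * Editor's note -- illustrative sketch, not part of this file. The send
 * path above coalesces user data into page frags and only pushes when a
 * segment fills, forced_push() fires, or the caller stops indicating
 * more data. A minimal userspace view of that contract, assuming an
 * already-connected TCP socket fd:
 */
#if 0	/* hedged example only, never compiled */
#include <sys/socket.h>

static int send_two_parts(int fd)
{
	static const char hdr[] = "HDR", body[] = "BODY";

	/* MSG_MORE hints that more data follows, so the stack may hold
	 * the partial segment back (much like TCP_CORK). */
	if (send(fd, hdr, sizeof(hdr) - 1, MSG_MORE) < 0)
		return -1;
	/* The final send without MSG_MORE lets the pending frame go. */
	return send(fd, body, sizeof(body) - 1, 0) < 0 ? -1 : 0;
}
#endif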
1317
1318 /*
1319  *      Handle reading urgent data. BSD has very simple semantics for
1320  *      this, no blocking and very strange errors 8)
1321  */
1322
1323 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1324 {
1325         struct tcp_sock *tp = tcp_sk(sk);
1326
1327         /* No URG data to read. */
1328         if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1329             tp->urg_data == TCP_URG_READ)
1330                 return -EINVAL; /* Yes, this is right! */
1331
1332         if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1333                 return -ENOTCONN;
1334
1335         if (tp->urg_data & TCP_URG_VALID) {
1336                 int err = 0;
1337                 char c = tp->urg_data;
1338
1339                 if (!(flags & MSG_PEEK))
1340                         tp->urg_data = TCP_URG_READ;
1341
1342                 /* Read urgent data. */
1343                 msg->msg_flags |= MSG_OOB;
1344
1345                 if (len > 0) {
1346                         if (!(flags & MSG_TRUNC))
1347                                 err = memcpy_to_msg(msg, &c, 1);
1348                         len = 1;
1349                 } else
1350                         msg->msg_flags |= MSG_TRUNC;
1351
1352                 return err ? -EFAULT : len;
1353         }
1354
1355         if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1356                 return 0;
1357
1358         /* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
1359          * the available implementations agree in this case:
1360          * this call should never block, independent of the
1361          * blocking state of the socket.
1362          * Mike <pall@rz.uni-karlsruhe.de>
1363          */
1364         return -EAGAIN;
1365 }
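
/*
 * Editor's note -- illustrative sketch, not part of this file. Seen
 * from userspace, the semantics above mean recv(..., MSG_OOB) returns
 * the single urgent byte, fails with EINVAL when SO_OOBINLINE is set
 * or the byte was already consumed, and with EAGAIN while no urgent
 * byte is pending:
 */
#if 0	/* hedged example only, never compiled */
#include <errno.h>
#include <sys/socket.h>

static int read_oob_byte(int fd, char *out)
{
	ssize_t n = recv(fd, out, 1, MSG_OOB);

	if (n == 1)
		return 0;	/* got the urgent byte */
	if (n < 0 && errno == EAGAIN)
		return 1;	/* no urgent data pending yet */
	return -1;		/* EINVAL, ENOTCONN, ... */
}
#endif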
1366
1367 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1368 {
1369         struct sk_buff *skb;
1370         int copied = 0, err = 0;
1371
1372         /* XXX -- need to support SO_PEEK_OFF */
1373
1374         skb_queue_walk(&sk->sk_write_queue, skb) {
1375                 err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
1376                 if (err)
1377                         break;
1378
1379                 copied += skb->len;
1380         }
1381
1382         return err ?: copied;
1383 }
1384
1385 /* Clean up the receive buffer for full frames taken by the user,
1386  * then send an ACK if necessary.  COPIED is the number of bytes
1387  * tcp_recvmsg has given to the user so far, it speeds up the
1388  * calculation of whether or not we must ACK for the sake of
1389  * a window update.
1390  */
1391 static void tcp_cleanup_rbuf(struct sock *sk, int copied)
1392 {
1393         struct tcp_sock *tp = tcp_sk(sk);
1394         bool time_to_ack = false;
1395
1396         struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1397
1398         WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1399              "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1400              tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1401
1402         if (inet_csk_ack_scheduled(sk)) {
1403                 const struct inet_connection_sock *icsk = inet_csk(sk);
1404                 /* Delayed ACKs frequently hit locked sockets during bulk
1405                  * receive. */
1406                 if (icsk->icsk_ack.blocked ||
1407                     /* Once-per-two-segments ACK was not sent by tcp_input.c */
1408                     tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1409                     /*
1410                      * If this read emptied the receive buffer, we send an
1411                      * ACK when the connection is not bidirectional, the
1412                      * user has drained the receive buffer, and a small
1413                      * segment was left in the queue.
1414                      */
1415                     (copied > 0 &&
1416                      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
1417                       ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1418                        !icsk->icsk_ack.pingpong)) &&
1419                       !atomic_read(&sk->sk_rmem_alloc)))
1420                         time_to_ack = true;
1421         }
1422
1423         /* We send an ACK if we can now advertise a non-zero window
1424          * which has been raised "significantly".
1425          *
1426          * Even if the window is raised to infinity, do not send a window-
1427          * open ACK in states where we will not receive more; it is useless.
1428          */
1429         if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1430                 __u32 rcv_window_now = tcp_receive_window(tp);
1431
1432                 /* Optimize, __tcp_select_window() is not cheap. */
1433                 if (2*rcv_window_now <= tp->window_clamp) {
1434                         __u32 new_window = __tcp_select_window(sk);
1435
1436                         /* Send an ACK now if this read freed lots of space
1437                          * in our buffer. We can advertise the new window now
1438                          * if it is not smaller than the current one.
1439                          * "Lots" means "at least twice" here.
1440                          */
1441                         if (new_window && new_window >= 2 * rcv_window_now)
1442                                 time_to_ack = true;
1443                 }
1444         }
1445         if (time_to_ack)
1446                 tcp_send_ack(sk);
1447 }
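
/*
 * Editor's note -- illustrative restatement, not part of this file.
 * The window-update heuristic above reduces to: only compute a new
 * window once the current one has shrunk to half the clamp, and only
 * ACK when the freed space at least doubles it. With made-up numbers,
 * clamp = 64K, current = 16K, new = 48K: 2*16K <= 64K and
 * 48K >= 2*16K, so an ACK goes out.
 */
#if 0	/* hedged example only, never compiled */
static int window_update_worth_an_ack(unsigned int rcv_window_now,
				      unsigned int window_clamp,
				      unsigned int new_window)
{
	if (2 * rcv_window_now > window_clamp)
		return 0;	/* __tcp_select_window() is skipped */
	return new_window && new_window >= 2 * rcv_window_now;
}
#endif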
1448
1449 static void tcp_prequeue_process(struct sock *sk)
1450 {
1451         struct sk_buff *skb;
1452         struct tcp_sock *tp = tcp_sk(sk);
1453
1454         NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
1455
1456         /* The RX process wants to run with BHs disabled, though it is
1457          * not strictly necessary here. */
1458         local_bh_disable();
1459         while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1460                 sk_backlog_rcv(sk, skb);
1461         local_bh_enable();
1462
1463         /* Clear memory counter. */
1464         tp->ucopy.memory = 0;
1465 }
1466
1467 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1468 {
1469         struct sk_buff *skb;
1470         u32 offset;
1471
1472         while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1473                 offset = seq - TCP_SKB_CB(skb)->seq;
1474                 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
1475                         offset--;
1476                 if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
1477                         *off = offset;
1478                         return skb;
1479                 }
1480                 /* This looks weird, but it can happen if TCP collapsing
1481                  * split a fat GRO packet while we released the socket lock
1482                  * in skb_splice_bits().
1483                  */
1484                 sk_eat_skb(sk, skb);
1485         }
1486         return NULL;
1487 }
1488
1489 /*
1490  * This routine provides an alternative to tcp_recvmsg() for routines
1491  * that would like to handle copying from skbuffs directly in 'sendfile'
1492  * fashion.
1493  * Note:
1494  *      - It is assumed that the socket was locked by the caller.
1495  *      - The routine does not block.
1496  *      - At present, there is no support for reading OOB data
1497  *        or for 'peeking' the socket using this routine
1498  *        (although both would be easy to implement).
1499  */
1500 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1501                   sk_read_actor_t recv_actor)
1502 {
1503         struct sk_buff *skb;
1504         struct tcp_sock *tp = tcp_sk(sk);
1505         u32 seq = tp->copied_seq;
1506         u32 offset;
1507         int copied = 0;
1508
1509         if (sk->sk_state == TCP_LISTEN)
1510                 return -ENOTCONN;
1511         while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1512                 if (offset < skb->len) {
1513                         int used;
1514                         size_t len;
1515
1516                         len = skb->len - offset;
1517                         /* Stop reading if we hit a patch of urgent data */
1518                         if (tp->urg_data) {
1519                                 u32 urg_offset = tp->urg_seq - seq;
1520                                 if (urg_offset < len)
1521                                         len = urg_offset;
1522                                 if (!len)
1523                                         break;
1524                         }
1525                         used = recv_actor(desc, skb, offset, len);
1526                         if (used <= 0) {
1527                                 if (!copied)
1528                                         copied = used;
1529                                 break;
1530                         } else if (used <= len) {
1531                                 seq += used;
1532                                 copied += used;
1533                                 offset += used;
1534                         }
1535                         /* If recv_actor drops the lock (e.g. TCP splice
1536                          * receive) the skb pointer might be invalid when
1537                          * getting here: tcp_collapse might have deleted it
1538                          * while aggregating skbs from the socket queue.
1539                          */
1540                         skb = tcp_recv_skb(sk, seq - 1, &offset);
1541                         if (!skb)
1542                                 break;
1543                         /* TCP coalescing might have appended data to the skb.
1544                          * Try to splice more frags
1545                          */
1546                         if (offset + 1 != skb->len)
1547                                 continue;
1548                 }
1549                 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
1550                         sk_eat_skb(sk, skb);
1551                         ++seq;
1552                         break;
1553                 }
1554                 sk_eat_skb(sk, skb);
1555                 if (!desc->count)
1556                         break;
1557                 tp->copied_seq = seq;
1558         }
1559         tp->copied_seq = seq;
1560
1561         tcp_rcv_space_adjust(sk);
1562
1563         /* Clean up data we have read: This will do ACK frames. */
1564         if (copied > 0) {
1565                 tcp_recv_skb(sk, seq, &offset);
1566                 tcp_cleanup_rbuf(sk, copied);
1567         }
1568         return copied;
1569 }
1570 EXPORT_SYMBOL(tcp_read_sock);
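
/*
 * Editor's note -- illustrative sketch, not part of this file. A
 * recv_actor passed to tcp_read_sock() consumes up to @len bytes of
 * @skb starting at @offset and returns how many bytes it used (a
 * return <= 0 stops the walk). A minimal byte-counting actor, modelled
 * loosely on how the splice path drives this hook:
 */
#if 0	/* hedged example only, never compiled */
static int count_bytes_actor(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	size_t want = min_t(size_t, len, desc->count);

	desc->count -= want;	/* remaining budget for the walk */
	desc->written += want;	/* bytes "consumed" so far */
	return (int)want;	/* tcp_read_sock() advances by this much */
}
#endif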
1571
1572 /*
1573  *      This routine copies from a sock struct into the user buffer.
1574  *
1575  *      Technical note: in 2.3 we work on _locked_ socket, so that
1576  *      tricks with *seq access order and skb->users are not required.
1577  *      Probably, code can be easily improved even more.
1578  */
1579
1580 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1581                 int flags, int *addr_len)
1582 {
1583         struct tcp_sock *tp = tcp_sk(sk);
1584         int copied = 0;
1585         u32 peek_seq;
1586         u32 *seq;
1587         unsigned long used;
1588         int err;
1589         int target;             /* Read at least this many bytes */
1590         long timeo;
1591         struct task_struct *user_recv = NULL;
1592         struct sk_buff *skb, *last;
1593         u32 urg_hole = 0;
1594
1595         if (unlikely(flags & MSG_ERRQUEUE))
1596                 return inet_recv_error(sk, msg, len, addr_len);
1597
1598         if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
1599             (sk->sk_state == TCP_ESTABLISHED))
1600                 sk_busy_loop(sk, nonblock);
1601
1602         lock_sock(sk);
1603
1604         err = -ENOTCONN;
1605         if (sk->sk_state == TCP_LISTEN)
1606                 goto out;
1607
1608         timeo = sock_rcvtimeo(sk, nonblock);
1609
1610         /* Urgent data needs to be handled specially. */
1611         if (flags & MSG_OOB)
1612                 goto recv_urg;
1613
1614         if (unlikely(tp->repair)) {
1615                 err = -EPERM;
1616                 if (!(flags & MSG_PEEK))
1617                         goto out;
1618
1619                 if (tp->repair_queue == TCP_SEND_QUEUE)
1620                         goto recv_sndq;
1621
1622                 err = -EINVAL;
1623                 if (tp->repair_queue == TCP_NO_QUEUE)
1624                         goto out;
1625
1626                 /* 'common' recv queue MSG_PEEK-ing */
1627         }
1628
1629         seq = &tp->copied_seq;
1630         if (flags & MSG_PEEK) {
1631                 peek_seq = tp->copied_seq;
1632                 seq = &peek_seq;
1633         }
1634
1635         target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1636
1637         do {
1638                 u32 offset;
1639
1640                 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1641                 if (tp->urg_data && tp->urg_seq == *seq) {
1642                         if (copied)
1643                                 break;
1644                         if (signal_pending(current)) {
1645                                 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1646                                 break;
1647                         }
1648                 }
1649
1650                 /* Next get a buffer. */
1651
1652                 last = skb_peek_tail(&sk->sk_receive_queue);
1653                 skb_queue_walk(&sk->sk_receive_queue, skb) {
1654                         last = skb;
1655                         /* Now that we have two receive queues this
1656                          * shouldn't happen.
1657                          */
1658                         if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
1659                                  "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
1660                                  *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
1661                                  flags))
1662                                 break;
1663
1664                         offset = *seq - TCP_SKB_CB(skb)->seq;
1665                         if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
1666                                 offset--;
1667                         if (offset < skb->len)
1668                                 goto found_ok_skb;
1669                         if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1670                                 goto found_fin_ok;
1671                         WARN(!(flags & MSG_PEEK),
1672                              "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
1673                              *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
1674                 }
1675
1676                 /* Well, if we have backlog, try to process it now. */
1677
1678                 if (copied >= target && !sk->sk_backlog.tail)
1679                         break;
1680
1681                 if (copied) {
1682                         if (sk->sk_err ||
1683                             sk->sk_state == TCP_CLOSE ||
1684                             (sk->sk_shutdown & RCV_SHUTDOWN) ||
1685                             !timeo ||
1686                             signal_pending(current))
1687                                 break;
1688                 } else {
1689                         if (sock_flag(sk, SOCK_DONE))
1690                                 break;
1691
1692                         if (sk->sk_err) {
1693                                 copied = sock_error(sk);
1694                                 break;
1695                         }
1696
1697                         if (sk->sk_shutdown & RCV_SHUTDOWN)
1698                                 break;
1699
1700                         if (sk->sk_state == TCP_CLOSE) {
1701                                 if (!sock_flag(sk, SOCK_DONE)) {
1702                                         /* This occurs when the user tries to read
1703                                          * from a never-connected socket.
1704                                          */
1705                                         copied = -ENOTCONN;
1706                                         break;
1707                                 }
1708                                 break;
1709                         }
1710
1711                         if (!timeo) {
1712                                 copied = -EAGAIN;
1713                                 break;
1714                         }
1715
1716                         if (signal_pending(current)) {
1717                                 copied = sock_intr_errno(timeo);
1718                                 break;
1719                         }
1720                 }
1721
1722                 tcp_cleanup_rbuf(sk, copied);
1723
1724                 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1725                         /* Install new reader */
1726                         if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1727                                 user_recv = current;
1728                                 tp->ucopy.task = user_recv;
1729                                 tp->ucopy.msg = msg;
1730                         }
1731
1732                         tp->ucopy.len = len;
1733
1734                         WARN_ON(tp->copied_seq != tp->rcv_nxt &&
1735                                 !(flags & (MSG_PEEK | MSG_TRUNC)));
1736
1737                         /* Ugly... If prequeue is not empty, we have to
1738                          * process it before releasing socket, otherwise
1739                          * order will be broken at second iteration.
1740                          * A more elegant solution is required!!!
1741                          *
1742                          * Look: we have the following (pseudo)queues:
1743                          *
1744                          * 1. packets in flight
1745                          * 2. backlog
1746                          * 3. prequeue
1747                          * 4. receive_queue
1748                          *
1749                          * Each queue can be processed only if the next ones
1750                          * are empty. At this point we have empty receive_queue.
1751                          * But prequeue _can_ be not empty after 2nd iteration,
1752                          * when we jumped to start of loop because backlog
1753                          * processing added something to receive_queue.
1754                          * We cannot release_sock(), because backlog contains
1755                          * packets arrived _after_ prequeued ones.
1756                          *
1757                          * In short, the algorithm is clear: process all
1758                          * the queues in order. We could do it more directly,
1759                          * requeueing packets from the backlog to the prequeue
1760                          * if it is not empty. That is more elegant, but eats
1761                          * cycles, unfortunately.
1762                          */
1763                         if (!skb_queue_empty(&tp->ucopy.prequeue))
1764                                 goto do_prequeue;
1765
1766                         /* __ Set realtime policy in scheduler __ */
1767                 }
1768
1769                 if (copied >= target) {
1770                         /* Do not sleep, just process backlog. */
1771                         release_sock(sk);
1772                         lock_sock(sk);
1773                 } else {
1774                         sk_wait_data(sk, &timeo, last);
1775                 }
1776
1777                 if (user_recv) {
1778                         int chunk;
1779
1780                         /* __ Restore normal policy in scheduler __ */
1781
1782                         chunk = len - tp->ucopy.len;
1783                         if (chunk != 0) {
1784                                 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1785                                 len -= chunk;
1786                                 copied += chunk;
1787                         }
1788
1789                         if (tp->rcv_nxt == tp->copied_seq &&
1790                             !skb_queue_empty(&tp->ucopy.prequeue)) {
1791 do_prequeue:
1792                                 tcp_prequeue_process(sk);
1793
1794                                 chunk = len - tp->ucopy.len;
1795                                 if (chunk != 0) {
1796                                         NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1797                                         len -= chunk;
1798                                         copied += chunk;
1799                                 }
1800                         }
1801                 }
1802                 if ((flags & MSG_PEEK) &&
1803                     (peek_seq - copied - urg_hole != tp->copied_seq)) {
1804                         net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
1805                                             current->comm,
1806                                             task_pid_nr(current));
1807                         peek_seq = tp->copied_seq;
1808                 }
1809                 continue;
1810
1811         found_ok_skb:
1812                 /* Ok so how much can we use? */
1813                 used = skb->len - offset;
1814                 if (len < used)
1815                         used = len;
1816
1817                 /* Do we have urgent data here? */
1818                 if (tp->urg_data) {
1819                         u32 urg_offset = tp->urg_seq - *seq;
1820                         if (urg_offset < used) {
1821                                 if (!urg_offset) {
1822                                         if (!sock_flag(sk, SOCK_URGINLINE)) {
1823                                                 ++*seq;
1824                                                 urg_hole++;
1825                                                 offset++;
1826                                                 used--;
1827                                                 if (!used)
1828                                                         goto skip_copy;
1829                                         }
1830                                 } else
1831                                         used = urg_offset;
1832                         }
1833                 }
1834
1835                 if (!(flags & MSG_TRUNC)) {
1836                         err = skb_copy_datagram_msg(skb, offset, msg, used);
1837                         if (err) {
1838                                 /* Exception. Bailout! */
1839                                 if (!copied)
1840                                         copied = -EFAULT;
1841                                 break;
1842                         }
1843                 }
1844
1845                 *seq += used;
1846                 copied += used;
1847                 len -= used;
1848
1849                 tcp_rcv_space_adjust(sk);
1850
1851 skip_copy:
1852                 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1853                         tp->urg_data = 0;
1854                         tcp_fast_path_check(sk);
1855                 }
1856                 if (used + offset < skb->len)
1857                         continue;
1858
1859                 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1860                         goto found_fin_ok;
1861                 if (!(flags & MSG_PEEK))
1862                         sk_eat_skb(sk, skb);
1863                 continue;
1864
1865         found_fin_ok:
1866                 /* Process the FIN. */
1867                 ++*seq;
1868                 if (!(flags & MSG_PEEK))
1869                         sk_eat_skb(sk, skb);
1870                 break;
1871         } while (len > 0);
1872
1873         if (user_recv) {
1874                 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1875                         int chunk;
1876
1877                         tp->ucopy.len = copied > 0 ? len : 0;
1878
1879                         tcp_prequeue_process(sk);
1880
1881                         if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1882                                 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1883                                 len -= chunk;
1884                                 copied += chunk;
1885                         }
1886                 }
1887
1888                 tp->ucopy.task = NULL;
1889                 tp->ucopy.len = 0;
1890         }
1891
1892         /* According to UNIX98, msg_name/msg_namelen are ignored
1893          * on a connected socket. I was just happy when I found this 8) --ANK
1894          */
1895
1896         /* Clean up data we have read: This will do ACK frames. */
1897         tcp_cleanup_rbuf(sk, copied);
1898
1899         release_sock(sk);
1900         return copied;
1901
1902 out:
1903         release_sock(sk);
1904         return err;
1905
1906 recv_urg:
1907         err = tcp_recv_urg(sk, msg, len, flags);
1908         goto out;
1909
1910 recv_sndq:
1911         err = tcp_peek_sndq(sk, msg, len);
1912         goto out;
1913 }
1914 EXPORT_SYMBOL(tcp_recvmsg);
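
/*
 * Editor's note -- illustrative sketch, not part of this file. The
 * 'target' computed above is the full length under MSG_WAITALL and
 * SO_RCVLOWAT otherwise, while MSG_PEEK walks the queue without
 * advancing copied_seq. Userspace view of both on a connected fd:
 */
#if 0	/* hedged example only, never compiled */
#include <sys/socket.h>

static ssize_t peek_then_read(int fd, char *buf, size_t len)
{
	/* The peek does not consume: the next read sees the same bytes. */
	ssize_t peeked = recv(fd, buf, len, MSG_PEEK);

	if (peeked <= 0)
		return peeked;
	/* MSG_WAITALL keeps blocking until all 'peeked' bytes arrive,
	 * unless an error, EOF or signal cuts the read short. */
	return recv(fd, buf, peeked, MSG_WAITALL);
}
#endif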
1915
1916 void tcp_set_state(struct sock *sk, int state)
1917 {
1918         int oldstate = sk->sk_state;
1919
1920         switch (state) {
1921         case TCP_ESTABLISHED:
1922                 if (oldstate != TCP_ESTABLISHED)
1923                         TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
1924                 break;
1925
1926         case TCP_CLOSE:
1927                 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
1928                         TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
1929
1930                 sk->sk_prot->unhash(sk);
1931                 if (inet_csk(sk)->icsk_bind_hash &&
1932                     !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
1933                         inet_put_port(sk);
1934                 /* fall through */
1935         default:
1936                 if (oldstate == TCP_ESTABLISHED)
1937                         TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
1938         }
1939
1940         /* Change state AFTER socket is unhashed to avoid closed
1941          * socket sitting in hash tables.
1942          */
1943         sk_state_store(sk, state);
1944
1945 #ifdef STATE_TRACE
1946         SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
1947 #endif
1948 }
1949 EXPORT_SYMBOL_GPL(tcp_set_state);
1950
1951 /*
1952  *      State processing on a close. This implements the state shift for
1953  *      sending our FIN frame. Note that we only send a FIN for some
1954  *      states. A shutdown() may have already sent the FIN, or we may be
1955  *      closed.
1956  */
1957
1958 static const unsigned char new_state[16] = {
1959   /* current state:        new state:      action:      */
1960   [0 /* (Invalid) */]   = TCP_CLOSE,
1961   [TCP_ESTABLISHED]     = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1962   [TCP_SYN_SENT]        = TCP_CLOSE,
1963   [TCP_SYN_RECV]        = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1964   [TCP_FIN_WAIT1]       = TCP_FIN_WAIT1,
1965   [TCP_FIN_WAIT2]       = TCP_FIN_WAIT2,
1966   [TCP_TIME_WAIT]       = TCP_CLOSE,
1967   [TCP_CLOSE]           = TCP_CLOSE,
1968   [TCP_CLOSE_WAIT]      = TCP_LAST_ACK  | TCP_ACTION_FIN,
1969   [TCP_LAST_ACK]        = TCP_LAST_ACK,
1970   [TCP_LISTEN]          = TCP_CLOSE,
1971   [TCP_CLOSING]         = TCP_CLOSING,
1972   [TCP_NEW_SYN_RECV]    = TCP_CLOSE,    /* should not happen ! */
1973 };
1974
1975 static int tcp_close_state(struct sock *sk)
1976 {
1977         int next = (int)new_state[sk->sk_state];
1978         int ns = next & TCP_STATE_MASK;
1979
1980         tcp_set_state(sk, ns);
1981
1982         return next & TCP_ACTION_FIN;
1983 }
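
/*
 * Editor's note -- illustrative worked example, not part of this file.
 * Each new_state[] entry packs the next state in the low bits
 * (TCP_STATE_MASK) plus an optional TCP_ACTION_FIN flag, both defined
 * in net/tcp.h. For example, closing from TCP_ESTABLISHED:
 *
 *	next = new_state[TCP_ESTABLISHED];  == TCP_FIN_WAIT1 | TCP_ACTION_FIN
 *	ns   = next & TCP_STATE_MASK;       == TCP_FIN_WAIT1
 *	next & TCP_ACTION_FIN               != 0, so the caller sends a FIN
 *
 * whereas from TCP_SYN_SENT the entry is plain TCP_CLOSE and no FIN
 * is sent.
 */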
1984
1985 /*
1986  *      Shutdown the sending side of a connection. Much like close except
1987  *      that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
1988  */
1989
1990 void tcp_shutdown(struct sock *sk, int how)
1991 {
1992         /*      We need to grab some memory, and put together a FIN,
1993          *      and then put it into the queue to be sent.
1994          *              Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1995          */
1996         if (!(how & SEND_SHUTDOWN))
1997                 return;
1998
1999         /* If we've already sent a FIN, or it's a closed state, skip this. */
2000         if ((1 << sk->sk_state) &
2001             (TCPF_ESTABLISHED | TCPF_SYN_SENT |
2002              TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
2003                 /* Clear out any half completed packets.  FIN if needed. */
2004                 if (tcp_close_state(sk))
2005                         tcp_send_fin(sk);
2006         }
2007 }
2008 EXPORT_SYMBOL(tcp_shutdown);
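
/*
 * Editor's note -- illustrative sketch, not part of this file.
 * tcp_shutdown() only reacts to SEND_SHUTDOWN, which is what a
 * userspace shutdown(fd, SHUT_WR) maps to: a FIN is queued but the
 * receive side stays open -- the classic half-close:
 */
#if 0	/* hedged example only, never compiled */
#include <sys/socket.h>

static ssize_t send_request_then_drain(int fd, const void *req, size_t len,
				       void *buf, size_t buflen)
{
	if (send(fd, req, len, 0) < 0)
		return -1;
	/* Done writing: the FIN goes out, but we can still read until
	 * the peer closes its side. */
	if (shutdown(fd, SHUT_WR) < 0)
		return -1;
	return recv(fd, buf, buflen, 0);
}
#endif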
2009
2010 bool tcp_check_oom(struct sock *sk, int shift)
2011 {
2012         bool too_many_orphans, out_of_socket_memory;
2013
2014         too_many_orphans = tcp_too_many_orphans(sk, shift);
2015         out_of_socket_memory = tcp_out_of_memory(sk);
2016
2017         if (too_many_orphans)
2018                 net_info_ratelimited("too many orphaned sockets\n");
2019         if (out_of_socket_memory)
2020                 net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
2021         return too_many_orphans || out_of_socket_memory;
2022 }
2023
2024 void tcp_close(struct sock *sk, long timeout)
2025 {
2026         struct sk_buff *skb;
2027         int data_was_unread = 0;
2028         int state;
2029
2030         lock_sock(sk);
2031         sk->sk_shutdown = SHUTDOWN_MASK;
2032
2033         if (sk->sk_state == TCP_LISTEN) {
2034                 tcp_set_state(sk, TCP_CLOSE);
2035
2036                 /* Special case. */
2037                 inet_csk_listen_stop(sk);
2038
2039                 goto adjudge_to_death;
2040         }
2041
2042         /*  We need to flush the receive buffers.  We do this only on the
2043          *  descriptor close, not protocol-sourced closes, because the
2044          *  reader process may not have drained the data yet!
2045          */
2046         while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
2047                 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
2048
2049                 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2050                         len--;
2051                 data_was_unread += len;
2052                 __kfree_skb(skb);
2053         }
2054
2055         sk_mem_reclaim(sk);
2056
2057         /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
2058         if (sk->sk_state == TCP_CLOSE)
2059                 goto adjudge_to_death;
2060
2061         /* As outlined in RFC 2525, section 2.17, we send a RST here because
2062          * data was lost. To witness the awful effects of the old behavior of
2063          * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
2064          * GET in an FTP client, suspend the process, wait for the client to
2065          * advertise a zero window, then kill -9 the FTP client, wheee...
2066          * Note: timeout is always zero in such a case.
2067          */
2068         if (unlikely(tcp_sk(sk)->repair)) {
2069                 sk->sk_prot->disconnect(sk, 0);
2070         } else if (data_was_unread) {
2071                 /* Unread data was tossed, zap the connection. */
2072                 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
2073                 tcp_set_state(sk, TCP_CLOSE);
2074                 tcp_send_active_reset(sk, sk->sk_allocation);
2075         } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
2076                 /* Check zero linger _after_ checking for unread data. */
2077                 sk->sk_prot->disconnect(sk, 0);
2078                 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
2079         } else if (tcp_close_state(sk)) {
2080                 /* We FIN if the application ate all the data before
2081                  * zapping the connection.
2082                  */
2083
2084                 /* RED-PEN. Formally speaking, we have broken the TCP state
2085                  * machine. State transitions:
2086                  *
2087                  * TCP_ESTABLISHED -> TCP_FIN_WAIT1
2088                  * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
2089                  * TCP_CLOSE_WAIT -> TCP_LAST_ACK
2090                  *
2091                  * are legal only when FIN has been sent (i.e. in window),
2092                  * rather than queued out of window. Purists may object.
2093                  *
2094                  * E.g. the "RFC state" is ESTABLISHED
2095                  * if the Linux state is FIN-WAIT-1 but the FIN is still not sent.
2096                  *
2097                  * The visible deviations are that we sometimes enter the
2098                  * time-wait state when it is not really required (harmless),
2099                  * and do not send active resets when the specs require them
2100                  * (TCP_ESTABLISHED and TCP_CLOSE_WAIT, which look like
2101                  * CLOSING or LAST_ACK to Linux).
2102                  * Probably, I missed some more holelets.
2103                  *                                              --ANK
2104                  * XXX (TFO) - To start off we don't support SYN+ACK+FIN
2105                  * in a single packet! (May consider it later but will
2106                  * probably need API support or TCP_CORK SYN-ACK until
2107                  * data is written and socket is closed.)
2108                  */
2109                 tcp_send_fin(sk);
2110         }
2111
2112         sk_stream_wait_close(sk, timeout);
2113
2114 adjudge_to_death:
2115         state = sk->sk_state;
2116         sock_hold(sk);
2117         sock_orphan(sk);
2118
2119         /* It is the last release_sock in its life. It will remove backlog. */
2120         release_sock(sk);
2121
2122
2123         /* Now the socket is owned by the kernel and we acquire the BH
2124          * lock to finish the close. No need to check for user refs.
2125          */
2126         local_bh_disable();
2127         bh_lock_sock(sk);
2128         WARN_ON(sock_owned_by_user(sk));
2129
2130         percpu_counter_inc(sk->sk_prot->orphan_count);
2131
2132         /* Have we already been destroyed by a softirq or backlog? */
2133         if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
2134                 goto out;
2135
2136         /*      This is a (useful) BSD violation of the RFC. There is a
2137          *      problem with TCP as specified: the other end could keep a
2138          *      socket open forever with no application left on this end.
2139          *      We use a 1 minute timeout (about the same as BSD) then kill
2140          *      our end. If they send after that then tough - BUT it is long
2141          *      enough that we won't repeat the old "4*rto = almost no time"
2142          *      whoops-reset mistake.
2143          *
2144          *      Nope, it was not a mistake. It is really desired behaviour,
2145          *      e.g. on HTTP servers, where such sockets are useless but
2146          *      consume significant resources. Let's do it with the special
2147          *      linger2 option.                                 --ANK
2148          */
2149
2150         if (sk->sk_state == TCP_FIN_WAIT2) {
2151                 struct tcp_sock *tp = tcp_sk(sk);
2152                 if (tp->linger2 < 0) {
2153                         tcp_set_state(sk, TCP_CLOSE);
2154                         tcp_send_active_reset(sk, GFP_ATOMIC);
2155                         NET_INC_STATS_BH(sock_net(sk),
2156                                         LINUX_MIB_TCPABORTONLINGER);
2157                 } else {
2158                         const int tmo = tcp_fin_time(sk);
2159
2160                         if (tmo > TCP_TIMEWAIT_LEN) {
2161                                 inet_csk_reset_keepalive_timer(sk,
2162                                                 tmo - TCP_TIMEWAIT_LEN);
2163                         } else {
2164                                 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
2165                                 goto out;
2166                         }
2167                 }
2168         }
2169         if (sk->sk_state != TCP_CLOSE) {
2170                 sk_mem_reclaim(sk);
2171                 if (tcp_check_oom(sk, 0)) {
2172                         tcp_set_state(sk, TCP_CLOSE);
2173                         tcp_send_active_reset(sk, GFP_ATOMIC);
2174                         NET_INC_STATS_BH(sock_net(sk),
2175                                         LINUX_MIB_TCPABORTONMEMORY);
2176                 }
2177         }
2178
2179         if (sk->sk_state == TCP_CLOSE) {
2180                 struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
2181                 /* We could get here with a non-NULL req if the socket is
2182                  * aborted (e.g., closed with unread data) before 3WHS
2183                  * finishes.
2184                  */
2185                 if (req)
2186                         reqsk_fastopen_remove(sk, req, false);
2187                 inet_csk_destroy_sock(sk);
2188         }
2189         /* Otherwise, socket is reprieved until protocol close. */
2190
2191 out:
2192         bh_unlock_sock(sk);
2193         local_bh_enable();
2194         sock_put(sk);
2195 }
2196 EXPORT_SYMBOL(tcp_close);
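
/*
 * Editor's note -- illustrative sketch, not part of this file. Two of
 * the branches above are directly reachable from userspace: closing
 * with unread data, or closing with SO_LINGER set to zero linger time,
 * both make tcp_close() send an RST instead of the usual FIN:
 */
#if 0	/* hedged example only, never compiled */
#include <sys/socket.h>
#include <unistd.h>

static void abortive_close(int fd)
{
	struct linger lin = { .l_onoff = 1, .l_linger = 0 };

	/* Zero linger: close() resets the connection at once
	 * (the SOCK_LINGER && !sk_lingertime branch above). */
	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
	close(fd);
}
#endif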
2197
2198 /* These states need RST on ABORT according to RFC793 */
2199
2200 static inline bool tcp_need_reset(int state)
2201 {
2202         return (1 << state) &
2203                (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
2204                 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
2205 }
2206
2207 int tcp_disconnect(struct sock *sk, int flags)
2208 {
2209         struct inet_sock *inet = inet_sk(sk);
2210         struct inet_connection_sock *icsk = inet_csk(sk);
2211         struct tcp_sock *tp = tcp_sk(sk);
2212         int err = 0;
2213         int old_state = sk->sk_state;
2214
2215         if (old_state != TCP_CLOSE)
2216                 tcp_set_state(sk, TCP_CLOSE);
2217
2218         /* ABORT function of RFC793 */
2219         if (old_state == TCP_LISTEN) {
2220                 inet_csk_listen_stop(sk);
2221         } else if (unlikely(tp->repair)) {
2222                 sk->sk_err = ECONNABORTED;
2223         } else if (tcp_need_reset(old_state) ||
2224                    (tp->snd_nxt != tp->write_seq &&
2225                     (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
2226                 /* The last check adjusts for the discrepancy of Linux
2227                  * wrt. RFC states.
2228                  */
2229                 tcp_send_active_reset(sk, gfp_any());
2230                 sk->sk_err = ECONNRESET;
2231         } else if (old_state == TCP_SYN_SENT)
2232                 sk->sk_err = ECONNRESET;
2233
2234         tcp_clear_xmit_timers(sk);
2235         __skb_queue_purge(&sk->sk_receive_queue);
2236         tcp_write_queue_purge(sk);
2237         __skb_queue_purge(&tp->out_of_order_queue);
2238
2239         inet->inet_dport = 0;
2240
2241         if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
2242                 inet_reset_saddr(sk);
2243
2244         sk->sk_shutdown = 0;
2245         sock_reset_flag(sk, SOCK_DONE);
2246         tp->srtt_us = 0;
2247         tp->write_seq += tp->max_window + 2;
2248         if (tp->write_seq == 0)
2249                 tp->write_seq = 1;
2250         icsk->icsk_backoff = 0;
2251         tp->snd_cwnd = 2;
2252         icsk->icsk_probes_out = 0;
2253         tp->packets_out = 0;
2254         tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2255         tp->snd_cwnd_cnt = 0;
2256         tp->window_clamp = 0;
2257         tcp_set_ca_state(sk, TCP_CA_Open);
2258         tcp_clear_retrans(tp);
2259         inet_csk_delack_init(sk);
2260         tcp_init_send_head(sk);
2261         memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2262         __sk_dst_reset(sk);
2263
2264         WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2265
2266         sk->sk_error_report(sk);
2267         return err;
2268 }
2269 EXPORT_SYMBOL(tcp_disconnect);
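
/*
 * Editor's note -- illustrative sketch, not part of this file.
 * tcp_disconnect() is what userspace reaches by connect()ing an
 * existing TCP socket to an AF_UNSPEC address, returning it to
 * TCP_CLOSE so it can be reused:
 */
#if 0	/* hedged example only, never compiled */
#include <string.h>
#include <sys/socket.h>

static int tcp_userspace_disconnect(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	/* Hits the ABORT path above; an RST is sent if the
	 * connection was established. */
	return connect(fd, &sa, sizeof(sa));
}
#endif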
2270
2271 static inline bool tcp_can_repair_sock(const struct sock *sk)
2272 {
2273         return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
2274                 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
2275 }
2276
2277 static int tcp_repair_options_est(struct tcp_sock *tp,
2278                 struct tcp_repair_opt __user *optbuf, unsigned int len)
2279 {
2280         struct tcp_repair_opt opt;
2281
2282         while (len >= sizeof(opt)) {
2283                 if (copy_from_user(&opt, optbuf, sizeof(opt)))
2284                         return -EFAULT;
2285
2286                 optbuf++;
2287                 len -= sizeof(opt);
2288
2289                 switch (opt.opt_code) {
2290                 case TCPOPT_MSS:
2291                         tp->rx_opt.mss_clamp = opt.opt_val;
2292                         break;
2293                 case TCPOPT_WINDOW:
2294                         {
2295                                 u16 snd_wscale = opt.opt_val & 0xFFFF;
2296                                 u16 rcv_wscale = opt.opt_val >> 16;
2297
2298                                 if (snd_wscale > 14 || rcv_wscale > 14)
2299                                         return -EFBIG;
2300
2301                                 tp->rx_opt.snd_wscale = snd_wscale;
2302                                 tp->rx_opt.rcv_wscale = rcv_wscale;
2303                                 tp->rx_opt.wscale_ok = 1;
2304                         }
2305                         break;
2306                 case TCPOPT_SACK_PERM:
2307                         if (opt.opt_val != 0)
2308                                 return -EINVAL;
2309
2310                         tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
2311                         if (sysctl_tcp_fack)
2312                                 tcp_enable_fack(tp);
2313                         break;
2314                 case TCPOPT_TIMESTAMP:
2315                         if (opt.opt_val != 0)
2316                                 return -EINVAL;
2317
2318                         tp->rx_opt.tstamp_ok = 1;
2319                         break;
2320                 }
2321         }
2322
2323         return 0;
2324 }
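
/*
 * Editor's note -- illustrative sketch, not part of this file. The
 * repair-mode options parsed above are driven from userspace (CRIU
 * style) by a CAP_NET_ADMIN process; TCP_REPAIR_OPTIONS is only
 * accepted once the socket is in repair mode and ESTABLISHED. The
 * TCPOPT_WINDOW value (3) is kernel-internal, so the sketch defines
 * it locally:
 */
#if 0	/* hedged example only, never compiled */
#include <linux/tcp.h>		/* TCP_REPAIR*, struct tcp_repair_opt */
#include <netinet/in.h>		/* IPPROTO_TCP */
#include <sys/socket.h>

#ifndef TCPOPT_WINDOW
#define TCPOPT_WINDOW 3		/* not exported to uapi */
#endif

static int restore_window_scale(int fd, unsigned snd_wscale,
				unsigned rcv_wscale)
{
	struct tcp_repair_opt opt = {
		.opt_code = TCPOPT_WINDOW,
		.opt_val  = snd_wscale | (rcv_wscale << 16),
	};
	int one = 1;

	if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &one, sizeof(one)))
		return -1;
	/* Lands in tcp_repair_options_est(); scales > 14 get -EFBIG. */
	return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_OPTIONS,
			  &opt, sizeof(opt));
}
#endif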
2325
2326 /*
2327  *      Socket option code for TCP.
2328  */
2329 static int do_tcp_setsockopt(struct sock *sk, int level,
2330                 int optname, char __user *optval, unsigned int optlen)
2331 {
2332         struct tcp_sock *tp = tcp_sk(sk);
2333         struct inet_connection_sock *icsk = inet_csk(sk);
2334         int val;
2335         int err = 0;
2336
2337         /* These are data/string values, all the others are ints */
2338         switch (optname) {
2339         case TCP_CONGESTION: {
2340                 char name[TCP_CA_NAME_MAX];
2341
2342                 if (optlen < 1)
2343                         return -EINVAL;
2344
2345                 val = strncpy_from_user(name, optval,
2346                                         min_t(long, TCP_CA_NAME_MAX-1, optlen));
2347                 if (val < 0)
2348                         return -EFAULT;
2349                 name[val] = 0;
2350
2351                 lock_sock(sk);
2352                 err = tcp_set_congestion_control(sk, name);
2353                 release_sock(sk);
2354                 return err;
2355         }
2356         default:
2357                 /* all other options are handled as ints below */
2358                 break;
2359         }
2360
2361         if (optlen < sizeof(int))
2362                 return -EINVAL;
2363
2364         if (get_user(val, (int __user *)optval))
2365                 return -EFAULT;
2366
2367         lock_sock(sk);
2368
2369         switch (optname) {
2370         case TCP_MAXSEG:
2371                 /* Values greater than the interface MTU won't take effect.
2372                  * However, at the point when this call is made we typically
2373                  * don't yet know which interface is going to be used. */
2374                 if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
2375                         err = -EINVAL;
2376                         break;
2377                 }
2378                 tp->rx_opt.user_mss = val;
2379                 break;
2380
2381         case TCP_NODELAY:
2382                 if (val) {
2383                         /* TCP_NODELAY is weaker than TCP_CORK, so that
2384                          * this option on corked socket is remembered, but
2385                          * it is not activated until cork is cleared.
2386                          *
2387                          * However, when TCP_NODELAY is set we make
2388                          * an explicit push, which overrides even TCP_CORK
2389                          * for currently queued segments.
2390                          */
2391                         tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
2392                         tcp_push_pending_frames(sk);
2393                 } else {
2394                         tp->nonagle &= ~TCP_NAGLE_OFF;
2395                 }
2396                 break;
2397
2398         case TCP_THIN_LINEAR_TIMEOUTS:
2399                 if (val < 0 || val > 1)
2400                         err = -EINVAL;
2401                 else
2402                         tp->thin_lto = val;
2403                 break;
2404
2405         case TCP_THIN_DUPACK:
2406                 if (val < 0 || val > 1)
2407                         err = -EINVAL;
2408                 else {
2409                         tp->thin_dupack = val;
2410                         if (tp->thin_dupack)
2411                                 tcp_disable_early_retrans(tp);
2412                 }
2413                 break;
2414
2415         case TCP_REPAIR:
2416                 if (!tcp_can_repair_sock(sk))
2417                         err = -EPERM;
2418                 else if (val == 1) {
2419                         tp->repair = 1;
2420                         sk->sk_reuse = SK_FORCE_REUSE;
2421                         tp->repair_queue = TCP_NO_QUEUE;
2422                 } else if (val == 0) {
2423                         tp->repair = 0;
2424                         sk->sk_reuse = SK_NO_REUSE;
2425                         tcp_send_window_probe(sk);
2426                 } else
2427                         err = -EINVAL;
2428
2429                 break;
2430
2431         case TCP_REPAIR_QUEUE:
2432                 if (!tp->repair)
2433                         err = -EPERM;
2434                 else if (val < TCP_QUEUES_NR)
2435                         tp->repair_queue = val;
2436                 else
2437                         err = -EINVAL;
2438                 break;
2439
2440         case TCP_QUEUE_SEQ:
2441                 if (sk->sk_state != TCP_CLOSE)
2442                         err = -EPERM;
2443                 else if (tp->repair_queue == TCP_SEND_QUEUE)
2444                         tp->write_seq = val;
2445                 else if (tp->repair_queue == TCP_RECV_QUEUE)
2446                         tp->rcv_nxt = val;
2447                 else
2448                         err = -EINVAL;
2449                 break;
2450
2451         case TCP_REPAIR_OPTIONS:
2452                 if (!tp->repair)
2453                         err = -EINVAL;
2454                 else if (sk->sk_state == TCP_ESTABLISHED)
2455                         err = tcp_repair_options_est(tp,
2456                                         (struct tcp_repair_opt __user *)optval,
2457                                         optlen);
2458                 else
2459                         err = -EPERM;
2460                 break;
2461
2462         case TCP_CORK:
2463                 /* When set, indicates to always queue non-full frames.
2464                  * Later the user clears this option and we transmit
2465                  * any pending partial frames in the queue.  This is
2466                  * meant to be used alongside sendfile() to get properly
2467                  * filled frames when the user (for example) must write
2468                  * out headers with a write() call first and then use
2469                  * sendfile to send out the data parts.
2470                  *
2471                  * TCP_CORK can be set together with TCP_NODELAY and it is
2472                  * stronger than TCP_NODELAY.
2473                  */
2474                 if (val) {
2475                         tp->nonagle |= TCP_NAGLE_CORK;
2476                 } else {
2477                         tp->nonagle &= ~TCP_NAGLE_CORK;
2478                         if (tp->nonagle&TCP_NAGLE_OFF)
2479                                 tp->nonagle |= TCP_NAGLE_PUSH;
2480                         tcp_push_pending_frames(sk);
2481                 }
2482                 break;
2483
2484         case TCP_KEEPIDLE:
2485                 if (val < 1 || val > MAX_TCP_KEEPIDLE)
2486                         err = -EINVAL;
2487                 else {
2488                         tp->keepalive_time = val * HZ;
2489                         if (sock_flag(sk, SOCK_KEEPOPEN) &&
2490                             !((1 << sk->sk_state) &
2491                               (TCPF_CLOSE | TCPF_LISTEN))) {
2492                                 u32 elapsed = keepalive_time_elapsed(tp);
2493                                 if (tp->keepalive_time > elapsed)
2494                                         elapsed = tp->keepalive_time - elapsed;
2495                                 else
2496                                         elapsed = 0;
2497                                 inet_csk_reset_keepalive_timer(sk, elapsed);
2498                         }
2499                 }
2500                 break;
2501         case TCP_KEEPINTVL:
2502                 if (val < 1 || val > MAX_TCP_KEEPINTVL)
2503                         err = -EINVAL;
2504                 else
2505                         tp->keepalive_intvl = val * HZ;
2506                 break;
2507         case TCP_KEEPCNT:
2508                 if (val < 1 || val > MAX_TCP_KEEPCNT)
2509                         err = -EINVAL;
2510                 else
2511                         tp->keepalive_probes = val;
2512                 break;
2513         case TCP_SYNCNT:
2514                 if (val < 1 || val > MAX_TCP_SYNCNT)
2515                         err = -EINVAL;
2516                 else
2517                         icsk->icsk_syn_retries = val;
2518                 break;
2519
2520         case TCP_SAVE_SYN:
2521                 if (val < 0 || val > 1)
2522                         err = -EINVAL;
2523                 else
2524                         tp->save_syn = val;
2525                 break;
2526
2527         case TCP_LINGER2:
2528                 if (val < 0)
2529                         tp->linger2 = -1;
2530                 else if (val > sysctl_tcp_fin_timeout / HZ)
2531                         tp->linger2 = 0;
2532                 else
2533                         tp->linger2 = val * HZ;
2534                 break;
2535
2536         case TCP_DEFER_ACCEPT:
2537                 /* Translate value in seconds to number of retransmits */
2538                 icsk->icsk_accept_queue.rskq_defer_accept =
2539                         secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
2540                                         TCP_RTO_MAX / HZ);
2541                 break;
2542
2543         case TCP_WINDOW_CLAMP:
2544                 if (!val) {
2545                         if (sk->sk_state != TCP_CLOSE) {
2546                                 err = -EINVAL;
2547                                 break;
2548                         }
2549                         tp->window_clamp = 0;
2550                 } else
2551                         tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2552                                                 SOCK_MIN_RCVBUF / 2 : val;
2553                 break;
2554
2555         case TCP_QUICKACK:
2556                 if (!val) {
2557                         icsk->icsk_ack.pingpong = 1;
2558                 } else {
2559                         icsk->icsk_ack.pingpong = 0;
2560                         if ((1 << sk->sk_state) &
2561                             (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2562                             inet_csk_ack_scheduled(sk)) {
2563                                 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
2564                                 tcp_cleanup_rbuf(sk, 1);
2565                                 if (!(val & 1))
2566                                         icsk->icsk_ack.pingpong = 1;
2567                         }
2568                 }
2569                 break;
2570
2571 #ifdef CONFIG_TCP_MD5SIG
2572         case TCP_MD5SIG:
2573                 /* Read the IP->Key mappings from userspace */
2574                 err = tp->af_specific->md5_parse(sk, optval, optlen);
2575                 break;
2576 #endif
2577         case TCP_USER_TIMEOUT:
2578                 /* Cap the max time in ms TCP will retry or probe the window
2579                  * before giving up and aborting (ETIMEDOUT) a connection.
2580                  */
2581                 if (val < 0)
2582                         err = -EINVAL;
2583                 else
2584                         icsk->icsk_user_timeout = msecs_to_jiffies(val);
2585                 break;
2586
2587         case TCP_FASTOPEN:
2588                 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
2589                     TCPF_LISTEN))) {
2590                         tcp_fastopen_init_key_once(true);
2591
2592                         fastopen_queue_tune(sk, val);
2593                 } else {
2594                         err = -EINVAL;
2595                 }
2596                 break;
2597         case TCP_TIMESTAMP:
2598                 if (!tp->repair)
2599                         err = -EPERM;
2600                 else
2601                         tp->tsoffset = val - tcp_time_stamp;
2602                 break;
2603         case TCP_NOTSENT_LOWAT:
2604                 tp->notsent_lowat = val;
2605                 sk->sk_write_space(sk);
2606                 break;
2607         default:
2608                 err = -ENOPROTOOPT;
2609                 break;
2610         }
2611
2612         release_sock(sk);
2613         return err;
2614 }
2615
2616 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2617                    unsigned int optlen)
2618 {
2619         const struct inet_connection_sock *icsk = inet_csk(sk);
2620
2621         if (level != SOL_TCP)
2622                 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2623                                                      optval, optlen);
2624         return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2625 }
2626 EXPORT_SYMBOL(tcp_setsockopt);
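
/*
 * Editor's note -- illustrative sketch, not part of this file. The
 * TCP_CORK/TCP_NODELAY interplay documented in the handlers above,
 * seen from userspace: cork, write header and body separately, then
 * uncork so the stack pushes the merged frame:
 */
#if 0	/* hedged example only, never compiled */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int corked_write(int fd, const void *hdr, size_t hlen,
			const void *body, size_t blen)
{
	int on = 1, off = 0;

	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	if (send(fd, hdr, hlen, 0) < 0 || send(fd, body, blen, 0) < 0)
		return -1;
	/* Clearing TCP_CORK triggers tcp_push_pending_frames() above. */
	return setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}
#endif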
2627
2628 #ifdef CONFIG_COMPAT
2629 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2630                           char __user *optval, unsigned int optlen)
2631 {
2632         if (level != SOL_TCP)
2633                 return inet_csk_compat_setsockopt(sk, level, optname,
2634                                                   optval, optlen);
2635         return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2636 }
2637 EXPORT_SYMBOL(compat_tcp_setsockopt);
2638 #endif
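
/*
 * Editor's note -- illustrative sketch, not part of this file.
 * tcp_get_info() below fills struct tcp_info, which userspace reads
 * via getsockopt(TCP_INFO), e.g. to sample the smoothed RTT:
 */
#if 0	/* hedged example only, never compiled */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int get_rtt_us(int fd, unsigned int *rtt_us)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len))
		return -1;
	*rtt_us = info.tcpi_rtt;	/* usec; srtt_us >> 3 below */
	return 0;
}
#endif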
2639
2640 /* Return information about state of tcp endpoint in API format. */
2641 void tcp_get_info(struct sock *sk, struct tcp_info *info)
2642 {
2643         const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
2644         const struct inet_connection_sock *icsk = inet_csk(sk);
2645         u32 now = tcp_time_stamp;
2646         unsigned int start;
2647         u64 rate64;
2648         u32 rate;
2649
2650         memset(info, 0, sizeof(*info));
2651         if (sk->sk_type != SOCK_STREAM)
2652                 return;
2653
2654         info->tcpi_state = sk_state_load(sk);
2655
2656         info->tcpi_ca_state = icsk->icsk_ca_state;
2657         info->tcpi_retransmits = icsk->icsk_retransmits;
2658         info->tcpi_probes = icsk->icsk_probes_out;
2659         info->tcpi_backoff = icsk->icsk_backoff;
2660
2661         if (tp->rx_opt.tstamp_ok)
2662                 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2663         if (tcp_is_sack(tp))
2664                 info->tcpi_options |= TCPI_OPT_SACK;
2665         if (tp->rx_opt.wscale_ok) {
2666                 info->tcpi_options |= TCPI_OPT_WSCALE;
2667                 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2668                 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2669         }
2670
2671         if (tp->ecn_flags & TCP_ECN_OK)
2672                 info->tcpi_options |= TCPI_OPT_ECN;
2673         if (tp->ecn_flags & TCP_ECN_SEEN)
2674                 info->tcpi_options |= TCPI_OPT_ECN_SEEN;
2675         if (tp->syn_data_acked)
2676                 info->tcpi_options |= TCPI_OPT_SYN_DATA;
2677
2678         info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2679         info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2680         info->tcpi_snd_mss = tp->mss_cache;
2681         info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2682
2683         if (info->tcpi_state == TCP_LISTEN) {
2684                 info->tcpi_unacked = sk->sk_ack_backlog;
2685                 info->tcpi_sacked = sk->sk_max_ack_backlog;
2686         } else {
2687                 info->tcpi_unacked = tp->packets_out;
2688                 info->tcpi_sacked = tp->sacked_out;
2689         }
2690         info->tcpi_lost = tp->lost_out;
2691         info->tcpi_retrans = tp->retrans_out;
2692         info->tcpi_fackets = tp->fackets_out;
2693
2694         info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2695         info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2696         info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2697
2698         info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2699         info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2700         info->tcpi_rtt = tp->srtt_us >> 3;
2701         info->tcpi_rttvar = tp->mdev_us >> 2;
2702         info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2703         info->tcpi_snd_cwnd = tp->snd_cwnd;
2704         info->tcpi_advmss = tp->advmss;
2705         info->tcpi_reordering = tp->reordering;
2706
2707         info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt) >> 3;
2708         info->tcpi_rcv_space = tp->rcvq_space.space;
2709
2710         info->tcpi_total_retrans = tp->total_retrans;
2711
2712         rate = READ_ONCE(sk->sk_pacing_rate);
2713         rate64 = rate != ~0U ? rate : ~0ULL;
2714         put_unaligned(rate64, &info->tcpi_pacing_rate);
2715
2716         rate = READ_ONCE(sk->sk_max_pacing_rate);
2717         rate64 = rate != ~0U ? rate : ~0ULL;
2718         put_unaligned(rate64, &info->tcpi_max_pacing_rate);
2719
2720         do {
2721                 start = u64_stats_fetch_begin_irq(&tp->syncp);
2722                 put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
2723                 put_unaligned(tp->bytes_received, &info->tcpi_bytes_received);
2724         } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
2725         info->tcpi_segs_out = tp->segs_out;
2726         info->tcpi_segs_in = tp->segs_in;
2727 }
2728 EXPORT_SYMBOL_GPL(tcp_get_info);
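/* Userspace sketch: the usual consumer of tcp_get_info() is the TCP_INFO
 * getsockopt below, e.g. to read the smoothed RTT (reported in microseconds):
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("srtt=%u us\n", ti.tcpi_rtt);
 */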
2729
2730 static int do_tcp_getsockopt(struct sock *sk, int level,
2731                 int optname, char __user *optval, int __user *optlen)
2732 {
2733         struct inet_connection_sock *icsk = inet_csk(sk);
2734         struct tcp_sock *tp = tcp_sk(sk);
2735         int val, len;
2736
2737         if (get_user(len, optlen))
2738                 return -EFAULT;
2739
2740         if (len < 0)
2741                 return -EINVAL;
2742
2743         len = min_t(unsigned int, len, sizeof(int));
2744
2745         switch (optname) {
2746         case TCP_MAXSEG:
2747                 val = tp->mss_cache;
2748                 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2749                         val = tp->rx_opt.user_mss;
2750                 if (tp->repair)
2751                         val = tp->rx_opt.mss_clamp;
2752                 break;
2753         case TCP_NODELAY:
2754                 val = !!(tp->nonagle & TCP_NAGLE_OFF);
2755                 break;
2756         case TCP_CORK:
2757                 val = !!(tp->nonagle & TCP_NAGLE_CORK);
2758                 break;
2759         case TCP_KEEPIDLE:
2760                 val = keepalive_time_when(tp) / HZ;
2761                 break;
2762         case TCP_KEEPINTVL:
2763                 val = keepalive_intvl_when(tp) / HZ;
2764                 break;
2765         case TCP_KEEPCNT:
2766                 val = keepalive_probes(tp);
2767                 break;
2768         case TCP_SYNCNT:
2769                 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2770                 break;
2771         case TCP_LINGER2:
2772                 val = tp->linger2;
2773                 if (val >= 0)
2774                         val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2775                 break;
2776         case TCP_DEFER_ACCEPT:
2777                 val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
2778                                       TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
2779                 break;
2780         case TCP_WINDOW_CLAMP:
2781                 val = tp->window_clamp;
2782                 break;
2783         case TCP_INFO: {
2784                 struct tcp_info info;
2785
2786                 if (get_user(len, optlen))
2787                         return -EFAULT;
2788
2789                 tcp_get_info(sk, &info);
2790
2791                 len = min_t(unsigned int, len, sizeof(info));
2792                 if (put_user(len, optlen))
2793                         return -EFAULT;
2794                 if (copy_to_user(optval, &info, len))
2795                         return -EFAULT;
2796                 return 0;
2797         }
2798         case TCP_CC_INFO: {
2799                 const struct tcp_congestion_ops *ca_ops;
2800                 union tcp_cc_info info;
2801                 size_t sz = 0;
2802                 int attr;
2803
2804                 if (get_user(len, optlen))
2805                         return -EFAULT;
2806
2807                 ca_ops = icsk->icsk_ca_ops;
2808                 if (ca_ops && ca_ops->get_info)
2809                         sz = ca_ops->get_info(sk, ~0U, &attr, &info);
2810
2811                 len = min_t(unsigned int, len, sz);
2812                 if (put_user(len, optlen))
2813                         return -EFAULT;
2814                 if (copy_to_user(optval, &info, len))
2815                         return -EFAULT;
2816                 return 0;
2817         }
2818         case TCP_QUICKACK:
2819                 val = !icsk->icsk_ack.pingpong;
2820                 break;
2821
2822         case TCP_CONGESTION:
2823                 if (get_user(len, optlen))
2824                         return -EFAULT;
2825                 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2826                 if (put_user(len, optlen))
2827                         return -EFAULT;
2828                 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2829                         return -EFAULT;
2830                 return 0;
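                /* Userspace sketch: query the congestion control algorithm name
                 * returned by the case above:
                 *
                 *	char ca[TCP_CA_NAME_MAX];
                 *	socklen_t len = sizeof(ca);
                 *
                 *	getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, ca, &len);
                 */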
2831
2832         case TCP_THIN_LINEAR_TIMEOUTS:
2833                 val = tp->thin_lto;
2834                 break;
2835         case TCP_THIN_DUPACK:
2836                 val = tp->thin_dupack;
2837                 break;
2838
2839         case TCP_REPAIR:
2840                 val = tp->repair;
2841                 break;
2842
2843         case TCP_REPAIR_QUEUE:
2844                 if (tp->repair)
2845                         val = tp->repair_queue;
2846                 else
2847                         return -EINVAL;
2848                 break;
2849
2850         case TCP_QUEUE_SEQ:
2851                 if (tp->repair_queue == TCP_SEND_QUEUE)
2852                         val = tp->write_seq;
2853                 else if (tp->repair_queue == TCP_RECV_QUEUE)
2854                         val = tp->rcv_nxt;
2855                 else
2856                         return -EINVAL;
2857                 break;
2858
2859         case TCP_USER_TIMEOUT:
2860                 val = jiffies_to_msecs(icsk->icsk_user_timeout);
2861                 break;
2862
2863         case TCP_FASTOPEN:
2864                 val = icsk->icsk_accept_queue.fastopenq.max_qlen;
2865                 break;
2866
2867         case TCP_TIMESTAMP:
2868                 val = tcp_time_stamp + tp->tsoffset;
2869                 break;
2870         case TCP_NOTSENT_LOWAT:
2871                 val = tp->notsent_lowat;
2872                 break;
2873         case TCP_SAVE_SYN:
2874                 val = tp->save_syn;
2875                 break;
2876         case TCP_SAVED_SYN: {
2877                 if (get_user(len, optlen))
2878                         return -EFAULT;
2879
2880                 lock_sock(sk);
2881                 if (tp->saved_syn) {
2882                         if (len < tp->saved_syn[0]) {
2883                                 if (put_user(tp->saved_syn[0], optlen)) {
2884                                         release_sock(sk);
2885                                         return -EFAULT;
2886                                 }
2887                                 release_sock(sk);
2888                                 return -EINVAL;
2889                         }
2890                         len = tp->saved_syn[0];
2891                         if (put_user(len, optlen)) {
2892                                 release_sock(sk);
2893                                 return -EFAULT;
2894                         }
2895                         if (copy_to_user(optval, tp->saved_syn + 1, len)) {
2896                                 release_sock(sk);
2897                                 return -EFAULT;
2898                         }
2899                         tcp_saved_syn_free(tp);
2900                         release_sock(sk);
2901                 } else {
2902                         release_sock(sk);
2903                         len = 0;
2904                         if (put_user(len, optlen))
2905                                 return -EFAULT;
2906                 }
2907                 return 0;
2908         }
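        /* Userspace sketch: TCP_SAVED_SYN is a one-shot, two-step read.  Per
         * the code above, a too-small buffer gets the required length stored
         * through optlen and an EINVAL error, so callers can resize and retry:
         *
         *	char buf[512];
         *	socklen_t len = sizeof(buf);
         *
         *	getsockopt(fd, IPPROTO_TCP, TCP_SAVED_SYN, buf, &len);
         */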
2909         default:
2910                 return -ENOPROTOOPT;
2911         }
2912
2913         if (put_user(len, optlen))
2914                 return -EFAULT;
2915         if (copy_to_user(optval, &val, len))
2916                 return -EFAULT;
2917         return 0;
2918 }
2919
2920 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2921                    int __user *optlen)
2922 {
2923         struct inet_connection_sock *icsk = inet_csk(sk);
2924
2925         if (level != SOL_TCP)
2926                 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2927                                                      optval, optlen);
2928         return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2929 }
2930 EXPORT_SYMBOL(tcp_getsockopt);
2931
2932 #ifdef CONFIG_COMPAT
2933 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2934                           char __user *optval, int __user *optlen)
2935 {
2936         if (level != SOL_TCP)
2937                 return inet_csk_compat_getsockopt(sk, level, optname,
2938                                                   optval, optlen);
2939         return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2940 }
2941 EXPORT_SYMBOL(compat_tcp_getsockopt);
2942 #endif
2943
2944 #ifdef CONFIG_TCP_MD5SIG
2945 static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
2946 static DEFINE_MUTEX(tcp_md5sig_mutex);
2947 static bool tcp_md5sig_pool_populated = false;
2948
2949 static void __tcp_alloc_md5sig_pool(void)
2950 {
2951         int cpu;
2952
2953         for_each_possible_cpu(cpu) {
2954                 if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
2955                         struct crypto_hash *hash;
2956
2957                         hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2958                         if (IS_ERR_OR_NULL(hash))
2959                                 return;
2960                         per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
2961                 }
2962         }
2963         /* Before setting tcp_md5sig_pool_populated, we must commit all writes
2964          * to memory; this pairs with the smp_rmb() in tcp_get_md5sig_pool().
2965          */
2966         smp_wmb();
2967         tcp_md5sig_pool_populated = true;
2968 }
2969
2970 bool tcp_alloc_md5sig_pool(void)
2971 {
2972         if (unlikely(!tcp_md5sig_pool_populated)) {
2973                 mutex_lock(&tcp_md5sig_mutex);
2974
2975                 if (!tcp_md5sig_pool_populated)
2976                         __tcp_alloc_md5sig_pool();
2977
2978                 mutex_unlock(&tcp_md5sig_mutex);
2979         }
2980         return tcp_md5sig_pool_populated;
2981 }
2982 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2983
2984
2985 /**
2986  *      tcp_get_md5sig_pool - get md5sig_pool for this user
2987  *
2988  *      We use a per-CPU structure, so on success we return with preemption
2989  *      and BH disabled, making sure that no other thread or softirq
2990  *      handler will try to use the same context.
2991  */
2992 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
2993 {
2994         local_bh_disable();
2995
2996         if (tcp_md5sig_pool_populated) {
2997                 /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
2998                 smp_rmb();
2999                 return this_cpu_ptr(&tcp_md5sig_pool);
3000         }
3001         local_bh_enable();
3002         return NULL;
3003 }
3004 EXPORT_SYMBOL(tcp_get_md5sig_pool);
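/* In-kernel usage sketch: callers pair this with tcp_put_md5sig_pool()
 * (which re-enables BH) once hashing is done, e.g.:
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *
 *	if (hp) {
 *		... crypto_hash_init(), tcp_md5_hash_header(), etc. ...
 *		tcp_put_md5sig_pool();
 *	}
 */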
3005
3006 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
3007                         const struct tcphdr *th)
3008 {
3009         struct scatterlist sg;
3010         struct tcphdr hdr;
3011         int err;
3012
3013         /* We are not allowed to change tcphdr, make a local copy */
3014         memcpy(&hdr, th, sizeof(hdr));
3015         hdr.check = 0;
3016
3017         /* options aren't included in the hash */
3018         sg_init_one(&sg, &hdr, sizeof(hdr));
3019         err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
3020         return err;
3021 }
3022 EXPORT_SYMBOL(tcp_md5_hash_header);
3023
3024 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3025                           const struct sk_buff *skb, unsigned int header_len)
3026 {
3027         struct scatterlist sg;
3028         const struct tcphdr *tp = tcp_hdr(skb);
3029         struct hash_desc *desc = &hp->md5_desc;
3030         unsigned int i;
3031         const unsigned int head_data_len = skb_headlen(skb) > header_len ?
3032                                            skb_headlen(skb) - header_len : 0;
3033         const struct skb_shared_info *shi = skb_shinfo(skb);
3034         struct sk_buff *frag_iter;
3035
3036         sg_init_table(&sg, 1);
3037
3038         sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
3039         if (crypto_hash_update(desc, &sg, head_data_len))
3040                 return 1;
3041
3042         for (i = 0; i < shi->nr_frags; ++i) {
3043                 const struct skb_frag_struct *f = &shi->frags[i];
3044                 unsigned int offset = f->page_offset;
3045                 struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
3046
3047                 sg_set_page(&sg, page, skb_frag_size(f),
3048                             offset_in_page(offset));
3049                 if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
3050                         return 1;
3051         }
3052
3053         skb_walk_frags(skb, frag_iter)
3054                 if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
3055                         return 1;
3056
3057         return 0;
3058 }
3059 EXPORT_SYMBOL(tcp_md5_hash_skb_data);
3060
3061 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
3062 {
3063         struct scatterlist sg;
3064
3065         sg_init_one(&sg, key->key, key->keylen);
3066         return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
3067 }
3068 EXPORT_SYMBOL(tcp_md5_hash_key);
3069
3070 #endif
3071
3072 void tcp_done(struct sock *sk)
3073 {
3074         struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
3075
3076         if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
3077                 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
3078
3079         tcp_set_state(sk, TCP_CLOSE);
3080         tcp_clear_xmit_timers(sk);
3081         if (req)
3082                 reqsk_fastopen_remove(sk, req, false);
3083
3084         sk->sk_shutdown = SHUTDOWN_MASK;
3085
3086         if (!sock_flag(sk, SOCK_DEAD))
3087                 sk->sk_state_change(sk);
3088         else
3089                 inet_csk_destroy_sock(sk);
3090 }
3091 EXPORT_SYMBOL_GPL(tcp_done);
3092
3093 extern struct tcp_congestion_ops tcp_reno;
3094
3095 static __initdata unsigned long thash_entries;
3096 static int __init set_thash_entries(char *str)
3097 {
3098         ssize_t ret;
3099
3100         if (!str)
3101                 return 0;
3102
3103         ret = kstrtoul(str, 0, &thash_entries);
3104         if (ret)
3105                 return 0;
3106
3107         return 1;
3108 }
3109 __setup("thash_entries=", set_thash_entries);
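/* Usage sketch: the established-hash size can be pinned from the kernel
 * command line instead of being auto-sized in tcp_init() below, e.g.:
 *
 *	linux ... thash_entries=131072
 */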
3110
3111 static void __init tcp_init_mem(void)
3112 {
3113         unsigned long limit = nr_free_buffer_pages() / 16;
3114
3115         limit = max(limit, 128UL);
3116         sysctl_tcp_mem[0] = limit / 4 * 3;              /* 4.68 % */
3117         sysctl_tcp_mem[1] = limit;                      /* 6.25 % */
3118         sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;      /* 9.37 % */
3119 }
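/* Worked example (hypothetical machine): with nr_free_buffer_pages() equal to
 * 262144 (1 GiB of 4 KiB pages), limit = 16384 pages, so sysctl_tcp_mem
 * becomes { 12288, 16384, 24576 } pages = 48/64/96 MiB, matching the
 * 4.68/6.25/9.37 % annotations above.
 */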
3120
3121 void __init tcp_init(void)
3122 {
3123         unsigned long limit;
3124         int max_rshare, max_wshare, cnt;
3125         unsigned int i;
3126
3127         sock_skb_cb_check_size(sizeof(struct tcp_skb_cb));
3128
3129         percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
3130         percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
3131         tcp_hashinfo.bind_bucket_cachep =
3132                 kmem_cache_create("tcp_bind_bucket",
3133                                   sizeof(struct inet_bind_bucket), 0,
3134                                   SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3135
3136         /* Size and allocate the main established and bind bucket
3137          * hash tables.
3138          *
3139          * The methodology is similar to that of the buffer cache.
3140          */
3141         tcp_hashinfo.ehash =
3142                 alloc_large_system_hash("TCP established",
3143                                         sizeof(struct inet_ehash_bucket),
3144                                         thash_entries,
3145                                         17, /* one slot per 128 KB of memory */
3146                                         0,
3147                                         NULL,
3148                                         &tcp_hashinfo.ehash_mask,
3149                                         0,
3150                                         thash_entries ? 0 : 512 * 1024);
3151         for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
3152                 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
3153
3154         if (inet_ehash_locks_alloc(&tcp_hashinfo))
3155                 panic("TCP: failed to alloc ehash_locks");
3156         tcp_hashinfo.bhash =
3157                 alloc_large_system_hash("TCP bind",
3158                                         sizeof(struct inet_bind_hashbucket),
3159                                         tcp_hashinfo.ehash_mask + 1,
3160                                         17, /* one slot per 128 KB of memory */
3161                                         0,
3162                                         &tcp_hashinfo.bhash_size,
3163                                         NULL,
3164                                         0,
3165                                         64 * 1024);
3166         tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
3167         for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
3168                 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
3169                 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
3170         }
3171
3172
3173         cnt = tcp_hashinfo.ehash_mask + 1;
3174
3175         tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
3176         sysctl_tcp_max_orphans = cnt / 2;
3177         sysctl_max_syn_backlog = max(128, cnt / 256);
3178
3179         tcp_init_mem();
3180         /* Set per-socket limits to no more than 1/128 the pressure threshold */
3181         limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
3182         max_wshare = min(4UL*1024*1024, limit);
3183         max_rshare = min(6UL*1024*1024, limit);
3184
3185         sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
3186         sysctl_tcp_wmem[1] = 16*1024;
3187         sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
3188
3189         sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
3190         sysctl_tcp_rmem[1] = 87380;
3191         sysctl_tcp_rmem[2] = max(87380, max_rshare);
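        /* Worked example (hypothetical machine): with 262144 free buffer pages
         * (1 GiB, 4 KiB pages), limit = 1 GiB / 128 = 8 MiB, so the per-socket
         * maxima clamp to the 4 MiB (wmem) and 6 MiB (rmem) ceilings above.
         */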
3192
3193         pr_info("Hash tables configured (established %u bind %u)\n",
3194                 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3195
3196         tcp_metrics_init();
3197         BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
3198         tcp_tasklet_init();
3199 }