kernel/net/ipv4/inet_timewait_sock.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Generic TIME_WAIT sockets functions
 *
 *              From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>


/**
 *      inet_twsk_unhash - unhash a timewait socket from established hash
 *      @tw: timewait socket
 *
 *      Unhash a timewait socket from the established hash, if hashed.
 *      The ehash lock must be held by the caller.
 *      Returns 1 if the caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_unhash(struct inet_timewait_sock *tw)
{
        if (hlist_nulls_unhashed(&tw->tw_node))
                return 0;

        hlist_nulls_del_rcu(&tw->tw_node);
        sk_nulls_node_init(&tw->tw_node);
        /*
         * We cannot call inet_twsk_put() ourselves under the lock;
         * the caller must do it for us.
         */
        return 1;
}

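/*
 * Typical caller pattern, as a sketch (compare inet_twsk_kill() below,
 * which batches the reference drops with atomic_sub() instead of a loop):
 *
 *      spin_lock(lock);
 *      refcnt = inet_twsk_unhash(tw);
 *      spin_unlock(lock);
 *      while (refcnt--)
 *              inet_twsk_put(tw);
 */
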
/**
 *      inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 *      @tw: timewait socket
 *      @hashinfo: hashinfo pointer
 *
 *      Unhash a timewait socket from the bind hash, if hashed.
 *      The bind hash lock must be held by the caller.
 *      Returns 1 if the caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
                          struct inet_hashinfo *hashinfo)
{
        struct inet_bind_bucket *tb = tw->tw_tb;

        if (!tb)
                return 0;

        __hlist_del(&tw->tw_bind_node);
        tw->tw_tb = NULL;
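        /* Note: inet_bind_bucket_destroy() frees the bucket only if no
         * other socket still owns it.
         */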
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
        /*
         * We cannot call inet_twsk_put() ourselves under the lock;
         * the caller must do it for us.
         */
        return 1;
}

/* Must be called with locally disabled BHs. */
static void inet_twsk_kill(struct inet_timewait_sock *tw)
{
        struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
        struct inet_bind_hashbucket *bhead;
        int refcnt;
        /* Unlink from established hashes. */
        spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

        spin_lock(lock);
        refcnt = inet_twsk_unhash(tw);
        spin_unlock(lock);

        /* Disassociate with bind bucket. */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
                        hashinfo->bhash_size)];

        spin_lock(&bhead->lock);
        refcnt += inet_twsk_bind_unhash(tw, hashinfo);
        spin_unlock(&bhead->lock);

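        /* Drop the hash-table references in one go, then drop the
         * reference the (now finished) timer held.  refcnt must stay
         * strictly below tw_refcnt, or the final put would free tw
         * while someone else still uses it.
         */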
        BUG_ON(refcnt >= atomic_read(&tw->tw_refcnt));
        atomic_sub(refcnt, &tw->tw_refcnt);
        atomic_dec(&tw->tw_dr->tw_count);
        inet_twsk_put(tw);
}

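/* Final teardown: run the protocol's twsk destructor and return the
 * socket to its slab cache.  No references may remain at this point.
 */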
void inet_twsk_free(struct inet_timewait_sock *tw)
{
        struct module *owner = tw->tw_prot->owner;
        twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
        pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
        kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
        module_put(owner);
}

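/* Drop one reference; frees the socket when the last reference goes. */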
void inet_twsk_put(struct inet_timewait_sock *tw)
{
        if (atomic_dec_and_test(&tw->tw_refcnt))
                inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

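/* Insertion helpers for the two hash chains a timewait socket occupies:
 * the RCU-protected established (ehash) chain and the lock-protected
 * bind (bhash) chain.
 */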
static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
                                   struct hlist_nulls_head *list)
{
        hlist_nulls_add_head_rcu(&tw->tw_node, list);
}

static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
                                    struct hlist_head *list)
{
        hlist_add_head(&tw->tw_bind_node, list);
}

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
                           struct inet_hashinfo *hashinfo)
{
        const struct inet_sock *inet = inet_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
        spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
        struct inet_bind_hashbucket *bhead;
        /* Step 1: Put TW into bind hash. Original socket stays there too.
           Note that any socket with inet->num != 0 MUST be bound in
           binding cache, even if it is closed.
         */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
                        hashinfo->bhash_size)];
        spin_lock(&bhead->lock);
        tw->tw_tb = icsk->icsk_bind_hash;
        WARN_ON(!icsk->icsk_bind_hash);
        inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
        spin_unlock(&bhead->lock);

        spin_lock(lock);

        /*
         * Step 2: Hash TW into tcp ehash chain.
         * Notes :
         * - tw_refcnt is set to 3 because :
         * - We have one reference from bhash chain.
         * - We have one reference from ehash chain.
         * - We have one reference held by the caller, dropped via
         *   inet_twsk_put() once the hashdance is done.
         * We can use atomic_set() because prior spin_lock()/spin_unlock()
         * committed into memory all tw fields.
         */
        atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
        inet_twsk_add_node_rcu(tw, &ehead->chain);

        /* Step 3: Remove SK from hash chain */
        if (__sk_nulls_del_node_init_rcu(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

        spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);

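/* Expiry handler for the per-timewait timer armed by inet_twsk_schedule().
 * Runs from the timer softirq, so BHs are disabled as inet_twsk_kill()
 * requires.  tw_kill only selects which MIB counter to bump.
 */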
void tw_timer_handler(unsigned long data)
{
        struct inet_timewait_sock *tw = (struct inet_timewait_sock *)data;

        if (tw->tw_kill)
                NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
        else
                NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
        inet_twsk_kill(tw);
}

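/* Allocate a timewait socket for @sk, unless the death row already holds
 * sysctl_max_tw_buckets entries.  Called from BH context, hence GFP_ATOMIC.
 * The returned socket has tw_refcnt == 0; it only becomes visible to other
 * CPUs once __inet_twsk_hashdance() publishes it.
 */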
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
                                           struct inet_timewait_death_row *dr,
                                           const int state)
{
        struct inet_timewait_sock *tw;

        if (atomic_read(&dr->tw_count) >= dr->sysctl_max_tw_buckets)
                return NULL;

        tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
                              GFP_ATOMIC);
        if (tw) {
                const struct inet_sock *inet = inet_sk(sk);

                kmemcheck_annotate_bitfield(tw, flags);

                tw->tw_dr           = dr;
                /* Give us an identity. */
                tw->tw_daddr        = inet->inet_daddr;
                tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
                tw->tw_bound_dev_if = sk->sk_bound_dev_if;
                tw->tw_tos          = inet->tos;
                tw->tw_num          = inet->inet_num;
                tw->tw_state        = TCP_TIME_WAIT;
                tw->tw_substate     = state;
                tw->tw_sport        = inet->inet_sport;
                tw->tw_dport        = inet->inet_dport;
                tw->tw_family       = sk->sk_family;
                tw->tw_reuse        = sk->sk_reuse;
                tw->tw_hash         = sk->sk_hash;
                tw->tw_ipv6only     = 0;
                tw->tw_transparent  = inet->transparent;
                tw->tw_prot         = sk->sk_prot_creator;
                atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
                twsk_net_set(tw, sock_net(sk));
                setup_timer(&tw->tw_timer, tw_timer_handler, (unsigned long)tw);
                /*
                 * Because we use RCU lookups, we should not set tw_refcnt
                 * to a non-null value before everything is set up for this
                 * timewait socket.
                 */
                atomic_set(&tw->tw_refcnt, 0);

                __module_get(tw->tw_prot->owner);
        }

        return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);
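
/*
 * A sketch of how a protocol ties these pieces together, modelled on
 * tcp_time_wait() in tcp_minisocks.c (protocol-specific details elided):
 *
 *      tw = inet_twsk_alloc(sk, &tcp_death_row, state);
 *      if (tw) {
 *              ... copy protocol state into tw ...
 *              __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 *              inet_twsk_schedule(tw, timeo);
 *              inet_twsk_put(tw);      // drop the caller's reference
 *      }
 */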

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw)
{
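        /* If we deactivated a pending timer, we now own the reference it
         * held and must do the kill the handler would have done.
         */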
        if (del_timer_sync(&tw->tw_timer))
                inet_twsk_kill(tw);
}
EXPORT_SYMBOL(inet_twsk_deschedule);

void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
{
        /* timeout := RTO * 3.5
         *
         * 3.5 = 1+2+0.5 to wait for two retransmits.
         *
         * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
         * our ACK acking that FIN can be lost. If N subsequent retransmitted
         * FINs (or previous segments) are lost (probability of such event
         * is p^(N+1), where p is probability to lose single packet and
         * time to detect the loss is about RTO*(2^N - 1) with exponential
         * backoff). Normal timewait length is calculated so that we
         * waited at least for one retransmitted FIN (maximal RTO is 120sec).
         * [ BTW Linux, following BSD, violates this requirement, waiting
         *   only for 60sec; we should wait at least for 240 secs.
         *   Well, 240 consumes too much of resources 8)
         * ]
         * This interval is not reduced to catch old duplicates and
         * responses to our wandering segments living for two MSLs.
         * However, if we use PAWS to detect
         * old duplicates, we can reduce the interval to bounds required
         * by RTO, rather than MSL. So, if peer understands PAWS, we
         * kill tw bucket after 3.5*RTO (it is important that this number
         * is greater than TS tick!) and detect old duplicates with help
         * of PAWS.
         */

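        /* A timeout this short implies the PAWS-based recycling path
         * described above; flag the socket so the timer handler accounts
         * it as TIMEWAITKILLED.  mod_timer_pinned() returns 0 when the
         * timer was not yet armed, i.e. on the first schedule, which is
         * when the timer takes its reference on tw and its slot in the
         * death-row count.
         */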
        tw->tw_kill = timeo <= 4*HZ;
        if (!mod_timer_pinned(&tw->tw_timer, jiffies + timeo)) {
                atomic_inc(&tw->tw_refcnt);
                atomic_inc(&tw->tw_dr->tw_count);
        }
}
EXPORT_SYMBOL_GPL(inet_twsk_schedule);

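/* Walk the whole established hash and kill every TIME_WAIT socket of
 * @family whose network namespace is already dead (refcount zero).
 * Called from pernet exit paths; @twdr is unused here.
 */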
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
                     struct inet_timewait_death_row *twdr, int family)
{
        struct inet_timewait_sock *tw;
        struct sock *sk;
        struct hlist_nulls_node *node;
        unsigned int slot;

        for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
                struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
                cond_resched();
                rcu_read_lock();
restart:
                sk_nulls_for_each_rcu(sk, node, &head->chain) {
                        if (sk->sk_state != TCP_TIME_WAIT)
                                continue;
                        tw = inet_twsk(sk);
                        if ((tw->tw_family != family) ||
                                atomic_read(&twsk_net(tw)->count))
                                continue;

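                        /* Try to take a reference; if we get one, recheck
                         * the identity, since the object may have been
                         * freed and its slot reused under us (the twsk
                         * slab is SLAB_DESTROY_BY_RCU).
                         */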
                        if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
                                continue;

                        if (unlikely((tw->tw_family != family) ||
                                     atomic_read(&twsk_net(tw)->count))) {
                                inet_twsk_put(tw);
                                goto restart;
                        }

                        rcu_read_unlock();
                        local_bh_disable();
                        inet_twsk_deschedule(tw);
                        local_bh_enable();
                        inet_twsk_put(tw);
                        goto restart_rcu;
                }
                /* If the nulls value we got at the end of this lookup is
                 * not the expected one, we must restart lookup.
                 * We probably met an item that was moved to another chain.
                 */
                if (get_nulls_value(node) != slot)
                        goto restart;
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);