Add the rt linux 4.1.3-rt3 kernel as base
diff --git a/kernel/net/ipv4/inet_timewait_sock.c b/kernel/net/ipv4/inet_timewait_sock.c
new file mode 100644 (file)
index 0000000..00ec8d5
--- /dev/null
+++ b/kernel/net/ipv4/inet_timewait_sock.c
@@ -0,0 +1,328 @@
+/*
+ * INET                An implementation of the TCP/IP protocol suite for the LINUX
+ *             operating system.  INET is implemented using the  BSD Socket
+ *             interface as the means of communication with the user level.
+ *
+ *             Generic TIME_WAIT sockets functions
+ *
+ *             From code originally in TCP
+ */
+
+#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <net/inet_hashtables.h>
+#include <net/inet_timewait_sock.h>
+#include <net/ip.h>
+
+
+/**
+ *     inet_twsk_unhash - unhash a timewait socket from established hash
+ *     @tw: timewait socket
+ *
+ *     unhash a timewait socket from established hash, if hashed.
+ *     ehash lock must be held by caller.
+ *     Returns 1 if caller should call inet_twsk_put() after lock release.
+ */
+int inet_twsk_unhash(struct inet_timewait_sock *tw)
+{
+       if (hlist_nulls_unhashed(&tw->tw_node))
+               return 0;
+
+       hlist_nulls_del_rcu(&tw->tw_node);
+       sk_nulls_node_init(&tw->tw_node);
+       /*
+        * We cannot call inet_twsk_put() ourself under lock,
+        * caller must call it for us.
+        */
+       return 1;
+}
+
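+/* A sketch of the caller contract for inet_twsk_unhash(): the unhash
+ * itself must happen under the ehash lock, and the reference drop after
+ * the lock is released. (inet_twsk_kill() below batches the results of
+ * both unhash helpers and drops them with a single atomic_sub() instead.)
+ *
+ *     spin_lock(lock);
+ *     released = inet_twsk_unhash(tw);
+ *     spin_unlock(lock);
+ *     if (released)
+ *             inet_twsk_put(tw);
+ */
+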
+/**
+ *     inet_twsk_bind_unhash - unhash a timewait socket from bind hash
+ *     @tw: timewait socket
+ *     @hashinfo: hashinfo pointer
+ *
+ *     unhash a timewait socket from bind hash, if hashed.
+ *     bind hash lock must be held by caller.
+ *     Returns 1 if caller should call inet_twsk_put() after lock release.
+ */
+int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+                         struct inet_hashinfo *hashinfo)
+{
+       struct inet_bind_bucket *tb = tw->tw_tb;
+
+       if (!tb)
+               return 0;
+
+       __hlist_del(&tw->tw_bind_node);
+       tw->tw_tb = NULL;
+       inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
+       /*
+        * We cannot call inet_twsk_put() ourself under lock,
+        * caller must call it for us.
+        */
+       return 1;
+}
+
+/* Must be called with locally disabled BHs. */
+static void inet_twsk_kill(struct inet_timewait_sock *tw)
+{
+       struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
+       struct inet_bind_hashbucket *bhead;
+       int refcnt;
+       /* Unlink from established hashes. */
+       spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
+
+       spin_lock(lock);
+       refcnt = inet_twsk_unhash(tw);
+       spin_unlock(lock);
+
+       /* Disassociate with bind bucket. */
+       bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
+                       hashinfo->bhash_size)];
+
+       spin_lock(&bhead->lock);
+       refcnt += inet_twsk_bind_unhash(tw, hashinfo);
+       spin_unlock(&bhead->lock);
+
+       BUG_ON(refcnt >= atomic_read(&tw->tw_refcnt));
+       atomic_sub(refcnt, &tw->tw_refcnt);
+       atomic_dec(&tw->tw_dr->tw_count);
+       inet_twsk_put(tw);
+}
+
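+/*
+ * Refcount arithmetic above, as a worked example (assuming the common
+ * tcp_time_wait() path): __inet_twsk_hashdance() sets tw_refcnt to 3,
+ * inet_twsk_schedule() takes a fourth reference for the timer, and the
+ * caller drops its own with inet_twsk_put(), leaving 3. When the timer
+ * fires, the two unhash operations report 2 references to drop
+ * (atomic_sub()), and the final inet_twsk_put() releases the timer's
+ * reference, freeing the socket unless a concurrent RCU lookup still
+ * holds it.
+ */
+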
+void inet_twsk_free(struct inet_timewait_sock *tw)
+{
+       struct module *owner = tw->tw_prot->owner;
+       twsk_destructor((struct sock *)tw);
+#ifdef SOCK_REFCNT_DEBUG
+       pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
+#endif
+       kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
+       module_put(owner);
+}
+
+void inet_twsk_put(struct inet_timewait_sock *tw)
+{
+       if (atomic_dec_and_test(&tw->tw_refcnt))
+               inet_twsk_free(tw);
+}
+EXPORT_SYMBOL_GPL(inet_twsk_put);
+
+static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
+                                  struct hlist_nulls_head *list)
+{
+       hlist_nulls_add_head_rcu(&tw->tw_node, list);
+}
+
+static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
+                                   struct hlist_head *list)
+{
+       hlist_add_head(&tw->tw_bind_node, list);
+}
+
+/*
+ * Enter the time wait state. This is called with locally disabled BH.
+ * Essentially we whip up a timewait bucket, copy the relevant info into it
+ * from the SK, and mess with hash chains and list linkage.
+ */
+void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+                          struct inet_hashinfo *hashinfo)
+{
+       const struct inet_sock *inet = inet_sk(sk);
+       const struct inet_connection_sock *icsk = inet_csk(sk);
+       struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
+       spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
+       struct inet_bind_hashbucket *bhead;
+       /* Step 1: Put TW into bind hash. Original socket stays there too.
+        * Note that any socket with inet->num != 0 MUST be bound in
+        * the binding cache, even if it is closed.
+        */
+       bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
+                       hashinfo->bhash_size)];
+       spin_lock(&bhead->lock);
+       tw->tw_tb = icsk->icsk_bind_hash;
+       WARN_ON(!icsk->icsk_bind_hash);
+       inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
+       spin_unlock(&bhead->lock);
+
+       spin_lock(lock);
+
+       /*
+        * Step 2: Hash TW into tcp ehash chain.
+        * Notes :
+        * - tw_refcnt is set to 3 because :
+        * - We have one reference from the bhash chain.
+        * - We have one reference from the ehash chain.
+        * - One reference is for our caller, which releases it with
+        *   inet_twsk_put() once the hashdance is done.
+        * We can use atomic_set() because the prior spin_lock()/spin_unlock()
+        * committed all tw fields into memory.
+        */
+       atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
+       inet_twsk_add_node_rcu(tw, &ehead->chain);
+
+       /* Step 3: Remove SK from hash chain */
+       if (__sk_nulls_del_node_init_rcu(sk))
+               sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+
+       spin_unlock(lock);
+}
+EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
+
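+/*
+ * A sketch of the expected calling sequence, modeled on tcp_time_wait()
+ * (error handling and the timeout computation omitted):
+ *
+ *     tw = inet_twsk_alloc(sk, &tcp_death_row, state);
+ *     if (tw) {
+ *             __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
+ *             inet_twsk_schedule(tw, timeo);
+ *             inet_twsk_put(tw);      (drop the caller's reference)
+ *     }
+ */
+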
+static void tw_timer_handler(unsigned long data)
+{
+       struct inet_timewait_sock *tw = (struct inet_timewait_sock *)data;
+
+       if (tw->tw_kill)
+               NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
+       else
+               NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
+       inet_twsk_kill(tw);
+}
+
+struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+                                          struct inet_timewait_death_row *dr,
+                                          const int state)
+{
+       struct inet_timewait_sock *tw;
+
+       if (atomic_read(&dr->tw_count) >= dr->sysctl_max_tw_buckets)
+               return NULL;
+
+       tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
+                             GFP_ATOMIC);
+       if (tw) {
+               const struct inet_sock *inet = inet_sk(sk);
+
+               kmemcheck_annotate_bitfield(tw, flags);
+
+               tw->tw_dr           = dr;
+               /* Give us an identity. */
+               tw->tw_daddr        = inet->inet_daddr;
+               tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
+               tw->tw_bound_dev_if = sk->sk_bound_dev_if;
+               tw->tw_tos          = inet->tos;
+               tw->tw_num          = inet->inet_num;
+               tw->tw_state        = TCP_TIME_WAIT;
+               tw->tw_substate     = state;
+               tw->tw_sport        = inet->inet_sport;
+               tw->tw_dport        = inet->inet_dport;
+               tw->tw_family       = sk->sk_family;
+               tw->tw_reuse        = sk->sk_reuse;
+               tw->tw_hash         = sk->sk_hash;
+               tw->tw_ipv6only     = 0;
+               tw->tw_transparent  = inet->transparent;
+               tw->tw_prot         = sk->sk_prot_creator;
+               atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
+               twsk_net_set(tw, sock_net(sk));
+               setup_timer(&tw->tw_timer, tw_timer_handler, (unsigned long)tw);
+               /*
+                * Because we use RCU lookups, we should not set tw_refcnt
+                * to a non null value before everything is setup for this
+                * timewait socket.
+                */
+               atomic_set(&tw->tw_refcnt, 0);
+
+               __module_get(tw->tw_prot->owner);
+       }
+
+       return tw;
+}
+EXPORT_SYMBOL_GPL(inet_twsk_alloc);
+
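+/*
+ * The tw_refcnt == 0 window in inet_twsk_alloc() matters because RCU
+ * lookups (e.g. __inet_lookup_established()) can see the new timewait
+ * socket before it is fully set up; they guard against this with a
+ * pattern like:
+ *
+ *     if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
+ *             goto out;       (not yet published, or being freed)
+ *
+ * so a zero refcount keeps a half-initialized bucket invisible to them.
+ */
+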
+/* These are always called from BH context.  See callers in
+ * tcp_input.c to verify this.
+ */
+
+/* This is for handling early-kills of TIME_WAIT sockets. */
+void inet_twsk_deschedule(struct inet_timewait_sock *tw)
+{
+       if (del_timer_sync(&tw->tw_timer))
+               inet_twsk_kill(tw);
+}
+EXPORT_SYMBOL(inet_twsk_deschedule);
+
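+/*
+ * Note: del_timer_sync() returns nonzero only if it deactivated a
+ * pending timer, so inet_twsk_kill() runs at most once per socket:
+ * either here on an early kill, or in tw_timer_handler() when the
+ * timeout expires, never both.
+ */
+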
+void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
+{
+       /* timeout := RTO * 3.5
+        *
+        * 3.5 = 1+2+0.5 to wait for two retransmits.
+        *
+        * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
+        * our ACK acking that FIN can be lost. If N subsequent retransmitted
+        * FINs (or previous segments) are lost (the probability of such an
+        * event is p^(N+1), where p is the probability of losing a single
+        * packet, and the time to detect the loss is about RTO*(2^N - 1)
+        * with exponential backoff). The normal timewait length is
+        * calculated so that we wait at least for one retransmitted FIN
+        * (the maximal RTO is 120sec).
+        * [ BTW Linux, following BSD, violates this requirement by waiting
+        *   only for 60sec; we should wait at least 240 secs.
+        *   Well, 240 consumes too much of resources 8)
+        * ]
+        * This interval is not reduced, so that we catch old duplicates and
+        * responses to our wandering segments living for two MSLs.
+        * However, if we use PAWS to detect old duplicates, we can reduce
+        * the interval to the bounds required by RTO rather than MSL. So,
+        * if the peer understands PAWS, we kill the tw bucket after 3.5*RTO
+        * (it is important that this number is greater than the TS tick!)
+        * and detect old duplicates with the help of PAWS.
+        */
+
+       tw->tw_kill = timeo <= 4*HZ;
+       if (!mod_timer_pinned(&tw->tw_timer, jiffies + timeo)) {
+               atomic_inc(&tw->tw_refcnt);
+               atomic_inc(&tw->tw_dr->tw_count);
+       }
+}
+EXPORT_SYMBOL_GPL(inet_twsk_schedule);
+
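+/*
+ * Worked example of the 3.5*RTO rationale above (numbers illustrative,
+ * not from this tree): with RTO = 200ms, a PAWS-capable peer lets the
+ * bucket die after 3.5 * 200ms = 700ms instead of the usual
+ * TCP_TIMEWAIT_LEN of 60s. Such short timeouts (timeo <= 4*HZ) set
+ * tw_kill, so they are accounted as LINUX_MIB_TIMEWAITKILLED rather
+ * than LINUX_MIB_TIMEWAITED when the timer fires.
+ */
+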
+void inet_twsk_purge(struct inet_hashinfo *hashinfo,
+                    struct inet_timewait_death_row *twdr, int family)
+{
+       struct inet_timewait_sock *tw;
+       struct sock *sk;
+       struct hlist_nulls_node *node;
+       unsigned int slot;
+
+       for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
+               struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
+restart_rcu:
+               cond_resched();
+               rcu_read_lock();
+restart:
+               sk_nulls_for_each_rcu(sk, node, &head->chain) {
+                       if (sk->sk_state != TCP_TIME_WAIT)
+                               continue;
+                       tw = inet_twsk(sk);
+                       if ((tw->tw_family != family) ||
+                               atomic_read(&twsk_net(tw)->count))
+                               continue;
+
+                       if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
+                               continue;
+
+                       if (unlikely((tw->tw_family != family) ||
+                                    atomic_read(&twsk_net(tw)->count))) {
+                               inet_twsk_put(tw);
+                               goto restart;
+                       }
+
+                       rcu_read_unlock();
+                       local_bh_disable();
+                       inet_twsk_deschedule(tw);
+                       local_bh_enable();
+                       inet_twsk_put(tw);
+                       goto restart_rcu;
+               }
+               /* If the nulls value we got at the end of this lookup is
+                * not the expected one, we must restart lookup.
+                * We probably met an item that was moved to another chain.
+                */
+               if (get_nulls_value(node) != slot)
+                       goto restart;
+               rcu_read_unlock();
+       }
+}
+EXPORT_SYMBOL_GPL(inet_twsk_purge);
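+
+/*
+ * Typical use is at network-namespace teardown; e.g. TCP's
+ * tcp_sk_exit_batch() does (a sketch):
+ *
+ *     inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
+ */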