Add the RT Linux 4.1.3-rt3 kernel as base
diff --git a/kernel/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/kernel/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
new file mode 100644
index 0000000..f0dfe92
--- /dev/null
+++ b/kernel/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -0,0 +1,469 @@
+/* ip_conntrack proc compat - based on ip_conntrack_standalone.c
+ *
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2006-2010 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/percpu.h>
+#include <linux/security.h>
+#include <net/net_namespace.h>
+
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <linux/rculist_nulls.h>
+#include <linux/export.h>
+
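+/* Iterator state for /proc/net/ip_conntrack: per-netns seq_file data plus
+ * the conntrack hash bucket currently being walked.
+ */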
+struct ct_iter_state {
+       struct seq_net_private p;
+       unsigned int bucket;
+};
+
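+/* Find the first conntrack entry, scanning hash buckets under RCU until a
+ * non-empty chain is found; returns NULL if the table is empty.
+ */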
+static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
+{
+       struct net *net = seq_file_net(seq);
+       struct ct_iter_state *st = seq->private;
+       struct hlist_nulls_node *n;
+
+       for (st->bucket = 0;
+            st->bucket < net->ct.htable_size;
+            st->bucket++) {
+               n = rcu_dereference(
+                       hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
+               if (!is_a_nulls(n))
+                       return n;
+       }
+       return NULL;
+}
+
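+/* Advance to the next entry.  When a nulls marker is hit and its value still
+ * matches the current bucket, the chain ended normally and we move on to the
+ * next bucket; otherwise the entry was moved by a concurrent rehash and the
+ * current bucket is rescanned from its head.
+ */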
+static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
+                                     struct hlist_nulls_node *head)
+{
+       struct net *net = seq_file_net(seq);
+       struct ct_iter_state *st = seq->private;
+
+       head = rcu_dereference(hlist_nulls_next_rcu(head));
+       while (is_a_nulls(head)) {
+               if (likely(get_nulls_value(head) == st->bucket)) {
+                       if (++st->bucket >= net->ct.htable_size)
+                               return NULL;
+               }
+               head = rcu_dereference(
+                       hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
+       }
+       return head;
+}
+
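+/* Skip forward to the entry at position 'pos'; NULL if the table has fewer
+ * entries than that.
+ */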
+static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
+{
+       struct hlist_nulls_node *head = ct_get_first(seq);
+
+       if (head)
+               while (pos && (head = ct_get_next(seq, head)))
+                       pos--;
+       return pos ? NULL : head;
+}
+
+static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
+       __acquires(RCU)
+{
+       rcu_read_lock();
+       return ct_get_idx(seq, *pos);
+}
+
+static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+       (*pos)++;
+       return ct_get_next(s, v);
+}
+
+static void ct_seq_stop(struct seq_file *s, void *v)
+       __releases(RCU)
+{
+       rcu_read_unlock();
+}
+
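+/* Print the "secctx=..." field; compiled out to a no-op when
+ * CONFIG_NF_CONNTRACK_SECMARK is not set.
+ */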
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+static void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
+{
+       int ret;
+       u32 len;
+       char *secctx;
+
+       ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
+       if (ret)
+               return;
+
+       seq_printf(s, "secctx=%s ", secctx);
+
+       security_release_secctx(secctx, len);
+}
+#else
+static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
+{
+}
+#endif
+
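+/* Print one /proc/net/ip_conntrack line for the ORIGINAL direction of an
+ * IPv4 entry.  An illustrative (made-up) line, wrapped here for width, with
+ * optional fields depending on the kernel configuration:
+ *
+ *   tcp      6 431999 ESTABLISHED src=192.168.0.2 dst=10.0.0.1
+ *   sport=34567 dport=80 src=10.0.0.1 dst=192.168.0.2 sport=80
+ *   dport=34567 [ASSURED] mark=0 use=1
+ */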
+static int ct_seq_show(struct seq_file *s, void *v)
+{
+       struct nf_conntrack_tuple_hash *hash = v;
+       struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
+       const struct nf_conntrack_l3proto *l3proto;
+       const struct nf_conntrack_l4proto *l4proto;
+       int ret = 0;
+
+       NF_CT_ASSERT(ct);
+       if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+               return 0;
+
+       /* we only want to print DIR_ORIGINAL */
+       if (NF_CT_DIRECTION(hash))
+               goto release;
+       if (nf_ct_l3num(ct) != AF_INET)
+               goto release;
+
+       l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
+       NF_CT_ASSERT(l3proto);
+       l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+       NF_CT_ASSERT(l4proto);
+
+       ret = -ENOSPC;
+       seq_printf(s, "%-8s %u %ld ",
+                  l4proto->name, nf_ct_protonum(ct),
+                  timer_pending(&ct->timeout)
+                  ? (long)(ct->timeout.expires - jiffies)/HZ : 0);
+
+       if (l4proto->print_conntrack)
+               l4proto->print_conntrack(s, ct);
+
+       if (seq_has_overflowed(s))
+               goto release;
+
+       print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+                   l3proto, l4proto);
+
+       if (seq_has_overflowed(s))
+               goto release;
+
+       if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
+               goto release;
+
+       if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
+               seq_printf(s, "[UNREPLIED] ");
+
+       print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+                   l3proto, l4proto);
+
+       if (seq_has_overflowed(s))
+               goto release;
+
+       if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
+               goto release;
+
+       if (test_bit(IPS_ASSURED_BIT, &ct->status))
+               seq_printf(s, "[ASSURED] ");
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+       seq_printf(s, "mark=%u ", ct->mark);
+#endif
+
+       ct_show_secctx(s, ct);
+
+       seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use));
+
+       if (seq_has_overflowed(s))
+               goto release;
+
+       ret = 0;
+release:
+       nf_ct_put(ct);
+       return ret;
+}
+
+static const struct seq_operations ct_seq_ops = {
+       .start = ct_seq_start,
+       .next  = ct_seq_next,
+       .stop  = ct_seq_stop,
+       .show  = ct_seq_show
+};
+
+static int ct_open(struct inode *inode, struct file *file)
+{
+       return seq_open_net(inode, file, &ct_seq_ops,
+                           sizeof(struct ct_iter_state));
+}
+
+static const struct file_operations ct_file_ops = {
+       .owner   = THIS_MODULE,
+       .open    = ct_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release_net,
+};
+
+/* expectations: /proc/net/ip_conntrack_expect */
+struct ct_expect_iter_state {
+       struct seq_net_private p;
+       unsigned int bucket;
+};
+
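+/* Expectation table walkers: same scheme as the conntrack walkers above,
+ * but the expectation hash uses plain RCU hlists, so a NULL head simply
+ * means "advance to the next bucket".
+ */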
+static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
+{
+       struct net *net = seq_file_net(seq);
+       struct ct_expect_iter_state *st = seq->private;
+       struct hlist_node *n;
+
+       for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
+               n = rcu_dereference(
+                       hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
+               if (n)
+                       return n;
+       }
+       return NULL;
+}
+
+static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
+                                            struct hlist_node *head)
+{
+       struct net *net = seq_file_net(seq);
+       struct ct_expect_iter_state *st = seq->private;
+
+       head = rcu_dereference(hlist_next_rcu(head));
+       while (head == NULL) {
+               if (++st->bucket >= nf_ct_expect_hsize)
+                       return NULL;
+               head = rcu_dereference(
+                       hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
+       }
+       return head;
+}
+
+static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
+{
+       struct hlist_node *head = ct_expect_get_first(seq);
+
+       if (head)
+               while (pos && (head = ct_expect_get_next(seq, head)))
+                       pos--;
+       return pos ? NULL : head;
+}
+
+static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
+       __acquires(RCU)
+{
+       rcu_read_lock();
+       return ct_expect_get_idx(seq, *pos);
+}
+
+static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       (*pos)++;
+       return ct_expect_get_next(seq, v);
+}
+
+static void exp_seq_stop(struct seq_file *seq, void *v)
+       __releases(RCU)
+{
+       rcu_read_unlock();
+}
+
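+/* Print one /proc/net/ip_conntrack_expect line: remaining timeout in seconds
+ * (or "-" if no timer is set), the protocol number and the expected tuple.
+ * Only AF_INET expectations are shown; the values below are illustrative:
+ *
+ *   299 proto=6 src=192.168.0.2 dst=10.0.0.1 sport=0 dport=55000
+ */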
+static int exp_seq_show(struct seq_file *s, void *v)
+{
+       struct nf_conntrack_expect *exp;
+       const struct hlist_node *n = v;
+
+       exp = hlist_entry(n, struct nf_conntrack_expect, hnode);
+
+       if (exp->tuple.src.l3num != AF_INET)
+               return 0;
+
+       if (exp->timeout.function)
+               seq_printf(s, "%ld ", timer_pending(&exp->timeout)
+                          ? (long)(exp->timeout.expires - jiffies)/HZ : 0);
+       else
+               seq_printf(s, "- ");
+
+       seq_printf(s, "proto=%u ", exp->tuple.dst.protonum);
+
+       print_tuple(s, &exp->tuple,
+                   __nf_ct_l3proto_find(exp->tuple.src.l3num),
+                   __nf_ct_l4proto_find(exp->tuple.src.l3num,
+                                        exp->tuple.dst.protonum));
+       seq_putc(s, '\n');
+
+       return 0;
+}
+
+static const struct seq_operations exp_seq_ops = {
+       .start = exp_seq_start,
+       .next = exp_seq_next,
+       .stop = exp_seq_stop,
+       .show = exp_seq_show
+};
+
+static int exp_open(struct inode *inode, struct file *file)
+{
+       return seq_open_net(inode, file, &exp_seq_ops,
+                           sizeof(struct ct_expect_iter_state));
+}
+
+static const struct file_operations ip_exp_file_ops = {
+       .owner   = THIS_MODULE,
+       .open    = exp_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release_net,
+};
+
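+/* /proc/net/stat/ip_conntrack: one row of hexadecimal counters per possible
+ * CPU.  *pos 0 yields the header token, *pos N maps to CPU N-1; the first
+ * column of every row repeats the global conntrack entry count.
+ */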
+static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
+{
+       struct net *net = seq_file_net(seq);
+       int cpu;
+
+       if (*pos == 0)
+               return SEQ_START_TOKEN;
+
+       for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
+               if (!cpu_possible(cpu))
+                       continue;
+               *pos = cpu+1;
+               return per_cpu_ptr(net->ct.stat, cpu);
+       }
+
+       return NULL;
+}
+
+static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       struct net *net = seq_file_net(seq);
+       int cpu;
+
+       for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
+               if (!cpu_possible(cpu))
+                       continue;
+               *pos = cpu+1;
+               return per_cpu_ptr(net->ct.stat, cpu);
+       }
+
+       return NULL;
+}
+
+static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int ct_cpu_seq_show(struct seq_file *seq, void *v)
+{
+       struct net *net = seq_file_net(seq);
+       unsigned int nr_conntracks = atomic_read(&net->ct.count);
+       const struct ip_conntrack_stat *st = v;
+
+       if (v == SEQ_START_TOKEN) {
+               seq_printf(seq, "entries  searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error  expect_new expect_create expect_delete search_restart\n");
+               return 0;
+       }
+
+       seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
+                       "%08x %08x %08x %08x %08x  %08x %08x %08x %08x\n",
+                  nr_conntracks,
+                  st->searched,
+                  st->found,
+                  st->new,
+                  st->invalid,
+                  st->ignore,
+                  st->delete,
+                  st->delete_list,
+                  st->insert,
+                  st->insert_failed,
+                  st->drop,
+                  st->early_drop,
+                  st->error,
+
+                  st->expect_new,
+                  st->expect_create,
+                  st->expect_delete,
+                  st->search_restart
+               );
+       return 0;
+}
+
+static const struct seq_operations ct_cpu_seq_ops = {
+       .start  = ct_cpu_seq_start,
+       .next   = ct_cpu_seq_next,
+       .stop   = ct_cpu_seq_stop,
+       .show   = ct_cpu_seq_show,
+};
+
+static int ct_cpu_seq_open(struct inode *inode, struct file *file)
+{
+       return seq_open_net(inode, file, &ct_cpu_seq_ops,
+                           sizeof(struct seq_net_private));
+}
+
+static const struct file_operations ct_cpu_seq_fops = {
+       .owner   = THIS_MODULE,
+       .open    = ct_cpu_seq_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release_net,
+};
+
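+/* Create the compat proc entries for one network namespace:
+ * /proc/net/ip_conntrack, /proc/net/ip_conntrack_expect and
+ * /proc/net/stat/ip_conntrack, unwinding on failure.
+ */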
+static int __net_init ip_conntrack_net_init(struct net *net)
+{
+       struct proc_dir_entry *proc, *proc_exp, *proc_stat;
+
+       proc = proc_create("ip_conntrack", 0440, net->proc_net, &ct_file_ops);
+       if (!proc)
+               goto err1;
+
+       proc_exp = proc_create("ip_conntrack_expect", 0440, net->proc_net,
+                              &ip_exp_file_ops);
+       if (!proc_exp)
+               goto err2;
+
+       proc_stat = proc_create("ip_conntrack", S_IRUGO,
+                               net->proc_net_stat, &ct_cpu_seq_fops);
+       if (!proc_stat)
+               goto err3;
+       return 0;
+
+err3:
+       remove_proc_entry("ip_conntrack_expect", net->proc_net);
+err2:
+       remove_proc_entry("ip_conntrack", net->proc_net);
+err1:
+       return -ENOMEM;
+}
+
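+/* Remove the per-namespace compat proc entries created above. */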
+static void __net_exit ip_conntrack_net_exit(struct net *net)
+{
+       remove_proc_entry("ip_conntrack", net->proc_net_stat);
+       remove_proc_entry("ip_conntrack_expect", net->proc_net);
+       remove_proc_entry("ip_conntrack", net->proc_net);
+}
+
+static struct pernet_operations ip_conntrack_net_ops = {
+       .init = ip_conntrack_net_init,
+       .exit = ip_conntrack_net_exit,
+};
+
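+/* Register/unregister the pernet operations so the compat proc files are
+ * created in every network namespace.
+ */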
+int __init nf_conntrack_ipv4_compat_init(void)
+{
+       return register_pernet_subsys(&ip_conntrack_net_ops);
+}
+
+void __exit nf_conntrack_ipv4_compat_fini(void)
+{
+       unregister_pernet_subsys(&ip_conntrack_net_ops);
+}