These changes are the raw update to the linux-4.4.6-rt14 kernel sources (kvmfornfv.git).
diff --git a/kernel/net/netfilter/x_tables.c b/kernel/net/netfilter/x_tables.c
index 51a459c..d4aaad7 100644
--- a/kernel/net/netfilter/x_tables.c
+++ b/kernel/net/netfilter/x_tables.c
@@ -67,9 +67,6 @@ static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
        [NFPROTO_IPV6]   = "ip6",
 };
 
-/* Allow this many total (re)entries. */
-static const unsigned int xt_jumpstack_multiplier = 2;
-
 /* Registration hooks for targets. */
 int xt_register_target(struct xt_target *target)
 {
@@ -658,35 +655,23 @@ EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
 
 struct xt_table_info *xt_alloc_table_info(unsigned int size)
 {
-       struct xt_table_info *newinfo;
-       int cpu;
+       struct xt_table_info *info = NULL;
+       size_t sz = sizeof(*info) + size;
 
        /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
        if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
                return NULL;
 
-       newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
-       if (!newinfo)
-               return NULL;
-
-       newinfo->size = size;
-
-       for_each_possible_cpu(cpu) {
-               if (size <= PAGE_SIZE)
-                       newinfo->entries[cpu] = kmalloc_node(size,
-                                                       GFP_KERNEL,
-                                                       cpu_to_node(cpu));
-               else
-                       newinfo->entries[cpu] = vmalloc_node(size,
-                                                       cpu_to_node(cpu));
-
-               if (newinfo->entries[cpu] == NULL) {
-                       xt_free_table_info(newinfo);
+       if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+               info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+       if (!info) {
+               info = vmalloc(sz);
+               if (!info)
                        return NULL;
-               }
        }
-
-       return newinfo;
+       memset(info, 0, sizeof(*info));
+       info->size = size;
+       return info;
 }
 EXPORT_SYMBOL(xt_alloc_table_info);
 
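Note: xt_alloc_table_info() now places the ruleset in a single blob appended to the struct (sz = sizeof(*info) + size) instead of one copy per possible CPU. For sizes up to PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER (order 3, i.e. 32 KiB with 4 KiB pages) it first tries kmalloc() with __GFP_NOWARN | __GFP_NORETRY, so a failed high-order allocation neither warns nor triggers the OOM killer, then falls back to vmalloc(). A minimal sketch of that fallback pattern, using a hypothetical example_alloc() helper that is not part of the patch:

	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	/* Hypothetical helper showing the kmalloc-then-vmalloc fallback
	 * used by the new xt_alloc_table_info(). Later kernels wrap this
	 * same pattern in kvmalloc().
	 */
	static void *example_alloc(size_t sz)
	{
		void *p = NULL;

		/* Prefer physically contiguous memory, but high-order
		 * allocations can fail under fragmentation; fail fast
		 * and quietly rather than retrying or warning.
		 */
		if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
			p = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);

		/* Fall back to virtually contiguous memory. */
		if (!p)
			p = vmalloc(sz);

		return p;
	}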
@@ -694,18 +679,13 @@ void xt_free_table_info(struct xt_table_info *info)
 {
        int cpu;
 
-       for_each_possible_cpu(cpu)
-               kvfree(info->entries[cpu]);
-
        if (info->jumpstack != NULL) {
                for_each_possible_cpu(cpu)
                        kvfree(info->jumpstack[cpu]);
                kvfree(info->jumpstack);
        }
 
-       free_percpu(info->stackptr);
-
-       kfree(info);
+       kvfree(info);
 }
 EXPORT_SYMBOL(xt_free_table_info);
 
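Note: with the per-cpu entries and the per-cpu stack pointer gone, the struct and its trailing ruleset blob form one allocation, so a single kvfree(info) releases everything. kvfree() is safe for both allocation paths above; simplified, it does the following (sketch of the mm/util.c helper, not part of the patch):

	#include <linux/mm.h>		/* is_vmalloc_addr() */
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	/* Simplified kvfree(): route the pointer back to whichever
	 * allocator it came from.
	 */
	static void example_kvfree(const void *addr)
	{
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}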
@@ -747,15 +727,14 @@ EXPORT_SYMBOL_GPL(xt_compat_unlock);
 DEFINE_PER_CPU(seqcount_t, xt_recseq);
 EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
 
+struct static_key xt_tee_enabled __read_mostly;
+EXPORT_SYMBOL_GPL(xt_tee_enabled);
+
 static int xt_jumpstack_alloc(struct xt_table_info *i)
 {
        unsigned int size;
        int cpu;
 
-       i->stackptr = alloc_percpu(unsigned int);
-       if (i->stackptr == NULL)
-               return -ENOMEM;
-
        size = sizeof(void **) * nr_cpu_ids;
        if (size > PAGE_SIZE)
                i->jumpstack = vzalloc(size);
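Note: the per-cpu stackptr is dropped because the traversal now keeps its stack index in a local variable, and the new xt_tee_enabled static key lets the packet hot path skip TEE bookkeeping entirely unless a TEE rule is installed: the guarded branch compiles to a NOP until the key is enabled. Condensed sketch of how the TEE target flips the key in this kernel version (mirroring net/netfilter/xt_TEE.c, with the checkentry/destroy bodies reduced to just the key operations):

	#include <linux/jump_label.h>
	#include <linux/netfilter/x_tables.h>	/* xt_tee_enabled */

	/* ~ tee_tg_check(): installing a TEE rule enables the key. */
	static int tee_check_sketch(void)
	{
		static_key_slow_inc(&xt_tee_enabled);
		return 0;
	}

	/* ~ tee_tg_destroy(): last TEE rule removed, key goes back off. */
	static void tee_destroy_sketch(void)
	{
		static_key_slow_dec(&xt_tee_enabled);
	}

Consumers test it with static_key_false(&xt_tee_enabled), as shown after the next hunk.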
@@ -764,8 +743,21 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
        if (i->jumpstack == NULL)
                return -ENOMEM;
 
-       i->stacksize *= xt_jumpstack_multiplier;
-       size = sizeof(void *) * i->stacksize;
+       /* ruleset without jumps -- no stack needed */
+       if (i->stacksize == 0)
+               return 0;
+
+       /* Jumpstack needs to be able to record two full callchains, one
+        * from the first rule set traversal, plus one table reentrancy
+        * via -j TEE without clobbering the callchain that brought us to
+        * TEE target.
+        *
+        * This is done by allocating two jumpstacks per cpu, on reentry
+        * the upper half of the stack is used.
+        *
+        * see the jumpstack setup in ipt_do_table() for more details.
+        */
+       size = sizeof(void *) * i->stacksize * 2u;
        for_each_possible_cpu(cpu) {
                if (size > PAGE_SIZE)
                        i->jumpstack[cpu] = vmalloc_node(size,
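Note: the doubled allocation (* 2u) is consumed in ipt_do_table(), which selects the upper half of the per-cpu jumpstack when the packet is a TEE duplicate. Condensed sketch of that setup, wrapped in a hypothetical pick_jumpstack() helper (the logic is from net/ipv4/netfilter/ip_tables.c in this kernel version; the helper itself is illustrative):

	static struct ipt_entry **pick_jumpstack(const struct xt_table_info *private,
						 unsigned int cpu)
	{
		struct ipt_entry **jumpstack;

		jumpstack = (struct ipt_entry **)private->jumpstack[cpu];

		/* On reentry via TEE the per-cpu flag nf_skb_duplicated
		 * is true, so this indexes into the upper half of the
		 * doubled stack and the outer callchain stays intact.
		 */
		if (static_key_false(&xt_tee_enabled))
			jumpstack += private->stacksize *
				     __this_cpu_read(nf_skb_duplicated);
		return jumpstack;
	}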
@@ -947,11 +939,9 @@ static int xt_table_seq_show(struct seq_file *seq, void *v)
 {
        struct xt_table *table = list_entry(v, struct xt_table, list);
 
-       if (strlen(table->name)) {
+       if (*table->name)
                seq_printf(seq, "%s\n", table->name);
-               return seq_has_overflowed(seq);
-       } else
-               return 0;
+       return 0;
 }
 
 static const struct seq_operations xt_table_seq_ops = {
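Note: seq_printf() returns void in this kernel version and the seq_file core detects overflow itself, retrying the ->show() callback with a larger buffer, so show callbacks no longer need to return seq_has_overflowed(); they simply emit and return 0. The strlen(table->name) test also reduces to the cheaper empty-string check *table->name. The two hunks below apply the same simplification to the match and target listings. An illustrative show callback following this idiom (hypothetical, not part of the patch):

	#include <linux/seq_file.h>

	/* Print and return 0; the seq_file core handles buffer
	 * overflow and retries transparently.
	 */
	static int example_seq_show(struct seq_file *seq, void *v)
	{
		const char *name = v;

		if (*name)		/* skip unnamed entries */
			seq_printf(seq, "%s\n", name);
		return 0;
	}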
@@ -1087,10 +1077,8 @@ static int xt_match_seq_show(struct seq_file *seq, void *v)
                if (trav->curr == trav->head)
                        return 0;
                match = list_entry(trav->curr, struct xt_match, list);
-               if (*match->name == '\0')
-                       return 0;
-               seq_printf(seq, "%s\n", match->name);
-               return seq_has_overflowed(seq);
+               if (*match->name)
+                       seq_printf(seq, "%s\n", match->name);
        }
        return 0;
 }
@@ -1142,10 +1130,8 @@ static int xt_target_seq_show(struct seq_file *seq, void *v)
                if (trav->curr == trav->head)
                        return 0;
                target = list_entry(trav->curr, struct xt_target, list);
-               if (*target->name == '\0')
-                       return 0;
-               seq_printf(seq, "%s\n", target->name);
-               return seq_has_overflowed(seq);
+               if (*target->name)
+                       seq_printf(seq, "%s\n", target->name);
        }
        return 0;
 }
@@ -1207,7 +1193,6 @@ struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
                if (!(hook_mask & 1))
                        continue;
                ops[i].hook     = fn;
-               ops[i].owner    = table->me;
                ops[i].pf       = table->af;
                ops[i].hooknum  = hooknum;
                ops[i].priority = table->priority;