Merge "[l2l3 stack] implements new nd state machine & nd buffering"
[samplevnf.git] / VNFs / DPPD-PROX / handle_qinq_decap6.c
/*
// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#include <rte_cycles.h>
#include <rte_table_hash.h>

#include "prox_lua.h"
#include "prox_lua_types.h"

#include "handle_qinq_encap6.h"
#include "log.h"
#include "lconf.h"
#include "task_init.h"
#include "task_base.h"
#include "tx_pkt.h"
#include "defines.h"
#include "pkt_prototypes.h"
#include "prox_assert.h"
#include "hash_utils.h"
#include "prefetch.h"
#include "hash_entry_types.h"
#include "prox_cfg.h"
#include "quit.h"
#include "prox_shared.h"

/* Packets must all be IPv6, always store QinQ tags for lookup (not configurable) */
struct task_qinq_decap6 {
	struct task_base                base;
	struct rte_table_hash           *cpe_table;
	uint16_t                        *user_table;
	uint32_t                        bucket_index;
	struct ether_addr               edaddr;
	struct rte_lpm6                 *rte_lpm6;
	void*                           period_data; /* used if using dual stack */
	void (*period_func)(void* data);
	uint64_t                        cpe_timeout;
};

void update_arp_entries6(void* data);

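/*
 * Task setup: resolve the per-socket shared user table and IPv6 LPM
 * (created from the Lua configuration on first use), and install
 * update_arp_entries6() as this core's periodic function so that learned
 * CPE entries can be aged out. Any previously installed periodic function
 * is chained through period_func/period_data (dual-stack case).
 */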
static void init_task_qinq_decap6(struct task_base *tbase, struct task_args *targ)
{
	struct task_qinq_decap6 *task = (struct task_qinq_decap6 *)tbase;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	task->edaddr = targ->edaddr;
	task->cpe_table = targ->cpe_table;
	task->cpe_timeout = msec_to_tsc(targ->cpe_table_timeout_ms);

	if (targ->cpe_table_timeout_ms) {
		/* Chain any periodic function installed before us, then take over */
		if (targ->lconf->period_func) {
			task->period_func = targ->lconf->period_func;
			task->period_data = targ->lconf->period_data;
		}
		targ->lconf->period_func = update_arp_entries6;
		targ->lconf->period_data = tbase;
		targ->lconf->period_timeout = msec_to_tsc(500) / NUM_VCPES;
	}

	task->user_table = prox_sh_find_socket(socket_id, "user_table");
	if (!task->user_table) {
		PROX_PANIC(!strcmp(targ->user_table, ""), "No user table defined\n");
		int ret = lua_to_user_table(prox_lua(), GLOBAL, targ->user_table, socket_id, &task->user_table);
		PROX_PANIC(ret, "Failed to create user table from config:\n%s\n", get_lua_to_errors());
		prox_sh_add_socket(socket_id, "user_table", task->user_table);
	}

	struct lpm6 *lpm = prox_sh_find_socket(socket_id, "lpm6");
	if (!lpm) {
		struct lpm6 *lpm6;
		int ret;

		ret = lua_to_lpm6(prox_lua(), GLOBAL, "lpm6", socket_id, &lpm6);
		PROX_PANIC(ret, "Failed to read lpm6 from config:\n%s\n", get_lua_to_errors());
		prox_sh_add_socket(socket_id, "lpm6", lpm6);
		/* Use the table we just created; lpm was NULL in this branch */
		lpm = lpm6;
	}
	task->rte_lpm6 = lpm->rte_lpm6;
}

static void early_init(struct task_args *targ)
{
	if (!targ->cpe_table) {
		init_cpe6_table(targ);
	}
}

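/*
 * Per-packet work: learn/refresh the CPE entry for this QinQ tag pair
 * (keyed on the packet's IPv6 source address), then strip the two VLAN
 * headers and rewrite the destination MAC before the packet is forwarded.
 */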
static inline uint8_t handle_qinq_decap6(struct task_qinq_decap6 *task, struct rte_mbuf *mbuf)
{
	struct qinq_hdr *pqinq = rte_pktmbuf_mtod(mbuf, struct qinq_hdr *);
	struct ipv6_hdr *pip6 = (struct ipv6_hdr *)(pqinq + 1);

	/* TCI fields are in network byte order; mask off PCP/DEI, keep the VLAN ID */
	uint16_t svlan = pqinq->svlan.vlan_tci & 0xFF0F;
	uint16_t cvlan = pqinq->cvlan.vlan_tci & 0xFF0F;

	struct cpe_data entry;
	entry.mac_port_8bytes = *((uint64_t *)(((uint8_t *)pqinq) + 5)) << 16;
	entry.qinq_svlan = svlan;
	entry.qinq_cvlan = cvlan;
	entry.user = task->user_table[PKT_TO_LUTQINQ(svlan, cvlan)];
	entry.tsc = rte_rdtsc() + task->cpe_timeout;

	int key_found = 0;
	void* entry_in_hash = NULL;
	int ret = rte_table_hash_ext_dosig_ops.
		f_add(task->cpe_table, pip6->src_addr, &entry, &key_found, &entry_in_hash);

	if (unlikely(ret)) {
		plogx_err("Failed to add key " IPv6_BYTES_FMT "\n", IPv6_BYTES(pip6->src_addr));
		return OUT_DISCARD;
	}

	/* Strip both VLAN headers and restore the IPv6 EtherType */
	pqinq = (struct qinq_hdr *)rte_pktmbuf_adj(mbuf, 2 * sizeof(struct vlan_hdr));
	PROX_ASSERT(pqinq);
	pqinq->ether_type = ETYPE_IPv6;
	/* Set the destination MAC address */
	ether_addr_copy(&task->edaddr, &pqinq->d_addr);
	return 0;
}

static int handle_qinq_decap6_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_qinq_decap6 *task = (struct task_qinq_decap6 *)tbase;
	uint8_t out[MAX_PKT_BURST];
	uint16_t j;

	prefetch_first(mbufs, n_pkts);

	for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) {
#ifdef PROX_PREFETCH_OFFSET
		PREFETCH0(mbufs[j + PREFETCH_OFFSET]);
		PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *));
#endif
		out[j] = handle_qinq_decap6(task, mbufs[j]);
	}
#ifdef PROX_PREFETCH_OFFSET
	PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *));
	for (; j < n_pkts; ++j) {
		out[j] = handle_qinq_decap6(task, mbufs[j]);
	}
#endif

	return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}

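/*
 * Periodic callback: scan one bucket of the CPE table per invocation and
 * delete entries whose expiry timestamp (entry->tsc) has passed, then
 * chain to the previously installed periodic function, if any.
 */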
void update_arp_entries6(void* data)
{
	uint64_t cur_tsc = rte_rdtsc();
	struct task_qinq_decap6 *task = (struct task_qinq_decap6 *)data;

	struct cpe_data *entries[4] = {0};
	void *key[4] = {0};
	uint64_t n_buckets = get_bucket(task->cpe_table, task->bucket_index, key, (void**)entries);

	for (uint8_t i = 0; i < 4 && entries[i]; ++i) {
		if (entries[i]->tsc < cur_tsc) {
			int key_found = 0;
			void* entry = 0;
			rte_table_hash_ext_dosig_ops.f_delete(task->cpe_table, key[i], &key_found, entry);
		}
	}

	task->bucket_index++;
	task->bucket_index &= (n_buckets - 1);

	if (task->period_func) {
		task->period_func(task->period_data);
	}
}

static struct task_init task_init_qinq_decap6 = {
	.mode = QINQ_DECAP6,
	.mode_str = "qinqdecapv6",
	.early_init = early_init,
	.init = init_task_qinq_decap6,
	.handle = handle_qinq_decap6_bulk,
	.size = sizeof(struct task_qinq_decap6)
};

__attribute__((constructor)) static void reg_task_qinq_decap6(void)
{
	reg_task(&task_init_qinq_decap6);
}
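
/*
 * Illustrative only (not part of this file): a PROX configuration section
 * selects this handler through the mode string registered above, along the
 * lines of
 *
 *   [core 1]
 *   task=0
 *   mode=qinqdecapv6
 *   rx port=cpe0
 *   tx port=inet0
 *
 * The port names above are made up, and the exact per-task keys (user table,
 * CPE table timeout, ...) are assumptions; see prox_args.c and the shipped
 * BNG configuration files for the authoritative key names.
 */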