Added initial support for NDP (IPv6)
[samplevnf.git] / VNFs / DPPD-PROX / handle_routing.c
1 /*
2 // Copyright (c) 2010-2017 Intel Corporation
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //     http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 */
16
17 #include <rte_lpm.h>
18 #include <rte_cycles.h>
19 #include <string.h>
20 #include <rte_version.h>
21 #include <rte_ip.h>
22 #include <rte_byteorder.h>
23
24 #include "prox_lua.h"
25 #include "prox_lua_types.h"
26
27 #include "quit.h"
28 #include "log.h"
29 #include "handle_routing.h"
30 #include "tx_pkt.h"
31 #include "gre.h"
32 #include "lconf.h"
33 #include "prox_port_cfg.h"
34 #include "etypes.h"
35 #include "prefetch.h"
36 #include "hash_entry_types.h"
37 #include "mpls.h"
38 #include "qinq.h"
39 #include "prox_cfg.h"
40 #include "prox_shared.h"
41 #include "prox_cksum.h"
42 #include "mbuf_utils.h"
43 #include "prox_compat.h"
44
/* Per-task state for the "routing" mode: an IPv4 LPM table plus the
 * next-hop array it indexes, and pre-computed L2 rewrite material. */
struct task_routing {
	struct task_base                base;
	uint8_t                         runtime_flags;   /* TASK_MPLS_TAGGING / TASK_MARK etc. */
	struct lcore_cfg                *lconf;
	struct rte_lpm                  *ipv4_lpm;       /* longest-prefix-match table (shared per socket) */
	struct next_hop                 *next_hops;      /* indexed by LPM lookup result */
	uint32_t                        number_free_rules; /* remaining capacity for runtime route adds */
	uint16_t                        qinq_tag;        /* configurable outer QinQ ether_type */
	uint32_t                        marking[4];      /* per-color MPLS marking words, pre-swapped (see init) */
	/* Per-output: low 6 bytes = source MAC, high 2 bytes = ether_type
	 * (IPv4 or MPLSU), so one 64-bit store rewrites s_addr + ether_type. */
	uint64_t                        src_mac[PROX_MAX_PORTS];
};
56
57 static void routing_update(struct task_base *tbase, void **data, uint16_t n_msgs)
58 {
59         struct task_routing *task = (struct task_routing *)tbase;
60         struct route_msg *msg;
61
62         for (uint16_t i = 0; i < n_msgs; ++i) {
63                 msg = (struct route_msg *)data[i];
64
65                 if (task->number_free_rules == 0) {
66                         plog_warn("Failed adding route: %u.%u.%u.%u/%u: lpm table full\n",
67                                 msg->ip_bytes[0], msg->ip_bytes[1], msg->ip_bytes[2],
68                                 msg->ip_bytes[3], msg->prefix);
69                 } else {
70                         if (rte_lpm_add(task->ipv4_lpm, rte_bswap32(msg->ip), msg->prefix, msg->nh)) {
71                                 plog_warn("Failed adding route: %u.%u.%u.%u/%u\n",
72                                         msg->ip_bytes[0], msg->ip_bytes[1], msg->ip_bytes[2],
73                                         msg->ip_bytes[3], msg->prefix);
74                         } else {
75                                 task->number_free_rules--;
76                         }
77                 }
78         }
79 }
80
81 static void init_task_routing(struct task_base *tbase, struct task_args *targ)
82 {
83         struct task_routing *task = (struct task_routing *)tbase;
84         const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
85         struct lpm4 *lpm;
86
87         task->lconf = targ->lconf;
88         task->qinq_tag = targ->qinq_tag;
89         task->runtime_flags = targ->runtime_flags;
90
91         PROX_PANIC(!strcmp(targ->route_table, ""), "route table not specified\n");
92         if (targ->flags & TASK_ARG_LOCAL_LPM) {
93                 int ret = lua_to_lpm4(prox_lua(), GLOBAL, targ->route_table, socket_id, &lpm);
94                 PROX_PANIC(ret, "Failed to load IPv4 LPM:\n%s\n", get_lua_to_errors());
95                 prox_sh_add_socket(socket_id, targ->route_table, lpm);
96
97                 task->number_free_rules = lpm->n_free_rules;
98         }
99         else {
100                 lpm = prox_sh_find_socket(socket_id, targ->route_table);
101                 if (!lpm) {
102                         int ret = lua_to_lpm4(prox_lua(), GLOBAL, targ->route_table, socket_id, &lpm);
103                         PROX_PANIC(ret, "Failed to load IPv4 LPM:\n%s\n", get_lua_to_errors());
104                         prox_sh_add_socket(socket_id, targ->route_table, lpm);
105                 }
106         }
107         task->ipv4_lpm = lpm->rte_lpm;
108         task->next_hops = lpm->next_hops;
109         task->number_free_rules = lpm->n_free_rules;
110
111         for (uint32_t i = 0; i < MAX_HOP_INDEX; i++) {
112                 int tx_port = task->next_hops[i].mac_port.out_idx;
113                 if ((tx_port > targ->nb_txports - 1) && (tx_port > targ->nb_txrings - 1)) {
114                         PROX_PANIC(1, "Routing Table contains port %d but only %d tx port/ %d ring:\n", tx_port, targ->nb_txports, targ->nb_txrings);
115                 }
116         }
117
118         if (targ->nb_txrings) {
119                 struct task_args *dtarg;
120                 struct core_task ct;
121                 for (uint32_t i = 0; i < targ->nb_txrings; ++i) {
122                         ct = targ->core_task_set[0].core_task[i];
123                         dtarg = core_targ_get(ct.core, ct.task);
124                         dtarg = find_reachable_task_sending_to_port(dtarg);
125                         if (task->runtime_flags & TASK_MPLS_TAGGING) {
126                                 task->src_mac[i] = (0x0000ffffffffffff & ((*(uint64_t*)&prox_port_cfg[dtarg->tx_port_queue[0].port].eth_addr))) | ((uint64_t)ETYPE_MPLSU << (64 - 16));
127                         } else {
128                                 task->src_mac[i] = (0x0000ffffffffffff & ((*(uint64_t*)&prox_port_cfg[dtarg->tx_port_queue[0].port].eth_addr))) | ((uint64_t)ETYPE_IPv4 << (64 - 16));
129                         }
130                 }
131         } else {
132                 for (uint32_t i = 0; i < targ->nb_txports; ++i) {
133                         if (task->runtime_flags & TASK_MPLS_TAGGING) {
134                                 task->src_mac[i] = (0x0000ffffffffffff & ((*(uint64_t*)&prox_port_cfg[targ->tx_port_queue[i].port].eth_addr))) | ((uint64_t)ETYPE_MPLSU << (64 - 16));
135                         } else {
136                                 task->src_mac[i] = (0x0000ffffffffffff & ((*(uint64_t*)&prox_port_cfg[targ->tx_port_queue[i].port].eth_addr))) | ((uint64_t)ETYPE_IPv4 << (64 - 16));
137                         }
138                 }
139         }
140
141         for (uint32_t i = 0; i < 4; ++i) {
142                 task->marking[i] = rte_bswap32(targ->marking[i] << 9);
143         }
144
145         struct prox_port_cfg *port = find_reachable_port(targ);
146
147         targ->lconf->ctrl_func_m[targ->task] = routing_update;
148         targ->lconf->ctrl_timeout = freq_to_tsc(20);
149 }
150
151 static inline uint8_t handle_routing(struct task_routing *task, struct rte_mbuf *mbuf);
152
153 static int handle_routing_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
154 {
155         struct task_routing *task = (struct task_routing *)tbase;
156         uint8_t out[MAX_PKT_BURST];
157         uint16_t j;
158
159         prefetch_first(mbufs, n_pkts);
160
161         for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) {
162 #ifdef PROX_PREFETCH_OFFSET
163                 PREFETCH0(mbufs[j + PREFETCH_OFFSET]);
164                 PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *));
165 #endif
166                 out[j] = handle_routing(task, mbufs[j]);
167         }
168 #ifdef PROX_PREFETCH_OFFSET
169         PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *));
170         for (; j < n_pkts; ++j) {
171                 out[j] = handle_routing(task, mbufs[j]);
172         }
173 #endif
174
175         return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
176 }
177
/* Rewrite the Ethernet header in place for the chosen next hop.  Both
 * stores are deliberate 8-byte writes to 6-byte MAC fields: the first
 * spills 2 bytes into s_addr (immediately overwritten by the second),
 * and the second spills into ether_type, which is exactly how the
 * ether_type packed into the top 16 bits of src_mac[] (see
 * init_task_routing) gets written.
 * NOTE(review): relies on type-punned, potentially unaligned uint64_t
 * access -- assumed safe on the targets PROX builds for; confirm. */
static void set_l2(struct task_routing *task, struct rte_mbuf *mbuf, uint8_t nh_idx)
{
	prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
	*((uint64_t *)(&peth->d_addr)) = task->next_hops[nh_idx].mac_port_8bytes;
	*((uint64_t *)(&peth->s_addr)) = task->src_mac[task->next_hops[nh_idx].mac_port.out_idx];
}
184
/* Grow the packet headroom by one MPLS header and write a fresh
 * Ethernet header at the new start, followed by the MPLS label word.
 * The statement order matters: the new header region overlaps the old
 * one, and every field that could be clobbered is rewritten here.
 * NOTE(review): the return value of rte_pktmbuf_prepend() is not
 * checked; a packet with no headroom would yield a NULL peth --
 * presumably headroom is guaranteed upstream, confirm. */
static void set_l2_mpls(struct task_routing *task, struct rte_mbuf *mbuf, uint8_t nh_idx)
{
	prox_rte_ether_hdr *peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, sizeof(struct mpls_hdr));

	/* Same overlapping 8-byte MAC stores as set_l2(); the second one
	 * also writes the MPLSU ether_type packed into src_mac[]. */
	*((uint64_t *)(&peth->d_addr)) = task->next_hops[nh_idx].mac_port_8bytes;
	*((uint64_t *)(&peth->s_addr)) = task->src_mac[task->next_hops[nh_idx].mac_port.out_idx];
	/* MPLSU ether_type written as high word of 64bit src_mac prepared by init_task_routing */
	struct mpls_hdr *mpls = (struct mpls_hdr *)(peth + 1);

	if (task->runtime_flags & TASK_MARK) {
		/* QoS marking: OR in the pre-swapped per-color TC/EXP word. */
		  enum prox_rte_color color = rte_sched_port_pkt_read_color(mbuf);
		*(uint32_t *)mpls = task->next_hops[nh_idx].mpls | task->marking[color] | 0x00010000; // Set BoS to 1
	}
	else {
		*(uint32_t *)mpls = task->next_hops[nh_idx].mpls | 0x00010000; // Set BoS to 1
	}
}
202
203 static uint8_t route_ipv4(struct task_routing *task, uint8_t *beg, uint32_t ip_offset, struct rte_mbuf *mbuf)
204 {
205         prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr*)(beg + ip_offset);
206         prox_rte_ether_hdr *peth_out;
207         uint8_t tx_port;
208         uint32_t dst_ip;
209
210         if (unlikely(ip->version_ihl >> 4 != 4)) {
211                 plog_warn("Offset: %d\n", ip_offset);
212                 plog_warn("Expected to receive IPv4 packet but IP version was %d\n",
213                         ip->version_ihl >> 4);
214                 return OUT_DISCARD;
215         }
216
217         switch(ip->next_proto_id) {
218         case IPPROTO_GRE: {
219                 struct gre_hdr *pgre = (struct gre_hdr *)(ip + 1);
220                 dst_ip = ((prox_rte_ipv4_hdr *)(pgre + 1))->dst_addr;
221                 break;
222         }
223         case IPPROTO_TCP:
224         case IPPROTO_UDP:
225                 dst_ip = ip->dst_addr;
226                 break;
227         default:
228                 /* Routing for other protocols is not implemented */
229                 return OUT_DISCARD;
230         }
231
232 #if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,1)
233         uint32_t next_hop_index;
234 #else
235         uint8_t next_hop_index;
236 #endif
237         if (unlikely(rte_lpm_lookup(task->ipv4_lpm, rte_bswap32(dst_ip), &next_hop_index) != 0)) {
238                 uint8_t* dst_ipp = (uint8_t*)&dst_ip;
239                 plog_warn("lpm_lookup failed for ip %d.%d.%d.%d: rc = %d\n",
240                         dst_ipp[0], dst_ipp[1], dst_ipp[2], dst_ipp[3], -ENOENT);
241                 return OUT_DISCARD;
242         }
243
244         tx_port = task->next_hops[next_hop_index].mac_port.out_idx;
245         if (task->runtime_flags & TASK_MPLS_TAGGING) {
246                 uint16_t padlen = rte_pktmbuf_pkt_len(mbuf) - rte_be_to_cpu_16(ip->total_length) - ip_offset;
247                 if (padlen) {
248                         rte_pktmbuf_trim(mbuf, padlen);
249                 }
250
251                 set_l2_mpls(task, mbuf, next_hop_index);
252         }
253         else {
254                 set_l2(task, mbuf, next_hop_index);
255         }
256         return tx_port;
257 }
258
259 static inline uint8_t handle_routing(struct task_routing *task, struct rte_mbuf *mbuf)
260 {
261         struct qinq_hdr *qinq;
262         prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
263
264         switch (peth->ether_type) {
265         case ETYPE_8021ad: {
266                 struct qinq_hdr *qinq = (struct qinq_hdr *)peth;
267                 if ((qinq->cvlan.eth_proto != ETYPE_VLAN)) {
268                         plog_warn("Unexpected proto in QinQ = %#04x\n", qinq->cvlan.eth_proto);
269                         return OUT_DISCARD;
270                 }
271
272                 return route_ipv4(task, (uint8_t*)qinq, sizeof(*qinq), mbuf);
273         }
274         case ETYPE_IPv4:
275                 return route_ipv4(task, (uint8_t*)peth, sizeof(*peth), mbuf);
276         case ETYPE_MPLSU: {
277                 /* skip MPLS headers if any for routing */
278                 struct mpls_hdr *mpls = (struct mpls_hdr *)(peth + 1);
279                 uint32_t count = sizeof(prox_rte_ether_hdr);
280                 while (!(mpls->bytes & 0x00010000)) {
281                         mpls++;
282                         count += sizeof(struct mpls_hdr);
283                 }
284                 count += sizeof(struct mpls_hdr);
285
286                 return route_ipv4(task, (uint8_t*)peth, count, mbuf);
287         }
288         default:
289                 if (peth->ether_type == task->qinq_tag) {
290                         struct qinq_hdr *qinq = (struct qinq_hdr *)peth;
291                         if ((qinq->cvlan.eth_proto != ETYPE_VLAN)) {
292                                 plog_warn("Unexpected proto in QinQ = %#04x\n", qinq->cvlan.eth_proto);
293                                 return OUT_DISCARD;
294                         }
295
296                         return route_ipv4(task, (uint8_t*)qinq, sizeof(*qinq), mbuf);
297                 }
298                 plog_warn("Failed routing packet: ether_type %#06x is unknown\n", peth->ether_type);
299                 return OUT_DISCARD;
300         }
301 }
302
/* Mode descriptor binding "mode=routing" in the config to this task's
 * init and per-burst handler. */
static struct task_init task_init_routing = {
	.mode_str = "routing",
	.init = init_task_routing,
	.handle = handle_routing_bulk,
	.flag_features = TASK_FEATURE_ROUTING|TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
	.size = sizeof(struct task_routing)
};
310
/* Registers the routing mode before main() runs (GCC/Clang constructor
 * attribute), so the config parser can resolve "mode=routing". */
__attribute__((constructor)) static void reg_task_routing(void)
{
	reg_task(&task_init_routing);
}