// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <rte_byteorder.h>
#include <rte_cycles.h>
#include <rte_table_hash.h>
#include <rte_lpm.h>
#include <rte_version.h>

#include "prox_lua.h"
#include "prox_lua_types.h"
#include "handle_qinq_decap4.h"
#include "handle_qinq_encap4.h"
#include "stats.h"
#include "tx_pkt.h"
#include "defines.h"
#include "handle_routing.h"
#include "prox_assert.h"
#include "task_init.h"
#include "quit.h"
#include "pkt_prototypes.h"
#include "task_base.h"
#include "bng_pkts.h"
#include "prox_cksum.h"
#include "expire_cpe.h"
#include "prox_port_cfg.h"
#include "prefetch.h"
#include "lconf.h"
#include "log.h"
#include "prox_shared.h"
#include "prox_compat.h"
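
/*
 * Upstream BNG task: decapsulate QinQ-tagged IPv4 packets coming from
 * CPEs, learn the CPE (MAC, QinQ tags) in the CPE table, and wrap the
 * inner IPv4 packet in GRE towards the core, either bridged or routed
 * (optionally with an MPLS tag).
 *
 * Note on keys[]/fake_packets[] below: the bulk hash lookup takes an
 * array of mbufs and reads each key at a fixed offset past the mbuf
 * header. Each fake_packets[i] points sizeof(struct rte_mbuf) bytes
 * before keys[i], so a burst of pre-extracted keys can be looked up
 * in a single call.
 */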
struct task_qinq_decap4 {
	struct task_base	base;
	struct rte_table_hash	*cpe_table;
	struct rte_table_hash	*qinq_gre_table;
	struct qinq_gre_data	*qinq_gre_data;
	struct next_hop		*next_hops;
	struct rte_lpm		*ipv4_lpm;
	uint32_t		local_ipv4;
	uint16_t		qinq_tag;
	uint8_t			runtime_flags;
	int			offload_crc;
	uint64_t		keys[64];
	uint64_t		src_mac[PROX_MAX_PORTS];
	struct rte_mbuf		*fake_packets[64];
	struct expire_cpe	expire_cpe;
	uint64_t		cpe_timeout;
	uint8_t			mapping[PROX_MAX_PORTS];
};
static uint8_t handle_qinq_decap4(struct task_qinq_decap4 *task, struct rte_mbuf *mbuf, struct qinq_gre_data* entry);
/* Convert IPv4 packets to GRE and optionally store QinQ Tags */
static void arp_update(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
static void arp_msg(struct task_base *tbase, void **data, uint16_t n_msgs);
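
/* Task init: resolve the Lua-defined LPM route table (shared per
   socket), copy the configuration into the task, and register the
   periodic CPE-expiry and ARP control-plane handlers. */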
static void init_task_qinq_decap4(struct task_base *tbase, struct task_args *targ)
{
	struct task_qinq_decap4 *task = (struct task_qinq_decap4 *)tbase;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
	struct lpm4 *lpm;

	task->cpe_table = targ->cpe_table;
	task->cpe_timeout = msec_to_tsc(targ->cpe_table_timeout_ms);

	PROX_PANIC(!strcmp(targ->route_table, ""), "route table not specified\n");
	lpm = prox_sh_find_socket(socket_id, targ->route_table);
	if (!lpm) {
		int ret = lua_to_lpm4(prox_lua(), GLOBAL, targ->route_table, socket_id, &lpm);
		PROX_PANIC(ret, "Failed to load IPv4 LPM:\n%s\n", get_lua_to_errors());
		prox_sh_add_socket(socket_id, targ->route_table, lpm);
	}
	task->ipv4_lpm = lpm->rte_lpm;
	task->next_hops = lpm->next_hops;

	task->qinq_tag = targ->qinq_tag;
	task->local_ipv4 = targ->local_ipv4;
	task->runtime_flags = targ->runtime_flags;
	if (strcmp(targ->task_init->sub_mode_str, "pe"))
		PROX_PANIC(targ->qinq_gre_table == NULL, "can't set up qinq gre\n");

	task->qinq_gre_table = targ->qinq_gre_table;

	if (targ->cpe_table_timeout_ms) {
		targ->lconf->period_func = check_expire_cpe;
		task->expire_cpe.cpe_table = task->cpe_table;
		targ->lconf->period_data = &task->expire_cpe;
		targ->lconf->period_timeout = msec_to_tsc(500) / NUM_VCPES;
	}

	for (uint32_t i = 0; i < 64; ++i) {
		task->fake_packets[i] = (struct rte_mbuf*)((uint8_t*)&task->keys[i] - sizeof(struct rte_mbuf));
	}

	if (task->runtime_flags & TASK_ROUTING) {
		if (targ->nb_txrings) {
			struct task_args *dtarg;
			struct core_task ct;

			for (uint32_t i = 0; i < targ->nb_txrings; ++i) {
				ct = targ->core_task_set[0].core_task[i];
				dtarg = core_targ_get(ct.core, ct.task);
				dtarg = find_reachable_task_sending_to_port(dtarg);
				PROX_PANIC(dtarg == NULL, "Error finding destination port through other tasks for outgoing ring %u\n", i);
				task->src_mac[i] = *(uint64_t*)&prox_port_cfg[dtarg->tx_port_queue[0].port].eth_addr;
			}
		} else {
			for (uint32_t i = 0; i < targ->nb_txports; ++i) {
				task->src_mac[i] = *(uint64_t*)&prox_port_cfg[targ->tx_port_queue[i].port].eth_addr;
			}
		}
	}

	if (targ->runtime_flags & TASK_CTRL_HANDLE_ARP) {
		targ->lconf->ctrl_func_p[targ->task] = arp_update;
	}

	/* Copy the mapping from a sibling task which is configured
	   with mode encap4. The mapping is constant, so it is faster
	   to apply it when entries are added (least common case)
	   instead of re-applying it for every packet (most common
	   case). */
	for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) {
		enum task_mode smode = targ->lconf->targs[task_id].mode;
		if (QINQ_ENCAP4 == smode) {
			for (uint8_t i = 0; i < PROX_MAX_PORTS; ++i) {
				task->mapping[i] = targ->lconf->targs[task_id].mapping[i];
			}
		}
	}

	struct prox_port_cfg *port = find_reachable_port(targ);
	if (port) {
		task->offload_crc = port->capabilities.tx_offload_cksum;
	}

	// By default, this function is called 1K times per second => 64K ARP per second max
	// If 4 interfaces are sending to here, that is ~0.1% of the workload.
	// If more ARP is received, it will be dropped, or will dramatically slow down the LB if in "no drop" mode.
	targ->lconf->ctrl_timeout = freq_to_tsc(targ->ctrl_freq);
	targ->lconf->ctrl_func_m[targ->task] = arp_msg;
}
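
/* Tables are built in early_init, before any task init runs, so that
   sibling encap4/decap4 tasks can share them through the task args. */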
static void early_init_table(struct task_args *targ)
{
	if (!targ->qinq_gre_table && !targ->cpe_table) {
		init_qinq_gre_table(targ, get_qinq_gre_map(targ));
		init_cpe4_table(targ);
	}
}
static inline void extract_key_bulk(struct rte_mbuf **mbufs, uint16_t n_pkts, struct task_qinq_decap4 *task)
{
	for (uint16_t j = 0; j < n_pkts; ++j) {
		extract_key_cpe(mbufs[j], &task->keys[j]);
	}
}
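
/* Cold path: log enough of the offending packet to explain why its
   QinQ tags could not be mapped to a GRE id. */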
__attribute__((cold)) static void handle_error(struct rte_mbuf *mbuf)
{
	struct cpe_pkt *packet = rte_pktmbuf_mtod(mbuf, struct cpe_pkt *);
#ifdef USE_QINQ
	uint64_t key = (*(uint64_t*)(((uint8_t *)packet) + 12)) & 0xFF0FFFFFFF0FFFFF;
	uint32_t svlan = packet->qinq_hdr.svlan.vlan_tci;
	uint32_t cvlan = packet->qinq_hdr.cvlan.vlan_tci;

	svlan = rte_be_to_cpu_16(svlan & 0xFF0F);
	cvlan = rte_be_to_cpu_16(cvlan & 0xFF0F);
#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0)
	plogx_err("Can't convert key %016lx qinq %d|%d (%x|%x) to gre_id, rss=%x flags=%lx, status_err_len=%lx, L2Tag=%d type=%d\n",
		  key, svlan, cvlan, svlan, cvlan, mbuf->hash.rss, mbuf->ol_flags, mbuf->udata64, mbuf->vlan_tci_outer, mbuf->packet_type);
#elif RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
	plogx_err("Can't convert key %016lx qinq %d|%d (%x|%x) to gre_id, rss=%x flags=%lx, status_err_len=%lx, L2Tag=%d type=%d\n",
		  key, svlan, cvlan, svlan, cvlan, mbuf->hash.rss, mbuf->ol_flags, mbuf->udata64, mbuf->reserved, mbuf->packet_type);
#else
	plogx_err("Can't convert key %016lx qinq %d|%d (%x|%x) to gre_id, flags=%x, L2Tag=%d\n",
		  key, svlan, cvlan, svlan, cvlan, mbuf->ol_flags, mbuf->reserved);
#endif
#else
	plogx_err("Can't convert ip %x to gre_id\n", rte_bswap32(packet->ipv4_hdr.src_addr));
#endif
}
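
/* Add a CPE entry to the 8-byte-key hash table; returns 0 on success,
   1 on failure. */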
static int add_cpe_entry(struct rte_table_hash *hash, struct cpe_key *key, struct cpe_data *data)
{
	void* entry_in_hash;
	int ret, key_found = 0;

	ret = prox_rte_table_key8_add(hash, key, data, &key_found, &entry_in_hash);
	if (unlikely(ret)) {
		plogx_err("Failed to add key: ip %x, gre %x\n", key->ip, key->gre_id);
		return 1;
	}
	return 0;
}
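
/* Build the CPE key (sender IP + GRE id) and data (MAC, QinQ tags,
   mapped output port, user, expiry TSC) from a received ARP request. */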
static void extract_key_data_arp(struct rte_mbuf* mbuf, struct cpe_key* key, struct cpe_data* data, const struct qinq_gre_data* entry, uint64_t cpe_timeout, uint8_t* mapping)
{
	const struct cpe_packet_arp *packet = rte_pktmbuf_mtod(mbuf, const struct cpe_packet_arp *);
	uint32_t svlan = packet->qinq_hdr.svlan.vlan_tci & 0xFF0F;
	uint32_t cvlan = packet->qinq_hdr.cvlan.vlan_tci & 0xFF0F;
	uint8_t port_id;

	key->ip = packet->arp.data.spa;
	key->gre_id = entry->gre_id;

	data->mac_port_8bytes = *((const uint64_t *)(&packet->qinq_hdr.s_addr));
	data->qinq_svlan = svlan;
	data->qinq_cvlan = cvlan;
#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
	port_id = mbuf->port;
#else
	port_id = mbuf->pkt.in_port;
#endif
	uint8_t mapped = mapping[port_id];
	data->mac_port.out_idx = mapped;

	if (unlikely(mapped == 255)) {
		/* This error only occurs if the system is configured incorrectly */
		plog_warn("Failed adding packet: unknown mapping for port %d\n", port_id);
		data->mac_port.out_idx = 0;
	}

	data->user = entry->user;
	data->tsc = rte_rdtsc() + cpe_timeout;
}
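
/* Textual form of a CPE/ARP entry, apparently used by the command
   interface: "out_idx gre_id svlan cvlan a.b.c.d m:m:m:m:m:m user". */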
void arp_msg_to_str(char *str, struct arp_msg *msg)
{
	sprintf(str, "%u %u %u %u %u.%u.%u.%u %x:%x:%x:%x:%x:%x %u\n",
		msg->data.mac_port.out_idx, msg->key.gre_id, msg->data.qinq_svlan, msg->data.qinq_cvlan,
		msg->key.ip_bytes[0], msg->key.ip_bytes[1], msg->key.ip_bytes[2], msg->key.ip_bytes[3],
		msg->data.mac_port_b[0], msg->data.mac_port_b[1], msg->data.mac_port_b[2],
		msg->data.mac_port_b[3], msg->data.mac_port_b[4], msg->data.mac_port_b[5], msg->data.user);
}
int str_to_arp_msg(struct arp_msg *msg, const char *str)
{
	uint32_t ip[4], interface, gre_id, svlan, cvlan, mac[6], user;

	int ret = sscanf(str, "%u %u %u %u %u.%u.%u.%u %x:%x:%x:%x:%x:%x %u",
			 &interface, &gre_id, &svlan, &cvlan,
			 ip, ip + 1, ip + 2, ip + 3,
			 mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5, &user);

	for (uint8_t i = 0; i < 4; ++i)
		msg->key.ip_bytes[i] = ip[i];
	msg->key.gre_id = gre_id;

	for (uint8_t i = 0; i < 6; ++i)
		msg->data.mac_port_b[i] = mac[i];
	msg->data.qinq_svlan = svlan;
	msg->data.qinq_cvlan = cvlan;
	msg->data.user = user;
	msg->data.mac_port.out_idx = interface;

	/* all 15 fields must have been parsed */
	return ret != 15;
}
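
/* Called from the control message path: refresh the expiry TSC and
   (re)add each parsed entry to the CPE table. */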
void arp_update_from_msg(struct rte_table_hash * cpe_table, struct arp_msg **msgs, uint16_t n_msgs, uint64_t cpe_timeout)
{
	void* entry_in_hash;
	int ret, key_found = 0;

	for (uint16_t i = 0; i < n_msgs; ++i) {
		msgs[i]->data.tsc = rte_rdtsc() + cpe_timeout;
		ret = prox_rte_table_key8_add(cpe_table, &msgs[i]->key, &msgs[i]->data, &key_found, &entry_in_hash);
		if (unlikely(ret)) {
			plogx_err("Failed to add key %x, gre %x\n", msgs[i]->key.ip, msgs[i]->key.gre_id);
		}
	}
}

static void arp_msg(struct task_base *tbase, void **data, uint16_t n_msgs)
{
	struct task_qinq_decap4 *task = (struct task_qinq_decap4 *)tbase;
	struct arp_msg **msgs = (struct arp_msg **)data;

	arp_update_from_msg(task->cpe_table, msgs, n_msgs, task->cpe_timeout);
}
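
/* Control-plane handler (TASK_CTRL_HANDLE_ARP): look up the QinQ/GRE
   entry for each received ARP packet and learn the CPE into the CPE
   table. All packets are consumed here. */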
static void arp_update(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_qinq_decap4 *task = (struct task_qinq_decap4 *)tbase;

	prefetch_pkts(mbufs, n_pkts);
	extract_key_bulk(mbufs, n_pkts, task);

	uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
	uint64_t lookup_hit_mask = 0;
	struct qinq_gre_data* entries[64];
	prox_rte_table_key8_lookup(task->qinq_gre_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries);

	TASK_STATS_ADD_RX(&task->base.aux->stats, n_pkts);
	for (uint16_t j = 0; j < n_pkts; ++j) {
		if (unlikely(!((lookup_hit_mask >> j) & 0x1))) {
			handle_error(mbufs[j]);
			rte_pktmbuf_free(mbufs[j]);
			continue;
		}

		struct cpe_key key;
		struct cpe_data data;

		extract_key_data_arp(mbufs[j], &key, &data, entries[j], task->cpe_timeout, task->mapping);

		void* entry_in_hash;
		int ret, key_found = 0;

		ret = prox_rte_table_key8_add(task->cpe_table, &key, &data, &key_found, &entry_in_hash);
		if (unlikely(ret)) {
			plogx_err("Failed to add key %x, gre %x\n", key.ip, key.gre_id);
			TASK_STATS_ADD_DROP_DISCARD(&task->base.aux->stats, 1);
			rte_pktmbuf_free(mbufs[j]);
			continue;
		}

		/* should do ARP reply */
		TASK_STATS_ADD_DROP_HANDLED(&task->base.aux->stats, 1);
		rte_pktmbuf_free(mbufs[j]);
	}
}
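
/* Main receive handler: one bulk hash lookup for the whole burst,
   then per-packet decap; packets without a QinQ/GRE match are
   discarded. */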
static int handle_qinq_decap4_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_qinq_decap4 *task = (struct task_qinq_decap4 *)tbase;
	uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
	struct qinq_gre_data* entries[64];
	uint8_t out[MAX_PKT_BURST];
	uint64_t lookup_hit_mask;
	prefetch_pkts(mbufs, n_pkts);

	// Prefetch headroom, as we will prepend the mbuf and write to this cache line
	for (uint16_t j = 0; j < n_pkts; ++j) {
		PREFETCH0((rte_pktmbuf_mtod(mbufs[j], char*)-1));
	}

	extract_key_bulk(mbufs, n_pkts, task);
	prox_rte_table_key8_lookup(task->qinq_gre_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries);

	if (likely(lookup_hit_mask == pkts_mask)) {
		for (uint16_t j = 0; j < n_pkts; ++j) {
			out[j] = handle_qinq_decap4(task, mbufs[j], entries[j]);
		}
	}
	else {
		for (uint16_t j = 0; j < n_pkts; ++j) {
			if (unlikely(!((lookup_hit_mask >> j) & 0x1))) {
				// The lookup might fail if the packet does not have the expected QinQ tags or is not an IPv4 packet
				handle_error(mbufs[j]);
				out[j] = OUT_DISCARD;
				continue;
			}
			out[j] = handle_qinq_decap4(task, mbufs[j], entries[j]);
		}
	}

	return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}
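
/* Bridged (non-routed) mode: rewrite the QinQ frame in place into
   Ethernet/IPv4/GRE towards the core. With USE_QINQ only 20 bytes
   are prepended for the 28 bytes of outer IPv4+GRE, since the 8 QinQ
   bytes are reused. */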
static inline void gre_encap(struct task_qinq_decap4 *task, uint32_t src_ipv4, struct rte_mbuf *mbuf, uint32_t gre_id)
{
#ifdef USE_QINQ
	struct ipv4_hdr *pip = (struct ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, struct qinq_hdr *));
#else
	struct ipv4_hdr *pip = (struct ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, struct ether_hdr *));
#endif
	uint16_t ip_len = rte_be_to_cpu_16(pip->total_length);
	uint16_t padlen = rte_pktmbuf_pkt_len(mbuf) - 20 - ip_len - sizeof(struct qinq_hdr);

	if (padlen) {
		rte_pktmbuf_trim(mbuf, padlen);
	}

	PROX_PANIC(rte_pktmbuf_data_len(mbuf) - padlen + 20 > ETHER_MAX_LEN,
		   "Would need to fragment packet new size = %u - not implemented\n",
		   rte_pktmbuf_data_len(mbuf) - padlen + 20);

#ifdef USE_QINQ
	/* prepend only 20 bytes instead of 28, 8 bytes are present from the QinQ */
	struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, 20);
#else
	struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, 28);
#endif
	PROX_ASSERT(peth);
	PREFETCH0(peth);
	if (task->runtime_flags & TASK_TX_CRC) {
		/* calculate IP CRC here to avoid problems with -O3 flag with gcc */
#ifdef MPLS_ROUTING
		prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr) + sizeof(struct mpls_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
#else
		prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
#endif
	}

	/* new outer IP header */
	struct ipv4_hdr *p_tunnel_ip = (struct ipv4_hdr *)(peth + 1);
	rte_memcpy(p_tunnel_ip, &tunnel_ip_proto, sizeof(struct ipv4_hdr));
	ip_len += sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr);
	p_tunnel_ip->total_length = rte_cpu_to_be_16(ip_len);
	p_tunnel_ip->src_addr = src_ipv4;

	/* Add GRE Header values */
	struct gre_hdr *pgre = (struct gre_hdr *)(p_tunnel_ip + 1);

	rte_memcpy(pgre, &gre_hdr_proto, sizeof(struct gre_hdr));
	pgre->gre_id = gre_id;
	peth->ether_type = ETYPE_IPv4;
}
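
/* Ethernet padding carried by the original CPE frame; computed after
   the DOWNSTREAM_DELTA prepend, hence the extra term. */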
static inline uint16_t calc_padlen(const struct rte_mbuf *mbuf, const uint16_t ip_len)
{
	return rte_pktmbuf_pkt_len(mbuf) - DOWNSTREAM_DELTA - ip_len - offsetof(struct cpe_pkt, ipv4_hdr);
}
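
/* Routed mode: prepend DOWNSTREAM_DELTA bytes, resolve the next hop
   with an LPM lookup on the inner destination address, rewrite the
   MACs (plus an MPLS tag when MPLS_ROUTING is enabled) and fill in
   the outer IPv4/GRE headers. Returns the output port, or ROUTE_ERR
   when the lookup fails. */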
static inline uint8_t gre_encap_route(uint32_t src_ipv4, struct rte_mbuf *mbuf, uint32_t gre_id, struct task_qinq_decap4 *task)
{
	PROX_PANIC(rte_pktmbuf_data_len(mbuf) + DOWNSTREAM_DELTA > ETHER_MAX_LEN,
		   "Would need to fragment packet new size = %u - not implemented\n",
		   rte_pktmbuf_data_len(mbuf) + DOWNSTREAM_DELTA);

	struct core_net_pkt_m *packet = (struct core_net_pkt_m *)rte_pktmbuf_prepend(mbuf, DOWNSTREAM_DELTA);
	PROX_ASSERT(packet);
	PREFETCH0(packet);

	struct ipv4_hdr *pip = &((struct cpe_pkt_delta *)packet)->pkt.ipv4_hdr;
	uint16_t ip_len = rte_be_to_cpu_16(pip->total_length);

	/* rte_lpm_lookup returns 0 on success, -ENOENT on failure (or -EINVAL if the first or last parameter is NULL) */
#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,1)
	uint32_t next_hop_index;
#else
	uint8_t next_hop_index;
#endif
	if (unlikely(rte_lpm_lookup(task->ipv4_lpm, rte_bswap32(pip->dst_addr), &next_hop_index) != 0)) {
		plog_warn("lpm_lookup failed for ip %x: rc = %d\n", rte_bswap32(pip->dst_addr), -ENOENT);
		return ROUTE_ERR;
	}
	PREFETCH0(&task->next_hops[next_hop_index]);

	/* calculate outer IP CRC here to avoid problems with -O3 flag with gcc */
	const uint16_t padlen = calc_padlen(mbuf, ip_len);
	if (padlen) {
		rte_pktmbuf_trim(mbuf, padlen);
	}
	const uint8_t port_id = task->next_hops[next_hop_index].mac_port.out_idx;

	*((uint64_t *)(&packet->ether_hdr.d_addr)) = task->next_hops[next_hop_index].mac_port_8bytes;
	*((uint64_t *)(&packet->ether_hdr.s_addr)) = task->src_mac[task->next_hops[next_hop_index].mac_port.out_idx];
#ifdef MPLS_ROUTING
	packet->mpls_bytes = task->next_hops[next_hop_index].mpls | 0x00010000; // Set BoS to 1
	packet->ether_hdr.ether_type = ETYPE_MPLSU;
#else
	packet->ether_hdr.ether_type = ETYPE_IPv4;
#endif

	/* New outer IP header */
	rte_memcpy(&packet->tunnel_ip_hdr, &tunnel_ip_proto, sizeof(struct ipv4_hdr));
	ip_len += sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr);
	packet->tunnel_ip_hdr.total_length = rte_cpu_to_be_16(ip_len);
	packet->tunnel_ip_hdr.src_addr = src_ipv4;
	packet->tunnel_ip_hdr.dst_addr = task->next_hops[next_hop_index].ip_dst;
	if (task->runtime_flags & TASK_TX_CRC) {
#ifdef MPLS_ROUTING
		prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(struct ether_hdr) + sizeof(struct mpls_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
#else
		prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
#endif
	}

	/* Add GRE Header values */
	rte_memcpy(&packet->gre_hdr, &gre_hdr_proto, sizeof(struct gre_hdr));
	packet->gre_hdr.gre_id = rte_be_to_cpu_32(gre_id);

	return port_id;
}
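
/* As extract_key_data_arp(), but for data packets: the CPE is
   learned from the IP source address and the packet's own QinQ tags
   (or, without USE_QINQ, tags reconstructed from the source IP). */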
static void extract_key_data(struct rte_mbuf* mbuf, struct cpe_key* key, struct cpe_data* data, const struct qinq_gre_data* entry, uint64_t cpe_timeout, uint8_t *mapping)
{
	struct cpe_pkt *packet = rte_pktmbuf_mtod(mbuf, struct cpe_pkt *);
	uint8_t port_id;

#ifndef USE_QINQ
	const uint32_t tmp = rte_bswap32(packet->ipv4_hdr.src_addr) & 0x00FFFFFF;
	const uint32_t svlan = rte_bswap16(tmp >> 12);
	const uint32_t cvlan = rte_bswap16(tmp & 0x0FFF);
#endif

	key->ip = packet->ipv4_hdr.src_addr;
	key->gre_id = entry->gre_id;

#ifdef USE_QINQ
	data->mac_port_8bytes = *((const uint64_t *)(&packet->qinq_hdr.s_addr));
	data->qinq_svlan = packet->qinq_hdr.svlan.vlan_tci & 0xFF0F;
	data->qinq_cvlan = packet->qinq_hdr.cvlan.vlan_tci & 0xFF0F;
#else
	data->mac_port_8bytes = *((const uint64_t *)(&packet->ether_hdr.s_addr));
	data->qinq_svlan = svlan;
	data->qinq_cvlan = cvlan;
#endif

#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
	port_id = mbuf->port;
#else
	port_id = mbuf->pkt.in_port;
#endif
	uint8_t mapped = mapping[port_id];
	data->mac_port.out_idx = mapped;

	if (unlikely(mapped == 255)) {
		/* This error only occurs if the system is configured incorrectly */
		plog_warn("Failed adding packet: unknown mapping for port %d\n", port_id);
		data->mac_port.out_idx = 0;
	}

	data->user = entry->user;
	data->tsc = rte_rdtsc() + cpe_timeout;
}
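
/* Per-packet decap: optionally learn the CPE (from every packet, or
   from fast-path ARP only), then GRE-encapsulate towards the core.
   Returns the out[] value consumed by tx_pkt(). */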
static uint8_t handle_qinq_decap4(struct task_qinq_decap4 *task, struct rte_mbuf *mbuf, struct qinq_gre_data* entry)
{
	if (!(task->runtime_flags & (TASK_CTRL_HANDLE_ARP|TASK_FP_HANDLE_ARP))) {
		// We learn CPE MAC addresses on every packet
		struct cpe_key key;
		struct cpe_data data;
		extract_key_data(mbuf, &key, &data, entry, task->cpe_timeout, task->mapping);
		//plogx_err("Adding key ip=%x/gre_id=%x data (svlan|cvlan)=%x|%x, rss=%x, gre_id=%x\n", key.ip, key.gre_id, data.qinq_svlan, data.qinq_cvlan, mbuf->hash.rss, entry->gre_id);

		if (add_cpe_entry(task->cpe_table, &key, &data)) {
			plog_warn("Failed to add ARP entry\n");
		}
	}
	if (task->runtime_flags & TASK_FP_HANDLE_ARP) {
		// We learn CPE MAC addresses from ARP packets in the fast path
#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
		if (mbuf->packet_type == 0xB) {
			struct cpe_key key;
			struct cpe_data data;
			extract_key_data_arp(mbuf, &key, &data, entry, task->cpe_timeout, task->mapping);

			if (add_cpe_entry(task->cpe_table, &key, &data)) {
				plog_warn("Failed to add ARP entry\n");
			}
			return OUT_HANDLED;
		}
#else
		struct cpe_pkt *packet = rte_pktmbuf_mtod(mbuf, struct cpe_pkt*);
		if (packet->qinq_hdr.svlan.eth_proto == task->qinq_tag &&
		    packet->qinq_hdr.ether_type == ETYPE_ARP) {
			struct cpe_key key;
			struct cpe_data data;
			extract_key_data_arp(mbuf, &key, &data, entry, task->cpe_timeout, task->mapping);

			if (add_cpe_entry(task->cpe_table, &key, &data)) {
				plog_warn("Failed to add ARP entry\n");
			}
			return OUT_HANDLED;
		}
#endif
	}

	if (task->runtime_flags & TASK_ROUTING) {
		uint8_t tx_portid;
		tx_portid = gre_encap_route(task->local_ipv4, mbuf, entry->gre_id, task);

		return tx_portid == ROUTE_ERR ? OUT_DISCARD : tx_portid;
	}
	else {
		gre_encap(task, task->local_ipv4, mbuf, entry->gre_id);
		return 0;
	}
}
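
/* Flow iterator exported through task_init below: walks the QinQ/GRE
   map, keeping only the flows whose gre_id maps to this worker
   thread. */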
static void flow_iter_next(struct flow_iter *iter, struct task_args *targ)
{
	do {
		iter->idx++;
	} while (iter->idx < (int)get_qinq_gre_map(targ)->count &&
		 get_qinq_gre_map(targ)->entries[iter->idx].gre_id % targ->nb_slave_threads != targ->worker_thread_id);
}

static void flow_iter_beg(struct flow_iter *iter, struct task_args *targ)
{
	iter->idx = -1;
	flow_iter_next(iter, targ);
}

static int flow_iter_is_end(struct flow_iter *iter, struct task_args *targ)
{
	return iter->idx == (int)get_qinq_gre_map(targ)->count;
}

static uint16_t flow_iter_get_svlan(struct flow_iter *iter, struct task_args *targ)
{
	return get_qinq_gre_map(targ)->entries[iter->idx].svlan;
}

static uint16_t flow_iter_get_cvlan(struct flow_iter *iter, struct task_args *targ)
{
	return get_qinq_gre_map(targ)->entries[iter->idx].cvlan;
}
static struct task_init task_init_qinq_decapv4_table = {
	.mode = QINQ_DECAP4,
	.mode_str = "qinqdecapv4",
	.early_init = early_init_table,
	.init = init_task_qinq_decap4,
	.handle = handle_qinq_decap4_bulk,
	.flag_features = TASK_FEATURE_ROUTING,
	.flow_iter = {
		.beg = flow_iter_beg,
		.is_end = flow_iter_is_end,
		.next = flow_iter_next,
		.get_svlan = flow_iter_get_svlan,
		.get_cvlan = flow_iter_get_cvlan,
	},
	.size = sizeof(struct task_qinq_decap4)
};

__attribute__((constructor)) static void reg_task_qinq_decap4(void)
{
	reg_task(&task_init_qinq_decapv4_table);
}