// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <rte_byteorder.h>
#include <rte_cycles.h>
#include <rte_table_hash.h>
#include <rte_lpm.h>
#include <rte_version.h>

#include "prox_lua.h"
#include "prox_lua_types.h"
#include "handle_qinq_decap4.h"
#include "handle_qinq_encap4.h"
#include "defines.h"
#include "handle_routing.h"
#include "prox_assert.h"
#include "task_init.h"
#include "quit.h"
#include "pkt_prototypes.h"
#include "task_base.h"
#include "lconf.h"
#include "log.h"
#include "prefetch.h"
#include "prox_cksum.h"
#include "expire_cpe.h"
#include "prox_port_cfg.h"
#include "prox_shared.h"
struct task_qinq_decap4 {
	struct task_base base;
	struct rte_table_hash *cpe_table;
	struct rte_table_hash *qinq_gre_table;
	struct qinq_gre_data *qinq_gre_data;
	struct next_hop *next_hops;
	struct rte_lpm *ipv4_lpm;
	uint32_t local_ipv4;
	uint16_t qinq_tag;
	uint8_t runtime_flags;
	uint64_t keys[64];
	uint64_t src_mac[PROX_MAX_PORTS];
	struct rte_mbuf* fake_packets[64];
	struct expire_cpe expire_cpe;
	uint64_t cpe_timeout;
	uint8_t mapping[PROX_MAX_PORTS];
	uint8_t offload_crc;
};
static uint8_t handle_qinq_decap4(struct task_qinq_decap4 *task, struct rte_mbuf *mbuf, struct qinq_gre_data* entry);

/* Convert IPv4 packets to GRE and optionally store QinQ Tags */
static void arp_update(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
static void arp_msg(struct task_base *tbase, void **data, uint16_t n_msgs);
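
/*
 * Overview (a sketch inferred from gre_encap()/gre_encap_route() below): an
 * upstream CPE frame arrives as
 *     Ethernet | svlan | cvlan | IPv4 | payload
 * and leaves towards the core as
 *     Ethernet | [MPLS] | outer IPv4 | GRE | IPv4 | payload
 * where the GRE key carries the per-subscriber gre_id obtained by looking up
 * the QinQ tags in qinq_gre_table.
 */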
static void init_task_qinq_decap4(struct task_base *tbase, struct task_args *targ)
{
	struct task_qinq_decap4 *task = (struct task_qinq_decap4 *)tbase;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
	struct lpm4 *lpm;

	task->cpe_table = targ->cpe_table;
	task->cpe_timeout = msec_to_tsc(targ->cpe_table_timeout_ms);

	PROX_PANIC(!strcmp(targ->route_table, ""), "route table not specified\n");
	lpm = prox_sh_find_socket(socket_id, targ->route_table);
	if (!lpm) {
		int ret = lua_to_lpm4(prox_lua(), GLOBAL, targ->route_table, socket_id, &lpm);
		PROX_PANIC(ret, "Failed to load IPv4 LPM:\n%s\n", get_lua_to_errors());
		prox_sh_add_socket(socket_id, targ->route_table, lpm);
	}
	task->ipv4_lpm = lpm->rte_lpm;
	task->next_hops = lpm->next_hops;

	task->qinq_tag = targ->qinq_tag;
	task->local_ipv4 = targ->local_ipv4;
	task->runtime_flags = targ->runtime_flags;
	if (strcmp(targ->task_init->sub_mode_str, "pe"))
		PROX_PANIC(targ->qinq_gre_table == NULL, "can't set up qinq gre\n");

	task->qinq_gre_table = targ->qinq_gre_table;

	if (targ->cpe_table_timeout_ms) {
		targ->lconf->period_func = check_expire_cpe;
		task->expire_cpe.cpe_table = task->cpe_table;
		targ->lconf->period_data = &task->expire_cpe;
		targ->lconf->period_timeout = msec_to_tsc(500) / NUM_VCPES;
	}
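
	/* The rte_table_hash f_lookup() API takes an array of mbuf pointers and
	   reads each key at a fixed offset from the mbuf pointer. The loop below
	   builds "fake" mbuf pointers positioned so that this key offset lands
	   exactly on task->keys[i] (presumably the table was created with the
	   key offset set to sizeof(struct rte_mbuf)). This lets the lookup run
	   on the pre-extracted QinQ keys without touching the real mbufs. */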
	for (uint32_t i = 0; i < 64; ++i) {
		task->fake_packets[i] = (struct rte_mbuf*)((uint8_t*)&task->keys[i] - sizeof (struct rte_mbuf));
	}

	if (task->runtime_flags & TASK_ROUTING) {
		if (targ->nb_txrings) {
			struct task_args *dtarg;
			struct core_task ct;

			for (uint32_t i = 0; i < targ->nb_txrings; ++i) {
				ct = targ->core_task_set[0].core_task[i];
				dtarg = core_targ_get(ct.core, ct.task);
				dtarg = find_reachable_task_sending_to_port(dtarg);

				PROX_PANIC(dtarg == NULL, "Error finding destination port through other tasks for outgoing ring %u\n", i);
				task->src_mac[i] = *(uint64_t*)&prox_port_cfg[dtarg->tx_port_queue[0].port].eth_addr;
			}
		}
		else {
			for (uint32_t i = 0; i < targ->nb_txports; ++i) {
				task->src_mac[i] = *(uint64_t*)&prox_port_cfg[targ->tx_port_queue[i].port].eth_addr;
			}
		}
	}

	if (targ->runtime_flags & TASK_CTRL_HANDLE_ARP) {
		targ->lconf->ctrl_func_p[targ->task] = arp_update;
	}

	/* Copy the mapping from a sibling task which is configured
	   with mode encap4. The mapping is constant, so it is faster
	   to apply it when entries are added (least common case)
	   instead of re-applying it for every packet (most common
	   case). */
	for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) {
		enum task_mode smode = targ->lconf->targs[task_id].mode;
		if (QINQ_ENCAP4 == smode) {
			for (uint8_t i = 0; i < PROX_MAX_PORTS; ++i) {
				task->mapping[i] = targ->lconf->targs[task_id].mapping[i];
			}
		}
	}

	struct prox_port_cfg *port = find_reachable_port(targ);
	if (port) {
		task->offload_crc = port->capabilities.tx_offload_cksum;
	}

	// By default this control function is called 1K times per second, capping ARP
	// handling at 64K packets per second. With 4 interfaces sending to this task,
	// that is roughly 0.1% of the workload. ARP packets beyond that rate are
	// dropped, or will dramatically slow down the LB when running in "no drop" mode.
	targ->lconf->ctrl_timeout = freq_to_tsc(targ->ctrl_freq);
	targ->lconf->ctrl_func_m[targ->task] = arp_msg;
}
static void early_init_table(struct task_args *targ)
{
	if (!targ->qinq_gre_table && !targ->cpe_table) {
		init_qinq_gre_table(targ, get_qinq_gre_map(targ));
		init_cpe4_table(targ);
	}
}
static inline void extract_key_bulk(struct rte_mbuf **mbufs, uint16_t n_pkts, struct task_qinq_decap4 *task)
{
	for (uint16_t j = 0; j < n_pkts; ++j) {
		extract_key_cpe(mbufs[j], &task->keys[j]);
	}
}
__attribute__((cold)) static void handle_error(struct rte_mbuf *mbuf)
{
	struct cpe_pkt *packet = rte_pktmbuf_mtod(mbuf, struct cpe_pkt *);
#ifdef USE_QINQ
	uint64_t key = (*(uint64_t*)(((uint8_t *)packet) + 12)) & 0xFF0FFFFFFF0FFFFF;
	uint32_t svlan = packet->qinq_hdr.svlan.vlan_tci;
	uint32_t cvlan = packet->qinq_hdr.cvlan.vlan_tci;

	svlan = rte_be_to_cpu_16(svlan & 0xFF0F);
	cvlan = rte_be_to_cpu_16(cvlan & 0xFF0F);
#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0)
	plogx_err("Can't convert key %016lx qinq %d|%d (%x|%x) to gre_id, rss=%x flags=%lx, status_err_len=%lx, L2Tag=%d type=%d\n",
		  key, svlan, cvlan, svlan, cvlan, mbuf->hash.rss, mbuf->ol_flags, mbuf->udata64, mbuf->vlan_tci_outer, mbuf->packet_type);
#elif RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
	plogx_err("Can't convert key %016lx qinq %d|%d (%x|%x) to gre_id, rss=%x flags=%lx, status_err_len=%lx, L2Tag=%d type=%d\n",
		  key, svlan, cvlan, svlan, cvlan, mbuf->hash.rss, mbuf->ol_flags, mbuf->udata64, mbuf->reserved, mbuf->packet_type);
#else
	plogx_err("Can't convert key %016lx qinq %d|%d (%x|%x) to gre_id, flags=%x, L2Tag=%d\n",
		  key, svlan, cvlan, svlan, cvlan, mbuf->ol_flags, mbuf->reserved);
#endif
#else
	plogx_err("Can't convert ip %x to gre_id\n", rte_bswap32(packet->ipv4_hdr.src_addr));
#endif
}
static int add_cpe_entry(struct rte_table_hash *hash, struct cpe_key *key, struct cpe_data *data)
{
	void* entry_in_hash;
	int ret, key_found = 0;

	ret = rte_table_hash_key8_ext_dosig_ops.
		f_add(hash, key, data, &key_found, &entry_in_hash);
	if (unlikely(ret)) {
		plogx_err("Failed to add key: ip %x, gre %x\n", key->ip, key->gre_id);
		return 1;
	}
	return 0;
}
static void extract_key_data_arp(struct rte_mbuf* mbuf, struct cpe_key* key, struct cpe_data* data, const struct qinq_gre_data* entry, uint64_t cpe_timeout, uint8_t* mapping)
{
	const struct cpe_packet_arp *packet = rte_pktmbuf_mtod(mbuf, const struct cpe_packet_arp *);
	uint32_t svlan = packet->qinq_hdr.svlan.vlan_tci & 0xFF0F;
	uint32_t cvlan = packet->qinq_hdr.cvlan.vlan_tci & 0xFF0F;
	uint8_t port_id;

	key->ip = packet->arp.data.spa;
	key->gre_id = entry->gre_id;

	data->mac_port_8bytes = *((const uint64_t *)(&packet->qinq_hdr.s_addr));
	data->qinq_svlan = svlan;
	data->qinq_cvlan = cvlan;
#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
	port_id = mbuf->port;
#else
	port_id = mbuf->pkt.in_port;
#endif
	uint8_t mapped = mapping[port_id];
	data->mac_port.out_idx = mapped;

	if (unlikely(mapped == 255)) {
		/* This error only occurs if the system is configured incorrectly */
		plog_warn("Failed adding packet: unknown mapping for port %d", port_id);
		data->mac_port.out_idx = 0;
	}

	data->user = entry->user;
	data->tsc = rte_rdtsc() + cpe_timeout;
}
void arp_msg_to_str(char *str, struct arp_msg *msg)
{
	sprintf(str, "%u %u %u %u %u.%u.%u.%u %x:%x:%x:%x:%x:%x %u\n",
		msg->data.mac_port.out_idx, msg->key.gre_id, msg->data.qinq_svlan, msg->data.qinq_cvlan,
		msg->key.ip_bytes[0], msg->key.ip_bytes[1], msg->key.ip_bytes[2], msg->key.ip_bytes[3],
		msg->data.mac_port_b[0], msg->data.mac_port_b[1], msg->data.mac_port_b[2],
		msg->data.mac_port_b[3], msg->data.mac_port_b[4], msg->data.mac_port_b[5], msg->data.user);
}
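
/* The textual form produced above and parsed below is
 *   <out_idx> <gre_id> <svlan> <cvlan> <a.b.c.d> <m:m:m:m:m:m> <user>
 * e.g. (illustrative values only): "0 42 17 34 10.0.0.1 a0:b1:c2:d3:e4:f5 7"
 */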
int str_to_arp_msg(struct arp_msg *msg, const char *str)
{
	uint32_t ip[4], interface, gre_id, svlan, cvlan, mac[6], user;

	int ret = sscanf(str, "%u %u %u %u %u.%u.%u.%u %x:%x:%x:%x:%x:%x %u",
			 &interface, &gre_id, &svlan, &cvlan,
			 ip, ip + 1, ip + 2, ip + 3,
			 mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5, &user);

	for (uint8_t i = 0; i < 4; ++i)
		msg->key.ip_bytes[i] = ip[i];
	msg->key.gre_id = gre_id;

	for (uint8_t i = 0; i < 6; ++i)
		msg->data.mac_port_b[i] = mac[i];
	msg->data.qinq_svlan = svlan;
	msg->data.qinq_cvlan = cvlan;
	msg->data.user = user;
	msg->data.mac_port.out_idx = interface;

	/* sscanf must have converted all 15 fields */
	return ret != 15;
}
void arp_update_from_msg(struct rte_table_hash * cpe_table, struct arp_msg **msgs, uint16_t n_msgs, uint64_t cpe_timeout)
{
	int ret, key_found = 0;
	void* entry_in_hash;

	for (uint16_t i = 0; i < n_msgs; ++i) {
		msgs[i]->data.tsc = rte_rdtsc() + cpe_timeout;
		ret = rte_table_hash_key8_ext_dosig_ops.
			f_add(cpe_table, &msgs[i]->key, &msgs[i]->data, &key_found, &entry_in_hash);
		if (unlikely(ret))
			plogx_err("Failed to add key %x, gre %x\n", msgs[i]->key.ip, msgs[i]->key.gre_id);
	}
}
static void arp_msg(struct task_base *tbase, void **data, uint16_t n_msgs)
{
	struct task_qinq_decap4 *task = (struct task_qinq_decap4 *)tbase;
	struct arp_msg **msgs = (struct arp_msg **)data;

	arp_update_from_msg(task->cpe_table, msgs, n_msgs, task->cpe_timeout);
}
static void arp_update(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_qinq_decap4 *task = (struct task_qinq_decap4 *)tbase;

	prefetch_pkts(mbufs, n_pkts);
	extract_key_bulk(mbufs, n_pkts, task);

	uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
	uint64_t lookup_hit_mask = 0;
	struct qinq_gre_data* entries[64];
	rte_table_hash_key8_ext_dosig_ops.f_lookup(task->qinq_gre_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries);
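
	/* Bit j of lookup_hit_mask is set iff key j matched an entry in
	   qinq_gre_table; entries[j] is only valid for those hits. */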
	TASK_STATS_ADD_RX(&task->base.aux->stats, n_pkts);
	for (uint16_t j = 0; j < n_pkts; ++j) {
		if (unlikely(!((lookup_hit_mask >> j) & 0x1))) {
			handle_error(mbufs[j]);
			rte_pktmbuf_free(mbufs[j]);
			continue;
		}

		struct cpe_key key;
		struct cpe_data data;

		extract_key_data_arp(mbufs[j], &key, &data, entries[j], task->cpe_timeout, task->mapping);

		void* entry_in_hash;
		int ret, key_found = 0;

		ret = rte_table_hash_key8_ext_dosig_ops.
			f_add(task->cpe_table, &key, &data, &key_found, &entry_in_hash);
		if (unlikely(ret)) {
			plogx_err("Failed to add key %x, gre %x\n", key.ip, key.gre_id);
			TASK_STATS_ADD_DROP_DISCARD(&task->base.aux->stats, 1);
		}
		else {
			/* should do ARP reply */
			TASK_STATS_ADD_DROP_HANDLED(&task->base.aux->stats, 1);
		}
		rte_pktmbuf_free(mbufs[j]);
	}
}
static int handle_qinq_decap4_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_qinq_decap4 *task = (struct task_qinq_decap4 *)tbase;
	uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
	struct qinq_gre_data* entries[64];
	uint8_t out[MAX_PKT_BURST];
	uint64_t lookup_hit_mask;

	prefetch_pkts(mbufs, n_pkts);

	// Prefetch the headroom, as we will prepend the outer headers and write to this cache line
	for (uint16_t j = 0; j < n_pkts; ++j) {
		PREFETCH0((rte_pktmbuf_mtod(mbufs[j], char*)-1));
	}

	extract_key_bulk(mbufs, n_pkts, task);
	rte_table_hash_key8_ext_dosig_ops.f_lookup(task->qinq_gre_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries);
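
	// Fast path: the whole burst hit the table, so skip the per-packet
	// hit-mask test; otherwise check each bit and discard the misses.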
	if (likely(lookup_hit_mask == pkts_mask)) {
		for (uint16_t j = 0; j < n_pkts; ++j) {
			out[j] = handle_qinq_decap4(task, mbufs[j], entries[j]);
		}
	}
	else {
		for (uint16_t j = 0; j < n_pkts; ++j) {
			if (unlikely(!((lookup_hit_mask >> j) & 0x1))) {
				// The lookup can fail if the packet does not carry the expected QinQ tags or is not an IPv4 packet
				handle_error(mbufs[j]);
				out[j] = OUT_DISCARD;
				continue;
			}
			out[j] = handle_qinq_decap4(task, mbufs[j], entries[j]);
		}
	}

	return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}
static inline void gre_encap(struct task_qinq_decap4 *task, uint32_t src_ipv4, struct rte_mbuf *mbuf, uint32_t gre_id)
{
#ifdef USE_QINQ
	struct ipv4_hdr *pip = (struct ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, struct qinq_hdr *));
#else
	struct ipv4_hdr *pip = (struct ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, struct ether_hdr *));
#endif
	uint16_t ip_len = rte_be_to_cpu_16(pip->total_length);
	uint16_t padlen = rte_pktmbuf_pkt_len(mbuf) - 20 - ip_len - sizeof(struct qinq_hdr);

	if (padlen) {
		rte_pktmbuf_trim(mbuf, padlen);
	}

	PROX_PANIC(rte_pktmbuf_data_len(mbuf) - padlen + 20 > ETHER_MAX_LEN,
		   "Would need to fragment packet new size = %u - not implemented\n",
		   rte_pktmbuf_data_len(mbuf) - padlen + 20);

#ifdef USE_QINQ
	/* prepend only 20 bytes instead of 28: 8 bytes are already there from the QinQ tags */
	struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, 20);
#else
	struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, 28);
#endif

	if (task->runtime_flags & TASK_TX_CRC) {
		/* calculate the IP CRC here to avoid problems with the -O3 flag with gcc */
#ifdef MPLS_ROUTING
		prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr) + sizeof(struct mpls_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
#else
		prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
#endif
	}

	/* Add the new outer IP header */
	struct ipv4_hdr *p_tunnel_ip = (struct ipv4_hdr *)(peth + 1);
	rte_memcpy(p_tunnel_ip, &tunnel_ip_proto, sizeof(struct ipv4_hdr));
	ip_len += sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr);
	p_tunnel_ip->total_length = rte_cpu_to_be_16(ip_len);
	p_tunnel_ip->src_addr = src_ipv4;

	/* Add GRE Header values */
	struct gre_hdr *pgre = (struct gre_hdr *)(p_tunnel_ip + 1);
	rte_memcpy(pgre, &gre_hdr_proto, sizeof(struct gre_hdr));
	pgre->gre_id = gre_id;

	peth->ether_type = ETYPE_IPv4;
}
static inline uint16_t calc_padlen(const struct rte_mbuf *mbuf, const uint16_t ip_len)
{
	return rte_pktmbuf_pkt_len(mbuf) - DOWNSTREAM_DELTA - ip_len - offsetof(struct cpe_pkt, ipv4_hdr);
}
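
/* Routed variant of gre_encap(): grows the frame by DOWNSTREAM_DELTA bytes
 * (defined elsewhere; the room needed for the outer Ethernet/[MPLS]/IPv4/GRE
 * headers), resolves the next hop with an LPM lookup on the inner destination
 * address, and fills in the outer headers from the selected next_hop entry.
 * Returns the egress port index, or ROUTE_ERR when no route matches. */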
static inline uint8_t gre_encap_route(uint32_t src_ipv4, struct rte_mbuf *mbuf, uint32_t gre_id, struct task_qinq_decap4 *task)
{
	PROX_PANIC(rte_pktmbuf_data_len(mbuf) + DOWNSTREAM_DELTA > ETHER_MAX_LEN,
		   "Would need to fragment packet new size = %u - not implemented\n",
		   rte_pktmbuf_data_len(mbuf) + DOWNSTREAM_DELTA);

	struct core_net_pkt_m *packet = (struct core_net_pkt_m *)rte_pktmbuf_prepend(mbuf, DOWNSTREAM_DELTA);
	PROX_ASSERT(packet);

	struct ipv4_hdr *pip = &((struct cpe_pkt_delta *)packet)->pkt.ipv4_hdr;
	uint16_t ip_len = rte_be_to_cpu_16(pip->total_length);

	/* rte_lpm_lookup returns 0 on success, -ENOENT on failure (or -EINVAL if the first or last parameter is NULL) */
#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,1)
	uint32_t next_hop_index;
#else
	uint8_t next_hop_index;
#endif
	if (unlikely(rte_lpm_lookup(task->ipv4_lpm, rte_bswap32(pip->dst_addr), &next_hop_index) != 0)) {
		plog_warn("lpm_lookup failed for ip %x: rc = %d\n", rte_bswap32(pip->dst_addr), -ENOENT);
		return ROUTE_ERR;
	}
	PREFETCH0(&task->next_hops[next_hop_index]);

	const uint16_t padlen = calc_padlen(mbuf, ip_len);
	if (padlen) {
		rte_pktmbuf_trim(mbuf, padlen);
	}
	const uint8_t port_id = task->next_hops[next_hop_index].mac_port.out_idx;

	*((uint64_t *)(&packet->ether_hdr.d_addr)) = task->next_hops[next_hop_index].mac_port_8bytes;
	*((uint64_t *)(&packet->ether_hdr.s_addr)) = task->src_mac[port_id];

#ifdef MPLS_ROUTING
	packet->mpls_bytes = task->next_hops[next_hop_index].mpls | 0x00010000; // Set BoS to 1
	packet->ether_hdr.ether_type = ETYPE_MPLSU;
#else
	packet->ether_hdr.ether_type = ETYPE_IPv4;
#endif

	/* Add the new outer IP header */
	rte_memcpy(&packet->tunnel_ip_hdr, &tunnel_ip_proto, sizeof(struct ipv4_hdr));
	ip_len += sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr);
	packet->tunnel_ip_hdr.total_length = rte_cpu_to_be_16(ip_len);
	packet->tunnel_ip_hdr.src_addr = src_ipv4;
	packet->tunnel_ip_hdr.dst_addr = task->next_hops[next_hop_index].ip_dst;

	if (task->runtime_flags & TASK_TX_CRC) {
		/* calculate the outer IP CRC here to avoid problems with the -O3 flag with gcc */
#ifdef MPLS_ROUTING
		prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(struct ether_hdr) + sizeof(struct mpls_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
#else
		prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
#endif
	}

	/* Add GRE Header values */
	rte_memcpy(&packet->gre_hdr, &gre_hdr_proto, sizeof(struct gre_hdr));
	packet->gre_hdr.gre_id = rte_be_to_cpu_32(gre_id);

	return port_id;
}
static void extract_key_data(struct rte_mbuf* mbuf, struct cpe_key* key, struct cpe_data* data, const struct qinq_gre_data* entry, uint64_t cpe_timeout, uint8_t *mapping)
{
	struct cpe_pkt *packet = rte_pktmbuf_mtod(mbuf, struct cpe_pkt *);
	uint8_t port_id;

#ifndef USE_QINQ
	/* Without QinQ tags, derive the vlan pair from the low 24 bits of the source IP */
	const uint32_t tmp = rte_bswap32(packet->ipv4_hdr.src_addr) & 0x00FFFFFF;
	const uint32_t svlan = rte_bswap16(tmp >> 12);
	const uint32_t cvlan = rte_bswap16(tmp & 0x0FFF);
#endif

	key->ip = packet->ipv4_hdr.src_addr;
	key->gre_id = entry->gre_id;

#ifdef USE_QINQ
	data->mac_port_8bytes = *((const uint64_t *)(&packet->qinq_hdr.s_addr));
	data->qinq_svlan = packet->qinq_hdr.svlan.vlan_tci & 0xFF0F;
	data->qinq_cvlan = packet->qinq_hdr.cvlan.vlan_tci & 0xFF0F;
#else
	data->mac_port_8bytes = *((const uint64_t *)(&packet->ether_hdr.s_addr));
	data->qinq_svlan = svlan;
	data->qinq_cvlan = cvlan;
#endif

#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
	port_id = mbuf->port;
#else
	port_id = mbuf->pkt.in_port;
#endif
	uint8_t mapped = mapping[port_id];
	data->mac_port.out_idx = mapped;

	if (unlikely(mapped == 255)) {
		/* This error only occurs if the system is configured incorrectly */
		plog_warn("Failed adding packet: unknown mapping for port %d", port_id);
		data->mac_port.out_idx = 0;
	}

	data->user = entry->user;
	data->tsc = rte_rdtsc() + cpe_timeout;
}
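
/* Per-packet work: depending on runtime_flags, learn the CPE (MAC, QinQ)
 * entry either from every packet (no ARP handling configured) or only from
 * ARP packets on the fast path, then GRE-encapsulate the packet, with or
 * without an LPM routing decision. */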
static uint8_t handle_qinq_decap4(struct task_qinq_decap4 *task, struct rte_mbuf *mbuf, struct qinq_gre_data* entry)
{
	if (!(task->runtime_flags & (TASK_CTRL_HANDLE_ARP|TASK_FP_HANDLE_ARP))) {
		// We learn CPE MAC addresses on every packet
		struct cpe_key key;
		struct cpe_data data;
		extract_key_data(mbuf, &key, &data, entry, task->cpe_timeout, task->mapping);
		//plogx_err("Adding key ip=%x/gre_id=%x data (svlan|cvlan)=%x|%x, rss=%x, gre_id=%x\n", key.ip, key.gre_id, data.qinq_svlan, data.qinq_cvlan, mbuf->hash.rss, entry->gre_id);

		if (add_cpe_entry(task->cpe_table, &key, &data)) {
			plog_warn("Failed to add ARP entry\n");
			return OUT_DISCARD;
		}
	}
	if (task->runtime_flags & TASK_FP_HANDLE_ARP) {
		// We learn CPE MAC addresses from ARP packets in the fast path
#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
		if (mbuf->packet_type == 0xB) {
			struct cpe_key key;
			struct cpe_data data;
			extract_key_data_arp(mbuf, &key, &data, entry, task->cpe_timeout, task->mapping);

			if (add_cpe_entry(task->cpe_table, &key, &data)) {
				plog_warn("Failed to add ARP entry\n");
				return OUT_DISCARD;
			}
			return OUT_HANDLED;
		}
#else
		struct cpe_pkt *packet = rte_pktmbuf_mtod(mbuf, struct cpe_pkt*);
		if (packet->qinq_hdr.svlan.eth_proto == task->qinq_tag &&
		    packet->qinq_hdr.ether_type == ETYPE_ARP) {
			struct cpe_key key;
			struct cpe_data data;
			extract_key_data_arp(mbuf, &key, &data, entry, task->cpe_timeout, task->mapping);

			if (add_cpe_entry(task->cpe_table, &key, &data)) {
				plog_warn("Failed to add ARP entry\n");
				return OUT_DISCARD;
			}
			return OUT_HANDLED;
		}
#endif
	}

	if (task->runtime_flags & TASK_ROUTING) {
		uint8_t tx_portid;
		tx_portid = gre_encap_route(task->local_ipv4, mbuf, entry->gre_id, task);

		return tx_portid == ROUTE_ERR ? OUT_DISCARD : tx_portid;
	}

	gre_encap(task, task->local_ipv4, mbuf, entry->gre_id);

	return 0;
}
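
/* Flow iterator: walks the QinQ-to-GRE map, keeping only the entries owned
 * by this worker; flows are sharded across workers by gre_id modulo
 * nb_slave_threads. */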
static void flow_iter_next(struct flow_iter *iter, struct task_args *targ)
{
	do {
		iter->idx++;
	} while (iter->idx < (int)get_qinq_gre_map(targ)->count &&
		 get_qinq_gre_map(targ)->entries[iter->idx].gre_id % targ->nb_slave_threads != targ->worker_thread_id);
}

static void flow_iter_beg(struct flow_iter *iter, struct task_args *targ)
{
	iter->idx = -1;
	flow_iter_next(iter, targ);
}

static int flow_iter_is_end(struct flow_iter *iter, struct task_args *targ)
{
	return iter->idx == (int)get_qinq_gre_map(targ)->count;
}

static uint16_t flow_iter_get_svlan(struct flow_iter *iter, struct task_args *targ)
{
	return get_qinq_gre_map(targ)->entries[iter->idx].svlan;
}

static uint16_t flow_iter_get_cvlan(struct flow_iter *iter, struct task_args *targ)
{
	return get_qinq_gre_map(targ)->entries[iter->idx].cvlan;
}
static struct task_init task_init_qinq_decapv4_table = {
	.mode = QINQ_DECAP4,
	.mode_str = "qinqdecapv4",
	.early_init = early_init_table,
	.init = init_task_qinq_decap4,
	.handle = handle_qinq_decap4_bulk,
	.flag_features = TASK_FEATURE_ROUTING,
	.flow_iter = {
		.beg = flow_iter_beg,
		.is_end = flow_iter_is_end,
		.next = flow_iter_next,
		.get_svlan = flow_iter_get_svlan,
		.get_cvlan = flow_iter_get_cvlan,
	},
	.size = sizeof(struct task_qinq_decap4)
};
__attribute__((constructor)) static void reg_task_qinq_decap4(void)
{
	reg_task(&task_init_qinq_decapv4_table);
}