// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <rte_table_hash.h>
#include <rte_hash_crc.h>
#include <rte_cycles.h>

#include "mbuf_utils.h"
#include "prox_malloc.h"
#include "prox_lua.h"
#include "prox_lua_types.h"
#include "handle_qinq_encap4.h"
#include "handle_qinq_decap4.h"
#include "prox_args.h"
#include "defines.h"
#include "tx_pkt.h"
#include "prefetch.h"
#include "pkt_prototypes.h"
#include "hash_entry_types.h"
#include "task_init.h"
#include "bng_pkts.h"
#include "prox_cksum.h"
#include "hash_utils.h"
#include "lconf.h"
#include "prox_port_cfg.h"
#include "handle_lb_net.h"
#include "log.h"
#include "quit.h"
#include "prox_cfg.h"
#include "prox_shared.h"
#include "prox_compat.h"

static struct cpe_table_data *read_cpe_table_config(const char *name, uint8_t socket)
{
	struct lua_State *L = prox_lua();
	struct cpe_table_data *ret = NULL;

	lua_getglobal(L, name);
	PROX_PANIC(lua_isnil(L, -1), "Couldn't find cpe_table data\n");

	return ret;
}
struct qinq_gre_map *get_qinq_gre_map(struct task_args *targ)
{
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
	struct qinq_gre_map *ret = prox_sh_find_socket(socket_id, "qinq_gre_map");

	if (!ret) {
		PROX_PANIC(!strcmp(targ->user_table, ""), "No user table defined\n");
		int rv = lua_to_qinq_gre_map(prox_lua(), GLOBAL, targ->user_table, socket_id, &ret);
		PROX_PANIC(rv, "Error reading mapping between qinq and gre from qinq_gre_map:\n%s\n",
			   get_lua_to_errors());
		prox_sh_add_socket(socket_id, "qinq_gre_map", ret);
	}
	return ret;
}

/* Encapsulate IPv4 packets in QinQ. QinQ tags are derived from gre_id. */
int handle_qinq_encap4_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
static void arp_msg(struct task_base *tbase, void **data, uint16_t n_msgs);
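/* Rough upstream picture (a sketch inferred from the handlers below, not an
 * authoritative wire format): packets arrive from the load balancer as
 * Ethernet/IPv4/GRE (struct core_net_pkt), the outer headers are stripped
 * with rte_pktmbuf_adj(mbuf, UPSTREAM_DELTA), and restore_cpe() rebuilds an
 * Ethernet header carrying the two VLAN tags looked up per CPE:
 *
 *   in : | eth | ipv4 | gre | inner ipv4 | payload |
 *   out: | eth | svlan | cvlan | inner ipv4 | payload |
 */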

static void fill_table(struct task_args *targ, struct rte_table_hash *table)
{
	struct cpe_table_data *cpe_table_data;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
	int ret = lua_to_cpe_table_data(prox_lua(), GLOBAL, targ->cpe_table_name, socket_id, &cpe_table_data);
	const uint8_t n_slaves = targ->nb_slave_threads;
	const uint8_t worker_id = targ->worker_thread_id;

	for (uint32_t i = 0; i < cpe_table_data->n_entries; ++i) {
		if (rte_bswap32(cpe_table_data->entries[i].ip) % n_slaves != worker_id) {
			continue;
		}
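		/* Illustrative example of the filter above (hypothetical
		 * values): with n_slaves = 8, an entry for 10.0.0.5 (host
		 * order 0x0a000005) satisfies 0x0a000005 % 8 == 5, so only
		 * worker 5 installs it in its table. */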

		struct cpe_table_entry *entry = &cpe_table_data->entries[i];

		uint32_t port_idx = prox_cfg.cpe_table_ports[entry->port_idx];
		PROX_PANIC(targ->mapping[port_idx] == 255, "Error reading cpe table: mapping for port %d is missing\n", port_idx);

		struct cpe_key key = {
			.ip = entry->ip,
			.gre_id = entry->gre_id,
		};

		struct cpe_data data = {
			.qinq_svlan = entry->svlan,
			.qinq_cvlan = entry->cvlan,
			.user = entry->user,
			.mac_port = {
				.mac = entry->eth_addr,
				.out_idx = targ->mapping[port_idx],
			},
			.tsc = UINT64_MAX,
		};

		int key_found = 0;
		void *entry_in_hash = NULL;
		prox_rte_table_key8_add(table, &key, &data, &key_found, &entry_in_hash);
	}
}

static void init_task_qinq_encap4(struct task_base *tbase, struct task_args *targ)
{
	struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)tbase;
	int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	task->qinq_tag = targ->qinq_tag;
	task->cpe_table = targ->cpe_table;
	task->cpe_timeout = msec_to_tsc(targ->cpe_table_timeout_ms);

	if (!strcmp(targ->task_init->sub_mode_str, "pe")) {
		PROX_PANIC(!strcmp(targ->cpe_table_name, ""), "CPE table not configured\n");
		fill_table(targ, task->cpe_table);
	}

#ifdef ENABLE_EXTRA_USER_STATISTICS
	task->n_users = targ->n_users;
	task->stats_per_user = prox_zmalloc(targ->n_users * sizeof(uint32_t), socket_id);
#endif
	if (targ->runtime_flags & TASK_CLASSIFY) {
		PROX_PANIC(!strcmp(targ->dscp, ""), "DSCP table not specified\n");
		task->dscp = prox_sh_find_socket(socket_id, targ->dscp);
		if (!task->dscp) {
			int ret = lua_to_dscp(prox_lua(), GLOBAL, targ->dscp, socket_id, &task->dscp);
			PROX_PANIC(ret, "Failed to create dscp table from config:\n%s\n",
				   get_lua_to_errors());
			prox_sh_add_socket(socket_id, targ->dscp, task->dscp);
		}
	}

	task->runtime_flags = targ->runtime_flags;

	for (uint32_t i = 0; i < 64; ++i) {
		task->fake_packets[i] = (struct rte_mbuf*)((uint8_t*)&task->keys[i] - sizeof(struct rte_mbuf));
	}
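	/* Explanatory note on the loop above (our reading of the layout, not
	 * quoted from the DPDK docs): the key8 lookup receives rte_mbuf
	 * pointers and reads each 8-byte key at key_offset =
	 * HASH_METADATA_OFFSET(0), i.e. just past the mbuf struct. Pointing
	 * each "fake" mbuf sizeof(struct rte_mbuf) bytes before
	 * task->keys[i] makes the table read keys[i] directly, with no key
	 * copies into real mbufs. */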

	targ->lconf->ctrl_timeout = freq_to_tsc(targ->ctrl_freq);
	targ->lconf->ctrl_func_m[targ->task] = arp_msg;

	struct prox_port_cfg *port = find_reachable_port(targ);
	if (port) {
		task->offload_crc = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
	}

	/* TODO: check whether the reverse mapping should be limited to the
	   elements that have changed in the mapping. */
	for (uint32_t i = 0; i < sizeof(targ->mapping)/sizeof(targ->mapping[0]); ++i) {
		task->src_mac[targ->mapping[i]] = *(uint64_t*)&prox_port_cfg[i].eth_addr;
	}
	/* task->src_mac[entry->port_idx] = *(uint64_t*)&prox_port_cfg[entry->port_idx].eth_addr; */
}

static void arp_msg(struct task_base *tbase, void **data, uint16_t n_msgs)
{
	struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)tbase;
	struct arp_msg **msgs = (struct arp_msg **)data;

	arp_update_from_msg(task->cpe_table, msgs, n_msgs, task->cpe_timeout);
}

static inline void add_key(struct task_args *targ, struct qinq_gre_map *qinq_gre_map, struct rte_table_hash *qinq_gre_table, uint32_t i, uint32_t *count)
{
	struct qinq_gre_data entry = {
		.gre_id = qinq_gre_map->entries[i].gre_id,
		.user = qinq_gre_map->entries[i].user,
	};

#ifdef USE_QINQ
	struct vlans qinq2 = {
		.svlan = {.eth_proto = targ->qinq_tag, .vlan_tci = qinq_gre_map->entries[i].svlan},
		.cvlan = {.eth_proto = ETYPE_VLAN, .vlan_tci = qinq_gre_map->entries[i].cvlan}
	};

	int key_found = 0;
	void *entry_in_hash = NULL;
	prox_rte_table_key8_add(qinq_gre_table, &qinq2, &entry, &key_found, &entry_in_hash);

	plog_dbg("Core %u adding user %u (tag %x svlan %x cvlan %x), rss=%x\n",
		 targ->lconf->id, qinq_gre_map->entries[i].user, qinq2.svlan.eth_proto,
		 rte_bswap16(qinq_gre_map->entries[i].svlan),
		 rte_bswap16(qinq_gre_map->entries[i].cvlan),
		 qinq_gre_map->entries[i].rss);
#else
	/* The lower 3 bytes of the IPv4 address contain svlan/cvlan. */
	uint64_t ip = ((uint32_t)rte_bswap16(qinq_gre_map->entries[i].svlan) << 12) |
		rte_bswap16(qinq_gre_map->entries[i].cvlan);
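	/* Worked example (hypothetical VLAN values): svlan 0x012 and cvlan
	 * 0x345 in host order give ip = (0x012 << 12) | 0x345 = 0x12345,
	 * i.e. the low 24 bits of the 10.x.y.z address that restore_cpe()
	 * reconstructs on the way back. */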
	int key_found = 0;
	void *entry_in_hash = NULL;
	prox_rte_table_key8_add(qinq_gre_table, &ip, &entry, &key_found, &entry_in_hash);

	plog_dbg("Core %u hash table add: key = %016"PRIx64"\n",
		 targ->lconf->id, ip);
#endif
	(*count)++;
}

void init_qinq_gre_table(struct task_args *targ, struct qinq_gre_map *qinq_gre_map)
{
	struct rte_table_hash *qinq_gre_table;
	uint8_t table_part = targ->nb_slave_threads;
	if (!rte_is_power_of_2(table_part)) {
		table_part = rte_align32pow2(table_part) >> 1;
	}
	if (table_part == 0)
		table_part = 1;

	uint32_t n_entries = MAX_GRE / table_part;
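	/* Example of the rounding above (illustrative): for 6 worker threads,
	 * rte_align32pow2(6) = 8 and 8 >> 1 = 4, so table_part becomes the
	 * largest power of two not above the thread count. Dividing MAX_GRE
	 * by the smaller value oversizes each per-worker table rather than
	 * undersizing it. */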
	static char hash_name[30];
	sprintf(hash_name, "qinq_gre_hash_table_%03d", targ->lconf->id);

	struct prox_rte_table_params table_hash_params = {
		.name = hash_name,
		.key_size = 8,
		.n_keys = n_entries,
		.n_buckets = n_entries,
		.f_hash = (rte_table_hash_op_hash)hash_crc32,
		.seed = 0,
		.key_offset = HASH_METADATA_OFFSET(0),
		.key_mask = NULL
	};

	qinq_gre_table = prox_rte_table_create(&table_hash_params, rte_lcore_to_socket_id(targ->lconf->id), sizeof(struct qinq_gre_data));

	// The LB configuration is known by the network load balancer.
	// Find the LB network load balancer, i.e. the ENCAP friend.
	for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) {
		enum task_mode smode = targ->lconf->targs[task_id].mode;
		if (QINQ_ENCAP4 == smode) {
			targ->lb_friend_core = targ->lconf->targs[task_id].lb_friend_core;
			targ->lb_friend_task = targ->lconf->targs[task_id].lb_friend_task;
		}
	}
	// Packets come from the load balancer, which could balance on the
	// gre_id LSB, on a QinQ hash or on QinQ RSS.
	uint32_t flag_features = 0;
	if (targ->lb_friend_core != 0xFF) {
		struct task_args *lb_targ = &lcore_cfg[targ->lb_friend_core].targs[targ->lb_friend_task];
		flag_features = lb_targ->task_init->flag_features;
		plog_info("\t\tWT %d updated features to %x from friend %d\n", targ->lconf->id, flag_features, targ->lb_friend_core);
	} else {
		plog_info("\t\tWT %d has no friend\n", targ->lconf->id);
	}
	if (targ->nb_slave_threads == 0) {
		// No slave threads, i.e. using RSS
		plog_info("feature was %x and is now %x\n", flag_features, TASK_FEATURE_LUT_QINQ_RSS);
		flag_features = TASK_FEATURE_LUT_QINQ_RSS;
	}
	if ((flag_features & (TASK_FEATURE_GRE_ID|TASK_FEATURE_LUT_QINQ_RSS|TASK_FEATURE_LUT_QINQ_HASH)) == 0) {
		plog_info("\t\tCould not find flag feature from the load balancer => assuming TASK_FEATURE_GRE_ID\n");
		flag_features = TASK_FEATURE_GRE_ID;
	}

	/* Only store the QinQ <-> GRE mapping for packets that are handled by this worker thread. */
	uint32_t count = 0;
	if (flag_features & TASK_FEATURE_LUT_QINQ_RSS) {
		// If there is a load balancer, the number of worker threads is given by targ->nb_slave_threads and n_rxq = 0.
		// If there is no load balancer, the number of worker threads is given by n_rxq and nb_slave_threads = 0.
		uint8_t nb_worker_threads, worker_thread_id;
		if (targ->nb_slave_threads) {
			nb_worker_threads = targ->nb_slave_threads;
			worker_thread_id = targ->worker_thread_id;
		} else if (prox_port_cfg[targ->rx_port_queue[0].port].n_rxq) {
			nb_worker_threads = prox_port_cfg[targ->rx_port_queue[0].port].n_rxq;
			worker_thread_id = targ->rx_port_queue[0].queue;
		} else {
			PROX_PANIC(1, "Unexpected: unknown number of worker threads\n");
		}
		plog_info("\t\tUsing %d worker threads, id %d\n", nb_worker_threads, worker_thread_id);
		for (uint32_t i = 0; i < qinq_gre_map->count; ++i) {
			if (targ->nb_slave_threads == 0 || rss_to_queue(qinq_gre_map->entries[i].rss, nb_worker_threads) == worker_thread_id) {
				add_key(targ, qinq_gre_map, qinq_gre_table, i, &count);
				//plog_info("Queue %d adding key %16lx, svlan %x cvlan %x, rss=%x\n", targ->rx_queue, *(uint64_t *)q, qinq_to_gre_lookup[i].svlan, qinq_to_gre_lookup[i].cvlan, qinq_to_gre_lookup[i].rss);
			}
		}
		plog_info("\t\tAdded %d entries to worker thread %d\n", count, worker_thread_id);
	} else if (flag_features & TASK_FEATURE_LUT_QINQ_HASH) {
		for (uint32_t i = 0; i < qinq_gre_map->count; ++i) {
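			/* The 0xFF0F mask below clears the PCP/DEI nibble of
			 * the big-endian TCI so that only the 12-bit VLAN ID
			 * is hashed. Illustrative example: network-order TCI
			 * 0xB123 is read as 0x23B1 on a little-endian CPU;
			 * & 0xFF0F gives 0x2301, and rte_bswap16() then
			 * yields VLAN ID 0x123. */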
			uint64_t cvlan = rte_bswap16(qinq_gre_map->entries[i].cvlan & 0xFF0F);
			uint64_t svlan = rte_bswap16(qinq_gre_map->entries[i].svlan & 0xFF0F);
			uint64_t qinq = rte_bswap64((svlan << 32) | cvlan);
			uint8_t queue = rte_hash_crc(&qinq, 8, 0) % targ->nb_slave_threads;
			if (queue == targ->worker_thread_id) {
				add_key(targ, qinq_gre_map, qinq_gre_table, i, &count);
			}
		}
		plog_info("\t\tAdded %d entries to WT %d\n", count, targ->worker_thread_id);
	} else if (flag_features & TASK_FEATURE_GRE_ID) {
		for (uint32_t i = 0; i < qinq_gre_map->count; ++i) {
			if (qinq_gre_map->entries[i].gre_id % targ->nb_slave_threads == targ->worker_thread_id) {
				add_key(targ, qinq_gre_map, qinq_gre_table, i, &count);
			}
		}
	}

	for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) {
		enum task_mode smode = targ->lconf->targs[task_id].mode;
		if (QINQ_DECAP4 == smode) {
			targ->lconf->targs[task_id].qinq_gre_table = qinq_gre_table;
		}
	}
}

void init_cpe4_table(struct task_args *targ)
{
	char name[64];
	sprintf(name, "core_%u_CPEv4Table", targ->lconf->id);

	uint8_t table_part = targ->nb_slave_threads;
	if (!rte_is_power_of_2(table_part)) {
		table_part = rte_align32pow2(table_part) >> 1;
	}
	if (table_part == 0)
		table_part = 1;

	uint32_t n_entries = MAX_GRE / table_part;

	static char hash_name[30];
	sprintf(hash_name, "cpe4_table_%03d", targ->lconf->id);

	struct prox_rte_table_params table_hash_params = {
		.name = hash_name,
		.key_size = 8,
		.n_keys = n_entries,
		.n_buckets = n_entries >> 1,
		.f_hash = (rte_table_hash_op_hash)hash_crc32,
		.seed = 0,
		.key_offset = HASH_METADATA_OFFSET(0),
		.key_mask = NULL
	};

	size_t entry_size = sizeof(struct cpe_data);
	if (!rte_is_power_of_2(entry_size)) {
		entry_size = rte_align32pow2(entry_size);
	}

	struct rte_table_hash *phash = prox_rte_table_create(&table_hash_params, rte_lcore_to_socket_id(targ->lconf->id), entry_size);
	PROX_PANIC(NULL == phash, "Unable to allocate memory for IPv4 hash table on core %u\n", targ->lconf->id);

	/* For locality, copy the pointer to the port structure where it is needed at packet handling time. */
	for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) {
		enum task_mode smode = targ->lconf->targs[task_id].mode;
		if (QINQ_ENCAP4 == smode || QINQ_DECAP4 == smode) {
			targ->lconf->targs[task_id].cpe_table = phash;
		}
	}
}

static void early_init_table(struct task_args *targ)
{
	if (!targ->cpe_table) {
		init_cpe4_table(targ);
	}
}

static inline void restore_cpe(struct cpe_pkt *packet, struct cpe_data *table, __attribute__((unused)) uint16_t qinq_tag, uint64_t *src_mac)
{
#ifdef USE_QINQ
	struct qinq_hdr *pqinq = &packet->qinq_hdr;
	rte_memcpy(pqinq, &qinq_proto, sizeof(struct qinq_hdr));
	(*(uint64_t *)(&pqinq->d_addr)) = table->mac_port_8bytes;
	/* set the source address as well now */
	*((uint64_t *)(&pqinq->s_addr)) = *((uint64_t *)&src_mac[table->mac_port.out_idx]);
	pqinq->svlan.vlan_tci = table->qinq_svlan;
	pqinq->cvlan.vlan_tci = table->qinq_cvlan;
	pqinq->svlan.eth_proto = qinq_tag;
	pqinq->cvlan.eth_proto = ETYPE_VLAN;
	pqinq->ether_type = ETYPE_IPv4;
#else
	(*(uint64_t *)(&packet->ether_hdr.d_addr)) = table->mac_port_8bytes;
	/* set the source address as well now */
	*((uint64_t *)(&packet->ether_hdr.s_addr)) = *((uint64_t *)&src_mac[table->mac_port.out_idx]);
	packet->ether_hdr.ether_type = ETYPE_IPv4;
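
	/* In this build the subscriber is encoded directly in the
	 * destination address: 10.<svlan>.<cvlan> packed as 8/12/12 bits.
	 * Worked example (hypothetical values): svlan 0x012, cvlan 0x345
	 * => 0x0a012345, i.e. 10.1.35.69. */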
	packet->ipv4_hdr.dst_addr = rte_bswap32(10 << 24 | rte_bswap16(table->qinq_svlan) << 12 | rte_bswap16(table->qinq_cvlan));
#endif
}

static inline uint8_t handle_qinq_encap4(struct task_qinq_encap4 *task, struct cpe_pkt *cpe_pkt, struct rte_mbuf *mbuf, struct cpe_data *entry);

/* Same functionality as handle_qinq_encap4_bulk, but untags MPLS as well. */
static int handle_qinq_encap4_untag_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)tbase;
	uint8_t out[MAX_PKT_BURST];
	prefetch_pkts(mbufs, n_pkts);

	for (uint16_t j = 0; j < n_pkts; ++j) {
		if (likely(mpls_untag(mbufs[j]))) {
			struct cpe_pkt *cpe_pkt = (struct cpe_pkt *)rte_pktmbuf_adj(mbufs[j], UPSTREAM_DELTA);
			out[j] = handle_qinq_encap4(task, cpe_pkt, mbufs[j], NULL);
		} else {
			out[j] = OUT_DISCARD;
		}
	}

	return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}

static inline void extract_key_bulk(struct task_qinq_encap4 *task, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	for (uint16_t j = 0; j < n_pkts; ++j) {
		extract_key_core(mbufs[j], &task->keys[j]);
	}
}

__attribute__((cold)) static void handle_error(struct rte_mbuf *mbuf)
{
	struct core_net_pkt *core_pkt = rte_pktmbuf_mtod(mbuf, struct core_net_pkt *);
	uint32_t dst_ip = core_pkt->ip_hdr.dst_addr;
	uint32_t le_gre_id = rte_be_to_cpu_32(core_pkt->gre_hdr.gre_id);

	plogx_dbg("Unknown IP %x/gre_id %x\n", dst_ip, le_gre_id);
}

static int handle_qinq_encap4_bulk_pe(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)tbase;
	uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
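	/* pkts_mask has one bit set per packet in the burst, e.g. n_pkts = 4
	 * gives 0xf; the table lookup reports hits in the same bit layout. */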
	struct cpe_data *entries[64];
	uint8_t out[MAX_PKT_BURST];
	uint64_t lookup_hit_mask;

	prefetch_pkts(mbufs, n_pkts);

	for (uint16_t j = 0; j < n_pkts; ++j) {
		struct ipv4_hdr *ip = (struct ipv4_hdr *)(rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *) + 1);
		task->keys[j] = (uint64_t)ip->dst_addr;
	}
	prox_rte_table_key8_lookup(task->cpe_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void **)entries);

	if (likely(lookup_hit_mask == pkts_mask)) {
		for (uint16_t j = 0; j < n_pkts; ++j) {
			struct cpe_pkt *cpe_pkt = (struct cpe_pkt *)rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(struct ether_hdr));
			uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr);

			if (padlen) {
				rte_pktmbuf_trim(mbufs[j], padlen);
			}
			out[j] = handle_qinq_encap4(task, cpe_pkt, mbufs[j], entries[j]);
		}
	} else {
		for (uint16_t j = 0; j < n_pkts; ++j) {
			if (unlikely(!((lookup_hit_mask >> j) & 0x1))) {
				handle_error(mbufs[j]);
				out[j] = OUT_DISCARD;
				continue;
			}
			struct cpe_pkt *cpe_pkt = (struct cpe_pkt *)rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(struct ether_hdr));
			uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr);

			if (padlen) {
				rte_pktmbuf_trim(mbufs[j], padlen);
			}
			out[j] = handle_qinq_encap4(task, cpe_pkt, mbufs[j], entries[j]);
		}
	}

	return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}

int handle_qinq_encap4_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)tbase;
	uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
	struct cpe_data *entries[64];
	uint8_t out[MAX_PKT_BURST];
	uint64_t lookup_hit_mask;

	prefetch_pkts(mbufs, n_pkts);
	// From the GRE ID and IP address, retrieve the QinQ tags and MAC addresses.
	extract_key_bulk(task, mbufs, n_pkts);
	prox_rte_table_key8_lookup(task->cpe_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void **)entries);

	if (likely(lookup_hit_mask == pkts_mask)) {
		for (uint16_t j = 0; j < n_pkts; ++j) {
			struct cpe_pkt *cpe_pkt = (struct cpe_pkt *)rte_pktmbuf_adj(mbufs[j], UPSTREAM_DELTA);
			// We are receiving GRE tunnelled packets (and removing UPSTREAM_DELTA bytes), whose length is > 64 bytes.
			// So there should be no padding, but in case there is some, remove it.
			uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr);

			if (padlen) {
				rte_pktmbuf_trim(mbufs[j], padlen);
			}
			out[j] = handle_qinq_encap4(task, cpe_pkt, mbufs[j], entries[j]);
		}
	} else {
		for (uint16_t j = 0; j < n_pkts; ++j) {
			if (unlikely(!((lookup_hit_mask >> j) & 0x1))) {
				handle_error(mbufs[j]);
				out[j] = OUT_DISCARD;
				continue;
			}
			struct cpe_pkt *cpe_pkt = (struct cpe_pkt *)rte_pktmbuf_adj(mbufs[j], UPSTREAM_DELTA);
			uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr);

			if (padlen) {
				rte_pktmbuf_trim(mbufs[j], padlen);
			}
			out[j] = handle_qinq_encap4(task, cpe_pkt, mbufs[j], entries[j]);
		}
	}

	return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}

static inline uint8_t handle_qinq_encap4(struct task_qinq_encap4 *task, struct cpe_pkt *cpe_pkt, struct rte_mbuf *mbuf, struct cpe_data *entry)
{
	PROX_ASSERT(cpe_pkt);

	if (cpe_pkt->ipv4_hdr.time_to_live) {
		cpe_pkt->ipv4_hdr.time_to_live--;
	} else {
		plog_info("TTL = 0 => Dropping\n");
		return OUT_DISCARD;
	}
	cpe_pkt->ipv4_hdr.hdr_checksum = 0;

	restore_cpe(cpe_pkt, entry, task->qinq_tag, task->src_mac);

	if (task->runtime_flags & TASK_CLASSIFY) {
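		/* Each dscp[] entry packs the scheduler queue in its two low
		 * bits and the traffic class above them. Worked example
		 * (hypothetical table value): an entry of 0x0b yields
		 * queue = 0x0b & 0x3 = 3 and tc = 0x0b >> 2 = 2. */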
		uint8_t queue = task->dscp[cpe_pkt->ipv4_hdr.type_of_service >> 2] & 0x3;
		uint8_t tc = task->dscp[cpe_pkt->ipv4_hdr.type_of_service >> 2] >> 2;

		rte_sched_port_pkt_write(mbuf, 0, entry->user, tc, queue, 0);
	}
#ifdef ENABLE_EXTRA_USER_STATISTICS
	task->stats_per_user[entry->user]++;
#endif
	if (task->runtime_flags & TASK_TX_CRC) {
		prox_ip_cksum(mbuf, &cpe_pkt->ipv4_hdr, sizeof(struct qinq_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
	}
	return entry->mac_port.out_idx;
}
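
/* The flow_iter callbacks below walk the global QinQ <-> GRE map and yield
 * only the entries owned by this worker thread; the selection mirrors the
 * three distribution modes used in init_qinq_gre_table() above. */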
static void flow_iter_next(struct flow_iter *iter, struct task_args *targ)
{
	do {
		iter->idx++;
		uint8_t flag_features = iter->data;

		if (flag_features & TASK_FEATURE_LUT_QINQ_RSS) {
			// If there is a load balancer, the number of worker threads is given by targ->nb_slave_threads and n_rxq = 0.
			// If there is no load balancer, the number of worker threads is given by n_rxq and nb_slave_threads = 0.
			uint8_t nb_worker_threads, worker_thread_id;
			nb_worker_threads = 1;
			worker_thread_id = 1;
			if (targ->nb_slave_threads) {
				nb_worker_threads = targ->nb_slave_threads;
				worker_thread_id = targ->worker_thread_id;
			} else if (prox_port_cfg[targ->rx_port_queue[0].port].n_rxq) {
				nb_worker_threads = prox_port_cfg[targ->rx_port_queue[0].port].n_rxq;
				worker_thread_id = targ->rx_port_queue[0].queue;
			} else {
				plog_err("Unexpected: unknown number of worker threads\n");
			}

			if (targ->nb_slave_threads == 0 || rss_to_queue(get_qinq_gre_map(targ)->entries[iter->idx].rss, nb_worker_threads) == worker_thread_id)
				break;
		} else if (flag_features & TASK_FEATURE_LUT_QINQ_HASH) {
			uint64_t cvlan = rte_bswap16(get_qinq_gre_map(targ)->entries[iter->idx].cvlan & 0xFF0F);
			uint64_t svlan = rte_bswap16(get_qinq_gre_map(targ)->entries[iter->idx].svlan & 0xFF0F);
			uint64_t qinq = rte_bswap64((svlan << 32) | cvlan);
			uint8_t queue = rte_hash_crc(&qinq, 8, 0) % targ->nb_slave_threads;
			if (queue == targ->worker_thread_id)
				break;
		} else if (flag_features & TASK_FEATURE_GRE_ID) {
			if (get_qinq_gre_map(targ)->entries[iter->idx].gre_id % targ->nb_slave_threads == targ->worker_thread_id)
				break;
		}
	} while (iter->idx != (int)get_qinq_gre_map(targ)->count);
}

static void flow_iter_beg(struct flow_iter *iter, struct task_args *targ)
{
	uint32_t flag_features = 0;
	if (targ->lb_friend_core != 0xFF) {
		struct task_args *lb_targ = &lcore_cfg[targ->lb_friend_core].targs[targ->lb_friend_task];
		flag_features = lb_targ->task_init->flag_features;
		plog_info("\t\tWT %d updated features to %x from friend %d\n", targ->lconf->id, flag_features, targ->lb_friend_core);
	} else {
		plog_info("\t\tWT %d has no friend\n", targ->lconf->id);
	}
	if (targ->nb_slave_threads == 0) {
		// No slave threads, i.e. using RSS
		plog_info("feature was %x and is now %x\n", flag_features, TASK_FEATURE_LUT_QINQ_RSS);
		flag_features = TASK_FEATURE_LUT_QINQ_RSS;
	}
	if ((flag_features & (TASK_FEATURE_GRE_ID|TASK_FEATURE_LUT_QINQ_RSS|TASK_FEATURE_LUT_QINQ_HASH)) == 0) {
		plog_info("\t\tCould not find flag feature from the load balancer => assuming TASK_FEATURE_GRE_ID\n");
		flag_features = TASK_FEATURE_GRE_ID;
	}

	iter->idx = -1;
	iter->data = flag_features;
	flow_iter_next(iter, targ);
}

static int flow_iter_is_end(struct flow_iter *iter, struct task_args *targ)
{
	return iter->idx == (int)get_qinq_gre_map(targ)->count;
}

static uint32_t flow_iter_get_gre_id(struct flow_iter *iter, struct task_args *targ)
{
	return get_qinq_gre_map(targ)->entries[iter->idx].gre_id;
}

static struct task_init task_init_qinq_encap4_table = {
	.mode = QINQ_ENCAP4,
	.mode_str = "qinqencapv4",
	.early_init = early_init_table,
	.init = init_task_qinq_encap4,
	.handle = handle_qinq_encap4_bulk,
	/* In this case the user in the qinq_lookup table is the QoS user
	   (from user_table), i.e. usually from 0 to 32K. Otherwise it
	   would have been a user from 0 to n_interface x 32K. */
	.flow_iter = {
		.beg = flow_iter_beg,
		.is_end = flow_iter_is_end,
		.next = flow_iter_next,
		.get_gre_id = flow_iter_get_gre_id,
	},
	.flag_features = TASK_FEATURE_CLASSIFY,
	.size = sizeof(struct task_qinq_encap4)
};

static struct task_init task_init_qinq_encap4_table_pe = {
	.mode = QINQ_ENCAP4,
	.mode_str = "qinqencapv4",
	.sub_mode_str = "pe",
	.early_init = early_init_table,
	.init = init_task_qinq_encap4,
	.handle = handle_qinq_encap4_bulk_pe,
	.flag_features = TASK_FEATURE_CLASSIFY,
	.size = sizeof(struct task_qinq_encap4)
};

static struct task_init task_init_qinq_encap4_untag = {
	.mode = QINQ_ENCAP4,
	.mode_str = "qinqencapv4",
	.sub_mode_str = "unmpls",
	.init = init_task_qinq_encap4,
	.handle = handle_qinq_encap4_untag_bulk,
	.flag_features = TASK_FEATURE_CLASSIFY,
	.size = sizeof(struct task_qinq_encap4)
};

__attribute__((constructor)) static void reg_task_qinq_encap4(void)
{
	reg_task(&task_init_qinq_encap4_table);
	reg_task(&task_init_qinq_encap4_table_pe);
	reg_task(&task_init_qinq_encap4_untag);
}