// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <rte_table_hash.h>
#include <rte_cycles.h>

#include "mbuf_utils.h"
#include "prox_malloc.h"
#include "prox_lua.h"
#include "prox_lua_types.h"
#include "handle_qinq_encap4.h"
#include "handle_qinq_decap4.h"
#include "prox_args.h"
#include "defines.h"
#include "tx_pkt.h"
#include "prefetch.h"
#include "pkt_prototypes.h"
#include "hash_entry_types.h"
#include "task_init.h"
#include "bng_pkts.h"
#include "prox_cksum.h"
#include "hash_utils.h"
#include "quit.h"
#include "prox_port_cfg.h"
#include "handle_lb_net.h"
#include "prox_cfg.h"
#include "lconf.h"
#include "toeplitz.h"
#include "prox_shared.h"

static struct cpe_table_data *read_cpe_table_config(const char *name, uint8_t socket)
{
	struct lua_State *L = prox_lua();
	struct cpe_table_data *ret = NULL;

	lua_getglobal(L, name);
	PROX_PANIC(lua_isnil(L, -1), "Couldn't find cpe_table data\n");

	return ret;
}

struct qinq_gre_map *get_qinq_gre_map(struct task_args *targ)
{
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
	struct qinq_gre_map *ret = prox_sh_find_socket(socket_id, "qinq_gre_map");

	if (!ret) {
		PROX_PANIC(!strcmp(targ->user_table, ""), "No user table defined\n");
		int rv = lua_to_qinq_gre_map(prox_lua(), GLOBAL, targ->user_table, socket_id, &ret);
		PROX_PANIC(rv, "Error reading mapping between qinq and gre from qinq_gre_map:\n%s\n",
			   get_lua_to_errors());
		prox_sh_add_socket(socket_id, "qinq_gre_map", ret);
	}
	return ret;
}

/* Encapsulate IPv4 packets in QinQ. QinQ tags are derived from gre_id. */
int handle_qinq_encap4_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
static void arp_msg(struct task_base *tbase, void **data, uint16_t n_msgs);
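
/* Populate this worker's share of the CPE table from the Lua config: entries
 * are sharded across workers by CPE IP modulo the number of slave threads,
 * keyed on {ip, gre_id} (struct cpe_key) and carrying the QinQ tags, MAC and
 * output port (struct cpe_data). */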
static void fill_table(struct task_args *targ, struct rte_table_hash *table)
{
	struct cpe_table_data *cpe_table_data;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
	int ret = lua_to_cpe_table_data(prox_lua(), GLOBAL, targ->cpe_table_name, socket_id, &cpe_table_data);
	const uint8_t n_slaves = targ->nb_slave_threads;
	const uint8_t worker_id = targ->worker_thread_id;

	PROX_PANIC(ret, "Failed to read cpe_table data from config:\n%s\n", get_lua_to_errors());

	for (uint32_t i = 0; i < cpe_table_data->n_entries; ++i) {
		if (rte_bswap32(cpe_table_data->entries[i].ip) % n_slaves != worker_id) {
			continue;
		}

		struct cpe_table_entry *entry = &cpe_table_data->entries[i];

		uint32_t port_idx = prox_cfg.cpe_table_ports[entry->port_idx];
		PROX_PANIC(targ->mapping[port_idx] == 255, "Error reading cpe table: mapping for port %d is missing\n", port_idx);

		struct cpe_key key = {
			.ip = entry->ip,
			.gre_id = entry->gre_id,
		};

		struct cpe_data data = {
			.qinq_svlan = entry->svlan,
			.qinq_cvlan = entry->cvlan,
			.user = entry->user,
			.mac_port = {
				.mac = entry->eth_addr,
				.out_idx = targ->mapping[port_idx],
			},
			.tsc = UINT64_MAX,
		};

		int key_found = 0;
		void *entry_in_hash = NULL;
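		/* key8 "ext dosig" ops: 8-byte key, extendable buckets, and
		 * precomputed ("do") signatures. Lookups read key and signature from
		 * per-mbuf metadata, while f_add takes the key pointer directly. */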
		rte_table_hash_key8_ext_dosig_ops.f_add(table, &key, &data, &key_found, &entry_in_hash);
	}
}

static void init_task_qinq_encap4(struct task_base *tbase, struct task_args *targ)
{
	struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)(tbase);
	int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	task->qinq_tag = targ->qinq_tag;
	task->cpe_table = targ->cpe_table;
	task->cpe_timeout = msec_to_tsc(targ->cpe_table_timeout_ms);

	if (!strcmp(targ->task_init->sub_mode_str, "pe")) {
		PROX_PANIC(!strcmp(targ->cpe_table_name, ""), "CPE table not configured\n");
		fill_table(targ, task->cpe_table);
	}

#ifdef ENABLE_EXTRA_USER_STATISTICS
	task->n_users = targ->n_users;
	task->stats_per_user = prox_zmalloc(targ->n_users * sizeof(uint32_t), socket_id);
#endif
	if (targ->runtime_flags & TASK_CLASSIFY) {
		PROX_PANIC(!strcmp(targ->dscp, ""), "DSCP table not specified\n");
		task->dscp = prox_sh_find_socket(socket_id, targ->dscp);
		if (!task->dscp) {
			int ret = lua_to_dscp(prox_lua(), GLOBAL, targ->dscp, socket_id, &task->dscp);
			PROX_PANIC(ret, "Failed to create dscp table from config:\n%s\n",
				   get_lua_to_errors());
			prox_sh_add_socket(socket_id, targ->dscp, task->dscp);
		}
	}

	task->runtime_flags = targ->runtime_flags;
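
	/* The DPDK bulk lookup API takes an array of mbuf pointers and reads each
	 * key from per-mbuf metadata (see key_offset/signature_offset below in
	 * this file). The keys actually live contiguously in task->keys[], so a
	 * fake mbuf pointer is synthesized sizeof(struct rte_mbuf) bytes before
	 * each key, assuming HASH_METADATA_OFFSET(0) resolves to the first byte
	 * after the mbuf struct. Bulk lookups then need no real mbufs. */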
	for (uint32_t i = 0; i < 64; ++i) {
		task->fake_packets[i] = (struct rte_mbuf *)((uint8_t *)&task->keys[i] - sizeof(struct rte_mbuf));
	}

	targ->lconf->ctrl_timeout = freq_to_tsc(targ->ctrl_freq);
	targ->lconf->ctrl_func_m[targ->task] = arp_msg;

	struct prox_port_cfg *port = find_reachable_port(targ);
	if (port) {
		task->offload_crc = port->capabilities.tx_offload_cksum;
	}

	/* TODO: check whether the reverse mapping should be limited to the
	   elements that actually changed in mapping. */
	for (uint32_t i = 0; i < sizeof(targ->mapping)/sizeof(targ->mapping[0]); ++i) {
		task->src_mac[targ->mapping[i]] = *(uint64_t *)&prox_port_cfg[i].eth_addr;
	}

	/* task->src_mac[entry->port_idx] = *(uint64_t*)&prox_port_cfg[entry->port_idx].eth_addr; */
}

static void arp_msg(struct task_base *tbase, void **data, uint16_t n_msgs)
{
	struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)tbase;
	struct arp_msg **msgs = (struct arp_msg **)data;

	arp_update_from_msg(task->cpe_table, msgs, n_msgs, task->cpe_timeout);
}

static inline void add_key(struct task_args *targ, struct qinq_gre_map *qinq_gre_map, struct rte_table_hash *qinq_gre_table, uint32_t i, uint32_t *count)
{
	struct qinq_gre_data entry = {
		.gre_id = qinq_gre_map->entries[i].gre_id,
		.user = qinq_gre_map->entries[i].user,
	};

#ifdef USE_QINQ
	struct vlans qinq2 = {
		.svlan = {.eth_proto = targ->qinq_tag, .vlan_tci = qinq_gre_map->entries[i].svlan},
		.cvlan = {.eth_proto = ETYPE_VLAN, .vlan_tci = qinq_gre_map->entries[i].cvlan}
	};

	int key_found = 0;
	void *entry_in_hash = NULL;
	rte_table_hash_key8_ext_dosig_ops.f_add(qinq_gre_table, &qinq2, &entry, &key_found, &entry_in_hash);

	plog_dbg("Core %u adding user %u (tag %x svlan %x cvlan %x), rss=%x\n",
		 targ->lconf->id, qinq_gre_map->entries[i].user, qinq2.svlan.eth_proto,
		 rte_bswap16(qinq_gre_map->entries[i].svlan),
		 rte_bswap16(qinq_gre_map->entries[i].cvlan),
		 qinq_gre_map->entries[i].rss);
#else
	/* Without QinQ, the lower 3 bytes of the IPv4 address contain svlan/cvlan. */
	uint64_t ip = ((uint32_t)rte_bswap16(qinq_gre_map->entries[i].svlan) << 12) |
		rte_bswap16(qinq_gre_map->entries[i].cvlan);
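	/* This packing mirrors restore_cpe(), which rebuilds the downstream
	 * address as 10.0.0.0 | svlan << 12 | cvlan, so the address itself can
	 * serve as the 8-byte lookup key. */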
	int key_found = 0;
	void *entry_in_hash = NULL;
	rte_table_hash_key8_ext_dosig_ops.f_add(qinq_gre_table, &ip, &entry, &key_found, &entry_in_hash);

	plog_dbg("Core %u hash table add: key = %016"PRIx64"\n",
		 targ->lconf->id, ip);
#endif
	(*count)++;
}

void init_qinq_gre_table(struct task_args *targ, struct qinq_gre_map *qinq_gre_map)
{
	struct rte_table_hash *qinq_gre_table;
	uint8_t table_part = targ->nb_slave_threads;
	if (!rte_is_power_of_2(table_part)) {
		table_part = rte_align32pow2(table_part) >> 1;
	}
	if (table_part == 0)
		table_part = 1;
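
	/* Each worker only holds its own share of the GRE IDs. Rounding a
	 * non-power-of-two worker count down keeps n_entries a power of two
	 * (assuming MAX_GRE is one), as the DPDK key8 table expects;
	 * n_entries_ext reserves extension-bucket room for collisions. */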
	uint32_t n_entries = MAX_GRE / table_part;

	struct rte_table_hash_key8_ext_params table_hash_params = {
		.n_entries = n_entries,
		.n_entries_ext = n_entries >> 1,
		.f_hash = hash_crc32,
		.seed = 0,
		.signature_offset = HASH_METADATA_OFFSET(8),
		.key_offset = HASH_METADATA_OFFSET(0),
	};

	qinq_gre_table = rte_table_hash_key8_ext_dosig_ops.
		f_create(&table_hash_params, rte_lcore_to_socket_id(targ->lconf->id), sizeof(struct qinq_gre_data));

	// The LB configuration is known by the QINQ_ENCAP4 task on this core;
	// find that ENCAP friend and copy its load balancer settings.
	for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) {
		enum task_mode smode = targ->lconf->targs[task_id].mode;
		if (QINQ_ENCAP4 == smode) {
			targ->lb_friend_core = targ->lconf->targs[task_id].lb_friend_core;
			targ->lb_friend_task = targ->lconf->targs[task_id].lb_friend_task;
		}
	}

	// Packets come from the load balancer, which may balance on the gre_id
	// LSB, on a QinQ hash or on QinQ RSS.
	uint32_t flag_features = 0;
	if (targ->lb_friend_core != 0xFF) {
		struct task_args *lb_targ = &lcore_cfg[targ->lb_friend_core].targs[targ->lb_friend_task];
		flag_features = lb_targ->task_init->flag_features;
		plog_info("\t\tWT %d updated features to %x from friend %d\n", targ->lconf->id, flag_features, targ->lb_friend_core);
	} else {
		plog_info("\t\tWT %d has no friend\n", targ->lconf->id);
	}
	if (targ->nb_slave_threads == 0) {
		// No slave threads, i.e. using RSS
		plog_info("feature was %x, is now %x\n", flag_features, TASK_FEATURE_LUT_QINQ_RSS);
		flag_features = TASK_FEATURE_LUT_QINQ_RSS;
	}
	if ((flag_features & (TASK_FEATURE_GRE_ID|TASK_FEATURE_LUT_QINQ_RSS|TASK_FEATURE_LUT_QINQ_HASH)) == 0) {
		plog_info("\t\tCould not get flag features from the load balancer => assuming TASK_FEATURE_GRE_ID\n");
		flag_features = TASK_FEATURE_GRE_ID;
	}

	/* Only store the QinQ <-> GRE mapping for packets that are handled by this worker thread. */
	uint32_t count = 0;
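	/* Each branch below recomputes the distribution function applied upstream
	 * (NIC RSS, the LB's QinQ hash, or gre_id modulo the worker count), so
	 * that the local table matches the traffic this worker actually gets. */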
	if (flag_features & TASK_FEATURE_LUT_QINQ_RSS) {
		// If there is a load balancer, the number of worker threads is
		// given by targ->nb_slave_threads and n_rxq = 0.
		// If there is no load balancer, the number of worker threads is
		// given by n_rxq and nb_slave_threads = 0.
		uint8_t nb_worker_threads, worker_thread_id;
		if (targ->nb_slave_threads) {
			nb_worker_threads = targ->nb_slave_threads;
			worker_thread_id = targ->worker_thread_id;
		} else if (prox_port_cfg[targ->rx_port_queue[0].port].n_rxq) {
			nb_worker_threads = prox_port_cfg[targ->rx_port_queue[0].port].n_rxq;
			worker_thread_id = targ->rx_port_queue[0].queue;
		} else {
			PROX_PANIC(1, "Unexpected: unknown number of worker threads\n");
		}
		plog_info("\t\tUsing %d worker threads, id %d\n", nb_worker_threads, worker_thread_id);
		for (uint32_t i = 0; i < qinq_gre_map->count; ++i) {
			if (targ->nb_slave_threads == 0 || rss_to_queue(qinq_gre_map->entries[i].rss, nb_worker_threads) == worker_thread_id) {
				add_key(targ, qinq_gre_map, qinq_gre_table, i, &count);
				//plog_info("Queue %d adding key %16lx, svlan %x cvlan %x, rss=%x\n", targ->rx_queue, *(uint64_t *)q, qinq_to_gre_lookup[i].svlan, qinq_to_gre_lookup[i].cvlan, qinq_to_gre_lookup[i].rss);
			}
		}
		plog_info("\t\tAdded %d entries to worker thread %d\n", count, worker_thread_id);
	} else if (flag_features & TASK_FEATURE_LUT_QINQ_HASH) {
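		/* Replicate the LB's QinQ-hash spread: the TCI fields are in network
		 * byte order, so on a little-endian host the 0xFF0F mask keeps the
		 * 12-bit VLAN ID and drops the PCP/DEI nibble before hashing. */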
		for (uint32_t i = 0; i < qinq_gre_map->count; ++i) {
			uint64_t cvlan = rte_bswap16(qinq_gre_map->entries[i].cvlan & 0xFF0F);
			uint64_t svlan = rte_bswap16(qinq_gre_map->entries[i].svlan & 0xFF0F);
			uint64_t qinq = rte_bswap64((svlan << 32) | cvlan);
			uint8_t queue = hash_crc32(&qinq, 8, 0) % targ->nb_slave_threads;
			if (queue == targ->worker_thread_id) {
				add_key(targ, qinq_gre_map, qinq_gre_table, i, &count);
			}
		}
		plog_info("\t\tAdded %d entries to WT %d\n", count, targ->worker_thread_id);
	} else if (flag_features & TASK_FEATURE_GRE_ID) {
		for (uint32_t i = 0; i < qinq_gre_map->count; ++i) {
			if (qinq_gre_map->entries[i].gre_id % targ->nb_slave_threads == targ->worker_thread_id) {
				add_key(targ, qinq_gre_map, qinq_gre_table, i, &count);
			}
		}
	}

	for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) {
		enum task_mode smode = targ->lconf->targs[task_id].mode;
		if (QINQ_DECAP4 == smode) {
			targ->lconf->targs[task_id].qinq_gre_table = qinq_gre_table;
		}
	}
}

void init_cpe4_table(struct task_args *targ)
{
	char name[64];
	sprintf(name, "core_%u_CPEv4Table", targ->lconf->id);

	uint8_t table_part = targ->nb_slave_threads;
	if (!rte_is_power_of_2(table_part)) {
		table_part = rte_align32pow2(table_part) >> 1;
	}
	if (table_part == 0)
		table_part = 1;

	uint32_t n_entries = MAX_GRE / table_part;
	struct rte_table_hash_key8_ext_params table_hash_params = {
		.n_entries = n_entries,
		.n_entries_ext = n_entries >> 1,
		.f_hash = hash_crc32,
		.seed = 0,
		.signature_offset = HASH_METADATA_OFFSET(8),
		.key_offset = HASH_METADATA_OFFSET(0),
	};
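
	/* sizeof(struct cpe_data) is rounded up to a power of two, presumably so
	 * the table's slot addressing stays cheap; the padding is unused space. */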
	size_t entry_size = sizeof(struct cpe_data);
	if (!rte_is_power_of_2(entry_size)) {
		entry_size = rte_align32pow2(entry_size);
	}

	struct rte_table_hash *phash = rte_table_hash_key8_ext_dosig_ops.
		f_create(&table_hash_params, rte_lcore_to_socket_id(targ->lconf->id), entry_size);
	PROX_PANIC(NULL == phash, "Unable to allocate memory for IPv4 hash table on core %u\n", targ->lconf->id);

	/* For locality, copy the table pointer into each task that needs it at packet handling time. */
	for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) {
		enum task_mode smode = targ->lconf->targs[task_id].mode;
		if (QINQ_ENCAP4 == smode || QINQ_DECAP4 == smode) {
			targ->lconf->targs[task_id].cpe_table = phash;
		}
	}
}

static void early_init_table(struct task_args *targ)
{
	if (!targ->cpe_table) {
		init_cpe4_table(targ);
	}
}

static inline void restore_cpe(struct cpe_pkt *packet, struct cpe_data *table, __attribute__((unused)) uint16_t qinq_tag, uint64_t *src_mac)
{
#ifdef USE_QINQ
	struct qinq_hdr *pqinq = &packet->qinq_hdr;
	rte_memcpy(pqinq, &qinq_proto, sizeof(struct qinq_hdr));
	(*(uint64_t *)(&pqinq->d_addr)) = table->mac_port_8bytes;
	/* set the source MAC as well */
	*((uint64_t *)(&pqinq->s_addr)) = *((uint64_t *)&src_mac[table->mac_port.out_idx]);
	pqinq->svlan.vlan_tci = table->qinq_svlan;
	pqinq->cvlan.vlan_tci = table->qinq_cvlan;
	pqinq->svlan.eth_proto = qinq_tag;
	pqinq->cvlan.eth_proto = ETYPE_VLAN;
	pqinq->ether_type = ETYPE_IPv4;
#else
	(*(uint64_t *)(&packet->ether_hdr.d_addr)) = table->mac_port_8bytes;
	/* set the source MAC as well */
	*((uint64_t *)(&packet->ether_hdr.s_addr)) = *((uint64_t *)&src_mac[table->mac_port.out_idx]);
	packet->ether_hdr.ether_type = ETYPE_IPv4;
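
	/* Rebuild the CPE's IPv4 address from the QinQ tags: 10.0.0.0/8 with the
	 * 12-bit svlan in bits 23..12 and the cvlan in bits 11..0; the inverse of
	 * the key packing in add_key(). */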
	packet->ipv4_hdr.dst_addr = rte_bswap32(10 << 24 | rte_bswap16(table->qinq_svlan) << 12 | rte_bswap16(table->qinq_cvlan));
#endif
}

static inline uint8_t handle_qinq_encap4(struct task_qinq_encap4 *task, struct cpe_pkt *cpe_pkt, struct rte_mbuf *mbuf, struct cpe_data *entry);

/* Same functionality as handle_qinq_encap4_bulk, but also untags MPLS. */
static int handle_qinq_encap4_untag_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)tbase;
	uint8_t out[MAX_PKT_BURST];
	prefetch_pkts(mbufs, n_pkts);

	for (uint16_t j = 0; j < n_pkts; ++j) {
		if (likely(mpls_untag(mbufs[j]))) {
			struct cpe_pkt *cpe_pkt = (struct cpe_pkt *)rte_pktmbuf_adj(mbufs[j], UPSTREAM_DELTA);
			out[j] = handle_qinq_encap4(task, cpe_pkt, mbufs[j], NULL);
		} else {
			out[j] = OUT_DISCARD;
		}
	}

	return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}

static inline void extract_key_bulk(struct task_qinq_encap4 *task, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	for (uint16_t j = 0; j < n_pkts; ++j) {
		extract_key_core(mbufs[j], &task->keys[j]);
	}
}

__attribute__((cold)) static void handle_error(struct rte_mbuf *mbuf)
{
	struct core_net_pkt *core_pkt = rte_pktmbuf_mtod(mbuf, struct core_net_pkt *);
	uint32_t dst_ip = core_pkt->ip_hdr.dst_addr;
	uint32_t le_gre_id = rte_be_to_cpu_32(core_pkt->gre_hdr.gre_id);

	plogx_dbg("Unknown IP %x/gre_id %x\n", dst_ip, le_gre_id);
}

static int handle_qinq_encap4_bulk_pe(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)tbase;
	uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
	struct cpe_data *entries[64];
	uint8_t out[MAX_PKT_BURST];
	uint64_t lookup_hit_mask;

	prefetch_pkts(mbufs, n_pkts);

	for (uint16_t j = 0; j < n_pkts; ++j) {
		struct ipv4_hdr *ip = (struct ipv4_hdr *)(rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *) + 1);
		task->keys[j] = (uint64_t)ip->dst_addr;
	}
	rte_table_hash_key8_ext_dosig_ops.f_lookup(task->cpe_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void **)entries);

	if (likely(lookup_hit_mask == pkts_mask)) {
		for (uint16_t j = 0; j < n_pkts; ++j) {
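			/* Grow the frame at the front by the QinQ overhead, i.e. the two
			 * VLAN tags (sizeof(struct qinq_hdr) - sizeof(struct ether_hdr)),
			 * then trim any L2 padding that would otherwise end up inside
			 * the payload. */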
			struct cpe_pkt *cpe_pkt = (struct cpe_pkt *)rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(struct ether_hdr));
			uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr);

			if (padlen) {
				rte_pktmbuf_trim(mbufs[j], padlen);
			}
			out[j] = handle_qinq_encap4(task, cpe_pkt, mbufs[j], entries[j]);
		}
	} else {
		for (uint16_t j = 0; j < n_pkts; ++j) {
			if (unlikely(!((lookup_hit_mask >> j) & 0x1))) {
				handle_error(mbufs[j]);
				out[j] = OUT_DISCARD;
				continue;
			}
			struct cpe_pkt *cpe_pkt = (struct cpe_pkt *)rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(struct ether_hdr));
			uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr);

			if (padlen) {
				rte_pktmbuf_trim(mbufs[j], padlen);
			}
			out[j] = handle_qinq_encap4(task, cpe_pkt, mbufs[j], entries[j]);
		}
	}

	return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}

int handle_qinq_encap4_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)tbase;
	uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
	struct cpe_data *entries[64];
	uint8_t out[MAX_PKT_BURST];
	uint64_t lookup_hit_mask;

	prefetch_pkts(mbufs, n_pkts);

	// From the GRE ID and IP address, retrieve the QinQ tags and MAC addresses
	extract_key_bulk(task, mbufs, n_pkts);
	rte_table_hash_key8_ext_dosig_ops.f_lookup(task->cpe_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void **)entries);

	if (likely(lookup_hit_mask == pkts_mask)) {
		for (uint16_t j = 0; j < n_pkts; ++j) {
			struct cpe_pkt *cpe_pkt = (struct cpe_pkt *)rte_pktmbuf_adj(mbufs[j], UPSTREAM_DELTA);
			// We receive GRE-tunnelled packets (and strip UPSTREAM_DELTA
			// bytes) whose length is > 64 bytes, so there should be no
			// padding; but in case there is some, remove it.
			uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr);

			if (padlen) {
				rte_pktmbuf_trim(mbufs[j], padlen);
			}
			out[j] = handle_qinq_encap4(task, cpe_pkt, mbufs[j], entries[j]);
		}
	} else {
		for (uint16_t j = 0; j < n_pkts; ++j) {
			if (unlikely(!((lookup_hit_mask >> j) & 0x1))) {
				handle_error(mbufs[j]);
				out[j] = OUT_DISCARD;
				continue;
			}
			struct cpe_pkt *cpe_pkt = (struct cpe_pkt *)rte_pktmbuf_adj(mbufs[j], UPSTREAM_DELTA);
			uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr);

			if (padlen) {
				rte_pktmbuf_trim(mbufs[j], padlen);
			}
			out[j] = handle_qinq_encap4(task, cpe_pkt, mbufs[j], entries[j]);
		}
	}

	return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}

static inline uint8_t handle_qinq_encap4(struct task_qinq_encap4 *task, struct cpe_pkt *cpe_pkt, struct rte_mbuf *mbuf, struct cpe_data *entry)
{
	PROX_ASSERT(cpe_pkt);

	if (cpe_pkt->ipv4_hdr.time_to_live) {
		cpe_pkt->ipv4_hdr.time_to_live--;
	} else {
		plog_info("TTL = 0 => Dropping\n");
		return OUT_DISCARD;
	}
	cpe_pkt->ipv4_hdr.hdr_checksum = 0;

	restore_cpe(cpe_pkt, entry, task->qinq_tag, task->src_mac);

	if (task->runtime_flags & TASK_CLASSIFY) {
		uint8_t queue = task->dscp[cpe_pkt->ipv4_hdr.type_of_service >> 2] & 0x3;
		uint8_t tc = task->dscp[cpe_pkt->ipv4_hdr.type_of_service >> 2] >> 2;
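
		/* task->dscp[] packs the QoS class per DSCP value: the two low bits
		 * select the queue, the remaining bits the traffic class. The result
		 * goes into the mbuf sched metadata (subport 0, pipe = entry->user),
		 * presumably consumed by a downstream rte_sched QoS task. */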
		rte_sched_port_pkt_write(mbuf, 0, entry->user, tc, queue, 0);
	}
#ifdef ENABLE_EXTRA_USER_STATISTICS
	task->stats_per_user[entry->user]++;
#endif
	if (task->runtime_flags & TASK_TX_CRC) {
		prox_ip_cksum(mbuf, &cpe_pkt->ipv4_hdr, sizeof(struct qinq_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
	}
	return entry->mac_port.out_idx;
}
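
/* Flow iterators, exposed through struct task_init below: they enumerate the
 * GRE flows owned by one worker. flow_iter_next() advances to the next entry
 * of the global map that the distribution logic above assigns to this worker;
 * iter->data caches the feature flags resolved in flow_iter_beg(). */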
static void flow_iter_next(struct flow_iter *iter, struct task_args *targ)
{
	do {
		iter->idx++;
		uint8_t flag_features = iter->data;

		if (flag_features & TASK_FEATURE_LUT_QINQ_RSS) {
			// If there is a load balancer, the number of worker threads is
			// given by targ->nb_slave_threads and n_rxq = 0.
			// If there is no load balancer, the number of worker threads is
			// given by n_rxq and nb_slave_threads = 0.
			uint8_t nb_worker_threads, worker_thread_id;
			nb_worker_threads = 1;
			worker_thread_id = 1;
			if (targ->nb_slave_threads) {
				nb_worker_threads = targ->nb_slave_threads;
				worker_thread_id = targ->worker_thread_id;
			} else if (prox_port_cfg[targ->rx_port_queue[0].port].n_rxq) {
				nb_worker_threads = prox_port_cfg[targ->rx_port_queue[0].port].n_rxq;
				worker_thread_id = targ->rx_port_queue[0].queue;
			} else {
				plog_err("Unexpected: unknown number of worker threads\n");
			}
			if (targ->nb_slave_threads == 0 || rss_to_queue(get_qinq_gre_map(targ)->entries[iter->idx].rss, nb_worker_threads) == worker_thread_id)
				break;
		} else if (flag_features & TASK_FEATURE_LUT_QINQ_HASH) {
			uint64_t cvlan = rte_bswap16(get_qinq_gre_map(targ)->entries[iter->idx].cvlan & 0xFF0F);
			uint64_t svlan = rte_bswap16(get_qinq_gre_map(targ)->entries[iter->idx].svlan & 0xFF0F);
			uint64_t qinq = rte_bswap64((svlan << 32) | cvlan);
			uint8_t queue = hash_crc32(&qinq, 8, 0) % targ->nb_slave_threads;
			if (queue == targ->worker_thread_id)
				break;
		} else if (flag_features & TASK_FEATURE_GRE_ID) {
			if (get_qinq_gre_map(targ)->entries[iter->idx].gre_id % targ->nb_slave_threads == targ->worker_thread_id)
				break;
		}
	} while (iter->idx != (int)get_qinq_gre_map(targ)->count);
}

static void flow_iter_beg(struct flow_iter *iter, struct task_args *targ)
{
	uint32_t flag_features = 0;
	if (targ->lb_friend_core != 0xFF) {
		struct task_args *lb_targ = &lcore_cfg[targ->lb_friend_core].targs[targ->lb_friend_task];
		flag_features = lb_targ->task_init->flag_features;
		plog_info("\t\tWT %d updated features to %x from friend %d\n", targ->lconf->id, flag_features, targ->lb_friend_core);
	} else {
		plog_info("\t\tWT %d has no friend\n", targ->lconf->id);
	}
	if (targ->nb_slave_threads == 0) {
		// No slave threads, i.e. using RSS
		plog_info("feature was %x, is now %x\n", flag_features, TASK_FEATURE_LUT_QINQ_RSS);
		flag_features = TASK_FEATURE_LUT_QINQ_RSS;
	}
	if ((flag_features & (TASK_FEATURE_GRE_ID|TASK_FEATURE_LUT_QINQ_RSS|TASK_FEATURE_LUT_QINQ_HASH)) == 0) {
		plog_info("\t\tCould not get flag features from the load balancer => assuming TASK_FEATURE_GRE_ID\n");
		flag_features = TASK_FEATURE_GRE_ID;
	}

	iter->data = flag_features;
	iter->idx = -1;
	flow_iter_next(iter, targ);
}

static int flow_iter_is_end(struct flow_iter *iter, struct task_args *targ)
{
	return iter->idx == (int)get_qinq_gre_map(targ)->count;
}

static uint32_t flow_iter_get_gre_id(struct flow_iter *iter, struct task_args *targ)
{
	return get_qinq_gre_map(targ)->entries[iter->idx].gre_id;
}

static struct task_init task_init_qinq_encap4_table = {
	.mode = QINQ_ENCAP4,
	.mode_str = "qinqencapv4",
	.early_init = early_init_table,
	.init = init_task_qinq_encap4,
	.handle = handle_qinq_encap4_bulk,
	/* In this case the user in the qinq_lookup table is the QoS user
	   (from user_table), i.e. usually in the range 0 to 32K. Otherwise it
	   would have been a user in the range 0 to n_interfaces x 32K. */
	.flow_iter = {
		.beg = flow_iter_beg,
		.is_end = flow_iter_is_end,
		.next = flow_iter_next,
		.get_gre_id = flow_iter_get_gre_id,
	},
	.flag_features = TASK_FEATURE_CLASSIFY,
	.size = sizeof(struct task_qinq_encap4)
};

static struct task_init task_init_qinq_encap4_table_pe = {
	.mode = QINQ_ENCAP4,
	.mode_str = "qinqencapv4",
	.sub_mode_str = "pe",
	.early_init = early_init_table,
	.init = init_task_qinq_encap4,
	.handle = handle_qinq_encap4_bulk_pe,
	.flag_features = TASK_FEATURE_CLASSIFY,
	.size = sizeof(struct task_qinq_encap4)
};

static struct task_init task_init_qinq_encap4_untag = {
	.mode = QINQ_ENCAP4,
	.mode_str = "qinqencapv4",
	.sub_mode_str = "unmpls",
	.init = init_task_qinq_encap4,
	.handle = handle_qinq_encap4_untag_bulk,
	.flag_features = TASK_FEATURE_CLASSIFY,
	.size = sizeof(struct task_qinq_encap4)
};

__attribute__((constructor)) static void reg_task_qinq_encap4(void)
{
	reg_task(&task_init_qinq_encap4_table);
	reg_task(&task_init_qinq_encap4_table_pe);
	reg_task(&task_init_qinq_encap4_untag);
}