// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_version.h>

#include "prox_port_cfg.h"
#include "prox_malloc.h"
#include "task_init.h"
#include "rx_pkt.h"
#include "tx_pkt.h"
#include "log.h"
#include "quit.h"
#include "lconf.h"
#include "prox_cfg.h"
#include "thread_generic.h"
#include "prox_assert.h"

#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE
#endif

static unsigned first_task = 1;
LIST_HEAD(,task_init) head;

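/* Register a task implementation in the global list. Tasks that do not
 * provide their own thread function fall back to thread_generic. */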
void reg_task(struct task_init* t)
{
	// PROX_PANIC(t->handle == NULL, "No handle function specified for task with name %d\n", t->mode);

	if (t->thread_x == NULL)
		t->thread_x = thread_generic;

	if (first_task) {
		first_task = 0;
		LIST_INIT(&head);
	}

	LIST_INSERT_HEAD(&head, t, entries);
}

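/* Look up a registered task implementation by its mode / sub-mode strings;
 * returns NULL when no match is found. */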
struct task_init *to_task_init(const char *mode_str, const char *sub_mode_str)
{
	struct task_init *cur_t;

	LIST_FOREACH(cur_t, &head, entries) {
		if (!strcmp(mode_str, cur_t->mode_str) &&
		    !strcmp(sub_mode_str, cur_t->sub_mode_str)) {
			return cur_t;
		}
	}

	return NULL;
}

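/* qsort() comparator for an array of strings. */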
static int compare_strcmp(const void *a, const void *b)
{
	return strcmp(*(const char * const *)a, *(const char * const *)b);
}

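/* A task is the master task when it runs on the lcore configured as master. */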
int task_is_master(struct task_args *targ)
{
	return (targ->lconf->id == prox_cfg.master);
}

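/* Print a sorted list of all registered task modes / sub modes. */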
void tasks_list(void)
{
	struct task_init *cur_t;
	char buf[sizeof(cur_t->mode_str) + sizeof(cur_t->sub_mode_str) + 4];

	int nb_modes = 1; /* master */
	LIST_FOREACH(cur_t, &head, entries) {
		nb_modes++;
	}

	char **modes = calloc(nb_modes, sizeof(*modes));
	char **cur_m = modes;
	*cur_m++ = strdup("master");
	LIST_FOREACH(cur_t, &head, entries) {
		snprintf(buf, sizeof(buf), "%s%s%s",
			 cur_t->mode_str,
			 (cur_t->sub_mode_str[0] == 0) ? "" : " / ",
			 cur_t->sub_mode_str);
		*cur_m++ = strdup(buf);
	}
	qsort(modes, nb_modes, sizeof(*modes), compare_strcmp);

	plog_info("=== List of supported task modes / sub modes ===\n");
	for (cur_m = modes; nb_modes; ++cur_m, --nb_modes) {
		plog_info("\t%s\n", *cur_m);
		free(*cur_m);
	}
	free(modes);
}

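/* Compute the total allocation size for a task: the task struct itself, its
 * auxiliary data, and the trailing rx/tx ring, port-queue and ws_mbuf arrays,
 * cache-line aligning the ws_mbuf part. Must match the layout performed in
 * init_rx_tx_rings_ports(). */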
static size_t calc_memsize(struct task_args *targ, size_t task_size)
{
	size_t memsize = task_size;

	memsize += sizeof(struct task_base_aux);

	if (targ->nb_rxports != 0) {
		memsize += 2 * sizeof(uint8_t)*targ->nb_rxports;
	}
	if (targ->nb_rxrings != 0 || targ->tx_opt_ring_task) {
		memsize += sizeof(struct rte_ring *)*targ->nb_rxrings;
	}
	if (targ->nb_txrings != 0) {
		memsize += sizeof(struct rte_ring *) * targ->nb_txrings;
		memsize = RTE_ALIGN_CEIL(memsize, RTE_CACHE_LINE_SIZE);
		memsize += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * targ->nb_txrings;
	}
	else if (targ->nb_txports != 0) {
		memsize += sizeof(struct port_queue) * targ->nb_txports;
		memsize = RTE_ALIGN_CEIL(memsize, RTE_CACHE_LINE_SIZE);
		memsize += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * targ->nb_txports;
	}
	else {
		memsize = RTE_ALIGN_CEIL(memsize, RTE_CACHE_LINE_SIZE);
		memsize += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]);
	}

	return memsize;
}

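/* Pick the queue-flush callback matching the task's tx mode: rings (sw) vs
 * ports (hw), dropping vs no-drop. */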
static void *flush_function(struct task_args *targ)
{
	if (targ->flags & TASK_ARG_DROP) {
		return targ->nb_txrings ? flush_queues_sw : flush_queues_hw;
	}
	else {
		return targ->nb_txrings ? flush_queues_no_drop_sw : flush_queues_no_drop_hw;
	}
}

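/* Select the task's rx/tx packet functions and carve the ring, port-queue and
 * ws_mbuf arrays out of the memory following the task struct; returns the
 * offset just past what was laid out. */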
static size_t init_rx_tx_rings_ports(struct task_args *targ, struct task_base *tbase, size_t offset)
{
	if (targ->tx_opt_ring_task) {
		tbase->rx_pkt = rx_pkt_self;
	}
	else if (targ->nb_rxrings != 0) {

		if (targ->nb_rxrings == 1) {
			tbase->rx_pkt = rx_pkt_sw1;
			tbase->rx_params_sw1.rx_ring = targ->rx_rings[0];
		}
		else {
			tbase->rx_pkt = rx_pkt_sw;
			tbase->rx_params_sw.nb_rxrings = targ->nb_rxrings;
			tbase->rx_params_sw.rx_rings = (struct rte_ring **)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct rte_ring *)*tbase->rx_params_sw.nb_rxrings;

			for (uint8_t i = 0; i < tbase->rx_params_sw.nb_rxrings; ++i) {
				tbase->rx_params_sw.rx_rings[i] = targ->rx_rings[i];
			}

			if (rte_is_power_of_2(targ->nb_rxrings)) {
				tbase->rx_pkt = rx_pkt_sw_pow2;
				tbase->rx_params_sw.rxrings_mask = targ->nb_rxrings - 1;
			}
		}
	}
	else {
		if (targ->nb_rxports == 1) {
			if (targ->flags & TASK_ARG_L3)
				tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw1_multi_l3 : rx_pkt_hw1_l3;
			else if (targ->flags & TASK_ARG_NDP)
				tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw1_multi_ndp : rx_pkt_hw1_ndp;
			else
				tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw1_multi : rx_pkt_hw1;
			tbase->rx_params_hw1.rx_pq.port = targ->rx_port_queue[0].port;
			tbase->rx_params_hw1.rx_pq.queue = targ->rx_port_queue[0].queue;
		}
		else {
			PROX_ASSERT((targ->nb_rxports != 0) || (targ->task_init->flag_features & TASK_FEATURE_NO_RX));
			if (targ->flags & TASK_ARG_L3)
				tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_multi_l3 : rx_pkt_hw_l3;
			else if (targ->flags & TASK_ARG_NDP)
				tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_multi_ndp : rx_pkt_hw_ndp;
			else
				tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_multi : rx_pkt_hw;
			tbase->rx_params_hw.nb_rxports = targ->nb_rxports;
			tbase->rx_params_hw.rx_pq = (struct port_queue *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct port_queue) * tbase->rx_params_hw.nb_rxports;
			for (int i = 0; i < targ->nb_rxports; i++) {
				tbase->rx_params_hw.rx_pq[i].port = targ->rx_port_queue[i].port;
				tbase->rx_params_hw.rx_pq[i].queue = targ->rx_port_queue[i].queue;
			}

			if (rte_is_power_of_2(targ->nb_rxports)) {
				if (targ->flags & TASK_ARG_L3)
					tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_pow2_multi_l3 : rx_pkt_hw_pow2_l3;
				else if (targ->flags & TASK_ARG_NDP)
					tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_pow2_multi_ndp : rx_pkt_hw_pow2_ndp;
				else
					tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_pow2_multi : rx_pkt_hw_pow2;
				tbase->rx_params_hw.rxport_mask = targ->nb_rxports - 1;
			}
		}
	}

	if ((targ->nb_txrings != 0) && (!targ->tx_opt_ring) && (!(targ->flags & TASK_ARG_DROP))) {
		// Transmitting to a ring in NO_DROP mode. Make sure the receiving task is not
		// running on the same core: otherwise we might end up in a deadlock, looping
		// forever while trying to transmit to a task which cannot receive anymore
		// (as it is not being scheduled).
		struct core_task ct;
		for (unsigned int j = 0; j < targ->nb_txrings; j++) {
			ct = targ->core_task_set[0].core_task[j];
			PROX_PANIC(ct.core == targ->lconf->id, "Core %d, task %d: NO_DROP task transmitting to another task (core %d, task %d) running on the same core => potential deadlock\n", targ->lconf->id, targ->id, ct.core, ct.task);
		}
	}
	if ((targ->nb_txrings != 0) && (targ->nb_txports == 1)) {
		/* Transmitting both to rings and to a single port */
		plog_info("Initializing with 1 port %d queue %d nb_rings=%d\n", targ->tx_port_queue[0].port, targ->tx_port_queue[0].queue, targ->nb_txrings);
		tbase->tx_params_hw_sw.tx_port_queue.port = targ->tx_port_queue[0].port;
		tbase->tx_params_hw_sw.tx_port_queue.queue = targ->tx_port_queue[0].queue;
		if (!targ->tx_opt_ring) {
			tbase->tx_params_hw_sw.nb_txrings = targ->nb_txrings;
			tbase->tx_params_hw_sw.tx_rings = (struct rte_ring **)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct rte_ring *)*tbase->tx_params_hw_sw.nb_txrings;

			for (uint8_t i = 0; i < tbase->tx_params_hw_sw.nb_txrings; ++i) {
				tbase->tx_params_hw_sw.tx_rings[i] = targ->tx_rings[i];
			}

			offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE);
			tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * tbase->tx_params_hw_sw.nb_txrings;
		}
	}
	else if (!targ->tx_opt_ring) {
		if (targ->nb_txrings != 0) {
			tbase->tx_params_sw.nb_txrings = targ->nb_txrings;
			tbase->tx_params_sw.tx_rings = (struct rte_ring **)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct rte_ring *)*tbase->tx_params_sw.nb_txrings;

			for (uint8_t i = 0; i < tbase->tx_params_sw.nb_txrings; ++i) {
				tbase->tx_params_sw.tx_rings[i] = targ->tx_rings[i];
			}

			offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE);
			tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * tbase->tx_params_sw.nb_txrings;
		}
		else if (targ->nb_txports != 0) {
			tbase->tx_params_hw.nb_txports = targ->nb_txports;
			tbase->tx_params_hw.tx_port_queue = (struct port_queue *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct port_queue) * tbase->tx_params_hw.nb_txports;
			for (uint8_t i = 0; i < tbase->tx_params_hw.nb_txports; ++i) {
				tbase->tx_params_hw.tx_port_queue[i].port = targ->tx_port_queue[i].port;
				tbase->tx_params_hw.tx_port_queue[i].queue = targ->tx_port_queue[i].queue;
			}

			offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE);
			tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * tbase->tx_params_hw.nb_txports;
		}
		else {
			offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE);
			tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]);
		}
	}
	struct ws_mbuf* w = tbase->ws_mbuf;
	struct task_args *prev = targ->tx_opt_ring_task;

	while (prev) {
		prev->tbase->ws_mbuf = w;
		prev = prev->tx_opt_ring_task;
	}

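	/* Pick the tx function: a single destination (one ring, one port or an
	 * optimized pass-through ring) gets a specialized variant; multiple
	 * destinations get the generic one. */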
	if (targ->nb_txrings == 1 || targ->nb_txports == 1 || targ->tx_opt_ring) {
		if (targ->task_init->flag_features & TASK_FEATURE_NEVER_DISCARDS) {
			if (targ->tx_opt_ring) {
				tbase->tx_pkt = tx_pkt_never_discard_self;
			}
			else if (targ->flags & TASK_ARG_DROP) {
				if (targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT)
					tbase->tx_pkt = targ->nb_txrings ? tx_pkt_never_discard_sw1 : tx_pkt_never_discard_hw1_thrpt_opt;
				else
					tbase->tx_pkt = targ->nb_txrings ? tx_pkt_never_discard_sw1 : tx_pkt_never_discard_hw1_lat_opt;
			}
			else {
				if (targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT)
					tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_never_discard_sw1 : tx_pkt_no_drop_never_discard_hw1_thrpt_opt;
				else
					tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_never_discard_sw1 : tx_pkt_no_drop_never_discard_hw1_lat_opt;
			}
			if ((targ->nb_txrings) || ((targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT) == 0))
				tbase->flags |= FLAG_NEVER_FLUSH;
			else
				targ->lconf->flush_queues[targ->task] = flush_function(targ);
		}
		else {
			if (targ->tx_opt_ring) {
				tbase->tx_pkt = tx_pkt_self;
			}
			else if (targ->flags & TASK_ARG_DROP) {
				tbase->tx_pkt = targ->nb_txrings ? tx_pkt_sw1 : tx_pkt_hw1;
			}
			else {
				tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_sw1 : tx_pkt_no_drop_hw1;
			}
			tbase->flags |= FLAG_NEVER_FLUSH;
		}
	}
	else {
		if (targ->flags & TASK_ARG_DROP) {
			tbase->tx_pkt = targ->nb_txrings ? tx_pkt_sw : tx_pkt_hw;
		}
		else {
			tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_sw : tx_pkt_no_drop_hw;
		}

		targ->lconf->flush_queues[targ->task] = flush_function(targ);
	}

	if (targ->task_init->flag_features & TASK_FEATURE_NO_RX) {
		tbase->rx_pkt = rx_pkt_dummy;
	}

	if (targ->nb_txrings == 0 && targ->nb_txports == 0) {
		tbase->tx_pkt = tx_pkt_drop_all;
	}

	return offset;
}

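/* Allocate a task on its lcore's NUMA socket and initialize it: rx/tx paths,
 * auxiliary data, L3/NDP control plane hooks, then the task-specific init. */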
struct task_base *init_task_struct(struct task_args *targ)
{
	struct task_init* t = targ->task_init;
	size_t offset = 0;
	size_t memsize = calc_memsize(targ, t->size);
	uint8_t task_socket = rte_lcore_to_socket_id(targ->lconf->id);
	struct task_base *tbase = prox_zmalloc(memsize, task_socket);
	PROX_PANIC(tbase == NULL, "Failed to allocate memory for task (%zu bytes)", memsize);
	offset += t->size;

	if (targ->nb_txrings == 0 && targ->nb_txports == 0)
		tbase->flags |= FLAG_NEVER_FLUSH;

	offset = init_rx_tx_rings_ports(targ, tbase, offset);
	tbase->aux = (struct task_base_aux *)(((uint8_t *)tbase) + offset);

	if (targ->task_init->flag_features & TASK_FEATURE_TSC_RX) {
		task_base_add_rx_pkt_function(tbase, rx_pkt_tsc);
	}

	offset += sizeof(struct task_base_aux);

	tbase->handle_bulk = t->handle;

	if (targ->flags & (TASK_ARG_L3|TASK_ARG_NDP)) {
		plog_info("\t\tTask (%d,%d) configured in L3/NDP mode\n", targ->lconf->id, targ->id);
		tbase->l3.ctrl_plane_ring = targ->ctrl_plane_ring;
		if (targ->nb_txports != 0) {
			tbase->aux->tx_pkt_l2 = tbase->tx_pkt;
			// Make sure control plane packets such as arp are not dropped
			tbase->aux->tx_ctrlplane_pkt = targ->nb_txrings ? tx_ctrlplane_sw : tx_ctrlplane_hw;
			if (targ->flags & TASK_ARG_L3) {
				tbase->tx_pkt = tx_pkt_l3;
				task_init_l3(tbase, targ);
			} else if (targ->flags & TASK_ARG_NDP) {
				tbase->tx_pkt = tx_pkt_ndp;
				task_init_l3(tbase, targ);
			}
		}
	}

	if (t->init) {
		t->init(tbase, targ);
	}
	tbase->aux->start = t->start;
	tbase->aux->stop = t->stop;
	tbase->aux->start_first = t->start_first;
	tbase->aux->stop_last = t->stop_last;
	if ((targ->nb_txrings != 0) && (targ->nb_txports == 1)) {
		tbase->aux->tx_pkt_hw = tx_pkt_no_drop_never_discard_hw1_no_pointer;
	}
	if (targ->tx_opt_ring) {
		tbase->aux->tx_pkt_try = tx_try_self;
	} else if (targ->nb_txrings == 1) {
		tbase->aux->tx_pkt_try = tx_try_sw1;
	} else if (targ->nb_txports) {
		tbase->aux->tx_pkt_try = tx_try_hw1;
	}

	return tbase;
}

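/* Recursively follow tx rings from the given task and return the first task
 * that transmits to an actual port, or NULL if all paths end in a discard. */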
struct task_args *find_reachable_task_sending_to_port(struct task_args *from)
{
	if (!from->nb_txrings) {
		if (from->tx_port_queue[0].port != OUT_DISCARD)
			return from;
		else
			return NULL;
	}

	struct core_task ct;
	struct task_args *dtarg, *ret;

	for (uint32_t i = 0; i < from->nb_txrings; ++i) {
		ct = from->core_task_set[0].core_task[i];
		dtarg = core_targ_get(ct.core, ct.task);
		ret = find_reachable_task_sending_to_port(dtarg);
		if (ret)
			return ret;
	}
	return NULL;
}

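/* Return the port configuration of the first port reachable from this task,
 * or NULL when no port is reachable. */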
struct prox_port_cfg *find_reachable_port(struct task_args *from)
{
	struct task_args *dst = find_reachable_task_sending_to_port(from);

	if (dst) {
		int port_id = dst->tx_port_queue[0].port;

		return &prox_port_cfg[port_id];
	}
	return NULL;
}