// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

#include <rte_version.h>
#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_ring.h>

#include "prox_port_cfg.h"
#include "prox_malloc.h"
#include "task_init.h"
#include "rx_pkt.h"
#include "tx_pkt.h"
#include "log.h"
#include "quit.h"
#include "lconf.h"
#include "thread_generic.h"
#include "prox_assert.h"

/* Older DPDK releases define CACHE_LINE_SIZE instead of RTE_CACHE_LINE_SIZE. */
#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE
#endif
static unsigned first_task = 1;
LIST_HEAD(,task_init) head;

void reg_task(struct task_init* t)
{
	PROX_PANIC(t->handle == NULL, "No handle function specified for task with name %d\n", t->mode);

	if (t->thread_x == NULL)
		t->thread_x = thread_generic;

	LIST_INSERT_HEAD(&head, t, entries);
}
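/*
 * Illustrative sketch (not part of this file): task implementations normally
 * register themselves at load time through a constructor. The mode name and
 * the struct/handler names below (task_none, init_task_none, handle_none_bulk)
 * are hypothetical placeholders; only the task_init fields and reg_task() are
 * taken from this file.
 *
 *	static struct task_init task_init_none = {
 *		.mode_str = "none",
 *		.init = init_task_none,
 *		.handle = handle_none_bulk,
 *		.size = sizeof(struct task_none),
 *	};
 *
 *	__attribute__((constructor)) static void ctor_reg_task_none(void)
 *	{
 *		reg_task(&task_init_none);
 *	}
 */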
/* Look up a registered task_init by its mode and sub-mode strings. */
struct task_init *to_task_init(const char *mode_str, const char *sub_mode_str)
{
	struct task_init *cur_t;

	LIST_FOREACH(cur_t, &head, entries) {
		if (!strcmp(mode_str, cur_t->mode_str) &&
		    !strcmp(sub_mode_str, cur_t->sub_mode_str)) {
			return cur_t;
		}
	}

	return NULL;
}
static int compare_strcmp(const void *a, const void *b)
{
	return strcmp(*(const char * const *)a, *(const char * const *)b);
}

/* Print the sorted list of registered task modes / sub-modes. */
void tasks_list(void)
{
	struct task_init *cur_t;
	char buf[sizeof(cur_t->mode_str) + sizeof(cur_t->sub_mode_str) + 4];

	int nb_modes = 1; /* master */
	LIST_FOREACH(cur_t, &head, entries) {
		++nb_modes;
	}

	char **modes = calloc(nb_modes, sizeof(*modes));
	char **cur_m = modes;
	*cur_m++ = strdup("master");
	LIST_FOREACH(cur_t, &head, entries) {
		snprintf(buf, sizeof(buf), "%s%s%s",
			 cur_t->mode_str,
			 (cur_t->sub_mode_str[0] == 0) ? "" : " / ",
			 cur_t->sub_mode_str);
		*cur_m++ = strdup(buf);
	}
	qsort(modes, nb_modes, sizeof(*modes), compare_strcmp);

	plog_info("=== List of supported task modes / sub modes ===\n");
	for (cur_m = modes; nb_modes; ++cur_m, --nb_modes) {
		plog_info("\t%s\n", *cur_m);
		free(*cur_m);
	}
	free(modes);
}
/* Compute the size of the single allocation backing a task: the task-specific
 * struct itself, its task_base_aux, and the rx/tx ring or port arrays plus the
 * ws_mbuf buffer laid out behind it. */
static size_t calc_memsize(struct task_args *targ, size_t task_size)
{
	size_t memsize = task_size;

	memsize += sizeof(struct task_base_aux);

	if (targ->nb_rxports != 0) {
		memsize += 2 * sizeof(uint8_t) * targ->nb_rxports;
	}
	if (targ->nb_rxrings != 0 || targ->tx_opt_ring_task) {
		memsize += sizeof(struct rte_ring *) * targ->nb_rxrings;
	}
	if (targ->nb_txrings != 0) {
		memsize += sizeof(struct rte_ring *) * targ->nb_txrings;
		memsize = RTE_ALIGN_CEIL(memsize, RTE_CACHE_LINE_SIZE);
		memsize += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * targ->nb_txrings;
	}
	else if (targ->nb_txports != 0) {
		memsize += sizeof(struct port_queue) * targ->nb_txports;
		memsize = RTE_ALIGN_CEIL(memsize, RTE_CACHE_LINE_SIZE);
		memsize += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * targ->nb_txports;
	}
	else {
		memsize = RTE_ALIGN_CEIL(memsize, RTE_CACHE_LINE_SIZE);
		memsize += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]);
	}

	return memsize;
}
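/*
 * Resulting layout of that single allocation, as implied by how calc_memsize()
 * and init_rx_tx_rings_ports()/init_task_struct() advance their offsets
 * (a sketch, not an authoritative memory map):
 *
 *	tbase -> task-specific struct (task_init->size bytes)
 *	         rx ring pointers or rx port_queue array (if several rx rings/ports)
 *	         tx ring pointers or tx port_queue array (if tx rings/ports)
 *	         struct ws_mbuf buffer (cache-line aligned)
 *	         struct task_base_aux
 */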
/* Pick the queue-flush callback: drop vs. no-drop, sw rings vs. hw ports. */
static void *flush_function(struct task_args *targ)
{
	if (targ->flags & TASK_ARG_DROP) {
		return targ->nb_txrings ? flush_queues_sw : flush_queues_hw;
	}
	else {
		return targ->nb_txrings ? flush_queues_no_drop_sw : flush_queues_no_drop_hw;
	}
}
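/*
 * init_rx_tx_rings_ports() selects the rx_pkt/tx_pkt function pointers for a
 * task and carves the rx/tx ring-pointer or port_queue arrays and the ws_mbuf
 * buffer out of the task allocation, starting at 'offset'. The suffixes of the
 * selected functions follow the flags tested below (a summary inferred from
 * this file, not a formal naming specification):
 *	_sw / _hw            - transfer via rte_rings vs. physical port queues
 *	1 / _pow2            - single ring/port, or a power-of-two count using a mask
 *	_multi               - chosen when TASK_FEATURE_MULTI_RX is set
 *	no_drop              - chosen when TASK_ARG_DROP is not set
 *	never_discard        - chosen when TASK_FEATURE_NEVER_DISCARDS is set
 *	_thrpt_opt/_lat_opt  - depending on TASK_FEATURE_THROUGHPUT_OPT
 *	_self                - the tx_opt_ring / tx_opt_ring_task optimized path
 */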
static size_t init_rx_tx_rings_ports(struct task_args *targ, struct task_base *tbase, size_t offset)
{
	/* RX side: select the receive function and set up ring/port parameters. */
	if (targ->tx_opt_ring_task) {
		tbase->rx_pkt = rx_pkt_self;
	}
	else if (targ->nb_rxrings != 0) {
		if (targ->nb_rxrings == 1) {
			tbase->rx_pkt = rx_pkt_sw1;
			tbase->rx_params_sw1.rx_ring = targ->rx_rings[0];
		}
		else {
			tbase->rx_pkt = rx_pkt_sw;
			tbase->rx_params_sw.nb_rxrings = targ->nb_rxrings;
			tbase->rx_params_sw.rx_rings = (struct rte_ring **)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct rte_ring *) * tbase->rx_params_sw.nb_rxrings;

			for (uint8_t i = 0; i < tbase->rx_params_sw.nb_rxrings; ++i) {
				tbase->rx_params_sw.rx_rings[i] = targ->rx_rings[i];
			}

			if (rte_is_power_of_2(targ->nb_rxrings)) {
				tbase->rx_pkt = rx_pkt_sw_pow2;
				tbase->rx_params_sw.rxrings_mask = targ->nb_rxrings - 1;
			}
		}
	}
	else {
		if (targ->nb_rxports == 1) {
			tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX) ? rx_pkt_hw1_multi : rx_pkt_hw1;
			tbase->rx_params_hw1.rx_pq.port = targ->rx_port_queue[0].port;
			tbase->rx_params_hw1.rx_pq.queue = targ->rx_port_queue[0].queue;
		}
		else {
			PROX_ASSERT((targ->nb_rxports != 0) || (targ->task_init->flag_features & TASK_FEATURE_NO_RX));
			tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX) ? rx_pkt_hw_multi : rx_pkt_hw;
			tbase->rx_params_hw.nb_rxports = targ->nb_rxports;
			tbase->rx_params_hw.rx_pq = (struct port_queue *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct port_queue) * tbase->rx_params_hw.nb_rxports;
			for (int i = 0; i < targ->nb_rxports; i++) {
				tbase->rx_params_hw.rx_pq[i].port = targ->rx_port_queue[i].port;
				tbase->rx_params_hw.rx_pq[i].queue = targ->rx_port_queue[i].queue;
			}

			if (rte_is_power_of_2(targ->nb_rxports)) {
				tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX) ? rx_pkt_hw_pow2_multi : rx_pkt_hw_pow2;
				tbase->rx_params_hw.rxport_mask = targ->nb_rxports - 1;
			}
		}
	}
	if ((targ->nb_txrings != 0) && (!targ->tx_opt_ring) && (!(targ->flags & TASK_ARG_DROP))) {
		// Transmitting to a ring in NO DROP mode: make sure the receiving task is not
		// running on the same core. Otherwise we could end up in a deadlock, looping
		// forever while trying to transmit to a task that can no longer receive
		// (as it is not being scheduled).
		struct core_task ct;
		struct task_args *dtarg;
		for (unsigned int j = 0; j < targ->nb_txrings; j++) {
			ct = targ->core_task_set[0].core_task[j];
			PROX_PANIC(ct.core == targ->lconf->id, "Core %d, task %d: NO_DROP task transmitting to another task (core %d, task %d) running on the same core => potential deadlock\n", targ->lconf->id, targ->id, ct.core, ct.task);
			//plog_info("Core %d, task %d: NO_DROP task transmitting to another task (core %d, task %d) running on the same core => potential deadlock\n", targ->lconf->id, targ->id, ct.core, ct.task);
		}
	}
	/* TX side: set up destination rings/ports and the ws_mbuf write buffer. */
	if ((targ->nb_txrings != 0) && (targ->nb_txports == 1)) {
		/* Transmitting to rings and a single port */
		plog_info("Initializing with 1 port %d queue %d nb_rings=%d\n", targ->tx_port_queue[0].port, targ->tx_port_queue[0].queue, targ->nb_txrings);
		tbase->tx_params_hw_sw.tx_port_queue.port = targ->tx_port_queue[0].port;
		tbase->tx_params_hw_sw.tx_port_queue.queue = targ->tx_port_queue[0].queue;
		if (!targ->tx_opt_ring) {
			tbase->tx_params_hw_sw.nb_txrings = targ->nb_txrings;
			tbase->tx_params_hw_sw.tx_rings = (struct rte_ring **)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct rte_ring *) * tbase->tx_params_hw_sw.nb_txrings;

			for (uint8_t i = 0; i < tbase->tx_params_hw_sw.nb_txrings; ++i) {
				tbase->tx_params_hw_sw.tx_rings[i] = targ->tx_rings[i];
			}

			offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE);
			tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * tbase->tx_params_hw_sw.nb_txrings;
		}
	}
	else if (!targ->tx_opt_ring) {
		if (targ->nb_txrings != 0) {
			tbase->tx_params_sw.nb_txrings = targ->nb_txrings;
			tbase->tx_params_sw.tx_rings = (struct rte_ring **)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct rte_ring *) * tbase->tx_params_sw.nb_txrings;

			for (uint8_t i = 0; i < tbase->tx_params_sw.nb_txrings; ++i) {
				tbase->tx_params_sw.tx_rings[i] = targ->tx_rings[i];
			}

			offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE);
			tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * tbase->tx_params_sw.nb_txrings;
		}
		else if (targ->nb_txports != 0) {
			tbase->tx_params_hw.nb_txports = targ->nb_txports;
			tbase->tx_params_hw.tx_port_queue = (struct port_queue *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct port_queue) * tbase->tx_params_hw.nb_txports;
			for (uint8_t i = 0; i < tbase->tx_params_hw.nb_txports; ++i) {
				tbase->tx_params_hw.tx_port_queue[i].port = targ->tx_port_queue[i].port;
				tbase->tx_params_hw.tx_port_queue[i].queue = targ->tx_port_queue[i].queue;
			}

			offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE);
			tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * tbase->tx_params_hw.nb_txports;
		}
		else {
			offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE);
			tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]);
		}
	}
	/* Share this task's ws_mbuf with the tasks linked through the
	 * tx_opt_ring_task chain (the optimized ring path). */
	struct ws_mbuf* w = tbase->ws_mbuf;
	struct task_args *prev = targ->tx_opt_ring_task;

	while (prev) {
		prev->tbase->ws_mbuf = w;
		prev = prev->tx_opt_ring_task;
	}
	if (targ->nb_txrings == 1 || targ->nb_txports == 1 || targ->tx_opt_ring) {
		if (targ->task_init->flag_features & TASK_FEATURE_NEVER_DISCARDS) {
			if (targ->tx_opt_ring) {
				tbase->tx_pkt = tx_pkt_never_discard_self;
			}
			else if (targ->flags & TASK_ARG_DROP) {
				if (targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT)
					tbase->tx_pkt = targ->nb_txrings ? tx_pkt_never_discard_sw1 : tx_pkt_never_discard_hw1_thrpt_opt;
				else
					tbase->tx_pkt = targ->nb_txrings ? tx_pkt_never_discard_sw1 : tx_pkt_never_discard_hw1_lat_opt;
			}
			else {
				if (targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT)
					tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_never_discard_sw1 : tx_pkt_no_drop_never_discard_hw1_thrpt_opt;
				else
					tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_never_discard_sw1 : tx_pkt_no_drop_never_discard_hw1_lat_opt;
			}
			if ((targ->nb_txrings) || ((targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT) == 0))
				tbase->flags |= FLAG_NEVER_FLUSH;
			else
				targ->lconf->flush_queues[targ->task] = flush_function(targ);
		}
		else {
			if (targ->tx_opt_ring) {
				tbase->tx_pkt = tx_pkt_self;
			}
			else if (targ->flags & TASK_ARG_DROP) {
				tbase->tx_pkt = targ->nb_txrings ? tx_pkt_sw1 : tx_pkt_hw1;
			}
			else {
				tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_sw1 : tx_pkt_no_drop_hw1;
			}
			tbase->flags |= FLAG_NEVER_FLUSH;
		}
	}
	else {
		if (targ->flags & TASK_ARG_DROP) {
			tbase->tx_pkt = targ->nb_txrings ? tx_pkt_sw : tx_pkt_hw;
		}
		else {
			tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_sw : tx_pkt_no_drop_hw;
		}
		targ->lconf->flush_queues[targ->task] = flush_function(targ);
	}

	if (targ->task_init->flag_features & TASK_FEATURE_NO_RX) {
		tbase->rx_pkt = rx_pkt_dummy;
	}

	if (targ->nb_txrings == 0 && targ->nb_txports == 0) {
		tbase->tx_pkt = tx_pkt_drop_all;
	}

	return offset;
}
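/*
 * init_task_struct() allocates one block per task on the socket of the task's
 * lcore (sized by calc_memsize()), wires up the rx/tx function pointers and
 * the auxiliary structure behind the task struct, and finally invokes the
 * task-specific init hook registered through reg_task().
 */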
struct task_base *init_task_struct(struct task_args *targ)
{
	struct task_init* t = targ->task_init;

	size_t memsize = calc_memsize(targ, t->size);
	uint8_t task_socket = rte_lcore_to_socket_id(targ->lconf->id);
	struct task_base *tbase = prox_zmalloc(memsize, task_socket);
	PROX_PANIC(tbase == NULL, "Failed to allocate memory for task (%zu bytes)", memsize);

	size_t offset = t->size;
	if (targ->nb_txrings == 0 && targ->nb_txports == 0)
		tbase->flags |= FLAG_NEVER_FLUSH;

	offset = init_rx_tx_rings_ports(targ, tbase, offset);
	tbase->aux = (struct task_base_aux *)(((uint8_t *)tbase) + offset);

	if (targ->task_init->flag_features & TASK_FEATURE_RX_ALL) {
		task_base_add_rx_pkt_function(tbase, rx_pkt_all);
		tbase->aux->all_mbufs = prox_zmalloc(MAX_RX_PKT_ALL * sizeof(*tbase->aux->all_mbufs), task_socket);
	}
	if (targ->task_init->flag_features & TASK_FEATURE_TSC_RX) {
		task_base_add_rx_pkt_function(tbase, rx_pkt_tsc);
	}

	offset += sizeof(struct task_base_aux);

	tbase->handle_bulk = t->handle;

	if (t->init) {
		t->init(tbase, targ);
	}
	tbase->aux->start = t->start;
	tbase->aux->stop = t->stop;
	tbase->aux->start_first = t->start_first;
	tbase->aux->stop_last = t->stop_last;
	if ((targ->nb_txrings != 0) && (targ->nb_txports == 1)) {
		tbase->aux->tx_pkt_hw = tx_pkt_no_drop_never_discard_hw1_no_pointer;
	}
	if (targ->tx_opt_ring) {
		tbase->aux->tx_pkt_try = tx_try_self;
	} else if (targ->nb_txrings == 1) {
		tbase->aux->tx_pkt_try = tx_try_sw1;
	} else if (targ->nb_txports) {
		tbase->aux->tx_pkt_try = tx_try_hw1;
	}

	return tbase;
}
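/*
 * The two helpers below walk the task graph through the configured tx rings:
 * find_reachable_task_sending_to_port() recurses from 'from' until it finds a
 * task without tx rings (i.e. one transmitting to a port, if anywhere), and
 * find_reachable_port() maps that task's first tx queue to its prox_port_cfg
 * entry.
 */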
struct task_args *find_reachable_task_sending_to_port(struct task_args *from)
{
	if (!from->nb_txrings)
		return from;

	struct core_task ct;
	struct task_args *dtarg, *ret;

	for (uint32_t i = 0; i < from->nb_txrings; ++i) {
		ct = from->core_task_set[0].core_task[i];
		dtarg = core_targ_get(ct.core, ct.task);
		ret = find_reachable_task_sending_to_port(dtarg);
		if (ret)
			return ret;
	}
	return NULL;
}
struct prox_port_cfg *find_reachable_port(struct task_args *from)
{
	struct task_args *dst = find_reachable_task_sending_to_port(from);

	if (dst) {
		int port_id = dst->tx_port_queue[0].port;

		return &prox_port_cfg[port_id];
	}
	return NULL;
}