VNFs/DPPD-PROX/task_init.c
/*
// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#include <string.h>
#include <stdio.h>
#include <rte_version.h>

#include "prox_port_cfg.h"
#include "prox_malloc.h"
#include "task_init.h"
#include "rx_pkt.h"
#include "tx_pkt.h"
#include "log.h"
#include "quit.h"
#include "lconf.h"
#include "thread_generic.h"
#include "prox_assert.h"

#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE
#endif

static unsigned first_task = 1;
LIST_HEAD(,task_init) head;

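/*
 * Register a task implementation in the global list of supported task modes.
 * If no per-thread function is provided, the generic thread loop is used.
 * Registration typically happens at program start-up, before main() runs;
 * a minimal illustration (hypothetical task "foo", not part of this file):
 *
 *   static struct task_init task_init_foo = {
 *           .mode_str = "foo",
 *           .init = init_task_foo,
 *           .handle = handle_foo_bulk,
 *           .size = sizeof(struct task_foo),
 *   };
 *
 *   __attribute__((constructor)) static void ctor_foo(void)
 *   {
 *           reg_task(&task_init_foo);
 *   }
 */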
void reg_task(struct task_init* t)
{
	// PROX_PANIC(t->handle == NULL, "No handle function specified for task with name %d\n", t->mode);

	if (t->thread_x == NULL)
		t->thread_x = thread_generic;

	if (first_task) {
		first_task = 0;
		LIST_INIT(&head);
	}

	LIST_INSERT_HEAD(&head, t, entries);
}

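/*
 * Look up a registered task implementation by its mode and sub-mode strings,
 * as they appear in the configuration file. Returns NULL if no matching
 * mode/sub-mode pair has been registered.
 */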
struct task_init *to_task_init(const char *mode_str, const char *sub_mode_str)
{
	struct task_init *cur_t;

	LIST_FOREACH(cur_t, &head, entries) {
		if (!strcmp(mode_str, cur_t->mode_str) &&
		    !strcmp(sub_mode_str, cur_t->sub_mode_str)) {
			return cur_t;
		}
	}

	return NULL;
}

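/* qsort() helper: compare two strings through their pointers. */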
static int compare_strcmp(const void *a, const void *b)
{
	return strcmp(*(const char * const *)a, *(const char * const *)b);
}

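/* True when this task runs on the lcore configured as the PROX master. */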
int task_is_master(struct task_args *targ)
{
	return (targ->lconf->id == prox_cfg.master);
}

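/*
 * Print the sorted list of all supported "mode / sub mode" combinations.
 * The implicit "master" mode is always included.
 */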
void tasks_list(void)
{
	struct task_init *cur_t;
	char buf[sizeof(cur_t->mode_str) + sizeof(cur_t->sub_mode_str) + 4];

	int nb_modes = 1; /* master */
	LIST_FOREACH(cur_t, &head, entries) {
		++nb_modes;
	}

	char **modes = calloc(nb_modes, sizeof(*modes));
	char **cur_m = modes;
	*cur_m++ = strdup("master");
	LIST_FOREACH(cur_t, &head, entries) {
		snprintf(buf, sizeof(buf), "%s%s%s",
			cur_t->mode_str,
			(cur_t->sub_mode_str[0] == 0) ? "" : " / ",
			cur_t->sub_mode_str);
		*cur_m++ = strdup(buf);
	}
	qsort(modes, nb_modes, sizeof(*modes), compare_strcmp);

	plog_info("=== List of supported task modes / sub modes ===\n");
	for (cur_m = modes; nb_modes; ++cur_m, --nb_modes) {
		plog_info("\t%s\n", *cur_m);
		free(*cur_m);
	}
	free(modes);
}

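/*
 * Compute the total amount of memory needed for one task: the task structure
 * itself (task_init::size), the auxiliary data, the per-task RX/TX ring and
 * port/queue arrays, and a cache-line aligned ws_mbuf work area sized for the
 * number of TX rings or ports.
 */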
static size_t calc_memsize(struct task_args *targ, size_t task_size)
{
	size_t memsize = task_size;

	memsize += sizeof(struct task_base_aux);

	if (targ->nb_rxports != 0) {
		memsize += 2 * sizeof(uint8_t)*targ->nb_rxports;
	}
	if (targ->nb_rxrings != 0 || targ->tx_opt_ring_task) {
		memsize += sizeof(struct rte_ring *)*targ->nb_rxrings;
	}
	if (targ->nb_txrings != 0) {
		memsize += sizeof(struct rte_ring *) * targ->nb_txrings;
		memsize = RTE_ALIGN_CEIL(memsize, RTE_CACHE_LINE_SIZE);
		memsize += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * targ->nb_txrings;
	}
	else if (targ->nb_txports != 0) {
		memsize += sizeof(struct port_queue) * targ->nb_txports;
		memsize = RTE_ALIGN_CEIL(memsize, RTE_CACHE_LINE_SIZE);
		memsize += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * targ->nb_txports;
	}
	else {
		memsize = RTE_ALIGN_CEIL(memsize, RTE_CACHE_LINE_SIZE);
		memsize += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]);
	}

	return memsize;
}

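/*
 * Select the queue-flush callback matching the task's TX path: software
 * rings versus hardware queues, with or without packet drop.
 */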
static void *flush_function(struct task_args *targ)
{
	if (targ->flags & TASK_ARG_DROP) {
		return targ->nb_txrings ? flush_queues_sw : flush_queues_hw;
	}
	else {
		return targ->nb_txrings ? flush_queues_no_drop_sw : flush_queues_no_drop_hw;
	}
}

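/*
 * Wire up the RX and TX packet functions of a task and lay out its ring/port
 * parameter arrays and ws_mbuf work area in the memory block allocated by
 * init_task_struct(). The rx_pkt/tx_pkt callbacks are chosen based on the
 * number of rings/ports, power-of-two counts, L3/NDP mode and the task's
 * feature flags. Returns the updated offset into the task memory block.
 */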
static size_t init_rx_tx_rings_ports(struct task_args *targ, struct task_base *tbase, size_t offset)
{
	if (targ->tx_opt_ring_task) {
		tbase->rx_pkt = rx_pkt_self;
	}
	else if (targ->nb_rxrings != 0) {

		if (targ->nb_rxrings == 1) {
			tbase->rx_pkt = rx_pkt_sw1;
			tbase->rx_params_sw1.rx_ring = targ->rx_rings[0];
		}
		else {
			tbase->rx_pkt = rx_pkt_sw;
			tbase->rx_params_sw.nb_rxrings = targ->nb_rxrings;
			tbase->rx_params_sw.rx_rings = (struct rte_ring **)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct rte_ring *)*tbase->rx_params_sw.nb_rxrings;

			for (uint8_t i = 0; i < tbase->rx_params_sw.nb_rxrings; ++i) {
				tbase->rx_params_sw.rx_rings[i] = targ->rx_rings[i];
			}

			if (rte_is_power_of_2(targ->nb_rxrings)) {
				tbase->rx_pkt = rx_pkt_sw_pow2;
				tbase->rx_params_sw.rxrings_mask = targ->nb_rxrings - 1;
			}
		}
	}
	else {
		if (targ->nb_rxports == 1) {
			if (targ->flags & TASK_ARG_L3)
				tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw1_multi_l3 : rx_pkt_hw1_l3;
			else if (targ->flags & TASK_ARG_NDP)
				tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw1_multi_ndp : rx_pkt_hw1_ndp;
			else
				tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw1_multi : rx_pkt_hw1;
			tbase->rx_params_hw1.rx_pq.port = targ->rx_port_queue[0].port;
			tbase->rx_params_hw1.rx_pq.queue = targ->rx_port_queue[0].queue;
		}
		else {
			PROX_ASSERT((targ->nb_rxports != 0) || (targ->task_init->flag_features & TASK_FEATURE_NO_RX));
			if (targ->flags & TASK_ARG_L3)
				tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_multi_l3 : rx_pkt_hw_l3;
			else if (targ->flags & TASK_ARG_NDP)
				tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_multi_ndp : rx_pkt_hw_ndp;
			else
				tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_multi : rx_pkt_hw;
			tbase->rx_params_hw.nb_rxports = targ->nb_rxports;
			tbase->rx_params_hw.rx_pq = (struct port_queue *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct port_queue) * tbase->rx_params_hw.nb_rxports;
			for (int i = 0; i < targ->nb_rxports; i++) {
				tbase->rx_params_hw.rx_pq[i].port = targ->rx_port_queue[i].port;
				tbase->rx_params_hw.rx_pq[i].queue = targ->rx_port_queue[i].queue;
			}

			if (rte_is_power_of_2(targ->nb_rxports)) {
				if (targ->flags & TASK_ARG_L3)
					tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_pow2_multi_l3 : rx_pkt_hw_pow2_l3;
				else if (targ->flags & TASK_ARG_NDP)
					tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_pow2_multi_ndp : rx_pkt_hw_pow2_ndp;
				else
					tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_pow2_multi : rx_pkt_hw_pow2;
				tbase->rx_params_hw.rxport_mask = targ->nb_rxports - 1;
			}
		}
	}

	if ((targ->nb_txrings != 0) && (!targ->tx_opt_ring) && (!(targ->flags & TASK_ARG_DROP))) {
		// Transmitting to a ring in NO DROP mode. We need to make sure the receiving task is not running on the same core.
		// Otherwise we might end up in a deadlock: trying in a loop to transmit to a task which cannot receive anymore
		// (as it is not being scheduled).
		struct core_task ct;
		for (unsigned int j = 0; j < targ->nb_txrings; j++) {
			ct = targ->core_task_set[0].core_task[j];
			PROX_PANIC(ct.core == targ->lconf->id, "Core %d, task %d: NO_DROP task transmitting to another task (core %d, task %d) running on the same core => potential deadlock\n", targ->lconf->id, targ->id, ct.core, ct.task);
		}
	}
	if ((targ->nb_txrings != 0) && (targ->nb_txports == 1)) {
		/* Transmitting to multiple rings and one port */
		plog_info("Initializing with 1 port %d queue %d nb_rings=%d\n", targ->tx_port_queue[0].port, targ->tx_port_queue[0].queue, targ->nb_txrings);
		tbase->tx_params_hw_sw.tx_port_queue.port = targ->tx_port_queue[0].port;
		tbase->tx_params_hw_sw.tx_port_queue.queue = targ->tx_port_queue[0].queue;
		if (!targ->tx_opt_ring) {
			tbase->tx_params_hw_sw.nb_txrings = targ->nb_txrings;
			tbase->tx_params_hw_sw.tx_rings = (struct rte_ring **)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct rte_ring *)*tbase->tx_params_hw_sw.nb_txrings;

			for (uint8_t i = 0; i < tbase->tx_params_hw_sw.nb_txrings; ++i) {
				tbase->tx_params_hw_sw.tx_rings[i] = targ->tx_rings[i];
			}

			offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE);
			tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * tbase->tx_params_hw_sw.nb_txrings;
		}
	}
	else if (!targ->tx_opt_ring) {
		if (targ->nb_txrings != 0) {
			tbase->tx_params_sw.nb_txrings = targ->nb_txrings;
			tbase->tx_params_sw.tx_rings = (struct rte_ring **)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct rte_ring *)*tbase->tx_params_sw.nb_txrings;

			for (uint8_t i = 0; i < tbase->tx_params_sw.nb_txrings; ++i) {
				tbase->tx_params_sw.tx_rings[i] = targ->tx_rings[i];
			}

			offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE);
			tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * tbase->tx_params_sw.nb_txrings;
		}
		else if (targ->nb_txports != 0) {
			tbase->tx_params_hw.nb_txports = targ->nb_txports;
			tbase->tx_params_hw.tx_port_queue = (struct port_queue *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct port_queue) * tbase->tx_params_hw.nb_txports;
			for (uint8_t i = 0; i < tbase->tx_params_hw.nb_txports; ++i) {
				tbase->tx_params_hw.tx_port_queue[i].port = targ->tx_port_queue[i].port;
				tbase->tx_params_hw.tx_port_queue[i].queue = targ->tx_port_queue[i].queue;
			}

			offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE);
			tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * tbase->tx_params_hw.nb_txports;
		}
		else {
			offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE);
			tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset);
			offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]);
		}

		struct ws_mbuf* w = tbase->ws_mbuf;
		struct task_args *prev = targ->tx_opt_ring_task;

		while (prev) {
			prev->tbase->ws_mbuf = w;
			prev = prev->tx_opt_ring_task;
		}
	}

	if (targ->nb_txrings == 1 || targ->nb_txports == 1 || targ->tx_opt_ring) {
		if (targ->task_init->flag_features & TASK_FEATURE_NEVER_DISCARDS) {
			if (targ->tx_opt_ring) {
				tbase->tx_pkt = tx_pkt_never_discard_self;
			}
			else if (targ->flags & TASK_ARG_DROP) {
				if (targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT)
					tbase->tx_pkt = targ->nb_txrings ? tx_pkt_never_discard_sw1 : tx_pkt_never_discard_hw1_thrpt_opt;
				else
					tbase->tx_pkt = targ->nb_txrings ? tx_pkt_never_discard_sw1 : tx_pkt_never_discard_hw1_lat_opt;
			}
			else {
				if (targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT)
					tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_never_discard_sw1 : tx_pkt_no_drop_never_discard_hw1_thrpt_opt;
				else
					tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_never_discard_sw1 : tx_pkt_no_drop_never_discard_hw1_lat_opt;
			}
			if ((targ->nb_txrings) || ((targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT) == 0))
				tbase->flags |= TBASE_FLAG_NEVER_FLUSH;
			else
				targ->lconf->flush_queues[targ->task] = flush_function(targ);
		}
		else {
			if (targ->tx_opt_ring) {
				tbase->tx_pkt = tx_pkt_self;
			}
			else if (targ->flags & TASK_ARG_DROP) {
				tbase->tx_pkt = targ->nb_txrings ? tx_pkt_sw1 : tx_pkt_hw1;
			}
			else {
				tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_sw1 : tx_pkt_no_drop_hw1;
			}
			tbase->flags |= TBASE_FLAG_NEVER_FLUSH;
		}
	}
	else {
		if (targ->flags & TASK_ARG_DROP) {
			tbase->tx_pkt = targ->nb_txrings ? tx_pkt_sw : tx_pkt_hw;
		}
		else {
			tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_sw : tx_pkt_no_drop_hw;
		}

		targ->lconf->flush_queues[targ->task] = flush_function(targ);
	}

	if (targ->task_init->flag_features & TASK_FEATURE_NO_RX) {
		tbase->rx_pkt = rx_pkt_dummy;
	}

	if (targ->nb_txrings == 0 && targ->nb_txports == 0) {
		tbase->tx_pkt = tx_pkt_drop_all;
	}

	return offset;
}

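/*
 * Allocate and initialize the task_base structure for one task on the socket
 * of the core that will run it. The task-specific structure, the RX/TX
 * parameter arrays, the ws_mbuf area and the auxiliary data are all carved
 * out of a single zeroed allocation sized by calc_memsize(). For L3/NDP
 * tasks the regular TX path is wrapped so that control plane packets such as
 * ARP are sent through a dedicated path and not dropped.
 */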
struct task_base *init_task_struct(struct task_args *targ)
{
	struct task_init* t = targ->task_init;
	size_t offset = 0;
	size_t memsize = calc_memsize(targ, t->size);
	uint8_t task_socket = rte_lcore_to_socket_id(targ->lconf->id);
	struct task_base *tbase = prox_zmalloc(memsize, task_socket);
	PROX_PANIC(tbase == NULL, "Failed to allocate memory for task (%zu bytes)", memsize);
	offset += t->size;

	if (targ->nb_txrings == 0 && targ->nb_txports == 0)
		tbase->flags |= TBASE_FLAG_NEVER_FLUSH;

	offset = init_rx_tx_rings_ports(targ, tbase, offset);
	tbase->aux = (struct task_base_aux *)(((uint8_t *)tbase) + offset);

	if (targ->task_init->flag_features & TASK_FEATURE_TSC_RX) {
		task_base_add_rx_pkt_function(tbase, rx_pkt_tsc);
	}

	offset += sizeof(struct task_base_aux);

	tbase->handle_bulk = t->handle;

	if (targ->flags & (TASK_ARG_L3|TASK_ARG_NDP)) {
		plog_info("\t\tTask (%d,%d) configured in L3/NDP mode\n", targ->lconf->id, targ->id);
		tbase->l3.ctrl_plane_ring = targ->ctrl_plane_ring;
		if (targ->nb_txports != 0) {
			tbase->aux->tx_pkt_l2 = tbase->tx_pkt;
			tbase->aux->tx_ctrlplane_pkt = targ->nb_txrings ? tx_ctrlplane_sw : tx_ctrlplane_hw;
			if (targ->flags & TASK_ARG_L3) {
				tbase->tx_pkt = tx_pkt_l3;
				task_init_l3(tbase, targ);
			} else if (targ->flags & TASK_ARG_NDP) {
				tbase->tx_pkt = tx_pkt_ndp;
				task_init_l3(tbase, targ);
			}
			// Make sure control plane packets such as ARP are not dropped
		}
	}

	targ->tbase = tbase;
	if (t->init) {
		t->init(tbase, targ);
	}
	tbase->aux->start = t->start;
	tbase->aux->stop = t->stop;
	tbase->aux->start_first = t->start_first;
	tbase->aux->stop_last = t->stop_last;
	if ((targ->nb_txrings != 0) && (targ->nb_txports == 1)) {
		tbase->aux->tx_pkt_hw = tx_pkt_no_drop_never_discard_hw1_no_pointer;
	}
	if (targ->tx_opt_ring) {
		tbase->aux->tx_pkt_try = tx_try_self;
	} else if (targ->nb_txrings == 1) {
		tbase->aux->tx_pkt_try = tx_try_sw1;
	} else if (targ->nb_txports) {
		tbase->aux->tx_pkt_try = tx_try_hw1;
	}

	return tbase;
}

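/*
 * Follow the TX rings downstream from the given task (depth first) and
 * return the first reachable task that transmits to a physical port, or
 * NULL if every path ends in a discard.
 */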
struct task_args *find_reachable_task_sending_to_port(struct task_args *from)
{
	if (!from->nb_txrings) {
		if (from->tx_port_queue[0].port != OUT_DISCARD)
			return from;
		else
			return NULL;
	}

	struct core_task ct;
	struct task_args *dtarg, *ret;

	for (uint32_t i = 0; i < from->nb_txrings; ++i) {
		ct = from->core_task_set[0].core_task[i];
		dtarg = core_targ_get(ct.core, ct.task);
		ret = find_reachable_task_sending_to_port(dtarg);
		if (ret)
			return ret;
	}
	return NULL;
}

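/*
 * Return the port configuration of the first physical port reachable from
 * the given task through its TX rings, or NULL if there is none.
 */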
struct prox_port_cfg *find_reachable_port(struct task_args *from)
{
	struct task_args *dst = find_reachable_task_sending_to_port(from);

	if (dst) {
		int port_id = dst->tx_port_queue[0].port;

		return &prox_port_cfg[port_id];
	}
	return NULL;
}