Merge "[l2l3 stack] implements new arp state machine & arp buffering"
[samplevnf.git] / VNFs / DPPD-PROX / task_init.c
1 /*
2 // Copyright (c) 2010-2017 Intel Corporation
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //     http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 */
16
17 #include <string.h>
18 #include <stdio.h>
19 #include <rte_version.h>
20
21 #include "prox_port_cfg.h"
22 #include "prox_malloc.h"
23 #include "task_init.h"
24 #include "rx_pkt.h"
25 #include "tx_pkt.h"
26 #include "log.h"
27 #include "quit.h"
28 #include "lconf.h"
29 #include "thread_generic.h"
30 #include "prox_assert.h"
31
32 #if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
33 #define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE
34 #endif
35
36 static unsigned first_task = 1;
37 LIST_HEAD(,task_init) head;
38
39 void reg_task(struct task_init* t)
40 {
41         PROX_PANIC(t->handle == NULL, "No handle function specified for task with name %d\n", t->mode);
42
43         if (t->thread_x == NULL)
44                 t->thread_x = thread_generic;
45
46         if (first_task) {
47                 first_task = 0;
48                 LIST_INIT(&head);
49         }
50
51         LIST_INSERT_HEAD(&head, t, entries);
52 }
53
54 struct task_init *to_task_init(const char *mode_str, const char *sub_mode_str)
55 {
56         struct task_init *cur_t;
57
58         LIST_FOREACH(cur_t, &head, entries) {
59                 if (!strcmp(mode_str, cur_t->mode_str) &&
60                     !strcmp(sub_mode_str, cur_t->sub_mode_str)) {
61                         return cur_t;
62                 }
63         }
64
65         return NULL;
66 }
67
/* qsort() comparator for an array of C strings (char *), ordered by strcmp. */
static int compare_strcmp(const void *a, const void *b)
{
	const char *lhs = *(const char * const *)a;
	const char *rhs = *(const char * const *)b;

	return strcmp(lhs, rhs);
}
72
73 void tasks_list(void)
74 {
75         struct task_init *cur_t;
76         char buf[sizeof(cur_t->mode_str) + sizeof(cur_t->sub_mode_str) + 4];
77
78         int nb_modes = 1; /* master */
79         LIST_FOREACH(cur_t, &head, entries) {
80                 ++nb_modes;
81         }
82
83         char **modes = calloc(nb_modes, sizeof(*modes));
84         char **cur_m = modes;
85         *cur_m++ = strdup("master");
86         LIST_FOREACH(cur_t, &head, entries) {
87                 snprintf(buf, sizeof(buf), "%s%s%s",
88                         cur_t->mode_str,
89                         (cur_t->sub_mode_str[0] == 0) ? "" : " / ",
90                         cur_t->sub_mode_str);
91                 *cur_m++ = strdup(buf);
92         }
93         qsort(modes, nb_modes, sizeof(*modes), compare_strcmp);
94
95         plog_info("=== List of supported task modes / sub modes ===\n");
96         for (cur_m = modes; nb_modes; ++cur_m, --nb_modes) {
97                 plog_info("\t%s\n", *cur_m);
98                 free(*cur_m);
99         }
100         free(modes);
101 }
102
103 static size_t calc_memsize(struct task_args *targ, size_t task_size)
104 {
105         size_t memsize = task_size;
106
107         memsize += sizeof(struct task_base_aux);
108
109         if (targ->nb_rxports != 0) {
110                 memsize += 2 * sizeof(uint8_t)*targ->nb_rxports;
111         }
112         if (targ->nb_rxrings != 0 || targ->tx_opt_ring_task) {
113                 memsize += sizeof(struct rte_ring *)*targ->nb_rxrings;
114         }
115         if (targ->nb_txrings != 0) {
116                 memsize += sizeof(struct rte_ring *) * targ->nb_txrings;
117                 memsize = RTE_ALIGN_CEIL(memsize, RTE_CACHE_LINE_SIZE);
118                 memsize += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * targ->nb_txrings;
119         }
120         else if (targ->nb_txports != 0) {
121                 memsize += sizeof(struct port_queue) * targ->nb_txports;
122                 memsize = RTE_ALIGN_CEIL(memsize, RTE_CACHE_LINE_SIZE);
123                 memsize += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * targ->nb_txports;
124         }
125         else {
126                 memsize = RTE_ALIGN_CEIL(memsize, RTE_CACHE_LINE_SIZE);
127                 memsize += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]);
128         }
129
130         return memsize;
131 }
132
133 static void *flush_function(struct task_args *targ)
134 {
135         if (targ->flags & TASK_ARG_DROP) {
136                 return targ->nb_txrings ? flush_queues_sw : flush_queues_hw;
137         }
138         else {
139                 return targ->nb_txrings ? flush_queues_no_drop_sw : flush_queues_no_drop_hw;
140         }
141 }
142
143 static size_t init_rx_tx_rings_ports(struct task_args *targ, struct task_base *tbase, size_t offset)
144 {
145         if (targ->tx_opt_ring_task) {
146                 tbase->rx_pkt = rx_pkt_self;
147         }
148         else if (targ->nb_rxrings != 0) {
149
150                 if (targ->nb_rxrings == 1) {
151                         tbase->rx_pkt = rx_pkt_sw1;
152                         tbase->rx_params_sw1.rx_ring = targ->rx_rings[0];
153                 }
154                 else {
155                         tbase->rx_pkt = rx_pkt_sw;
156                         tbase->rx_params_sw.nb_rxrings = targ->nb_rxrings;
157                         tbase->rx_params_sw.rx_rings = (struct rte_ring **)(((uint8_t *)tbase) + offset);
158                         offset += sizeof(struct rte_ring *)*tbase->rx_params_sw.nb_rxrings;
159
160                         for (uint8_t i = 0; i < tbase->rx_params_sw.nb_rxrings; ++i) {
161                                 tbase->rx_params_sw.rx_rings[i] = targ->rx_rings[i];
162                         }
163
164                         if (rte_is_power_of_2(targ->nb_rxrings)) {
165                                 tbase->rx_pkt = rx_pkt_sw_pow2;
166                                 tbase->rx_params_sw.rxrings_mask = targ->nb_rxrings - 1;
167                         }
168                 }
169         }
170         else {
171                 if (targ->nb_rxports == 1) {
172                         tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw1_multi : rx_pkt_hw1;
173                         tbase->rx_params_hw1.rx_pq.port =  targ->rx_port_queue[0].port;
174                         tbase->rx_params_hw1.rx_pq.queue = targ->rx_port_queue[0].queue;
175                 }
176                 else {
177                         PROX_ASSERT((targ->nb_rxports != 0) || (targ->task_init->flag_features & TASK_FEATURE_NO_RX));
178                         tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_multi : rx_pkt_hw;
179                         tbase->rx_params_hw.nb_rxports = targ->nb_rxports;
180                         tbase->rx_params_hw.rx_pq = (struct port_queue *)(((uint8_t *)tbase) + offset);
181                         offset += sizeof(struct port_queue) * tbase->rx_params_hw.nb_rxports;
182                         for (int i = 0; i< targ->nb_rxports; i++) {
183                                 tbase->rx_params_hw.rx_pq[i].port = targ->rx_port_queue[i].port;
184                                 tbase->rx_params_hw.rx_pq[i].queue = targ->rx_port_queue[i].queue;
185                         }
186
187                         if (rte_is_power_of_2(targ->nb_rxports)) {
188                                 tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_pow2_multi : rx_pkt_hw_pow2;
189                                 tbase->rx_params_hw.rxport_mask = targ->nb_rxports - 1;
190                         }
191                 }
192         }
193
194         if ((targ->nb_txrings != 0) && (!targ->tx_opt_ring) && (!(targ->flags & TASK_ARG_DROP))) {
195                 // Transmitting to a ring in NO DROP. We need to make sure the receiving task in not running on the same core.
196                 // Otherwise we might end up in a dead lock: trying in a loop to transmit to a task which cannot receive anymore
197                 // (as npt being scheduled).
198                 struct core_task ct;
199                 struct task_args *dtarg;
200                 for (unsigned int j = 0; j < targ->nb_txrings; j++) {
201                         ct = targ->core_task_set[0].core_task[j];
202                         PROX_PANIC(ct.core == targ->lconf->id, "Core %d, task %d: NO_DROP task transmitting to another task (core %d, task %d) running on on same core => potential deadlock\n", targ->lconf->id, targ->id, ct.core, ct.task);
203                         //plog_info("Core %d, task %d: NO_DROP task transmitting to another task (core %d, task %d) running on on same core => potential deadlock\n", targ->lconf->id, targ->id, ct.core, ct.task);
204                 }
205         }
206         if ((targ->nb_txrings != 0) && (targ->nb_txports == 1)) {
207                 /* Transmitting to multiple rings and one port */
208                 plog_info("Initializing with 1 port %d queue %d nb_rings=%d\n", targ->tx_port_queue[0].port, targ->tx_port_queue[0].queue, targ->nb_txrings);
209                 tbase->tx_params_hw_sw.tx_port_queue.port =  targ->tx_port_queue[0].port;
210                 tbase->tx_params_hw_sw.tx_port_queue.queue =  targ->tx_port_queue[0].queue;
211                 if (!targ->tx_opt_ring) {
212                         tbase->tx_params_hw_sw.nb_txrings = targ->nb_txrings;
213                         tbase->tx_params_hw_sw.tx_rings = (struct rte_ring **)(((uint8_t *)tbase) + offset);
214                         offset += sizeof(struct rte_ring *)*tbase->tx_params_hw_sw.nb_txrings;
215
216                         for (uint8_t i = 0; i < tbase->tx_params_hw_sw.nb_txrings; ++i) {
217                                 tbase->tx_params_hw_sw.tx_rings[i] = targ->tx_rings[i];
218                         }
219
220                         offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE);
221                         tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset);
222                         offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * tbase->tx_params_hw_sw.nb_txrings;
223                 }
224         }
225         else if (!targ->tx_opt_ring) {
226                 if (targ->nb_txrings != 0) {
227                         tbase->tx_params_sw.nb_txrings = targ->nb_txrings;
228                         tbase->tx_params_sw.tx_rings = (struct rte_ring **)(((uint8_t *)tbase) + offset);
229                         offset += sizeof(struct rte_ring *)*tbase->tx_params_sw.nb_txrings;
230
231                         for (uint8_t i = 0; i < tbase->tx_params_sw.nb_txrings; ++i) {
232                                 tbase->tx_params_sw.tx_rings[i] = targ->tx_rings[i];
233                         }
234
235                         offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE);
236                         tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset);
237                         offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * tbase->tx_params_sw.nb_txrings;
238                 }
239                 else if (targ->nb_txports != 0) {
240                         tbase->tx_params_hw.nb_txports = targ->nb_txports;
241                         tbase->tx_params_hw.tx_port_queue = (struct port_queue *)(((uint8_t *)tbase) + offset);
242                         offset += sizeof(struct port_queue) * tbase->tx_params_hw.nb_txports;
243                         for (uint8_t i = 0; i < tbase->tx_params_hw.nb_txports; ++i) {
244                                 tbase->tx_params_hw.tx_port_queue[i].port = targ->tx_port_queue[i].port;
245                                 tbase->tx_params_hw.tx_port_queue[i].queue = targ->tx_port_queue[i].queue;
246                         }
247
248                         offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE);
249                         tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset);
250                         offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * tbase->tx_params_hw.nb_txports;
251                 }
252                 else {
253                         offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE);
254                         tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset);
255                         offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]);
256                 }
257
258                 struct ws_mbuf* w = tbase->ws_mbuf;
259                 struct task_args *prev = targ->tx_opt_ring_task;
260
261                 while (prev) {
262                         prev->tbase->ws_mbuf = w;
263                         prev = prev->tx_opt_ring_task;
264                 }
265         }
266         if (targ->nb_txrings == 1 || targ->nb_txports == 1 || targ->tx_opt_ring) {
267                 if (targ->task_init->flag_features & TASK_FEATURE_NEVER_DISCARDS) {
268                         if (targ->tx_opt_ring) {
269                                 tbase->tx_pkt = tx_pkt_never_discard_self;
270                         }
271                         else if (targ->flags & TASK_ARG_DROP) {
272                                 if (targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT)
273                                         tbase->tx_pkt = targ->nb_txrings ? tx_pkt_never_discard_sw1 : tx_pkt_never_discard_hw1_thrpt_opt;
274                                 else
275                                         tbase->tx_pkt = targ->nb_txrings ? tx_pkt_never_discard_sw1 : tx_pkt_never_discard_hw1_lat_opt;
276                         }
277                         else {
278                                 if (targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT)
279                                         tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_never_discard_sw1 : tx_pkt_no_drop_never_discard_hw1_thrpt_opt;
280                                 else
281                                         tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_never_discard_sw1 : tx_pkt_no_drop_never_discard_hw1_lat_opt;
282                         }
283                         if ((targ->nb_txrings) || ((targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT) == 0))
284                                 tbase->flags |= FLAG_NEVER_FLUSH;
285                         else
286                                 targ->lconf->flush_queues[targ->task] = flush_function(targ);
287                 }
288                 else {
289                         if (targ->tx_opt_ring) {
290                                 tbase->tx_pkt = tx_pkt_self;
291                         }
292                         else if (targ->flags & TASK_ARG_DROP) {
293                                 tbase->tx_pkt = targ->nb_txrings ? tx_pkt_sw1 : tx_pkt_hw1;
294                         }
295                         else {
296                                 tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_sw1 : tx_pkt_no_drop_hw1;
297                         }
298                         tbase->flags |= FLAG_NEVER_FLUSH;
299                 }
300         }
301         else {
302                 if (targ->flags & TASK_ARG_DROP) {
303                         tbase->tx_pkt = targ->nb_txrings ? tx_pkt_sw : tx_pkt_hw;
304                 }
305                 else {
306                         tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_sw : tx_pkt_no_drop_hw;
307                 }
308
309                 targ->lconf->flush_queues[targ->task] = flush_function(targ);
310         }
311
312         if (targ->task_init->flag_features & TASK_FEATURE_NO_RX) {
313                 tbase->rx_pkt = rx_pkt_dummy;
314         }
315
316         if (targ->nb_txrings == 0 && targ->nb_txports == 0) {
317                 tbase->tx_pkt = tx_pkt_drop_all;
318         }
319
320         return offset;
321 }
322
/* Allocate and initialize a task_base for the given task arguments.
 * The single allocation (sized by calc_memsize()) holds the task struct,
 * the trailing rx/tx arrays carved out by init_rx_tx_rings_ports(), and
 * the task_base_aux; 'offset' tracks how much of it has been consumed.
 * Calls the task's own init hook last, then wires the aux callbacks.
 * Panics on allocation failure. */
struct task_base *init_task_struct(struct task_args *targ)
{
	struct task_init* t = targ->task_init;
	size_t offset = 0;
	size_t memsize = calc_memsize(targ, t->size);
	/* Allocate on the NUMA socket of the core that will run the task. */
	uint8_t task_socket = rte_lcore_to_socket_id(targ->lconf->id);
	struct task_base *tbase = prox_zmalloc(memsize, task_socket);
	PROX_PANIC(tbase == NULL, "Failed to allocate memory for task (%zu bytes)", memsize);
	offset += t->size;

	if (targ->nb_txrings == 0 && targ->nb_txports == 0)
		tbase->flags |= FLAG_NEVER_FLUSH;

	/* Carve rx/tx arrays out of the allocation; aux lives right after them. */
	offset = init_rx_tx_rings_ports(targ, tbase, offset);
	tbase->aux = (struct task_base_aux *)(((uint8_t *)tbase) + offset);

	if (targ->task_init->flag_features & TASK_FEATURE_RX_ALL) {
		task_base_add_rx_pkt_function(tbase, rx_pkt_all);
		tbase->aux->all_mbufs = prox_zmalloc(MAX_RX_PKT_ALL * sizeof(* tbase->aux->all_mbufs), task_socket);
	}
	if (targ->task_init->flag_features & TASK_FEATURE_TSC_RX) {
		task_base_add_rx_pkt_function(tbase, rx_pkt_tsc);
	}

	offset += sizeof(struct task_base_aux);

	tbase->handle_bulk = t->handle;

	/* targ->tbase must be set before t->init, which may rely on it. */
	targ->tbase = tbase;
	if (t->init) {
		t->init(tbase, targ);
	}
	tbase->aux->start = t->start;
	tbase->aux->stop = t->stop;
	tbase->aux->start_first = t->start_first;
	tbase->aux->stop_last = t->stop_last;
	if ((targ->nb_txrings != 0) && (targ->nb_txports == 1)) {
		tbase->aux->tx_pkt_hw = tx_pkt_no_drop_never_discard_hw1_no_pointer;
	}
	/* Pick the "try transmit" variant matching the tx setup, if any applies. */
	if (targ->tx_opt_ring) {
		tbase->aux->tx_pkt_try = tx_try_self;
	} else if (targ->nb_txrings == 1) {
		tbase->aux->tx_pkt_try = tx_try_sw1;
	} else if (targ->nb_txports) {
		tbase->aux->tx_pkt_try = tx_try_hw1;
	}

	return tbase;
}
372
373 struct task_args *find_reachable_task_sending_to_port(struct task_args *from)
374 {
375         if (!from->nb_txrings)
376                 return from;
377
378         struct core_task ct;
379         struct task_args *dtarg, *ret;
380
381         for (uint32_t i = 0; i < from->nb_txrings; ++i) {
382                 ct = from->core_task_set[0].core_task[i];
383                 dtarg = core_targ_get(ct.core, ct.task);
384                 ret = find_reachable_task_sending_to_port(dtarg);
385                 if (ret)
386                         return ret;
387         }
388         return NULL;
389 }
390
391 struct prox_port_cfg *find_reachable_port(struct task_args *from)
392 {
393         struct task_args *dst = find_reachable_task_sending_to_port(from);
394
395         if (dst) {
396                 int port_id = dst->tx_port_queue[0].port;
397
398                 return &prox_port_cfg[port_id];
399         }
400         return NULL;
401 }