Merge "Fix handling configuration error (missing tx port or ring in gen mode)"
[samplevnf.git] / VNFs / DPPD-PROX / main.c
/*
// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#include <string.h>
#include <locale.h>
#include <unistd.h>
#include <signal.h>

#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_table_hash.h>
#include <rte_memzone.h>
#include <rte_errno.h>

#include "prox_malloc.h"
#include "run.h"
#include "main.h"
#include "log.h"
#include "quit.h"
#include "clock.h"
#include "defines.h"
#include "version.h"
#include "prox_args.h"
#include "prox_assert.h"
#include "prox_cfg.h"
#include "prox_shared.h"
#include "prox_port_cfg.h"
#include "toeplitz.h"
#include "hash_utils.h"
#include "handle_lb_net.h"
#include "prox_cksum.h"
#include "thread_nop.h"
#include "thread_generic.h"
#include "thread_pipeline.h"
#include "cqm.h"
#include "handle_master.h"

#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE
#endif

uint8_t lb_nb_txrings = 0xff;
struct rte_ring *ctrl_rings[RTE_MAX_LCORE*MAX_TASKS_PER_CORE];
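/* ctrl_rings is a flat array indexed by (lcore_id * MAX_TASKS_PER_CORE + task_id);
   init_ring_between_tasks() fills in a slot when the master core sets up a
   control ring towards that core/task. A lookup would therefore look like the
   following sketch (hypothetical helper, not part of this file):

        static inline struct rte_ring *get_ctrl_ring(uint32_t lcore_id, uint32_t task_id)
        {
                return ctrl_rings[lcore_id * MAX_TASKS_PER_CORE + task_id];
        }
*/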

static void __attribute__((noreturn)) prox_usage(const char *prgname)
{
        plog_info("\nUsage: %s [-f CONFIG_FILE] [-a|-e] [-m|-s|-i] [-w DEF] [-u] [-t]\n"
                  "\t-f CONFIG_FILE : configuration file to load, ./prox.cfg by default\n"
                  "\t-l LOG_FILE : log file name, ./prox.log by default\n"
                  "\t-p : include PID in log file name if default log file is used\n"
                  "\t-o DISPLAY : set display to use, can be 'curses' (default), 'cli' or 'none'\n"
                  "\t-v verbosity : initial logging verbosity\n"
                  "\t-a : autostart all cores (default)\n"
                  "\t-e : don't autostart\n"
                  "\t-n : create NULL devices instead of using PCI devices, useful together with -i\n"
                  "\t-m : list supported task modes and exit\n"
                  "\t-s : check configuration file syntax and exit\n"
                  "\t-i : check initialization sequence and exit\n"
                  "\t-u : listen on UDS /tmp/prox.sock\n"
                  "\t-t : listen on TCP port 8474\n"
                  "\t-q : pass argument to Lua interpreter, useful to define variables\n"
                  "\t-w : define variable using syntax varname=value\n"
                  "\t     takes precedence over variables defined in CONFIG_FILE\n"
                  "\t-k : log statistics to file \"stats_dump\" in current directory\n"
                  "\t-d : run as daemon; the parent process will block until PROX is initialized\n"
                  "\t-z : ignore CPU topology, implies -i\n"
                  "\t-r : change initial screen refresh rate; if set to a value lower than 0.001 seconds,\n"
                  "\t     screen refreshing will be disabled\n"
                  , prgname);
        exit(EXIT_FAILURE);
}

static void check_mixed_normal_pipeline(void)
{
        struct lcore_cfg *lconf = NULL;
        uint32_t lcore_id = -1;

        while (prox_core_next(&lcore_id, 0) == 0) {
                lconf = &lcore_cfg[lcore_id];

                int all_thread_nop = 1;
                int generic = 0;
                int pipeline = 0;
                int l3 = 0;
                for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
                        struct task_args *targ = &lconf->targs[task_id];
                        l3 = !strcmp("l3", targ->sub_mode_str);
                        all_thread_nop = all_thread_nop && !l3 &&
                                targ->task_init->thread_x == thread_nop;

                        pipeline = pipeline || targ->task_init->thread_x == thread_pipeline;
                        generic = generic || targ->task_init->thread_x == thread_generic || l3;
                }
                PROX_PANIC(generic && pipeline, "Can't run both pipeline and normal thread on same core\n");

                if (all_thread_nop)
                        lconf->thread_x = thread_nop;
                else
                        lconf->thread_x = thread_generic;
        }
}

static void check_zero_rx(void)
{
        struct lcore_cfg *lconf = NULL;
        struct task_args *targ;

        while (core_targ_next(&lconf, &targ, 0) == 0) {
                if (targ->nb_rxports != 0) {
                        PROX_PANIC(task_init_flag_set(targ->task_init, TASK_FEATURE_NO_RX),
                           "\tCore %u task %u: rx_ports configured while mode %s does not use them\n", lconf->id, targ->id, targ->task_init->mode_str);
                }
        }
}

static void check_missing_rx(void)
{
        struct lcore_cfg *lconf = NULL, *rx_lconf = NULL, *tx_lconf = NULL;
        struct task_args *targ, *rx_targ = NULL, *tx_targ = NULL;
        uint8_t port_id, rx_port_id, ok;

        while (core_targ_next(&lconf, &targ, 0) == 0) {
                PROX_PANIC((targ->flags & TASK_ARG_RX_RING) && targ->rx_rings[0] == 0 && !targ->tx_opt_ring_task,
                           "Configuration error: core %u task %u receives from a ring, but no task transmits to it\n", lconf->id, targ->id);
                if (targ->nb_rxports == 0 && targ->nb_rxrings == 0) {
                        PROX_PANIC(!task_init_flag_set(targ->task_init, TASK_FEATURE_NO_RX),
                                   "\tCore %u task %u: no rx_ports and no rx_rings configured while required by mode %s\n", lconf->id, targ->id, targ->task_init->mode_str);
                }
        }

        lconf = NULL;
        while (core_targ_next(&lconf, &targ, 0) == 0) {
                if (strcmp(targ->sub_mode_str, "l3") != 0)
                        continue;

                PROX_PANIC((targ->nb_rxports == 0) && (targ->nb_txports == 0), "L3 task must have an RX or a TX port\n");
                // If the L3 sub_mode receives from a port, check that there is at least one core/task
                // transmitting to this port in L3 sub_mode
                for (uint8_t i = 0; i < targ->nb_rxports; ++i) {
                        rx_port_id = targ->rx_port_queue[i].port;
                        ok = 0;
                        tx_lconf = NULL;
                        while (core_targ_next(&tx_lconf, &tx_targ, 0) == 0) {
                                if ((port_id = tx_targ->tx_port_queue[0].port) == OUT_DISCARD)
                                        continue;
                                if ((rx_port_id == port_id) && (tx_targ->flags & TASK_ARG_L3)) {
                                        ok = 1;
                                        break;
                                }
                        }
                        PROX_PANIC(ok == 0, "RX L3 sub mode for port %d on core %d task %d, but no core/task transmitting on that port\n", rx_port_id, lconf->id, targ->id);
                }

                // If the L3 sub_mode transmits to a port, check that there is at least one core/task
                // receiving from that port in L3 sub_mode.
                if ((port_id = targ->tx_port_queue[0].port) == OUT_DISCARD)
                        continue;
                rx_lconf = NULL;
                ok = 0;
                plog_info("\tCore %d task %d transmitting to port %d in L3 mode\n", lconf->id, targ->id, port_id);
                while (core_targ_next(&rx_lconf, &rx_targ, 0) == 0) {
                        for (uint8_t i = 0; i < rx_targ->nb_rxports; ++i) {
                                rx_port_id = rx_targ->rx_port_queue[i].port;
                                if ((rx_port_id == port_id) && (rx_targ->flags & TASK_ARG_L3)) {
                                        ok = 1;
                                        break;
                                }
                        }
                        if (ok == 1) {
                                plog_info("\tCore %d task %d has found core %d task %d receiving from port %d\n", lconf->id, targ->id, rx_lconf->id, rx_targ->id, port_id);
                                break;
                        }
                }
                PROX_PANIC(ok == 0, "TX L3 sub mode for port %d on core %d task %d, but no core/task receiving on that port\n", port_id, lconf->id, targ->id);
        }
}

static void check_cfg_consistent(void)
{
        check_missing_rx();
        check_zero_rx();
        check_mixed_normal_pipeline();
}

static void plog_all_rings(void)
{
        struct lcore_cfg *lconf = NULL;
        struct task_args *targ;

        while (core_targ_next(&lconf, &targ, 0) == 0) {
                for (uint8_t ring_idx = 0; ring_idx < targ->nb_rxrings; ++ring_idx) {
                        plog_info("\tCore %u, task %u, rx_ring[%u] %p\n", lconf->id, targ->id, ring_idx, targ->rx_rings[ring_idx]);
                }
        }
}

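/* The helpers below walk the chain of tasks feeding into targ (built by
   set_dest_threads() through prev_tasks). chain_flag_state() returns 1 as soon
   as any task in the chain, including targ itself, has the feature flag in the
   requested state. Consequently:
   - chain_flag_always_set():    no task in the chain has the flag cleared;
   - chain_flag_never_set():     no task in the chain has the flag set;
   - chain_flag_sometimes_set(): at least one task in the chain has it set.
   For example, TX offloads are only disabled on a port when NOOFFLOADS is set
   on every task between that port and the first task receiving from a port. */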
static int chain_flag_state(struct task_args *targ, uint64_t flag, int is_set)
{
        if (task_init_flag_set(targ->task_init, flag) == is_set)
                return 1;

        for (uint32_t i = 0; i < targ->n_prev_tasks; ++i) {
                if (chain_flag_state(targ->prev_tasks[i], flag, is_set))
                        return 1;
        }
        return 0;
}

static int chain_flag_always_set(struct task_args *targ, uint64_t flag)
{
        return (!chain_flag_state(targ, flag, 0));
}

static int chain_flag_never_set(struct task_args *targ, uint64_t flag)
{
        return (!chain_flag_state(targ, flag, 1));
}

static int chain_flag_sometimes_set(struct task_args *targ, uint64_t flag)
{
        return (chain_flag_state(targ, flag, 1));
}

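/* Each task transmitting to a physical port gets its own TX queue: the queue
   index is simply the running count of TX users seen so far on that port
   (n_txq), so the first task gets queue 0, the second queue 1, and so on.
   Ring-backed (virtual) ports are the exception and always use queue 0. */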
static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
{
        uint8_t if_port;

        for (uint8_t i = 0; i < targ->nb_txports; ++i) {
                if_port = targ->tx_port_queue[i].port;

                PROX_PANIC(if_port == OUT_DISCARD, "port misconfigured, exiting\n");

                PROX_PANIC(!prox_port_cfg[if_port].active, "\tPort %u not used, aborting...\n", if_port);

                int dsocket = prox_port_cfg[if_port].socket;
                if (dsocket != -1 && dsocket != socket) {
                        plog_warn("TX core on socket %d while device on socket %d\n", socket, dsocket);
                }

                if (prox_port_cfg[if_port].tx_ring[0] == '\0') {
                        // physical port: each TX task gets its own queue
                        targ->tx_port_queue[i].queue = prox_port_cfg[if_port].n_txq;
                        prox_port_cfg[if_port].n_txq++;
                } else {
                        // ring-backed port: a single queue is enough
                        prox_port_cfg[if_port].n_txq = 1;
                        targ->tx_port_queue[i].queue = 0;
                }
                /* By default OFFLOAD is enabled, but if the whole
                   chain has NOOFFLOADS set all the way until the
                   first task that receives from a port, it will be
                   disabled for the destination port. */
#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
                if (chain_flag_always_set(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)) {
                        prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
                }
#else
                if (chain_flag_always_set(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)) {
                        prox_port_cfg[if_port].requested_tx_offload &= ~(DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
                }
#endif
        }
}

static void configure_if_rx_queues(struct task_args *targ, uint8_t socket)
{
        struct prox_port_cfg *port;
        for (int i = 0; i < targ->nb_rxports; i++) {
                uint8_t if_port = targ->rx_port_queue[i].port;

                if (if_port == OUT_DISCARD) {
                        return;
                }

                port = &prox_port_cfg[if_port];
                PROX_PANIC(!port->active, "Port %u not used, aborting...\n", if_port);

                if (port->rx_ring[0] != '\0') {
                        port->n_rxq = 0;
                }

                // If the mbuf size (of the rx task) is not big enough, we might receive multiple segments.
                // This is usually the case when setting a large MTU, e.g. when enabling jumbo frames.
                // If the packets then get transmitted, multi segments will have to be enabled on the TX port.
                uint16_t max_frame_size = port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE;
                if (max_frame_size + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM > targ->mbuf_size) {
                        targ->task_init->flag_features |= TASK_FEATURE_TXQ_FLAGS_MULTSEGS;
                }
                targ->rx_port_queue[i].queue = port->n_rxq;
                port->pool[targ->rx_port_queue[i].queue] = targ->pool;
                port->pool_size[targ->rx_port_queue[i].queue] = targ->nb_mbuf - 1;
                port->n_rxq++;

                int dsocket = port->socket;
                if (dsocket != -1 && dsocket != socket) {
                        plog_warn("RX core on socket %d while device on socket %d\n", socket, dsocket);
                }
        }
}

static void configure_if_queues(void)
{
        struct lcore_cfg *lconf = NULL;
        struct task_args *targ;
        uint8_t socket;

        while (core_targ_next(&lconf, &targ, 0) == 0) {
                socket = rte_lcore_to_socket_id(lconf->id);

                configure_if_rx_queues(targ, socket);
                configure_if_tx_queues(targ, socket);
        }
}

static void configure_tx_queue_flags(void)
{
        struct lcore_cfg *lconf = NULL;
        struct task_args *targ;
        uint8_t if_port;

        while (core_targ_next(&lconf, &targ, 0) == 0) {
                for (uint8_t i = 0; i < targ->nb_txports; ++i) {
                        if_port = targ->tx_port_queue[i].port;
#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
                        /* Set the ETH_TXQ_FLAGS_NOREFCOUNT flag if none of
                           the tasks up to the task transmitting to the port
                           uses refcnt. */
                        if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT)) {
                                prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
                        }
#else
                        /* Set the DEV_TX_OFFLOAD_MBUF_FAST_FREE flag if none of
                           the tasks up to the task transmitting to the port
                           uses refcnt and, per queue, all mbufs come from the same mempool. */
                        if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT)) {
                                if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL))
                                        prox_port_cfg[if_port].requested_tx_offload |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
                        }
#endif
                }
        }
}

static void configure_multi_segments(void)
{
        struct lcore_cfg *lconf = NULL;
        struct task_args *targ;
        uint8_t if_port;

        while (core_targ_next(&lconf, &targ, 0) == 0) {
                for (uint8_t i = 0; i < targ->nb_txports; ++i) {
                        if_port = targ->tx_port_queue[i].port;
                        // Multi segment is disabled for most tasks. It is only enabled for tasks requiring big packets.
#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
                        // We can only enable "no multi segment" if no such task exists in the chain of tasks.
                        if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS)) {
                                prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
                        }
#else
                        // We enable "multi segment" if at least one task requires it in the chain of tasks.
                        if (chain_flag_sometimes_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS)) {
                                prox_port_cfg[if_port].requested_tx_offload |= DEV_TX_OFFLOAD_MULTI_SEGS;
                        }
#endif
                }
        }
}

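/* gen_ring_name() hands out short unique ring names from a 93-character
   alphabet. The first 93 calls return single characters ("A", "B", ... "9");
   subsequent calls return two characters, the second acting as a higher-order
   digit ("AA", "BA", ..., "9A", "AB", ...). That gives 93 + 93 * 93 = 8742
   distinct names before the sequence starts repeating. */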
static const char *gen_ring_name(void)
{
        static char retval[] = "XX";
        static const char* ring_names =
                "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                "abcdefghijklmnopqrstuvwxyz"
                "[\\]^_`!\"#$%&'()*+,-./:;<="
                ">?@{|}0123456789";
        static int idx2 = 0;

        int idx = idx2;

        retval[0] = ring_names[idx % strlen(ring_names)];
        idx /= strlen(ring_names);
        retval[1] = idx ? ring_names[(idx - 1) % strlen(ring_names)] : 0;

        idx2++;

        return retval;
}

struct ring_init_stats {
        uint32_t n_pkt_rings;
        uint32_t n_ctrl_rings;
        uint32_t n_opt_rings;
};

static uint32_t ring_init_stats_total(const struct ring_init_stats *ris)
{
        return ris->n_pkt_rings + ris->n_ctrl_rings + ris->n_opt_rings;
}

static uint32_t count_incoming_tasks(uint32_t lcore_worker, uint32_t dest_task)
{
        struct lcore_cfg *lconf = NULL;
        struct task_args *targ;
        uint32_t ret = 0;
        struct core_task ct;

        while (core_targ_next(&lconf, &targ, 0) == 0) {
                for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
                        for (uint8_t ridx = 0; ridx < targ->core_task_set[idx].n_elems; ++ridx) {
                                ct = targ->core_task_set[idx].core_task[ridx];

                                if (dest_task == ct.task && lcore_worker == ct.core)
                                        ret++;
                        }
                }
        }
        return ret;
}

static struct rte_ring *get_existing_ring(uint32_t lcore_id, uint32_t task_id)
{
        if (!prox_core_active(lcore_id, 0))
                return NULL;

        struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

        if (task_id >= lconf->n_tasks_all)
                return NULL;

        if (lconf->targs[task_id].nb_rxrings == 0)
                return NULL;

        return lconf->targs[task_id].rx_rings[0];
}

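/* init_ring_between_tasks() connects a source task (lconf/starg) to a
   destination task (ct) and distinguishes three cases:
   1. ct.type != 0: a control ring (message or packet) towards a worker,
      shared with the master core where applicable;
   2. both tasks run consecutively on the same core with a single ring
      between them: the ring is optimized away and the tasks share buffers;
   3. otherwise a regular packet ring, single-producer unless multi-producer
      rings are enabled and several tasks send to the same destination. */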
static struct rte_ring *init_ring_between_tasks(struct lcore_cfg *lconf, struct task_args *starg,
                                    const struct core_task ct, uint8_t ring_idx, int idx,
                                    struct ring_init_stats *ris)
{
        uint8_t socket;
        struct rte_ring *ring = NULL;
        struct lcore_cfg *lworker;
        struct task_args *dtarg;

        PROX_ASSERT(prox_core_active(ct.core, 0));
        lworker = &lcore_cfg[ct.core];

        /* socket used is the one that the sending core resides on */
        socket = rte_lcore_to_socket_id(lconf->id);

        plog_info("\t\tCreating ring on socket %u with size %u\n"
                  "\t\t\tsource core, task and socket = %u, %u, %u\n"
                  "\t\t\tdestination core, task and socket = %u, %u, %u\n"
                  "\t\t\tdestination worker id = %u\n",
                  socket, starg->ring_size,
                  lconf->id, starg->id, socket,
                  ct.core, ct.task, rte_lcore_to_socket_id(ct.core),
                  ring_idx);

        if (ct.type) {
                struct rte_ring **dring = NULL;

                if (ct.type == CTRL_TYPE_MSG)
                        dring = &lworker->ctrl_rings_m[ct.task];
                else if (ct.type == CTRL_TYPE_PKT) {
                        dring = &lworker->ctrl_rings_p[ct.task];
                        starg->flags |= TASK_ARG_CTRL_RINGS_P;
                }

                if (*dring == NULL)
                        ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SC_DEQ);
                else
                        ring = *dring;
                PROX_PANIC(ring == NULL, "Cannot create ring to connect I/O core %u with worker core %u\n", lconf->id, ct.core);

                starg->tx_rings[starg->tot_n_txrings_inited] = ring;
                starg->tot_n_txrings_inited++;
                *dring = ring;
                if (lconf->id == prox_cfg.master) {
                        ctrl_rings[ct.core*MAX_TASKS_PER_CORE + ct.task] = ring;
                } else if (ct.core == prox_cfg.master) {
                        starg->ctrl_plane_ring = ring;
                }

                plog_info("\t\t\tCore %u task %u -> core %u task %u ctrl_ring %s %p %s\n",
                          lconf->id, starg->id, ct.core, ct.task, ct.type == CTRL_TYPE_PKT ?
                          "pkt" : "msg", ring, ring->name);
                ris->n_ctrl_rings++;
                return ring;
        }

        dtarg = &lworker->targs[ct.task];
        dtarg->worker_thread_id = ring_idx;
        PROX_ASSERT(dtarg->flags & TASK_ARG_RX_RING);
        PROX_ASSERT(ct.task < lworker->n_tasks_all);

        /* If all the following conditions are met, the ring can be
           optimized away. */
        if (!task_is_master(starg) && !task_is_master(dtarg) && starg->lconf->id == dtarg->lconf->id &&
            starg->nb_txrings == 1 && idx == 0 && dtarg->task &&
            dtarg->tot_rxrings == 1 && starg->task == dtarg->task - 1) {
                plog_info("\t\tOptimizing away ring on core %u from task %u to task %u\n",
                          dtarg->lconf->id, starg->task, dtarg->task);
                /* No need to set up ws_mbuf. */
                starg->tx_opt_ring = 1;
                /* During init of destination task, the buffer in the
                   source task will be initialized. */
                dtarg->tx_opt_ring_task = starg;
                ris->n_opt_rings++;
                ++dtarg->nb_rxrings;
                return NULL;
        }

        int ring_created = 1;
        /* Only create multi-producer rings if configured to do so AND
           there is more than one task sending to the task */
        if ((prox_cfg.flags & DSF_MP_RINGS && count_incoming_tasks(ct.core, ct.task) > 1)
                || (prox_cfg.flags & DSF_ENABLE_BYPASS)) {
                ring = get_existing_ring(ct.core, ct.task);

                if (ring) {
                        plog_info("\t\tCore %u task %u using existing MP ring %p to core %u task %u\n",
                                  lconf->id, starg->id, ring, ct.core, ct.task);
                        ring_created = 0;
                }
                else {
                        ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SC_DEQ);
                        plog_info("\t\tCore %u task %u creating MP ring %p to core %u task %u\n",
                                  lconf->id, starg->id, ring, ct.core, ct.task);
                }
        }
        else
                ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SP_ENQ | RING_F_SC_DEQ);

        PROX_PANIC(ring == NULL, "Cannot create ring to connect I/O core %u with worker core %u\n", lconf->id, ct.core);

        starg->tx_rings[starg->tot_n_txrings_inited] = ring;
        starg->tot_n_txrings_inited++;

        if (ring_created) {
                PROX_ASSERT(dtarg->nb_rxrings < MAX_RINGS_PER_TASK);
                dtarg->rx_rings[dtarg->nb_rxrings] = ring;
                ++dtarg->nb_rxrings;
                if (dtarg->nb_rxrings > 1)
                        dtarg->task_init->flag_features |= TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL;
        }
        dtarg->nb_slave_threads = starg->core_task_set[idx].n_elems;
        dtarg->lb_friend_core = lconf->id;
        dtarg->lb_friend_task = starg->id;
        plog_info("\t\tWorker thread %d has core %d, task %d as an lb friend\n", ct.core, lconf->id, starg->id);
        plog_info("\t\tCore %u task %u tx_ring[%u] -> core %u task %u rx_ring[%u] %p %s %u WT\n",
                  lconf->id, starg->id, ring_idx, ct.core, ct.task, dtarg->nb_rxrings, ring, ring->name,
                  dtarg->nb_slave_threads);
        ++ris->n_pkt_rings;
        return ring;
}

static void init_rings(void)
{
        struct lcore_cfg *lconf = NULL;
        struct task_args *starg;
        struct ring_init_stats ris = {0};

        while (core_targ_next(&lconf, &starg, 1) == 0) {
                plog_info("\t*** Initializing rings on core %u, task %u ***\n", lconf->id, starg->id);
                for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
                        for (uint8_t ring_idx = 0; ring_idx < starg->core_task_set[idx].n_elems; ++ring_idx) {
                                PROX_ASSERT(ring_idx < MAX_WT_PER_LB);
                                PROX_ASSERT(starg->tot_n_txrings_inited < MAX_RINGS_PER_TASK);

                                struct core_task ct = starg->core_task_set[idx].core_task[ring_idx];
                                init_ring_between_tasks(lconf, starg, ct, ring_idx, idx, &ris);
                        }
                }
        }

        plog_info("\tInitialized %d rings:\n"
                  "\t\tNumber of packet rings: %u\n"
                  "\t\tNumber of control rings: %u\n"
                  "\t\tNumber of optimized rings: %u\n",
                  ring_init_stats_total(&ris),
                  ris.n_pkt_rings,
                  ris.n_ctrl_rings,
                  ris.n_opt_rings);

        lconf = NULL;
        while (core_targ_next(&lconf, &starg, 1) == 0) {
                if ((starg->task_init) && (starg->flags & TASK_ARG_L3)) {
                        struct core_task ct;
                        // control ring from the L3 task towards the master core ...
                        ct.core = prox_cfg.master;
                        ct.task = 0;
                        ct.type = CTRL_TYPE_PKT;
                        init_ring_between_tasks(lconf, starg, ct, 0, 0, &ris);

                        // ... and back from the master core to the L3 task
                        ct.core = lconf->id;
                        ct.task = starg->id;
                        init_ring_between_tasks(&lcore_cfg[prox_cfg.master], lcore_cfg[prox_cfg.master].targs, ct, 0, 0, &ris);
                }
        }
}

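/* shuffle_mempool() randomizes the order of mbufs inside a mempool: it first
   drains up to nb_mbuf buffers one by one, then returns them in random order,
   picking a random not-yet-returned slot each iteration. Triggered by the
   DSF_SHUFFLE flag to avoid benchmarking with a perfectly linear mbuf
   layout. */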
static void shuffle_mempool(struct rte_mempool* mempool, uint32_t nb_mbuf)
{
        struct rte_mbuf** pkts = prox_zmalloc(nb_mbuf * sizeof(*pkts), rte_socket_id());
        uint64_t got = 0;

        while ((got < nb_mbuf) && (rte_mempool_get_bulk(mempool, (void**)(pkts + got), 1) == 0))
                ++got;

        nb_mbuf = got;
        while (got) {
                int idx;
                do {
                        idx = rand() % nb_mbuf;
                } while (pkts[idx] == 0);

                rte_mempool_put_bulk(mempool, (void**)&pkts[idx], 1);
                pkts[idx] = 0;
                --got;
        }
        prox_free(pkts);
}

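/* Worked example for the i40e sizing below (assuming an MTU of 9198 bytes and
   4-byte VLAN tags): max_frame_size = 9198 + 14 (Ethernet header) + 4 (CRC)
   + 8 (two VLAN tags) = 9224 bytes. With at most 5 chained RX descriptors,
   each buffer must hold at least 9224 / 5 = 1844 bytes of data, aligned up to
   1920, plus sizeof(struct rte_mbuf) and RTE_PKTMBUF_HEADROOM. */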
static void set_mbuf_size(struct task_args *targ)
{
        /* The mbuf size can be set
         *  - from the config file (highest priority, overriding any other setting); should only be used as a workaround
         *  - defaulted to MBUF_SIZE.
         * Unless set explicitly, ensure that the size is big enough for the vmxnet3 driver.
         */
        if (targ->mbuf_size)
                return;

        targ->mbuf_size = MBUF_SIZE;
        struct prox_port_cfg *port;
        uint16_t max_frame_size = 0, min_buffer_size = 0;
        int i40e = 0;
        for (int i = 0; i < targ->nb_rxports; i++) {
                uint8_t if_port = targ->rx_port_queue[i].port;

                if (if_port == OUT_DISCARD) {
                        continue;
                }
                port = &prox_port_cfg[if_port];
                if (max_frame_size < port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE)
                        max_frame_size = port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE;
                if (min_buffer_size < port->min_rx_bufsize)
                        min_buffer_size = port->min_rx_bufsize;

                // Check whether we receive from i40e. This driver has extra mbuf size requirements.
                if (strcmp(port->short_name, "i40e") == 0)
                        i40e = 1;
        }
        if (i40e) {
                // i40e supports a maximum of 5 descriptors chained
                uint16_t required_mbuf_size = RTE_ALIGN(max_frame_size / 5, 128) + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
                if (required_mbuf_size > targ->mbuf_size) {
                        targ->mbuf_size = required_mbuf_size;
                        plog_info("\t\tSetting mbuf_size to %u to support frame_size %u\n", targ->mbuf_size, max_frame_size);
                }
        }
        if (min_buffer_size > targ->mbuf_size) {
                plog_warn("Mbuf size might be too small. This might result in packet segmentation and a memory leak\n");
        }
}

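/* setup_mempools_unique_per_socket() works in two passes: the first pass sums
   the mbuf requirements of all RX tasks per socket (and checks that cache and
   mbuf sizes are consistent, since one pool must fit all users); the second
   pass creates one mempool per socket and points every RX task on that socket
   at it. */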
static void setup_mempools_unique_per_socket(void)
{
        uint32_t flags = 0;
        char name[64];
        struct lcore_cfg *lconf = NULL;
        struct task_args *targ;

        struct rte_mempool     *pool[MAX_SOCKETS];
        uint32_t mbuf_count[MAX_SOCKETS] = {0};
        uint32_t nb_cache_mbuf[MAX_SOCKETS] = {0};
        uint32_t mbuf_size[MAX_SOCKETS] = {0};

        while (core_targ_next_early(&lconf, &targ, 0) == 0) {
                PROX_PANIC(targ->task_init == NULL, "task_init = NULL, is a mode specified for core %d, task %d?\n", lconf->id, targ->id);
                uint8_t socket = rte_lcore_to_socket_id(lconf->id);
                PROX_ASSERT(socket < MAX_SOCKETS);

                set_mbuf_size(targ);
                if (targ->rx_port_queue[0].port != OUT_DISCARD) {
                        PROX_ASSERT(targ->nb_mbuf != 0);
                        mbuf_count[socket] += targ->nb_mbuf;
                        if (nb_cache_mbuf[socket] == 0)
                                nb_cache_mbuf[socket] = targ->nb_cache_mbuf;
                        else {
                                PROX_PANIC(nb_cache_mbuf[socket] != targ->nb_cache_mbuf,
                                           "all mbuf_cache must have the same size if using a unique mempool per socket\n");
                        }
                        if (mbuf_size[socket] == 0)
                                mbuf_size[socket] = targ->mbuf_size;
                        else {
                                PROX_PANIC(mbuf_size[socket] != targ->mbuf_size,
                                           "all mbuf_size must have the same size if using a unique mempool per socket\n");
                        }
                }
        }
        for (int i = 0; i < MAX_SOCKETS; i++) {
                if (mbuf_count[i] != 0) {
                        snprintf(name, sizeof(name), "socket_%u_pool", i);
                        pool[i] = rte_mempool_create(name,
                                                     mbuf_count[i] - 1, mbuf_size[i],
                                                     nb_cache_mbuf[i],
                                                     sizeof(struct rte_pktmbuf_pool_private),
                                                     rte_pktmbuf_pool_init, NULL,
                                                     prox_pktmbuf_init, NULL,
                                                     i, flags);
                        PROX_PANIC(pool[i] == NULL, "\t\tError: cannot create mempool for socket %u\n", i);
                        plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", pool[i],
                                  mbuf_count[i], mbuf_size[i], nb_cache_mbuf[i], i);

                        if (prox_cfg.flags & DSF_SHUFFLE) {
                                shuffle_mempool(pool[i], mbuf_count[i]);
                        }
                }
        }

        lconf = NULL;
        while (core_targ_next_early(&lconf, &targ, 0) == 0) {
                uint8_t socket = rte_lcore_to_socket_id(lconf->id);

                if (targ->rx_port_queue[0].port != OUT_DISCARD) {
                        /* use this pool for the interface that the core is receiving from */
                        /* If one core receives from multiple ports, all the ports use the same mempool */
                        targ->pool = pool[socket];
                        /* Set the number of mbufs to that of the unique mempool, so that the used and free statistics work */
                        targ->nb_mbuf = mbuf_count[socket];
                        plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
                                  targ->nb_mbuf, mbuf_size[socket], targ->nb_cache_mbuf, socket);
                }
        }
}

static void setup_mempool_for_rx_task(struct lcore_cfg *lconf, struct task_args *targ)
{
        const uint8_t socket = rte_lcore_to_socket_id(lconf->id);
        const struct rte_memzone *mz;
        struct rte_mempool *mp = NULL;
        uint32_t flags = 0;
        char memzone_name[64];
        char name[64];

        set_mbuf_size(targ);

        /* allocate memory pool for packets */
        PROX_ASSERT(targ->nb_mbuf != 0);

        if (targ->pool_name[0] == '\0') {
                snprintf(name, sizeof(name), "core_%u_port_%u_pool", lconf->id, targ->id);
        } else {
                snprintf(name, sizeof(name), "%s", targ->pool_name);
        }

        snprintf(memzone_name, sizeof(memzone_name), "MP_%s", targ->pool_name);
        mz = rte_memzone_lookup(memzone_name);

        if (mz != NULL) {
                mp = (struct rte_mempool*)mz->addr;

                targ->nb_mbuf = mp->size;
                targ->pool = mp;
        }

#ifdef RTE_LIBRTE_IVSHMEM_FALSE
        if (mz != NULL && mp != NULL && mp->phys_addr != mz->ioremap_addr) {
                /* Init mbufs with ioremap_addr for dma */
                mp->phys_addr = mz->ioremap_addr;
                mp->elt_pa[0] = mp->phys_addr + (mp->elt_va_start - (uintptr_t)mp);

                struct prox_pktmbuf_reinit_args init_args;
                init_args.mp = mp;
                init_args.lconf = lconf;

                uint32_t elt_sz = mp->elt_size + mp->header_size + mp->trailer_size;
                rte_mempool_obj_iter((void*)mp->elt_va_start, mp->size, elt_sz, 1,
                                     mp->elt_pa, mp->pg_num, mp->pg_shift, prox_pktmbuf_reinit, &init_args);
        }
#endif

        /* Use this pool for the interface that the core is
           receiving from; if one core receives from multiple
           ports, all the ports use the same mempool */
        if (targ->pool == NULL) {
                plog_info("\t\tCreating mempool with name '%s'\n", name);
                targ->pool = rte_mempool_create(name,
                                                targ->nb_mbuf - 1, targ->mbuf_size,
                                                targ->nb_cache_mbuf,
                                                sizeof(struct rte_pktmbuf_pool_private),
                                                rte_pktmbuf_pool_init, NULL,
                                                prox_pktmbuf_init, lconf,
                                                socket, flags);
        }

        PROX_PANIC(targ->pool == NULL,
                   "\t\tError: cannot create mempool for core %u port %u: %s\n", lconf->id, targ->id, rte_strerror(rte_errno));

        plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
                  targ->nb_mbuf, targ->mbuf_size, targ->nb_cache_mbuf, socket);
        if (prox_cfg.flags & DSF_SHUFFLE) {
                shuffle_mempool(targ->pool, targ->nb_mbuf);
        }
}

static void setup_mempools_multiple_per_socket(void)
{
        struct lcore_cfg *lconf = NULL;
        struct task_args *targ;

        while (core_targ_next_early(&lconf, &targ, 0) == 0) {
                PROX_PANIC(targ->task_init == NULL, "task_init = NULL, is a mode specified for core %d, task %d?\n", lconf->id, targ->id);
                if (targ->rx_port_queue[0].port == OUT_DISCARD)
                        continue;
                setup_mempool_for_rx_task(lconf, targ);
        }
}

static void setup_mempools(void)
{
        if (prox_cfg.flags & UNIQUE_MEMPOOL_PER_SOCKET)
                setup_mempools_unique_per_socket();
        else
                setup_mempools_multiple_per_socket();
}

static void set_task_lconf(void)
{
        struct lcore_cfg *lconf;
        uint32_t lcore_id = -1;

        while (prox_core_next(&lcore_id, 1) == 0) {
                lconf = &lcore_cfg[lcore_id];
                for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
                        lconf->targs[task_id].lconf = lconf;
                }
        }
}

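/* set_dest_threads() builds the reverse edges of the task graph: for every
   (source task -> destination core/task) entry in core_task_set, the source
   is appended to the destination's prev_tasks[]. chain_flag_state() relies on
   these back pointers to inspect all upstream tasks of a given task. */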
static void set_dest_threads(void)
{
        struct lcore_cfg *lconf = NULL;
        struct task_args *targ;

        while (core_targ_next(&lconf, &targ, 0) == 0) {
                for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
                        for (uint8_t ring_idx = 0; ring_idx < targ->core_task_set[idx].n_elems; ++ring_idx) {
                                struct core_task ct = targ->core_task_set[idx].core_task[ring_idx];

                                struct task_args *dest_task = core_targ_get(ct.core, ct.task);
                                dest_task->prev_tasks[dest_task->n_prev_tasks++] = targ;
                        }
                }
        }
}

static void setup_all_task_structs_early_init(void)
{
        struct lcore_cfg *lconf = NULL;
        struct task_args *targ;

        plog_info("\t*** Calling early init on all tasks ***\n");
        while (core_targ_next(&lconf, &targ, 0) == 0) {
                if (targ->task_init->early_init) {
                        targ->task_init->early_init(targ);
                }
        }
}

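/* setup_all_task_structs() initializes tasks in two passes: the master task
   first, then all other tasks with targ->tmaster pointing at it, since
   regular tasks may need to exchange messages with the master during their
   own initialization. */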
static void setup_all_task_structs(void)
{
        struct lcore_cfg *lconf;
        uint32_t lcore_id = -1;
        struct task_base *tmaster = NULL;

        while (prox_core_next(&lcore_id, 1) == 0) {
                lconf = &lcore_cfg[lcore_id];
                for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
                        if (task_is_master(&lconf->targs[task_id])) {
                                plog_info("\tInitializing MASTER struct for core %d task %d\n", lcore_id, task_id);
                                lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
                                tmaster = lconf->tasks_all[task_id];
                        }
                }
        }
        PROX_PANIC(tmaster == NULL, "Can't initialize master task\n");
        lcore_id = -1;

        while (prox_core_next(&lcore_id, 1) == 0) {
                lconf = &lcore_cfg[lcore_id];
                plog_info("\tInitializing structs for core %d with %d task(s)\n", lcore_id, lconf->n_tasks_all);
                for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
                        if (!task_is_master(&lconf->targs[task_id])) {
                                plog_info("\tInitializing struct for core %d task %d\n", lcore_id, task_id);
                                lconf->targs[task_id].tmaster = tmaster;
                                lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
                        }
                }
        }
}

static void init_port_activate(void)
{
        struct lcore_cfg *lconf = NULL;
        struct task_args *targ;
        uint8_t port_id = 0;

        while (core_targ_next_early(&lconf, &targ, 0) == 0) {
                for (int i = 0; i < targ->nb_rxports; i++) {
                        port_id = targ->rx_port_queue[i].port;
                        prox_port_cfg[port_id].active = 1;
                }

                for (int i = 0; i < targ->nb_txports; i++) {
                        port_id = targ->tx_port_queue[i].port;
                        prox_port_cfg[port_id].active = 1;
                }
        }
}

/* Initialize cores and allocate mempools */
static void init_lcores(void)
{
        uint32_t lcore_id = -1;

        while (prox_core_next(&lcore_id, 0) == 0) {
                uint8_t socket = rte_lcore_to_socket_id(lcore_id);
                PROX_PANIC(socket >= MAX_SOCKETS, "Can't configure core %u (on socket %u). MAX_SOCKETS is set to %d\n", lcore_id, socket, MAX_SOCKETS);
        }

        /* mempools must be allocated first, to use the lowest possible address range */
        plog_info("=== Initializing mempools ===\n");
        setup_mempools();

        lcore_cfg_alloc_hp();

        set_dest_threads();
        set_task_lconf();

        plog_info("=== Initializing port addresses ===\n");
        init_port_addr();

        plog_info("=== Initializing queue numbers on cores ===\n");
        configure_if_queues();

        plog_info("=== Initializing rings on cores ===\n");
        init_rings();

        configure_multi_segments();
        configure_tx_queue_flags();

        plog_info("=== Checking configuration consistency ===\n");
        check_cfg_consistent();

        plog_all_rings();
}

static int setup_prox(__attribute__((unused)) int argc, char **argv)
{
        if (prox_read_config_file() != 0 ||
            prox_setup_rte(argv[0]) != 0) {
                return -1;
        }

        if (prox_cfg.flags & DSF_CHECK_SYNTAX) {
                plog_info("=== Configuration file syntax has been checked ===\n\n");
                exit(EXIT_SUCCESS);
        }

        init_port_activate();
        plog_info("=== Initializing rte devices ===\n");
        if (!(prox_cfg.flags & DSF_USE_DUMMY_DEVICES))
                init_rte_ring_dev();
        init_rte_dev(prox_cfg.flags & DSF_USE_DUMMY_DEVICES);
        plog_info("=== Calibrating TSC overhead ===\n");
        clock_init();
        plog_info("\tTSC running at %"PRIu64" Hz\n", rte_get_tsc_hz());

        init_lcores();
        plog_info("=== Initializing ports ===\n");
        init_port_all();

        setup_all_task_structs_early_init();
        plog_info("=== Initializing tasks ===\n");
        setup_all_task_structs();

        if (prox_cfg.logbuf_size) {
                prox_cfg.logbuf = prox_zmalloc(prox_cfg.logbuf_size, rte_socket_id());
                PROX_PANIC(prox_cfg.logbuf == NULL, "Failed to allocate memory for logbuf with size = %d\n", prox_cfg.logbuf_size);
        }

        if (prox_cfg.flags & DSF_CHECK_INIT) {
                plog_info("=== Initialization sequence completed ===\n\n");
                exit(EXIT_SUCCESS);
        }

        /* Redirect DPDK logging to /dev/null to silence it */
        FILE *f = fopen("/dev/null", "r");
        rte_openlog_stream(f);
        plog_info("=== PROX started ===\n");
        return 0;
}

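/* In daemon mode (-d), main() forks: the child runs setup_prox() and signals
   the parent with SIGUSR1 on success or SIGUSR2 on failure; the parent blocks
   in pause() until one of these signals arrives (handled below) and exits
   with a matching status. */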
static int success = 0;
static void siguser_handler(int signal)
{
        if (signal == SIGUSR1)
                success = 1;
        else
                success = 0;
}

static void sigabrt_handler(__attribute__((unused)) int signum)
{
        /* restore default disposition for SIGABRT and SIGPIPE */
        signal(SIGABRT, SIG_DFL);
        signal(SIGPIPE, SIG_DFL);

        /* ignore further Ctrl-C */
        signal(SIGINT, SIG_IGN);

        /* more drastic exit on tedious termination signal */
        plog_info("Aborting...\n");
        if (lcore_cfg != NULL) {
                uint32_t lcore_id;
                pthread_t thread_id, tid0, tid = pthread_self();
                memset(&tid0, 0, sizeof(tid0));

                /* cancel all threads except current one */
                lcore_id = -1;
                while (prox_core_next(&lcore_id, 1) == 0) {
                        thread_id = lcore_cfg[lcore_id].thread_id;
                        if (pthread_equal(thread_id, tid0))
                                continue;
                        if (pthread_equal(thread_id, tid))
                                continue;
                        pthread_cancel(thread_id);
                }

                /* wait for cancelled threads to terminate */
                lcore_id = -1;
                while (prox_core_next(&lcore_id, 1) == 0) {
                        thread_id = lcore_cfg[lcore_id].thread_id;
                        if (pthread_equal(thread_id, tid0))
                                continue;
                        if (pthread_equal(thread_id, tid))
                                continue;
                        pthread_join(thread_id, NULL);
                }
        }

        /* close ncurses */
        display_end();

        /* close ports on termination signal */
        close_ports_atexit();

        /* terminate now */
        abort();
}

static void sigterm_handler(int signum)
{
        /* abort on second Ctrl-C */
        if (signum == SIGINT)
                signal(SIGINT, sigabrt_handler);

        /* gracefully quit on harmless termination signal */
        /* ports will subsequently get closed at resulting exit */
        quit();
}

int main(int argc, char **argv)
{
        /* set en_US locale to print big numbers with ',' */
        setlocale(LC_NUMERIC, "en_US.utf-8");

        if (prox_parse_args(argc, argv) != 0) {
                prox_usage(argv[0]);
        }

        plog_init(prox_cfg.log_name, prox_cfg.log_name_pid);
        plog_info("=== " PROGRAM_NAME " " VERSION_STR " ===\n");
        plog_info("\tUsing DPDK %s\n", rte_version() + sizeof(RTE_VER_PREFIX));
        read_rdt_info();

        if (prox_cfg.flags & DSF_LIST_TASK_MODES) {
                /* list supported task modes and exit */
                tasks_list();
                return EXIT_SUCCESS;
        }

        /* close ports at normal exit */
        atexit(close_ports_atexit);
        /* gracefully quit on harmless termination signals */
        signal(SIGHUP, sigterm_handler);
        signal(SIGINT, sigterm_handler);
        signal(SIGQUIT, sigterm_handler);
        signal(SIGTERM, sigterm_handler);
        signal(SIGUSR1, sigterm_handler);
        signal(SIGUSR2, sigterm_handler);
        /* more drastic exit on tedious termination signals */
        signal(SIGABRT, sigabrt_handler);
        signal(SIGPIPE, sigabrt_handler);

        if (prox_cfg.flags & DSF_DAEMON) {
                signal(SIGUSR1, siguser_handler);
                signal(SIGUSR2, siguser_handler);
                plog_info("=== Running in Daemon mode ===\n");
                plog_info("\tForking child and waiting for setup completion\n");

                pid_t ppid = getpid();
                pid_t pid = fork();
                if (pid < 0) {
                        plog_err("Failed to fork process to run in daemon mode\n");
                        return EXIT_FAILURE;
                }

                if (pid == 0) {
                        fclose(stdin);
                        fclose(stdout);
                        fclose(stderr);
                        if (setsid() < 0) {
                                kill(ppid, SIGUSR2);
                                return EXIT_FAILURE;
                        }
                        if (setup_prox(argc, argv) != 0) {
                                kill(ppid, SIGUSR2);
                                return EXIT_FAILURE;
                        }
                        else {
                                kill(ppid, SIGUSR1);
                                run(prox_cfg.flags);
                                return EXIT_SUCCESS;
                        }
                }
                else {
                        /* Before exiting the parent, wait until the
                           child process has finished setting up */
                        pause();
                        if (prox_cfg.logbuf) {
                                file_print(prox_cfg.logbuf);
                        }
                        return success ? EXIT_SUCCESS : EXIT_FAILURE;
                }
        }

        if (setup_prox(argc, argv) != 0)
                return EXIT_FAILURE;
        run(prox_cfg.flags);
        return EXIT_SUCCESS;
}