Support for DPDK 18.05 and DPDK 18.08
[samplevnf.git] / VNFs / DPPD-PROX / main.c
1 /*
2 // Copyright (c) 2010-2017 Intel Corporation
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //     http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 */
16
17 #include <string.h>
18 #include <locale.h>
19 #include <unistd.h>
20 #include <signal.h>
21
22 #include <rte_cycles.h>
23 #include <rte_atomic.h>
24 #include <rte_table_hash.h>
25 #include <rte_memzone.h>
26 #include <rte_errno.h>
27
28 #include "prox_malloc.h"
29 #include "run.h"
30 #include "main.h"
31 #include "log.h"
32 #include "quit.h"
33 #include "clock.h"
34 #include "defines.h"
35 #include "version.h"
36 #include "prox_args.h"
37 #include "prox_assert.h"
38 #include "prox_cfg.h"
39 #include "prox_shared.h"
40 #include "prox_port_cfg.h"
41 #include "toeplitz.h"
42 #include "hash_utils.h"
43 #include "handle_lb_net.h"
44 #include "prox_cksum.h"
45 #include "thread_nop.h"
46 #include "thread_generic.h"
47 #include "thread_pipeline.h"
48 #include "cqm.h"
49 #include "handle_master.h"
50
/* Very old DPDK releases used CACHE_LINE_SIZE; alias it to the modern name. */
#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE
#endif

/* 0xff means "not configured"; may be overwritten during load-balancer setup. */
uint8_t lb_nb_txrings = 0xff;
/* Control rings indexed by core*MAX_TASKS_PER_CORE + task; filled by
   init_ring_between_tasks() for rings originating from the master core. */
struct rte_ring *ctrl_rings[RTE_MAX_LCORE*MAX_TASKS_PER_CORE];
57
/* Log command-line usage for PROX and terminate the process with failure.
 * Called on bad arguments or explicit help; never returns. */
static void __attribute__((noreturn)) prox_usage(const char *prgname)
{
        plog_info("\nUsage: %s [-f CONFIG_FILE] [-a|-e] [-m|-s|-i] [-w DEF] [-u] [-t]\n"
                  "\t-f CONFIG_FILE : configuration file to load, ./prox.cfg by default\n"
                  "\t-l LOG_FILE : log file name, ./prox.log by default\n"
                  "\t-p : include PID in log file name if default log file is used\n"
                  "\t-o DISPLAY: Set display to use, can be 'curses' (default), 'cli' or 'none'\n"
                  "\t-v verbosity : initial logging verbosity\n"
                  "\t-a : autostart all cores (by default)\n"
                  "\t-e : don't autostart\n"
                  "\t-n : Create NULL devices instead of using PCI devices, useful together with -i\n"
                  "\t-m : list supported task modes and exit\n"
                  "\t-s : check configuration file syntax and exit\n"
                  "\t-i : check initialization sequence and exit\n"
                  "\t-u : Listen on UDS /tmp/prox.sock\n"
                  "\t-t : Listen on TCP port 8474\n"
                  "\t-q : Pass argument to Lua interpreter, useful to define variables\n"
                  "\t-w : define variable using syntax varname=value\n"
                  "\t     takes precedence over variables defined in CONFIG_FILE\n"
                  "\t-k : Log statistics to file \"stats_dump\" in current directory\n"
                  "\t-d : Run as daemon, the parent process will block until PROX is not initialized\n"
                  "\t-z : Ignore CPU topology, implies -i\n"
                  "\t-r : Change initial screen refresh rate. If set to a lower than 0.001 seconds,\n"
                  "\t     screen refreshing will be disabled\n"
                  , prgname);
        exit(EXIT_FAILURE);
}
85
86 static void check_mixed_normal_pipeline(void)
87 {
88         struct lcore_cfg *lconf = NULL;
89         uint32_t lcore_id = -1;
90
91         while (prox_core_next(&lcore_id, 0) == 0) {
92                 lconf = &lcore_cfg[lcore_id];
93
94                 int all_thread_nop = 1;
95                 int generic = 0;
96                 int pipeline = 0;
97                 int l3 = 0;
98                 for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
99                         struct task_args *targ = &lconf->targs[task_id];
100                         l3 = !strcmp("l3", targ->sub_mode_str);
101                         all_thread_nop = all_thread_nop && !l3 &&
102                                 targ->task_init->thread_x == thread_nop;
103
104                         pipeline = pipeline || targ->task_init->thread_x == thread_pipeline;
105                         generic = generic || targ->task_init->thread_x == thread_generic || l3;
106                 }
107                 PROX_PANIC(generic && pipeline, "Can't run both pipeline and normal thread on same core\n");
108
109                 if (all_thread_nop)
110                         lconf->thread_x = thread_nop;
111                 else {
112                         lconf->thread_x = thread_generic;
113                 }
114         }
115 }
116
117 static void check_zero_rx(void)
118 {
119         struct lcore_cfg *lconf = NULL;
120         struct task_args *targ;
121
122         while (core_targ_next(&lconf, &targ, 0) == 0) {
123                 if (targ->nb_rxports != 0) {
124                         PROX_PANIC(task_init_flag_set(targ->task_init, TASK_FEATURE_NO_RX),
125                            "\tCore %u task %u: rx_ports configured while mode %s does not use it\n", lconf->id, targ->id, targ->task_init->mode_str);
126                 }
127         }
128 }
129
130 static void check_missing_rx(void)
131 {
132         struct lcore_cfg *lconf = NULL, *rx_lconf = NULL, *tx_lconf = NULL;
133         struct task_args *targ, *rx_targ = NULL, *tx_targ = NULL;
134         struct prox_port_cfg *port;
135         uint8_t port_id, rx_port_id, ok;
136
137         while (core_targ_next(&lconf, &targ, 0) == 0) {
138                 PROX_PANIC((targ->flags & TASK_ARG_RX_RING) && targ->rx_rings[0] == 0 && !targ->tx_opt_ring_task,
139                            "Configuration Error - Core %u task %u Receiving from ring, but nobody xmitting to this ring\n", lconf->id, targ->id);
140                 if (targ->nb_rxports == 0 && targ->nb_rxrings == 0) {
141                         PROX_PANIC(!task_init_flag_set(targ->task_init, TASK_FEATURE_NO_RX),
142                                    "\tCore %u task %u: no rx_ports and no rx_rings configured while required by mode %s\n", lconf->id, targ->id, targ->task_init->mode_str);
143                 }
144         }
145
146         lconf = NULL;
147         while (core_targ_next(&lconf, &targ, 0) == 0) {
148                 if (strcmp(targ->sub_mode_str, "l3") != 0)
149                         continue;
150
151                 PROX_PANIC((targ->nb_rxports == 0) && (targ->nb_txports == 0), "L3 task must have a RX or a TX port\n");
152                 // If the L3 sub_mode receives from a port, check that there is at least one core/task
153                 // transmitting to this port in L3 sub_mode
154                 for (uint8_t i = 0; i < targ->nb_rxports; ++i) {
155                         rx_port_id = targ->rx_port_queue[i].port;
156                         ok = 0;
157                         tx_lconf = NULL;
158                         while (core_targ_next(&tx_lconf, &tx_targ, 0) == 0) {
159                                 if ((port_id = tx_targ->tx_port_queue[0].port) == OUT_DISCARD)
160                                         continue;
161                                 if ((rx_port_id == port_id) && (tx_targ->flags & TASK_ARG_L3)){
162                                         ok = 1;
163                                         break;
164                                 }
165                         }
166                         PROX_PANIC(ok == 0, "RX L3 sub mode for port %d on core %d task %d, but no core/task transmitting on that port\n", rx_port_id, lconf->id, targ->id);
167                 }
168
169                 // If the L3 sub_mode transmits to a port, check that there is at least one core/task
170                 // receiving from that port in L3 sub_mode.
171                 if ((port_id = targ->tx_port_queue[0].port) == OUT_DISCARD)
172                         continue;
173                 rx_lconf = NULL;
174                 ok = 0;
175                 plog_info("\tCore %d task %d transmitting to port %d in L3 mode\n", lconf->id, targ->id, port_id);
176                 while (core_targ_next(&rx_lconf, &rx_targ, 0) == 0) {
177                         for (uint8_t i = 0; i < rx_targ->nb_rxports; ++i) {
178                                 rx_port_id = rx_targ->rx_port_queue[i].port;
179                                 if ((rx_port_id == port_id) && (rx_targ->flags & TASK_ARG_L3)){
180                                         ok = 1;
181                                         break;
182                                 }
183                         }
184                         if (ok == 1) {
185                                 plog_info("\tCore %d task %d has found core %d task %d receiving from port %d\n", lconf->id, targ->id, rx_lconf->id, rx_targ->id, port_id);
186                                 break;
187                         }
188                 }
189                 PROX_PANIC(ok == 0, "L3 sub mode for port %d on core %d task %d, but no core/task receiving on that port\n", port_id, lconf->id, targ->id);
190         }
191 }
192
/* Run all post-parse configuration consistency checks; each check panics
 * on error, so returning means the configuration is acceptable. */
static void check_cfg_consistent(void)
{
        check_missing_rx();
        check_zero_rx();
        check_mixed_normal_pipeline();
}
199
200 static void plog_all_rings(void)
201 {
202         struct lcore_cfg *lconf = NULL;
203         struct task_args *targ;
204
205         while (core_targ_next(&lconf, &targ, 0) == 0) {
206                 for (uint8_t ring_idx = 0; ring_idx < targ->nb_rxrings; ++ring_idx) {
207                         plog_info("\tCore %u, task %u, rx_ring[%u] %p\n", lconf->id, targ->id, ring_idx, targ->rx_rings[ring_idx]);
208                 }
209         }
210 }
211
212 static int chain_flag_state(struct task_args *targ, uint64_t flag, int is_set)
213 {
214         if (task_init_flag_set(targ->task_init, flag) == is_set)
215                 return 1;
216
217         int ret = 0;
218
219         for (uint32_t i = 0; i < targ->n_prev_tasks; ++i) {
220                 ret = chain_flag_state(targ->prev_tasks[i], flag, is_set);
221                 if (ret)
222                         return 1;
223         }
224         return 0;
225 }
226
/* True when no task in the chain has `flag` cleared, i.e. it is set everywhere. */
static int chain_flag_always_set(struct task_args *targ, uint64_t flag)
{
        return chain_flag_state(targ, flag, 0) == 0;
}
231
/* True when no task in the chain has `flag` set, i.e. it is clear everywhere. */
static int chain_flag_never_set(struct task_args *targ, uint64_t flag)
{
        return chain_flag_state(targ, flag, 1) == 0;
}
236
/* True when at least one task in the chain has `flag` set. */
static int chain_flag_sometimes_set(struct task_args *targ, uint64_t flag)
{
        return chain_flag_state(targ, flag, 1) != 0;
}
241
/* Allocate a TX queue on every port this task transmits to, updating the
 * global prox_port_cfg[] queue counters, and disable checksum offloads on
 * ports whose whole upstream chain declared NOOFFLOADS.
 * Panics on unconfigured or inactive ports; cross-socket TX only warns. */
static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
{
        uint8_t if_port;

        for (uint8_t i = 0; i < targ->nb_txports; ++i) {
                if_port = targ->tx_port_queue[i].port;

                PROX_PANIC(if_port == OUT_DISCARD, "port misconfigured, exiting\n");

                PROX_PANIC(!prox_port_cfg[if_port].active, "\tPort %u not used, skipping...\n", if_port);

                /* Cross-socket TX is legal but slower; warn only. */
                int dsocket = prox_port_cfg[if_port].socket;
                if (dsocket != -1 && dsocket != socket) {
                        plog_warn("TX core on socket %d while device on socket %d\n", socket, dsocket);
                }

                if (prox_port_cfg[if_port].tx_ring[0] == '\0') {  // Rings-backed port can use single queue
                        /* Physical port: give this task its own TX queue. */
                        targ->tx_port_queue[i].queue = prox_port_cfg[if_port].n_txq;
                        prox_port_cfg[if_port].n_txq++;
                } else {
                        /* Ring-backed port: all tasks share queue 0. */
                        prox_port_cfg[if_port].n_txq = 1;
                        targ->tx_port_queue[i].queue = 0;
                }
                /* By default OFFLOAD is enabled, but if the whole
                   chain has NOOFFLOADS set all the way until the
                   first task that receives from a port, it will be
                   disabled for the destination port. */
#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
                if (chain_flag_always_set(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)) {
                        prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
                }
#else
                /* DPDK >= 18.08: txq_flags were replaced by per-port offload bits. */
                if (chain_flag_always_set(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)) {
                        prox_port_cfg[if_port].requested_tx_offload &= ~(DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
                }
#endif
        }
}
280
/* Allocate an RX queue on every port this task receives from, bind the
 * task's mempool to that queue, and flag multi-segment RX support when the
 * task's mbufs are too small for the port's maximum frame size. */
static void configure_if_rx_queues(struct task_args *targ, uint8_t socket)
{
        struct prox_port_cfg *port;
        for (int i = 0; i < targ->nb_rxports; i++) {
                uint8_t if_port = targ->rx_port_queue[i].port;

                /* NOTE(review): this `return` aborts processing of any remaining
                   rx ports, while the analogous loop in set_mbuf_size() uses
                   `continue` — confirm whether skipping the rest is intended. */
                if (if_port == OUT_DISCARD) {
                        return;
                }

                port = &prox_port_cfg[if_port];
                PROX_PANIC(!port->active, "Port %u not used, aborting...\n", if_port);

                /* Ring-backed ports get no real RX queues. */
                if(port->rx_ring[0] != '\0') {
                        port->n_rxq = 0;
                }

                // If the mbuf size (of the rx task) is not big enough, we might receive multiple segments
                // This is usually the case when setting a big mtu size i.e. enabling jumbo frames.
                // If the packets get transmitted, then multi segments will have to be enabled on the TX port
                uint16_t max_frame_size = port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE;
                if (max_frame_size + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM > targ->mbuf_size) {
                        targ->task_init->flag_features |= TASK_FEATURE_TXQ_FLAGS_MULTSEGS;
                }
                /* Claim the next free queue on the port and attach this task's pool. */
                targ->rx_port_queue[i].queue = port->n_rxq;
                port->pool[targ->rx_port_queue[i].queue] = targ->pool;
                port->pool_size[targ->rx_port_queue[i].queue] = targ->nb_mbuf - 1;
                port->n_rxq++;

                /* Cross-socket RX is legal but slower; warn only. */
                int dsocket = port->socket;
                if (dsocket != -1 && dsocket != socket) {
                        plog_warn("RX core on socket %d while device on socket %d\n", socket, dsocket);
                }
        }
}
316
317 static void configure_if_queues(void)
318 {
319         struct lcore_cfg *lconf = NULL;
320         struct task_args *targ;
321         uint8_t socket;
322
323         while (core_targ_next(&lconf, &targ, 0) == 0) {
324                 socket = rte_lcore_to_socket_id(lconf->id);
325
326                 configure_if_rx_queues(targ, socket);
327                 configure_if_tx_queues(targ, socket);
328         }
329 }
330
/* Enable fast mbuf freeing on TX ports where the whole upstream task chain
 * guarantees no refcount usage (and, on DPDK >= 18.08, a single mempool per
 * queue). Only adjusts global port configuration; no queues are created. */
static void configure_tx_queue_flags(void)
{
        struct lcore_cfg *lconf = NULL;
        struct task_args *targ;
        uint8_t socket;
        uint8_t if_port;

        while (core_targ_next(&lconf, &targ, 0) == 0) {
                socket = rte_lcore_to_socket_id(lconf->id);
                for (uint8_t i = 0; i < targ->nb_txports; ++i) {
                        if_port = targ->tx_port_queue[i].port;
#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
                        /* Set the ETH_TXQ_FLAGS_NOREFCOUNT flag if none of
                        the tasks up to the task transmitting to the port
                        use refcnt. */
                        if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT)) {
                                prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
                        }
#else
                        /* Set the DEV_TX_OFFLOAD_MBUF_FAST_FREE flag if none of
                        the tasks up to the task transmitting to the port
                        use refcnt and per-queue all mbufs comes from the same mempool. */
                        if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT)) {
                                if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL))
                                        prox_port_cfg[if_port].requested_tx_offload |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
                        }
#endif
                }
        }
}
361
/* Propagate multi-segment TX capability to ports: a TX port needs multi-seg
 * support as soon as any task in its upstream chain may emit chained mbufs
 * (e.g. jumbo frames). Only adjusts global port configuration. */
static void configure_multi_segments(void)
{
        struct lcore_cfg *lconf = NULL;
        struct task_args *targ;
        uint8_t if_port;

        while (core_targ_next(&lconf, &targ, 0) == 0) {
                for (uint8_t i = 0; i < targ->nb_txports; ++i) {
                        if_port = targ->tx_port_queue[i].port;
                        // Multi segment is disabled for most tasks. It is only enabled for tasks requiring big packets.
#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
                        // We can only enable "no multi segment" if no such task exists in the chain of tasks.
                        if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS)) {
                                prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
                        }
#else
                        // We enable "multi segment" if at least one task requires it in the chain of tasks.
                        if (chain_flag_sometimes_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS)) {
                                prox_port_cfg[if_port].requested_tx_offload |= DEV_TX_OFFLOAD_MULTI_SEGS;
                        }
#endif
                }
        }
}
386
/* Generate a short unique name ("A", "B", ..., then two-character names) for
 * each successive rte_ring. Returns a pointer to a static buffer that is
 * overwritten on every call — not reentrant, copy if the name must persist.
 * Fix: strlen(ring_names) was recomputed up to three times per call; it is
 * loop-invariant, so compute it once. */
static const char *gen_ring_name(void)
{
        static char retval[] = "XX";
        static const char* ring_names =
                "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                "abcdefghijklmnopqrstuvwxyz"
                "[\\]^_`!\"#$%&'()*+,-./:;<="
                ">?@{|}0123456789";
        static int idx2 = 0;
        const size_t n_names = strlen(ring_names);

        int idx = idx2;

        retval[0] = ring_names[idx % n_names];
        idx /= n_names;
        /* Second character only for names past the alphabet; 0 terminates
           single-character names in place. */
        retval[1] = idx ? ring_names[(idx - 1) % n_names] : 0;

        idx2++;

        return retval;
}
407
/* Counters for the three kinds of inter-task rings created during init. */
struct ring_init_stats {
        uint32_t n_pkt_rings;
        uint32_t n_ctrl_rings;
        uint32_t n_opt_rings;
};

/* Total number of rings accounted for in `ris`, all kinds combined. */
static uint32_t ring_init_stats_total(const struct ring_init_stats *ris)
{
        uint32_t total = ris->n_pkt_rings;

        total += ris->n_ctrl_rings;
        total += ris->n_opt_rings;
        return total;
}
418
419 static uint32_t count_incoming_tasks(uint32_t lcore_worker, uint32_t dest_task)
420 {
421         struct lcore_cfg *lconf = NULL;
422         struct task_args *targ;
423         uint32_t ret = 0;
424         struct core_task ct;
425
426         while (core_targ_next(&lconf, &targ, 0) == 0) {
427                 for (uint8_t idxx = 0; idxx < MAX_PROTOCOLS; ++idxx) {
428                         for (uint8_t ridx = 0; ridx < targ->core_task_set[idxx].n_elems; ++ridx) {
429                                 ct = targ->core_task_set[idxx].core_task[ridx];
430
431                                 if (dest_task == ct.task && lcore_worker == ct.core)
432                                         ret++;
433                         }
434                 }
435         }
436         return ret;
437 }
438
439 static struct rte_ring *get_existing_ring(uint32_t lcore_id, uint32_t task_id)
440 {
441         if (!prox_core_active(lcore_id, 0))
442                 return NULL;
443
444         struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
445
446         if (task_id >= lconf->n_tasks_all)
447                 return NULL;
448
449         if (lconf->targs[task_id].nb_rxrings == 0)
450                 return NULL;
451
452         return lconf->targs[task_id].rx_rings[0];
453 }
454
455 static struct rte_ring *init_ring_between_tasks(struct lcore_cfg *lconf, struct task_args *starg,
456                                     const struct core_task ct, uint8_t ring_idx, int idx,
457                                     struct ring_init_stats *ris)
458 {
459         uint8_t socket;
460         struct rte_ring *ring = NULL;
461         struct lcore_cfg *lworker;
462         struct task_args *dtarg;
463
464         PROX_ASSERT(prox_core_active(ct.core, 0));
465         lworker = &lcore_cfg[ct.core];
466
467         /* socket used is the one that the sending core resides on */
468         socket = rte_lcore_to_socket_id(lconf->id);
469
470         plog_info("\t\tCreating ring on socket %u with size %u\n"
471                   "\t\t\tsource core, task and socket = %u, %u, %u\n"
472                   "\t\t\tdestination core, task and socket = %u, %u, %u\n"
473                   "\t\t\tdestination worker id = %u\n",
474                   socket, starg->ring_size,
475                   lconf->id, starg->id, socket,
476                   ct.core, ct.task, rte_lcore_to_socket_id(ct.core),
477                   ring_idx);
478
479         if (ct.type) {
480                 struct rte_ring **dring = NULL;
481
482                 if (ct.type == CTRL_TYPE_MSG)
483                         dring = &lworker->ctrl_rings_m[ct.task];
484                 else if (ct.type == CTRL_TYPE_PKT) {
485                         dring = &lworker->ctrl_rings_p[ct.task];
486                         starg->flags |= TASK_ARG_CTRL_RINGS_P;
487                 }
488
489                 if (*dring == NULL)
490                         ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SC_DEQ);
491                 else
492                         ring = *dring;
493                 PROX_PANIC(ring == NULL, "Cannot create ring to connect I/O core %u with worker core %u\n", lconf->id, ct.core);
494
495                 starg->tx_rings[starg->tot_n_txrings_inited] = ring;
496                 starg->tot_n_txrings_inited++;
497                 *dring = ring;
498                 if (lconf->id == prox_cfg.master) {
499                         ctrl_rings[ct.core*MAX_TASKS_PER_CORE + ct.task] = ring;
500                 } else if (ct.core == prox_cfg.master) {
501                         starg->ctrl_plane_ring = ring;
502                 }
503
504                 plog_info("\t\tCore %u task %u to -> core %u task %u ctrl_ring %s %p %s\n",
505                           lconf->id, starg->id, ct.core, ct.task, ct.type == CTRL_TYPE_PKT?
506                           "pkt" : "msg", ring, ring->name);
507                 ris->n_ctrl_rings++;
508                 return ring;
509         }
510
511         dtarg = &lworker->targs[ct.task];
512         lworker->targs[ct.task].worker_thread_id = ring_idx;
513         PROX_ASSERT(dtarg->flags & TASK_ARG_RX_RING);
514         PROX_ASSERT(ct.task < lworker->n_tasks_all);
515
516         /* If all the following conditions are met, the ring can be
517            optimized away. */
518         if (!task_is_master(starg) && !task_is_master(dtarg) && starg->lconf->id == dtarg->lconf->id &&
519             starg->nb_txrings == 1 && idx == 0 && dtarg->task &&
520             dtarg->tot_rxrings == 1 && starg->task == dtarg->task - 1) {
521                 plog_info("\t\tOptimizing away ring on core %u from task %u to task %u\n",
522                           dtarg->lconf->id, starg->task, dtarg->task);
523                 /* No need to set up ws_mbuf. */
524                 starg->tx_opt_ring = 1;
525                 /* During init of destination task, the buffer in the
526                    source task will be initialized. */
527                 dtarg->tx_opt_ring_task = starg;
528                 ris->n_opt_rings++;
529                 ++dtarg->nb_rxrings;
530                 return NULL;
531         }
532
533         int ring_created = 1;
534         /* Only create multi-producer rings if configured to do so AND
535            there is only one task sending to the task */
536         if ((prox_cfg.flags & DSF_MP_RINGS && count_incoming_tasks(ct.core, ct.task) > 1)
537                 || (prox_cfg.flags & DSF_ENABLE_BYPASS)) {
538                 ring = get_existing_ring(ct.core, ct.task);
539
540                 if (ring) {
541                         plog_info("\t\tCore %u task %u creatign MP ring %p to core %u task %u\n",
542                                   lconf->id, starg->id, ring, ct.core, ct.task);
543                         ring_created = 0;
544                 }
545                 else {
546                         ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SC_DEQ);
547                         plog_info("\t\tCore %u task %u using MP ring %p from core %u task %u\n",
548                                   lconf->id, starg->id, ring, ct.core, ct.task);
549                 }
550         }
551         else
552                 ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
553
554         PROX_PANIC(ring == NULL, "Cannot create ring to connect I/O core %u with worker core %u\n", lconf->id, ct.core);
555
556         starg->tx_rings[starg->tot_n_txrings_inited] = ring;
557         starg->tot_n_txrings_inited++;
558
559         if (ring_created) {
560                 PROX_ASSERT(dtarg->nb_rxrings < MAX_RINGS_PER_TASK);
561                 dtarg->rx_rings[dtarg->nb_rxrings] = ring;
562                 ++dtarg->nb_rxrings;
563                 if (dtarg->nb_rxrings > 1)
564                         dtarg->task_init->flag_features |= TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL;
565         }
566         dtarg->nb_slave_threads = starg->core_task_set[idx].n_elems;
567         dtarg->lb_friend_core = lconf->id;
568         dtarg->lb_friend_task = starg->id;
569         plog_info("\t\tWorker thread %d has core %d, task %d as a lb friend\n", ct.core, lconf->id, starg->id);
570         plog_info("\t\tCore %u task %u tx_ring[%u] -> core %u task %u rx_ring[%u] %p %s %u WT\n",
571                   lconf->id, starg->id, ring_idx, ct.core, ct.task, dtarg->nb_rxrings, ring, ring->name,
572                   dtarg->nb_slave_threads);
573         ++ris->n_pkt_rings;
574         return ring;
575 }
576
577 static void init_rings(void)
578 {
579         struct lcore_cfg *lconf = NULL;
580         struct task_args *starg;
581         struct ring_init_stats ris = {0};
582
583         while (core_targ_next(&lconf, &starg, 1) == 0) {
584                 plog_info("\t*** Initializing rings on core %u, task %u ***\n", lconf->id, starg->id);
585                 for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
586                         for (uint8_t ring_idx = 0; ring_idx < starg->core_task_set[idx].n_elems; ++ring_idx) {
587                                 PROX_ASSERT(ring_idx < MAX_WT_PER_LB);
588                                 PROX_ASSERT(starg->tot_n_txrings_inited < MAX_RINGS_PER_TASK);
589
590                                 struct core_task ct = starg->core_task_set[idx].core_task[ring_idx];
591                                 init_ring_between_tasks(lconf, starg, ct, ring_idx, idx, &ris);
592                         }
593                 }
594         }
595
596         plog_info("\tInitialized %d rings:\n"
597                   "\t\tNumber of packet rings: %u\n"
598                   "\t\tNumber of control rings: %u\n"
599                   "\t\tNumber of optimized rings: %u\n",
600                   ring_init_stats_total(&ris),
601                   ris.n_pkt_rings,
602                   ris.n_ctrl_rings,
603                   ris.n_opt_rings);
604
605         lconf = NULL;
606         struct prox_port_cfg *port;
607         while (core_targ_next(&lconf, &starg, 1) == 0) {
608                 if ((starg->task_init) && (starg->flags & TASK_ARG_L3)) {
609                         struct core_task ct;
610                         ct.core = prox_cfg.master;
611                         ct.task = 0;
612                         ct.type = CTRL_TYPE_PKT;
613                         struct rte_ring *rx_ring = init_ring_between_tasks(lconf, starg, ct, 0, 0, &ris);
614
615                         ct.core = lconf->id;
616                         ct.task = starg->id;;
617                         struct rte_ring *tx_ring = init_ring_between_tasks(lcore_cfg, lcore_cfg[prox_cfg.master].targs, ct, 0, 0, &ris);
618                 }
619         }
620 }
621
/* Randomize the order of the mbufs inside `mempool`: drain every buffer into
 * a temporary array, then return them one by one in random order.
 * Fix: the index was computed as `rand() % nb_mbuf - 1`, which parses as
 * `(rand() % nb_mbuf) - 1` and could yield -1 — an out-of-bounds read of
 * pkts[-1] (undefined behavior) — while never selecting the last slot.
 * Valid slots are 0..nb_mbuf-1, so use `rand() % nb_mbuf`.
 * Assumes the pool holds at most nb_mbuf buffers (pkts is sized for that). */
static void shuffle_mempool(struct rte_mempool* mempool, uint32_t nb_mbuf)
{
        struct rte_mbuf** pkts = prox_zmalloc(nb_mbuf * sizeof(*pkts), rte_socket_id());
        uint64_t got = 0;

        /* Drain the pool one buffer at a time. */
        while (rte_mempool_get_bulk(mempool, (void**)(pkts + got), 1) == 0)
                ++got;

        while (got) {
                int idx;
                do {
                        idx = rand() % nb_mbuf;
                } while (pkts[idx] == 0);       /* skip empty slots (zmalloc'd) */

                rte_mempool_put_bulk(mempool, (void**)&pkts[idx], 1);
                pkts[idx] = 0;
                --got;
        }
        prox_free(pkts);
}
642
/* Choose the mbuf size for a task's mempool unless one was forced in the
 * configuration file. Starts from MBUF_SIZE, then grows it for i40e ports
 * (which chain at most 5 RX descriptors) and warns when a port's minimum RX
 * buffer size exceeds the chosen mbuf size. */
static void set_mbuf_size(struct task_args *targ)
{
        /* mbuf size can be set
         *  - from config file (highest priority, overwriting any other config) - should only be used as workaround
         *  - defaulted to MBUF_SIZE.
         * Except if set explicitely, ensure that size is big enough for vmxnet3 driver
         */
        if (targ->mbuf_size)
                return;

        targ->mbuf_size = MBUF_SIZE;
        struct prox_port_cfg *port;
        uint16_t max_frame_size = 0, min_buffer_size = 0;
        int i40e = 0;
        for (int i = 0; i < targ->nb_rxports; i++) {
                uint8_t if_port = targ->rx_port_queue[i].port;

                if (if_port == OUT_DISCARD) {
                        continue;
                }
                port = &prox_port_cfg[if_port];
                /* Track the worst-case (largest) frame and minimum buffer sizes
                   across all RX ports of this task. */
                if (max_frame_size < port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE)
                        max_frame_size = port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE;
                if (min_buffer_size < port->min_rx_bufsize)
                        min_buffer_size = port->min_rx_bufsize;

                // Check whether we receive from i40e. This driver have extra mbuf size requirements
                if (strcmp(port->short_name, "i40e") == 0)
                        i40e = 1;
        }
        if (i40e) {
                // i40e supports a maximum of 5 descriptors chained
                uint16_t required_mbuf_size = RTE_ALIGN(max_frame_size / 5, 128) + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
                if (required_mbuf_size > targ->mbuf_size) {
                        targ->mbuf_size = required_mbuf_size;
                        plog_info("\t\tSetting mbuf_size to %u to support frame_size %u\n", targ->mbuf_size, max_frame_size);
                }
        }
        if (min_buffer_size > targ->mbuf_size) {
                plog_warn("Mbuf size might be too small. This might result in packet segmentation and memory leak\n");
        }

}
686
687 static void setup_mempools_unique_per_socket(void)
688 {
689         uint32_t flags = 0;
690         char name[64];
691         struct lcore_cfg *lconf = NULL;
692         struct task_args *targ;
693
694         struct rte_mempool     *pool[MAX_SOCKETS];
695         uint32_t mbuf_count[MAX_SOCKETS] = {0};
696         uint32_t nb_cache_mbuf[MAX_SOCKETS] = {0};
697         uint32_t mbuf_size[MAX_SOCKETS] = {0};
698
699         while (core_targ_next_early(&lconf, &targ, 0) == 0) {
700                 PROX_PANIC(targ->task_init == NULL, "task_init = NULL, is mode specified for core %d, task %d ?\n", lconf->id, targ->id);
701                 uint8_t socket = rte_lcore_to_socket_id(lconf->id);
702                 PROX_ASSERT(socket < MAX_SOCKETS);
703
704                 set_mbuf_size(targ);
705                 if (targ->rx_port_queue[0].port != OUT_DISCARD) {
706                         struct prox_port_cfg* port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port];
707                         PROX_ASSERT(targ->nb_mbuf != 0);
708                         mbuf_count[socket] += targ->nb_mbuf;
709                         if (nb_cache_mbuf[socket] == 0)
710                                 nb_cache_mbuf[socket] = targ->nb_cache_mbuf;
711                         else {
712                                 PROX_PANIC(nb_cache_mbuf[socket] != targ->nb_cache_mbuf,
713                                            "all mbuf_cache must have the same size if using a unique mempool per socket\n");
714                         }
715                         if (mbuf_size[socket] == 0)
716                                 mbuf_size[socket] = targ->mbuf_size;
717                         else {
718                                 PROX_PANIC(mbuf_size[socket] != targ->mbuf_size,
719                                            "all mbuf_size must have the same size if using a unique mempool per socket\n");
720                         }
721                 }
722         }
723         for (int i = 0 ; i < MAX_SOCKETS; i++) {
724                 if (mbuf_count[i] != 0) {
725                         sprintf(name, "socket_%u_pool", i);
726                         pool[i] = rte_mempool_create(name,
727                                                      mbuf_count[i] - 1, mbuf_size[i],
728                                                      nb_cache_mbuf[i],
729                                                      sizeof(struct rte_pktmbuf_pool_private),
730                                                      rte_pktmbuf_pool_init, NULL,
731                                                      prox_pktmbuf_init, NULL,
732                                                      i, flags);
733                         PROX_PANIC(pool[i] == NULL, "\t\tError: cannot create mempool for socket %u\n", i);
734                         plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", pool[i],
735                                   mbuf_count[i], mbuf_size[i], nb_cache_mbuf[i], i);
736
737                         if (prox_cfg.flags & DSF_SHUFFLE) {
738                                 shuffle_mempool(pool[i], mbuf_count[i]);
739                         }
740                 }
741         }
742
743         lconf = NULL;
744         while (core_targ_next_early(&lconf, &targ, 0) == 0) {
745                 uint8_t socket = rte_lcore_to_socket_id(lconf->id);
746
747                 if (targ->rx_port_queue[0].port != OUT_DISCARD) {
748                         /* use this pool for the interface that the core is receiving from */
749                         /* If one core receives from multiple ports, all the ports use the same mempool */
750                         targ->pool = pool[socket];
751                         /* Set the number of mbuf to the number of the unique mempool, so that the used and free work */
752                         targ->nb_mbuf = mbuf_count[socket];
753                         plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
754                                   targ->nb_mbuf, mbuf_size[socket], targ->nb_cache_mbuf, socket);
755                 }
756         }
757 }
758
759 static void setup_mempool_for_rx_task(struct lcore_cfg *lconf, struct task_args *targ)
760 {
761         const uint8_t socket = rte_lcore_to_socket_id(lconf->id);
762         struct prox_port_cfg *port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port];
763         const struct rte_memzone *mz;
764         struct rte_mempool *mp = NULL;
765         uint32_t flags = 0;
766         char memzone_name[64];
767         char name[64];
768
769         set_mbuf_size(targ);
770
771         /* allocate memory pool for packets */
772         PROX_ASSERT(targ->nb_mbuf != 0);
773
774         if (targ->pool_name[0] == '\0') {
775                 sprintf(name, "core_%u_port_%u_pool", lconf->id, targ->id);
776         }
777
778         snprintf(memzone_name, sizeof(memzone_name)-1, "MP_%s", targ->pool_name);
779         mz = rte_memzone_lookup(memzone_name);
780
781         if (mz != NULL) {
782                 mp = (struct rte_mempool*)mz->addr;
783
784                 targ->nb_mbuf = mp->size;
785                 targ->pool = mp;
786         }
787
788 #ifdef RTE_LIBRTE_IVSHMEM_FALSE
789         if (mz != NULL && mp != NULL && mp->phys_addr != mz->ioremap_addr) {
790                 /* Init mbufs with ioremap_addr for dma */
791                 mp->phys_addr = mz->ioremap_addr;
792                 mp->elt_pa[0] = mp->phys_addr + (mp->elt_va_start - (uintptr_t)mp);
793
794                 struct prox_pktmbuf_reinit_args init_args;
795                 init_args.mp = mp;
796                 init_args.lconf = lconf;
797
798                 uint32_t elt_sz = mp->elt_size + mp->header_size + mp->trailer_size;
799                 rte_mempool_obj_iter((void*)mp->elt_va_start, mp->size, elt_sz, 1,
800                                      mp->elt_pa, mp->pg_num, mp->pg_shift, prox_pktmbuf_reinit, &init_args);
801         }
802 #endif
803
804         /* Use this pool for the interface that the core is
805            receiving from if one core receives from multiple
806            ports, all the ports use the same mempool */
807         if (targ->pool == NULL) {
808                 plog_info("\t\tCreating mempool with name '%s'\n", name);
809                 targ->pool = rte_mempool_create(name,
810                                                 targ->nb_mbuf - 1, targ->mbuf_size,
811                                                 targ->nb_cache_mbuf,
812                                                 sizeof(struct rte_pktmbuf_pool_private),
813                                                 rte_pktmbuf_pool_init, NULL,
814                                                 prox_pktmbuf_init, lconf,
815                                                 socket, flags);
816         }
817
818         PROX_PANIC(targ->pool == NULL,
819                    "\t\tError: cannot create mempool for core %u port %u: %s\n", lconf->id, targ->id, rte_strerror(rte_errno));
820
821         plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
822                   targ->nb_mbuf, targ->mbuf_size, targ->nb_cache_mbuf, socket);
823         if (prox_cfg.flags & DSF_SHUFFLE) {
824                 shuffle_mempool(targ->pool, targ->nb_mbuf);
825         }
826 }
827
828 static void setup_mempools_multiple_per_socket(void)
829 {
830         struct lcore_cfg *lconf = NULL;
831         struct task_args *targ;
832
833         while (core_targ_next_early(&lconf, &targ, 0) == 0) {
834                 PROX_PANIC(targ->task_init == NULL, "task_init = NULL, is mode specified for core %d, task %d ?\n", lconf->id, targ->id);
835                 if (targ->rx_port_queue[0].port == OUT_DISCARD)
836                         continue;
837                 setup_mempool_for_rx_task(lconf, targ);
838         }
839 }
840
841 static void setup_mempools(void)
842 {
843         if (prox_cfg.flags & UNIQUE_MEMPOOL_PER_SOCKET)
844                 setup_mempools_unique_per_socket();
845         else
846                 setup_mempools_multiple_per_socket();
847 }
848
849 static void set_task_lconf(void)
850 {
851         struct lcore_cfg *lconf;
852         uint32_t lcore_id = -1;
853
854         while(prox_core_next(&lcore_id, 1) == 0) {
855                 lconf = &lcore_cfg[lcore_id];
856                 for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
857                         lconf->targs[task_id].lconf = lconf;
858                 }
859         }
860 }
861
862 static void set_dest_threads(void)
863 {
864         struct lcore_cfg *lconf = NULL;
865         struct task_args *targ;
866
867         while (core_targ_next(&lconf, &targ, 0) == 0) {
868                 for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
869                         for (uint8_t ring_idx = 0; ring_idx < targ->core_task_set[idx].n_elems; ++ring_idx) {
870                                 struct core_task ct = targ->core_task_set[idx].core_task[ring_idx];
871
872                                 struct task_args *dest_task = core_targ_get(ct.core, ct.task);
873                                 dest_task->prev_tasks[dest_task->n_prev_tasks++] = targ;
874                         }
875                 }
876         }
877 }
878
879 static void setup_all_task_structs_early_init(void)
880 {
881         struct lcore_cfg *lconf = NULL;
882         struct task_args *targ;
883
884         plog_info("\t*** Calling early init on all tasks ***\n");
885         while (core_targ_next(&lconf, &targ, 0) == 0) {
886                 if (targ->task_init->early_init) {
887                         targ->task_init->early_init(targ);
888                 }
889         }
890 }
891
892 static void setup_all_task_structs(void)
893 {
894         struct lcore_cfg *lconf;
895         uint32_t lcore_id = -1;
896         struct task_base *tmaster = NULL;
897
898         while(prox_core_next(&lcore_id, 1) == 0) {
899                 lconf = &lcore_cfg[lcore_id];
900                 for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
901                         if (task_is_master(&lconf->targs[task_id])) {
902                                 plog_info("\tInitializing MASTER struct for core %d task %d\n", lcore_id, task_id);
903                                 lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
904                                 tmaster = lconf->tasks_all[task_id];
905                         }
906                 }
907         }
908         PROX_PANIC(tmaster == NULL, "Can't initialize master task\n");
909         lcore_id = -1;
910
911         while(prox_core_next(&lcore_id, 1) == 0) {
912                 lconf = &lcore_cfg[lcore_id];
913                 plog_info("\tInitializing struct for core %d with %d task\n", lcore_id, lconf->n_tasks_all);
914                 for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
915                         if (!task_is_master(&lconf->targs[task_id])) {
916                                 plog_info("\tInitializing struct for core %d task %d\n", lcore_id, task_id);
917                                 lconf->targs[task_id].tmaster = tmaster;
918                                 lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
919                         }
920                 }
921         }
922 }
923
924 static void init_port_activate(void)
925 {
926         struct lcore_cfg *lconf = NULL;
927         struct task_args *targ;
928         uint8_t port_id = 0;
929
930         while (core_targ_next_early(&lconf, &targ, 0) == 0) {
931                 for (int i = 0; i < targ->nb_rxports; i++) {
932                         port_id = targ->rx_port_queue[i].port;
933                         prox_port_cfg[port_id].active = 1;
934                 }
935
936                 for (int i = 0; i < targ->nb_txports; i++) {
937                         port_id = targ->tx_port_queue[i].port;
938                         prox_port_cfg[port_id].active = 1;
939                 }
940         }
941 }
942
943 /* Initialize cores and allocate mempools */
944 static void init_lcores(void)
945 {
946         struct lcore_cfg *lconf = 0;
947         uint32_t lcore_id = -1;
948
949         while(prox_core_next(&lcore_id, 0) == 0) {
950                 uint8_t socket = rte_lcore_to_socket_id(lcore_id);
951                 PROX_PANIC(socket + 1 > MAX_SOCKETS, "Can't configure core %u (on socket %u). MAX_SOCKET is set to %d\n", lcore_id, socket, MAX_SOCKETS);
952         }
953
954         /* need to allocate mempools as the first thing to use the lowest possible address range */
955         plog_info("=== Initializing mempools ===\n");
956         setup_mempools();
957
958         lcore_cfg_alloc_hp();
959
960         set_dest_threads();
961         set_task_lconf();
962
963         plog_info("=== Initializing port addresses ===\n");
964         init_port_addr();
965
966         plog_info("=== Initializing queue numbers on cores ===\n");
967         configure_if_queues();
968
969         plog_info("=== Initializing rings on cores ===\n");
970         init_rings();
971
972         configure_multi_segments();
973         configure_tx_queue_flags();
974
975         plog_info("=== Checking configuration consistency ===\n");
976         check_cfg_consistent();
977
978         plog_all_rings();
979 }
980
981 static int setup_prox(int argc, char **argv)
982 {
983         if (prox_read_config_file() != 0 ||
984             prox_setup_rte(argv[0]) != 0) {
985                 return -1;
986         }
987
988         if (prox_cfg.flags & DSF_CHECK_SYNTAX) {
989                 plog_info("=== Configuration file syntax has been checked ===\n\n");
990                 exit(EXIT_SUCCESS);
991         }
992
993         init_port_activate();
994         plog_info("=== Initializing rte devices ===\n");
995         if (!(prox_cfg.flags & DSF_USE_DUMMY_DEVICES))
996                 init_rte_ring_dev();
997         init_rte_dev(prox_cfg.flags & DSF_USE_DUMMY_DEVICES);
998         plog_info("=== Calibrating TSC overhead ===\n");
999         clock_init();
1000         plog_info("\tTSC running at %"PRIu64" Hz\n", rte_get_tsc_hz());
1001
1002         init_lcores();
1003         plog_info("=== Initializing ports ===\n");
1004         init_port_all();
1005
1006         setup_all_task_structs_early_init();
1007         plog_info("=== Initializing tasks ===\n");
1008         setup_all_task_structs();
1009
1010         if (prox_cfg.logbuf_size) {
1011                 prox_cfg.logbuf = prox_zmalloc(prox_cfg.logbuf_size, rte_socket_id());
1012                 PROX_PANIC(prox_cfg.logbuf == NULL, "Failed to allocate memory for logbuf with size = %d\n", prox_cfg.logbuf_size);
1013         }
1014
1015         if (prox_cfg.flags & DSF_CHECK_INIT) {
1016                 plog_info("=== Initialization sequence completed ===\n\n");
1017                 exit(EXIT_SUCCESS);
1018         }
1019
1020         /* Current way that works to disable DPDK logging */
1021         FILE *f = fopen("/dev/null", "r");
1022         rte_openlog_stream(f);
1023         plog_info("=== PROX started ===\n");
1024         return 0;
1025 }
1026
/* Set by the daemon-mode parent's SIGUSR1/SIGUSR2 handler: 1 when the child
 * signalled successful setup (SIGUSR1), 0 otherwise.
 * Fixed: objects written from a signal handler must be volatile
 * sig_atomic_t (C11 5.1.2.3, CERT SIG31-C); plain int was UB. */
static volatile sig_atomic_t success = 0;
static void siguser_handler(int signal)
{
	success = (signal == SIGUSR1) ? 1 : 0;
}
1035
/* Drastic-termination handler (SIGABRT, SIGPIPE, and second Ctrl-C):
 * cancels and joins all worker threads, shuts down the display and the
 * ports, then aborts with the default SIGABRT disposition restored. */
static void sigabrt_handler(__attribute__((unused)) int signum)
{
	/* restore default disposition for SIGABRT and SIGPIPE */
	signal(SIGABRT, SIG_DFL);
	signal(SIGPIPE, SIG_DFL);

	/* ignore further Ctrl-C */
	signal(SIGINT, SIG_IGN);

	/* more drastic exit on tedious termination signal */
	plog_info("Aborting...\n");
	if (lcore_cfg != NULL) {
		uint32_t lcore_id;
		/* tid0 is an all-zero pthread_t used to skip threads that were
		 * never started; NOTE(review): comparing against a zeroed
		 * pthread_t is not guaranteed portable, but matches existing
		 * usage in this file — confirm on non-Linux targets. */
		pthread_t thread_id, tid0, tid = pthread_self();
		memset(&tid0, 0, sizeof(tid0));

		/* cancel all threads except current one */
		lcore_id = -1;
		while (prox_core_next(&lcore_id, 1) == 0) {
			thread_id = lcore_cfg[lcore_id].thread_id;
			if (pthread_equal(thread_id, tid0))
				continue;
			if (pthread_equal(thread_id, tid))
				continue;
			pthread_cancel(thread_id);
		}

		/* wait for cancelled threads to terminate */
		lcore_id = -1;
		while (prox_core_next(&lcore_id, 1) == 0) {
			thread_id = lcore_cfg[lcore_id].thread_id;
			if (pthread_equal(thread_id, tid0))
				continue;
			if (pthread_equal(thread_id, tid))
				continue;
			pthread_join(thread_id, NULL);
		}
	}

	/* close ncurses */
	display_end();

	/* close ports on termination signal */
	close_ports_atexit();

	/* terminate now */
	abort();
}
1084
1085 static void sigterm_handler(int signum)
1086 {
1087         /* abort on second Ctrl-C */
1088         if (signum == SIGINT)
1089                 signal(SIGINT, sigabrt_handler);
1090
1091         /* gracefully quit on harmless termination signal */
1092         /* ports will subsequently get closed at resulting exit */
1093         quit();
1094 }
1095
/* Program entry point: parse arguments, install signal handlers, then run
 * PROX either directly or as a forked daemon child. In daemon mode the
 * child reports setup success/failure to the parent via SIGUSR1/SIGUSR2. */
int main(int argc, char **argv)
{
	/* set en_US locale to print big numbers with ',' */
	setlocale(LC_NUMERIC, "en_US.utf-8");

	if (prox_parse_args(argc, argv) != 0){
		prox_usage(argv[0]);
	}

	plog_init(prox_cfg.log_name, prox_cfg.log_name_pid);
	plog_info("=== " PROGRAM_NAME " " VERSION_STR " ===\n");
	/* skip the "RTE_VER_PREFIX " prefix of the DPDK version string */
	plog_info("\tUsing DPDK %s\n", rte_version() + sizeof(RTE_VER_PREFIX));
	read_rdt_info();

	if (prox_cfg.flags & DSF_LIST_TASK_MODES) {
		/* list supported task modes and exit */
		tasks_list();
		return EXIT_SUCCESS;
	}

	/* close ports at normal exit */
	atexit(close_ports_atexit);
	/* gracefully quit on harmless termination signals */
	signal(SIGHUP, sigterm_handler);
	signal(SIGINT, sigterm_handler);
	signal(SIGQUIT, sigterm_handler);
	signal(SIGTERM, sigterm_handler);
	signal(SIGUSR1, sigterm_handler);
	signal(SIGUSR2, sigterm_handler);
	/* more drastic exit on tedious termination signals */
	signal(SIGABRT, sigabrt_handler);
	signal(SIGPIPE, sigabrt_handler);

	if (prox_cfg.flags & DSF_DAEMON) {
		/* in daemon mode SIGUSR1/SIGUSR2 carry the child's setup
		 * status instead of triggering a graceful quit */
		signal(SIGUSR1, siguser_handler);
		signal(SIGUSR2, siguser_handler);
		plog_info("=== Running in Daemon mode ===\n");
		plog_info("\tForking child and waiting for setup completion\n");

		pid_t ppid = getpid();
		pid_t pid = fork();
		if (pid < 0) {
			plog_err("Failed to fork process to run in daemon mode\n");
			return EXIT_FAILURE;
		}

		if (pid == 0) {
			/* child: detach from the terminal and run PROX,
			 * reporting setup outcome to the parent */
			fclose(stdin);
			fclose(stdout);
			fclose(stderr);
			if (setsid() < 0) {
				kill(ppid, SIGUSR2);
				return EXIT_FAILURE;
			}
			if (setup_prox(argc, argv) != 0) {
				kill(ppid, SIGUSR2);
				return EXIT_FAILURE;
			}
			else {
				kill(ppid, SIGUSR1);
				run(prox_cfg.flags);
				return EXIT_SUCCESS;
			}
		}
		else {
			/* Before exiting the parent, wait until the
			   child process has finished setting up */
			pause();
			if (prox_cfg.logbuf) {
				file_print(prox_cfg.logbuf);
			}
			/* 'success' was set by siguser_handler from the
			 * child's SIGUSR1/SIGUSR2 */
			return success? EXIT_SUCCESS : EXIT_FAILURE;
		}
	}

	if (setup_prox(argc, argv) != 0)
		return EXIT_FAILURE;
	run(prox_cfg.flags);
	return EXIT_SUCCESS;
}