[samplevnf.git] / VNFs / DPPD-PROX / thread_generic.c
/*
// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#include <rte_cycles.h>
#include <rte_table_hash.h>

#include "log.h"
#include "thread_generic.h"
#include "stats.h"
#include "tx_pkt.h"
#include "lconf.h"
#include "hash_entry_types.h"
#include "defines.h"
#include "hash_utils.h"

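/* A tsc_task pairs a deadline, expressed in TSC cycles, with a
 * housekeeping callback. The callback returns the number of cycles
 * after which it wants to be scheduled again.
 */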
struct tsc_task {
        uint64_t tsc;
        uint64_t (* tsc_task)(struct lcore_cfg *lconf);
};

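/* Flush all TX queues of this core so partially filled bursts are not
 * held back; rescheduled every DRAIN_TIMEOUT cycles.
 */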
static uint64_t tsc_drain(struct lcore_cfg *lconf)
{
        lconf_flush_all_queues(lconf);
        return DRAIN_TIMEOUT;
}

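/* Check for a pending control request and process it. When a request was
 * handled, the queues are flushed and (uint64_t)-2 is returned so the main
 * loop refreshes its local view of the running tasks, or exits if none are
 * left.
 */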
static uint64_t tsc_term(struct lcore_cfg *lconf)
{
        if (lconf_is_req(lconf) && lconf_do_flags(lconf)) {
                lconf_flush_all_queues(lconf);
                return (uint64_t)-2;
        }
        return TERM_TIMEOUT;
}

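/* Invoke the periodic callback configured for this core. */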
static uint64_t tsc_period(struct lcore_cfg *lconf)
{
        lconf->period_func(lconf->period_data);
        return lconf->period_timeout;
}

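/* Drain the control rings of every task on this core and dispatch the
 * dequeued messages (ctrl_rings_m) respectively mbufs (ctrl_rings_p) to
 * the registered handlers.
 */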
static uint64_t tsc_ctrl(struct lcore_cfg *lconf)
{
        const uint8_t n_tasks_all = lconf->n_tasks_all;
        void *msgs[MAX_RING_BURST];
        uint16_t n_msgs;

        for (uint8_t task_id = 0; task_id < n_tasks_all; ++task_id) {
                if (lconf->ctrl_rings_m[task_id] && lconf->ctrl_func_m[task_id]) {
#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1)
                        n_msgs = rte_ring_sc_dequeue_burst(lconf->ctrl_rings_m[task_id], msgs, MAX_RING_BURST);
#else
                        n_msgs = rte_ring_sc_dequeue_burst(lconf->ctrl_rings_m[task_id], msgs, MAX_RING_BURST, NULL);
#endif
                        if (n_msgs) {
                                lconf->ctrl_func_m[task_id](lconf->tasks_all[task_id], msgs, n_msgs);
                        }
                }
                if (lconf->ctrl_rings_p[task_id] && lconf->ctrl_func_p[task_id]) {
#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1)
                        n_msgs = rte_ring_sc_dequeue_burst(lconf->ctrl_rings_p[task_id], msgs, MAX_RING_BURST);
#else
                        n_msgs = rte_ring_sc_dequeue_burst(lconf->ctrl_rings_p[task_id], msgs, MAX_RING_BURST, NULL);
#endif
                        if (n_msgs) {
                                lconf->ctrl_func_p[task_id](lconf->tasks_all[task_id], (struct rte_mbuf **)msgs, n_msgs);
                        }
                }
        }
        return lconf->ctrl_timeout;
}

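/* Main loop of a generic worker core: polls the packet-handling tasks
 * assigned to this lcore and, in between, runs the timer-driven tsc_tasks
 * (termination checks, queue draining, periodic work and control-ring
 * handling).
 */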
int thread_generic(struct lcore_cfg *lconf)
{
        struct task_base *tasks[MAX_TASKS_PER_CORE];
        int next[MAX_TASKS_PER_CORE] = {0};
        struct rte_mbuf **mbufs;
        uint64_t cur_tsc = rte_rdtsc();
        uint8_t zero_rx[MAX_TASKS_PER_CORE] = {0};
        struct tsc_task tsc_tasks[] = {
                {.tsc = cur_tsc, .tsc_task = tsc_term},
                {.tsc = cur_tsc + DRAIN_TIMEOUT, .tsc_task = tsc_drain},
                {.tsc = -1},
                {.tsc = -1},
                {.tsc = -1},
        };
        uint8_t n_tasks_run = lconf->n_tasks_run;

        if (lconf->period_func) {
                tsc_tasks[2].tsc = cur_tsc + lconf->period_timeout;
                tsc_tasks[2].tsc_task = tsc_period;
        }

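        /* Schedule tsc_ctrl as soon as at least one task on this core has a
           control-ring handler installed. */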
        for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
                if (lconf->ctrl_func_m[task_id]) {
                        tsc_tasks[3].tsc = cur_tsc + lconf->ctrl_timeout;
                        tsc_tasks[3].tsc_task = tsc_ctrl;
                        break;
                }
                if (lconf->ctrl_func_p[task_id]) {
                        tsc_tasks[3].tsc = cur_tsc + lconf->ctrl_timeout;
                        tsc_tasks[3].tsc_task = tsc_ctrl;
                        break;
                }
        }

        /* Sort tsc_tasks by deadline, earliest first. */
        for (size_t i = 0; i < sizeof(tsc_tasks)/sizeof(tsc_tasks[0]); ++i) {
                for (size_t j = i + 1; j < sizeof(tsc_tasks)/sizeof(tsc_tasks[0]); ++j) {
                        if (tsc_tasks[i].tsc > tsc_tasks[j].tsc) {
                                struct tsc_task tmp = tsc_tasks[i];
                                tsc_tasks[i] = tsc_tasks[j];
                                tsc_tasks[j] = tmp;
                        }
                }
        }
        struct tsc_task next_tsc = tsc_tasks[0];

        for (;;) {
                cur_tsc = rte_rdtsc();
                /* Keep tsc_tasks sorted by deadline, earliest first. After a
                   tsc_task has run, a linear scan moves entries with earlier
                   deadlines to the front and re-inserts the rescheduled
                   tsc_task at its sorted position. In most configurations one
                   tsc_task fires far more often than the others, so it
                   typically ends up at the front again and the scan stops
                   after a single comparison. If many tsc_tasks were used, this
                   should be replaced with a priority queue (heap). */
                if (unlikely(cur_tsc >= next_tsc.tsc)) {
                        uint64_t resched_diff = tsc_tasks[0].tsc_task(lconf);

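                        /* (uint64_t)-2 signals that the set of running tasks
                           may have changed: reload the local task array and
                           the zero-rx flags, and stop if nothing is left to
                           run. */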
                        if (resched_diff == (uint64_t)-2) {
                                n_tasks_run = lconf->n_tasks_run;
                                if (!n_tasks_run)
                                        return 0;
                                for (int i = 0; i < lconf->n_tasks_run; ++i) {
                                        tasks[i] = lconf->tasks_run[i];

                                        uint8_t task_id = lconf_get_task_id(lconf, tasks[i]);
                                        if (lconf->targs[task_id].task_init->flag_features & TASK_FEATURE_ZERO_RX)
                                                zero_rx[i] = 1;
                                }
                        }

                        uint64_t new_tsc = tsc_tasks[0].tsc + resched_diff;
                        tsc_tasks[0].tsc = new_tsc;
                        next_tsc.tsc = new_tsc;

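                        /* Re-insert the rescheduled tsc_task at its sorted
                           position: entries with earlier deadlines are shifted
                           towards the front, and next_tsc is kept in sync with
                           tsc_tasks[0]. */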
                        for (size_t i = 1; i < sizeof(tsc_tasks)/sizeof(tsc_tasks[0]); ++i) {
                                if (new_tsc < tsc_tasks[i].tsc) {
                                        if (i > 1) {
                                                tsc_tasks[i - 1] = next_tsc;
                                                next_tsc = tsc_tasks[0];
                                        }
                                        break;
                                }
                                else
                                        tsc_tasks[i - 1] = tsc_tasks[i];
                        }
                }

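                /* Poll each running task: rx_pkt() fetches a burst and
                   handle_bulk() processes it. A non-zero return from
                   handle_bulk() marks the task as too busy, so it is skipped
                   for one iteration, except for tasks fed by an optimized
                   ring (see below). */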
                uint16_t nb_rx;
                for (uint8_t task_id = 0; task_id < n_tasks_run; ++task_id) {
                        struct task_base *t = tasks[task_id];
                        struct task_args *targ = &lconf->targs[task_id];
                        // Do not skip a task receiving packets from an optimized ring
                        // as the transmitting task expects such a receiving task to always run and consume
                        // the transmitted packets.
                        if (unlikely(next[task_id] && (targ->tx_opt_ring_task == NULL))) {
                                // plogx_info("task %d is too busy\n", task_id);
                                next[task_id] = 0;
                        } else {
                                nb_rx = t->rx_pkt(t, &mbufs);
                                if (likely(nb_rx || zero_rx[task_id])) {
                                        next[task_id] = t->handle_bulk(t, mbufs, nb_rx);
                                }
                        }
                }
        }
        return 0;
}