2 // Copyright (c) 2010-2020 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
17 #include "prox_malloc.h"
/* Run-time per-core configuration array; allocated from huge-page memory
 * by lcore_cfg_alloc_hp() and indexed by lcore id. */
struct lcore_cfg *lcore_cfg;
/* only used at initialization time; copied into lcore_cfg once the
 * huge-page backed array has been allocated */
struct lcore_cfg lcore_cfg_init[RTE_MAX_LCORE];
29 static int core_targ_next_from(struct lcore_cfg **lconf, struct task_args **targ, struct lcore_cfg *lcore_cfg, const int with_master)
31 uint32_t lcore_id, task_id;
33 if (*lconf && *targ) {
34 lcore_id = *lconf - lcore_cfg;
35 task_id = *targ - lcore_cfg[lcore_id].targs;
37 if (task_id + 1 < lcore_cfg[lcore_id].n_tasks_all) {
38 *targ = &lcore_cfg[lcore_id].targs[task_id + 1];
41 if (prox_core_next(&lcore_id, with_master))
43 *lconf = &lcore_cfg[lcore_id];
44 *targ = &lcore_cfg[lcore_id].targs[0];
50 if (prox_core_next(&lcore_id, with_master))
52 *lconf = &lcore_cfg[lcore_id];
53 *targ = &lcore_cfg[lcore_id].targs[0];
58 int core_targ_next(struct lcore_cfg **lconf, struct task_args **targ, const int with_master)
60 return core_targ_next_from(lconf, targ, lcore_cfg, with_master);
63 int core_targ_next_early(struct lcore_cfg **lconf, struct task_args **targ, const int with_master)
65 return core_targ_next_from(lconf, targ, lcore_cfg_init, with_master);
68 struct task_args *core_targ_get(uint32_t lcore_id, uint32_t task_id)
70 return &lcore_cfg[lcore_id].targs[task_id];
73 void lcore_cfg_alloc_hp(void)
75 size_t mem_size = RTE_MAX_LCORE * sizeof(struct lcore_cfg);
77 lcore_cfg = prox_zmalloc(mem_size, rte_socket_id());
78 PROX_PANIC(lcore_cfg == NULL, "Could not allocate memory for core control structures\n");
79 rte_memcpy(lcore_cfg, lcore_cfg_init, mem_size);
81 /* get thread ID for master core */
82 lcore_cfg[rte_lcore_id()].thread_id = pthread_self();
85 int lconf_run(__attribute__((unused)) void *dummy)
87 uint32_t lcore_id = rte_lcore_id();
88 struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
90 /* get thread ID, and set cancellation type to asynchronous */
91 lconf->thread_id = pthread_self();
92 int ret = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
94 plog_warn("pthread_setcanceltype() failed on core %u: %i\n", lcore_id, ret);
96 plog_info("Entering main loop on core %u\n", lcore_id);
97 return lconf->thread_x(lconf);
/* Handle a stop request for this core: msg.task_id == -1 stops every
 * running task, otherwise a single task is removed from the run list.
 * The last task stopped gets its stop_last hook invoked.
 * NOTE(review): several structural lines (braces, the `idx` local,
 * per-task stop hooks, early return) are not visible in this chunk —
 * verify against the full file before relying on exact control flow. */
static void msg_stop(struct lcore_cfg *lconf)
	/* Last task that was stopped; candidate for the stop_last hook. */
	struct task_base *t = NULL;

	if (lconf->msg.task_id == -1) {
		/* task_id == -1 means "stop all tasks on this core". */
		for (int i = 0; i < lconf->n_tasks_all; ++i) {
			if (lconf->task_is_running[i]) {
				lconf->task_is_running[i] = 0;
				t = lconf->tasks_all[i];
		lconf->n_tasks_run = 0;

		/* stop_last runs once, after the final task has been stopped. */
		if (t && t->aux->stop_last)
			t->aux->stop_last(t);
		/* Single-task stop: compact tasks_run[] by shifting the
		 * entries that follow the stopped task down by one. */
		for (int i = 0; i < lconf->n_tasks_run; ++i) {
			if (lconf_get_task_id(lconf, lconf->tasks_run[i]) == lconf->msg.task_id) {
			else if (idx != -1) {
				lconf->tasks_run[idx] = lconf->tasks_run[i];
		// Check that task id is valid and running
		lconf->task_is_running[lconf->msg.task_id] = 0;

		t = lconf->tasks_all[lconf->msg.task_id];
		lconf->n_tasks_run--;
		/* If this was the last running task, fire stop_last too. */
		if (lconf->n_tasks_run == 0 && t->aux->stop_last)
			t->aux->stop_last(t);
/* Handle a start request for this core: msg.task_id == -1 starts every
 * task, otherwise a single task is inserted into tasks_run[] keeping
 * the list ordered by task id. start_first fires when the core goes
 * from zero running tasks to one; the task's own start hook fires at
 * the end.
 * NOTE(review): some structural lines (braces, `idx` declaration,
 * early returns, breaks) are not visible in this chunk — verify
 * against the full file before relying on exact control flow. */
static void msg_start(struct lcore_cfg *lconf)
	struct task_base *t = NULL;

	if (lconf->msg.task_id == -1) {
		/* task_id == -1 means "start all tasks on this core". */
		for (int i = 0; i < lconf->n_tasks_all; ++i) {
			t = lconf->tasks_run[i] = lconf->tasks_all[i];
			lconf->task_is_running[i] = 1;
			/* First task to run on this core: fire start_first once. */
			if (lconf->n_tasks_run == 0 && t->aux->start_first) {
				t->aux->start_first(t);
				lconf->n_tasks_run = 1;
		lconf->n_tasks_run = lconf->n_tasks_all;
	// Check that task id is valid
	if (lconf->msg.task_id >= lconf->n_tasks_all)

	if (lconf->n_tasks_run == 0) {
		/* Core currently idle: the requested task becomes the only
		 * entry in the run list. */
		t = lconf->tasks_run[0] = lconf->tasks_all[lconf->msg.task_id];
		lconf->n_tasks_run = 1;
		lconf->task_is_running[lconf->msg.task_id] = 1;

		if (t->aux->start_first)
			t->aux->start_first(t);
	/* Nothing to do when the task is already running. */
	if (lconf->task_is_running[lconf->msg.task_id])
	/* Walk tasks_run[] from the end, shifting entries with a larger
	 * task id up by one to make room at the sorted position. */
	for (int i = lconf->n_tasks_run - 1; i >= 0; --i) {
		idx = lconf_get_task_id(lconf, lconf->tasks_run[i]);
		if (idx == lconf->msg.task_id) {
			// We should not come here as checking earlier if task id is running...
			plog_warn("Unexpectedly get request to start task %d already running\n", idx);
		else if (idx > lconf->msg.task_id) {
			lconf->tasks_run[i + 1] = lconf->tasks_run[i];
			/* Slot at position i freed up: insert here. */
			lconf->tasks_run[i] = lconf->tasks_all[lconf->msg.task_id];
			lconf->n_tasks_run++;
		/* All existing entries have smaller ids: append after them. */
		lconf->tasks_run[i + 1] = lconf->tasks_all[lconf->msg.task_id];
		lconf->n_tasks_run++;
	lconf->task_is_running[lconf->msg.task_id] = 1;

	if (lconf->tasks_all[lconf->msg.task_id]->aux->start)
		lconf->tasks_all[lconf->msg.task_id]->aux->start(lconf->tasks_all[lconf->msg.task_id]);
/* Process the pending control message for this core (lconf->msg):
 * start/stop tasks, enable/disable packet dump or trace, and toggle
 * the RX/TX distribution and bandwidth accounting hooks. Clears the
 * request flag before returning.
 * NOTE(review): some structural lines (braces, breaks, `break`/`else`
 * paths, the declaration of `t` and the return value) are not visible
 * in this chunk — verify against the full file. */
int lconf_do_flags(struct lcore_cfg *lconf)
	/* A trace on a task that drops everything cannot map TX packets. */
	if ((lconf->msg.type == LCONF_MSG_TRACE) && (lconf->tasks_all[lconf->msg.task_id]->tx_pkt == tx_pkt_drop_all)) {
		/* We are asked to dump packets through command dump.
		 * This usually means map RX and TX packets before printing them.
		 * However we do not transmit the packets in this case => use the DUMP_RX function.
		 * This will prevent seeing the received packets also printed as TX[255] (= dropped)
		 */
		lconf->msg.type = LCONF_MSG_DUMP_RX;

	/* Dispatch on the message type. */
	switch (lconf->msg.type) {
	case LCONF_MSG_START:
	case LCONF_MSG_DUMP_RX:
	case LCONF_MSG_DUMP_TX:
		t = lconf->tasks_all[lconf->msg.task_id];

		/* msg.val is the number of packets to print. */
		if (lconf->msg.val) {
			if (lconf->msg.type == LCONF_MSG_DUMP ||
			    lconf->msg.type == LCONF_MSG_DUMP_RX) {
				t->aux->task_rt_dump.n_print_rx = lconf->msg.val;

				task_base_add_rx_pkt_function(t, rx_pkt_dump);

			if (lconf->msg.type == LCONF_MSG_DUMP ||
			    lconf->msg.type == LCONF_MSG_DUMP_TX) {
				t->aux->task_rt_dump.n_print_tx = lconf->msg.val;
				/* L3/NDP tasks chain through tx_pkt_l2: interpose the
				 * dump hook there; otherwise wrap tx_pkt directly. Save
				 * the original only if not already saved. */
				if ((t->tx_pkt == tx_pkt_l3) || (t->tx_pkt == tx_pkt_ndp)) {
					if (t->aux->tx_pkt_orig)
						t->aux->tx_pkt_l2 = t->aux->tx_pkt_orig;
					t->aux->tx_pkt_orig = t->aux->tx_pkt_l2;
					t->aux->tx_pkt_l2 = tx_pkt_dump;
					if (t->aux->tx_pkt_orig)
						t->tx_pkt = t->aux->tx_pkt_orig;
					t->aux->tx_pkt_orig = t->tx_pkt;
					t->tx_pkt = tx_pkt_dump;
	case LCONF_MSG_TRACE:
		t = lconf->tasks_all[lconf->msg.task_id];

		/* msg.val is the number of packets to trace. */
		if (lconf->msg.val) {
			/* Only trace RX when the task actually receives packets. */
			if (task_base_get_original_rx_pkt_function(t) != rx_pkt_dummy) {
				t->aux->task_rt_dump.n_trace = lconf->msg.val;
				task_base_add_rx_pkt_function(t, rx_pkt_trace);
				/* Interpose the trace hook on the TX path, same
				 * save/restore dance as for dump above. */
				if ((t->tx_pkt == tx_pkt_l3) || (t->tx_pkt == tx_pkt_ndp)) {
					if (t->aux->tx_pkt_orig)
						t->aux->tx_pkt_l2 = t->aux->tx_pkt_orig;
					t->aux->tx_pkt_orig = t->aux->tx_pkt_l2;
					t->aux->tx_pkt_l2 = tx_pkt_trace;
					if (t->aux->tx_pkt_orig)
						t->tx_pkt = t->aux->tx_pkt_orig;
					t->aux->tx_pkt_orig = t->tx_pkt;
					t->tx_pkt = tx_pkt_trace;
				/* Task has no real RX: fall back to TX dump only. */
				t->aux->task_rt_dump.n_print_tx = lconf->msg.val;
				if ((t->tx_pkt == tx_pkt_l3) || (t->tx_pkt == tx_pkt_ndp)) {
					if (t->aux->tx_pkt_orig)
						t->aux->tx_pkt_l2 = t->aux->tx_pkt_orig;
					t->aux->tx_pkt_orig = t->aux->tx_pkt_l2;
					t->aux->tx_pkt_l2 = tx_pkt_dump;
					if (t->aux->tx_pkt_orig)
						t->tx_pkt = t->aux->tx_pkt_orig;
					t->aux->tx_pkt_orig = t->tx_pkt;
					t->tx_pkt = tx_pkt_dump;
	case LCONF_MSG_RX_DISTR_START:
		/* Enable per-task RX distribution accounting on every task. */
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			t = lconf->tasks_all[task_id];
			task_base_add_rx_pkt_function(t, rx_pkt_distr);
			memset(t->aux->rx_bucket, 0, sizeof(t->aux->rx_bucket));
			lconf->flags |= LCONF_FLAG_RX_DISTR_ACTIVE;
	case LCONF_MSG_TX_DISTR_START:
		/* Enable per-task TX distribution accounting on every task. */
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			t = lconf->tasks_all[task_id];

			if ((t->tx_pkt == tx_pkt_l3) || (t->tx_pkt == tx_pkt_ndp)) {
				t->aux->tx_pkt_orig = t->aux->tx_pkt_l2;
				t->aux->tx_pkt_l2 = tx_pkt_distr;
				t->aux->tx_pkt_orig = t->tx_pkt;
				t->tx_pkt = tx_pkt_distr;
			memset(t->aux->tx_bucket, 0, sizeof(t->aux->tx_bucket));
			lconf->flags |= LCONF_FLAG_TX_DISTR_ACTIVE;
	case LCONF_MSG_RX_DISTR_STOP:
		/* Remove the RX distribution hook from every task. */
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			t = lconf->tasks_all[task_id];
			task_base_del_rx_pkt_function(t, rx_pkt_distr);
			lconf->flags &= ~LCONF_FLAG_RX_DISTR_ACTIVE;
	case LCONF_MSG_TX_DISTR_STOP:
		/* Restore the saved TX function on every task. */
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			t = lconf->tasks_all[task_id];
			if (t->aux->tx_pkt_orig) {
				if ((t->tx_pkt == tx_pkt_l3) || (t->tx_pkt == tx_pkt_ndp)) {
					t->aux->tx_pkt_l2 = t->aux->tx_pkt_orig;
					t->aux->tx_pkt_orig = NULL;
					t->tx_pkt = t->aux->tx_pkt_orig;
					t->aux->tx_pkt_orig = NULL;
					lconf->flags &= ~LCONF_FLAG_TX_DISTR_ACTIVE;
	case LCONF_MSG_RX_DISTR_RESET:
		/* Zero the RX distribution histograms. */
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			t = lconf->tasks_all[task_id];

			memset(t->aux->rx_bucket, 0, sizeof(t->aux->rx_bucket));
	case LCONF_MSG_TX_DISTR_RESET:
		/* Zero the TX distribution histograms. */
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			t = lconf->tasks_all[task_id];

			memset(t->aux->tx_bucket, 0, sizeof(t->aux->tx_bucket));
	case LCONF_MSG_RX_BW_START:
		/* Enable RX byte-counting on every task. */
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			t = lconf->tasks_all[task_id];
			task_base_add_rx_pkt_function(t, rx_pkt_bw);
			lconf->flags |= LCONF_FLAG_RX_BW_ACTIVE;
	case LCONF_MSG_RX_BW_STOP:
		/* Disable RX byte-counting on every task. */
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			t = lconf->tasks_all[task_id];
			task_base_del_rx_pkt_function(t, rx_pkt_bw);
			lconf->flags &= ~LCONF_FLAG_RX_BW_ACTIVE;
	case LCONF_MSG_TX_BW_START:
		/* Enable TX byte-counting by wrapping the TX function. */
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			t = lconf->tasks_all[task_id];

			if ((t->tx_pkt == tx_pkt_l3) || (t->tx_pkt == tx_pkt_ndp)) {
				t->aux->tx_pkt_orig = t->aux->tx_pkt_l2;
				t->aux->tx_pkt_l2 = tx_pkt_bw;
				t->aux->tx_pkt_orig = t->tx_pkt;
				t->tx_pkt = tx_pkt_bw;
			lconf->flags |= LCONF_FLAG_TX_BW_ACTIVE;
	case LCONF_MSG_TX_BW_STOP:
		/* Restore the saved TX function on every task. */
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			t = lconf->tasks_all[task_id];
			if (t->aux->tx_pkt_orig) {
				if ((t->tx_pkt == tx_pkt_l3) || (t->tx_pkt == tx_pkt_ndp)) {
					t->aux->tx_pkt_l2 = t->aux->tx_pkt_orig;
					t->aux->tx_pkt_orig = NULL;
					t->tx_pkt = t->aux->tx_pkt_orig;
					t->aux->tx_pkt_orig = NULL;
					lconf->flags &= ~LCONF_FLAG_TX_BW_ACTIVE;

	/* Message handled: clear the request flag for this core. */
	lconf_unset_req(lconf);
405 int lconf_get_task_id(const struct lcore_cfg *lconf, const struct task_base *task)
407 for (int i = 0; i < lconf->n_tasks_all; ++i) {
408 if (lconf->tasks_all[i] == task)
415 int lconf_task_is_running(const struct lcore_cfg *lconf, uint8_t task_id)
417 return lconf->task_is_running[task_id];