// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <rte_cycles.h>
#include <rte_table_hash.h>

#include "thread_generic.h"
#include "lconf.h"
#include "defines.h"
#include "hash_entry_types.h"
#include "hash_utils.h"
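
/* A tsc_task schedules periodic housekeeping on this lcore: .tsc is the
 * TSC deadline (in rte_rdtsc() cycles) at which the callback runs, and
 * the callback returns the number of cycles until it should run again. */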
struct tsc_task {
	uint64_t tsc;
	uint64_t (* tsc_task)(struct lcore_cfg *lconf);
};
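
/* Flush all queued packets of every task on this core; rescheduled
 * every DRAIN_TIMEOUT cycles. */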
static uint64_t tsc_drain(struct lcore_cfg *lconf)
{
	lconf_flush_all_queues(lconf);
	return DRAIN_TIMEOUT;
}
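
/* Handle pending lconf requests (configuration/flag changes). Returns -2
 * when the set of running tasks may have changed, so the main loop reloads
 * it; otherwise reschedules itself after TERM_TIMEOUT cycles. */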
static uint64_t tsc_term(struct lcore_cfg *lconf)
{
	if (lconf_is_req(lconf) && lconf_do_flags(lconf)) {
		lconf_flush_all_queues(lconf);
		return -2;
	}
	return TERM_TIMEOUT;
}
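
/* Invoke the configured periodic callback and reschedule it after
 * period_timeout cycles. */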
static uint64_t tsc_period(struct lcore_cfg *lconf)
{
	lconf->period_func(lconf->period_data);
	return lconf->period_timeout;
}
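
/* Drain the per-task control rings and hand the dequeued messages
 * (ctrl_rings_m) or mbufs (ctrl_rings_p) to the registered handlers. */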
static uint64_t tsc_ctrl(struct lcore_cfg *lconf)
{
	const uint8_t n_tasks_all = lconf->n_tasks_all;
	void *msgs[MAX_RING_BURST];
	uint16_t n_msgs;

	for (uint8_t task_id = 0; task_id < n_tasks_all; ++task_id) {
		if (lconf->ctrl_rings_m[task_id] && lconf->ctrl_func_m[task_id]) {
#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1)
			n_msgs = rte_ring_sc_dequeue_burst(lconf->ctrl_rings_m[task_id], msgs, MAX_RING_BURST);
#else
			n_msgs = rte_ring_sc_dequeue_burst(lconf->ctrl_rings_m[task_id], msgs, MAX_RING_BURST, NULL);
#endif
			if (n_msgs) {
				lconf->ctrl_func_m[task_id](lconf->tasks_all[task_id], msgs, n_msgs);
			}
		}
		if (lconf->ctrl_rings_p[task_id] && lconf->ctrl_func_p[task_id]) {
#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1)
			n_msgs = rte_ring_sc_dequeue_burst(lconf->ctrl_rings_p[task_id], msgs, MAX_RING_BURST);
#else
			n_msgs = rte_ring_sc_dequeue_burst(lconf->ctrl_rings_p[task_id], msgs, MAX_RING_BURST, NULL);
#endif
			if (n_msgs) {
				lconf->ctrl_func_p[task_id](lconf->tasks_all[task_id], (struct rte_mbuf **)msgs, n_msgs);
			}
		}
	}
	return lconf->ctrl_timeout;
}
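
/* Main loop of a generic worker lcore: runs whichever tsc_task is due
 * (termination/requests, queue draining, periodic and control-ring work)
 * and then polls every running task for packets. */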
int thread_generic(struct lcore_cfg *lconf)
{
	struct task_base *tasks[MAX_TASKS_PER_CORE];
	int next[MAX_TASKS_PER_CORE] = {0};
	struct rte_mbuf **mbufs;
	uint64_t cur_tsc = rte_rdtsc();
	uint8_t zero_rx[MAX_TASKS_PER_CORE] = {0};
	struct tsc_task tsc_tasks[] = {
		{.tsc = cur_tsc, .tsc_task = tsc_term},
		{.tsc = cur_tsc + DRAIN_TIMEOUT, .tsc_task = tsc_drain},
		{.tsc = -1},
		{.tsc = -1},
		{.tsc = -1},
	};
	uint8_t n_tasks_run = lconf->n_tasks_run;
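
	/* Slots 2 and 3 stay disabled (deadline UINT64_MAX) unless a periodic
	 * callback or control-ring handlers are configured below. */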
	if (lconf->period_func) {
		tsc_tasks[2].tsc = cur_tsc + lconf->period_timeout;
		tsc_tasks[2].tsc_task = tsc_period;
	}

	for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
		if (lconf->ctrl_func_m[task_id]) {
			tsc_tasks[3].tsc = cur_tsc + lconf->ctrl_timeout;
			tsc_tasks[3].tsc_task = tsc_ctrl;
			break;
		}
		if (lconf->ctrl_func_p[task_id]) {
			tsc_tasks[3].tsc = cur_tsc + lconf->ctrl_timeout;
			tsc_tasks[3].tsc_task = tsc_ctrl;
			break;
		}
	}
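
	/* Order tsc_tasks by their first deadline; unused slots keep
	 * UINT64_MAX and therefore sort to the back. */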
	for (size_t i = 0; i < sizeof(tsc_tasks)/sizeof(tsc_tasks[0]); ++i) {
		for (size_t j = i + 1; j < sizeof(tsc_tasks)/sizeof(tsc_tasks[0]); ++j) {
			if (tsc_tasks[i].tsc > tsc_tasks[j].tsc) {
				struct tsc_task tmp = tsc_tasks[i];

				tsc_tasks[i] = tsc_tasks[j];
				tsc_tasks[j] = tmp;
			}
		}
	}
	struct tsc_task next_tsc = tsc_tasks[0];
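
	/* Endless polling loop: run any tsc_task whose deadline has passed,
	 * then give each running task one chance to receive and handle packets. */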
	for (;;) {
		cur_tsc = rte_rdtsc();
		/* Keep the scheduled tsc_tasks sorted, earliest deadline
		   first. A linear scan moves entries that are due earlier to
		   the front of the list. In most cases there is one
		   high-frequency tsc_task, so the task that just ran is
		   usually rescheduled straight back to the front. If many
		   tsc_tasks are ever needed, this should be replaced with a
		   priority queue (heap). */
		if (unlikely(cur_tsc >= next_tsc.tsc)) {
			uint64_t resched_diff = tsc_tasks[0].tsc_task(lconf);

			if (resched_diff == (uint64_t)-2) {
				n_tasks_run = lconf->n_tasks_run;
				if (!n_tasks_run)
					return 0;
				for (int i = 0; i < lconf->n_tasks_run; ++i) {
					tasks[i] = lconf->tasks_run[i];
					uint8_t task_id = lconf_get_task_id(lconf, tasks[i]);
					if (lconf->targs[task_id].task_init->flag_features & TASK_FEATURE_ZERO_RX)
						zero_rx[task_id] = 1;
				}
			}

			uint64_t new_tsc = tsc_tasks[0].tsc + resched_diff;
			tsc_tasks[0].tsc = new_tsc;
			next_tsc.tsc = new_tsc;
			/* Re-insert the task that just ran at its new deadline. */
			for (size_t i = 1; i < sizeof(tsc_tasks)/sizeof(tsc_tasks[0]); ++i) {
				if (new_tsc < tsc_tasks[i].tsc) {
					if (i > 1) {
						tsc_tasks[i - 1] = next_tsc;
						next_tsc = tsc_tasks[0];
					}
					break;
				}
				else
					tsc_tasks[i - 1] = tsc_tasks[i];
			}
		}
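
		/* Give each running task one receive/handle iteration. A task whose
		 * previous handle_bulk() reported it is still busy (next[task_id] != 0)
		 * is skipped once, unless it receives from an optimized ring. */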
		uint16_t nb_rx;
		for (uint8_t task_id = 0; task_id < n_tasks_run; ++task_id) {
			struct task_base *t = tasks[task_id];
			struct task_args *targ = &lconf->targs[task_id];
			// Do not skip a task receiving packets from an optimized ring,
			// as the transmitting task expects such a receiving task to always
			// run and consume the transmitted packets.
			if (unlikely(next[task_id] && (targ->tx_opt_ring_task == NULL))) {
				// plogx_info("task %d is too busy\n", task_id);
				next[task_id] = 0;
			} else {
				nb_rx = t->rx_pkt(t, &mbufs);
				if (likely(nb_rx || zero_rx[task_id])) {
					next[task_id] = t->handle_bulk(t, mbufs, nb_rx);