2 // Copyright (c) 2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
17 #include <rte_common.h>
18 #include <rte_cycles.h>
19 #include <rte_pipeline.h>
21 #include "pipeline_common_be.h"
/*
 * Pipeline run wrappers, in two build flavors selected by
 * APP_THREAD_HEADROOM_STATS_COLLECT:
 *  - stats flavor: bracket each run with rte_rdtsc_precise() and
 *    accumulate the elapsed TSC cycles into thread->headroom_cycles
 *    (consumed later by thread_headroom_update());
 *  - plain flavor: just run the pipeline with no timing overhead.
 * REGULAR runs a librte_pipeline instance via rte_pipeline_run();
 * CUSTOM invokes the pipeline's own run callback (data->f_run).
 * NOTE(review): this extract is missing lines -- the do/while(0)
 * framing, the consumers of n_pkts, the #else/#endif of the
 * conditional and the body of the no-stats PIPELINE_RUN_CUSTOM are
 * not visible; verify against the full file.
 */
25 #if APP_THREAD_HEADROOM_STATS_COLLECT
/* Stats-collecting variants: timestamp around the run. */
27 #define PIPELINE_RUN_REGULAR(thread, pipeline) \
29 uint64_t t0 = rte_rdtsc_precise(); \
30 int n_pkts = rte_pipeline_run(pipeline->p); \
33 uint64_t t1 = rte_rdtsc_precise(); \
35 thread->headroom_cycles += t1 - t0; \
40 #define PIPELINE_RUN_CUSTOM(thread, data) \
42 uint64_t t0 = rte_rdtsc_precise(); \
43 int n_pkts = data->f_run(data->be); \
46 uint64_t t1 = rte_rdtsc_precise(); \
48 thread->headroom_cycles += t1 - t0; \
54 #define PIPELINE_RUN_REGULAR(thread, pipeline) \
55 rte_pipeline_run(pipeline->p)
/* Plain (no-stats) custom variant; body not visible in this extract. */
57 #define PIPELINE_RUN_CUSTOM(thread, data) \
/*
 * thread_msg_recv() - pop the next control-plane message from a
 * thread's input ring, using the single-consumer dequeue variant
 * (only the owning thread reads this ring).
 * NOTE(review): extract is incomplete -- the return type, the msg
 * local, the retry/return logic and the function braces are not
 * visible here.
 */
63 thread_msg_recv(struct rte_ring *r)
66 int status = rte_ring_sc_dequeue(r, &msg);
/*
 * thread_msg_send() - push a response message onto a thread's output
 * ring, busy-retrying the single-producer enqueue for as long as the
 * ring is full (-ENOBUFS).
 * NOTE(review): extract is incomplete -- the remaining parameter
 * list, the do { opener and the function braces are not visible.
 */
75 thread_msg_send(struct rte_ring *r,
81 status = rte_ring_sp_enqueue(r, msg);
82 } while (status == -ENOBUFS);
/*
 * thread_pipeline_enable() - attach the pipeline described by *req to
 * thread t.  Pipelines with no custom run callback (req->f_run ==
 * NULL) go into t->regular[], all others into t->custom[]; each array
 * is bounded by APP_MAX_THREAD_PIPELINES.  The request's pipeline id,
 * run/timer callbacks and timer period are copied into the chosen
 * slot.
 * NOTE(review): extract is incomplete -- the error returns of the
 * capacity checks, the copy of the backend pointer / deadline setup,
 * the counter increments guarded by the trailing f_run test and the
 * success return are not visible.
 */
86 thread_pipeline_enable(struct app_thread_data *t,
87 struct thread_pipeline_enable_msg_req *req)
89 struct app_thread_pipeline_data *p;
/* Capacity check against the array matching the pipeline flavor. */
91 if (req->f_run == NULL) {
92 if (t->n_regular >= APP_MAX_THREAD_PIPELINES)
95 if (t->n_custom >= APP_MAX_THREAD_PIPELINES)
/* Select the first free slot in the flavor-specific array. */
99 p = (req->f_run == NULL) ?
100 &t->regular[t->n_regular] :
101 &t->custom[t->n_custom];
103 p->pipeline_id = req->pipeline_id;
105 p->f_run = req->f_run;
106 p->f_timer = req->f_timer;
107 p->timer_period = req->timer_period;
/* Presumably bumps the matching n_regular/n_custom counter here
 * (branch bodies not visible in this extract). */
110 if (req->f_run == NULL)
/*
 * thread_pipeline_disable() - detach the pipeline with
 * req->pipeline_id from thread t.  Searches t->regular[] first, then
 * t->custom[]; on a match the tail of the array is copied down one
 * slot to keep entries dense, and the flavor's element count is
 * stored back.
 * NOTE(review): extract is incomplete -- the loop-body
 * continue/break lines, the memcpy source operand, the
 * n_regular--/n_custom-- decrements and the return statements are
 * not visible.
 * NOTE(review): compacting the array shifts overlapping memory;
 * memmove() is the well-defined choice over memcpy() here -- flag
 * for the full file.
 */
119 thread_pipeline_disable(struct app_thread_data *t,
120 struct thread_pipeline_disable_msg_req *req)
/* Clamp counts defensively to the arrays' declared capacities. */
122 uint32_t n_regular = RTE_MIN(t->n_regular, RTE_DIM(t->regular));
123 uint32_t n_custom = RTE_MIN(t->n_custom, RTE_DIM(t->custom));
126 /* search regular pipelines of current thread */
127 for (i = 0; i < n_regular; i++) {
128 if (t->regular[i].pipeline_id != req->pipeline_id)
/* Found: close the gap unless the match is the last element. */
131 if (i < n_regular - 1)
132 memcpy(&t->regular[i],
134 (n_regular - 1 - i) * sizeof(struct app_thread_pipeline_data));
137 t->n_regular = n_regular;
142 /* search custom pipelines of current thread */
143 for (i = 0; i < n_custom; i++) {
144 if (t->custom[i].pipeline_id != req->pipeline_id)
147 if (i < n_custom - 1)
148 memcpy(&t->custom[i],
150 (n_custom - 1 - i) * sizeof(struct app_thread_pipeline_data))
153 t->n_custom = n_custom;
158 /* return if pipeline not found */
/*
 * thread_msg_req_handle() - receive one control-plane request from
 * t->msgq_in via thread_msg_recv() and dispatch on its type:
 * pipeline enable, pipeline disable, or headroom read.  Each arm
 * fills a response and sends it back on t->msgq_out via
 * thread_msg_send().
 * NOTE(review): extract is incomplete -- the msg_ptr declaration,
 * the req/rsp initialization from msg_ptr, the switch header, the
 * break statements, the default arm and the return are not visible.
 */
163 thread_msg_req_handle(struct app_thread_data *t)
166 struct thread_msg_req *req;
167 struct thread_msg_rsp *rsp;
169 msg_ptr = thread_msg_recv(t->msgq_in);
175 case THREAD_MSG_REQ_PIPELINE_ENABLE: {
176 rsp->status = thread_pipeline_enable(t,
177 (struct thread_pipeline_enable_msg_req *) req);
178 thread_msg_send(t->msgq_out, rsp);
182 case THREAD_MSG_REQ_PIPELINE_DISABLE: {
183 rsp->status = thread_pipeline_disable(t,
184 (struct thread_pipeline_disable_msg_req *) req);
185 thread_msg_send(t->msgq_out, rsp);
189 case THREAD_MSG_REQ_HEADROOM_READ: {
/* Inner rsp deliberately shadows the outer one with the
 * headroom-specific response layout. */
190 struct thread_headroom_read_msg_rsp *rsp =
191 (struct thread_headroom_read_msg_rsp *)
194 rsp->headroom_ratio = t->headroom_ratio;
196 thread_msg_send(t->msgq_out, rsp);
/*
 * thread_headroom_update() - recompute the thread's headroom ratio:
 * TSC cycles spent inside pipeline runs (t->headroom_cycles, fed by
 * the PIPELINE_RUN_* stats macros) divided by the wall-clock cycles
 * elapsed since the previous update.  Afterwards the cycle
 * accumulator is cleared and a new measurement window is started.
 * NOTE(review): extract is incomplete -- the assignment target of
 * the ratio expression (presumably t->headroom_ratio, read by the
 * headroom-read message handler) and the function braces are not
 * visible.
 */
207 thread_headroom_update(struct app_thread_data *t, uint64_t time)
209 uint64_t time_diff = time - t->headroom_time;
212 ((double) t->headroom_cycles) / ((double) time_diff);
214 t->headroom_cycles = 0;
215 t->headroom_time = rte_rdtsc_precise();
/*
 * app_thread() - per-lcore data-plane loop.  The lcore's
 * app_thread_data is looked up by core id; each iteration runs all
 * regular pipelines (librte_pipeline) and all custom pipelines
 * (f_run callbacks), and every 16th iteration ((i & 0xF) == 0)
 * services timers: per-pipeline f_timer callbacks whose deadlines
 * have expired, plus the thread-level timer that handles control
 * messages and refreshes the headroom estimate.  t->deadline caches
 * the earliest upcoming deadline so the common case can skip the
 * timer work cheaply.
 * NOTE(review): extract is incomplete -- the outer loop header and
 * the i counter increment, the early-out after the t->deadline
 * comparison, the array operands of the timer loops and the closing
 * braces are not visible.
 */
219 app_thread(void *arg)
221 struct app_params *app = (struct app_params *) arg;
222 uint32_t core_id = rte_lcore_id(), i, j;
223 struct app_thread_data *t = &app->thread_data[core_id];
/* Clamp counts defensively to the arrays' declared capacities. */
226 uint32_t n_regular = RTE_MIN(t->n_regular, RTE_DIM(t->regular));
227 uint32_t n_custom = RTE_MIN(t->n_custom, RTE_DIM(t->custom));
229 /* Run regular pipelines */
230 for (j = 0; j < n_regular; j++) {
231 struct app_thread_pipeline_data *data = &t->regular[j];
232 struct pipeline *p = data->be;
234 PIPELINE_RUN_REGULAR(t, p);
237 /* Run custom pipelines */
238 for (j = 0; j < n_custom; j++) {
239 struct app_thread_pipeline_data *data = &t->custom[j];
241 PIPELINE_RUN_CUSTOM(t, data);
/* Timer management, amortized to one check per 16 iterations. */
245 if ((i & 0xF) == 0) {
246 uint64_t time = rte_get_tsc_cycles();
247 uint64_t t_deadline = UINT64_MAX;
/* Nothing due yet: presumably skips the timer work (the continue
 * is not visible in this extract). */
249 if (time < t->deadline)
252 /* Timer for regular pipelines */
253 for (j = 0; j < n_regular; j++) {
254 struct app_thread_pipeline_data *data =
256 uint64_t p_deadline = data->deadline;
/* Expired: fire the callback and re-arm one period ahead. */
258 if (p_deadline <= time) {
259 data->f_timer(data->be);
260 p_deadline = time + data->timer_period;
261 data->deadline = p_deadline;
/* Track the earliest deadline across all timers. */
264 if (p_deadline < t_deadline)
265 t_deadline = p_deadline;
268 /* Timer for custom pipelines */
269 for (j = 0; j < n_custom; j++) {
270 struct app_thread_pipeline_data *data =
272 uint64_t p_deadline = data->deadline;
274 if (p_deadline <= time) {
275 data->f_timer(data->be);
276 p_deadline = time + data->timer_period;
277 data->deadline = p_deadline;
280 if (p_deadline < t_deadline)
281 t_deadline = p_deadline;
284 /* Timer for thread message request */
286 uint64_t deadline = t->thread_req_deadline;
288 if (deadline <= time) {
/* Service control messages, then refresh the headroom ratio. */
289 thread_msg_req_handle(t);
290 thread_headroom_update(t, time);
291 deadline = time + t->timer_period;
292 t->thread_req_deadline = deadline;
295 if (deadline < t_deadline)
296 t_deadline = deadline;
/* Cache the earliest deadline for the fast-path check above. */
300 t->deadline = t_deadline;