2 // Copyright (c) 2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
#include <stdint.h>
#include <string.h>
#include <errno.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_pipeline.h>

#include "pipeline_common_be.h"
/*
 * Pipeline run macros.
 *
 * With headroom statistics collection enabled, each pipeline run is
 * bracketed with TSC reads; an iteration that processes zero packets is
 * counted as idle time and its cycles are credited to the owning thread's
 * headroom counter. With collection disabled, the macros collapse to the
 * bare run calls with zero measurement overhead.
 *
 * Multi-statement bodies are wrapped in do { } while (0) so the macros
 * behave as single statements (safe inside unbraced if/else).
 */
#if APP_THREAD_HEADROOM_STATS_COLLECT

#define PIPELINE_RUN_REGULAR(thread, pipeline)			\
do {								\
	uint64_t t0 = rte_rdtsc_precise();			\
	int n_pkts = rte_pipeline_run(pipeline->p);		\
								\
	if (n_pkts == 0) {					\
		uint64_t t1 = rte_rdtsc_precise();		\
								\
		thread->headroom_cycles += t1 - t0;		\
	}							\
} while (0)

#define PIPELINE_RUN_CUSTOM(thread, data)			\
do {								\
	uint64_t t0 = rte_rdtsc_precise();			\
	int n_pkts = data->f_run(data->be);			\
								\
	if (n_pkts == 0) {					\
		uint64_t t1 = rte_rdtsc_precise();		\
								\
		thread->headroom_cycles += t1 - t0;		\
	}							\
} while (0)

#else

#define PIPELINE_RUN_REGULAR(thread, pipeline)			\
	rte_pipeline_run(pipeline->p)

#define PIPELINE_RUN_CUSTOM(thread, data)			\
	data->f_run(data->be)

#endif
62 uint32_t exit_app_thread = 0;
65 thread_msg_recv(struct rte_ring *r)
68 int status = rte_ring_sc_dequeue(r, &msg);
/**
 * Blocking send of one message on a thread message queue.
 *
 * Spins until the single-producer enqueue succeeds; only the
 * queue-full condition (-ENOBUFS) is retried.
 *
 * @param r
 *   Message ring (this thread is the single producer).
 * @param msg
 *   Message to send; ownership passes to the consumer.
 */
static void
thread_msg_send(struct rte_ring *r,
	void *msg)
{
	int status;

	do {
		status = rte_ring_sp_enqueue(r, msg);
	} while (status == -ENOBUFS);
}
88 thread_pipeline_enable(struct app_thread_data *t,
89 struct thread_pipeline_enable_msg_req *req)
91 struct app_thread_pipeline_data *p;
93 if (req->f_run == NULL) {
94 if (t->n_regular >= APP_MAX_THREAD_PIPELINES)
97 if (t->n_custom >= APP_MAX_THREAD_PIPELINES)
101 p = (req->f_run == NULL) ?
102 &t->regular[t->n_regular] :
103 &t->custom[t->n_custom];
105 p->pipeline_id = req->pipeline_id;
107 p->f_run = req->f_run;
108 p->f_timer = req->f_timer;
109 p->timer_period = req->timer_period;
112 if (req->f_run == NULL)
121 thread_pipeline_disable(struct app_thread_data *t,
122 struct thread_pipeline_disable_msg_req *req)
124 uint32_t n_regular = RTE_MIN(t->n_regular, RTE_DIM(t->regular));
125 uint32_t n_custom = RTE_MIN(t->n_custom, RTE_DIM(t->custom));
128 /* search regular pipelines of current thread */
129 for (i = 0; i < n_regular; i++) {
130 if (t->regular[i].pipeline_id != req->pipeline_id)
133 if (i < n_regular - 1)
134 memcpy(&t->regular[i],
136 (n_regular - 1 - i) * sizeof(struct app_thread_pipeline_data));
139 t->n_regular = n_regular;
144 /* search custom pipelines of current thread */
145 for (i = 0; i < n_custom; i++) {
146 if (t->custom[i].pipeline_id != req->pipeline_id)
149 if (i < n_custom - 1)
150 memcpy(&t->custom[i],
152 (n_custom - 1 - i) * sizeof(struct app_thread_pipeline_data));
155 t->n_custom = n_custom;
160 /* return if pipeline not found */
165 thread_msg_req_handle(struct app_thread_data *t)
168 struct thread_msg_req *req;
169 struct thread_msg_rsp *rsp;
171 msg_ptr = thread_msg_recv(t->msgq_in);
177 case THREAD_MSG_REQ_PIPELINE_ENABLE: {
178 rsp->status = thread_pipeline_enable(t,
179 (struct thread_pipeline_enable_msg_req *) req);
180 thread_msg_send(t->msgq_out, rsp);
184 case THREAD_MSG_REQ_PIPELINE_DISABLE: {
185 rsp->status = thread_pipeline_disable(t,
186 (struct thread_pipeline_disable_msg_req *) req);
187 thread_msg_send(t->msgq_out, rsp);
191 case THREAD_MSG_REQ_HEADROOM_READ: {
192 struct thread_headroom_read_msg_rsp *rsp =
193 (struct thread_headroom_read_msg_rsp *)
196 rsp->headroom_ratio = t->headroom_ratio;
198 thread_msg_send(t->msgq_out, rsp);
209 thread_headroom_update(struct app_thread_data *t, uint64_t time)
211 uint64_t time_diff = time - t->headroom_time;
214 ((double) t->headroom_cycles) / ((double) time_diff);
216 t->headroom_cycles = 0;
217 t->headroom_time = rte_rdtsc_precise();
221 app_thread(void *arg)
223 struct app_params *app = (struct app_params *) arg;
224 uint32_t core_id = rte_lcore_id(), i, j;
225 struct app_thread_data *t = &app->thread_data[core_id];
228 uint32_t n_regular = RTE_MIN(t->n_regular, RTE_DIM(t->regular));
229 uint32_t n_custom = RTE_MIN(t->n_custom, RTE_DIM(t->custom));
234 /* Run regular pipelines */
235 for (j = 0; j < n_regular; j++) {
236 struct app_thread_pipeline_data *data = &t->regular[j];
237 struct pipeline *p = data->be;
239 PIPELINE_RUN_REGULAR(t, p);
242 /* Run custom pipelines */
243 for (j = 0; j < n_custom; j++) {
244 struct app_thread_pipeline_data *data = &t->custom[j];
246 PIPELINE_RUN_CUSTOM(t, data);
250 if ((i & 0xF) == 0) {
251 uint64_t time = rte_get_tsc_cycles();
252 uint64_t t_deadline = UINT64_MAX;
254 if (time < t->deadline)
257 /* Timer for regular pipelines */
258 for (j = 0; j < n_regular; j++) {
259 struct app_thread_pipeline_data *data =
261 uint64_t p_deadline = data->deadline;
263 if (p_deadline <= time) {
264 data->f_timer(data->be);
265 p_deadline = time + data->timer_period;
266 data->deadline = p_deadline;
269 if (p_deadline < t_deadline)
270 t_deadline = p_deadline;
273 /* Timer for custom pipelines */
274 for (j = 0; j < n_custom; j++) {
275 struct app_thread_pipeline_data *data =
277 uint64_t p_deadline = data->deadline;
279 if (p_deadline <= time) {
280 data->f_timer(data->be);
281 p_deadline = time + data->timer_period;
282 data->deadline = p_deadline;
285 if (p_deadline < t_deadline)
286 t_deadline = p_deadline;
289 /* Timer for thread message request */
291 uint64_t deadline = t->thread_req_deadline;
293 if (deadline <= time) {
294 thread_msg_req_handle(t);
295 thread_headroom_update(t, time);
296 deadline = time + t->timer_period;
297 t->thread_req_deadline = deadline;
300 if (deadline < t_deadline)
301 t_deadline = deadline;
305 t->deadline = t_deadline;