2 // Copyright (c) 2010-2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
19 #include "stats_task.h"
21 #include "prox_globals.h"
/* Per-lcore container holding one stats slot per possible task.
   (Definition appears truncated in this view; presumably closed with "};"
   — TODO confirm against the full file.) */
24 struct lcore_task_stats {
25 	struct task_stats task_stats[MAX_TASKS_PER_CORE];
/* Flag bits stored in task_stats.flags by stats_task_init():
   RX = task receives from a physical port, TX = task sends to one. */
28 #define TASK_STATS_RX 0x01
29 #define TASK_STATS_TX 0x02
/* Storage for every (lcore, task) pair's statistics. */
32 static struct lcore_task_stats lcore_task_stats_all[RTE_MAX_LCORE];
/* Flat list of the active tasks' stats, in update order (filled by
   stats_task_init(); TX-side tasks are placed before RX-side tasks so
   sampling cannot produce negative loss figures). */
33 static struct task_stats *task_stats_set[RTE_MAX_LCORE * MAX_TASKS_PER_CORE];
/* Number of valid entries in task_stats_set. */
34 static uint8_t nb_tasks_tot;
35 int stats_get_n_tasks_tot(void)
/* Accessor: pointer to the stats block of task task_id on core lcore_id.
   No bounds checking — callers must pass valid ids. */
40 struct task_stats *stats_get_task_stats(uint32_t lcore_id, uint32_t task_id)
42 	return &lcore_task_stats_all[lcore_id].task_stats[task_id];
/* Returns one of the task's two rotating samples. The index is computed by
   comparing l against the global last_stat toggle (l == last_stat selects
   sample[1], otherwise sample[0]) — the caller convention for l (last vs
   previous sample) is not visible here; verify against callers. */
45 struct task_stats_sample *stats_get_task_stats_sample(uint32_t lcore_id, uint32_t task_id, int l)
47 	return &lcore_task_stats_all[lcore_id].task_stats[task_id].sample[l == last_stat];
/* Zeroes the cumulative ("tot_*") counters of every registered task.
   Only the running totals are cleared; the raw samples are left alone
   and keep being refreshed by stats_task_update(). */
50 void stats_task_reset(void)
52 	struct task_stats *cur_task_stats;
54 	for (uint8_t task_id = 0; task_id < nb_tasks_tot; ++task_id) {
55 		cur_task_stats = task_stats_set[task_id];
56 		cur_task_stats->tot_rx_pkt_count = 0;
57 		cur_task_stats->tot_tx_pkt_count = 0;
58 		cur_task_stats->tot_drop_tx_fail = 0;
59 		cur_task_stats->tot_drop_discard = 0;
60 		cur_task_stats->tot_drop_handled = 0;
61 		cur_task_stats->tot_rx_non_dp = 0;
62 		cur_task_stats->tot_tx_non_dp = 0;
/* Cumulative RX packet count for one (lcore, task) pair. */
66 uint64_t stats_core_task_tot_rx(uint8_t lcore_id, uint8_t task_id)
68 	return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_rx_pkt_count;
/* Cumulative TX packet count for one (lcore, task) pair. */
71 uint64_t stats_core_task_tot_tx(uint8_t lcore_id, uint8_t task_id)
73 	return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_tx_pkt_count;
/* Cumulative drop count for one (lcore, task) pair: the sum of the three
   drop categories (TX failure, discarded, handled). */
76 uint64_t stats_core_task_tot_drop(uint8_t lcore_id, uint8_t task_id)
78 	return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_drop_tx_fail +
79 		lcore_task_stats_all[lcore_id].task_stats[task_id].tot_drop_discard +
80 		lcore_task_stats_all[lcore_id].task_stats[task_id].tot_drop_handled;
/* Cumulative count of non-dataplane packets transmitted by the task. */
83 uint64_t stats_core_task_tot_tx_non_dp(uint8_t lcore_id, uint8_t task_id)
85 	return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_tx_non_dp;
/* Cumulative count of non-dataplane packets received by the task. */
88 uint64_t stats_core_task_tot_rx_non_dp(uint8_t lcore_id, uint8_t task_id)
90 	return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_rx_non_dp;
/* TSC timestamp at which the task's most recent sample was taken. */
93 uint64_t stats_core_task_last_tsc(uint8_t lcore_id, uint8_t task_id)
95 	return lcore_task_stats_all[lcore_id].task_stats[task_id].sample[last_stat].tsc;
98 static void init_core_port(struct task_stats *ts, struct task_rt_stats *stats, uint8_t flags)
/* Folds the delta between the two most recent samples of every task into
   its cumulative "tot_*" counters. Must run after stats_task_update() has
   refreshed sample[last_stat]; sample[!last_stat] holds the prior snapshot. */
104 void stats_task_post_proc(void)
106 	for (uint8_t task_id = 0; task_id < nb_tasks_tot; ++task_id) {
107 		struct task_stats *cur_task_stats = task_stats_set[task_id];
108 		const struct task_stats_sample *last = &cur_task_stats->sample[last_stat];
109 		const struct task_stats_sample *prev = &cur_task_stats->sample[!last_stat];
111 		/* no total stats for empty loops */
112 		cur_task_stats->tot_rx_pkt_count += last->rx_pkt_count - prev->rx_pkt_count;
113 		cur_task_stats->tot_tx_pkt_count += last->tx_pkt_count - prev->tx_pkt_count;
114 		cur_task_stats->tot_drop_tx_fail += last->drop_tx_fail - prev->drop_tx_fail;
115 		cur_task_stats->tot_drop_discard += last->drop_discard - prev->drop_discard;
116 		cur_task_stats->tot_drop_handled += last->drop_handled - prev->drop_handled;
117 		cur_task_stats->tot_rx_non_dp += last->rx_non_dp - prev->rx_non_dp;
118 		cur_task_stats->tot_tx_non_dp += last->tx_non_dp - prev->tx_non_dp;
/* Takes a fresh snapshot of every task's live runtime counters into
   sample[last_stat]. The sample timestamp is the midpoint of the TSC read
   before and after the copy; each operand is shifted right by one before
   adding to avoid 64-bit overflow of the sum.
   NOTE(review): the assignment of `after` (a second rte_rdtsc() call after
   the counter copies) is not visible in this view — confirm it exists in
   the full file, otherwise `after` is read uninitialized. */
122 void stats_task_update(void)
124 	uint64_t before, after;
126 	for (uint8_t task_id = 0; task_id < nb_tasks_tot; ++task_id) {
127 		struct task_stats *cur_task_stats = task_stats_set[task_id];
128 		struct task_rt_stats *stats = cur_task_stats->stats;
129 		struct task_stats_sample *last = &cur_task_stats->sample[last_stat];
131 		/* Read TX first and RX second, in order to prevent displaying
132 		   a negative packet loss. Depending on the configuration
133 		   (when forwarding, for example), TX might be bigger than RX. */
134 		before = rte_rdtsc();
135 		last->tx_pkt_count = stats->tx_pkt_count;
136 		last->drop_tx_fail = stats->drop_tx_fail;
137 		last->drop_discard = stats->drop_discard;
138 		last->drop_handled = stats->drop_handled;
139 		last->rx_pkt_count = stats->rx_pkt_count;
140 		last->empty_cycles = stats->idle_cycles;
141 		last->tx_bytes = stats->tx_bytes;
142 		last->rx_bytes = stats->rx_bytes;
143 		last->drop_bytes = stats->drop_bytes;
144 		last->rx_non_dp = stats->rx_non_dp;
145 		last->tx_non_dp = stats->tx_non_dp;
147 		last->tsc = (before >> 1) + (after >> 1);
/* Sums host-level RX/TX packet totals across all tasks: only tasks flagged
   TASK_STATS_RX contribute to *rx and only TASK_STATS_TX tasks to *tx,
   i.e. only traffic crossing physical ports is counted. *tsc is set to the
   timestamp of the last task's most recent sample.
   NOTE(review): zero-initialization of *rx and *tx is not visible in this
   view (presumably in the elided lines) — confirm. Also note the final
   index underflows if nb_tasks_tot is 0; callers presumably only invoke
   this after stats_task_init() registered at least one task. */
151 void stats_task_get_host_rx_tx_packets(uint64_t *rx, uint64_t *tx, uint64_t *tsc)
153 	const struct task_stats *t;
158 	for (uint8_t task_id = 0; task_id < nb_tasks_tot; ++task_id) {
159 		t = task_stats_set[task_id];
161 		if (t->flags & TASK_STATS_RX)
162 			*rx += t->tot_rx_pkt_count;
164 		if (t->flags & TASK_STATS_TX)
165 			*tx += t->tot_tx_pkt_count;
168 	*tsc = task_stats_set[nb_tasks_tot - 1]->sample[last_stat].tsc;
171 /* Populate active_stats_set for stats reporting, the order of the
172    cores is important for gathering the most accurate statistics. TX
173    cores should be updated before RX cores (to prevent negative Loss
174    stats). The total number of tasks is saved in nb_tasks_tot. */
/* Four passes over all cores/tasks, classified by ring usage:
     1. no RX rings and no TX rings  -> task touches physical ports on both
        sides (flags RX|TX)
     2. RX rings only                -> task sends to physical ports (TX)
     3. TX rings only                -> task receives from physical ports (RX)
     4. both rings                   -> purely internal task (no flags)
   NOTE(review): lcore_id is presumably declared and reset (e.g. to -1)
   before each prox_core_next() loop in elided lines — confirm. */
175 void stats_task_init(void)
177 	struct lcore_cfg *lconf;
180 	/* add cores that are receiving from and sending to physical ports first */
182 	while(prox_core_next(&lcore_id, 0) == 0) {
183 		lconf = &lcore_cfg[lcore_id];
184 		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
185 			struct task_args *targ = &lconf->targs[task_id];
186 			struct task_rt_stats *stats = &lconf->tasks_all[task_id]->aux->stats;
187 			if (targ->nb_rxrings == 0 && targ->nb_txrings == 0) {
188 				struct task_stats *ts = &lcore_task_stats_all[lcore_id].task_stats[task_id];
190 				init_core_port(ts, stats, TASK_STATS_RX | TASK_STATS_TX);
191 				task_stats_set[nb_tasks_tot++] = ts;
196 	/* add cores that are sending to physical ports second */
198 	while(prox_core_next(&lcore_id, 0) == 0) {
199 		lconf = &lcore_cfg[lcore_id];
200 		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
201 			struct task_args *targ = &lconf->targs[task_id];
202 			struct task_rt_stats *stats = &lconf->tasks_all[task_id]->aux->stats;
203 			if (targ->nb_rxrings != 0 && targ->nb_txrings == 0) {
204 				struct task_stats *ts = &lcore_task_stats_all[lcore_id].task_stats[task_id];
206 				init_core_port(ts, stats, TASK_STATS_TX);
207 				task_stats_set[nb_tasks_tot++] = ts;
212 	/* add cores that are receiving from physical ports third */
214 	while(prox_core_next(&lcore_id, 0) == 0) {
215 		lconf = &lcore_cfg[lcore_id];
216 		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
217 			struct task_args *targ = &lconf->targs[task_id];
218 			struct task_rt_stats *stats = &lconf->tasks_all[task_id]->aux->stats;
219 			if (targ->nb_rxrings == 0 && targ->nb_txrings != 0) {
220 				struct task_stats *ts = &lcore_task_stats_all[lcore_id].task_stats[task_id];
222 				init_core_port(ts, stats, TASK_STATS_RX);
223 				task_stats_set[nb_tasks_tot++] = ts;
228 	/* add cores that are working internally (no physical ports attached) */
230 	while(prox_core_next(&lcore_id, 0) == 0) {
231 		lconf = &lcore_cfg[lcore_id];
232 		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
233 			struct task_args *targ = &lconf->targs[task_id];
234 			struct task_rt_stats *stats = &lconf->tasks_all[task_id]->aux->stats;
235 			if (targ->nb_rxrings != 0 && targ->nb_txrings != 0) {
236 				struct task_stats *ts = &lcore_task_stats_all[lcore_id].task_stats[task_id];
238 				init_core_port(ts, stats, 0);
239 				task_stats_set[nb_tasks_tot++] = ts;