// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
19 #include "stats_task.h"
21 #include "prox_globals.h"
24 struct lcore_task_stats {
25 struct task_stats task_stats[MAX_TASKS_PER_CORE];
28 #define TASK_STATS_RX 0x01
29 #define TASK_STATS_TX 0x02
32 static struct lcore_task_stats lcore_task_stats_all[RTE_MAX_LCORE];
33 static struct task_stats *task_stats_set[RTE_MAX_LCORE * MAX_TASKS_PER_CORE];
34 static uint8_t nb_tasks_tot;
35 int stats_get_n_tasks_tot(void)
40 struct task_stats *stats_get_task_stats(uint32_t lcore_id, uint32_t task_id)
42 return &lcore_task_stats_all[lcore_id].task_stats[task_id];
45 struct task_stats_sample *stats_get_task_stats_sample(uint32_t lcore_id, uint32_t task_id, int l)
47 return &lcore_task_stats_all[lcore_id].task_stats[task_id].sample[l == last_stat];
50 void stats_task_reset(void)
52 struct task_stats *cur_task_stats;
54 for (uint8_t task_id = 0; task_id < nb_tasks_tot; ++task_id) {
55 cur_task_stats = task_stats_set[task_id];
56 cur_task_stats->tot_rx_pkt_count = 0;
57 cur_task_stats->tot_tx_pkt_count = 0;
58 cur_task_stats->tot_drop_tx_fail = 0;
59 cur_task_stats->tot_drop_discard = 0;
60 cur_task_stats->tot_drop_handled = 0;
61 cur_task_stats->tot_rx_non_dp = 0;
62 cur_task_stats->tot_tx_non_dp = 0;
66 uint64_t stats_core_task_tot_rx(uint8_t lcore_id, uint8_t task_id)
68 return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_rx_pkt_count;
71 uint64_t stats_core_task_tot_tx(uint8_t lcore_id, uint8_t task_id)
73 return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_tx_pkt_count;
76 uint64_t stats_core_task_tot_tx_fail(uint8_t lcore_id, uint8_t task_id)
78 return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_drop_tx_fail;
81 uint64_t stats_core_task_tot_drop(uint8_t lcore_id, uint8_t task_id)
83 return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_drop_tx_fail +
84 lcore_task_stats_all[lcore_id].task_stats[task_id].tot_drop_discard +
85 lcore_task_stats_all[lcore_id].task_stats[task_id].tot_drop_handled;
88 uint64_t stats_core_task_tot_tx_non_dp(uint8_t lcore_id, uint8_t task_id)
90 return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_tx_non_dp;
93 uint64_t stats_core_task_tot_rx_non_dp(uint8_t lcore_id, uint8_t task_id)
95 return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_rx_non_dp;
98 uint64_t stats_core_task_last_tsc(uint8_t lcore_id, uint8_t task_id)
100 return lcore_task_stats_all[lcore_id].task_stats[task_id].sample[last_stat].tsc;
103 static void init_core_port(struct task_stats *ts, struct task_rt_stats *stats, uint8_t flags)
109 void stats_task_post_proc(void)
111 for (uint8_t task_id = 0; task_id < nb_tasks_tot; ++task_id) {
112 struct task_stats *cur_task_stats = task_stats_set[task_id];
113 const struct task_stats_sample *last = &cur_task_stats->sample[last_stat];
114 const struct task_stats_sample *prev = &cur_task_stats->sample[!last_stat];
116 /* no total stats for empty loops */
117 cur_task_stats->tot_rx_pkt_count += last->rx_pkt_count - prev->rx_pkt_count;
118 cur_task_stats->tot_tx_pkt_count += last->tx_pkt_count - prev->tx_pkt_count;
119 cur_task_stats->tot_drop_tx_fail += last->drop_tx_fail - prev->drop_tx_fail;
120 cur_task_stats->tot_drop_discard += last->drop_discard - prev->drop_discard;
121 cur_task_stats->tot_drop_handled += last->drop_handled - prev->drop_handled;
122 cur_task_stats->tot_rx_non_dp += last->rx_non_dp - prev->rx_non_dp;
123 cur_task_stats->tot_tx_non_dp += last->tx_non_dp - prev->tx_non_dp;
127 void stats_task_update(void)
129 uint64_t before, after;
131 for (uint8_t task_id = 0; task_id < nb_tasks_tot; ++task_id) {
132 struct task_stats *cur_task_stats = task_stats_set[task_id];
133 struct task_rt_stats *stats = cur_task_stats->stats;
134 struct task_stats_sample *last = &cur_task_stats->sample[last_stat];
136 /* Read TX first and RX second, in order to prevent displaying
137 a negative packet loss. Depending on the configuration
138 (when forwarding, for example), TX might be bigger than RX. */
139 before = rte_rdtsc();
140 last->tx_pkt_count = stats->tx_pkt_count;
141 last->drop_tx_fail = stats->drop_tx_fail;
142 last->drop_discard = stats->drop_discard;
143 last->drop_handled = stats->drop_handled;
144 last->rx_pkt_count = stats->rx_pkt_count;
145 last->empty_cycles = stats->idle_cycles;
146 last->tx_bytes = stats->tx_bytes;
147 last->rx_bytes = stats->rx_bytes;
148 last->drop_bytes = stats->drop_bytes;
149 last->rx_non_dp = stats->rx_non_dp;
150 last->tx_non_dp = stats->tx_non_dp;
152 last->tsc = (before >> 1) + (after >> 1);
156 void stats_task_get_host_rx_tx_packets(uint64_t *rx, uint64_t *tx, uint64_t *tsc)
158 const struct task_stats *t;
163 for (uint8_t task_id = 0; task_id < nb_tasks_tot; ++task_id) {
164 t = task_stats_set[task_id];
166 if (t->flags & TASK_STATS_RX)
167 *rx += t->tot_rx_pkt_count;
169 if (t->flags & TASK_STATS_TX)
170 *tx += t->tot_tx_pkt_count;
173 *tsc = task_stats_set[nb_tasks_tot - 1]->sample[last_stat].tsc;
176 /* Populate active_stats_set for stats reporting, the order of the
177 cores is important for gathering the most accurate statistics. TX
178 cores should be updated before RX cores (to prevent negative Loss
179 stats). The total number of tasks are saved in nb_tasks_tot. */
180 void stats_task_init(void)
182 struct lcore_cfg *lconf;
185 /* add cores that are receiving from and sending to physical ports first */
187 while(prox_core_next(&lcore_id, 0) == 0) {
188 lconf = &lcore_cfg[lcore_id];
189 for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
190 struct task_args *targ = &lconf->targs[task_id];
191 struct task_rt_stats *stats = &lconf->tasks_all[task_id]->aux->stats;
192 if (targ->nb_rxrings == 0 && targ->nb_txrings == 0) {
193 struct task_stats *ts = &lcore_task_stats_all[lcore_id].task_stats[task_id];
195 init_core_port(ts, stats, TASK_STATS_RX | TASK_STATS_TX);
196 task_stats_set[nb_tasks_tot++] = ts;
201 /* add cores that are sending to physical ports second */
203 while(prox_core_next(&lcore_id, 0) == 0) {
204 lconf = &lcore_cfg[lcore_id];
205 for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
206 struct task_args *targ = &lconf->targs[task_id];
207 struct task_rt_stats *stats = &lconf->tasks_all[task_id]->aux->stats;
208 if (targ->nb_rxrings != 0 && targ->nb_txrings == 0) {
209 struct task_stats *ts = &lcore_task_stats_all[lcore_id].task_stats[task_id];
211 init_core_port(ts, stats, TASK_STATS_TX);
212 task_stats_set[nb_tasks_tot++] = ts;
217 /* add cores that are receiving from physical ports third */
219 while(prox_core_next(&lcore_id, 0) == 0) {
220 lconf = &lcore_cfg[lcore_id];
221 for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
222 struct task_args *targ = &lconf->targs[task_id];
223 struct task_rt_stats *stats = &lconf->tasks_all[task_id]->aux->stats;
224 if (targ->nb_rxrings == 0 && targ->nb_txrings != 0) {
225 struct task_stats *ts = &lcore_task_stats_all[lcore_id].task_stats[task_id];
227 init_core_port(ts, stats, TASK_STATS_RX);
228 task_stats_set[nb_tasks_tot++] = ts;
233 /* add cores that are working internally (no physical ports attached) */
235 while(prox_core_next(&lcore_id, 0) == 0) {
236 lconf = &lcore_cfg[lcore_id];
237 for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
238 struct task_args *targ = &lconf->targs[task_id];
239 struct task_rt_stats *stats = &lconf->tasks_all[task_id]->aux->stats;
240 if (targ->nb_rxrings != 0 && targ->nb_txrings != 0) {
241 struct task_stats *ts = &lcore_task_stats_all[lcore_id].task_stats[task_id];
243 init_core_port(ts, stats, 0);
244 task_stats_set[nb_tasks_tot++] = ts;