// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <rte_cycles.h>

#include "stats_task.h"
#include "prox_cfg.h"
#include "prox_globals.h"
#include "lconf.h"
struct lcore_task_stats {
	struct task_stats task_stats[MAX_TASKS_PER_CORE];
};
#define TASK_STATS_RX 0x01
#define TASK_STATS_TX 0x02
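/* Flag bits for struct task_stats flags: TASK_STATS_RX marks a task
   that receives from a physical port, TASK_STATS_TX a task that
   transmits to one (see how stats_task_init() assigns them below). */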
/* last_stat is the index (0 or 1) of the most recently written of the
   two per-task samples; it is maintained by the stats framework. */
extern int last_stat;
static struct lcore_task_stats lcore_task_stats_all[RTE_MAX_LCORE];
/* Flat, ordered view of all active task_stats entries, filled by
   stats_task_init(). */
static struct task_stats *task_stats_set[RTE_MAX_LCORE * MAX_TASKS_PER_CORE];
static uint8_t nb_tasks_tot;
int stats_get_n_tasks_tot(void)
{
	return nb_tasks_tot;
}
struct task_stats *stats_get_task_stats(uint32_t lcore_id, uint32_t task_id)
{
	return &lcore_task_stats_all[lcore_id].task_stats[task_id];
}
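/* Each task keeps two samples in a double buffer. Writing the index as
   (l == last_stat) makes l = 1 return the most recent sample and l = 0
   the previous one, whichever slot last_stat currently points at. */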
struct task_stats_sample *stats_get_task_stats_sample(uint32_t lcore_id, uint32_t task_id, int l)
{
	return &lcore_task_stats_all[lcore_id].task_stats[task_id].sample[l == last_stat];
}
void stats_task_reset(void)
{
	struct task_stats *cur_task_stats;

	for (uint8_t task_id = 0; task_id < nb_tasks_tot; ++task_id) {
		cur_task_stats = task_stats_set[task_id];
		cur_task_stats->tot_rx_pkt_count = 0;
		cur_task_stats->tot_tx_pkt_count = 0;
		cur_task_stats->tot_drop_tx_fail = 0;
		cur_task_stats->tot_drop_discard = 0;
		cur_task_stats->tot_drop_handled = 0;
	}
}
uint64_t stats_core_task_tot_rx(uint8_t lcore_id, uint8_t task_id)
{
	return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_rx_pkt_count;
}

uint64_t stats_core_task_tot_tx(uint8_t lcore_id, uint8_t task_id)
{
	return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_tx_pkt_count;
}

uint64_t stats_core_task_tot_drop(uint8_t lcore_id, uint8_t task_id)
{
	return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_drop_tx_fail +
		lcore_task_stats_all[lcore_id].task_stats[task_id].tot_drop_discard +
		lcore_task_stats_all[lcore_id].task_stats[task_id].tot_drop_handled;
}

uint64_t stats_core_task_last_tsc(uint8_t lcore_id, uint8_t task_id)
{
	return lcore_task_stats_all[lcore_id].task_stats[task_id].sample[last_stat].tsc;
}
/* Attach a task's run-time counters to its stats entry and record
   whether it touches physical ports on the RX and/or TX side. */
static void init_core_port(struct task_stats *ts, struct task_rt_stats *stats, uint8_t flags)
{
	ts->stats = stats;
	ts->flags |= flags;
}
void stats_task_post_proc(void)
{
	for (uint8_t task_id = 0; task_id < nb_tasks_tot; ++task_id) {
		struct task_stats *cur_task_stats = task_stats_set[task_id];
		const struct task_stats_sample *last = &cur_task_stats->sample[last_stat];
		const struct task_stats_sample *prev = &cur_task_stats->sample[!last_stat];

		/* no total stats for empty loops */
		cur_task_stats->tot_rx_pkt_count += last->rx_pkt_count - prev->rx_pkt_count;
		cur_task_stats->tot_tx_pkt_count += last->tx_pkt_count - prev->tx_pkt_count;
		cur_task_stats->tot_drop_tx_fail += last->drop_tx_fail - prev->drop_tx_fail;
		cur_task_stats->tot_drop_discard += last->drop_discard - prev->drop_discard;
		cur_task_stats->tot_drop_handled += last->drop_handled - prev->drop_handled;
	}
}
void stats_task_update(void)
{
	uint64_t before, after;

	for (uint8_t task_id = 0; task_id < nb_tasks_tot; ++task_id) {
		struct task_stats *cur_task_stats = task_stats_set[task_id];
		struct task_rt_stats *stats = cur_task_stats->stats;
		struct task_stats_sample *last = &cur_task_stats->sample[last_stat];

		/* Read TX first and RX second, in order to prevent displaying
		   a negative packet loss. Depending on the configuration
		   (when forwarding, for example), TX might be bigger than RX. */
		before = rte_rdtsc();
		last->tx_pkt_count = stats->tx_pkt_count;
		last->drop_tx_fail = stats->drop_tx_fail;
		last->drop_discard = stats->drop_discard;
		last->drop_handled = stats->drop_handled;
		last->rx_pkt_count = stats->rx_pkt_count;
		last->empty_cycles = stats->idle_cycles;
		last->tx_bytes     = stats->tx_bytes;
		last->rx_bytes     = stats->rx_bytes;
		last->drop_bytes   = stats->drop_bytes;
		after = rte_rdtsc();
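		/* Timestamp the sample with the midpoint of the two rdtsc()
		   reads bracketing the copy; shifting each operand right by
		   one bit averages them without risking 64-bit overflow. */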
		last->tsc = (before >> 1) + (after >> 1);
	}
}
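/* Host-level totals: only tasks flagged as touching physical ports are
   summed, so packets forwarded internally over rings are not counted
   twice. */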
void stats_task_get_host_rx_tx_packets(uint64_t *rx, uint64_t *tx, uint64_t *tsc)
{
	const struct task_stats *t;

	*rx = 0;
	*tx = 0;

	for (uint8_t task_id = 0; task_id < nb_tasks_tot; ++task_id) {
		t = task_stats_set[task_id];

		if (t->flags & TASK_STATS_RX)
			*rx += t->tot_rx_pkt_count;

		if (t->flags & TASK_STATS_TX)
			*tx += t->tot_tx_pkt_count;
	}

	/* Guard against an out-of-bounds read when no tasks are active. */
	*tsc = nb_tasks_tot ? task_stats_set[nb_tasks_tot - 1]->sample[last_stat].tsc : 0;
}
/* Populate task_stats_set for stats reporting. The order of the cores
   is important for gathering the most accurate statistics: TX cores
   should be updated before RX cores (to prevent negative loss stats).
   The total number of tasks is saved in nb_tasks_tot. */
void stats_task_init(void)
{
	struct lcore_cfg *lconf;
	uint32_t lcore_id;

	/* add cores that are receiving from and sending to physical ports first */
	lcore_id = -1; /* start before the first core; prox_core_next() advances it */
	while (prox_core_next(&lcore_id, 0) == 0) {
		lconf = &lcore_cfg[lcore_id];
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			struct task_args *targ = &lconf->targs[task_id];
			struct task_rt_stats *stats = &lconf->tasks_all[task_id]->aux->stats;
			if (targ->nb_rxrings == 0 && targ->nb_txrings == 0) {
				struct task_stats *ts = &lcore_task_stats_all[lcore_id].task_stats[task_id];

				init_core_port(ts, stats, TASK_STATS_RX | TASK_STATS_TX);
				task_stats_set[nb_tasks_tot++] = ts;
			}
		}
	}
	/* add cores that are sending to physical ports second */
	lcore_id = -1;
	while (prox_core_next(&lcore_id, 0) == 0) {
		lconf = &lcore_cfg[lcore_id];
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			struct task_args *targ = &lconf->targs[task_id];
			struct task_rt_stats *stats = &lconf->tasks_all[task_id]->aux->stats;
			if (targ->nb_rxrings != 0 && targ->nb_txrings == 0) {
				struct task_stats *ts = &lcore_task_stats_all[lcore_id].task_stats[task_id];

				init_core_port(ts, stats, TASK_STATS_TX);
				task_stats_set[nb_tasks_tot++] = ts;
			}
		}
	}
	/* add cores that are receiving from physical ports third */
	lcore_id = -1;
	while (prox_core_next(&lcore_id, 0) == 0) {
		lconf = &lcore_cfg[lcore_id];
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			struct task_args *targ = &lconf->targs[task_id];
			struct task_rt_stats *stats = &lconf->tasks_all[task_id]->aux->stats;
			if (targ->nb_rxrings == 0 && targ->nb_txrings != 0) {
				struct task_stats *ts = &lcore_task_stats_all[lcore_id].task_stats[task_id];

				init_core_port(ts, stats, TASK_STATS_RX);
				task_stats_set[nb_tasks_tot++] = ts;
			}
		}
	}
	/* add cores that are working internally (no physical ports attached) */
	lcore_id = -1;
	while (prox_core_next(&lcore_id, 0) == 0) {
		lconf = &lcore_cfg[lcore_id];
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			struct task_args *targ = &lconf->targs[task_id];
			struct task_rt_stats *stats = &lconf->tasks_all[task_id]->aux->stats;
			if (targ->nb_rxrings != 0 && targ->nb_txrings != 0) {
				struct task_stats *ts = &lcore_task_stats_all[lcore_id].task_stats[task_id];

				init_core_port(ts, stats, 0);
				task_stats_set[nb_tasks_tot++] = ts;
			}
		}
	}
}
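/* Typical driving sequence (a sketch of how the stats framework is
   expected to use this file, not code that ships here): call
   stats_task_init() once at startup, then periodically call
   stats_task_update() to snapshot the per-task counters, followed by
   stats_task_post_proc() to fold the deltas into the totals returned
   by stats_core_task_tot_rx()/_tx()/_drop(). */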