// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef _STATS_TASK_H_
#define _STATS_TASK_H_

#include <inttypes.h>

#include "clock.h"
/* The struct task_rt_stats is read/write from the task itself and
   read-only from the core that collects the stats. Since only the
   task executing the actual work ever modifies the stats, no locking
   is required. Both a read and a write are atomic (assuming the
   correct alignment). From this, it follows that the statistics can
   be incremented directly by the task itself. In cases where these
   assumptions do not hold, a possible solution (although slightly
   less accurate) would be to accumulate statistics temporarily in a
   separate structure and periodically copy them to the statistics
   core through atomic primitives, for example through
   rte_atomic32_set(). The accuracy would be determined by the
   frequency at which the statistics are transferred to the
   statistics core. */
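
/* A minimal sketch of the alternative scheme described above (illustrative
   only; the local_stats, shared_stats and stats_publish names are not part
   of this header): the task accumulates into a private structure and
   periodically publishes the counters through rte_atomic32_set(), so the
   reported values are only as fresh as the publication interval.

       #include <rte_atomic.h>

       struct local_stats {
               uint32_t rx_pkt_count;
               uint32_t tx_pkt_count;
       };

       struct shared_stats {
               rte_atomic32_t rx_pkt_count;
               rte_atomic32_t tx_pkt_count;
       };

       static void stats_publish(const struct local_stats *loc, struct shared_stats *shr)
       {
               // Accuracy is bounded by how often this function is called.
               rte_atomic32_set(&shr->rx_pkt_count, (int32_t)loc->rx_pkt_count);
               rte_atomic32_set(&shr->tx_pkt_count, (int32_t)loc->tx_pkt_count);
       }
*/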
struct task_rt_stats {
        uint32_t rx_pkt_count;
        uint32_t tx_pkt_count;
        uint32_t drop_tx_fail;
        uint32_t drop_discard;
        uint32_t drop_handled;
        uint32_t idle_cycles;
        uint64_t rx_bytes;
        uint64_t tx_bytes;
        uint64_t drop_bytes;
        uint64_t rx_non_dp;
        uint64_t tx_non_dp;
} __attribute__((packed)) __rte_cache_aligned;
#ifdef PROX_STATS
#define TASK_STATS_ADD_IDLE(stats, cycles) do { \
        (stats)->idle_cycles += (cycles) + rdtsc_overhead_stats; \
    } while(0)

#define TASK_STATS_ADD_TX(stats, ntx) do { \
        (stats)->tx_pkt_count += ntx; \
    } while(0)

#define TASK_STATS_ADD_DROP_TX_FAIL(stats, ntx) do { \
        (stats)->drop_tx_fail += ntx; \
    } while(0)

#define TASK_STATS_ADD_DROP_HANDLED(stats, ntx) do { \
        (stats)->drop_handled += ntx; \
    } while(0)

#define TASK_STATS_ADD_DROP_DISCARD(stats, ntx) do { \
        (stats)->drop_discard += ntx; \
    } while(0)

#define TASK_STATS_ADD_RX(stats, ntx) do { \
        (stats)->rx_pkt_count += ntx; \
    } while(0)

#define TASK_STATS_ADD_RX_NON_DP(stats, ntx) do { \
        (stats)->rx_non_dp += ntx; \
    } while(0)

#define TASK_STATS_ADD_TX_NON_DP(stats, ntx) do { \
        (stats)->tx_non_dp += ntx; \
    } while(0)

#define TASK_STATS_ADD_RX_BYTES(stats, bytes) do { \
        (stats)->rx_bytes += bytes; \
    } while(0)

#define TASK_STATS_ADD_TX_BYTES(stats, bytes) do { \
        (stats)->tx_bytes += bytes; \
    } while(0)

#define TASK_STATS_ADD_DROP_BYTES(stats, bytes) do { \
        (stats)->drop_bytes += bytes; \
    } while(0)

#define START_EMPTY_MEASSURE() uint64_t cur_tsc = rte_rdtsc();
#else
#define TASK_STATS_ADD_IDLE(stats, cycles) do {} while(0)
#define TASK_STATS_ADD_TX(stats, ntx) do {} while(0)
#define TASK_STATS_ADD_DROP_TX_FAIL(stats, ntx) do {} while(0)
#define TASK_STATS_ADD_DROP_HANDLED(stats, ntx) do {} while(0)
#define TASK_STATS_ADD_DROP_DISCARD(stats, ntx) do {} while(0)
#define TASK_STATS_ADD_RX(stats, ntx) do {} while(0)
#define TASK_STATS_ADD_RX_NON_DP(stats, ntx) do {} while(0)
#define TASK_STATS_ADD_TX_NON_DP(stats, ntx) do {} while(0)
#define TASK_STATS_ADD_RX_BYTES(stats, bytes) do {} while(0)
#define TASK_STATS_ADD_TX_BYTES(stats, bytes) do {} while(0)
#define TASK_STATS_ADD_DROP_BYTES(stats, bytes) do {} while(0)
#define START_EMPTY_MEASSURE() do {} while(0)
#endif
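
/* Usage sketch for the accumulation macros above (illustrative; the
   my_task, handle_bulk and do_tx names are hypothetical and not part of
   this header). A task owning a struct task_rt_stats bumps its counters
   directly in the fast path; when PROX_STATS is not defined the macros
   compile away.

       struct my_task {
               struct task_rt_stats stats;
       };

       static uint16_t handle_bulk(struct my_task *task, struct rte_mbuf **mbufs, uint16_t n_pkts)
       {
               uint16_t n_sent;

               TASK_STATS_ADD_RX(&task->stats, n_pkts);
               n_sent = do_tx(mbufs, n_pkts);                // hypothetical transmit helper
               TASK_STATS_ADD_TX(&task->stats, n_sent);
               TASK_STATS_ADD_DROP_TX_FAIL(&task->stats, n_pkts - n_sent);
               return n_sent;
       }
*/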
struct task_stats_sample {
        uint64_t tsc;
        uint32_t tx_pkt_count;
        uint32_t drop_tx_fail;
        uint32_t drop_discard;
        uint32_t drop_handled;
        uint32_t rx_pkt_count;
        uint32_t empty_cycles;
        uint64_t rx_bytes;
        uint64_t tx_bytes;
        uint64_t drop_bytes;
        uint64_t rx_non_dp;
        uint64_t tx_non_dp;
};
struct task_stats {
        uint64_t tot_tx_pkt_count;
        uint64_t tot_drop_tx_fail;
        uint64_t tot_drop_discard;
        uint64_t tot_drop_handled;
        uint64_t tot_rx_pkt_count;
        uint64_t tot_tx_non_dp;
        uint64_t tot_rx_non_dp;

        struct task_stats_sample sample[2];

        struct task_rt_stats *stats;
        /* Flags, set at initialization time, indicating whether total RX/TX
           values need to be reported; only in that case do the stats values
           in the port need to be accessed. */
        uint8_t flags;
};
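
/* Rate-computation sketch (illustrative; task_rx_rate() and the assumption
   that sample[1] is the most recent snapshot and sample[0] the previous one
   are not part of this header). The two entries of sample[] hold consecutive
   snapshots, so a per-second rate follows from their difference.

       static uint64_t task_rx_rate(const struct task_stats *ts, uint64_t tsc_hz)
       {
               const struct task_stats_sample *last = &ts->sample[1];
               const struct task_stats_sample *prev = &ts->sample[0];
               uint64_t d_pkts = last->rx_pkt_count - prev->rx_pkt_count;
               uint64_t d_tsc = last->tsc - prev->tsc;

               return d_tsc ? d_pkts * tsc_hz / d_tsc : 0;   // packets per second
       }
*/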
void stats_task_reset(void);
void stats_task_post_proc(void);
void stats_task_update(void);
void stats_task_init(void);

int stats_get_n_tasks_tot(void);

struct task_stats *stats_get_task_stats(uint32_t lcore_id, uint32_t task_id);
struct task_stats_sample *stats_get_task_stats_sample(uint32_t lcore_id, uint32_t task_id, int last);
void stats_task_get_host_rx_tx_packets(uint64_t *rx, uint64_t *tx, uint64_t *tsc);

uint64_t stats_core_task_tot_rx(uint8_t lcore_id, uint8_t task_id);
uint64_t stats_core_task_tot_tx(uint8_t lcore_id, uint8_t task_id);
uint64_t stats_core_task_tot_tx_fail(uint8_t lcore_id, uint8_t task_id);
uint64_t stats_core_task_tot_drop(uint8_t lcore_id, uint8_t task_id);
uint64_t stats_core_task_last_tsc(uint8_t lcore_id, uint8_t task_id);
uint64_t stats_core_task_tot_rx_non_dp(uint8_t lcore_id, uint8_t task_id);
uint64_t stats_core_task_tot_tx_non_dp(uint8_t lcore_id, uint8_t task_id);
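
/* Collector-side sketch (illustrative; print_task_totals() is hypothetical,
   and it is assumed that stats_task_update()/stats_task_post_proc() have
   refreshed the counters beforehand): per-task totals are read through the
   accessors declared above.

       #include <inttypes.h>
       #include <stdio.h>

       static void print_task_totals(uint8_t lcore_id, uint8_t task_id)
       {
               printf("lcore %u task %u: rx=%"PRIu64" tx=%"PRIu64" drop=%"PRIu64"\n",
                      (unsigned)lcore_id, (unsigned)task_id,
                      stats_core_task_tot_rx(lcore_id, task_id),
                      stats_core_task_tot_tx(lcore_id, task_id),
                      stats_core_task_tot_drop(lcore_id, task_id));
       }
*/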
#endif /* _STATS_TASK_H_ */