VNFs/DPPD-PROX/stats_cons_log.c
/*
// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/
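
/*
 * stats_cons_log: a stats consumer that dumps statistics to a flat
 * binary file (STATS_DUMP_FILE_NAME). The file starts with a struct
 * header describing the TSC frequency, the capture start timestamp,
 * the number of entries sampled per notify and the byte size of each
 * record field, followed by a stream of fixed-size struct record
 * entries.
 */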

#include <stdio.h>	/* FILE, fopen, fwrite, fclose */
#include <string.h>	/* memset, strcmp */

#include <rte_cycles.h>

#include "stats.h"
#include "stats_l4gen.h"
#include "stats_cons_log.h"
#include "prox_cfg.h"
#include "prox_args.h"
#include "prox_assert.h"
#include "commands.h"

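/*
 * Consumer callbacks registered with the stats framework. Regular
 * builds collect all statistics; DPI_STATS builds restrict collection
 * to port and task statistics.
 */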
static struct stats_cons stats_cons_log = {
        .init = stats_cons_log_init,
        .notify = stats_cons_log_notify,
        .finish = stats_cons_log_finish,
#ifndef DPI_STATS
        .flags = STATS_CONS_F_ALL,
#else
        .flags = STATS_CONS_F_PORTS|STATS_CONS_F_TASKS,
#endif
};

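/*
 * On-disk header of the dump file. Readers use hz/now to convert record
 * timestamps, and the first n_entry_fields bytes of n_entry_field_size[]
 * to compute the record length without knowing the build flavor.
 */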
struct header {
        uint64_t hz;
        uint64_t now;
        uint64_t n_entries;
        uint64_t n_entry_fields;
        uint8_t  n_entry_field_size[64];
};

static void header_init(struct header *hdr, uint64_t hz, uint64_t now, uint64_t n_entries) {
        memset(hdr, 0, sizeof(*hdr));
        hdr->hz = hz;
        hdr->now = now;
        hdr->n_entries = n_entries;
}

static void header_add_field(struct header *hdr, uint8_t size) {
        PROX_ASSERT(hdr->n_entry_fields < sizeof(hdr->n_entry_field_size));
        hdr->n_entry_field_size[hdr->n_entry_fields++] = size;
}

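/* Only the used part of the header is serialized: the fixed fields plus
 * n_entry_fields bytes of the field-size array. */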
static void header_write(struct header *hdr, FILE *fp) {
        size_t header_size_no_fields = sizeof(*hdr) - sizeof(hdr->n_entry_field_size);
        size_t header_size_effective = header_size_no_fields + hdr->n_entry_fields;

        fwrite(hdr, header_size_effective, 1, fp);
}

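/* Records are staged in a fixed-size buffer and flushed to the dump file
 * in bulk, keeping per-notify file I/O overhead low. */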
#define BUFFERED_RECORD_LEN 16384

#define STATS_DUMP_FILE_NAME "stats_dump"
static FILE *fp;

struct entry {
        uint32_t lcore_id;
        uint32_t task_id;
#ifndef DPI_STATS
        uint32_t l4_stats_id;
#endif
};

static struct entry entries[64];
static uint64_t n_entries;

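/*
 * One record is written per entry per notify. The field order and sizes
 * must match the header_add_field() calls in stats_cons_log_init() below.
 */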
#ifndef DPI_STATS
struct record {
        uint32_t lcore_id;
        uint32_t task_id;
        uint64_t active_connections;
        uint64_t bundles_created;
        uint64_t rx_bytes;
        uint64_t tx_bytes;
        uint64_t tsc;
} __attribute__((packed));
#else
struct record {
        uint32_t lcore_id;
        uint32_t task_id;
        uint64_t rx_bytes;
        uint64_t tx_bytes;
        uint64_t drop_bytes;
        uint64_t tsc;
} __attribute__((packed));
#endif

static struct record buf[BUFFERED_RECORD_LEN];
static size_t buf_pos = 0;

struct stats_cons *stats_cons_log_get(void)
{
        return &stats_cons_log;
}

#ifndef DPI_STATS
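/*
 * Open the dump file, collect the genl4 tasks to sample, enable rx/tx
 * byte accounting on their cores and write the file header. If the file
 * cannot be opened, the consumer stays disabled.
 */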
void stats_cons_log_init(void)
{
        fp = fopen(STATS_DUMP_FILE_NAME, "w");
        if (!fp)
                return;

        uint32_t lcore_id = -1; /* start before the first core; prox_core_next() advances it */

        while (prox_core_next(&lcore_id, 0) == 0) {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                /* Only cores whose first task runs plain "genl4" mode are sampled. */
                if (lconf->n_tasks_all && (strcmp(lconf->targs[0].task_init->mode_str, "genl4") ||
                                           strcmp(lconf->targs[0].task_init->sub_mode_str, "")))
                        continue;

                for (uint32_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
                        entries[n_entries].lcore_id = lcore_id;
                        entries[n_entries].task_id = task_id;
                        entries[n_entries].l4_stats_id = n_entries;
                        n_entries++;
                        if (n_entries == sizeof(entries)/sizeof(entries[0]))
                                break;
                }
                cmd_rx_bw_start(lcore_id);
                cmd_tx_bw_start(lcore_id);
                if (n_entries == sizeof(entries)/sizeof(entries[0]))
                        break;
        }

        struct header hdr;

        header_init(&hdr, rte_get_tsc_hz(), rte_rdtsc(), n_entries);
        header_add_field(&hdr, sizeof(((struct record *)0)->lcore_id));
        header_add_field(&hdr, sizeof(((struct record *)0)->task_id));
        header_add_field(&hdr, sizeof(((struct record *)0)->active_connections));
        header_add_field(&hdr, sizeof(((struct record *)0)->bundles_created));
        header_add_field(&hdr, sizeof(((struct record *)0)->rx_bytes));
        header_add_field(&hdr, sizeof(((struct record *)0)->tx_bytes));
        header_add_field(&hdr, sizeof(((struct record *)0)->tsc));

        header_write(&hdr, fp);
}

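/*
 * Per stats round: snapshot L4 generator and task counters for every
 * registered entry, flushing the staging buffer to disk beforehand if
 * the new batch would not fit.
 */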
void stats_cons_log_notify(void)
{
        if (!fp)
                return;

        if (buf_pos + n_entries > sizeof(buf)/sizeof(buf[0])) {
                fwrite(buf, sizeof(buf[0]), buf_pos, fp);
                buf_pos = 0;
        }
        PROX_ASSERT(buf_pos + n_entries <= sizeof(buf)/sizeof(buf[0]));

        for (uint32_t i = 0; i < n_entries; ++i) {
                uint32_t c = entries[i].lcore_id;
                uint32_t t = entries[i].task_id;
                uint32_t j = entries[i].l4_stats_id;
                struct l4_stats_sample *clast = stats_get_l4_stats_sample(j, 1);
                struct task_stats_sample *last = stats_get_task_stats_sample(c, t, 1);

                buf[buf_pos].lcore_id = c;
                buf[buf_pos].task_id  = t;

                /* Connections still open = everything created minus everything
                   that finished or expired. */
                uint64_t tot_created = clast->stats.tcp_created + clast->stats.udp_created;
                uint64_t tot_finished = clast->stats.tcp_finished_retransmit + clast->stats.tcp_finished_no_retransmit +
                        clast->stats.udp_finished + clast->stats.udp_expired + clast->stats.tcp_expired;

                buf[buf_pos].active_connections = tot_created - tot_finished;
                buf[buf_pos].bundles_created = clast->stats.bundles_created;
                buf[buf_pos].rx_bytes = last->rx_bytes;
                buf[buf_pos].tx_bytes = last->tx_bytes;
                buf[buf_pos].tsc = clast->tsc;

                buf_pos++;
        }
}

#else
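/*
 * DPI build: sample every "lbpos" task instead. Open the dump file,
 * enable rx/tx byte accounting and write the file header.
 */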
void stats_cons_log_init(void)
{
        fp = fopen(STATS_DUMP_FILE_NAME, "w");
        if (!fp)
                return;

        uint32_t lcore_id = -1; /* start before the first core; prox_core_next() advances it */

        while (prox_core_next(&lcore_id, 0) == 0) {
                struct lcore_cfg *lconf = &lcore_cfg[lcore_id];

                if (!lconf->n_tasks_all)
                        continue;

                for (uint32_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
                        if (strcmp(lconf->targs[task_id].task_init->mode_str, "lbpos"))
                                continue;

                        entries[n_entries].lcore_id = lcore_id;
                        entries[n_entries].task_id = task_id;
                        n_entries++;
                        if (n_entries == sizeof(entries)/sizeof(entries[0]))
                                break;
                }
                cmd_rx_bw_start(lcore_id);
                cmd_tx_bw_start(lcore_id);
                if (n_entries == sizeof(entries)/sizeof(entries[0]))
                        break;
        }

        struct header hdr;

        header_init(&hdr, rte_get_tsc_hz(), rte_rdtsc(), n_entries);
        header_add_field(&hdr, sizeof(((struct record *)0)->lcore_id));
        header_add_field(&hdr, sizeof(((struct record *)0)->task_id));
        header_add_field(&hdr, sizeof(((struct record *)0)->rx_bytes));
        header_add_field(&hdr, sizeof(((struct record *)0)->tx_bytes));
        header_add_field(&hdr, sizeof(((struct record *)0)->drop_bytes));
        header_add_field(&hdr, sizeof(((struct record *)0)->tsc));
        header_write(&hdr, fp);
}

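/*
 * Per stats round: record rx/tx/drop byte counters for every lbpos task,
 * flushing the staging buffer whenever it fills up.
 */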
void stats_cons_log_notify(void)
{
        if (!fp)
                return;

        for (uint32_t i = 0; i < n_entries; ++i) {
                uint32_t c = entries[i].lcore_id;
                uint32_t t = entries[i].task_id;
                struct task_stats *l = stats_get_task_stats(c, t); /* only for the disabled field below */
                struct task_stats_sample *last = stats_get_task_stats_sample(c, t, 1);

                buf[buf_pos].lcore_id = c;
                buf[buf_pos].task_id  = t;
                buf[buf_pos].tx_bytes = last->tx_bytes;
                buf[buf_pos].rx_bytes = last->rx_bytes;
                buf[buf_pos].drop_bytes = last->drop_bytes;
                /* buf[buf_pos].drop_tx_fail = l->tot_drop_tx_fail; */
                buf[buf_pos].tsc = last->tsc;

                buf_pos++;

                if (buf_pos == sizeof(buf)/sizeof(buf[0])) {
                        fwrite(buf, sizeof(buf), 1, fp);
                        buf_pos = 0;
                }
        }
}
#endif

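/* Flush any records still buffered and close the dump file. */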
void stats_cons_log_finish(void)
{
        if (fp) {
                if (buf_pos) {
                        fwrite(buf, sizeof(buf[0]), buf_pos, fp);
                        buf_pos = 0;
                }
                fclose(fp);
        }
}