Prepare for DPDK 19.08 support
[samplevnf.git] / VNFs / DPPD-PROX / handle_impair.c
/*
// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#include <string.h>
#include <stdio.h>
#include <rte_cycles.h>
#include <rte_version.h>

#include "prox_malloc.h"
#include "lconf.h"
#include "log.h"
#include "random.h"
#include "handle_impair.h"
#include "prefetch.h"
#include "prox_port_cfg.h"

#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE
#endif

#define DELAY_ACCURACY	11		// accuracy of 2048 cycles ~= 1 micro-second
#define DELAY_MAX_MASK	0x1FFFFF	// Maximum 2M * 2K cycles ~1 second
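// 2^DELAY_ACCURACY = 2048 cycles per time slot and (DELAY_MAX_MASK + 1) = 2^21
// slots give 2^32 cycles of range in total, i.e. roughly one second on a ~4 GHz
// TSC and proportionally longer on slower clocks.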

struct queue_elem {
	struct rte_mbuf *mbuf;
	uint64_t        tsc;
};

struct queue {
	struct queue_elem *queue_elem;
	unsigned queue_head;
	unsigned queue_tail;
};
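
/*
 * Per-task state. The fixed-delay mode buffers packets in a single circular
 * queue ("queue"), while the random-delay mode spreads them over an array of
 * per-time-slot queues ("buffer") indexed by the scheduled release time.
 * "tresh" is the rand_r() threshold at or below which a packet is forwarded.
 */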
struct task_impair {
	struct task_base base;
	struct queue_elem *queue;
	uint32_t random_delay_us;
	uint32_t delay_us;
	uint64_t delay_time;
	uint64_t delay_time_mask;
	unsigned queue_head;
	unsigned queue_tail;
	unsigned queue_mask;
	int tresh;
	unsigned int seed;
	struct random state;
	uint64_t last_idx;
	struct queue *buffer;
	uint32_t socket_id;
	uint32_t flags;
	uint8_t src_mac[6];
};

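/*
 * IMPAIR_NEED_UPDATE: a new delay was requested through
 * task_impair_set_delay_us(); the reconfiguration is applied lazily from the
 * datapath by task_impair_update().
 * IMPAIR_SET_MAC: rewrite the source MAC of forwarded packets with the
 * address of the TX port.
 */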
#define IMPAIR_NEED_UPDATE     1
#define IMPAIR_SET_MAC         2

static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
static int handle_bulk_random_drop(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);

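/*
 * Convert a forwarding probability (apparently expressed in percent) into a
 * rand_r() threshold: packets drawing a value <= tresh are forwarded, the
 * rest are discarded.
 */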
void task_impair_set_proba(struct task_base *tbase, float proba)
{
	struct task_impair *task = (struct task_impair *)tbase;
	task->tresh = ((uint64_t) RAND_MAX) * (uint32_t)(proba * 10000) / 1000000;
}

void task_impair_set_delay_us(struct task_base *tbase, uint32_t delay_us, uint32_t random_delay_us)
{
	struct task_impair *task = (struct task_impair *)tbase;
	task->flags |= IMPAIR_NEED_UPDATE;
	task->random_delay_us = random_delay_us;
	task->delay_us = delay_us;
}

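/*
 * Apply a pending reconfiguration: flush any packets still sitting in the old
 * queue or timing-wheel buffer (applying the drop probability on the way out),
 * free the old storage, select the matching handle_bulk function and allocate
 * the new queue(s).
 */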
static void task_impair_update(struct task_base *tbase)
{
	struct task_impair *task = (struct task_impair *)tbase;
	uint32_t queue_len = 0;
	size_t mem_size;
	if ((task->flags & IMPAIR_NEED_UPDATE) == 0)
		return;
	task->flags &= ~IMPAIR_NEED_UPDATE;
	uint64_t now = rte_rdtsc();
	uint8_t out[MAX_PKT_BURST] = {0};
	uint64_t now_idx = (now >> DELAY_ACCURACY) & DELAY_MAX_MASK;

	if (task->random_delay_us) {
		tbase->handle_bulk = handle_bulk_impair_random;
		task->delay_time = usec_to_tsc(task->random_delay_us);
		task->delay_time_mask = rte_align32pow2(task->delay_time) - 1;
		queue_len = rte_align32pow2((1250L * task->random_delay_us) / 84 / (DELAY_MAX_MASK + 1));
	} else if (task->delay_us == 0) {
		tbase->handle_bulk = handle_bulk_random_drop;
		task->delay_time = 0;
	} else {
		tbase->handle_bulk = handle_bulk_impair;
		task->delay_time = usec_to_tsc(task->delay_us);
		queue_len = rte_align32pow2(1250 * task->delay_us / 84);
	}
	if (task->queue) {
		struct rte_mbuf *new_mbufs[MAX_PKT_BURST];
		while (task->queue_tail != task->queue_head) {
			now = rte_rdtsc();
			uint16_t idx = 0;
			while (idx < MAX_PKT_BURST && task->queue_tail != task->queue_head) {
				if (task->queue[task->queue_tail].tsc <= now) {
					out[idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
					new_mbufs[idx++] = task->queue[task->queue_tail].mbuf;
					task->queue_tail = (task->queue_tail + 1) & task->queue_mask;
				}
				else {
					break;
				}
			}
			if (idx)
				task->base.tx_pkt(&task->base, new_mbufs, idx, out);
		}
		prox_free(task->queue);
		task->queue = NULL;
	}
	if (task->buffer) {
		struct rte_mbuf *new_mbufs[MAX_PKT_BURST];
		while (task->last_idx != ((now_idx - 1) & DELAY_MAX_MASK)) {
			now = rte_rdtsc();
			uint16_t pkt_idx = 0;
			while ((pkt_idx < MAX_PKT_BURST) && (task->last_idx != ((now_idx - 1) & DELAY_MAX_MASK))) {
				struct queue *queue = &task->buffer[task->last_idx];
				while ((pkt_idx < MAX_PKT_BURST) && (queue->queue_tail != queue->queue_head)) {
					out[pkt_idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
					new_mbufs[pkt_idx++] = queue->queue_elem[queue->queue_tail].mbuf;
					queue->queue_tail = (queue->queue_tail + 1) & task->queue_mask;
				}
				task->last_idx = (task->last_idx + 1) & DELAY_MAX_MASK;
			}

			if (pkt_idx)
				task->base.tx_pkt(&task->base, new_mbufs, pkt_idx, out);
		}
		for (int i = 0; i < DELAY_MAX_MASK + 1; i++) {
			if (task->buffer[i].queue_elem)
				prox_free(task->buffer[i].queue_elem);
		}
		prox_free(task->buffer);
		task->buffer = NULL;
	}

	if (queue_len < MAX_PKT_BURST)
		queue_len = MAX_PKT_BURST;
	task->queue_mask = queue_len - 1;
	if (task->queue_mask < MAX_PKT_BURST - 1)
		task->queue_mask = MAX_PKT_BURST - 1;
	mem_size = (task->queue_mask + 1) * sizeof(task->queue[0]);

	if (task->delay_us) {
		task->queue_head = 0;
		task->queue_tail = 0;
		task->queue = prox_zmalloc(mem_size, task->socket_id);
		if (task->queue == NULL) {
			plog_err("Not enough memory to allocate queue\n");
			task->queue_mask = 0;
		}
	} else if (task->random_delay_us) {
		size_t size = (DELAY_MAX_MASK + 1) * sizeof(struct queue);
		plog_info("Allocating %zd bytes\n", size);
		task->buffer = prox_zmalloc(size, task->socket_id);
		PROX_PANIC(task->buffer == NULL, "Not enough memory to allocate buffer\n");
		plog_info("Allocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);

		for (int i = 0; i < DELAY_MAX_MASK + 1; i++) {
			task->buffer[i].queue_elem = prox_zmalloc(mem_size, task->socket_id);
			PROX_PANIC(task->buffer[i].queue_elem == NULL, "Not enough memory to allocate buffer elems\n");
		}
	}
	random_init_seed(&task->state);
}

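/*
 * Drop-only mode (no delay configured): forward or discard each packet based
 * on the drop probability, optionally rewriting its source MAC.
 */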
static int handle_bulk_random_drop(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_impair *task = (struct task_impair *)tbase;
	uint8_t out[MAX_PKT_BURST];
	prox_rte_ether_hdr * hdr[MAX_PKT_BURST];
	int ret = 0;
	for (uint16_t i = 0; i < n_pkts; ++i) {
		PREFETCH0(mbufs[i]);
	}
	for (uint16_t i = 0; i < n_pkts; ++i) {
		hdr[i] = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *);
		PREFETCH0(hdr[i]);
	}
	if (task->flags & IMPAIR_SET_MAC) {
		for (uint16_t i = 0; i < n_pkts; ++i) {
			prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
			out[i] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
		}
	} else {
		for (uint16_t i = 0; i < n_pkts; ++i) {
			out[i] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
		}
	}
	ret = task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
	task_impair_update(tbase);
	return ret;
}

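/*
 * Fixed-delay mode: enqueue each packet with its release timestamp
 * (now + delay_time) in the circular queue, then transmit the packets whose
 * timestamp has expired, applying the drop probability when one is set.
 */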
static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_impair *task = (struct task_impair *)tbase;
	uint64_t now = rte_rdtsc();
	uint8_t out[MAX_PKT_BURST] = {0};
	uint16_t enqueue_failed;
	uint16_t i;
	int ret = 0;
	prox_rte_ether_hdr * hdr[MAX_PKT_BURST];
	for (uint16_t i = 0; i < n_pkts; ++i) {
		PREFETCH0(mbufs[i]);
	}
	for (uint16_t i = 0; i < n_pkts; ++i) {
		hdr[i] = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *);
		PREFETCH0(hdr[i]);
	}

	int nb_empty_slots = (task->queue_tail - task->queue_head + task->queue_mask) & task->queue_mask;
	if (likely(nb_empty_slots >= n_pkts)) {
		/* We know n_pkts fits, no need to check for every packet */
		for (i = 0; i < n_pkts; ++i) {
			if (task->flags & IMPAIR_SET_MAC)
				prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
			task->queue[task->queue_head].tsc = now + task->delay_time;
			task->queue[task->queue_head].mbuf = mbufs[i];
			task->queue_head = (task->queue_head + 1) & task->queue_mask;
		}
	} else {
		for (i = 0; i < n_pkts; ++i) {
			if (((task->queue_head + 1) & task->queue_mask) != task->queue_tail) {
				if (task->flags & IMPAIR_SET_MAC)
					prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
				task->queue[task->queue_head].tsc = now + task->delay_time;
				task->queue[task->queue_head].mbuf = mbufs[i];
				task->queue_head = (task->queue_head + 1) & task->queue_mask;
			}
			else {
				/* Rest does not fit, need to drop those packets. */
				enqueue_failed = i;
				for (; i < n_pkts; ++i) {
					out[i] = OUT_DISCARD;
				}
				ret += task->base.tx_pkt(&task->base, mbufs + enqueue_failed,
						n_pkts - enqueue_failed, out + enqueue_failed);
				break;
			}
		}
	}

	struct rte_mbuf *new_mbufs[MAX_PKT_BURST];
	uint16_t idx = 0;

	if (task->tresh != RAND_MAX) {
		while (idx < MAX_PKT_BURST && task->queue_tail != task->queue_head) {
			if (task->queue[task->queue_tail].tsc <= now) {
				out[idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
				new_mbufs[idx] = task->queue[task->queue_tail].mbuf;
				PREFETCH0(new_mbufs[idx]);
				PREFETCH0(&new_mbufs[idx]->cacheline1);
				idx++;
				task->queue_tail = (task->queue_tail + 1) & task->queue_mask;
			}
			else {
				break;
			}
		}
	} else {
		while (idx < MAX_PKT_BURST && task->queue_tail != task->queue_head) {
			if (task->queue[task->queue_tail].tsc <= now) {
				out[idx] = 0;
				new_mbufs[idx] = task->queue[task->queue_tail].mbuf;
				PREFETCH0(new_mbufs[idx]);
				PREFETCH0(&new_mbufs[idx]->cacheline1);
				idx++;
				task->queue_tail = (task->queue_tail + 1) & task->queue_mask;
			}
			else {
				break;
			}
		}
	}

	if (idx)
		ret += task->base.tx_pkt(&task->base, new_mbufs, idx, out);
	task_impair_update(tbase);
	return ret;
}

/*
 * We want to avoid using division and modulo for performance reasons.
 * We also want to support up to one second of delay, expressed in TSC cycles,
 * so the delay needs up to 32 bits (assuming the processor frequency is below 4GHz).
 * If the max_delay is smaller, we make sure we use fewer bits.
 * Note that we lose the MSB of the xorshift - 64 bits could hold
 * two or three delays in TSC - but that would probably make the implementation
 * more complex for little expected gain. Maybe room for optimization.
 * Using this implementation, we might have to run the random generator more
 * than once for a delay, but on average this should occur less than 50% of the time.
*/
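/*
 * Worked example (numbers are illustrative, assuming a 3 GHz TSC): a 1 ms
 * maximum delay is max_delay = 3,000,000 cycles, so max_delay_mask =
 * rte_align32pow2(3,000,000) - 1 = 4,194,303. A draw is rejected with
 * probability 1 - 3000000/4194304, about 28%, i.e. roughly 1.4 draws per
 * delay on average.
 */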

static inline uint64_t random_delay(struct random *state, uint64_t max_delay, uint64_t max_delay_mask)
{
	uint64_t val;
	while(1) {
		val = random_next(state);
		if ((val & max_delay_mask) < max_delay)
			return (val & max_delay_mask);
	}
}

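/*
 * Random-delay mode: each packet gets a uniformly distributed delay and is
 * placed in the timing-wheel slot corresponding to its release time
 * (slot index = tsc >> DELAY_ACCURACY, masked). If that slot is full, the
 * packet spills over to the next slot with room; slots up to the current
 * time are then drained and transmitted, applying the drop probability.
 */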
static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_impair *task = (struct task_impair *)tbase;
	uint64_t now = rte_rdtsc();
	uint8_t out[MAX_PKT_BURST];
	uint16_t enqueue_failed;
	uint16_t i;
	int ret = 0;
	uint64_t packet_time, idx;
	uint64_t now_idx = (now >> DELAY_ACCURACY) & DELAY_MAX_MASK;
	prox_rte_ether_hdr * hdr[MAX_PKT_BURST];
	for (uint16_t i = 0; i < n_pkts; ++i) {
		PREFETCH0(mbufs[i]);
	}
	for (uint16_t i = 0; i < n_pkts; ++i) {
		hdr[i] = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *);
		PREFETCH0(hdr[i]);
	}

	for (i = 0; i < n_pkts; ++i) {
		packet_time = now + random_delay(&task->state, task->delay_time, task->delay_time_mask);
		idx = (packet_time >> DELAY_ACCURACY) & DELAY_MAX_MASK;
		while (idx != ((now_idx - 1) & DELAY_MAX_MASK)) {
			struct queue *queue = &task->buffer[idx];
			if (((queue->queue_head + 1) & task->queue_mask) != queue->queue_tail) {
				if (task->flags & IMPAIR_SET_MAC)
					prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
				queue->queue_elem[queue->queue_head].mbuf = mbufs[i];
				queue->queue_head = (queue->queue_head + 1) & task->queue_mask;
				break;
			} else {
				idx = (idx + 1) & DELAY_MAX_MASK;
			}
		}
		if (idx == ((now_idx - 1) & DELAY_MAX_MASK)) {
			/* No slot has room, drop this packet. Note that later packets
			   might still fit, as they may be scheduled for earlier slots. */
			out[0] = OUT_DISCARD;
			ret += task->base.tx_pkt(&task->base, mbufs + i, 1, out);
			plog_warn("Unexpectedly dropping packets\n");
		}
	}

	struct rte_mbuf *new_mbufs[MAX_PKT_BURST];
	uint16_t pkt_idx = 0;

	while ((pkt_idx < MAX_PKT_BURST) && (task->last_idx != ((now_idx - 1) & DELAY_MAX_MASK))) {
		struct queue *queue = &task->buffer[task->last_idx];
		while ((pkt_idx < MAX_PKT_BURST) && (queue->queue_tail != queue->queue_head)) {
			out[pkt_idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
			new_mbufs[pkt_idx] = queue->queue_elem[queue->queue_tail].mbuf;
			PREFETCH0(new_mbufs[pkt_idx]);
			PREFETCH0(&new_mbufs[pkt_idx]->cacheline1);
			pkt_idx++;
			queue->queue_tail = (queue->queue_tail + 1) & task->queue_mask;
		}
		task->last_idx = (task->last_idx + 1) & DELAY_MAX_MASK;
	}

	if (pkt_idx)
		ret += task->base.tx_pkt(&task->base, new_mbufs, pkt_idx, out);
	task_impair_update(tbase);
	return ret;
}

static void init_task(struct task_base *tbase, struct task_args *targ)
{
	struct task_impair *task = (struct task_impair *)tbase;
	uint32_t queue_len = 0;
	size_t mem_size;
	unsigned socket_id;
	uint64_t delay_us = 0;

	task->seed = rte_rdtsc();
	if (targ->probability == 0)
		targ->probability = 1000000;

	task->tresh = ((uint64_t) RAND_MAX) * targ->probability / 1000000;

	if ((targ->delay_us == 0) && (targ->random_delay_us == 0)) {
		tbase->handle_bulk = handle_bulk_random_drop;
		task->delay_time = 0;
	} else if (targ->random_delay_us) {
		tbase->handle_bulk = handle_bulk_impair_random;
		task->delay_time = usec_to_tsc(targ->random_delay_us);
		task->delay_time_mask = rte_align32pow2(task->delay_time) - 1;
		delay_us = targ->random_delay_us;
		queue_len = rte_align32pow2((1250L * delay_us) / 84 / (DELAY_MAX_MASK + 1));
	} else {
		task->delay_time = usec_to_tsc(targ->delay_us);
		delay_us = targ->delay_us;
		queue_len = rte_align32pow2(1250 * delay_us / 84);
	}
	/* Assume line rate is the maximum transmit speed.
	   TODO: use the link speed when TX is a port.
	*/
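	/* Sizing note: 1250 bytes/us corresponds to 10 Gbps line rate and 84 bytes
	   to a minimum-size Ethernet frame on the wire (64 bytes plus preamble and
	   inter-frame gap), so 1250 * delay_us / 84 is the worst-case number of
	   packets (about 14.88 per us) that can arrive during the configured delay. */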
	if (queue_len < MAX_PKT_BURST)
		queue_len = MAX_PKT_BURST;
	task->queue_mask = queue_len - 1;
	if (task->queue_mask < MAX_PKT_BURST - 1)
		task->queue_mask = MAX_PKT_BURST - 1;

	mem_size = (task->queue_mask + 1) * sizeof(task->queue[0]);
	socket_id = rte_lcore_to_socket_id(targ->lconf->id);
	task->socket_id = socket_id;

	if (targ->delay_us) {
		task->queue = prox_zmalloc(mem_size, socket_id);
		PROX_PANIC(task->queue == NULL, "Not enough memory to allocate queue\n");
		task->queue_head = 0;
		task->queue_tail = 0;
	} else if (targ->random_delay_us) {
		size_t size = (DELAY_MAX_MASK + 1) * sizeof(struct queue);
		plog_info("Allocating %zd bytes\n", size);
		task->buffer = prox_zmalloc(size, socket_id);
		PROX_PANIC(task->buffer == NULL, "Not enough memory to allocate buffer\n");
		plog_info("Allocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);

		for (int i = 0; i < DELAY_MAX_MASK + 1; i++) {
			task->buffer[i].queue_elem = prox_zmalloc(mem_size, socket_id);
			PROX_PANIC(task->buffer[i].queue_elem == NULL, "Not enough memory to allocate buffer elems\n");
		}
	}
	random_init_seed(&task->state);
	if (targ->nb_txports) {
		memcpy(&task->src_mac[0], &prox_port_cfg[tbase->tx_params_hw.tx_port_queue[0].port].eth_addr, sizeof(prox_rte_ether_addr));
		task->flags = IMPAIR_SET_MAC;
	} else {
		task->flags = 0;
	}
}

static struct task_init tinit = {
	.mode_str = "impair",
	.init = init_task,
	.handle = handle_bulk_impair,
	.flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_ZERO_RX,
	.size = sizeof(struct task_impair)
};

__attribute__((constructor)) static void ctor(void)
{
	reg_task(&tinit);
}