// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
20 #include "mbuf_utils.h"
21 #include "task_init.h"
22 #include "task_base.h"
25 #include "prox_port_cfg.h"
27 #include "prox_cksum.h"
/* Task that sends packets to multiple outputs. Note that in case of n
   outputs, the output packet rate is n times the input packet
   rate. Also, since the packet is duplicated by increasing the
   refcnt, a change to a packet in subsequent tasks connected through
   one of the outputs of this task will also change the packets as
   seen by tasks connected behind through other outputs. The correct
   way to resolve this is to create deep copies of the packet. */
38 struct task_base base;
44 struct task_mirror_copy {
45 struct task_base base;
46 struct rte_mempool *mempool;
50 static int handle_mirror_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
53 struct task_mirror *task = (struct task_mirror *)tbase;
54 uint8_t out[MAX_PKT_BURST];
55 struct rte_mbuf *mbufs2[MAX_PKT_BURST];
57 /* Since after calling tx_pkt the mbufs parameter of a handle
58 function becomes invalid and handle_mirror calls tx_pkt
59 multiple times, the pointers are copied first. This copy is
60 used in each call to tx_pkt below. */
61 rte_memcpy(mbufs2, mbufs, sizeof(mbufs[0]) * n_pkts);
62 /* prefetch for optimization */
63 prox_rte_ether_hdr * hdr[MAX_PKT_BURST];
64 for (uint16_t j = 0; j < n_pkts; ++j) {
67 for (uint16_t j = 0; j < n_pkts; ++j) {
68 hdr[j] = rte_pktmbuf_mtod(mbufs2[j], prox_rte_ether_hdr *);
71 for (uint16_t j = 0; j < n_pkts; ++j) {
72 rte_pktmbuf_refcnt_update(mbufs2[j], task->n_dests * task->multiplier - 1);
73 prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *) (hdr[j] + 1);
74 if ((task->mirror_size != 0) && (hdr[j]->ether_type == ETYPE_IPv4) && ((pip->next_proto_id == IPPROTO_UDP) || (pip->next_proto_id == IPPROTO_TCP))) {
75 rte_pktmbuf_pkt_len(mbufs2[j]) = task->mirror_size;
76 rte_pktmbuf_data_len(mbufs2[j]) = task->mirror_size;
77 pip->total_length = rte_bswap16(task->mirror_size-sizeof(prox_rte_ether_hdr));
78 pip->hdr_checksum = 0;
79 prox_ip_cksum_sw(pip);
80 int l4_len = task->mirror_size - sizeof(prox_rte_ether_hdr) - sizeof(prox_rte_ipv4_hdr);
81 if (pip->next_proto_id == IPPROTO_UDP) {
82 prox_rte_udp_hdr *udp = (prox_rte_udp_hdr *)(((uint8_t *)pip) + sizeof(prox_rte_ipv4_hdr));
83 udp->dgram_len = rte_bswap16(l4_len);
84 prox_udp_cksum_sw(udp, l4_len, pip->src_addr, pip->dst_addr);
85 } else if (pip->next_proto_id == IPPROTO_TCP) {
86 prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(((uint8_t *)pip) + sizeof(prox_rte_ipv4_hdr));
87 prox_tcp_cksum_sw(tcp, l4_len, pip->src_addr, pip->dst_addr);
91 for (uint16_t j = 0; j < task->n_dests; ++j) {
92 memset(out, j, n_pkts);
93 for (uint16_t i = 0; i < task->multiplier; ++i) {
94 ret += task->base.tx_pkt(&task->base, mbufs2, n_pkts, out);
100 static int handle_mirror_bulk_copy(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
102 struct task_mirror_copy *task = (struct task_mirror_copy *)tbase;
103 uint8_t out[MAX_PKT_BURST];
106 /* Send copies of the packet to all but the first
108 struct rte_mbuf *new_pkts[MAX_PKT_BURST];
110 for (uint16_t j = 1; j < task->n_dests; ++j) {
111 if (rte_mempool_get_bulk(task->mempool, (void **)new_pkts, n_pkts) < 0) {
114 /* Finally, forward the incoming packets. */
115 for (uint16_t i = 0; i < n_pkts; ++i) {
120 init_mbuf_seg(new_pkts[i]);
122 pkt_len = rte_pktmbuf_pkt_len(mbufs[i]);
123 rte_pktmbuf_pkt_len(new_pkts[i]) = pkt_len;
124 rte_pktmbuf_data_len(new_pkts[i]) = pkt_len;
126 dst = rte_pktmbuf_mtod(new_pkts[i], void *);
127 src = rte_pktmbuf_mtod(mbufs[i], void *);
129 rte_memcpy(dst, src, pkt_len);
131 ret+= task->base.tx_pkt(&task->base, new_pkts, n_pkts, out);
134 /* Finally, forward the incoming packets to the first destination. */
135 memset(out, 0, n_pkts);
136 ret+= task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
140 static void init_task_mirror(struct task_base *tbase, struct task_args *targ)
142 struct task_mirror *task = (struct task_mirror *)tbase;
143 task->n_dests = targ->nb_txports? targ->nb_txports : targ->nb_txrings;
144 task->multiplier = targ->multiplier? targ->multiplier : 1;
145 task->mirror_size = targ->mirror_size > 63? targ->mirror_size: 0;
148 static void init_task_mirror_copy(struct task_base *tbase, struct task_args *targ)
150 static char name[] = "mirror_pool";
151 struct task_mirror_copy *task = (struct task_mirror_copy *)tbase;
152 const int sock_id = rte_lcore_to_socket_id(targ->lconf->id);
153 task->n_dests = targ->nb_txports? targ->nb_txports : targ->nb_txrings;
156 task->mempool = rte_mempool_create(name,
157 targ->nb_mbuf - 1, MBUF_SIZE,
159 sizeof(struct rte_pktmbuf_pool_private),
160 rte_pktmbuf_pool_init, NULL,
163 PROX_PANIC(task->mempool == NULL,
164 "Failed to allocate memory pool on socket %u with %u elements\n",
165 sock_id, targ->nb_mbuf - 1);
166 task->n_dests = targ->nb_txports? targ->nb_txports : targ->nb_txrings;
169 static struct task_init task_init_mirror = {
170 .mode_str = "mirror",
171 .init = init_task_mirror,
172 .handle = handle_mirror_bulk,
173 .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_TXQ_FLAGS_REFCOUNT,
174 .size = sizeof(struct task_mirror),
177 static struct task_init task_init_mirror2 = {
178 .mode_str = "mirror",
179 .sub_mode_str = "copy",
180 .init = init_task_mirror_copy,
181 .handle = handle_mirror_bulk_copy,
182 .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
183 .size = sizeof(struct task_mirror),
186 __attribute__((constructor)) static void reg_task_mirror(void)
188 reg_task(&task_init_mirror);
189 reg_task(&task_init_mirror2);