2 // Copyright (c) 2010-2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
17 #include <rte_ethdev.h>
18 #include <rte_ether.h>
21 #include <rte_version.h>
22 #if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
23 #include <rte_vxlan.h>
26 #include "vxlangpe_nsh.h"
27 #include "task_base.h"
29 #include "task_init.h"
30 #include "thread_generic.h"
/* Total size of the outer encapsulation in front of the inner packet:
 * Ethernet + IPv4 + UDP + VxLAN-GPE + NSH.
 * NOTE(review): the expansion is not wrapped in parentheses; this is safe
 * in the current relational uses (`data_len < VXLAN_GPE_HDR_SZ`) but would
 * misbehave if the macro were ever combined with higher-precedence
 * operators -- consider parenthesizing. */
#define VXLAN_GPE_HDR_SZ sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_udp_hdr) + sizeof(prox_rte_vxlan_gpe_hdr) + sizeof(struct nsh_hdr)
#define ETHER_NSH_TYPE 0x4F89 /* NSH ethertype 0x894F kept in network byte order, i.e. as it appears in memory on a little-endian host */
#define VXLAN_GPE_NSH_TYPE 0xB612 /* UDP port 4790 (VxLAN-GPE, 0x12B6) in network byte order on a little-endian host */
#define VXLAN_GPE_NP 0x4 /* VxLAN-GPE "Next Protocol" value indicating an NSH payload */

/* Forward declarations; the definitions are not in this file's visible scope. */
uint16_t decap_nsh_packets(struct rte_mbuf **mbufs, uint16_t n_pkts);
uint16_t encap_nsh_packets(struct rte_mbuf **mbufs, uint16_t n_pkts);
/* Per-task state for the "decapnsh" mode; carries no state of its own
 * beyond the common PROX task base. */
struct task_decap_nsh {
	struct task_base base;

/* Per-task state for the "encapnsh" mode; likewise only the common base. */
struct task_encap_nsh {
	struct task_base base;
/* Init hook for the "decapnsh" task mode: the task is stateless, so there
 * is nothing to initialize (both arguments are deliberately unused). */
static void init_task_decap_nsh(__attribute__((unused)) struct task_base *tbase,
		__attribute__((unused)) struct task_args *targ)
/*
 * Strip the NSH encapsulation from one packet by moving the mbuf's data
 * pointer forward (zero-copy: only data_off/data_len/pkt_len change).
 * Two encapsulations are recognized:
 *   1. plain Ethernet + NSH (ethertype 0x894F), and
 *   2. Ethernet / IPv4 / UDP(4790) / VxLAN-GPE (next-protocol NSH) + NSH.
 * The number of stripped bytes is saved inside the mbuf (dynfield1[0] on
 * DPDK >= 20.11, udata64 on older DPDK) so that the matching encap task
 * can later undo the decapsulation; 0 is stored when the packet is left
 * untouched.  Header fields are compared against pre-byte-swapped
 * constants, i.e. in network byte order -- presumably assuming a
 * little-endian host (see the *_TYPE macro comments); TODO confirm.
 */
static inline uint8_t handle_decap_nsh(__attribute__((unused)) struct task_decap_nsh *task, struct rte_mbuf *mbuf)
	prox_rte_ether_hdr *eth_hdr = NULL;
	prox_rte_udp_hdr *udp_hdr = NULL;
	prox_rte_vxlan_gpe_hdr *vxlan_gpe_hdr = NULL;

	eth_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
	if (eth_hdr->ether_type == ETHER_NSH_TYPE) {
		/* "decapsulate" Ethernet + NSH header by moving packet pointer */
		hdr_len = sizeof(prox_rte_ether_hdr) + sizeof(struct nsh_hdr);

		mbuf->data_len = (uint16_t)(mbuf->data_len - hdr_len);
		mbuf->data_off += hdr_len;
		mbuf->pkt_len = (uint32_t)(mbuf->pkt_len - hdr_len);
#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
		/* save length of the stripped header in dynfield1 of rte_mbuf */
		mbuf->dynfield1[0] = hdr_len;
		/* pre-20.11 DPDK: save length of the stripped header in udata64 */
		mbuf->udata64 = hdr_len;

	/* too short to contain the full VxLAN-GPE + NSH encapsulation:
	 * record "nothing stripped" and pass the packet through unchanged */
	if (mbuf->data_len < VXLAN_GPE_HDR_SZ) {
#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
		mbuf->dynfield1[0] = 0;

	/* check the UDP destination port (must be VxLAN-GPE, 4790) */
	udp_hdr = (prox_rte_udp_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr));
	if (udp_hdr->dst_port != VXLAN_GPE_NSH_TYPE) {
#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
		mbuf->dynfield1[0] = 0;

	/* check the Next Protocol field in the VxLAN-GPE header (must be NSH) */
	vxlan_gpe_hdr = (prox_rte_vxlan_gpe_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_udp_hdr));
	if (vxlan_gpe_hdr->proto != VXLAN_GPE_NP) {
#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
		mbuf->dynfield1[0] = 0;

	/* "decapsulate" VxLAN-GPE + NSH header by moving packet pointer */
	hdr_len = VXLAN_GPE_HDR_SZ;

	mbuf->data_len = (uint16_t)(mbuf->data_len - hdr_len);
	mbuf->data_off += hdr_len;
	mbuf->pkt_len = (uint32_t)(mbuf->pkt_len - hdr_len);
#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
	/* save length of the stripped header in dynfield1 of rte_mbuf */
	mbuf->dynfield1[0] = hdr_len;
	/* pre-20.11 DPDK: save length of the stripped header in udata64 */
	mbuf->udata64 = hdr_len;
/* Bulk handler for "decapnsh": runs handle_decap_nsh() over a burst of
 * mbufs, prefetching PREFETCH_OFFSET packets ahead to hide memory latency,
 * then hands the burst (with per-packet out[] routing decisions) to the
 * task's tx function. */
static int handle_decap_nsh_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
	struct task_decap_nsh *task = (struct task_decap_nsh *)tbase;
	uint8_t out[MAX_PKT_BURST];

	prefetch_first(mbufs, n_pkts);
	/* main loop: stay PREFETCH_OFFSET packets behind the prefetcher */
	for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) {
#ifdef PROX_PREFETCH_OFFSET
		PREFETCH0(mbufs[j + PREFETCH_OFFSET]);
		PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *));
		out[j] = handle_decap_nsh(task, mbufs[j]);
#ifdef PROX_PREFETCH_OFFSET
	PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *));
	/* tail loop: remaining packets with no lookahead left to prefetch */
	for (; j < n_pkts; ++j) {
		out[j] = handle_decap_nsh(task, mbufs[j]);

	return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
/* Init hook for the "encapnsh" task mode: the task is stateless, so there
 * is nothing to initialize (both arguments are deliberately unused). */
static void init_task_encap_nsh(__attribute__((unused)) struct task_base *tbase,
		__attribute__((unused)) struct task_args *targ)
/*
 * Reverse of handle_decap_nsh(): restore the encapsulation that the decap
 * task stripped, by moving the mbuf data pointer back by the header length
 * it saved in the mbuf (dynfield1[0] on DPDK >= 20.11, udata64 before).
 * No new header bytes are written -- the original headers are assumed to
 * still sit untouched in front of data_off (valid only when decap and
 * encap run on the same mbuf without it being rewritten in between; TODO
 * confirm this pipeline invariant).  After restoring, the NSH Service
 * Index is decremented, as a service function must do per RFC 8300.
 * A saved length of 0 means the packet was never decapsulated.
 */
static inline uint8_t handle_encap_nsh(__attribute__((unused)) struct task_encap_nsh *task, struct rte_mbuf *mbuf)
	prox_rte_ether_hdr *eth_hdr = NULL;
	struct nsh_hdr *nsh_hdr = NULL;
	prox_rte_udp_hdr *udp_hdr = NULL;
	prox_rte_vxlan_gpe_hdr *vxlan_gpe_hdr = NULL;

	/* nothing was stripped by the decap task: pass through unchanged */
#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
	if (mbuf->dynfield1[0] == 0)
	if (mbuf->udata64 == 0)

#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
	/* use header length saved in dynfield1 of rte_mbuf to
	   "encapsulate" transport + NSH header by moving packet pointer */
	mbuf->data_len = (uint16_t)(mbuf->data_len + mbuf->dynfield1[0]);
	mbuf->data_off -= mbuf->dynfield1[0];
	mbuf->pkt_len = (uint32_t)(mbuf->pkt_len + mbuf->dynfield1[0]);
	/* pre-20.11 DPDK: use header length saved in udata64 to
	   "encapsulate" transport + NSH header by moving packet pointer */
	mbuf->data_len = (uint16_t)(mbuf->data_len + mbuf->udata64);
	mbuf->data_off -= mbuf->udata64;
	mbuf->pkt_len = (uint32_t)(mbuf->pkt_len + mbuf->udata64);

	eth_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
	if (eth_hdr->ether_type == ETHER_NSH_TYPE) {
		/* plain Ethernet + NSH encapsulation */
		nsh_hdr = (struct nsh_hdr *) (((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr));

		/* decrement Service Index in NSH header */
		if (nsh_hdr->sf_index > 0)
			nsh_hdr->sf_index -= 1;

	/* "encapsulate" VxLAN-GPE + NSH header by moving packet pointer;
	 * the restored headers are sanity-checked before touching the NSH */
	if (mbuf->data_len < VXLAN_GPE_HDR_SZ)

	/* check the UDP destination port (must be VxLAN-GPE, 4790) */
	udp_hdr = (prox_rte_udp_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr));
	if (udp_hdr->dst_port != VXLAN_GPE_NSH_TYPE)

	/* check the Next Protocol field in the VxLAN-GPE header (must be NSH) */
	vxlan_gpe_hdr = (prox_rte_vxlan_gpe_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_udp_hdr));
	if (vxlan_gpe_hdr->proto != VXLAN_GPE_NP)

	/* decrement Service Index in NSH header */
	nsh_hdr = (struct nsh_hdr *)(((unsigned char *)vxlan_gpe_hdr) + sizeof(prox_rte_vxlan_gpe_hdr));
	if (nsh_hdr->sf_index > 0)
		nsh_hdr->sf_index -= 1;
/* Bulk handler for "encapnsh": runs handle_encap_nsh() over a burst of
 * mbufs with the same PREFETCH_OFFSET lookahead scheme as the decap bulk
 * handler, then forwards the burst via the task's tx function. */
static int handle_encap_nsh_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
	struct task_encap_nsh *task = (struct task_encap_nsh *)tbase;
	uint8_t out[MAX_PKT_BURST];

	prefetch_first(mbufs, n_pkts);
	/* main loop: stay PREFETCH_OFFSET packets behind the prefetcher */
	for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) {
#ifdef PROX_PREFETCH_OFFSET
		PREFETCH0(mbufs[j + PREFETCH_OFFSET]);
		PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *));
		out[j] = handle_encap_nsh(task, mbufs[j]);
#ifdef PROX_PREFETCH_OFFSET
	PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *));
	/* tail loop: remaining packets with no lookahead left to prefetch */
	for (; j < n_pkts; ++j) {
		out[j] = handle_encap_nsh(task, mbufs[j]);

	return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
/* Registration descriptor for the "decapnsh" PROX task mode. */
static struct task_init task_init_decap_nsh = {
	.mode_str = "decapnsh",
	.init = init_task_decap_nsh,
	.handle = handle_decap_nsh_bulk,
	.thread_x = thread_generic,
	.size = sizeof(struct task_decap_nsh)
/* Registration descriptor for the "encapnsh" PROX task mode.
 * NOTE(review): unlike task_init_decap_nsh above, no .thread_x is set
 * here -- confirm whether reg_task() supplies a default thread function
 * or whether `.thread_x = thread_generic` is missing. */
static struct task_init task_init_encap_nsh = {
	.mode_str = "encapnsh",
	.init = init_task_encap_nsh,
	.handle = handle_encap_nsh_bulk,
	.size = sizeof(struct task_encap_nsh)
/* Auto-register both NSH task modes with PROX before main() runs. */
__attribute__((constructor)) static void reg_task_nshtag(void)
	reg_task(&task_init_decap_nsh);
	reg_task(&task_init_encap_nsh);