// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
17 #include <rte_ethdev.h>
18 #include <rte_ether.h>
21 #include <rte_version.h>
22 #if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
23 #include <rte_vxlan.h>
26 #include "vxlangpe_nsh.h"
27 #include "task_base.h"
29 #include "task_init.h"
30 #include "thread_generic.h"
/* Total length of the outer encapsulation in front of the inner frame:
 * Ethernet + IPv4 + UDP + VxLAN-GPE + NSH headers.
 * The whole expansion is parenthesized so the macro stays a single value
 * in any expression context (e.g. `x * VXLAN_GPE_HDR_SZ`). */
#define VXLAN_GPE_HDR_SZ (sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_udp_hdr) + sizeof(prox_rte_vxlan_gpe_hdr) + sizeof(struct nsh_hdr))

/* Constants are stored byte-swapped so they compare directly against
 * network-order fields without a runtime ntohs(). */
#define ETHER_NSH_TYPE 0x4F89 /* 0x894F in little endian */
#define VXLAN_GPE_NSH_TYPE 0xB612 /* 4790 in little endian */
#define VXLAN_GPE_NP 0x4

uint16_t decap_nsh_packets(struct rte_mbuf **mbufs, uint16_t n_pkts);
uint16_t encap_nsh_packets(struct rte_mbuf **mbufs, uint16_t n_pkts);
42 struct task_decap_nsh {
43 struct task_base base;
46 struct task_encap_nsh {
47 struct task_base base;
/* decapnsh has no per-task configuration; nothing to initialize. */
static void init_task_decap_nsh(__attribute__((unused)) struct task_base *tbase,
		__attribute__((unused)) struct task_args *targ)
{
}
56 static inline uint8_t handle_decap_nsh(__attribute__((unused)) struct task_decap_nsh *task, struct rte_mbuf *mbuf)
58 prox_rte_ether_hdr *eth_hdr = NULL;
59 prox_rte_udp_hdr *udp_hdr = NULL;
60 prox_rte_vxlan_gpe_hdr *vxlan_gpe_hdr = NULL;
63 eth_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
64 if (eth_hdr->ether_type == ETHER_NSH_TYPE) {
65 /* "decapsulate" Ethernet + NSH header by moving packet pointer */
66 hdr_len = sizeof(prox_rte_ether_hdr) + sizeof(struct nsh_hdr);
68 mbuf->data_len = (uint16_t)(mbuf->data_len - hdr_len);
69 mbuf->data_off += hdr_len;
70 mbuf->pkt_len = (uint32_t)(mbuf->pkt_len - hdr_len);
71 /* save length of header in reserved 16bits of rte_mbuf */
72 mbuf->udata64 = hdr_len;
75 if (mbuf->data_len < VXLAN_GPE_HDR_SZ) {
80 /* check the UDP destination port */
81 udp_hdr = (prox_rte_udp_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr));
82 if (udp_hdr->dst_port != VXLAN_GPE_NSH_TYPE) {
87 /* check the Next Protocol field in VxLAN-GPE header */
88 vxlan_gpe_hdr = (prox_rte_vxlan_gpe_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_udp_hdr));
89 if (vxlan_gpe_hdr->proto != VXLAN_GPE_NP) {
94 /* "decapsulate" VxLAN-GPE + NSH header by moving packet pointer */
95 hdr_len = VXLAN_GPE_HDR_SZ;
97 mbuf->data_len = (uint16_t)(mbuf->data_len - hdr_len);
98 mbuf->data_off += hdr_len;
99 mbuf->pkt_len = (uint32_t)(mbuf->pkt_len - hdr_len);
100 /* save length of header in reserved 16bits of rte_mbuf */
101 mbuf->udata64 = hdr_len;
107 static int handle_decap_nsh_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
109 struct task_decap_nsh *task = (struct task_decap_nsh *)tbase;
110 uint8_t out[MAX_PKT_BURST];
113 prefetch_first(mbufs, n_pkts);
114 for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) {
115 #ifdef PROX_PREFETCH_OFFSET
116 PREFETCH0(mbufs[j + PREFETCH_OFFSET]);
117 PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *));
119 out[j] = handle_decap_nsh(task, mbufs[j]);
121 #ifdef PROX_PREFETCH_OFFSET
122 PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *));
123 for (; j < n_pkts; ++j) {
124 out[j] = handle_decap_nsh(task, mbufs[j]);
127 return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
/* encapnsh has no per-task configuration; nothing to initialize. */
static void init_task_encap_nsh(__attribute__((unused)) struct task_base *tbase,
		__attribute__((unused)) struct task_args *targ)
{
}
136 static inline uint8_t handle_encap_nsh(__attribute__((unused)) struct task_encap_nsh *task, struct rte_mbuf *mbuf)
138 prox_rte_ether_hdr *eth_hdr = NULL;
139 struct nsh_hdr *nsh_hdr = NULL;
140 prox_rte_udp_hdr *udp_hdr = NULL;
141 prox_rte_vxlan_gpe_hdr *vxlan_gpe_hdr = NULL;
146 if (mbuf->udata64 == 0)
149 /* use header length saved in reserved 16bits of rte_mbuf to
150 "encapsulate" transport + NSH header by moving packet pointer */
151 mbuf->data_len = (uint16_t)(mbuf->data_len + mbuf->udata64);
152 mbuf->data_off -= mbuf->udata64;
153 mbuf->pkt_len = (uint32_t)(mbuf->pkt_len + mbuf->udata64);
155 eth_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
156 if (eth_hdr->ether_type == ETHER_NSH_TYPE) {
157 nsh_hdr = (struct nsh_hdr *) (((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr));
159 /* decrement Service Index in NSH header */
160 if (nsh_hdr->sf_index > 0)
161 nsh_hdr->sf_index -= 1;
164 /* "encapsulate" VxLAN-GPE + NSH header by moving packet pointer */
165 if (mbuf->data_len < VXLAN_GPE_HDR_SZ)
168 /* check the UDP destination port */
169 udp_hdr = (prox_rte_udp_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr));
170 if (udp_hdr->dst_port != VXLAN_GPE_NSH_TYPE)
173 /* check the Next Protocol field in VxLAN-GPE header */
174 vxlan_gpe_hdr = (prox_rte_vxlan_gpe_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_udp_hdr));
175 if (vxlan_gpe_hdr->proto != VXLAN_GPE_NP)
178 /* decrement Service Index in NSH header */
179 nsh_hdr = (struct nsh_hdr *)(((unsigned char *)vxlan_gpe_hdr) + sizeof(prox_rte_vxlan_gpe_hdr));
180 if (nsh_hdr->sf_index > 0)
181 nsh_hdr->sf_index -= 1;
187 static int handle_encap_nsh_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
189 struct task_encap_nsh *task = (struct task_encap_nsh *)tbase;
190 uint8_t out[MAX_PKT_BURST];
193 prefetch_first(mbufs, n_pkts);
194 for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) {
195 #ifdef PROX_PREFETCH_OFFSET
196 PREFETCH0(mbufs[j + PREFETCH_OFFSET]);
197 PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *));
199 out[j] = handle_encap_nsh(task, mbufs[j]);
201 #ifdef PROX_PREFETCH_OFFSET
202 PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *));
203 for (; j < n_pkts; ++j) {
204 out[j] = handle_encap_nsh(task, mbufs[j]);
207 return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
210 static struct task_init task_init_decap_nsh = {
211 .mode_str = "decapnsh",
212 .init = init_task_decap_nsh,
213 .handle = handle_decap_nsh_bulk,
214 .thread_x = thread_generic,
215 .size = sizeof(struct task_decap_nsh)
218 static struct task_init task_init_encap_nsh = {
219 .mode_str = "encapnsh",
220 .init = init_task_encap_nsh,
221 .handle = handle_encap_nsh_bulk,
222 .size = sizeof(struct task_encap_nsh)
225 __attribute__((constructor)) static void reg_task_nshtag(void)
227 reg_task(&task_init_decap_nsh);
228 reg_task(&task_init_encap_nsh);