2 // Copyright (c) 2010-2020 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
20 #include <rte_common.h>
21 #ifndef __rte_cache_aligned
22 #include <rte_memory.h>
26 #include "prox_globals.h"
27 #include "stats_task.h"
28 #include "packet_utils.h"
// runtime_flags 16 bits only
// Per-task behavior switches (bitmask); must fit in a 16-bit field.
#define TASK_MPLS_TAGGING 0x0001
#define TASK_ROUTING 0x0002
#define TASK_CLASSIFY 0x0004
#define TASK_CTRL_HANDLE_ARP 0x0008
// NOTE(review): 0x0010 is unused — presumably a retired flag; confirm before reusing.
#define TASK_MARK 0x0020
#define TASK_FP_HANDLE_ARP 0x0040
#define TASK_TX_CRC 0x0080
#define TASK_L3 0x0100
#define TASK_DO_NOT_FWD_GENEVE 0x0200
// flag_features 64 bits
// Capability/feature bits for a task (bitmask in a 64-bit field).
#define TASK_FEATURE_ROUTING 0x0001
#define TASK_FEATURE_CLASSIFY 0x0002
#define TASK_FEATURE_MULTI_RX 0x0004
#define TASK_FEATURE_NEVER_DISCARDS 0x0008
#define TASK_FEATURE_NO_RX 0x0010
#define TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS 0x0020
#define TASK_FEATURE_TXQ_FLAGS_MULTSEGS 0x0040
#define TASK_FEATURE_ZERO_RX 0x0080
#define TASK_FEATURE_TXQ_FLAGS_REFCOUNT 0x0100
#define TASK_FEATURE_TSC_RX 0x0200
#define TASK_FEATURE_THROUGHPUT_OPT 0x0400
// NOTE(review): 0x0800 and 0x10000 are unused gaps — confirm before reusing.
#define TASK_FEATURE_GRE_ID 0x1000
#define TASK_FEATURE_LUT_QINQ_RSS 0x2000
#define TASK_FEATURE_LUT_QINQ_HASH 0x4000
#define TASK_FEATURE_RX_ALL 0x8000
#define TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL 0x20000
// Per-task-base runtime flags (flush behavior).
#define FLAG_TX_FLUSH 0x01
#define FLAG_NEVER_FLUSH 0x02
// Task specific flags
#define BASE_FLAG_LUT_QINQ_HASH 0x08
#define BASE_FLAG_LUT_QINQ_RSS 0x10

// Special "out" port values — presumably: drop the packet / packet already
// consumed by the handler. TODO confirm against tx_pkt implementations.
#define OUT_DISCARD 0xFF
#define OUT_HANDLED 0xFE

// Index mask for the ws_mbuf circular buffer; relies on the size being a
// power of two (2 * MAX_PKT_BURST).
#define WS_MBUF_MASK (2 * MAX_PKT_BURST - 1)
/* struct ws_mbuf stores the working set of mbufs. It starts with a
   prod/cons index to keep track of the number of elements. */
/* NOTE(review): the opening of this struct (prod/cons fields and the
   `struct ws_mbuf {` line) is not visible in this chunk — lines appear
   to be missing from the extraction. */
	uint16_t pad; /* reserved */
	} idx[MAX_RINGS_PER_TASK]; /* one index pair per input ring */
	struct rte_mbuf *mbuf[][MAX_RING_BURST * 3] __rte_cache_aligned;
/* NOTE(review): the struct closed below is also missing its opening
   (presumably a small port/queue pair struct — TODO confirm). */
} __attribute__((packed));
/* NOTE(review): struct opening (`struct rx_params_hw {` and a port-count
   field) is not visible in this chunk. Fields support round-robin RX
   over multiple hardware port/queue pairs — TODO confirm. */
	uint8_t last_read_portid; /* cursor for round-robin polling — TODO confirm */
	struct port_queue *rx_pq; /* array of port/queue pairs to receive from */
} __attribute__((packed));
/* RX parameters for the single hardware port/queue case: the port_queue
   is stored by value, avoiding a pointer dereference on the hot path. */
struct rx_params_hw1 {
	struct port_queue rx_pq;
} __attribute__((packed));
/* RX parameters when receiving from software (rte_ring) rings.
   NOTE(review): some field lines appear missing from this chunk
   (e.g. the nb_rxrings count referenced by the comment below). */
struct rx_params_sw {
	uint8_t rxrings_mask; /* Used if rte_is_power_of_2(nb_rxrings)*/
	uint8_t last_read_ring; /* round-robin cursor over rx_rings — TODO confirm */
	struct rte_ring **rx_rings; /* array of input rings */
} __attribute__((packed));
/* If there is only one input ring, the pointer to it can be stored
   directly into the task_base instead of having to use a pointer to a
   set of rings which would require two dereferences. */
struct rx_params_sw1 {
	struct rte_ring *rx_ring;
} __attribute__((packed));
/* TX parameters for transmitting to hardware ports.
   NOTE(review): a leading field line (a port count?) appears to be
   missing from this chunk. */
struct tx_params_hw {
	struct port_queue *tx_port_queue; /* array of destination port/queue pairs */
} __attribute__((packed));
/* TX parameters for transmitting to software (rte_ring) rings.
   NOTE(review): a leading field line (a ring count?) appears to be
   missing from this chunk. */
struct tx_params_sw {
	struct rte_ring **tx_rings; /* array of output rings */
} __attribute__((packed));
struct tx_params_hw_sw { /* Only one port supported in this mode */
	/* NOTE(review): a field line (a ring count?) appears to be missing
	   from this chunk. */
	struct rte_ring **tx_rings; /* software output rings */
	struct port_queue tx_port_queue; /* the single hardware output port/queue */
} __attribute__((packed));
/* Runtime packet dump/trace state for a task.
   NOTE(review): several field lines and the closing brace of this struct
   are not visible in this chunk. */
struct task_rt_dump {
	void *pkt_mbuf_addr[MAX_RING_BURST]; /* To track reordering */
	uint8_t pkt_cpy[MAX_RING_BURST][DUMP_PKT_LEN]; /* copies of traced packet data */
	uint16_t pkt_cpy_len[MAX_RING_BURST]; /* length of each copy above */
#define MAX_RX_PKT_ALL 16384

/* Histogram bucket counts sized to the RX/TX burst limits — TODO confirm. */
#define RX_BUCKET_SIZE (2 * MAX_RING_BURST + 1) /* Limit RX bucket size */
#define TX_BUCKET_SIZE (MAX_RING_BURST +1)

/* Maximum depth of the stacked rx_pkt override list (rx_pkt_prev below).
   NOTE(review): "FUCTIONS" is a typo for "FUNCTIONS"; kept as-is because
   other translation units reference this name. */
#define MAX_STACKED_RX_FUCTIONS 16

/* Signature of a task's receive function: returns the number of packets
   received and hands back the mbuf array through *mbufs. */
typedef uint16_t (*rx_pkt_func) (struct task_base *tbase, struct rte_mbuf ***mbufs);
/* Auxiliary (cold-path) task state, kept out of struct task_base so the
   hot-path struct stays small.
   NOTE(review): some member lines and the closing brace of this struct
   are not visible in this chunk. */
struct task_base_aux {
	/* Not used when PROX_STATS is not defined */
	struct task_rt_stats stats;
	/* Used if TASK_TSC_RX is enabled*/
	struct rte_mbuf **all_mbufs;
	/* Depth of the rx_pkt_prev stack (see the add/del helpers below). */
	uint16_t rx_prev_count;
	uint16_t rx_prev_idx;
	/* Stack of rx_pkt functions saved by task_base_add_rx_pkt_function();
	   slot 0 is the most recently replaced one. */
	uint16_t (*rx_pkt_prev[MAX_STACKED_RX_FUCTIONS])(struct task_base *tbase, struct rte_mbuf ***mbufs);
	/* Burst-size histograms — TODO confirm exact semantics. */
	uint32_t rx_bucket[RX_BUCKET_SIZE];
	uint32_t tx_bucket[TX_BUCKET_SIZE];
	/* Alternate/saved transmit entry points. */
	int (*tx_pkt_l2)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
	int (*tx_pkt_orig)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
	int (*tx_pkt_hw)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
	uint16_t (*tx_pkt_try)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts);
	/* Task lifecycle hooks (may be NULL — TODO confirm at call sites). */
	void (*stop)(struct task_base *tbase);
	int (*tx_ctrlplane_pkt)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
	void (*start)(struct task_base *tbase);
	void (*stop_last)(struct task_base *tbase);
	void (*start_first)(struct task_base *tbase);
	struct task_rt_dump task_rt_dump;
	struct rte_mbuf *mbuf;
/* The task_base is accessed for _all_ task types. In case
   no debugging or l3 is needed, it has been optimized to fit
   into a single cache line to minimize cache pollution */
/* NOTE(review): the `struct task_base {` opening line — and presumably
   the union wrappers around the rx/tx params members — are not visible
   in this chunk. */
	int (*handle_bulk)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts);
	int (*tx_pkt)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
	uint16_t (*rx_pkt)(struct task_base *tbase, struct rte_mbuf ***mbufs);
	/* Cold-path state lives behind a pointer to keep this struct small. */
	struct task_base_aux* aux;
	/* The working set of mbufs contains mbufs that are currently in use
	   by the task — NOTE(review): this comment was truncated/unclosed in
	   the original chunk; wording reconstructed, confirm intent. */
	struct ws_mbuf *ws_mbuf;
	/* RX configuration — only one of these is active per task. */
	struct rx_params_hw rx_params_hw;
	struct rx_params_hw1 rx_params_hw1;
	struct rx_params_sw rx_params_sw;
	struct rx_params_sw1 rx_params_sw1;
	/* TX configuration — only one of these is active per task. */
	struct tx_params_hw tx_params_hw;
	struct tx_params_sw tx_params_sw;
	struct tx_params_hw_sw tx_params_hw_sw;
} __attribute__((packed)) __rte_cache_aligned;
215 static void task_base_add_rx_pkt_function(struct task_base *tbase, rx_pkt_func to_add)
217 if (tbase->aux->rx_prev_count == MAX_STACKED_RX_FUCTIONS) {
221 for (int16_t i = tbase->aux->rx_prev_count; i > 0; --i) {
222 tbase->aux->rx_pkt_prev[i] = tbase->aux->rx_pkt_prev[i - 1];
224 tbase->aux->rx_pkt_prev[0] = tbase->rx_pkt;
225 tbase->rx_pkt = to_add;
226 tbase->aux->rx_prev_count++;
229 static void task_base_del_rx_pkt_function(struct task_base *tbase, rx_pkt_func to_del)
234 if (unlikely(tbase->aux->rx_prev_count == 0)) {
236 } else if (tbase->rx_pkt == to_del) {
237 tbase->rx_pkt = tbase->aux->rx_pkt_prev[0];
238 for (int16_t i = 0; i < tbase->aux->rx_prev_count - 1; ++i) {
239 tbase->aux->rx_pkt_prev[i] = tbase->aux->rx_pkt_prev[i + 1];
243 for (int16_t i = 0; i < tbase->aux->rx_prev_count; ++i) {
244 if (found || tbase->aux->rx_pkt_prev[i] != to_del)
245 tbase->aux->rx_pkt_prev[cur++] = tbase->aux->rx_pkt_prev[i];
251 tbase->aux->rx_prev_count--;
254 static rx_pkt_func task_base_get_original_rx_pkt_function(struct task_base *tbase)
256 if (tbase->aux->rx_prev_count == 0)
257 return tbase->rx_pkt;
259 return tbase->aux->rx_pkt_prev[tbase->aux->rx_prev_count - 1];
262 #endif /* _TASK_BASE_H_ */