// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
20 #include <rte_common.h>
21 #ifndef __rte_cache_aligned
22 #include <rte_memory.h>
26 #include "prox_globals.h"
27 #include "stats_task.h"
28 #include "packet_utils.h"
// runtime_flags 16 bits only
#define TASK_MPLS_TAGGING			0x0001
#define TASK_ROUTING				0x0002
#define TASK_CLASSIFY				0x0004
#define TASK_CTRL_HANDLE_ARP			0x0008
#define TASK_MARK				0x0020
#define TASK_FP_HANDLE_ARP			0x0040
#define TASK_TX_CRC				0x0080
#define TASK_L3					0x0100

// flag_features 64 bits
#define TASK_FEATURE_ROUTING			0x0001
#define TASK_FEATURE_CLASSIFY			0x0002
#define TASK_FEATURE_MULTI_RX			0x0004
#define TASK_FEATURE_NEVER_DISCARDS		0x0008
#define TASK_FEATURE_NO_RX			0x0010
#define TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS	0x0020
#define TASK_FEATURE_TXQ_FLAGS_MULTSEGS		0x0040
#define TASK_FEATURE_ZERO_RX			0x0080
#define TASK_FEATURE_TXQ_FLAGS_REFCOUNT		0x0100
#define TASK_FEATURE_TSC_RX			0x0200
#define TASK_FEATURE_THROUGHPUT_OPT		0x0400
#define TASK_FEATURE_GRE_ID			0x1000
#define TASK_FEATURE_LUT_QINQ_RSS		0x2000
#define TASK_FEATURE_LUT_QINQ_HASH		0x4000
#define TASK_FEATURE_RX_ALL			0x8000
#define TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL	0x20000

/* Flush-control flags */
#define FLAG_TX_FLUSH				0x02
#define FLAG_NEVER_FLUSH			0x04
// Task specific flags
#define BASE_FLAG_LUT_QINQ_HASH			0x08
#define BASE_FLAG_LUT_QINQ_RSS			0x10

/* Special out-of-band values for the per-packet "out" destination byte;
   smaller values are regular TX destination indices. */
#define OUT_DISCARD				0xFF
#define OUT_HANDLED				0xFE

/* Index mask for the circular working-set mbuf buffer
   (assumes MAX_PKT_BURST is a power of two). */
#define WS_MBUF_MASK (2 * MAX_PKT_BURST - 1)
69 /* struct ws_mbuf stores the working set of mbufs. It starts with a
70 prod/cons index to keep track of the number of elements. */
/* NOTE(review): the opening line of struct ws_mbuf and its prod/cons
   index fields are missing from this extract; the lines below are the
   tail of that definition -- verify against the full header. */
76 uint16_t pad; /* reserved */
77 } idx[MAX_RINGS_PER_TASK]; /* one index entry per ring handled by the task */
78 struct rte_mbuf *mbuf[][MAX_RING_BURST * 3] __rte_cache_aligned; /* flexible array member: per-ring mbuf slots */
84 } __attribute__((packed));
/* NOTE(review): tail of struct rx_params_hw -- its opening line (and
   presumably a port/queue count field) is missing from this extract. */
91 uint8_t last_read_portid; /* index of the port/queue entry last read from */
92 struct port_queue *rx_pq; /* array of hardware port/queue pairs to receive from */
93 } __attribute__((packed));
95 struct rx_params_hw1 {
96 struct port_queue rx_pq;
97 } __attribute__((packed));
/* NOTE(review): tail of struct rx_params_sw (software RX from a set of
   rte_rings) -- the opening line and the ring-count field are missing
   from this extract. */
102 uint8_t rxrings_mask; /* Used if rte_is_power_of_2(nb_rxrings)*/
104 uint8_t last_read_ring; /* index of the ring last dequeued from */
105 struct rte_ring **rx_rings; /* array of input software rings */
106 } __attribute__((packed));
/* If there is only one input ring, the pointer to it can be stored
   directly into the task_base instead of having to use a pointer to a
   set of rings which would require two dereferences. */
struct rx_params_sw1 {
	struct rte_ring *rx_ring; /* the single software ring to dequeue from */
} __attribute__((packed));
/* TX destinations when transmitting directly to hardware port queues.
   NOTE(review): one interior line is missing from this extract
   (presumably a port count such as nb_txports) -- verify upstream. */
115 struct tx_params_hw {
117 struct port_queue *tx_port_queue; /* array of destination port/queue pairs */
118 } __attribute__((packed));
/* TX destinations when transmitting to software rte_rings.
   NOTE(review): one interior line is missing from this extract
   (presumably a ring count such as nb_txrings) -- verify upstream. */
120 struct tx_params_sw {
122 struct rte_ring **tx_rings; /* array of destination rings */
123 } __attribute__((packed));
/* Mixed TX: software rings plus a single hardware port/queue.
   NOTE(review): one interior line is missing from this extract
   (presumably a ring count) -- verify upstream. */
125 struct tx_params_hw_sw { /* Only one port supported in this mode */
127 struct rte_ring **tx_rings; /* software destination rings */
128 struct port_queue tx_port_queue; /* the single hardware port/queue, by value */
129 } __attribute__((packed));
/* Runtime packet-dump/trace state for a task.
   NOTE(review): several interior fields and the closing brace of this
   struct are missing from this extract. */
131 struct task_rt_dump {
137 void *pkt_mbuf_addr[MAX_RING_BURST]; /* To track reordering */
138 uint8_t pkt_cpy[MAX_RING_BURST][DUMP_PKT_LEN]; /* copies of captured packet bytes */
139 uint16_t pkt_cpy_len[MAX_RING_BURST]; /* number of bytes actually copied per packet */
/* Upper bound on packets fetched in one "receive all" pass. */
#define MAX_RX_PKT_ALL 16384

/* Per-burst-size statistics bucket counts: one slot per possible burst
   size, plus slot 0 for empty bursts. */
#define RX_BUCKET_SIZE (2 * MAX_RING_BURST + 1) /* Limit RX bucket size */
#define TX_BUCKET_SIZE (MAX_RING_BURST + 1)

/* Maximum depth of stacked rx_pkt overrides (see
   task_base_add_rx_pkt_function()).  NOTE(review): the "FUCTIONS" typo
   is kept deliberately -- the name is referenced elsewhere. */
#define MAX_STACKED_RX_FUCTIONS 16

/* Signature of a task's packet-receive hook: returns the number of
   packets received and points *mbufs at them. */
typedef uint16_t (*rx_pkt_func) (struct task_base *tbase, struct rte_mbuf ***mbufs);
/* Auxiliary, less frequently accessed task state, kept out of the
   cache-line-optimized struct task_base (which holds only a pointer to
   this).  NOTE(review): several interior fields and the closing brace
   are missing from this extract. */
153 struct task_base_aux {
154 /* Not used when PROX_STATS is not defined */
155 struct task_rt_stats stats;
157 /* Used if TASK_TSC_RX is enabled*/
163 struct rte_mbuf **all_mbufs;
/* Stack of saved rx_pkt hooks, managed by task_base_add_rx_pkt_function()
   and task_base_del_rx_pkt_function() below; newest entry at index 0. */
165 uint16_t rx_prev_count;
166 uint16_t rx_prev_idx;
167 uint16_t (*rx_pkt_prev[MAX_STACKED_RX_FUCTIONS])(struct task_base *tbase, struct rte_mbuf ***mbufs);
/* Histograms of burst sizes seen on RX and TX. */
169 uint32_t rx_bucket[RX_BUCKET_SIZE];
170 uint32_t tx_bucket[TX_BUCKET_SIZE];
/* Alternate / saved TX entry points and lifecycle callbacks. */
171 int (*tx_pkt_l2)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
172 int (*tx_pkt_orig)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
173 int (*tx_pkt_hw)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
174 uint16_t (*tx_pkt_try)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts);
175 void (*stop)(struct task_base *tbase);
176 int (*tx_ctrlplane_pkt)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
177 void (*start)(struct task_base *tbase);
178 void (*stop_last)(struct task_base *tbase);
179 void (*start_first)(struct task_base *tbase);
180 struct task_rt_dump task_rt_dump;
181 struct rte_mbuf *mbuf;
184 /* The task_base is accessed for _all_ task types. In case
185 no debugging or l3 is needed, it has been optimized to fit
186 into a single cache line to minimize cache pollution */
/* NOTE(review): the opening line of struct task_base is missing from
   this extract, as are the union/struct wrappers that normally group
   the rx_params_* and tx_params_* members below. */
188 int (*handle_bulk)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts);
189 int (*tx_pkt)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
190 uint16_t (*rx_pkt)(struct task_base *tbase, struct rte_mbuf ***mbufs);
/* Cold/debug state lives behind this pointer (struct task_base_aux). */
192 struct task_base_aux* aux;
193 /* The working set of mbufs contains mbufs that are currently
195 struct ws_mbuf *ws_mbuf;
/* RX configuration variants -- which one is valid depends on the task's
   receive mode (hardware queue(s) vs software ring(s)). */
200 struct rx_params_hw rx_params_hw;
201 struct rx_params_hw1 rx_params_hw1;
202 struct rx_params_sw rx_params_sw;
203 struct rx_params_sw1 rx_params_sw1;
/* TX configuration variants, analogous to the RX ones above. */
207 struct tx_params_hw tx_params_hw;
208 struct tx_params_sw tx_params_sw;
209 struct tx_params_hw_sw tx_params_hw_sw;
212 } __attribute__((packed)) __rte_cache_aligned;
/* Install to_add as the task's active rx_pkt hook, pushing the current
   hook onto the aux->rx_pkt_prev[] stack (newest saved entry ends up at
   index 0).  NOTE(review): the function's braces and the body of the
   "stack full" branch are missing from this extract. */
214 static void task_base_add_rx_pkt_function(struct task_base *tbase, rx_pkt_func to_add)
216 if (tbase->aux->rx_prev_count == MAX_STACKED_RX_FUCTIONS) {
/* Shift the existing saved hooks up by one to make room at index 0. */
220 for (int16_t i = tbase->aux->rx_prev_count; i > 0; --i) {
221 tbase->aux->rx_pkt_prev[i] = tbase->aux->rx_pkt_prev[i - 1];
223 tbase->aux->rx_pkt_prev[0] = tbase->rx_pkt;
224 tbase->rx_pkt = to_add;
225 tbase->aux->rx_prev_count++;
/* Remove the hook to_del from the task: if it is the currently active
   rx_pkt, promote the most recently saved hook; otherwise compact it
   out of the aux->rx_pkt_prev[] stack.  NOTE(review): the braces, the
   declarations of 'found' and 'cur', and parts of the else-branch are
   missing from this extract. */
228 static void task_base_del_rx_pkt_function(struct task_base *tbase, rx_pkt_func to_del)
233 if (unlikely(tbase->aux->rx_prev_count == 0)) {
235 } else if (tbase->rx_pkt == to_del) {
/* Active hook is the one being removed: pop the newest saved hook. */
236 tbase->rx_pkt = tbase->aux->rx_pkt_prev[0];
237 for (int16_t i = 0; i < tbase->aux->rx_prev_count - 1; ++i) {
238 tbase->aux->rx_pkt_prev[i] = tbase->aux->rx_pkt_prev[i + 1];
/* Otherwise compact the stack, dropping the first entry equal to to_del. */
242 for (int16_t i = 0; i < tbase->aux->rx_prev_count; ++i) {
243 if (found || tbase->aux->rx_pkt_prev[i] != to_del)
244 tbase->aux->rx_pkt_prev[cur++] = tbase->aux->rx_pkt_prev[i];
250 tbase->aux->rx_prev_count--;
253 static rx_pkt_func task_base_get_original_rx_pkt_function(struct task_base *tbase)
255 if (tbase->aux->rx_prev_count == 0)
256 return tbase->rx_pkt;
258 return tbase->aux->rx_pkt_prev[tbase->aux->rx_prev_count - 1];
261 #endif /* _TASK_BASE_H_ */