1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
3 * This file is open source software, licensed to you under the terms
4 * of the Apache License, Version 2.0 (the "License"). See the NOTICE file
5 * distributed with this work for additional information regarding copyright
6 * ownership. You may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing,
13 * software distributed under the License is distributed on an
14 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 * KIND, either express or implied. See the License for the
16 * specific language governing permissions and limitations
20 * Copyright (C) 2014 Cloudius Systems, Ltd.
23 * Ceph - scalable distributed file system
25 * Copyright (C) 2015 XSky <haomai@xsky.com>
27 * Author: Haomai Wang <haomaiwang@gmail.com>
29 * This is free software; you can redistribute it and/or
30 * modify it under the terms of the GNU Lesser General Public
31 * License version 2.1, as published by the Free Software
32 * Foundation. See file COPYING.
36 #ifndef CEPH_DPDK_DEV_H
37 #define CEPH_DPDK_DEV_H
41 #include <rte_config.h>
42 #include <rte_common.h>
43 #include <rte_ethdev.h>
44 #include <rte_malloc.h>
45 #include <rte_version.h>
47 #include "include/page.h"
48 #include "common/Tub.h"
49 #include "common/perf_counters.h"
50 #include "msg/async/Event.h"
52 #include "circular_buffer.h"
// Deleter functor: releases a malloc()-allocated buffer with ::free().
void operator()(void* p) { ::free(p); }
// Per-device perf-counter indices (range [l_dpdk_dev_first, l_dpdk_dev_last)).
l_dpdk_dev_first = 58800,
l_dpdk_dev_rx_total_errors,   // total RX errors reported by the device
l_dpdk_dev_tx_total_errors,   // total TX errors reported by the device
l_dpdk_dev_rx_badcrc_errors,  // frames received with a bad CRC
l_dpdk_dev_rx_dropped_errors, // frames dropped on receive
l_dpdk_dev_rx_nombuf_errors,  // RX mbuf allocation failures
// Per-queue-pair perf-counter indices (range starts at l_dpdk_qp_first).
l_dpdk_qp_first = 58900,
l_dpdk_qp_rx_bad_checksum_errors, // packets failing HW checksum validation
l_dpdk_qp_rx_no_memory_errors,    // RX drops due to memory exhaustion
l_dpdk_qp_rx_last_bunch,          // size of the last RX burst
l_dpdk_qp_tx_last_bunch,          // size of the last TX burst
l_dpdk_qp_rx_fragments,           // mbuf segments received
l_dpdk_qp_tx_fragments,           // mbuf segments transmitted
l_dpdk_qp_rx_copy_ops,            // RX operations that fell back to copying
l_dpdk_qp_tx_copy_ops,            // TX operations that fell back to copying
l_dpdk_qp_rx_copy_bytes,          // bytes copied on the RX path
l_dpdk_qp_tx_copy_bytes,          // bytes copied on the TX path
l_dpdk_qp_rx_linearize_ops,       // RX clusters linearized for HW limits
l_dpdk_qp_tx_linearize_ops,       // TX clusters linearized for HW limits
l_dpdk_qp_tx_queue_length,        // current TX queue depth
// A single HW RX/TX queue pair of a DPDKDevice, polled from one EventCenter.
class DPDKQueuePair {
  // Callback that may supply an additional packet to transmit; returns a
  // disengaged Tub when it has nothing to send.
  using packet_provider_type = std::function<Tub<Packet> ()>;
  void configure_proxies(const std::map<unsigned, float>& cpu_weights);
  // build REdirection TAble for cpu_weights map: target cpu -> weight
  void build_sw_reta(const std::map<unsigned, float>& cpu_weights);
  // Queue a packet forwarded from another core; drained by the TX path.
  void proxy_send(Packet p) {
    _proxy_packetq.push_back(std::move(p));
  // Register an extra source of outgoing packets (polled during TX).
  void register_packet_provider(packet_provider_type func) {
    _pkt_providers.push_back(std::move(func));
  friend class DPDKDevice;
class tx_buf_factory;

friend class DPDKQueuePair;

// Reinterpret an rte_mbuf as the tx_buf that embeds it (tx_buf's first
// member is the rte_mbuf, so the addresses coincide).
static tx_buf* me(rte_mbuf* mbuf) {
  return reinterpret_cast<tx_buf*>(mbuf);

* Checks if the original packet of a given cluster should be linearized
* due to HW limitations.
* @param head head of a cluster to check
* @return TRUE if a packet should be linearized.
static bool i40e_should_linearize(rte_mbuf *head);

* Sets the offload info in the head buffer of an rte_mbufs cluster.
* @param p an original packet the cluster is built for
* @param qp QP handle
* @param head a head of an rte_mbufs cluster
static void set_cluster_offload_info(const Packet& p, const DPDKQueuePair& qp, rte_mbuf* head);

* Creates a tx_buf cluster representing a given packet in a "zero-copy"
* @param p packet to translate
* @param qp DPDKQueuePair handle
* @return the HEAD tx_buf of the cluster or nullptr in case of a
static tx_buf* from_packet_zc(
  CephContext *cct, Packet&& p, DPDKQueuePair& qp);

* Copy the contents of the "packet" into the given cluster of
* @note Size of the cluster has to be big enough to accommodate all the
*       contents of the given packet.
* @param p packet to copy
* @param head head of the rte_mbuf's cluster
static void copy_packet_to_cluster(const Packet& p, rte_mbuf* head);

* Creates a tx_buf cluster representing a given packet in a "copy" way.
* @param p packet to translate
* @param qp DPDKQueuePair handle
* @return the HEAD tx_buf of the cluster or nullptr in case of a
static tx_buf* from_packet_copy(Packet&& p, DPDKQueuePair& qp);
* Zero-copy handling of a single fragment.
* @param do_one_buf Functor responsible for a single rte_mbuf
* @param qp DPDKQueuePair handle (in)
* @param frag Fragment to copy (in)
* @param head Head of the cluster (out)
* @param last_seg Last segment of the cluster (out)
* @param nsegs Number of segments in the cluster (out)
* @return TRUE in case of success
template <class DoOneBufFunc>
static bool do_one_frag(DoOneBufFunc do_one_buf, DPDKQueuePair& qp,
                        fragment& frag, rte_mbuf*& head,
                        rte_mbuf*& last_seg, unsigned& nsegs) {
  // Bytes of the fragment still to be placed into mbufs.
  size_t len, left_to_set = frag.size;
  char* base = frag.base;
  // TODO: assert() in a fast path! Remove me ASAP!
  // Create a HEAD of mbufs' cluster and set the first bytes into it
  len = do_one_buf(qp, head, base, left_to_set);
  // Set the rest of the data into the new mbufs and chain them to
  rte_mbuf* prev_seg = head;
  while (left_to_set) {
    // do_one_buf either maps (zero-copy) or copies up to left_to_set bytes.
    len = do_one_buf(qp, m, base, left_to_set);
  // Return the last mbuf in the cluster
* Zero-copy handling of a single fragment.
* @param qp DPDKQueuePair handle (in)
* @param frag Fragment to copy (in)
* @param head Head of the cluster (out)
* @param last_seg Last segment of the cluster (out)
* @param nsegs Number of segments in the cluster (out)
* @return TRUE in case of success
// Thin wrapper over do_one_frag() using the zero-copy per-buffer functor.
static bool translate_one_frag(DPDKQueuePair& qp, fragment& frag,
                               rte_mbuf*& head, rte_mbuf*& last_seg,
  return do_one_frag(set_one_data_buf, qp, frag, head,
* Copies one fragment into the cluster of rte_mbuf's.
* @param qp DPDKQueuePair handle (in)
* @param frag Fragment to copy (in)
* @param head Head of the cluster (out)
* @param last_seg Last segment of the cluster (out)
* @param nsegs Number of segments in the cluster (out)
* We return the "last_seg" to avoid traversing the cluster in order to get
* @return TRUE in case of success
// Thin wrapper over do_one_frag() using the copying per-buffer functor.
static bool copy_one_frag(DPDKQueuePair& qp, fragment& frag,
                          rte_mbuf*& head, rte_mbuf*& last_seg,
  return do_one_frag(copy_one_data_buf, qp, frag, head,
* Allocates a single rte_mbuf and sets it to point to a given data
* @param qp DPDKQueuePair handle (in)
* @param m New allocated rte_mbuf (out)
* @param va virtual address of a data buffer (in)
* @param buf_len length of the data to copy (in)
* @return The actual number of bytes that has been set in the mbuf
static size_t set_one_data_buf(
  DPDKQueuePair& qp, rte_mbuf*& m, char* va, size_t buf_len) {
  static constexpr size_t max_frag_len = 15 * 1024; // 15K
  // FIXME: current all tx buf is alloced without rte_malloc
  // Zero-copy is disabled here: always fall back to the copying path.
  return copy_one_data_buf(qp, m, va, buf_len);
  // Currently we break a buffer on a 15K boundary because 82599
  // devices have a 15.5K limitation on a maximum single fragment
  // (Unreachable while the early return above is in place.)
  phys_addr_t pa = rte_malloc_virt2phy(va);
  // No physical mapping available -> must copy.
  return copy_one_data_buf(qp, m, va, buf_len);
  tx_buf* buf = qp.get_tx_buf();
  // Cap a single zero-copy segment at max_frag_len (82599 HW limit).
  size_t len = std::min(buf_len, max_frag_len);
  buf->set_zc_info(va, pa, len);
  m = buf->rte_mbuf_p();
* Allocates a single rte_mbuf and copies a given data into it.
* @param qp DPDKQueuePair handle (in)
* @param m New allocated rte_mbuf (out)
* @param data Data to copy from (in)
* @param buf_len length of the data to copy (in)
* @return The actual number of bytes that has been copied
// Copying counterpart of set_one_data_buf(); defined out of line.
static size_t copy_one_data_buf(
  DPDKQueuePair& qp, rte_mbuf*& m, char* data, size_t buf_len);
* Checks if the first fragment of the given packet satisfies the
* zero-copy flow requirement: its first 128 bytes should not cross the
* 4K page boundary. This is required in order to avoid splitting packet
* @param p packet to check
* @return TRUE if packet is ok and FALSE otherwise.
static bool check_frag0(Packet& p)
  // First frag is special - it has headers that should not be split.
  // If the addressing is such that the first fragment has to be
  // split, then send this packet in a (non-zero) copy flow. We'll
  // check if the first 128 bytes of the first fragment reside in the
  // physically contiguous area. If that's the case - we are good to
  if (p.frag(0).size < 128)
// Construct a tx_buf bound to its owning factory; snapshot the mbuf
// fields that set_zc_info() will overwrite so reset() can restore them.
tx_buf(tx_buf_factory& fc) : _fc(fc) {
  _buf_physaddr = _mbuf.buf_physaddr;
  _data_off = _mbuf.data_off;
368 rte_mbuf* rte_mbuf_p() { return &_mbuf; }
// Point the embedded mbuf directly at caller-owned memory (zero-copy TX).
// Overwrites buf_physaddr/data fields that the constructor snapshotted.
void set_zc_info(void* va, phys_addr_t pa, size_t len) {
  _mbuf.data_len = len;
  // Set the mbuf to point to our data
  _mbuf.buf_physaddr = pa;
// If this mbuf was the last in a cluster and contains an
// original packet object then call the destructor of the
// original packet object.
// Reset the std::optional. This in particular is going
// to call the "packet"'s destructor and reset the
// "optional" state to "nonengaged".
} else if (!_is_zc) {
// Restore the rte_mbuf fields we trashed in set_zc_info()
_mbuf.buf_physaddr = _buf_physaddr;
_mbuf.buf_addr = rte_mbuf_to_baddr(&_mbuf);
_mbuf.data_off = _data_off;
// Walk the mbuf chain, resetting each segment for reuse.
struct rte_mbuf *m = &_mbuf, *m_next;
while (m != nullptr) {
  rte_pktmbuf_reset(m);
// Take ownership of the Packet so its buffers stay alive until TX completes.
void set_packet(Packet&& p) {
// The embedded mbuf MUST be the first member: tx_buf::me() relies on it.
struct rte_mbuf _mbuf;
MARKER private_start;
// Saved original physical address, restored on reset.
phys_addr_t _buf_physaddr;
// TRUE if underlying mbuf has been used in the zero-copy flow
// buffers' factory the buffer came from
// Pool of tx_buf objects for one queue; recycles buffers the PMD completes.
class tx_buf_factory {
  // Number of buffers to free in each GC iteration:
  // We want the buffers to be allocated from the mempool as many as
  // On the other hand if there is no Tx for some time we want the
  // completions to be eventually handled. Thus we choose the smallest
  // possible packets count number here.
  static constexpr int gc_count = 1;

  tx_buf_factory(CephContext *c, DPDKDevice *dev, uint8_t qid);

  // put all mbuf back into mempool in order to make the next factory work
  rte_mempool_put_bulk(_pool, (void**)_ring.data(),

  * @note Should not be called if there are no free tx_buf's
  * @return a free tx_buf object
  // Take completed from the HW first
  tx_buf *pkt = get_one_completed();
  // If there are no completed at the moment - take from the

  // Return a buffer to the local free ring.
  void put(tx_buf* buf) {
    _ring.push_back(buf);

  // Reclaim up to gc_count HW-completed buffers per call.
  for (int cnt = 0; cnt < gc_count; ++cnt) {
    auto tx_buf_p = get_one_completed();

  * Fill the mbufs circular buffer: after this the _pool will become
  * empty. We will use it to catch the completed buffers:
  * - Underlying PMD drivers will "free" the mbufs once they are
  * - We will poll the _pktmbuf_pool_tx till it's empty and release
  *   all the buffers from the freed mbufs.
  void init_factory() {
    // Drain the mempool, placement-constructing a tx_buf in each mbuf.
    while (rte_mbuf* mbuf = rte_pktmbuf_alloc(_pool)) {
      _ring.push_back(new(tx_buf::me(mbuf)) tx_buf{*this});

  * PMD puts the completed buffers back into the mempool they have
  * originally come from.
  * @note rte_pktmbuf_alloc() resets the mbuf so there is no need to call
  *       rte_pktmbuf_reset() here again.
  * @return a single tx_buf that has been completed by HW.
  tx_buf* get_one_completed() {
    return tx_buf::me(rte_pktmbuf_alloc(_pool));

  // Free ring of tx_buf's and the backing DPDK mempool.
  std::vector<tx_buf*> _ring;
  rte_mempool* _pool = nullptr;
explicit DPDKQueuePair(CephContext *c, EventCenter *cen, DPDKDevice* dev, uint8_t qid);

// Destructor fragment: cancel the periodic device-stats timer if armed.
if (device_stat_time_fd) {
  center->delete_time_event(device_stat_time_fd);

// Arm the RX poller on this EventCenter.
_rx_poller.construct(this);

// Send a batch of packets through the zero-copy TX path.
// @return number of packets actually consumed from pb.
uint32_t send(circular_buffer<Packet>& pb) {
  return _send(pb, [&] (Packet&& p) {
    return tx_buf::from_packet_zc(cct, std::move(p), *this);
557 DPDKDevice& port() const { return *_dev; }
558 tx_buf* get_tx_buf() { return _tx_buf_factory.get(); }
// Core TX routine: convert queued Packets into mbuf clusters via
// packet_to_tx_buf_p, then push them to the NIC with rte_eth_tx_burst().
// @return number of packets handed to the HW in this call.
template <class Func>
uint32_t _send(circular_buffer<Packet>& pb, Func &&packet_to_tx_buf_p) {
  // Refill the burst vector only once it has been fully drained.
  if (_tx_burst.size() == 0) {
    for (auto&& p : pb) {
      // TODO: assert() in a fast path! Remove me ASAP!
      tx_buf* buf = packet_to_tx_buf_p(std::move(p));
      _tx_burst.push_back(buf->rte_mbuf_p());
  // Transmit the not-yet-sent tail of the burst.
  uint16_t sent = rte_eth_tx_burst(_dev_port_idx, _qid,
                                   _tx_burst.data() + _tx_burst_idx,
                                   _tx_burst.size() - _tx_burst_idx);
  // Account fragments/bytes for the perf counters.
  uint64_t nr_frags = 0, bytes = 0;
  for (int i = 0; i < sent; i++) {
    rte_mbuf* m = _tx_burst[_tx_burst_idx + i];
    nr_frags += m->nb_segs;
  perf_logger->inc(l_dpdk_qp_tx_fragments, nr_frags);
  perf_logger->inc(l_dpdk_qp_tx_bytes, bytes);
  _tx_burst_idx += sent;
  // Whole burst sent: the vector can be rebuilt on the next call.
  if (_tx_burst_idx == _tx_burst.size()) {
* Allocate a new data buffer and set the mbuf to point to it.
* Do some DPDK hacks to work on PMD: it assumes that the buf_addr
* points to the private data of RTE_PKTMBUF_HEADROOM before the actual
* @param m mbuf to update
static bool refill_rx_mbuf(rte_mbuf* m, size_t size,
                           std::vector<void*> &datas) {
  // Take the next pre-allocated buffer off the stack.
  void *data = datas.back();
  // Set the mbuf to point to our data.
  // Do some DPDK hacks to work on PMD: it assumes that the buf_addr
  // points to the private data of RTE_PKTMBUF_HEADROOM before the
  // actual data buffer.
  m->buf_addr = (char*)data - RTE_PKTMBUF_HEADROOM;
  m->buf_physaddr = rte_malloc_virt2phy(data) - RTE_PKTMBUF_HEADROOM;

// One-time setup of an RX mbuf whose payload lives outside the mempool.
static bool init_noninline_rx_mbuf(rte_mbuf* m, size_t size,
                                   std::vector<void*> &datas) {
  if (!refill_rx_mbuf(m, size, datas)) {
  // The below fields stay constant during the execution.
  m->buf_len = size + RTE_PKTMBUF_HEADROOM;
  m->data_off = RTE_PKTMBUF_HEADROOM;
// Create and populate the RX mbuf mempool for this queue.
bool init_rx_mbuf_pool();
// Reclaim received buffers; when force is set, reclaim unconditionally.
bool rx_gc(bool force=false);
// Re-arm a whole mbuf cluster with fresh data buffers.
bool refill_one_cluster(rte_mbuf* head);

* Polls for a burst of incoming packets. This function will not block and
* will immediately return after processing all available packets.

* Translates an rte_mbuf's into packet and feeds them to _rx_stream.
* @param bufs An array of received rte_mbuf's
* @param count Number of buffers in the bufs[]
void process_packets(struct rte_mbuf **bufs, uint16_t count);

* Translate rte_mbuf into the "packet".
* @param m mbuf to translate
* @return a "optional" object representing the newly received data if in an
*         "engaged" state or an error if in a "disengaged" state.
Tub<Packet> from_mbuf(rte_mbuf* m);

* Transform an LRO rte_mbuf cluster into the "packet" object.
* @param m HEAD of the mbufs' cluster to transform
* @return a "optional" object representing the newly received LRO packet if
*         in an "engaged" state or an error if in a "disengaged" state.
Tub<Packet> from_mbuf_lro(rte_mbuf* m);
// Extra TX packet sources registered via register_packet_provider().
std::vector<packet_provider_type> _pkt_providers;
// Software redirection table (128 entries) built by build_sw_reta();
// disengaged when no proxying is configured.
Tub<std::array<uint8_t, 128>> _sw_reta;
// Packets queued on behalf of other cores.
circular_buffer<Packet> _proxy_packetq;
// Stream feeding received packets to the registered consumer.
stream<Packet> _rx_stream;
circular_buffer<Packet> _tx_packetq;
// Pre-allocated RX data buffers handed out by refill_rx_mbuf().
std::vector<void*> _alloc_bufs;

PerfCounters *perf_logger;

uint8_t _dev_port_idx;

// RX mempool and scratch vectors reused across polls to avoid allocation.
rte_mempool *_pktmbuf_pool_rx;
std::vector<rte_mbuf*> _rx_free_pkts;
std::vector<rte_mbuf*> _rx_free_bufs;
std::vector<fragment> _frags;
std::vector<char*> _bufs;
size_t _num_rx_free_segs = 0;
// Timer id for periodic device-stat collection; 0 when not armed.
uint64_t device_stat_time_fd = 0;

// Cycle/op accounting for RX/TX polling.
uint64_t rx_cycles = 0;
uint64_t rx_count = 0;
uint64_t tx_cycles = 0;
uint64_t tx_count = 0;
// EventCenter poller driving the TX path of the owning queue pair.
class DPDKTXPoller : public EventCenter::Poller {
  explicit DPDKTXPoller(DPDKQueuePair *qp)
    : EventCenter::Poller(qp->center, "DPDK::DPDKTXPoller"), qp(qp) {}
  return qp->poll_tx();

// Poller reclaiming received buffers (RX garbage collection).
class DPDKRXGCPoller : public EventCenter::Poller {
  explicit DPDKRXGCPoller(DPDKQueuePair *qp)
    : EventCenter::Poller(qp->center, "DPDK::DPDKRXGCPoller"), qp(qp) {}

tx_buf_factory _tx_buf_factory;

// Poller pulling bursts of received packets from the NIC.
class DPDKRXPoller : public EventCenter::Poller {
  explicit DPDKRXPoller(DPDKQueuePair *qp)
    : EventCenter::Poller(qp->center, "DPDK::DPDKRXPoller"), qp(qp) {}
  return qp->poll_rx_once();

// Engaged by rx_start(); disengaged while RX is stopped.
Tub<DPDKRXPoller> _rx_poller;

// Poller running the tx_buf_factory garbage collector.
class DPDKTXGCPoller : public EventCenter::Poller {
  explicit DPDKTXGCPoller(DPDKQueuePair *qp)
    : EventCenter::Poller(qp->center, "DPDK::DPDKTXGCPoller"), qp(qp) {}
  return qp->_tx_buf_factory.gc();

// Current TX burst and the index of the first not-yet-sent entry.
std::vector<rte_mbuf*> _tx_burst;
uint16_t _tx_burst_idx = 0;
PerfCounters *perf_logger;
// One queue pair per configured HW queue; indexed by qid.
std::vector<std::unique_ptr<DPDKQueuePair>> _queues;
std::vector<DPDKWorker*> workers;
// log2 of the RSS redirection table size.
size_t _rss_table_bits = 0;
uint16_t _num_queues;
hw_features _hw_features;
// Count of queues that completed stage-2 init; gates init_port_fini().
uint8_t _queues_ready = 0;
// RSS redirection table and hash key programmed into the NIC.
std::vector<uint8_t> _redir_table;
rss_key_type _rss_key;
// PMD-specific quirk flags detected at init time.
bool _is_i40e_device = false;
bool _is_vmxnet3_device = false;
rte_eth_dev_info _dev_info = {};
* The final stage of a port initialization.
* @note Must be called *after* all queues from stage (2) have been
int init_port_fini();

* Port initialization consists of 3 main stages:
* 1) General port initialization which ends with a call to
*    rte_eth_dev_configure() where we request the needed number of Rx and
* 2) Individual queues initialization. This is done in the constructor of
*    DPDKQueuePair class. In particular the memory pools for queues are allocated
* 3) The final stage of the initialization which starts with the call of
*    rte_eth_dev_start() after which the port becomes fully functional. We
*    will also wait for a link to get up in this stage.

* First stage of the port initialization.
* @return 0 in case of success and an appropriate error code in case of an
int init_port_start();

* Check the link status of out port in up to 9s, and print them finally.
int check_port_link_status();

* Configures the HW Flow Control
void set_hw_flow_control();
// Construct the device wrapper: run stage-1 port init and register the
// per-device perf counters. Aborts the process if the port cannot start.
DPDKDevice(CephContext *c, uint8_t port_idx, uint16_t num_queues, bool use_lro, bool enable_fc):
  cct(c), _port_idx(port_idx), _num_queues(num_queues),
  _home_cpu(0), _use_lro(use_lro),
  _enable_fc(enable_fc) {
  _queues = std::vector<std::unique_ptr<DPDKQueuePair>>(_num_queues);
  /* now initialise the port we will use */
  int ret = init_port_start();
  // Unrecoverable: terminate via DPDK's exit helper.
  rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n", _port_idx);
  string name(std::string("port") + std::to_string(port_idx));
  PerfCountersBuilder plb(cct, name, l_dpdk_dev_first, l_dpdk_dev_last);
  plb.add_u64_counter(l_dpdk_dev_rx_mcast, "dpdk_device_receive_multicast_packets", "DPDK received multicast packets");
  plb.add_u64_counter(l_dpdk_dev_rx_badcrc_errors, "dpdk_device_receive_badcrc_errors", "DPDK received bad crc errors");
  plb.add_u64_counter(l_dpdk_dev_rx_total_errors, "dpdk_device_receive_total_errors", "DPDK received total_errors");
  // NOTE(review): "sendd" below is a typo in the counter description string;
  // left untouched here since changing it alters emitted perf-counter text.
  plb.add_u64_counter(l_dpdk_dev_tx_total_errors, "dpdk_device_send_total_errors", "DPDK sendd total_errors");
  plb.add_u64_counter(l_dpdk_dev_rx_dropped_errors, "dpdk_device_receive_dropped_errors", "DPDK received dropped errors");
  plb.add_u64_counter(l_dpdk_dev_rx_nombuf_errors, "dpdk_device_receive_nombuf_errors", "DPDK received RX mbuf allocation errors");
  perf_logger = plb.create_perf_counters();
  cct->get_perfcounters_collection()->add(perf_logger);

// Destructor fragment: stop the ethernet port.
rte_eth_dev_stop(_port_idx);
854 DPDKQueuePair& queue_for_cpu(unsigned cpu) { return *_queues[cpu]; }
// Deliver a received L2 packet into the target queue's RX stream.
void l2receive(int qid, Packet p) {
  _queues[qid]->_rx_stream.produce(std::move(p));

// Subscribe a consumer to a queue's RX stream and start RX polling.
subscription<Packet> receive(unsigned cpuid, std::function<int (Packet)> next_packet) {
  auto sub = _queues[cpuid]->_rx_stream.listen(std::move(next_packet));
  _queues[cpuid]->rx_start();
  // NOTE(review): `return std::move(sub);` blocks copy elision; a plain
  // `return sub;` would suffice.
  return std::move(sub);

// Query the port's MAC address from the NIC.
ethernet_address hw_address() {
  struct ether_addr mac;
  rte_eth_macaddr_get(_port_idx, &mac);
  return mac.addr_bytes;
// RSS hash key programmed into the NIC.
const rss_key_type& rss_key() const { return _rss_key; }
uint16_t hw_queues_count() { return _num_queues; }

// Build the queue pair for a given qid (hugepages arg currently unused here).
std::unique_ptr<DPDKQueuePair> init_local_queue(CephContext *c, EventCenter *center, string hugepages, uint16_t qid) {
  std::unique_ptr<DPDKQueuePair> qp;
  qp = std::unique_ptr<DPDKQueuePair>(new DPDKQueuePair(c, center, this, qid));
  // NOTE(review): `return std::move(qp);` blocks copy elision; `return qp;`
  // would suffice.
  return std::move(qp);

// Map an RSS hash to a HW queue via the redirection table.
// Requires _redir_table.size() to be a power of two (masking below).
unsigned hash2qid(uint32_t hash) {
  // return hash % hw_queues_count();
  return _redir_table[hash & (_redir_table.size() - 1)];

void set_local_queue(unsigned i, std::unique_ptr<DPDKQueuePair> qp) {
  _queues[i] = std::move(qp);

void unset_local_queue(unsigned i) {
// Pick the destination cpu for a flow using the per-queue software RETA.
template <typename Func>
unsigned forward_dst(unsigned src_cpuid, Func&& hashfn) {
  auto& qp = queue_for_cpu(src_cpuid);
  // NOTE(review): this assert claims _sw_reta is DISENGAGED, yet it is
  // dereferenced two lines below — the condition looks inverted (or an
  // early-return on !_sw_reta is elided from this view); verify against
  // the full source before relying on it.
  assert(!qp._sw_reta);
  auto hash = hashfn() >> _rss_table_bits;
  auto& reta = *qp._sw_reta;
  return reta[hash % reta.size()];

unsigned hash2cpu(uint32_t hash) {
  // there is an assumption here that qid == get_id() which will
  // not necessary be true in the future
  return forward_dst(hash2qid(hash), [hash] { return hash; });
// Mutable access to the negotiated HW feature set.
hw_features& hw_features_ref() { return _hw_features; }

// Default RX/TX queue configuration reported by the PMD.
const rte_eth_rxconf* def_rx_conf() const {
  return &_dev_info.default_rxconf;

const rte_eth_txconf* def_tx_conf() const {
  return &_dev_info.default_txconf;

* Set the RSS table in the device and store it in the internal vector.
void set_rss_table();
uint8_t port_idx() { return _port_idx; }
// PMD quirk queries (set during port init).
bool is_i40e_device() const {
  return _is_i40e_device;
bool is_vmxnet3_device() const {
  return _is_vmxnet3_device;

// Factory: create and initialize a DPDKDevice for the given port.
std::unique_ptr<DPDKDevice> create_dpdk_net_device(
  CephContext *c, unsigned cores, uint8_t port_idx = 0,
  bool use_lro = true, bool enable_fc = true);

* @return Number of bytes needed for mempool objects of each QP.
uint32_t qp_mempool_obj_size();
943 #endif // CEPH_DPDK_DEV_H