DPDK  17.05.2
rte_ethdev.h
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTE_ETHDEV_H_
#define _RTE_ETHDEV_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>

#include <rte_dev.h>

/* Use this macro to check if LRO API is supported */
#define RTE_ETHDEV_HAS_LRO_SUPPORT

#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_dev.h>
#include <rte_devargs.h>
#include <rte_errno.h>
#include "rte_ether.h"
#include "rte_eth_ctrl.h"
#include "rte_dev_info.h"

struct rte_mbuf;

/**
 * A structure used to retrieve statistics for an Ethernet port.
 */
struct rte_eth_stats {
	uint64_t ipackets;  /**< Total number of successfully received packets. */
	uint64_t opackets;  /**< Total number of successfully transmitted packets. */
	uint64_t ibytes;    /**< Total number of successfully received bytes. */
	uint64_t obytes;    /**< Total number of successfully transmitted bytes. */
	uint64_t imissed;   /**< Total of RX packets dropped by the HW. */
	uint64_t ierrors;   /**< Total number of erroneous received packets. */
	uint64_t oerrors;   /**< Total number of failed transmitted packets. */
	uint64_t rx_nombuf; /**< Total number of RX mbuf allocation failures. */
	uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
};
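
/**
 * Usage sketch (illustrative, assumes a started port and a valid port_id;
 * rte_eth_stats_get() is declared later in this header):
 * @code
 * struct rte_eth_stats stats;
 *
 * if (rte_eth_stats_get(port_id, &stats) == 0)
 *         printf("rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64 "\n",
 *                stats.ipackets, stats.opackets, stats.imissed);
 * @endcode
 */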

/**
 * Device supported speeds bitmap flags
 */
#define ETH_LINK_SPEED_AUTONEG  (0 <<  0)  /**< Autonegotiate (all speeds) */
#define ETH_LINK_SPEED_FIXED    (1 <<  0)  /**< Disable autoneg (fixed speed) */
#define ETH_LINK_SPEED_10M_HD   (1 <<  1)  /**<  10 Mbps half-duplex */
#define ETH_LINK_SPEED_10M      (1 <<  2)  /**<  10 Mbps full-duplex */
#define ETH_LINK_SPEED_100M_HD  (1 <<  3)  /**< 100 Mbps half-duplex */
#define ETH_LINK_SPEED_100M     (1 <<  4)  /**< 100 Mbps full-duplex */
#define ETH_LINK_SPEED_1G       (1 <<  5)  /**<   1 Gbps */
#define ETH_LINK_SPEED_2_5G     (1 <<  6)  /**< 2.5 Gbps */
#define ETH_LINK_SPEED_5G       (1 <<  7)  /**<   5 Gbps */
#define ETH_LINK_SPEED_10G      (1 <<  8)  /**<  10 Gbps */
#define ETH_LINK_SPEED_20G      (1 <<  9)  /**<  20 Gbps */
#define ETH_LINK_SPEED_25G      (1 << 10)  /**<  25 Gbps */
#define ETH_LINK_SPEED_40G      (1 << 11)  /**<  40 Gbps */
#define ETH_LINK_SPEED_50G      (1 << 12)  /**<  50 Gbps */
#define ETH_LINK_SPEED_56G      (1 << 13)  /**<  56 Gbps */
#define ETH_LINK_SPEED_100G     (1 << 14)  /**< 100 Gbps */

/**
 * Ethernet numeric link speeds in Mbps
 */
#define ETH_SPEED_NUM_NONE      0      /**< Not defined */
#define ETH_SPEED_NUM_10M       10     /**<  10 Mbps */
#define ETH_SPEED_NUM_100M      100    /**< 100 Mbps */
#define ETH_SPEED_NUM_1G        1000   /**<   1 Gbps */
#define ETH_SPEED_NUM_2_5G      2500   /**< 2.5 Gbps */
#define ETH_SPEED_NUM_5G        5000   /**<   5 Gbps */
#define ETH_SPEED_NUM_10G       10000  /**<  10 Gbps */
#define ETH_SPEED_NUM_20G       20000  /**<  20 Gbps */
#define ETH_SPEED_NUM_25G       25000  /**<  25 Gbps */
#define ETH_SPEED_NUM_40G       40000  /**<  40 Gbps */
#define ETH_SPEED_NUM_50G       50000  /**<  50 Gbps */
#define ETH_SPEED_NUM_56G       56000  /**<  56 Gbps */
#define ETH_SPEED_NUM_100G      100000 /**< 100 Gbps */

/**
 * A structure used to retrieve link-level information of an Ethernet port.
 */
__extension__
struct rte_eth_link {
	uint32_t link_speed;        /**< ETH_SPEED_NUM_ */
	uint16_t link_duplex  : 1;  /**< ETH_LINK_[HALF/FULL]_DUPLEX */
	uint16_t link_autoneg : 1;  /**< ETH_LINK_SPEED_[AUTONEG/FIXED] */
	uint16_t link_status  : 1;  /**< ETH_LINK_[DOWN/UP] */
} __attribute__((aligned(8)));      /**< aligned for atomic64 read/write */

/* Utility constants */
#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection. */
#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection. */
#define ETH_LINK_DOWN        0 /**< Link is down. */
#define ETH_LINK_UP          1 /**< Link is up. */
#define ETH_LINK_FIXED       0 /**< No autonegotiation. */
#define ETH_LINK_AUTONEG     1 /**< Autonegotiated. */
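
/**
 * Usage sketch (illustrative, assumes a started port): query the link state
 * without blocking and decode it with the constants above; link_speed is in
 * Mbps (the ETH_SPEED_NUM_ values).
 * @code
 * struct rte_eth_link link;
 *
 * rte_eth_link_get_nowait(port_id, &link);
 * if (link.link_status == ETH_LINK_UP)
 *         printf("Port %u up at %u Mbps (%s duplex)\n", port_id,
 *                link.link_speed,
 *                link.link_duplex == ETH_LINK_FULL_DUPLEX ? "full" : "half");
 * @endcode
 */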
/**
 * A structure used to configure the ring threshold registers of an RX/TX
 * queue for an Ethernet port.
 */
struct rte_eth_thresh {
	uint8_t pthresh; /**< Ring prefetch threshold. */
	uint8_t hthresh; /**< Ring host threshold. */
	uint8_t wthresh; /**< Ring writeback threshold. */
};

/**
 *  Simple flags are used for rte_eth_conf.rxmode.mq_mode.
 */
#define ETH_MQ_RX_RSS_FLAG  0x1
#define ETH_MQ_RX_DCB_FLAG  0x2
#define ETH_MQ_RX_VMDQ_FLAG 0x4

/**
 *  A set of values to identify what method is to be used to route
 *  packets to multiple queues.
 */
enum rte_eth_rx_mq_mode {
	/** None of DCB, RSS or VMDQ mode */
	ETH_MQ_RX_NONE = 0,

	/** For RX side, only RSS is on */
	ETH_MQ_RX_RSS = ETH_MQ_RX_RSS_FLAG,
	/** For RX side, only DCB is on. */
	ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
	/** Both DCB and RSS enable */
	ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,

	/** Only VMDQ, no RSS nor DCB */
	ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
	/** RSS mode with VMDQ */
	ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
	/** Use VMDQ+DCB to route traffic to queues */
	ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
	/** Enable both VMDQ and DCB in VMDq */
	ETH_MQ_RX_VMDQ_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG |
				 ETH_MQ_RX_VMDQ_FLAG,
};

/**
 * for rx mq mode backward compatible
 */
#define ETH_RSS     ETH_MQ_RX_RSS
#define VMDQ_DCB    ETH_MQ_RX_VMDQ_DCB
#define ETH_DCB_RX  ETH_MQ_RX_DCB

/**
 * A set of values to identify what method is to be used to transmit
 * packets using multi-TCs.
 */
enum rte_eth_tx_mq_mode {
	ETH_MQ_TX_NONE = 0,  /**< It is in neither DCB nor VT mode. */
	ETH_MQ_TX_DCB,       /**< For TX side, only DCB is on. */
	ETH_MQ_TX_VMDQ_DCB,  /**< For TX side, both DCB and VT is on. */
	ETH_MQ_TX_VMDQ_ONLY, /**< Only VT on, no DCB */
};

/**
 * for tx mq mode backward compatible
 */
#define ETH_DCB_NONE     ETH_MQ_TX_NONE
#define ETH_VMDQ_DCB_TX  ETH_MQ_TX_VMDQ_DCB
#define ETH_DCB_TX       ETH_MQ_TX_DCB

/**
 * A structure used to configure the RX features of an Ethernet port.
 */
struct rte_eth_rxmode {
	/** The multi-queue packet distribution mode to be used, e.g. RSS. */
	enum rte_eth_rx_mq_mode mq_mode;
	uint32_t max_rx_pkt_len;  /**< Only used if jumbo_frame enabled. */
	uint16_t split_hdr_size;  /**< hdr buf size (header_split enabled). */
	__extension__
	uint16_t header_split   : 1, /**< Header Split enable. */
		hw_ip_checksum  : 1, /**< IP/UDP/TCP checksum offload enable. */
		hw_vlan_filter  : 1, /**< VLAN filter enable. */
		hw_vlan_strip   : 1, /**< VLAN strip enable. */
		hw_vlan_extend  : 1, /**< Extended VLAN enable. */
		jumbo_frame     : 1, /**< Jumbo Frame Receipt enable. */
		hw_strip_crc    : 1, /**< Enable CRC stripping by hardware. */
		enable_scatter  : 1, /**< Enable scatter packets rx handler. */
		enable_lro      : 1; /**< Enable LRO */
};

/**
 * VLAN types to indicate if it is for single VLAN, inner VLAN or outer VLAN.
 */
enum rte_vlan_type {
	ETH_VLAN_TYPE_UNKNOWN = 0,
	ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
	ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
	ETH_VLAN_TYPE_MAX,
};

/**
 * A structure used to configure the Receive Side Scaling (RSS) feature
 * of an Ethernet port.
 */
struct rte_eth_rss_conf {
	uint8_t *rss_key;    /**< If not NULL, 40-byte hash key. */
	uint8_t rss_key_len; /**< hash key length in bytes. */
	uint64_t rss_hf;     /**< Hash functions to apply - see below. */
};

/*
 * The RSS offload types are defined based on the flow types which are defined
 * in rte_eth_ctrl.h. Different NIC hardware may support different RSS offload
 * types. The supported flow types or RSS offload types can be queried by
 * rte_eth_dev_info_get().
 */
#define ETH_RSS_IPV4               (1ULL << RTE_ETH_FLOW_IPV4)
#define ETH_RSS_FRAG_IPV4          (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
#define ETH_RSS_IPV6               (1ULL << RTE_ETH_FLOW_IPV6)
#define ETH_RSS_FRAG_IPV6          (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
#define ETH_RSS_L2_PAYLOAD         (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
#define ETH_RSS_IPV6_EX            (1ULL << RTE_ETH_FLOW_IPV6_EX)
#define ETH_RSS_IPV6_TCP_EX        (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
#define ETH_RSS_IPV6_UDP_EX        (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
#define ETH_RSS_PORT               (1ULL << RTE_ETH_FLOW_PORT)
#define ETH_RSS_VXLAN              (1ULL << RTE_ETH_FLOW_VXLAN)
#define ETH_RSS_GENEVE             (1ULL << RTE_ETH_FLOW_GENEVE)
#define ETH_RSS_NVGRE              (1ULL << RTE_ETH_FLOW_NVGRE)

#define ETH_RSS_IP ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_IPV6_EX)

#define ETH_RSS_UDP ( \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_IPV6_UDP_EX)

#define ETH_RSS_TCP ( \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_IPV6_TCP_EX)

#define ETH_RSS_SCTP ( \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV6_SCTP)

#define ETH_RSS_TUNNEL ( \
	ETH_RSS_VXLAN  | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

/**< Mask of valid RSS hash protocols */
#define ETH_RSS_PROTO_MASK ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_NONFRAG_IPV6_SCTP | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_L2_PAYLOAD | \
	ETH_RSS_IPV6_EX | \
	ETH_RSS_IPV6_TCP_EX | \
	ETH_RSS_IPV6_UDP_EX | \
	ETH_RSS_PORT | \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)
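
/**
 * Usage sketch (illustrative, assumes a configured port whose driver supports
 * the requested hash functions; rte_eth_dev_rss_hash_update() is declared
 * later in this header): hash IP and TCP traffic with the driver default key.
 * @code
 * struct rte_eth_rss_conf rss_conf = {
 *         .rss_key = NULL,                      // keep the driver default key
 *         .rss_hf  = ETH_RSS_IP | ETH_RSS_TCP,
 * };
 *
 * if (rte_eth_dev_rss_hash_update(port_id, &rss_conf) != 0)
 *         printf("RSS update failed on port %u\n", port_id);
 * @endcode
 */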

/*
 * Definitions used for redirection table entry size.
 * Some RSS RETA sizes may not be supported by some drivers, check the
 * documentation or the description of relevant functions for more details.
 */
#define ETH_RSS_RETA_SIZE_64  64
#define ETH_RSS_RETA_SIZE_128 128
#define ETH_RSS_RETA_SIZE_256 256
#define ETH_RSS_RETA_SIZE_512 512
#define RTE_RETA_GROUP_SIZE   64

/* Definitions used for VMDQ and DCB functionality */
#define ETH_VMDQ_MAX_VLAN_FILTERS   64
#define ETH_DCB_NUM_USER_PRIORITIES 8
#define ETH_VMDQ_DCB_NUM_QUEUES     128
#define ETH_DCB_NUM_QUEUES          128

/* DCB capability defines */
#define ETH_DCB_PG_SUPPORT      0x00000001
#define ETH_DCB_PFC_SUPPORT     0x00000002

/* Definitions used for VLAN Offload functionality */
#define ETH_VLAN_STRIP_OFFLOAD  0x0001
#define ETH_VLAN_FILTER_OFFLOAD 0x0002
#define ETH_VLAN_EXTEND_OFFLOAD 0x0004

/* Definitions used for mask VLAN setting */
#define ETH_VLAN_STRIP_MASK     0x0001
#define ETH_VLAN_FILTER_MASK    0x0002
#define ETH_VLAN_EXTEND_MASK    0x0004
#define ETH_VLAN_ID_MAX         0x0FFF

/* Definitions used for receive MAC address */
#define ETH_NUM_RECEIVE_MAC_ADDR 128

/* Definitions used for unicast hash */
#define ETH_VMDQ_NUM_UC_HASH_ARRAY 128

/* Definitions used for VMDQ pool rx mode setting */
#define ETH_VMDQ_ACCEPT_UNTAG     0x0001
#define ETH_VMDQ_ACCEPT_HASH_MC   0x0002
#define ETH_VMDQ_ACCEPT_HASH_UC   0x0004
#define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
#define ETH_VMDQ_ACCEPT_MULTICAST 0x0010

/** Maximum nb. of vlan per mirror rule */
#define ETH_MIRROR_MAX_VLANS 64

#define ETH_MIRROR_VIRTUAL_POOL_UP   0x01 /**< Virtual Pool uplink Mirroring. */
#define ETH_MIRROR_UPLINK_PORT       0x02 /**< Uplink Port Mirroring. */
#define ETH_MIRROR_DOWNLINK_PORT     0x04 /**< Downlink Port Mirroring. */
#define ETH_MIRROR_VLAN              0x08 /**< VLAN Mirroring. */
#define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10 /**< Virtual Pool downlink Mirroring. */
/**
 * A structure used to configure VLAN traffic mirroring.
 */
struct rte_eth_vlan_mirror {
	uint64_t vlan_mask; /**< mask for valid VLAN ID. */
	/** VLAN ID list for vlan mirroring. */
	uint16_t vlan_id[ETH_MIRROR_MAX_VLANS];
};

/**
 * A structure used to configure traffic mirroring of an Ethernet port.
 */
struct rte_eth_mirror_conf {
	uint8_t rule_type;  /**< Mirroring rule type */
	uint8_t dst_pool;   /**< Destination pool for this mirror rule. */
	uint64_t pool_mask; /**< Bitmap of pool for pool mirroring */
	/** VLAN ID setting for VLAN mirroring. */
	struct rte_eth_vlan_mirror vlan;
};

/**
 * A structure used to configure 64 entries of the Redirection Table of the
 * Receive Side Scaling (RSS) feature of an Ethernet port.
 */
struct rte_eth_rss_reta_entry64 {
	uint64_t mask;
	/**< Mask bits indicate which entries need to be updated/queried. */
	uint16_t reta[RTE_RETA_GROUP_SIZE];
	/**< Group of 64 redirection table entries. */
};

/**
 * This enum indicates the possible number of traffic classes
 * in DCB configurations.
 */
enum rte_eth_nb_tcs {
	ETH_4_TCS = 4, /**< 4 TCs with DCB. */
	ETH_8_TCS = 8  /**< 8 TCs with DCB. */
};

/**
 * This enum indicates the possible number of queue pools
 * in VMDQ configurations.
 */
enum rte_eth_nb_pools {
	ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
	ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
	ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
	ETH_64_POOLS = 64   /**< 64 VMDq pools. */
};

/* This structure may be extended in future. */
struct rte_eth_dcb_rx_conf {
	enum rte_eth_nb_tcs nb_tcs;
	/** Traffic class each UP mapped to. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

struct rte_eth_vmdq_dcb_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools;
	/** Traffic class each UP mapped to. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

struct rte_eth_dcb_tx_conf {
	enum rte_eth_nb_tcs nb_tcs;
	/** Traffic class each UP mapped to. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

struct rte_eth_vmdq_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools;
};

/**
 * A structure used to configure the VMDQ+DCB feature of an Ethernet port.
 */
struct rte_eth_vmdq_dcb_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools */
	uint8_t enable_default_pool; /**< If non-zero, use a default pool */
	uint8_t default_pool; /**< The default pool, if applicable */
	uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */
	struct {
		uint16_t vlan_id; /**< The vlan id of the received frame */
		uint64_t pools;   /**< Bitmask of pools for packet rx */
	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
	/**< Selects a queue in a pool */
};

struct rte_eth_vmdq_rx_conf {
	enum rte_eth_nb_pools nb_queue_pools;
	uint8_t enable_default_pool;
	uint8_t default_pool;
	uint8_t enable_loop_back;
	uint8_t nb_pool_maps;
	uint32_t rx_mode;
	struct {
		uint16_t vlan_id;
		uint64_t pools;
	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS];
};

/**
 * A structure used to configure the TX features of an Ethernet port.
 */
struct rte_eth_txmode {
	enum rte_eth_tx_mq_mode mq_mode; /**< TX multi-queues mode. */

	/* For i40e specifically */
	uint16_t pvid;
	__extension__
	uint8_t hw_vlan_reject_tagged : 1,
		/**< If set, reject sending out tagged pkts */
		hw_vlan_reject_untagged : 1,
		/**< If set, reject sending out untagged pkts */
		hw_vlan_insert_pvid : 1;
		/**< If set, enable port based VLAN insertion */
};

/**
 * A structure used to configure an RX ring of an Ethernet port.
 */
struct rte_eth_rxconf {
	struct rte_eth_thresh rx_thresh; /**< RX ring threshold registers. */
	uint16_t rx_free_thresh; /**< Drives the freeing of RX descriptors. */
	uint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */
	uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
};

#define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001 /**< nb_segs=1 for all mbufs */
#define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002 /**< refcnt can be ignored */
#define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004 /**< all bufs come from same mempool */
#define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100 /**< disable VLAN offload */
#define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200 /**< disable SCTP checksum offload */
#define ETH_TXQ_FLAGS_NOXSUMUDP  0x0400 /**< disable UDP checksum offload */
#define ETH_TXQ_FLAGS_NOXSUMTCP  0x0800 /**< disable TCP checksum offload */
#define ETH_TXQ_FLAGS_NOOFFLOADS \
		(ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
		 ETH_TXQ_FLAGS_NOXSUMUDP  | ETH_TXQ_FLAGS_NOXSUMTCP)
#define ETH_TXQ_FLAGS_NOXSUMS \
		(ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
		 ETH_TXQ_FLAGS_NOXSUMTCP)

/**
 * A structure used to configure a TX ring of an Ethernet port.
 */
struct rte_eth_txconf {
	struct rte_eth_thresh tx_thresh; /**< TX ring threshold registers. */
	uint16_t tx_rs_thresh; /**< Drives the setting of RS bit on TXDs. */
	uint16_t tx_free_thresh; /**< Start freeing TX buffers if there are
				      less free descriptors than this value. */
	uint32_t txq_flags; /**< Set flags for the Tx queue */
	uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
};

/**
 * A structure that contains information about HW descriptor ring limitations.
 */
struct rte_eth_desc_lim {
	uint16_t nb_max;   /**< Max allowed number of descriptors. */
	uint16_t nb_min;   /**< Min allowed number of descriptors. */
	uint16_t nb_align; /**< Number of descriptors should be aligned to. */

	/** Max allowed number of segments per whole packet. */
	uint16_t nb_seg_max;

	/** Max number of segments per one MTU. */
	uint16_t nb_mtu_seg_max;
};

/**
 * This enum indicates the flow control mode.
 */
enum rte_eth_fc_mode {
	RTE_FC_NONE = 0, /**< Disable flow control. */
	RTE_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */
	RTE_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */
	RTE_FC_FULL      /**< Enable flow control on both sides. */
};

/**
 * A structure used to configure Ethernet flow control parameters.
 */
struct rte_eth_fc_conf {
	uint32_t high_water;  /**< High threshold value to trigger XOFF */
	uint32_t low_water;   /**< Low threshold value to trigger XON */
	uint16_t pause_time;  /**< Pause quota in the Pause frame */
	uint16_t send_xon;    /**< Is XON frame need be sent */
	enum rte_eth_fc_mode mode;  /**< Link flow control mode */
	uint8_t mac_ctrl_frame_fwd; /**< Forward MAC control frames */
	uint8_t autoneg;      /**< Use Pause autoneg */
};

/**
 * A structure used to configure Ethernet priority flow control parameters.
 */
struct rte_eth_pfc_conf {
	struct rte_eth_fc_conf fc; /**< General flow control parameter. */
	uint8_t priority;          /**< VLAN User Priority. */
};

/**
 * Memory space that can be configured to store Flow Director filters
 * in the board memory.
 */
enum rte_fdir_pballoc_type {
	RTE_FDIR_PBALLOC_64K = 0,  /**< 64k. */
	RTE_FDIR_PBALLOC_128K,     /**< 128k. */
	RTE_FDIR_PBALLOC_256K,     /**< 256k. */
};

/**
 * Select report mode of FDIR hash information in RX descriptors.
 */
enum rte_fdir_status_mode {
	RTE_FDIR_NO_REPORT_STATUS = 0, /**< Never report FDIR hash. */
	RTE_FDIR_REPORT_STATUS, /**< Only report FDIR hash for matching pkts. */
	RTE_FDIR_REPORT_STATUS_ALWAYS, /**< Always report FDIR hash. */
};

/**
 * A structure used to configure the Flow Director (FDIR) feature
 * of an Ethernet port.
 */
struct rte_fdir_conf {
	enum rte_fdir_mode mode; /**< Flow Director mode. */
	enum rte_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
	enum rte_fdir_status_mode status;  /**< How to report FDIR hash. */
	/** RX queue of packets matching a "drop" filter in perfect mode. */
	uint8_t drop_queue;
	struct rte_eth_fdir_masks mask;
	struct rte_eth_fdir_flex_conf flex_conf;
	/**< Flex payload configuration. */
};

/**
 * UDP tunneling configuration.
 */
struct rte_eth_udp_tunnel {
	uint16_t udp_port; /**< UDP port used for the tunnel. */
	uint8_t prot_type; /**< Tunnel type. Defined in rte_eth_tunnel_type. */
};

/**
 * A structure used to enable/disable specific device interrupts.
 */
struct rte_intr_conf {
	/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
	uint32_t lsc:1;
	/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
	uint32_t rxq:1;
	/** enable/disable rmv interrupt. 0 (default) - disable, 1 enable */
	uint32_t rmv:1;
};

/**
 * A structure used to configure an Ethernet port.
 * Depending upon the RX multi-queue mode, extra advanced
 * configuration settings may be needed.
 */
struct rte_eth_conf {
	uint32_t link_speeds; /**< bitmap of ETH_LINK_SPEED_XXX of speeds to be
				used. ETH_LINK_SPEED_FIXED disables link
				autonegotiation, and a unique speed shall be
				set. Otherwise, the bitmap defines the set of
				speeds to be advertised. If the special value
				ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
				supported are advertised. */
	struct rte_eth_rxmode rxmode; /**< Port RX configuration. */
	struct rte_eth_txmode txmode; /**< Port TX configuration. */
	uint32_t lpbk_mode; /**< Loopback operation mode. By default the value
				 is 0, meaning the loopback mode is disabled.
				 Read the datasheet of a given Ethernet
				 controller for details. The possible values
				 are defined in each driver's implementation. */
	struct {
		struct rte_eth_rss_conf rss_conf; /**< Port RSS configuration */
		struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
		/**< Port vmdq+dcb configuration. */
		struct rte_eth_dcb_rx_conf dcb_rx_conf;
		/**< Port dcb RX configuration. */
		struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
		/**< Port vmdq RX configuration. */
	} rx_adv_conf; /**< Port RX filtering configuration. */
	union {
		struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
		/**< Port vmdq+dcb TX configuration. */
		struct rte_eth_dcb_tx_conf dcb_tx_conf;
		/**< Port dcb TX configuration. */
		struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
		/**< Port vmdq TX configuration. */
	} tx_adv_conf; /**< Port TX DCB configuration (union). */
	/** Currently, Priority Flow Control (PFC) is supported; if DCB with
	    PFC is needed, this variable must be set to ETH_DCB_PFC_SUPPORT. */
	uint32_t dcb_capability_en;
	struct rte_fdir_conf fdir_conf; /**< FDIR configuration. */
	struct rte_intr_conf intr_conf; /**< Interrupt mode configuration. */
};
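
/**
 * Usage sketch (illustrative, assumes an mbuf mempool "mb_pool" created
 * elsewhere and a valid port_id): minimal single-queue port bring-up built
 * from this structure and the setup functions declared later in this header.
 * Passing NULL for the per-queue configs selects the driver defaults.
 * @code
 * struct rte_eth_conf port_conf = {
 *         .rxmode = { .mq_mode = ETH_MQ_RX_NONE, },
 * };
 *
 * if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0 ||
 *     rte_eth_rx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
 *                            NULL, mb_pool) < 0 ||
 *     rte_eth_tx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
 *                            NULL) < 0 ||
 *     rte_eth_dev_start(port_id) < 0)
 *         rte_exit(EXIT_FAILURE, "Port %u init failed\n", port_id);
 * @endcode
 */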

/**
 * RX offload capabilities of a device.
 */
#define DEV_RX_OFFLOAD_VLAN_STRIP       0x00000001
#define DEV_RX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_RX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_RX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_RX_OFFLOAD_TCP_LRO          0x00000010
#define DEV_RX_OFFLOAD_QINQ_STRIP       0x00000020
#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080

/**
 * TX offload capabilities of a device.
 */
#define DEV_TX_OFFLOAD_VLAN_INSERT      0x00000001
#define DEV_TX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_TX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_TX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_TX_OFFLOAD_SCTP_CKSUM       0x00000010
#define DEV_TX_OFFLOAD_TCP_TSO          0x00000020
#define DEV_TX_OFFLOAD_UDP_TSO          0x00000040
#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_QINQ_INSERT      0x00000100
#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000

struct rte_pci_device;

/**
 * Ethernet device information.
 */
struct rte_eth_dev_info {
	struct rte_pci_device *pci_dev; /**< Device PCI information. */
	const char *driver_name; /**< Device Driver name. */
	unsigned int if_index; /**< Index to bound host interface, or 0 if none.
		Use if_indextoname() to translate into an interface name. */
	uint32_t min_rx_bufsize; /**< Minimum size of RX buffer. */
	uint32_t max_rx_pktlen;  /**< Maximum configurable length of RX pkt. */
	uint16_t max_rx_queues;  /**< Maximum number of RX queues. */
	uint16_t max_tx_queues;  /**< Maximum number of TX queues. */
	uint32_t max_mac_addrs;  /**< Maximum number of MAC addresses. */
	uint32_t max_hash_mac_addrs;
	/** Maximum number of hash MAC addresses for MTA and UTA. */
	uint16_t max_vfs;        /**< Maximum number of VFs. */
	uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
	uint32_t rx_offload_capa; /**< Device RX offload capabilities. */
	uint32_t tx_offload_capa; /**< Device TX offload capabilities. */
	uint16_t reta_size;
	/**< Device redirection table size, the total number of entries. */
	uint8_t hash_key_size; /**< Hash key size in bytes */
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;
	struct rte_eth_rxconf default_rxconf; /**< Default RX configuration */
	struct rte_eth_txconf default_txconf; /**< Default TX configuration */
	uint16_t vmdq_queue_base; /**< First queue ID for VMDQ pools. */
	uint16_t vmdq_queue_num;  /**< Queue number for VMDQ pools. */
	uint16_t vmdq_pool_base;  /**< First ID of VMDQ pools. */
	struct rte_eth_desc_lim rx_desc_lim; /**< RX descriptors limits */
	struct rte_eth_desc_lim tx_desc_lim; /**< TX descriptors limits */
	uint32_t speed_capa; /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
	uint16_t nb_rx_queues; /**< Configured number of RX queues. */
	uint16_t nb_tx_queues; /**< Configured number of TX queues. */
};

/**
 * Ethernet device RX queue information structure.
 * Used to retrieve information about a configured queue.
 */
struct rte_eth_rxq_info {
	struct rte_mempool *mp;     /**< mempool used by that queue. */
	struct rte_eth_rxconf conf; /**< queue config parameters. */
	uint8_t scattered_rx;       /**< scattered packets RX supported. */
	uint16_t nb_desc;           /**< configured number of RXDs. */
} __rte_cache_min_aligned;

/**
 * Ethernet device TX queue information structure.
 * Used to retrieve information about a configured queue.
 */
struct rte_eth_txq_info {
	struct rte_eth_txconf conf; /**< queue config parameters. */
	uint16_t nb_desc;           /**< configured number of TXDs. */
} __rte_cache_min_aligned;

/** Maximum name length for extended statistics counters */
#define RTE_ETH_XSTATS_NAME_SIZE 64

/**
 * An Ethernet device extended statistic structure.
 */
struct rte_eth_xstat {
	uint64_t id;    /**< The index in the name array. */
	uint64_t value; /**< The statistic counter value. */
};

/**
 * A name element for extended statistics.
 */
struct rte_eth_xstat_name {
	char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */
};

#define ETH_DCB_NUM_TCS    8
#define ETH_MAX_VMDQ_POOL  64

/**
 * A structure used to get the information about the queue to
 * TC mapping on both TX and RX paths.
 */
struct rte_eth_dcb_tc_queue_mapping {
	/** rx queues assigned to tc per Pool */
	struct {
		uint8_t base;
		uint8_t nb_queue;
	} tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
	/** tx queues assigned to tc per Pool */
	struct {
		uint8_t base;
		uint8_t nb_queue;
	} tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
};

/**
 * A structure used to get the information of DCB.
 */
struct rte_eth_dcb_info {
	uint8_t nb_tcs; /**< number of TCs */
	uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
	uint8_t tc_bws[ETH_DCB_NUM_TCS]; /**< TX BW percentage for each TC */
	/** rx queues assigned to tc */
	struct rte_eth_dcb_tc_queue_mapping tc_queue;
};

/* RX/TX queue states */
#define RTE_ETH_QUEUE_STATE_STOPPED 0
#define RTE_ETH_QUEUE_STATE_STARTED 1

struct rte_eth_dev;

struct rte_eth_dev_callback;
TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);

/* Macros to check for valid port */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return retval; \
	} \
} while (0)

#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return; \
	} \
} while (0)

/* L2 tunnel configuration masks */
#define ETH_L2_TUNNEL_ENABLE_MASK       0x00000001
#define ETH_L2_TUNNEL_INSERTION_MASK    0x00000002
#define ETH_L2_TUNNEL_STRIPPING_MASK    0x00000004
#define ETH_L2_TUNNEL_FORWARDING_MASK   0x00000008

/*
 * Definitions of all functions exported by an Ethernet driver through the
 * generic structure of type *eth_dev_ops* supplied in the *rte_eth_dev*
 * structure associated with an Ethernet device.
 */

typedef int  (*eth_dev_configure_t)(struct rte_eth_dev *dev);

typedef int  (*eth_dev_start_t)(struct rte_eth_dev *dev);

typedef void (*eth_dev_stop_t)(struct rte_eth_dev *dev);

typedef int  (*eth_dev_set_link_up_t)(struct rte_eth_dev *dev);

typedef int  (*eth_dev_set_link_down_t)(struct rte_eth_dev *dev);

typedef void (*eth_dev_close_t)(struct rte_eth_dev *dev);

typedef void (*eth_promiscuous_enable_t)(struct rte_eth_dev *dev);

typedef void (*eth_promiscuous_disable_t)(struct rte_eth_dev *dev);

typedef void (*eth_allmulticast_enable_t)(struct rte_eth_dev *dev);

typedef void (*eth_allmulticast_disable_t)(struct rte_eth_dev *dev);

typedef int (*eth_link_update_t)(struct rte_eth_dev *dev,
				int wait_to_complete);

typedef void (*eth_stats_get_t)(struct rte_eth_dev *dev,
				struct rte_eth_stats *igb_stats);

typedef void (*eth_stats_reset_t)(struct rte_eth_dev *dev);

typedef int (*eth_xstats_get_t)(struct rte_eth_dev *dev,
	struct rte_eth_xstat *stats, unsigned n);

typedef int (*eth_xstats_get_by_id_t)(struct rte_eth_dev *dev,
				      const uint64_t *ids,
				      uint64_t *values,
				      unsigned int n);

typedef void (*eth_xstats_reset_t)(struct rte_eth_dev *dev);

typedef int (*eth_xstats_get_names_t)(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned size);

typedef int (*eth_xstats_get_names_by_id_t)(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
	unsigned int size);

typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
					     uint16_t queue_id,
					     uint8_t stat_idx,
					     uint8_t is_rx);

typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
				    struct rte_eth_dev_info *dev_info);

typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(struct rte_eth_dev *dev);

typedef int (*eth_queue_start_t)(struct rte_eth_dev *dev,
				 uint16_t queue_id);

typedef int (*eth_queue_stop_t)(struct rte_eth_dev *dev,
				uint16_t queue_id);

typedef int (*eth_rx_queue_setup_t)(struct rte_eth_dev *dev,
				    uint16_t rx_queue_id,
				    uint16_t nb_rx_desc,
				    unsigned int socket_id,
				    const struct rte_eth_rxconf *rx_conf,
				    struct rte_mempool *mb_pool);

typedef int (*eth_tx_queue_setup_t)(struct rte_eth_dev *dev,
				    uint16_t tx_queue_id,
				    uint16_t nb_tx_desc,
				    unsigned int socket_id,
				    const struct rte_eth_txconf *tx_conf);

typedef int (*eth_rx_enable_intr_t)(struct rte_eth_dev *dev,
				    uint16_t rx_queue_id);

typedef int (*eth_rx_disable_intr_t)(struct rte_eth_dev *dev,
				     uint16_t rx_queue_id);

typedef void (*eth_queue_release_t)(void *queue);

typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
					 uint16_t rx_queue_id);

typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);

typedef int (*eth_rx_descriptor_status_t)(void *rxq, uint16_t offset);

typedef int (*eth_tx_descriptor_status_t)(void *txq, uint16_t offset);

typedef int (*eth_fw_version_get_t)(struct rte_eth_dev *dev,
				    char *fw_version, size_t fw_size);

typedef int (*eth_tx_done_cleanup_t)(void *txq, uint32_t free_cnt);

typedef void (*eth_rxq_info_get_t)(struct rte_eth_dev *dev,
	uint16_t rx_queue_id, struct rte_eth_rxq_info *qinfo);

typedef void (*eth_txq_info_get_t)(struct rte_eth_dev *dev,
	uint16_t tx_queue_id, struct rte_eth_txq_info *qinfo);

typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);

typedef int (*vlan_filter_set_t)(struct rte_eth_dev *dev,
				 uint16_t vlan_id,
				 int on);

typedef int (*vlan_tpid_set_t)(struct rte_eth_dev *dev,
			       enum rte_vlan_type type, uint16_t tpid);

typedef void (*vlan_offload_set_t)(struct rte_eth_dev *dev, int mask);

typedef int (*vlan_pvid_set_t)(struct rte_eth_dev *dev,
			       uint16_t vlan_id,
			       int on);

typedef void (*vlan_strip_queue_set_t)(struct rte_eth_dev *dev,
				       uint16_t rx_queue_id,
				       int on);

typedef uint16_t (*eth_rx_burst_t)(void *rxq,
				   struct rte_mbuf **rx_pkts,
				   uint16_t nb_pkts);

typedef uint16_t (*eth_tx_burst_t)(void *txq,
				   struct rte_mbuf **tx_pkts,
				   uint16_t nb_pkts);

typedef uint16_t (*eth_tx_prep_t)(void *txq,
				  struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);

typedef int (*flow_ctrl_get_t)(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);

typedef int (*flow_ctrl_set_t)(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);

typedef int (*priority_flow_ctrl_set_t)(struct rte_eth_dev *dev,
				struct rte_eth_pfc_conf *pfc_conf);

typedef int (*reta_update_t)(struct rte_eth_dev *dev,
			     struct rte_eth_rss_reta_entry64 *reta_conf,
			     uint16_t reta_size);

typedef int (*reta_query_t)(struct rte_eth_dev *dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size);

typedef int (*rss_hash_update_t)(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);

typedef int (*rss_hash_conf_get_t)(struct rte_eth_dev *dev,
				   struct rte_eth_rss_conf *rss_conf);

typedef int (*eth_dev_led_on_t)(struct rte_eth_dev *dev);

typedef int (*eth_dev_led_off_t)(struct rte_eth_dev *dev);

typedef void (*eth_mac_addr_remove_t)(struct rte_eth_dev *dev, uint32_t index);

typedef int (*eth_mac_addr_add_t)(struct rte_eth_dev *dev,
				  struct ether_addr *mac_addr,
				  uint32_t index,
				  uint32_t vmdq);

typedef void (*eth_mac_addr_set_t)(struct rte_eth_dev *dev,
				   struct ether_addr *mac_addr);

typedef int (*eth_uc_hash_table_set_t)(struct rte_eth_dev *dev,
				       struct ether_addr *mac_addr,
				       uint8_t on);

typedef int (*eth_uc_all_hash_table_set_t)(struct rte_eth_dev *dev,
					   uint8_t on);

typedef int (*eth_set_queue_rate_limit_t)(struct rte_eth_dev *dev,
					  uint16_t queue_idx,
					  uint16_t tx_rate);

typedef int (*eth_mirror_rule_set_t)(struct rte_eth_dev *dev,
				     struct rte_eth_mirror_conf *mirror_conf,
				     uint8_t rule_id,
				     uint8_t on);

typedef int (*eth_mirror_rule_reset_t)(struct rte_eth_dev *dev,
					uint8_t rule_id);

typedef int (*eth_udp_tunnel_port_add_t)(struct rte_eth_dev *dev,
					 struct rte_eth_udp_tunnel *tunnel_udp);

typedef int (*eth_udp_tunnel_port_del_t)(struct rte_eth_dev *dev,
					 struct rte_eth_udp_tunnel *tunnel_udp);

typedef int (*eth_set_mc_addr_list_t)(struct rte_eth_dev *dev,
				      struct ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);

typedef int (*eth_timesync_enable_t)(struct rte_eth_dev *dev);

typedef int (*eth_timesync_disable_t)(struct rte_eth_dev *dev);

typedef int (*eth_timesync_read_rx_timestamp_t)(struct rte_eth_dev *dev,
						struct timespec *timestamp,
						uint32_t flags);

typedef int (*eth_timesync_read_tx_timestamp_t)(struct rte_eth_dev *dev,
						struct timespec *timestamp);

typedef int (*eth_timesync_adjust_time)(struct rte_eth_dev *dev, int64_t);

typedef int (*eth_timesync_read_time)(struct rte_eth_dev *dev,
				      struct timespec *timestamp);

typedef int (*eth_timesync_write_time)(struct rte_eth_dev *dev,
				       const struct timespec *timestamp);

typedef int (*eth_get_reg_t)(struct rte_eth_dev *dev,
			     struct rte_dev_reg_info *info);

typedef int (*eth_get_eeprom_length_t)(struct rte_eth_dev *dev);

typedef int (*eth_get_eeprom_t)(struct rte_eth_dev *dev,
				struct rte_dev_eeprom_info *info);

typedef int (*eth_set_eeprom_t)(struct rte_eth_dev *dev,
				struct rte_dev_eeprom_info *info);

typedef int (*eth_l2_tunnel_eth_type_conf_t)
	(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);

typedef int (*eth_l2_tunnel_offload_set_t)
	(struct rte_eth_dev *dev,
	 struct rte_eth_l2_tunnel_conf *l2_tunnel,
	 uint32_t mask,
	 uint8_t en);

#ifdef RTE_NIC_BYPASS

enum {
	RTE_BYPASS_MODE_NONE,
	RTE_BYPASS_MODE_NORMAL,
	RTE_BYPASS_MODE_BYPASS,
	RTE_BYPASS_MODE_ISOLATE,
	RTE_BYPASS_MODE_NUM,
};

#define RTE_BYPASS_MODE_VALID(x) \
	((x) > RTE_BYPASS_MODE_NONE && (x) < RTE_BYPASS_MODE_NUM)

enum {
	RTE_BYPASS_EVENT_NONE,
	RTE_BYPASS_EVENT_START,
	RTE_BYPASS_EVENT_OS_ON = RTE_BYPASS_EVENT_START,
	RTE_BYPASS_EVENT_POWER_ON,
	RTE_BYPASS_EVENT_OS_OFF,
	RTE_BYPASS_EVENT_POWER_OFF,
	RTE_BYPASS_EVENT_TIMEOUT,
	RTE_BYPASS_EVENT_NUM
};

#define RTE_BYPASS_EVENT_VALID(x) \
	((x) > RTE_BYPASS_EVENT_NONE && (x) < RTE_BYPASS_EVENT_NUM)

enum {
	RTE_BYPASS_TMT_OFF,     /* timeout disabled. */
	RTE_BYPASS_TMT_1_5_SEC, /* timeout for 1.5 seconds */
	RTE_BYPASS_TMT_2_SEC,   /* timeout for 2 seconds */
	RTE_BYPASS_TMT_3_SEC,   /* timeout for 3 seconds */
	RTE_BYPASS_TMT_4_SEC,   /* timeout for 4 seconds */
	RTE_BYPASS_TMT_8_SEC,   /* timeout for 8 seconds */
	RTE_BYPASS_TMT_16_SEC,  /* timeout for 16 seconds */
	RTE_BYPASS_TMT_32_SEC,  /* timeout for 32 seconds */
	RTE_BYPASS_TMT_NUM
};

#define RTE_BYPASS_TMT_VALID(x) \
	((x) == RTE_BYPASS_TMT_OFF || \
	((x) > RTE_BYPASS_TMT_OFF && (x) < RTE_BYPASS_TMT_NUM))

typedef void (*bypass_init_t)(struct rte_eth_dev *dev);
typedef int32_t (*bypass_state_set_t)(struct rte_eth_dev *dev, uint32_t *new_state);
typedef int32_t (*bypass_state_show_t)(struct rte_eth_dev *dev, uint32_t *state);
typedef int32_t (*bypass_event_set_t)(struct rte_eth_dev *dev, uint32_t state, uint32_t event);
typedef int32_t (*bypass_event_show_t)(struct rte_eth_dev *dev, uint32_t event_shift, uint32_t *event);
typedef int32_t (*bypass_wd_timeout_set_t)(struct rte_eth_dev *dev, uint32_t timeout);
typedef int32_t (*bypass_wd_timeout_show_t)(struct rte_eth_dev *dev, uint32_t *wd_timeout);
typedef int32_t (*bypass_ver_show_t)(struct rte_eth_dev *dev, uint32_t *ver);
typedef int32_t (*bypass_wd_reset_t)(struct rte_eth_dev *dev);
#endif

typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
				 enum rte_filter_type filter_type,
				 enum rte_filter_op filter_op,
				 void *arg);

typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
				struct rte_eth_dcb_info *dcb_info);

struct eth_dev_ops {
	eth_dev_configure_t        dev_configure;
	eth_dev_start_t            dev_start;
	eth_dev_stop_t             dev_stop;
	eth_dev_set_link_up_t      dev_set_link_up;
	eth_dev_set_link_down_t    dev_set_link_down;
	eth_dev_close_t            dev_close;
	eth_link_update_t          link_update;

	eth_promiscuous_enable_t   promiscuous_enable;
	eth_promiscuous_disable_t  promiscuous_disable;
	eth_allmulticast_enable_t  allmulticast_enable;
	eth_allmulticast_disable_t allmulticast_disable;
	eth_mac_addr_remove_t      mac_addr_remove;
	eth_mac_addr_add_t         mac_addr_add;
	eth_mac_addr_set_t         mac_addr_set;
	eth_set_mc_addr_list_t     set_mc_addr_list;
	mtu_set_t                  mtu_set;

	eth_stats_get_t            stats_get;
	eth_stats_reset_t          stats_reset;
	eth_xstats_get_t           xstats_get;
	eth_xstats_reset_t         xstats_reset;
	eth_xstats_get_names_t     xstats_get_names;
	eth_queue_stats_mapping_set_t queue_stats_mapping_set;

	eth_dev_infos_get_t        dev_infos_get;
	eth_rxq_info_get_t         rxq_info_get;
	eth_txq_info_get_t         txq_info_get;
	eth_fw_version_get_t       fw_version_get;
	eth_dev_supported_ptypes_get_t dev_supported_ptypes_get;

	vlan_filter_set_t          vlan_filter_set;
	vlan_tpid_set_t            vlan_tpid_set;
	vlan_strip_queue_set_t     vlan_strip_queue_set;
	vlan_offload_set_t         vlan_offload_set;
	vlan_pvid_set_t            vlan_pvid_set;

	eth_queue_start_t          rx_queue_start;
	eth_queue_stop_t           rx_queue_stop;
	eth_queue_start_t          tx_queue_start;
	eth_queue_stop_t           tx_queue_stop;
	eth_rx_queue_setup_t       rx_queue_setup;
	eth_queue_release_t        rx_queue_release;
	eth_rx_queue_count_t       rx_queue_count;
	eth_rx_descriptor_done_t   rx_descriptor_done;
	eth_rx_descriptor_status_t rx_descriptor_status;
	eth_tx_descriptor_status_t tx_descriptor_status;
	eth_rx_enable_intr_t       rx_queue_intr_enable;
	eth_rx_disable_intr_t      rx_queue_intr_disable;
	eth_tx_queue_setup_t       tx_queue_setup;
	eth_queue_release_t        tx_queue_release;
	eth_tx_done_cleanup_t      tx_done_cleanup;

	eth_dev_led_on_t           dev_led_on;
	eth_dev_led_off_t          dev_led_off;

	flow_ctrl_get_t            flow_ctrl_get;
	flow_ctrl_set_t            flow_ctrl_set;
	priority_flow_ctrl_set_t   priority_flow_ctrl_set;

	eth_uc_hash_table_set_t    uc_hash_table_set;
	eth_uc_all_hash_table_set_t uc_all_hash_table_set;

	eth_mirror_rule_set_t      mirror_rule_set;
	eth_mirror_rule_reset_t    mirror_rule_reset;

	eth_udp_tunnel_port_add_t  udp_tunnel_port_add;
	eth_udp_tunnel_port_del_t  udp_tunnel_port_del;
	eth_l2_tunnel_eth_type_conf_t l2_tunnel_eth_type_conf;
	eth_l2_tunnel_offload_set_t l2_tunnel_offload_set;

	eth_set_queue_rate_limit_t set_queue_rate_limit;

	rss_hash_update_t          rss_hash_update;
	rss_hash_conf_get_t        rss_hash_conf_get;
	reta_update_t              reta_update;
	reta_query_t               reta_query;

	eth_get_reg_t              get_reg;
	eth_get_eeprom_length_t    get_eeprom_length;
	eth_get_eeprom_t           get_eeprom;
	eth_set_eeprom_t           set_eeprom;

	/* bypass control */
#ifdef RTE_NIC_BYPASS
	bypass_init_t              bypass_init;
	bypass_state_set_t         bypass_state_set;
	bypass_state_show_t        bypass_state_show;
	bypass_event_set_t         bypass_event_set;
	bypass_event_show_t        bypass_event_show;
	bypass_wd_timeout_set_t    bypass_wd_timeout_set;
	bypass_wd_timeout_show_t   bypass_wd_timeout_show;
	bypass_ver_show_t          bypass_ver_show;
	bypass_wd_reset_t          bypass_wd_reset;
#endif

	eth_filter_ctrl_t          filter_ctrl;
	eth_get_dcb_info           get_dcb_info;

	eth_timesync_enable_t      timesync_enable;
	eth_timesync_disable_t     timesync_disable;
	eth_timesync_read_rx_timestamp_t timesync_read_rx_timestamp;
	eth_timesync_read_tx_timestamp_t timesync_read_tx_timestamp;
	eth_timesync_adjust_time   timesync_adjust_time;
	eth_timesync_read_time     timesync_read_time;
	eth_timesync_write_time    timesync_write_time;

	eth_xstats_get_by_id_t     xstats_get_by_id;
	eth_xstats_get_names_by_id_t xstats_get_names_by_id;
};

typedef uint16_t (*rte_rx_callback_fn)(uint8_t port, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
	void *user_param);

typedef uint16_t (*rte_tx_callback_fn)(uint8_t port, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);

/**
 * @internal
 * Structure used to hold information about the callbacks to be called for a
 * queue on RX and TX.
 */
struct rte_eth_rxtx_callback {
	struct rte_eth_rxtx_callback *next;
	union {
		rte_rx_callback_fn rx;
		rte_tx_callback_fn tx;
	} fn;
	void *param;
};

/**
 * A set of values to describe the possible states of an eth device.
 */
enum rte_eth_dev_state {
	RTE_ETH_DEV_UNUSED = 0,
	RTE_ETH_DEV_ATTACHED,
};

/**
 * @internal
 * The generic data structure associated with each Ethernet device.
 */
struct rte_eth_dev {
	eth_rx_burst_t rx_pkt_burst;  /**< Pointer to PMD receive function. */
	eth_tx_burst_t tx_pkt_burst;  /**< Pointer to PMD transmit function. */
	eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. */
	struct rte_eth_dev_data *data; /**< Pointer to device data */
	const struct eth_dev_ops *dev_ops; /**< Functions exported by PMD */
	struct rte_device *device;    /**< Backing device */
	struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
	/** User application callbacks for NIC interrupts */
	struct rte_eth_dev_cb_list link_intr_cbs;
	/** User-supplied functions called from rx_burst to post-process
	 * received packets before passing them to the user. */
	struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
	/** User-supplied functions called from tx_burst to pre-process
	 * packets before passing them to the driver for transmission. */
	struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
	enum rte_eth_dev_state state; /**< Flag indicating the port state */
} __rte_cache_aligned;

struct rte_eth_dev_sriov {
	uint8_t active;          /**< SRIOV is active with 16, 32 or 64 pools */
	uint8_t nb_q_per_pool;   /**< rx queue number per pool */
	uint16_t def_vmdq_idx;   /**< Default pool num used for PF */
	uint16_t def_pool_q_idx; /**< Default pool queue start reg index */
};
#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)

#define RTE_ETH_NAME_MAX_LEN (32)

/**
 * @internal
 * The data part, with no function pointers, associated with each Ethernet
 * device.
 */
struct rte_eth_dev_data {
	char name[RTE_ETH_NAME_MAX_LEN]; /**< Unique identifier name */

	void **rx_queues; /**< Array of pointers to RX queues. */
	void **tx_queues; /**< Array of pointers to TX queues. */
	uint16_t nb_rx_queues; /**< Number of RX queues. */
	uint16_t nb_tx_queues; /**< Number of TX queues. */

	struct rte_eth_dev_sriov sriov; /**< SRIOV data */

	void *dev_private; /**< PMD-specific private data */

	struct rte_eth_link dev_link; /**< Link-level information & status */

	struct rte_eth_conf dev_conf; /**< Configuration applied to device. */
	uint16_t mtu; /**< Maximum Transmission Unit. */

	uint32_t min_rx_buf_size; /**< Common rx buffer size handled by all queues */

	uint64_t rx_mbuf_alloc_failed; /**< RX ring mbuf allocation failures. */
	struct ether_addr *mac_addrs; /**< Device Ethernet Link address. */
	uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
	/** Bitmap array associating Ethernet MAC addresses to pools. */
	struct ether_addr *hash_mac_addrs;
	/** Device Ethernet MAC addresses of hash filtering. */
	uint8_t port_id; /**< Device [external] port identifier. */
	__extension__
	uint8_t promiscuous   : 1, /**< RX promiscuous mode ON(1) / OFF(0). */
		scattered_rx  : 1, /**< RX of scattered packets is ON(1) / OFF(0) */
		all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */
		dev_started   : 1, /**< Device state: STARTED(1) / STOPPED(0). */
		lro           : 1; /**< RX LRO is ON(1) / OFF(0) */
	uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT];
	uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];
	uint32_t dev_flags; /**< Capabilities */
	enum rte_kernel_driver kdrv; /**< Kernel driver passthrough */
	int numa_node; /**< NUMA node connection */
	const char *drv_name; /**< Driver name */
};

/** Device supports hotplug detach */
#define RTE_ETH_DEV_DETACHABLE   0x0001
/** Device supports link state interrupt */
#define RTE_ETH_DEV_INTR_LSC     0x0002
/** Device is a bonded slave */
#define RTE_ETH_DEV_BONDED_SLAVE 0x0004
/** Device supports device removal interrupt */
#define RTE_ETH_DEV_INTR_RMV     0x0008

/**
 * @internal
 * The pool of *rte_eth_dev* structures.
 */
extern struct rte_eth_dev rte_eth_devices[];

/**
 * Iterate over valid ethdev ports.
 *
 * @param port_id
 *   The id of the next possible valid port.
 * @return
 *   Next valid port id, RTE_MAX_ETHPORTS if there is none.
 */
uint8_t rte_eth_find_next(uint8_t port_id);

/**
 * Macro to iterate over all enabled ethdev ports.
 */
#define RTE_ETH_FOREACH_DEV(p)					\
	for (p = rte_eth_find_next(0);				\
	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS;	\
	     p = rte_eth_find_next(p + 1))

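/**
 * Usage sketch (illustrative): enumerate all attached ports and print their
 * MAC addresses with rte_eth_macaddr_get(), declared later in this header.
 * @code
 * uint8_t port;
 * struct ether_addr addr;
 *
 * RTE_ETH_FOREACH_DEV(port) {
 *         rte_eth_macaddr_get(port, &addr);
 *         printf("Port %u: %02x:%02x:%02x:%02x:%02x:%02x\n", port,
 *                addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2],
 *                addr.addr_bytes[3], addr.addr_bytes[4], addr.addr_bytes[5]);
 * }
 * @endcode
 */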

uint8_t rte_eth_dev_count(void);

struct rte_eth_dev *rte_eth_dev_allocated(const char *name);

struct rte_eth_dev *rte_eth_dev_allocate(const char *name);

struct rte_eth_dev *rte_eth_dev_attach_secondary(const char *name);

int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev);

int rte_eth_dev_attach(const char *devargs, uint8_t *port_id);

int rte_eth_dev_detach(uint8_t port_id, char *devname);

uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);

int rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_queue,
		uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);

void _rte_eth_dev_reset(struct rte_eth_dev *dev);

int rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);

int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);

int rte_eth_dev_socket_id(uint8_t port_id);

int rte_eth_dev_is_valid_port(uint8_t port_id);

int rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id);

int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id);

int rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id);

int rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id);

int rte_eth_dev_start(uint8_t port_id);

void rte_eth_dev_stop(uint8_t port_id);

int rte_eth_dev_set_link_up(uint8_t port_id);

int rte_eth_dev_set_link_down(uint8_t port_id);

void rte_eth_dev_close(uint8_t port_id);

void rte_eth_promiscuous_enable(uint8_t port_id);

void rte_eth_promiscuous_disable(uint8_t port_id);

int rte_eth_promiscuous_get(uint8_t port_id);

void rte_eth_allmulticast_enable(uint8_t port_id);

void rte_eth_allmulticast_disable(uint8_t port_id);

int rte_eth_allmulticast_get(uint8_t port_id);

void rte_eth_link_get(uint8_t port_id, struct rte_eth_link *link);

void rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *link);

int rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats);

void rte_eth_stats_reset(uint8_t port_id);

int rte_eth_xstats_get_names(uint8_t port_id,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);

int rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
		unsigned int n);

int
rte_eth_xstats_get_names_by_id(uint8_t port_id,
	struct rte_eth_xstat_name *xstats_names, unsigned int size,
	uint64_t *ids);

int rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids,
			     uint64_t *values, unsigned int n);

int rte_eth_xstats_get_id_by_name(uint8_t port_id, const char *xstat_name,
		uint64_t *id);

void rte_eth_xstats_reset(uint8_t port_id);

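/**
 * Usage sketch (illustrative, error handling elided): dump all extended
 * statistics of a port using the usual two-call pattern, where a NULL array
 * makes rte_eth_xstats_get() return the number of available counters.
 * @code
 * int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 * struct rte_eth_xstat *xstats = malloc(n * sizeof(*xstats));
 * struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 * rte_eth_xstats_get_names(port_id, names, n);
 * rte_eth_xstats_get(port_id, xstats, n);
 * for (i = 0; i < n; i++)
 *         printf("%s: %" PRIu64 "\n",
 *                names[xstats[i].id].name, xstats[i].value);
 * free(xstats);
 * free(names);
 * @endcode
 */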
int rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id,
		uint16_t tx_queue_id, uint8_t stat_idx);

int rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id,
					   uint16_t rx_queue_id,
					   uint8_t stat_idx);

void rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr);

void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info);

int rte_eth_dev_fw_version_get(uint8_t port_id,
			       char *fw_version, size_t fw_size);

int rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
				     uint32_t *ptypes, int num);

int rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu);

int rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu);

int rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on);

int rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id,
		int on);

int rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
				    enum rte_vlan_type vlan_type,
				    uint16_t tag_type);

int rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask);

int rte_eth_dev_get_vlan_offload(uint8_t port_id);

int rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on);

static inline uint16_t
rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
		 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return 0;
	}
#endif
	int16_t nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
			rx_pkts, nb_pkts);

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];

	if (unlikely(cb != NULL)) {
		do {
			nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
					nb_pkts, cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	return nb_rx;
}
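
/**
 * Usage sketch (illustrative, assumes a started port with RX queue 0
 * configured): a typical polling loop built on rte_eth_rx_burst().
 * @code
 * #define BURST_SIZE 32
 * struct rte_mbuf *bufs[BURST_SIZE];
 *
 * for (;;) {
 *         uint16_t i, nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
 *
 *         for (i = 0; i < nb_rx; i++) {
 *                 // process bufs[i] here, then release it
 *                 rte_pktmbuf_free(bufs[i]);
 *         }
 * }
 * @endcode
 */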

static inline int
rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
	if (queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
}

static inline int
rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
	return (*dev->dev_ops->rx_descriptor_done)( \
		dev->data->rx_queues[queue_id], offset);
}

#define RTE_ETH_RX_DESC_AVAIL    0
#define RTE_ETH_RX_DESC_DONE     1
#define RTE_ETH_RX_DESC_UNAVAIL  2

static inline int
rte_eth_rx_descriptor_status(uint8_t port_id, uint16_t queue_id,
	uint16_t offset)
{
	struct rte_eth_dev *dev;
	void *rxq;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
	dev = &rte_eth_devices[port_id];
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;
#endif
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
	rxq = dev->data->rx_queues[queue_id];

	return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
}

#define RTE_ETH_TX_DESC_FULL    0
#define RTE_ETH_TX_DESC_DONE    1
#define RTE_ETH_TX_DESC_UNAVAIL 2

static inline int rte_eth_tx_descriptor_status(uint8_t port_id,
	uint16_t queue_id, uint16_t offset)
{
	struct rte_eth_dev *dev;
	void *txq;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
	dev = &rte_eth_devices[port_id];
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;
#endif
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
	txq = dev->data->tx_queues[queue_id];

	return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
}

static inline uint16_t
rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return 0;
	}
#endif

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];

	if (unlikely(cb != NULL)) {
		do {
			nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
					cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
}
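
/**
 * Usage sketch (illustrative): forward a burst received on one port out of
 * another, freeing whatever the TX path could not accept. rte_eth_tx_burst()
 * takes ownership only of the mbufs it accepts.
 * @code
 * uint16_t i;
 * uint16_t nb_rx = rte_eth_rx_burst(rx_port, 0, bufs, BURST_SIZE);
 * uint16_t nb_tx = rte_eth_tx_burst(tx_port, 0, bufs, nb_rx);
 *
 * for (i = nb_tx; i < nb_rx; i++)
 *         rte_pktmbuf_free(bufs[i]);
 * @endcode
 */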

#ifndef RTE_ETHDEV_TX_PREPARE_NOOP

static inline uint16_t
rte_eth_tx_prepare(uint8_t port_id, uint16_t queue_id,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_PMD_DEBUG_TRACE("Invalid TX port_id=%d\n", port_id);
		rte_errno = -EINVAL;
		return 0;
	}
#endif

	dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		rte_errno = -EINVAL;
		return 0;
	}
#endif

	if (!dev->tx_pkt_prepare)
		return nb_pkts;

	return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
			tx_pkts, nb_pkts);
}

#else

/*
 * Native NOOP operation for compilation targets which don't require any
 * preparation steps, and where a functional NOOP may introduce an unnecessary
 * performance drop.
 *
 * Generally it is not a good idea to turn this on globally, and it should not
 * be used if the behavior of tx_prepare can change.
 */

static inline uint16_t
rte_eth_tx_prepare(__rte_unused uint8_t port_id, __rte_unused uint16_t queue_id,
		__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	return nb_pkts;
}

#endif

typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
		void *userdata);

/**
 * Structure used to buffer packets for future TX.
 * Used by the APIs rte_eth_tx_buffer() and rte_eth_tx_buffer_flush().
 */
struct rte_eth_dev_tx_buffer {
	buffer_tx_error_fn error_callback;
	void *error_userdata;
	uint16_t size;   /**< Size of buffer for buffered tx */
	uint16_t length; /**< Number of packets in the array */
	struct rte_mbuf *pkts[];
	/**< Pending packets to be sent on explicit flush or when full */
};

/**
 * Calculate the size of the tx buffer.
 *
 * @param sz
 *   Number of stored packets.
 */
#define RTE_ETH_TX_BUFFER_SIZE(sz) \
	(sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);

static inline uint16_t
rte_eth_tx_buffer_flush(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_dev_tx_buffer *buffer)
{
	uint16_t sent;
	uint16_t to_send = buffer->length;

	if (to_send == 0)
		return 0;

	sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);

	buffer->length = 0;

	/* All packets sent, or to be dealt with by callback below */
	if (unlikely(sent != to_send))
		buffer->error_callback(&buffer->pkts[sent], to_send - sent,
				buffer->error_userdata);

	return sent;
}

static inline uint16_t __attribute__((always_inline))
rte_eth_tx_buffer(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
{
	buffer->pkts[buffer->length++] = tx_pkt;
	if (buffer->length < buffer->size)
		return 0;

	return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
}
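
/**
 * Usage sketch (illustrative; rte_zmalloc() comes from rte_malloc.h, which
 * the caller is assumed to include): allocate and use a TX buffer on queue 0.
 * Packets are sent automatically whenever the buffer fills, and the buffer
 * is flushed explicitly at the end of each polling interval.
 * @code
 * struct rte_eth_dev_tx_buffer *buffer =
 *         rte_zmalloc("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(BURST_SIZE), 0);
 * rte_eth_tx_buffer_init(buffer, BURST_SIZE);
 *
 * // enqueue a packet; a full buffer triggers an implicit rte_eth_tx_burst()
 * rte_eth_tx_buffer(port_id, 0, buffer, pkt);
 *
 * // push out anything still queued
 * rte_eth_tx_buffer_flush(port_id, 0, buffer);
 * @endcode
 */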

/**
 * Configure a callback for buffered packets which cannot be sent.
 */
int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn callback, void *userdata);

/**
 * Callback function for silently dropping unsent buffered packets.
 */
void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata);

/**
 * Callback function for tracking unsent buffered packets.
 */
void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata);

int
rte_eth_tx_done_cleanup(uint8_t port_id, uint16_t queue_id, uint32_t free_cnt);

/**
 * The eth device event type for interrupt, and maybe others in the future.
 */
enum rte_eth_event_type {
	RTE_ETH_EVENT_UNKNOWN,  /**< unknown event type */
	RTE_ETH_EVENT_INTR_LSC, /**< lsc interrupt event */
	RTE_ETH_EVENT_QUEUE_STATE,
				/**< queue state event (enabled/disabled) */
	RTE_ETH_EVENT_INTR_RESET,
			/**< reset interrupt event, sent to VF on PF reset */
	RTE_ETH_EVENT_VF_MBOX,  /**< message from the VF received by PF */
	RTE_ETH_EVENT_MACSEC,   /**< MACsec offload related event */
	RTE_ETH_EVENT_INTR_RMV, /**< device removal event */
	RTE_ETH_EVENT_MAX       /**< max value of this enum */
};

typedef void (*rte_eth_dev_cb_fn)(uint8_t port_id,
		enum rte_eth_event_type event, void *cb_arg);
int rte_eth_dev_callback_register(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg);

int rte_eth_dev_callback_unregister(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg);

void _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
		enum rte_eth_event_type event, void *cb_arg);
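
/**
 * Usage sketch (illustrative; the port is assumed to have been configured
 * with intr_conf.lsc = 1 so that LSC interrupts are delivered): register a
 * link-state-change callback.
 * @code
 * static void
 * lsc_cb(uint8_t port_id, enum rte_eth_event_type event, void *cb_arg)
 * {
 *         struct rte_eth_link link;
 *
 *         rte_eth_link_get_nowait(port_id, &link);
 *         printf("Port %u link %s\n", port_id,
 *                link.link_status == ETH_LINK_UP ? "up" : "down");
 * }
 *
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                               lsc_cb, NULL);
 * @endcode
 */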

int rte_eth_dev_rx_intr_enable(uint8_t port_id, uint16_t queue_id);

int rte_eth_dev_rx_intr_disable(uint8_t port_id, uint16_t queue_id);

int rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data);

int rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
			      int epfd, int op, void *data);

int rte_eth_led_on(uint8_t port_id);

int rte_eth_led_off(uint8_t port_id);

int rte_eth_dev_flow_ctrl_get(uint8_t port_id,
			      struct rte_eth_fc_conf *fc_conf);

int rte_eth_dev_flow_ctrl_set(uint8_t port_id,
			      struct rte_eth_fc_conf *fc_conf);

int rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id,
				       struct rte_eth_pfc_conf *pfc_conf);

int rte_eth_dev_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
				uint32_t pool);

int rte_eth_dev_mac_addr_remove(uint8_t port, struct ether_addr *mac_addr);

int rte_eth_dev_default_mac_addr_set(uint8_t port, struct ether_addr *mac_addr);


int rte_eth_dev_rss_reta_update(uint8_t port,
				struct rte_eth_rss_reta_entry64 *reta_conf,
				uint16_t reta_size);

int rte_eth_dev_rss_reta_query(uint8_t port,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);

int rte_eth_dev_uc_hash_table_set(uint8_t port, struct ether_addr *addr,
				  uint8_t on);

int rte_eth_dev_uc_all_hash_table_set(uint8_t port, uint8_t on);

int rte_eth_mirror_rule_set(uint8_t port_id,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t rule_id,
			uint8_t on);

int rte_eth_mirror_rule_reset(uint8_t port_id,
			uint8_t rule_id);

int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
			uint16_t tx_rate);

int rte_eth_dev_bypass_init(uint8_t port);

int rte_eth_dev_bypass_state_show(uint8_t port, uint32_t *state);

int rte_eth_dev_bypass_state_set(uint8_t port, uint32_t *new_state);

int rte_eth_dev_bypass_event_show(uint8_t port, uint32_t event, uint32_t *state);

int rte_eth_dev_bypass_event_store(uint8_t port, uint32_t event, uint32_t state);

int rte_eth_dev_wd_timeout_store(uint8_t port, uint32_t timeout);

int rte_eth_dev_bypass_ver_show(uint8_t port, uint32_t *ver);

int rte_eth_dev_bypass_wd_timeout_show(uint8_t port, uint32_t *wd_timeout);

int rte_eth_dev_bypass_wd_reset(uint8_t port);

int rte_eth_dev_rss_hash_update(uint8_t port_id,
				struct rte_eth_rss_conf *rss_conf);

int
rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
			      struct rte_eth_rss_conf *rss_conf);

int
rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
				struct rte_eth_udp_tunnel *tunnel_udp);

int
rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
				   struct rte_eth_udp_tunnel *tunnel_udp);

int rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type);

int rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
			enum rte_filter_op filter_op, void *arg);

int rte_eth_dev_get_dcb_info(uint8_t port_id,
			     struct rte_eth_dcb_info *dcb_info);

void *rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param);

void *rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param);

void *rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param);

int rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb);

int rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb);

int rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo);

int rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo);

int rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info);

int rte_eth_dev_get_eeprom_length(uint8_t port_id);

int rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);

int rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);

int rte_eth_dev_set_mc_addr_list(uint8_t port_id,
				 struct ether_addr *mc_addr_set,
				 uint32_t nb_mc_addr);

int rte_eth_timesync_enable(uint8_t port_id);

int rte_eth_timesync_disable(uint8_t port_id);

int rte_eth_timesync_read_rx_timestamp(uint8_t port_id,
		struct timespec *timestamp, uint32_t flags);

int rte_eth_timesync_read_tx_timestamp(uint8_t port_id,
		struct timespec *timestamp);

int rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta);

int rte_eth_timesync_read_time(uint8_t port_id, struct timespec *time);

int rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *time);

const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
			 uint16_t queue_id, size_t size,
			 unsigned align, int socket_id);

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel);

int
rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en);

int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id);

int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_ETHDEV_H_ */