DPDK  17.08.2
rte_ethdev.h
Go to the documentation of this file.
1 /*-
2  * BSD LICENSE
3  *
4  * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * * Redistributions of source code must retain the above copyright
12  * notice, this list of conditions and the following disclaimer.
13  * * Redistributions in binary form must reproduce the above copyright
14  * notice, this list of conditions and the following disclaimer in
15  * the documentation and/or other materials provided with the
16  * distribution.
17  * * Neither the name of Intel Corporation nor the names of its
18  * contributors may be used to endorse or promote products derived
19  * from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #ifndef _RTE_ETHDEV_H_
35 #define _RTE_ETHDEV_H_
36 
169 #ifdef __cplusplus
170 extern "C" {
171 #endif
172 
173 #include <stdint.h>
174 
175 /* Use this macro to check if LRO API is supported */
176 #define RTE_ETHDEV_HAS_LRO_SUPPORT
177 
178 #include <rte_log.h>
179 #include <rte_interrupts.h>
180 #include <rte_dev.h>
181 #include <rte_devargs.h>
182 #include <rte_errno.h>
183 #include "rte_ether.h"
184 #include "rte_eth_ctrl.h"
185 #include "rte_dev_info.h"
186 
187 struct rte_mbuf;
188 
196  uint64_t ipackets;
197  uint64_t opackets;
198  uint64_t ibytes;
199  uint64_t obytes;
200  uint64_t imissed;
204  uint64_t ierrors;
205  uint64_t oerrors;
206  uint64_t rx_nombuf;
207  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
209  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
211  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
213  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
215  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
217 };
218 
222 #define ETH_LINK_SPEED_AUTONEG (0 << 0)
223 #define ETH_LINK_SPEED_FIXED (1 << 0)
224 #define ETH_LINK_SPEED_10M_HD (1 << 1)
225 #define ETH_LINK_SPEED_10M (1 << 2)
226 #define ETH_LINK_SPEED_100M_HD (1 << 3)
227 #define ETH_LINK_SPEED_100M (1 << 4)
228 #define ETH_LINK_SPEED_1G (1 << 5)
229 #define ETH_LINK_SPEED_2_5G (1 << 6)
230 #define ETH_LINK_SPEED_5G (1 << 7)
231 #define ETH_LINK_SPEED_10G (1 << 8)
232 #define ETH_LINK_SPEED_20G (1 << 9)
233 #define ETH_LINK_SPEED_25G (1 << 10)
234 #define ETH_LINK_SPEED_40G (1 << 11)
235 #define ETH_LINK_SPEED_50G (1 << 12)
236 #define ETH_LINK_SPEED_56G (1 << 13)
237 #define ETH_LINK_SPEED_100G (1 << 14)
242 #define ETH_SPEED_NUM_NONE 0
243 #define ETH_SPEED_NUM_10M 10
244 #define ETH_SPEED_NUM_100M 100
245 #define ETH_SPEED_NUM_1G 1000
246 #define ETH_SPEED_NUM_2_5G 2500
247 #define ETH_SPEED_NUM_5G 5000
248 #define ETH_SPEED_NUM_10G 10000
249 #define ETH_SPEED_NUM_20G 20000
250 #define ETH_SPEED_NUM_25G 25000
251 #define ETH_SPEED_NUM_40G 40000
252 #define ETH_SPEED_NUM_50G 50000
253 #define ETH_SPEED_NUM_56G 56000
254 #define ETH_SPEED_NUM_100G 100000
/**
 * A structure used to retrieve link-level information of an Ethernet port.
 * NOTE(review): the aligned(8) attribute makes the struct a single 64-bit
 * unit — presumably so readers/writers can treat it atomically; confirm
 * against rte_eth_link_get() callers.
 */
__extension__
struct rte_eth_link {
	uint32_t link_speed;        /**< Link speed (ETH_SPEED_NUM_*, Mbps units per those defines). */
	uint16_t link_duplex  : 1;  /**< ETH_LINK_HALF_DUPLEX or ETH_LINK_FULL_DUPLEX. */
	uint16_t link_autoneg : 1;  /**< ETH_LINK_FIXED or ETH_LINK_AUTONEG. */
	uint16_t link_status  : 1;  /**< ETH_LINK_DOWN or ETH_LINK_UP. */
} __attribute__((aligned(8)));
267 /* Utility constants */
268 #define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex (rte_eth_link.link_duplex). */
269 #define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex (rte_eth_link.link_duplex). */
270 #define ETH_LINK_DOWN 0 /**< Link is down (rte_eth_link.link_status). */
271 #define ETH_LINK_UP 1 /**< Link is up (rte_eth_link.link_status). */
272 #define ETH_LINK_FIXED 0 /**< No autonegotiation (rte_eth_link.link_autoneg). */
273 #define ETH_LINK_AUTONEG 1 /**< Autonegotiated (rte_eth_link.link_autoneg). */
/**
 * A structure describing the ring threshold registers of an RX/TX queue
 * (passed to the PMD through the queue configuration structures).
 */
struct rte_eth_thresh {
	uint8_t pthresh; /**< Ring prefetch threshold. */
	uint8_t hthresh; /**< Ring host threshold. */
	uint8_t wthresh; /**< Ring writeback threshold. */
};
284 
288 #define ETH_MQ_RX_RSS_FLAG 0x1
289 #define ETH_MQ_RX_DCB_FLAG 0x2
290 #define ETH_MQ_RX_VMDQ_FLAG 0x4
291 
299 
303  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
305  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
306 
308  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
310  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
312  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
315  ETH_MQ_RX_VMDQ_FLAG,
316 };
317 
321 #define ETH_RSS ETH_MQ_RX_RSS
322 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
323 #define ETH_DCB_RX ETH_MQ_RX_DCB
324 
334 };
335 
339 #define ETH_DCB_NONE ETH_MQ_TX_NONE
340 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
341 #define ETH_DCB_TX ETH_MQ_TX_DCB
342 
349  uint32_t max_rx_pkt_len;
350  uint16_t split_hdr_size;
351  __extension__
352  uint16_t header_split : 1,
353  hw_ip_checksum : 1,
354  hw_vlan_filter : 1,
355  hw_vlan_strip : 1,
356  hw_vlan_extend : 1,
357  jumbo_frame : 1,
358  hw_strip_crc : 1,
359  enable_scatter : 1,
360  enable_lro : 1;
361 };
362 
368  ETH_VLAN_TYPE_UNKNOWN = 0,
371  ETH_VLAN_TYPE_MAX,
372 };
373 
379  uint64_t ids[64];
380 };
381 
400  uint8_t *rss_key;
401  uint8_t rss_key_len;
402  uint64_t rss_hf;
403 };
404 
405 /*
406  * The RSS offload types are defined based on flow types which are defined
407  * in rte_eth_ctrl.h. Different NIC hardwares may support different RSS offload
408  * types. The supported flow types or RSS offload types can be queried by
409  * rte_eth_dev_info_get().
410  */
411 #define ETH_RSS_IPV4 (1ULL << RTE_ETH_FLOW_IPV4)
412 #define ETH_RSS_FRAG_IPV4 (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
413 #define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
414 #define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
415 #define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
416 #define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
417 #define ETH_RSS_IPV6 (1ULL << RTE_ETH_FLOW_IPV6)
418 #define ETH_RSS_FRAG_IPV6 (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
419 #define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
420 #define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
421 #define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
422 #define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
423 #define ETH_RSS_L2_PAYLOAD (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
424 #define ETH_RSS_IPV6_EX (1ULL << RTE_ETH_FLOW_IPV6_EX)
425 #define ETH_RSS_IPV6_TCP_EX (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
426 #define ETH_RSS_IPV6_UDP_EX (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
427 #define ETH_RSS_PORT (1ULL << RTE_ETH_FLOW_PORT)
428 #define ETH_RSS_VXLAN (1ULL << RTE_ETH_FLOW_VXLAN)
429 #define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
430 #define ETH_RSS_NVGRE (1ULL << RTE_ETH_FLOW_NVGRE)
431 
432 #define ETH_RSS_IP ( \
433  ETH_RSS_IPV4 | \
434  ETH_RSS_FRAG_IPV4 | \
435  ETH_RSS_NONFRAG_IPV4_OTHER | \
436  ETH_RSS_IPV6 | \
437  ETH_RSS_FRAG_IPV6 | \
438  ETH_RSS_NONFRAG_IPV6_OTHER | \
439  ETH_RSS_IPV6_EX)
440 
441 #define ETH_RSS_UDP ( \
442  ETH_RSS_NONFRAG_IPV4_UDP | \
443  ETH_RSS_NONFRAG_IPV6_UDP | \
444  ETH_RSS_IPV6_UDP_EX)
445 
446 #define ETH_RSS_TCP ( \
447  ETH_RSS_NONFRAG_IPV4_TCP | \
448  ETH_RSS_NONFRAG_IPV6_TCP | \
449  ETH_RSS_IPV6_TCP_EX)
450 
451 #define ETH_RSS_SCTP ( \
452  ETH_RSS_NONFRAG_IPV4_SCTP | \
453  ETH_RSS_NONFRAG_IPV6_SCTP)
454 
455 #define ETH_RSS_TUNNEL ( \
456  ETH_RSS_VXLAN | \
457  ETH_RSS_GENEVE | \
458  ETH_RSS_NVGRE)
459 
460 
462 #define ETH_RSS_PROTO_MASK ( \
463  ETH_RSS_IPV4 | \
464  ETH_RSS_FRAG_IPV4 | \
465  ETH_RSS_NONFRAG_IPV4_TCP | \
466  ETH_RSS_NONFRAG_IPV4_UDP | \
467  ETH_RSS_NONFRAG_IPV4_SCTP | \
468  ETH_RSS_NONFRAG_IPV4_OTHER | \
469  ETH_RSS_IPV6 | \
470  ETH_RSS_FRAG_IPV6 | \
471  ETH_RSS_NONFRAG_IPV6_TCP | \
472  ETH_RSS_NONFRAG_IPV6_UDP | \
473  ETH_RSS_NONFRAG_IPV6_SCTP | \
474  ETH_RSS_NONFRAG_IPV6_OTHER | \
475  ETH_RSS_L2_PAYLOAD | \
476  ETH_RSS_IPV6_EX | \
477  ETH_RSS_IPV6_TCP_EX | \
478  ETH_RSS_IPV6_UDP_EX | \
479  ETH_RSS_PORT | \
480  ETH_RSS_VXLAN | \
481  ETH_RSS_GENEVE | \
482  ETH_RSS_NVGRE)
483 
484 /*
485  * Definitions used for redirection table entry size.
486  * Some RSS RETA sizes may not be supported by some drivers, check the
487  * documentation or the description of relevant functions for more details.
488  */
489 #define ETH_RSS_RETA_SIZE_64 64
490 #define ETH_RSS_RETA_SIZE_128 128
491 #define ETH_RSS_RETA_SIZE_256 256
492 #define ETH_RSS_RETA_SIZE_512 512
493 #define RTE_RETA_GROUP_SIZE 64
494 
495 /* Definitions used for VMDQ and DCB functionality */
496 #define ETH_VMDQ_MAX_VLAN_FILTERS 64
497 #define ETH_DCB_NUM_USER_PRIORITIES 8
498 #define ETH_VMDQ_DCB_NUM_QUEUES 128
499 #define ETH_DCB_NUM_QUEUES 128
501 /* DCB capability defines */
502 #define ETH_DCB_PG_SUPPORT 0x00000001
503 #define ETH_DCB_PFC_SUPPORT 0x00000002
505 /* Definitions used for VLAN Offload functionality */
506 #define ETH_VLAN_STRIP_OFFLOAD 0x0001
507 #define ETH_VLAN_FILTER_OFFLOAD 0x0002
508 #define ETH_VLAN_EXTEND_OFFLOAD 0x0004
510 /* Definitions used for mask VLAN setting */
511 #define ETH_VLAN_STRIP_MASK 0x0001
512 #define ETH_VLAN_FILTER_MASK 0x0002
513 #define ETH_VLAN_EXTEND_MASK 0x0004
514 #define ETH_VLAN_ID_MAX 0x0FFF
516 /* Definitions used for receive MAC address */
517 #define ETH_NUM_RECEIVE_MAC_ADDR 128
519 /* Definitions used for unicast hash */
520 #define ETH_VMDQ_NUM_UC_HASH_ARRAY 128
522 /* Definitions used for VMDQ pool rx mode setting */
523 #define ETH_VMDQ_ACCEPT_UNTAG 0x0001
524 #define ETH_VMDQ_ACCEPT_HASH_MC 0x0002
525 #define ETH_VMDQ_ACCEPT_HASH_UC 0x0004
526 #define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
527 #define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
/** Maximum number of VLAN entries one mirror rule can carry. */
#define ETH_MIRROR_MAX_VLANS 64

/* Mirror rule type bits (rule_type in the mirror configuration). */
#define ETH_MIRROR_VIRTUAL_POOL_UP   0x01 /**< Virtual pool uplink mirroring. */
#define ETH_MIRROR_UPLINK_PORT       0x02 /**< Uplink port mirroring. */
#define ETH_MIRROR_DOWNLINK_PORT     0x04 /**< Downlink port mirroring. */
#define ETH_MIRROR_VLAN              0x08 /**< VLAN mirroring. */
#define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10 /**< Virtual pool downlink mirroring. */

/**
 * A structure used to configure VLAN traffic mirroring.
 */
struct rte_eth_vlan_mirror {
	uint64_t vlan_mask;                     /**< Bit mask selecting valid vlan_id[] entries. */
	uint16_t vlan_id[ETH_MIRROR_MAX_VLANS]; /**< VLAN IDs to be mirrored. */
};
546 
551  uint8_t rule_type;
552  uint8_t dst_pool;
553  uint64_t pool_mask;
556 };
557 
565  uint64_t mask;
567  uint16_t reta[RTE_RETA_GROUP_SIZE];
569 };
570 
576  ETH_4_TCS = 4,
578 };
579 
589 };
590 
591 /* This structure may be extended in future. */
/** Configuration of the receive side of the DCB feature. */
592 struct rte_eth_dcb_rx_conf {
593  enum rte_eth_nb_tcs nb_tcs; /**< Number of DCB traffic classes (enum rte_eth_nb_tcs). */
595  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Traffic class assigned to each of the 8 user priorities. */
596 };
597 
/** TX configuration used when VMDq and DCB are combined. */
598 struct rte_eth_vmdq_dcb_tx_conf {
599  enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq queue pools (enum rte_eth_nb_pools, defined elsewhere). */
601  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Traffic class assigned to each user priority. */
602 };
603 
/** Configuration of the transmit side of the DCB feature. */
604 struct rte_eth_dcb_tx_conf {
605  enum rte_eth_nb_tcs nb_tcs; /**< Number of DCB traffic classes. */
607  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Traffic class assigned to each user priority. */
608 };
609 
/** TX configuration used when only VMDq (no DCB) is enabled. */
610 struct rte_eth_vmdq_tx_conf {
611  enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq queue pools. */
612 };
613 
628  uint8_t default_pool;
629  uint8_t nb_pool_maps;
630  struct {
631  uint16_t vlan_id;
632  uint64_t pools;
636 };
637 
659  uint8_t default_pool;
661  uint8_t nb_pool_maps;
662  uint32_t rx_mode;
663  struct {
664  uint16_t vlan_id;
665  uint64_t pools;
667 };
668 
675  /* For i40e specifically */
676  uint16_t pvid;
677  __extension__
678  uint8_t hw_vlan_reject_tagged : 1,
684 };
685 
691  uint16_t rx_free_thresh;
692  uint8_t rx_drop_en;
694 };
695 
/* Per-queue TX hints carried in rte_eth_txconf.txq_flags; each bit lets
 * the PMD skip an offload/bookkeeping path on that queue. */
696 #define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001 /**< App guarantees single-segment mbufs. */
697 #define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002 /**< PMD may ignore mbuf reference counts. */
698 #define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004 /**< All mbufs come from one mempool. */
699 #define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100 /**< Disable VLAN-insertion offload. */
700 #define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200 /**< Disable SCTP checksum offload. */
701 #define ETH_TXQ_FLAGS_NOXSUMUDP 0x0400 /**< Disable UDP checksum offload. */
702 #define ETH_TXQ_FLAGS_NOXSUMTCP 0x0800 /**< Disable TCP checksum offload. */
/* Convenience combinations of the individual bits above. */
703 #define ETH_TXQ_FLAGS_NOOFFLOADS \
704  (ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
705  ETH_TXQ_FLAGS_NOXSUMUDP | ETH_TXQ_FLAGS_NOXSUMTCP)
706 #define ETH_TXQ_FLAGS_NOXSUMS \
707  (ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
708  ETH_TXQ_FLAGS_NOXSUMTCP)
709 
714  uint16_t tx_rs_thresh;
715  uint16_t tx_free_thresh;
718  uint32_t txq_flags;
720 };
721 
726  uint16_t nb_max;
727  uint16_t nb_min;
728  uint16_t nb_align;
738  uint16_t nb_seg_max;
739 
751  uint16_t nb_mtu_seg_max;
752 };
753 
762 };
763 
770  uint32_t high_water;
771  uint32_t low_water;
772  uint16_t pause_time;
773  uint16_t send_xon;
776  uint8_t autoneg;
777 };
778 
786  uint8_t priority;
787 };
788 
797 };
798 
806 };
807 
819  uint8_t drop_queue;
820  struct rte_eth_fdir_masks mask;
823 };
824 
833  uint16_t udp_port;
834  uint8_t prot_type;
835 };
836 
842  uint32_t lsc:1;
844  uint32_t rxq:1;
846  uint32_t rmv:1;
847 };
848 
/**
 * Top-level per-port configuration passed to rte_eth_dev_configure().
 * NOTE(review): several members (rxmode/txmode, rss_conf, fdir/intr conf)
 * appear to have been elided by the documentation extraction; confirm
 * this layout against the full header before relying on it.
 */
854 struct rte_eth_conf {
855  uint32_t link_speeds; /**< Bitmap of ETH_LINK_SPEED_XXX (ETH_LINK_SPEED_FIXED disables autonegotiation). */
864  uint32_t lpbk_mode; /**< Loopback mode; 0 normally — NOTE(review): semantics are PMD-specific. */
869  struct {
873  struct rte_eth_dcb_rx_conf dcb_rx_conf; /**< DCB RX configuration. */
/* NOTE(review): other rx_adv_conf members elided in this extract. */
877  } rx_adv_conf;
878  union {
879  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf; /**< VMDq+DCB TX configuration. */
881  struct rte_eth_dcb_tx_conf dcb_tx_conf; /**< DCB-only TX configuration. */
883  struct rte_eth_vmdq_tx_conf vmdq_tx_conf; /**< VMDq-only TX configuration. */
885  } tx_adv_conf; /**< Variant selected by the configured TX multi-queue mode. */
891 };
892 
902 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
903 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
904 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
905 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
906 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
907 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
908 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
909 #define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
910 
914 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
915 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
916 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
917 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
918 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
919 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
920 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
921 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
922 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
923 #define DEV_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200
924 #define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400
925 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800
926 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000
927 #define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
928 #define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
929 
933 struct rte_pci_device;
934 
940  const char *driver_name;
941  unsigned int if_index;
943  uint32_t min_rx_bufsize;
944  uint32_t max_rx_pktlen;
945  uint16_t max_rx_queues;
946  uint16_t max_tx_queues;
947  uint32_t max_mac_addrs;
948  uint32_t max_hash_mac_addrs;
950  uint16_t max_vfs;
951  uint16_t max_vmdq_pools;
952  uint32_t rx_offload_capa;
953  uint32_t tx_offload_capa;
954  uint16_t reta_size;
956  uint8_t hash_key_size;
961  uint16_t vmdq_queue_base;
962  uint16_t vmdq_queue_num;
963  uint16_t vmdq_pool_base;
966  uint32_t speed_capa;
968  uint16_t nb_rx_queues;
969  uint16_t nb_tx_queues;
970 };
971 
977  struct rte_mempool *mp;
979  uint8_t scattered_rx;
980  uint16_t nb_desc;
982 
989  uint16_t nb_desc;
991 
993 #define RTE_ETH_XSTATS_NAME_SIZE 64
994 
1005  uint64_t id;
1006  uint64_t value;
1007 };
1008 
1018 };
1019 
1020 #define ETH_DCB_NUM_TCS 8
1021 #define ETH_MAX_VMDQ_POOL 64
1022 
1029  struct {
1030  uint8_t base;
1031  uint8_t nb_queue;
1032  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1034  struct {
1035  uint8_t base;
1036  uint8_t nb_queue;
1037  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1038 };
1039 
1045  uint8_t nb_tcs;
1047  uint8_t tc_bws[ETH_DCB_NUM_TCS];
1050 };
1051 
1055 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1056 #define RTE_ETH_QUEUE_STATE_STARTED 1
1057 
1058 struct rte_eth_dev;
1059 
1060 struct rte_eth_dev_callback;
1062 TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
1063 
1064 /* Macros to check for valid port */
/**
 * Validate port_id; on failure emit a debug trace and make the *calling
 * function* return `retval`. For use inside int-returning ethdev APIs.
 */
1065 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1066  if (!rte_eth_dev_is_valid_port(port_id)) { \
1067  RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
1068  return retval; \
1069  } \
1070 } while (0)
1071 
/** Same validation for void functions: returns with no value on failure. */
1072 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1073  if (!rte_eth_dev_is_valid_port(port_id)) { \
1074  RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
1075  return; \
1076  } \
1077 } while (0)
1078 
1079 #define RTE_ETH_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
1080 
1086 #define ETH_L2_TUNNEL_ENABLE_MASK 0x00000001
1087 
1088 #define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002
1089 
1090 #define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004
1091 
1092 #define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1093 
1094 /*
1095  * Definitions of all functions exported by an Ethernet driver through the
1096  * the generic structure of type *eth_dev_ops* supplied in the *rte_eth_dev*
1097  * structure associated with an Ethernet device.
1098  */
1099 
1100 typedef int (*eth_dev_configure_t)(struct rte_eth_dev *dev);
1103 typedef int (*eth_dev_start_t)(struct rte_eth_dev *dev);
1106 typedef void (*eth_dev_stop_t)(struct rte_eth_dev *dev);
1109 typedef int (*eth_dev_set_link_up_t)(struct rte_eth_dev *dev);
1112 typedef int (*eth_dev_set_link_down_t)(struct rte_eth_dev *dev);
1115 typedef void (*eth_dev_close_t)(struct rte_eth_dev *dev);
1118 typedef void (*eth_promiscuous_enable_t)(struct rte_eth_dev *dev);
1121 typedef void (*eth_promiscuous_disable_t)(struct rte_eth_dev *dev);
1124 typedef void (*eth_allmulticast_enable_t)(struct rte_eth_dev *dev);
1127 typedef void (*eth_allmulticast_disable_t)(struct rte_eth_dev *dev);
1130 typedef int (*eth_link_update_t)(struct rte_eth_dev *dev,
1131  int wait_to_complete);
1134 typedef void (*eth_stats_get_t)(struct rte_eth_dev *dev,
1135  struct rte_eth_stats *igb_stats);
1138 typedef void (*eth_stats_reset_t)(struct rte_eth_dev *dev);
1141 typedef int (*eth_xstats_get_t)(struct rte_eth_dev *dev,
1142  struct rte_eth_xstat *stats, unsigned n);
1145 typedef int (*eth_xstats_get_by_id_t)(struct rte_eth_dev *dev,
1146  const uint64_t *ids,
1147  uint64_t *values,
1148  unsigned int n);
1151 typedef void (*eth_xstats_reset_t)(struct rte_eth_dev *dev);
1154 typedef int (*eth_xstats_get_names_t)(struct rte_eth_dev *dev,
1155  struct rte_eth_xstat_name *xstats_names, unsigned size);
1158 typedef int (*eth_xstats_get_names_by_id_t)(struct rte_eth_dev *dev,
1159  struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
1160  unsigned int size);
1163 typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
1164  uint16_t queue_id,
1165  uint8_t stat_idx,
1166  uint8_t is_rx);
1169 typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
1170  struct rte_eth_dev_info *dev_info);
1173 typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(struct rte_eth_dev *dev);
1176 typedef int (*eth_queue_start_t)(struct rte_eth_dev *dev,
1177  uint16_t queue_id);
1180 typedef int (*eth_queue_stop_t)(struct rte_eth_dev *dev,
1181  uint16_t queue_id);
1184 typedef int (*eth_rx_queue_setup_t)(struct rte_eth_dev *dev,
1185  uint16_t rx_queue_id,
1186  uint16_t nb_rx_desc,
1187  unsigned int socket_id,
1188  const struct rte_eth_rxconf *rx_conf,
1189  struct rte_mempool *mb_pool);
1192 typedef int (*eth_tx_queue_setup_t)(struct rte_eth_dev *dev,
1193  uint16_t tx_queue_id,
1194  uint16_t nb_tx_desc,
1195  unsigned int socket_id,
1196  const struct rte_eth_txconf *tx_conf);
1199 typedef int (*eth_rx_enable_intr_t)(struct rte_eth_dev *dev,
1200  uint16_t rx_queue_id);
1203 typedef int (*eth_rx_disable_intr_t)(struct rte_eth_dev *dev,
1204  uint16_t rx_queue_id);
1207 typedef void (*eth_queue_release_t)(void *queue);
1210 typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
1211  uint16_t rx_queue_id);
1214 typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
1217 typedef int (*eth_rx_descriptor_status_t)(void *rxq, uint16_t offset);
1220 typedef int (*eth_tx_descriptor_status_t)(void *txq, uint16_t offset);
1223 typedef int (*eth_fw_version_get_t)(struct rte_eth_dev *dev,
1224  char *fw_version, size_t fw_size);
1227 typedef int (*eth_tx_done_cleanup_t)(void *txq, uint32_t free_cnt);
1230 typedef void (*eth_rxq_info_get_t)(struct rte_eth_dev *dev,
1231  uint16_t rx_queue_id, struct rte_eth_rxq_info *qinfo);
1232 
1233 typedef void (*eth_txq_info_get_t)(struct rte_eth_dev *dev,
1234  uint16_t tx_queue_id, struct rte_eth_txq_info *qinfo);
1235 
1236 typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);
1239 typedef int (*vlan_filter_set_t)(struct rte_eth_dev *dev,
1240  uint16_t vlan_id,
1241  int on);
1244 typedef int (*vlan_tpid_set_t)(struct rte_eth_dev *dev,
1245  enum rte_vlan_type type, uint16_t tpid);
1248 typedef void (*vlan_offload_set_t)(struct rte_eth_dev *dev, int mask);
1251 typedef int (*vlan_pvid_set_t)(struct rte_eth_dev *dev,
1252  uint16_t vlan_id,
1253  int on);
1256 typedef void (*vlan_strip_queue_set_t)(struct rte_eth_dev *dev,
1257  uint16_t rx_queue_id,
1258  int on);
1261 typedef uint16_t (*eth_rx_burst_t)(void *rxq,
1262  struct rte_mbuf **rx_pkts,
1263  uint16_t nb_pkts);
1266 typedef uint16_t (*eth_tx_burst_t)(void *txq,
1267  struct rte_mbuf **tx_pkts,
1268  uint16_t nb_pkts);
1271 typedef uint16_t (*eth_tx_prep_t)(void *txq,
1272  struct rte_mbuf **tx_pkts,
1273  uint16_t nb_pkts);
1276 typedef int (*flow_ctrl_get_t)(struct rte_eth_dev *dev,
1277  struct rte_eth_fc_conf *fc_conf);
1280 typedef int (*flow_ctrl_set_t)(struct rte_eth_dev *dev,
1281  struct rte_eth_fc_conf *fc_conf);
1284 typedef int (*priority_flow_ctrl_set_t)(struct rte_eth_dev *dev,
1285  struct rte_eth_pfc_conf *pfc_conf);
1288 typedef int (*reta_update_t)(struct rte_eth_dev *dev,
1289  struct rte_eth_rss_reta_entry64 *reta_conf,
1290  uint16_t reta_size);
1293 typedef int (*reta_query_t)(struct rte_eth_dev *dev,
1294  struct rte_eth_rss_reta_entry64 *reta_conf,
1295  uint16_t reta_size);
1298 typedef int (*rss_hash_update_t)(struct rte_eth_dev *dev,
1299  struct rte_eth_rss_conf *rss_conf);
1302 typedef int (*rss_hash_conf_get_t)(struct rte_eth_dev *dev,
1303  struct rte_eth_rss_conf *rss_conf);
1306 typedef int (*eth_dev_led_on_t)(struct rte_eth_dev *dev);
1309 typedef int (*eth_dev_led_off_t)(struct rte_eth_dev *dev);
1312 typedef void (*eth_mac_addr_remove_t)(struct rte_eth_dev *dev, uint32_t index);
1315 typedef int (*eth_mac_addr_add_t)(struct rte_eth_dev *dev,
1316  struct ether_addr *mac_addr,
1317  uint32_t index,
1318  uint32_t vmdq);
1321 typedef void (*eth_mac_addr_set_t)(struct rte_eth_dev *dev,
1322  struct ether_addr *mac_addr);
1325 typedef int (*eth_uc_hash_table_set_t)(struct rte_eth_dev *dev,
1326  struct ether_addr *mac_addr,
1327  uint8_t on);
1330 typedef int (*eth_uc_all_hash_table_set_t)(struct rte_eth_dev *dev,
1331  uint8_t on);
1334 typedef int (*eth_set_queue_rate_limit_t)(struct rte_eth_dev *dev,
1335  uint16_t queue_idx,
1336  uint16_t tx_rate);
1339 typedef int (*eth_mirror_rule_set_t)(struct rte_eth_dev *dev,
1340  struct rte_eth_mirror_conf *mirror_conf,
1341  uint8_t rule_id,
1342  uint8_t on);
1345 typedef int (*eth_mirror_rule_reset_t)(struct rte_eth_dev *dev,
1346  uint8_t rule_id);
1349 typedef int (*eth_udp_tunnel_port_add_t)(struct rte_eth_dev *dev,
1350  struct rte_eth_udp_tunnel *tunnel_udp);
1353 typedef int (*eth_udp_tunnel_port_del_t)(struct rte_eth_dev *dev,
1354  struct rte_eth_udp_tunnel *tunnel_udp);
1357 typedef int (*eth_set_mc_addr_list_t)(struct rte_eth_dev *dev,
1358  struct ether_addr *mc_addr_set,
1359  uint32_t nb_mc_addr);
1362 typedef int (*eth_timesync_enable_t)(struct rte_eth_dev *dev);
1365 typedef int (*eth_timesync_disable_t)(struct rte_eth_dev *dev);
1368 typedef int (*eth_timesync_read_rx_timestamp_t)(struct rte_eth_dev *dev,
1369  struct timespec *timestamp,
1370  uint32_t flags);
1373 typedef int (*eth_timesync_read_tx_timestamp_t)(struct rte_eth_dev *dev,
1374  struct timespec *timestamp);
1377 typedef int (*eth_timesync_adjust_time)(struct rte_eth_dev *dev, int64_t);
1380 typedef int (*eth_timesync_read_time)(struct rte_eth_dev *dev,
1381  struct timespec *timestamp);
1384 typedef int (*eth_timesync_write_time)(struct rte_eth_dev *dev,
1385  const struct timespec *timestamp);
1388 typedef int (*eth_get_reg_t)(struct rte_eth_dev *dev,
1389  struct rte_dev_reg_info *info);
1392 typedef int (*eth_get_eeprom_length_t)(struct rte_eth_dev *dev);
1395 typedef int (*eth_get_eeprom_t)(struct rte_eth_dev *dev,
1396  struct rte_dev_eeprom_info *info);
1399 typedef int (*eth_set_eeprom_t)(struct rte_eth_dev *dev,
1400  struct rte_dev_eeprom_info *info);
1403 typedef int (*eth_l2_tunnel_eth_type_conf_t)
1404  (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
1407 typedef int (*eth_l2_tunnel_offload_set_t)
1408  (struct rte_eth_dev *dev,
1409  struct rte_eth_l2_tunnel_conf *l2_tunnel,
1410  uint32_t mask,
1411  uint8_t en);
1415 typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
1416  enum rte_filter_type filter_type,
1417  enum rte_filter_op filter_op,
1418  void *arg);
1421 typedef int (*eth_tm_ops_get_t)(struct rte_eth_dev *dev, void *ops);
1424 typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
1425  struct rte_eth_dcb_info *dcb_info);
/**
 * @internal Table of driver callbacks exported by a PMD. Every generic
 * ethdev API is dispatched through one of these function pointers;
 * callbacks a driver does not implement are left NULL (callers are
 * expected to check before dispatching).
 */
1431 struct eth_dev_ops {
 /* --- device lifecycle --- */
1432  eth_dev_configure_t dev_configure;
1433  eth_dev_start_t dev_start;
1434  eth_dev_stop_t dev_stop;
1435  eth_dev_set_link_up_t dev_set_link_up;
1436  eth_dev_set_link_down_t dev_set_link_down;
1437  eth_dev_close_t dev_close;
1438  eth_link_update_t link_update;
 /* --- RX filtering modes and MAC address management --- */
1440  eth_promiscuous_enable_t promiscuous_enable;
1441  eth_promiscuous_disable_t promiscuous_disable;
1442  eth_allmulticast_enable_t allmulticast_enable;
1443  eth_allmulticast_disable_t allmulticast_disable;
1444  eth_mac_addr_remove_t mac_addr_remove;
1445  eth_mac_addr_add_t mac_addr_add;
1446  eth_mac_addr_set_t mac_addr_set;
1447  eth_set_mc_addr_list_t set_mc_addr_list;
1448  mtu_set_t mtu_set;
 /* --- statistics --- */
1450  eth_stats_get_t stats_get;
1451  eth_stats_reset_t stats_reset;
1452  eth_xstats_get_t xstats_get;
1453  eth_xstats_reset_t xstats_reset;
1454  eth_xstats_get_names_t xstats_get_names;
1456  eth_queue_stats_mapping_set_t queue_stats_mapping_set;
 /* --- device/queue information queries --- */
1459  eth_dev_infos_get_t dev_infos_get;
1460  eth_rxq_info_get_t rxq_info_get;
1461  eth_txq_info_get_t txq_info_get;
1462  eth_fw_version_get_t fw_version_get;
1463  eth_dev_supported_ptypes_get_t dev_supported_ptypes_get;
 /* --- VLAN offload --- */
1466  vlan_filter_set_t vlan_filter_set;
1467  vlan_tpid_set_t vlan_tpid_set;
1468  vlan_strip_queue_set_t vlan_strip_queue_set;
1469  vlan_offload_set_t vlan_offload_set;
1470  vlan_pvid_set_t vlan_pvid_set;
 /* --- queue setup, start/stop and descriptor status --- */
1472  eth_queue_start_t rx_queue_start;
1473  eth_queue_stop_t rx_queue_stop;
1474  eth_queue_start_t tx_queue_start;
1475  eth_queue_stop_t tx_queue_stop;
1476  eth_rx_queue_setup_t rx_queue_setup;
1477  eth_queue_release_t rx_queue_release;
1478  eth_rx_queue_count_t rx_queue_count;
1480  eth_rx_descriptor_done_t rx_descriptor_done;
1481  eth_rx_descriptor_status_t rx_descriptor_status;
1483  eth_tx_descriptor_status_t tx_descriptor_status;
1485  eth_rx_enable_intr_t rx_queue_intr_enable;
1486  eth_rx_disable_intr_t rx_queue_intr_disable;
1487  eth_tx_queue_setup_t tx_queue_setup;
1488  eth_queue_release_t tx_queue_release;
1489  eth_tx_done_cleanup_t tx_done_cleanup;
 /* --- LED control --- */
1491  eth_dev_led_on_t dev_led_on;
1492  eth_dev_led_off_t dev_led_off;
 /* --- flow control --- */
1494  flow_ctrl_get_t flow_ctrl_get;
1495  flow_ctrl_set_t flow_ctrl_set;
1496  priority_flow_ctrl_set_t priority_flow_ctrl_set;
 /* --- unicast hash table (VMDq) --- */
1498  eth_uc_hash_table_set_t uc_hash_table_set;
1499  eth_uc_all_hash_table_set_t uc_all_hash_table_set;
 /* --- traffic mirroring --- */
1501  eth_mirror_rule_set_t mirror_rule_set;
1502  eth_mirror_rule_reset_t mirror_rule_reset;
 /* --- UDP/L2 tunnel offload --- */
1504  eth_udp_tunnel_port_add_t udp_tunnel_port_add;
1505  eth_udp_tunnel_port_del_t udp_tunnel_port_del;
1506  eth_l2_tunnel_eth_type_conf_t l2_tunnel_eth_type_conf;
1508  eth_l2_tunnel_offload_set_t l2_tunnel_offload_set;
 /* --- TX rate limiting --- */
1511  eth_set_queue_rate_limit_t set_queue_rate_limit;
 /* --- RSS hash and redirection table --- */
1513  rss_hash_update_t rss_hash_update;
1514  rss_hash_conf_get_t rss_hash_conf_get;
1515  reta_update_t reta_update;
1516  reta_query_t reta_query;
 /* --- registers and EEPROM access --- */
1518  eth_get_reg_t get_reg;
1519  eth_get_eeprom_length_t get_eeprom_length;
1520  eth_get_eeprom_t get_eeprom;
1521  eth_set_eeprom_t set_eeprom;
 /* --- generic filter control (flow director, ntuple, ...) --- */
1524  eth_filter_ctrl_t filter_ctrl;
 /* --- DCB --- */
1526  eth_get_dcb_info get_dcb_info;
 /* --- IEEE 1588 time synchronization --- */
1528  eth_timesync_enable_t timesync_enable;
1530  eth_timesync_disable_t timesync_disable;
1532  eth_timesync_read_rx_timestamp_t timesync_read_rx_timestamp;
1534  eth_timesync_read_tx_timestamp_t timesync_read_tx_timestamp;
1536  eth_timesync_adjust_time timesync_adjust_time;
1537  eth_timesync_read_time timesync_read_time;
1538  eth_timesync_write_time timesync_write_time;
 /* --- extended statistics addressed by id --- */
1540  eth_xstats_get_by_id_t xstats_get_by_id;
1542  eth_xstats_get_names_by_id_t xstats_get_names_by_id;
 /* --- traffic management (rte_tm) ops table --- */
1545  eth_tm_ops_get_t tm_ops_get;
1547 };
1548 
struct rte_mbuf; /* forward declaration; full definition lives in rte_mbuf.h */

/**
 * User callback invoked after a burst of packets is received on a queue.
 * Receives the burst in pkts[] (nb_pkts valid entries, max_pkts capacity)
 * and returns the number of packets to hand back to the application.
 */
typedef uint16_t (*rte_rx_callback_fn)(uint8_t port, uint16_t queue,
		struct rte_mbuf *pkts[], uint16_t nb_pkts,
		uint16_t max_pkts, void *user_param);

/**
 * User callback invoked before a burst of packets is transmitted on a
 * queue; returns the number of packets to actually transmit.
 */
typedef uint16_t (*rte_tx_callback_fn)(uint8_t port, uint16_t queue,
		struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);

/**
 * @internal One node of a per-queue linked list of user RX/TX callbacks.
 * Which union member is valid depends on whether the node sits in an RX
 * (post_rx_burst_cbs) or TX (pre_tx_burst_cbs) list.
 */
struct rte_eth_rxtx_callback {
	struct rte_eth_rxtx_callback *next; /**< Next callback in the chain. */
	union {
		rte_rx_callback_fn rx;
		rte_tx_callback_fn tx;
	} fn;
	void *param; /**< Opaque user argument passed through to fn. */
};
1611 
1616  RTE_ETH_DEV_UNUSED = 0,
1617  RTE_ETH_DEV_ATTACHED,
1618  RTE_ETH_DEV_DEFERRED,
1619 };
1620 
1631 struct rte_eth_dev {
1632  eth_rx_burst_t rx_pkt_burst;
1633  eth_tx_burst_t tx_pkt_burst;
1634  eth_tx_prep_t tx_pkt_prepare;
1635  struct rte_eth_dev_data *data;
1636  const struct eth_dev_ops *dev_ops;
1637  struct rte_device *device;
1638  struct rte_intr_handle *intr_handle;
1640  struct rte_eth_dev_cb_list link_intr_cbs;
1645  struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
1650  struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
1651  enum rte_eth_dev_state state;
1653 
/**
 * @internal Per-port SR-IOV bookkeeping, embedded in rte_eth_dev_data
 * (accessible via the RTE_ETH_DEV_SRIOV() macro).
 */
struct rte_eth_dev_sriov {
	uint8_t active;          /**< Non-zero when SR-IOV is enabled on the port. */
	uint8_t nb_q_per_pool;   /**< RX/TX queues allocated per pool. */
	uint16_t def_vmdq_idx;   /**< Default VMDq pool index. */
	uint16_t def_pool_q_idx; /**< First queue index of the default pool. */
};
1660 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1661 
1662 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
1663 
/**
 * @internal Per-port data, pointed to by rte_eth_dev.data.
 */
1671 struct rte_eth_dev_data {
1672  char name[RTE_ETH_NAME_MAX_LEN]; /**< Unique port name. */
1674  void **rx_queues; /**< Array of PMD-private RX queue handles. */
1675  void **tx_queues; /**< Array of PMD-private TX queue handles. */
1676  uint16_t nb_rx_queues; /**< Number of RX queues. */
1677  uint16_t nb_tx_queues; /**< Number of TX queues. */
1679  struct rte_eth_dev_sriov sriov; /**< SR-IOV bookkeeping. */
1681  void *dev_private; /**< PMD-specific private data. */
1683  struct rte_eth_link dev_link; /**< Last recorded link status. */
1686  struct rte_eth_conf dev_conf; /**< Configuration applied via rte_eth_dev_configure(). */
1687  uint16_t mtu; /**< Maximum transmission unit. */
1689  uint32_t min_rx_buf_size; /**< NOTE(review): presumably the smallest RX buffer size over all queues — confirm. */
1692  uint64_t rx_mbuf_alloc_failed; /**< Count of failed RX mbuf allocations. */
1693  struct ether_addr* mac_addrs; /**< Device MAC address table. */
1694  uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR]; /**< Per-MAC-address VMDq pool selection bitmaps. */
1696  struct ether_addr* hash_mac_addrs; /**< MAC addresses of the unicast hash filter. */
1698  uint8_t port_id; /**< Port identifier (index into rte_eth_devices[]). */
1699  __extension__
1700  uint8_t promiscuous : 1, /**< Promiscuous RX mode on/off. */
1701  scattered_rx : 1, /**< Scattered (multi-segment) RX on/off. */
1702  all_multicast : 1, /**< All-multicast RX mode on/off. */
1703  dev_started : 1, /**< Set by rte_eth_dev_start(). */
1704  lro : 1; /**< Large receive offload on/off. */
1705  uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT]; /**< RTE_ETH_QUEUE_STATE_STOPPED/STARTED per queue. */
1707  uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT]; /**< RTE_ETH_QUEUE_STATE_STOPPED/STARTED per queue. */
1709  uint32_t dev_flags; /**< RTE_ETH_DEV_* capability flags. */
1710  enum rte_kernel_driver kdrv; /**< Kernel driver bound to the device. */
1711  int numa_node; /**< NUMA socket of the device. */
1712  struct rte_vlan_filter_conf vlan_filter_conf; /**< VLAN filter bitmap. */
1714 };
1715 
/* Capability/state flags stored in rte_eth_dev_data.dev_flags. */
1717 #define RTE_ETH_DEV_DETACHABLE 0x0001 /**< Device supports detach (hot-unplug). */
1718 
1719 #define RTE_ETH_DEV_INTR_LSC 0x0002 /**< Device delivers link-status-change interrupts. */
1720 
1721 #define RTE_ETH_DEV_BONDED_SLAVE 0x0004 /**< Device is a slave of a bonded port. */
1722 
1723 #define RTE_ETH_DEV_INTR_RMV 0x0008 /**< Device delivers removal (hot-unplug) interrupts. */
1730 extern struct rte_eth_dev rte_eth_devices[];
1731 
1740 uint8_t rte_eth_find_next(uint8_t port_id);
1741 
1745 #define RTE_ETH_FOREACH_DEV(p) \
1746  for (p = rte_eth_find_next(0); \
1747  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
1748  p = rte_eth_find_next(p + 1))
1749 
1750 
1763 uint8_t rte_eth_dev_count(void);
1764 
1774 struct rte_eth_dev *rte_eth_dev_allocated(const char *name);
1775 
1786 struct rte_eth_dev *rte_eth_dev_allocate(const char *name);
1787 
1799 struct rte_eth_dev *rte_eth_dev_attach_secondary(const char *name);
1800 
1810 int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev);
1811 
1824 int rte_eth_dev_attach(const char *devargs, uint8_t *port_id);
1825 
1839 int rte_eth_dev_detach(uint8_t port_id, char *devname);
1840 
1852 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
1853 
1883 int rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_queue,
1884  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
1885 
1897 void _rte_eth_dev_reset(struct rte_eth_dev *dev);
1898 
1938 int rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
1939  uint16_t nb_rx_desc, unsigned int socket_id,
1940  const struct rte_eth_rxconf *rx_conf,
1941  struct rte_mempool *mb_pool);
1942 
1986 int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1987  uint16_t nb_tx_desc, unsigned int socket_id,
1988  const struct rte_eth_txconf *tx_conf);
1989 
2000 int rte_eth_dev_socket_id(uint8_t port_id);
2001 
2011 int rte_eth_dev_is_valid_port(uint8_t port_id);
2012 
2028 int rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id);
2029 
2044 int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id);
2045 
2061 int rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id);
2062 
2077 int rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id);
2078 
2079 
2080 
2096 int rte_eth_dev_start(uint8_t port_id);
2097 
2105 void rte_eth_dev_stop(uint8_t port_id);
2106 
2107 
2120 int rte_eth_dev_set_link_up(uint8_t port_id);
2121 
2131 int rte_eth_dev_set_link_down(uint8_t port_id);
2132 
2141 void rte_eth_dev_close(uint8_t port_id);
2142 
2149 void rte_eth_promiscuous_enable(uint8_t port_id);
2150 
2157 void rte_eth_promiscuous_disable(uint8_t port_id);
2158 
2169 int rte_eth_promiscuous_get(uint8_t port_id);
2170 
2177 void rte_eth_allmulticast_enable(uint8_t port_id);
2178 
2185 void rte_eth_allmulticast_disable(uint8_t port_id);
2186 
2197 int rte_eth_allmulticast_get(uint8_t port_id);
2198 
2210 void rte_eth_link_get(uint8_t port_id, struct rte_eth_link *link);
2211 
2223 void rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *link);
2224 
2242 int rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats);
2243 
2250 void rte_eth_stats_reset(uint8_t port_id);
2251 
2272 int rte_eth_xstats_get_names(uint8_t port_id,
2273  struct rte_eth_xstat_name *xstats_names,
2274  unsigned int size);
2275 
2298 int rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
2299  unsigned int n);
2300 
2323 int
2324 rte_eth_xstats_get_names_by_id(uint8_t port_id,
2325  struct rte_eth_xstat_name *xstats_names, unsigned int size,
2326  uint64_t *ids);
2327 
2351 int rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids,
2352  uint64_t *values, unsigned int n);
2353 
2371 int rte_eth_xstats_get_id_by_name(uint8_t port_id, const char *xstat_name,
2372  uint64_t *id);
2373 
2380 void rte_eth_xstats_reset(uint8_t port_id);
2381 
2399 int rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id,
2400  uint16_t tx_queue_id, uint8_t stat_idx);
2401 
2419 int rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id,
2420  uint16_t rx_queue_id,
2421  uint8_t stat_idx);
2422 
2432 void rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr);
2433 
2443 void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info);
2444 
2463 int rte_eth_dev_fw_version_get(uint8_t port_id,
2464  char *fw_version, size_t fw_size);
2465 
2504 int rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
2505  uint32_t *ptypes, int num);
2506 
2518 int rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu);
2519 
2534 int rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu);
2535 
2554 int rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on);
2555 
2575 int rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id,
2576  int on);
2577 
2594 int rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
2595  enum rte_vlan_type vlan_type,
2596  uint16_t tag_type);
2597 
2618 int rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask);
2619 
2632 int rte_eth_dev_get_vlan_offload(uint8_t port_id);
2633 
2648 int rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on);
2649 
2732 static inline uint16_t
2733 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2734  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
2735 {
2736  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2737 
2738 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2739  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
2740  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2741 
2742  if (queue_id >= dev->data->nb_rx_queues) {
2743  RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2744  return 0;
2745  }
2746 #endif
2747  int16_t nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2748  rx_pkts, nb_pkts);
2749 
2750 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
2751  struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
2752 
2753  if (unlikely(cb != NULL)) {
2754  do {
2755  nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
2756  nb_pkts, cb->param);
2757  cb = cb->next;
2758  } while (cb != NULL);
2759  }
2760 #endif
2761 
2762  return nb_rx;
2763 }
2764 
2777 static inline int
2778 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2779 {
2780  struct rte_eth_dev *dev;
2781 
2782  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2783  dev = &rte_eth_devices[port_id];
2784  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
2785  if (queue_id >= dev->data->nb_rx_queues)
2786  return -EINVAL;
2787 
2788  return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2789 }
2790 
2806 static inline int
2807 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2808 {
2809  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2810  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2811  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2812  return (*dev->dev_ops->rx_descriptor_done)( \
2813  dev->data->rx_queues[queue_id], offset);
2814 }
2815 
2816 #define RTE_ETH_RX_DESC_AVAIL 0 /* Descriptor available to hardware */
2817 #define RTE_ETH_RX_DESC_DONE 1 /* Descriptor done, filled by hardware */
2818 #define RTE_ETH_RX_DESC_UNAVAIL 2 /* Descriptor unavailable */
2853 static inline int
2854 rte_eth_rx_descriptor_status(uint8_t port_id, uint16_t queue_id,
2855  uint16_t offset)
2856 {
2857  struct rte_eth_dev *dev;
2858  void *rxq;
2859 
2860 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2861  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2862 #endif
2863  dev = &rte_eth_devices[port_id];
2864 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2865  if (queue_id >= dev->data->nb_rx_queues)
2866  return -ENODEV;
2867 #endif
2868  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
2869  rxq = dev->data->rx_queues[queue_id];
2870 
2871  return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
2872 }
2873 
2874 #define RTE_ETH_TX_DESC_FULL 0 /* Descriptor still in use by hardware */
2875 #define RTE_ETH_TX_DESC_DONE 1 /* Descriptor done, packet transmitted */
2876 #define RTE_ETH_TX_DESC_UNAVAIL 2 /* Descriptor unavailable */
2911 static inline int rte_eth_tx_descriptor_status(uint8_t port_id,
2912  uint16_t queue_id, uint16_t offset)
2913 {
2914  struct rte_eth_dev *dev;
2915  void *txq;
2916 
2917 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2918  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2919 #endif
2920  dev = &rte_eth_devices[port_id];
2921 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2922  if (queue_id >= dev->data->nb_tx_queues)
2923  return -ENODEV;
2924 #endif
2925  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
2926  txq = dev->data->tx_queues[queue_id];
2927 
2928  return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
2929 }
2930 
2994 static inline uint16_t
2995 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2996  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2997 {
2998  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2999 
3000 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3001  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
3002  RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
3003 
3004  if (queue_id >= dev->data->nb_tx_queues) {
3005  RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3006  return 0;
3007  }
3008 #endif
3009 
3010 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
3011  struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
3012 
3013  if (unlikely(cb != NULL)) {
3014  do {
3015  nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
3016  cb->param);
3017  cb = cb->next;
3018  } while (cb != NULL);
3019  }
3020 #endif
3021 
3022  return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
3023 }
3024 
3081 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
3082 
3083 static inline uint16_t
3084 rte_eth_tx_prepare(uint8_t port_id, uint16_t queue_id,
3085  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
3086 {
3087  struct rte_eth_dev *dev;
3088 
3089 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3090  if (!rte_eth_dev_is_valid_port(port_id)) {
3091  RTE_PMD_DEBUG_TRACE("Invalid TX port_id=%d\n", port_id);
3092  rte_errno = -EINVAL;
3093  return 0;
3094  }
3095 #endif
3096 
3097  dev = &rte_eth_devices[port_id];
3098 
3099 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3100  if (queue_id >= dev->data->nb_tx_queues) {
3101  RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3102  rte_errno = -EINVAL;
3103  return 0;
3104  }
3105 #endif
3106 
3107  if (!dev->tx_pkt_prepare)
3108  return nb_pkts;
3109 
3110  return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
3111  tx_pkts, nb_pkts);
3112 }
3113 
3114 #else
3115 
3116 /*
3117  * Native NOOP operation for compilation targets which doesn't require any
3118  * preparations steps, and functional NOOP may introduce unnecessary performance
3119  * drop.
3120  *
3121  * Generally this is not a good idea to turn it on globally and didn't should
3122  * be used if behavior of tx_preparation can change.
3123  */
3124 
3125 static inline uint16_t
3126 rte_eth_tx_prepare(__rte_unused uint8_t port_id, __rte_unused uint16_t queue_id,
3127  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
3128 {
3129  return nb_pkts;
3130 }
3131 
3132 #endif
3133 
/* Callback applied to the `count` packets in unsent[] that a buffered TX
 * flush could not transmit; userdata is the opaque value registered with it. */
3134 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3135  void *userdata);
3136 
/* NOTE(review): the opening line of this definition
 * ("struct rte_eth_dev_tx_buffer {") was lost in this extract; the fields
 * below form the TX packet buffer used by rte_eth_tx_buffer() and
 * rte_eth_tx_buffer_flush(). */
3142  buffer_tx_error_fn error_callback; /* invoked on packets a flush could not send */
3143  void *error_userdata; /* opaque argument passed to error_callback */
3144  uint16_t size; /* capacity of pkts[] (flush is triggered when reached) */
3145  uint16_t length; /* number of packets currently buffered */
3146  struct rte_mbuf *pkts[]; /* flexible array of buffered packets */
3148 };
3149 
/* Bytes needed for an rte_eth_dev_tx_buffer holding sz packet pointers. */
3156 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3157  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3158 
/* Initialize `buffer` to hold up to `size` packets before auto-flush. */
3169 int
3170 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3171 
3194 static inline uint16_t
3195 rte_eth_tx_buffer_flush(uint8_t port_id, uint16_t queue_id,
3196  struct rte_eth_dev_tx_buffer *buffer)
3197 {
3198  uint16_t sent;
3199  uint16_t to_send = buffer->length;
3200 
3201  if (to_send == 0)
3202  return 0;
3203 
3204  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
3205 
3206  buffer->length = 0;
3207 
3208  /* All packets sent, or to be dealt with by callback below */
3209  if (unlikely(sent != to_send))
3210  buffer->error_callback(&buffer->pkts[sent], to_send - sent,
3211  buffer->error_userdata);
3212 
3213  return sent;
3214 }
3215 
3246 static __rte_always_inline uint16_t
3247 rte_eth_tx_buffer(uint8_t port_id, uint16_t queue_id,
3248  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
3249 {
3250  buffer->pkts[buffer->length++] = tx_pkt;
3251  if (buffer->length < buffer->size)
3252  return 0;
3253 
3254  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
3255 }
3256 
3281 int
3283  buffer_tx_error_fn callback, void *userdata);
3284 
3307 void
3308 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3309  void *userdata);
3310 
3334 void
3335 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3336  void *userdata);
3337 
3362 int
3363 rte_eth_tx_done_cleanup(uint8_t port_id, uint16_t queue_id, uint32_t free_cnt);
3364 
3379 };
3380 
/* Application callback invoked when `event` occurs on port port_id;
 * cb_arg is the value registered with the callback, ret_param carries
 * event-specific data back to the caller. */
3381 typedef int (*rte_eth_dev_cb_fn)(uint8_t port_id,
3382  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
3403 int rte_eth_dev_callback_register(uint8_t port_id,
3404  enum rte_eth_event_type event,
3405  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3406 
3424 int rte_eth_dev_callback_unregister(uint8_t port_id,
3425  enum rte_eth_event_type event,
3426  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3427 
3447 int _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3448  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
3449 
3470 int rte_eth_dev_rx_intr_enable(uint8_t port_id, uint16_t queue_id);
3471 
3491 int rte_eth_dev_rx_intr_disable(uint8_t port_id, uint16_t queue_id);
3492 
3510 int rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data);
3511 
3533 int rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
3534  int epfd, int op, void *data);
3535 
3548 int rte_eth_led_on(uint8_t port_id);
3549 
3562 int rte_eth_led_off(uint8_t port_id);
3563 
3576 int rte_eth_dev_flow_ctrl_get(uint8_t port_id,
3577  struct rte_eth_fc_conf *fc_conf);
3578 
3593 int rte_eth_dev_flow_ctrl_set(uint8_t port_id,
3594  struct rte_eth_fc_conf *fc_conf);
3595 
3611 int rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id,
3612  struct rte_eth_pfc_conf *pfc_conf);
3613 
3632 int rte_eth_dev_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
3633  uint32_t pool);
3634 
3648 int rte_eth_dev_mac_addr_remove(uint8_t port, struct ether_addr *mac_addr);
3649 
3663 int rte_eth_dev_default_mac_addr_set(uint8_t port, struct ether_addr *mac_addr);
3664 
3665 
3681 int rte_eth_dev_rss_reta_update(uint8_t port,
3682  struct rte_eth_rss_reta_entry64 *reta_conf,
3683  uint16_t reta_size);
3684 
3700 int rte_eth_dev_rss_reta_query(uint8_t port,
3701  struct rte_eth_rss_reta_entry64 *reta_conf,
3702  uint16_t reta_size);
3703 
3722 int rte_eth_dev_uc_hash_table_set(uint8_t port,struct ether_addr *addr,
3723  uint8_t on);
3724 
3742 int rte_eth_dev_uc_all_hash_table_set(uint8_t port,uint8_t on);
3743 
3765 int rte_eth_mirror_rule_set(uint8_t port_id,
3766  struct rte_eth_mirror_conf *mirror_conf,
3767  uint8_t rule_id,
3768  uint8_t on);
3769 
3783 int rte_eth_mirror_rule_reset(uint8_t port_id,
3784  uint8_t rule_id);
3785 
3801 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
3802  uint16_t tx_rate);
3803 
3817 int rte_eth_dev_rss_hash_update(uint8_t port_id,
3818  struct rte_eth_rss_conf *rss_conf);
3819 
3833 int
3834 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
3835  struct rte_eth_rss_conf *rss_conf);
3836 
3854 int
3855 rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
3856  struct rte_eth_udp_tunnel *tunnel_udp);
3857 
3876 int
3877 rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
3878  struct rte_eth_udp_tunnel *tunnel_udp);
3879 
3893 int rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type);
3894 
3913 int rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
3914  enum rte_filter_op filter_op, void *arg);
3915 
3928 int rte_eth_dev_get_dcb_info(uint8_t port_id,
3929  struct rte_eth_dcb_info *dcb_info);
3930 
3955 void *rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
3956  rte_rx_callback_fn fn, void *user_param);
3957 
3983 void *rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
3984  rte_rx_callback_fn fn, void *user_param);
3985 
4010 void *rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
4011  rte_tx_callback_fn fn, void *user_param);
4012 
4043 int rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
4044  struct rte_eth_rxtx_callback *user_cb);
4045 
4076 int rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
4077  struct rte_eth_rxtx_callback *user_cb);
4078 
4096 int rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
4097  struct rte_eth_rxq_info *qinfo);
4098 
4116 int rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
4117  struct rte_eth_txq_info *qinfo);
4118 
4135 int rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info);
4136 
4148 int rte_eth_dev_get_eeprom_length(uint8_t port_id);
4149 
4164 int rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);
4165 
4180 int rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);
4181 
4199 int rte_eth_dev_set_mc_addr_list(uint8_t port_id,
4200  struct ether_addr *mc_addr_set,
4201  uint32_t nb_mc_addr);
4202 
4214 int rte_eth_timesync_enable(uint8_t port_id);
4215 
4227 int rte_eth_timesync_disable(uint8_t port_id);
4228 
4246 int rte_eth_timesync_read_rx_timestamp(uint8_t port_id,
4247  struct timespec *timestamp, uint32_t flags);
4248 
4263 int rte_eth_timesync_read_tx_timestamp(uint8_t port_id,
4264  struct timespec *timestamp);
4265 
4282 int rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta);
4283 
4298 int rte_eth_timesync_read_time(uint8_t port_id, struct timespec *time);
4299 
4317 int rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *time);
4318 
4338 const struct rte_memzone *
4339 rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
4340  uint16_t queue_id, size_t size,
4341  unsigned align, int socket_id);
4342 
4357 int
4358 rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
4359  struct rte_eth_l2_tunnel_conf *l2_tunnel);
4360 
4384 int
4385 rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
4386  struct rte_eth_l2_tunnel_conf *l2_tunnel,
4387  uint32_t mask,
4388  uint8_t en);
4389 
4402 int
4403 rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id);
4404 
4416 int
4417 rte_eth_dev_get_name_by_port(uint8_t port_id, char *name);
4418 
4435 int rte_eth_dev_adjust_nb_rx_tx_desc(uint8_t port_id,
4436  uint16_t *nb_rx_desc,
4437  uint16_t *nb_tx_desc);
4438 
4439 #ifdef __cplusplus
4440 }
4441 #endif
4442 
4443 #endif /* _RTE_ETHDEV_H_ */