/*
 * DPDK 2.2.0 -- rte_ethdev.h
 * (Source listing extracted from generated documentation; Doxygen page
 * navigation removed so the file remains a valid C header.)
 */
1 /*-
2  * BSD LICENSE
3  *
4  * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * * Redistributions of source code must retain the above copyright
12  * notice, this list of conditions and the following disclaimer.
13  * * Redistributions in binary form must reproduce the above copyright
14  * notice, this list of conditions and the following disclaimer in
15  * the documentation and/or other materials provided with the
16  * distribution.
17  * * Neither the name of Intel Corporation nor the names of its
18  * contributors may be used to endorse or promote products derived
19  * from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #ifndef _RTE_ETHDEV_H_
35 #define _RTE_ETHDEV_H_
36 
169 #ifdef __cplusplus
170 extern "C" {
171 #endif
172 
173 #include <stdint.h>
174 
175 #include <rte_dev.h>
176 
177 /* Use this macro to check if LRO API is supported */
178 #define RTE_ETHDEV_HAS_LRO_SUPPORT
179 
180 #include <rte_log.h>
181 #include <rte_interrupts.h>
182 #include <rte_pci.h>
183 #include <rte_dev.h>
184 #include <rte_devargs.h>
185 #include "rte_ether.h"
186 #include "rte_eth_ctrl.h"
187 #include "rte_dev_info.h"
188 
189 struct rte_mbuf;
190 
/* Fields of struct rte_eth_stats: basic per-port statistics counters.
 * NOTE(review): the "struct rte_eth_stats {" opening line was lost in this
 * extract -- restore it from the original header before compiling.
 */
	uint64_t ipackets;  /**< Total number of successfully received packets. */
	uint64_t opackets;  /**< Total number of successfully transmitted packets. */
	uint64_t ibytes;    /**< Total number of successfully received bytes. */
	uint64_t obytes;    /**< Total number of successfully transmitted bytes. */
	uint64_t imissed;   /**< Total of RX packets dropped by the HW (e.g. RX queues full). */
	uint64_t ibadcrc __rte_deprecated;  /**< Deprecated; RX packets with CRC error. */
	uint64_t ibadlen __rte_deprecated;  /**< Deprecated; RX packets with bad length. */
	uint64_t ierrors;   /**< Total number of erroneous received packets. */
	uint64_t oerrors;   /**< Total number of failed transmitted packets. */
	uint64_t imcasts;   /**< Total number of multicast received packets. */
	uint64_t rx_nombuf; /**< Total number of RX mbuf allocation failures. */
	uint64_t fdirmatch __rte_deprecated;     /**< Deprecated; flow director matches. */
	uint64_t fdirmiss __rte_deprecated;      /**< Deprecated; flow director misses. */
	uint64_t tx_pause_xon __rte_deprecated;  /**< Deprecated; XON pause frames transmitted. */
	uint64_t rx_pause_xon __rte_deprecated;  /**< Deprecated; XON pause frames received. */
	uint64_t tx_pause_xoff __rte_deprecated; /**< Deprecated; XOFF pause frames transmitted. */
	uint64_t rx_pause_xoff __rte_deprecated; /**< Deprecated; XOFF pause frames received. */
	/* Per-queue counters, one slot per queue up to the compile-time limit. */
	uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]; /**< Per-queue RX packets. */
	uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]; /**< Per-queue TX packets. */
	uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];   /**< Per-queue RX bytes. */
	uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];   /**< Per-queue TX bytes. */
	uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];   /**< Per-queue RX packets dropped. */
	/* Loopback counters -- presumably VF/VMDq loopback traffic; confirm
	 * against the PMDs that fill them in. */
	uint64_t ilbpackets; /**< Packets received from loopback. */
	uint64_t olbpackets; /**< Packets transmitted to loopback. */
	uint64_t ilbbytes;   /**< Bytes received from loopback. */
	uint64_t olbbytes;   /**< Bytes transmitted to loopback. */
};
243 
/**
 * A structure used to retrieve link-level information of an Ethernet port.
 * Aligned to 8 bytes so the whole struct can be read/written as one 64-bit
 * unit (atomic link-status updates).
 */
struct rte_eth_link {
	uint16_t link_speed;      /**< ETH_LINK_SPEED_[10, 100, 1000, 10000, ...]. */
	uint16_t link_duplex;     /**< ETH_LINK_[HALF_DUPLEX, FULL_DUPLEX]. */
	uint8_t  link_status : 1; /**< 1 -> link up, 0 -> link down. */
}__attribute__((aligned(8)));     /**< aligned for atomic64 read/write */
253 #define ETH_LINK_SPEED_AUTONEG 0
254 #define ETH_LINK_SPEED_10 10
255 #define ETH_LINK_SPEED_100 100
256 #define ETH_LINK_SPEED_1000 1000
257 #define ETH_LINK_SPEED_10000 10000
258 #define ETH_LINK_SPEED_10G 10000
259 #define ETH_LINK_SPEED_20G 20000
260 #define ETH_LINK_SPEED_40G 40000
262 #define ETH_LINK_AUTONEG_DUPLEX 0
263 #define ETH_LINK_HALF_DUPLEX 1
264 #define ETH_LINK_FULL_DUPLEX 2
/**
 * A structure used to configure the ring threshold registers of an RX/TX
 * queue for an Ethernet port.
 */
struct rte_eth_thresh {
	uint8_t pthresh; /**< Ring prefetch threshold. */
	uint8_t hthresh; /**< Ring host threshold. */
	uint8_t wthresh; /**< Ring writeback threshold. */
};
275 
279 #define ETH_MQ_RX_RSS_FLAG 0x1
280 #define ETH_MQ_RX_DCB_FLAG 0x2
281 #define ETH_MQ_RX_VMDQ_FLAG 0x4
282 
290 
294  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
296  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
297 
299  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
301  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
303  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
306  ETH_MQ_RX_VMDQ_FLAG,
307 };
308 
312 #define ETH_RSS ETH_MQ_RX_RSS
313 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
314 #define ETH_DCB_RX ETH_MQ_RX_DCB
315 
325 };
326 
330 #define ETH_DCB_NONE ETH_MQ_TX_NONE
331 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
332 #define ETH_DCB_TX ETH_MQ_TX_DCB
333 
340  uint32_t max_rx_pkt_len;
341  uint16_t split_hdr_size;
342  uint16_t header_split : 1,
343  hw_ip_checksum : 1,
344  hw_vlan_filter : 1,
345  hw_vlan_strip : 1,
346  hw_vlan_extend : 1,
347  jumbo_frame : 1,
348  hw_strip_crc : 1,
349  enable_scatter : 1,
350  enable_lro : 1;
351 };
352 
371  uint8_t *rss_key;
372  uint8_t rss_key_len;
373  uint64_t rss_hf;
374 };
375 
376 /*
377  * The RSS offload types are defined based on flow types which are defined
378  * in rte_eth_ctrl.h. Different NIC hardwares may support different RSS offload
379  * types. The supported flow types or RSS offload types can be queried by
380  * rte_eth_dev_info_get().
381  */
382 #define ETH_RSS_IPV4 (1ULL << RTE_ETH_FLOW_IPV4)
383 #define ETH_RSS_FRAG_IPV4 (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
384 #define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
385 #define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
386 #define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
387 #define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
388 #define ETH_RSS_IPV6 (1ULL << RTE_ETH_FLOW_IPV6)
389 #define ETH_RSS_FRAG_IPV6 (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
390 #define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
391 #define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
392 #define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
393 #define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
394 #define ETH_RSS_L2_PAYLOAD (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
395 #define ETH_RSS_IPV6_EX (1ULL << RTE_ETH_FLOW_IPV6_EX)
396 #define ETH_RSS_IPV6_TCP_EX (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
397 #define ETH_RSS_IPV6_UDP_EX (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
398 
399 #define ETH_RSS_IP ( \
400  ETH_RSS_IPV4 | \
401  ETH_RSS_FRAG_IPV4 | \
402  ETH_RSS_NONFRAG_IPV4_OTHER | \
403  ETH_RSS_IPV6 | \
404  ETH_RSS_FRAG_IPV6 | \
405  ETH_RSS_NONFRAG_IPV6_OTHER | \
406  ETH_RSS_IPV6_EX)
407 
408 #define ETH_RSS_UDP ( \
409  ETH_RSS_NONFRAG_IPV4_UDP | \
410  ETH_RSS_NONFRAG_IPV6_UDP | \
411  ETH_RSS_IPV6_UDP_EX)
412 
413 #define ETH_RSS_TCP ( \
414  ETH_RSS_NONFRAG_IPV4_TCP | \
415  ETH_RSS_NONFRAG_IPV6_TCP | \
416  ETH_RSS_IPV6_TCP_EX)
417 
418 #define ETH_RSS_SCTP ( \
419  ETH_RSS_NONFRAG_IPV4_SCTP | \
420  ETH_RSS_NONFRAG_IPV6_SCTP)
421 
423 #define ETH_RSS_PROTO_MASK ( \
424  ETH_RSS_IPV4 | \
425  ETH_RSS_FRAG_IPV4 | \
426  ETH_RSS_NONFRAG_IPV4_TCP | \
427  ETH_RSS_NONFRAG_IPV4_UDP | \
428  ETH_RSS_NONFRAG_IPV4_SCTP | \
429  ETH_RSS_NONFRAG_IPV4_OTHER | \
430  ETH_RSS_IPV6 | \
431  ETH_RSS_FRAG_IPV6 | \
432  ETH_RSS_NONFRAG_IPV6_TCP | \
433  ETH_RSS_NONFRAG_IPV6_UDP | \
434  ETH_RSS_NONFRAG_IPV6_SCTP | \
435  ETH_RSS_NONFRAG_IPV6_OTHER | \
436  ETH_RSS_L2_PAYLOAD | \
437  ETH_RSS_IPV6_EX | \
438  ETH_RSS_IPV6_TCP_EX | \
439  ETH_RSS_IPV6_UDP_EX)
440 
441 /*
442  * Definitions used for redirection table entry size.
443  * Some RSS RETA sizes may not be supported by some drivers, check the
444  * documentation or the description of relevant functions for more details.
445  */
446 #define ETH_RSS_RETA_SIZE_64 64
447 #define ETH_RSS_RETA_SIZE_128 128
448 #define ETH_RSS_RETA_SIZE_512 512
449 #define RTE_RETA_GROUP_SIZE 64
450 
451 /* Definitions used for VMDQ and DCB functionality */
452 #define ETH_VMDQ_MAX_VLAN_FILTERS 64
453 #define ETH_DCB_NUM_USER_PRIORITIES 8
454 #define ETH_VMDQ_DCB_NUM_QUEUES 128
455 #define ETH_DCB_NUM_QUEUES 128
457 /* DCB capability defines */
458 #define ETH_DCB_PG_SUPPORT 0x00000001
459 #define ETH_DCB_PFC_SUPPORT 0x00000002
461 /* Definitions used for VLAN Offload functionality */
462 #define ETH_VLAN_STRIP_OFFLOAD 0x0001
463 #define ETH_VLAN_FILTER_OFFLOAD 0x0002
464 #define ETH_VLAN_EXTEND_OFFLOAD 0x0004
466 /* Definitions used for mask VLAN setting */
467 #define ETH_VLAN_STRIP_MASK 0x0001
468 #define ETH_VLAN_FILTER_MASK 0x0002
469 #define ETH_VLAN_EXTEND_MASK 0x0004
470 #define ETH_VLAN_ID_MAX 0x0FFF
472 /* Definitions used for receive MAC address */
473 #define ETH_NUM_RECEIVE_MAC_ADDR 128
475 /* Definitions used for unicast hash */
476 #define ETH_VMDQ_NUM_UC_HASH_ARRAY 128
478 /* Definitions used for VMDQ pool rx mode setting */
479 #define ETH_VMDQ_ACCEPT_UNTAG 0x0001
480 #define ETH_VMDQ_ACCEPT_HASH_MC 0x0002
481 #define ETH_VMDQ_ACCEPT_HASH_UC 0x0004
482 #define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
483 #define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
486 #define ETH_MIRROR_MAX_VLANS 64
487 
488 #define ETH_MIRROR_VIRTUAL_POOL_UP 0x01
489 #define ETH_MIRROR_UPLINK_PORT 0x02
490 #define ETH_MIRROR_DOWNLINK_PORT 0x04
491 #define ETH_MIRROR_VLAN 0x08
492 #define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10
/**
 * A structure used to configure VLAN traffic mirroring of an Ethernet port.
 */
struct rte_eth_vlan_mirror {
	uint64_t vlan_mask; /**< Bitmask selecting which vlan_id[] slots are valid. */
	uint16_t vlan_id[ETH_MIRROR_MAX_VLANS]; /**< VLAN IDs to be mirrored. */
};
502 
507  uint8_t rule_type;
508  uint8_t dst_pool;
509  uint64_t pool_mask;
512 };
513 
521  uint64_t mask;
523  uint8_t reta[RTE_RETA_GROUP_SIZE];
525 };
526 
532  ETH_4_TCS = 4,
534 };
535 
545 };
546 
/* This structure may be extended in future. */
/**
 * A structure used to configure the DCB feature of an Ethernet port's RX side.
 */
struct rte_eth_dcb_rx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
	/** Traffic class each UP (user priority, 0..7) is mapped to. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};
553 
/**
 * A structure used to configure the VMDq+DCB feature of an Ethernet port's
 * TX side.
 */
struct rte_eth_vmdq_dcb_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
	/** Traffic class each UP (user priority, 0..7) is mapped to. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};
559 
/**
 * A structure used to configure the DCB feature of an Ethernet port's TX side.
 */
struct rte_eth_dcb_tx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
	/** Traffic class each UP (user priority, 0..7) is mapped to. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};
565 
/**
 * A structure used to configure the VMDq feature of an Ethernet port's
 * TX side (pool count only).
 */
struct rte_eth_vmdq_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< VMDq mode, 64 pools. */
};
569 
584  uint8_t default_pool;
585  uint8_t nb_pool_maps;
586  struct {
587  uint16_t vlan_id;
588  uint64_t pools;
592 };
593 
/**
 * A structure used to configure the VMDq feature of an Ethernet port when
 * receiving frames.
 */
struct rte_eth_vmdq_rx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< VMDq-only mode, 8 or 64 pools. */
	uint8_t enable_default_pool; /**< If non-zero, use a default pool. */
	uint8_t default_pool; /**< The default pool, if applicable. */
	uint8_t enable_loop_back; /**< Enable VT loop back. */
	uint8_t nb_pool_maps; /**< Number of entries used in pool_map[]. */
	uint32_t rx_mode; /**< Flags from ETH_VMDQ_ACCEPT_*. */
	struct {
		uint16_t vlan_id; /**< VLAN ID of the received frame. */
		uint64_t pools;   /**< Bitmask of pools that accept this VLAN. */
	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN-to-pool maps. */
};
606 
613  /* For i40e specifically */
614  uint16_t pvid;
615  uint8_t hw_vlan_reject_tagged : 1,
621 };
622 
628  uint16_t rx_free_thresh;
629  uint8_t rx_drop_en;
631 };
632 
633 #define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001
634 #define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002
635 #define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004
636 #define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100
637 #define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200
638 #define ETH_TXQ_FLAGS_NOXSUMUDP 0x0400
639 #define ETH_TXQ_FLAGS_NOXSUMTCP 0x0800
640 #define ETH_TXQ_FLAGS_NOOFFLOADS \
641  (ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
642  ETH_TXQ_FLAGS_NOXSUMUDP | ETH_TXQ_FLAGS_NOXSUMTCP)
643 #define ETH_TXQ_FLAGS_NOXSUMS \
644  (ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
645  ETH_TXQ_FLAGS_NOXSUMTCP)
646 
651  uint16_t tx_rs_thresh;
652  uint16_t tx_free_thresh;
655  uint32_t txq_flags;
657 };
658 
663  uint16_t nb_max;
664  uint16_t nb_min;
665  uint16_t nb_align;
666 };
667 
676 };
677 
684  uint32_t high_water;
685  uint32_t low_water;
686  uint16_t pause_time;
687  uint16_t send_xon;
690  uint8_t autoneg;
691 };
692 
700  uint8_t priority;
701 };
702 
711 };
712 
720 };
721 
733  uint8_t drop_queue;
734  struct rte_eth_fdir_masks mask;
737 };
738 
743  uint16_t udp_port;
744  uint8_t prot_type;
745 };
746 
752  uint16_t lsc;
754  uint16_t rxq;
755 };
756 
/**
 * A structure used to configure an Ethernet port.
 * NOTE(review): this extract is missing several members present in the
 * original header (e.g. rxmode/txmode and the RSS/VMDq-DCB entries inside
 * rx_adv_conf) -- restore them from upstream before compiling.
 */
struct rte_eth_conf {
	uint16_t link_speed; /**< ETH_LINK_SPEED_*, or 0 for autonegotiation. */
	uint16_t link_duplex; /**< ETH_LINK_*_DUPLEX, or 0 for autonegotiation. */
	uint32_t lpbk_mode; /**< Loopback operation mode; 0 = disabled (default). */
	struct {
		struct rte_eth_dcb_rx_conf dcb_rx_conf; /**< Port DCB RX configuration. */
		struct rte_eth_vmdq_rx_conf vmdq_rx_conf; /**< Port VMDq RX configuration. */
	} rx_adv_conf; /**< Port RX filtering configuration. */
	union {
		struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf; /**< Port VMDq+DCB TX configuration. */
		struct rte_eth_dcb_tx_conf dcb_tx_conf; /**< Port DCB TX configuration. */
		struct rte_eth_vmdq_tx_conf vmdq_tx_conf; /**< Port VMDq TX configuration. */
	} tx_adv_conf; /**< Port TX DCB/VMDq configuration. */
};
797 
807 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
808 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
809 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
810 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
811 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
812 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
813 
817 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
818 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
819 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
820 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
821 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
822 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
823 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
824 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
825 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
826 
/**
 * Ethernet device information, filled in by the PMD via
 * rte_eth_dev_info_get().
 */
struct rte_eth_dev_info {
	struct rte_pci_device *pci_dev; /**< Device PCI information. */
	const char *driver_name; /**< Device driver name. */
	unsigned int if_index; /**< Index to bound host interface, or 0 if none. */
	uint32_t min_rx_bufsize; /**< Minimum size of RX buffer. */
	uint32_t max_rx_pktlen; /**< Maximum configurable length of RX pkt. */
	uint16_t max_rx_queues; /**< Maximum number of RX queues. */
	uint16_t max_tx_queues; /**< Maximum number of TX queues. */
	uint32_t max_mac_addrs; /**< Maximum number of MAC addresses. */
	uint32_t max_hash_mac_addrs; /**< Maximum number of hash MAC addresses. */
	uint16_t max_vfs; /**< Maximum number of VFs. */
	uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
	uint32_t rx_offload_capa; /**< Device RX offload capabilities (DEV_RX_OFFLOAD_*). */
	uint32_t tx_offload_capa; /**< Device TX offload capabilities (DEV_TX_OFFLOAD_*). */
	uint16_t reta_size; /**< Device redirection table size (total entries). */
	uint8_t hash_key_size; /**< Hash key size in bytes. */
	/** Bit mask of RSS offloads; the bit offset also indicates flow type. */
	uint64_t flow_type_rss_offloads;
	struct rte_eth_rxconf default_rxconf; /**< Default RX queue configuration. */
	struct rte_eth_txconf default_txconf; /**< Default TX queue configuration. */
	uint16_t vmdq_queue_base; /**< First queue ID for VMDq pools. */
	uint16_t vmdq_queue_num;  /**< Queue number for VMDq pools. */
	uint16_t vmdq_pool_base;  /**< First ID of VMDq pools. */
	struct rte_eth_desc_lim rx_desc_lim; /**< RX descriptor count limits. */
	struct rte_eth_desc_lim tx_desc_lim; /**< TX descriptor count limits. */
};
856 
862  struct rte_mempool *mp;
864  uint8_t scattered_rx;
865  uint16_t nb_desc;
867 
874  uint16_t nb_desc;
876 
878 #define RTE_ETH_XSTATS_NAME_SIZE 64
879 
888  char name[RTE_ETH_XSTATS_NAME_SIZE];
889  uint64_t value;
890 };
891 
892 #define ETH_DCB_NUM_TCS 8
893 #define ETH_MAX_VMDQ_POOL 64
894 
901  struct {
902  uint8_t base;
903  uint8_t nb_queue;
904  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
906  struct {
907  uint8_t base;
908  uint8_t nb_queue;
909  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
910 };
911 
917  uint8_t nb_tcs;
919  uint8_t tc_bws[ETH_DCB_NUM_TCS];
922 };
923 
927 #define RTE_ETH_QUEUE_STATE_STOPPED 0
928 #define RTE_ETH_QUEUE_STATE_STARTED 1
929 
930 struct rte_eth_dev;
931 
932 struct rte_eth_dev_callback;
934 TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
935 
936 
937 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
938 #define RTE_PMD_DEBUG_TRACE(...) \
939  rte_pmd_debug_trace(__func__, __VA_ARGS__)
940 #else
941 #define RTE_PMD_DEBUG_TRACE(...)
942 #endif
943 
944 
/* Macros to check for valid port */
/**
 * Verify that @p port_id refers to a valid, attached device; on failure
 * emit a debug trace and return @p retval from the enclosing function.
 */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return retval; \
	} \
} while (0)

/** Same check, for void functions: return (no value) on an invalid port. */
#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return; \
	} \
} while (0)
959 
960 /*
961  * Definitions of all functions exported by an Ethernet driver through
962  * the generic structure of type *eth_dev_ops* supplied in the *rte_eth_dev*
963  * structure associated with an Ethernet device.
964  */
965 
966 typedef int (*eth_dev_configure_t)(struct rte_eth_dev *dev);
969 typedef int (*eth_dev_start_t)(struct rte_eth_dev *dev);
972 typedef void (*eth_dev_stop_t)(struct rte_eth_dev *dev);
975 typedef int (*eth_dev_set_link_up_t)(struct rte_eth_dev *dev);
978 typedef int (*eth_dev_set_link_down_t)(struct rte_eth_dev *dev);
981 typedef void (*eth_dev_close_t)(struct rte_eth_dev *dev);
984 typedef void (*eth_promiscuous_enable_t)(struct rte_eth_dev *dev);
987 typedef void (*eth_promiscuous_disable_t)(struct rte_eth_dev *dev);
990 typedef void (*eth_allmulticast_enable_t)(struct rte_eth_dev *dev);
993 typedef void (*eth_allmulticast_disable_t)(struct rte_eth_dev *dev);
996 typedef int (*eth_link_update_t)(struct rte_eth_dev *dev,
997  int wait_to_complete);
1000 typedef void (*eth_stats_get_t)(struct rte_eth_dev *dev,
1001  struct rte_eth_stats *igb_stats);
1004 typedef void (*eth_stats_reset_t)(struct rte_eth_dev *dev);
1007 typedef int (*eth_xstats_get_t)(struct rte_eth_dev *dev,
1008  struct rte_eth_xstats *stats, unsigned n);
1011 typedef void (*eth_xstats_reset_t)(struct rte_eth_dev *dev);
1014 typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
1015  uint16_t queue_id,
1016  uint8_t stat_idx,
1017  uint8_t is_rx);
1020 typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
1021  struct rte_eth_dev_info *dev_info);
1024 typedef int (*eth_queue_start_t)(struct rte_eth_dev *dev,
1025  uint16_t queue_id);
1028 typedef int (*eth_queue_stop_t)(struct rte_eth_dev *dev,
1029  uint16_t queue_id);
1032 typedef int (*eth_rx_queue_setup_t)(struct rte_eth_dev *dev,
1033  uint16_t rx_queue_id,
1034  uint16_t nb_rx_desc,
1035  unsigned int socket_id,
1036  const struct rte_eth_rxconf *rx_conf,
1037  struct rte_mempool *mb_pool);
1040 typedef int (*eth_tx_queue_setup_t)(struct rte_eth_dev *dev,
1041  uint16_t tx_queue_id,
1042  uint16_t nb_tx_desc,
1043  unsigned int socket_id,
1044  const struct rte_eth_txconf *tx_conf);
1047 typedef int (*eth_rx_enable_intr_t)(struct rte_eth_dev *dev,
1048  uint16_t rx_queue_id);
1051 typedef int (*eth_rx_disable_intr_t)(struct rte_eth_dev *dev,
1052  uint16_t rx_queue_id);
1055 typedef void (*eth_queue_release_t)(void *queue);
1058 typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
1059  uint16_t rx_queue_id);
1062 typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
1065 typedef void (*eth_rxq_info_get_t)(struct rte_eth_dev *dev,
1066  uint16_t rx_queue_id, struct rte_eth_rxq_info *qinfo);
1067 
1068 typedef void (*eth_txq_info_get_t)(struct rte_eth_dev *dev,
1069  uint16_t tx_queue_id, struct rte_eth_txq_info *qinfo);
1070 
1071 typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);
1074 typedef int (*vlan_filter_set_t)(struct rte_eth_dev *dev,
1075  uint16_t vlan_id,
1076  int on);
1079 typedef void (*vlan_tpid_set_t)(struct rte_eth_dev *dev,
1080  uint16_t tpid);
1083 typedef void (*vlan_offload_set_t)(struct rte_eth_dev *dev, int mask);
1086 typedef int (*vlan_pvid_set_t)(struct rte_eth_dev *dev,
1087  uint16_t vlan_id,
1088  int on);
1091 typedef void (*vlan_strip_queue_set_t)(struct rte_eth_dev *dev,
1092  uint16_t rx_queue_id,
1093  int on);
1096 typedef uint16_t (*eth_rx_burst_t)(void *rxq,
1097  struct rte_mbuf **rx_pkts,
1098  uint16_t nb_pkts);
1101 typedef uint16_t (*eth_tx_burst_t)(void *txq,
1102  struct rte_mbuf **tx_pkts,
1103  uint16_t nb_pkts);
1106 typedef int (*flow_ctrl_get_t)(struct rte_eth_dev *dev,
1107  struct rte_eth_fc_conf *fc_conf);
1110 typedef int (*flow_ctrl_set_t)(struct rte_eth_dev *dev,
1111  struct rte_eth_fc_conf *fc_conf);
1114 typedef int (*priority_flow_ctrl_set_t)(struct rte_eth_dev *dev,
1115  struct rte_eth_pfc_conf *pfc_conf);
1118 typedef int (*reta_update_t)(struct rte_eth_dev *dev,
1119  struct rte_eth_rss_reta_entry64 *reta_conf,
1120  uint16_t reta_size);
1123 typedef int (*reta_query_t)(struct rte_eth_dev *dev,
1124  struct rte_eth_rss_reta_entry64 *reta_conf,
1125  uint16_t reta_size);
1128 typedef int (*rss_hash_update_t)(struct rte_eth_dev *dev,
1129  struct rte_eth_rss_conf *rss_conf);
1132 typedef int (*rss_hash_conf_get_t)(struct rte_eth_dev *dev,
1133  struct rte_eth_rss_conf *rss_conf);
1136 typedef int (*eth_dev_led_on_t)(struct rte_eth_dev *dev);
1139 typedef int (*eth_dev_led_off_t)(struct rte_eth_dev *dev);
1142 typedef void (*eth_mac_addr_remove_t)(struct rte_eth_dev *dev, uint32_t index);
1145 typedef void (*eth_mac_addr_add_t)(struct rte_eth_dev *dev,
1146  struct ether_addr *mac_addr,
1147  uint32_t index,
1148  uint32_t vmdq);
1151 typedef void (*eth_mac_addr_set_t)(struct rte_eth_dev *dev,
1152  struct ether_addr *mac_addr);
1155 typedef int (*eth_uc_hash_table_set_t)(struct rte_eth_dev *dev,
1156  struct ether_addr *mac_addr,
1157  uint8_t on);
1160 typedef int (*eth_uc_all_hash_table_set_t)(struct rte_eth_dev *dev,
1161  uint8_t on);
1164 typedef int (*eth_set_vf_rx_mode_t)(struct rte_eth_dev *dev,
1165  uint16_t vf,
1166  uint16_t rx_mode,
1167  uint8_t on);
1170 typedef int (*eth_set_vf_rx_t)(struct rte_eth_dev *dev,
1171  uint16_t vf,
1172  uint8_t on);
1175 typedef int (*eth_set_vf_tx_t)(struct rte_eth_dev *dev,
1176  uint16_t vf,
1177  uint8_t on);
1180 typedef int (*eth_set_vf_vlan_filter_t)(struct rte_eth_dev *dev,
1181  uint16_t vlan,
1182  uint64_t vf_mask,
1183  uint8_t vlan_on);
1186 typedef int (*eth_set_queue_rate_limit_t)(struct rte_eth_dev *dev,
1187  uint16_t queue_idx,
1188  uint16_t tx_rate);
1191 typedef int (*eth_set_vf_rate_limit_t)(struct rte_eth_dev *dev,
1192  uint16_t vf,
1193  uint16_t tx_rate,
1194  uint64_t q_msk);
1197 typedef int (*eth_mirror_rule_set_t)(struct rte_eth_dev *dev,
1198  struct rte_eth_mirror_conf *mirror_conf,
1199  uint8_t rule_id,
1200  uint8_t on);
1203 typedef int (*eth_mirror_rule_reset_t)(struct rte_eth_dev *dev,
1204  uint8_t rule_id);
1207 typedef int (*eth_udp_tunnel_add_t)(struct rte_eth_dev *dev,
1208  struct rte_eth_udp_tunnel *tunnel_udp);
1211 typedef int (*eth_udp_tunnel_del_t)(struct rte_eth_dev *dev,
1212  struct rte_eth_udp_tunnel *tunnel_udp);
1215 typedef int (*eth_set_mc_addr_list_t)(struct rte_eth_dev *dev,
1216  struct ether_addr *mc_addr_set,
1217  uint32_t nb_mc_addr);
1220 typedef int (*eth_timesync_enable_t)(struct rte_eth_dev *dev);
1223 typedef int (*eth_timesync_disable_t)(struct rte_eth_dev *dev);
1226 typedef int (*eth_timesync_read_rx_timestamp_t)(struct rte_eth_dev *dev,
1227  struct timespec *timestamp,
1228  uint32_t flags);
1231 typedef int (*eth_timesync_read_tx_timestamp_t)(struct rte_eth_dev *dev,
1232  struct timespec *timestamp);
1235 typedef int (*eth_timesync_adjust_time)(struct rte_eth_dev *dev, int64_t);
1238 typedef int (*eth_timesync_read_time)(struct rte_eth_dev *dev,
1239  struct timespec *timestamp);
1242 typedef int (*eth_timesync_write_time)(struct rte_eth_dev *dev,
1243  const struct timespec *timestamp);
1246 typedef int (*eth_get_reg_length_t)(struct rte_eth_dev *dev);
1249 typedef int (*eth_get_reg_t)(struct rte_eth_dev *dev,
1250  struct rte_dev_reg_info *info);
1253 typedef int (*eth_get_eeprom_length_t)(struct rte_eth_dev *dev);
1256 typedef int (*eth_get_eeprom_t)(struct rte_eth_dev *dev,
1257  struct rte_dev_eeprom_info *info);
1260 typedef int (*eth_set_eeprom_t)(struct rte_eth_dev *dev,
1261  struct rte_dev_eeprom_info *info);
1264 #ifdef RTE_NIC_BYPASS
1265 
/* Bypass operating modes (compiled only when RTE_NIC_BYPASS is defined). */
enum {
	RTE_BYPASS_MODE_NONE,    /**< No bypass mode configured. */
	RTE_BYPASS_MODE_NORMAL,  /**< Normal operation -- traffic through the NIC. */
	RTE_BYPASS_MODE_BYPASS,  /**< Bypass engaged. */
	RTE_BYPASS_MODE_ISOLATE, /**< Ports isolated. */
	RTE_BYPASS_MODE_NUM,     /**< Sentinel: number of modes; not a mode. */
};

/** True iff @p x is a configurable bypass mode (excludes NONE and NUM). */
#define RTE_BYPASS_MODE_VALID(x) \
	((x) > RTE_BYPASS_MODE_NONE && (x) < RTE_BYPASS_MODE_NUM)
1276 
/* Bypass state/watchdog events (compiled only when RTE_NIC_BYPASS is
 * defined). RTE_BYPASS_EVENT_NUM is a sentinel counting the events, not an
 * event itself. */
enum {
	RTE_BYPASS_EVENT_NONE,
	RTE_BYPASS_EVENT_START,
	RTE_BYPASS_EVENT_OS_ON = RTE_BYPASS_EVENT_START,
	RTE_BYPASS_EVENT_POWER_ON,
	RTE_BYPASS_EVENT_OS_OFF,
	RTE_BYPASS_EVENT_POWER_OFF,
	RTE_BYPASS_EVENT_TIMEOUT,
	RTE_BYPASS_EVENT_NUM
};

/**
 * True iff @p x is a real bypass event (excludes NONE and the NUM sentinel).
 *
 * Bug fix: the upper bound previously compared against RTE_BYPASS_MODE_NUM
 * (value 4, from the unrelated mode enum), which wrongly rejected the valid
 * events RTE_BYPASS_EVENT_POWER_OFF (4) and RTE_BYPASS_EVENT_TIMEOUT (5).
 * Compare against RTE_BYPASS_EVENT_NUM instead, as fixed in later DPDK.
 */
#define RTE_BYPASS_EVENT_VALID(x) \
	((x) > RTE_BYPASS_EVENT_NONE && (x) < RTE_BYPASS_EVENT_NUM)
1290 
/* Bypass watchdog timeout values (compiled only when RTE_NIC_BYPASS is
 * defined). RTE_BYPASS_TMT_NUM is a sentinel, not a usable timeout. */
enum {
	RTE_BYPASS_TMT_OFF, /* timeout disabled. */
	RTE_BYPASS_TMT_1_5_SEC, /* timeout for 1.5 seconds */
	RTE_BYPASS_TMT_2_SEC, /* timeout for 2 seconds */
	RTE_BYPASS_TMT_3_SEC, /* timeout for 3 seconds */
	RTE_BYPASS_TMT_4_SEC, /* timeout for 4 seconds */
	RTE_BYPASS_TMT_8_SEC, /* timeout for 8 seconds */
	RTE_BYPASS_TMT_16_SEC, /* timeout for 16 seconds */
	RTE_BYPASS_TMT_32_SEC, /* timeout for 32 seconds */
	RTE_BYPASS_TMT_NUM
};

/**
 * True iff @p x is RTE_BYPASS_TMT_OFF or one of the defined timeout values.
 * Equivalent to the original OFF-or-in-range test, folded into one
 * half-open range check.
 */
#define RTE_BYPASS_TMT_VALID(x) \
	((x) >= RTE_BYPASS_TMT_OFF && (x) < RTE_BYPASS_TMT_NUM)
1306 
1307 typedef void (*bypass_init_t)(struct rte_eth_dev *dev);
1308 typedef int32_t (*bypass_state_set_t)(struct rte_eth_dev *dev, uint32_t *new_state);
1309 typedef int32_t (*bypass_state_show_t)(struct rte_eth_dev *dev, uint32_t *state);
1310 typedef int32_t (*bypass_event_set_t)(struct rte_eth_dev *dev, uint32_t state, uint32_t event);
1311 typedef int32_t (*bypass_event_show_t)(struct rte_eth_dev *dev, uint32_t event_shift, uint32_t *event);
1312 typedef int32_t (*bypass_wd_timeout_set_t)(struct rte_eth_dev *dev, uint32_t timeout);
1313 typedef int32_t (*bypass_wd_timeout_show_t)(struct rte_eth_dev *dev, uint32_t *wd_timeout);
1314 typedef int32_t (*bypass_ver_show_t)(struct rte_eth_dev *dev, uint32_t *ver);
1315 typedef int32_t (*bypass_wd_reset_t)(struct rte_eth_dev *dev);
1316 #endif
1317 
1318 typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
1319  enum rte_filter_type filter_type,
1320  enum rte_filter_op filter_op,
1321  void *arg);
1324 typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
1325  struct rte_eth_dcb_info *dcb_info);
/**
 * @internal
 * The functions exported by an Ethernet driver (PMD). The generic rte_ethdev
 * API layer looks members up here and invokes them; entries a PMD does not
 * support are left NULL.
 */
struct eth_dev_ops {
	/* Device lifecycle. */
	eth_dev_configure_t dev_configure; /**< Configure device. */
	eth_dev_start_t dev_start; /**< Start device. */
	eth_dev_stop_t dev_stop; /**< Stop device. */
	eth_dev_set_link_up_t dev_set_link_up; /**< Device link up. */
	eth_dev_set_link_down_t dev_set_link_down; /**< Device link down. */
	eth_dev_close_t dev_close; /**< Close device. */
	/* RX filtering mode and link state. */
	eth_promiscuous_enable_t promiscuous_enable; /**< Promiscuous ON. */
	eth_promiscuous_disable_t promiscuous_disable; /**< Promiscuous OFF. */
	eth_allmulticast_enable_t allmulticast_enable; /**< RX all-multicast ON. */
	eth_allmulticast_disable_t allmulticast_disable; /**< RX all-multicast OFF. */
	eth_link_update_t link_update; /**< Get device link state. */
	/* Statistics. */
	eth_stats_get_t stats_get; /**< Get generic device statistics. */
	eth_stats_reset_t stats_reset; /**< Reset generic device statistics. */
	eth_xstats_get_t xstats_get; /**< Get extended device statistics. */
	eth_xstats_reset_t xstats_reset; /**< Reset extended device statistics. */
	eth_queue_stats_mapping_set_t queue_stats_mapping_set; /**< Per-queue stat counter mapping. */
	eth_dev_infos_get_t dev_infos_get; /**< Get device info. */
	mtu_set_t mtu_set; /**< Set MTU. */
	/* VLAN offload. */
	vlan_filter_set_t vlan_filter_set; /**< Filter VLAN setup. */
	vlan_tpid_set_t vlan_tpid_set; /**< Outer VLAN TPID setup. */
	vlan_strip_queue_set_t vlan_strip_queue_set; /**< VLAN stripping on a queue. */
	vlan_offload_set_t vlan_offload_set; /**< Set VLAN offload. */
	vlan_pvid_set_t vlan_pvid_set; /**< Set port-based TX VLAN insertion. */
	/* Queue management. */
	eth_queue_start_t rx_queue_start; /**< Start RX for a queue. */
	eth_queue_stop_t rx_queue_stop; /**< Stop RX for a queue. */
	eth_queue_start_t tx_queue_start; /**< Start TX for a queue. */
	eth_queue_stop_t tx_queue_stop; /**< Stop TX for a queue. */
	eth_rx_queue_setup_t rx_queue_setup; /**< Set up device RX queue. */
	eth_queue_release_t rx_queue_release; /**< Release RX queue. */
	eth_rx_queue_count_t rx_queue_count; /**< Get RX queue descriptor count. */
	eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rx descriptor status. */
	eth_rx_enable_intr_t rx_queue_intr_enable; /**< Enable RX queue interrupt. */
	eth_rx_disable_intr_t rx_queue_intr_disable; /**< Disable RX queue interrupt. */
	eth_tx_queue_setup_t tx_queue_setup; /**< Set up device TX queue. */
	eth_queue_release_t tx_queue_release; /**< Release TX queue. */
	/* LED control. */
	eth_dev_led_on_t dev_led_on; /**< Turn on LED. */
	eth_dev_led_off_t dev_led_off; /**< Turn off LED. */
	/* Flow control. */
	flow_ctrl_get_t flow_ctrl_get; /**< Get flow control. */
	flow_ctrl_set_t flow_ctrl_set; /**< Setup flow control. */
	priority_flow_ctrl_set_t priority_flow_ctrl_set; /**< Setup priority flow control. */
	/* MAC address handling. */
	eth_mac_addr_remove_t mac_addr_remove; /**< Remove MAC address. */
	eth_mac_addr_add_t mac_addr_add; /**< Add a MAC address. */
	eth_mac_addr_set_t mac_addr_set; /**< Set the default MAC address. */
	eth_uc_hash_table_set_t uc_hash_table_set; /**< Set unicast table array. */
	eth_uc_all_hash_table_set_t uc_all_hash_table_set; /**< Set unicast hash bitmap. */
	/* Mirroring and VF control. */
	eth_mirror_rule_set_t mirror_rule_set; /**< Add a traffic mirror rule. */
	eth_mirror_rule_reset_t mirror_rule_reset; /**< Reset a traffic mirror rule. */
	eth_set_vf_rx_mode_t set_vf_rx_mode; /**< Set VF RX mode. */
	eth_set_vf_rx_t set_vf_rx; /**< Enable/disable VF receive. */
	eth_set_vf_tx_t set_vf_tx; /**< Enable/disable VF transmit. */
	eth_set_vf_vlan_filter_t set_vf_vlan_filter; /**< Set VF VLAN filter. */
	/* UDP tunneling and rate limiting. */
	eth_udp_tunnel_add_t udp_tunnel_add; /**< Add UDP tunnel port. */
	eth_udp_tunnel_del_t udp_tunnel_del; /**< Delete UDP tunnel port. */
	eth_set_queue_rate_limit_t set_queue_rate_limit; /**< Set queue rate limit. */
	eth_set_vf_rate_limit_t set_vf_rate_limit; /**< Set VF rate limit. */
	/* RSS redirection table. */
	reta_update_t reta_update; /**< RSS RETA update. */
	reta_query_t reta_query; /**< RSS RETA query. */

	/* Register and EEPROM access. */
	eth_get_reg_length_t get_reg_length; /**< Get number of registers. */
	eth_get_reg_t get_reg; /**< Get registers. */
	eth_get_eeprom_length_t get_eeprom_length; /**< Get EEPROM length. */
	eth_get_eeprom_t get_eeprom; /**< Get EEPROM data. */
	eth_set_eeprom_t set_eeprom; /**< Set EEPROM data. */
	/* bypass control */
#ifdef RTE_NIC_BYPASS
	bypass_init_t bypass_init; /**< Initialize bypass logic. */
	bypass_state_set_t bypass_state_set; /**< Set bypass state. */
	bypass_state_show_t bypass_state_show; /**< Get bypass state. */
	bypass_event_set_t bypass_event_set; /**< Set bypass event behaviour. */
	bypass_event_show_t bypass_event_show; /**< Get bypass event behaviour. */
	bypass_wd_timeout_set_t bypass_wd_timeout_set; /**< Set watchdog timeout. */
	bypass_wd_timeout_show_t bypass_wd_timeout_show; /**< Get watchdog timeout. */
	bypass_ver_show_t bypass_ver_show; /**< Get bypass version. */
	bypass_wd_reset_t bypass_wd_reset; /**< Reset bypass watchdog. */
#endif

	/* RSS hash and generic filtering. */
	rss_hash_update_t rss_hash_update; /**< Configure RSS hash protocols. */
	rss_hash_conf_get_t rss_hash_conf_get; /**< Get current RSS hash configuration. */
	eth_filter_ctrl_t filter_ctrl; /**< Common filter control. */
	eth_set_mc_addr_list_t set_mc_addr_list; /**< Set list of multicast addresses. */
	eth_rxq_info_get_t rxq_info_get; /**< Retrieve RX queue information. */
	eth_txq_info_get_t txq_info_get; /**< Retrieve TX queue information. */
	/* IEEE 1588 / 802.1AS time synchronization. */
	eth_timesync_enable_t timesync_enable; /**< Turn timestamping on. */
	eth_timesync_disable_t timesync_disable; /**< Turn timestamping off. */
	eth_timesync_read_rx_timestamp_t timesync_read_rx_timestamp; /**< Read RX timestamp. */
	eth_timesync_read_tx_timestamp_t timesync_read_tx_timestamp; /**< Read TX timestamp. */

	eth_get_dcb_info get_dcb_info; /**< Get DCB information. */
	eth_timesync_adjust_time timesync_adjust_time; /**< Adjust the device clock. */
	eth_timesync_read_time timesync_read_time; /**< Get the device clock time. */
	eth_timesync_write_time timesync_write_time; /**< Set the device clock time. */
};
1447 
1470 typedef uint16_t (*rte_rx_callback_fn)(uint8_t port, uint16_t queue,
1471  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1472  void *user_param);
1473 
1494 typedef uint16_t (*rte_tx_callback_fn)(uint8_t port, uint16_t queue,
1495  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1496 
/**
 * @internal
 * One node in a per-queue singly-linked list of user callbacks invoked
 * around RX/TX burst calls.
 */
struct rte_eth_rxtx_callback {
	struct rte_eth_rxtx_callback *next; /**< Next callback in the list. */
	union{
		rte_rx_callback_fn rx; /**< Used when the node is on an RX list. */
		rte_tx_callback_fn tx; /**< Used when the node is on a TX list. */
	} fn;
	void *param; /**< Opaque user argument passed to the callback. */
};
1510 
1520 };
1521 
/**
 * @internal
 * The generic data structure associated with each Ethernet device. The
 * burst RX/TX function pointers come first for fast data-path access.
 */
struct rte_eth_dev {
	eth_rx_burst_t rx_pkt_burst; /**< Pointer to PMD receive function. */
	eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */
	struct rte_eth_dev_data *data; /**< Pointer to device data. */
	const struct eth_driver *driver; /**< Driver for this device. */
	const struct eth_dev_ops *dev_ops; /**< Functions exported by PMD. */
	struct rte_pci_device *pci_dev; /**< PCI info supplied by probing. */
	/** User application callbacks for NIC interrupts. */
	struct rte_eth_dev_cb_list link_intr_cbs;
	/** User functions called from rx_pkt_burst to post-process received
	 *  packets before returning them to the application. */
	struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
	/** User functions called from tx_pkt_burst to pre-process packets
	 *  before handing them to the driver for transmission. */
	struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
	uint8_t attached; /**< Flag indicating the port is attached. */
	enum rte_eth_dev_type dev_type; /**< Flag indicating the device type. */
};
1554 
/** @internal SR-IOV related state of an Ethernet device. */
struct rte_eth_dev_sriov {
	uint8_t active;          /**< Non-zero when SRIOV is active. */
	uint8_t nb_q_per_pool;   /**< RX queue number per pool. */
	uint16_t def_vmdq_idx;   /**< Default pool number used for the PF. */
	uint16_t def_pool_q_idx; /**< Default pool queue start index. */
};
1561 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1562 
1563 #define RTE_ETH_NAME_MAX_LEN (32)
1564 
/**
 * @internal
 * The data part (no function pointers) associated with each Ethernet device,
 * suitable for sharing between processes in a multi-process configuration.
 */
struct rte_eth_dev_data {
	char name[RTE_ETH_NAME_MAX_LEN]; /**< Unique identifier name. */
	void **rx_queues; /**< Array of pointers to RX queues. */
	void **tx_queues; /**< Array of pointers to TX queues. */
	uint16_t nb_rx_queues; /**< Number of RX queues. */
	uint16_t nb_tx_queues; /**< Number of TX queues. */
	struct rte_eth_dev_sriov sriov; /**< SRIOV data. */
	void *dev_private; /**< PMD-specific private data. */
	struct rte_eth_link dev_link; /**< Link-level information & status. */
	struct rte_eth_conf dev_conf; /**< Configuration applied to device. */
	uint16_t mtu; /**< Maximum Transmission Unit. */
	uint32_t min_rx_buf_size; /**< Common RX buffer size handled by all queues. */
	uint64_t rx_mbuf_alloc_failed; /**< RX ring mbuf allocation failures. */
	struct ether_addr* mac_addrs; /**< Device Ethernet link addresses. */
	/** Bitmap array associating MAC addresses to pools. */
	uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
	/** Device Ethernet MAC addresses of hash filtering. */
	struct ether_addr* hash_mac_addrs;
	uint8_t port_id; /**< Device [external] port identifier. */
	uint8_t promiscuous : 1,   /**< RX promiscuous mode ON(1) / OFF(0). */
		scattered_rx : 1,  /**< RX of scattered packets ON(1) / OFF(0). */
		all_multicast : 1, /**< RX all-multicast mode ON(1) / OFF(0). */
		dev_started : 1,   /**< Device state: STARTED(1) / STOPPED(0). */
		lro : 1;           /**< RX LRO ON(1) / OFF(0). */
	/** RX queue state: RTE_ETH_QUEUE_STATE_STARTED(1) / _STOPPED(0). */
	uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT];
	/** TX queue state: RTE_ETH_QUEUE_STATE_STARTED(1) / _STOPPED(0). */
	uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];
	uint32_t dev_flags; /**< Device capability flags (RTE_ETH_DEV_*). */
	enum rte_kernel_driver kdrv; /**< Kernel driver passthrough. */
	int numa_node; /**< NUMA node connection. */
	const char *drv_name; /**< Driver name. */
};
1614 
1616 #define RTE_ETH_DEV_DETACHABLE 0x0001
1617 
1618 #define RTE_ETH_DEV_INTR_LSC 0x0002
1619 
1625 extern struct rte_eth_dev rte_eth_devices[];
1626 
1640 extern uint8_t rte_eth_dev_count(void);
1641 
1651 extern struct rte_eth_dev *rte_eth_dev_allocated(const char *name);
1652 
1663 struct rte_eth_dev *rte_eth_dev_allocate(const char *name,
1664  enum rte_eth_dev_type type);
1665 
1675 int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev);
1676 
1689 int rte_eth_dev_attach(const char *devargs, uint8_t *port_id);
1690 
1703 int rte_eth_dev_detach(uint8_t port_id, char *devname);
1704 
1705 struct eth_driver;
1736 typedef int (*eth_dev_init_t)(struct rte_eth_dev *eth_dev);
1737 
1753 typedef int (*eth_dev_uninit_t)(struct rte_eth_dev *eth_dev);
1754 
/**
 * @internal
 * The structure associated with a PMD Ethernet driver.
 *
 * Each Ethernet driver acts as a PCI driver and is represented by:
 * - an embedded *rte_pci_driver* (must remain the first field, so the
 *   structure can be cast to/from a PCI driver),
 * - init/uninit hooks invoked for each matching PCI device,
 * - the size of per-device private data to allocate.
 */
struct eth_driver {
	struct rte_pci_driver pci_drv;   /**< The PMD is also a PCI driver. */
	eth_dev_init_t eth_dev_init;     /**< Per-device initialization hook. */
	eth_dev_uninit_t eth_dev_uninit; /**< Per-device teardown hook. */
	unsigned int dev_private_size;   /**< Size of device private data. */
};
1776 
1787 extern void rte_eth_driver_register(struct eth_driver *eth_drv);
1788 
1818 extern int rte_eth_dev_configure(uint8_t port_id,
1819  uint16_t nb_rx_queue,
1820  uint16_t nb_tx_queue,
1821  const struct rte_eth_conf *eth_conf);
1822 
1862 extern int rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
1863  uint16_t nb_rx_desc, unsigned int socket_id,
1864  const struct rte_eth_rxconf *rx_conf,
1865  struct rte_mempool *mb_pool);
1866 
1910 extern int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1911  uint16_t nb_tx_desc, unsigned int socket_id,
1912  const struct rte_eth_txconf *tx_conf);
1913 
1914 /*
1915  * Return the NUMA socket to which an Ethernet device is connected
1916  *
1917  * @param port_id
1918  * The port identifier of the Ethernet device
1919  * @return
1920  * The NUMA socket id to which the Ethernet device is connected or
1921  * a default of zero if the socket could not be determined.
 1922  * -1 is returned if the port_id value is out of range.
1923  */
1924 extern int rte_eth_dev_socket_id(uint8_t port_id);
1925 
1926 /*
1927  * Check if port_id of device is attached
1928  *
1929  * @param port_id
1930  * The port identifier of the Ethernet device
1931  * @return
1932  * - 0 if port is out of range or not attached
1933  * - 1 if device is attached
1934  */
1935 extern int rte_eth_dev_is_valid_port(uint8_t port_id);
1936 
1937 /*
1938  * Allocate mbuf from mempool, setup the DMA physical address
1939  * and then start RX for specified queue of a port. It is used
1940  * when rx_deferred_start flag of the specified queue is true.
1941  *
1942  * @param port_id
1943  * The port identifier of the Ethernet device
1944  * @param rx_queue_id
1945  * The index of the rx queue to update the ring.
1946  * The value must be in the range [0, nb_rx_queue - 1] previously supplied
1947  * to rte_eth_dev_configure().
 1948  * @return
 1949  * - 0: Success, the receive queue is correctly started.
 1950  * - -EINVAL: The port_id or the queue_id is out of range.
 1951  * - -ENOTSUP: The function is not supported by the PMD driver.
 1952  */
1953 extern int rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id);
1954 
1955 /*
1956  * Stop specified RX queue of a port
1957  *
1958  * @param port_id
1959  * The port identifier of the Ethernet device
1960  * @param rx_queue_id
1961  * The index of the rx queue to update the ring.
1962  * The value must be in the range [0, nb_rx_queue - 1] previously supplied
1963  * to rte_eth_dev_configure().
 1964  * @return
 1965  * - 0: Success, the receive queue is correctly stopped.
 1966  * - -EINVAL: The port_id or the queue_id is out of range.
 1967  * - -ENOTSUP: The function is not supported by the PMD driver.
 1968  */
1969 extern int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id);
1970 
1971 /*
1972  * Start TX for specified queue of a port. It is used when tx_deferred_start
1973  * flag of the specified queue is true.
1974  *
1975  * @param port_id
1976  * The port identifier of the Ethernet device
1977  * @param tx_queue_id
1978  * The index of the tx queue to update the ring.
1979  * The value must be in the range [0, nb_tx_queue - 1] previously supplied
1980  * to rte_eth_dev_configure().
 1981  * @return
 1982  * - 0: Success, the transmit queue is correctly started.
 1983  * - -EINVAL: The port_id or the queue_id is out of range.
 1984  * - -ENOTSUP: The function is not supported by the PMD driver.
 1985  */
1986 extern int rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id);
1987 
1988 /*
1989  * Stop specified TX queue of a port
1990  *
1991  * @param port_id
1992  * The port identifier of the Ethernet device
1993  * @param tx_queue_id
1994  * The index of the tx queue to update the ring.
1995  * The value must be in the range [0, nb_tx_queue - 1] previously supplied
1996  * to rte_eth_dev_configure().
 1997  * @return
 1998  * - 0: Success, the transmit queue is correctly stopped.
 1999  * - -EINVAL: The port_id or the queue_id is out of range.
 2000  * - -ENOTSUP: The function is not supported by the PMD driver.
 2001  */
2002 extern int rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id);
2003 
2004 
2005 
2021 extern int rte_eth_dev_start(uint8_t port_id);
2022 
2030 extern void rte_eth_dev_stop(uint8_t port_id);
2031 
2032 
2045 extern int rte_eth_dev_set_link_up(uint8_t port_id);
2046 
2056 extern int rte_eth_dev_set_link_down(uint8_t port_id);
2057 
2066 extern void rte_eth_dev_close(uint8_t port_id);
2067 
2074 extern void rte_eth_promiscuous_enable(uint8_t port_id);
2075 
2082 extern void rte_eth_promiscuous_disable(uint8_t port_id);
2083 
2094 extern int rte_eth_promiscuous_get(uint8_t port_id);
2095 
2102 extern void rte_eth_allmulticast_enable(uint8_t port_id);
2103 
2110 extern void rte_eth_allmulticast_disable(uint8_t port_id);
2111 
2122 extern int rte_eth_allmulticast_get(uint8_t port_id);
2123 
2135 extern void rte_eth_link_get(uint8_t port_id, struct rte_eth_link *link);
2136 
2148 extern void rte_eth_link_get_nowait(uint8_t port_id,
2149  struct rte_eth_link *link);
2150 
2168 extern int rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats);
2169 
2176 extern void rte_eth_stats_reset(uint8_t port_id);
2177 
2199 extern int rte_eth_xstats_get(uint8_t port_id,
2200  struct rte_eth_xstats *xstats, unsigned n);
2201 
2208 extern void rte_eth_xstats_reset(uint8_t port_id);
2209 
2227 extern int rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id,
2228  uint16_t tx_queue_id,
2229  uint8_t stat_idx);
2230 
2248 extern int rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id,
2249  uint16_t rx_queue_id,
2250  uint8_t stat_idx);
2251 
2261 extern void rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr);
2262 
2272 extern void rte_eth_dev_info_get(uint8_t port_id,
2273  struct rte_eth_dev_info *dev_info);
2274 
2286 extern int rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu);
2287 
2301 extern int rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu);
2302 
2321 extern int rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id , int on);
2322 
2342 extern int rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id,
2343  uint16_t rx_queue_id, int on);
2344 
2359 extern int rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tag_type);
2360 
2381 extern int rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask);
2382 
2395 extern int rte_eth_dev_get_vlan_offload(uint8_t port_id);
2396 
2411 extern int rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on);
2412 
2495 static inline uint16_t
2496 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2497  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
2498 {
2499  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2500 
2501 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2502  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
2503  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2504 
2505  if (queue_id >= dev->data->nb_rx_queues) {
2506  RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2507  return 0;
2508  }
2509 #endif
2510  int16_t nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2511  rx_pkts, nb_pkts);
2512 
2513 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
2514  struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
2515 
2516  if (unlikely(cb != NULL)) {
2517  do {
2518  nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
2519  nb_pkts, cb->param);
2520  cb = cb->next;
2521  } while (cb != NULL);
2522  }
2523 #endif
2524 
2525  return nb_rx;
2526 }
2527 
2540 static inline int
2541 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2542 {
2543  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2544  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2545  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
2546  return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2547 }
2548 
2564 static inline int
2565 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2566 {
2567  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2568  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2569  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2570  return (*dev->dev_ops->rx_descriptor_done)( \
2571  dev->data->rx_queues[queue_id], offset);
2572 }
2573 
2632 static inline uint16_t
2633 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2634  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2635 {
2636  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2637 
2638 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2639  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
2640  RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
2641 
2642  if (queue_id >= dev->data->nb_tx_queues) {
2643  RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2644  return 0;
2645  }
2646 #endif
2647 
2648 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
2649  struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
2650 
2651  if (unlikely(cb != NULL)) {
2652  do {
2653  nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
2654  cb->param);
2655  cb = cb->next;
2656  } while (cb != NULL);
2657  }
2658 #endif
2659 
2660  return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
2661 }
2662 
2670 };
2671 
2672 typedef void (*rte_eth_dev_cb_fn)(uint8_t port_id, \
2673  enum rte_eth_event_type event, void *cb_arg);
2694 int rte_eth_dev_callback_register(uint8_t port_id,
2695  enum rte_eth_event_type event,
2696  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
2697 
2715 int rte_eth_dev_callback_unregister(uint8_t port_id,
2716  enum rte_eth_event_type event,
2717  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
2718 
2732 void _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2733  enum rte_eth_event_type event);
2734 
2755 int rte_eth_dev_rx_intr_enable(uint8_t port_id, uint16_t queue_id);
2756 
2776 int rte_eth_dev_rx_intr_disable(uint8_t port_id, uint16_t queue_id);
2777 
2795 int rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data);
2796 
2818 int rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2819  int epfd, int op, void *data);
2820 
2833 int rte_eth_led_on(uint8_t port_id);
2834 
2847 int rte_eth_led_off(uint8_t port_id);
2848 
2861 int rte_eth_dev_flow_ctrl_get(uint8_t port_id,
2862  struct rte_eth_fc_conf *fc_conf);
2863 
2878 int rte_eth_dev_flow_ctrl_set(uint8_t port_id,
2879  struct rte_eth_fc_conf *fc_conf);
2880 
2896 int rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id,
2897  struct rte_eth_pfc_conf *pfc_conf);
2898 
2917 int rte_eth_dev_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
2918  uint32_t pool);
2919 
2933 int rte_eth_dev_mac_addr_remove(uint8_t port, struct ether_addr *mac_addr);
2934 
2948 int rte_eth_dev_default_mac_addr_set(uint8_t port, struct ether_addr *mac_addr);
2949 
2950 
2966 int rte_eth_dev_rss_reta_update(uint8_t port,
2967  struct rte_eth_rss_reta_entry64 *reta_conf,
2968  uint16_t reta_size);
2969 
2985 int rte_eth_dev_rss_reta_query(uint8_t port,
2986  struct rte_eth_rss_reta_entry64 *reta_conf,
2987  uint16_t reta_size);
2988 
3007 int rte_eth_dev_uc_hash_table_set(uint8_t port,struct ether_addr *addr,
3008  uint8_t on);
3009 
3027 int rte_eth_dev_uc_all_hash_table_set(uint8_t port,uint8_t on);
3028 
3051 int rte_eth_dev_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mode,
3052  uint8_t on);
3053 
3070 int
3071 rte_eth_dev_set_vf_tx(uint8_t port,uint16_t vf, uint8_t on);
3072 
3089 int
3090 rte_eth_dev_set_vf_rx(uint8_t port,uint16_t vf, uint8_t on);
3091 
3111 int
3112 rte_eth_dev_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
3113  uint64_t vf_mask,
3114  uint8_t vlan_on);
3115 
3137 int rte_eth_mirror_rule_set(uint8_t port_id,
3138  struct rte_eth_mirror_conf *mirror_conf,
3139  uint8_t rule_id,
3140  uint8_t on);
3141 
3155 int rte_eth_mirror_rule_reset(uint8_t port_id,
3156  uint8_t rule_id);
3157 
3173 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
3174  uint16_t tx_rate);
3175 
3193 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf,
3194  uint16_t tx_rate, uint64_t q_msk);
3195 
3207 int rte_eth_dev_bypass_init(uint8_t port);
3208 
3224 int rte_eth_dev_bypass_state_show(uint8_t port, uint32_t *state);
3225 
3241 int rte_eth_dev_bypass_state_set(uint8_t port, uint32_t *new_state);
3242 
3265 int rte_eth_dev_bypass_event_show(uint8_t port, uint32_t event, uint32_t *state);
3266 
3289 int rte_eth_dev_bypass_event_store(uint8_t port, uint32_t event, uint32_t state);
3290 
3311 int rte_eth_dev_wd_timeout_store(uint8_t port, uint32_t timeout);
3312 
3325 int rte_eth_dev_bypass_ver_show(uint8_t port, uint32_t *ver);
3326 
3347 int rte_eth_dev_bypass_wd_timeout_show(uint8_t port, uint32_t *wd_timeout);
3348 
3359 int rte_eth_dev_bypass_wd_reset(uint8_t port);
3360 
3374 int rte_eth_dev_rss_hash_update(uint8_t port_id,
3375  struct rte_eth_rss_conf *rss_conf);
3376 
3390 int
3391 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
3392  struct rte_eth_rss_conf *rss_conf);
3393 
3408 int
3409 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
3410  struct rte_eth_udp_tunnel *tunnel_udp);
3411 
3425 int
3426 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
3427  struct rte_eth_udp_tunnel *tunnel_udp);
3428 
3442 int rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type);
3443 
3462 int rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
3463  enum rte_filter_op filter_op, void *arg);
3464 
3477 int rte_eth_dev_get_dcb_info(uint8_t port_id,
3478  struct rte_eth_dcb_info *dcb_info);
3479 
3504 void *rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
3505  rte_rx_callback_fn fn, void *user_param);
3506 
3531 void *rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
3532  rte_tx_callback_fn fn, void *user_param);
3533 
3564 int rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
3565  struct rte_eth_rxtx_callback *user_cb);
3566 
3597 int rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
3598  struct rte_eth_rxtx_callback *user_cb);
3599 
3617 int rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3618  struct rte_eth_rxq_info *qinfo);
3619 
3637 int rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3638  struct rte_eth_txq_info *qinfo);
3639 
3640 /*
3641  * Retrieve number of available registers for access
3642  *
3643  * @param port_id
3644  * The port identifier of the Ethernet device.
3645  * @return
3646  * - (>=0) number of registers if successful.
3647  * - (-ENOTSUP) if hardware doesn't support.
3648  * - (-ENODEV) if *port_id* invalid.
3649  * - others depends on the specific operations implementation.
3650  */
3651 int rte_eth_dev_get_reg_length(uint8_t port_id);
3652 
3666 int rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info);
3667 
3679 int rte_eth_dev_get_eeprom_length(uint8_t port_id);
3680 
3695 int rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);
3696 
3711 int rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);
3712 
3730 int rte_eth_dev_set_mc_addr_list(uint8_t port_id,
3731  struct ether_addr *mc_addr_set,
3732  uint32_t nb_mc_addr);
3733 
3745 extern int rte_eth_timesync_enable(uint8_t port_id);
3746 
3758 extern int rte_eth_timesync_disable(uint8_t port_id);
3759 
3777 extern int rte_eth_timesync_read_rx_timestamp(uint8_t port_id,
3778  struct timespec *timestamp,
3779  uint32_t flags);
3780 
3795 extern int rte_eth_timesync_read_tx_timestamp(uint8_t port_id,
3796  struct timespec *timestamp);
3797 
3814 extern int rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta);
3815 
3830 extern int rte_eth_timesync_read_time(uint8_t port_id, struct timespec *time);
3831 
3849 extern int rte_eth_timesync_write_time(uint8_t port_id,
3850  const struct timespec *time);
3851 
3863 extern void rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev);
3864 
3865 
3885 const struct rte_memzone *
3886 rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
3887  uint16_t queue_id, size_t size,
3888  unsigned align, int socket_id);
3889 
3890 #ifdef __cplusplus
3891 }
3892 #endif
3893 
3894 #endif /* _RTE_ETHDEV_H_ */