DPDK 17.02.1
rte_ethdev.h — the generic Ethernet device (ethdev) API header
Generated documentation view of this file; original source line numbers are embedded in the lines below.
1 /*-
2  * BSD LICENSE
3  *
4  * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * * Redistributions of source code must retain the above copyright
12  * notice, this list of conditions and the following disclaimer.
13  * * Redistributions in binary form must reproduce the above copyright
14  * notice, this list of conditions and the following disclaimer in
15  * the documentation and/or other materials provided with the
16  * distribution.
17  * * Neither the name of Intel Corporation nor the names of its
18  * contributors may be used to endorse or promote products derived
19  * from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #ifndef _RTE_ETHDEV_H_
35 #define _RTE_ETHDEV_H_
36 
169 #ifdef __cplusplus
170 extern "C" {
171 #endif
172 
173 #include <stdint.h>
174 
175 #include <rte_dev.h>
176 
177 /* Use this macro to check if LRO API is supported */
178 #define RTE_ETHDEV_HAS_LRO_SUPPORT
179 
180 #include <rte_log.h>
181 #include <rte_interrupts.h>
182 #include <rte_pci.h>
183 #include <rte_dev.h>
184 #include <rte_devargs.h>
185 #include <rte_errno.h>
186 #include "rte_ether.h"
187 #include "rte_eth_ctrl.h"
188 #include "rte_dev_info.h"
189 
190 struct rte_mbuf;
191 
/* NOTE(review): body of struct rte_eth_stats — the opening
 * "struct rte_eth_stats {" line is omitted in this generated view. */
	uint64_t ipackets;  /**< Successfully received packets. */
	uint64_t opackets;  /**< Successfully transmitted packets. */
	uint64_t ibytes;    /**< Successfully received bytes. */
	uint64_t obytes;    /**< Successfully transmitted bytes. */
	uint64_t imissed;   /**< Packets dropped by HW before reaching SW (e.g. RX ring full). */
	uint64_t ierrors;   /**< Erroneous received packets. */
	uint64_t oerrors;   /**< Failed transmitted packets. */
	uint64_t rx_nombuf; /**< RX mbuf allocation failures. */
	/* Per-queue counters; only the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues are tracked. */
	uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]; /**< Per-queue RX packets. */
	uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]; /**< Per-queue TX packets. */
	uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];   /**< Per-queue RX bytes. */
	uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];   /**< Per-queue TX bytes. */
	uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];   /**< Per-queue RX errors. */
};
221 
/* Link-speed capability bitmap (one bit per speed). */
#define ETH_LINK_SPEED_AUTONEG (0 << 0) /**< Autonegotiate (all speeds). */
#define ETH_LINK_SPEED_FIXED (1 << 0) /**< Disable autoneg (fixed speed). */
#define ETH_LINK_SPEED_10M_HD (1 << 1) /**< 10 Mbps half-duplex. */
#define ETH_LINK_SPEED_10M (1 << 2) /**< 10 Mbps full-duplex. */
#define ETH_LINK_SPEED_100M_HD (1 << 3) /**< 100 Mbps half-duplex. */
#define ETH_LINK_SPEED_100M (1 << 4) /**< 100 Mbps full-duplex. */
#define ETH_LINK_SPEED_1G (1 << 5) /**< 1 Gbps. */
#define ETH_LINK_SPEED_2_5G (1 << 6) /**< 2.5 Gbps. */
#define ETH_LINK_SPEED_5G (1 << 7) /**< 5 Gbps. */
#define ETH_LINK_SPEED_10G (1 << 8) /**< 10 Gbps. */
#define ETH_LINK_SPEED_20G (1 << 9) /**< 20 Gbps. */
#define ETH_LINK_SPEED_25G (1 << 10) /**< 25 Gbps. */
#define ETH_LINK_SPEED_40G (1 << 11) /**< 40 Gbps. */
#define ETH_LINK_SPEED_50G (1 << 12) /**< 50 Gbps. */
#define ETH_LINK_SPEED_56G (1 << 13) /**< 56 Gbps. */
#define ETH_LINK_SPEED_100G (1 << 14) /**< 100 Gbps. */
/* Numeric link speeds, in Mbps (used in rte_eth_link.link_speed). */
#define ETH_SPEED_NUM_NONE 0 /**< Not defined / link speed unknown. */
#define ETH_SPEED_NUM_10M 10
#define ETH_SPEED_NUM_100M 100
#define ETH_SPEED_NUM_1G 1000
#define ETH_SPEED_NUM_2_5G 2500
#define ETH_SPEED_NUM_5G 5000
#define ETH_SPEED_NUM_10G 10000
#define ETH_SPEED_NUM_20G 20000
#define ETH_SPEED_NUM_25G 25000
#define ETH_SPEED_NUM_40G 40000
#define ETH_SPEED_NUM_50G 50000
#define ETH_SPEED_NUM_56G 56000
#define ETH_SPEED_NUM_100G 100000
/**
 * Link state of an Ethernet device: numeric speed in Mbps plus
 * duplex / autoneg / status bit-fields (see the utility constants
 * that follow). Aligned to 8 bytes — presumably so the whole struct
 * can be read/written as a single 64-bit word; confirm with callers.
 */
__extension__
struct rte_eth_link {
	uint32_t link_speed; /**< ETH_SPEED_NUM_* (Mbps). */
	uint16_t link_duplex : 1; /**< ETH_LINK_HALF_DUPLEX / ETH_LINK_FULL_DUPLEX. */
	uint16_t link_autoneg : 1; /**< ETH_LINK_FIXED / ETH_LINK_AUTONEG. */
	uint16_t link_status : 1; /**< ETH_LINK_DOWN / ETH_LINK_UP. */
} __attribute__((aligned(8)));
/* Utility constants for the rte_eth_link bit-fields above. */
#define ETH_LINK_HALF_DUPLEX 0 /**< link_duplex: half-duplex connection. */
#define ETH_LINK_FULL_DUPLEX 1 /**< link_duplex: full-duplex connection. */
#define ETH_LINK_DOWN 0 /**< link_status: link is down. */
#define ETH_LINK_UP 1 /**< link_status: link is up. */
#define ETH_LINK_FIXED 0 /**< link_autoneg: speed was fixed. */
#define ETH_LINK_AUTONEG 1 /**< link_autoneg: speed was negotiated. */
/**
 * Prefetch/host/write-back threshold registers of an RX or TX ring.
 * Valid ranges are NIC-specific — consult the PMD documentation.
 */
struct rte_eth_thresh {
	uint8_t pthresh; /**< Ring prefetch threshold. */
	uint8_t hthresh; /**< Ring host threshold. */
	uint8_t wthresh; /**< Ring writeback threshold. */
};
287 
/* Flags from which the enum rte_eth_rx_mq_mode values are composed. */
#define ETH_MQ_RX_RSS_FLAG 0x1 /**< RX multi-queue mode uses RSS. */
#define ETH_MQ_RX_DCB_FLAG 0x2 /**< RX multi-queue mode uses DCB. */
#define ETH_MQ_RX_VMDQ_FLAG 0x4 /**< RX multi-queue mode uses VMDq. */
294 
302 
306  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
308  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
309 
311  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
313  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
315  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
318  ETH_MQ_RX_VMDQ_FLAG,
319 };
320 
324 #define ETH_RSS ETH_MQ_RX_RSS
325 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
326 #define ETH_DCB_RX ETH_MQ_RX_DCB
327 
337 };
338 
342 #define ETH_DCB_NONE ETH_MQ_TX_NONE
343 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
344 #define ETH_DCB_TX ETH_MQ_TX_DCB
345 
352  uint32_t max_rx_pkt_len;
353  uint16_t split_hdr_size;
354  __extension__
355  uint16_t header_split : 1,
356  hw_ip_checksum : 1,
357  hw_vlan_filter : 1,
358  hw_vlan_strip : 1,
359  hw_vlan_extend : 1,
360  jumbo_frame : 1,
361  hw_strip_crc : 1,
362  enable_scatter : 1,
363  enable_lro : 1;
364 };
365 
371  ETH_VLAN_TYPE_UNKNOWN = 0,
374  ETH_VLAN_TYPE_MAX,
375 };
376 
395  uint8_t *rss_key;
396  uint8_t rss_key_len;
397  uint64_t rss_hf;
398 };
399 
400 /*
401  * The RSS offload types are defined based on flow types which are defined
402  * in rte_eth_ctrl.h. Different NIC hardwares may support different RSS offload
403  * types. The supported flow types or RSS offload types can be queried by
404  * rte_eth_dev_info_get().
405  */
/* One RSS offload bit per RTE_ETH_FLOW_* flow type (see rte_eth_ctrl.h). */
#define ETH_RSS_IPV4 (1ULL << RTE_ETH_FLOW_IPV4)
#define ETH_RSS_FRAG_IPV4 (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
#define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
#define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
#define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
#define ETH_RSS_IPV6 (1ULL << RTE_ETH_FLOW_IPV6)
#define ETH_RSS_FRAG_IPV6 (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
#define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
#define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
#define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
#define ETH_RSS_L2_PAYLOAD (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
#define ETH_RSS_IPV6_EX (1ULL << RTE_ETH_FLOW_IPV6_EX)
#define ETH_RSS_IPV6_TCP_EX (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
#define ETH_RSS_IPV6_UDP_EX (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
#define ETH_RSS_PORT (1ULL << RTE_ETH_FLOW_PORT)
#define ETH_RSS_VXLAN (1ULL << RTE_ETH_FLOW_VXLAN)
#define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
#define ETH_RSS_NVGRE (1ULL << RTE_ETH_FLOW_NVGRE)

/* All IP-level flow types (no L4 discrimination). */
#define ETH_RSS_IP ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_IPV6_EX)

/* All UDP flow types. */
#define ETH_RSS_UDP ( \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_IPV6_UDP_EX)

/* All TCP flow types. */
#define ETH_RSS_TCP ( \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_IPV6_TCP_EX)

/* All SCTP flow types. */
#define ETH_RSS_SCTP ( \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV6_SCTP)

/* All tunnel flow types. */
#define ETH_RSS_TUNNEL ( \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

/* Mask of every RSS offload bit defined above. */
#define ETH_RSS_PROTO_MASK ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_NONFRAG_IPV6_SCTP | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_L2_PAYLOAD | \
	ETH_RSS_IPV6_EX | \
	ETH_RSS_IPV6_TCP_EX | \
	ETH_RSS_IPV6_UDP_EX | \
	ETH_RSS_PORT | \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)
478 
479 /*
480  * Definitions used for redirection table entry size.
481  * Some RSS RETA sizes may not be supported by some drivers, check the
482  * documentation or the description of relevant functions for more details.
483  */
/* Redirection table sizes; entries are grouped into
 * rte_eth_rss_reta_entry64 chunks of RTE_RETA_GROUP_SIZE entries. */
#define ETH_RSS_RETA_SIZE_64 64
#define ETH_RSS_RETA_SIZE_128 128
#define ETH_RSS_RETA_SIZE_256 256
#define ETH_RSS_RETA_SIZE_512 512
#define RTE_RETA_GROUP_SIZE 64

/* Definitions used for VMDQ and DCB functionality */
#define ETH_VMDQ_MAX_VLAN_FILTERS 64 /**< Max VLAN filters in VMDq. */
#define ETH_DCB_NUM_USER_PRIORITIES 8 /**< Max user priorities in DCB. */
#define ETH_VMDQ_DCB_NUM_QUEUES 128 /**< Max queues in VMDq+DCB. */
#define ETH_DCB_NUM_QUEUES 128 /**< Max queues in DCB. */
/* DCB capability defines */
#define ETH_DCB_PG_SUPPORT 0x00000001 /**< Priority group support. */
#define ETH_DCB_PFC_SUPPORT 0x00000002 /**< Priority flow control support. */
/* Definitions used for VLAN Offload functionality */
#define ETH_VLAN_STRIP_OFFLOAD 0x0001 /**< VLAN strip on/off. */
#define ETH_VLAN_FILTER_OFFLOAD 0x0002 /**< VLAN filter on/off. */
#define ETH_VLAN_EXTEND_OFFLOAD 0x0004 /**< VLAN extend on/off. */
/* Definitions used for mask VLAN setting */
#define ETH_VLAN_STRIP_MASK 0x0001 /**< Bit mask for strip setting. */
#define ETH_VLAN_FILTER_MASK 0x0002 /**< Bit mask for filter setting. */
#define ETH_VLAN_EXTEND_MASK 0x0004 /**< Bit mask for extend setting. */
#define ETH_VLAN_ID_MAX 0x0FFF /**< VLAN IDs are 12 bits wide. */
/* Definitions used for receive MAC address */
#define ETH_NUM_RECEIVE_MAC_ADDR 128 /**< Max MAC addresses per port. */
/* Definitions used for unicast hash */
#define ETH_VMDQ_NUM_UC_HASH_ARRAY 128 /**< Unicast hash array size. */
/* Definitions used for VMDQ pool rx mode setting */
#define ETH_VMDQ_ACCEPT_UNTAG 0x0001 /**< Accept untagged packets. */
#define ETH_VMDQ_ACCEPT_HASH_MC 0x0002 /**< Accept multicast matching MC hash. */
#define ETH_VMDQ_ACCEPT_HASH_UC 0x0004 /**< Accept unicast matching UC hash. */
#define ETH_VMDQ_ACCEPT_BROADCAST 0x0008 /**< Accept broadcast packets. */
#define ETH_VMDQ_ACCEPT_MULTICAST 0x0010 /**< Accept unmatched multicast. */
/* Traffic-mirroring rule limits and rule-type flags. */
#define ETH_MIRROR_MAX_VLANS 64 /**< Max VLANs per mirror rule. */

#define ETH_MIRROR_VIRTUAL_POOL_UP 0x01 /**< Mirror virtual-pool uplink traffic. */
#define ETH_MIRROR_UPLINK_PORT 0x02 /**< Mirror uplink port traffic. */
#define ETH_MIRROR_DOWNLINK_PORT 0x04 /**< Mirror downlink port traffic. */
#define ETH_MIRROR_VLAN 0x08 /**< Mirror traffic of the listed VLANs. */
#define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10 /**< Mirror virtual-pool downlink traffic. */
/** Set of VLAN IDs selected for traffic mirroring. */
struct rte_eth_vlan_mirror {
	uint64_t vlan_mask; /**< Bit i set => vlan_id[i] below is valid. */
	uint16_t vlan_id[ETH_MIRROR_MAX_VLANS]; /**< VLAN IDs to mirror. */
};
541 
546  uint8_t rule_type;
547  uint8_t dst_pool;
548  uint64_t pool_mask;
551 };
552 
560  uint64_t mask;
562  uint16_t reta[RTE_RETA_GROUP_SIZE];
564 };
565 
571  ETH_4_TCS = 4,
573 };
574 
584 };
585 
586 /* This structure may be extended in future. */
/** DCB receive configuration. */
struct rte_eth_dcb_rx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Number of traffic classes. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Traffic class of each user priority. */
};
592 
/** VMDq + DCB transmit configuration. */
struct rte_eth_vmdq_dcb_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq pools. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Traffic class of each user priority. */
};
598 
/** DCB transmit configuration. */
struct rte_eth_dcb_tx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Number of traffic classes. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Traffic class of each user priority. */
};
604 
/** VMDq transmit configuration. */
struct rte_eth_vmdq_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq pools. */
};
608 
623  uint8_t default_pool;
624  uint8_t nb_pool_maps;
625  struct {
626  uint16_t vlan_id;
627  uint64_t pools;
631 };
632 
/**
 * VMDq receive configuration: distributes incoming packets to pools,
 * keyed on their VLAN tag via pool_map[].
 */
struct rte_eth_vmdq_rx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq pools. */
	uint8_t enable_default_pool; /**< Send unmatched packets to default_pool. */
	uint8_t default_pool; /**< Pool index used when enable_default_pool is set. */
	uint8_t enable_loop_back; /**< Enable VT loop back. */
	uint8_t nb_pool_maps; /**< Number of valid entries in pool_map[]. */
	uint32_t rx_mode; /**< ETH_VMDQ_ACCEPT_* flags. */
	struct {
		uint16_t vlan_id; /**< VLAN ID to match. */
		uint64_t pools;   /**< Bitmask of pools receiving this VLAN. */
	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VLAN-to-pool map. */
};
645 
652  /* For i40e specifically */
653  uint16_t pvid;
654  __extension__
655  uint8_t hw_vlan_reject_tagged : 1,
661 };
662 
668  uint16_t rx_free_thresh;
669  uint8_t rx_drop_en;
671 };
672 
/* txq_flags bits for rte_eth_txconf — each bit tells the PMD that a
 * feature will NOT be used, allowing it to pick a faster TX path. */
#define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001 /**< All mbufs are single-segment. */
#define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002 /**< Mbuf refcounts can be ignored. */
#define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004 /**< All mbufs come from one mempool. */
#define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100 /**< Disable VLAN insert offload. */
#define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200 /**< Disable SCTP checksum offload. */
#define ETH_TXQ_FLAGS_NOXSUMUDP 0x0400 /**< Disable UDP checksum offload. */
#define ETH_TXQ_FLAGS_NOXSUMTCP 0x0800 /**< Disable TCP checksum offload. */
/* All TX offloads disabled. */
#define ETH_TXQ_FLAGS_NOOFFLOADS \
	(ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
	 ETH_TXQ_FLAGS_NOXSUMUDP | ETH_TXQ_FLAGS_NOXSUMTCP)
/* All checksum offloads disabled. */
#define ETH_TXQ_FLAGS_NOXSUMS \
	(ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
	 ETH_TXQ_FLAGS_NOXSUMTCP)
686 
691  uint16_t tx_rs_thresh;
692  uint16_t tx_free_thresh;
695  uint32_t txq_flags;
697 };
698 
703  uint16_t nb_max;
704  uint16_t nb_min;
705  uint16_t nb_align;
715  uint16_t nb_seg_max;
716 
728  uint16_t nb_mtu_seg_max;
729 };
730 
739 };
740 
747  uint32_t high_water;
748  uint32_t low_water;
749  uint16_t pause_time;
750  uint16_t send_xon;
753  uint8_t autoneg;
754 };
755 
763  uint8_t priority;
764 };
765 
774 };
775 
783 };
784 
796  uint8_t drop_queue;
797  struct rte_eth_fdir_masks mask;
800 };
801 
810  uint16_t udp_port;
811  uint8_t prot_type;
812 };
813 
819  uint16_t lsc;
821  uint16_t rxq;
822 };
823 
829 struct rte_eth_conf {
830  uint32_t link_speeds;
839  uint32_t lpbk_mode;
844  struct {
848  struct rte_eth_dcb_rx_conf dcb_rx_conf;
850  struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
852  } rx_adv_conf;
853  union {
854  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
856  struct rte_eth_dcb_tx_conf dcb_tx_conf;
858  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
860  } tx_adv_conf;
866 };
867 
/* RX offload capability bits (reported in rte_eth_dev_info.rx_offload_capa). */
#define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
#define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
#define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
#define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
#define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
#define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
#define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080

/* TX offload capability bits (reported in rte_eth_dev_info.tx_offload_capa). */
#define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
#define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
#define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
#define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
#define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
#define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
#define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
903 
909  const char *driver_name;
910  unsigned int if_index;
912  uint32_t min_rx_bufsize;
913  uint32_t max_rx_pktlen;
914  uint16_t max_rx_queues;
915  uint16_t max_tx_queues;
916  uint32_t max_mac_addrs;
917  uint32_t max_hash_mac_addrs;
919  uint16_t max_vfs;
920  uint16_t max_vmdq_pools;
921  uint32_t rx_offload_capa;
922  uint32_t tx_offload_capa;
923  uint16_t reta_size;
925  uint8_t hash_key_size;
930  uint16_t vmdq_queue_base;
931  uint16_t vmdq_queue_num;
932  uint16_t vmdq_pool_base;
935  uint32_t speed_capa;
937  uint16_t nb_rx_queues;
938  uint16_t nb_tx_queues;
939 };
940 
946  struct rte_mempool *mp;
948  uint8_t scattered_rx;
949  uint16_t nb_desc;
951 
958  uint16_t nb_desc;
960 
962 #define RTE_ETH_XSTATS_NAME_SIZE 64
963 
974  uint64_t id;
975  uint64_t value;
976 };
977 
987 };
988 
989 #define ETH_DCB_NUM_TCS 8
990 #define ETH_MAX_VMDQ_POOL 64
991 
998  struct {
999  uint8_t base;
1000  uint8_t nb_queue;
1001  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1003  struct {
1004  uint8_t base;
1005  uint8_t nb_queue;
1006  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1007 };
1008 
1014  uint8_t nb_tcs;
1016  uint8_t tc_bws[ETH_DCB_NUM_TCS];
1019 };
1020 
/* Queue states stored in rte_eth_dev_data.rx_queue_state / tx_queue_state. */
#define RTE_ETH_QUEUE_STATE_STOPPED 0
#define RTE_ETH_QUEUE_STATE_STARTED 1

struct rte_eth_dev;

/* List head type for the per-device chain of user event callbacks. */
struct rte_eth_dev_callback;
TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
1032 
1033 
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Debug build: forward the message to rte_pmd_debug_trace together
 * with the calling function's name. */
#define RTE_PMD_DEBUG_TRACE(...) \
	rte_pmd_debug_trace(__func__, __VA_ARGS__)
#else
/* Non-debug build: expands to nothing. */
#define RTE_PMD_DEBUG_TRACE(...)
#endif
1040 
1041 
/* Macros to check for valid port */
/* Return `retval` from the calling function when port_id is not a
 * valid attached port. */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return retval; \
	} \
} while (0)
1049 
/* void-function variant: plain `return` when port_id is invalid. */
#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return; \
	} \
} while (0)
1056 
/* Bit masks selecting which L2 tunnel property to configure
 * (see eth_l2_tunnel_offload_set_t's `mask` argument). */
#define ETH_L2_TUNNEL_ENABLE_MASK 0x00000001 /**< L2 tunnel enable. */

#define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002 /**< L2 tunnel insertion. */

#define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004 /**< L2 tunnel stripping. */

#define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008 /**< L2 tunnel forwarding. */
1069 
1070 /*
1071  * Definitions of all functions exported by an Ethernet driver through the
1072  * the generic structure of type *eth_dev_ops* supplied in the *rte_eth_dev*
1073  * structure associated with an Ethernet device.
1074  */
1075 
/*
 * Prototypes of the callbacks a Poll Mode Driver exports through
 * struct eth_dev_ops (defined further below). Each takes the driver's
 * struct rte_eth_dev as its first argument.
 */
/* -- Device lifecycle -- */
typedef int (*eth_dev_configure_t)(struct rte_eth_dev *dev);
typedef int (*eth_dev_start_t)(struct rte_eth_dev *dev);
typedef void (*eth_dev_stop_t)(struct rte_eth_dev *dev);
typedef int (*eth_dev_set_link_up_t)(struct rte_eth_dev *dev);
typedef int (*eth_dev_set_link_down_t)(struct rte_eth_dev *dev);
typedef void (*eth_dev_close_t)(struct rte_eth_dev *dev);
/* -- RX filtering modes -- */
typedef void (*eth_promiscuous_enable_t)(struct rte_eth_dev *dev);
typedef void (*eth_promiscuous_disable_t)(struct rte_eth_dev *dev);
typedef void (*eth_allmulticast_enable_t)(struct rte_eth_dev *dev);
typedef void (*eth_allmulticast_disable_t)(struct rte_eth_dev *dev);
/* -- Link state and statistics -- */
typedef int (*eth_link_update_t)(struct rte_eth_dev *dev,
				 int wait_to_complete);
typedef void (*eth_stats_get_t)(struct rte_eth_dev *dev,
				struct rte_eth_stats *igb_stats);
typedef void (*eth_stats_reset_t)(struct rte_eth_dev *dev);
typedef int (*eth_xstats_get_t)(struct rte_eth_dev *dev,
				struct rte_eth_xstat *stats, unsigned n);
typedef void (*eth_xstats_reset_t)(struct rte_eth_dev *dev);
typedef int (*eth_xstats_get_names_t)(struct rte_eth_dev *dev,
				      struct rte_eth_xstat_name *xstats_names, unsigned size);
typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
					     uint16_t queue_id,
					     uint8_t stat_idx,
					     uint8_t is_rx);
/* -- Device and queue information -- */
typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
				    struct rte_eth_dev_info *dev_info);
typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(struct rte_eth_dev *dev);
/* -- Queue start/stop/setup/teardown -- */
typedef int (*eth_queue_start_t)(struct rte_eth_dev *dev,
				 uint16_t queue_id);
typedef int (*eth_queue_stop_t)(struct rte_eth_dev *dev,
				uint16_t queue_id);
typedef int (*eth_rx_queue_setup_t)(struct rte_eth_dev *dev,
				    uint16_t rx_queue_id,
				    uint16_t nb_rx_desc,
				    unsigned int socket_id,
				    const struct rte_eth_rxconf *rx_conf,
				    struct rte_mempool *mb_pool);
typedef int (*eth_tx_queue_setup_t)(struct rte_eth_dev *dev,
				    uint16_t tx_queue_id,
				    uint16_t nb_tx_desc,
				    unsigned int socket_id,
				    const struct rte_eth_txconf *tx_conf);
typedef int (*eth_rx_enable_intr_t)(struct rte_eth_dev *dev,
				    uint16_t rx_queue_id);
typedef int (*eth_rx_disable_intr_t)(struct rte_eth_dev *dev,
				     uint16_t rx_queue_id);
typedef void (*eth_queue_release_t)(void *queue);
typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
					 uint16_t rx_queue_id);
typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
typedef int (*eth_fw_version_get_t)(struct rte_eth_dev *dev,
				    char *fw_version, size_t fw_size);
typedef void (*eth_rxq_info_get_t)(struct rte_eth_dev *dev,
				   uint16_t rx_queue_id, struct rte_eth_rxq_info *qinfo);

typedef void (*eth_txq_info_get_t)(struct rte_eth_dev *dev,
				   uint16_t tx_queue_id, struct rte_eth_txq_info *qinfo);

/* -- MTU and VLAN handling -- */
typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);
typedef int (*vlan_filter_set_t)(struct rte_eth_dev *dev,
				 uint16_t vlan_id,
				 int on);
typedef int (*vlan_tpid_set_t)(struct rte_eth_dev *dev,
			       enum rte_vlan_type type, uint16_t tpid);
typedef void (*vlan_offload_set_t)(struct rte_eth_dev *dev, int mask);
typedef int (*vlan_pvid_set_t)(struct rte_eth_dev *dev,
			       uint16_t vlan_id,
			       int on);
typedef void (*vlan_strip_queue_set_t)(struct rte_eth_dev *dev,
				       uint16_t rx_queue_id,
				       int on);
/* -- Fast-path burst functions (per-queue, per packet batch) -- */
typedef uint16_t (*eth_rx_burst_t)(void *rxq,
				   struct rte_mbuf **rx_pkts,
				   uint16_t nb_pkts);
typedef uint16_t (*eth_tx_burst_t)(void *txq,
				   struct rte_mbuf **tx_pkts,
				   uint16_t nb_pkts);
typedef uint16_t (*eth_tx_prep_t)(void *txq,
				  struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
/* -- Flow control -- */
typedef int (*flow_ctrl_get_t)(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
typedef int (*flow_ctrl_set_t)(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
typedef int (*priority_flow_ctrl_set_t)(struct rte_eth_dev *dev,
					struct rte_eth_pfc_conf *pfc_conf);
/* -- RSS redirection table and hash configuration -- */
typedef int (*reta_update_t)(struct rte_eth_dev *dev,
			     struct rte_eth_rss_reta_entry64 *reta_conf,
			     uint16_t reta_size);
typedef int (*reta_query_t)(struct rte_eth_dev *dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size);
typedef int (*rss_hash_update_t)(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
typedef int (*rss_hash_conf_get_t)(struct rte_eth_dev *dev,
				   struct rte_eth_rss_conf *rss_conf);
/* -- LEDs and MAC address management -- */
typedef int (*eth_dev_led_on_t)(struct rte_eth_dev *dev);
typedef int (*eth_dev_led_off_t)(struct rte_eth_dev *dev);
typedef void (*eth_mac_addr_remove_t)(struct rte_eth_dev *dev, uint32_t index);
typedef void (*eth_mac_addr_add_t)(struct rte_eth_dev *dev,
				   struct ether_addr *mac_addr,
				   uint32_t index,
				   uint32_t vmdq);
typedef void (*eth_mac_addr_set_t)(struct rte_eth_dev *dev,
				   struct ether_addr *mac_addr);
typedef int (*eth_uc_hash_table_set_t)(struct rte_eth_dev *dev,
				       struct ether_addr *mac_addr,
				       uint8_t on);
typedef int (*eth_uc_all_hash_table_set_t)(struct rte_eth_dev *dev,
					   uint8_t on);
/* -- Rate limiting, mirroring, tunnel ports, multicast -- */
typedef int (*eth_set_queue_rate_limit_t)(struct rte_eth_dev *dev,
					  uint16_t queue_idx,
					  uint16_t tx_rate);
typedef int (*eth_mirror_rule_set_t)(struct rte_eth_dev *dev,
				     struct rte_eth_mirror_conf *mirror_conf,
				     uint8_t rule_id,
				     uint8_t on);
typedef int (*eth_mirror_rule_reset_t)(struct rte_eth_dev *dev,
				       uint8_t rule_id);
typedef int (*eth_udp_tunnel_port_add_t)(struct rte_eth_dev *dev,
					 struct rte_eth_udp_tunnel *tunnel_udp);
typedef int (*eth_udp_tunnel_port_del_t)(struct rte_eth_dev *dev,
					 struct rte_eth_udp_tunnel *tunnel_udp);
typedef int (*eth_set_mc_addr_list_t)(struct rte_eth_dev *dev,
				      struct ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
/* -- Time synchronization (device clock and RX/TX timestamps) -- */
typedef int (*eth_timesync_enable_t)(struct rte_eth_dev *dev);
typedef int (*eth_timesync_disable_t)(struct rte_eth_dev *dev);
typedef int (*eth_timesync_read_rx_timestamp_t)(struct rte_eth_dev *dev,
						struct timespec *timestamp,
						uint32_t flags);
typedef int (*eth_timesync_read_tx_timestamp_t)(struct rte_eth_dev *dev,
						struct timespec *timestamp);
/* NOTE(review): the int64_t delta parameter is unnamed in the original. */
typedef int (*eth_timesync_adjust_time)(struct rte_eth_dev *dev, int64_t);
typedef int (*eth_timesync_read_time)(struct rte_eth_dev *dev,
				      struct timespec *timestamp);
typedef int (*eth_timesync_write_time)(struct rte_eth_dev *dev,
				       const struct timespec *timestamp);
/* -- Register and EEPROM access -- */
typedef int (*eth_get_reg_t)(struct rte_eth_dev *dev,
			     struct rte_dev_reg_info *info);
typedef int (*eth_get_eeprom_length_t)(struct rte_eth_dev *dev);
typedef int (*eth_get_eeprom_t)(struct rte_eth_dev *dev,
				struct rte_dev_eeprom_info *info);
typedef int (*eth_set_eeprom_t)(struct rte_eth_dev *dev,
				struct rte_dev_eeprom_info *info);
/* -- L2 tunnel configuration -- */
typedef int (*eth_l2_tunnel_eth_type_conf_t)
	(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
typedef int (*eth_l2_tunnel_offload_set_t)
	(struct rte_eth_dev *dev,
	 struct rte_eth_l2_tunnel_conf *l2_tunnel,
	 uint32_t mask,
	 uint8_t en);
1370 #ifdef RTE_NIC_BYPASS
1371 
/* Modes of the NIC hardware bypass function. */
enum {
	RTE_BYPASS_MODE_NONE, /**< Bypass feature not in use. */
	RTE_BYPASS_MODE_NORMAL,
	RTE_BYPASS_MODE_BYPASS,
	RTE_BYPASS_MODE_ISOLATE,
	RTE_BYPASS_MODE_NUM, /**< Number of modes; not itself a mode. */
};

/* True when x names a usable bypass mode (NONE excluded). */
#define RTE_BYPASS_MODE_VALID(x) \
	((x) > RTE_BYPASS_MODE_NONE && (x) < RTE_BYPASS_MODE_NUM)
1382 
/*
 * Bypass events monitored by the watchdog. RTE_BYPASS_EVENT_OS_ON is an
 * alias of RTE_BYPASS_EVENT_START; RTE_BYPASS_EVENT_NUM is the count of
 * distinct values and is not itself an event.
 */
enum {
	RTE_BYPASS_EVENT_NONE,
	RTE_BYPASS_EVENT_START,
	RTE_BYPASS_EVENT_OS_ON = RTE_BYPASS_EVENT_START,
	RTE_BYPASS_EVENT_POWER_ON,
	RTE_BYPASS_EVENT_OS_OFF,
	RTE_BYPASS_EVENT_POWER_OFF,
	RTE_BYPASS_EVENT_TIMEOUT,
	RTE_BYPASS_EVENT_NUM
};

/*
 * True when x names a usable bypass event (NONE excluded).
 * Fix: the upper bound must be RTE_BYPASS_EVENT_NUM (6), not
 * RTE_BYPASS_MODE_NUM (4) as before — the old bound wrongly rejected
 * RTE_BYPASS_EVENT_POWER_OFF (4) and RTE_BYPASS_EVENT_TIMEOUT (5).
 */
#define RTE_BYPASS_EVENT_VALID(x) \
	((x) > RTE_BYPASS_EVENT_NONE && (x) < RTE_BYPASS_EVENT_NUM)
1396 
/* Watchdog timeout settings for the bypass function. */
enum {
	RTE_BYPASS_TMT_OFF, /* timeout disabled. */
	RTE_BYPASS_TMT_1_5_SEC, /* timeout for 1.5 seconds */
	RTE_BYPASS_TMT_2_SEC, /* timeout for 2 seconds */
	RTE_BYPASS_TMT_3_SEC, /* timeout for 3 seconds */
	RTE_BYPASS_TMT_4_SEC, /* timeout for 4 seconds */
	RTE_BYPASS_TMT_8_SEC, /* timeout for 8 seconds */
	RTE_BYPASS_TMT_16_SEC, /* timeout for 16 seconds */
	RTE_BYPASS_TMT_32_SEC, /* timeout for 32 seconds */
	RTE_BYPASS_TMT_NUM /* number of timeout settings; not a setting. */
};

/* True when x is OFF or one of the defined timeout values. */
#define RTE_BYPASS_TMT_VALID(x) \
	((x) == RTE_BYPASS_TMT_OFF || \
	((x) > RTE_BYPASS_TMT_OFF && (x) < RTE_BYPASS_TMT_NUM))
1412 
/* Bypass driver callback prototypes (slots in eth_dev_ops, below). */
typedef void (*bypass_init_t)(struct rte_eth_dev *dev);
typedef int32_t (*bypass_state_set_t)(struct rte_eth_dev *dev, uint32_t *new_state);
typedef int32_t (*bypass_state_show_t)(struct rte_eth_dev *dev, uint32_t *state);
typedef int32_t (*bypass_event_set_t)(struct rte_eth_dev *dev, uint32_t state, uint32_t event);
typedef int32_t (*bypass_event_show_t)(struct rte_eth_dev *dev, uint32_t event_shift, uint32_t *event);
typedef int32_t (*bypass_wd_timeout_set_t)(struct rte_eth_dev *dev, uint32_t timeout);
typedef int32_t (*bypass_wd_timeout_show_t)(struct rte_eth_dev *dev, uint32_t *wd_timeout);
typedef int32_t (*bypass_ver_show_t)(struct rte_eth_dev *dev, uint32_t *ver);
typedef int32_t (*bypass_wd_reset_t)(struct rte_eth_dev *dev);
1422 #endif
1423 
/* Generic filter-control callback: dispatches filter_op on filter_type
 * with an operation-specific argument. */
typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
				 enum rte_filter_type filter_type,
				 enum rte_filter_op filter_op,
				 void *arg);
/* Retrieve the device's current DCB configuration. */
typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
				struct rte_eth_dcb_info *dcb_info);
/**
 * Table of driver callbacks implementing the generic ethdev API for one
 * device. Filled in by the PMD; invoked by the rte_eth_* wrappers.
 * NOTE(review): field order is part of the driver ABI — do not reorder.
 */
struct eth_dev_ops {
	eth_dev_configure_t dev_configure; /**< Configure device. */
	eth_dev_start_t dev_start; /**< Start device. */
	eth_dev_stop_t dev_stop; /**< Stop device. */
	eth_dev_set_link_up_t dev_set_link_up; /**< Device link up. */
	eth_dev_set_link_down_t dev_set_link_down; /**< Device link down. */
	eth_dev_close_t dev_close; /**< Close device. */
	eth_link_update_t link_update; /**< Get device link state. */
	eth_promiscuous_enable_t promiscuous_enable; /**< Promiscuous ON. */
	eth_promiscuous_disable_t promiscuous_disable; /**< Promiscuous OFF. */
	eth_allmulticast_enable_t allmulticast_enable; /**< RX multicast ON. */
	eth_allmulticast_disable_t allmulticast_disable; /**< RX multicast OFF. */
	eth_mac_addr_remove_t mac_addr_remove; /**< Remove MAC address. */
	eth_mac_addr_add_t mac_addr_add; /**< Add a MAC address. */
	eth_mac_addr_set_t mac_addr_set; /**< Set a MAC address. */
	eth_set_mc_addr_list_t set_mc_addr_list; /**< Set list of multicast addresses. */
	mtu_set_t mtu_set; /**< Set MTU. */
	eth_stats_get_t stats_get; /**< Get generic device statistics. */
	eth_stats_reset_t stats_reset; /**< Reset generic device statistics. */
	eth_xstats_get_t xstats_get; /**< Get extended device statistics. */
	eth_xstats_reset_t xstats_reset; /**< Reset extended device statistics. */
	eth_xstats_get_names_t xstats_get_names; /**< Get names of extended statistics. */
	eth_queue_stats_mapping_set_t queue_stats_mapping_set; /**< Map a queue to a stat counter. */
	eth_dev_infos_get_t dev_infos_get; /**< Get device info. */
	eth_rxq_info_get_t rxq_info_get; /**< Retrieve RX queue information. */
	eth_txq_info_get_t txq_info_get; /**< Retrieve TX queue information. */
	eth_fw_version_get_t fw_version_get; /**< Get firmware version. */
	eth_dev_supported_ptypes_get_t dev_supported_ptypes_get; /**< Get supported packet types. */
	vlan_filter_set_t vlan_filter_set; /**< Filter VLAN setup. */
	vlan_tpid_set_t vlan_tpid_set; /**< Outer/inner VLAN TPID setup. */
	vlan_strip_queue_set_t vlan_strip_queue_set; /**< VLAN stripping on a queue. */
	vlan_offload_set_t vlan_offload_set; /**< Set VLAN offload. */
	vlan_pvid_set_t vlan_pvid_set; /**< Set port-based VLAN ID. */
	eth_queue_start_t rx_queue_start; /**< Start an RX queue. */
	eth_queue_stop_t rx_queue_stop; /**< Stop an RX queue. */
	eth_queue_start_t tx_queue_start; /**< Start a TX queue. */
	eth_queue_stop_t tx_queue_stop; /**< Stop a TX queue. */
	eth_rx_queue_setup_t rx_queue_setup; /**< Set up an RX queue. */
	eth_queue_release_t rx_queue_release; /**< Release an RX queue. */
	eth_rx_queue_count_t rx_queue_count; /**< Get count of RX queue descriptors. */
	eth_rx_descriptor_done_t rx_descriptor_done; /**< Check RX descriptor status. */
	eth_rx_enable_intr_t rx_queue_intr_enable; /**< Enable RX queue interrupt. */
	eth_rx_disable_intr_t rx_queue_intr_disable; /**< Disable RX queue interrupt. */
	eth_tx_queue_setup_t tx_queue_setup; /**< Set up a TX queue. */
	eth_queue_release_t tx_queue_release; /**< Release a TX queue. */
	eth_dev_led_on_t dev_led_on; /**< Turn on LED. */
	eth_dev_led_off_t dev_led_off; /**< Turn off LED. */
	flow_ctrl_get_t flow_ctrl_get; /**< Get flow control. */
	flow_ctrl_set_t flow_ctrl_set; /**< Set up flow control. */
	priority_flow_ctrl_set_t priority_flow_ctrl_set; /**< Set up priority flow control. */
	eth_uc_hash_table_set_t uc_hash_table_set; /**< Set unicast hash table entry. */
	eth_uc_all_hash_table_set_t uc_all_hash_table_set; /**< Set whole unicast hash table. */
	eth_mirror_rule_set_t mirror_rule_set; /**< Add a traffic mirror rule. */
	eth_mirror_rule_reset_t mirror_rule_reset; /**< Reset a traffic mirror rule. */
	eth_udp_tunnel_port_add_t udp_tunnel_port_add; /**< Add a UDP tunnel port. */
	eth_udp_tunnel_port_del_t udp_tunnel_port_del; /**< Delete a UDP tunnel port. */
	eth_l2_tunnel_eth_type_conf_t l2_tunnel_eth_type_conf; /**< Configure L2 tunnel ether type. */
	eth_l2_tunnel_offload_set_t l2_tunnel_offload_set; /**< Enable/disable L2 tunnel offload. */
	eth_set_queue_rate_limit_t set_queue_rate_limit; /**< Set queue TX rate limit. */
	rss_hash_update_t rss_hash_update; /**< Configure RSS hash protocols and key. */
	rss_hash_conf_get_t rss_hash_conf_get; /**< Get current RSS hash configuration. */
	reta_update_t reta_update; /**< Update RSS redirection table. */
	reta_query_t reta_query; /**< Query RSS redirection table. */
	eth_get_reg_t get_reg; /**< Get device registers. */
	eth_get_eeprom_length_t get_eeprom_length; /**< Get EEPROM length. */
	eth_get_eeprom_t get_eeprom; /**< Get EEPROM data. */
	eth_set_eeprom_t set_eeprom; /**< Set EEPROM data. */
	/* bypass control (compiled in only when RTE_NIC_BYPASS is defined) */
#ifdef RTE_NIC_BYPASS
	bypass_init_t bypass_init;
	bypass_state_set_t bypass_state_set;
	bypass_state_show_t bypass_state_show;
	bypass_event_set_t bypass_event_set;
	bypass_event_show_t bypass_event_show;
	bypass_wd_timeout_set_t bypass_wd_timeout_set;
	bypass_wd_timeout_show_t bypass_wd_timeout_show;
	bypass_ver_show_t bypass_ver_show;
	bypass_wd_reset_t bypass_wd_reset;
#endif
	eth_filter_ctrl_t filter_ctrl; /**< Generic filter control. */
	eth_get_dcb_info get_dcb_info; /**< Get DCB information. */
	eth_timesync_enable_t timesync_enable; /**< Enable timesync. */
	eth_timesync_disable_t timesync_disable; /**< Disable timesync. */
	eth_timesync_read_rx_timestamp_t timesync_read_rx_timestamp; /**< Read RX timestamp. */
	eth_timesync_read_tx_timestamp_t timesync_read_tx_timestamp; /**< Read TX timestamp. */
	eth_timesync_adjust_time timesync_adjust_time; /**< Adjust the device clock. */
	eth_timesync_read_time timesync_read_time; /**< Read the device clock. */
	eth_timesync_write_time timesync_write_time; /**< Set the device clock. */
};
1552 
/**
 * User callback invoked after a burst of packets has been received on
 * (port, queue). pkts[] holds nb_pkts received packets and has room for
 * max_pkts entries; returns the number of packets handed back to the
 * application (presumably <= max_pkts — confirm against the RX wrapper).
 */
typedef uint16_t (*rte_rx_callback_fn)(uint8_t port, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
	void *user_param);

/**
 * User callback invoked before a burst of packets is transmitted on
 * (port, queue); returns the number of packets to actually transmit.
 */
typedef uint16_t (*rte_tx_callback_fn)(uint8_t port, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1601 
/**
 * Node in a per-queue singly-linked list of user RX/TX callbacks
 * (see rte_eth_dev.post_rx_burst_cbs / pre_tx_burst_cbs).
 */
struct rte_eth_rxtx_callback {
	struct rte_eth_rxtx_callback *next; /**< Next callback in the list. */
	union{
		rte_rx_callback_fn rx; /**< Used on RX queues. */
		rte_tx_callback_fn tx; /**< Used on TX queues. */
	} fn;
	void *param; /**< User argument passed to the callback. */
};
1615 
/**
 * Generic Ethernet device structure: the fast-path burst function
 * pointers come first, followed by pointers to shared data and the
 * driver's operation table.
 */
struct rte_eth_dev {
	eth_rx_burst_t rx_pkt_burst; /**< Pointer to PMD receive function. */
	eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */
	eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. */
	struct rte_eth_dev_data *data; /**< Pointer to device data. */
	const struct eth_driver *driver; /**< Driver for this device. */
	const struct eth_dev_ops *dev_ops; /**< Functions exported by the PMD. */
	struct rte_device *device; /**< Backing generic device. */
	struct rte_intr_handle *intr_handle; /**< Device interrupt handle. */
	struct rte_eth_dev_cb_list link_intr_cbs; /**< User callbacks for interrupt events. */
	struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT]; /**< Per-queue callbacks run after RX burst. */
	struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT]; /**< Per-queue callbacks run before TX burst. */
	uint8_t attached; /**< Flag indicating the port is attached. */
	/* NOTE(review): the closing line of this struct is omitted in this
	 * generated view. */
1649 
/** SR-IOV state of an Ethernet device. */
struct rte_eth_dev_sriov {
	uint8_t active; /**< Non-zero when SR-IOV is enabled — presumably the pool count; confirm in EAL/PMD code. */
	uint8_t nb_q_per_pool; /**< RX/TX queues per pool. */
	uint16_t def_vmdq_idx; /**< Default pool index. */
	uint16_t def_pool_q_idx; /**< Default queue index within the pool. */
};
/** Accessor for a device's SR-IOV state. */
#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)

#define RTE_ETH_NAME_MAX_LEN (32) /**< Max length of a port name. */
1659 
/**
 * @internal
 * The data part, with no function pointers, associated with each Ethernet
 * device; intended to be shared between primary and secondary processes.
 */
struct rte_eth_dev_data {
	char name[RTE_ETH_NAME_MAX_LEN]; /**< Unique identifier name. */

	void **rx_queues; /**< Array of pointers to RX queues. */
	void **tx_queues; /**< Array of pointers to TX queues. */
	uint16_t nb_rx_queues; /**< Number of RX queues. */
	uint16_t nb_tx_queues; /**< Number of TX queues. */

	struct rte_eth_dev_sriov sriov; /**< SR-IOV data. */

	void *dev_private; /**< PMD-specific private data. */

	struct rte_eth_link dev_link; /**< Link-level information and status. */

	struct rte_eth_conf dev_conf; /**< Configuration applied to the device. */
	uint16_t mtu; /**< Maximum Transmission Unit. */

	uint32_t min_rx_buf_size; /**< Smallest RX buffer size over all queues. */

	uint64_t rx_mbuf_alloc_failed; /**< Count of RX mbuf allocation failures. */
	struct ether_addr* mac_addrs; /**< Device Ethernet link addresses. */
	/** Bitmap array associating MAC addresses to pools. */
	uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
	/** Device Ethernet MAC addresses for hash filtering. */
	struct ether_addr* hash_mac_addrs;
	uint8_t port_id; /**< Device (external) port identifier. */
	__extension__
	uint8_t promiscuous : 1, /**< RX promiscuous mode ON(1) / OFF(0). */
		scattered_rx : 1, /**< RX of scattered packets ON(1) / OFF(0). */
		all_multicast : 1, /**< RX all-multicast mode ON(1) / OFF(0). */
		dev_started : 1, /**< Device state: STARTED(1) / STOPPED(0). */
		lro : 1; /**< RX LRO ON(1) / OFF(0). */
	/** Per-queue RX state: STARTED(1) / STOPPED(0). */
	uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT];
	/** Per-queue TX state: STARTED(1) / STOPPED(0). */
	uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];
	uint32_t dev_flags; /**< RTE_ETH_DEV_* capability flags. */
	enum rte_kernel_driver kdrv; /**< Kernel driver passthrough. */
	int numa_node; /**< NUMA node the device is connected to. */
	const char *drv_name; /**< Driver name. */
};
1710 
1712 #define RTE_ETH_DEV_DETACHABLE 0x0001
1713 
1714 #define RTE_ETH_DEV_INTR_LSC 0x0002
1715 
1716 #define RTE_ETH_DEV_BONDED_SLAVE 0x0004
1717 
/** @internal Array of all Ethernet devices, indexed by port identifier. */
extern struct rte_eth_dev rte_eth_devices[];

/** Return the number of Ethernet ports currently available. */
uint8_t rte_eth_dev_count(void);

/** @internal Find an already-allocated device by name; NULL if not found. */
struct rte_eth_dev *rte_eth_dev_allocated(const char *name);

/** @internal Allocate a new device slot for the given name; NULL on failure. */
struct rte_eth_dev *rte_eth_dev_allocate(const char *name);

/** @internal Release the port associated with eth_dev; 0 on success. */
int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev);

/** Attach (hot-plug) a device described by devargs; writes its id to *port_id. */
int rte_eth_dev_attach(const char *devargs, uint8_t *port_id);

/** Detach (hot-unplug) port_id; on success devname receives the device name. */
int rte_eth_dev_detach(uint8_t port_id, char *devname);
1801 
struct eth_driver;

/** @internal PMD per-device initialization hook, called at probe time. */
typedef int (*eth_dev_init_t)(struct rte_eth_dev *eth_dev);

/** @internal PMD per-device finalization hook, called before detach. */
typedef int (*eth_dev_uninit_t)(struct rte_eth_dev *eth_dev);

/**
 * @internal
 * Structure associated with each Ethernet PMD: the underlying PCI driver
 * plus the device init/uninit hooks and the size of the per-device
 * private data block to allocate.
 */
struct eth_driver {
	struct rte_pci_driver pci_drv; /**< The PMD is also a PCI driver. */
	eth_dev_init_t eth_dev_init; /**< Device init function. */
	eth_dev_uninit_t eth_dev_uninit; /**< Device uninit function. */
	unsigned int dev_private_size; /**< Size of device private data. */
};
1873 
/* ---- Device configuration, queue setup and basic control ---- */

/** Convert a numeric link speed and duplex mode to an ETH_LINK_SPEED_* flag. */
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);

/** Configure queue counts and global settings; must precede queue setup. */
int rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_queue,
		uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);

/** @internal Reset a device's data to its initial state. */
void _rte_eth_dev_reset(struct rte_eth_dev *dev);

/** Allocate and set up an RX queue fed with mbufs from mb_pool. */
int rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);

/** Allocate and set up a TX queue. */
int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);

/** Return the NUMA socket the device is connected to. */
int rte_eth_dev_socket_id(uint8_t port_id);

/** Return non-zero when port_id refers to a valid attached port. */
int rte_eth_dev_is_valid_port(uint8_t port_id);

/** Start/stop an individual RX or TX queue of a started device. */
int rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id);

int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id);

int rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id);

int rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id);



/** Start the device; on success RX/TX can be invoked. */
int rte_eth_dev_start(uint8_t port_id);

/** Stop the device; it can be restarted with rte_eth_dev_start(). */
void rte_eth_dev_stop(uint8_t port_id);


/** Administratively bring the link up / down. */
int rte_eth_dev_set_link_up(uint8_t port_id);

int rte_eth_dev_set_link_down(uint8_t port_id);

/** Close the device; it cannot be restarted afterwards. */
void rte_eth_dev_close(uint8_t port_id);

/* ---- RX mode: promiscuous / all-multicast ---- */

void rte_eth_promiscuous_enable(uint8_t port_id);

void rte_eth_promiscuous_disable(uint8_t port_id);

/** Return 1 if enabled, 0 if disabled, negative on invalid port. */
int rte_eth_promiscuous_get(uint8_t port_id);

void rte_eth_allmulticast_enable(uint8_t port_id);

void rte_eth_allmulticast_disable(uint8_t port_id);

/** Return 1 if enabled, 0 if disabled, negative on invalid port. */
int rte_eth_allmulticast_get(uint8_t port_id);

/* ---- Link status ---- */

/** Retrieve link status; may wait for link-up resolution. */
void rte_eth_link_get(uint8_t port_id, struct rte_eth_link *link);

/** Retrieve link status without waiting. */
void rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *link);

/* ---- Statistics ---- */

int rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats);

void rte_eth_stats_reset(uint8_t port_id);

/** Retrieve names of extended statistics; see xstats_get for values. */
int rte_eth_xstats_get_names(uint8_t port_id,
		struct rte_eth_xstat_name *xstats_names,
		unsigned size);

/** Retrieve extended statistics values. */
int rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
		unsigned n);

void rte_eth_xstats_reset(uint8_t port_id);

/** Map a TX/RX queue to a per-queue statistics counter index. */
int rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id,
		uint16_t tx_queue_id, uint8_t stat_idx);

int rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id,
		uint16_t rx_queue_id,
		uint8_t stat_idx);

/* ---- Device information ---- */

/** Retrieve the primary MAC address of the port. */
void rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr);

/** Retrieve device capabilities and limits. */
void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info);

/** Retrieve the firmware version string into fw_version (fw_size bytes). */
int rte_eth_dev_fw_version_get(uint8_t port_id,
		char *fw_version, size_t fw_size);

/** Retrieve supported packet types matching ptype_mask (up to num entries). */
int rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
		uint32_t *ptypes, int num);

/* ---- MTU and VLAN ---- */

int rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu);

int rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu);

/** Enable (on=1) or disable (on=0) filtering of the given VLAN id. */
int rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on);

/** Enable/disable VLAN stripping on a specific RX queue. */
int rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id,
		int on);

/** Set the outer/inner VLAN Ether type used by the hardware. */
int rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
		enum rte_vlan_type vlan_type,
		uint16_t tag_type);

/** Set/get the VLAN offload configuration (strip/filter/extend mask). */
int rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask);

int rte_eth_dev_get_vlan_offload(uint8_t port_id);

/** Set the port-based VLAN insertion id (PVID); on enables/disables it. */
int rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on);
2609 
2692 static inline uint16_t
2693 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2694  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
2695 {
2696  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2697 
2698 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2699  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
2700  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2701 
2702  if (queue_id >= dev->data->nb_rx_queues) {
2703  RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2704  return 0;
2705  }
2706 #endif
2707  int16_t nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2708  rx_pkts, nb_pkts);
2709 
2710 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
2711  struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
2712 
2713  if (unlikely(cb != NULL)) {
2714  do {
2715  nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
2716  nb_pkts, cb->param);
2717  cb = cb->next;
2718  } while (cb != NULL);
2719  }
2720 #endif
2721 
2722  return nb_rx;
2723 }
2724 
2737 static inline int
2738 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2739 {
2740  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2741  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2742  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
2743  return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2744 }
2745 
2761 static inline int
2762 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2763 {
2764  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2765  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2766  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2767  return (*dev->dev_ops->rx_descriptor_done)( \
2768  dev->data->rx_queues[queue_id], offset);
2769 }
2770 
2830 static inline uint16_t
2831 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2832  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2833 {
2834  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2835 
2836 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2837  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
2838  RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
2839 
2840  if (queue_id >= dev->data->nb_tx_queues) {
2841  RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2842  return 0;
2843  }
2844 #endif
2845 
2846 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
2847  struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
2848 
2849  if (unlikely(cb != NULL)) {
2850  do {
2851  nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
2852  cb->param);
2853  cb = cb->next;
2854  } while (cb != NULL);
2855  }
2856 #endif
2857 
2858  return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
2859 }
2860 
2917 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
2918 
2919 static inline uint16_t
2920 rte_eth_tx_prepare(uint8_t port_id, uint16_t queue_id,
2921  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2922 {
2923  struct rte_eth_dev *dev;
2924 
2925 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2926  if (!rte_eth_dev_is_valid_port(port_id)) {
2927  RTE_PMD_DEBUG_TRACE("Invalid TX port_id=%d\n", port_id);
2928  rte_errno = -EINVAL;
2929  return 0;
2930  }
2931 #endif
2932 
2933  dev = &rte_eth_devices[port_id];
2934 
2935 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2936  if (queue_id >= dev->data->nb_tx_queues) {
2937  RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2938  rte_errno = -EINVAL;
2939  return 0;
2940  }
2941 #endif
2942 
2943  if (!dev->tx_pkt_prepare)
2944  return nb_pkts;
2945 
2946  return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
2947  tx_pkts, nb_pkts);
2948 }
2949 
2950 #else
2951 
2952 /*
2953  * Native NOOP operation for compilation targets which doesn't require any
2954  * preparations steps, and functional NOOP may introduce unnecessary performance
2955  * drop.
2956  *
2957  * Generally this is not a good idea to turn it on globally and didn't should
2958  * be used if behavior of tx_preparation can change.
2959  */
2960 
2961 static inline uint16_t
2962 rte_eth_tx_prepare(__rte_unused uint8_t port_id, __rte_unused uint16_t queue_id,
2963  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2964 {
2965  return nb_pkts;
2966 }
2967 
2968 #endif
2969 
/**
 * Callback type invoked by the TX buffering API (rte_eth_tx_buffer_flush)
 * for packets that could not be sent, with the userdata registered via
 * rte_eth_tx_buffer_set_err_callback().
 */
typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
		void *userdata);
2972 
2978  buffer_tx_error_fn error_callback;
2979  void *error_userdata;
2980  uint16_t size;
2981  uint16_t length;
2982  struct rte_mbuf *pkts[];
2984 };
2985 
2992 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
2993  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
2994 
/**
 * Initialize a TX buffer of the given capacity (allocated with
 * RTE_ETH_TX_BUFFER_SIZE(size) bytes); 0 on success, negative on error.
 */
int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3007 
3030 static inline uint16_t
3031 rte_eth_tx_buffer_flush(uint8_t port_id, uint16_t queue_id,
3032  struct rte_eth_dev_tx_buffer *buffer)
3033 {
3034  uint16_t sent;
3035  uint16_t to_send = buffer->length;
3036 
3037  if (to_send == 0)
3038  return 0;
3039 
3040  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
3041 
3042  buffer->length = 0;
3043 
3044  /* All packets sent, or to be dealt with by callback below */
3045  if (unlikely(sent != to_send))
3046  buffer->error_callback(&buffer->pkts[sent], to_send - sent,
3047  buffer->error_userdata);
3048 
3049  return sent;
3050 }
3051 
3082 static inline uint16_t __attribute__((always_inline))
3083 rte_eth_tx_buffer(uint8_t port_id, uint16_t queue_id,
3084  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
3085 {
3086  buffer->pkts[buffer->length++] = tx_pkt;
3087  if (buffer->length < buffer->size)
3088  return 0;
3089 
3090  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
3091 }
3092 
3117 int
3119  buffer_tx_error_fn callback, void *userdata);
3120 
/** Stock error callback: the unsent packets are simply freed (dropped). */
void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata);

/**
 * Stock error callback: drops the unsent packets and counts them —
 * presumably via the counter supplied as userdata; verify against the
 * implementation before relying on the userdata type.
 */
void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata);
3173 
3187 };
3188 
/** Application callback type registered for asynchronous port events. */
typedef void (*rte_eth_dev_cb_fn)(uint8_t port_id, \
		enum rte_eth_event_type event, void *cb_arg);

/** Register cb_fn to be invoked for the given event on port_id. */
int rte_eth_dev_callback_register(uint8_t port_id,
		enum rte_eth_event_type event,
		rte_eth_dev_cb_fn cb_fn, void *cb_arg);

/** Unregister a previously registered event callback. */
int rte_eth_dev_callback_unregister(uint8_t port_id,
		enum rte_eth_event_type event,
		rte_eth_dev_cb_fn cb_fn, void *cb_arg);

/** @internal Invoke all callbacks registered for an event on the device. */
void _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
		enum rte_eth_event_type event, void *cb_arg);
3260 
/* ---- RX interrupt control ---- */

/** Enable/disable the RX interrupt of a queue. */
int rte_eth_dev_rx_intr_enable(uint8_t port_id, uint16_t queue_id);

int rte_eth_dev_rx_intr_disable(uint8_t port_id, uint16_t queue_id);

/** Control (op) epoll event association for all RX queues of a port. */
int rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data);

/** Control epoll event association for a single RX queue. */
int rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
		int epfd, int op, void *data);

/* ---- LED control ---- */

int rte_eth_led_on(uint8_t port_id);

int rte_eth_led_off(uint8_t port_id);

/* ---- Flow control ---- */

/** Get/set the link flow-control configuration. */
int rte_eth_dev_flow_ctrl_get(uint8_t port_id,
		struct rte_eth_fc_conf *fc_conf);

int rte_eth_dev_flow_ctrl_set(uint8_t port_id,
		struct rte_eth_fc_conf *fc_conf);

/** Configure priority (802.1Qbb-style) flow control. */
int rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id,
		struct rte_eth_pfc_conf *pfc_conf);

/* ---- MAC address management ---- */

/** Add a MAC address to the set of addresses filtered by the port. */
int rte_eth_dev_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
		uint32_t pool);

/** Remove a previously added MAC address. */
int rte_eth_dev_mac_addr_remove(uint8_t port, struct ether_addr *mac_addr);

/** Replace the default (primary) MAC address of the port. */
int rte_eth_dev_default_mac_addr_set(uint8_t port, struct ether_addr *mac_addr);


/* ---- RSS redirection table ---- */

int rte_eth_dev_rss_reta_update(uint8_t port,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);

int rte_eth_dev_rss_reta_query(uint8_t port,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);

/* ---- Unicast hash filtering ---- */

/** Add (on=1) or remove (on=0) a single address from the unicast hash table. */
int rte_eth_dev_uc_hash_table_set(uint8_t port, struct ether_addr *addr,
		uint8_t on);

/** Enable/disable the whole unicast hash table. */
int rte_eth_dev_uc_all_hash_table_set(uint8_t port, uint8_t on);

/* ---- Traffic mirroring and rate limiting ---- */

/** Install (on=1) or update a mirror rule identified by rule_id. */
int rte_eth_mirror_rule_set(uint8_t port_id,
		struct rte_eth_mirror_conf *mirror_conf,
		uint8_t rule_id,
		uint8_t on);

/** Remove a mirror rule. */
int rte_eth_mirror_rule_reset(uint8_t port_id,
		uint8_t rule_id);

/** Limit the transmit rate of a queue (tx_rate in Mbps). */
int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
		uint16_t tx_rate);

/* ---- Bypass (supported by a limited set of adapters) ---- */

int rte_eth_dev_bypass_init(uint8_t port);

int rte_eth_dev_bypass_state_show(uint8_t port, uint32_t *state);

int rte_eth_dev_bypass_state_set(uint8_t port, uint32_t *new_state);

int rte_eth_dev_bypass_event_show(uint8_t port, uint32_t event, uint32_t *state);

int rte_eth_dev_bypass_event_store(uint8_t port, uint32_t event, uint32_t state);

int rte_eth_dev_wd_timeout_store(uint8_t port, uint32_t timeout);

int rte_eth_dev_bypass_ver_show(uint8_t port, uint32_t *ver);

int rte_eth_dev_bypass_wd_timeout_show(uint8_t port, uint32_t *wd_timeout);

int rte_eth_dev_bypass_wd_reset(uint8_t port);

/* ---- RSS hash configuration ---- */

int rte_eth_dev_rss_hash_update(uint8_t port_id,
		struct rte_eth_rss_conf *rss_conf);

int
rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
		struct rte_eth_rss_conf *rss_conf);

/* ---- UDP tunneling ports (e.g. VXLAN) ---- */

int
rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
		struct rte_eth_udp_tunnel *tunnel_udp);

int
rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
		struct rte_eth_udp_tunnel *tunnel_udp);

/* ---- Generic filter control ---- */

/** Check whether the port supports the given filter type. */
int rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type);

/** Perform filter_op on a filter of the given type; arg is op-specific. */
int rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
		enum rte_filter_op filter_op, void *arg);

/** Retrieve the DCB (Data Center Bridging) configuration of the port. */
int rte_eth_dev_get_dcb_info(uint8_t port_id,
		struct rte_eth_dcb_info *dcb_info);

/**
 * Append an RX callback for a port/queue; returns an opaque handle for
 * rte_eth_remove_rx_callback(), or NULL on error.
 */
void *rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param);
3933 
/**
 * Add a callback that must be called first on packet RX on a given port
 * and queue.
 *
 * This API configures a first function to be called for each burst of
 * packets received on a given NIC port queue. The return value is a pointer
 * that can be used to later remove the callback using
 * rte_eth_remove_rx_callback().
 *
 * Multiple functions are called in the order that they are added.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param queue_id
 *   The queue on the Ethernet device on which the callback is to be added.
 * @param fn
 *   The callback function
 * @param user_param
 *   A generic pointer parameter which will be passed to each invocation of
 *   the callback function on this port and queue.
 *
 * @return
 *   NULL on error.
 *   On success, a pointer value which can later be used to remove the
 *   callback.
 */
void *rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param);

/**
 * Append a TX callback for a port/queue; returns an opaque handle for
 * rte_eth_remove_tx_callback(), or NULL on error.
 */
void *rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param);
3988 
/** Remove an RX callback previously returned by an add_*_rx_callback call. */
int rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb);

/** Remove a TX callback previously returned by rte_eth_add_tx_callback. */
int rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb);

/* ---- Queue and device introspection ---- */

int rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo);

int rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo);

/** Retrieve device registers for diagnostic purposes. */
int rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info);

/* ---- EEPROM access ---- */

int rte_eth_dev_get_eeprom_length(uint8_t port_id);

int rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);

int rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);

/** Replace the multicast filter list of the port with mc_addr_set. */
int rte_eth_dev_set_mc_addr_list(uint8_t port_id,
		struct ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);

/* ---- IEEE 1588 / PTP timesync ---- */

int rte_eth_timesync_enable(uint8_t port_id);

int rte_eth_timesync_disable(uint8_t port_id);

/** Read the timestamp of the last PTP packet received (flags is PMD-specific). */
int rte_eth_timesync_read_rx_timestamp(uint8_t port_id,
		struct timespec *timestamp, uint32_t flags);

/** Read the timestamp of the last PTP packet transmitted. */
int rte_eth_timesync_read_tx_timestamp(uint8_t port_id,
		struct timespec *timestamp);

/** Adjust the device clock by delta nanoseconds. */
int rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta);

/** Read / write the device clock. */
int rte_eth_timesync_read_time(uint8_t port_id, struct timespec *time);

int rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *time);

/* ---- PMD helpers ---- */

/** @internal Copy PCI device info into the ethdev structure. */
void rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
		struct rte_pci_device *pci_dev);

/** @internal Reserve (or look up) a DMA memzone for a device queue. */
const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
		uint16_t queue_id, size_t size,
		unsigned align, int socket_id);

/* ---- L2 tunnel configuration ---- */

/** Configure the Ether type of an L2 tunnel. */
int
rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
		struct rte_eth_l2_tunnel_conf *l2_tunnel);

/** Enable/disable (en) the offloads selected by mask for an L2 tunnel. */
int
rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
		struct rte_eth_l2_tunnel_conf *l2_tunnel,
		uint32_t mask,
		uint8_t en);

/* ---- Name/port lookup ---- */

int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id);

int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name);

/** @internal Generic PCI probe/remove wrappers for ethdev PMDs. */
int rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv,
		struct rte_pci_device *pci_dev);

int rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev);
4423 
4424 #ifdef __cplusplus
4425 }
4426 #endif
4427 
4428 #endif /* _RTE_ETHDEV_H_ */