DPDK 16.07.2
rte_ethdev.h
1 /*-
2  * BSD LICENSE
3  *
4  * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * * Redistributions of source code must retain the above copyright
12  * notice, this list of conditions and the following disclaimer.
13  * * Redistributions in binary form must reproduce the above copyright
14  * notice, this list of conditions and the following disclaimer in
15  * the documentation and/or other materials provided with the
16  * distribution.
17  * * Neither the name of Intel Corporation nor the names of its
18  * contributors may be used to endorse or promote products derived
19  * from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #ifndef _RTE_ETHDEV_H_
35 #define _RTE_ETHDEV_H_
36 
169 #ifdef __cplusplus
170 extern "C" {
171 #endif
172 
173 #include <stdint.h>
174 
175 #include <rte_dev.h>
176 
177 /* Use this macro to check if LRO API is supported */
178 #define RTE_ETHDEV_HAS_LRO_SUPPORT
179 
180 #include <rte_log.h>
181 #include <rte_interrupts.h>
182 #include <rte_pci.h>
183 #include <rte_dev.h>
184 #include <rte_devargs.h>
185 #include "rte_ether.h"
186 #include "rte_eth_ctrl.h"
187 #include "rte_dev_info.h"
188 
189 struct rte_mbuf;
190 
194 struct rte_eth_stats {
195  uint64_t ipackets;
196  uint64_t opackets;
197  uint64_t ibytes;
198  uint64_t obytes;
199  uint64_t imissed;
203  uint64_t ierrors;
204  uint64_t oerrors;
205  uint64_t rx_nombuf;
206  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
208  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
210  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
212  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
214  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
216 };
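These counters are filled in by rte_eth_stats_get(), which is declared later in this file. A minimal sketch of reading them for one port, assuming EAL is initialized, the port is started, and printf() logging is acceptable:

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Sketch: print the basic counters of one port. */
static void
print_basic_stats(uint8_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;

	printf("port %u: rx=%" PRIu64 " tx=%" PRIu64
	       " missed=%" PRIu64 " rx_nombuf=%" PRIu64 "\n",
	       port_id, stats.ipackets, stats.opackets,
	       stats.imissed, stats.rx_nombuf);
}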
217 
221 #define ETH_LINK_SPEED_AUTONEG (0 << 0)
222 #define ETH_LINK_SPEED_FIXED (1 << 0)
223 #define ETH_LINK_SPEED_10M_HD (1 << 1)
224 #define ETH_LINK_SPEED_10M (1 << 2)
225 #define ETH_LINK_SPEED_100M_HD (1 << 3)
226 #define ETH_LINK_SPEED_100M (1 << 4)
227 #define ETH_LINK_SPEED_1G (1 << 5)
228 #define ETH_LINK_SPEED_2_5G (1 << 6)
229 #define ETH_LINK_SPEED_5G (1 << 7)
230 #define ETH_LINK_SPEED_10G (1 << 8)
231 #define ETH_LINK_SPEED_20G (1 << 9)
232 #define ETH_LINK_SPEED_25G (1 << 10)
233 #define ETH_LINK_SPEED_40G (1 << 11)
234 #define ETH_LINK_SPEED_50G (1 << 12)
235 #define ETH_LINK_SPEED_56G (1 << 13)
236 #define ETH_LINK_SPEED_100G (1 << 14)
241 #define ETH_SPEED_NUM_NONE 0
242 #define ETH_SPEED_NUM_10M 10
243 #define ETH_SPEED_NUM_100M 100
244 #define ETH_SPEED_NUM_1G 1000
245 #define ETH_SPEED_NUM_2_5G 2500
246 #define ETH_SPEED_NUM_5G 5000
247 #define ETH_SPEED_NUM_10G 10000
248 #define ETH_SPEED_NUM_20G 20000
249 #define ETH_SPEED_NUM_25G 25000
250 #define ETH_SPEED_NUM_40G 40000
251 #define ETH_SPEED_NUM_50G 50000
252 #define ETH_SPEED_NUM_56G 56000
253 #define ETH_SPEED_NUM_100G 100000
258 struct rte_eth_link {
259  uint32_t link_speed;
260  uint16_t link_duplex : 1;
261  uint16_t link_autoneg : 1;
262  uint16_t link_status : 1;
263 } __attribute__((aligned(8)));
265 /* Utility constants */
266 #define ETH_LINK_HALF_DUPLEX 0
267 #define ETH_LINK_FULL_DUPLEX 1
268 #define ETH_LINK_DOWN 0
269 #define ETH_LINK_UP 1
270 #define ETH_LINK_FIXED 0
271 #define ETH_LINK_AUTONEG 1
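struct rte_eth_link and the constants above are reported by rte_eth_link_get() and rte_eth_link_get_nowait(), both declared later in this file. A small sketch of printing the current link state, with the helper name chosen only for illustration:

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Sketch: print the current link state without waiting for renegotiation. */
static void
print_link(uint8_t port_id)
{
	struct rte_eth_link link;

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status == ETH_LINK_UP)
		printf("port %u: %" PRIu32 " Mbps %s-duplex\n", port_id,
		       link.link_speed,
		       link.link_duplex == ETH_LINK_FULL_DUPLEX ?
		       "full" : "half");
	else
		printf("port %u: link down\n", port_id);
}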
277 struct rte_eth_thresh {
278  uint8_t pthresh;
279  uint8_t hthresh;
280  uint8_t wthresh;
281 };
282 
286 #define ETH_MQ_RX_RSS_FLAG 0x1
287 #define ETH_MQ_RX_DCB_FLAG 0x2
288 #define ETH_MQ_RX_VMDQ_FLAG 0x4
289 
297 
301  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
303  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
304 
306  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
308  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
310  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
313  ETH_MQ_RX_VMDQ_FLAG,
314 };
315 
319 #define ETH_RSS ETH_MQ_RX_RSS
320 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
321 #define ETH_DCB_RX ETH_MQ_RX_DCB
322 
332 };
333 
337 #define ETH_DCB_NONE ETH_MQ_TX_NONE
338 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
339 #define ETH_DCB_TX ETH_MQ_TX_DCB
340 
347  uint32_t max_rx_pkt_len;
348  uint16_t split_hdr_size;
349  uint16_t header_split : 1,
350  hw_ip_checksum : 1,
351  hw_vlan_filter : 1,
352  hw_vlan_strip : 1,
353  hw_vlan_extend : 1,
354  jumbo_frame : 1,
355  hw_strip_crc : 1,
356  enable_scatter : 1,
357  enable_lro : 1;
358 };
359 
365  ETH_VLAN_TYPE_UNKNOWN = 0,
368  ETH_VLAN_TYPE_MAX,
369 };
370 
388 struct rte_eth_rss_conf {
389  uint8_t *rss_key;
390  uint8_t rss_key_len;
391  uint64_t rss_hf;
392 };
393 
394 /*
395  * The RSS offload types are defined based on flow types which are defined
396  * in rte_eth_ctrl.h. Different NIC hardware may support different RSS offload
397  * types. The supported flow types or RSS offload types can be queried by
398  * rte_eth_dev_info_get().
399  */
400 #define ETH_RSS_IPV4 (1ULL << RTE_ETH_FLOW_IPV4)
401 #define ETH_RSS_FRAG_IPV4 (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
402 #define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
403 #define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
404 #define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
405 #define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
406 #define ETH_RSS_IPV6 (1ULL << RTE_ETH_FLOW_IPV6)
407 #define ETH_RSS_FRAG_IPV6 (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
408 #define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
409 #define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
410 #define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
411 #define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
412 #define ETH_RSS_L2_PAYLOAD (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
413 #define ETH_RSS_IPV6_EX (1ULL << RTE_ETH_FLOW_IPV6_EX)
414 #define ETH_RSS_IPV6_TCP_EX (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
415 #define ETH_RSS_IPV6_UDP_EX (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
416 #define ETH_RSS_PORT (1ULL << RTE_ETH_FLOW_PORT)
417 #define ETH_RSS_VXLAN (1ULL << RTE_ETH_FLOW_VXLAN)
418 #define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
419 #define ETH_RSS_NVGRE (1ULL << RTE_ETH_FLOW_NVGRE)
420 
421 #define ETH_RSS_IP ( \
422  ETH_RSS_IPV4 | \
423  ETH_RSS_FRAG_IPV4 | \
424  ETH_RSS_NONFRAG_IPV4_OTHER | \
425  ETH_RSS_IPV6 | \
426  ETH_RSS_FRAG_IPV6 | \
427  ETH_RSS_NONFRAG_IPV6_OTHER | \
428  ETH_RSS_IPV6_EX)
429 
430 #define ETH_RSS_UDP ( \
431  ETH_RSS_NONFRAG_IPV4_UDP | \
432  ETH_RSS_NONFRAG_IPV6_UDP | \
433  ETH_RSS_IPV6_UDP_EX)
434 
435 #define ETH_RSS_TCP ( \
436  ETH_RSS_NONFRAG_IPV4_TCP | \
437  ETH_RSS_NONFRAG_IPV6_TCP | \
438  ETH_RSS_IPV6_TCP_EX)
439 
440 #define ETH_RSS_SCTP ( \
441  ETH_RSS_NONFRAG_IPV4_SCTP | \
442  ETH_RSS_NONFRAG_IPV6_SCTP)
443 
444 #define ETH_RSS_TUNNEL ( \
445  ETH_RSS_VXLAN | \
446  ETH_RSS_GENEVE | \
447  ETH_RSS_NVGRE)
448 
449 
451 #define ETH_RSS_PROTO_MASK ( \
452  ETH_RSS_IPV4 | \
453  ETH_RSS_FRAG_IPV4 | \
454  ETH_RSS_NONFRAG_IPV4_TCP | \
455  ETH_RSS_NONFRAG_IPV4_UDP | \
456  ETH_RSS_NONFRAG_IPV4_SCTP | \
457  ETH_RSS_NONFRAG_IPV4_OTHER | \
458  ETH_RSS_IPV6 | \
459  ETH_RSS_FRAG_IPV6 | \
460  ETH_RSS_NONFRAG_IPV6_TCP | \
461  ETH_RSS_NONFRAG_IPV6_UDP | \
462  ETH_RSS_NONFRAG_IPV6_SCTP | \
463  ETH_RSS_NONFRAG_IPV6_OTHER | \
464  ETH_RSS_L2_PAYLOAD | \
465  ETH_RSS_IPV6_EX | \
466  ETH_RSS_IPV6_TCP_EX | \
467  ETH_RSS_IPV6_UDP_EX | \
468  ETH_RSS_PORT | \
469  ETH_RSS_VXLAN | \
470  ETH_RSS_GENEVE | \
471  ETH_RSS_NVGRE)
472 
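The ETH_RSS_* bits are combined into the rss_hf field of struct rte_eth_rss_conf, either at configure time through rx_adv_conf.rss_conf or at runtime through rte_eth_dev_rss_hash_update(). A minimal runtime sketch, assuming RSS was enabled when the port was configured and that keeping the current hash key (rss_key = NULL) is acceptable:

#include <rte_ethdev.h>

/* Sketch: restrict the RSS hash to IPv4/IPv6 TCP and UDP flows at runtime.
 * Assumes RSS was enabled when the port was configured. */
static int
restrict_rss_to_l4(uint8_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,               /* keep the current hash key */
		.rss_hf  = ETH_RSS_TCP | ETH_RSS_UDP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}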
473 /*
474  * Definitions used for redirection table entry size.
475  * Some RSS RETA sizes may not be supported by some drivers, check the
476  * documentation or the description of relevant functions for more details.
477  */
478 #define ETH_RSS_RETA_SIZE_64 64
479 #define ETH_RSS_RETA_SIZE_128 128
480 #define ETH_RSS_RETA_SIZE_256 256
481 #define ETH_RSS_RETA_SIZE_512 512
482 #define RTE_RETA_GROUP_SIZE 64
483 
484 /* Definitions used for VMDQ and DCB functionality */
485 #define ETH_VMDQ_MAX_VLAN_FILTERS 64
486 #define ETH_DCB_NUM_USER_PRIORITIES 8
487 #define ETH_VMDQ_DCB_NUM_QUEUES 128
488 #define ETH_DCB_NUM_QUEUES 128
490 /* DCB capability defines */
491 #define ETH_DCB_PG_SUPPORT 0x00000001
492 #define ETH_DCB_PFC_SUPPORT 0x00000002
494 /* Definitions used for VLAN Offload functionality */
495 #define ETH_VLAN_STRIP_OFFLOAD 0x0001
496 #define ETH_VLAN_FILTER_OFFLOAD 0x0002
497 #define ETH_VLAN_EXTEND_OFFLOAD 0x0004
499 /* Definitions used for mask VLAN setting */
500 #define ETH_VLAN_STRIP_MASK 0x0001
501 #define ETH_VLAN_FILTER_MASK 0x0002
502 #define ETH_VLAN_EXTEND_MASK 0x0004
503 #define ETH_VLAN_ID_MAX 0x0FFF
505 /* Definitions used for receive MAC address */
506 #define ETH_NUM_RECEIVE_MAC_ADDR 128
508 /* Definitions used for unicast hash */
509 #define ETH_VMDQ_NUM_UC_HASH_ARRAY 128
511 /* Definitions used for VMDQ pool rx mode setting */
512 #define ETH_VMDQ_ACCEPT_UNTAG 0x0001
513 #define ETH_VMDQ_ACCEPT_HASH_MC 0x0002
514 #define ETH_VMDQ_ACCEPT_HASH_UC 0x0004
515 #define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
516 #define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
519 #define ETH_MIRROR_MAX_VLANS 64
520 
521 #define ETH_MIRROR_VIRTUAL_POOL_UP 0x01
522 #define ETH_MIRROR_UPLINK_PORT 0x02
523 #define ETH_MIRROR_DOWNLINK_PORT 0x04
524 #define ETH_MIRROR_VLAN 0x08
525 #define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10
530 struct rte_eth_vlan_mirror {
531  uint64_t vlan_mask;
533  uint16_t vlan_id[ETH_MIRROR_MAX_VLANS];
534 };
535 
540  uint8_t rule_type;
541  uint8_t dst_pool;
542  uint64_t pool_mask;
545 };
546 
554  uint64_t mask;
556  uint16_t reta[RTE_RETA_GROUP_SIZE];
558 };
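The redirection table is updated in groups of RTE_RETA_GROUP_SIZE entries: within each rte_eth_rss_reta_entry64, bit i of mask selects entry i of reta[]. A sketch that spreads the whole table round-robin over the first nb_queues RX queues; it assumes the port reports its table size through rte_eth_dev_info_get() and that the size is one of the ETH_RSS_RETA_SIZE_* values above:

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: program the whole RETA so entries map round-robin onto
 * the first nb_queues RX queues. */
static int
spread_reta(uint8_t port_id, uint16_t nb_queues)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
						   RTE_RETA_GROUP_SIZE];
	uint16_t i;

	if (nb_queues == 0)
		return -1;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.reta_size == 0 ||
	    dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
		return -1;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < dev_info.reta_size; i++) {
		struct rte_eth_rss_reta_entry64 *grp =
			&reta_conf[i / RTE_RETA_GROUP_SIZE];

		grp->mask |= 1ULL << (i % RTE_RETA_GROUP_SIZE);
		grp->reta[i % RTE_RETA_GROUP_SIZE] = i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
					   dev_info.reta_size);
}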
559 
565  ETH_4_TCS = 4,
567 };
568 
578 };
579 
580 /* This structure may be extended in future. */
581 struct rte_eth_dcb_rx_conf {
582  enum rte_eth_nb_tcs nb_tcs;
584  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
585 };
586 
587 struct rte_eth_vmdq_dcb_tx_conf {
588  enum rte_eth_nb_pools nb_queue_pools;
590  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
591 };
592 
593 struct rte_eth_dcb_tx_conf {
594  enum rte_eth_nb_tcs nb_tcs;
596  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
597 };
598 
599 struct rte_eth_vmdq_tx_conf {
600  enum rte_eth_nb_pools nb_queue_pools;
601 };
602 
617  uint8_t default_pool;
618  uint8_t nb_pool_maps;
619  struct {
620  uint16_t vlan_id;
621  uint64_t pools;
625 };
626 
627 struct rte_eth_vmdq_rx_conf {
628  enum rte_eth_nb_pools nb_queue_pools;
629  uint8_t enable_default_pool;
630  uint8_t default_pool;
631  uint8_t enable_loop_back;
632  uint8_t nb_pool_maps;
633  uint32_t rx_mode;
634  struct {
635  uint16_t vlan_id;
636  uint64_t pools;
637  } pool_map[ETH_VMDQ_MAX_VLAN_FILTERS];
638 };
639 
646  /* For i40e specifically */
647  uint16_t pvid;
648  uint8_t hw_vlan_reject_tagged : 1,
654 };
655 
661  uint16_t rx_free_thresh;
662  uint8_t rx_drop_en;
664 };
665 
666 #define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001
667 #define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002
668 #define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004
669 #define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100
670 #define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200
671 #define ETH_TXQ_FLAGS_NOXSUMUDP 0x0400
672 #define ETH_TXQ_FLAGS_NOXSUMTCP 0x0800
673 #define ETH_TXQ_FLAGS_NOOFFLOADS \
674  (ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
675  ETH_TXQ_FLAGS_NOXSUMUDP | ETH_TXQ_FLAGS_NOXSUMTCP)
676 #define ETH_TXQ_FLAGS_NOXSUMS \
677  (ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
678  ETH_TXQ_FLAGS_NOXSUMTCP)
679 
684  uint16_t tx_rs_thresh;
685  uint16_t tx_free_thresh;
688  uint32_t txq_flags;
690 };
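The ETH_TXQ_FLAGS_* bits above go into the txq_flags field of struct rte_eth_txconf passed to rte_eth_tx_queue_setup(). A sketch that requests the simple TX path for an application sending only single-segment mbufs with no offloads; it assumes dev_info.default_txconf provides the driver's default TX configuration:

#include <rte_ethdev.h>

/* Sketch: start from the driver's default TX configuration and request the
 * simple TX path (single-segment mbufs, no offloads). */
static int
setup_simple_txq(uint8_t port_id, uint16_t queue_id, uint16_t nb_desc)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	rte_eth_dev_info_get(port_id, &dev_info);
	txconf = dev_info.default_txconf;
	txconf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS;

	return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
				      rte_eth_dev_socket_id(port_id), &txconf);
}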
691 
696  uint16_t nb_max;
697  uint16_t nb_min;
698  uint16_t nb_align;
699 };
700 
709 };
710 
717  uint32_t high_water;
718  uint32_t low_water;
719  uint16_t pause_time;
720  uint16_t send_xon;
723  uint8_t autoneg;
724 };
725 
733  uint8_t priority;
734 };
735 
744 };
745 
753 };
754 
766  uint8_t drop_queue;
767  struct rte_eth_fdir_masks mask;
770 };
771 
780  uint16_t udp_port;
781  uint8_t prot_type;
782 };
783 
789  uint16_t lsc;
791  uint16_t rxq;
792 };
793 
799 struct rte_eth_conf {
800  uint32_t link_speeds;
809  uint32_t lpbk_mode;
814  struct {
818  struct rte_eth_dcb_rx_conf dcb_rx_conf;
820  struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
822  } rx_adv_conf;
823  union {
824  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
826  struct rte_eth_dcb_tx_conf dcb_tx_conf;
828  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
830  } tx_adv_conf;
836 };
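A minimal sketch of filling struct rte_eth_conf for a port that distributes packets with RSS over IP flows; all unset fields keep their zero defaults, and the specific choices (ETH_MQ_RX_RSS mode, CRC stripping, default hash key) are illustrative assumptions:

#include <rte_ethdev.h>

/* Sketch: port configuration enabling RSS across IPv4/IPv6 flows,
 * with CRC stripping done in hardware. */
static const struct rte_eth_conf rss_port_conf = {
	.rxmode = {
		.mq_mode      = ETH_MQ_RX_RSS,
		.hw_strip_crc = 1,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,       /* use the driver default key */
			.rss_hf  = ETH_RSS_IP,
		},
	},
};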
837 
847 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
848 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
849 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
850 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
851 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
852 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
853 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
854 
858 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
859 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
860 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
861 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
862 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
863 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
864 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
865 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
866 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
867 
873  const char *driver_name;
874  unsigned int if_index;
876  uint32_t min_rx_bufsize;
877  uint32_t max_rx_pktlen;
878  uint16_t max_rx_queues;
879  uint16_t max_tx_queues;
880  uint32_t max_mac_addrs;
881  uint32_t max_hash_mac_addrs;
883  uint16_t max_vfs;
884  uint16_t max_vmdq_pools;
885  uint32_t rx_offload_capa;
886  uint32_t tx_offload_capa;
887  uint16_t reta_size;
889  uint8_t hash_key_size;
894  uint16_t vmdq_queue_base;
895  uint16_t vmdq_queue_num;
896  uint16_t vmdq_pool_base;
899  uint32_t speed_capa;
901  uint16_t nb_rx_queues;
902  uint16_t nb_tx_queues;
903 };
904 
910  struct rte_mempool *mp;
912  uint8_t scattered_rx;
913  uint16_t nb_desc;
915 
922  uint16_t nb_desc;
924 
926 #define RTE_ETH_XSTATS_NAME_SIZE 64
927 
936  uint64_t id;
937  uint64_t value;
938 };
939 
947  char name[RTE_ETH_XSTATS_NAME_SIZE];
948 };
949 
950 #define ETH_DCB_NUM_TCS 8
951 #define ETH_MAX_VMDQ_POOL 64
952 
959  struct {
960  uint8_t base;
961  uint8_t nb_queue;
962  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
964  struct {
965  uint8_t base;
966  uint8_t nb_queue;
967  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
968 };
969 
975  uint8_t nb_tcs;
977  uint8_t tc_bws[ETH_DCB_NUM_TCS];
980 };
981 
985 #define RTE_ETH_QUEUE_STATE_STOPPED 0
986 #define RTE_ETH_QUEUE_STATE_STARTED 1
987 
988 struct rte_eth_dev;
989 
990 struct rte_eth_dev_callback;
992 TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
993 
994 
995 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
996 #define RTE_PMD_DEBUG_TRACE(...) \
997  rte_pmd_debug_trace(__func__, __VA_ARGS__)
998 #else
999 #define RTE_PMD_DEBUG_TRACE(...)
1000 #endif
1001 
1002 
1003 /* Macros to check for valid port */
1004 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1005  if (!rte_eth_dev_is_valid_port(port_id)) { \
1006  RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
1007  return retval; \
1008  } \
1009 } while (0)
1010 
1011 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1012  if (!rte_eth_dev_is_valid_port(port_id)) { \
1013  RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
1014  return; \
1015  } \
1016 } while (0)
1017 
1023 #define ETH_L2_TUNNEL_ENABLE_MASK 0x00000001
1024 
1025 #define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002
1026 
1027 #define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004
1028 
1029 #define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1030 
1031 /*
1032  * Definitions of all functions exported by an Ethernet driver through the
1033  * generic structure of type *eth_dev_ops* supplied in the *rte_eth_dev*
1034  * structure associated with an Ethernet device.
1035  */
1036 
1037 typedef int (*eth_dev_configure_t)(struct rte_eth_dev *dev);
1040 typedef int (*eth_dev_start_t)(struct rte_eth_dev *dev);
1043 typedef void (*eth_dev_stop_t)(struct rte_eth_dev *dev);
1046 typedef int (*eth_dev_set_link_up_t)(struct rte_eth_dev *dev);
1049 typedef int (*eth_dev_set_link_down_t)(struct rte_eth_dev *dev);
1052 typedef void (*eth_dev_close_t)(struct rte_eth_dev *dev);
1055 typedef void (*eth_promiscuous_enable_t)(struct rte_eth_dev *dev);
1058 typedef void (*eth_promiscuous_disable_t)(struct rte_eth_dev *dev);
1061 typedef void (*eth_allmulticast_enable_t)(struct rte_eth_dev *dev);
1064 typedef void (*eth_allmulticast_disable_t)(struct rte_eth_dev *dev);
1067 typedef int (*eth_link_update_t)(struct rte_eth_dev *dev,
1068  int wait_to_complete);
1071 typedef void (*eth_stats_get_t)(struct rte_eth_dev *dev,
1072  struct rte_eth_stats *igb_stats);
1075 typedef void (*eth_stats_reset_t)(struct rte_eth_dev *dev);
1078 typedef int (*eth_xstats_get_t)(struct rte_eth_dev *dev,
1079  struct rte_eth_xstat *stats, unsigned n);
1082 typedef void (*eth_xstats_reset_t)(struct rte_eth_dev *dev);
1085 typedef int (*eth_xstats_get_names_t)(struct rte_eth_dev *dev,
1086  struct rte_eth_xstat_name *xstats_names, unsigned size);
1089 typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
1090  uint16_t queue_id,
1091  uint8_t stat_idx,
1092  uint8_t is_rx);
1095 typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
1096  struct rte_eth_dev_info *dev_info);
1099 typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(struct rte_eth_dev *dev);
1102 typedef int (*eth_queue_start_t)(struct rte_eth_dev *dev,
1103  uint16_t queue_id);
1106 typedef int (*eth_queue_stop_t)(struct rte_eth_dev *dev,
1107  uint16_t queue_id);
1110 typedef int (*eth_rx_queue_setup_t)(struct rte_eth_dev *dev,
1111  uint16_t rx_queue_id,
1112  uint16_t nb_rx_desc,
1113  unsigned int socket_id,
1114  const struct rte_eth_rxconf *rx_conf,
1115  struct rte_mempool *mb_pool);
1118 typedef int (*eth_tx_queue_setup_t)(struct rte_eth_dev *dev,
1119  uint16_t tx_queue_id,
1120  uint16_t nb_tx_desc,
1121  unsigned int socket_id,
1122  const struct rte_eth_txconf *tx_conf);
1125 typedef int (*eth_rx_enable_intr_t)(struct rte_eth_dev *dev,
1126  uint16_t rx_queue_id);
1129 typedef int (*eth_rx_disable_intr_t)(struct rte_eth_dev *dev,
1130  uint16_t rx_queue_id);
1133 typedef void (*eth_queue_release_t)(void *queue);
1136 typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
1137  uint16_t rx_queue_id);
1140 typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
1143 typedef void (*eth_rxq_info_get_t)(struct rte_eth_dev *dev,
1144  uint16_t rx_queue_id, struct rte_eth_rxq_info *qinfo);
1145 
1146 typedef void (*eth_txq_info_get_t)(struct rte_eth_dev *dev,
1147  uint16_t tx_queue_id, struct rte_eth_txq_info *qinfo);
1148 
1149 typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);
1152 typedef int (*vlan_filter_set_t)(struct rte_eth_dev *dev,
1153  uint16_t vlan_id,
1154  int on);
1157 typedef int (*vlan_tpid_set_t)(struct rte_eth_dev *dev,
1158  enum rte_vlan_type type, uint16_t tpid);
1161 typedef void (*vlan_offload_set_t)(struct rte_eth_dev *dev, int mask);
1164 typedef int (*vlan_pvid_set_t)(struct rte_eth_dev *dev,
1165  uint16_t vlan_id,
1166  int on);
1169 typedef void (*vlan_strip_queue_set_t)(struct rte_eth_dev *dev,
1170  uint16_t rx_queue_id,
1171  int on);
1174 typedef uint16_t (*eth_rx_burst_t)(void *rxq,
1175  struct rte_mbuf **rx_pkts,
1176  uint16_t nb_pkts);
1179 typedef uint16_t (*eth_tx_burst_t)(void *txq,
1180  struct rte_mbuf **tx_pkts,
1181  uint16_t nb_pkts);
1184 typedef int (*flow_ctrl_get_t)(struct rte_eth_dev *dev,
1185  struct rte_eth_fc_conf *fc_conf);
1188 typedef int (*flow_ctrl_set_t)(struct rte_eth_dev *dev,
1189  struct rte_eth_fc_conf *fc_conf);
1192 typedef int (*priority_flow_ctrl_set_t)(struct rte_eth_dev *dev,
1193  struct rte_eth_pfc_conf *pfc_conf);
1196 typedef int (*reta_update_t)(struct rte_eth_dev *dev,
1197  struct rte_eth_rss_reta_entry64 *reta_conf,
1198  uint16_t reta_size);
1201 typedef int (*reta_query_t)(struct rte_eth_dev *dev,
1202  struct rte_eth_rss_reta_entry64 *reta_conf,
1203  uint16_t reta_size);
1206 typedef int (*rss_hash_update_t)(struct rte_eth_dev *dev,
1207  struct rte_eth_rss_conf *rss_conf);
1210 typedef int (*rss_hash_conf_get_t)(struct rte_eth_dev *dev,
1211  struct rte_eth_rss_conf *rss_conf);
1214 typedef int (*eth_dev_led_on_t)(struct rte_eth_dev *dev);
1217 typedef int (*eth_dev_led_off_t)(struct rte_eth_dev *dev);
1220 typedef void (*eth_mac_addr_remove_t)(struct rte_eth_dev *dev, uint32_t index);
1223 typedef void (*eth_mac_addr_add_t)(struct rte_eth_dev *dev,
1224  struct ether_addr *mac_addr,
1225  uint32_t index,
1226  uint32_t vmdq);
1229 typedef void (*eth_mac_addr_set_t)(struct rte_eth_dev *dev,
1230  struct ether_addr *mac_addr);
1233 typedef int (*eth_uc_hash_table_set_t)(struct rte_eth_dev *dev,
1234  struct ether_addr *mac_addr,
1235  uint8_t on);
1238 typedef int (*eth_uc_all_hash_table_set_t)(struct rte_eth_dev *dev,
1239  uint8_t on);
1242 typedef int (*eth_set_vf_rx_mode_t)(struct rte_eth_dev *dev,
1243  uint16_t vf,
1244  uint16_t rx_mode,
1245  uint8_t on);
1248 typedef int (*eth_set_vf_rx_t)(struct rte_eth_dev *dev,
1249  uint16_t vf,
1250  uint8_t on);
1253 typedef int (*eth_set_vf_tx_t)(struct rte_eth_dev *dev,
1254  uint16_t vf,
1255  uint8_t on);
1258 typedef int (*eth_set_vf_vlan_filter_t)(struct rte_eth_dev *dev,
1259  uint16_t vlan,
1260  uint64_t vf_mask,
1261  uint8_t vlan_on);
1264 typedef int (*eth_set_queue_rate_limit_t)(struct rte_eth_dev *dev,
1265  uint16_t queue_idx,
1266  uint16_t tx_rate);
1269 typedef int (*eth_set_vf_rate_limit_t)(struct rte_eth_dev *dev,
1270  uint16_t vf,
1271  uint16_t tx_rate,
1272  uint64_t q_msk);
1275 typedef int (*eth_mirror_rule_set_t)(struct rte_eth_dev *dev,
1276  struct rte_eth_mirror_conf *mirror_conf,
1277  uint8_t rule_id,
1278  uint8_t on);
1281 typedef int (*eth_mirror_rule_reset_t)(struct rte_eth_dev *dev,
1282  uint8_t rule_id);
1285 typedef int (*eth_udp_tunnel_port_add_t)(struct rte_eth_dev *dev,
1286  struct rte_eth_udp_tunnel *tunnel_udp);
1289 typedef int (*eth_udp_tunnel_port_del_t)(struct rte_eth_dev *dev,
1290  struct rte_eth_udp_tunnel *tunnel_udp);
1293 typedef int (*eth_set_mc_addr_list_t)(struct rte_eth_dev *dev,
1294  struct ether_addr *mc_addr_set,
1295  uint32_t nb_mc_addr);
1298 typedef int (*eth_timesync_enable_t)(struct rte_eth_dev *dev);
1301 typedef int (*eth_timesync_disable_t)(struct rte_eth_dev *dev);
1304 typedef int (*eth_timesync_read_rx_timestamp_t)(struct rte_eth_dev *dev,
1305  struct timespec *timestamp,
1306  uint32_t flags);
1309 typedef int (*eth_timesync_read_tx_timestamp_t)(struct rte_eth_dev *dev,
1310  struct timespec *timestamp);
1313 typedef int (*eth_timesync_adjust_time)(struct rte_eth_dev *dev, int64_t);
1316 typedef int (*eth_timesync_read_time)(struct rte_eth_dev *dev,
1317  struct timespec *timestamp);
1320 typedef int (*eth_timesync_write_time)(struct rte_eth_dev *dev,
1321  const struct timespec *timestamp);
1324 typedef int (*eth_get_reg_t)(struct rte_eth_dev *dev,
1325  struct rte_dev_reg_info *info);
1328 typedef int (*eth_get_eeprom_length_t)(struct rte_eth_dev *dev);
1331 typedef int (*eth_get_eeprom_t)(struct rte_eth_dev *dev,
1332  struct rte_dev_eeprom_info *info);
1335 typedef int (*eth_set_eeprom_t)(struct rte_eth_dev *dev,
1336  struct rte_dev_eeprom_info *info);
1339 typedef int (*eth_l2_tunnel_eth_type_conf_t)
1340  (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
1343 typedef int (*eth_l2_tunnel_offload_set_t)
1344  (struct rte_eth_dev *dev,
1345  struct rte_eth_l2_tunnel_conf *l2_tunnel,
1346  uint32_t mask,
1347  uint8_t en);
1350 #ifdef RTE_NIC_BYPASS
1351 
1352 enum {
1353  RTE_BYPASS_MODE_NONE,
1354  RTE_BYPASS_MODE_NORMAL,
1355  RTE_BYPASS_MODE_BYPASS,
1356  RTE_BYPASS_MODE_ISOLATE,
1357  RTE_BYPASS_MODE_NUM,
1358 };
1359 
1360 #define RTE_BYPASS_MODE_VALID(x) \
1361  ((x) > RTE_BYPASS_MODE_NONE && (x) < RTE_BYPASS_MODE_NUM)
1362 
1363 enum {
1364  RTE_BYPASS_EVENT_NONE,
1365  RTE_BYPASS_EVENT_START,
1366  RTE_BYPASS_EVENT_OS_ON = RTE_BYPASS_EVENT_START,
1367  RTE_BYPASS_EVENT_POWER_ON,
1368  RTE_BYPASS_EVENT_OS_OFF,
1369  RTE_BYPASS_EVENT_POWER_OFF,
1370  RTE_BYPASS_EVENT_TIMEOUT,
1371  RTE_BYPASS_EVENT_NUM
1372 };
1373 
1374 #define RTE_BYPASS_EVENT_VALID(x) \
1375  ((x) > RTE_BYPASS_EVENT_NONE && (x) < RTE_BYPASS_MODE_NUM)
1376 
1377 enum {
1378  RTE_BYPASS_TMT_OFF, /* timeout disabled. */
1379  RTE_BYPASS_TMT_1_5_SEC, /* timeout for 1.5 seconds */
1380  RTE_BYPASS_TMT_2_SEC, /* timeout for 2 seconds */
1381  RTE_BYPASS_TMT_3_SEC, /* timeout for 3 seconds */
1382  RTE_BYPASS_TMT_4_SEC, /* timeout for 4 seconds */
1383  RTE_BYPASS_TMT_8_SEC, /* timeout for 8 seconds */
1384  RTE_BYPASS_TMT_16_SEC, /* timeout for 16 seconds */
1385  RTE_BYPASS_TMT_32_SEC, /* timeout for 32 seconds */
1386  RTE_BYPASS_TMT_NUM
1387 };
1388 
1389 #define RTE_BYPASS_TMT_VALID(x) \
1390  ((x) == RTE_BYPASS_TMT_OFF || \
1391  ((x) > RTE_BYPASS_TMT_OFF && (x) < RTE_BYPASS_TMT_NUM))
1392 
1393 typedef void (*bypass_init_t)(struct rte_eth_dev *dev);
1394 typedef int32_t (*bypass_state_set_t)(struct rte_eth_dev *dev, uint32_t *new_state);
1395 typedef int32_t (*bypass_state_show_t)(struct rte_eth_dev *dev, uint32_t *state);
1396 typedef int32_t (*bypass_event_set_t)(struct rte_eth_dev *dev, uint32_t state, uint32_t event);
1397 typedef int32_t (*bypass_event_show_t)(struct rte_eth_dev *dev, uint32_t event_shift, uint32_t *event);
1398 typedef int32_t (*bypass_wd_timeout_set_t)(struct rte_eth_dev *dev, uint32_t timeout);
1399 typedef int32_t (*bypass_wd_timeout_show_t)(struct rte_eth_dev *dev, uint32_t *wd_timeout);
1400 typedef int32_t (*bypass_ver_show_t)(struct rte_eth_dev *dev, uint32_t *ver);
1401 typedef int32_t (*bypass_wd_reset_t)(struct rte_eth_dev *dev);
1402 #endif
1403 
1404 typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
1405  enum rte_filter_type filter_type,
1406  enum rte_filter_op filter_op,
1407  void *arg);
1410 typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
1411  struct rte_eth_dcb_info *dcb_info);
1417 struct eth_dev_ops {
1418  eth_dev_configure_t dev_configure;
1419  eth_dev_start_t dev_start;
1420  eth_dev_stop_t dev_stop;
1421  eth_dev_set_link_up_t dev_set_link_up;
1422  eth_dev_set_link_down_t dev_set_link_down;
1423  eth_dev_close_t dev_close;
1424  eth_promiscuous_enable_t promiscuous_enable;
1425  eth_promiscuous_disable_t promiscuous_disable;
1426  eth_allmulticast_enable_t allmulticast_enable;
1427  eth_allmulticast_disable_t allmulticast_disable;
1428  eth_link_update_t link_update;
1429  eth_stats_get_t stats_get;
1430  eth_stats_reset_t stats_reset;
1431  eth_xstats_get_t xstats_get;
1432  eth_xstats_reset_t xstats_reset;
1433  eth_xstats_get_names_t xstats_get_names;
1435  eth_queue_stats_mapping_set_t queue_stats_mapping_set;
1437  eth_dev_infos_get_t dev_infos_get;
1438  eth_dev_supported_ptypes_get_t dev_supported_ptypes_get;
1440  mtu_set_t mtu_set;
1441  vlan_filter_set_t vlan_filter_set;
1442  vlan_tpid_set_t vlan_tpid_set;
1443  vlan_strip_queue_set_t vlan_strip_queue_set;
1444  vlan_offload_set_t vlan_offload_set;
1445  vlan_pvid_set_t vlan_pvid_set;
1446  eth_queue_start_t rx_queue_start;
1447  eth_queue_stop_t rx_queue_stop;
1448  eth_queue_start_t tx_queue_start;
1449  eth_queue_stop_t tx_queue_stop;
1450  eth_rx_queue_setup_t rx_queue_setup;
1451  eth_queue_release_t rx_queue_release;
1452  eth_rx_queue_count_t rx_queue_count;
1453  eth_rx_descriptor_done_t rx_descriptor_done;
1455  eth_rx_enable_intr_t rx_queue_intr_enable;
1457  eth_rx_disable_intr_t rx_queue_intr_disable;
1458  eth_tx_queue_setup_t tx_queue_setup;
1459  eth_queue_release_t tx_queue_release;
1460  eth_dev_led_on_t dev_led_on;
1461  eth_dev_led_off_t dev_led_off;
1462  flow_ctrl_get_t flow_ctrl_get;
1463  flow_ctrl_set_t flow_ctrl_set;
1464  priority_flow_ctrl_set_t priority_flow_ctrl_set;
1465  eth_mac_addr_remove_t mac_addr_remove;
1466  eth_mac_addr_add_t mac_addr_add;
1467  eth_mac_addr_set_t mac_addr_set;
1468  eth_uc_hash_table_set_t uc_hash_table_set;
1469  eth_uc_all_hash_table_set_t uc_all_hash_table_set;
1470  eth_mirror_rule_set_t mirror_rule_set;
1471  eth_mirror_rule_reset_t mirror_rule_reset;
1472  eth_set_vf_rx_mode_t set_vf_rx_mode;
1473  eth_set_vf_rx_t set_vf_rx;
1474  eth_set_vf_tx_t set_vf_tx;
1475  eth_set_vf_vlan_filter_t set_vf_vlan_filter;
1477  eth_udp_tunnel_port_add_t udp_tunnel_port_add;
1479  eth_udp_tunnel_port_del_t udp_tunnel_port_del;
1480  eth_set_queue_rate_limit_t set_queue_rate_limit;
1481  eth_set_vf_rate_limit_t set_vf_rate_limit;
1483  reta_update_t reta_update;
1485  reta_query_t reta_query;
1486 
1487  eth_get_reg_t get_reg;
1489  eth_get_eeprom_length_t get_eeprom_length;
1491  eth_get_eeprom_t get_eeprom;
1493  eth_set_eeprom_t set_eeprom;
1495  /* bypass control */
1496 #ifdef RTE_NIC_BYPASS
1497  bypass_init_t bypass_init;
1498  bypass_state_set_t bypass_state_set;
1499  bypass_state_show_t bypass_state_show;
1500  bypass_event_set_t bypass_event_set;
1501  bypass_event_show_t bypass_event_show;
1502  bypass_wd_timeout_set_t bypass_wd_timeout_set;
1503  bypass_wd_timeout_show_t bypass_wd_timeout_show;
1504  bypass_ver_show_t bypass_ver_show;
1505  bypass_wd_reset_t bypass_wd_reset;
1506 #endif
1507 
1509  rss_hash_update_t rss_hash_update;
1511  rss_hash_conf_get_t rss_hash_conf_get;
1512  eth_filter_ctrl_t filter_ctrl;
1514  eth_set_mc_addr_list_t set_mc_addr_list;
1515  eth_rxq_info_get_t rxq_info_get;
1517  eth_txq_info_get_t txq_info_get;
1520  eth_timesync_enable_t timesync_enable;
1522  eth_timesync_disable_t timesync_disable;
1524  eth_timesync_read_rx_timestamp_t timesync_read_rx_timestamp;
1526  eth_timesync_read_tx_timestamp_t timesync_read_tx_timestamp;
1527 
1529  eth_get_dcb_info get_dcb_info;
1531  eth_timesync_adjust_time timesync_adjust_time;
1533  eth_timesync_read_time timesync_read_time;
1535  eth_timesync_write_time timesync_write_time;
1537  eth_l2_tunnel_eth_type_conf_t l2_tunnel_eth_type_conf;
1539  eth_l2_tunnel_offload_set_t l2_tunnel_offload_set;
1540 };
1541 
1564 typedef uint16_t (*rte_rx_callback_fn)(uint8_t port, uint16_t queue,
1565  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1566  void *user_param);
1567 
1588 typedef uint16_t (*rte_tx_callback_fn)(uint8_t port, uint16_t queue,
1589  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1590 
1596 struct rte_eth_rxtx_callback {
1597  struct rte_eth_rxtx_callback *next;
1598  union{
1599  rte_rx_callback_fn rx;
1600  rte_tx_callback_fn tx;
1601  } fn;
1602  void *param;
1603 };
1604 
1614 };
1615 
1626 struct rte_eth_dev {
1627  eth_rx_burst_t rx_pkt_burst;
1628  eth_tx_burst_t tx_pkt_burst;
1629  struct rte_eth_dev_data *data;
1630  const struct eth_driver *driver;
1631  const struct eth_dev_ops *dev_ops;
1632  struct rte_pci_device *pci_dev;
1634  struct rte_eth_dev_cb_list link_intr_cbs;
1639  struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
1644  struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
1645  uint8_t attached;
1646  enum rte_eth_dev_type dev_type;
1647 } __rte_cache_aligned;
1648 
1649 struct rte_eth_dev_sriov {
1650  uint8_t active;
1651  uint8_t nb_q_per_pool;
1652  uint16_t def_vmdq_idx;
1653  uint16_t def_pool_q_idx;
1654 };
1655 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1656 
1657 #define RTE_ETH_NAME_MAX_LEN (32)
1658 
1666 struct rte_eth_dev_data {
1667  char name[RTE_ETH_NAME_MAX_LEN];
1669  void **rx_queues;
1670  void **tx_queues;
1671  uint16_t nb_rx_queues;
1672  uint16_t nb_tx_queues;
1674  struct rte_eth_dev_sriov sriov;
1676  void *dev_private;
1678  struct rte_eth_link dev_link;
1681  struct rte_eth_conf dev_conf;
1682  uint16_t mtu;
1684  uint32_t min_rx_buf_size;
1687  uint64_t rx_mbuf_alloc_failed;
1688  struct ether_addr* mac_addrs;
1689  uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
1691  struct ether_addr* hash_mac_addrs;
1693  uint8_t port_id;
1694  uint8_t promiscuous : 1,
1695  scattered_rx : 1,
1696  all_multicast : 1,
1697  dev_started : 1,
1698  lro : 1;
1699  uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT];
1701  uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];
1703  uint32_t dev_flags;
1704  enum rte_kernel_driver kdrv;
1705  int numa_node;
1706  const char *drv_name;
1707 };
1708 
1710 #define RTE_ETH_DEV_DETACHABLE 0x0001
1711 
1712 #define RTE_ETH_DEV_INTR_LSC 0x0002
1713 
1714 #define RTE_ETH_DEV_BONDED_SLAVE 0x0004
1715 
1721 extern struct rte_eth_dev rte_eth_devices[];
1722 
1736 uint8_t rte_eth_dev_count(void);
1737 
1747 struct rte_eth_dev *rte_eth_dev_allocated(const char *name);
1748 
1759 struct rte_eth_dev *rte_eth_dev_allocate(const char *name,
1760  enum rte_eth_dev_type type);
1761 
1771 int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev);
1772 
1785 int rte_eth_dev_attach(const char *devargs, uint8_t *port_id);
1786 
1799 int rte_eth_dev_detach(uint8_t port_id, char *devname);
1800 
1801 struct eth_driver;
1832 typedef int (*eth_dev_init_t)(struct rte_eth_dev *eth_dev);
1833 
1849 typedef int (*eth_dev_uninit_t)(struct rte_eth_dev *eth_dev);
1850 
1866 struct eth_driver {
1867  struct rte_pci_driver pci_drv;
1868  eth_dev_init_t eth_dev_init;
1869  eth_dev_uninit_t eth_dev_uninit;
1870  unsigned int dev_private_size;
1871 };
1872 
1883 void rte_eth_driver_register(struct eth_driver *eth_drv);
1884 
1896 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
1897 
1927 int rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_queue,
1928  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
1929 
1969 int rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
1970  uint16_t nb_rx_desc, unsigned int socket_id,
1971  const struct rte_eth_rxconf *rx_conf,
1972  struct rte_mempool *mb_pool);
1973 
2017 int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
2018  uint16_t nb_tx_desc, unsigned int socket_id,
2019  const struct rte_eth_txconf *tx_conf);
2020 
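rte_eth_dev_configure(), the two queue-setup functions above and rte_eth_dev_start() (declared below) form the usual bring-up sequence. A condensed sketch with one RX and one TX queue; the descriptor counts are illustrative, the mempool must already exist, and NULL queue configurations select the driver defaults:

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Sketch: bring up a port with one RX and one TX queue.
 * 'mb_pool' is an existing pktmbuf pool; 'port_conf' may be an all-zero
 * struct rte_eth_conf for a basic single-queue setup. */
static int
port_init(uint8_t port_id, struct rte_mempool *mb_pool,
	  const struct rte_eth_conf *port_conf)
{
	const uint16_t nb_rxd = 512, nb_txd = 512;  /* descriptor ring sizes */
	int socket = rte_eth_dev_socket_id(port_id);
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, port_conf);
	if (ret < 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket,
				     NULL /* default rxconf */, mb_pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket,
				     NULL /* default txconf */);
	if (ret < 0)
		return ret;

	return rte_eth_dev_start(port_id);
}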
2031 int rte_eth_dev_socket_id(uint8_t port_id);
2032 
2042 int rte_eth_dev_is_valid_port(uint8_t port_id);
2043 
2059 int rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id);
2060 
2075 int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id);
2076 
2092 int rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id);
2093 
2108 int rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id);
2109 
2110 
2111 
2127 int rte_eth_dev_start(uint8_t port_id);
2128 
2136 void rte_eth_dev_stop(uint8_t port_id);
2137 
2138 
2151 int rte_eth_dev_set_link_up(uint8_t port_id);
2152 
2162 int rte_eth_dev_set_link_down(uint8_t port_id);
2163 
2172 void rte_eth_dev_close(uint8_t port_id);
2173 
2180 void rte_eth_promiscuous_enable(uint8_t port_id);
2181 
2188 void rte_eth_promiscuous_disable(uint8_t port_id);
2189 
2200 int rte_eth_promiscuous_get(uint8_t port_id);
2201 
2208 void rte_eth_allmulticast_enable(uint8_t port_id);
2209 
2216 void rte_eth_allmulticast_disable(uint8_t port_id);
2217 
2228 int rte_eth_allmulticast_get(uint8_t port_id);
2229 
2241 void rte_eth_link_get(uint8_t port_id, struct rte_eth_link *link);
2242 
2254 void rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *link);
2255 
2273 int rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats);
2274 
2281 void rte_eth_stats_reset(uint8_t port_id);
2282 
2302 int rte_eth_xstats_get_names(uint8_t port_id,
2303  struct rte_eth_xstat_name *xstats_names,
2304  unsigned size);
2305 
2327 int rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
2328  unsigned n);
2329 
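rte_eth_xstats_get_names() and rte_eth_xstats_get() are normally used as a pair: a first call with a NULL array returns how many statistics the driver exposes, then a second call fills arrays of that size, with each value's id indexing the names array. A sketch assuming plain malloc()/printf() are acceptable in the caller's context:

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Sketch: dump all extended statistics of a port by name. */
static void
dump_xstats(uint8_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *values = NULL;
	int n, i;

	n = rte_eth_xstats_get_names(port_id, NULL, 0); /* query count only */
	if (n <= 0)
		return;

	names = malloc(n * sizeof(*names));
	values = malloc(n * sizeof(*values));
	if (names == NULL || values == NULL)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, n) != n ||
	    rte_eth_xstats_get(port_id, values, n) != n)
		goto out;

	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n",
		       names[values[i].id].name, values[i].value);
out:
	free(names);
	free(values);
}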
2336 void rte_eth_xstats_reset(uint8_t port_id);
2337 
2355 int rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id,
2356  uint16_t tx_queue_id, uint8_t stat_idx);
2357 
2375 int rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id,
2376  uint16_t rx_queue_id,
2377  uint8_t stat_idx);
2378 
2388 void rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr);
2389 
2399 void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info);
2400 
2439 int rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
2440  uint32_t *ptypes, int num);
2441 
2453 int rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu);
2454 
2469 int rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu);
2470 
2489 int rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on);
2490 
2510 int rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id,
2511  int on);
2512 
2529 int rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
2530  enum rte_vlan_type vlan_type,
2531  uint16_t tag_type);
2532 
2553 int rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask);
2554 
2567 int rte_eth_dev_get_vlan_offload(uint8_t port_id);
2568 
2583 int rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on);
2584 
2667 static inline uint16_t
2668 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2669  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
2670 {
2671  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2672 
2673 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2674  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
2675  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2676 
2677  if (queue_id >= dev->data->nb_rx_queues) {
2678  RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2679  return 0;
2680  }
2681 #endif
2682  int16_t nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2683  rx_pkts, nb_pkts);
2684 
2685 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
2686  struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
2687 
2688  if (unlikely(cb != NULL)) {
2689  do {
2690  nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
2691  nb_pkts, cb->param);
2692  cb = cb->next;
2693  } while (cb != NULL);
2694  }
2695 #endif
2696 
2697  return nb_rx;
2698 }
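A typical polling loop built on rte_eth_rx_burst() requests up to a fixed burst and treats a return of 0 as a normal empty poll. A sketch follows; the burst size and the commented-out handle_packet() hook are illustrative assumptions:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32   /* assumed burst size, tune per application */

/* Sketch: poll one RX queue once and free every packet after handling it. */
static void
poll_rx_queue(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *bufs[BURST_SIZE];
	uint16_t nb_rx, i;

	nb_rx = rte_eth_rx_burst(port_id, queue_id, bufs, BURST_SIZE);
	for (i = 0; i < nb_rx; i++) {
		/* handle_packet(bufs[i]); -- application processing here */
		rte_pktmbuf_free(bufs[i]);
	}
}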
2699 
2712 static inline int
2713 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2714 {
2715  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2716  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2717  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
2718  return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2719 }
2720 
2736 static inline int
2737 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2738 {
2739  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2740  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2741  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2742  return (*dev->dev_ops->rx_descriptor_done)( \
2743  dev->data->rx_queues[queue_id], offset);
2744 }
2745 
2805 static inline uint16_t
2806 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2807  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2808 {
2809  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2810 
2811 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2812  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
2813  RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
2814 
2815  if (queue_id >= dev->data->nb_tx_queues) {
2816  RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2817  return 0;
2818  }
2819 #endif
2820 
2821 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
2822  struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
2823 
2824  if (unlikely(cb != NULL)) {
2825  do {
2826  nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
2827  cb->param);
2828  cb = cb->next;
2829  } while (cb != NULL);
2830  }
2831 #endif
2832 
2833  return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
2834 }
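rte_eth_tx_burst() may accept fewer packets than requested when the descriptor ring is full, and the caller keeps ownership of the unsent tail. A sketch of the simplest correct policy, freeing whatever was not enqueued (retrying in a loop is the other common choice):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Sketch: transmit a burst and drop (free) whatever the ring did not take. */
static uint16_t
send_burst(uint8_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

	if (nb_tx < nb_pkts) {
		uint16_t i;

		for (i = nb_tx; i < nb_pkts; i++)
			rte_pktmbuf_free(pkts[i]);
	}
	return nb_tx;
}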
2835 
2836 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
2837  void *userdata);
2838 
2843 struct rte_eth_dev_tx_buffer {
2844  buffer_tx_error_fn error_callback;
2845  void *error_userdata;
2846  uint16_t size;
2847  uint16_t length;
2848  struct rte_mbuf *pkts[];
2850 };
2851 
2858 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
2859  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
2860 
2871 int
2872 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
2873 
2896 static inline uint16_t
2897 rte_eth_tx_buffer_flush(uint8_t port_id, uint16_t queue_id,
2898  struct rte_eth_dev_tx_buffer *buffer)
2899 {
2900  uint16_t sent;
2901  uint16_t to_send = buffer->length;
2902 
2903  if (to_send == 0)
2904  return 0;
2905 
2906  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
2907 
2908  buffer->length = 0;
2909 
2910  /* All packets sent, or to be dealt with by callback below */
2911  if (unlikely(sent != to_send))
2912  buffer->error_callback(&buffer->pkts[sent], to_send - sent,
2913  buffer->error_userdata);
2914 
2915  return sent;
2916 }
2917 
2948 static inline uint16_t __attribute__((always_inline))
2949 rte_eth_tx_buffer(uint8_t port_id, uint16_t queue_id,
2950  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
2951 {
2952  buffer->pkts[buffer->length++] = tx_pkt;
2953  if (buffer->length < buffer->size)
2954  return 0;
2955 
2956  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
2957 }
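The buffered TX helpers are typically combined as follows: size the buffer with RTE_ETH_TX_BUFFER_SIZE(), initialize it once with rte_eth_tx_buffer_init(), push packets with rte_eth_tx_buffer() on the fast path, and call rte_eth_tx_buffer_flush() from the main loop so partial bursts do not linger. A sketch assuming rte_zmalloc_socket() for the allocation and a drop-and-count error policy via rte_eth_tx_buffer_count_callback():

#include <rte_ethdev.h>
#include <rte_malloc.h>

#define TX_BUFFER_PKTS 32   /* assumed buffering depth */

static uint64_t tx_dropped;  /* incremented by the count callback */

/* Sketch: create a TX buffer on the port's NUMA socket and count drops. */
static struct rte_eth_dev_tx_buffer *
make_tx_buffer(uint8_t port_id)
{
	struct rte_eth_dev_tx_buffer *buf;

	buf = rte_zmalloc_socket("tx_buffer",
				 RTE_ETH_TX_BUFFER_SIZE(TX_BUFFER_PKTS), 0,
				 rte_eth_dev_socket_id(port_id));
	if (buf == NULL)
		return NULL;

	rte_eth_tx_buffer_init(buf, TX_BUFFER_PKTS);
	rte_eth_tx_buffer_set_err_callback(buf,
			rte_eth_tx_buffer_count_callback, &tx_dropped);
	return buf;
}

/* Fast path: rte_eth_tx_buffer(port_id, queue_id, buf, mbuf) queues one
 * packet and flushes automatically when the buffer fills; the main loop
 * should call rte_eth_tx_buffer_flush() periodically for partial bursts. */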
2958 
2983 int
2984 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2985  buffer_tx_error_fn callback, void *userdata);
2986 
3009 void
3010 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3011  void *userdata);
3012 
3036 void
3037 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3038  void *userdata);
3039 
3051 };
3052 
3053 typedef void (*rte_eth_dev_cb_fn)(uint8_t port_id, \
3054  enum rte_eth_event_type event, void *cb_arg);
3075 int rte_eth_dev_callback_register(uint8_t port_id,
3076  enum rte_eth_event_type event,
3077  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3078 
3096 int rte_eth_dev_callback_unregister(uint8_t port_id,
3097  enum rte_eth_event_type event,
3098  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3099 
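rte_eth_dev_callback_register() attaches an application handler to asynchronous device events; the most common use is the link-status-change interrupt, which requires intr_conf.lsc = 1 in the port configuration and a PMD that advertises RTE_ETH_DEV_INTR_LSC. A sketch of such a handler, assuming RTE_ETH_EVENT_INTR_LSC from enum rte_eth_event_type and printf() logging:

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: log link transitions reported through the LSC interrupt. */
static void
lsc_event_cb(uint8_t port_id, enum rte_eth_event_type type, void *param)
{
	struct rte_eth_link link;

	(void)param;
	if (type != RTE_ETH_EVENT_INTR_LSC)
		return;

	rte_eth_link_get_nowait(port_id, &link);
	printf("port %u link %s\n", port_id,
	       link.link_status == ETH_LINK_UP ? "up" : "down");
}

/* Registration, done once before rte_eth_dev_start(): */
static int
register_lsc(uint8_t port_id)
{
	return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
					     lsc_event_cb, NULL);
}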
3113 void _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3114  enum rte_eth_event_type event);
3115 
3136 int rte_eth_dev_rx_intr_enable(uint8_t port_id, uint16_t queue_id);
3137 
3157 int rte_eth_dev_rx_intr_disable(uint8_t port_id, uint16_t queue_id);
3158 
3176 int rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data);
3177 
3199 int rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
3200  int epfd, int op, void *data);
3201 
3214 int rte_eth_led_on(uint8_t port_id);
3215 
3228 int rte_eth_led_off(uint8_t port_id);
3229 
3242 int rte_eth_dev_flow_ctrl_get(uint8_t port_id,
3243  struct rte_eth_fc_conf *fc_conf);
3244 
3259 int rte_eth_dev_flow_ctrl_set(uint8_t port_id,
3260  struct rte_eth_fc_conf *fc_conf);
3261 
3277 int rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id,
3278  struct rte_eth_pfc_conf *pfc_conf);
3279 
3298 int rte_eth_dev_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
3299  uint32_t pool);
3300 
3314 int rte_eth_dev_mac_addr_remove(uint8_t port, struct ether_addr *mac_addr);
3315 
3329 int rte_eth_dev_default_mac_addr_set(uint8_t port, struct ether_addr *mac_addr);
3330 
3331 
3347 int rte_eth_dev_rss_reta_update(uint8_t port,
3348  struct rte_eth_rss_reta_entry64 *reta_conf,
3349  uint16_t reta_size);
3350 
3366 int rte_eth_dev_rss_reta_query(uint8_t port,
3367  struct rte_eth_rss_reta_entry64 *reta_conf,
3368  uint16_t reta_size);
3369 
3388 int rte_eth_dev_uc_hash_table_set(uint8_t port,struct ether_addr *addr,
3389  uint8_t on);
3390 
3408 int rte_eth_dev_uc_all_hash_table_set(uint8_t port,uint8_t on);
3409 
3432 int rte_eth_dev_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mode,
3433  uint8_t on);
3434 
3451 int
3452 rte_eth_dev_set_vf_tx(uint8_t port,uint16_t vf, uint8_t on);
3453 
3470 int
3471 rte_eth_dev_set_vf_rx(uint8_t port,uint16_t vf, uint8_t on);
3472 
3492 int
3493 rte_eth_dev_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
3494  uint64_t vf_mask,
3495  uint8_t vlan_on);
3496 
3518 int rte_eth_mirror_rule_set(uint8_t port_id,
3519  struct rte_eth_mirror_conf *mirror_conf,
3520  uint8_t rule_id,
3521  uint8_t on);
3522 
3536 int rte_eth_mirror_rule_reset(uint8_t port_id,
3537  uint8_t rule_id);
3538 
3554 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
3555  uint16_t tx_rate);
3556 
3574 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf,
3575  uint16_t tx_rate, uint64_t q_msk);
3576 
3588 int rte_eth_dev_bypass_init(uint8_t port);
3589 
3605 int rte_eth_dev_bypass_state_show(uint8_t port, uint32_t *state);
3606 
3622 int rte_eth_dev_bypass_state_set(uint8_t port, uint32_t *new_state);
3623 
3646 int rte_eth_dev_bypass_event_show(uint8_t port, uint32_t event, uint32_t *state);
3647 
3670 int rte_eth_dev_bypass_event_store(uint8_t port, uint32_t event, uint32_t state);
3671 
3692 int rte_eth_dev_wd_timeout_store(uint8_t port, uint32_t timeout);
3693 
3706 int rte_eth_dev_bypass_ver_show(uint8_t port, uint32_t *ver);
3707 
3728 int rte_eth_dev_bypass_wd_timeout_show(uint8_t port, uint32_t *wd_timeout);
3729 
3740 int rte_eth_dev_bypass_wd_reset(uint8_t port);
3741 
3755 int rte_eth_dev_rss_hash_update(uint8_t port_id,
3756  struct rte_eth_rss_conf *rss_conf);
3757 
3771 int
3772 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
3773  struct rte_eth_rss_conf *rss_conf);
3774 
3792 int
3793 rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
3794  struct rte_eth_udp_tunnel *tunnel_udp);
3795 
3814 int
3815 rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
3816  struct rte_eth_udp_tunnel *tunnel_udp);
3817 
3831 int rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type);
3832 
3851 int rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
3852  enum rte_filter_op filter_op, void *arg);
3853 
3866 int rte_eth_dev_get_dcb_info(uint8_t port_id,
3867  struct rte_eth_dcb_info *dcb_info);
3868 
3893 void *rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
3894  rte_rx_callback_fn fn, void *user_param);
3895 
3896 /*
3897 * Add a callback that must be called first on packet RX on a given port
3898 * and queue.
3899 *
3900 * This API configures a first function to be called for each burst of
3901 * packets received on a given NIC port queue. The return value is a pointer
3902 * that can be used to later remove the callback using
3903 * rte_eth_remove_rx_callback().
3904 *
3905 * Multiple functions are called in the order that they are added.
3906 *
3907 * @param port_id
3908 * The port identifier of the Ethernet device.
3909 * @param queue_id
3910 * The queue on the Ethernet device on which the callback is to be added.
3911 * @param fn
3912 * The callback function
3913 * @param user_param
3914 * A generic pointer parameter which will be passed to each invocation of the
3915 * callback function on this port and queue.
3916 *
3917 * @return
3918 * NULL on error.
3919 * On success, a pointer value which can later be used to remove the callback.
3920 */
3921 void *rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
3922  rte_rx_callback_fn fn, void *user_param);
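An RX callback follows the rte_rx_callback_fn prototype above: it may rewrite, filter or merely observe the burst and must return the number of packets left in pkts[]. A sketch of a counting callback that changes nothing; the counter is an illustrative assumption:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint64_t rx_observed;   /* illustrative per-process counter */

/* Sketch: count packets without modifying the burst. */
static uint16_t
count_rx_cb(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
	    uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	(void)port;
	(void)queue;
	(void)pkts;
	(void)max_pkts;
	(void)user_param;

	rx_observed += nb_pkts;
	return nb_pkts;        /* keep every packet in the burst */
}

/* Installed with rte_eth_add_rx_callback() or, to run before all other
 * callbacks on the queue, rte_eth_add_first_rx_callback(). */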
3923 
3948 void *rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
3949  rte_tx_callback_fn fn, void *user_param);
3950 
3981 int rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
3982  struct rte_eth_rxtx_callback *user_cb);
3983 
4014 int rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
4015  struct rte_eth_rxtx_callback *user_cb);
4016 
4034 int rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
4035  struct rte_eth_rxq_info *qinfo);
4036 
4054 int rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
4055  struct rte_eth_txq_info *qinfo);
4056 
4073 int rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info);
4074 
4086 int rte_eth_dev_get_eeprom_length(uint8_t port_id);
4087 
4102 int rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);
4103 
4118 int rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);
4119 
4137 int rte_eth_dev_set_mc_addr_list(uint8_t port_id,
4138  struct ether_addr *mc_addr_set,
4139  uint32_t nb_mc_addr);
4140 
4152 int rte_eth_timesync_enable(uint8_t port_id);
4153 
4165 int rte_eth_timesync_disable(uint8_t port_id);
4166 
4184 int rte_eth_timesync_read_rx_timestamp(uint8_t port_id,
4185  struct timespec *timestamp, uint32_t flags);
4186 
4201 int rte_eth_timesync_read_tx_timestamp(uint8_t port_id,
4202  struct timespec *timestamp);
4203 
4220 int rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta);
4221 
4236 int rte_eth_timesync_read_time(uint8_t port_id, struct timespec *time);
4237 
4255 int rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *time);
4256 
4268 void rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
4269  struct rte_pci_device *pci_dev);
4270 
4290 const struct rte_memzone *
4291 rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
4292  uint16_t queue_id, size_t size,
4293  unsigned align, int socket_id);
4294 
4309 int
4310 rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
4311  struct rte_eth_l2_tunnel_conf *l2_tunnel);
4312 
4336 int
4337 rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
4338  struct rte_eth_l2_tunnel_conf *l2_tunnel,
4339  uint32_t mask,
4340  uint8_t en);
4341 
4354 int
4355 rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id);
4356 
4368 int
4369 rte_eth_dev_get_name_by_port(uint8_t port_id, char *name);
4370 
4371 #ifdef __cplusplus
4372 }
4373 #endif
4374 
4375 #endif /* _RTE_ETHDEV_H_ */