DPDK  18.05.1
rte_ethdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#ifndef _RTE_ETHDEV_H_
#define _RTE_ETHDEV_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>

/* Use this macro to check if LRO API is supported */
#define RTE_ETHDEV_HAS_LRO_SUPPORT

#include <rte_compat.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_dev.h>
#include <rte_devargs.h>
#include <rte_errno.h>
#include <rte_common.h>
#include <rte_config.h>

#include "rte_ether.h"
#include "rte_eth_ctrl.h"
#include "rte_dev_info.h"

struct rte_mbuf;
/** A structure used to retrieve statistics for an Ethernet port. */
struct rte_eth_stats {
	uint64_t ipackets;  /**< Total number of successfully received packets. */
	uint64_t opackets;  /**< Total number of successfully transmitted packets. */
	uint64_t ibytes;    /**< Total number of successfully received bytes. */
	uint64_t obytes;    /**< Total number of successfully transmitted bytes. */
	uint64_t imissed;   /**< Total of RX packets dropped by the HW. */
	uint64_t ierrors;   /**< Total number of erroneous received packets. */
	uint64_t oerrors;   /**< Total number of failed transmitted packets. */
	uint64_t rx_nombuf; /**< Total number of RX mbuf allocation failures. */
	uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]; /**< Per-queue RX packets. */
	uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]; /**< Per-queue TX packets. */
	uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];   /**< Per-queue RX bytes. */
	uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];   /**< Per-queue TX bytes. */
	uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];   /**< Per-queue RX errors. */
};
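
/*
 * Illustrative example, not part of the original header: read and print the
 * basic counters of a started port. Assumes <stdio.h> and <inttypes.h> are
 * available; rte_eth_stats_get() is declared later in this file.
 */
static inline void
example_dump_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;
	printf("port %u: rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64
	       " no_mbuf=%" PRIu64 "\n", port_id, stats.ipackets,
	       stats.opackets, stats.imissed, stats.rx_nombuf);
}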

/**
 * Device supported speeds bitmap flags
 */
#define ETH_LINK_SPEED_AUTONEG  (0 <<  0)  /**< Autonegotiate (all speeds) */
#define ETH_LINK_SPEED_FIXED    (1 <<  0)  /**< Disable autoneg (fixed speed) */
#define ETH_LINK_SPEED_10M_HD   (1 <<  1)  /**<  10 Mbps half-duplex */
#define ETH_LINK_SPEED_10M      (1 <<  2)  /**<  10 Mbps full-duplex */
#define ETH_LINK_SPEED_100M_HD  (1 <<  3)  /**< 100 Mbps half-duplex */
#define ETH_LINK_SPEED_100M     (1 <<  4)  /**< 100 Mbps full-duplex */
#define ETH_LINK_SPEED_1G       (1 <<  5)  /**<   1 Gbps */
#define ETH_LINK_SPEED_2_5G     (1 <<  6)  /**< 2.5 Gbps */
#define ETH_LINK_SPEED_5G       (1 <<  7)  /**<   5 Gbps */
#define ETH_LINK_SPEED_10G      (1 <<  8)  /**<  10 Gbps */
#define ETH_LINK_SPEED_20G      (1 <<  9)  /**<  20 Gbps */
#define ETH_LINK_SPEED_25G      (1 << 10)  /**<  25 Gbps */
#define ETH_LINK_SPEED_40G      (1 << 11)  /**<  40 Gbps */
#define ETH_LINK_SPEED_50G      (1 << 12)  /**<  50 Gbps */
#define ETH_LINK_SPEED_56G      (1 << 13)  /**<  56 Gbps */
#define ETH_LINK_SPEED_100G     (1 << 14)  /**< 100 Gbps */

/**
 * Ethernet numeric link speeds in Mbps
 */
#define ETH_SPEED_NUM_NONE         0  /**< Not defined */
#define ETH_SPEED_NUM_10M         10  /**<  10 Mbps */
#define ETH_SPEED_NUM_100M       100  /**< 100 Mbps */
#define ETH_SPEED_NUM_1G        1000  /**<   1 Gbps */
#define ETH_SPEED_NUM_2_5G      2500  /**< 2.5 Gbps */
#define ETH_SPEED_NUM_5G        5000  /**<   5 Gbps */
#define ETH_SPEED_NUM_10G      10000  /**<  10 Gbps */
#define ETH_SPEED_NUM_20G      20000  /**<  20 Gbps */
#define ETH_SPEED_NUM_25G      25000  /**<  25 Gbps */
#define ETH_SPEED_NUM_40G      40000  /**<  40 Gbps */
#define ETH_SPEED_NUM_50G      50000  /**<  50 Gbps */
#define ETH_SPEED_NUM_56G      56000  /**<  56 Gbps */
#define ETH_SPEED_NUM_100G    100000  /**< 100 Gbps */

/**
 * A structure used to retrieve link-level information of an Ethernet port.
 */
__extension__
struct rte_eth_link {
	uint32_t link_speed;        /**< ETH_SPEED_NUM_ */
	uint16_t link_duplex  : 1;  /**< ETH_LINK_[HALF/FULL]_DUPLEX */
	uint16_t link_autoneg : 1;  /**< ETH_LINK_[FIXED/AUTONEG] */
	uint16_t link_status  : 1;  /**< ETH_LINK_[DOWN/UP] */
} __attribute__((aligned(8)));      /**< aligned for atomic64 read/write */

/* Utility constants */
#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
#define ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
#define ETH_LINK_UP          1 /**< Link is up (see link_status). */
#define ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
#define ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
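
/*
 * Illustrative example, not part of the original header: query the link
 * without blocking and print its state using the constants above. Assumes
 * <stdio.h>; rte_eth_link_get_nowait() is declared later in this file.
 */
static inline void
example_print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	rte_eth_link_get_nowait(port_id, &link);
	printf("port %u: link %s, %u Mbps, %s-duplex\n", port_id,
	       link.link_status == ETH_LINK_UP ? "up" : "down",
	       link.link_speed,
	       link.link_duplex == ETH_LINK_FULL_DUPLEX ? "full" : "half");
}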

/**
 * A structure used to configure the ring threshold registers of an RX/TX
 * queue for an Ethernet port.
 */
struct rte_eth_thresh {
	uint8_t pthresh; /**< Ring prefetch threshold. */
	uint8_t hthresh; /**< Ring host threshold. */
	uint8_t wthresh; /**< Ring writeback threshold. */
};

/**
 * Simple flags to indicate RX/TX multi-queue modes, used in
 * rte_eth_conf.rxmode.mq_mode.
 */
#define ETH_MQ_RX_RSS_FLAG  0x1
#define ETH_MQ_RX_DCB_FLAG  0x2
#define ETH_MQ_RX_VMDQ_FLAG 0x4

/**
 * A set of values to identify what method is to be used to route
 * packets to multiple queues.
 */
enum rte_eth_rx_mq_mode {
	/** None of DCB, RSS or VMDQ mode */
	ETH_MQ_RX_NONE = 0,

	/** RSS only */
	ETH_MQ_RX_RSS = ETH_MQ_RX_RSS_FLAG,
	/** DCB only */
	ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
	/** Both DCB and RSS enabled */
	ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,

	/** Only VMDQ, no RSS nor DCB */
	ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
	/** RSS mode with VMDQ */
	ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
	/** Use VMDQ+DCB to route traffic to queues */
	ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
	/** Enable both VMDQ and DCB in VMDq */
	ETH_MQ_RX_VMDQ_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG |
				 ETH_MQ_RX_VMDQ_FLAG,
};

/** For RX mq mode, backward-compatible aliases. */
#define ETH_RSS     ETH_MQ_RX_RSS
#define VMDQ_DCB    ETH_MQ_RX_VMDQ_DCB
#define ETH_DCB_RX  ETH_MQ_RX_DCB

/**
 * A set of values to identify what method is to be used to transmit
 * packets using multi-TCs.
 */
enum rte_eth_tx_mq_mode {
	ETH_MQ_TX_NONE = 0,  /**< It is in neither DCB nor VT mode. */
	ETH_MQ_TX_DCB,       /**< For TX side, only DCB is on. */
	ETH_MQ_TX_VMDQ_DCB,  /**< For TX side, both DCB and VT is on. */
	ETH_MQ_TX_VMDQ_ONLY, /**< Only VT on, no DCB */
};

/** For TX mq mode, backward-compatible aliases. */
#define ETH_DCB_NONE     ETH_MQ_TX_NONE
#define ETH_VMDQ_DCB_TX  ETH_MQ_TX_VMDQ_DCB
#define ETH_DCB_TX       ETH_MQ_TX_DCB

/**
 * A structure used to configure the RX features of an Ethernet port.
 */
struct rte_eth_rxmode {
	/** The multi-queue packet distribution mode to be used, e.g. RSS. */
	enum rte_eth_rx_mq_mode mq_mode;
	uint32_t max_rx_pkt_len;  /**< Only used if jumbo_frame is enabled. */
	uint16_t split_hdr_size;  /**< hdr buf size (header_split enabled). */
	/** Per-port Rx offloads, set with DEV_RX_OFFLOAD_* flags. */
	uint64_t offloads;
	__extension__
	/** The bitfield API below is obsolete; prefer the offloads field above. */
	uint16_t header_split : 1,   /**< Header Split enable. */
		hw_ip_checksum : 1,  /**< IP/UDP/TCP checksum offload enable. */
		hw_vlan_filter : 1,  /**< VLAN filter enable. */
		hw_vlan_strip : 1,   /**< VLAN strip enable. */
		hw_vlan_extend : 1,  /**< Extended VLAN enable. */
		jumbo_frame : 1,     /**< Jumbo Frame receipt enable. */
		hw_strip_crc : 1,    /**< Enable CRC stripping by hardware. */
		enable_scatter : 1,  /**< Enable scatter packets rx handler. */
		enable_lro : 1,      /**< Enable LRO. */
		hw_timestamp : 1,    /**< Enable HW timestamp. */
		security : 1,        /**< Enable rte_security offloads. */
		ignore_offload_bitfield : 1;
		/**< When set, the bitfield above is ignored and the offloads
		 *   field is used instead. */
};

/** VLAN types to inform about the VLAN header type in a packet. */
enum rte_vlan_type {
	ETH_VLAN_TYPE_UNKNOWN = 0,
	ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
	ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
	ETH_VLAN_TYPE_MAX,
};

/** A structure used to describe a VLAN filter; bit i set means VLAN ID i is on. */
struct rte_vlan_filter_conf {
	uint64_t ids[64];
};

/** A structure used to configure the RSS feature of an Ethernet port. */
struct rte_eth_rss_conf {
	uint8_t *rss_key;    /**< If not NULL, 40-byte hash key. */
	uint8_t rss_key_len; /**< Hash key length in bytes. */
	uint64_t rss_hf;     /**< Hash functions to apply - ETH_RSS_*. */
};

/*
 * The RSS offload types are defined based on the flow types which are defined
 * in rte_eth_ctrl.h. Different NICs may support different RSS offload
 * types. The supported flow types or RSS offload types can be queried by
 * rte_eth_dev_info_get().
 */
#define ETH_RSS_IPV4               (1ULL << RTE_ETH_FLOW_IPV4)
#define ETH_RSS_FRAG_IPV4          (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
#define ETH_RSS_IPV6               (1ULL << RTE_ETH_FLOW_IPV6)
#define ETH_RSS_FRAG_IPV6          (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
#define ETH_RSS_L2_PAYLOAD         (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
#define ETH_RSS_IPV6_EX            (1ULL << RTE_ETH_FLOW_IPV6_EX)
#define ETH_RSS_IPV6_TCP_EX        (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
#define ETH_RSS_IPV6_UDP_EX        (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
#define ETH_RSS_PORT               (1ULL << RTE_ETH_FLOW_PORT)
#define ETH_RSS_VXLAN              (1ULL << RTE_ETH_FLOW_VXLAN)
#define ETH_RSS_GENEVE             (1ULL << RTE_ETH_FLOW_GENEVE)
#define ETH_RSS_NVGRE              (1ULL << RTE_ETH_FLOW_NVGRE)

#define ETH_RSS_IP ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_IPV6_EX)

#define ETH_RSS_UDP ( \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_IPV6_UDP_EX)

#define ETH_RSS_TCP ( \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_IPV6_TCP_EX)

#define ETH_RSS_SCTP ( \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV6_SCTP)

#define ETH_RSS_TUNNEL ( \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

/**< Mask of valid RSS hash protocols */
#define ETH_RSS_PROTO_MASK ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_NONFRAG_IPV6_SCTP | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_L2_PAYLOAD | \
	ETH_RSS_IPV6_EX | \
	ETH_RSS_IPV6_TCP_EX | \
	ETH_RSS_IPV6_UDP_EX | \
	ETH_RSS_PORT | \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

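/*
 * Illustrative note, not part of the original header: the convenience masks
 * above simply OR together individual ETH_RSS_* bits, so a configuration
 * hashing on all IP, TCP and UDP flows can be written as:
 *
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = NULL,	// use the driver default key
 *		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
 *	};
 *
 * Bits the device cannot honour (see flow_type_rss_offloads in
 * rte_eth_dev_info) cause rte_eth_dev_rss_hash_update() to fail.
 */
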
/*
 * Definitions used for redirection table entry size.
 * Some RSS RETA sizes may not be supported by some drivers, check the
 * documentation or the description of relevant functions for more details.
 */
#define ETH_RSS_RETA_SIZE_64  64
#define ETH_RSS_RETA_SIZE_128 128
#define ETH_RSS_RETA_SIZE_256 256
#define ETH_RSS_RETA_SIZE_512 512
#define RTE_RETA_GROUP_SIZE   64

/* Definitions used for VMDQ and DCB functionality */
#define ETH_VMDQ_MAX_VLAN_FILTERS   64
#define ETH_DCB_NUM_USER_PRIORITIES 8
#define ETH_VMDQ_DCB_NUM_QUEUES     128
#define ETH_DCB_NUM_QUEUES          128

/* DCB capability defines */
#define ETH_DCB_PG_SUPPORT  0x00000001
#define ETH_DCB_PFC_SUPPORT 0x00000002

/* Definitions used for VLAN Offload functionality */
#define ETH_VLAN_STRIP_OFFLOAD  0x0001
#define ETH_VLAN_FILTER_OFFLOAD 0x0002
#define ETH_VLAN_EXTEND_OFFLOAD 0x0004

/* Definitions used for mask VLAN setting */
#define ETH_VLAN_STRIP_MASK  0x0001
#define ETH_VLAN_FILTER_MASK 0x0002
#define ETH_VLAN_EXTEND_MASK 0x0004
#define ETH_VLAN_ID_MAX      0x0FFF

/* Definitions used for receive MAC address */
#define ETH_NUM_RECEIVE_MAC_ADDR 128

/* Definitions used for unicast hash */
#define ETH_VMDQ_NUM_UC_HASH_ARRAY 128

/* Definitions used for VMDQ pool rx mode setting */
#define ETH_VMDQ_ACCEPT_UNTAG     0x0001
#define ETH_VMDQ_ACCEPT_HASH_MC   0x0002
#define ETH_VMDQ_ACCEPT_HASH_UC   0x0004
#define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
#define ETH_VMDQ_ACCEPT_MULTICAST 0x0010

/** Maximum number of VLANs per mirror rule */
#define ETH_MIRROR_MAX_VLANS 64

#define ETH_MIRROR_VIRTUAL_POOL_UP   0x01 /**< Virtual Pool uplink Mirroring. */
#define ETH_MIRROR_UPLINK_PORT       0x02 /**< Uplink Port Mirroring. */
#define ETH_MIRROR_DOWNLINK_PORT     0x04 /**< Downlink Port Mirroring. */
#define ETH_MIRROR_VLAN              0x08 /**< VLAN Mirroring. */
#define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10 /**< Virtual Pool downlink Mirroring. */

/** A structure used to configure VLAN traffic mirror of an Ethernet port. */
struct rte_eth_vlan_mirror {
	uint64_t vlan_mask; /**< mask for valid VLAN ID. */
	uint16_t vlan_id[ETH_MIRROR_MAX_VLANS]; /**< VLAN ID list. */
};

/** A structure used to configure traffic mirror of an Ethernet port. */
struct rte_eth_mirror_conf {
	uint8_t rule_type;  /**< Mirroring rule type. */
	uint8_t dst_pool;   /**< Destination pool for this mirror rule. */
	uint64_t pool_mask; /**< Bitmap of pools for pool mirroring. */
	/** VLAN ID setting for VLAN mirroring. */
	struct rte_eth_vlan_mirror vlan;
};

/**
 * A structure used to configure 64 entries of the Redirection Table of the
 * Receive Side Scaling (RSS) feature of an Ethernet port.
 */
struct rte_eth_rss_reta_entry64 {
	/** Mask bits indicate which entries need to be updated/queried. */
	uint64_t mask;
	/** Group of 64 redirection table entries. */
	uint16_t reta[RTE_RETA_GROUP_SIZE];
};

/** Possible numbers of traffic classes in DCB configurations. */
enum rte_eth_nb_tcs {
	ETH_4_TCS = 4, /**< 4 TCs. */
	ETH_8_TCS = 8  /**< 8 TCs. */
};

/** Possible numbers of queue pools in VMDQ configurations. */
enum rte_eth_nb_pools {
	ETH_8_POOLS = 8,   /**< 8 VMDq pools. */
	ETH_16_POOLS = 16, /**< 16 VMDq pools. */
	ETH_32_POOLS = 32, /**< 32 VMDq pools. */
	ETH_64_POOLS = 64  /**< 64 VMDq pools. */
};

/* This structure may be extended in future. */
struct rte_eth_dcb_rx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
	/** Traffic class each UP mapped to. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

struct rte_eth_vmdq_dcb_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
	/** Traffic class each UP mapped to. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

struct rte_eth_dcb_tx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
	/** Traffic class each UP mapped to. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

struct rte_eth_vmdq_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< VMDq mode, 64 pools. */
};

/** A structure used to configure the VMDQ+DCB feature of an Ethernet port. */
struct rte_eth_vmdq_dcb_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools */
	uint8_t enable_default_pool; /**< If non-zero, use a default pool */
	uint8_t default_pool; /**< The default pool, if applicable */
	uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */
	struct {
		uint16_t vlan_id; /**< The vlan id of the received frame */
		uint64_t pools;   /**< Bitmask of pools for packet rx */
	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
	/** Selects a queue in a pool */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

/**
 * A structure used to configure the VMDQ feature of an Ethernet port when
 * not combined with the DCB feature.
 */
struct rte_eth_vmdq_rx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< VMDq only mode, 8 or 64 pools */
	uint8_t enable_default_pool; /**< If non-zero, use a default pool */
	uint8_t default_pool; /**< The default pool, if applicable */
	uint8_t enable_loop_back; /**< Enable VT loop back */
	uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */
	uint32_t rx_mode; /**< Flags from ETH_VMDQ_ACCEPT_* */
	struct {
		uint16_t vlan_id; /**< The vlan id of the received frame */
		uint64_t pools;   /**< Bitmask of pools for packet rx */
	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
};

/** A structure used to configure the TX features of an Ethernet port. */
struct rte_eth_txmode {
	enum rte_eth_tx_mq_mode mq_mode; /**< TX multi-queues mode. */
	/** Per-port Tx offloads, set with DEV_TX_OFFLOAD_* flags. */
	uint64_t offloads;

	/* For i40e specifically */
	uint16_t pvid;
	__extension__
	uint8_t hw_vlan_reject_tagged : 1,
		/**< If set, reject sending out tagged pkts */
		hw_vlan_reject_untagged : 1,
		/**< If set, reject sending out untagged pkts */
		hw_vlan_insert_pvid : 1;
		/**< If set, enable port based VLAN insertion */
};

/** A structure used to configure an RX ring of an Ethernet port. */
struct rte_eth_rxconf {
	struct rte_eth_thresh rx_thresh; /**< RX ring threshold registers. */
	uint16_t rx_free_thresh; /**< Drives the freeing of RX descriptors. */
	uint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */
	uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
	/** Per-queue Rx offloads, set with DEV_RX_OFFLOAD_* flags. */
	uint64_t offloads;
};

#define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001 /**< nb_segs=1 for all mbufs */
#define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002 /**< refcnt can be ignored */
#define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004 /**< all bufs come from same mempool */
#define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100 /**< disable VLAN offload */
#define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200 /**< disable SCTP checksum offload */
#define ETH_TXQ_FLAGS_NOXSUMUDP  0x0400 /**< disable UDP checksum offload */
#define ETH_TXQ_FLAGS_NOXSUMTCP  0x0800 /**< disable TCP checksum offload */
#define ETH_TXQ_FLAGS_NOOFFLOADS \
		(ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
		 ETH_TXQ_FLAGS_NOXSUMUDP  | ETH_TXQ_FLAGS_NOXSUMTCP)
#define ETH_TXQ_FLAGS_NOXSUMS \
		(ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
		 ETH_TXQ_FLAGS_NOXSUMTCP)

/**
 * When set, the tx queue uses the per-queue offloads field below instead of
 * the txq_flags above to enable/disable offloads.
 */
#define ETH_TXQ_FLAGS_IGNORE 0x8000

/** A structure used to configure a TX ring of an Ethernet port. */
struct rte_eth_txconf {
	struct rte_eth_thresh tx_thresh; /**< TX ring threshold registers. */
	uint16_t tx_rs_thresh; /**< Drives the setting of RS bit on TXDs. */
	uint16_t tx_free_thresh; /**< Start freeing TX buffers if there are
				      less free descriptors than this value. */
	uint32_t txq_flags; /**< Set flags for the Tx queue (ETH_TXQ_FLAGS_*). */
	uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
	/** Per-queue Tx offloads, set with DEV_TX_OFFLOAD_* flags. */
	uint64_t offloads;
};

/** A structure containing information about HW descriptor ring limitations. */
struct rte_eth_desc_lim {
	uint16_t nb_max;   /**< Max allowed number of descriptors. */
	uint16_t nb_min;   /**< Min allowed number of descriptors. */
	uint16_t nb_align; /**< Number of descriptors should be aligned to. */
	/** Max allowed number of segments per whole packet. */
	uint16_t nb_seg_max;
	/** Max number of segments per one MTU. */
	uint16_t nb_mtu_seg_max;
};

/** This enum indicates the flow control mode. */
enum rte_eth_fc_mode {
	RTE_FC_NONE = 0, /**< Disable flow control. */
	RTE_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */
	RTE_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */
	RTE_FC_FULL      /**< Enable flow control on both sides. */
};

/** A structure used to configure Ethernet flow control parameters. */
struct rte_eth_fc_conf {
	uint32_t high_water;  /**< High threshold value to trigger XOFF. */
	uint32_t low_water;   /**< Low threshold value to trigger XON. */
	uint16_t pause_time;  /**< Pause quota in the Pause frame. */
	uint16_t send_xon;    /**< Whether an XON frame needs to be sent. */
	enum rte_eth_fc_mode mode;  /**< Link flow control mode. */
	uint8_t mac_ctrl_frame_fwd; /**< Forward MAC control frames. */
	uint8_t autoneg;      /**< Use Pause autoneg. */
};

/** A structure used to configure Ethernet priority flow control parameters. */
struct rte_eth_pfc_conf {
	struct rte_eth_fc_conf fc; /**< General flow control parameter. */
	uint8_t priority;          /**< VLAN User Priority. */
};

/** Memory space that can be configured to store Flow Director filters. */
enum rte_fdir_pballoc_type {
	RTE_FDIR_PBALLOC_64K = 0, /**< 64k. */
	RTE_FDIR_PBALLOC_128K,    /**< 128k. */
	RTE_FDIR_PBALLOC_256K,    /**< 256k. */
};

/** Select report mode of FDIR hash information in RX descriptors. */
enum rte_fdir_status_mode {
	RTE_FDIR_NO_REPORT_STATUS = 0, /**< Never report FDIR hash. */
	RTE_FDIR_REPORT_STATUS, /**< Only report FDIR hash for matching pkts. */
	RTE_FDIR_REPORT_STATUS_ALWAYS, /**< Always report FDIR hash. */
};

/**
 * A structure used to configure the Flow Director (FDIR) feature
 * of an Ethernet port.
 */
struct rte_fdir_conf {
	enum rte_fdir_mode mode;            /**< Flow Director mode. */
	enum rte_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
	enum rte_fdir_status_mode status;   /**< How to report FDIR hash. */
	/** RX queue of packets matching a "drop" filter in perfect mode. */
	uint8_t drop_queue;
	struct rte_eth_fdir_masks mask;
	/** Flex payload configuration. */
	struct rte_eth_fdir_flex_conf flex_conf;
};

/** UDP tunneling configuration. */
struct rte_eth_udp_tunnel {
	uint16_t udp_port; /**< UDP port used for the tunnel. */
	uint8_t prot_type; /**< Tunnel type. */
};

/** A structure used to enable/disable specific device interrupts. */
struct rte_intr_conf {
	uint32_t lsc:1; /**< enable/disable lsc interrupt */
	uint32_t rxq:1; /**< enable/disable rxq interrupt */
	uint32_t rmv:1; /**< enable/disable rmv interrupt */
};

/**
 * A structure used to configure an Ethernet port.
 * Depending upon the RX multi-queue mode, extra advanced
 * configuration settings may be needed.
 */
struct rte_eth_conf {
	uint32_t link_speeds; /**< bitmap of ETH_LINK_SPEED_ flags;
				   ETH_LINK_SPEED_FIXED disables autoneg. */
	struct rte_eth_rxmode rxmode; /**< Port RX configuration. */
	struct rte_eth_txmode txmode; /**< Port TX configuration. */
	uint32_t lpbk_mode; /**< Loopback mode, 0 to disable (driver-specific). */
	struct {
		struct rte_eth_rss_conf rss_conf; /**< Port RSS configuration. */
		/** Port vmdq+dcb configuration. */
		struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
		/** Port dcb RX configuration. */
		struct rte_eth_dcb_rx_conf dcb_rx_conf;
		/** Port vmdq RX configuration. */
		struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
	} rx_adv_conf; /**< Port RX filtering configuration. */
	union {
		/** Port vmdq+dcb TX configuration. */
		struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
		/** Port dcb TX configuration. */
		struct rte_eth_dcb_tx_conf dcb_tx_conf;
		/** Port vmdq TX configuration. */
		struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
	} tx_adv_conf; /**< Port TX DCB configuration (union). */
	/** Enable DCB PFC capability if needed (ETH_DCB_PFC_SUPPORT). */
	uint32_t dcb_capability_en;
	struct rte_fdir_conf fdir_conf; /**< FDIR configuration. */
	struct rte_intr_conf intr_conf; /**< Interrupt mode configuration. */
};

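/*
 * Illustrative example, not part of the original header: a minimal port
 * configuration enabling RSS across two RX queues and one TX queue.
 * ETHER_MAX_LEN comes from rte_ether.h; rte_eth_dev_configure() is declared
 * later in this file.
 */
static inline int
example_configure_port(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
			.max_rx_pkt_len = ETHER_MAX_LEN,
			.ignore_offload_bitfield = 1,
		},
		.rx_adv_conf.rss_conf = {
			.rss_hf = ETH_RSS_IP,
		},
	};

	return rte_eth_dev_configure(port_id, 2, 1, &conf);
}
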
#define DEV_RX_OFFLOAD_VLAN_STRIP       0x00000001
#define DEV_RX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_RX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_RX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_RX_OFFLOAD_TCP_LRO          0x00000010
#define DEV_RX_OFFLOAD_QINQ_STRIP       0x00000020
#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
#define DEV_RX_OFFLOAD_HEADER_SPLIT     0x00000100
#define DEV_RX_OFFLOAD_VLAN_FILTER      0x00000200
#define DEV_RX_OFFLOAD_VLAN_EXTEND      0x00000400
#define DEV_RX_OFFLOAD_JUMBO_FRAME      0x00000800
#define DEV_RX_OFFLOAD_CRC_STRIP        0x00001000
#define DEV_RX_OFFLOAD_SCATTER          0x00002000
#define DEV_RX_OFFLOAD_TIMESTAMP        0x00004000
#define DEV_RX_OFFLOAD_SECURITY         0x00008000
#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
				 DEV_RX_OFFLOAD_UDP_CKSUM | \
				 DEV_RX_OFFLOAD_TCP_CKSUM)
#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
			     DEV_RX_OFFLOAD_VLAN_FILTER | \
			     DEV_RX_OFFLOAD_VLAN_EXTEND)

/*
 * If new Rx offload capabilities are defined, they must also be
 * mentioned in rte_rx_offload_names in rte_ethdev.c.
 */

#define DEV_TX_OFFLOAD_VLAN_INSERT      0x00000001
#define DEV_TX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_TX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_TX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_TX_OFFLOAD_SCTP_CKSUM       0x00000010
#define DEV_TX_OFFLOAD_TCP_TSO          0x00000020
#define DEV_TX_OFFLOAD_UDP_TSO          0x00000040
#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
#define DEV_TX_OFFLOAD_QINQ_INSERT      0x00000100
#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200
#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400
#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800
#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000
#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
/** Multiple threads can invoke rte_eth_tx_burst() concurrently on the same
 * tx queue without SW lock.
 */
#define DEV_TX_OFFLOAD_MT_LOCKFREE      0x00004000

/** Device supports multi segment send. */
#define DEV_TX_OFFLOAD_MULTI_SEGS       0x00008000

/** Device supports optimization for fast release of mbufs. */
#define DEV_TX_OFFLOAD_MBUF_FAST_FREE   0x00010000

#define DEV_TX_OFFLOAD_SECURITY         0x00020000

/** Device supports generic UDP tunneled packet TSO. */
#define DEV_TX_OFFLOAD_UDP_TNL_TSO      0x00040000

/** Device supports generic IP tunneled packet TSO. */
#define DEV_TX_OFFLOAD_IP_TNL_TSO       0x00080000

/** Device supports Rx queue setup after device started. */
#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001

/** Device supports Tx queue setup after device started. */
#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002

/*
 * If new Tx offload capabilities are defined, they must also be
 * mentioned in rte_tx_offload_names in rte_ethdev.c.
 */

struct rte_pci_device;

/*
 * Fallback default preferred Rx/Tx port parameters.
 * These are used if an application requests default parameters
 * but the PMD does not provide preferred values.
 */
#define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
#define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
#define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
#define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1

/**
 * Preferred Rx/Tx port parameters.
 * There are separate instances of this structure for transmission
 * and reception respectively.
 */
struct rte_eth_dev_portconf {
	uint16_t burst_size; /**< Device-preferred burst size */
	uint16_t ring_size;  /**< Device-preferred size of queue rings */
	uint16_t nb_queues;  /**< Device-preferred number of queues */
};

/**
 * Default value for switch domain id when ethdev does not support switch
 * domain definitions.
 */
#define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (0)

/** Ethernet device associated switch information. */
struct rte_eth_switch_info {
	const char *name;   /**< switch name */
	uint16_t domain_id; /**< switch domain id */
	/** Port ID of the switch; the mapping is switch-specific. */
	uint16_t port_id;
};

/** Ethernet device information. */
struct rte_eth_dev_info {
	struct rte_device *device; /**< Generic device information */
	const char *driver_name;   /**< Device Driver name. */
	unsigned int if_index;     /**< Index to bound host interface, or 0 if none. */
	const uint32_t *dev_flags; /**< Device flags */
	uint32_t min_rx_bufsize;   /**< Minimum size of RX buffer. */
	uint32_t max_rx_pktlen;    /**< Maximum configurable length of RX pkt. */
	uint16_t max_rx_queues;    /**< Maximum number of RX queues. */
	uint16_t max_tx_queues;    /**< Maximum number of TX queues. */
	uint32_t max_mac_addrs;    /**< Maximum number of MAC addresses. */
	/** Maximum number of hash MAC addresses for MTA and UTA. */
	uint32_t max_hash_mac_addrs;
	uint16_t max_vfs;          /**< Maximum number of VFs. */
	uint16_t max_vmdq_pools;   /**< Maximum number of VMDq pools. */
	/** All RX offload capabilities including all per-queue ones. */
	uint64_t rx_offload_capa;
	/** All TX offload capabilities including all per-queue ones. */
	uint64_t tx_offload_capa;
	/** Device per-queue RX offload capabilities. */
	uint64_t rx_queue_offload_capa;
	/** Device per-queue TX offload capabilities. */
	uint64_t tx_queue_offload_capa;
	/** Device redirection table size, the total number of entries. */
	uint16_t reta_size;
	uint8_t hash_key_size;     /**< Hash key size in bytes */
	/** Bit mask of RSS offloads; the bit offset also means flow type. */
	uint64_t flow_type_rss_offloads;
	struct rte_eth_rxconf default_rxconf; /**< Default RX configuration */
	struct rte_eth_txconf default_txconf; /**< Default TX configuration */
	uint16_t vmdq_queue_base;  /**< First queue ID for VMDQ pools. */
	uint16_t vmdq_queue_num;   /**< Queue number for VMDQ pools. */
	uint16_t vmdq_pool_base;   /**< First ID of VMDQ pools. */
	struct rte_eth_desc_lim rx_desc_lim; /**< RX descriptors limits */
	struct rte_eth_desc_lim tx_desc_lim; /**< TX descriptors limits */
	uint32_t speed_capa;       /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
	/* Configured number of rx/tx queues */
	uint16_t nb_rx_queues;     /**< Number of RX queues. */
	uint16_t nb_tx_queues;     /**< Number of TX queues. */
	/** Rx parameter recommendations */
	struct rte_eth_dev_portconf default_rxportconf;
	/** Tx parameter recommendations */
	struct rte_eth_dev_portconf default_txportconf;
	/** Generic device capabilities (RTE_ETH_DEV_CAPA_). */
	uint64_t dev_capa;
	/** Switching information for ports on a device with a switch. */
	struct rte_eth_switch_info switch_info;
};

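/*
 * Illustrative example, not part of the original header: request an Rx
 * offload only after checking the capabilities reported by the driver.
 * Assumes <errno.h>; rte_eth_dev_info_get() is declared later in this file.
 */
static inline int
example_enable_rx_checksum(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_CHECKSUM) !=
			DEV_RX_OFFLOAD_CHECKSUM)
		return -ENOTSUP; /* device cannot checksum all of IP/UDP/TCP */

	conf->rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}
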
/**
 * Ethernet device RX queue information structure.
 * Used to retrieve information about a configured queue.
 */
struct rte_eth_rxq_info {
	struct rte_mempool *mp;     /**< mempool used by that queue. */
	struct rte_eth_rxconf conf; /**< queue config parameters. */
	uint8_t scattered_rx;       /**< scattered packets RX supported. */
	uint16_t nb_desc;           /**< configured number of RXDs. */
} __rte_cache_min_aligned;

/**
 * Ethernet device TX queue information structure.
 * Used to retrieve information about a configured queue.
 */
struct rte_eth_txq_info {
	struct rte_eth_txconf conf; /**< queue config parameters. */
	uint16_t nb_desc;           /**< configured number of TXDs. */
} __rte_cache_min_aligned;

/** Maximum name length for extended statistics counters */
#define RTE_ETH_XSTATS_NAME_SIZE 64

/** An Ethernet device extended statistic structure. */
struct rte_eth_xstat {
	uint64_t id;    /**< The index in the xstats name array. */
	uint64_t value; /**< The statistic counter value. */
};

/** A name element for extended statistics. */
struct rte_eth_xstat_name {
	char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */
};

#define ETH_DCB_NUM_TCS   8
#define ETH_MAX_VMDQ_POOL 64

/**
 * A structure used to get the information of queue and
 * TC mapping on both TX and RX paths.
 */
struct rte_eth_dcb_tc_queue_mapping {
	/** rx queues assigned to tc per Pool */
	struct {
		uint8_t base;
		uint8_t nb_queue;
	} tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
	/** tx queues assigned to tc per Pool */
	struct {
		uint8_t base;
		uint8_t nb_queue;
	} tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
};

/**
 * A structure used to get the information of DCB.
 * It includes TC UP mapping and queue TC mapping.
 */
struct rte_eth_dcb_info {
	uint8_t nb_tcs; /**< number of TCs */
	uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
	uint8_t tc_bws[ETH_DCB_NUM_TCS]; /**< TX BW percentage for each TC */
	/** rx queues assigned to tc */
	struct rte_eth_dcb_tc_queue_mapping tc_queue;
};

#define RTE_ETH_QUEUE_STATE_STOPPED 0
#define RTE_ETH_QUEUE_STATE_STARTED 1

#define RTE_ETH_ALL RTE_MAX_ETHPORTS

/* Macros to check for valid port */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return retval; \
	} \
} while (0)

#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return; \
	} \
} while (0)

/** l2 tunnel enable mask */
#define ETH_L2_TUNNEL_ENABLE_MASK     0x00000001
/** l2 tunnel insertion mask */
#define ETH_L2_TUNNEL_INSERTION_MASK  0x00000002
/** l2 tunnel stripping mask */
#define ETH_L2_TUNNEL_STRIPPING_MASK  0x00000004
/** l2 tunnel forwarding mask */
#define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008

/** Function type used for RX packet processing callbacks. */
typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
	void *user_param);

/** Function type used for TX packet processing callbacks. */
typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);

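/*
 * Illustrative example, not part of the original header: an
 * rte_rx_callback_fn that counts received packets. A callback must return
 * the number of packets it leaves in pkts[]; register it with
 * rte_eth_add_rx_callback(port_id, queue_id, example_rx_count_cb, &counter),
 * declared later in this file.
 */
static uint16_t
example_rx_count_cb(uint16_t port_id, uint16_t queue,
		    struct rte_mbuf *pkts[], uint16_t nb_pkts,
		    uint16_t max_pkts, void *user_param)
{
	uint64_t *counter = user_param;

	(void)port_id; (void)queue; (void)pkts; (void)max_pkts;
	*counter += nb_pkts;
	return nb_pkts;
}
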
/** Possible states of an ethdev port. */
enum rte_eth_dev_state {
	RTE_ETH_DEV_UNUSED = 0, /**< Device is unused before being probed. */
	RTE_ETH_DEV_ATTACHED,   /**< Device is attached when allocated in probing. */
	RTE_ETH_DEV_REMOVED,    /**< Device is in removed state when plug-out is detected. */
};

struct rte_eth_dev_sriov {
	uint8_t active;          /**< SRIOV is active with 16, 32 or 64 pools */
	uint8_t nb_q_per_pool;   /**< rx queue number per pool */
	uint16_t def_vmdq_idx;   /**< Default pool num used for PF */
	uint16_t def_pool_q_idx; /**< Default pool queue start reg index */
};
#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)

#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN

#define RTE_ETH_DEV_NO_OWNER 0

#define RTE_ETH_MAX_OWNER_NAME_LEN 64

struct rte_eth_dev_owner {
	uint64_t id; /**< The owner unique identifier. */
	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< The owner name. */
};

/** Device supports link state interrupt */
#define RTE_ETH_DEV_INTR_LSC     0x0002
/** Device is a bonded slave */
#define RTE_ETH_DEV_BONDED_SLAVE 0x0004
/** Device supports device removal interrupt */
#define RTE_ETH_DEV_INTR_RMV     0x0008
/** Device is port representor */
#define RTE_ETH_DEV_REPRESENTOR  0x0010

/** Iterates over valid ethdev ports owned by a specific owner. */
uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
		const uint64_t owner_id);

/** Macro to iterate over all enabled ethdev ports owned by a specific owner. */
#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
	for (p = rte_eth_find_next_owned_by(0, o); \
	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
	     p = rte_eth_find_next_owned_by(p + 1, o))

/** Iterates over valid ethdev ports. */
uint16_t rte_eth_find_next(uint16_t port_id);

/** Macro to iterate over all enabled and ownerless ethdev ports. */
#define RTE_ETH_FOREACH_DEV(p) \
	RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)

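/*
 * Illustrative example, not part of the original header: count every port
 * currently usable by the application with the iterator above.
 */
static inline uint16_t
example_count_ports(void)
{
	uint16_t port_id, count = 0;

	RTE_ETH_FOREACH_DEV(port_id)
		count++;
	return count;
}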

int __rte_experimental rte_eth_dev_owner_new(uint64_t *owner_id);

int __rte_experimental rte_eth_dev_owner_set(const uint16_t port_id,
		const struct rte_eth_dev_owner *owner);

int __rte_experimental rte_eth_dev_owner_unset(const uint16_t port_id,
		const uint64_t owner_id);

void __rte_experimental rte_eth_dev_owner_delete(const uint64_t owner_id);

int __rte_experimental rte_eth_dev_owner_get(const uint16_t port_id,
		struct rte_eth_dev_owner *owner);

__rte_deprecated
uint16_t rte_eth_dev_count(void);

uint16_t rte_eth_dev_count_avail(void);

uint16_t __rte_experimental rte_eth_dev_count_total(void);

int rte_eth_dev_attach(const char *devargs, uint16_t *port_id);

int rte_eth_dev_detach(uint16_t port_id, char *devname);

uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);

const char * __rte_experimental rte_eth_dev_rx_offload_name(uint64_t offload);

const char * __rte_experimental rte_eth_dev_tx_offload_name(uint64_t offload);

int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
		uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);

int __rte_experimental
rte_eth_dev_is_removed(uint16_t port_id);

int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);

int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);

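/*
 * Illustrative example, not part of the original header: the canonical
 * bring-up sequence - configure, adjust descriptor counts, set up queues,
 * start. Error handling is compressed; the mempool is assumed to come from
 * rte_pktmbuf_pool_create(), and some functions used below are declared
 * later in this file.
 */
static inline int
example_port_init(uint16_t port_id, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf conf = {
		.rxmode = { .ignore_offload_bitfield = 1 },
	};
	uint16_t nb_rxd = 512, nb_txd = 512;
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (ret != 0)
		return ret;
	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
			(unsigned int)rte_eth_dev_socket_id(port_id),
			NULL, mbuf_pool);
	if (ret < 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
			(unsigned int)rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;
	return rte_eth_dev_start(port_id);
}
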
int rte_eth_dev_socket_id(uint16_t port_id);

int rte_eth_dev_is_valid_port(uint16_t port_id);

int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);

int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);

int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);

int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);

int rte_eth_dev_start(uint16_t port_id);

void rte_eth_dev_stop(uint16_t port_id);

int rte_eth_dev_set_link_up(uint16_t port_id);

int rte_eth_dev_set_link_down(uint16_t port_id);

void rte_eth_dev_close(uint16_t port_id);

int rte_eth_dev_reset(uint16_t port_id);

void rte_eth_promiscuous_enable(uint16_t port_id);

void rte_eth_promiscuous_disable(uint16_t port_id);

int rte_eth_promiscuous_get(uint16_t port_id);

void rte_eth_allmulticast_enable(uint16_t port_id);

void rte_eth_allmulticast_disable(uint16_t port_id);

int rte_eth_allmulticast_get(uint16_t port_id);

void rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);

void rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);

int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);

int rte_eth_stats_reset(uint16_t port_id);

int rte_eth_xstats_get_names(uint16_t port_id,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);

int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
		unsigned int n);

int
rte_eth_xstats_get_names_by_id(uint16_t port_id,
		struct rte_eth_xstat_name *xstats_names, unsigned int size,
		uint64_t *ids);

int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
		uint64_t *values, unsigned int size);

int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
		uint64_t *id);

void rte_eth_xstats_reset(uint16_t port_id);

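/*
 * Illustrative example, not part of the original header: dump every extended
 * statistic of a port, querying the required array size first. Assumes
 * <stdio.h>, <stdlib.h> and <inttypes.h> are available.
 */
static inline void
example_dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat *xstats;
	struct rte_eth_xstat_name *names;
	int i, n;

	n = rte_eth_xstats_get(port_id, NULL, 0);
	if (n <= 0)
		return;
	xstats = malloc(sizeof(*xstats) * n);
	names = malloc(sizeof(*names) * n);
	if (xstats != NULL && names != NULL &&
	    rte_eth_xstats_get(port_id, xstats, n) == n &&
	    rte_eth_xstats_get_names(port_id, names, n) == n)
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, xstats[i].value);
	free(xstats);
	free(names);
}
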
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
		uint16_t tx_queue_id, uint8_t stat_idx);

int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
		uint16_t rx_queue_id,
		uint8_t stat_idx);

void rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr);

void rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);

int rte_eth_dev_fw_version_get(uint16_t port_id,
		char *fw_version, size_t fw_size);

int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
		uint32_t *ptypes, int num);

int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);

int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);

int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);

int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
		int on);

int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
		enum rte_vlan_type vlan_type,
		uint16_t tag_type);

int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);

int rte_eth_dev_get_vlan_offload(uint16_t port_id);

int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);

typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
		void *userdata);

/**
 * Structure used to buffer packets for future TX.
 * Used by the rte_eth_tx_buffer() and rte_eth_tx_buffer_flush() APIs.
 */
struct rte_eth_dev_tx_buffer {
	buffer_tx_error_fn error_callback;
	void *error_userdata;
	uint16_t size;   /**< Size of buffer for buffered tx */
	uint16_t length; /**< Number of packets in the array */
	struct rte_mbuf *pkts[];
	/**< Pending packets to be sent on explicit flush or when full */
};

/** Calculate the size of the tx buffer. */
#define RTE_ETH_TX_BUFFER_SIZE(sz) \
	(sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn callback, void *userdata);

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata);

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata);

int
rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);

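/*
 * Illustrative example, not part of the original header: allocate and
 * initialize a TX buffer able to hold 32 packets. Assumes <stdlib.h>;
 * rte_zmalloc() would normally be preferred in DPDK code.
 */
static inline struct rte_eth_dev_tx_buffer *
example_tx_buffer_create(void)
{
	struct rte_eth_dev_tx_buffer *buf;

	buf = calloc(1, RTE_ETH_TX_BUFFER_SIZE(32));
	if (buf != NULL && rte_eth_tx_buffer_init(buf, 32) != 0) {
		free(buf);
		buf = NULL;
	}
	return buf;
}
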
/** Subtypes of IPsec offload event. */
enum rte_eth_event_ipsec_subtype {
	RTE_ETH_EVENT_IPSEC_UNKNOWN = 0,    /**< Unknown event type. */
	RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW,   /**< Sequence number overflow. */
	RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY, /**< Soft time expiry of SA. */
	RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY, /**< Soft byte expiry of SA. */
	RTE_ETH_EVENT_IPSEC_MAX             /**< Max value of this enum. */
};

/** Descriptor for IPsec event, carrying extra event information. */
struct rte_eth_event_ipsec_desc {
	enum rte_eth_event_ipsec_subtype subtype; /**< Type of event. */
	/** Event-specific metadata, e.g. the SA index for expiry events. */
	uint64_t metadata;
};

/** The eth device event type for interrupt, and maybe others in the future. */
enum rte_eth_event_type {
	RTE_ETH_EVENT_UNKNOWN,     /**< unknown event type */
	RTE_ETH_EVENT_INTR_LSC,    /**< lsc interrupt event */
	RTE_ETH_EVENT_QUEUE_STATE, /**< queue state event (enabled/disabled) */
	RTE_ETH_EVENT_INTR_RESET,  /**< reset interrupt event, sent to VF on PF reset */
	RTE_ETH_EVENT_VF_MBOX,     /**< message from the VF received by PF */
	RTE_ETH_EVENT_MACSEC,      /**< MACsec offload related event */
	RTE_ETH_EVENT_INTR_RMV,    /**< device removal event */
	RTE_ETH_EVENT_NEW,         /**< port is probed */
	RTE_ETH_EVENT_DESTROY,     /**< port is released */
	RTE_ETH_EVENT_IPSEC,       /**< IPsec offload related event */
	RTE_ETH_EVENT_MAX          /**< max value of this enum */
};

typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
		enum rte_eth_event_type event, void *cb_arg, void *ret_param);

int rte_eth_dev_callback_register(uint16_t port_id,
		enum rte_eth_event_type event,
		rte_eth_dev_cb_fn cb_fn, void *cb_arg);

int rte_eth_dev_callback_unregister(uint16_t port_id,
		enum rte_eth_event_type event,
		rte_eth_dev_cb_fn cb_fn, void *cb_arg);

int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);

int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);

int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);

int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
		int epfd, int op, void *data);

int rte_eth_led_on(uint16_t port_id);

int rte_eth_led_off(uint16_t port_id);

int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
		struct rte_eth_fc_conf *fc_conf);

int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
		struct rte_eth_fc_conf *fc_conf);

int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
		struct rte_eth_pfc_conf *pfc_conf);

int rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *mac_addr,
		uint32_t pool);

int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *mac_addr);

int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
		struct ether_addr *mac_addr);

int rte_eth_dev_rss_reta_update(uint16_t port_id,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);

int rte_eth_dev_rss_reta_query(uint16_t port_id,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);

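/*
 * Illustrative example, not part of the original header: spread all RETA
 * entries round-robin across nb_queues RX queues. reta_size comes from
 * rte_eth_dev_info; assumes <string.h> for memset().
 */
static inline int
example_setup_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
						  RTE_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
			1ULL << (i % RTE_RETA_GROUP_SIZE);
		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
			i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
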
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
		uint8_t on);

int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);

int rte_eth_mirror_rule_set(uint16_t port_id,
		struct rte_eth_mirror_conf *mirror_conf,
		uint8_t rule_id,
		uint8_t on);

int rte_eth_mirror_rule_reset(uint16_t port_id,
		uint8_t rule_id);

int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
		uint16_t tx_rate);

int rte_eth_dev_rss_hash_update(uint16_t port_id,
		struct rte_eth_rss_conf *rss_conf);

int
rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
		struct rte_eth_rss_conf *rss_conf);

int
rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
		struct rte_eth_udp_tunnel *tunnel_udp);

int
rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
		struct rte_eth_udp_tunnel *tunnel_udp);

int rte_eth_dev_filter_supported(uint16_t port_id,
		enum rte_filter_type filter_type);

int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
		enum rte_filter_op filter_op, void *arg);

int rte_eth_dev_get_dcb_info(uint16_t port_id,
		struct rte_eth_dcb_info *dcb_info);

struct rte_eth_rxtx_callback;

const struct rte_eth_rxtx_callback *
rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param);

const struct rte_eth_rxtx_callback *
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param);

const struct rte_eth_rxtx_callback *
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param);

int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb);

int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb);

int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo);

int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo);

int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);

int rte_eth_dev_get_eeprom_length(uint16_t port_id);

int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);

int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);

int __rte_experimental
rte_eth_dev_get_module_info(uint16_t port_id,
		struct rte_eth_dev_module_info *modinfo);

int __rte_experimental
rte_eth_dev_get_module_eeprom(uint16_t port_id,
		struct rte_dev_eeprom_info *info);

int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
		struct ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);

int rte_eth_timesync_enable(uint16_t port_id);

int rte_eth_timesync_disable(uint16_t port_id);

int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
		struct timespec *timestamp, uint32_t flags);

int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
		struct timespec *timestamp);

int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);

int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);

int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
		struct rte_eth_l2_tunnel_conf *l2_tunnel);

int
rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
		struct rte_eth_l2_tunnel_conf *l2_tunnel,
		uint32_t mask,
		uint8_t en);

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);

int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
		uint16_t *nb_rx_desc,
		uint16_t *nb_tx_desc);

int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id);


#include <rte_ethdev_core.h>

static inline uint16_t
rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	uint16_t nb_rx;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return 0;
	}
#endif
	nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
				     rx_pkts, nb_pkts);

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
		struct rte_eth_rxtx_callback *cb =
				dev->post_rx_burst_cbs[queue_id];

		do {
			nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
					  nb_pkts, cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	return nb_rx;
}

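/*
 * Illustrative example, not part of the original header: the classic receive
 * loop body built on rte_eth_rx_burst(). Real applications would process the
 * packets instead of freeing them; rte_pktmbuf_free() requires <rte_mbuf.h>.
 */
static inline void
example_rx_poll_once(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb_rx;

	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	for (i = 0; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);
}
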
static inline int
rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
	if (queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
}

static inline int
rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
	return (*dev->dev_ops->rx_descriptor_done)(
		dev->data->rx_queues[queue_id], offset);
}

#define RTE_ETH_RX_DESC_AVAIL   0 /**< Desc available for hw. */
#define RTE_ETH_RX_DESC_DONE    1 /**< Desc done, filled by hw. */
#define RTE_ETH_RX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */

static inline int
rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
	uint16_t offset)
{
	struct rte_eth_dev *dev;
	void *rxq;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
	dev = &rte_eth_devices[port_id];
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;
#endif
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
	rxq = dev->data->rx_queues[queue_id];

	return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
}

#define RTE_ETH_TX_DESC_FULL    0 /**< Desc filled for hw, waiting xmit. */
#define RTE_ETH_TX_DESC_DONE    1 /**< Desc done, packet is transmitted. */
#define RTE_ETH_TX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */

static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
	uint16_t queue_id, uint16_t offset)
{
	struct rte_eth_dev *dev;
	void *txq;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
	dev = &rte_eth_devices[port_id];
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;
#endif
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
	txq = dev->data->tx_queues[queue_id];

	return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
}

static inline uint16_t
rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return 0;
	}
#endif

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];

	if (unlikely(cb != NULL)) {
		do {
			nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
					    cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
				    tx_pkts, nb_pkts);
}

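/*
 * Illustrative example, not part of the original header: rte_eth_tx_burst()
 * may send fewer packets than requested, so a common pattern retries the
 * unsent tail and drops whatever still cannot be queued.
 * rte_pktmbuf_free() requires <rte_mbuf.h>.
 */
static inline void
example_send_burst(uint16_t port_id, uint16_t queue_id,
		   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

	while (sent < nb_pkts) {
		uint16_t n = rte_eth_tx_burst(port_id, queue_id,
					      pkts + sent, nb_pkts - sent);

		if (n == 0) {
			do /* TX queue stays full: drop the remainder */
				rte_pktmbuf_free(pkts[sent]);
			while (++sent < nb_pkts);
			return;
		}
		sent += n;
	}
}
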
#ifndef RTE_ETHDEV_TX_PREPARE_NOOP

static inline uint16_t
rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_PMD_DEBUG_TRACE("Invalid TX port_id=%d\n", port_id);
		rte_errno = -EINVAL;
		return 0;
	}
#endif

	dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		rte_errno = -EINVAL;
		return 0;
	}
#endif

	if (!dev->tx_pkt_prepare)
		return nb_pkts;

	return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
			tx_pkts, nb_pkts);
}

#else

/*
 * Native NOOP operation for compilation targets which do not require any
 * preparation steps, where a functional NOOP would introduce an unnecessary
 * performance drop.
 *
 * Generally it is not a good idea to turn this on globally, and it should
 * not be used if the behavior of tx_prepare can change.
 */

static inline uint16_t
rte_eth_tx_prepare(__rte_unused uint16_t port_id,
		__rte_unused uint16_t queue_id,
		__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	return nb_pkts;
}

#endif

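/*
 * Illustrative example, not part of the original header: run
 * rte_eth_tx_prepare() before rte_eth_tx_burst() so offload metadata (e.g.
 * TSO pseudo-headers) is fixed up. If fewer packets are prepared than
 * requested, pkts[nb_prep] is the faulty one and rte_errno tells why.
 */
static inline uint16_t
example_prepare_and_send(uint16_t port_id, uint16_t queue_id,
			 struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}
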
static inline uint16_t
rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_dev_tx_buffer *buffer)
{
	uint16_t sent;
	uint16_t to_send = buffer->length;

	if (to_send == 0)
		return 0;

	sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);

	buffer->length = 0;

	/* All packets sent, or to be dealt with by callback below */
	if (unlikely(sent != to_send))
		buffer->error_callback(&buffer->pkts[sent],
				       (uint16_t)(to_send - sent),
				       buffer->error_userdata);

	return sent;
}

static __rte_always_inline uint16_t
rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
{
	buffer->pkts[buffer->length++] = tx_pkt;
	if (buffer->length < buffer->size)
		return 0;

	return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
}

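/*
 * Illustrative example, not part of the original header: buffered transmit
 * in a forwarding loop - rte_eth_tx_buffer() queues each packet and flushes
 * automatically when the buffer fills; the explicit flush at the end of the
 * poll cycle prevents a slow trickle of packets from being delayed forever.
 */
static inline void
example_buffered_tx(uint16_t port_id, uint16_t queue_id,
		    struct rte_eth_dev_tx_buffer *buffer,
		    struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++)
		rte_eth_tx_buffer(port_id, queue_id, buffer, pkts[i]);
	rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
}
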
#ifdef __cplusplus
}
#endif

#endif /* _RTE_ETHDEV_H_ */