DPDK  18.02.2
rte_ethdev.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
140 #ifdef __cplusplus
141 extern "C" {
142 #endif
143 
144 #include <stdint.h>
145 
146 /* Use this macro to check if LRO API is supported */
147 #define RTE_ETHDEV_HAS_LRO_SUPPORT
148 
149 #include <rte_compat.h>
150 #include <rte_log.h>
151 #include <rte_interrupts.h>
152 #include <rte_dev.h>
153 #include <rte_devargs.h>
154 #include <rte_errno.h>
155 #include <rte_common.h>
156 #include <rte_config.h>
157 
158 #include "rte_ether.h"
159 #include "rte_eth_ctrl.h"
160 #include "rte_dev_info.h"
161 
162 struct rte_mbuf;
163 
170 struct rte_eth_stats {
171  uint64_t ipackets;
172  uint64_t opackets;
173  uint64_t ibytes;
174  uint64_t obytes;
175  uint64_t imissed;
179  uint64_t ierrors;
180  uint64_t oerrors;
181  uint64_t rx_nombuf;
182  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
184  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
186  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
188  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
190  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
192 };
193 
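A minimal usage sketch (not part of this header; the helper name print_basic_stats is illustrative): read and reset the basic statistics declared above.

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    print_basic_stats(uint16_t port_id)
    {
        struct rte_eth_stats stats;

        /* rte_eth_stats_get() fills the counters; 0 means success. */
        if (rte_eth_stats_get(port_id, &stats) == 0)
            printf("rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64
                   " rx_nombuf=%" PRIu64 "\n",
                   stats.ipackets, stats.opackets,
                   stats.imissed, stats.rx_nombuf);
        rte_eth_stats_reset(port_id);
    }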
197 #define ETH_LINK_SPEED_AUTONEG (0 << 0)
198 #define ETH_LINK_SPEED_FIXED (1 << 0)
199 #define ETH_LINK_SPEED_10M_HD (1 << 1)
200 #define ETH_LINK_SPEED_10M (1 << 2)
201 #define ETH_LINK_SPEED_100M_HD (1 << 3)
202 #define ETH_LINK_SPEED_100M (1 << 4)
203 #define ETH_LINK_SPEED_1G (1 << 5)
204 #define ETH_LINK_SPEED_2_5G (1 << 6)
205 #define ETH_LINK_SPEED_5G (1 << 7)
206 #define ETH_LINK_SPEED_10G (1 << 8)
207 #define ETH_LINK_SPEED_20G (1 << 9)
208 #define ETH_LINK_SPEED_25G (1 << 10)
209 #define ETH_LINK_SPEED_40G (1 << 11)
210 #define ETH_LINK_SPEED_50G (1 << 12)
211 #define ETH_LINK_SPEED_56G (1 << 13)
212 #define ETH_LINK_SPEED_100G (1 << 14)
217 #define ETH_SPEED_NUM_NONE 0
218 #define ETH_SPEED_NUM_10M 10
219 #define ETH_SPEED_NUM_100M 100
220 #define ETH_SPEED_NUM_1G 1000
221 #define ETH_SPEED_NUM_2_5G 2500
222 #define ETH_SPEED_NUM_5G 5000
223 #define ETH_SPEED_NUM_10G 10000
224 #define ETH_SPEED_NUM_20G 20000
225 #define ETH_SPEED_NUM_25G 25000
226 #define ETH_SPEED_NUM_40G 40000
227 #define ETH_SPEED_NUM_50G 50000
228 #define ETH_SPEED_NUM_56G 56000
229 #define ETH_SPEED_NUM_100G 100000
234 __extension__
235 struct rte_eth_link {
236  uint32_t link_speed;
237  uint16_t link_duplex : 1;
238  uint16_t link_autoneg : 1;
239  uint16_t link_status : 1;
240 } __attribute__((aligned(8)));
242 /* Utility constants */
243 #define ETH_LINK_HALF_DUPLEX 0
244 #define ETH_LINK_FULL_DUPLEX 1
245 #define ETH_LINK_DOWN 0
246 #define ETH_LINK_UP 1
247 #define ETH_LINK_FIXED 0
248 #define ETH_LINK_AUTONEG 1
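A usage sketch (illustrative, not part of this header): decode a link polled with rte_eth_link_get_nowait() using the constants above.

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    print_link(uint16_t port_id)
    {
        struct rte_eth_link link;

        rte_eth_link_get_nowait(port_id, &link); /* non-blocking query */
        if (link.link_status == ETH_LINK_UP)
            printf("port %u: %u Mbps %s-duplex\n", port_id,
                   link.link_speed,
                   link.link_duplex == ETH_LINK_FULL_DUPLEX ?
                   "full" : "half");
        else
            printf("port %u: link down\n", port_id);
    }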
254 struct rte_eth_thresh {
255  uint8_t pthresh;
256  uint8_t hthresh;
257  uint8_t wthresh;
258 };
259 
263 #define ETH_MQ_RX_RSS_FLAG 0x1
264 #define ETH_MQ_RX_DCB_FLAG 0x2
265 #define ETH_MQ_RX_VMDQ_FLAG 0x4
266 
271 enum rte_eth_rx_mq_mode {
273  ETH_MQ_RX_NONE = 0,
274 
277  ETH_MQ_RX_RSS = ETH_MQ_RX_RSS_FLAG,
278  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
280  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
281 
283  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
285  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
287  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
289  ETH_MQ_RX_VMDQ_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG |
290  ETH_MQ_RX_VMDQ_FLAG,
291 };
292 
296 #define ETH_RSS ETH_MQ_RX_RSS
297 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
298 #define ETH_DCB_RX ETH_MQ_RX_DCB
299 
304 enum rte_eth_tx_mq_mode {
305  ETH_MQ_TX_NONE = 0,
306  ETH_MQ_TX_DCB,
307  ETH_MQ_TX_VMDQ_DCB,
308  ETH_MQ_TX_VMDQ_ONLY,
309 };
310 
314 #define ETH_DCB_NONE ETH_MQ_TX_NONE
315 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
316 #define ETH_DCB_TX ETH_MQ_TX_DCB
317 
321 struct rte_eth_rxmode {
323  enum rte_eth_rx_mq_mode mq_mode;
324  uint32_t max_rx_pkt_len;
325  uint16_t split_hdr_size;
331  uint64_t offloads;
332  __extension__
338  uint16_t header_split : 1,
339  hw_ip_checksum : 1,
340  hw_vlan_filter : 1,
341  hw_vlan_strip : 1,
342  hw_vlan_extend : 1,
343  jumbo_frame : 1,
344  hw_strip_crc : 1,
345  enable_scatter : 1,
346  enable_lro : 1,
347  hw_timestamp : 1,
348  security : 1,
358  ignore_offload_bitfield : 1;
359 };
360 
365 enum rte_vlan_type {
366  ETH_VLAN_TYPE_UNKNOWN = 0,
367  ETH_VLAN_TYPE_INNER,
368  ETH_VLAN_TYPE_OUTER,
369  ETH_VLAN_TYPE_MAX,
370 };
371 
376 struct rte_vlan_filter_conf {
377  uint64_t ids[64];
378 };
379 
397 struct rte_eth_rss_conf {
398  uint8_t *rss_key;
399  uint8_t rss_key_len;
400  uint64_t rss_hf;
401 };
402 
403 /*
404  * The RSS offload types are defined based on flow types which are defined
405  * in rte_eth_ctrl.h. Different NIC hardware may support different RSS offload
406  * types. The supported flow types or RSS offload types can be queried by
407  * rte_eth_dev_info_get().
408  */
409 #define ETH_RSS_IPV4 (1ULL << RTE_ETH_FLOW_IPV4)
410 #define ETH_RSS_FRAG_IPV4 (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
411 #define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
412 #define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
413 #define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
414 #define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
415 #define ETH_RSS_IPV6 (1ULL << RTE_ETH_FLOW_IPV6)
416 #define ETH_RSS_FRAG_IPV6 (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
417 #define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
418 #define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
419 #define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
420 #define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
421 #define ETH_RSS_L2_PAYLOAD (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
422 #define ETH_RSS_IPV6_EX (1ULL << RTE_ETH_FLOW_IPV6_EX)
423 #define ETH_RSS_IPV6_TCP_EX (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
424 #define ETH_RSS_IPV6_UDP_EX (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
425 #define ETH_RSS_PORT (1ULL << RTE_ETH_FLOW_PORT)
426 #define ETH_RSS_VXLAN (1ULL << RTE_ETH_FLOW_VXLAN)
427 #define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
428 #define ETH_RSS_NVGRE (1ULL << RTE_ETH_FLOW_NVGRE)
429 
430 #define ETH_RSS_IP ( \
431  ETH_RSS_IPV4 | \
432  ETH_RSS_FRAG_IPV4 | \
433  ETH_RSS_NONFRAG_IPV4_OTHER | \
434  ETH_RSS_IPV6 | \
435  ETH_RSS_FRAG_IPV6 | \
436  ETH_RSS_NONFRAG_IPV6_OTHER | \
437  ETH_RSS_IPV6_EX)
438 
439 #define ETH_RSS_UDP ( \
440  ETH_RSS_NONFRAG_IPV4_UDP | \
441  ETH_RSS_NONFRAG_IPV6_UDP | \
442  ETH_RSS_IPV6_UDP_EX)
443 
444 #define ETH_RSS_TCP ( \
445  ETH_RSS_NONFRAG_IPV4_TCP | \
446  ETH_RSS_NONFRAG_IPV6_TCP | \
447  ETH_RSS_IPV6_TCP_EX)
448 
449 #define ETH_RSS_SCTP ( \
450  ETH_RSS_NONFRAG_IPV4_SCTP | \
451  ETH_RSS_NONFRAG_IPV6_SCTP)
452 
453 #define ETH_RSS_TUNNEL ( \
454  ETH_RSS_VXLAN | \
455  ETH_RSS_GENEVE | \
456  ETH_RSS_NVGRE)
457 
459 #define ETH_RSS_PROTO_MASK ( \
460  ETH_RSS_IPV4 | \
461  ETH_RSS_FRAG_IPV4 | \
462  ETH_RSS_NONFRAG_IPV4_TCP | \
463  ETH_RSS_NONFRAG_IPV4_UDP | \
464  ETH_RSS_NONFRAG_IPV4_SCTP | \
465  ETH_RSS_NONFRAG_IPV4_OTHER | \
466  ETH_RSS_IPV6 | \
467  ETH_RSS_FRAG_IPV6 | \
468  ETH_RSS_NONFRAG_IPV6_TCP | \
469  ETH_RSS_NONFRAG_IPV6_UDP | \
470  ETH_RSS_NONFRAG_IPV6_SCTP | \
471  ETH_RSS_NONFRAG_IPV6_OTHER | \
472  ETH_RSS_L2_PAYLOAD | \
473  ETH_RSS_IPV6_EX | \
474  ETH_RSS_IPV6_TCP_EX | \
475  ETH_RSS_IPV6_UDP_EX | \
476  ETH_RSS_PORT | \
477  ETH_RSS_VXLAN | \
478  ETH_RSS_GENEVE | \
479  ETH_RSS_NVGRE)
480 
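A configuration sketch (assumes the device supports RSS; the helper name configure_rss is illustrative): request RSS distribution of IP, TCP and UDP traffic across nb_rxq queues.

    #include <rte_ethdev.h>

    static int
    configure_rss(uint16_t port_id, uint16_t nb_rxq)
    {
        struct rte_eth_conf conf = { 0 };

        conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
        conf.rx_adv_conf.rss_conf.rss_key = NULL; /* driver default key */
        conf.rx_adv_conf.rss_conf.rss_hf =
            ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP;
        return rte_eth_dev_configure(port_id, nb_rxq, 1, &conf);
    }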
481 /*
482  * Definitions used for redirection table entry size.
483  * Some RSS RETA sizes may not be supported by some drivers, check the
484  * documentation or the description of relevant functions for more details.
485  */
486 #define ETH_RSS_RETA_SIZE_64 64
487 #define ETH_RSS_RETA_SIZE_128 128
488 #define ETH_RSS_RETA_SIZE_256 256
489 #define ETH_RSS_RETA_SIZE_512 512
490 #define RTE_RETA_GROUP_SIZE 64
491 
492 /* Definitions used for VMDQ and DCB functionality */
493 #define ETH_VMDQ_MAX_VLAN_FILTERS 64
494 #define ETH_DCB_NUM_USER_PRIORITIES 8
495 #define ETH_VMDQ_DCB_NUM_QUEUES 128
496 #define ETH_DCB_NUM_QUEUES 128
498 /* DCB capability defines */
499 #define ETH_DCB_PG_SUPPORT 0x00000001
500 #define ETH_DCB_PFC_SUPPORT 0x00000002
502 /* Definitions used for VLAN Offload functionality */
503 #define ETH_VLAN_STRIP_OFFLOAD 0x0001
504 #define ETH_VLAN_FILTER_OFFLOAD 0x0002
505 #define ETH_VLAN_EXTEND_OFFLOAD 0x0004
507 /* Definitions used for mask VLAN setting */
508 #define ETH_VLAN_STRIP_MASK 0x0001
509 #define ETH_VLAN_FILTER_MASK 0x0002
510 #define ETH_VLAN_EXTEND_MASK 0x0004
511 #define ETH_VLAN_ID_MAX 0x0FFF
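A runtime sketch using the offload definitions above together with rte_eth_dev_get_vlan_offload()/rte_eth_dev_set_vlan_offload() declared later in this file (enable_vlan_offloads is an illustrative name):

    #include <rte_ethdev.h>

    static int
    enable_vlan_offloads(uint16_t port_id)
    {
        int mode = rte_eth_dev_get_vlan_offload(port_id);

        if (mode < 0)
            return mode;
        mode |= ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD;
        return rte_eth_dev_set_vlan_offload(port_id, mode);
    }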
513 /* Definitions used for receive MAC address */
514 #define ETH_NUM_RECEIVE_MAC_ADDR 128
516 /* Definitions used for unicast hash */
517 #define ETH_VMDQ_NUM_UC_HASH_ARRAY 128
519 /* Definitions used for VMDQ pool rx mode setting */
520 #define ETH_VMDQ_ACCEPT_UNTAG 0x0001
521 #define ETH_VMDQ_ACCEPT_HASH_MC 0x0002
522 #define ETH_VMDQ_ACCEPT_HASH_UC 0x0004
523 #define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
524 #define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
527 #define ETH_MIRROR_MAX_VLANS 64
528 
529 #define ETH_MIRROR_VIRTUAL_POOL_UP 0x01
530 #define ETH_MIRROR_UPLINK_PORT 0x02
531 #define ETH_MIRROR_DOWNLINK_PORT 0x04
532 #define ETH_MIRROR_VLAN 0x08
533 #define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10
538 struct rte_eth_vlan_mirror {
539  uint64_t vlan_mask;
541  uint16_t vlan_id[ETH_MIRROR_MAX_VLANS];
542 };
543 
547 struct rte_eth_mirror_conf {
548  uint8_t rule_type;
549  uint8_t dst_pool;
550  uint64_t pool_mask;
552  struct rte_eth_vlan_mirror vlan;
553 };
554 
561 struct rte_eth_rss_reta_entry64 {
562  uint64_t mask;
564  uint16_t reta[RTE_RETA_GROUP_SIZE];
566 };
567 
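A usage sketch for the redirection-table entries above (illustrative; assumes the device's reta_size is at most 512): spread RETA entries round-robin over nb_rxq queues.

    #include <string.h>
    #include <rte_ethdev.h>

    static int
    setup_reta(uint16_t port_id, uint16_t nb_rxq)
    {
        struct rte_eth_dev_info info;
        struct rte_eth_rss_reta_entry64
            reta[ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE];
        uint16_t i;

        rte_eth_dev_info_get(port_id, &info);
        if (info.reta_size == 0 || info.reta_size > ETH_RSS_RETA_SIZE_512)
            return -1;
        memset(reta, 0, sizeof(reta));
        for (i = 0; i < info.reta_size; i++) {
            /* each group updates up to 64 entries selected by its mask */
            reta[i / RTE_RETA_GROUP_SIZE].mask |=
                1ULL << (i % RTE_RETA_GROUP_SIZE);
            reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
                i % nb_rxq;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta, info.reta_size);
    }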
572 enum rte_eth_nb_tcs {
573  ETH_4_TCS = 4,
574  ETH_8_TCS = 8
575 };
576 
581 enum rte_eth_nb_pools {
582  ETH_8_POOLS = 8,
583  ETH_16_POOLS = 16,
584  ETH_32_POOLS = 32,
585  ETH_64_POOLS = 64
586 };
587 
588 /* This structure may be extended in future. */
589 struct rte_eth_dcb_rx_conf {
590  enum rte_eth_nb_tcs nb_tcs;
592  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
593 };
594 
595 struct rte_eth_vmdq_dcb_tx_conf {
596  enum rte_eth_nb_pools nb_queue_pools;
598  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
599 };
600 
601 struct rte_eth_dcb_tx_conf {
602  enum rte_eth_nb_tcs nb_tcs;
604  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
605 };
606 
607 struct rte_eth_vmdq_tx_conf {
608  enum rte_eth_nb_pools nb_queue_pools;
609 };
610 
620 struct rte_eth_vmdq_dcb_conf {
622  enum rte_eth_nb_pools nb_queue_pools;
624  uint8_t enable_default_pool;
625  uint8_t default_pool;
626  uint8_t nb_pool_maps;
627  struct {
628  uint16_t vlan_id;
629  uint64_t pools;
630  } pool_map[ETH_VMDQ_MAX_VLAN_FILTERS];
632  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
633 };
634 
652 struct rte_eth_vmdq_rx_conf {
653  enum rte_eth_nb_pools nb_queue_pools;
655  uint8_t enable_default_pool;
656  uint8_t default_pool;
657  uint8_t enable_loop_back;
658  uint8_t nb_pool_maps;
659  uint32_t rx_mode;
660  struct {
661  uint16_t vlan_id;
662  uint64_t pools;
663  } pool_map[ETH_VMDQ_MAX_VLAN_FILTERS];
664 };
665 
669 struct rte_eth_txmode {
670  enum rte_eth_tx_mq_mode mq_mode;
676  uint64_t offloads;
677 
678  /* For i40e specifically */
679  uint16_t pvid;
680  __extension__
681  uint8_t hw_vlan_reject_tagged : 1,
683  hw_vlan_reject_untagged : 1,
685  hw_vlan_insert_pvid : 1;
687 };
688 
692 struct rte_eth_rxconf {
693  struct rte_eth_thresh rx_thresh;
694  uint16_t rx_free_thresh;
695  uint8_t rx_drop_en;
696  uint8_t rx_deferred_start;
702  uint64_t offloads;
703 };
704 
705 #define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001
706 #define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002
707 #define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004
708 #define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100
709 #define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200
710 #define ETH_TXQ_FLAGS_NOXSUMUDP 0x0400
711 #define ETH_TXQ_FLAGS_NOXSUMTCP 0x0800
712 #define ETH_TXQ_FLAGS_NOOFFLOADS \
713  (ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
714  ETH_TXQ_FLAGS_NOXSUMUDP | ETH_TXQ_FLAGS_NOXSUMTCP)
715 #define ETH_TXQ_FLAGS_NOXSUMS \
716  (ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
717  ETH_TXQ_FLAGS_NOXSUMTCP)
718 
725 #define ETH_TXQ_FLAGS_IGNORE 0x8000
726 
730 struct rte_eth_txconf {
731  struct rte_eth_thresh tx_thresh;
732  uint16_t tx_rs_thresh;
733  uint16_t tx_free_thresh;
736  uint32_t txq_flags;
738  uint8_t tx_deferred_start;
743  uint64_t offloads;
744 };
745 
749 struct rte_eth_desc_lim {
750  uint16_t nb_max;
751  uint16_t nb_min;
752  uint16_t nb_align;
762  uint16_t nb_seg_max;
763 
775  uint16_t nb_mtu_seg_max;
776 };
777 
781 enum rte_eth_fc_mode {
782  RTE_FC_NONE = 0,
783  RTE_FC_RX_PAUSE,
784  RTE_FC_TX_PAUSE,
785  RTE_FC_FULL
786 };
787 
793 struct rte_eth_fc_conf {
794  uint32_t high_water;
795  uint32_t low_water;
796  uint16_t pause_time;
797  uint16_t send_xon;
798  enum rte_eth_fc_mode mode;
799  uint8_t mac_ctrl_frame_fwd;
800  uint8_t autoneg;
801 };
802 
808 struct rte_eth_pfc_conf {
809  struct rte_eth_fc_conf fc;
810  uint8_t priority;
811 };
812 
817 enum rte_fdir_pballoc_type {
818  RTE_FDIR_PBALLOC_64K = 0,
819  RTE_FDIR_PBALLOC_128K,
820  RTE_FDIR_PBALLOC_256K,
821 };
822 
826 enum rte_fdir_status_mode {
827  RTE_FDIR_NO_REPORT_STATUS = 0,
828  RTE_FDIR_REPORT_STATUS,
829  RTE_FDIR_REPORT_STATUS_ALWAYS,
830 };
831 
839 struct rte_fdir_conf {
840  enum rte_fdir_mode mode;
841  enum rte_fdir_pballoc_type pballoc;
842  enum rte_fdir_status_mode status;
843  uint8_t drop_queue;
844  struct rte_eth_fdir_masks mask;
845  struct rte_eth_fdir_flex_conf flex_conf;
847 };
848 
856 struct rte_eth_udp_tunnel {
857  uint16_t udp_port;
858  uint8_t prot_type;
859 };
860 
864 struct rte_intr_conf {
866  uint32_t lsc:1;
868  uint32_t rxq:1;
870  uint32_t rmv:1;
871 };
872 
878 struct rte_eth_conf {
879  uint32_t link_speeds;
886  struct rte_eth_rxmode rxmode;
887  struct rte_eth_txmode txmode;
888  uint32_t lpbk_mode;
893  struct {
894  struct rte_eth_rss_conf rss_conf;
895  struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
897  struct rte_eth_dcb_rx_conf dcb_rx_conf;
899  struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
901  } rx_adv_conf;
902  union {
903  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
905  struct rte_eth_dcb_tx_conf dcb_tx_conf;
907  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
909  } tx_adv_conf;
912  uint32_t dcb_capability_en;
913  struct rte_fdir_conf fdir_conf;
914  struct rte_intr_conf intr_conf;
915 };
916 
926 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
927 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
928 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
929 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
930 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
931 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
932 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
933 #define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
934 #define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
935 #define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
936 #define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
937 #define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
938 #define DEV_RX_OFFLOAD_CRC_STRIP 0x00001000
939 #define DEV_RX_OFFLOAD_SCATTER 0x00002000
940 #define DEV_RX_OFFLOAD_TIMESTAMP 0x00004000
941 #define DEV_RX_OFFLOAD_SECURITY 0x00008000
942 #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
943  DEV_RX_OFFLOAD_UDP_CKSUM | \
944  DEV_RX_OFFLOAD_TCP_CKSUM)
945 #define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
946  DEV_RX_OFFLOAD_VLAN_FILTER | \
947  DEV_RX_OFFLOAD_VLAN_EXTEND)
948 
949 /*
950  * If new Rx offload capabilities are defined, they also must be
951  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
952  */
953 
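A capability-check sketch (illustrative): intersect the offloads an application wants with what the port reports in rx_offload_capa before using them in the port or queue configuration.

    #include <rte_ethdev.h>

    static uint64_t
    supported_rx_offloads(uint16_t port_id, uint64_t wanted)
    {
        struct rte_eth_dev_info info;

        rte_eth_dev_info_get(port_id, &info);
        return wanted & info.rx_offload_capa;
    }

    /* e.g. supported_rx_offloads(port_id,
     *          DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_TCP_LRO) */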
957 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
958 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
959 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
960 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
961 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
962 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
963 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
964 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
965 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
966 #define DEV_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200
967 #define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400
968 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800
969 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000
970 #define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
971 #define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
972 
975 #define DEV_TX_OFFLOAD_MULTI_SEGS 0x00008000
976 
977 #define DEV_TX_OFFLOAD_MBUF_FAST_FREE 0x00010000
978 
982 #define DEV_TX_OFFLOAD_SECURITY 0x00020000
983 
984 /*
985  * If new Tx offload capabilities are defined, they also must be
986  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
987  */
988 
989 struct rte_pci_device;
990 
994 struct rte_eth_dev_info {
995  struct rte_pci_device *pci_dev;
996  const char *driver_name;
997  unsigned int if_index;
999  uint32_t min_rx_bufsize;
1000  uint32_t max_rx_pktlen;
1001  uint16_t max_rx_queues;
1002  uint16_t max_tx_queues;
1003  uint32_t max_mac_addrs;
1004  uint32_t max_hash_mac_addrs;
1006  uint16_t max_vfs;
1007  uint16_t max_vmdq_pools;
1008  uint64_t rx_offload_capa;
1010  uint64_t tx_offload_capa;
1012  uint64_t rx_queue_offload_capa;
1014  uint64_t tx_queue_offload_capa;
1016  uint16_t reta_size;
1018  uint8_t hash_key_size;
1020  uint64_t flow_type_rss_offloads;
1021  struct rte_eth_rxconf default_rxconf;
1022  struct rte_eth_txconf default_txconf;
1023  uint16_t vmdq_queue_base;
1024  uint16_t vmdq_queue_num;
1025  uint16_t vmdq_pool_base;
1026  struct rte_eth_desc_lim rx_desc_lim;
1027  struct rte_eth_desc_lim tx_desc_lim;
1028  uint32_t speed_capa;
1030  uint16_t nb_rx_queues;
1031  uint16_t nb_tx_queues;
1032 };
1033 
1038 struct rte_eth_rxq_info {
1039  struct rte_mempool *mp;
1040  struct rte_eth_rxconf conf;
1041  uint8_t scattered_rx;
1042  uint16_t nb_desc;
1043 } __rte_cache_min_aligned;
1044 
1049 struct rte_eth_txq_info {
1050  struct rte_eth_txconf conf;
1051  uint16_t nb_desc;
1052 } __rte_cache_min_aligned;
1053 
1055 #define RTE_ETH_XSTATS_NAME_SIZE 64
1056 
1066 struct rte_eth_xstat {
1067  uint64_t id;
1068  uint64_t value;
1069 };
1070 
1078 struct rte_eth_xstat_name {
1079  char name[RTE_ETH_XSTATS_NAME_SIZE];
1080 };
1081 
1082 #define ETH_DCB_NUM_TCS 8
1083 #define ETH_MAX_VMDQ_POOL 64
1084 
1090 struct rte_eth_dcb_tc_queue_mapping {
1091  struct {
1092  uint8_t base;
1093  uint8_t nb_queue;
1094  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1096  struct {
1097  uint8_t base;
1098  uint8_t nb_queue;
1099  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1100 };
1101 
1106 struct rte_eth_dcb_info {
1107  uint8_t nb_tcs;
1108  uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES];
1109  uint8_t tc_bws[ETH_DCB_NUM_TCS];
1111  struct rte_eth_dcb_tc_queue_mapping tc_queue;
1112 };
1113 
1117 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1118 #define RTE_ETH_QUEUE_STATE_STARTED 1
1119 
1120 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
1121 
1122 /* Macros to check for valid port */
1123 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1124  if (!rte_eth_dev_is_valid_port(port_id)) { \
1125  RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
1126  return retval; \
1127  } \
1128 } while (0)
1129 
1130 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1131  if (!rte_eth_dev_is_valid_port(port_id)) { \
1132  RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
1133  return; \
1134  } \
1135 } while (0)
1136 
1142 #define ETH_L2_TUNNEL_ENABLE_MASK 0x00000001
1143 
1144 #define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002
1145 
1146 #define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004
1147 
1148 #define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1149 
1172 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
1173  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1174  void *user_param);
1175 
1196 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
1197  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1198 
1202 enum rte_eth_dev_state {
1204  RTE_ETH_DEV_UNUSED = 0,
1206  RTE_ETH_DEV_ATTACHED,
1208  RTE_ETH_DEV_DEFERRED,
1210  RTE_ETH_DEV_REMOVED,
1211 };
1212 
1213 struct rte_eth_dev_sriov {
1214  uint8_t active;
1215  uint8_t nb_q_per_pool;
1216  uint16_t def_vmdq_idx;
1217  uint16_t def_pool_q_idx;
1218 };
1219 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1220 
1221 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
1222 
1223 #define RTE_ETH_DEV_NO_OWNER 0
1224 
1225 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
1226 
1227 struct rte_eth_dev_owner {
1228  uint64_t id;
1229  char name[RTE_ETH_MAX_OWNER_NAME_LEN];
1230 };
1231 
1233 #define RTE_ETH_DEV_INTR_LSC 0x0002
1234 
1235 #define RTE_ETH_DEV_BONDED_SLAVE 0x0004
1236 
1237 #define RTE_ETH_DEV_INTR_RMV 0x0008
1238 
1253 uint64_t __rte_experimental rte_eth_find_next_owned_by(uint16_t port_id,
1254  const uint64_t owner_id);
1255 
1259 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
1260  for (p = rte_eth_find_next_owned_by(0, o); \
1261  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
1262  p = rte_eth_find_next_owned_by(p + 1, o))
1263 
1272 uint16_t rte_eth_find_next(uint16_t port_id);
1273 
1277 #define RTE_ETH_FOREACH_DEV(p) \
1278  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
1279 
1280 
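An iteration sketch built on the macro above (list_ports is an illustrative name):

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    list_ports(void)
    {
        char name[RTE_ETH_NAME_MAX_LEN];
        uint16_t port_id;

        RTE_ETH_FOREACH_DEV(port_id) {
            if (rte_eth_dev_get_name_by_port(port_id, name) == 0)
                printf("port %u: %s\n", port_id, name);
        }
    }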
1294 int __rte_experimental rte_eth_dev_owner_new(uint64_t *owner_id);
1295 
1309 int __rte_experimental rte_eth_dev_owner_set(const uint16_t port_id,
1310  const struct rte_eth_dev_owner *owner);
1311 
1325 int __rte_experimental rte_eth_dev_owner_unset(const uint16_t port_id,
1326  const uint64_t owner_id);
1327 
1337 void __rte_experimental rte_eth_dev_owner_delete(const uint64_t owner_id);
1338 
1352 int __rte_experimental rte_eth_dev_owner_get(const uint16_t port_id,
1353  struct rte_eth_dev_owner *owner);
1354 
1367 uint16_t rte_eth_dev_count(void);
1368 
1381 int rte_eth_dev_attach(const char *devargs, uint16_t *port_id);
1382 
1396 int rte_eth_dev_detach(uint16_t port_id, char *devname);
1397 
1409 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
1410 
1422 const char * __rte_experimental rte_eth_dev_rx_offload_name(uint64_t offload);
1423 
1435 const char * __rte_experimental rte_eth_dev_tx_offload_name(uint64_t offload);
1436 
1469 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
1470  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
1471 
1483 int __rte_experimental
1484 rte_eth_dev_is_removed(uint16_t port_id);
1485 
1528 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1529  uint16_t nb_rx_desc, unsigned int socket_id,
1530  const struct rte_eth_rxconf *rx_conf,
1531  struct rte_mempool *mb_pool);
1532 
1581 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1582  uint16_t nb_tx_desc, unsigned int socket_id,
1583  const struct rte_eth_txconf *tx_conf);
1584 
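A bring-up sketch combining the two setup calls above with rte_eth_dev_configure() and rte_eth_dev_start() (port_init and the descriptor counts are illustrative; mbuf_pool is assumed to be a previously created mempool):

    #include <rte_ethdev.h>

    static int
    port_init(uint16_t port_id, struct rte_mempool *mbuf_pool)
    {
        struct rte_eth_conf conf = { 0 };
        int ret;

        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret < 0)
            return ret;
        ret = rte_eth_rx_queue_setup(port_id, 0, 128,
                rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
        if (ret < 0)
            return ret;
        ret = rte_eth_tx_queue_setup(port_id, 0, 512,
                rte_eth_dev_socket_id(port_id), NULL);
        if (ret < 0)
            return ret;
        return rte_eth_dev_start(port_id);
    }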
1595 int rte_eth_dev_socket_id(uint16_t port_id);
1596 
1606 int rte_eth_dev_is_valid_port(uint16_t port_id);
1607 
1624 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
1625 
1641 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
1642 
1659 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
1660 
1676 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
1677 
1693 int rte_eth_dev_start(uint16_t port_id);
1694 
1702 void rte_eth_dev_stop(uint16_t port_id);
1703 
1716 int rte_eth_dev_set_link_up(uint16_t port_id);
1717 
1727 int rte_eth_dev_set_link_down(uint16_t port_id);
1728 
1737 void rte_eth_dev_close(uint16_t port_id);
1738 
1776 int rte_eth_dev_reset(uint16_t port_id);
1777 
1784 void rte_eth_promiscuous_enable(uint16_t port_id);
1785 
1792 void rte_eth_promiscuous_disable(uint16_t port_id);
1793 
1804 int rte_eth_promiscuous_get(uint16_t port_id);
1805 
1812 void rte_eth_allmulticast_enable(uint16_t port_id);
1813 
1820 void rte_eth_allmulticast_disable(uint16_t port_id);
1821 
1832 int rte_eth_allmulticast_get(uint16_t port_id);
1833 
1845 void rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
1846 
1858 void rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
1859 
1877 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
1878 
1889 int rte_eth_stats_reset(uint16_t port_id);
1890 
1911 int rte_eth_xstats_get_names(uint16_t port_id,
1912  struct rte_eth_xstat_name *xstats_names,
1913  unsigned int size);
1914 
1937 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
1938  unsigned int n);
1939 
1962 int
1963 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1964  struct rte_eth_xstat_name *xstats_names, unsigned int size,
1965  uint64_t *ids);
1966 
1990 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
1991  uint64_t *values, unsigned int size);
1992 
2011 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2012  uint64_t *id);
2013 
2020 void rte_eth_xstats_reset(uint16_t port_id);
2021 
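A dump sketch for the xstats API above (illustrative; error handling is minimal). Note that each xstat's id field indexes the name array:

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <rte_ethdev.h>

    static void
    dump_xstats(uint16_t port_id)
    {
        struct rte_eth_xstat_name *names;
        struct rte_eth_xstat *xstats;
        int n, i;

        n = rte_eth_xstats_get_names(port_id, NULL, 0); /* count only */
        if (n <= 0)
            return;
        names = calloc(n, sizeof(*names));
        xstats = calloc(n, sizeof(*xstats));
        if (names != NULL && xstats != NULL &&
            rte_eth_xstats_get_names(port_id, names, n) == n &&
            rte_eth_xstats_get(port_id, xstats, n) == n)
            for (i = 0; i < n; i++)
                printf("%s: %" PRIu64 "\n",
                       names[xstats[i].id].name, xstats[i].value);
        free(names);
        free(xstats);
    }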
2039 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
2040  uint16_t tx_queue_id, uint8_t stat_idx);
2041 
2059 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
2060  uint16_t rx_queue_id,
2061  uint8_t stat_idx);
2062 
2072 void rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr);
2073 
2083 void rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
2084 
2104 int rte_eth_dev_fw_version_get(uint16_t port_id,
2105  char *fw_version, size_t fw_size);
2106 
2145 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2146  uint32_t *ptypes, int num);
2147 
2159 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
2160 
2176 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
2177 
2197 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
2198 
2218 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2219  int on);
2220 
2238 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2239  enum rte_vlan_type vlan_type,
2240  uint16_t tag_type);
2241 
2263 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
2264 
2277 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
2278 
2293 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
2294 
2295 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
2296  void *userdata);
2297 
2302 struct rte_eth_dev_tx_buffer {
2303  buffer_tx_error_fn error_callback;
2304  void *error_userdata;
2305  uint16_t size;
2306  uint16_t length;
2307  struct rte_mbuf *pkts[];
2309 };
2310 
2317 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
2318  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
2319 
2330 int
2331 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
2332 
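An allocation sketch for the buffered-TX API (make_tx_buffer is an illustrative name; rte_zmalloc() could be used instead of calloc()):

    #include <stdlib.h>
    #include <rte_ethdev.h>

    static struct rte_eth_dev_tx_buffer *
    make_tx_buffer(uint16_t size)
    {
        struct rte_eth_dev_tx_buffer *buf;

        buf = calloc(1, RTE_ETH_TX_BUFFER_SIZE(size));
        if (buf != NULL && rte_eth_tx_buffer_init(buf, size) != 0) {
            free(buf);
            buf = NULL;
        }
        return buf;
    }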
2357 int
2358 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2359  buffer_tx_error_fn callback, void *userdata);
2360 
2383 void
2384 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2385  void *userdata);
2386 
2410 void
2411 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2412  void *userdata);
2413 
2439 int
2440 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
2441 
2444 enum rte_eth_event_type {
2445  RTE_ETH_EVENT_UNKNOWN,
2446  RTE_ETH_EVENT_INTR_LSC,
2447  RTE_ETH_EVENT_QUEUE_STATE,
2449  RTE_ETH_EVENT_INTR_RESET,
2451  RTE_ETH_EVENT_VF_MBOX,
2452  RTE_ETH_EVENT_MACSEC,
2453  RTE_ETH_EVENT_INTR_RMV,
2454  RTE_ETH_EVENT_NEW,
2455  RTE_ETH_EVENT_DESTROY,
2457  RTE_ETH_EVENT_MAX
2458 };
2459 
2460 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
2461  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
2481 int rte_eth_dev_callback_register(uint16_t port_id,
2482  enum rte_eth_event_type event,
2483  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
2484 
2503 int rte_eth_dev_callback_unregister(uint16_t port_id,
2504  enum rte_eth_event_type event,
2505  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
2506 
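A registration sketch (lsc_event_cb is an illustrative name): print a message whenever the link state of a port changes.

    #include <stdio.h>
    #include <rte_ethdev.h>

    static int
    lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
                 void *cb_arg, void *ret_param)
    {
        (void)cb_arg;
        (void)ret_param;
        if (event == RTE_ETH_EVENT_INTR_LSC)
            printf("port %u: link state changed\n", port_id);
        return 0;
    }

    /* rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
     *                               lsc_event_cb, NULL); */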
2528 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
2529 
2550 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
2551 
2569 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
2570 
2592 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
2593  int epfd, int op, void *data);
2594 
2608 int rte_eth_led_on(uint16_t port_id);
2609 
2623 int rte_eth_led_off(uint16_t port_id);
2624 
2638 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
2639  struct rte_eth_fc_conf *fc_conf);
2640 
2655 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
2656  struct rte_eth_fc_conf *fc_conf);
2657 
2673 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2674  struct rte_eth_pfc_conf *pfc_conf);
2675 
2695 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *mac_addr,
2696  uint32_t pool);
2697 
2711 int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *mac_addr);
2712 
2726 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
2727  struct ether_addr *mac_addr);
2728 
2745 int rte_eth_dev_rss_reta_update(uint16_t port_id,
2746  struct rte_eth_rss_reta_entry64 *reta_conf,
2747  uint16_t reta_size);
2748 
2765 int rte_eth_dev_rss_reta_query(uint16_t port_id,
2766  struct rte_eth_rss_reta_entry64 *reta_conf,
2767  uint16_t reta_size);
2768 
2788 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
2789  uint8_t on);
2790 
2809 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
2810 
2833 int rte_eth_mirror_rule_set(uint16_t port_id,
2834  struct rte_eth_mirror_conf *mirror_conf,
2835  uint8_t rule_id,
2836  uint8_t on);
2837 
2852 int rte_eth_mirror_rule_reset(uint16_t port_id,
2853  uint8_t rule_id);
2854 
2871 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
2872  uint16_t tx_rate);
2873 
2888 int rte_eth_dev_rss_hash_update(uint16_t port_id,
2889  struct rte_eth_rss_conf *rss_conf);
2890 
2905 int
2906 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2907  struct rte_eth_rss_conf *rss_conf);
2908 
2927 int
2928 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2929  struct rte_eth_udp_tunnel *tunnel_udp);
2930 
2950 int
2951 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2952  struct rte_eth_udp_tunnel *tunnel_udp);
2953 
2968 int rte_eth_dev_filter_supported(uint16_t port_id,
2969  enum rte_filter_type filter_type);
2970 
2990 int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
2991  enum rte_filter_op filter_op, void *arg);
2992 
3006 int rte_eth_dev_get_dcb_info(uint16_t port_id,
3007  struct rte_eth_dcb_info *dcb_info);
3008 
3033 void *rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3034  rte_rx_callback_fn fn, void *user_param);
3035 
3061 void *rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3062  rte_rx_callback_fn fn, void *user_param);
3063 
3088 void *rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3089  rte_tx_callback_fn fn, void *user_param);
3090 
3091 struct rte_eth_rxtx_callback;
3092 
3123 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3124  struct rte_eth_rxtx_callback *user_cb);
3125 
3156 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3157  struct rte_eth_rxtx_callback *user_cb);
3158 
3176 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3177  struct rte_eth_rxq_info *qinfo);
3178 
3196 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3197  struct rte_eth_txq_info *qinfo);
3198 
3216 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
3217 
3230 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
3231 
3247 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
3248 
3264 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
3265 
3284 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3285  struct ether_addr *mc_addr_set,
3286  uint32_t nb_mc_addr);
3287 
3300 int rte_eth_timesync_enable(uint16_t port_id);
3301 
3314 int rte_eth_timesync_disable(uint16_t port_id);
3315 
3334 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
3335  struct timespec *timestamp, uint32_t flags);
3336 
3352 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3353  struct timespec *timestamp);
3354 
3372 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
3373 
3388 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
3389 
3408 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
3409 
3425 int
3426 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
3427  struct rte_eth_l2_tunnel_conf *l2_tunnel);
3428 
3453 int
3454 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
3455  struct rte_eth_l2_tunnel_conf *l2_tunnel,
3456  uint32_t mask,
3457  uint8_t en);
3458 
3471 int
3472 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
3473 
3485 int
3486 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
3487 
3504 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
3505  uint16_t *nb_rx_desc,
3506  uint16_t *nb_tx_desc);
3507 
3522 int
3523 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
3524 
3534 void *
3535 rte_eth_dev_get_sec_ctx(uint8_t port_id);
3536 
3537 
3538 #include <rte_ethdev_core.h>
3539 
3622 static inline uint16_t
3623 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
3624  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
3625 {
3626  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3627  uint16_t nb_rx;
3628 
3629 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3630  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
3631  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
3632 
3633  if (queue_id >= dev->data->nb_rx_queues) {
3634  RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3635  return 0;
3636  }
3637 #endif
3638  nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
3639  rx_pkts, nb_pkts);
3640 
3641 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
3642  if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
3643  struct rte_eth_rxtx_callback *cb =
3644  dev->post_rx_burst_cbs[queue_id];
3645 
3646  do {
3647  nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
3648  nb_pkts, cb->param);
3649  cb = cb->next;
3650  } while (cb != NULL);
3651  }
3652 #endif
3653 
3654  return nb_rx;
3655 }
3656 
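A receive-loop sketch built on rte_eth_rx_burst() (rx_loop and BURST_SIZE are illustrative; real applications process the mbufs instead of freeing them):

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define BURST_SIZE 32

    static void
    rx_loop(uint16_t port_id, uint16_t queue_id)
    {
        struct rte_mbuf *pkts[BURST_SIZE];
        uint16_t nb_rx, i;

        for (;;) {
            nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
            for (i = 0; i < nb_rx; i++)
                rte_pktmbuf_free(pkts[i]); /* placeholder for real work */
        }
    }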
3669 static inline int
3670 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
3671 {
3672  struct rte_eth_dev *dev;
3673 
3674  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3675  dev = &rte_eth_devices[port_id];
3676  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
3677  if (queue_id >= dev->data->nb_rx_queues)
3678  return -EINVAL;
3679 
3680  return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
3681 }
3682 
3698 static inline int
3699 rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
3700 {
3701  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3702  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3703  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
3704  return (*dev->dev_ops->rx_descriptor_done)( \
3705  dev->data->rx_queues[queue_id], offset);
3706 }
3707 
3708 #define RTE_ETH_RX_DESC_AVAIL 0
3709 #define RTE_ETH_RX_DESC_DONE 1
3710 #define RTE_ETH_RX_DESC_UNAVAIL 2
3745 static inline int
3746 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
3747  uint16_t offset)
3748 {
3749  struct rte_eth_dev *dev;
3750  void *rxq;
3751 
3752 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3753  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3754 #endif
3755  dev = &rte_eth_devices[port_id];
3756 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3757  if (queue_id >= dev->data->nb_rx_queues)
3758  return -ENODEV;
3759 #endif
3760  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
3761  rxq = dev->data->rx_queues[queue_id];
3762 
3763  return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
3764 }
3765 
3766 #define RTE_ETH_TX_DESC_FULL 0
3767 #define RTE_ETH_TX_DESC_DONE 1
3768 #define RTE_ETH_TX_DESC_UNAVAIL 2
3803 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
3804  uint16_t queue_id, uint16_t offset)
3805 {
3806  struct rte_eth_dev *dev;
3807  void *txq;
3808 
3809 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3810  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3811 #endif
3812  dev = &rte_eth_devices[port_id];
3813 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3814  if (queue_id >= dev->data->nb_tx_queues)
3815  return -ENODEV;
3816 #endif
3817  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
3818  txq = dev->data->tx_queues[queue_id];
3819 
3820  return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
3821 }
3822 
3889 static inline uint16_t
3890 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
3891  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
3892 {
3893  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3894 
3895 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3896  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
3897  RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
3898 
3899  if (queue_id >= dev->data->nb_tx_queues) {
3900  RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3901  return 0;
3902  }
3903 #endif
3904 
3905 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
3906  struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
3907 
3908  if (unlikely(cb != NULL)) {
3909  do {
3910  nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
3911  cb->param);
3912  cb = cb->next;
3913  } while (cb != NULL);
3914  }
3915 #endif
3916 
3917  return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
3918 }
3919 
3976 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
3977 
3978 static inline uint16_t
3979 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
3980  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
3981 {
3982  struct rte_eth_dev *dev;
3983 
3984 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3985  if (!rte_eth_dev_is_valid_port(port_id)) {
3986  RTE_PMD_DEBUG_TRACE("Invalid TX port_id=%d\n", port_id);
3987  rte_errno = -EINVAL;
3988  return 0;
3989  }
3990 #endif
3991 
3992  dev = &rte_eth_devices[port_id];
3993 
3994 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3995  if (queue_id >= dev->data->nb_tx_queues) {
3996  RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3997  rte_errno = -EINVAL;
3998  return 0;
3999  }
4000 #endif
4001 
4002  if (!dev->tx_pkt_prepare)
4003  return nb_pkts;
4004 
4005  return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
4006  tx_pkts, nb_pkts);
4007 }
4008 
4009 #else
4010 
4011 /*
4012  * Native NOOP operation for compilation targets which don't require any
4013  * preparation steps, and where a functional NOOP would introduce an
4014  * unnecessary performance drop.
4015  *
4016  * Generally it is not a good idea to turn this on globally, and it should
4017  * not be used if the behavior of tx_prepare can change.
4018  */
4019 
4020 static inline uint16_t
4021 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
4022  __rte_unused uint16_t queue_id,
4023  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4024 {
4025  return nb_pkts;
4026 }
4027 
4028 #endif
4029 
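A combined sketch (send_burst is an illustrative name): run rte_eth_tx_prepare() before rte_eth_tx_burst() and free whatever was rejected or not transmitted.

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    static void
    send_burst(uint16_t port_id, uint16_t queue_id,
               struct rte_mbuf **pkts, uint16_t n)
    {
        uint16_t nb_prep, nb_tx, i;

        nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, n);
        nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
        /* free the packets that failed preparation or transmission */
        for (i = nb_tx; i < n; i++)
            rte_pktmbuf_free(pkts[i]);
    }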
4052 static inline uint16_t
4053 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
4054  struct rte_eth_dev_tx_buffer *buffer)
4055 {
4056  uint16_t sent;
4057  uint16_t to_send = buffer->length;
4058 
4059  if (to_send == 0)
4060  return 0;
4061 
4062  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
4063 
4064  buffer->length = 0;
4065 
4066  /* All packets sent, or to be dealt with by callback below */
4067  if (unlikely(sent != to_send))
4068  buffer->error_callback(&buffer->pkts[sent],
4069  (uint16_t)(to_send - sent),
4070  buffer->error_userdata);
4071 
4072  return sent;
4073 }
4074 
4105 static __rte_always_inline uint16_t
4106 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
4107  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
4108 {
4109  buffer->pkts[buffer->length++] = tx_pkt;
4110  if (buffer->length < buffer->size)
4111  return 0;
4112 
4113  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
4114 }
4115 
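A forwarding sketch using the buffered-TX helpers above (forward_burst is an illustrative name): queue packets one by one and flush whatever is still pending at the end of the polling iteration.

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    static void
    forward_burst(uint16_t port_id, uint16_t queue_id,
                  struct rte_eth_dev_tx_buffer *buf,
                  struct rte_mbuf **pkts, uint16_t n)
    {
        uint16_t i;

        for (i = 0; i < n; i++)
            rte_eth_tx_buffer(port_id, queue_id, buf, pkts[i]);
        rte_eth_tx_buffer_flush(port_id, queue_id, buf);
    }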
4116 #ifdef __cplusplus
4117 }
4118 #endif
4119 
4120 #endif /* _RTE_ETHDEV_H_ */