DPDK  18.11.11
rte_ethdev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
140 #ifdef __cplusplus
141 extern "C" {
142 #endif
143 
144 #include <stdint.h>
145 
146 /* Use this macro to check if LRO API is supported */
147 #define RTE_ETHDEV_HAS_LRO_SUPPORT
148 
149 #include <rte_compat.h>
150 #include <rte_log.h>
151 #include <rte_interrupts.h>
152 #include <rte_dev.h>
153 #include <rte_devargs.h>
154 #include <rte_errno.h>
155 #include <rte_common.h>
156 #include <rte_config.h>
157 #include <rte_ether.h>
158 
159 #include "rte_eth_ctrl.h"
160 #include "rte_dev_info.h"
161 
162 extern int rte_eth_dev_logtype;
163 
164 #define RTE_ETHDEV_LOG(level, ...) \
165  rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
166 
167 struct rte_mbuf;
168 
185 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
186 
201 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
202 
215 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
216 
/*
 * Iterate over all port ids matching the 'devargs' string: initializes
 * 'iter' with rte_eth_iterator_init(), then walks it with
 * rte_eth_iterator_next() until it returns RTE_MAX_ETHPORTS.
 * NOTE(review): if the loop body breaks out early, the caller presumably
 * must call rte_eth_iterator_cleanup() itself - confirm against API docs.
 */
230 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
231  for (rte_eth_iterator_init(iter, devargs), \
232  id = rte_eth_iterator_next(iter); \
233  id != RTE_MAX_ETHPORTS; \
234  id = rte_eth_iterator_next(iter))
235 
243  uint64_t ipackets;
244  uint64_t opackets;
245  uint64_t ibytes;
246  uint64_t obytes;
247  uint64_t imissed;
251  uint64_t ierrors;
252  uint64_t oerrors;
253  uint64_t rx_nombuf;
254  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
256  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
258  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
260  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
262  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
264 };
265 
269 #define ETH_LINK_SPEED_AUTONEG (0 << 0)
270 #define ETH_LINK_SPEED_FIXED (1 << 0)
271 #define ETH_LINK_SPEED_10M_HD (1 << 1)
272 #define ETH_LINK_SPEED_10M (1 << 2)
273 #define ETH_LINK_SPEED_100M_HD (1 << 3)
274 #define ETH_LINK_SPEED_100M (1 << 4)
275 #define ETH_LINK_SPEED_1G (1 << 5)
276 #define ETH_LINK_SPEED_2_5G (1 << 6)
277 #define ETH_LINK_SPEED_5G (1 << 7)
278 #define ETH_LINK_SPEED_10G (1 << 8)
279 #define ETH_LINK_SPEED_20G (1 << 9)
280 #define ETH_LINK_SPEED_25G (1 << 10)
281 #define ETH_LINK_SPEED_40G (1 << 11)
282 #define ETH_LINK_SPEED_50G (1 << 12)
283 #define ETH_LINK_SPEED_56G (1 << 13)
284 #define ETH_LINK_SPEED_100G (1 << 14)
289 #define ETH_SPEED_NUM_NONE 0
290 #define ETH_SPEED_NUM_10M 10
291 #define ETH_SPEED_NUM_100M 100
292 #define ETH_SPEED_NUM_1G 1000
293 #define ETH_SPEED_NUM_2_5G 2500
294 #define ETH_SPEED_NUM_5G 5000
295 #define ETH_SPEED_NUM_10G 10000
296 #define ETH_SPEED_NUM_20G 20000
297 #define ETH_SPEED_NUM_25G 25000
298 #define ETH_SPEED_NUM_40G 40000
299 #define ETH_SPEED_NUM_50G 50000
300 #define ETH_SPEED_NUM_56G 56000
301 #define ETH_SPEED_NUM_100G 100000
/**
 * Link status of an Ethernet port.  The __attribute__((aligned(8)))
 * forces the whole structure into one aligned 64-bit word (presumably so
 * link state can be read/updated as a single unit - confirm with the
 * rte_eth_link_get() implementation).
 */
306 __extension__
307 struct rte_eth_link {
308  uint32_t link_speed; /**< Link speed in Mbps, an ETH_SPEED_NUM_* value */
309  uint16_t link_duplex : 1; /**< ETH_LINK_HALF_DUPLEX or ETH_LINK_FULL_DUPLEX */
310  uint16_t link_autoneg : 1; /**< ETH_LINK_FIXED or ETH_LINK_AUTONEG */
311  uint16_t link_status : 1; /**< ETH_LINK_DOWN or ETH_LINK_UP */
312 } __attribute__((aligned(8)));
314 /* Utility constants */
315 #define ETH_LINK_HALF_DUPLEX 0
316 #define ETH_LINK_FULL_DUPLEX 1
317 #define ETH_LINK_DOWN 0
318 #define ETH_LINK_UP 1
319 #define ETH_LINK_FIXED 0
320 #define ETH_LINK_AUTONEG 1
/**
 * A structure used to configure the ring threshold registers of an RX/TX
 * queue for an Ethernet port.  Exact semantics are hardware/driver
 * specific - see the relevant PMD documentation.
 */
326 struct rte_eth_thresh {
327  uint8_t pthresh; /**< Ring prefetch threshold. */
328  uint8_t hthresh; /**< Ring host threshold. */
329  uint8_t wthresh; /**< Ring writeback threshold. */
330 };
331 
335 #define ETH_MQ_RX_RSS_FLAG 0x1
336 #define ETH_MQ_RX_DCB_FLAG 0x2
337 #define ETH_MQ_RX_VMDQ_FLAG 0x4
338 
346 
350  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
352  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
353 
355  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
357  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
359  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
362  ETH_MQ_RX_VMDQ_FLAG,
363 };
364 
368 #define ETH_RSS ETH_MQ_RX_RSS
369 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
370 #define ETH_DCB_RX ETH_MQ_RX_DCB
371 
381 };
382 
386 #define ETH_DCB_NONE ETH_MQ_TX_NONE
387 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
388 #define ETH_DCB_TX ETH_MQ_TX_DCB
389 
396  uint32_t max_rx_pkt_len;
397  uint16_t split_hdr_size;
403  uint64_t offloads;
404 };
405 
411  ETH_VLAN_TYPE_UNKNOWN = 0,
414  ETH_VLAN_TYPE_MAX,
415 };
416 
422  uint64_t ids[64];
423 };
424 
443  uint8_t *rss_key;
444  uint8_t rss_key_len;
445  uint64_t rss_hf;
446 };
447 
448 /*
449  * The RSS offload types are defined based on flow types which are defined
450  * in rte_eth_ctrl.h. Different NIC hardwares may support different RSS offload
451  * types. The supported flow types or RSS offload types can be queried by
452  * rte_eth_dev_info_get().
453  */
454 #define ETH_RSS_IPV4 (1ULL << RTE_ETH_FLOW_IPV4)
455 #define ETH_RSS_FRAG_IPV4 (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
456 #define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
457 #define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
458 #define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
459 #define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
460 #define ETH_RSS_IPV6 (1ULL << RTE_ETH_FLOW_IPV6)
461 #define ETH_RSS_FRAG_IPV6 (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
462 #define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
463 #define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
464 #define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
465 #define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
466 #define ETH_RSS_L2_PAYLOAD (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
467 #define ETH_RSS_IPV6_EX (1ULL << RTE_ETH_FLOW_IPV6_EX)
468 #define ETH_RSS_IPV6_TCP_EX (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
469 #define ETH_RSS_IPV6_UDP_EX (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
470 #define ETH_RSS_PORT (1ULL << RTE_ETH_FLOW_PORT)
471 #define ETH_RSS_VXLAN (1ULL << RTE_ETH_FLOW_VXLAN)
472 #define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
473 #define ETH_RSS_NVGRE (1ULL << RTE_ETH_FLOW_NVGRE)
474 
475 #define ETH_RSS_IP ( \
476  ETH_RSS_IPV4 | \
477  ETH_RSS_FRAG_IPV4 | \
478  ETH_RSS_NONFRAG_IPV4_OTHER | \
479  ETH_RSS_IPV6 | \
480  ETH_RSS_FRAG_IPV6 | \
481  ETH_RSS_NONFRAG_IPV6_OTHER | \
482  ETH_RSS_IPV6_EX)
483 
484 #define ETH_RSS_UDP ( \
485  ETH_RSS_NONFRAG_IPV4_UDP | \
486  ETH_RSS_NONFRAG_IPV6_UDP | \
487  ETH_RSS_IPV6_UDP_EX)
488 
489 #define ETH_RSS_TCP ( \
490  ETH_RSS_NONFRAG_IPV4_TCP | \
491  ETH_RSS_NONFRAG_IPV6_TCP | \
492  ETH_RSS_IPV6_TCP_EX)
493 
494 #define ETH_RSS_SCTP ( \
495  ETH_RSS_NONFRAG_IPV4_SCTP | \
496  ETH_RSS_NONFRAG_IPV6_SCTP)
497 
498 #define ETH_RSS_TUNNEL ( \
499  ETH_RSS_VXLAN | \
500  ETH_RSS_GENEVE | \
501  ETH_RSS_NVGRE)
502 
504 #define ETH_RSS_PROTO_MASK ( \
505  ETH_RSS_IPV4 | \
506  ETH_RSS_FRAG_IPV4 | \
507  ETH_RSS_NONFRAG_IPV4_TCP | \
508  ETH_RSS_NONFRAG_IPV4_UDP | \
509  ETH_RSS_NONFRAG_IPV4_SCTP | \
510  ETH_RSS_NONFRAG_IPV4_OTHER | \
511  ETH_RSS_IPV6 | \
512  ETH_RSS_FRAG_IPV6 | \
513  ETH_RSS_NONFRAG_IPV6_TCP | \
514  ETH_RSS_NONFRAG_IPV6_UDP | \
515  ETH_RSS_NONFRAG_IPV6_SCTP | \
516  ETH_RSS_NONFRAG_IPV6_OTHER | \
517  ETH_RSS_L2_PAYLOAD | \
518  ETH_RSS_IPV6_EX | \
519  ETH_RSS_IPV6_TCP_EX | \
520  ETH_RSS_IPV6_UDP_EX | \
521  ETH_RSS_PORT | \
522  ETH_RSS_VXLAN | \
523  ETH_RSS_GENEVE | \
524  ETH_RSS_NVGRE)
525 
526 /*
527  * Definitions used for redirection table entry size.
528  * Some RSS RETA sizes may not be supported by some drivers, check the
529  * documentation or the description of relevant functions for more details.
530  */
531 #define ETH_RSS_RETA_SIZE_64 64
532 #define ETH_RSS_RETA_SIZE_128 128
533 #define ETH_RSS_RETA_SIZE_256 256
534 #define ETH_RSS_RETA_SIZE_512 512
535 #define RTE_RETA_GROUP_SIZE 64
536 
537 /* Definitions used for VMDQ and DCB functionality */
538 #define ETH_VMDQ_MAX_VLAN_FILTERS 64
539 #define ETH_DCB_NUM_USER_PRIORITIES 8
540 #define ETH_VMDQ_DCB_NUM_QUEUES 128
541 #define ETH_DCB_NUM_QUEUES 128
543 /* DCB capability defines */
544 #define ETH_DCB_PG_SUPPORT 0x00000001
545 #define ETH_DCB_PFC_SUPPORT 0x00000002
547 /* Definitions used for VLAN Offload functionality */
548 #define ETH_VLAN_STRIP_OFFLOAD 0x0001
549 #define ETH_VLAN_FILTER_OFFLOAD 0x0002
550 #define ETH_VLAN_EXTEND_OFFLOAD 0x0004
552 /* Definitions used for mask VLAN setting */
553 #define ETH_VLAN_STRIP_MASK 0x0001
554 #define ETH_VLAN_FILTER_MASK 0x0002
555 #define ETH_VLAN_EXTEND_MASK 0x0004
556 #define ETH_VLAN_ID_MAX 0x0FFF
558 /* Definitions used for receive MAC address */
559 #define ETH_NUM_RECEIVE_MAC_ADDR 128
561 /* Definitions used for unicast hash */
562 #define ETH_VMDQ_NUM_UC_HASH_ARRAY 128
564 /* Definitions used for VMDQ pool rx mode setting */
565 #define ETH_VMDQ_ACCEPT_UNTAG 0x0001
566 #define ETH_VMDQ_ACCEPT_HASH_MC 0x0002
567 #define ETH_VMDQ_ACCEPT_HASH_UC 0x0004
568 #define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
569 #define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
572 #define ETH_MIRROR_MAX_VLANS 64
573 
574 #define ETH_MIRROR_VIRTUAL_POOL_UP 0x01
575 #define ETH_MIRROR_UPLINK_PORT 0x02
576 #define ETH_MIRROR_DOWNLINK_PORT 0x04
577 #define ETH_MIRROR_VLAN 0x08
578 #define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10
/** VLAN selection for a mirroring rule (see ETH_MIRROR_VLAN). */
583 struct rte_eth_vlan_mirror {
584  uint64_t vlan_mask; /**< presumably a bitmap of valid entries in vlan_id[] - TODO confirm */
586  uint16_t vlan_id[ETH_MIRROR_MAX_VLANS]; /**< VLAN IDs to mirror, up to 64 entries. */
587 };
588 
593  uint8_t rule_type;
594  uint8_t dst_pool;
595  uint64_t pool_mask;
598 };
599 
607  uint64_t mask;
609  uint16_t reta[RTE_RETA_GROUP_SIZE];
611 };
612 
618  ETH_4_TCS = 4,
620 };
621 
631 };
632 
633 /* This structure may be extended in future. */
/** Configuration of DCB on the RX side of a port. */
634 struct rte_eth_dcb_rx_conf {
635  enum rte_eth_nb_tcs nb_tcs; /**< Number of traffic classes. */
637  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Traffic class for each of the 8 user priorities. */
638 };
639 
/** Configuration of combined VMDq+DCB on the TX side of a port. */
640 struct rte_eth_vmdq_dcb_tx_conf {
641  enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq queue pools. */
643  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Traffic class for each of the 8 user priorities. */
644 };
645 
/** Configuration of DCB on the TX side of a port. */
646 struct rte_eth_dcb_tx_conf {
647  enum rte_eth_nb_tcs nb_tcs; /**< Number of traffic classes. */
649  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Traffic class for each of the 8 user priorities. */
650 };
651 
/** Configuration of VMDq-only mode on the TX side of a port. */
652 struct rte_eth_vmdq_tx_conf {
653  enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq queue pools. */
654 };
655 
670  uint8_t default_pool;
671  uint8_t nb_pool_maps;
672  struct {
673  uint16_t vlan_id;
674  uint64_t pools;
678 };
679 
701  uint8_t default_pool;
703  uint8_t nb_pool_maps;
704  uint32_t rx_mode;
705  struct {
706  uint16_t vlan_id;
707  uint64_t pools;
709 };
710 
721  uint64_t offloads;
722 
723  /* For i40e specifically */
724  uint16_t pvid;
725  __extension__
726  uint8_t hw_vlan_reject_tagged : 1,
732 };
733 
739  uint16_t rx_free_thresh;
740  uint8_t rx_drop_en;
747  uint64_t offloads;
748 };
749 
755  uint16_t tx_rs_thresh;
756  uint16_t tx_free_thresh;
765  uint64_t offloads;
766 };
767 
772  uint16_t nb_max;
773  uint16_t nb_min;
774  uint16_t nb_align;
784  uint16_t nb_seg_max;
785 
797  uint16_t nb_mtu_seg_max;
798 };
799 
808 };
809 
816  uint32_t high_water;
817  uint32_t low_water;
818  uint16_t pause_time;
819  uint16_t send_xon;
822  uint8_t autoneg;
823 };
824 
832  uint8_t priority;
833 };
834 
843 };
844 
852 };
853 
865  uint8_t drop_queue;
866  struct rte_eth_fdir_masks mask;
869 };
870 
879  uint16_t udp_port;
880  uint8_t prot_type;
881 };
882 
888  uint32_t lsc:1;
890  uint32_t rxq:1;
892  uint32_t rmv:1;
893 };
894 
900 struct rte_eth_conf {
901  uint32_t link_speeds;
910  uint32_t lpbk_mode;
915  struct {
919  struct rte_eth_dcb_rx_conf dcb_rx_conf;
923  } rx_adv_conf;
924  union {
925  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
927  struct rte_eth_dcb_tx_conf dcb_tx_conf;
929  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
931  } tx_adv_conf;
937 };
938 
942 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
943 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
944 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
945 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
946 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
947 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
948 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
949 #define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
950 #define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
951 #define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
952 #define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
953 #define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
954 #define DEV_RX_OFFLOAD_SCATTER 0x00002000
955 #define DEV_RX_OFFLOAD_TIMESTAMP 0x00004000
956 #define DEV_RX_OFFLOAD_SECURITY 0x00008000
957 #define DEV_RX_OFFLOAD_KEEP_CRC 0x00010000
958 #define DEV_RX_OFFLOAD_SCTP_CKSUM 0x00020000
959 #define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM 0x00040000
960 
961 #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
962  DEV_RX_OFFLOAD_UDP_CKSUM | \
963  DEV_RX_OFFLOAD_TCP_CKSUM)
964 #define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
965  DEV_RX_OFFLOAD_VLAN_FILTER | \
966  DEV_RX_OFFLOAD_VLAN_EXTEND)
967 
968 /*
969  * If new Rx offload capabilities are defined, they also must be
970  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
971  */
972 
976 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
977 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
978 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
979 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
980 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
981 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
982 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
983 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
984 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
985 #define DEV_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200
986 #define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400
987 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800
988 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000
989 #define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
990 #define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
991 
994 #define DEV_TX_OFFLOAD_MULTI_SEGS 0x00008000
995 
996 #define DEV_TX_OFFLOAD_MBUF_FAST_FREE 0x00010000
997 
1001 #define DEV_TX_OFFLOAD_SECURITY 0x00020000
1002 
1007 #define DEV_TX_OFFLOAD_UDP_TNL_TSO 0x00040000
1008 
1013 #define DEV_TX_OFFLOAD_IP_TNL_TSO 0x00080000
1014 
1015 #define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM 0x00100000
1016 
1020 #define DEV_TX_OFFLOAD_MATCH_METADATA 0x00200000
1021 /*
1022  * If new Tx offload capabilities are defined, they also must be
1023  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1024  */
1025 
1030 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
1031 
1032 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002
1033 
1035 /*
1036  * Fallback default preferred Rx/Tx port parameters.
1037  * These are used if an application requests default parameters
1038  * but the PMD does not provide preferred values.
1039  */
1040 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1041 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1042 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1043 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1044 
1051  uint16_t burst_size;
1052  uint16_t ring_size;
1053  uint16_t nb_queues;
1054 };
1055 
1060 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1061 
1066  const char *name;
1067  uint16_t domain_id;
1068  uint16_t port_id;
1076 };
1077 
1088  struct rte_device *device;
1089  const char *driver_name;
1090  unsigned int if_index;
1092  const uint32_t *dev_flags;
1093  uint32_t min_rx_bufsize;
1094  uint32_t max_rx_pktlen;
1095  uint16_t max_rx_queues;
1096  uint16_t max_tx_queues;
1097  uint32_t max_mac_addrs;
1098  uint32_t max_hash_mac_addrs;
1100  uint16_t max_vfs;
1101  uint16_t max_vmdq_pools;
1110  uint16_t reta_size;
1112  uint8_t hash_key_size;
1117  uint16_t vmdq_queue_base;
1118  uint16_t vmdq_queue_num;
1119  uint16_t vmdq_pool_base;
1122  uint32_t speed_capa;
1124  uint16_t nb_rx_queues;
1125  uint16_t nb_tx_queues;
1131  uint64_t dev_capa;
1137 };
1138 
1144  struct rte_mempool *mp;
1146  uint8_t scattered_rx;
1147  uint16_t nb_desc;
1149 
1156  uint16_t nb_desc;
1158 
1160 #define RTE_ETH_XSTATS_NAME_SIZE 64
1161 
1172  uint64_t id;
1173  uint64_t value;
1174 };
1175 
1185 };
1186 
1187 #define ETH_DCB_NUM_TCS 8
1188 #define ETH_MAX_VMDQ_POOL 64
1189 
1196  struct {
1197  uint8_t base;
1198  uint8_t nb_queue;
1199  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1201  struct {
1202  uint8_t base;
1203  uint8_t nb_queue;
1204  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1205 };
1206 
1212  uint8_t nb_tcs;
1214  uint8_t tc_bws[ETH_DCB_NUM_TCS];
1217 };
1218 
1222 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1223 #define RTE_ETH_QUEUE_STATE_STARTED 1
1224 
1225 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
1226 
1227 /* Macros to check for valid port */
/*
 * If 'port_id' is not a valid port, log an error via RTE_ETHDEV_LOG and
 * make the *enclosing function* return 'retval'.  Only usable inside
 * value-returning functions.
 */
1228 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1229  if (!rte_eth_dev_is_valid_port(port_id)) { \
1230  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1231  return retval; \
1232  } \
1233 } while (0)
1234 
/*
 * Same validation as RTE_ETH_VALID_PORTID_OR_ERR_RET, but performs a bare
 * 'return' - for use inside functions returning void.
 */
1235 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1236  if (!rte_eth_dev_is_valid_port(port_id)) { \
1237  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1238  return; \
1239  } \
1240 } while (0)
1241 
1247 #define ETH_L2_TUNNEL_ENABLE_MASK 0x00000001
1248 
1249 #define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002
1250 
1251 #define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004
1252 
1253 #define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1254 
1277 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
1278  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1279  void *user_param);
1280 
1301 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
1302  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1303 
1314 };
1315 
/** SR-IOV state of an Ethernet device (accessed via RTE_ETH_DEV_SRIOV()). */
1316 struct rte_eth_dev_sriov {
1317  uint8_t active; /**< non-zero when SR-IOV is enabled - TODO confirm encoding (pool count?) */
1318  uint8_t nb_q_per_pool; /**< number of queues per pool - presumably RX; verify in PMDs */
1319  uint16_t def_vmdq_idx; /**< default VMDq pool index - assumed used by the PF; confirm */
1320  uint16_t def_pool_q_idx; /**< first queue index of the default pool - confirm */
1321 };
1322 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1323 
1324 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
1325 
1326 #define RTE_ETH_DEV_NO_OWNER 0
1327 
1328 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
1329 
/** Identity of a port owner, used by the rte_eth_dev_owner_*() API. */
1330 struct rte_eth_dev_owner {
1331  uint64_t id; /**< Owner unique identifier (RTE_ETH_DEV_NO_OWNER presumably means unowned). */
1332  char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< Human-readable owner name. */
1333 };
1334 
1339 #define RTE_ETH_DEV_CLOSE_REMOVE 0x0001
1340 
1341 #define RTE_ETH_DEV_INTR_LSC 0x0002
1342 
1343 #define RTE_ETH_DEV_BONDED_SLAVE 0x0004
1344 
1345 #define RTE_ETH_DEV_INTR_RMV 0x0008
1346 
1347 #define RTE_ETH_DEV_REPRESENTOR 0x0010
1348 
1349 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR 0x0020
1350 
1362 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
1363  const uint64_t owner_id);
1364 
/*
 * Iterate over every port id owned by owner id 'o': 'p' takes each such
 * port in ascending order via rte_eth_find_next_owned_by(), and the loop
 * terminates once the returned id reaches RTE_MAX_ETHPORTS.
 */
1368 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
1369  for (p = rte_eth_find_next_owned_by(0, o); \
1370  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
1371  p = rte_eth_find_next_owned_by(p + 1, o))
1372 
1381 uint16_t rte_eth_find_next(uint16_t port_id);
1382 
1386 #define RTE_ETH_FOREACH_DEV(p) \
1387  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
1388 
1389 
1403 int __rte_experimental rte_eth_dev_owner_new(uint64_t *owner_id);
1404 
1418 int __rte_experimental rte_eth_dev_owner_set(const uint16_t port_id,
1419  const struct rte_eth_dev_owner *owner);
1420 
1434 int __rte_experimental rte_eth_dev_owner_unset(const uint16_t port_id,
1435  const uint64_t owner_id);
1436 
1446 void __rte_experimental rte_eth_dev_owner_delete(const uint64_t owner_id);
1447 
1461 int __rte_experimental rte_eth_dev_owner_get(const uint16_t port_id,
1462  struct rte_eth_dev_owner *owner);
1463 
1476 __rte_deprecated
1477 uint16_t rte_eth_dev_count(void);
1478 
1489 uint16_t rte_eth_dev_count_avail(void);
1490 
1499 uint16_t __rte_experimental rte_eth_dev_count_total(void);
1500 
1512 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
1513 
1522 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
1523 
1532 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
1533 
1573 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
1574  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
1575 
1587 int __rte_experimental
1588 rte_eth_dev_is_removed(uint16_t port_id);
1589 
1639 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1640  uint16_t nb_rx_desc, unsigned int socket_id,
1641  const struct rte_eth_rxconf *rx_conf,
1642  struct rte_mempool *mb_pool);
1643 
1692 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1693  uint16_t nb_tx_desc, unsigned int socket_id,
1694  const struct rte_eth_txconf *tx_conf);
1695 
1706 int rte_eth_dev_socket_id(uint16_t port_id);
1707 
1717 int rte_eth_dev_is_valid_port(uint16_t port_id);
1718 
1735 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
1736 
1752 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
1753 
1770 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
1771 
1787 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
1788 
1808 int rte_eth_dev_start(uint16_t port_id);
1809 
1817 void rte_eth_dev_stop(uint16_t port_id);
1818 
1831 int rte_eth_dev_set_link_up(uint16_t port_id);
1832 
1842 int rte_eth_dev_set_link_down(uint16_t port_id);
1843 
1852 void rte_eth_dev_close(uint16_t port_id);
1853 
1891 int rte_eth_dev_reset(uint16_t port_id);
1892 
1899 void rte_eth_promiscuous_enable(uint16_t port_id);
1900 
1907 void rte_eth_promiscuous_disable(uint16_t port_id);
1908 
1919 int rte_eth_promiscuous_get(uint16_t port_id);
1920 
1927 void rte_eth_allmulticast_enable(uint16_t port_id);
1928 
1935 void rte_eth_allmulticast_disable(uint16_t port_id);
1936 
1947 int rte_eth_allmulticast_get(uint16_t port_id);
1948 
1960 void rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
1961 
1973 void rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
1974 
1992 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
1993 
2004 int rte_eth_stats_reset(uint16_t port_id);
2005 
2035 int rte_eth_xstats_get_names(uint16_t port_id,
2036  struct rte_eth_xstat_name *xstats_names,
2037  unsigned int size);
2038 
2068 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2069  unsigned int n);
2070 
2093 int
2094 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2095  struct rte_eth_xstat_name *xstats_names, unsigned int size,
2096  uint64_t *ids);
2097 
2121 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2122  uint64_t *values, unsigned int size);
2123 
2142 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2143  uint64_t *id);
2144 
2151 void rte_eth_xstats_reset(uint16_t port_id);
2152 
2170 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
2171  uint16_t tx_queue_id, uint8_t stat_idx);
2172 
2190 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
2191  uint16_t rx_queue_id,
2192  uint8_t stat_idx);
2193 
2203 void rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr);
2204 
2214 void rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
2215 
2235 int rte_eth_dev_fw_version_get(uint16_t port_id,
2236  char *fw_version, size_t fw_size);
2237 
2276 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2277  uint32_t *ptypes, int num);
2278 
2290 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
2291 
2307 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
2308 
2328 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
2329 
2349 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2350  int on);
2351 
2369 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2370  enum rte_vlan_type vlan_type,
2371  uint16_t tag_type);
2372 
2394 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
2395 
2408 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
2409 
2424 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
2425 
2426 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
2427  void *userdata);
2428 
2434  buffer_tx_error_fn error_callback;
2435  void *error_userdata;
2436  uint16_t size;
2437  uint16_t length;
2438  struct rte_mbuf *pkts[];
2440 };
2441 
/*
 * Number of bytes to allocate for a struct rte_eth_dev_tx_buffer whose
 * flexible pkts[] array holds 'sz' mbuf pointers (pass the result to
 * rte_eth_tx_buffer_init()).
 */
2448 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
2449  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
2450 
2461 int
2462 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
2463 
2488 int
2490  buffer_tx_error_fn callback, void *userdata);
2491 
2514 void
2515 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2516  void *userdata);
2517 
2541 void
2542 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2543  void *userdata);
2544 
2570 int
2571 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
2572 
2588 };
2589 
2597  uint64_t metadata;
2611 };
2612 
2630 };
2631 
2632 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
2633  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
2653 int rte_eth_dev_callback_register(uint16_t port_id,
2654  enum rte_eth_event_type event,
2655  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
2656 
2675 int rte_eth_dev_callback_unregister(uint16_t port_id,
2676  enum rte_eth_event_type event,
2677  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
2678 
2700 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
2701 
2722 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
2723 
2741 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
2742 
2764 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
2765  int epfd, int op, void *data);
2766 
2784 int __rte_experimental
2785 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
2786 
2800 int rte_eth_led_on(uint16_t port_id);
2801 
2815 int rte_eth_led_off(uint16_t port_id);
2816 
2830 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
2831  struct rte_eth_fc_conf *fc_conf);
2832 
2847 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
2848  struct rte_eth_fc_conf *fc_conf);
2849 
2865 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2866  struct rte_eth_pfc_conf *pfc_conf);
2867 
2887 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *mac_addr,
2888  uint32_t pool);
2889 
2903 int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *mac_addr);
2904 
2918 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
2919  struct ether_addr *mac_addr);
2920 
2937 int rte_eth_dev_rss_reta_update(uint16_t port_id,
2938  struct rte_eth_rss_reta_entry64 *reta_conf,
2939  uint16_t reta_size);
2940 
2957 int rte_eth_dev_rss_reta_query(uint16_t port_id,
2958  struct rte_eth_rss_reta_entry64 *reta_conf,
2959  uint16_t reta_size);
2960 
2980 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
2981  uint8_t on);
2982 
3001 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
3002 
3025 int rte_eth_mirror_rule_set(uint16_t port_id,
3026  struct rte_eth_mirror_conf *mirror_conf,
3027  uint8_t rule_id,
3028  uint8_t on);
3029 
3044 int rte_eth_mirror_rule_reset(uint16_t port_id,
3045  uint8_t rule_id);
3046 
3063 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3064  uint16_t tx_rate);
3065 
3080 int rte_eth_dev_rss_hash_update(uint16_t port_id,
3081  struct rte_eth_rss_conf *rss_conf);
3082 
3097 int
3098 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3099  struct rte_eth_rss_conf *rss_conf);
3100 
3119 int
3120 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3121  struct rte_eth_udp_tunnel *tunnel_udp);
3122 
3142 int
3143 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3144  struct rte_eth_udp_tunnel *tunnel_udp);
3145 
3160 int rte_eth_dev_filter_supported(uint16_t port_id,
3161  enum rte_filter_type filter_type);
3162 
3182 int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3183  enum rte_filter_op filter_op, void *arg);
3184 
3198 int rte_eth_dev_get_dcb_info(uint16_t port_id,
3199  struct rte_eth_dcb_info *dcb_info);
3200 
3201 struct rte_eth_rxtx_callback;
3202 
3228 const struct rte_eth_rxtx_callback *
3229 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3230  rte_rx_callback_fn fn, void *user_param);
3231 
3258 const struct rte_eth_rxtx_callback *
3259 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3260  rte_rx_callback_fn fn, void *user_param);
3261 
3287 const struct rte_eth_rxtx_callback *
3288 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3289  rte_tx_callback_fn fn, void *user_param);
3290 
3323 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3324  const struct rte_eth_rxtx_callback *user_cb);
3325 
3358 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3359  const struct rte_eth_rxtx_callback *user_cb);
3360 
3378 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3379  struct rte_eth_rxq_info *qinfo);
3380 
3398 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3399  struct rte_eth_txq_info *qinfo);
3400 
3418 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
3419 
3432 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
3433 
3449 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
3450 
3466 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
3467 
3485 int __rte_experimental
3486 rte_eth_dev_get_module_info(uint16_t port_id,
3487  struct rte_eth_dev_module_info *modinfo);
3488 
3507 int __rte_experimental
3508 rte_eth_dev_get_module_eeprom(uint16_t port_id,
3509  struct rte_dev_eeprom_info *info);
3510 
3529 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3530  struct ether_addr *mc_addr_set,
3531  uint32_t nb_mc_addr);
3532 
3545 int rte_eth_timesync_enable(uint16_t port_id);
3546 
3559 int rte_eth_timesync_disable(uint16_t port_id);
3560 
3579 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
3580  struct timespec *timestamp, uint32_t flags);
3581 
3597 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3598  struct timespec *timestamp);
3599 
3617 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
3618 
3633 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
3634 
3653 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
3654 
3670 int
3671 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
3672  struct rte_eth_l2_tunnel_conf *l2_tunnel);
3673 
3698 int
3699 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
3700  struct rte_eth_l2_tunnel_conf *l2_tunnel,
3701  uint32_t mask,
3702  uint8_t en);
3703 
3719 int
3720 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
3721 
3736 int
3737 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
3738 
3755 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
3756  uint16_t *nb_rx_desc,
3757  uint16_t *nb_tx_desc);
3758 
3773 int
3774 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
3775 
3785 void *
3786 rte_eth_dev_get_sec_ctx(uint16_t port_id);
3787 
3788 
3789 #include <rte_ethdev_core.h>
3790 
/**
 * Retrieve a burst of up to 'nb_pkts' input packets from the given RX
 * queue of port 'port_id', storing mbuf pointers into rx_pkts[].
 *
 * Fast-path wrapper: looks the device up in rte_eth_devices[] and calls
 * the driver's rx_pkt_burst handler on the queue's private data; when
 * RTE_ETHDEV_RXTX_CALLBACKS is enabled it then runs each registered
 * post-RX callback in list order, letting callbacks adjust nb_rx.
 *
 * NOTE(review): outside RTE_LIBRTE_ETHDEV_DEBUG builds, neither port_id
 * nor queue_id is validated here - callers must pass valid ids.
 *
 * @return the number of packets actually retrieved (<= nb_pkts).
 */
3873 static inline uint16_t
3874 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
3875  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
3876 {
3877  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3878  uint16_t nb_rx;
3879 
3880 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3881  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
3882  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
3883 
3884  if (queue_id >= dev->data->nb_rx_queues) {
3885  RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3886  return 0;
3887  }
3888 #endif
 /* Dispatch to the PMD's receive handler for this queue. */
3889  nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
3890  rx_pkts, nb_pkts);
3891 
3892 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
3893  struct rte_eth_rxtx_callback *cb;
3894 
3895  /* __ATOMIC_RELEASE memory order was used when the
3896  * call back was inserted into the list.
3897  * Since there is a clear dependency between loading
3898  * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
3899  * not required.
3900  */
3901  cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id],
3902  __ATOMIC_RELAXED);
3903 
 /* Each callback sees the previous nb_rx and may shrink/replace it. */
3904  if (unlikely(cb != NULL)) {
3905  do {
3906  nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
3907  nb_pkts, cb->param);
3908  cb = cb->next;
3909  } while (cb != NULL);
3910  }
3911 #endif
3912 
3913  return nb_rx;
3914 }
3915 
3928 static inline int
3929 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
3930 {
3931  struct rte_eth_dev *dev;
3932 
3933  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3934  dev = &rte_eth_devices[port_id];
3935  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
3936  if (queue_id >= dev->data->nb_rx_queues)
3937  return -EINVAL;
3938 
3939  return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
3940 }
3941 
3957 static inline int
3958 rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
3959 {
3960  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3961  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3962  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
3963  return (*dev->dev_ops->rx_descriptor_done)( \
3964  dev->data->rx_queues[queue_id], offset);
3965 }
3966 
3967 #define RTE_ETH_RX_DESC_AVAIL 0
3968 #define RTE_ETH_RX_DESC_DONE 1
3969 #define RTE_ETH_RX_DESC_UNAVAIL 2
4004 static inline int
4005 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
4006  uint16_t offset)
4007 {
4008  struct rte_eth_dev *dev;
4009  void *rxq;
4010 
4011 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4012  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4013 #endif
4014  dev = &rte_eth_devices[port_id];
4015 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4016  if (queue_id >= dev->data->nb_rx_queues)
4017  return -ENODEV;
4018 #endif
4019  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
4020  rxq = dev->data->rx_queues[queue_id];
4021 
4022  return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
4023 }
4024 
4025 #define RTE_ETH_TX_DESC_FULL 0
4026 #define RTE_ETH_TX_DESC_DONE 1
4027 #define RTE_ETH_TX_DESC_UNAVAIL 2
4062 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
4063  uint16_t queue_id, uint16_t offset)
4064 {
4065  struct rte_eth_dev *dev;
4066  void *txq;
4067 
4068 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4069  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4070 #endif
4071  dev = &rte_eth_devices[port_id];
4072 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4073  if (queue_id >= dev->data->nb_tx_queues)
4074  return -ENODEV;
4075 #endif
4076  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
4077  txq = dev->data->tx_queues[queue_id];
4078 
4079  return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
4080 }
4081 
4148 static inline uint16_t
4149 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
4150  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4151 {
4152  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4153 
4154 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4155  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
4156  RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
4157 
4158  if (queue_id >= dev->data->nb_tx_queues) {
4159  RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4160  return 0;
4161  }
4162 #endif
4163 
4164 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
4165  struct rte_eth_rxtx_callback *cb;
4166 
4167  /* __ATOMIC_RELEASE memory order was used when the
4168  * call back was inserted into the list.
4169  * Since there is a clear dependency between loading
4170  * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
4171  * not required.
4172  */
4173  cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id],
4174  __ATOMIC_RELAXED);
4175 
4176  if (unlikely(cb != NULL)) {
4177  do {
4178  nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
4179  cb->param);
4180  cb = cb->next;
4181  } while (cb != NULL);
4182  }
4183 #endif
4184 
4185  return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
4186 }
4187 
4241 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
4242 
4243 static inline uint16_t
4244 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
4245  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4246 {
4247  struct rte_eth_dev *dev;
4248 
4249 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4250  if (!rte_eth_dev_is_valid_port(port_id)) {
4251  RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
4252  rte_errno = EINVAL;
4253  return 0;
4254  }
4255 #endif
4256 
4257  dev = &rte_eth_devices[port_id];
4258 
4259 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4260  if (queue_id >= dev->data->nb_tx_queues) {
4261  RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4262  rte_errno = EINVAL;
4263  return 0;
4264  }
4265 #endif
4266 
4267  if (!dev->tx_pkt_prepare)
4268  return nb_pkts;
4269 
4270  return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
4271  tx_pkts, nb_pkts);
4272 }
4273 
4274 #else
4275 
4276 /*
4277  * Native NOOP operation for compilation targets which doesn't require any
4278  * preparations steps, and functional NOOP may introduce unnecessary performance
4279  * drop.
4280  *
4281  * Generally this is not a good idea to turn it on globally and didn't should
4282  * be used if behavior of tx_preparation can change.
4283  */
4284 
4285 static inline uint16_t
4286 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
4287  __rte_unused uint16_t queue_id,
4288  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4289 {
4290  return nb_pkts;
4291 }
4292 
4293 #endif
4294 
4317 static inline uint16_t
4318 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
4319  struct rte_eth_dev_tx_buffer *buffer)
4320 {
4321  uint16_t sent;
4322  uint16_t to_send = buffer->length;
4323 
4324  if (to_send == 0)
4325  return 0;
4326 
4327  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
4328 
4329  buffer->length = 0;
4330 
4331  /* All packets sent, or to be dealt with by callback below */
4332  if (unlikely(sent != to_send))
4333  buffer->error_callback(&buffer->pkts[sent],
4334  (uint16_t)(to_send - sent),
4335  buffer->error_userdata);
4336 
4337  return sent;
4338 }
4339 
4370 static __rte_always_inline uint16_t
4371 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
4372  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
4373 {
4374  buffer->pkts[buffer->length++] = tx_pkt;
4375  if (buffer->length < buffer->size)
4376  return 0;
4377 
4378  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
4379 }
4380 
4381 #ifdef __cplusplus
4382 }
4383 #endif
4384 
4385 #endif /* _RTE_ETHDEV_H_ */
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1124
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf
Definition: rte_ethdev.h:925
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int __rte_experimental rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
uint8_t tc_bws[ETH_DCB_NUM_TCS]
Definition: rte_ethdev.h:1214
#define ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:538
struct rte_eth_dev_portconf default_rxportconf
Definition: rte_ethdev.h:1127
struct rte_fdir_conf fdir_conf
Definition: rte_ethdev.h:935
int rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel)
uint32_t rmv
Definition: rte_ethdev.h:892
#define __rte_always_inline
Definition: rte_common.h:146
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:755
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
uint16_t nb_desc
Definition: rte_ethdev.h:1156
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1102
uint16_t reta[RTE_RETA_GROUP_SIZE]
Definition: rte_ethdev.h:609
const uint32_t * dev_flags
Definition: rte_ethdev.h:1092
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
void rte_eth_dev_stop(uint16_t port_id)
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:4244
int rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
rte_eth_nb_tcs
Definition: rte_ethdev.h:617
struct rte_eth_vmdq_tx_conf vmdq_tx_conf
Definition: rte_ethdev.h:929
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
void __rte_experimental rte_eth_dev_owner_delete(const uint64_t owner_id)
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:262
struct rte_eth_dcb_tc_queue_mapping::@120 tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS]
enum rte_eth_event_ipsec_subtype subtype
Definition: rte_ethdev.h:2595
struct rte_eth_thresh rx_thresh
Definition: rte_ethdev.h:738
uint16_t rte_eth_find_next(uint16_t port_id)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
int rte_eth_led_off(uint16_t port_id)
rte_fdir_pballoc_type
Definition: rte_ethdev.h:839
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:4005
uint64_t imissed
Definition: rte_ethdev.h:247
static int rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:3958
uint32_t low_water
Definition: rte_ethdev.h:817
uint32_t max_rx_pkt_len
Definition: rte_ethdev.h:396
uint8_t rss_key_len
Definition: rte_ethdev.h:444
void rte_eth_dev_close(uint16_t port_id)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint16_t tx_rate)
uint8_t hthresh
Definition: rte_ethdev.h:328
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1106
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
uint16_t reta_size
Definition: rte_ethdev.h:1110
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
void * userdata
Definition: rte_mbuf.h:620
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint32_t lpbk_mode
Definition: rte_ethdev.h:910
enum rte_fdir_status_mode status
Definition: rte_ethdev.h:863
enum rte_eth_tx_mq_mode mq_mode
Definition: rte_ethdev.h:715
#define rte_errno
Definition: rte_errno.h:29
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint32_t link_speeds
Definition: rte_ethdev.h:901
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1108
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:376
rte_eth_fc_mode
Definition: rte_ethdev.h:803
int __rte_experimental rte_eth_dev_is_removed(uint16_t port_id)
int rte_eth_mirror_rule_set(uint16_t port_id, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on)
uint8_t enable_default_pool
Definition: rte_ethdev.h:669
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:797
#define __rte_unused
Definition: rte_common.h:81
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:260
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
uint64_t opackets
Definition: rte_ethdev.h:244
rte_filter_op
Definition: rte_eth_ctrl.h:81
struct rte_eth_rss_conf rss_conf
Definition: rte_ethdev.h:916
uint8_t hash_key_size
Definition: rte_ethdev.h:1112
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
uint16_t split_hdr_size
Definition: rte_ethdev.h:397
struct rte_mempool * mp
Definition: rte_ethdev.h:1144
uint32_t dcb_capability_en
Definition: rte_ethdev.h:934
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:258
const char * name
Definition: rte_ethdev.h:1066
struct rte_eth_switch_info switch_info
Definition: rte_ethdev.h:1136
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
void rte_eth_allmulticast_enable(uint16_t port_id)
uint32_t rxq
Definition: rte_ethdev.h:890
int rte_eth_dev_set_link_up(uint16_t port_id)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
struct rte_eth_thresh tx_thresh
Definition: rte_ethdev.h:754
struct rte_eth_desc_lim rx_desc_lim
Definition: rte_ethdev.h:1120
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:676
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1118
uint8_t rx_deferred_start
Definition: rte_ethdev.h:741
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:2438
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:2632
struct rte_eth_rxmode rxmode
Definition: rte_ethdev.h:908
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:254
uint32_t high_water
Definition: rte_ethdev.h:816
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:668
struct rte_eth_txconf conf
Definition: rte_ethdev.h:1155
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
struct rte_intr_conf intr_conf
Definition: rte_ethdev.h:936
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1160
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
struct rte_eth_desc_lim tx_desc_lim
Definition: rte_ethdev.h:1121
int rte_eth_timesync_disable(uint16_t port_id)
uint64_t offloads
Definition: rte_ethdev.h:747
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *mac_addr)
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type, enum rte_filter_op filter_op, void *arg)
uint16_t send_xon
Definition: rte_ethdev.h:819
int rte_eth_stats_reset(uint16_t port_id)
struct rte_eth_txconf default_txconf
Definition: rte_ethdev.h:1116
#define unlikely(x)
uint16_t nb_max
Definition: rte_ethdev.h:772
uint64_t ibytes
Definition: rte_ethdev.h:245
uint64_t offloads
Definition: rte_ethdev.h:765
uint64_t oerrors
Definition: rte_ethdev.h:252
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
struct rte_eth_dcb_rx_conf dcb_rx_conf
Definition: rte_ethdev.h:919
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct ether_addr *mc_addr_set, uint32_t nb_mc_addr)
struct rte_eth_vmdq_rx_conf vmdq_rx_conf
Definition: rte_ethdev.h:921
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
char name[RTE_ETH_XSTATS_NAME_SIZE]
Definition: rte_ethdev.h:1184
void rte_eth_promiscuous_enable(uint16_t port_id)
struct rte_eth_vmdq_dcb_conf::@115 pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]
uint64_t offloads
Definition: rte_ethdev.h:403
enum rte_eth_rx_mq_mode mq_mode
Definition: rte_ethdev.h:395
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:699
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
uint16_t tx_free_thresh
Definition: rte_ethdev.h:756
uint16_t nb_desc
Definition: rte_ethdev.h:1147
void rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:3874
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_start(uint16_t port_id)
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1101
uint8_t scattered_rx
Definition: rte_ethdev.h:1146
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
struct rte_eth_dcb_tx_conf dcb_tx_conf
Definition: rte_ethdev.h:927
uint64_t offloads
Definition: rte_ethdev.h:721
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1119
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1104
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:256
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr, uint8_t on)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:1301
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
uint16_t __rte_experimental rte_eth_dev_count_total(void)
uint64_t obytes
Definition: rte_ethdev.h:246
uint8_t enable_loop_back
Definition: rte_ethdev.h:702
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
struct rte_eth_rxconf conf
Definition: rte_ethdev.h:1145
void rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
#define ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:539
void rte_eth_promiscuous_disable(uint16_t port_id)
uint16_t max_tx_queues
Definition: rte_ethdev.h:1096
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
rte_eth_dev_state
Definition: rte_ethdev.h:1307
uint16_t rx_free_thresh
Definition: rte_ethdev.h:739
struct rte_eth_vlan_mirror vlan
Definition: rte_ethdev.h:597
uint64_t dev_capa
Definition: rte_ethdev.h:1131
uint64_t ierrors
Definition: rte_ethdev.h:251
union rte_eth_conf::@118 tx_adv_conf
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
__extension__ uint8_t hw_vlan_insert_pvid
Definition: rte_ethdev.h:727
uint8_t priority
Definition: rte_ethdev.h:832
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1114
int __rte_experimental rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
struct rte_eth_dev_portconf default_txportconf
Definition: rte_ethdev.h:1129
rte_vlan_type
Definition: rte_ethdev.h:410
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
uint16_t nb_seg_max
Definition: rte_ethdev.h:784
uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:1213
uint64_t ipackets
Definition: rte_ethdev.h:243
uint16_t max_vfs
Definition: rte_ethdev.h:1100
uint16_t pause_time
Definition: rte_ethdev.h:818
struct rte_eth_dcb_tc_queue_mapping tc_queue
Definition: rte_ethdev.h:1216
int __rte_experimental rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_filter_type
Definition: rte_eth_ctrl.h:63
int rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel, uint32_t mask, uint8_t en)
uint64_t rx_nombuf
Definition: rte_ethdev.h:253
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:4371
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:727
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
#define __rte_cache_min_aligned
Definition: rte_memory.h:71
struct rte_eth_vmdq_rx_conf::@116 pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *mac_addr, uint32_t pool)
#define ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:335
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1117
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:2577
rte_eth_nb_pools
Definition: rte_ethdev.h:626
void rte_eth_xstats_reset(uint16_t port_id)
#define ETH_MIRROR_MAX_VLANS
Definition: rte_ethdev.h:572
uint16_t nb_align
Definition: rte_ethdev.h:774
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *mac_addr)
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:343
const char * driver_name
Definition: rte_ethdev.h:1089
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:3929
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_socket_id(uint16_t port_id)
uint8_t enable_default_pool
Definition: rte_ethdev.h:700
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1125
struct rte_eth_fdir_flex_conf flex_conf
Definition: rte_ethdev.h:867
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
int __rte_experimental rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_filter_supported(uint16_t port_id, enum rte_filter_type filter_type)
int __rte_experimental rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1097
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
uint64_t value
Definition: rte_ethdev.h:1173
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
struct rte_eth_conf::@117 rx_adv_conf
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
enum rte_fdir_pballoc_type pballoc
Definition: rte_ethdev.h:862
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1094
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
uint64_t rss_hf
Definition: rte_ethdev.h:445
int __rte_experimental rte_eth_dev_owner_new(uint64_t *owner_id)
uint64_t id
Definition: rte_ethdev.h:1172
__extension__ uint8_t hw_vlan_reject_tagged
Definition: rte_ethdev.h:727
enum rte_fdir_mode mode
Definition: rte_ethdev.h:861
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
unsigned int if_index
Definition: rte_ethdev.h:1090
void rte_eth_allmulticast_disable(uint16_t port_id)
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:821
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:1277
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:820
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
rte_fdir_mode
Definition: rte_eth_ctrl.h:658
struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf
Definition: rte_ethdev.h:917
uint8_t * rss_key
Definition: rte_ethdev.h:443
rte_fdir_status_mode
Definition: rte_ethdev.h:848
struct rte_eth_dcb_tc_queue_mapping::@119 tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS]
uint8_t tx_deferred_start
Definition: rte_ethdev.h:759
uint8_t wthresh
Definition: rte_ethdev.h:329
uint16_t max_rx_queues
Definition: rte_ethdev.h:1095
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
void rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
struct rte_eth_fc_conf fc
Definition: rte_ethdev.h:831
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
struct rte_eth_txmode txmode
Definition: rte_ethdev.h:909
int rte_eth_allmulticast_get(uint16_t port_id)
uint8_t rx_drop_en
Definition: rte_ethdev.h:740
int rte_eth_dev_is_valid_port(uint16_t port_id)
uint16_t nb_min
Definition: rte_ethdev.h:773
int rte_eth_timesync_enable(uint16_t port_id)
uint8_t pthresh
Definition: rte_ethdev.h:327
void rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
int __rte_experimental rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
struct rte_eth_rxconf default_rxconf
Definition: rte_ethdev.h:1115
uint32_t speed_capa
Definition: rte_ethdev.h:1122
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:4149
uint8_t drop_queue
Definition: rte_ethdev.h:865
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_deprecated uint16_t rte_eth_dev_count(void)
uint8_t autoneg
Definition: rte_ethdev.h:822
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1093
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
uint32_t lsc
Definition: rte_ethdev.h:888
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:4318
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
rte_eth_event_type
Definition: rte_ethdev.h:2616