/* DPDK 19.11.0-rc2 — rte_ethdev.h (text extracted from the generated API documentation). */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
142 #ifdef __cplusplus
143 extern "C" {
144 #endif
145 
146 #include <stdint.h>
147 
148 /* Use this macro to check if LRO API is supported */
149 #define RTE_ETHDEV_HAS_LRO_SUPPORT
150 
151 #include <rte_compat.h>
152 #include <rte_log.h>
153 #include <rte_interrupts.h>
154 #include <rte_dev.h>
155 #include <rte_devargs.h>
156 #include <rte_errno.h>
157 #include <rte_common.h>
158 #include <rte_config.h>
159 #include <rte_ether.h>
160 
161 #include "rte_dev_info.h"
162 
163 extern int rte_eth_dev_logtype;
164 
/*
 * Ethdev library logging helper: token-pastes `level` onto RTE_LOG_ and
 * logs under the rte_eth_dev_logtype component. The leading "" literal is
 * string-concatenated with the first variadic argument, which forces that
 * argument to be a string literal (a compile-time format-string check).
 */
165 #define RTE_ETHDEV_LOG(level, ...) \
166  rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
167 
168 struct rte_mbuf;
169 
186 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
187 
202 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
203 
216 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
217 
/*
 * Iterate over every ethdev port matching `devargs`: initializes `iter`,
 * then advances `id` with rte_eth_iterator_next() until it returns the
 * RTE_MAX_ETHPORTS end-of-iteration sentinel. Note the iterator is NOT
 * cleaned up by the macro on early loop exit — see
 * rte_eth_iterator_cleanup() above.
 */
231 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
232  for (rte_eth_iterator_init(iter, devargs), \
233  id = rte_eth_iterator_next(iter); \
234  id != RTE_MAX_ETHPORTS; \
235  id = rte_eth_iterator_next(iter))
236 
244  uint64_t ipackets;
245  uint64_t opackets;
246  uint64_t ibytes;
247  uint64_t obytes;
248  uint64_t imissed;
252  uint64_t ierrors;
253  uint64_t oerrors;
254  uint64_t rx_nombuf;
255  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
257  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
259  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
261  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
263  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
265 };
266 
/* Capability/request bitmap of link speeds (one bit per speed).
 * The _HD suffix marks half-duplex; the others are full-duplex.
 */
270 #define ETH_LINK_SPEED_AUTONEG (0 << 0) /**< Autonegotiate (all supported speeds) */
271 #define ETH_LINK_SPEED_FIXED (1 << 0) /**< Disable autoneg (fixed speed) */
272 #define ETH_LINK_SPEED_10M_HD (1 << 1) /**< 10 Mbps half-duplex */
273 #define ETH_LINK_SPEED_10M (1 << 2) /**< 10 Mbps full-duplex */
274 #define ETH_LINK_SPEED_100M_HD (1 << 3) /**< 100 Mbps half-duplex */
275 #define ETH_LINK_SPEED_100M (1 << 4) /**< 100 Mbps full-duplex */
276 #define ETH_LINK_SPEED_1G (1 << 5) /**< 1 Gbps */
277 #define ETH_LINK_SPEED_2_5G (1 << 6) /**< 2.5 Gbps */
278 #define ETH_LINK_SPEED_5G (1 << 7) /**< 5 Gbps */
279 #define ETH_LINK_SPEED_10G (1 << 8) /**< 10 Gbps */
280 #define ETH_LINK_SPEED_20G (1 << 9) /**< 20 Gbps */
281 #define ETH_LINK_SPEED_25G (1 << 10) /**< 25 Gbps */
282 #define ETH_LINK_SPEED_40G (1 << 11) /**< 40 Gbps */
283 #define ETH_LINK_SPEED_50G (1 << 12) /**< 50 Gbps */
284 #define ETH_LINK_SPEED_56G (1 << 13) /**< 56 Gbps */
285 #define ETH_LINK_SPEED_100G (1 << 14) /**< 100 Gbps */
/* Numeric link speeds in Mbps (values match the suffix: 10M -> 10, 1G -> 1000). */
290 #define ETH_SPEED_NUM_NONE 0 /**< Speed not known/defined */
291 #define ETH_SPEED_NUM_10M 10
292 #define ETH_SPEED_NUM_100M 100
293 #define ETH_SPEED_NUM_1G 1000
294 #define ETH_SPEED_NUM_2_5G 2500
295 #define ETH_SPEED_NUM_5G 5000
296 #define ETH_SPEED_NUM_10G 10000
297 #define ETH_SPEED_NUM_20G 20000
298 #define ETH_SPEED_NUM_25G 25000
299 #define ETH_SPEED_NUM_40G 40000
300 #define ETH_SPEED_NUM_50G 50000
301 #define ETH_SPEED_NUM_56G 56000
302 #define ETH_SPEED_NUM_100G 100000
/**
 * Link state of an Ethernet port: numeric speed plus three one-bit flags.
 * Forced to 8-byte alignment by the attribute below — presumably so the
 * whole struct can be accessed as one 64-bit word (confirm in rte_ethdev.c).
 */
__extension__
struct rte_eth_link {
	uint32_t link_speed;        /**< One of the ETH_SPEED_NUM_* values (Mbps). */
	uint16_t link_duplex  : 1;  /**< ETH_LINK_HALF_DUPLEX / ETH_LINK_FULL_DUPLEX. */
	uint16_t link_autoneg : 1;  /**< ETH_LINK_FIXED / ETH_LINK_AUTONEG. */
	uint16_t link_status  : 1;  /**< ETH_LINK_DOWN / ETH_LINK_UP. */
} __attribute__((aligned(8)));
315 /* Utility constants */
316 #define ETH_LINK_HALF_DUPLEX 0
317 #define ETH_LINK_FULL_DUPLEX 1
318 #define ETH_LINK_DOWN 0
319 #define ETH_LINK_UP 1
320 #define ETH_LINK_FIXED 0
321 #define ETH_LINK_AUTONEG 1
/**
 * Per-queue ring threshold registers.
 * NOTE(review): field meanings inferred from names — prefetch/host/writeback
 * thresholds; exact hardware semantics are PMD-specific.
 */
struct rte_eth_thresh {
	uint8_t pthresh; /**< Ring prefetch threshold. */
	uint8_t hthresh; /**< Ring host threshold. */
	uint8_t wthresh; /**< Ring writeback threshold. */
};
332 
336 #define ETH_MQ_RX_RSS_FLAG 0x1
337 #define ETH_MQ_RX_DCB_FLAG 0x2
338 #define ETH_MQ_RX_VMDQ_FLAG 0x4
339 
347 
351  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
353  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
354 
356  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
358  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
360  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
363  ETH_MQ_RX_VMDQ_FLAG,
364 };
365 
369 #define ETH_RSS ETH_MQ_RX_RSS
370 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
371 #define ETH_DCB_RX ETH_MQ_RX_DCB
372 
382 };
383 
387 #define ETH_DCB_NONE ETH_MQ_TX_NONE
388 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
389 #define ETH_DCB_TX ETH_MQ_TX_DCB
390 
397  uint32_t max_rx_pkt_len;
400  uint16_t split_hdr_size;
406  uint64_t offloads;
407 
408  uint64_t reserved_64s[2];
409  void *reserved_ptrs[2];
410 };
411 
417  ETH_VLAN_TYPE_UNKNOWN = 0,
420  ETH_VLAN_TYPE_MAX,
421 };
422 
428  uint64_t ids[64];
429 };
430 
449  uint8_t *rss_key;
450  uint8_t rss_key_len;
451  uint64_t rss_hf;
452 };
453 
454 /*
455  * A packet can be identified by hardware as different flow types. Different
456  * NIC hardware may support different flow types.
457  * Basically, the NIC hardware identifies the flow type as deep protocol as
458  * possible, and exclusively. For example, if a packet is identified as
459  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
460  * though it is an actual IPV4 packet.
461  */
462 #define RTE_ETH_FLOW_UNKNOWN 0
463 #define RTE_ETH_FLOW_RAW 1
464 #define RTE_ETH_FLOW_IPV4 2
465 #define RTE_ETH_FLOW_FRAG_IPV4 3
466 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
467 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
468 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
469 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
470 #define RTE_ETH_FLOW_IPV6 8
471 #define RTE_ETH_FLOW_FRAG_IPV6 9
472 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
473 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
474 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
475 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
476 #define RTE_ETH_FLOW_L2_PAYLOAD 14
477 #define RTE_ETH_FLOW_IPV6_EX 15
478 #define RTE_ETH_FLOW_IPV6_TCP_EX 16
479 #define RTE_ETH_FLOW_IPV6_UDP_EX 17
480 #define RTE_ETH_FLOW_PORT 18
481 
482 #define RTE_ETH_FLOW_VXLAN 19
483 #define RTE_ETH_FLOW_GENEVE 20
484 #define RTE_ETH_FLOW_NVGRE 21
485 #define RTE_ETH_FLOW_VXLAN_GPE 22
486 #define RTE_ETH_FLOW_GTPU 23
487 #define RTE_ETH_FLOW_MAX 24
488 
489 /*
490  * Below macros are defined for RSS offload types, they can be used to
491  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
492  */
493 #define ETH_RSS_IPV4 (1ULL << 2)
494 #define ETH_RSS_FRAG_IPV4 (1ULL << 3)
495 #define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << 4)
496 #define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << 5)
497 #define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << 6)
498 #define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
499 #define ETH_RSS_IPV6 (1ULL << 8)
500 #define ETH_RSS_FRAG_IPV6 (1ULL << 9)
501 #define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << 10)
502 #define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << 11)
503 #define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << 12)
504 #define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
505 #define ETH_RSS_L2_PAYLOAD (1ULL << 14)
506 #define ETH_RSS_IPV6_EX (1ULL << 15)
507 #define ETH_RSS_IPV6_TCP_EX (1ULL << 16)
508 #define ETH_RSS_IPV6_UDP_EX (1ULL << 17)
509 #define ETH_RSS_PORT (1ULL << 18)
510 #define ETH_RSS_VXLAN (1ULL << 19)
511 #define ETH_RSS_GENEVE (1ULL << 20)
512 #define ETH_RSS_NVGRE (1ULL << 21)
513 #define ETH_RSS_GTPU (1ULL << 23)
514 
515 /*
516  * We use the following macros to combine with above ETH_RSS_* for
517  * more specific input set selection. These bits are defined starting
518  * from the high end of the 64 bits.
519  * Note: If we use above ETH_RSS_* without SRC/DST_ONLY, it represents
520  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
521  * the same level are used simultaneously, it is the same case as none of
522  * them are added.
523  */
/*
 * Input-set modifier bits combined with the ETH_RSS_* protocol bits in
 * rte_eth_rss_conf.rss_hf / rte_flow_action_rss.types. They are allocated
 * downward from the top of the 64-bit field. Without a SRC/DST_ONLY bit,
 * both source and destination are hashed; setting SRC_ONLY and DST_ONLY of
 * the same layer together is treated as if neither was set.
 */
#define ETH_RSS_L3_SRC_ONLY (1ULL << 63)
#define ETH_RSS_L3_DST_ONLY (1ULL << 62)
#define ETH_RSS_L4_SRC_ONLY (1ULL << 61)
#define ETH_RSS_L4_DST_ONLY (1ULL << 60)

/**
 * Normalize an RSS hash-field request: when both the SRC_ONLY and the
 * DST_ONLY modifier of the same layer (L3 or L4) are present they cancel
 * out, so both bits of that pair are cleared. All other bits pass through.
 *
 * @param rss_hf requested ETH_RSS_* bitmask
 * @return the refined bitmask
 */
static inline uint64_t
rte_eth_rss_hf_refine(uint64_t rss_hf)
{
	const uint64_t l3_pair = ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY;
	const uint64_t l4_pair = ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY;

	if ((rss_hf & l3_pair) == l3_pair)
		rss_hf &= ~l3_pair;

	if ((rss_hf & l4_pair) == l4_pair)
		rss_hf &= ~l4_pair;

	return rss_hf;
}
550 
551 #define ETH_RSS_IP ( \
552  ETH_RSS_IPV4 | \
553  ETH_RSS_FRAG_IPV4 | \
554  ETH_RSS_NONFRAG_IPV4_OTHER | \
555  ETH_RSS_IPV6 | \
556  ETH_RSS_FRAG_IPV6 | \
557  ETH_RSS_NONFRAG_IPV6_OTHER | \
558  ETH_RSS_IPV6_EX)
559 
560 #define ETH_RSS_UDP ( \
561  ETH_RSS_NONFRAG_IPV4_UDP | \
562  ETH_RSS_NONFRAG_IPV6_UDP | \
563  ETH_RSS_IPV6_UDP_EX)
564 
565 #define ETH_RSS_TCP ( \
566  ETH_RSS_NONFRAG_IPV4_TCP | \
567  ETH_RSS_NONFRAG_IPV6_TCP | \
568  ETH_RSS_IPV6_TCP_EX)
569 
570 #define ETH_RSS_SCTP ( \
571  ETH_RSS_NONFRAG_IPV4_SCTP | \
572  ETH_RSS_NONFRAG_IPV6_SCTP)
573 
574 #define ETH_RSS_TUNNEL ( \
575  ETH_RSS_VXLAN | \
576  ETH_RSS_GENEVE | \
577  ETH_RSS_NVGRE)
578 
580 #define ETH_RSS_PROTO_MASK ( \
581  ETH_RSS_IPV4 | \
582  ETH_RSS_FRAG_IPV4 | \
583  ETH_RSS_NONFRAG_IPV4_TCP | \
584  ETH_RSS_NONFRAG_IPV4_UDP | \
585  ETH_RSS_NONFRAG_IPV4_SCTP | \
586  ETH_RSS_NONFRAG_IPV4_OTHER | \
587  ETH_RSS_IPV6 | \
588  ETH_RSS_FRAG_IPV6 | \
589  ETH_RSS_NONFRAG_IPV6_TCP | \
590  ETH_RSS_NONFRAG_IPV6_UDP | \
591  ETH_RSS_NONFRAG_IPV6_SCTP | \
592  ETH_RSS_NONFRAG_IPV6_OTHER | \
593  ETH_RSS_L2_PAYLOAD | \
594  ETH_RSS_IPV6_EX | \
595  ETH_RSS_IPV6_TCP_EX | \
596  ETH_RSS_IPV6_UDP_EX | \
597  ETH_RSS_PORT | \
598  ETH_RSS_VXLAN | \
599  ETH_RSS_GENEVE | \
600  ETH_RSS_NVGRE)
601 
602 /*
603  * Definitions used for redirection table entry size.
604  * Some RSS RETA sizes may not be supported by some drivers, check the
605  * documentation or the description of relevant functions for more details.
606  */
607 #define ETH_RSS_RETA_SIZE_64 64
608 #define ETH_RSS_RETA_SIZE_128 128
609 #define ETH_RSS_RETA_SIZE_256 256
610 #define ETH_RSS_RETA_SIZE_512 512
611 #define RTE_RETA_GROUP_SIZE 64
612 
613 /* Definitions used for VMDQ and DCB functionality */
614 #define ETH_VMDQ_MAX_VLAN_FILTERS 64
615 #define ETH_DCB_NUM_USER_PRIORITIES 8
616 #define ETH_VMDQ_DCB_NUM_QUEUES 128
617 #define ETH_DCB_NUM_QUEUES 128
619 /* DCB capability defines */
620 #define ETH_DCB_PG_SUPPORT 0x00000001
621 #define ETH_DCB_PFC_SUPPORT 0x00000002
623 /* Definitions used for VLAN Offload functionality */
624 #define ETH_VLAN_STRIP_OFFLOAD 0x0001
625 #define ETH_VLAN_FILTER_OFFLOAD 0x0002
626 #define ETH_VLAN_EXTEND_OFFLOAD 0x0004
627 #define ETH_QINQ_STRIP_OFFLOAD 0x0008
629 /* Definitions used for mask VLAN setting */
630 #define ETH_VLAN_STRIP_MASK 0x0001
631 #define ETH_VLAN_FILTER_MASK 0x0002
632 #define ETH_VLAN_EXTEND_MASK 0x0004
633 #define ETH_QINQ_STRIP_MASK 0x0008
634 #define ETH_VLAN_ID_MAX 0x0FFF
636 /* Definitions used for receive MAC address */
637 #define ETH_NUM_RECEIVE_MAC_ADDR 128
639 /* Definitions used for unicast hash */
640 #define ETH_VMDQ_NUM_UC_HASH_ARRAY 128
642 /* Definitions used for VMDQ pool rx mode setting */
643 #define ETH_VMDQ_ACCEPT_UNTAG 0x0001
644 #define ETH_VMDQ_ACCEPT_HASH_MC 0x0002
645 #define ETH_VMDQ_ACCEPT_HASH_UC 0x0004
646 #define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
647 #define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
#define ETH_MIRROR_MAX_VLANS 64 /**< Max VLAN entries one mirror rule can hold. */

/* Mirror rule type bits (usable in a rule_type bitmask). */
#define ETH_MIRROR_VIRTUAL_POOL_UP 0x01   /**< Virtual pool uplink mirroring. */
#define ETH_MIRROR_UPLINK_PORT 0x02       /**< Uplink port mirroring. */
#define ETH_MIRROR_DOWNLINK_PORT 0x04     /**< Downlink port mirroring. */
#define ETH_MIRROR_VLAN 0x08              /**< VLAN mirroring. */
#define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10 /**< Virtual pool downlink mirroring. */

/**
 * Set of VLAN IDs selected for mirroring.
 */
struct rte_eth_vlan_mirror {
	/** Bit i set means vlan_id[i] below holds a valid entry. */
	uint64_t vlan_mask;
	/** VLAN ID list; entries are validated by vlan_mask. */
	uint16_t vlan_id[ETH_MIRROR_MAX_VLANS];
};
666 
671  uint8_t rule_type;
672  uint8_t dst_pool;
673  uint64_t pool_mask;
676 };
677 
685  uint64_t mask;
687  uint16_t reta[RTE_RETA_GROUP_SIZE];
689 };
690 
696  ETH_4_TCS = 4,
698 };
699 
709 };
710 
711 /* This structure may be extended in future. */
712 struct rte_eth_dcb_rx_conf {
713  enum rte_eth_nb_tcs nb_tcs;
715  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
716 };
717 
718 struct rte_eth_vmdq_dcb_tx_conf {
719  enum rte_eth_nb_pools nb_queue_pools;
721  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
722 };
723 
724 struct rte_eth_dcb_tx_conf {
725  enum rte_eth_nb_tcs nb_tcs;
727  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
728 };
729 
730 struct rte_eth_vmdq_tx_conf {
731  enum rte_eth_nb_pools nb_queue_pools;
732 };
733 
748  uint8_t default_pool;
749  uint8_t nb_pool_maps;
750  struct {
751  uint16_t vlan_id;
752  uint64_t pools;
756 };
757 
779  uint8_t default_pool;
781  uint8_t nb_pool_maps;
782  uint32_t rx_mode;
783  struct {
784  uint16_t vlan_id;
785  uint64_t pools;
787 };
788 
799  uint64_t offloads;
800 
801  /* For i40e specifically */
802  uint16_t pvid;
803  __extension__
804  uint8_t hw_vlan_reject_tagged : 1,
811  uint64_t reserved_64s[2];
812  void *reserved_ptrs[2];
813 };
814 
820  uint16_t rx_free_thresh;
821  uint8_t rx_drop_en;
828  uint64_t offloads;
829 
830  uint64_t reserved_64s[2];
831  void *reserved_ptrs[2];
832 };
833 
839  uint16_t tx_rs_thresh;
840  uint16_t tx_free_thresh;
849  uint64_t offloads;
850 
851  uint64_t reserved_64s[2];
852  void *reserved_ptrs[2];
853 };
854 
863  uint16_t max_nb_queues;
865  uint16_t max_rx_2_tx;
867  uint16_t max_tx_2_rx;
868  uint16_t max_nb_desc;
869 };
870 
871 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
872 
880  uint16_t port;
881  uint16_t queue;
882 };
883 
891  uint16_t peer_count;
892  struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
893 };
894 
899  uint16_t nb_max;
900  uint16_t nb_min;
901  uint16_t nb_align;
911  uint16_t nb_seg_max;
912 
924  uint16_t nb_mtu_seg_max;
925 };
926 
935 };
936 
943  uint32_t high_water;
944  uint32_t low_water;
945  uint16_t pause_time;
946  uint16_t send_xon;
949  uint8_t autoneg;
950 };
951 
959  uint8_t priority;
960 };
961 
966  RTE_TUNNEL_TYPE_NONE = 0,
967  RTE_TUNNEL_TYPE_VXLAN,
968  RTE_TUNNEL_TYPE_GENEVE,
969  RTE_TUNNEL_TYPE_TEREDO,
970  RTE_TUNNEL_TYPE_NVGRE,
971  RTE_TUNNEL_TYPE_IP_IN_GRE,
972  RTE_L2_TUNNEL_TYPE_E_TAG,
973  RTE_TUNNEL_TYPE_VXLAN_GPE,
974  RTE_TUNNEL_TYPE_MAX,
975 };
976 
977 /* Deprecated API file for rte_eth_dev_filter_* functions */
978 #include "rte_eth_ctrl.h"
979 
988 };
989 
997 };
998 
1010  uint8_t drop_queue;
1011  struct rte_eth_fdir_masks mask;
1014 };
1015 
1024  uint16_t udp_port;
1025  uint8_t prot_type;
1026 };
1027 
1033  uint32_t lsc:1;
1035  uint32_t rxq:1;
1037  uint32_t rmv:1;
1038 };
1039 
1046  uint32_t link_speeds;
1055  uint32_t lpbk_mode;
1060  struct {
1064  struct rte_eth_dcb_rx_conf dcb_rx_conf;
1068  } rx_adv_conf;
1069  union {
1070  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1072  struct rte_eth_dcb_tx_conf dcb_tx_conf;
1074  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1076  } tx_adv_conf;
1082 };
1083 
1087 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
1088 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
1089 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
1090 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
1091 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
1092 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
1093 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
1094 #define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
1095 #define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
1096 #define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
1097 #define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
1098 #define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
1099 #define DEV_RX_OFFLOAD_SCATTER 0x00002000
1100 #define DEV_RX_OFFLOAD_TIMESTAMP 0x00004000
1101 #define DEV_RX_OFFLOAD_SECURITY 0x00008000
1102 #define DEV_RX_OFFLOAD_KEEP_CRC 0x00010000
1103 #define DEV_RX_OFFLOAD_SCTP_CKSUM 0x00020000
1104 #define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM 0x00040000
1105 #define DEV_RX_OFFLOAD_RSS_HASH 0x00080000
1106 
1107 #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
1108  DEV_RX_OFFLOAD_UDP_CKSUM | \
1109  DEV_RX_OFFLOAD_TCP_CKSUM)
1110 #define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
1111  DEV_RX_OFFLOAD_VLAN_FILTER | \
1112  DEV_RX_OFFLOAD_VLAN_EXTEND | \
1113  DEV_RX_OFFLOAD_QINQ_STRIP)
1114 
1115 /*
1116  * If new Rx offload capabilities are defined, they also must be
1117  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1118  */
1119 
1123 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
1124 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
1125 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
1126 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
1127 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
1128 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
1129 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
1130 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
1131 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
1132 #define DEV_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200
1133 #define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400
1134 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800
1135 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000
1136 #define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
1137 #define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
1138 
1141 #define DEV_TX_OFFLOAD_MULTI_SEGS 0x00008000
1142 
1143 #define DEV_TX_OFFLOAD_MBUF_FAST_FREE 0x00010000
1144 
1148 #define DEV_TX_OFFLOAD_SECURITY 0x00020000
1149 
1154 #define DEV_TX_OFFLOAD_UDP_TNL_TSO 0x00040000
1155 
1160 #define DEV_TX_OFFLOAD_IP_TNL_TSO 0x00080000
1161 
1162 #define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM 0x00100000
1163 
1164 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
1165 
1166 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002
1167 
1169 /*
1170  * If new Tx offload capabilities are defined, they also must be
1171  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1172  */
1173 
1174 /*
1175  * Fallback default preferred Rx/Tx port parameters.
1176  * These are used if an application requests default parameters
1177  * but the PMD does not provide preferred values.
1178  */
1179 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1180 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1181 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1182 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1183 
1190  uint16_t burst_size;
1191  uint16_t ring_size;
1192  uint16_t nb_queues;
1193 };
1194 
1199 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (0)
1200 
1205  const char *name;
1206  uint16_t domain_id;
1207  uint16_t port_id;
1215 };
1216 
1227  struct rte_device *device;
1228  const char *driver_name;
1229  unsigned int if_index;
1231  uint16_t min_mtu;
1232  uint16_t max_mtu;
1233  const uint32_t *dev_flags;
1234  uint32_t min_rx_bufsize;
1235  uint32_t max_rx_pktlen;
1238  uint16_t max_rx_queues;
1239  uint16_t max_tx_queues;
1240  uint32_t max_mac_addrs;
1241  uint32_t max_hash_mac_addrs;
1243  uint16_t max_vfs;
1244  uint16_t max_vmdq_pools;
1253  uint16_t reta_size;
1255  uint8_t hash_key_size;
1260  uint16_t vmdq_queue_base;
1261  uint16_t vmdq_queue_num;
1262  uint16_t vmdq_pool_base;
1265  uint32_t speed_capa;
1267  uint16_t nb_rx_queues;
1268  uint16_t nb_tx_queues;
1274  uint64_t dev_capa;
1280 
1281  uint64_t reserved_64s[2];
1282  void *reserved_ptrs[2];
1283 };
1284 
1290  struct rte_mempool *mp;
1292  uint8_t scattered_rx;
1293  uint16_t nb_desc;
1295 
1302  uint16_t nb_desc;
1304 
1305 /* Generic Burst mode flag definition, values can be ORed. */
1306 
1312 #define RTE_ETH_BURST_FLAG_PER_QUEUE (1ULL << 0)
1313 
1319  uint64_t flags;
1321 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1322  char info[RTE_ETH_BURST_MODE_INFO_SIZE];
1323 };
1324 
1326 #define RTE_ETH_XSTATS_NAME_SIZE 64
1327 
1338  uint64_t id;
1339  uint64_t value;
1340 };
1341 
1351 };
1352 
1353 #define ETH_DCB_NUM_TCS 8
1354 #define ETH_MAX_VMDQ_POOL 64
1355 
1362  struct {
1363  uint8_t base;
1364  uint8_t nb_queue;
1365  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1367  struct {
1368  uint8_t base;
1369  uint8_t nb_queue;
1370  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1371 };
1372 
1378  uint8_t nb_tcs;
1379  uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES];
1380  uint8_t tc_bws[ETH_DCB_NUM_TCS];
1383 };
1384 
1385 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
1386 
1387 /* Macros to check for valid port */
/*
 * Validate `port_id`; on failure log an error and return `retval` from the
 * CALLING function. Only usable inside a function whose return type matches
 * `retval`.
 */
1388 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1389  if (!rte_eth_dev_is_valid_port(port_id)) { \
1390  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1391  return retval; \
1392  } \
1393 } while (0)
1394 
/*
 * Same check for void functions: log an error and plain-`return` on an
 * invalid port id.
 */
1395 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1396  if (!rte_eth_dev_is_valid_port(port_id)) { \
1397  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1398  return; \
1399  } \
1400 } while (0)
1401 
1407 #define ETH_L2_TUNNEL_ENABLE_MASK 0x00000001
1408 
1409 #define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002
1410 
1411 #define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004
1412 
1413 #define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1414 
/**
 * User callback type registered per Rx queue via rte_eth_add_rx_callback().
 * Receives `nb_pkts` mbufs in `pkts` (array capacity `max_pkts`) for the
 * given port/queue plus the user parameter supplied at registration.
 * NOTE(review): the uint16_t return is presumably the packet count exposed
 * to the application after the callback — confirm against rte_eth_rx_burst.
 */
1437 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
1438  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1439  void *user_param);
1440 
/**
 * User callback type registered per Tx queue via rte_eth_add_tx_callback().
 * Receives the `nb_pkts` mbufs about to be transmitted on port/queue plus
 * the user parameter supplied at registration; returns a uint16_t packet
 * count (see note above regarding exact contract).
 */
1461 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
1462  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1463 
1474 };
1475 
/** SR-IOV state attached to an ethdev's data area. */
struct rte_eth_dev_sriov {
	uint8_t active;          /**< Nonzero when SR-IOV mode is enabled. */
	uint8_t nb_q_per_pool;   /**< Queues per pool (per field naming). */
	uint16_t def_vmdq_idx;   /**< Default VMDq pool index. */
	uint16_t def_pool_q_idx; /**< Default queue index within that pool. */
};
/** Shorthand accessor for a device's SR-IOV state. */
#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1483 
1484 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
1485 
/** Owner id value meaning "port not owned by anyone". */
#define RTE_ETH_DEV_NO_OWNER 0

/** Size of the owner name buffer below. */
#define RTE_ETH_MAX_OWNER_NAME_LEN 64

/** Identity of an entity claiming ownership of an ethdev port. */
struct rte_eth_dev_owner {
	uint64_t id;                           /**< Unique owner identifier. */
	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< Human-readable owner name. */
};
1494 
1499 #define RTE_ETH_DEV_CLOSE_REMOVE 0x0001
1500 
1501 #define RTE_ETH_DEV_INTR_LSC 0x0002
1502 
1503 #define RTE_ETH_DEV_BONDED_SLAVE 0x0004
1504 
1505 #define RTE_ETH_DEV_INTR_RMV 0x0008
1506 
1507 #define RTE_ETH_DEV_REPRESENTOR 0x0010
1508 
1509 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR 0x0020
1510 
1522 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
1523  const uint64_t owner_id);
1524 
/*
 * Iterate over ports owned by owner id `o`; ends when
 * rte_eth_find_next_owned_by() returns >= RTE_MAX_ETHPORTS. Both sides of
 * the comparison are cast to unsigned int so the macro works regardless of
 * the (possibly signed) type the caller declared `p` with.
 */
1528 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
1529  for (p = rte_eth_find_next_owned_by(0, o); \
1530  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
1531  p = rte_eth_find_next_owned_by(p + 1, o))
1532 
1541 uint16_t rte_eth_find_next(uint16_t port_id);
1542 
1546 #define RTE_ETH_FOREACH_DEV(p) \
1547  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
1548 
1563 __rte_experimental
1564 uint16_t
1565 rte_eth_find_next_of(uint16_t port_id_start,
1566  const struct rte_device *parent);
1567 
1576 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
1577  for (port_id = rte_eth_find_next_of(0, parent); \
1578  port_id < RTE_MAX_ETHPORTS; \
1579  port_id = rte_eth_find_next_of(port_id + 1, parent))
1580 
1595 __rte_experimental
1596 uint16_t
1597 rte_eth_find_next_sibling(uint16_t port_id_start,
1598  uint16_t ref_port_id);
1599 
1610 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
1611  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
1612  port_id < RTE_MAX_ETHPORTS; \
1613  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
1614 
1628 __rte_experimental
1629 int rte_eth_dev_owner_new(uint64_t *owner_id);
1630 
1644 __rte_experimental
1645 int rte_eth_dev_owner_set(const uint16_t port_id,
1646  const struct rte_eth_dev_owner *owner);
1647 
1661 __rte_experimental
1662 int rte_eth_dev_owner_unset(const uint16_t port_id,
1663  const uint64_t owner_id);
1664 
1676 __rte_experimental
1677 int rte_eth_dev_owner_delete(const uint64_t owner_id);
1678 
1692 __rte_experimental
1693 int rte_eth_dev_owner_get(const uint16_t port_id,
1694  struct rte_eth_dev_owner *owner);
1695 
1706 uint16_t rte_eth_dev_count_avail(void);
1707 
1716 uint16_t rte_eth_dev_count_total(void);
1717 
1729 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
1730 
1739 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
1740 
1749 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
1750 
1790 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
1791  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
1792 
1804 __rte_experimental
1805 int
1806 rte_eth_dev_is_removed(uint16_t port_id);
1807 
1857 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1858  uint16_t nb_rx_desc, unsigned int socket_id,
1859  const struct rte_eth_rxconf *rx_conf,
1860  struct rte_mempool *mb_pool);
1861 
1888 __rte_experimental
1890  (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
1891  const struct rte_eth_hairpin_conf *conf);
1892 
1941 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1942  uint16_t nb_tx_desc, unsigned int socket_id,
1943  const struct rte_eth_txconf *tx_conf);
1944 
1969 __rte_experimental
1971  (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
1972  const struct rte_eth_hairpin_conf *conf);
1973 
1984 int rte_eth_dev_socket_id(uint16_t port_id);
1985 
1995 int rte_eth_dev_is_valid_port(uint16_t port_id);
1996 
2013 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2014 
2030 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2031 
2048 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2049 
2065 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2066 
2086 int rte_eth_dev_start(uint16_t port_id);
2087 
2095 void rte_eth_dev_stop(uint16_t port_id);
2096 
2109 int rte_eth_dev_set_link_up(uint16_t port_id);
2110 
2120 int rte_eth_dev_set_link_down(uint16_t port_id);
2121 
2130 void rte_eth_dev_close(uint16_t port_id);
2131 
2169 int rte_eth_dev_reset(uint16_t port_id);
2170 
2182 int rte_eth_promiscuous_enable(uint16_t port_id);
2183 
2195 int rte_eth_promiscuous_disable(uint16_t port_id);
2196 
2207 int rte_eth_promiscuous_get(uint16_t port_id);
2208 
2220 int rte_eth_allmulticast_enable(uint16_t port_id);
2221 
2233 int rte_eth_allmulticast_disable(uint16_t port_id);
2234 
2245 int rte_eth_allmulticast_get(uint16_t port_id);
2246 
2262 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
2263 
2279 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
2280 
2298 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
2299 
2311 int rte_eth_stats_reset(uint16_t port_id);
2312 
2342 int rte_eth_xstats_get_names(uint16_t port_id,
2343  struct rte_eth_xstat_name *xstats_names,
2344  unsigned int size);
2345 
2375 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2376  unsigned int n);
2377 
2400 int
2401 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2402  struct rte_eth_xstat_name *xstats_names, unsigned int size,
2403  uint64_t *ids);
2404 
2428 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2429  uint64_t *values, unsigned int size);
2430 
2449 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2450  uint64_t *id);
2451 
2464 int rte_eth_xstats_reset(uint16_t port_id);
2465 
2483 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
2484  uint16_t tx_queue_id, uint8_t stat_idx);
2485 
2503 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
2504  uint16_t rx_queue_id,
2505  uint8_t stat_idx);
2506 
2519 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
2520 
2563 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
2564 
2584 int rte_eth_dev_fw_version_get(uint16_t port_id,
2585  char *fw_version, size_t fw_size);
2586 
2625 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2626  uint32_t *ptypes, int num);
2660 __rte_experimental
2661 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
2662  uint32_t *set_ptypes, unsigned int num);
2663 
2675 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
2676 
2694 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
2695 
2715 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
2716 
2736 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2737  int on);
2738 
2756 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2757  enum rte_vlan_type vlan_type,
2758  uint16_t tag_type);
2759 
2782 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
2783 
2797 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
2798 
2813 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
2814 
2815 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
2816  void *userdata);
2817 
2823  buffer_tx_error_fn error_callback;
2824  void *error_userdata;
2825  uint16_t size;
2826  uint16_t length;
2827  struct rte_mbuf *pkts[];
2829 };
2830 
/*
 * Bytes needed to allocate a rte_eth_dev_tx_buffer able to hold `sz` mbuf
 * pointers: the fixed header plus the trailing flexible pkts[] array.
 */
2837 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
2838  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
2839 
2850 int
2851 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
2852 
2877 int
2879  buffer_tx_error_fn callback, void *userdata);
2880 
2903 void
2904 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2905  void *userdata);
2906 
2930 void
2931 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2932  void *userdata);
2933 
2959 int
2960 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
2961 
2977 };
2978 
2986  uint64_t metadata;
3000 };
3001 
3019 };
3020 
3021 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
3022  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
3042 int rte_eth_dev_callback_register(uint16_t port_id,
3043  enum rte_eth_event_type event,
3044  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3045 
3064 int rte_eth_dev_callback_unregister(uint16_t port_id,
3065  enum rte_eth_event_type event,
3066  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3067 
3089 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
3090 
3111 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
3112 
3130 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
3131 
3153 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3154  int epfd, int op, void *data);
3155 
3173 __rte_experimental
3174 int
3175 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
3176 
3190 int rte_eth_led_on(uint16_t port_id);
3191 
3205 int rte_eth_led_off(uint16_t port_id);
3206 
/* Link-level flow control: read back / program the pause-frame
 * configuration of a port. */
3220 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
3221  struct rte_eth_fc_conf *fc_conf);
3222 
3237 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
3238  struct rte_eth_fc_conf *fc_conf);
3239 
/* Priority flow control (per-priority pause) configuration. */
3255 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3256  struct rte_eth_pfc_conf *pfc_conf);
3257 
/* MAC address management: add/remove a MAC filter entry (add takes a VMDq
 * pool index), or replace the port's default MAC address. */
3277 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
3278  uint32_t pool);
3279 
3293 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
3294  struct rte_ether_addr *mac_addr);
3295 
3309 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
3310  struct rte_ether_addr *mac_addr);
3311 
/* RSS redirection table (RETA): update or query reta_size entries through
 * an array of rte_eth_rss_reta_entry64 groups. */
3328 int rte_eth_dev_rss_reta_update(uint16_t port_id,
3329  struct rte_eth_rss_reta_entry64 *reta_conf,
3330  uint16_t reta_size);
3331 
3349 int rte_eth_dev_rss_reta_query(uint16_t port_id,
3350  struct rte_eth_rss_reta_entry64 *reta_conf,
3351  uint16_t reta_size);
3352 
/* Unicast hash-table filtering: toggle one address, or all entries at once. */
3372 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3373  uint8_t on);
3374 
3393 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
3394 
/* Traffic mirroring rules, addressed by rule_id. */
3417 int rte_eth_mirror_rule_set(uint16_t port_id,
3418  struct rte_eth_mirror_conf *mirror_conf,
3419  uint8_t rule_id,
3420  uint8_t on);
3421 
3436 int rte_eth_mirror_rule_reset(uint16_t port_id,
3437  uint8_t rule_id);
3438 
/* Per-TX-queue rate limiting. */
3455 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3456  uint16_t tx_rate);
3457 
/* RSS hash configuration (key and hash-function selection). */
3472 int rte_eth_dev_rss_hash_update(uint16_t port_id,
3473  struct rte_eth_rss_conf *rss_conf);
3474 
3489 int
3490 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3491  struct rte_eth_rss_conf *rss_conf);
3492 
/* UDP tunneling: add/delete a UDP port the device recognizes as a tunnel
 * endpoint. */
3511 int
3512 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3513  struct rte_eth_udp_tunnel *tunnel_udp);
3514 
3534 int
3535 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3536  struct rte_eth_udp_tunnel *tunnel_udp);
3537 
/* Legacy filter API; both entry points are marked __rte_deprecated
 * (see the DPDK deprecation notices for the replacement API). */
3552 __rte_deprecated
3553 int rte_eth_dev_filter_supported(uint16_t port_id,
3554  enum rte_filter_type filter_type);
3555 
3575 __rte_deprecated
3576 int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3577  enum rte_filter_op filter_op, void *arg);
3578 
/* Retrieve DCB (Data Center Bridging) information for the port. */
3592 int rte_eth_dev_get_dcb_info(uint16_t port_id,
3593  struct rte_eth_dcb_info *dcb_info);
3594 
/* Opaque handle returned by the RX/TX callback registration functions below. */
3595 struct rte_eth_rxtx_callback;
3596 
/* Per-queue burst callbacks: run after RX bursts (optionally inserted at the
 * head of the chain) or before TX bursts; remove via the matching handle. */
3621 const struct rte_eth_rxtx_callback *
3622 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3623  rte_rx_callback_fn fn, void *user_param);
3624 
3650 const struct rte_eth_rxtx_callback *
3651 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3652  rte_rx_callback_fn fn, void *user_param);
3653 
3678 const struct rte_eth_rxtx_callback *
3679 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3680  rte_tx_callback_fn fn, void *user_param);
3681 
3712 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3713  const struct rte_eth_rxtx_callback *user_cb);
3714 
3745 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3746  const struct rte_eth_rxtx_callback *user_cb);
3747 
/* Queue introspection: retrieve configuration/state of one RX or TX queue. */
3766 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3767  struct rte_eth_rxq_info *qinfo);
3768 
3787 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3788  struct rte_eth_txq_info *qinfo);
3789 
/* Burst mode description per queue (experimental API). */
3807 __rte_experimental
3808 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
3809  struct rte_eth_burst_mode *mode);
3810 
3828 __rte_experimental
3829 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
3830  struct rte_eth_burst_mode *mode);
3831 
/* Device register dump and device EEPROM access. */
3849 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
3850 
3863 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
3864 
3880 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
3881 
3897 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
3898 
/* Pluggable module info and module EEPROM access (experimental API). */
3916 __rte_experimental
3917 int
3918 rte_eth_dev_get_module_info(uint16_t port_id,
3919  struct rte_eth_dev_module_info *modinfo);
3920 
3939 __rte_experimental
3940 int
3941 rte_eth_dev_get_module_eeprom(uint16_t port_id,
3942  struct rte_dev_eeprom_info *info);
3943 
/* Replace the whole multicast address filter list of the port. */
3962 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3963  struct rte_ether_addr *mc_addr_set,
3964  uint32_t nb_mc_addr);
3965 
/* Device time synchronization: enable/disable, read RX/TX packet
 * timestamps, and adjust/read/write the device clock. */
3978 int rte_eth_timesync_enable(uint16_t port_id);
3979 
3992 int rte_eth_timesync_disable(uint16_t port_id);
3993 
4012 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
4013  struct timespec *timestamp, uint32_t flags);
4014 
4030 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4031  struct timespec *timestamp);
4032 
4050 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
4051 
4066 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
4067 
4086 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
4087 
/* Read the raw device clock counter (experimental API). */
4132 __rte_experimental
4133 int
4134 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
4135 
/* L2 tunnel ether-type configuration and per-tunnel offload enable mask. */
4151 int
4152 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4153  struct rte_eth_l2_tunnel_conf *l2_tunnel);
4154 
4179 int
4180 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4181  struct rte_eth_l2_tunnel_conf *l2_tunnel,
4182  uint32_t mask,
4183  uint8_t en);
4184 
/* Name <-> port_id lookups. */
4200 int
4201 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
4202 
4217 int
4218 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
4219 
/* In/out adjustment of requested RX/TX descriptor counts — presumably
 * clamped to the device's limits; confirm against the DPDK API docs. */
4236 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4237  uint16_t *nb_rx_desc,
4238  uint16_t *nb_tx_desc);
4239 
/* Ask whether the port's driver supports a given mempool ops name. */
4254 int
4255 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
4256 
/* Opaque security context of the device, if any (NULL semantics per docs). */
4266 void *
4267 rte_eth_dev_get_sec_ctx(uint16_t port_id);
4268 
/* Hairpin (RX-to-TX wiring) capabilities (experimental API). */
4283 __rte_experimental
4284 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
4285  struct rte_eth_hairpin_cap *cap);
4286 
4287 #include <rte_ethdev_core.h>
4288 
/**
 * Receive a burst of packets on an RX queue.
 *
 * Fast-path inline wrapper: dispatches to the driver's rx_pkt_burst handler
 * for the queue, then runs any registered post-RX callbacks. Returns the
 * number of packets stored in rx_pkts (0 on a failed debug-build check).
 */
4371 static inline uint16_t
4372 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
4373  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
4374 {
4375  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4376  uint16_t nb_rx;
4377 
/* Port/queue/handler validation exists only in debug builds; release
 * builds trust the caller to keep the fast path branch-free. */
4378 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4379  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
4380  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
4381 
4382  if (queue_id >= dev->data->nb_rx_queues) {
4383  RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4384  return 0;
4385  }
4386 #endif
/* Call the driver's burst receive function for this queue. */
4387  nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
4388  rx_pkts, nb_pkts);
4389 
4390 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
/* Walk the post-RX callback chain; each callback sees the current packet
 * count and returns the (possibly reduced) count for the next one. */
4391  if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
4392  struct rte_eth_rxtx_callback *cb =
4393  dev->post_rx_burst_cbs[queue_id];
4394 
4395  do {
4396  nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
4397  nb_pkts, cb->param);
4398  cb = cb->next;
4399  } while (cb != NULL);
4400  }
4401 #endif
4402 
4403  return nb_rx;
4404 }
4405 
/**
 * Query the driver's rx_queue_count handler for an RX queue (per its name,
 * presumably the number of used descriptors — confirm in the DPDK docs).
 *
 * Returns the driver's count cast to int, -EINVAL for an invalid port or
 * queue id, or -ENOTSUP when the driver provides no rx_queue_count op.
 */
4418 static inline int
4419 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
4420 {
4421  struct rte_eth_dev *dev;
4422 
/* Unlike the burst functions, validation here is unconditional: this is
 * not on the packet fast path. */
4423  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4424  dev = &rte_eth_devices[port_id];
4425  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
4426  if (queue_id >= dev->data->nb_rx_queues)
4427  return -EINVAL;
4428 
4429  return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
4430 }
4431 
/**
 * Ask the driver whether the RX descriptor at `offset` in the queue has
 * been filled, via the rx_descriptor_done op.
 *
 * Returns the driver's result, -ENODEV for an invalid port, or -ENOTSUP
 * when the op is not implemented. Note: queue_id is not range-checked here.
 */
4447 static inline int
4448 rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
4449 {
4450  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4451  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4452  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
/* The trailing backslash below is a harmless leftover line continuation. */
4453  return (*dev->dev_ops->rx_descriptor_done)( \
4454  dev->data->rx_queues[queue_id], offset);
4455 }
4456 
/* Return values of rte_eth_rx_descriptor_status(). */
4457 #define RTE_ETH_RX_DESC_AVAIL 0
4458 #define RTE_ETH_RX_DESC_DONE 1
4459 #define RTE_ETH_RX_DESC_UNAVAIL 2
/**
 * Query the status of the RX descriptor at `offset` in a queue.
 *
 * Returns one of the RTE_ETH_RX_DESC_* values from the driver, -ENODEV for
 * an invalid port or queue id (debug builds only), or -ENOTSUP when the
 * driver does not implement rx_descriptor_status.
 */
4494 static inline int
4495 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
4496  uint16_t offset)
4497 {
4498  struct rte_eth_dev *dev;
4499  void *rxq;
4500 
/* Port and queue validation compiled in for debug builds only. */
4501 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4502  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4503 #endif
4504  dev = &rte_eth_devices[port_id];
4505 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4506  if (queue_id >= dev->data->nb_rx_queues)
4507  return -ENODEV;
4508 #endif
4509  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
4510  rxq = dev->data->rx_queues[queue_id];
4511 
4512  return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
4513 }
4514 
/* Return values of rte_eth_tx_descriptor_status(). */
4515 #define RTE_ETH_TX_DESC_FULL 0
4516 #define RTE_ETH_TX_DESC_DONE 1
4517 #define RTE_ETH_TX_DESC_UNAVAIL 2
/**
 * Query the status of the TX descriptor at `offset` in a queue.
 *
 * Mirrors rte_eth_rx_descriptor_status(): returns an RTE_ETH_TX_DESC_*
 * value from the driver, -ENODEV for an invalid port or queue id (debug
 * builds only), or -ENOTSUP when tx_descriptor_status is not implemented.
 */
4552 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
4553  uint16_t queue_id, uint16_t offset)
4554 {
4555  struct rte_eth_dev *dev;
4556  void *txq;
4557 
/* Port and queue validation compiled in for debug builds only. */
4558 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4559  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4560 #endif
4561  dev = &rte_eth_devices[port_id];
4562 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4563  if (queue_id >= dev->data->nb_tx_queues)
4564  return -ENODEV;
4565 #endif
4566  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
4567  txq = dev->data->tx_queues[queue_id];
4568 
4569  return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
4570 }
4571 
/**
 * Transmit a burst of packets on a TX queue.
 *
 * Fast-path inline wrapper: runs any registered pre-TX callbacks (which may
 * reduce nb_pkts), then dispatches to the driver's tx_pkt_burst handler.
 * Returns the number of packets actually accepted for transmission
 * (0 on a failed debug-build check).
 */
4638 static inline uint16_t
4639 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
4640  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4641 {
4642  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4643 
/* Port/queue/handler validation exists only in debug builds. */
4644 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4645  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
4646  RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
4647 
4648  if (queue_id >= dev->data->nb_tx_queues) {
4649  RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4650  return 0;
4651  }
4652 #endif
4653 
4654 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
/* Pre-TX callbacks run BEFORE the driver sees the packets; each may drop
 * or rewrite packets and returns the updated count for the next one. */
4655  struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
4656 
4657  if (unlikely(cb != NULL)) {
4658  do {
4659  nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
4660  cb->param);
4661  cb = cb->next;
4662  } while (cb != NULL);
4663  }
4664 #endif
4665 
4666  return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
4667 }
4668 
4722 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
4723 
/**
 * Prepare a burst of packets for transmission on a TX queue.
 *
 * Calls the driver's tx_pkt_prepare handler when one is provided; a driver
 * without the handler is treated as needing no preparation and all nb_pkts
 * are reported as ready. On a failed debug-build check, sets rte_errno to
 * EINVAL and returns 0.
 */
4724 static inline uint16_t
4725 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
4726  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4727 {
4728  struct rte_eth_dev *dev;
4729 
/* Unlike rx/tx_burst, failures here also set rte_errno so the caller can
 * distinguish "nothing prepared" from an invalid argument. */
4730 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4731  if (!rte_eth_dev_is_valid_port(port_id)) {
4732  RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
4733  rte_errno = EINVAL;
4734  return 0;
4735  }
4736 #endif
4737 
4738  dev = &rte_eth_devices[port_id];
4739 
4740 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4741  if (queue_id >= dev->data->nb_tx_queues) {
4742  RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4743  rte_errno = EINVAL;
4744  return 0;
4745  }
4746 #endif
4747 
/* No prepare handler: the driver needs no preparation step. */
4748  if (!dev->tx_pkt_prepare)
4749  return nb_pkts;
4750 
4751  return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
4752  tx_pkts, nb_pkts);
4753 }
4754 
4755 #else
4756 
4757 /*
4758  * Native no-op for compilation targets that do not require any preparation
4759  * steps, where even a functional no-op would introduce an unnecessary
4760  * performance drop.
4761  *
4762  * Generally it is not a good idea to turn this on globally, and it should
4763  * not be used if the behavior of tx_preparation can change.
4764  */
4765 
4766 static inline uint16_t
4767 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
4768  __rte_unused uint16_t queue_id,
4769  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4770 {
4771  return nb_pkts;
4772 }
4773 
4774 #endif
4775 
/**
 * Send any packets accumulated in a TX buffer.
 *
 * Transmits buffer->length packets in one rte_eth_tx_burst() call and
 * resets the buffer. Packets the burst did not accept are handed to the
 * buffer's error callback. Returns the number of packets actually sent.
 */
4798 static inline uint16_t
4799 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
4800  struct rte_eth_dev_tx_buffer *buffer)
4801 {
4802  uint16_t sent;
4803  uint16_t to_send = buffer->length;
4804 
4805  if (to_send == 0)
4806  return 0;
4807 
4808  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
4809 
/* The buffer is emptied regardless of how many packets were accepted;
 * unsent packets become the error callback's responsibility. */
4810  buffer->length = 0;
4811 
4812  /* All packets sent, or to be dealt with by callback below */
4813  if (unlikely(sent != to_send))
4814  buffer->error_callback(&buffer->pkts[sent],
4815  (uint16_t)(to_send - sent),
4816  buffer->error_userdata);
4817 
4818  return sent;
4819 }
4820 
/**
 * Buffer one packet for later transmission.
 *
 * Appends tx_pkt to the buffer; once the buffer reaches its configured
 * size, flushes it via rte_eth_tx_buffer_flush(). Returns 0 when the
 * packet was only queued, otherwise the flush's sent-packet count.
 */
4851 static __rte_always_inline uint16_t
4852 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
4853  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
4854 {
4855  buffer->pkts[buffer->length++] = tx_pkt;
4856  if (buffer->length < buffer->size)
4857  return 0;
4858 
4859  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
4860 }
4861 
4862 #ifdef __cplusplus
4863 }
4864 #endif
4865 
4866 #endif /* _RTE_ETHDEV_H_ */
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1267
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf
Definition: rte_ethdev.h:1070
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_promiscuous_disable(uint16_t port_id)
#define ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:614
struct rte_eth_dev_portconf default_rxportconf
Definition: rte_ethdev.h:1270
struct rte_fdir_conf fdir_conf
Definition: rte_ethdev.h:1080
int rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel)
uint32_t rmv
Definition: rte_ethdev.h:1037
#define __rte_always_inline
Definition: rte_common.h:153
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:839
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
uint16_t nb_desc
Definition: rte_ethdev.h:1302
char info[RTE_ETH_BURST_MODE_INFO_SIZE]
Definition: rte_ethdev.h:1322
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1245
uint16_t reta[RTE_RETA_GROUP_SIZE]
Definition: rte_ethdev.h:687
const uint32_t * dev_flags
Definition: rte_ethdev.h:1233
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
struct rte_eth_vmdq_dcb_conf::@135 pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]
void rte_eth_dev_stop(uint16_t port_id)
__rte_experimental int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
#define __rte_cache_min_aligned
Definition: rte_common.h:320
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:4725
int rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
rte_eth_nb_tcs
Definition: rte_ethdev.h:695
struct rte_eth_vmdq_tx_conf vmdq_tx_conf
Definition: rte_ethdev.h:1074
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:263
__rte_experimental int rte_eth_dev_owner_new(uint64_t *owner_id)
__rte_experimental int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
struct rte_eth_thresh rx_thresh
Definition: rte_ethdev.h:819
uint16_t rte_eth_find_next(uint16_t port_id)
__rte_experimental int rte_eth_dev_is_removed(uint16_t port_id)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
int rte_eth_led_off(uint16_t port_id)
rte_fdir_pballoc_type
Definition: rte_ethdev.h:984
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:4495
__rte_experimental uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
uint64_t imissed
Definition: rte_ethdev.h:248
static int rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:4448
uint32_t low_water
Definition: rte_ethdev.h:944
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
uint32_t max_rx_pkt_len
Definition: rte_ethdev.h:397
uint8_t rss_key_len
Definition: rte_ethdev.h:450
void rte_eth_dev_close(uint16_t port_id)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint16_t tx_rate)
uint8_t hthresh
Definition: rte_ethdev.h:329
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1249
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
uint16_t reta_size
Definition: rte_ethdev.h:1253
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
void * userdata
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint32_t lpbk_mode
Definition: rte_ethdev.h:1055
enum rte_fdir_status_mode status
Definition: rte_ethdev.h:1008
enum rte_eth_tx_mq_mode mq_mode
Definition: rte_ethdev.h:793
#define rte_errno
Definition: rte_errno.h:29
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint32_t link_speeds
Definition: rte_ethdev.h:1046
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1251
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:377
rte_eth_fc_mode
Definition: rte_ethdev.h:930
int rte_eth_mirror_rule_set(uint16_t port_id, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on)
uint8_t enable_default_pool
Definition: rte_ethdev.h:747
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:924
#define __rte_unused
Definition: rte_common.h:84
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:261
uint16_t max_rx_2_tx
Definition: rte_ethdev.h:865
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
uint64_t opackets
Definition: rte_ethdev.h:245
rte_filter_op
Definition: rte_eth_ctrl.h:46
struct rte_eth_rss_conf rss_conf
Definition: rte_ethdev.h:1061
uint8_t hash_key_size
Definition: rte_ethdev.h:1255
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
uint16_t split_hdr_size
Definition: rte_ethdev.h:400
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
struct rte_mempool * mp
Definition: rte_ethdev.h:1290
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1079
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:259
const char * name
Definition: rte_ethdev.h:1205
struct rte_eth_switch_info switch_info
Definition: rte_ethdev.h:1279
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
uint32_t rxq
Definition: rte_ethdev.h:1035
int rte_eth_dev_set_link_up(uint16_t port_id)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
struct rte_eth_thresh tx_thresh
Definition: rte_ethdev.h:838
struct rte_eth_desc_lim rx_desc_lim
Definition: rte_ethdev.h:1263
struct rte_eth_conf::@137 rx_adv_conf
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:754
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
__rte_experimental int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
__rte_experimental int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1261
union rte_eth_conf::@138 tx_adv_conf
uint8_t rx_deferred_start
Definition: rte_ethdev.h:822
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:2827
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:3021
struct rte_eth_rxmode rxmode
Definition: rte_ethdev.h:1053
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:255
uint32_t high_water
Definition: rte_ethdev.h:943
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:746
struct rte_eth_txconf conf
Definition: rte_ethdev.h:1301
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
struct rte_intr_conf intr_conf
Definition: rte_ethdev.h:1081
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1326
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
struct rte_eth_desc_lim tx_desc_lim
Definition: rte_ethdev.h:1264
int rte_eth_timesync_disable(uint16_t port_id)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint64_t offloads
Definition: rte_ethdev.h:828
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
uint16_t send_xon
Definition: rte_ethdev.h:946
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_allmulticast_enable(uint16_t port_id)
struct rte_eth_txconf default_txconf
Definition: rte_ethdev.h:1259
#define unlikely(x)
uint16_t nb_max
Definition: rte_ethdev.h:899
uint64_t ibytes
Definition: rte_ethdev.h:246
uint64_t offloads
Definition: rte_ethdev.h:849
uint16_t max_nb_queues
Definition: rte_ethdev.h:863
uint64_t oerrors
Definition: rte_ethdev.h:253
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
struct rte_eth_dcb_rx_conf dcb_rx_conf
Definition: rte_ethdev.h:1064
struct rte_eth_vmdq_rx_conf vmdq_rx_conf
Definition: rte_ethdev.h:1066
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
uint16_t max_mtu
Definition: rte_ethdev.h:1232
uint64_t offloads
Definition: rte_ethdev.h:406
enum rte_eth_rx_mq_mode mq_mode
Definition: rte_ethdev.h:396
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:777
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
uint16_t tx_free_thresh
Definition: rte_ethdev.h:840
uint16_t nb_desc
Definition: rte_ethdev.h:1293
__rte_experimental uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:4372
int rte_eth_allmulticast_disable(uint16_t port_id)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_start(uint16_t port_id)
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1244
uint8_t scattered_rx
Definition: rte_ethdev.h:1292
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
struct rte_eth_dcb_tx_conf dcb_tx_conf
Definition: rte_ethdev.h:1072
uint64_t offloads
Definition: rte_ethdev.h:799
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1262
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1247
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:811
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:257
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
uint16_t min_mtu
Definition: rte_ethdev.h:1231
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:1461
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1282
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
uint64_t obytes
Definition: rte_ethdev.h:247
uint8_t enable_loop_back
Definition: rte_ethdev.h:780
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:408
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
struct rte_eth_rxconf conf
Definition: rte_ethdev.h:1291
#define ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:615
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
uint16_t max_tx_queues
Definition: rte_ethdev.h:1239
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_xstats_reset(uint16_t port_id)
rte_eth_dev_state
Definition: rte_ethdev.h:1467
uint16_t rx_free_thresh
Definition: rte_ethdev.h:820
struct rte_eth_vlan_mirror vlan
Definition: rte_ethdev.h:675
uint64_t dev_capa
Definition: rte_ethdev.h:1274
uint16_t max_tx_2_rx
Definition: rte_ethdev.h:867
uint64_t ierrors
Definition: rte_ethdev.h:252
uint16_t max_nb_desc
Definition: rte_ethdev.h:868
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
__extension__ uint8_t hw_vlan_insert_pvid
Definition: rte_ethdev.h:805
uint8_t priority
Definition: rte_ethdev.h:959
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1257
uint16_t rte_eth_dev_count_total(void)
int rte_eth_promiscuous_enable(uint16_t port_id)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
struct rte_eth_dev_portconf default_txportconf
Definition: rte_ethdev.h:1272
rte_vlan_type
Definition: rte_ethdev.h:416
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
uint16_t nb_seg_max
Definition: rte_ethdev.h:911
uint64_t ipackets
Definition: rte_ethdev.h:244
uint16_t max_vfs
Definition: rte_ethdev.h:1243
uint16_t pause_time
Definition: rte_ethdev.h:945
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
rte_filter_type
Definition: rte_eth_ctrl.h:28
int rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel, uint32_t mask, uint8_t en)
uint64_t rx_nombuf
Definition: rte_ethdev.h:254
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:4852
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:805
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
#define ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:336
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1260
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:2966
rte_eth_nb_pools
Definition: rte_ethdev.h:704
#define ETH_MIRROR_MAX_VLANS
Definition: rte_ethdev.h:650
uint16_t nb_align
Definition: rte_ethdev.h:901
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:344
__rte_experimental int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
const char * driver_name
Definition: rte_ethdev.h:1228
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:4419
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_socket_id(uint16_t port_id)
uint8_t enable_default_pool
Definition: rte_ethdev.h:778
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1268
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
struct rte_eth_fdir_flex_conf flex_conf
Definition: rte_ethdev.h:1012
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:851
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1240
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
rte_eth_tunnel_type
Definition: rte_ethdev.h:965
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint64_t value
Definition: rte_ethdev.h:1339
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition: rte_ethdev.h:540
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
enum rte_fdir_pballoc_type pballoc
Definition: rte_ethdev.h:1007
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
__rte_experimental int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1235
__rte_experimental int rte_eth_dev_owner_delete(const uint64_t owner_id)
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
uint64_t rss_hf
Definition: rte_ethdev.h:451
void * reserved_ptrs[2]
Definition: rte_ethdev.h:852
uint64_t id
Definition: rte_ethdev.h:1338
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1281
__extension__ uint8_t hw_vlan_reject_tagged
Definition: rte_ethdev.h:805
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:830
enum rte_fdir_mode mode
Definition: rte_ethdev.h:1006
struct rte_eth_vmdq_rx_conf::@136 pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
unsigned int if_index
Definition: rte_ethdev.h:1229
__rte_deprecated int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type, enum rte_filter_op filter_op, void *arg)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:948
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:1437
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:947
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
rte_fdir_mode
Definition: rte_eth_ctrl.h:603
struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf
Definition: rte_ethdev.h:1062
uint8_t * rss_key
Definition: rte_ethdev.h:449
rte_fdir_status_mode
Definition: rte_ethdev.h:993
__rte_deprecated int rte_eth_dev_filter_supported(uint16_t port_id, enum rte_filter_type filter_type)
void * reserved_ptrs[2]
Definition: rte_ethdev.h:812
uint8_t tx_deferred_start
Definition: rte_ethdev.h:843
uint8_t wthresh
Definition: rte_ethdev.h:330
void * reserved_ptrs[2]
Definition: rte_ethdev.h:831
uint16_t max_rx_queues
Definition: rte_ethdev.h:1238
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:399
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
struct rte_eth_fc_conf fc
Definition: rte_ethdev.h:958
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
struct rte_eth_txmode txmode
Definition: rte_ethdev.h:1054
int rte_eth_allmulticast_get(uint16_t port_id)
uint8_t rx_drop_en
Definition: rte_ethdev.h:821
int rte_eth_dev_is_valid_port(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1237
uint16_t nb_min
Definition: rte_ethdev.h:900
void * reserved_ptrs[2]
Definition: rte_ethdev.h:409
int rte_eth_timesync_enable(uint16_t port_id)
uint8_t pthresh
Definition: rte_ethdev.h:328
struct rte_eth_rxconf default_rxconf
Definition: rte_ethdev.h:1258
uint32_t speed_capa
Definition: rte_ethdev.h:1265
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:4639
uint8_t drop_queue
Definition: rte_ethdev.h:1010
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
uint8_t autoneg
Definition: rte_ethdev.h:949
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1234
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
__rte_experimental int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
uint32_t lsc
Definition: rte_ethdev.h:1033
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:4799
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
rte_eth_event_type
Definition: rte_ethdev.h:3005