DPDK  20.05.0
rte_ethdev.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
142 #ifdef __cplusplus
143 extern "C" {
144 #endif
145 
146 #include <stdint.h>
147 
148 /* Use this macro to check if LRO API is supported */
149 #define RTE_ETHDEV_HAS_LRO_SUPPORT
150 
151 #include <rte_compat.h>
152 #include <rte_log.h>
153 #include <rte_interrupts.h>
154 #include <rte_dev.h>
155 #include <rte_devargs.h>
156 #include <rte_errno.h>
157 #include <rte_common.h>
158 #include <rte_config.h>
159 #include <rte_ether.h>
160 
161 #include "rte_ethdev_trace_fp.h"
162 #include "rte_dev_info.h"
163 
164 extern int rte_eth_dev_logtype;
165 
166 #define RTE_ETHDEV_LOG(level, ...) \
167  rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
168 
169 struct rte_mbuf;
170 
187 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
188 
203 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
204 
217 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
218 
232 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
233  for (rte_eth_iterator_init(iter, devargs), \
234  id = rte_eth_iterator_next(iter); \
235  id != RTE_MAX_ETHPORTS; \
236  id = rte_eth_iterator_next(iter))
237 
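/*
 * Usage sketch (illustrative, not part of this header): iterate over the
 * ports matching a devargs string with RTE_ETH_FOREACH_MATCHING_DEV.
 * "devargs_str" is an application-provided placeholder string.
 *
 *	struct rte_dev_iterator iterator;
 *	uint16_t port_id;
 *
 *	RTE_ETH_FOREACH_MATCHING_DEV(port_id, devargs_str, &iterator) {
 *		printf("matched port %u\n", port_id);
 *	}
 *
 * rte_eth_iterator_cleanup() only needs to be called explicitly when the
 * loop is broken before all matching ports have been visited.
 */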
245  uint64_t ipackets;
246  uint64_t opackets;
247  uint64_t ibytes;
248  uint64_t obytes;
249  uint64_t imissed;
253  uint64_t ierrors;
254  uint64_t oerrors;
255  uint64_t rx_nombuf;
256  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
258  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
260  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
262  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
264  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
266 };
267 
271 #define ETH_LINK_SPEED_AUTONEG (0 << 0)
272 #define ETH_LINK_SPEED_FIXED (1 << 0)
273 #define ETH_LINK_SPEED_10M_HD (1 << 1)
274 #define ETH_LINK_SPEED_10M (1 << 2)
275 #define ETH_LINK_SPEED_100M_HD (1 << 3)
276 #define ETH_LINK_SPEED_100M (1 << 4)
277 #define ETH_LINK_SPEED_1G (1 << 5)
278 #define ETH_LINK_SPEED_2_5G (1 << 6)
279 #define ETH_LINK_SPEED_5G (1 << 7)
280 #define ETH_LINK_SPEED_10G (1 << 8)
281 #define ETH_LINK_SPEED_20G (1 << 9)
282 #define ETH_LINK_SPEED_25G (1 << 10)
283 #define ETH_LINK_SPEED_40G (1 << 11)
284 #define ETH_LINK_SPEED_50G (1 << 12)
285 #define ETH_LINK_SPEED_56G (1 << 13)
286 #define ETH_LINK_SPEED_100G (1 << 14)
287 #define ETH_LINK_SPEED_200G (1 << 15)
292 #define ETH_SPEED_NUM_NONE 0
293 #define ETH_SPEED_NUM_10M 10
294 #define ETH_SPEED_NUM_100M 100
295 #define ETH_SPEED_NUM_1G 1000
296 #define ETH_SPEED_NUM_2_5G 2500
297 #define ETH_SPEED_NUM_5G 5000
298 #define ETH_SPEED_NUM_10G 10000
299 #define ETH_SPEED_NUM_20G 20000
300 #define ETH_SPEED_NUM_25G 25000
301 #define ETH_SPEED_NUM_40G 40000
302 #define ETH_SPEED_NUM_50G 50000
303 #define ETH_SPEED_NUM_56G 56000
304 #define ETH_SPEED_NUM_100G 100000
305 #define ETH_SPEED_NUM_200G 200000
310 __extension__
311 struct rte_eth_link {
312  uint32_t link_speed;
313  uint16_t link_duplex : 1;
314  uint16_t link_autoneg : 1;
315  uint16_t link_status : 1;
316 } __rte_aligned(8);
318 /* Utility constants */
319 #define ETH_LINK_HALF_DUPLEX 0
320 #define ETH_LINK_FULL_DUPLEX 1
321 #define ETH_LINK_DOWN 0
322 #define ETH_LINK_UP 1
323 #define ETH_LINK_FIXED 0
324 #define ETH_LINK_AUTONEG 1
330 struct rte_eth_thresh {
331  uint8_t pthresh;
332  uint8_t hthresh;
333  uint8_t wthresh;
334 };
335 
339 #define ETH_MQ_RX_RSS_FLAG 0x1
340 #define ETH_MQ_RX_DCB_FLAG 0x2
341 #define ETH_MQ_RX_VMDQ_FLAG 0x4
342 
350 
354  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
356  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
357 
359  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
361  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
363  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
365  ETH_MQ_RX_VMDQ_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG |
366  ETH_MQ_RX_VMDQ_FLAG,
367 };
368 
372 #define ETH_RSS ETH_MQ_RX_RSS
373 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
374 #define ETH_DCB_RX ETH_MQ_RX_DCB
375 
385 };
386 
390 #define ETH_DCB_NONE ETH_MQ_TX_NONE
391 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
392 #define ETH_DCB_TX ETH_MQ_TX_DCB
393 
400  uint32_t max_rx_pkt_len;
403  uint16_t split_hdr_size;
409  uint64_t offloads;
410 
411  uint64_t reserved_64s[2];
412  void *reserved_ptrs[2];
413 };
414 
420  ETH_VLAN_TYPE_UNKNOWN = 0,
423  ETH_VLAN_TYPE_MAX,
424 };
425 
431  uint64_t ids[64];
432 };
433 
452  uint8_t *rss_key;
453  uint8_t rss_key_len;
454  uint64_t rss_hf;
455 };
456 
457 /*
458  * A packet can be identified by hardware as one of several flow types; the
459  * set of flow types supported differs between NIC models.
460  * The NIC classifies each packet at the deepest protocol level it can, and
461  * the classification is exclusive. For example, if a packet is identified as
462  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not match any other flow type,
463  * even though it is also an IPv4 packet.
464  */
465 #define RTE_ETH_FLOW_UNKNOWN 0
466 #define RTE_ETH_FLOW_RAW 1
467 #define RTE_ETH_FLOW_IPV4 2
468 #define RTE_ETH_FLOW_FRAG_IPV4 3
469 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
470 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
471 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
472 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
473 #define RTE_ETH_FLOW_IPV6 8
474 #define RTE_ETH_FLOW_FRAG_IPV6 9
475 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
476 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
477 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
478 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
479 #define RTE_ETH_FLOW_L2_PAYLOAD 14
480 #define RTE_ETH_FLOW_IPV6_EX 15
481 #define RTE_ETH_FLOW_IPV6_TCP_EX 16
482 #define RTE_ETH_FLOW_IPV6_UDP_EX 17
483 #define RTE_ETH_FLOW_PORT 18
484 
485 #define RTE_ETH_FLOW_VXLAN 19
486 #define RTE_ETH_FLOW_GENEVE 20
487 #define RTE_ETH_FLOW_NVGRE 21
488 #define RTE_ETH_FLOW_VXLAN_GPE 22
489 #define RTE_ETH_FLOW_GTPU 23
490 #define RTE_ETH_FLOW_MAX 24
491 
492 /*
493  * The macros below define RSS offload types; they can be used to
494  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
495  */
496 #define ETH_RSS_IPV4 (1ULL << 2)
497 #define ETH_RSS_FRAG_IPV4 (1ULL << 3)
498 #define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << 4)
499 #define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << 5)
500 #define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << 6)
501 #define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
502 #define ETH_RSS_IPV6 (1ULL << 8)
503 #define ETH_RSS_FRAG_IPV6 (1ULL << 9)
504 #define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << 10)
505 #define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << 11)
506 #define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << 12)
507 #define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
508 #define ETH_RSS_L2_PAYLOAD (1ULL << 14)
509 #define ETH_RSS_IPV6_EX (1ULL << 15)
510 #define ETH_RSS_IPV6_TCP_EX (1ULL << 16)
511 #define ETH_RSS_IPV6_UDP_EX (1ULL << 17)
512 #define ETH_RSS_PORT (1ULL << 18)
513 #define ETH_RSS_VXLAN (1ULL << 19)
514 #define ETH_RSS_GENEVE (1ULL << 20)
515 #define ETH_RSS_NVGRE (1ULL << 21)
516 #define ETH_RSS_GTPU (1ULL << 23)
517 #define ETH_RSS_ETH (1ULL << 24)
518 #define ETH_RSS_S_VLAN (1ULL << 25)
519 #define ETH_RSS_C_VLAN (1ULL << 26)
520 #define ETH_RSS_ESP (1ULL << 27)
521 #define ETH_RSS_AH (1ULL << 28)
522 #define ETH_RSS_L2TPV3 (1ULL << 29)
523 #define ETH_RSS_PFCP (1ULL << 30)
524 
525 
526 /*
527  * The following macros can be combined with the above ETH_RSS_* flags to
528  * select a more specific input set. These bits are defined starting
529  * from the high end of the 64 bits.
530  * Note: an ETH_RSS_* flag used without SRC_ONLY/DST_ONLY means that both
531  * the source and the destination are taken into account. If SRC_ONLY and
532  * DST_ONLY of the same level are set simultaneously, the effect is the
533  * same as setting neither of them.
534  */
535 #define ETH_RSS_L3_SRC_ONLY (1ULL << 63)
536 #define ETH_RSS_L3_DST_ONLY (1ULL << 62)
537 #define ETH_RSS_L4_SRC_ONLY (1ULL << 61)
538 #define ETH_RSS_L4_DST_ONLY (1ULL << 60)
539 #define ETH_RSS_L2_SRC_ONLY (1ULL << 59)
540 #define ETH_RSS_L2_DST_ONLY (1ULL << 58)
541 
552 static inline uint64_t
553 rte_eth_rss_hf_refine(uint64_t rss_hf)
554 {
555  if ((rss_hf & ETH_RSS_L3_SRC_ONLY) && (rss_hf & ETH_RSS_L3_DST_ONLY))
556  rss_hf &= ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
557 
558  if ((rss_hf & ETH_RSS_L4_SRC_ONLY) && (rss_hf & ETH_RSS_L4_DST_ONLY))
559  rss_hf &= ~(ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
560 
561  return rss_hf;
562 }
563 
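/*
 * Usage sketch (illustrative): rte_eth_rss_hf_refine() removes contradictory
 * SRC_ONLY/DST_ONLY bits of the same level, as described in the note above.
 *
 *	uint64_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY;
 *
 *	rss_hf = rte_eth_rss_hf_refine(rss_hf);
 *	// rss_hf is now ETH_RSS_IPV4: both L3 *_ONLY bits were cleared, which is
 *	// equivalent to hashing on both the source and destination addresses.
 */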
564 #define ETH_RSS_IP ( \
565  ETH_RSS_IPV4 | \
566  ETH_RSS_FRAG_IPV4 | \
567  ETH_RSS_NONFRAG_IPV4_OTHER | \
568  ETH_RSS_IPV6 | \
569  ETH_RSS_FRAG_IPV6 | \
570  ETH_RSS_NONFRAG_IPV6_OTHER | \
571  ETH_RSS_IPV6_EX)
572 
573 #define ETH_RSS_UDP ( \
574  ETH_RSS_NONFRAG_IPV4_UDP | \
575  ETH_RSS_NONFRAG_IPV6_UDP | \
576  ETH_RSS_IPV6_UDP_EX)
577 
578 #define ETH_RSS_TCP ( \
579  ETH_RSS_NONFRAG_IPV4_TCP | \
580  ETH_RSS_NONFRAG_IPV6_TCP | \
581  ETH_RSS_IPV6_TCP_EX)
582 
583 #define ETH_RSS_SCTP ( \
584  ETH_RSS_NONFRAG_IPV4_SCTP | \
585  ETH_RSS_NONFRAG_IPV6_SCTP)
586 
587 #define ETH_RSS_TUNNEL ( \
588  ETH_RSS_VXLAN | \
589  ETH_RSS_GENEVE | \
590  ETH_RSS_NVGRE)
591 
592 #define ETH_RSS_VLAN ( \
593  ETH_RSS_S_VLAN | \
594  ETH_RSS_C_VLAN)
595 
597 #define ETH_RSS_PROTO_MASK ( \
598  ETH_RSS_IPV4 | \
599  ETH_RSS_FRAG_IPV4 | \
600  ETH_RSS_NONFRAG_IPV4_TCP | \
601  ETH_RSS_NONFRAG_IPV4_UDP | \
602  ETH_RSS_NONFRAG_IPV4_SCTP | \
603  ETH_RSS_NONFRAG_IPV4_OTHER | \
604  ETH_RSS_IPV6 | \
605  ETH_RSS_FRAG_IPV6 | \
606  ETH_RSS_NONFRAG_IPV6_TCP | \
607  ETH_RSS_NONFRAG_IPV6_UDP | \
608  ETH_RSS_NONFRAG_IPV6_SCTP | \
609  ETH_RSS_NONFRAG_IPV6_OTHER | \
610  ETH_RSS_L2_PAYLOAD | \
611  ETH_RSS_IPV6_EX | \
612  ETH_RSS_IPV6_TCP_EX | \
613  ETH_RSS_IPV6_UDP_EX | \
614  ETH_RSS_PORT | \
615  ETH_RSS_VXLAN | \
616  ETH_RSS_GENEVE | \
617  ETH_RSS_NVGRE)
618 
619 /*
620  * Definitions used for redirection table entry size.
621  * Some RSS RETA sizes may not be supported by some drivers; check the
622  * documentation or the description of the relevant functions for details.
623  */
624 #define ETH_RSS_RETA_SIZE_64 64
625 #define ETH_RSS_RETA_SIZE_128 128
626 #define ETH_RSS_RETA_SIZE_256 256
627 #define ETH_RSS_RETA_SIZE_512 512
628 #define RTE_RETA_GROUP_SIZE 64
629 
630 /* Definitions used for VMDQ and DCB functionality */
631 #define ETH_VMDQ_MAX_VLAN_FILTERS 64
632 #define ETH_DCB_NUM_USER_PRIORITIES 8
633 #define ETH_VMDQ_DCB_NUM_QUEUES 128
634 #define ETH_DCB_NUM_QUEUES 128
636 /* DCB capability defines */
637 #define ETH_DCB_PG_SUPPORT 0x00000001
638 #define ETH_DCB_PFC_SUPPORT 0x00000002
640 /* Definitions used for VLAN Offload functionality */
641 #define ETH_VLAN_STRIP_OFFLOAD 0x0001
642 #define ETH_VLAN_FILTER_OFFLOAD 0x0002
643 #define ETH_VLAN_EXTEND_OFFLOAD 0x0004
644 #define ETH_QINQ_STRIP_OFFLOAD 0x0008
646 /* Definitions used for mask VLAN setting */
647 #define ETH_VLAN_STRIP_MASK 0x0001
648 #define ETH_VLAN_FILTER_MASK 0x0002
649 #define ETH_VLAN_EXTEND_MASK 0x0004
650 #define ETH_QINQ_STRIP_MASK 0x0008
651 #define ETH_VLAN_ID_MAX 0x0FFF
653 /* Definitions used for receive MAC address */
654 #define ETH_NUM_RECEIVE_MAC_ADDR 128
656 /* Definitions used for unicast hash */
657 #define ETH_VMDQ_NUM_UC_HASH_ARRAY 128
659 /* Definitions used for VMDQ pool rx mode setting */
660 #define ETH_VMDQ_ACCEPT_UNTAG 0x0001
661 #define ETH_VMDQ_ACCEPT_HASH_MC 0x0002
662 #define ETH_VMDQ_ACCEPT_HASH_UC 0x0004
663 #define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
664 #define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
667 #define ETH_MIRROR_MAX_VLANS 64
668 
669 #define ETH_MIRROR_VIRTUAL_POOL_UP 0x01
670 #define ETH_MIRROR_UPLINK_PORT 0x02
671 #define ETH_MIRROR_DOWNLINK_PORT 0x04
672 #define ETH_MIRROR_VLAN 0x08
673 #define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10
678 struct rte_eth_vlan_mirror {
679  uint64_t vlan_mask;
681  uint16_t vlan_id[ETH_MIRROR_MAX_VLANS];
682 };
683 
688  uint8_t rule_type;
689  uint8_t dst_pool;
690  uint64_t pool_mask;
693 };
694 
702  uint64_t mask;
704  uint16_t reta[RTE_RETA_GROUP_SIZE];
706 };
707 
713  ETH_4_TCS = 4,
715 };
716 
726 };
727 
728 /* This structure may be extended in future. */
729 struct rte_eth_dcb_rx_conf {
730  enum rte_eth_nb_tcs nb_tcs;
732  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
733 };
734 
735 struct rte_eth_vmdq_dcb_tx_conf {
736  enum rte_eth_nb_pools nb_queue_pools;
738  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
739 };
740 
741 struct rte_eth_dcb_tx_conf {
742  enum rte_eth_nb_tcs nb_tcs;
744  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
745 };
746 
747 struct rte_eth_vmdq_tx_conf {
748  enum rte_eth_nb_pools nb_queue_pools;
749 };
750 
765  uint8_t default_pool;
766  uint8_t nb_pool_maps;
767  struct {
768  uint16_t vlan_id;
769  uint64_t pools;
773 };
774 
796  uint8_t default_pool;
798  uint8_t nb_pool_maps;
799  uint32_t rx_mode;
800  struct {
801  uint16_t vlan_id;
802  uint64_t pools;
804 };
805 
816  uint64_t offloads;
817 
818  /* For i40e specifically */
819  uint16_t pvid;
820  __extension__
821  uint8_t hw_vlan_reject_tagged : 1,
828  uint64_t reserved_64s[2];
829  void *reserved_ptrs[2];
830 };
831 
837  uint16_t rx_free_thresh;
838  uint8_t rx_drop_en;
845  uint64_t offloads;
846 
847  uint64_t reserved_64s[2];
848  void *reserved_ptrs[2];
849 };
850 
856  uint16_t tx_rs_thresh;
857  uint16_t tx_free_thresh;
866  uint64_t offloads;
867 
868  uint64_t reserved_64s[2];
869  void *reserved_ptrs[2];
870 };
871 
880  uint16_t max_nb_queues;
882  uint16_t max_rx_2_tx;
884  uint16_t max_tx_2_rx;
885  uint16_t max_nb_desc;
886 };
887 
888 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
889 
897  uint16_t port;
898  uint16_t queue;
899 };
900 
908  uint16_t peer_count;
909  struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
910 };
911 
916  uint16_t nb_max;
917  uint16_t nb_min;
918  uint16_t nb_align;
928  uint16_t nb_seg_max;
929 
941  uint16_t nb_mtu_seg_max;
942 };
943 
952 };
953 
960  uint32_t high_water;
961  uint32_t low_water;
962  uint16_t pause_time;
963  uint16_t send_xon;
966  uint8_t autoneg;
967 };
968 
976  uint8_t priority;
977 };
978 
983  RTE_TUNNEL_TYPE_NONE = 0,
984  RTE_TUNNEL_TYPE_VXLAN,
985  RTE_TUNNEL_TYPE_GENEVE,
986  RTE_TUNNEL_TYPE_TEREDO,
987  RTE_TUNNEL_TYPE_NVGRE,
988  RTE_TUNNEL_TYPE_IP_IN_GRE,
989  RTE_L2_TUNNEL_TYPE_E_TAG,
990  RTE_TUNNEL_TYPE_VXLAN_GPE,
991  RTE_TUNNEL_TYPE_MAX,
992 };
993 
994 /* Deprecated API file for rte_eth_dev_filter_* functions */
995 #include "rte_eth_ctrl.h"
996 
1005 };
1006 
1014 };
1015 
1027  uint8_t drop_queue;
1028  struct rte_eth_fdir_masks mask;
1031 };
1032 
1041  uint16_t udp_port;
1042  uint8_t prot_type;
1043 };
1044 
1050  uint32_t lsc:1;
1052  uint32_t rxq:1;
1054  uint32_t rmv:1;
1055 };
1056 
1063  uint32_t link_speeds;
1072  uint32_t lpbk_mode;
1077  struct {
1081  struct rte_eth_dcb_rx_conf dcb_rx_conf;
1085  } rx_adv_conf;
1086  union {
1087  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1089  struct rte_eth_dcb_tx_conf dcb_tx_conf;
1091  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1093  } tx_adv_conf;
1099 };
1100 
1104 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
1105 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
1106 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
1107 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
1108 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
1109 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
1110 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
1111 #define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
1112 #define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
1113 #define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
1114 #define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
1115 #define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
1116 #define DEV_RX_OFFLOAD_SCATTER 0x00002000
1117 #define DEV_RX_OFFLOAD_TIMESTAMP 0x00004000
1118 #define DEV_RX_OFFLOAD_SECURITY 0x00008000
1119 #define DEV_RX_OFFLOAD_KEEP_CRC 0x00010000
1120 #define DEV_RX_OFFLOAD_SCTP_CKSUM 0x00020000
1121 #define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM 0x00040000
1122 #define DEV_RX_OFFLOAD_RSS_HASH 0x00080000
1123 
1124 #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
1125  DEV_RX_OFFLOAD_UDP_CKSUM | \
1126  DEV_RX_OFFLOAD_TCP_CKSUM)
1127 #define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
1128  DEV_RX_OFFLOAD_VLAN_FILTER | \
1129  DEV_RX_OFFLOAD_VLAN_EXTEND | \
1130  DEV_RX_OFFLOAD_QINQ_STRIP)
1131 
1132 /*
1133  * If new Rx offload capabilities are defined, they must also be
1134  * mentioned in rte_rx_offload_names in the rte_ethdev.c file.
1135  */
1136 
1140 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
1141 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
1142 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
1143 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
1144 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
1145 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
1146 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
1147 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
1148 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
1149 #define DEV_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200
1150 #define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400
1151 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800
1152 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000
1153 #define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
1154 #define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
1155 
1158 #define DEV_TX_OFFLOAD_MULTI_SEGS 0x00008000
1159 
1160 #define DEV_TX_OFFLOAD_MBUF_FAST_FREE 0x00010000
1161 
1165 #define DEV_TX_OFFLOAD_SECURITY 0x00020000
1166 
1171 #define DEV_TX_OFFLOAD_UDP_TNL_TSO 0x00040000
1172 
1177 #define DEV_TX_OFFLOAD_IP_TNL_TSO 0x00080000
1178 
1179 #define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM 0x00100000
1180 
1181 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
1182 
1183 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002
1184 
1186 /*
1187  * If new Tx offload capabilities are defined, they must also be
1188  * mentioned in rte_tx_offload_names in the rte_ethdev.c file.
1189  */
1190 
1191 /*
1192  * Fallback default preferred Rx/Tx port parameters.
1193  * These are used if an application requests default parameters
1194  * but the PMD does not provide preferred values.
1195  */
1196 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1197 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1198 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1199 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1200 
1207  uint16_t burst_size;
1208  uint16_t ring_size;
1209  uint16_t nb_queues;
1210 };
1211 
1216 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1217 
1222  const char *name;
1223  uint16_t domain_id;
1224  uint16_t port_id;
1232 };
1233 
1244  struct rte_device *device;
1245  const char *driver_name;
1246  unsigned int if_index;
1248  uint16_t min_mtu;
1249  uint16_t max_mtu;
1250  const uint32_t *dev_flags;
1251  uint32_t min_rx_bufsize;
1252  uint32_t max_rx_pktlen;
1255  uint16_t max_rx_queues;
1256  uint16_t max_tx_queues;
1257  uint32_t max_mac_addrs;
1258  uint32_t max_hash_mac_addrs;
1260  uint16_t max_vfs;
1261  uint16_t max_vmdq_pools;
1270  uint16_t reta_size;
1272  uint8_t hash_key_size;
1277  uint16_t vmdq_queue_base;
1278  uint16_t vmdq_queue_num;
1279  uint16_t vmdq_pool_base;
1282  uint32_t speed_capa;
1284  uint16_t nb_rx_queues;
1285  uint16_t nb_tx_queues;
1291  uint64_t dev_capa;
1297 
1298  uint64_t reserved_64s[2];
1299  void *reserved_ptrs[2];
1300 };
1301 
1307  struct rte_mempool *mp;
1309  uint8_t scattered_rx;
1310  uint16_t nb_desc;
1312 
1319  uint16_t nb_desc;
1321 
1322 /* Generic Burst mode flag definitions; the values can be ORed. */
1323 
1329 #define RTE_ETH_BURST_FLAG_PER_QUEUE (1ULL << 0)
1330 
1336  uint64_t flags;
1338 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1339  char info[RTE_ETH_BURST_MODE_INFO_SIZE];
1340 };
1341 
1343 #define RTE_ETH_XSTATS_NAME_SIZE 64
1344 
1355  uint64_t id;
1356  uint64_t value;
1357 };
1358 
1368 };
1369 
1370 #define ETH_DCB_NUM_TCS 8
1371 #define ETH_MAX_VMDQ_POOL 64
1372 
1379  struct {
1380  uint8_t base;
1381  uint8_t nb_queue;
1382  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1384  struct {
1385  uint8_t base;
1386  uint8_t nb_queue;
1387  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1388 };
1389 
1395  uint8_t nb_tcs;
1396  uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES];
1397  uint8_t tc_bws[ETH_DCB_NUM_TCS];
1400 };
1401 
1402 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
1403 
1404 /* Macros to check for valid port */
1405 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1406  if (!rte_eth_dev_is_valid_port(port_id)) { \
1407  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1408  return retval; \
1409  } \
1410 } while (0)
1411 
1412 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1413  if (!rte_eth_dev_is_valid_port(port_id)) { \
1414  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1415  return; \
1416  } \
1417 } while (0)
1418 
1424 #define ETH_L2_TUNNEL_ENABLE_MASK 0x00000001
1425 
1426 #define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002
1427 
1428 #define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004
1429 
1430 #define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1431 
1454 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
1455  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1456  void *user_param);
1457 
1478 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
1479  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1480 
1491 };
1492 
1493 struct rte_eth_dev_sriov {
1494  uint8_t active;
1495  uint8_t nb_q_per_pool;
1496  uint16_t def_vmdq_idx;
1497  uint16_t def_pool_q_idx;
1498 };
1499 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1500 
1501 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
1502 
1503 #define RTE_ETH_DEV_NO_OWNER 0
1504 
1505 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
1506 
1507 struct rte_eth_dev_owner {
1508  uint64_t id;
1509  char name[RTE_ETH_MAX_OWNER_NAME_LEN];
1510 };
1511 
1516 #define RTE_ETH_DEV_CLOSE_REMOVE 0x0001
1517 
1518 #define RTE_ETH_DEV_INTR_LSC 0x0002
1519 
1520 #define RTE_ETH_DEV_BONDED_SLAVE 0x0004
1521 
1522 #define RTE_ETH_DEV_INTR_RMV 0x0008
1523 
1524 #define RTE_ETH_DEV_REPRESENTOR 0x0010
1525 
1526 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR 0x0020
1527 
1539 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
1540  const uint64_t owner_id);
1541 
1545 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
1546  for (p = rte_eth_find_next_owned_by(0, o); \
1547  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
1548  p = rte_eth_find_next_owned_by(p + 1, o))
1549 
1558 uint16_t rte_eth_find_next(uint16_t port_id);
1559 
1563 #define RTE_ETH_FOREACH_DEV(p) \
1564  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
1565 
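/*
 * Usage sketch (illustrative): enumerate all valid, ownerless ports with
 * RTE_ETH_FOREACH_DEV; this is the usual way applications walk the ports.
 *
 *	uint16_t port_id;
 *
 *	RTE_ETH_FOREACH_DEV(port_id) {
 *		struct rte_eth_dev_info dev_info;
 *
 *		if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
 *			printf("port %u uses driver %s\n", port_id,
 *			       dev_info.driver_name);
 *	}
 */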
1580 __rte_experimental
1581 uint16_t
1582 rte_eth_find_next_of(uint16_t port_id_start,
1583  const struct rte_device *parent);
1584 
1593 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
1594  for (port_id = rte_eth_find_next_of(0, parent); \
1595  port_id < RTE_MAX_ETHPORTS; \
1596  port_id = rte_eth_find_next_of(port_id + 1, parent))
1597 
1612 __rte_experimental
1613 uint16_t
1614 rte_eth_find_next_sibling(uint16_t port_id_start,
1615  uint16_t ref_port_id);
1616 
1627 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
1628  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
1629  port_id < RTE_MAX_ETHPORTS; \
1630  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
1631 
1645 __rte_experimental
1646 int rte_eth_dev_owner_new(uint64_t *owner_id);
1647 
1661 __rte_experimental
1662 int rte_eth_dev_owner_set(const uint16_t port_id,
1663  const struct rte_eth_dev_owner *owner);
1664 
1678 __rte_experimental
1679 int rte_eth_dev_owner_unset(const uint16_t port_id,
1680  const uint64_t owner_id);
1681 
1693 __rte_experimental
1694 int rte_eth_dev_owner_delete(const uint64_t owner_id);
1695 
1709 __rte_experimental
1710 int rte_eth_dev_owner_get(const uint16_t port_id,
1711  struct rte_eth_dev_owner *owner);
1712 
1723 uint16_t rte_eth_dev_count_avail(void);
1724 
1733 uint16_t rte_eth_dev_count_total(void);
1734 
1746 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
1747 
1756 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
1757 
1766 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
1767 
1807 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
1808  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
1809 
1821 __rte_experimental
1822 int
1823 rte_eth_dev_is_removed(uint16_t port_id);
1824 
1874 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1875  uint16_t nb_rx_desc, unsigned int socket_id,
1876  const struct rte_eth_rxconf *rx_conf,
1877  struct rte_mempool *mb_pool);
1878 
1905 __rte_experimental
1906 int rte_eth_rx_hairpin_queue_setup
1907  (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
1908  const struct rte_eth_hairpin_conf *conf);
1909 
1958 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1959  uint16_t nb_tx_desc, unsigned int socket_id,
1960  const struct rte_eth_txconf *tx_conf);
1961 
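/*
 * Usage sketch (illustrative): a minimal port bring-up built from the calls
 * above, with error handling omitted. "port_id" and "mbuf_pool" are
 * application-provided placeholders; 1024 descriptors is an arbitrary choice.
 *
 *	struct rte_eth_conf port_conf;
 *	int socket = rte_eth_dev_socket_id(port_id);
 *
 *	memset(&port_conf, 0, sizeof(port_conf));
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf); // 1 Rx and 1 Tx queue
 *	rte_eth_rx_queue_setup(port_id, 0, 1024, socket, NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 1024, socket, NULL);
 *	rte_eth_dev_start(port_id);
 */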
1986 __rte_experimental
1987 int rte_eth_tx_hairpin_queue_setup
1988  (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
1989  const struct rte_eth_hairpin_conf *conf);
1990 
2001 int rte_eth_dev_socket_id(uint16_t port_id);
2002 
2012 int rte_eth_dev_is_valid_port(uint16_t port_id);
2013 
2030 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2031 
2047 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2048 
2065 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2066 
2082 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2083 
2103 int rte_eth_dev_start(uint16_t port_id);
2104 
2112 void rte_eth_dev_stop(uint16_t port_id);
2113 
2126 int rte_eth_dev_set_link_up(uint16_t port_id);
2127 
2137 int rte_eth_dev_set_link_down(uint16_t port_id);
2138 
2147 void rte_eth_dev_close(uint16_t port_id);
2148 
2186 int rte_eth_dev_reset(uint16_t port_id);
2187 
2199 int rte_eth_promiscuous_enable(uint16_t port_id);
2200 
2212 int rte_eth_promiscuous_disable(uint16_t port_id);
2213 
2224 int rte_eth_promiscuous_get(uint16_t port_id);
2225 
2237 int rte_eth_allmulticast_enable(uint16_t port_id);
2238 
2250 int rte_eth_allmulticast_disable(uint16_t port_id);
2251 
2262 int rte_eth_allmulticast_get(uint16_t port_id);
2263 
2279 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
2280 
2296 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
2297 
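/*
 * Usage sketch (illustrative): query the link state without waiting and print
 * it using the utility constants defined earlier in this file.
 *
 *	struct rte_eth_link link;
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0)
 *		printf("port %u: %s, %u Mbps, %s-duplex\n", port_id,
 *		       link.link_status == ETH_LINK_UP ? "up" : "down",
 *		       link.link_speed,
 *		       link.link_duplex == ETH_LINK_FULL_DUPLEX ? "full" : "half");
 */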
2315 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
2316 
2328 int rte_eth_stats_reset(uint16_t port_id);
2329 
2359 int rte_eth_xstats_get_names(uint16_t port_id,
2360  struct rte_eth_xstat_name *xstats_names,
2361  unsigned int size);
2362 
2392 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2393  unsigned int n);
2394 
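/*
 * Usage sketch (illustrative): retrieve all extended statistics. The required
 * array size is queried first by passing NULL/0; allocation failures and
 * negative return values are not handled in this sketch.
 *
 *	int i, nb = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xstats = malloc(nb * sizeof(*xstats));
 *	struct rte_eth_xstat_name *names = malloc(nb * sizeof(*names));
 *
 *	rte_eth_xstats_get_names(port_id, names, nb);
 *	rte_eth_xstats_get(port_id, xstats, nb);
 *	for (i = 0; i < nb; i++)
 *		printf("%s: %" PRIu64 "\n", names[i].name, xstats[i].value);
 *	free(names);
 *	free(xstats);
 */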
2417 int
2418 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2419  struct rte_eth_xstat_name *xstats_names, unsigned int size,
2420  uint64_t *ids);
2421 
2445 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2446  uint64_t *values, unsigned int size);
2447 
2466 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2467  uint64_t *id);
2468 
2481 int rte_eth_xstats_reset(uint16_t port_id);
2482 
2500 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
2501  uint16_t tx_queue_id, uint8_t stat_idx);
2502 
2520 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
2521  uint16_t rx_queue_id,
2522  uint8_t stat_idx);
2523 
2536 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
2537 
2580 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
2581 
2601 int rte_eth_dev_fw_version_get(uint16_t port_id,
2602  char *fw_version, size_t fw_size);
2603 
2642 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2643  uint32_t *ptypes, int num);
2677 __rte_experimental
2678 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
2679  uint32_t *set_ptypes, unsigned int num);
2680 
2692 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
2693 
2711 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
2712 
2732 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
2733 
2753 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2754  int on);
2755 
2773 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2774  enum rte_vlan_type vlan_type,
2775  uint16_t tag_type);
2776 
2799 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
2800 
2814 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
2815 
2830 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
2831 
2832 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
2833  void *userdata);
2834 
2840  buffer_tx_error_fn error_callback;
2841  void *error_userdata;
2842  uint16_t size;
2843  uint16_t length;
2844  struct rte_mbuf *pkts[];
2846 };
2847 
2854 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
2855  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
2856 
2867 int
2868 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
2869 
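/*
 * Usage sketch (illustrative): allocate and initialize a Tx buffer holding up
 * to 32 packets. rte_zmalloc() comes from rte_malloc.h; error handling and
 * NUMA placement are omitted.
 *
 *	struct rte_eth_dev_tx_buffer *buffer;
 *
 *	buffer = rte_zmalloc("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *	rte_eth_tx_buffer_init(buffer, 32);
 *	// By default unsent packets are dropped and freed; see
 *	// rte_eth_tx_buffer_set_err_callback() below to change that behaviour.
 */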
2894 int
2895 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2896  buffer_tx_error_fn callback, void *userdata);
2897 
2920 void
2921 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2922  void *userdata);
2923 
2947 void
2948 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2949  void *userdata);
2950 
2976 int
2977 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
2978 
2994 };
2995 
3003  uint64_t metadata;
3017 };
3018 
3037 };
3038 
3039 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
3040  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
3060 int rte_eth_dev_callback_register(uint16_t port_id,
3061  enum rte_eth_event_type event,
3062  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3063 
3082 int rte_eth_dev_callback_unregister(uint16_t port_id,
3083  enum rte_eth_event_type event,
3084  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3085 
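/*
 * Usage sketch (illustrative): register a callback for link state change
 * events. "lsc_event_cb" is a hypothetical application function matching
 * rte_eth_dev_cb_fn; delivery also requires intr_conf.lsc to be enabled in
 * the device configuration.
 *
 *	static int
 *	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *		     void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, (int)event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */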
3107 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
3108 
3129 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
3130 
3148 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
3149 
3171 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3172  int epfd, int op, void *data);
3173 
3191 __rte_experimental
3192 int
3193 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
3194 
3208 int rte_eth_led_on(uint16_t port_id);
3209 
3223 int rte_eth_led_off(uint16_t port_id);
3224 
3238 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
3239  struct rte_eth_fc_conf *fc_conf);
3240 
3255 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
3256  struct rte_eth_fc_conf *fc_conf);
3257 
3273 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3274  struct rte_eth_pfc_conf *pfc_conf);
3275 
3295 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
3296  uint32_t pool);
3297 
3311 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
3312  struct rte_ether_addr *mac_addr);
3313 
3327 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
3328  struct rte_ether_addr *mac_addr);
3329 
3346 int rte_eth_dev_rss_reta_update(uint16_t port_id,
3347  struct rte_eth_rss_reta_entry64 *reta_conf,
3348  uint16_t reta_size);
3349 
3367 int rte_eth_dev_rss_reta_query(uint16_t port_id,
3368  struct rte_eth_rss_reta_entry64 *reta_conf,
3369  uint16_t reta_size);
3370 
3390 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3391  uint8_t on);
3392 
3411 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
3412 
3435 int rte_eth_mirror_rule_set(uint16_t port_id,
3436  struct rte_eth_mirror_conf *mirror_conf,
3437  uint8_t rule_id,
3438  uint8_t on);
3439 
3454 int rte_eth_mirror_rule_reset(uint16_t port_id,
3455  uint8_t rule_id);
3456 
3473 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3474  uint16_t tx_rate);
3475 
3490 int rte_eth_dev_rss_hash_update(uint16_t port_id,
3491  struct rte_eth_rss_conf *rss_conf);
3492 
3507 int
3508 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3509  struct rte_eth_rss_conf *rss_conf);
3510 
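/*
 * Usage sketch (illustrative): update the RSS hash configuration of a port to
 * hash on IP and TCP fields; rss_key == NULL leaves the device key unchanged.
 *
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = NULL,
 *		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
 *	};
 *
 *	rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */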
3529 int
3530 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3531  struct rte_eth_udp_tunnel *tunnel_udp);
3532 
3552 int
3553 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3554  struct rte_eth_udp_tunnel *tunnel_udp);
3555 
3570 __rte_deprecated
3571 int rte_eth_dev_filter_supported(uint16_t port_id,
3572  enum rte_filter_type filter_type);
3573 
3593 __rte_deprecated
3594 int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3595  enum rte_filter_op filter_op, void *arg);
3596 
3610 int rte_eth_dev_get_dcb_info(uint16_t port_id,
3611  struct rte_eth_dcb_info *dcb_info);
3612 
3613 struct rte_eth_rxtx_callback;
3614 
3639 const struct rte_eth_rxtx_callback *
3640 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3641  rte_rx_callback_fn fn, void *user_param);
3642 
3668 const struct rte_eth_rxtx_callback *
3669 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3670  rte_rx_callback_fn fn, void *user_param);
3671 
3696 const struct rte_eth_rxtx_callback *
3697 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3698  rte_tx_callback_fn fn, void *user_param);
3699 
3730 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3731  const struct rte_eth_rxtx_callback *user_cb);
3732 
3763 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3764  const struct rte_eth_rxtx_callback *user_cb);
3765 
3784 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3785  struct rte_eth_rxq_info *qinfo);
3786 
3805 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3806  struct rte_eth_txq_info *qinfo);
3807 
3825 __rte_experimental
3826 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
3827  struct rte_eth_burst_mode *mode);
3828 
3846 __rte_experimental
3847 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
3848  struct rte_eth_burst_mode *mode);
3849 
3867 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
3868 
3881 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
3882 
3898 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
3899 
3915 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
3916 
3934 __rte_experimental
3935 int
3936 rte_eth_dev_get_module_info(uint16_t port_id,
3937  struct rte_eth_dev_module_info *modinfo);
3938 
3957 __rte_experimental
3958 int
3959 rte_eth_dev_get_module_eeprom(uint16_t port_id,
3960  struct rte_dev_eeprom_info *info);
3961 
3980 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3981  struct rte_ether_addr *mc_addr_set,
3982  uint32_t nb_mc_addr);
3983 
3996 int rte_eth_timesync_enable(uint16_t port_id);
3997 
4010 int rte_eth_timesync_disable(uint16_t port_id);
4011 
4030 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
4031  struct timespec *timestamp, uint32_t flags);
4032 
4048 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4049  struct timespec *timestamp);
4050 
4068 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
4069 
4084 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
4085 
4104 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
4105 
4150 __rte_experimental
4151 int
4152 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
4153 
4169 int
4170 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4171  struct rte_eth_l2_tunnel_conf *l2_tunnel);
4172 
4197 int
4198 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4199  struct rte_eth_l2_tunnel_conf *l2_tunnel,
4200  uint32_t mask,
4201  uint8_t en);
4202 
4218 int
4219 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
4220 
4235 int
4236 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
4237 
4254 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4255  uint16_t *nb_rx_desc,
4256  uint16_t *nb_tx_desc);
4257 
4272 int
4273 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
4274 
4284 void *
4285 rte_eth_dev_get_sec_ctx(uint16_t port_id);
4286 
4301 __rte_experimental
4302 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
4303  struct rte_eth_hairpin_cap *cap);
4304 
4305 #include <rte_ethdev_core.h>
4306 
4389 static inline uint16_t
4390 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
4391  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
4392 {
4393  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4394  uint16_t nb_rx;
4395 
4396 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4397  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
4398  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
4399 
4400  if (queue_id >= dev->data->nb_rx_queues) {
4401  RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4402  return 0;
4403  }
4404 #endif
4405  nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
4406  rx_pkts, nb_pkts);
4407 
4408 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
4409  if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
4410  struct rte_eth_rxtx_callback *cb =
4411  dev->post_rx_burst_cbs[queue_id];
4412 
4413  do {
4414  nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
4415  nb_pkts, cb->param);
4416  cb = cb->next;
4417  } while (cb != NULL);
4418  }
4419 #endif
4420 
4421  rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
4422  return nb_rx;
4423 }
4424 
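/*
 * Usage sketch (illustrative): a minimal polling loop on Rx queue 0. Received
 * mbufs are simply freed here (rte_pktmbuf_free() is from rte_mbuf.h); a real
 * application would process them instead.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_rx, i;
 *
 *	for (;;) {
 *		nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *		for (i = 0; i < nb_rx; i++)
 *			rte_pktmbuf_free(pkts[i]);
 *	}
 */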
4437 static inline int
4438 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
4439 {
4440  struct rte_eth_dev *dev;
4441 
4442  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4443  dev = &rte_eth_devices[port_id];
4444  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
4445  if (queue_id >= dev->data->nb_rx_queues)
4446  return -EINVAL;
4447 
4448  return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
4449 }
4450 
4466 static inline int
4467 rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
4468 {
4469  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4470  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4471  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
4472  return (*dev->dev_ops->rx_descriptor_done)( \
4473  dev->data->rx_queues[queue_id], offset);
4474 }
4475 
4476 #define RTE_ETH_RX_DESC_AVAIL 0
4477 #define RTE_ETH_RX_DESC_DONE 1
4478 #define RTE_ETH_RX_DESC_UNAVAIL 2
4513 static inline int
4514 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
4515  uint16_t offset)
4516 {
4517  struct rte_eth_dev *dev;
4518  void *rxq;
4519 
4520 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4521  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4522 #endif
4523  dev = &rte_eth_devices[port_id];
4524 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4525  if (queue_id >= dev->data->nb_rx_queues)
4526  return -ENODEV;
4527 #endif
4528  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
4529  rxq = dev->data->rx_queues[queue_id];
4530 
4531  return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
4532 }
4533 
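/*
 * Usage sketch (illustrative): probe how much of Rx queue 0 is already filled
 * by checking the descriptor 31 slots ahead of the next packet to be received
 * (descriptors are normally completed in order).
 *
 *	int status = rte_eth_rx_descriptor_status(port_id, 0, 31);
 *
 *	if (status == RTE_ETH_RX_DESC_DONE)
 *		; // at least 32 packets are waiting in the queue
 *	else if (status == RTE_ETH_RX_DESC_AVAIL)
 *		; // that descriptor is still owned by the hardware, not yet filled
 */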
4534 #define RTE_ETH_TX_DESC_FULL 0
4535 #define RTE_ETH_TX_DESC_DONE 1
4536 #define RTE_ETH_TX_DESC_UNAVAIL 2
4571 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
4572  uint16_t queue_id, uint16_t offset)
4573 {
4574  struct rte_eth_dev *dev;
4575  void *txq;
4576 
4577 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4578  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4579 #endif
4580  dev = &rte_eth_devices[port_id];
4581 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4582  if (queue_id >= dev->data->nb_tx_queues)
4583  return -ENODEV;
4584 #endif
4585  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
4586  txq = dev->data->tx_queues[queue_id];
4587 
4588  return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
4589 }
4590 
4657 static inline uint16_t
4658 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
4659  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4660 {
4661  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4662 
4663 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4664  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
4665  RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
4666 
4667  if (queue_id >= dev->data->nb_tx_queues) {
4668  RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4669  return 0;
4670  }
4671 #endif
4672 
4673 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
4674  struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
4675 
4676  if (unlikely(cb != NULL)) {
4677  do {
4678  nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
4679  cb->param);
4680  cb = cb->next;
4681  } while (cb != NULL);
4682  }
4683 #endif
4684 
4685  rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts,
4686  nb_pkts);
4687  return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
4688 }
4689 
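/*
 * Usage sketch (illustrative): transmit a burst and free whatever the driver
 * could not enqueue, so no mbufs are leaked when the Tx ring is full.
 *
 *	uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_pkts);
 *
 *	while (unlikely(nb_tx < nb_pkts)) {
 *		rte_pktmbuf_free(pkts[nb_tx]);
 *		nb_tx++;
 *	}
 */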
4743 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
4744 
4745 static inline uint16_t
4746 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
4747  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4748 {
4749  struct rte_eth_dev *dev;
4750 
4751 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4752  if (!rte_eth_dev_is_valid_port(port_id)) {
4753  RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
4754  rte_errno = EINVAL;
4755  return 0;
4756  }
4757 #endif
4758 
4759  dev = &rte_eth_devices[port_id];
4760 
4761 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4762  if (queue_id >= dev->data->nb_tx_queues) {
4763  RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4764  rte_errno = EINVAL;
4765  return 0;
4766  }
4767 #endif
4768 
4769  if (!dev->tx_pkt_prepare)
4770  return nb_pkts;
4771 
4772  return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
4773  tx_pkts, nb_pkts);
4774 }
4775 
4776 #else
4777 
4778 /*
4779  * Native NOOP operation for compilation targets which do not require any
4780  * preparation steps, where a functional NOOP may introduce an unnecessary
4781  * performance drop.
4782  *
4783  * Generally it is not a good idea to turn this on globally, and it should
4784  * not be used if the behavior of tx_prepare can change.
4785  */
4786 
4787 static inline uint16_t
4788 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
4789  __rte_unused uint16_t queue_id,
4790  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4791 {
4792  return nb_pkts;
4793 }
4794 
4795 #endif
4796 
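/*
 * Usage sketch (illustrative): run rte_eth_tx_prepare() on a burst that
 * requests offloads (e.g. TSO or checksum) before handing it to
 * rte_eth_tx_burst(). "handle_invalid_packets" is a hypothetical helper.
 *
 *	uint16_t nb_prep = rte_eth_tx_prepare(port_id, 0, pkts, nb_pkts);
 *
 *	if (nb_prep != nb_pkts)
 *		// rte_errno is set; pkts[nb_prep] is the first rejected packet
 *		handle_invalid_packets(&pkts[nb_prep], nb_pkts - nb_prep);
 *	rte_eth_tx_burst(port_id, 0, pkts, nb_prep);
 */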
4819 static inline uint16_t
4820 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
4821  struct rte_eth_dev_tx_buffer *buffer)
4822 {
4823  uint16_t sent;
4824  uint16_t to_send = buffer->length;
4825 
4826  if (to_send == 0)
4827  return 0;
4828 
4829  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
4830 
4831  buffer->length = 0;
4832 
4833  /* All packets sent, or to be dealt with by callback below */
4834  if (unlikely(sent != to_send))
4835  buffer->error_callback(&buffer->pkts[sent],
4836  (uint16_t)(to_send - sent),
4837  buffer->error_userdata);
4838 
4839  return sent;
4840 }
4841 
4872 static __rte_always_inline uint16_t
4873 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
4874  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
4875 {
4876  buffer->pkts[buffer->length++] = tx_pkt;
4877  if (buffer->length < buffer->size)
4878  return 0;
4879 
4880  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
4881 }
4882 
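/*
 * Usage sketch (illustrative): buffer packets for Tx and flush periodically
 * so packets do not linger in a partially filled buffer. "buffer" is assumed
 * to have been set up with rte_eth_tx_buffer_init() as shown earlier, and
 * "pkts"/"nb_rx" come from a preceding rte_eth_rx_burst() call.
 *
 *	for (i = 0; i < nb_rx; i++)
 *		rte_eth_tx_buffer(port_id, 0, buffer, pkts[i]);
 *
 *	// e.g. on a timer tick or when an Rx poll returned no packets:
 *	rte_eth_tx_buffer_flush(port_id, 0, buffer);
 */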
4883 #ifdef __cplusplus
4884 }
4885 #endif
4886 
4887 #endif /* _RTE_ETHDEV_H_ */
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1284
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf
Definition: rte_ethdev.h:1087
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_promiscuous_disable(uint16_t port_id)
#define ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:631
struct rte_eth_conf::@139 rx_adv_conf
struct rte_eth_dev_portconf default_rxportconf
Definition: rte_ethdev.h:1287
struct rte_fdir_conf fdir_conf
Definition: rte_ethdev.h:1097
int rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel)
uint32_t rmv
Definition: rte_ethdev.h:1054
#define __rte_always_inline
Definition: rte_common.h:193
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:856
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
uint16_t nb_desc
Definition: rte_ethdev.h:1319
char info[RTE_ETH_BURST_MODE_INFO_SIZE]
Definition: rte_ethdev.h:1339
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1262
uint16_t reta[RTE_RETA_GROUP_SIZE]
Definition: rte_ethdev.h:704
const uint32_t * dev_flags
Definition: rte_ethdev.h:1250
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
struct rte_eth_vmdq_dcb_conf::@137 pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
void rte_eth_dev_stop(uint16_t port_id)
__rte_experimental int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
#define __rte_cache_min_aligned
Definition: rte_common.h:370
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:4746
int rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
rte_eth_nb_tcs
Definition: rte_ethdev.h:712
struct rte_eth_vmdq_tx_conf vmdq_tx_conf
Definition: rte_ethdev.h:1091
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:264
__rte_experimental int rte_eth_dev_owner_new(uint64_t *owner_id)
__rte_experimental int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
struct rte_eth_thresh rx_thresh
Definition: rte_ethdev.h:836
uint16_t rte_eth_find_next(uint16_t port_id)
__rte_experimental int rte_eth_dev_is_removed(uint16_t port_id)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
int rte_eth_led_off(uint16_t port_id)
rte_fdir_pballoc_type
Definition: rte_ethdev.h:1001
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:4514
__rte_experimental uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
uint64_t imissed
Definition: rte_ethdev.h:249
static int rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:4467
uint32_t low_water
Definition: rte_ethdev.h:961
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
uint32_t max_rx_pkt_len
Definition: rte_ethdev.h:400
uint8_t rss_key_len
Definition: rte_ethdev.h:453
void rte_eth_dev_close(uint16_t port_id)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint16_t tx_rate)
uint8_t hthresh
Definition: rte_ethdev.h:332
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1266
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
uint16_t reta_size
Definition: rte_ethdev.h:1270
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
void * userdata
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint32_t lpbk_mode
Definition: rte_ethdev.h:1072
enum rte_fdir_status_mode status
Definition: rte_ethdev.h:1025
enum rte_eth_tx_mq_mode mq_mode
Definition: rte_ethdev.h:810
#define rte_errno
Definition: rte_errno.h:29
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint32_t link_speeds
Definition: rte_ethdev.h:1063
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1268
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:380
rte_eth_fc_mode
Definition: rte_ethdev.h:947
int rte_eth_mirror_rule_set(uint16_t port_id, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on)
uint8_t enable_default_pool
Definition: rte_ethdev.h:764
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:941
#define __rte_unused
Definition: rte_common.h:104
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:262
uint16_t max_rx_2_tx
Definition: rte_ethdev.h:882
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
uint64_t opackets
Definition: rte_ethdev.h:246
rte_filter_op
Definition: rte_eth_ctrl.h:46
struct rte_eth_rss_conf rss_conf
Definition: rte_ethdev.h:1078
uint8_t hash_key_size
Definition: rte_ethdev.h:1272
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
uint16_t split_hdr_size
Definition: rte_ethdev.h:403
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
struct rte_mempool * mp
Definition: rte_ethdev.h:1307
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1096
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:260
const char * name
Definition: rte_ethdev.h:1222
struct rte_eth_switch_info switch_info
Definition: rte_ethdev.h:1296
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
uint32_t rxq
Definition: rte_ethdev.h:1052
int rte_eth_dev_set_link_up(uint16_t port_id)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
struct rte_eth_thresh tx_thresh
Definition: rte_ethdev.h:855
struct rte_eth_desc_lim rx_desc_lim
Definition: rte_ethdev.h:1280
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:771
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
__rte_experimental int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
__rte_experimental int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1278
uint8_t rx_deferred_start
Definition: rte_ethdev.h:839
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:2844
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:3039
struct rte_eth_rxmode rxmode
Definition: rte_ethdev.h:1070
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:256
uint32_t high_water
Definition: rte_ethdev.h:960
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:763
union rte_eth_conf::@140 tx_adv_conf
struct rte_eth_txconf conf
Definition: rte_ethdev.h:1318
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
struct rte_intr_conf intr_conf
Definition: rte_ethdev.h:1098
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1343
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
struct rte_eth_desc_lim tx_desc_lim
Definition: rte_ethdev.h:1281
int rte_eth_timesync_disable(uint16_t port_id)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint64_t offloads
Definition: rte_ethdev.h:845
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
uint16_t send_xon
Definition: rte_ethdev.h:963
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_allmulticast_enable(uint16_t port_id)
struct rte_eth_txconf default_txconf
Definition: rte_ethdev.h:1276
#define unlikely(x)
uint16_t nb_max
Definition: rte_ethdev.h:916
uint64_t ibytes
Definition: rte_ethdev.h:247
uint64_t offloads
Definition: rte_ethdev.h:866
uint16_t max_nb_queues
Definition: rte_ethdev.h:880
uint64_t oerrors
Definition: rte_ethdev.h:254
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
struct rte_eth_dcb_rx_conf dcb_rx_conf
Definition: rte_ethdev.h:1081
struct rte_eth_vmdq_rx_conf vmdq_rx_conf
Definition: rte_ethdev.h:1083
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
uint16_t max_mtu
Definition: rte_ethdev.h:1249
uint64_t offloads
Definition: rte_ethdev.h:409
enum rte_eth_rx_mq_mode mq_mode
Definition: rte_ethdev.h:399
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:794
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
uint16_t tx_free_thresh
Definition: rte_ethdev.h:857
uint16_t nb_desc
Definition: rte_ethdev.h:1310
__rte_experimental uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:4390
int rte_eth_allmulticast_disable(uint16_t port_id)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_start(uint16_t port_id)
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1261
uint8_t scattered_rx
Definition: rte_ethdev.h:1309
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
struct rte_eth_dcb_tx_conf dcb_tx_conf
Definition: rte_ethdev.h:1089
uint64_t offloads
Definition: rte_ethdev.h:816
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1279
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1264
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:828
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:258
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
uint16_t min_mtu
Definition: rte_ethdev.h:1248
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:1478
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1299
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
uint64_t obytes
Definition: rte_ethdev.h:248
uint8_t enable_loop_back
Definition: rte_ethdev.h:797
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:411
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
struct rte_eth_rxconf conf
Definition: rte_ethdev.h:1308
#define ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:632
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
uint16_t max_tx_queues
Definition: rte_ethdev.h:1256
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_xstats_reset(uint16_t port_id)
rte_eth_dev_state
Definition: rte_ethdev.h:1484
uint16_t rx_free_thresh
Definition: rte_ethdev.h:837
struct rte_eth_vlan_mirror vlan
Definition: rte_ethdev.h:692
uint64_t dev_capa
Definition: rte_ethdev.h:1291
uint16_t max_tx_2_rx
Definition: rte_ethdev.h:884
uint64_t ierrors
Definition: rte_ethdev.h:253
uint16_t max_nb_desc
Definition: rte_ethdev.h:885
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
__extension__ uint8_t hw_vlan_insert_pvid
Definition: rte_ethdev.h:822
uint8_t priority
Definition: rte_ethdev.h:976
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1274
uint16_t rte_eth_dev_count_total(void)
int rte_eth_promiscuous_enable(uint16_t port_id)
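A sketch of enabling promiscuous mode and reading the setting back; the helper name is illustrative.

    #include <rte_ethdev.h>

    static int
    enable_promisc(uint16_t port_id)
    {
        int ret = rte_eth_promiscuous_enable(port_id);

        if (ret != 0)
            return ret;
        /* rte_eth_promiscuous_get() returns 1 if enabled, 0 if not, -1 on error. */
        return rte_eth_promiscuous_get(port_id) == 1 ? 0 : -1;
    }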
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
struct rte_eth_dev_portconf default_txportconf
Definition: rte_ethdev.h:1289
rte_vlan_type
Definition: rte_ethdev.h:419
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
uint16_t nb_seg_max
Definition: rte_ethdev.h:928
uint64_t ipackets
Definition: rte_ethdev.h:245
uint16_t max_vfs
Definition: rte_ethdev.h:1260
uint16_t pause_time
Definition: rte_ethdev.h:962
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
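A sketch of reading the link with rte_eth_link_get(), which may wait for autonegotiation to complete (rte_eth_link_get_nowait() is the non-blocking variant), and printing the bit-fields declared in struct rte_eth_link:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static int
    show_link(uint16_t port_id)
    {
        struct rte_eth_link link;
        int ret = rte_eth_link_get(port_id, &link);

        if (ret != 0)
            return ret;
        printf("port %u: %s, %u Mbps, %s-duplex\n", port_id,
               link.link_status == ETH_LINK_UP ? "up" : "down",
               link.link_speed,
               link.link_duplex == ETH_LINK_FULL_DUPLEX ? "full" : "half");
        return 0;
    }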
rte_filter_type
Definition: rte_eth_ctrl.h:28
int rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel, uint32_t mask, uint8_t en)
uint64_t rx_nombuf
Definition: rte_ethdev.h:255
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:4873
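The buffered-TX helpers are typically used together: allocate a buffer sized with RTE_ETH_TX_BUFFER_SIZE(), initialise it with rte_eth_tx_buffer_init(), queue packets with rte_eth_tx_buffer(), and periodically call rte_eth_tx_buffer_flush(). A sketch, assuming port 0 / queue 0; the buffer depth and helper names are illustrative.

    #include <rte_ethdev.h>
    #include <rte_malloc.h>

    #define TX_BUF_PKTS 32   /* illustrative buffer depth */

    static struct rte_eth_dev_tx_buffer *
    make_tx_buffer(void)
    {
        struct rte_eth_dev_tx_buffer *buf;

        buf = rte_zmalloc("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(TX_BUF_PKTS), 0);
        if (buf != NULL)
            rte_eth_tx_buffer_init(buf, TX_BUF_PKTS);
        return buf;
    }

    static void
    queue_and_flush(struct rte_eth_dev_tx_buffer *buf, struct rte_mbuf *pkt)
    {
        /* Buffers the packet; a full burst is sent automatically when the
         * buffer fills, and the call returns the number transmitted. */
        rte_eth_tx_buffer(0, 0, buf, pkt);

        /* Push out anything still pending (e.g. at the end of a poll cycle). */
        rte_eth_tx_buffer_flush(0, 0, buf);
    }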
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:822
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
#define ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:339
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1277
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:2983
rte_eth_nb_pools
Definition: rte_ethdev.h:721
#define ETH_MIRROR_MAX_VLANS
Definition: rte_ethdev.h:667
uint16_t nb_align
Definition: rte_ethdev.h:918
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:347
__rte_experimental int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
const char * driver_name
Definition: rte_ethdev.h:1245
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:4438
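rte_eth_rx_queue_count() reports how many descriptors of an RX queue are currently in use, or a negative errno if the driver does not implement it. A sketch:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    report_rx_backlog(uint16_t port_id, uint16_t queue_id)
    {
        int used = rte_eth_rx_queue_count(port_id, queue_id);

        if (used < 0)   /* e.g. -ENOTSUP if the PMD lacks the callback */
            printf("port %u queue %u: count not supported (%d)\n",
                   port_id, queue_id, used);
        else
            printf("port %u queue %u: %d descriptors in use\n",
                   port_id, queue_id, used);
    }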
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_socket_id(uint16_t port_id)
uint8_t enable_default_pool
Definition: rte_ethdev.h:795
__extension__ struct rte_eth_link __rte_aligned(8)
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1285
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
struct rte_eth_fdir_flex_conf flex_conf
Definition: rte_ethdev.h:1029
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
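Setting and reading back the MTU, as a sketch; the 9000-byte value is only an example and must fall within the min_mtu/max_mtu range reported by rte_eth_dev_info_get().

    #include <rte_ethdev.h>

    static int
    set_jumbo_mtu(uint16_t port_id)
    {
        uint16_t mtu = 0;
        int ret = rte_eth_dev_set_mtu(port_id, 9000);

        if (ret != 0)
            return ret;            /* e.g. -EINVAL if out of range */
        ret = rte_eth_dev_get_mtu(port_id, &mtu);
        return (ret == 0 && mtu == 9000) ? 0 : -1;
    }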
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:868
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1257
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
rte_eth_tunnel_type
Definition: rte_ethdev.h:982
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint64_t value
Definition: rte_ethdev.h:1356
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition: rte_ethdev.h:553
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
enum rte_fdir_pballoc_type pballoc
Definition: rte_ethdev.h:1024
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
__rte_experimental int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
pool_map[ETH_VMDQ_MAX_VLAN_FILTERS] (anonymous struct array in struct rte_eth_vmdq_rx_conf)
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1252
__rte_experimental int rte_eth_dev_owner_delete(const uint64_t owner_id)
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
uint64_t rss_hf
Definition: rte_ethdev.h:454
void * reserved_ptrs[2]
Definition: rte_ethdev.h:869
uint64_t id
Definition: rte_ethdev.h:1355
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1298
__extension__ uint8_t hw_vlan_reject_tagged
Definition: rte_ethdev.h:822
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:847
enum rte_fdir_mode mode
Definition: rte_ethdev.h:1023
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
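A sketch that dumps the basic per-port counters declared in struct rte_eth_stats; the helper name is illustrative.

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    static int
    dump_basic_stats(uint16_t port_id)
    {
        struct rte_eth_stats st;
        int ret = rte_eth_stats_get(port_id, &st);

        if (ret != 0)
            return ret;
        printf("port %u: rx %" PRIu64 " pkts / %" PRIu64 " bytes, "
               "tx %" PRIu64 " pkts, missed %" PRIu64 ", rx errors %" PRIu64 "\n",
               port_id, st.ipackets, st.ibytes, st.opackets,
               st.imissed, st.ierrors);
        return 0;
    }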
unsigned int if_index
Definition: rte_ethdev.h:1246
__rte_deprecated int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type, enum rte_filter_op filter_op, void *arg)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:965
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:1454
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:964
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
rte_fdir_mode
Definition: rte_eth_ctrl.h:603
struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf
Definition: rte_ethdev.h:1079
uint8_t * rss_key
Definition: rte_ethdev.h:452
rte_fdir_status_mode
Definition: rte_ethdev.h:1010
__rte_deprecated int rte_eth_dev_filter_supported(uint16_t port_id, enum rte_filter_type filter_type)
void * reserved_ptrs[2]
Definition: rte_ethdev.h:829
uint8_t tx_deferred_start
Definition: rte_ethdev.h:860
uint8_t wthresh
Definition: rte_ethdev.h:333
void * reserved_ptrs[2]
Definition: rte_ethdev.h:848
uint16_t max_rx_queues
Definition: rte_ethdev.h:1255
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:402
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
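Extended statistics come in two parallel arrays matched by index: names via rte_eth_xstats_get_names() and values via rte_eth_xstats_get(). A sketch that first queries the required count by passing a NULL array, then fetches and prints both; the helper name is illustrative.

    #include <stdio.h>
    #include <stdlib.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    static int
    dump_xstats(uint16_t port_id)
    {
        int n = rte_eth_xstats_get_names(port_id, NULL, 0);
        if (n <= 0)
            return n;

        struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
        struct rte_eth_xstat *vals = calloc(n, sizeof(*vals));

        if (names != NULL && vals != NULL &&
            rte_eth_xstats_get_names(port_id, names, n) == n &&
            rte_eth_xstats_get(port_id, vals, n) == n) {
            for (int i = 0; i < n; i++)
                printf("%s: %" PRIu64 "\n", names[i].name, vals[i].value);
        }
        free(names);
        free(vals);
        return 0;
    }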
struct rte_eth_fc_conf fc
Definition: rte_ethdev.h:975
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
struct rte_eth_txmode txmode
Definition: rte_ethdev.h:1071
int rte_eth_allmulticast_get(uint16_t port_id)
uint8_t rx_drop_en
Definition: rte_ethdev.h:838
int rte_eth_dev_is_valid_port(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
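Reading a port's primary MAC address; a sketch that prints it in the usual colon notation.

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_ether.h>

    static int
    print_port_mac(uint16_t port_id)
    {
        struct rte_ether_addr mac;
        int ret = rte_eth_macaddr_get(port_id, &mac);

        if (ret != 0)
            return ret;
        printf("port %u MAC: %02x:%02x:%02x:%02x:%02x:%02x\n", port_id,
               mac.addr_bytes[0], mac.addr_bytes[1], mac.addr_bytes[2],
               mac.addr_bytes[3], mac.addr_bytes[4], mac.addr_bytes[5]);
        return 0;
    }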
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1254
uint16_t nb_min
Definition: rte_ethdev.h:917
void * reserved_ptrs[2]
Definition: rte_ethdev.h:412
int rte_eth_timesync_enable(uint16_t port_id)
uint8_t pthresh
Definition: rte_ethdev.h:331
struct rte_eth_rxconf default_rxconf
Definition: rte_ethdev.h:1275
uint32_t speed_capa
Definition: rte_ethdev.h:1282
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:4658
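rte_eth_tx_burst() may enqueue fewer packets than requested; the caller still owns the unsent mbufs. A sketch that releases whatever was not accepted; the helper name is illustrative.

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    static void
    send_burst(uint16_t port_id, uint16_t queue_id,
               struct rte_mbuf **pkts, uint16_t nb_pkts)
    {
        uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

        /* Packets not accepted by the TX ring remain ours to release. */
        while (nb_tx < nb_pkts)
            rte_pktmbuf_free(pkts[nb_tx++]);
    }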
uint8_t drop_queue
Definition: rte_ethdev.h:1027
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
uint8_t autoneg
Definition: rte_ethdev.h:966
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1251
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
__rte_experimental int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
uint32_t lsc
Definition: rte_ethdev.h:1050
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:4820
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
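The usual bring-up order for a port is rte_eth_dev_configure(), one rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() per queue, then rte_eth_dev_start(). A compact sketch for a single RX/TX queue pair, assuming EAL is initialised and mb_pool is a valid packet mempool; the all-defaults rte_eth_conf and ring sizes are illustrative.

    #include <rte_ethdev.h>
    #include <rte_mempool.h>

    static int
    init_port(uint16_t port_id, struct rte_mempool *mb_pool)
    {
        struct rte_eth_conf port_conf = { .rxmode = { .mq_mode = ETH_MQ_RX_NONE } };
        uint16_t nb_rxd = 1024, nb_txd = 1024;   /* example ring sizes */
        int socket = rte_eth_dev_socket_id(port_id);
        int ret;

        ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
        if (ret != 0)
            return ret;
        ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
        if (ret != 0)
            return ret;
        /* A NULL rx_conf/tx_conf selects the driver defaults. */
        ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket, NULL, mb_pool);
        if (ret != 0)
            return ret;
        ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket, NULL);
        if (ret != 0)
            return ret;
        return rte_eth_dev_start(port_id);
    }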
rte_eth_event_type
Definition: rte_ethdev.h:3022