DPDK 20.08.0 — rte_ethdev.h (generated documentation view of this header file)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
142 #ifdef __cplusplus
143 extern "C" {
144 #endif
145 
146 #include <stdint.h>
147 
148 /* Use this macro to check if LRO API is supported */
149 #define RTE_ETHDEV_HAS_LRO_SUPPORT
150 
151 #include <rte_compat.h>
152 #include <rte_log.h>
153 #include <rte_interrupts.h>
154 #include <rte_dev.h>
155 #include <rte_devargs.h>
156 #include <rte_errno.h>
157 #include <rte_common.h>
158 #include <rte_config.h>
159 #include <rte_ether.h>
160 
161 #include "rte_ethdev_trace_fp.h"
162 #include "rte_dev_info.h"
163 
164 extern int rte_eth_dev_logtype;
165 
/**
 * Log a message for the ethdev library at the given level.
 * The empty "" literal forces the first variadic argument to be a
 * compile-time format string (catches non-literal format misuse).
 */
#define RTE_ETHDEV_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
168 
169 struct rte_mbuf;
170 
187 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
188 
203 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
204 
217 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
218 
/**
 * Iterate over all Ethernet port ids matching a devargs string.
 * @p iter is initialized once, then advanced each round; iteration stops
 * when rte_eth_iterator_next() returns RTE_MAX_ETHPORTS.
 * NOTE(review): caller should still invoke rte_eth_iterator_cleanup() on
 * early break — confirm against rte_eth_iterator_next() contract.
 */
#define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
	for (rte_eth_iterator_init(iter, devargs), \
	     id = rte_eth_iterator_next(iter); \
	     id != RTE_MAX_ETHPORTS; \
	     id = rte_eth_iterator_next(iter))
237 
245  uint64_t ipackets;
246  uint64_t opackets;
247  uint64_t ibytes;
248  uint64_t obytes;
249  uint64_t imissed;
253  uint64_t ierrors;
254  uint64_t oerrors;
255  uint64_t rx_nombuf;
256  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
258  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
260  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
262  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
264  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
266 };
267 
/**
 * Device supported speeds bitmap flags (used in rte_eth_conf.link_speeds
 * and rte_eth_dev_info.speed_capa).
 */
#define ETH_LINK_SPEED_AUTONEG  (0 <<  0)  /**< Autonegotiate (all speeds) */
#define ETH_LINK_SPEED_FIXED    (1 <<  0)  /**< Disable autoneg (fixed speed) */
#define ETH_LINK_SPEED_10M_HD   (1 <<  1)  /**<  10 Mbps half-duplex */
#define ETH_LINK_SPEED_10M      (1 <<  2)  /**<  10 Mbps full-duplex */
#define ETH_LINK_SPEED_100M_HD  (1 <<  3)  /**< 100 Mbps half-duplex */
#define ETH_LINK_SPEED_100M     (1 <<  4)  /**< 100 Mbps full-duplex */
#define ETH_LINK_SPEED_1G       (1 <<  5)  /**<   1 Gbps */
#define ETH_LINK_SPEED_2_5G     (1 <<  6)  /**< 2.5 Gbps */
#define ETH_LINK_SPEED_5G       (1 <<  7)  /**<   5 Gbps */
#define ETH_LINK_SPEED_10G      (1 <<  8)  /**<  10 Gbps */
#define ETH_LINK_SPEED_20G      (1 <<  9)  /**<  20 Gbps */
#define ETH_LINK_SPEED_25G      (1 << 10)  /**<  25 Gbps */
#define ETH_LINK_SPEED_40G      (1 << 11)  /**<  40 Gbps */
#define ETH_LINK_SPEED_50G      (1 << 12)  /**<  50 Gbps */
#define ETH_LINK_SPEED_56G      (1 << 13)  /**<  56 Gbps */
#define ETH_LINK_SPEED_100G     (1 << 14)  /**< 100 Gbps */
#define ETH_LINK_SPEED_200G     (1 << 15)  /**< 200 Gbps */

/**
 * Ethernet numeric link speeds in Mbps (used in rte_eth_link.link_speed).
 */
#define ETH_SPEED_NUM_NONE      0       /**< Not defined */
#define ETH_SPEED_NUM_10M       10      /**<  10 Mbps */
#define ETH_SPEED_NUM_100M      100     /**< 100 Mbps */
#define ETH_SPEED_NUM_1G        1000    /**<   1 Gbps */
#define ETH_SPEED_NUM_2_5G      2500    /**< 2.5 Gbps */
#define ETH_SPEED_NUM_5G        5000    /**<   5 Gbps */
#define ETH_SPEED_NUM_10G       10000   /**<  10 Gbps */
#define ETH_SPEED_NUM_20G       20000   /**<  20 Gbps */
#define ETH_SPEED_NUM_25G       25000   /**<  25 Gbps */
#define ETH_SPEED_NUM_40G       40000   /**<  40 Gbps */
#define ETH_SPEED_NUM_50G       50000   /**<  50 Gbps */
#define ETH_SPEED_NUM_56G       56000   /**<  56 Gbps */
#define ETH_SPEED_NUM_100G      100000  /**< 100 Gbps */
#define ETH_SPEED_NUM_200G      200000  /**< 200 Gbps */
/**
 * A structure used to retrieve link-level information of an Ethernet port.
 * Packed into 64 bits and 8-byte aligned so it can be read/written with a
 * single atomic 64-bit access.
 */
__extension__
struct rte_eth_link {
	uint32_t link_speed;        /**< ETH_SPEED_NUM_* (Mbps) */
	uint16_t link_duplex  : 1;  /**< ETH_LINK_[HALF/FULL]_DUPLEX */
	uint16_t link_autoneg : 1;  /**< ETH_LINK_[AUTONEG/FIXED] */
	uint16_t link_status  : 1;  /**< ETH_LINK_[DOWN/UP] */
} __rte_aligned(8);
/* Utility constants for the rte_eth_link bit-fields. */
#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (link_duplex). */
#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (link_duplex). */
#define ETH_LINK_DOWN        0 /**< Link is down (link_status). */
#define ETH_LINK_UP          1 /**< Link is up (link_status). */
#define ETH_LINK_FIXED       0 /**< No autonegotiation (link_autoneg). */
#define ETH_LINK_AUTONEG     1 /**< Autonegotiated (link_autoneg). */
/**
 * A structure used to configure the ring threshold registers of an Rx/Tx
 * queue for an Ethernet port.
 */
struct rte_eth_thresh {
	uint8_t pthresh; /**< Ring prefetch threshold. */
	uint8_t hthresh; /**< Ring host threshold. */
	uint8_t wthresh; /**< Ring writeback threshold. */
};
335 
/** Simple flags to indicate Rx multi-queue mode; combined into ETH_MQ_RX_* enum values. */
#define ETH_MQ_RX_RSS_FLAG  0x1 /**< RSS hashing enabled. */
#define ETH_MQ_RX_DCB_FLAG  0x2 /**< DCB enabled. */
#define ETH_MQ_RX_VMDQ_FLAG 0x4 /**< VMDq enabled. */
342 
350 
354  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
356  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
357 
359  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
361  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
363  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
366  ETH_MQ_RX_VMDQ_FLAG,
367 };
368 
372 #define ETH_RSS ETH_MQ_RX_RSS
373 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
374 #define ETH_DCB_RX ETH_MQ_RX_DCB
375 
385 };
386 
390 #define ETH_DCB_NONE ETH_MQ_TX_NONE
391 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
392 #define ETH_DCB_TX ETH_MQ_TX_DCB
393 
400  uint32_t max_rx_pkt_len;
403  uint16_t split_hdr_size;
409  uint64_t offloads;
410 
411  uint64_t reserved_64s[2];
412  void *reserved_ptrs[2];
413 };
414 
420  ETH_VLAN_TYPE_UNKNOWN = 0,
423  ETH_VLAN_TYPE_MAX,
424 };
425 
431  uint64_t ids[64];
432 };
433 
452  uint8_t *rss_key;
453  uint8_t rss_key_len;
454  uint64_t rss_hf;
455 };
456 
/*
 * A packet can be identified by hardware as different flow types. Different
 * NIC hardware may support different flow types.
 * Basically, the NIC hardware identifies the flow type as deep protocol as
 * possible, and exclusively. For example, if a packet is identified as
 * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
 * though it is an actual IPV4 packet.
 */
#define RTE_ETH_FLOW_UNKNOWN             0
#define RTE_ETH_FLOW_RAW                 1
#define RTE_ETH_FLOW_IPV4                2
#define RTE_ETH_FLOW_FRAG_IPV4           3
#define RTE_ETH_FLOW_NONFRAG_IPV4_TCP    4
#define RTE_ETH_FLOW_NONFRAG_IPV4_UDP    5
#define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP   6
#define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER  7
#define RTE_ETH_FLOW_IPV6                8
#define RTE_ETH_FLOW_FRAG_IPV6           9
#define RTE_ETH_FLOW_NONFRAG_IPV6_TCP   10
#define RTE_ETH_FLOW_NONFRAG_IPV6_UDP   11
#define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP  12
#define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
#define RTE_ETH_FLOW_L2_PAYLOAD         14
#define RTE_ETH_FLOW_IPV6_EX            15
#define RTE_ETH_FLOW_IPV6_TCP_EX        16
#define RTE_ETH_FLOW_IPV6_UDP_EX        17
/** Consider device port number as a flow differentiator. */
#define RTE_ETH_FLOW_PORT               18
#define RTE_ETH_FLOW_VXLAN              19 /**< VXLAN protocol based flow */
#define RTE_ETH_FLOW_GENEVE             20 /**< GENEVE protocol based flow */
#define RTE_ETH_FLOW_NVGRE              21 /**< NVGRE protocol based flow */
#define RTE_ETH_FLOW_VXLAN_GPE          22 /**< VXLAN-GPE protocol based flow */
#define RTE_ETH_FLOW_GTPU               23 /**< GTPU protocol based flow */
#define RTE_ETH_FLOW_MAX                24
491 
/*
 * Below macros are defined for RSS offload types, they can be used to
 * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
 */
#define ETH_RSS_IPV4               (1ULL << 2)
#define ETH_RSS_FRAG_IPV4          (1ULL << 3)
#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << 4)
#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << 5)
#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << 6)
#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
#define ETH_RSS_IPV6               (1ULL << 8)
#define ETH_RSS_FRAG_IPV6          (1ULL << 9)
#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << 10)
#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << 11)
#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << 12)
#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
#define ETH_RSS_L2_PAYLOAD         (1ULL << 14)
#define ETH_RSS_IPV6_EX            (1ULL << 15)
#define ETH_RSS_IPV6_TCP_EX        (1ULL << 16)
#define ETH_RSS_IPV6_UDP_EX        (1ULL << 17)
#define ETH_RSS_PORT               (1ULL << 18)
#define ETH_RSS_VXLAN              (1ULL << 19)
#define ETH_RSS_GENEVE             (1ULL << 20)
#define ETH_RSS_NVGRE              (1ULL << 21)
/* NOTE(review): bit 22 is intentionally unassigned here — do not reuse
 * without checking the DPDK upstream bit allocation. */
#define ETH_RSS_GTPU               (1ULL << 23)
#define ETH_RSS_ETH                (1ULL << 24)
#define ETH_RSS_S_VLAN             (1ULL << 25)
#define ETH_RSS_C_VLAN             (1ULL << 26)
#define ETH_RSS_ESP                (1ULL << 27)
#define ETH_RSS_AH                 (1ULL << 28)
#define ETH_RSS_L2TPV3             (1ULL << 29)
#define ETH_RSS_PFCP               (1ULL << 30)
#define ETH_RSS_PPPOE              (1ULL << 31)
525 
/*
 * We use the following macros to combine with above ETH_RSS_* for
 * more specific input set selection. These bits are defined starting
 * from the high end of the 64 bits.
 * Note: If we use above ETH_RSS_* without SRC/DST_ONLY, it represents
 * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
 * the same level are used simultaneously, it is the same case as none of
 * them are added.
 */
#define ETH_RSS_L3_SRC_ONLY        (1ULL << 63)
#define ETH_RSS_L3_DST_ONLY        (1ULL << 62)
#define ETH_RSS_L4_SRC_ONLY        (1ULL << 61)
#define ETH_RSS_L4_DST_ONLY        (1ULL << 60)
#define ETH_RSS_L2_SRC_ONLY        (1ULL << 59)
#define ETH_RSS_L2_DST_ONLY        (1ULL << 58)

/*
 * Only select IPV6 address prefix as RSS input set according to
 * https://tools.ietf.org/html/rfc6052
 * Must be combined with ETH_RSS_IPV6, ETH_RSS_NONFRAG_IPV6_UDP,
 * ETH_RSS_NONFRAG_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_SCTP.
 */
#define RTE_ETH_RSS_L3_PRE32       (1ULL << 57)
#define RTE_ETH_RSS_L3_PRE40       (1ULL << 56)
#define RTE_ETH_RSS_L3_PRE48       (1ULL << 55)
#define RTE_ETH_RSS_L3_PRE56       (1ULL << 54)
#define RTE_ETH_RSS_L3_PRE64       (1ULL << 53)
#define RTE_ETH_RSS_L3_PRE96       (1ULL << 52)

/**
 * Normalize an RSS hash-function bitmask: when SRC_ONLY and DST_ONLY of
 * the same layer (L3 or L4) are both set, they cancel out — clear both,
 * which is equivalent to hashing on both SRC and DST.
 *
 * @param rss_hf
 *   RSS types possibly containing SRC/DST_ONLY selectors.
 * @return
 *   The refined RSS types.
 */
static inline uint64_t
rte_eth_rss_hf_refine(uint64_t rss_hf)
{
	if ((rss_hf & ETH_RSS_L3_SRC_ONLY) && (rss_hf & ETH_RSS_L3_DST_ONLY))
		rss_hf &= ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);

	if ((rss_hf & ETH_RSS_L4_SRC_ONLY) && (rss_hf & ETH_RSS_L4_DST_ONLY))
		rss_hf &= ~(ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);

	return rss_hf;
}
576 
/* Convenience combinations: IPv6 prefix-based RSS input sets (RFC 6052). */
#define ETH_RSS_IPV6_PRE32      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE32)
#define ETH_RSS_IPV6_PRE40      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE40)
#define ETH_RSS_IPV6_PRE48      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE48)
#define ETH_RSS_IPV6_PRE56      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE56)
#define ETH_RSS_IPV6_PRE64      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE64)
#define ETH_RSS_IPV6_PRE96      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE96)

#define ETH_RSS_IPV6_PRE32_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE32)
#define ETH_RSS_IPV6_PRE40_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE40)
#define ETH_RSS_IPV6_PRE48_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE48)
#define ETH_RSS_IPV6_PRE56_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE56)
#define ETH_RSS_IPV6_PRE64_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE64)
#define ETH_RSS_IPV6_PRE96_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE96)

#define ETH_RSS_IPV6_PRE32_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE32)
#define ETH_RSS_IPV6_PRE40_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE40)
#define ETH_RSS_IPV6_PRE48_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE48)
#define ETH_RSS_IPV6_PRE56_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE56)
#define ETH_RSS_IPV6_PRE64_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE64)
#define ETH_RSS_IPV6_PRE96_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE96)

#define ETH_RSS_IPV6_PRE32_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE32)
#define ETH_RSS_IPV6_PRE40_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE40)
#define ETH_RSS_IPV6_PRE48_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE48)
#define ETH_RSS_IPV6_PRE56_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE56)
#define ETH_RSS_IPV6_PRE64_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE64)
#define ETH_RSS_IPV6_PRE96_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE96)

/* Grouped shortcuts covering families of flow types. */
#define ETH_RSS_IP ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_IPV6_EX)

#define ETH_RSS_UDP ( \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_IPV6_UDP_EX)

#define ETH_RSS_TCP ( \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_IPV6_TCP_EX)

#define ETH_RSS_SCTP ( \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV6_SCTP)

#define ETH_RSS_TUNNEL ( \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

#define ETH_RSS_VLAN ( \
	ETH_RSS_S_VLAN | \
	ETH_RSS_C_VLAN)

/** Mask of all protocol-selection RSS bits (excludes SRC/DST_ONLY and PRE* bits). */
#define ETH_RSS_PROTO_MASK ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_NONFRAG_IPV6_SCTP | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_L2_PAYLOAD | \
	ETH_RSS_IPV6_EX | \
	ETH_RSS_IPV6_TCP_EX | \
	ETH_RSS_IPV6_UDP_EX | \
	ETH_RSS_PORT | \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)
727 
/*
 * Definitions used for redirection table entry size.
 * Some RSS RETA sizes may not be supported by some drivers, check the
 * documentation or the description of relevant functions for more details.
 */
#define ETH_RSS_RETA_SIZE_64  64
#define ETH_RSS_RETA_SIZE_128 128
#define ETH_RSS_RETA_SIZE_256 256
#define ETH_RSS_RETA_SIZE_512 512
/** Number of RETA entries grouped into one rte_eth_rss_reta_entry64. */
#define RTE_RETA_GROUP_SIZE   64
738 
/* Definitions used for VMDQ and DCB functionality */
#define ETH_VMDQ_MAX_VLAN_FILTERS   64  /**< Maximum nb. of VMDq VLAN filters. */
#define ETH_DCB_NUM_USER_PRIORITIES 8   /**< Maximum nb. of DCB priorities. */
#define ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDq DCB queues. */
#define ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */

/* DCB capability defines */
#define ETH_DCB_PG_SUPPORT  0x00000001 /**< Priority Group (ETS) support. */
#define ETH_DCB_PFC_SUPPORT 0x00000002 /**< Priority Flow Control support. */

/* Definitions used for VLAN Offload functionality */
#define ETH_VLAN_STRIP_OFFLOAD  0x0001 /**< VLAN Strip  On/Off */
#define ETH_VLAN_FILTER_OFFLOAD 0x0002 /**< VLAN Filter On/Off */
#define ETH_VLAN_EXTEND_OFFLOAD 0x0004 /**< VLAN Extend On/Off */
#define ETH_QINQ_STRIP_OFFLOAD  0x0008 /**< QINQ Strip  On/Off */

/* Definitions used for mask VLAN setting */
#define ETH_VLAN_STRIP_MASK  0x0001 /**< VLAN Strip  setting mask */
#define ETH_VLAN_FILTER_MASK 0x0002 /**< VLAN Filter setting mask */
#define ETH_VLAN_EXTEND_MASK 0x0004 /**< VLAN Extend setting mask */
#define ETH_QINQ_STRIP_MASK  0x0008 /**< QINQ Strip  setting mask */
#define ETH_VLAN_ID_MAX      0x0FFF /**< VLAN ID is in lower 12 bits. */

/* Definitions used for receive MAC address */
#define ETH_NUM_RECEIVE_MAC_ADDR 128 /**< Maximum nb. of receive MAC addrs. */

/* Definitions used for unicast hash */
#define ETH_VMDQ_NUM_UC_HASH_ARRAY 128 /**< Maximum nb. of UC hash array. */

/* Definitions used for VMDQ pool rx mode setting */
#define ETH_VMDQ_ACCEPT_UNTAG     0x0001 /**< Accept untagged packets. */
#define ETH_VMDQ_ACCEPT_HASH_MC   0x0002 /**< Accept packets in multicast table. */
#define ETH_VMDQ_ACCEPT_HASH_UC   0x0004 /**< Accept packets in unicast table. */
#define ETH_VMDQ_ACCEPT_BROADCAST 0x0008 /**< Accept broadcast packets. */
#define ETH_VMDQ_ACCEPT_MULTICAST 0x0010 /**< Accept multicast packets. */

/** Maximum nb. of VLANs in VLAN mirroring. */
#define ETH_MIRROR_MAX_VLANS 64

#define ETH_MIRROR_VIRTUAL_POOL_UP   0x01 /**< Virtual Pool uplink Mirroring. */
#define ETH_MIRROR_UPLINK_PORT       0x02 /**< Uplink Port Mirroring. */
#define ETH_MIRROR_DOWNLINK_PORT     0x04 /**< Downlink Port Mirroring. */
#define ETH_MIRROR_VLAN              0x08 /**< VLAN Mirroring. */
#define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10 /**< Virtual Pool downlink Mirroring. */

/**
 * A structure used to configure VLAN traffic mirror of an Ethernet port.
 */
struct rte_eth_vlan_mirror {
	uint64_t vlan_mask; /**< Bitmap: which vlan_id[] entries are valid. */
	/** VLAN IDs to be mirrored. */
	uint16_t vlan_id[ETH_MIRROR_MAX_VLANS];
};
792 
797  uint8_t rule_type;
798  uint8_t dst_pool;
799  uint64_t pool_mask;
802 };
803 
811  uint64_t mask;
813  uint16_t reta[RTE_RETA_GROUP_SIZE];
815 };
816 
822  ETH_4_TCS = 4,
824 };
825 
835 };
836 
837 /* This structure may be extended in future. */
838 struct rte_eth_dcb_rx_conf {
839  enum rte_eth_nb_tcs nb_tcs;
841  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
842 };
843 
844 struct rte_eth_vmdq_dcb_tx_conf {
845  enum rte_eth_nb_pools nb_queue_pools;
847  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
848 };
849 
850 struct rte_eth_dcb_tx_conf {
851  enum rte_eth_nb_tcs nb_tcs;
853  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
854 };
855 
856 struct rte_eth_vmdq_tx_conf {
857  enum rte_eth_nb_pools nb_queue_pools;
858 };
859 
874  uint8_t default_pool;
875  uint8_t nb_pool_maps;
876  struct {
877  uint16_t vlan_id;
878  uint64_t pools;
882 };
883 
905  uint8_t default_pool;
907  uint8_t nb_pool_maps;
908  uint32_t rx_mode;
909  struct {
910  uint16_t vlan_id;
911  uint64_t pools;
913 };
914 
925  uint64_t offloads;
926 
927  uint16_t pvid;
928  __extension__
929  uint8_t hw_vlan_reject_tagged : 1,
936  uint64_t reserved_64s[2];
937  void *reserved_ptrs[2];
938 };
939 
945  uint16_t rx_free_thresh;
946  uint8_t rx_drop_en;
953  uint64_t offloads;
954 
955  uint64_t reserved_64s[2];
956  void *reserved_ptrs[2];
957 };
958 
964  uint16_t tx_rs_thresh;
965  uint16_t tx_free_thresh;
974  uint64_t offloads;
975 
976  uint64_t reserved_64s[2];
977  void *reserved_ptrs[2];
978 };
979 
988  uint16_t max_nb_queues;
990  uint16_t max_rx_2_tx;
992  uint16_t max_tx_2_rx;
993  uint16_t max_nb_desc;
994 };
995 
996 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
997 
1005  uint16_t port;
1006  uint16_t queue;
1007 };
1008 
1016  uint16_t peer_count;
1017  struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1018 };
1019 
1024  uint16_t nb_max;
1025  uint16_t nb_min;
1026  uint16_t nb_align;
1036  uint16_t nb_seg_max;
1037 
1049  uint16_t nb_mtu_seg_max;
1050 };
1051 
1060 };
1061 
1068  uint32_t high_water;
1069  uint32_t low_water;
1070  uint16_t pause_time;
1071  uint16_t send_xon;
1074  uint8_t autoneg;
1075 };
1076 
1084  uint8_t priority;
1085 };
1086 
1091  RTE_TUNNEL_TYPE_NONE = 0,
1092  RTE_TUNNEL_TYPE_VXLAN,
1093  RTE_TUNNEL_TYPE_GENEVE,
1094  RTE_TUNNEL_TYPE_TEREDO,
1095  RTE_TUNNEL_TYPE_NVGRE,
1096  RTE_TUNNEL_TYPE_IP_IN_GRE,
1097  RTE_L2_TUNNEL_TYPE_E_TAG,
1098  RTE_TUNNEL_TYPE_VXLAN_GPE,
1099  RTE_TUNNEL_TYPE_MAX,
1100 };
1101 
1102 /* Deprecated API file for rte_eth_dev_filter_* functions */
1103 #include "rte_eth_ctrl.h"
1104 
1113 };
1114 
1122 };
1123 
1135  uint8_t drop_queue;
1136  struct rte_eth_fdir_masks mask;
1139 };
1140 
1149  uint16_t udp_port;
1150  uint8_t prot_type;
1151 };
1152 
1158  uint32_t lsc:1;
1160  uint32_t rxq:1;
1162  uint32_t rmv:1;
1163 };
1164 
1171  uint32_t link_speeds;
1180  uint32_t lpbk_mode;
1185  struct {
1189  struct rte_eth_dcb_rx_conf dcb_rx_conf;
1193  } rx_adv_conf;
1194  union {
1195  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1197  struct rte_eth_dcb_tx_conf dcb_tx_conf;
1199  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1201  } tx_adv_conf;
1207 };
1208 
/** Rx offload capability/configuration flags (rte_eth_rxmode.offloads). */
#define DEV_RX_OFFLOAD_VLAN_STRIP       0x00000001
#define DEV_RX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_RX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_RX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_RX_OFFLOAD_TCP_LRO          0x00000010
#define DEV_RX_OFFLOAD_QINQ_STRIP       0x00000020
#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
#define DEV_RX_OFFLOAD_HEADER_SPLIT     0x00000100
#define DEV_RX_OFFLOAD_VLAN_FILTER      0x00000200
#define DEV_RX_OFFLOAD_VLAN_EXTEND      0x00000400
#define DEV_RX_OFFLOAD_JUMBO_FRAME      0x00000800
/* NOTE(review): 0x00001000 is intentionally unassigned (historical
 * CRC-strip flag) — do not reuse without checking upstream DPDK. */
#define DEV_RX_OFFLOAD_SCATTER          0x00002000
#define DEV_RX_OFFLOAD_TIMESTAMP        0x00004000
#define DEV_RX_OFFLOAD_SECURITY         0x00008000
#define DEV_RX_OFFLOAD_KEEP_CRC         0x00010000
#define DEV_RX_OFFLOAD_SCTP_CKSUM       0x00020000
#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
#define DEV_RX_OFFLOAD_RSS_HASH         0x00080000

/** All L3/L4 checksum offloads combined. */
#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
				 DEV_RX_OFFLOAD_UDP_CKSUM | \
				 DEV_RX_OFFLOAD_TCP_CKSUM)
/** All VLAN-related offloads combined. */
#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
			     DEV_RX_OFFLOAD_VLAN_FILTER | \
			     DEV_RX_OFFLOAD_VLAN_EXTEND | \
			     DEV_RX_OFFLOAD_QINQ_STRIP)

/*
 * If new Rx offload capabilities are defined, they also must be
 * mentioned in rte_rx_offload_names in rte_ethdev.c file.
 */
1244 
/** Tx offload capability/configuration flags (rte_eth_txmode.offloads). */
#define DEV_TX_OFFLOAD_VLAN_INSERT      0x00000001
#define DEV_TX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_TX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_TX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_TX_OFFLOAD_SCTP_CKSUM       0x00000010
#define DEV_TX_OFFLOAD_TCP_TSO          0x00000020
#define DEV_TX_OFFLOAD_UDP_TSO          0x00000040
#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
#define DEV_TX_OFFLOAD_QINQ_INSERT      0x00000100
#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200
#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400
#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800
#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000
#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
/** Multiple threads can invoke tx_burst() concurrently on the same queue. */
#define DEV_TX_OFFLOAD_MT_LOCKFREE      0x00004000

/** Device supports multi-segment mbufs on transmit. */
#define DEV_TX_OFFLOAD_MULTI_SEGS       0x00008000

/** Device supports optimization for fast release of mbufs. */
#define DEV_TX_OFFLOAD_MBUF_FAST_FREE   0x00010000

/** Device supports hardware-accelerated security processing on Tx. */
#define DEV_TX_OFFLOAD_SECURITY         0x00020000

/** Device supports generic UDP tunneled packet TSO. */
#define DEV_TX_OFFLOAD_UDP_TNL_TSO      0x00040000

/** Device supports generic IP tunneled packet TSO. */
#define DEV_TX_OFFLOAD_IP_TNL_TSO       0x00080000

/** Device supports outer UDP checksum. */
#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000

/** Device supports send-on-timestamp. */
#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000

/** Device supports Rx queue setup after device started. */
#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
/** Device supports Tx queue setup after device started. */
#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002

/*
 * If new Tx offload capabilities are defined, they also must be
 * mentioned in rte_tx_offload_names in rte_ethdev.c file.
 */

/*
 * Fallback default preferred Rx/Tx port parameters.
 * These are used if an application requests default parameters
 * but the PMD does not provide preferred values.
 */
#define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
#define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
#define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
#define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1312 
1319  uint16_t burst_size;
1320  uint16_t ring_size;
1321  uint16_t nb_queues;
1322 };
1323 
1328 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1329 
1334  const char *name;
1335  uint16_t domain_id;
1336  uint16_t port_id;
1344 };
1345 
1356  struct rte_device *device;
1357  const char *driver_name;
1358  unsigned int if_index;
1360  uint16_t min_mtu;
1361  uint16_t max_mtu;
1362  const uint32_t *dev_flags;
1363  uint32_t min_rx_bufsize;
1364  uint32_t max_rx_pktlen;
1367  uint16_t max_rx_queues;
1368  uint16_t max_tx_queues;
1369  uint32_t max_mac_addrs;
1370  uint32_t max_hash_mac_addrs;
1372  uint16_t max_vfs;
1373  uint16_t max_vmdq_pools;
1382  uint16_t reta_size;
1384  uint8_t hash_key_size;
1389  uint16_t vmdq_queue_base;
1390  uint16_t vmdq_queue_num;
1391  uint16_t vmdq_pool_base;
1394  uint32_t speed_capa;
1396  uint16_t nb_rx_queues;
1397  uint16_t nb_tx_queues;
1403  uint64_t dev_capa;
1409 
1410  uint64_t reserved_64s[2];
1411  void *reserved_ptrs[2];
1412 };
1413 
1419  struct rte_mempool *mp;
1421  uint8_t scattered_rx;
1422  uint16_t nb_desc;
1424 
1431  uint16_t nb_desc;
1433 
1434 /* Generic Burst mode flag definition, values can be ORed. */
1435 
1441 #define RTE_ETH_BURST_FLAG_PER_QUEUE (1ULL << 0)
1442 
1448  uint64_t flags;
1450 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1451  char info[RTE_ETH_BURST_MODE_INFO_SIZE];
1452 };
1453 
1455 #define RTE_ETH_XSTATS_NAME_SIZE 64
1456 
1467  uint64_t id;
1468  uint64_t value;
1469 };
1470 
1480 };
1481 
1482 #define ETH_DCB_NUM_TCS 8
1483 #define ETH_MAX_VMDQ_POOL 64
1484 
1491  struct {
1492  uint8_t base;
1493  uint8_t nb_queue;
1494  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1496  struct {
1497  uint8_t base;
1498  uint8_t nb_queue;
1499  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1500 };
1501 
1507  uint8_t nb_tcs;
1508  uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES];
1509  uint8_t tc_bws[ETH_DCB_NUM_TCS];
1512 };
1513 
/** A wildcard port id accepted by APIs that can operate on all ports. */
#define RTE_ETH_ALL RTE_MAX_ETHPORTS

/* Macros to check for valid port: log and bail out when port_id is bad. */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return retval; \
	} \
} while (0)

#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return; \
	} \
} while (0)
1530 
/* L2 tunnel configuration mask bits. */
#define ETH_L2_TUNNEL_ENABLE_MASK    0x00000001 /**< L2 tunnel enable. */
#define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002 /**< L2 tunnel insertion. */
#define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004 /**< L2 tunnel stripping. */
#define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008 /**< L2 tunnel forwarding. */
1543 
/**
 * Function type used for Rx packet-processing callbacks: receives the burst
 * already read from the queue (nb_pkts of up to max_pkts entries) and returns
 * the number of packets left in @p pkts after the callback ran.
 */
typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
	void *user_param);

/**
 * Function type used for Tx packet-processing callbacks: receives the burst
 * about to be transmitted and returns the number of packets to actually send.
 */
typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1592 
1603 };
1604 
/** SR-IOV state of an Ethernet device. */
struct rte_eth_dev_sriov {
	uint8_t active;          /**< SRIOV is active with 16, 32 or 64 pools. */
	uint8_t nb_q_per_pool;   /**< Rx queue number per pool. */
	uint16_t def_vmdq_idx;   /**< Default pool num used for PF. */
	uint16_t def_pool_q_idx; /**< Default pool queue start reg index. */
};
/** Accessor for the SR-IOV state stored in a device's shared data. */
#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1612 
#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN

/** Owner id meaning "no owner". */
#define RTE_ETH_DEV_NO_OWNER 0

#define RTE_ETH_MAX_OWNER_NAME_LEN 64

/** Identifies the entity (id + printable name) that owns an ethdev port. */
struct rte_eth_dev_owner {
	uint64_t id; /**< The owner unique identifier. */
	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< The owner name. */
};

/* Device flags (rte_eth_dev_data.dev_flags). */
/** Device supports close removing its data. */
#define RTE_ETH_DEV_CLOSE_REMOVE    0x0001
/** Device supports link state interrupt. */
#define RTE_ETH_DEV_INTR_LSC        0x0002
/** Device is a bonded slave. */
#define RTE_ETH_DEV_BONDED_SLAVE    0x0004
/** Device supports device removal interrupt. */
#define RTE_ETH_DEV_INTR_RMV        0x0008
/** Device is a port representor. */
#define RTE_ETH_DEV_REPRESENTOR     0x0010
/** Device does not support MAC change after started. */
#define RTE_ETH_DEV_NOLIVE_MAC_ADDR 0x0020
1639 
1651 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
1652  const uint64_t owner_id);
1653 
/** Iterate over all valid port ids owned by owner id @p o. */
#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
	for (p = rte_eth_find_next_owned_by(0, o); \
	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
	     p = rte_eth_find_next_owned_by(p + 1, o))
1661 
1670 uint16_t rte_eth_find_next(uint16_t port_id);
1671 
/** Iterate over all valid, unowned port ids. */
#define RTE_ETH_FOREACH_DEV(p) \
	RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
1677 
1692 __rte_experimental
1693 uint16_t
1694 rte_eth_find_next_of(uint16_t port_id_start,
1695  const struct rte_device *parent);
1696 
/** Iterate over all port ids belonging to the rte_device @p parent. */
#define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
	for (port_id = rte_eth_find_next_of(0, parent); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next_of(port_id + 1, parent))
1709 
1724 __rte_experimental
1725 uint16_t
1726 rte_eth_find_next_sibling(uint16_t port_id_start,
1727  uint16_t ref_port_id);
1728 
/** Iterate over all port ids sharing the same rte_device as @p ref_port_id
 *  (the reference port itself is included). */
#define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
	for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
1743 
1757 __rte_experimental
1758 int rte_eth_dev_owner_new(uint64_t *owner_id);
1759 
1773 __rte_experimental
1774 int rte_eth_dev_owner_set(const uint16_t port_id,
1775  const struct rte_eth_dev_owner *owner);
1776 
1790 __rte_experimental
1791 int rte_eth_dev_owner_unset(const uint16_t port_id,
1792  const uint64_t owner_id);
1793 
1805 __rte_experimental
1806 int rte_eth_dev_owner_delete(const uint64_t owner_id);
1807 
1821 __rte_experimental
1822 int rte_eth_dev_owner_get(const uint16_t port_id,
1823  struct rte_eth_dev_owner *owner);
1824 
1835 uint16_t rte_eth_dev_count_avail(void);
1836 
1845 uint16_t rte_eth_dev_count_total(void);
1846 
1858 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
1859 
1868 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
1869 
1878 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
1879 
1919 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
1920  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
1921 
1933 __rte_experimental
1934 int
1935 rte_eth_dev_is_removed(uint16_t port_id);
1936 
1986 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1987  uint16_t nb_rx_desc, unsigned int socket_id,
1988  const struct rte_eth_rxconf *rx_conf,
1989  struct rte_mempool *mb_pool);
1990 
2017 __rte_experimental
2019  (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2020  const struct rte_eth_hairpin_conf *conf);
2021 
2070 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2071  uint16_t nb_tx_desc, unsigned int socket_id,
2072  const struct rte_eth_txconf *tx_conf);
2073 
2098 __rte_experimental
2100  (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2101  const struct rte_eth_hairpin_conf *conf);
2102 
2113 int rte_eth_dev_socket_id(uint16_t port_id);
2114 
2124 int rte_eth_dev_is_valid_port(uint16_t port_id);
2125 
2142 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2143 
2159 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2160 
2177 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2178 
2194 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2195 
2215 int rte_eth_dev_start(uint16_t port_id);
2216 
2224 void rte_eth_dev_stop(uint16_t port_id);
2225 
2238 int rte_eth_dev_set_link_up(uint16_t port_id);
2239 
2249 int rte_eth_dev_set_link_down(uint16_t port_id);
2250 
2259 void rte_eth_dev_close(uint16_t port_id);
2260 
2298 int rte_eth_dev_reset(uint16_t port_id);
2299 
2311 int rte_eth_promiscuous_enable(uint16_t port_id);
2312 
2324 int rte_eth_promiscuous_disable(uint16_t port_id);
2325 
2336 int rte_eth_promiscuous_get(uint16_t port_id);
2337 
2349 int rte_eth_allmulticast_enable(uint16_t port_id);
2350 
2362 int rte_eth_allmulticast_disable(uint16_t port_id);
2363 
2374 int rte_eth_allmulticast_get(uint16_t port_id);
2375 
2391 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
2392 
2408 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
2409 
2427 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
2428 
2440 int rte_eth_stats_reset(uint16_t port_id);
2441 
2471 int rte_eth_xstats_get_names(uint16_t port_id,
2472  struct rte_eth_xstat_name *xstats_names,
2473  unsigned int size);
2474 
2504 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2505  unsigned int n);
2506 
2529 int
2530 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2531  struct rte_eth_xstat_name *xstats_names, unsigned int size,
2532  uint64_t *ids);
2533 
2557 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2558  uint64_t *values, unsigned int size);
2559 
2578 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2579  uint64_t *id);
2580 
2593 int rte_eth_xstats_reset(uint16_t port_id);
2594 
2612 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
2613  uint16_t tx_queue_id, uint8_t stat_idx);
2614 
2632 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
2633  uint16_t rx_queue_id,
2634  uint8_t stat_idx);
2635 
2648 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
2649 
2692 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
2693 
2713 int rte_eth_dev_fw_version_get(uint16_t port_id,
2714  char *fw_version, size_t fw_size);
2715 
2754 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2755  uint32_t *ptypes, int num);
2789 __rte_experimental
2790 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
2791  uint32_t *set_ptypes, unsigned int num);
2792 
2804 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
2805 
2823 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
2824 
2844 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
2845 
2864 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2865  int on);
2866 
2883 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2884  enum rte_vlan_type vlan_type,
2885  uint16_t tag_type);
2886 
2904 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
2905 
2919 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
2920 
2935 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
2936 
/**
 * Callback invoked by rte_eth_tx_buffer_flush() for packets that could not
 * be sent; @p unsent holds the @p count un-transmitted mbufs.
 */
typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
	void *userdata);
2939 
2945  buffer_tx_error_fn error_callback;
2946  void *error_userdata;
2947  uint16_t size;
2948  uint16_t length;
2949  struct rte_mbuf *pkts[];
2951 };
2952 
2959 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
2960  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
2961 
2972 int
2973 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
2974 
2999 int
3001  buffer_tx_error_fn callback, void *userdata);
3002 
3025 void
3026 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3027  void *userdata);
3028 
3052 void
3053 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3054  void *userdata);
3055 
3081 int
3082 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3083 
3099 };
3100 
3108  uint64_t metadata;
3122 };
3123 
3142 };
3143 
3144 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
3145  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
3165 int rte_eth_dev_callback_register(uint16_t port_id,
3166  enum rte_eth_event_type event,
3167  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3168 
3187 int rte_eth_dev_callback_unregister(uint16_t port_id,
3188  enum rte_eth_event_type event,
3189  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3190 
3212 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
3213 
3234 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
3235 
3253 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
3254 
3276 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3277  int epfd, int op, void *data);
3278 
3296 __rte_experimental
3297 int
3298 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
3299 
3313 int rte_eth_led_on(uint16_t port_id);
3314 
3328 int rte_eth_led_off(uint16_t port_id);
3329 
3343 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
3344  struct rte_eth_fc_conf *fc_conf);
3345 
3360 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
3361  struct rte_eth_fc_conf *fc_conf);
3362 
3378 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3379  struct rte_eth_pfc_conf *pfc_conf);
3380 
3399 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
3400  uint32_t pool);
3401 
3415 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
3416  struct rte_ether_addr *mac_addr);
3417 
3431 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
3432  struct rte_ether_addr *mac_addr);
3433 
3450 int rte_eth_dev_rss_reta_update(uint16_t port_id,
3451  struct rte_eth_rss_reta_entry64 *reta_conf,
3452  uint16_t reta_size);
3453 
3471 int rte_eth_dev_rss_reta_query(uint16_t port_id,
3472  struct rte_eth_rss_reta_entry64 *reta_conf,
3473  uint16_t reta_size);
3474 
3494 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3495  uint8_t on);
3496 
3515 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
3516 
3539 int rte_eth_mirror_rule_set(uint16_t port_id,
3540  struct rte_eth_mirror_conf *mirror_conf,
3541  uint8_t rule_id,
3542  uint8_t on);
3543 
3558 int rte_eth_mirror_rule_reset(uint16_t port_id,
3559  uint8_t rule_id);
3560 
3577 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3578  uint16_t tx_rate);
3579 
3594 int rte_eth_dev_rss_hash_update(uint16_t port_id,
3595  struct rte_eth_rss_conf *rss_conf);
3596 
3611 int
3612 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3613  struct rte_eth_rss_conf *rss_conf);
3614 
3633 int
3634 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3635  struct rte_eth_udp_tunnel *tunnel_udp);
3636 
3656 int
3657 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3658  struct rte_eth_udp_tunnel *tunnel_udp);
3659 
3674 __rte_deprecated
3675 int rte_eth_dev_filter_supported(uint16_t port_id,
3676  enum rte_filter_type filter_type);
3677 
3697 __rte_deprecated
3698 int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3699  enum rte_filter_op filter_op, void *arg);
3700 
3714 int rte_eth_dev_get_dcb_info(uint16_t port_id,
3715  struct rte_eth_dcb_info *dcb_info);
3716 
3717 struct rte_eth_rxtx_callback;
3718 
3743 const struct rte_eth_rxtx_callback *
3744 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3745  rte_rx_callback_fn fn, void *user_param);
3746 
3772 const struct rte_eth_rxtx_callback *
3773 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3774  rte_rx_callback_fn fn, void *user_param);
3775 
3800 const struct rte_eth_rxtx_callback *
3801 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3802  rte_tx_callback_fn fn, void *user_param);
3803 
3834 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3835  const struct rte_eth_rxtx_callback *user_cb);
3836 
3867 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3868  const struct rte_eth_rxtx_callback *user_cb);
3869 
3888 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3889  struct rte_eth_rxq_info *qinfo);
3890 
3909 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3910  struct rte_eth_txq_info *qinfo);
3911 
3929 __rte_experimental
3930 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
3931  struct rte_eth_burst_mode *mode);
3932 
3950 __rte_experimental
3951 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
3952  struct rte_eth_burst_mode *mode);
3953 
3971 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
3972 
3985 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
3986 
4002 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4003 
4019 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4020 
4038 __rte_experimental
4039 int
4040 rte_eth_dev_get_module_info(uint16_t port_id,
4041  struct rte_eth_dev_module_info *modinfo);
4042 
4061 __rte_experimental
4062 int
4063 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4064  struct rte_dev_eeprom_info *info);
4065 
4084 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4085  struct rte_ether_addr *mc_addr_set,
4086  uint32_t nb_mc_addr);
4087 
4100 int rte_eth_timesync_enable(uint16_t port_id);
4101 
4114 int rte_eth_timesync_disable(uint16_t port_id);
4115 
4134 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
4135  struct timespec *timestamp, uint32_t flags);
4136 
4152 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4153  struct timespec *timestamp);
4154 
4172 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
4173 
4188 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
4189 
4208 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
4209 
4254 __rte_experimental
4255 int
4256 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
4257 
4273 int
4274 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4275  struct rte_eth_l2_tunnel_conf *l2_tunnel);
4276 
4301 int
4302 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4303  struct rte_eth_l2_tunnel_conf *l2_tunnel,
4304  uint32_t mask,
4305  uint8_t en);
4306 
4322 int
4323 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
4324 
4339 int
4340 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
4341 
4358 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4359  uint16_t *nb_rx_desc,
4360  uint16_t *nb_tx_desc);
4361 
4376 int
4377 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
4378 
4388 void *
4389 rte_eth_dev_get_sec_ctx(uint16_t port_id);
4390 
4405 __rte_experimental
4406 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
4407  struct rte_eth_hairpin_cap *cap);
4408 
4409 #include <rte_ethdev_core.h>
4410 
/**
 * Retrieve a burst of up to @p nb_pkts received packets from the given
 * RX queue of @p port_id into the @p rx_pkts array.
 *
 * Fast-path inline: port/queue validation happens only when the library
 * is built with RTE_LIBRTE_ETHDEV_DEBUG. Returns the number of packets
 * actually stored in rx_pkts (0 on validation failure in debug builds).
 */
static inline uint16_t
rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
	/* Address computation only; port_id is not validated here in
	 * non-debug builds — callers must pass a valid port. */
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	uint16_t nb_rx;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	/* Debug-only sanity checks: valid port, PMD RX handler present,
	 * queue index within the configured range. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
		return 0;
	}
#endif
	/* Dispatch to the PMD's receive function for this queue. */
	nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
		rx_pkts, nb_pkts);

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	/* Run the chain of post-RX callbacks, if any were registered with
	 * rte_eth_add_rx_callback(). Each callback may filter/modify the
	 * burst and returns the (possibly reduced) packet count. */
	if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
		struct rte_eth_rxtx_callback *cb =
			dev->post_rx_burst_cbs[queue_id];

		do {
			nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
				nb_pkts, cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
	return nb_rx;
}
4528 
4541 static inline int
4542 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
4543 {
4544  struct rte_eth_dev *dev;
4545 
4546  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4547  dev = &rte_eth_devices[port_id];
4548  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
4549  if (queue_id >= dev->data->nb_rx_queues)
4550  return -EINVAL;
4551 
4552  return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
4553 }
4554 
4570 static inline int
4571 rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
4572 {
4573  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4574  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4575  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
4576  return (*dev->dev_ops->rx_descriptor_done)( \
4577  dev->data->rx_queues[queue_id], offset);
4578 }
4579 
4580 #define RTE_ETH_RX_DESC_AVAIL 0
4581 #define RTE_ETH_RX_DESC_DONE 1
4582 #define RTE_ETH_RX_DESC_UNAVAIL 2
4617 static inline int
4618 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
4619  uint16_t offset)
4620 {
4621  struct rte_eth_dev *dev;
4622  void *rxq;
4623 
4624 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4625  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4626 #endif
4627  dev = &rte_eth_devices[port_id];
4628 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4629  if (queue_id >= dev->data->nb_rx_queues)
4630  return -ENODEV;
4631 #endif
4632  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
4633  rxq = dev->data->rx_queues[queue_id];
4634 
4635  return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
4636 }
4637 
4638 #define RTE_ETH_TX_DESC_FULL 0
4639 #define RTE_ETH_TX_DESC_DONE 1
4640 #define RTE_ETH_TX_DESC_UNAVAIL 2
4675 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
4676  uint16_t queue_id, uint16_t offset)
4677 {
4678  struct rte_eth_dev *dev;
4679  void *txq;
4680 
4681 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4682  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4683 #endif
4684  dev = &rte_eth_devices[port_id];
4685 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4686  if (queue_id >= dev->data->nb_tx_queues)
4687  return -ENODEV;
4688 #endif
4689  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
4690  txq = dev->data->tx_queues[queue_id];
4691 
4692  return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
4693 }
4694 
/**
 * Send a burst of up to @p nb_pkts packets from @p tx_pkts on the given
 * TX queue of @p port_id.
 *
 * Fast-path inline: port/queue validation happens only when built with
 * RTE_LIBRTE_ETHDEV_DEBUG. Returns the number of packets actually
 * accepted by the driver (0 on validation failure in debug builds).
 */
static inline uint16_t
rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	/* Address computation only; port_id is not validated here in
	 * non-debug builds — callers must pass a valid port. */
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	/* Debug-only sanity checks: valid port, PMD TX handler present,
	 * queue index within the configured range. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
		return 0;
	}
#endif

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	/* Run the chain of pre-TX callbacks, if any were registered with
	 * rte_eth_add_tx_callback(). Each callback may filter/modify the
	 * burst before it reaches the PMD and returns the (possibly
	 * reduced) packet count. */
	struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];

	if (unlikely(cb != NULL)) {
		do {
			nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
				cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts,
		nb_pkts);
	/* Dispatch to the PMD's transmit function for this queue. */
	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
}
4793 
4847 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
4848 
4849 static inline uint16_t
4850 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
4851  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4852 {
4853  struct rte_eth_dev *dev;
4854 
4855 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4856  if (!rte_eth_dev_is_valid_port(port_id)) {
4857  RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
4858  rte_errno = EINVAL;
4859  return 0;
4860  }
4861 #endif
4862 
4863  dev = &rte_eth_devices[port_id];
4864 
4865 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4866  if (queue_id >= dev->data->nb_tx_queues) {
4867  RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4868  rte_errno = EINVAL;
4869  return 0;
4870  }
4871 #endif
4872 
4873  if (!dev->tx_pkt_prepare)
4874  return nb_pkts;
4875 
4876  return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
4877  tx_pkts, nb_pkts);
4878 }
4879 
4880 #else
4881 
4882 /*
4883  * Native NOOP operation for compilation targets which doesn't require any
4884  * preparations steps, and functional NOOP may introduce unnecessary performance
4885  * drop.
4886  *
4887  * Generally this is not a good idea to turn it on globally and didn't should
4888  * be used if behavior of tx_preparation can change.
4889  */
4890 
/* NOOP variant compiled when RTE_ETHDEV_TX_PREPARE_NOOP is defined:
 * reports every packet as already prepared without touching them.
 * Only safe when no PMD in use actually requires TX preparation. */
static inline uint16_t
rte_eth_tx_prepare(__rte_unused uint16_t port_id,
	__rte_unused uint16_t queue_id,
	__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	return nb_pkts;
}
4898 
4899 #endif
4900 
4923 static inline uint16_t
4924 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
4925  struct rte_eth_dev_tx_buffer *buffer)
4926 {
4927  uint16_t sent;
4928  uint16_t to_send = buffer->length;
4929 
4930  if (to_send == 0)
4931  return 0;
4932 
4933  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
4934 
4935  buffer->length = 0;
4936 
4937  /* All packets sent, or to be dealt with by callback below */
4938  if (unlikely(sent != to_send))
4939  buffer->error_callback(&buffer->pkts[sent],
4940  (uint16_t)(to_send - sent),
4941  buffer->error_userdata);
4942 
4943  return sent;
4944 }
4945 
4976 static __rte_always_inline uint16_t
4977 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
4978  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
4979 {
4980  buffer->pkts[buffer->length++] = tx_pkt;
4981  if (buffer->length < buffer->size)
4982  return 0;
4983 
4984  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
4985 }
4986 
4987 #ifdef __cplusplus
4988 }
4989 #endif
4990 
4991 #endif /* _RTE_ETHDEV_H_ */
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1396
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf
Definition: rte_ethdev.h:1195
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_promiscuous_disable(uint16_t port_id)
#define ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:740
struct rte_eth_conf::@139 rx_adv_conf
struct rte_eth_dev_portconf default_rxportconf
Definition: rte_ethdev.h:1399
struct rte_fdir_conf fdir_conf
Definition: rte_ethdev.h:1205
int rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel)
uint32_t rmv
Definition: rte_ethdev.h:1162
#define __rte_always_inline
Definition: rte_common.h:202
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:964
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
uint16_t nb_desc
Definition: rte_ethdev.h:1431
char info[RTE_ETH_BURST_MODE_INFO_SIZE]
Definition: rte_ethdev.h:1451
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1374
uint16_t reta[RTE_RETA_GROUP_SIZE]
Definition: rte_ethdev.h:813
const uint32_t * dev_flags
Definition: rte_ethdev.h:1362
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
struct rte_eth_vmdq_dcb_conf::@137 pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
void rte_eth_dev_stop(uint16_t port_id)
__rte_experimental int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
#define __rte_cache_min_aligned
Definition: rte_common.h:379
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:4850
int rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
rte_eth_nb_tcs
Definition: rte_ethdev.h:821
struct rte_eth_vmdq_tx_conf vmdq_tx_conf
Definition: rte_ethdev.h:1199
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:264
__rte_experimental int rte_eth_dev_owner_new(uint64_t *owner_id)
__rte_experimental int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
struct rte_eth_thresh rx_thresh
Definition: rte_ethdev.h:944
uint16_t rte_eth_find_next(uint16_t port_id)
__rte_experimental int rte_eth_dev_is_removed(uint16_t port_id)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
int rte_eth_led_off(uint16_t port_id)
rte_fdir_pballoc_type
Definition: rte_ethdev.h:1109
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:4618
__rte_experimental uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
uint64_t imissed
Definition: rte_ethdev.h:249
static int rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:4571
uint32_t low_water
Definition: rte_ethdev.h:1069
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
uint32_t max_rx_pkt_len
Definition: rte_ethdev.h:400
uint8_t rss_key_len
Definition: rte_ethdev.h:453
void rte_eth_dev_close(uint16_t port_id)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint16_t tx_rate)
uint8_t hthresh
Definition: rte_ethdev.h:332
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1378
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
uint16_t reta_size
Definition: rte_ethdev.h:1382
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
void * userdata
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint32_t lpbk_mode
Definition: rte_ethdev.h:1180
enum rte_fdir_status_mode status
Definition: rte_ethdev.h:1133
enum rte_eth_tx_mq_mode mq_mode
Definition: rte_ethdev.h:919
#define rte_errno
Definition: rte_errno.h:29
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint32_t link_speeds
Definition: rte_ethdev.h:1171
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1380
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:380
rte_eth_fc_mode
Definition: rte_ethdev.h:1055
int rte_eth_mirror_rule_set(uint16_t port_id, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on)
uint8_t enable_default_pool
Definition: rte_ethdev.h:873
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:1049
#define __rte_unused
Definition: rte_common.h:104
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:262
uint16_t max_rx_2_tx
Definition: rte_ethdev.h:990
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
uint64_t opackets
Definition: rte_ethdev.h:246
rte_filter_op
Definition: rte_eth_ctrl.h:46
struct rte_eth_rss_conf rss_conf
Definition: rte_ethdev.h:1186
uint8_t hash_key_size
Definition: rte_ethdev.h:1384
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
uint16_t split_hdr_size
Definition: rte_ethdev.h:403
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
struct rte_mempool * mp
Definition: rte_ethdev.h:1419
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1204
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:260
const char * name
Definition: rte_ethdev.h:1334
struct rte_eth_switch_info switch_info
Definition: rte_ethdev.h:1408
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
uint32_t rxq
Definition: rte_ethdev.h:1160
int rte_eth_dev_set_link_up(uint16_t port_id)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
struct rte_eth_thresh tx_thresh
Definition: rte_ethdev.h:963
struct rte_eth_desc_lim rx_desc_lim
Definition: rte_ethdev.h:1392
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:880
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
__rte_experimental int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
__rte_experimental int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1390
uint8_t rx_deferred_start
Definition: rte_ethdev.h:947
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:2949
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:3144
struct rte_eth_rxmode rxmode
Definition: rte_ethdev.h:1178
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:256
uint32_t high_water
Definition: rte_ethdev.h:1068
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:872
union rte_eth_conf::@140 tx_adv_conf
struct rte_eth_txconf conf
Definition: rte_ethdev.h:1430
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
struct rte_intr_conf intr_conf
Definition: rte_ethdev.h:1206
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1455
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
struct rte_eth_desc_lim tx_desc_lim
Definition: rte_ethdev.h:1393
int rte_eth_timesync_disable(uint16_t port_id)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint64_t offloads
Definition: rte_ethdev.h:953
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
uint16_t send_xon
Definition: rte_ethdev.h:1071
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_allmulticast_enable(uint16_t port_id)
struct rte_eth_txconf default_txconf
Definition: rte_ethdev.h:1388
#define unlikely(x)
uint64_t ibytes
Definition: rte_ethdev.h:247
uint64_t offloads
Definition: rte_ethdev.h:974
uint16_t max_nb_queues
Definition: rte_ethdev.h:988
uint64_t oerrors
Definition: rte_ethdev.h:254
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
struct rte_eth_dcb_rx_conf dcb_rx_conf
Definition: rte_ethdev.h:1189
struct rte_eth_vmdq_rx_conf vmdq_rx_conf
Definition: rte_ethdev.h:1191
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
uint16_t max_mtu
Definition: rte_ethdev.h:1361
uint64_t offloads
Definition: rte_ethdev.h:409
enum rte_eth_rx_mq_mode mq_mode
Definition: rte_ethdev.h:399
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:903
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
uint16_t tx_free_thresh
Definition: rte_ethdev.h:965
uint16_t nb_desc
Definition: rte_ethdev.h:1422
__rte_experimental uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:4494
int rte_eth_allmulticast_disable(uint16_t port_id)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_start(uint16_t port_id)
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1373
uint8_t scattered_rx
Definition: rte_ethdev.h:1421
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
struct rte_eth_dcb_tx_conf dcb_tx_conf
Definition: rte_ethdev.h:1197
uint64_t offloads
Definition: rte_ethdev.h:925
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1391
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1376
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:936
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:258
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
uint16_t min_mtu
Definition: rte_ethdev.h:1360
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:1590
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1411
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
uint64_t obytes
Definition: rte_ethdev.h:248
uint8_t enable_loop_back
Definition: rte_ethdev.h:906
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:411
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
struct rte_eth_rxconf conf
Definition: rte_ethdev.h:1420
#define ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:741
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
uint16_t max_tx_queues
Definition: rte_ethdev.h:1368
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_xstats_reset(uint16_t port_id)
rte_eth_dev_state
Definition: rte_ethdev.h:1596
uint16_t rx_free_thresh
Definition: rte_ethdev.h:945
struct rte_eth_vlan_mirror vlan
Definition: rte_ethdev.h:801
uint64_t dev_capa
Definition: rte_ethdev.h:1403
uint16_t max_tx_2_rx
Definition: rte_ethdev.h:992
uint64_t ierrors
Definition: rte_ethdev.h:253
uint16_t max_nb_desc
Definition: rte_ethdev.h:993
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
__extension__ uint8_t hw_vlan_insert_pvid
Definition: rte_ethdev.h:930
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1386
uint16_t rte_eth_dev_count_total(void)
int rte_eth_promiscuous_enable(uint16_t port_id)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
struct rte_eth_dev_portconf default_txportconf
Definition: rte_ethdev.h:1401
rte_vlan_type
Definition: rte_ethdev.h:419
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
uint16_t nb_seg_max
Definition: rte_ethdev.h:1036
uint64_t ipackets
Definition: rte_ethdev.h:245
uint16_t max_vfs
Definition: rte_ethdev.h:1372
uint16_t pause_time
Definition: rte_ethdev.h:1070
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
rte_filter_type
Definition: rte_eth_ctrl.h:28
int rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel, uint32_t mask, uint8_t en)
uint64_t rx_nombuf
Definition: rte_ethdev.h:255
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:4977
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:930
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
#define ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:339
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1389
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:3088
rte_eth_nb_pools
Definition: rte_ethdev.h:830
#define ETH_MIRROR_MAX_VLANS
Definition: rte_ethdev.h:776
uint16_t nb_align
Definition: rte_ethdev.h:1026
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:347
__rte_experimental int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
const char * driver_name
Definition: rte_ethdev.h:1357
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:4542
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_socket_id(uint16_t port_id)
uint8_t enable_default_pool
Definition: rte_ethdev.h:904
__extension__ struct rte_eth_link __rte_aligned(8)
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1397
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
struct rte_eth_fdir_flex_conf flex_conf
Definition: rte_ethdev.h:1137
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:976
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1369
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
rte_eth_tunnel_type
Definition: rte_ethdev.h:1090
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint64_t value
Definition: rte_ethdev.h:1468
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition: rte_ethdev.h:566
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
enum rte_fdir_pballoc_type pballoc
Definition: rte_ethdev.h:1132
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
__rte_experimental int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
struct rte_eth_vmdq_rx_conf::@138 pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1364
__rte_experimental int rte_eth_dev_owner_delete(const uint64_t owner_id)
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
uint64_t rss_hf
Definition: rte_ethdev.h:454
void * reserved_ptrs[2]
Definition: rte_ethdev.h:977
uint64_t id
Definition: rte_ethdev.h:1467
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1410
__extension__ uint8_t hw_vlan_reject_tagged
Definition: rte_ethdev.h:930
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:955
enum rte_fdir_mode mode
Definition: rte_ethdev.h:1131
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
unsigned int if_index
Definition: rte_ethdev.h:1358
__rte_deprecated int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type, enum rte_filter_op filter_op, void *arg)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:1073
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:1566
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:1072
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
rte_fdir_mode
Definition: rte_eth_ctrl.h:603
struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf
Definition: rte_ethdev.h:1187
uint8_t * rss_key
Definition: rte_ethdev.h:452
rte_fdir_status_mode
Definition: rte_ethdev.h:1118
__rte_deprecated int rte_eth_dev_filter_supported(uint16_t port_id, enum rte_filter_type filter_type)
void * reserved_ptrs[2]
Definition: rte_ethdev.h:937
uint8_t tx_deferred_start
Definition: rte_ethdev.h:968
uint8_t wthresh
Definition: rte_ethdev.h:333
void * reserved_ptrs[2]
Definition: rte_ethdev.h:956
uint16_t max_rx_queues
Definition: rte_ethdev.h:1367
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:402
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
struct rte_eth_fc_conf fc
Definition: rte_ethdev.h:1083
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
struct rte_eth_txmode txmode
Definition: rte_ethdev.h:1179
int rte_eth_allmulticast_get(uint16_t port_id)
uint8_t rx_drop_en
Definition: rte_ethdev.h:946
int rte_eth_dev_is_valid_port(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1366
void * reserved_ptrs[2]
Definition: rte_ethdev.h:412
int rte_eth_timesync_enable(uint16_t port_id)
uint8_t pthresh
Definition: rte_ethdev.h:331
struct rte_eth_rxconf default_rxconf
Definition: rte_ethdev.h:1387
uint32_t speed_capa
Definition: rte_ethdev.h:1394
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:4762
uint8_t drop_queue
Definition: rte_ethdev.h:1135
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1363
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
__rte_experimental int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
uint32_t lsc
Definition: rte_ethdev.h:1158
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:4924
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
rte_eth_event_type
Definition: rte_ethdev.h:3127