DPDK  24.11.0-rc1
rte_ethdev.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
148 #include <stdint.h>
149 
150 /* Use this macro to check if LRO API is supported */
151 #define RTE_ETHDEV_HAS_LRO_SUPPORT
152 
153 /* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
154 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
155 #define RTE_ETHDEV_DEBUG_RX
156 #define RTE_ETHDEV_DEBUG_TX
157 #endif
158 
159 #include <rte_cman.h>
160 #include <rte_compat.h>
161 #include <rte_log.h>
162 #include <rte_interrupts.h>
163 #include <rte_dev.h>
164 #include <rte_devargs.h>
165 #include <rte_bitops.h>
166 #include <rte_errno.h>
167 #include <rte_common.h>
168 #include <rte_config.h>
169 #include <rte_power_intrinsics.h>
170 
171 #include "rte_ethdev_trace_fp.h"
172 #include "rte_dev_info.h"
173 
174 extern int rte_eth_dev_logtype;
175 #define RTE_LOGTYPE_ETHDEV rte_eth_dev_logtype
176 
177 #define RTE_ETHDEV_LOG_LINE(level, ...) \
178  RTE_LOG_LINE(level, ETHDEV, "" __VA_ARGS__)
179 
180 struct rte_mbuf;
181 
198 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
199 
214 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
215 
228 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
229 
243 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
244  for (rte_eth_iterator_init(iter, devargs), \
245  id = rte_eth_iterator_next(iter); \
246  id != RTE_MAX_ETHPORTS; \
247  id = rte_eth_iterator_next(iter))
248 
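/*
 * Usage sketch (not part of the original header, assumes <stdio.h>):
 * iterate over all ports matching a devargs string with
 * RTE_ETH_FOREACH_MATCHING_DEV. The devargs value is illustrative.
 */
static void
example_list_matching_ports(void)
{
	struct rte_dev_iterator iterator;
	uint16_t port_id;

	RTE_ETH_FOREACH_MATCHING_DEV(port_id, "class=eth", &iterator)
		printf("matched port %u\n", port_id);
	/* On normal completion the iterator is released by the last
	 * rte_eth_iterator_next(); call rte_eth_iterator_cleanup() only
	 * when breaking out of the loop early. */
}
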
259  uint64_t ipackets;
260  uint64_t opackets;
261  uint64_t ibytes;
262  uint64_t obytes;
267  uint64_t imissed;
268  uint64_t ierrors;
269  uint64_t oerrors;
270  uint64_t rx_nombuf;
271  /* Queue stats are limited to max 256 queues */
273  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
275  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
277  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
279  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
281  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
282 };
283 
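/*
 * Minimal sketch (not part of the original header, assumes <stdio.h>
 * and <inttypes.h>): read the basic counters above for a started port
 * via rte_eth_stats_get(), declared later in this file.
 */
static int
example_print_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;
	int ret;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret != 0)
		return ret;
	printf("port %u: rx=%" PRIu64 " tx=%" PRIu64
	       " missed=%" PRIu64 " no-mbuf=%" PRIu64 "\n",
	       port_id, stats.ipackets, stats.opackets,
	       stats.imissed, stats.rx_nombuf);
	return 0;
}
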
287 #define RTE_ETH_LINK_SPEED_AUTONEG 0
288 #define RTE_ETH_LINK_SPEED_FIXED RTE_BIT32(0)
289 #define RTE_ETH_LINK_SPEED_10M_HD RTE_BIT32(1)
290 #define RTE_ETH_LINK_SPEED_10M RTE_BIT32(2)
291 #define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)
292 #define RTE_ETH_LINK_SPEED_100M RTE_BIT32(4)
293 #define RTE_ETH_LINK_SPEED_1G RTE_BIT32(5)
294 #define RTE_ETH_LINK_SPEED_2_5G RTE_BIT32(6)
295 #define RTE_ETH_LINK_SPEED_5G RTE_BIT32(7)
296 #define RTE_ETH_LINK_SPEED_10G RTE_BIT32(8)
297 #define RTE_ETH_LINK_SPEED_20G RTE_BIT32(9)
298 #define RTE_ETH_LINK_SPEED_25G RTE_BIT32(10)
299 #define RTE_ETH_LINK_SPEED_40G RTE_BIT32(11)
300 #define RTE_ETH_LINK_SPEED_50G RTE_BIT32(12)
301 #define RTE_ETH_LINK_SPEED_56G RTE_BIT32(13)
302 #define RTE_ETH_LINK_SPEED_100G RTE_BIT32(14)
303 #define RTE_ETH_LINK_SPEED_200G RTE_BIT32(15)
304 #define RTE_ETH_LINK_SPEED_400G RTE_BIT32(16)
310 #define RTE_ETH_SPEED_NUM_NONE 0
311 #define RTE_ETH_SPEED_NUM_10M 10
312 #define RTE_ETH_SPEED_NUM_100M 100
313 #define RTE_ETH_SPEED_NUM_1G 1000
314 #define RTE_ETH_SPEED_NUM_2_5G 2500
315 #define RTE_ETH_SPEED_NUM_5G 5000
316 #define RTE_ETH_SPEED_NUM_10G 10000
317 #define RTE_ETH_SPEED_NUM_20G 20000
318 #define RTE_ETH_SPEED_NUM_25G 25000
319 #define RTE_ETH_SPEED_NUM_40G 40000
320 #define RTE_ETH_SPEED_NUM_50G 50000
321 #define RTE_ETH_SPEED_NUM_56G 56000
322 #define RTE_ETH_SPEED_NUM_100G 100000
323 #define RTE_ETH_SPEED_NUM_200G 200000
324 #define RTE_ETH_SPEED_NUM_400G 400000
325 #define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX
331 struct rte_eth_link {
332  union {
333  RTE_ATOMIC(uint64_t) val64;
334  __extension__
335  struct {
336  uint32_t link_speed;
337  uint16_t link_duplex : 1;
338  uint16_t link_autoneg : 1;
339  uint16_t link_status : 1;
340  };
341  };
342 };
343 
347 #define RTE_ETH_LINK_HALF_DUPLEX 0
348 #define RTE_ETH_LINK_FULL_DUPLEX 1
349 #define RTE_ETH_LINK_DOWN 0
350 #define RTE_ETH_LINK_UP 1
351 #define RTE_ETH_LINK_FIXED 0
352 #define RTE_ETH_LINK_AUTONEG 1
353 #define RTE_ETH_LINK_MAX_STR_LEN 40
357 #define RTE_ETH_SPEED_LANES_TO_CAPA(x) RTE_BIT32(x)
358 
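/*
 * Sketch (not part of the original header, assumes <stdio.h>): query
 * the link with rte_eth_link_get() and format it with
 * rte_eth_link_to_str(), both declared later in this file.
 */
static void
example_report_link(uint16_t port_id)
{
	struct rte_eth_link link;
	char text[RTE_ETH_LINK_MAX_STR_LEN];

	if (rte_eth_link_get(port_id, &link) == 0) {
		rte_eth_link_to_str(text, sizeof(text), &link);
		printf("port %u: %s\n", port_id, text);
	}
}
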
361  uint32_t speed;
362  uint32_t capa;
363 };
364 
370  uint8_t pthresh;
371  uint8_t hthresh;
372  uint8_t wthresh;
373 };
374 
378 #define RTE_ETH_MQ_RX_RSS_FLAG RTE_BIT32(0)
379 #define RTE_ETH_MQ_RX_DCB_FLAG RTE_BIT32(1)
380 #define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2)
387 enum rte_eth_rx_mq_mode {
388 
390 
397 
407 };
408 
418 };
419 
425  enum rte_eth_rx_mq_mode mq_mode;
426  uint32_t mtu;
434  uint64_t offloads;
435 
436  uint64_t reserved_64s[2];
437  void *reserved_ptrs[2];
438 };
439 
445  RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
448  RTE_ETH_VLAN_TYPE_MAX,
449 };
450 
456  uint64_t ids[64];
457 };
458 
480  RTE_ETH_HASH_FUNCTION_MAX,
481 };
482 
483 #define RTE_ETH_HASH_ALGO_TO_CAPA(x) RTE_BIT32(x)
484 #define RTE_ETH_HASH_ALGO_CAPA_MASK(x) RTE_BIT32(RTE_ETH_HASH_FUNCTION_ ## x)
485 
503  uint8_t *rss_key;
504  uint8_t rss_key_len;
509  uint64_t rss_hf;
510  enum rte_eth_hash_function algorithm;
511 };
512 
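/*
 * Sketch (not part of the original header): request RSS over IPv4/IPv6
 * headers while keeping the driver's current hash key, using
 * rte_eth_dev_rss_hash_update() declared later in this file.
 */
static int
example_enable_rss(uint16_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,		/* keep the current key */
		.rss_hf = RTE_ETH_RSS_IP,	/* defined below */
		.algorithm = RTE_ETH_HASH_FUNCTION_DEFAULT,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
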
513 /*
514  * A packet can be identified by hardware as one of several flow types.
515  * Different NIC hardware may support different flow types.
516  * The NIC hardware identifies the flow type using the deepest protocol
517  * it can parse, and exclusively. For example, a packet identified as
518  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP' will not match any other flow type,
519  * even though it is also an IPv4 packet.
520  */
521 #define RTE_ETH_FLOW_UNKNOWN 0
522 #define RTE_ETH_FLOW_RAW 1
523 #define RTE_ETH_FLOW_IPV4 2
524 #define RTE_ETH_FLOW_FRAG_IPV4 3
525 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
526 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
527 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
528 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
529 #define RTE_ETH_FLOW_IPV6 8
530 #define RTE_ETH_FLOW_FRAG_IPV6 9
531 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
532 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
533 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
534 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
535 #define RTE_ETH_FLOW_L2_PAYLOAD 14
536 #define RTE_ETH_FLOW_IPV6_EX 15
537 #define RTE_ETH_FLOW_IPV6_TCP_EX 16
538 #define RTE_ETH_FLOW_IPV6_UDP_EX 17
539 
540 #define RTE_ETH_FLOW_PORT 18
541 #define RTE_ETH_FLOW_VXLAN 19
542 #define RTE_ETH_FLOW_GENEVE 20
543 #define RTE_ETH_FLOW_NVGRE 21
544 #define RTE_ETH_FLOW_VXLAN_GPE 22
545 #define RTE_ETH_FLOW_GTPU 23
546 #define RTE_ETH_FLOW_MAX 24
547 
548 /*
549  * The macros below define RSS offload types; they can be used to
550  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
551  */
552 #define RTE_ETH_RSS_IPV4 RTE_BIT64(2)
553 #define RTE_ETH_RSS_FRAG_IPV4 RTE_BIT64(3)
554 #define RTE_ETH_RSS_NONFRAG_IPV4_TCP RTE_BIT64(4)
555 #define RTE_ETH_RSS_NONFRAG_IPV4_UDP RTE_BIT64(5)
556 #define RTE_ETH_RSS_NONFRAG_IPV4_SCTP RTE_BIT64(6)
557 #define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
558 #define RTE_ETH_RSS_IPV6 RTE_BIT64(8)
559 #define RTE_ETH_RSS_FRAG_IPV6 RTE_BIT64(9)
560 #define RTE_ETH_RSS_NONFRAG_IPV6_TCP RTE_BIT64(10)
561 #define RTE_ETH_RSS_NONFRAG_IPV6_UDP RTE_BIT64(11)
562 #define RTE_ETH_RSS_NONFRAG_IPV6_SCTP RTE_BIT64(12)
563 #define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
564 #define RTE_ETH_RSS_L2_PAYLOAD RTE_BIT64(14)
565 #define RTE_ETH_RSS_IPV6_EX RTE_BIT64(15)
566 #define RTE_ETH_RSS_IPV6_TCP_EX RTE_BIT64(16)
567 #define RTE_ETH_RSS_IPV6_UDP_EX RTE_BIT64(17)
568 #define RTE_ETH_RSS_PORT RTE_BIT64(18)
569 #define RTE_ETH_RSS_VXLAN RTE_BIT64(19)
570 #define RTE_ETH_RSS_GENEVE RTE_BIT64(20)
571 #define RTE_ETH_RSS_NVGRE RTE_BIT64(21)
572 #define RTE_ETH_RSS_GTPU RTE_BIT64(23)
573 #define RTE_ETH_RSS_ETH RTE_BIT64(24)
574 #define RTE_ETH_RSS_S_VLAN RTE_BIT64(25)
575 #define RTE_ETH_RSS_C_VLAN RTE_BIT64(26)
576 #define RTE_ETH_RSS_ESP RTE_BIT64(27)
577 #define RTE_ETH_RSS_AH RTE_BIT64(28)
578 #define RTE_ETH_RSS_L2TPV3 RTE_BIT64(29)
579 #define RTE_ETH_RSS_PFCP RTE_BIT64(30)
580 #define RTE_ETH_RSS_PPPOE RTE_BIT64(31)
581 #define RTE_ETH_RSS_ECPRI RTE_BIT64(32)
582 #define RTE_ETH_RSS_MPLS RTE_BIT64(33)
583 #define RTE_ETH_RSS_IPV4_CHKSUM RTE_BIT64(34)
584 
597 #define RTE_ETH_RSS_L4_CHKSUM RTE_BIT64(35)
598 
599 #define RTE_ETH_RSS_L2TPV2 RTE_BIT64(36)
600 #define RTE_ETH_RSS_IPV6_FLOW_LABEL RTE_BIT64(37)
601 
602 /*
603  * The following macros can be combined with the RTE_ETH_RSS_* values
604  * above for more specific input set selection. These bits are defined
605  * starting from the high end of the 64 bits.
606  * Note: an RTE_ETH_RSS_* value used without SRC/DST_ONLY means that
607  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
608  * the same level are used simultaneously, the effect is the same as
609  * using neither of them.
610  */
611 #define RTE_ETH_RSS_L3_SRC_ONLY RTE_BIT64(63)
612 #define RTE_ETH_RSS_L3_DST_ONLY RTE_BIT64(62)
613 #define RTE_ETH_RSS_L4_SRC_ONLY RTE_BIT64(61)
614 #define RTE_ETH_RSS_L4_DST_ONLY RTE_BIT64(60)
615 #define RTE_ETH_RSS_L2_SRC_ONLY RTE_BIT64(59)
616 #define RTE_ETH_RSS_L2_DST_ONLY RTE_BIT64(58)
617 
618 /*
619  * Select only an IPv6 address prefix as the RSS input set, per
620  * https://tools.ietf.org/html/rfc6052
621  * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
622  * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
623  */
624 #define RTE_ETH_RSS_L3_PRE32 RTE_BIT64(57)
625 #define RTE_ETH_RSS_L3_PRE40 RTE_BIT64(56)
626 #define RTE_ETH_RSS_L3_PRE48 RTE_BIT64(55)
627 #define RTE_ETH_RSS_L3_PRE56 RTE_BIT64(54)
628 #define RTE_ETH_RSS_L3_PRE64 RTE_BIT64(53)
629 #define RTE_ETH_RSS_L3_PRE96 RTE_BIT64(52)
630 
631 /*
632  * Combine the following macros with the layers above to choose
633  * inner layers, outer layers, or both for RSS computation.
634  * Bits 50 and 51 are reserved for this.
635  */
636 
644 #define RTE_ETH_RSS_LEVEL_PMD_DEFAULT (UINT64_C(0) << 50)
645 
650 #define RTE_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
651 
656 #define RTE_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
657 #define RTE_ETH_RSS_LEVEL_MASK (UINT64_C(3) << 50)
658 
659 #define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
660 
671 static inline uint64_t
672 rte_eth_rss_hf_refine(uint64_t rss_hf)
673 {
674  if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
675  rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
676 
677  if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
678  rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
679 
680  return rss_hf;
681 }
682 
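/*
 * Illustration (not part of the original header): per the note above,
 * combining SRC_ONLY and DST_ONLY of the same level cancels both bits.
 */
static void
example_rss_hf_refine(void)
{
	uint64_t hf = RTE_ETH_RSS_IPV4 |
		RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY;

	hf = rte_eth_rss_hf_refine(hf);
	/* hf == RTE_ETH_RSS_IPV4: both *_ONLY bits were cleared. */
	(void)hf;
}
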
683 #define RTE_ETH_RSS_IPV6_PRE32 ( \
684  RTE_ETH_RSS_IPV6 | \
685  RTE_ETH_RSS_L3_PRE32)
686 
687 #define RTE_ETH_RSS_IPV6_PRE40 ( \
688  RTE_ETH_RSS_IPV6 | \
689  RTE_ETH_RSS_L3_PRE40)
690 
691 #define RTE_ETH_RSS_IPV6_PRE48 ( \
692  RTE_ETH_RSS_IPV6 | \
693  RTE_ETH_RSS_L3_PRE48)
694 
695 #define RTE_ETH_RSS_IPV6_PRE56 ( \
696  RTE_ETH_RSS_IPV6 | \
697  RTE_ETH_RSS_L3_PRE56)
698 
699 #define RTE_ETH_RSS_IPV6_PRE64 ( \
700  RTE_ETH_RSS_IPV6 | \
701  RTE_ETH_RSS_L3_PRE64)
702 
703 #define RTE_ETH_RSS_IPV6_PRE96 ( \
704  RTE_ETH_RSS_IPV6 | \
705  RTE_ETH_RSS_L3_PRE96)
706 
707 #define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
708  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
709  RTE_ETH_RSS_L3_PRE32)
710 
711 #define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
712  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
713  RTE_ETH_RSS_L3_PRE40)
714 
715 #define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
716  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
717  RTE_ETH_RSS_L3_PRE48)
718 
719 #define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
720  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
721  RTE_ETH_RSS_L3_PRE56)
722 
723 #define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
724  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
725  RTE_ETH_RSS_L3_PRE64)
726 
727 #define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
728  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
729  RTE_ETH_RSS_L3_PRE96)
730 
731 #define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
732  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
733  RTE_ETH_RSS_L3_PRE32)
734 
735 #define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
736  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
737  RTE_ETH_RSS_L3_PRE40)
738 
739 #define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
740  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
741  RTE_ETH_RSS_L3_PRE48)
742 
743 #define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
744  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
745  RTE_ETH_RSS_L3_PRE56)
746 
747 #define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
748  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
749  RTE_ETH_RSS_L3_PRE64)
750 
751 #define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
752  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
753  RTE_ETH_RSS_L3_PRE96)
754 
755 #define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
756  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
757  RTE_ETH_RSS_L3_PRE32)
758 
759 #define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
760  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
761  RTE_ETH_RSS_L3_PRE40)
762 
763 #define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
764  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
765  RTE_ETH_RSS_L3_PRE48)
766 
767 #define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
768  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
769  RTE_ETH_RSS_L3_PRE56)
770 
771 #define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
772  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
773  RTE_ETH_RSS_L3_PRE64)
774 
775 #define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
776  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
777  RTE_ETH_RSS_L3_PRE96)
778 
779 #define RTE_ETH_RSS_IP ( \
780  RTE_ETH_RSS_IPV4 | \
781  RTE_ETH_RSS_FRAG_IPV4 | \
782  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
783  RTE_ETH_RSS_IPV6 | \
784  RTE_ETH_RSS_FRAG_IPV6 | \
785  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
786  RTE_ETH_RSS_IPV6_EX)
787 
788 #define RTE_ETH_RSS_UDP ( \
789  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
790  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
791  RTE_ETH_RSS_IPV6_UDP_EX)
792 
793 #define RTE_ETH_RSS_TCP ( \
794  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
795  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
796  RTE_ETH_RSS_IPV6_TCP_EX)
797 
798 #define RTE_ETH_RSS_SCTP ( \
799  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
800  RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
801 
802 #define RTE_ETH_RSS_TUNNEL ( \
803  RTE_ETH_RSS_VXLAN | \
804  RTE_ETH_RSS_GENEVE | \
805  RTE_ETH_RSS_NVGRE)
806 
807 #define RTE_ETH_RSS_VLAN ( \
808  RTE_ETH_RSS_S_VLAN | \
809  RTE_ETH_RSS_C_VLAN)
810 
812 #define RTE_ETH_RSS_PROTO_MASK ( \
813  RTE_ETH_RSS_IPV4 | \
814  RTE_ETH_RSS_FRAG_IPV4 | \
815  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
816  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
817  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
818  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
819  RTE_ETH_RSS_IPV6 | \
820  RTE_ETH_RSS_FRAG_IPV6 | \
821  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
822  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
823  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
824  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
825  RTE_ETH_RSS_L2_PAYLOAD | \
826  RTE_ETH_RSS_IPV6_EX | \
827  RTE_ETH_RSS_IPV6_TCP_EX | \
828  RTE_ETH_RSS_IPV6_UDP_EX | \
829  RTE_ETH_RSS_PORT | \
830  RTE_ETH_RSS_VXLAN | \
831  RTE_ETH_RSS_GENEVE | \
832  RTE_ETH_RSS_NVGRE | \
833  RTE_ETH_RSS_MPLS)
834 
835 /*
836  * Definitions used for redirection table entry size.
837  * Some RSS RETA sizes may not be supported by some drivers; check the
838  * documentation or the description of the relevant functions for details.
839  */
840 #define RTE_ETH_RSS_RETA_SIZE_64 64
841 #define RTE_ETH_RSS_RETA_SIZE_128 128
842 #define RTE_ETH_RSS_RETA_SIZE_256 256
843 #define RTE_ETH_RSS_RETA_SIZE_512 512
844 #define RTE_ETH_RETA_GROUP_SIZE 64
845 
847 #define RTE_ETH_VMDQ_MAX_VLAN_FILTERS 64
848 #define RTE_ETH_DCB_NUM_USER_PRIORITIES 8
849 #define RTE_ETH_VMDQ_DCB_NUM_QUEUES 128
850 #define RTE_ETH_DCB_NUM_QUEUES 128
854 #define RTE_ETH_DCB_PG_SUPPORT RTE_BIT32(0)
855 #define RTE_ETH_DCB_PFC_SUPPORT RTE_BIT32(1)
859 #define RTE_ETH_VLAN_STRIP_OFFLOAD 0x0001
860 #define RTE_ETH_VLAN_FILTER_OFFLOAD 0x0002
861 #define RTE_ETH_VLAN_EXTEND_OFFLOAD 0x0004
862 #define RTE_ETH_QINQ_STRIP_OFFLOAD 0x0008
864 #define RTE_ETH_VLAN_STRIP_MASK 0x0001
865 #define RTE_ETH_VLAN_FILTER_MASK 0x0002
866 #define RTE_ETH_VLAN_EXTEND_MASK 0x0004
867 #define RTE_ETH_QINQ_STRIP_MASK 0x0008
868 #define RTE_ETH_VLAN_ID_MAX 0x0FFF
871 /* Definitions used for receive MAC address */
872 #define RTE_ETH_NUM_RECEIVE_MAC_ADDR 128
874 /* Definitions used for unicast hash */
875 #define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128
881 #define RTE_ETH_VMDQ_ACCEPT_UNTAG RTE_BIT32(0)
882 
883 #define RTE_ETH_VMDQ_ACCEPT_HASH_MC RTE_BIT32(1)
884 
885 #define RTE_ETH_VMDQ_ACCEPT_HASH_UC RTE_BIT32(2)
886 
887 #define RTE_ETH_VMDQ_ACCEPT_BROADCAST RTE_BIT32(3)
888 
889 #define RTE_ETH_VMDQ_ACCEPT_MULTICAST RTE_BIT32(4)
890 
900  uint64_t mask;
902  uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
903 };
904 
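/*
 * Sketch (not part of the original header, assumes <string.h> and a
 * non-zero nb_queues): spread a 128-entry redirection table evenly
 * across the Rx queues with rte_eth_dev_rss_reta_update(), declared
 * later in this file. A real application should take reta_size from
 * struct rte_eth_dev_info.
 */
static int
example_fill_reta(uint16_t port_id, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64
		reta_conf[RTE_ETH_RSS_RETA_SIZE_128 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
		uint16_t g = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t e = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[g].mask |= RTE_BIT64(e);
		reta_conf[g].reta[e] = i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
			RTE_ETH_RSS_RETA_SIZE_128);
}
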
912 };
913 
923 };
924 
925 /* This structure may be extended in the future. */
926 struct rte_eth_dcb_rx_conf {
927  enum rte_eth_nb_tcs nb_tcs;
929  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
930 };
931 
932 struct rte_eth_vmdq_dcb_tx_conf {
933  enum rte_eth_nb_pools nb_queue_pools;
935  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
936 };
937 
938 struct rte_eth_dcb_tx_conf {
939  enum rte_eth_nb_tcs nb_tcs;
941  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
942 };
943 
944 struct rte_eth_vmdq_tx_conf {
945  enum rte_eth_nb_pools nb_queue_pools;
946 };
947 
960  enum rte_eth_nb_pools nb_queue_pools;
962  uint8_t default_pool;
963  uint8_t nb_pool_maps;
964  struct {
965  uint16_t vlan_id;
966  uint64_t pools;
967  } pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS];
970 };
971 
991  enum rte_eth_nb_pools nb_queue_pools;
993  uint8_t default_pool;
995  uint8_t nb_pool_maps;
996  uint32_t rx_mode;
997  struct {
998  uint16_t vlan_id;
999  uint64_t pools;
1000  } pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS];
1001 };
1002 
1007  enum rte_eth_tx_mq_mode mq_mode;
1013  uint64_t offloads;
1014 
1015  uint16_t pvid;
1016  __extension__
1017  uint8_t
1018  hw_vlan_reject_tagged : 1,
1022  hw_vlan_insert_pvid : 1;
1023 
1024  uint64_t reserved_64s[2];
1025  void *reserved_ptrs[2];
1026 };
1027 
1089  struct rte_mempool *mp;
1090  uint16_t length;
1091  uint16_t offset;
1103  uint32_t proto_hdr;
1104 };
1105 
1113  /* The settings for buffer split offload. */
1114  struct rte_eth_rxseg_split split;
1115  /* Other feature settings should be added here. */
1116 };
1117 
1122  struct rte_eth_thresh rx_thresh;
1123  uint16_t rx_free_thresh;
1124  uint8_t rx_drop_en;
1126  uint16_t rx_nseg;
1133  uint16_t share_group;
1134  uint16_t share_qid;
1140  uint64_t offloads;
1149 
1170  uint16_t rx_nmempool;
1172  uint64_t reserved_64s[2];
1173  void *reserved_ptrs[2];
1174 };
1175 
1180  struct rte_eth_thresh tx_thresh;
1181  uint16_t tx_rs_thresh;
1182  uint16_t tx_free_thresh;
1191  uint64_t offloads;
1192 
1193  uint64_t reserved_64s[2];
1194  void *reserved_ptrs[2];
1195 };
1196 
1209 
1214  uint32_t rte_memory:1;
1215 
1216  uint32_t reserved:30;
1217 };
1218 
1227  uint16_t max_nb_queues;
1229  uint16_t max_rx_2_tx;
1231  uint16_t max_tx_2_rx;
1232  uint16_t max_nb_desc;
1235 };
1236 
1237 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
1238 
1246  uint16_t port;
1247  uint16_t queue;
1248 };
1249 
1257  uint32_t peer_count:16;
1268  uint32_t tx_explicit:1;
1269 
1281  uint32_t manual_bind:1;
1282 
1295 
1307  uint32_t use_rte_memory:1;
1308 
1319  uint32_t force_memory:1;
1320 
1321  uint32_t reserved:11;
1323  struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1324 };
1325 
1330  uint16_t nb_max;
1331  uint16_t nb_min;
1332  uint16_t nb_align;
1342  uint16_t nb_seg_max;
1343 
1355  uint16_t nb_mtu_seg_max;
1356 };
1357 
1366 };
1367 
1374  uint32_t high_water;
1375  uint32_t low_water;
1376  uint16_t pause_time;
1377  uint16_t send_xon;
1378  enum rte_eth_fc_mode mode;
1380  uint8_t autoneg;
1381 };
1382 
1389  struct rte_eth_fc_conf fc;
1390  uint8_t priority;
1391 };
1392 
1403  uint8_t tc_max;
1405  enum rte_eth_fc_mode mode_capa;
1406 };
1407 
1426  enum rte_eth_fc_mode mode;
1428  struct {
1429  uint16_t tx_qid;
1433  uint8_t tc;
1434  } rx_pause; /* Valid when (mode == FC_RX_PAUSE || mode == FC_FULL) */
1435 
1436  struct {
1437  uint16_t pause_time;
1438  uint16_t rx_qid;
1442  uint8_t tc;
1443  } tx_pause; /* Valid when (mode == FC_TX_PAUSE || mode == FC_FULL) */
1444 };
1445 
1451  RTE_ETH_TUNNEL_TYPE_NONE = 0,
1452  RTE_ETH_TUNNEL_TYPE_VXLAN,
1453  RTE_ETH_TUNNEL_TYPE_GENEVE,
1454  RTE_ETH_TUNNEL_TYPE_TEREDO,
1455  RTE_ETH_TUNNEL_TYPE_NVGRE,
1456  RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1457  RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1458  RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1459  RTE_ETH_TUNNEL_TYPE_ECPRI,
1460  RTE_ETH_TUNNEL_TYPE_MAX,
1461 };
1462 
1463 /* Deprecated API file for rte_eth_dev_filter_* functions */
1464 #include "rte_eth_ctrl.h"
1465 
1476  uint16_t udp_port;
1477  uint8_t prot_type;
1478 };
1479 
1485  uint32_t lsc:1;
1487  uint32_t rxq:1;
1489  uint32_t rmv:1;
1490 };
1491 
1492 #define rte_intr_conf rte_eth_intr_conf
1493 
1500  uint32_t link_speeds;
1507  struct rte_eth_rxmode rxmode;
1508  struct rte_eth_txmode txmode;
1509  uint32_t lpbk_mode;
1514  struct {
1515  struct rte_eth_rss_conf rss_conf;
1517  struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
1519  struct rte_eth_dcb_rx_conf dcb_rx_conf;
1521  struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
1522  } rx_adv_conf;
1523  union {
1525  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1527  struct rte_eth_dcb_tx_conf dcb_tx_conf;
1529  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1530  } tx_adv_conf;
1534  struct rte_eth_intr_conf intr_conf;
1535 };
1536 
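/*
 * Sketch of the usual bring-up order (not part of the original header;
 * the descriptor counts are illustrative): a zeroed rte_eth_conf gives
 * default settings; queue setup and start are declared later in this
 * file. Mempool creation is out of scope here.
 */
static int
example_port_init(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = {0};
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;
	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL, mb_pool);
	if (ret != 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL);
	if (ret != 0)
		return ret;
	return rte_eth_dev_start(port_id);
}
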
1540 #define RTE_ETH_RX_OFFLOAD_VLAN_STRIP RTE_BIT64(0)
1541 #define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1542 #define RTE_ETH_RX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1543 #define RTE_ETH_RX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1544 #define RTE_ETH_RX_OFFLOAD_TCP_LRO RTE_BIT64(4)
1545 #define RTE_ETH_RX_OFFLOAD_QINQ_STRIP RTE_BIT64(5)
1546 #define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1547 #define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP RTE_BIT64(7)
1548 #define RTE_ETH_RX_OFFLOAD_VLAN_FILTER RTE_BIT64(9)
1549 #define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND RTE_BIT64(10)
1550 #define RTE_ETH_RX_OFFLOAD_SCATTER RTE_BIT64(13)
1551 
1556 #define RTE_ETH_RX_OFFLOAD_TIMESTAMP RTE_BIT64(14)
1557 #define RTE_ETH_RX_OFFLOAD_SECURITY RTE_BIT64(15)
1558 #define RTE_ETH_RX_OFFLOAD_KEEP_CRC RTE_BIT64(16)
1559 #define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM RTE_BIT64(17)
1560 #define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(18)
1561 #define RTE_ETH_RX_OFFLOAD_RSS_HASH RTE_BIT64(19)
1562 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT RTE_BIT64(20)
1563 
1564 #define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1565  RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1566  RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1567 #define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1568  RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1569  RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1570  RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1571 
1572 /*
1573  * If new Rx offload capabilities are defined, they must also be
1574  * mentioned in rte_rx_offload_names in the rte_ethdev.c file.
1575  */
1576 
1580 #define RTE_ETH_TX_OFFLOAD_VLAN_INSERT RTE_BIT64(0)
1581 #define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1582 #define RTE_ETH_TX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1583 #define RTE_ETH_TX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1584 #define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM RTE_BIT64(4)
1585 #define RTE_ETH_TX_OFFLOAD_TCP_TSO RTE_BIT64(5)
1586 #define RTE_ETH_TX_OFFLOAD_UDP_TSO RTE_BIT64(6)
1587 #define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)
1588 #define RTE_ETH_TX_OFFLOAD_QINQ_INSERT RTE_BIT64(8)
1589 #define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO RTE_BIT64(9)
1590 #define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO RTE_BIT64(10)
1591 #define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO RTE_BIT64(11)
1592 #define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO RTE_BIT64(12)
1593 #define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT RTE_BIT64(13)
1594 
1598 #define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE RTE_BIT64(14)
1599 
1600 #define RTE_ETH_TX_OFFLOAD_MULTI_SEGS RTE_BIT64(15)
1601 
1606 #define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE RTE_BIT64(16)
1607 #define RTE_ETH_TX_OFFLOAD_SECURITY RTE_BIT64(17)
1608 
1613 #define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO RTE_BIT64(18)
1614 
1619 #define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO RTE_BIT64(19)
1620 
1621 #define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(20)
1622 
1627 #define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1628 /*
1629  * If new Tx offload capabilities are defined, they must also be
1630  * mentioned in rte_tx_offload_names in the rte_ethdev.c file.
1631  */
1632 
1637 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1638 
1639 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1640 
1649 #define RTE_ETH_DEV_CAPA_RXQ_SHARE RTE_BIT64(2)
1650 
1651 #define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP RTE_BIT64(3)
1652 
1653 #define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1654 
1656 /*
1657  * Fallback default preferred Rx/Tx port parameters.
1658  * These are used if an application requests default parameters
1659  * but the PMD does not provide preferred values.
1660  */
1661 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1662 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1663 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1664 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1665 
1672  uint16_t burst_size;
1673  uint16_t ring_size;
1674  uint16_t nb_queues;
1675 };
1676 
1681 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1682 
1687  const char *name;
1688  uint16_t domain_id;
1696  uint16_t port_id;
1702  uint16_t rx_domain;
1703 };
1704 
1712  __extension__
1713  uint32_t multi_pools:1;
1714  uint32_t offset_allowed:1;
1715  uint32_t offset_align_log2:4;
1716  uint16_t max_nseg;
1717  uint16_t reserved;
1718 };
1719 
1732 };
1733 
1754 };
1755 
1762  struct rte_device *device;
1763  const char *driver_name;
1764  unsigned int if_index;
1766  uint16_t min_mtu;
1767  uint16_t max_mtu;
1768  const uint32_t *dev_flags;
1770  uint32_t min_rx_bufsize;
1777  uint32_t max_rx_bufsize;
1778  uint32_t max_rx_pktlen;
1781  uint16_t max_rx_queues;
1782  uint16_t max_tx_queues;
1783  uint32_t max_mac_addrs;
1786  uint16_t max_vfs;
1787  uint16_t max_vmdq_pools;
1788  struct rte_eth_rxseg_capa rx_seg_capa;
1798  uint16_t reta_size;
1799  uint8_t hash_key_size;
1800  uint32_t rss_algo_capa;
1803  struct rte_eth_rxconf default_rxconf;
1804  struct rte_eth_txconf default_txconf;
1805  uint16_t vmdq_queue_base;
1806  uint16_t vmdq_queue_num;
1807  uint16_t vmdq_pool_base;
1808  struct rte_eth_desc_lim rx_desc_lim;
1809  struct rte_eth_desc_lim tx_desc_lim;
1810  uint32_t speed_capa;
1812  uint16_t nb_rx_queues;
1813  uint16_t nb_tx_queues;
1822  struct rte_eth_dev_portconf default_rxportconf;
1824  struct rte_eth_dev_portconf default_txportconf;
1826  uint64_t dev_capa;
1831  struct rte_eth_switch_info switch_info;
1833  enum rte_eth_err_handle_mode err_handle_mode;
1834 
1835  uint64_t reserved_64s[2];
1836  void *reserved_ptrs[2];
1837 };
1838 
1840 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1841 #define RTE_ETH_QUEUE_STATE_STARTED 1
1842 #define RTE_ETH_QUEUE_STATE_HAIRPIN 2
1849 struct __rte_cache_min_aligned rte_eth_rxq_info {
1850  struct rte_mempool *mp;
1851  struct rte_eth_rxconf conf;
1852  uint8_t scattered_rx;
1853  uint8_t queue_state;
1854  uint16_t nb_desc;
1855  uint16_t rx_buf_size;
1862  uint8_t avail_thresh;
1863 };
1864 
1870  struct rte_eth_txconf conf;
1871  uint16_t nb_desc;
1872  uint8_t queue_state;
1873 };
1874 
1884  struct rte_mbuf **mbuf_ring;
1885  struct rte_mempool *mp;
1886  uint16_t *refill_head;
1887  uint16_t *receive_tail;
1888  uint16_t mbuf_ring_size;
1897 };
1898 
1899 /* Generic Burst mode flag definition, values can be ORed. */
1900 
1906 #define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1907 
1913  uint64_t flags;
1915 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1916  char info[RTE_ETH_BURST_MODE_INFO_SIZE];
1917 };
1918 
1920 #define RTE_ETH_XSTATS_NAME_SIZE 64
1921 
1932  uint64_t id;
1933  uint64_t value;
1934 };
1935 
1952 };
1953 
1954 #define RTE_ETH_DCB_NUM_TCS 8
1955 #define RTE_ETH_MAX_VMDQ_POOL 64
1956 
1963  struct {
1964  uint16_t base;
1965  uint16_t nb_queue;
1966  } tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1968  struct {
1969  uint16_t base;
1970  uint16_t nb_queue;
1971  } tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1972 };
1973 
1979  uint8_t nb_tcs;
1981  uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS];
1984 };
1985 
1996 };
1997 
1998 /* Translate from FEC mode to FEC capa */
1999 #define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)
2000 
2001 /* This macro indicates FEC capa mask */
2002 #define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)
2003 
2004 /* A structure used to get capabilities per link speed */
2005 struct rte_eth_fec_capa {
2006  uint32_t speed;
2007  uint32_t capa;
2008 };
2009 
2010 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
2011 
2012 /* Macros to check for valid port */
2013 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
2014  if (!rte_eth_dev_is_valid_port(port_id)) { \
2015  RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
2016  return retval; \
2017  } \
2018 } while (0)
2019 
2020 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
2021  if (!rte_eth_dev_is_valid_port(port_id)) { \
2022  RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
2023  return; \
2024  } \
2025 } while (0)
2026 
2049 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
2050  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
2051  void *user_param);
2052 
2073 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
2074  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
2075 
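/*
 * Sketch (not part of the original header): an Rx callback matching
 * rte_rx_callback_fn that counts received packets; register it with
 * rte_eth_add_rx_callback(), declared later in this file, passing a
 * uint64_t counter as user_param.
 */
static uint16_t
example_count_rx_cb(uint16_t port_id __rte_unused, uint16_t queue __rte_unused,
		struct rte_mbuf *pkts[] __rte_unused, uint16_t nb_pkts,
		uint16_t max_pkts __rte_unused, void *user_param)
{
	uint64_t *counter = user_param;

	*counter += nb_pkts;
	return nb_pkts;	/* keep all packets */
}
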
2086 };
2087 
2088 struct rte_eth_dev_sriov {
2089  uint8_t active;
2090  uint8_t nb_q_per_pool;
2091  uint16_t def_vmdq_idx;
2092  uint16_t def_pool_q_idx;
2093 };
2094 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
2095 
2096 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2097 
2098 #define RTE_ETH_DEV_NO_OWNER 0
2099 
2100 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
2101 
2102 struct rte_eth_dev_owner {
2103  uint64_t id;
2104  char name[RTE_ETH_MAX_OWNER_NAME_LEN];
2105 };
2106 
2112 #define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE RTE_BIT32(0)
2113 
2114 #define RTE_ETH_DEV_INTR_LSC RTE_BIT32(1)
2115 
2116 #define RTE_ETH_DEV_BONDING_MEMBER RTE_BIT32(2)
2117 
2118 #define RTE_ETH_DEV_INTR_RMV RTE_BIT32(3)
2119 
2120 #define RTE_ETH_DEV_REPRESENTOR RTE_BIT32(4)
2121 
2122 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR RTE_BIT32(5)
2123 
2127 #define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2128 
2141 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2142  const uint64_t owner_id);
2143 
2147 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
2148  for (p = rte_eth_find_next_owned_by(0, o); \
2149  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
2150  p = rte_eth_find_next_owned_by(p + 1, o))
2151 
2160 uint16_t rte_eth_find_next(uint16_t port_id);
2161 
2165 #define RTE_ETH_FOREACH_DEV(p) \
2166  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
2167 
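/* Sketch (not part of the original header): count the usable ports. */
static uint16_t
example_count_ports(void)
{
	uint16_t port_id, count = 0;

	RTE_ETH_FOREACH_DEV(port_id)
		count++;
	return count;	/* matches rte_eth_dev_count_avail() below */
}
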
2179 uint16_t
2180 rte_eth_find_next_of(uint16_t port_id_start,
2181  const struct rte_device *parent);
2182 
2191 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
2192  for (port_id = rte_eth_find_next_of(0, parent); \
2193  port_id < RTE_MAX_ETHPORTS; \
2194  port_id = rte_eth_find_next_of(port_id + 1, parent))
2195 
2207 uint16_t
2208 rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2209 
2220 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
2221  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
2222  port_id < RTE_MAX_ETHPORTS; \
2223  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2224 
2235 int rte_eth_dev_owner_new(uint64_t *owner_id);
2236 
2247 int rte_eth_dev_owner_set(const uint16_t port_id,
2248  const struct rte_eth_dev_owner *owner);
2249 
2260 int rte_eth_dev_owner_unset(const uint16_t port_id,
2261  const uint64_t owner_id);
2262 
2271 int rte_eth_dev_owner_delete(const uint64_t owner_id);
2272 
2283 int rte_eth_dev_owner_get(const uint16_t port_id,
2284  struct rte_eth_dev_owner *owner);
2285 
2296 uint16_t rte_eth_dev_count_avail(void);
2297 
2306 uint16_t rte_eth_dev_count_total(void);
2307 
2319 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2320 
2329 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2330 
2339 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2340 
2352 __rte_experimental
2353 const char *rte_eth_dev_capability_name(uint64_t capability);
2354 
2394 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2395  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2396 
2405 int
2406 rte_eth_dev_is_removed(uint16_t port_id);
2407 
2470 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2471  uint16_t nb_rx_desc, unsigned int socket_id,
2472  const struct rte_eth_rxconf *rx_conf,
2473  struct rte_mempool *mb_pool);
2474 
2502 __rte_experimental
2504  (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2505  const struct rte_eth_hairpin_conf *conf);
2506 
2555 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2556  uint16_t nb_tx_desc, unsigned int socket_id,
2557  const struct rte_eth_txconf *tx_conf);
2558 
2584 __rte_experimental
2586  (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2587  const struct rte_eth_hairpin_conf *conf);
2588 
2615 __rte_experimental
2616 int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2617  size_t len, uint32_t direction);
2618 
2641 __rte_experimental
2642 int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2643 
2668 __rte_experimental
2669 int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2670 
2686 __rte_experimental
2687 int rte_eth_dev_count_aggr_ports(uint16_t port_id);
2688 
2716 __rte_experimental
2717 int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
2718  uint8_t affinity);
2719 
2732 int rte_eth_dev_socket_id(uint16_t port_id);
2733 
2743 int rte_eth_dev_is_valid_port(uint16_t port_id);
2744 
2761 __rte_experimental
2762 int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2763 
2780 __rte_experimental
2781 int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2782 
2800 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2801 
2818 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2819 
2837 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2838 
2855 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2856 
2880 int rte_eth_dev_start(uint16_t port_id);
2881 
2895 int rte_eth_dev_stop(uint16_t port_id);
2896 
2909 int rte_eth_dev_set_link_up(uint16_t port_id);
2910 
2920 int rte_eth_dev_set_link_down(uint16_t port_id);
2921 
2932 int rte_eth_dev_close(uint16_t port_id);
2933 
2971 int rte_eth_dev_reset(uint16_t port_id);
2972 
2984 int rte_eth_promiscuous_enable(uint16_t port_id);
2985 
2997 int rte_eth_promiscuous_disable(uint16_t port_id);
2998 
3009 int rte_eth_promiscuous_get(uint16_t port_id);
3010 
3022 int rte_eth_allmulticast_enable(uint16_t port_id);
3023 
3035 int rte_eth_allmulticast_disable(uint16_t port_id);
3036 
3047 int rte_eth_allmulticast_get(uint16_t port_id);
3048 
3066 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
3067 
3082 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
3083 
3097 __rte_experimental
3098 const char *rte_eth_link_speed_to_str(uint32_t link_speed);
3099 
3118 __rte_experimental
3119 int rte_eth_link_to_str(char *str, size_t len,
3120  const struct rte_eth_link *eth_link);
3121 
3142 __rte_experimental
3143 int rte_eth_speed_lanes_get(uint16_t port_id, uint32_t *lanes);
3144 
3166 __rte_experimental
3167 int rte_eth_speed_lanes_set(uint16_t port_id, uint32_t speed_lanes);
3168 
3191 __rte_experimental
3192 int rte_eth_speed_lanes_get_capability(uint16_t port_id,
3193  struct rte_eth_speed_lanes_capa *speed_lanes_capa,
3194  unsigned int num);
3195 
3213 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
3214 
3226 int rte_eth_stats_reset(uint16_t port_id);
3227 
3257 int rte_eth_xstats_get_names(uint16_t port_id,
3258  struct rte_eth_xstat_name *xstats_names,
3259  unsigned int size);
3260 
3294 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3295  unsigned int n);
3296 
3321 int
3322 rte_eth_xstats_get_names_by_id(uint16_t port_id,
3323  struct rte_eth_xstat_name *xstats_names, unsigned int size,
3324  uint64_t *ids);
3325 
3350 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3351  uint64_t *values, unsigned int size);
3352 
3372 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3373  uint64_t *id);
3374 
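/*
 * Sketch (not part of the original header): read a single extended
 * statistic by name with the two calls above. The xstat name is
 * illustrative and driver-dependent. Returns negative on error,
 * otherwise the number of values read (1).
 */
static int
example_read_xstat(uint16_t port_id, uint64_t *value)
{
	uint64_t id;
	int ret;

	ret = rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id);
	if (ret != 0)
		return ret;
	return rte_eth_xstats_get_by_id(port_id, &id, value, 1);
}
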
3387 int rte_eth_xstats_reset(uint16_t port_id);
3388 
3407 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
3408  uint16_t tx_queue_id, uint8_t stat_idx);
3409 
3428 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
3429  uint16_t rx_queue_id,
3430  uint8_t stat_idx);
3431 
3445 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3446 
3467 __rte_experimental
3468 int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3469  unsigned int num);
3470 
3490 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
3491 
3507 __rte_experimental
3508 int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf);
3509 
3530 int rte_eth_dev_fw_version_get(uint16_t port_id,
3531  char *fw_version, size_t fw_size);
3532 
3572 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3573  uint32_t *ptypes, int num);
3604 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3605  uint32_t *set_ptypes, unsigned int num);
3606 
3619 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3620 
3638 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3639 
3659 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3660 
3679 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3680  int on);
3681 
3698 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3699  enum rte_vlan_type vlan_type,
3700  uint16_t tag_type);
3701 
3719 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3720 
3734 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3735 
3750 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3751 
3777 __rte_experimental
3778 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
3779  uint8_t avail_thresh);
3780 
3807 __rte_experimental
3808 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
3809  uint8_t *avail_thresh);
3810 
3811 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3812  void *userdata);
3813 
3819  buffer_tx_error_fn error_callback;
3820  void *error_userdata;
3821  uint16_t size;
3822  uint16_t length;
3824  struct rte_mbuf *pkts[];
3825 };
3826 
3833 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3834  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3835 
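/*
 * Sketch (not part of the original header, assumes <rte_malloc.h>):
 * allocate a Tx buffer sized with the macro above and initialize it
 * with rte_eth_tx_buffer_init(), declared just below.
 */
static struct rte_eth_dev_tx_buffer *
example_tx_buffer_alloc(uint16_t burst)
{
	struct rte_eth_dev_tx_buffer *buffer;

	buffer = rte_zmalloc("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(burst), 0);
	if (buffer != NULL && rte_eth_tx_buffer_init(buffer, burst) != 0) {
		rte_free(buffer);
		buffer = NULL;
	}
	return buffer;
}
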
3846 int
3847 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3848 
3873 int
3875  buffer_tx_error_fn callback, void *userdata);
3876 
3899 void
3900 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3901  void *userdata);
3902 
3926 void
3927 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3928  void *userdata);
3929 
3955 int
3956 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3957 
3990 };
3991 
4011 };
4012 
4031  uint64_t metadata;
4032 };
4033 
4071 };
4072 
4097  uint64_t metadata;
4098 };
4099 
4176 };
4177 
4191 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
4192  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
4193 
4211 int rte_eth_dev_callback_register(uint16_t port_id,
4212  enum rte_eth_event_type event,
4213  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4214 
4233 int rte_eth_dev_callback_unregister(uint16_t port_id,
4234  enum rte_eth_event_type event,
4235  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4236 
4258 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
4259 
4280 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
4281 
4299 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
4300 
4322 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4323  int epfd, int op, void *data);
4324 
4339 int
4340 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
4341 
4355 int rte_eth_led_on(uint16_t port_id);
4356 
4370 int rte_eth_led_off(uint16_t port_id);
4371 
4400 __rte_experimental
4401 int rte_eth_fec_get_capability(uint16_t port_id,
4402  struct rte_eth_fec_capa *speed_fec_capa,
4403  unsigned int num);
4404 
4425 __rte_experimental
4426 int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4427 
4451 __rte_experimental
4452 int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
4453 
4468 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4469  struct rte_eth_fc_conf *fc_conf);
4470 
4485 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4486  struct rte_eth_fc_conf *fc_conf);
4487 
4503 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
4504  struct rte_eth_pfc_conf *pfc_conf);
4505 
4524 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4525  uint32_t pool);
4526 
4544 __rte_experimental
4546  struct rte_eth_pfc_queue_info *pfc_queue_info);
4547 
4571 __rte_experimental
4573  struct rte_eth_pfc_queue_conf *pfc_queue_conf);
4574 
4589 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4590  struct rte_ether_addr *mac_addr);
4591 
4609 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
4610  struct rte_ether_addr *mac_addr);
4611 
4629 int rte_eth_dev_rss_reta_update(uint16_t port_id,
4630  struct rte_eth_rss_reta_entry64 *reta_conf,
4631  uint16_t reta_size);
4632 
4651 int rte_eth_dev_rss_reta_query(uint16_t port_id,
4652  struct rte_eth_rss_reta_entry64 *reta_conf,
4653  uint16_t reta_size);
4654 
4674 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4675  uint8_t on);
4676 
4695 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4696 
4713 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4714  uint32_t tx_rate);
4715 
4730 int rte_eth_dev_rss_hash_update(uint16_t port_id,
4731  struct rte_eth_rss_conf *rss_conf);
4732 
4748 int
4749 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4750  struct rte_eth_rss_conf *rss_conf);
4751 
4764 __rte_experimental
4765 const char *
4767 
4784 __rte_experimental
4785 int
4786 rte_eth_find_rss_algo(const char *name, uint32_t *algo);
4787 
4812 int
4813 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4814  struct rte_eth_udp_tunnel *tunnel_udp);
4815 
4835 int
4836 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4837  struct rte_eth_udp_tunnel *tunnel_udp);
4838 
4853 int rte_eth_dev_get_dcb_info(uint16_t port_id,
4854  struct rte_eth_dcb_info *dcb_info);
4855 
4856 struct rte_eth_rxtx_callback;
4857 
4883 const struct rte_eth_rxtx_callback *
4884 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4885  rte_rx_callback_fn fn, void *user_param);
4886 
4913 const struct rte_eth_rxtx_callback *
4914 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4915  rte_rx_callback_fn fn, void *user_param);
4916 
4942 const struct rte_eth_rxtx_callback *
4943 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4944  rte_tx_callback_fn fn, void *user_param);
4945 
4979 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4980  const struct rte_eth_rxtx_callback *user_cb);
4981 
5015 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5016  const struct rte_eth_rxtx_callback *user_cb);
5017 
5037 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5038  struct rte_eth_rxq_info *qinfo);
5039 
5059 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5060  struct rte_eth_txq_info *qinfo);
5061 
5082 __rte_experimental
5083 int rte_eth_recycle_rx_queue_info_get(uint16_t port_id,
5084  uint16_t queue_id,
5085  struct rte_eth_recycle_rxq_info *recycle_rxq_info);
5086 
5105 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5106  struct rte_eth_burst_mode *mode);
5107 
5126 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5127  struct rte_eth_burst_mode *mode);
5128 
5149 __rte_experimental
5150 int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5151  struct rte_power_monitor_cond *pmc);
5152 
5179 __rte_experimental
5180 int rte_eth_dev_get_reg_info_ext(uint16_t port_id, struct rte_dev_reg_info *info);
5181 
5200 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
5201 
5214 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
5215 
5232 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5233 
5250 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5251 
5270 __rte_experimental
5271 int
5272 rte_eth_dev_get_module_info(uint16_t port_id,
5273  struct rte_eth_dev_module_info *modinfo);
5274 
5294 __rte_experimental
5295 int
5296 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5297  struct rte_dev_eeprom_info *info);
5298 
5318 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5319  struct rte_ether_addr *mc_addr_set,
5320  uint32_t nb_mc_addr);
5321 
5334 int rte_eth_timesync_enable(uint16_t port_id);
5335 
5348 int rte_eth_timesync_disable(uint16_t port_id);
5349 
5368 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
5369  struct timespec *timestamp, uint32_t flags);
5370 
5386 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5387  struct timespec *timestamp);
5388 
5406 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
5407 
5448 __rte_experimental
5449 int rte_eth_timesync_adjust_freq(uint16_t port_id, int64_t ppm);
5450 
5466 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
5467 
5486 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
5487 
5533 __rte_experimental
5534 int
5535 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
5536 
5552 int
5553 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
5554 
5571 int
5572 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5573 
5590 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5591  uint16_t *nb_rx_desc,
5592  uint16_t *nb_tx_desc);
5593 
5608 int
5609 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5610 
5620 void *
5621 rte_eth_dev_get_sec_ctx(uint16_t port_id);
5622 
5638 __rte_experimental
5639 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5640  struct rte_eth_hairpin_cap *cap);
5641 
5651  int pf;
5652  __extension__
5653  union {
5654  int vf;
5655  int sf;
5656  };
5657  uint32_t id_base;
5658  uint32_t id_end;
5659  char name[RTE_DEV_NAME_MAX_LEN];
5660 };
5661 
5669  uint16_t controller;
5670  uint16_t pf;
5671  uint32_t nb_ranges_alloc;
5672  uint32_t nb_ranges;
5673  struct rte_eth_representor_range ranges[];
5674 };
5675 
5699 __rte_experimental
5700 int rte_eth_representor_info_get(uint16_t port_id,
5701  struct rte_eth_representor_info *info);
5702 
5704 #define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5705 
5707 #define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5708 
5710 #define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5711 
5751 int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
5752 
5754 #define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
5755 
5756 #define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))
5757 
5768  uint32_t timeout_ms;
5770  uint16_t max_frags;
5775  uint16_t flags;
5776 };
5777 
5798 __rte_experimental
5799 int rte_eth_ip_reassembly_capability_get(uint16_t port_id,
5800  struct rte_eth_ip_reassembly_params *capa);
5801 
5823 __rte_experimental
5824 int rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5825  struct rte_eth_ip_reassembly_params *conf);
5826 
5856 __rte_experimental
5857 int rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5858  const struct rte_eth_ip_reassembly_params *conf);
5859 
5867 typedef struct {
5874  uint16_t time_spent;
5876  uint16_t nb_frags;
5878 
5897 __rte_experimental
5898 int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file);
5899 
5923 __rte_experimental
5924 int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5925  uint16_t offset, uint16_t num, FILE *file);
5926 
5950 __rte_experimental
5951 int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5952  uint16_t offset, uint16_t num, FILE *file);
5953 
5954 
5955 /* Congestion management */
5956 
5966 };
5967 
5984  uint64_t objs_supported;
5989  uint8_t rsvd[8];
5990 };
5991 
6002  enum rte_cman_mode mode;
6003  union {
6010  uint16_t rx_queue;
6017  uint8_t rsvd_obj_params[4];
6018  } obj_param;
6019  union {
6032  uint8_t rsvd_mode_params[4];
6033  } mode_param;
6034 };
6035 
6053 __rte_experimental
6054 int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info);
6055 
6073 __rte_experimental
6074 int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config);
6075 
6092 __rte_experimental
6093 int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config);
6094 
6115 __rte_experimental
6116 int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config);
6117 
6118 #include <rte_ethdev_core.h>
6119 
6120 #ifdef __cplusplus
6121 extern "C" {
6122 #endif
6123 
6147 uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
6148  struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
6149  void *opaque);
6150 
6238 static inline uint16_t
6239 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
6240  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
6241 {
6242  uint16_t nb_rx;
6243  struct rte_eth_fp_ops *p;
6244  void *qd;
6245 
6246 #ifdef RTE_ETHDEV_DEBUG_RX
6247  if (port_id >= RTE_MAX_ETHPORTS ||
6248  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6249  RTE_ETHDEV_LOG_LINE(ERR,
6250  "Invalid port_id=%u or queue_id=%u",
6251  port_id, queue_id);
6252  return 0;
6253  }
6254 #endif
6255 
6256  /* fetch pointer to queue data */
6257  p = &rte_eth_fp_ops[port_id];
6258  qd = p->rxq.data[queue_id];
6259 
6260 #ifdef RTE_ETHDEV_DEBUG_RX
6261  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6262 
6263  if (qd == NULL) {
6264  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
6265  queue_id, port_id);
6266  return 0;
6267  }
6268 #endif
6269 
6270  nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
6271 
6272 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6273  {
6274  void *cb;
6275 
6276  /* rte_memory_order_release memory order was used when the
6277  * callback was inserted into the list.
6278  * Since there is a clear dependency between loading
6279  * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6280  * not required.
6281  */
6282  cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
6283  rte_memory_order_relaxed);
6284  if (unlikely(cb != NULL))
6285  nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
6286  rx_pkts, nb_rx, nb_pkts, cb);
6287  }
6288 #endif
6289 
6290  if (unlikely(nb_rx))
6291  rte_ethdev_trace_rx_burst_nonempty(port_id, queue_id, (void **)rx_pkts, nb_rx);
6292  else
6293  rte_ethdev_trace_rx_burst_empty(port_id, queue_id, (void **)rx_pkts);
6294  return nb_rx;
6295 }
6296 
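/*
 * Sketch (not part of the original header, assumes <rte_mbuf.h>): the
 * canonical poll loop body around rte_eth_rx_burst(); the burst size is
 * illustrative and processing is reduced to freeing the mbufs.
 */
static void
example_rx_poll(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx, i;

	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, RTE_DIM(pkts));
	for (i = 0; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);	/* application work goes here */
}
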
6314 static inline int
6315 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
6316 {
6317  struct rte_eth_fp_ops *p;
6318  void *qd;
6319 
6320 #ifdef RTE_ETHDEV_DEBUG_RX
6321  if (port_id >= RTE_MAX_ETHPORTS ||
6322  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6323  RTE_ETHDEV_LOG_LINE(ERR,
6324  "Invalid port_id=%u or queue_id=%u",
6325  port_id, queue_id);
6326  return -EINVAL;
6327  }
6328 #endif
6329 
6330  /* fetch pointer to queue data */
6331  p = &rte_eth_fp_ops[port_id];
6332  qd = p->rxq.data[queue_id];
6333 
6334 #ifdef RTE_ETHDEV_DEBUG_RX
6335  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6336  if (qd == NULL)
6337  return -EINVAL;
6338 #endif
6339 
6340  if (*p->rx_queue_count == NULL)
6341  return -ENOTSUP;
6342  return (int)(*p->rx_queue_count)(qd);
6343 }
6344 
6348 #define RTE_ETH_RX_DESC_AVAIL 0
6349 #define RTE_ETH_RX_DESC_DONE 1
6350 #define RTE_ETH_RX_DESC_UNAVAIL 2
6386 static inline int
6387 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
6388  uint16_t offset)
6389 {
6390  struct rte_eth_fp_ops *p;
6391  void *qd;
6392 
6393 #ifdef RTE_ETHDEV_DEBUG_RX
6394  if (port_id >= RTE_MAX_ETHPORTS ||
6395  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6396  RTE_ETHDEV_LOG_LINE(ERR,
6397  "Invalid port_id=%u or queue_id=%u",
6398  port_id, queue_id);
6399  return -EINVAL;
6400  }
6401 #endif
6402 
6403  /* fetch pointer to queue data */
6404  p = &rte_eth_fp_ops[port_id];
6405  qd = p->rxq.data[queue_id];
6406 
6407 #ifdef RTE_ETHDEV_DEBUG_RX
6408  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6409  if (qd == NULL)
6410  return -ENODEV;
6411 #endif
6412  if (*p->rx_descriptor_status == NULL)
6413  return -ENOTSUP;
6414  return (*p->rx_descriptor_status)(qd, offset);
6415 }
6416 
6420 #define RTE_ETH_TX_DESC_FULL 0
6421 #define RTE_ETH_TX_DESC_DONE 1
6422 #define RTE_ETH_TX_DESC_UNAVAIL 2
6458 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
6459  uint16_t queue_id, uint16_t offset)
6460 {
6461  struct rte_eth_fp_ops *p;
6462  void *qd;
6463 
6464 #ifdef RTE_ETHDEV_DEBUG_TX
6465  if (port_id >= RTE_MAX_ETHPORTS ||
6466  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6467  RTE_ETHDEV_LOG_LINE(ERR,
6468  "Invalid port_id=%u or queue_id=%u",
6469  port_id, queue_id);
6470  return -EINVAL;
6471  }
6472 #endif
6473 
6474  /* fetch pointer to queue data */
6475  p = &rte_eth_fp_ops[port_id];
6476  qd = p->txq.data[queue_id];
6477 
6478 #ifdef RTE_ETHDEV_DEBUG_TX
6479  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6480  if (qd == NULL)
6481  return -ENODEV;
6482 #endif
6483  if (*p->tx_descriptor_status == NULL)
6484  return -ENOTSUP;
6485  return (*p->tx_descriptor_status)(qd, offset);
6486 }
6487 
6507 uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
6508  struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
6509 
6581 static inline uint16_t
6582 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
6583  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6584 {
6585  struct rte_eth_fp_ops *p;
6586  void *qd;
6587 
6588 #ifdef RTE_ETHDEV_DEBUG_TX
6589  if (port_id >= RTE_MAX_ETHPORTS ||
6590  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6591  RTE_ETHDEV_LOG_LINE(ERR,
6592  "Invalid port_id=%u or queue_id=%u",
6593  port_id, queue_id);
6594  return 0;
6595  }
6596 #endif
6597 
6598  /* fetch pointer to queue data */
6599  p = &rte_eth_fp_ops[port_id];
6600  qd = p->txq.data[queue_id];
6601 
6602 #ifdef RTE_ETHDEV_DEBUG_TX
6603  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6604 
6605  if (qd == NULL) {
6606  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6607  queue_id, port_id);
6608  return 0;
6609  }
6610 #endif
6611 
6612 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6613  {
6614  void *cb;
6615 
6616  /* rte_memory_order_release memory order was used when the
6617  * callback was inserted into the list.
6618  * Since there is a clear dependency between loading
6619  * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6620  * not required.
6621  */
6622  cb = rte_atomic_load_explicit(&p->txq.clbk[queue_id],
6623  rte_memory_order_relaxed);
6624  if (unlikely(cb != NULL))
6625  nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
6626  tx_pkts, nb_pkts, cb);
6627  }
6628 #endif
6629 
6630  nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
6631 
6632  rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
6633  return nb_pkts;
6634 }
6635 
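/*
 * Sketch (not part of the original header, assumes <rte_mbuf.h>): a
 * single rte_eth_tx_burst() call may accept fewer than nb_pkts packets;
 * this sketch simply drops whatever the driver did not take.
 */
static void
example_tx_send(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

	while (sent < nb_pkts)
		rte_pktmbuf_free(pkts[sent++]);
}
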
6689 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
6690 
6691 static inline uint16_t
6692 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
6693  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6694 {
6695  struct rte_eth_fp_ops *p;
6696  void *qd;
6697 
6698 #ifdef RTE_ETHDEV_DEBUG_TX
6699  if (port_id >= RTE_MAX_ETHPORTS ||
6700  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6701  RTE_ETHDEV_LOG_LINE(ERR,
6702  "Invalid port_id=%u or queue_id=%u",
6703  port_id, queue_id);
6704  rte_errno = ENODEV;
6705  return 0;
6706  }
6707 #endif
6708 
6709  /* fetch pointer to queue data */
6710  p = &rte_eth_fp_ops[port_id];
6711  qd = p->txq.data[queue_id];
6712 
6713 #ifdef RTE_ETHDEV_DEBUG_TX
6714  if (!rte_eth_dev_is_valid_port(port_id)) {
6715  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx port_id=%u", port_id);
6716  rte_errno = ENODEV;
6717  return 0;
6718  }
6719  if (qd == NULL) {
6720  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6721  queue_id, port_id);
6722  rte_errno = EINVAL;
6723  return 0;
6724  }
6725 #endif
6726 
6727  if (!p->tx_pkt_prepare)
6728  return nb_pkts;
6729 
6730  return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
6731 }
6732 
6733 #else
6734 
6735 /*
6736  * Native NOOP operation for compilation targets which don't require any
6737  * preparation steps, and where a functional NOOP may introduce an
6738  * unnecessary performance drop.
6739  *
6740  * Generally it is not a good idea to turn this on globally, and it should
6741  * not be used if the behavior of tx_prepare can change.
6742  */
6743 
6744 static inline uint16_t
6745 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
6746  __rte_unused uint16_t queue_id,
6747  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6748 {
6749  return nb_pkts;
6750 }
6751 
6752 #endif
6753 
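/*
 * Usage sketch (not part of the header): run rte_eth_tx_prepare() on a
 * burst before rte_eth_tx_burst() so offload metadata (e.g. checksum
 * fields) is fixed up for the device. The helper name is illustrative.
 */
static inline uint16_t
prepare_and_send(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
			pkts, nb_pkts);

	/* On partial success, pkts[nb_prep] is the first packet that failed
	 * preparation and rte_errno holds the cause; transmit only the
	 * packets that passed.
	 */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}
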
6776 static inline uint16_t
6777 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
6778  struct rte_eth_dev_tx_buffer *buffer)
6779 {
6780  uint16_t sent;
6781  uint16_t to_send = buffer->length;
6782 
6783  if (to_send == 0)
6784  return 0;
6785 
6786  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
6787 
6788  buffer->length = 0;
6789 
6790  /* All packets sent, or to be dealt with by callback below */
6791  if (unlikely(sent != to_send))
6792  buffer->error_callback(&buffer->pkts[sent],
6793  (uint16_t)(to_send - sent),
6794  buffer->error_userdata);
6795 
6796  return sent;
6797 }
6798 
6829 static __rte_always_inline uint16_t
6830 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
6831  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
6832 {
6833  buffer->pkts[buffer->length++] = tx_pkt;
6834  if (buffer->length < buffer->size)
6835  return 0;
6836 
6837  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
6838 }
6839 
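/*
 * Usage sketch (not part of the header): buffer packets one at a time and
 * let rte_eth_tx_buffer() flush automatically when the buffer fills, then
 * drain the remainder at the end of each polling iteration. BURST_SZ, the
 * calloc() allocation (stdlib.h) and the helper names are illustrative.
 */
#define BURST_SZ 32

static struct rte_eth_dev_tx_buffer *
make_tx_buffer(void)
{
	struct rte_eth_dev_tx_buffer *buf =
		calloc(1, RTE_ETH_TX_BUFFER_SIZE(BURST_SZ));

	if (buf != NULL)
		rte_eth_tx_buffer_init(buf, BURST_SZ);
	return buf;
}

static void
queue_pkt(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_dev_tx_buffer *buf, struct rte_mbuf *m)
{
	/* Transmits automatically once BURST_SZ packets accumulate. */
	rte_eth_tx_buffer(port_id, queue_id, buf, m);
}

static void
drain(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buf)
{
	/* Called at the end of a polling iteration so packets never linger
	 * in a partly filled buffer.
	 */
	rte_eth_tx_buffer_flush(port_id, queue_id, buf);
}
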
6893 __rte_experimental
6894 static inline uint16_t
6895 rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
6896  uint16_t tx_port_id, uint16_t tx_queue_id,
6897  struct rte_eth_recycle_rxq_info *recycle_rxq_info)
6898 {
6899  struct rte_eth_fp_ops *p1, *p2;
6900  void *qd1, *qd2;
6901  uint16_t nb_mbufs;
6902 
6903 #ifdef RTE_ETHDEV_DEBUG_TX
6904  if (tx_port_id >= RTE_MAX_ETHPORTS ||
6905  tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6906  RTE_ETHDEV_LOG_LINE(ERR,
6907  "Invalid tx_port_id=%u or tx_queue_id=%u",
6908  tx_port_id, tx_queue_id);
6909  return 0;
6910  }
6911 #endif
6912 
6913  /* fetch pointer to Tx queue data */
6914  p1 = &rte_eth_fp_ops[tx_port_id];
6915  qd1 = p1->txq.data[tx_queue_id];
6916 
6917 #ifdef RTE_ETHDEV_DEBUG_TX
6918  RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port_id, 0);
6919 
6920  if (qd1 == NULL) {
6921  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6922  tx_queue_id, tx_port_id);
6923  return 0;
6924  }
6925 #endif
6926  if (p1->recycle_tx_mbufs_reuse == NULL)
6927  return 0;
6928 
6929 #ifdef RTE_ETHDEV_DEBUG_RX
6930  if (rx_port_id >= RTE_MAX_ETHPORTS ||
6931  rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6932  RTE_ETHDEV_LOG_LINE(ERR, "Invalid rx_port_id=%u or rx_queue_id=%u",
6933  rx_port_id, rx_queue_id);
6934  return 0;
6935  }
6936 #endif
6937 
6938  /* fetch pointer to Rx queue data */
6939  p2 = &rte_eth_fp_ops[rx_port_id];
6940  qd2 = p2->rxq.data[rx_queue_id];
6941 
6942 #ifdef RTE_ETHDEV_DEBUG_RX
6943  RTE_ETH_VALID_PORTID_OR_ERR_RET(rx_port_id, 0);
6944 
6945  if (qd2 == NULL) {
6946  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
6947  rx_queue_id, rx_port_id);
6948  return 0;
6949  }
6950 #endif
6951  if (p2->recycle_rx_descriptors_refill == NULL)
6952  return 0;
6953 
6954  /* Copy used *rte_mbuf* buffer pointers from Tx mbuf ring
6955  * into Rx mbuf ring.
6956  */
6957  nb_mbufs = p1->recycle_tx_mbufs_reuse(qd1, recycle_rxq_info);
6958 
6959  /* If no mbufs were recycled, return 0. */
6960  if (nb_mbufs == 0)
6961  return 0;
6962 
6963  /* Replenish the Rx descriptors with the recycled
6964  * mbufs now sitting in the Rx mbuf ring.
6965  */
6966  p2->recycle_rx_descriptors_refill(qd2, nb_mbufs);
6967 
6968  return nb_mbufs;
6969 }
6970 
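/*
 * Usage sketch (not part of the header): in a forwarding loop, move mbufs
 * completed by the egress Tx queue straight back into the ingress Rx ring
 * before polling it. recycle_rxq_info is assumed to have been filled once
 * via rte_eth_recycle_rx_queue_info_get(); port/queue ids are illustrative,
 * and unsent packets would need freeing as in the earlier sketch.
 */
static void
forward_with_recycle(uint16_t rx_port, uint16_t tx_port,
		struct rte_eth_recycle_rxq_info *recycle_rxq_info)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb;

	/* Refill rx_port queue 0 with mbufs freed by tx_port queue 0. */
	rte_eth_recycle_mbufs(rx_port, 0, tx_port, 0, recycle_rxq_info);

	nb = rte_eth_rx_burst(rx_port, 0, pkts, 32);
	if (nb > 0)
		rte_eth_tx_burst(tx_port, 0, pkts, nb);
}
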
6999 __rte_experimental
7000 int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num);
7001 
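/*
 * Usage sketch (not part of the header): list the header protocol types a
 * port can split a buffer on. The return value is assumed to be the number
 * of supported ptypes (negative errno on failure), possibly larger than
 * the array passed in; printf() requires stdio.h.
 */
static void
print_split_hdr_ptypes(uint16_t port_id)
{
	uint32_t ptypes[16];
	int i, n;

	n = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id,
			ptypes, 16);
	for (i = 0; i < n && i < 16; i++)
		printf("supported split hdr ptype: %#x\n",
				(unsigned int)ptypes[i]);
}
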
7036 __rte_experimental
7037 static inline int
7038 rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
7039 {
7040  struct rte_eth_fp_ops *fops;
7041  void *qd;
7042  int rc;
7043 
7044 #ifdef RTE_ETHDEV_DEBUG_TX
7045  if (port_id >= RTE_MAX_ETHPORTS || !rte_eth_dev_is_valid_port(port_id)) {
7046  RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id);
7047  rc = -ENODEV;
7048  goto out;
7049  }
7050 
7051  if (queue_id >= RTE_MAX_QUEUES_PER_PORT) {
7052  RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
7053  queue_id, port_id);
7054  rc = -EINVAL;
7055  goto out;
7056  }
7057 #endif
7058 
7059  /* Fetch pointer to Tx queue data */
7060  fops = &rte_eth_fp_ops[port_id];
7061  qd = fops->txq.data[queue_id];
7062 
7063 #ifdef RTE_ETHDEV_DEBUG_TX
7064  if (qd == NULL) {
7065  RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
7066  queue_id, port_id);
7067  rc = -EINVAL;
7068  goto out;
7069  }
7070 #endif
7071  if (fops->tx_queue_count == NULL) {
7072  rc = -ENOTSUP;
7073  goto out;
7074  }
7075 
7076  rc = fops->tx_queue_count(qd);
7077 
7078 out:
7079  rte_eth_trace_tx_queue_count(port_id, queue_id, rc);
7080  return rc;
7081 }
7082 
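/*
 * Usage sketch (not part of the header): back off from transmitting when
 * the Tx ring is nearly full. nb_txd is the configured ring size and is
 * an illustrative assumption; a negative return (e.g. -ENOTSUP) means the
 * count is unavailable and is treated here as "assume there is room".
 */
static inline int
tx_queue_nearly_full(uint16_t port_id, uint16_t queue_id, uint16_t nb_txd)
{
	int used = rte_eth_tx_queue_count(port_id, queue_id);

	return used >= 0 && used > (nb_txd * 3) / 4;
}
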
7083 #ifdef __cplusplus
7084 }
7085 #endif
7086 
7087 #endif /* _RTE_ETHDEV_H_ */