DPDK  24.11.0-rc3
rte_ethdev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
148 #include <stdint.h>
149 
150 /* Use this macro to check if LRO API is supported */
151 #define RTE_ETHDEV_HAS_LRO_SUPPORT
152 
153 /* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
154 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
155 #define RTE_ETHDEV_DEBUG_RX
156 #define RTE_ETHDEV_DEBUG_TX
157 #endif
158 
159 #include <rte_cman.h>
160 #include <rte_compat.h>
161 #include <rte_log.h>
162 #include <rte_interrupts.h>
163 #include <rte_dev.h>
164 #include <rte_devargs.h>
165 #include <rte_bitops.h>
166 #include <rte_errno.h>
167 #include <rte_common.h>
168 #include <rte_config.h>
169 #include <rte_power_intrinsics.h>
170 
171 #include "rte_ethdev_trace_fp.h"
172 #include "rte_dev_info.h"
173 
174 #ifdef __cplusplus
175 extern "C" {
176 #endif
177 
178 extern int rte_eth_dev_logtype;
179 #define RTE_LOGTYPE_ETHDEV rte_eth_dev_logtype
180 
181 #define RTE_ETHDEV_LOG_LINE(level, ...) \
182  RTE_LOG_LINE(level, ETHDEV, "" __VA_ARGS__)
183 
184 struct rte_mbuf;
185 
202 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
203 
218 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
219 
232 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
233 
247 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
248  for (rte_eth_iterator_init(iter, devargs), \
249  id = rte_eth_iterator_next(iter); \
250  id != RTE_MAX_ETHPORTS; \
251  id = rte_eth_iterator_next(iter))
252 
263  uint64_t ipackets;
264  uint64_t opackets;
265  uint64_t ibytes;
266  uint64_t obytes;
271  uint64_t imissed;
272  uint64_t ierrors;
273  uint64_t oerrors;
274  uint64_t rx_nombuf;
275  /* Queue stats are limited to max 256 queues */
277  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
279  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
281  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
283  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
285  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
286 };
287 
291 #define RTE_ETH_LINK_SPEED_AUTONEG 0
292 #define RTE_ETH_LINK_SPEED_FIXED RTE_BIT32(0)
293 #define RTE_ETH_LINK_SPEED_10M_HD RTE_BIT32(1)
294 #define RTE_ETH_LINK_SPEED_10M RTE_BIT32(2)
295 #define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)
296 #define RTE_ETH_LINK_SPEED_100M RTE_BIT32(4)
297 #define RTE_ETH_LINK_SPEED_1G RTE_BIT32(5)
298 #define RTE_ETH_LINK_SPEED_2_5G RTE_BIT32(6)
299 #define RTE_ETH_LINK_SPEED_5G RTE_BIT32(7)
300 #define RTE_ETH_LINK_SPEED_10G RTE_BIT32(8)
301 #define RTE_ETH_LINK_SPEED_20G RTE_BIT32(9)
302 #define RTE_ETH_LINK_SPEED_25G RTE_BIT32(10)
303 #define RTE_ETH_LINK_SPEED_40G RTE_BIT32(11)
304 #define RTE_ETH_LINK_SPEED_50G RTE_BIT32(12)
305 #define RTE_ETH_LINK_SPEED_56G RTE_BIT32(13)
306 #define RTE_ETH_LINK_SPEED_100G RTE_BIT32(14)
307 #define RTE_ETH_LINK_SPEED_200G RTE_BIT32(15)
308 #define RTE_ETH_LINK_SPEED_400G RTE_BIT32(16)
314 #define RTE_ETH_SPEED_NUM_NONE 0
315 #define RTE_ETH_SPEED_NUM_10M 10
316 #define RTE_ETH_SPEED_NUM_100M 100
317 #define RTE_ETH_SPEED_NUM_1G 1000
318 #define RTE_ETH_SPEED_NUM_2_5G 2500
319 #define RTE_ETH_SPEED_NUM_5G 5000
320 #define RTE_ETH_SPEED_NUM_10G 10000
321 #define RTE_ETH_SPEED_NUM_20G 20000
322 #define RTE_ETH_SPEED_NUM_25G 25000
323 #define RTE_ETH_SPEED_NUM_40G 40000
324 #define RTE_ETH_SPEED_NUM_50G 50000
325 #define RTE_ETH_SPEED_NUM_56G 56000
326 #define RTE_ETH_SPEED_NUM_100G 100000
327 #define RTE_ETH_SPEED_NUM_200G 200000
328 #define RTE_ETH_SPEED_NUM_400G 400000
329 #define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX
/**
 * A structure used to retrieve link-level information of an Ethernet port.
 *
 * The whole structure aliases a single 64-bit atomic word (val64) so that
 * link status can be read and written atomically; the anonymous bit-field
 * struct is an overlay view of the same storage. Field layout is ABI —
 * do not reorder.
 */
struct rte_eth_link {
	union {
		RTE_ATOMIC(uint64_t) val64; /**< used for atomic64 read/write */
		__extension__
		struct {
			uint32_t link_speed;       /**< RTE_ETH_SPEED_NUM_* (Mbps); UNKNOWN = UINT32_MAX */
			uint16_t link_duplex : 1;  /**< RTE_ETH_LINK_HALF_DUPLEX / RTE_ETH_LINK_FULL_DUPLEX */
			uint16_t link_autoneg : 1; /**< RTE_ETH_LINK_FIXED / RTE_ETH_LINK_AUTONEG */
			uint16_t link_status : 1;  /**< RTE_ETH_LINK_DOWN / RTE_ETH_LINK_UP */
		};
	};
};
347 
351 #define RTE_ETH_LINK_HALF_DUPLEX 0
352 #define RTE_ETH_LINK_FULL_DUPLEX 1
353 #define RTE_ETH_LINK_DOWN 0
354 #define RTE_ETH_LINK_UP 1
355 #define RTE_ETH_LINK_FIXED 0
356 #define RTE_ETH_LINK_AUTONEG 1
357 #define RTE_ETH_LINK_MAX_STR_LEN 40
361 #define RTE_ETH_SPEED_LANES_TO_CAPA(x) RTE_BIT32(x)
362 
365  uint32_t speed;
366  uint32_t capa;
367 };
368 
374  uint8_t pthresh;
375  uint8_t hthresh;
376  uint8_t wthresh;
377 };
378 
382 #define RTE_ETH_MQ_RX_RSS_FLAG RTE_BIT32(0)
383 #define RTE_ETH_MQ_RX_DCB_FLAG RTE_BIT32(1)
384 #define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2)
391 enum rte_eth_rx_mq_mode {
392 
394 
401 
411 };
412 
422 };
423 
429  enum rte_eth_rx_mq_mode mq_mode;
430  uint32_t mtu;
438  uint64_t offloads;
439 
440  uint64_t reserved_64s[2];
441  void *reserved_ptrs[2];
442 };
443 
449  RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
452  RTE_ETH_VLAN_TYPE_MAX,
453 };
454 
460  uint64_t ids[64];
461 };
462 
484  RTE_ETH_HASH_FUNCTION_MAX,
485 };
486 
487 #define RTE_ETH_HASH_ALGO_TO_CAPA(x) RTE_BIT32(x)
488 #define RTE_ETH_HASH_ALGO_CAPA_MASK(x) RTE_BIT32(RTE_ETH_HASH_FUNCTION_ ## x)
489 
507  uint8_t *rss_key;
508  uint8_t rss_key_len;
513  uint64_t rss_hf;
514  enum rte_eth_hash_function algorithm;
515 };
516 
517 /*
518  * A packet can be identified by hardware as different flow types. Different
519  * NIC hardware may support different flow types.
520  * Basically, the NIC hardware identifies the flow type as deep protocol as
521  * possible, and exclusively. For example, if a packet is identified as
522  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
523  * though it is an actual IPV4 packet.
524  */
525 #define RTE_ETH_FLOW_UNKNOWN 0
526 #define RTE_ETH_FLOW_RAW 1
527 #define RTE_ETH_FLOW_IPV4 2
528 #define RTE_ETH_FLOW_FRAG_IPV4 3
529 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
530 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
531 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
532 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
533 #define RTE_ETH_FLOW_IPV6 8
534 #define RTE_ETH_FLOW_FRAG_IPV6 9
535 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
536 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
537 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
538 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
539 #define RTE_ETH_FLOW_L2_PAYLOAD 14
540 #define RTE_ETH_FLOW_IPV6_EX 15
541 #define RTE_ETH_FLOW_IPV6_TCP_EX 16
542 #define RTE_ETH_FLOW_IPV6_UDP_EX 17
543 
544 #define RTE_ETH_FLOW_PORT 18
545 #define RTE_ETH_FLOW_VXLAN 19
546 #define RTE_ETH_FLOW_GENEVE 20
547 #define RTE_ETH_FLOW_NVGRE 21
548 #define RTE_ETH_FLOW_VXLAN_GPE 22
549 #define RTE_ETH_FLOW_GTPU 23
550 #define RTE_ETH_FLOW_MAX 24
551 
552 /*
553  * Below macros are defined for RSS offload types, they can be used to
554  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
555  */
556 #define RTE_ETH_RSS_IPV4 RTE_BIT64(2)
557 #define RTE_ETH_RSS_FRAG_IPV4 RTE_BIT64(3)
558 #define RTE_ETH_RSS_NONFRAG_IPV4_TCP RTE_BIT64(4)
559 #define RTE_ETH_RSS_NONFRAG_IPV4_UDP RTE_BIT64(5)
560 #define RTE_ETH_RSS_NONFRAG_IPV4_SCTP RTE_BIT64(6)
561 #define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
562 #define RTE_ETH_RSS_IPV6 RTE_BIT64(8)
563 #define RTE_ETH_RSS_FRAG_IPV6 RTE_BIT64(9)
564 #define RTE_ETH_RSS_NONFRAG_IPV6_TCP RTE_BIT64(10)
565 #define RTE_ETH_RSS_NONFRAG_IPV6_UDP RTE_BIT64(11)
566 #define RTE_ETH_RSS_NONFRAG_IPV6_SCTP RTE_BIT64(12)
567 #define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
568 #define RTE_ETH_RSS_L2_PAYLOAD RTE_BIT64(14)
569 #define RTE_ETH_RSS_IPV6_EX RTE_BIT64(15)
570 #define RTE_ETH_RSS_IPV6_TCP_EX RTE_BIT64(16)
571 #define RTE_ETH_RSS_IPV6_UDP_EX RTE_BIT64(17)
572 #define RTE_ETH_RSS_PORT RTE_BIT64(18)
573 #define RTE_ETH_RSS_VXLAN RTE_BIT64(19)
574 #define RTE_ETH_RSS_GENEVE RTE_BIT64(20)
575 #define RTE_ETH_RSS_NVGRE RTE_BIT64(21)
576 #define RTE_ETH_RSS_GTPU RTE_BIT64(23)
577 #define RTE_ETH_RSS_ETH RTE_BIT64(24)
578 #define RTE_ETH_RSS_S_VLAN RTE_BIT64(25)
579 #define RTE_ETH_RSS_C_VLAN RTE_BIT64(26)
580 #define RTE_ETH_RSS_ESP RTE_BIT64(27)
581 #define RTE_ETH_RSS_AH RTE_BIT64(28)
582 #define RTE_ETH_RSS_L2TPV3 RTE_BIT64(29)
583 #define RTE_ETH_RSS_PFCP RTE_BIT64(30)
584 #define RTE_ETH_RSS_PPPOE RTE_BIT64(31)
585 #define RTE_ETH_RSS_ECPRI RTE_BIT64(32)
586 #define RTE_ETH_RSS_MPLS RTE_BIT64(33)
587 #define RTE_ETH_RSS_IPV4_CHKSUM RTE_BIT64(34)
588 
601 #define RTE_ETH_RSS_L4_CHKSUM RTE_BIT64(35)
602 
603 #define RTE_ETH_RSS_L2TPV2 RTE_BIT64(36)
604 #define RTE_ETH_RSS_IPV6_FLOW_LABEL RTE_BIT64(37)
605 
606 /*
607  * We use the following macros to combine with above RTE_ETH_RSS_* for
608  * more specific input set selection. These bits are defined starting
609  * from the high end of the 64 bits.
610  * Note: If we use above RTE_ETH_RSS_* without SRC/DST_ONLY, it represents
611  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
612  * the same level are used simultaneously, it is the same case as none of
613  * them are added.
614  */
615 #define RTE_ETH_RSS_L3_SRC_ONLY RTE_BIT64(63)
616 #define RTE_ETH_RSS_L3_DST_ONLY RTE_BIT64(62)
617 #define RTE_ETH_RSS_L4_SRC_ONLY RTE_BIT64(61)
618 #define RTE_ETH_RSS_L4_DST_ONLY RTE_BIT64(60)
619 #define RTE_ETH_RSS_L2_SRC_ONLY RTE_BIT64(59)
620 #define RTE_ETH_RSS_L2_DST_ONLY RTE_BIT64(58)
621 
622 /*
623  * Only select IPV6 address prefix as RSS input set according to
624  * https://tools.ietf.org/html/rfc6052
625  * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
626  * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
627  */
628 #define RTE_ETH_RSS_L3_PRE32 RTE_BIT64(57)
629 #define RTE_ETH_RSS_L3_PRE40 RTE_BIT64(56)
630 #define RTE_ETH_RSS_L3_PRE48 RTE_BIT64(55)
631 #define RTE_ETH_RSS_L3_PRE56 RTE_BIT64(54)
632 #define RTE_ETH_RSS_L3_PRE64 RTE_BIT64(53)
633 #define RTE_ETH_RSS_L3_PRE96 RTE_BIT64(52)
634 
635 /*
636  * Use the following macros to combine with the above layers
637  * to choose inner and outer layers or both for RSS computation.
638  * Bits 50 and 51 are reserved for this.
639  */
640 
648 #define RTE_ETH_RSS_LEVEL_PMD_DEFAULT (UINT64_C(0) << 50)
649 
654 #define RTE_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
655 
660 #define RTE_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
661 #define RTE_ETH_RSS_LEVEL_MASK (UINT64_C(3) << 50)
662 
663 #define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
664 
675 static inline uint64_t
676 rte_eth_rss_hf_refine(uint64_t rss_hf)
677 {
678  if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
679  rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
680 
681  if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
682  rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
683 
684  return rss_hf;
685 }
686 
687 #define RTE_ETH_RSS_IPV6_PRE32 ( \
688  RTE_ETH_RSS_IPV6 | \
689  RTE_ETH_RSS_L3_PRE32)
690 
691 #define RTE_ETH_RSS_IPV6_PRE40 ( \
692  RTE_ETH_RSS_IPV6 | \
693  RTE_ETH_RSS_L3_PRE40)
694 
695 #define RTE_ETH_RSS_IPV6_PRE48 ( \
696  RTE_ETH_RSS_IPV6 | \
697  RTE_ETH_RSS_L3_PRE48)
698 
699 #define RTE_ETH_RSS_IPV6_PRE56 ( \
700  RTE_ETH_RSS_IPV6 | \
701  RTE_ETH_RSS_L3_PRE56)
702 
703 #define RTE_ETH_RSS_IPV6_PRE64 ( \
704  RTE_ETH_RSS_IPV6 | \
705  RTE_ETH_RSS_L3_PRE64)
706 
707 #define RTE_ETH_RSS_IPV6_PRE96 ( \
708  RTE_ETH_RSS_IPV6 | \
709  RTE_ETH_RSS_L3_PRE96)
710 
711 #define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
712  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
713  RTE_ETH_RSS_L3_PRE32)
714 
715 #define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
716  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
717  RTE_ETH_RSS_L3_PRE40)
718 
719 #define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
720  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
721  RTE_ETH_RSS_L3_PRE48)
722 
723 #define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
724  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
725  RTE_ETH_RSS_L3_PRE56)
726 
727 #define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
728  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
729  RTE_ETH_RSS_L3_PRE64)
730 
731 #define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
732  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
733  RTE_ETH_RSS_L3_PRE96)
734 
735 #define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
736  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
737  RTE_ETH_RSS_L3_PRE32)
738 
739 #define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
740  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
741  RTE_ETH_RSS_L3_PRE40)
742 
743 #define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
744  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
745  RTE_ETH_RSS_L3_PRE48)
746 
747 #define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
748  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
749  RTE_ETH_RSS_L3_PRE56)
750 
751 #define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
752  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
753  RTE_ETH_RSS_L3_PRE64)
754 
755 #define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
756  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
757  RTE_ETH_RSS_L3_PRE96)
758 
759 #define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
760  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
761  RTE_ETH_RSS_L3_PRE32)
762 
763 #define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
764  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
765  RTE_ETH_RSS_L3_PRE40)
766 
767 #define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
768  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
769  RTE_ETH_RSS_L3_PRE48)
770 
771 #define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
772  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
773  RTE_ETH_RSS_L3_PRE56)
774 
775 #define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
776  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
777  RTE_ETH_RSS_L3_PRE64)
778 
779 #define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
780  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
781  RTE_ETH_RSS_L3_PRE96)
782 
783 #define RTE_ETH_RSS_IP ( \
784  RTE_ETH_RSS_IPV4 | \
785  RTE_ETH_RSS_FRAG_IPV4 | \
786  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
787  RTE_ETH_RSS_IPV6 | \
788  RTE_ETH_RSS_FRAG_IPV6 | \
789  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
790  RTE_ETH_RSS_IPV6_EX)
791 
792 #define RTE_ETH_RSS_UDP ( \
793  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
794  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
795  RTE_ETH_RSS_IPV6_UDP_EX)
796 
797 #define RTE_ETH_RSS_TCP ( \
798  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
799  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
800  RTE_ETH_RSS_IPV6_TCP_EX)
801 
802 #define RTE_ETH_RSS_SCTP ( \
803  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
804  RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
805 
806 #define RTE_ETH_RSS_TUNNEL ( \
807  RTE_ETH_RSS_VXLAN | \
808  RTE_ETH_RSS_GENEVE | \
809  RTE_ETH_RSS_NVGRE)
810 
811 #define RTE_ETH_RSS_VLAN ( \
812  RTE_ETH_RSS_S_VLAN | \
813  RTE_ETH_RSS_C_VLAN)
814 
816 #define RTE_ETH_RSS_PROTO_MASK ( \
817  RTE_ETH_RSS_IPV4 | \
818  RTE_ETH_RSS_FRAG_IPV4 | \
819  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
820  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
821  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
822  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
823  RTE_ETH_RSS_IPV6 | \
824  RTE_ETH_RSS_FRAG_IPV6 | \
825  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
826  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
827  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
828  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
829  RTE_ETH_RSS_L2_PAYLOAD | \
830  RTE_ETH_RSS_IPV6_EX | \
831  RTE_ETH_RSS_IPV6_TCP_EX | \
832  RTE_ETH_RSS_IPV6_UDP_EX | \
833  RTE_ETH_RSS_PORT | \
834  RTE_ETH_RSS_VXLAN | \
835  RTE_ETH_RSS_GENEVE | \
836  RTE_ETH_RSS_NVGRE | \
837  RTE_ETH_RSS_MPLS)
838 
839 /*
840  * Definitions used for redirection table entry size.
841  * Some RSS RETA sizes may not be supported by some drivers, check the
842  * documentation or the description of relevant functions for more details.
843  */
844 #define RTE_ETH_RSS_RETA_SIZE_64 64
845 #define RTE_ETH_RSS_RETA_SIZE_128 128
846 #define RTE_ETH_RSS_RETA_SIZE_256 256
847 #define RTE_ETH_RSS_RETA_SIZE_512 512
848 #define RTE_ETH_RETA_GROUP_SIZE 64
849 
851 #define RTE_ETH_VMDQ_MAX_VLAN_FILTERS 64
852 #define RTE_ETH_DCB_NUM_USER_PRIORITIES 8
853 #define RTE_ETH_VMDQ_DCB_NUM_QUEUES 128
854 #define RTE_ETH_DCB_NUM_QUEUES 128
858 #define RTE_ETH_DCB_PG_SUPPORT RTE_BIT32(0)
859 #define RTE_ETH_DCB_PFC_SUPPORT RTE_BIT32(1)
863 #define RTE_ETH_VLAN_STRIP_OFFLOAD 0x0001
864 #define RTE_ETH_VLAN_FILTER_OFFLOAD 0x0002
865 #define RTE_ETH_VLAN_EXTEND_OFFLOAD 0x0004
866 #define RTE_ETH_QINQ_STRIP_OFFLOAD 0x0008
868 #define RTE_ETH_VLAN_STRIP_MASK 0x0001
869 #define RTE_ETH_VLAN_FILTER_MASK 0x0002
870 #define RTE_ETH_VLAN_EXTEND_MASK 0x0004
871 #define RTE_ETH_QINQ_STRIP_MASK 0x0008
872 #define RTE_ETH_VLAN_ID_MAX 0x0FFF
875 /* Definitions used for receive MAC address */
876 #define RTE_ETH_NUM_RECEIVE_MAC_ADDR 128
878 /* Definitions used for unicast hash */
879 #define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128
885 #define RTE_ETH_VMDQ_ACCEPT_UNTAG RTE_BIT32(0)
886 
887 #define RTE_ETH_VMDQ_ACCEPT_HASH_MC RTE_BIT32(1)
888 
889 #define RTE_ETH_VMDQ_ACCEPT_HASH_UC RTE_BIT32(2)
890 
891 #define RTE_ETH_VMDQ_ACCEPT_BROADCAST RTE_BIT32(3)
892 
893 #define RTE_ETH_VMDQ_ACCEPT_MULTICAST RTE_BIT32(4)
894 
904  uint64_t mask;
906  uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
907 };
908 
916 };
917 
927 };
928 
929 /* This structure may be extended in future. */
/** A structure used to configure DCB (Data Center Bridging) on Rx. */
struct rte_eth_dcb_rx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Number of Rx traffic classes (see enum rte_eth_nb_tcs) */
	/** Traffic class each user priority is mapped to (indexed by UP 0..7). */
	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
};
935 
/** A structure used to configure combined VMDq+DCB on Tx. */
struct rte_eth_vmdq_dcb_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq Tx pools (see enum rte_eth_nb_pools) */
	/** Traffic class each user priority is mapped to (indexed by UP 0..7). */
	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
};
941 
/** A structure used to configure DCB (Data Center Bridging) on Tx. */
struct rte_eth_dcb_tx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Number of Tx traffic classes (see enum rte_eth_nb_tcs) */
	/** Traffic class each user priority is mapped to (indexed by UP 0..7). */
	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
};
947 
/** A structure used to configure VMDq on Tx. */
struct rte_eth_vmdq_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq Tx pools (see enum rte_eth_nb_pools) */
};
951 
964  enum rte_eth_nb_pools nb_queue_pools;
966  uint8_t default_pool;
967  uint8_t nb_pool_maps;
968  struct {
969  uint16_t vlan_id;
970  uint64_t pools;
971  } pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS];
974 };
975 
995  enum rte_eth_nb_pools nb_queue_pools;
997  uint8_t default_pool;
999  uint8_t nb_pool_maps;
1000  uint32_t rx_mode;
1001  struct {
1002  uint16_t vlan_id;
1003  uint64_t pools;
1004  } pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS];
1005 };
1006 
1011  enum rte_eth_tx_mq_mode mq_mode;
1017  uint64_t offloads;
1018 
1019  uint16_t pvid;
1020  __extension__
1021  uint8_t
1022  hw_vlan_reject_tagged : 1,
1026  hw_vlan_insert_pvid : 1;
1027 
1028  uint64_t reserved_64s[2];
1029  void *reserved_ptrs[2];
1030 };
1031 
1093  struct rte_mempool *mp;
1094  uint16_t length;
1095  uint16_t offset;
1107  uint32_t proto_hdr;
1108 };
1109 
1117  /* The settings for buffer split offload. */
1118  struct rte_eth_rxseg_split split;
1119  /* The other features settings should be added here. */
1120 };
1121 
1126  struct rte_eth_thresh rx_thresh;
1127  uint16_t rx_free_thresh;
1128  uint8_t rx_drop_en;
1130  uint16_t rx_nseg;
1137  uint16_t share_group;
1138  uint16_t share_qid;
1144  uint64_t offloads;
1153 
1174  uint16_t rx_nmempool;
1176  uint64_t reserved_64s[2];
1177  void *reserved_ptrs[2];
1178 };
1179 
1184  struct rte_eth_thresh tx_thresh;
1185  uint16_t tx_rs_thresh;
1186  uint16_t tx_free_thresh;
1195  uint64_t offloads;
1196 
1197  uint64_t reserved_64s[2];
1198  void *reserved_ptrs[2];
1199 };
1200 
1213 
1218  uint32_t rte_memory:1;
1219 
1220  uint32_t reserved:30;
1221 };
1222 
1231  uint16_t max_nb_queues;
1233  uint16_t max_rx_2_tx;
1235  uint16_t max_tx_2_rx;
1236  uint16_t max_nb_desc;
1239 };
1240 
1241 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
1242 
1250  uint16_t port;
1251  uint16_t queue;
1252 };
1253 
1261  uint32_t peer_count:16;
1272  uint32_t tx_explicit:1;
1273 
1285  uint32_t manual_bind:1;
1286 
1299 
1311  uint32_t use_rte_memory:1;
1312 
1323  uint32_t force_memory:1;
1324 
1325  uint32_t reserved:11;
1327  struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1328 };
1329 
1334  uint16_t nb_max;
1335  uint16_t nb_min;
1336  uint16_t nb_align;
1346  uint16_t nb_seg_max;
1347 
1359  uint16_t nb_mtu_seg_max;
1360 };
1361 
1370 };
1371 
1378  uint32_t high_water;
1379  uint32_t low_water;
1380  uint16_t pause_time;
1381  uint16_t send_xon;
1382  enum rte_eth_fc_mode mode;
1384  uint8_t autoneg;
1385 };
1386 
1393  struct rte_eth_fc_conf fc;
1394  uint8_t priority;
1395 };
1396 
1407  uint8_t tc_max;
1409  enum rte_eth_fc_mode mode_capa;
1410 };
1411 
1430  enum rte_eth_fc_mode mode;
1432  struct {
1433  uint16_t tx_qid;
1437  uint8_t tc;
1438  } rx_pause; /* Valid when (mode == FC_RX_PAUSE || mode == FC_FULL) */
1439 
1440  struct {
1441  uint16_t pause_time;
1442  uint16_t rx_qid;
1446  uint8_t tc;
1447  } tx_pause; /* Valid when (mode == FC_TX_PAUSE || mode == FC_FULL) */
1448 };
1449 
1455  RTE_ETH_TUNNEL_TYPE_NONE = 0,
1456  RTE_ETH_TUNNEL_TYPE_VXLAN,
1457  RTE_ETH_TUNNEL_TYPE_GENEVE,
1458  RTE_ETH_TUNNEL_TYPE_TEREDO,
1459  RTE_ETH_TUNNEL_TYPE_NVGRE,
1460  RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1461  RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1462  RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1463  RTE_ETH_TUNNEL_TYPE_ECPRI,
1464  RTE_ETH_TUNNEL_TYPE_MAX,
1465 };
1466 
1467 #ifdef __cplusplus
1468 }
1469 #endif
1470 
1471 /* Deprecated API file for rte_eth_dev_filter_* functions */
1472 #include "rte_eth_ctrl.h"
1473 
1474 #ifdef __cplusplus
1475 extern "C" {
1476 #endif
1477 
1488  uint16_t udp_port;
1489  uint8_t prot_type;
1490 };
1491 
1497  uint32_t lsc:1;
1499  uint32_t rxq:1;
1501  uint32_t rmv:1;
1502 };
1503 
1504 #define rte_intr_conf rte_eth_intr_conf
1505 
1512  uint32_t link_speeds;
1519  struct rte_eth_rxmode rxmode;
1520  struct rte_eth_txmode txmode;
1521  uint32_t lpbk_mode;
1526  struct {
1527  struct rte_eth_rss_conf rss_conf;
1529  struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
1531  struct rte_eth_dcb_rx_conf dcb_rx_conf;
1533  struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
1534  } rx_adv_conf;
1535  union {
1537  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1539  struct rte_eth_dcb_tx_conf dcb_tx_conf;
1541  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1542  } tx_adv_conf;
1546  struct rte_eth_intr_conf intr_conf;
1547 };
1548 
1552 #define RTE_ETH_RX_OFFLOAD_VLAN_STRIP RTE_BIT64(0)
1553 #define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1554 #define RTE_ETH_RX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1555 #define RTE_ETH_RX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1556 #define RTE_ETH_RX_OFFLOAD_TCP_LRO RTE_BIT64(4)
1557 #define RTE_ETH_RX_OFFLOAD_QINQ_STRIP RTE_BIT64(5)
1558 #define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1559 #define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP RTE_BIT64(7)
1560 #define RTE_ETH_RX_OFFLOAD_VLAN_FILTER RTE_BIT64(9)
1561 #define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND RTE_BIT64(10)
1562 #define RTE_ETH_RX_OFFLOAD_SCATTER RTE_BIT64(13)
1563 
1568 #define RTE_ETH_RX_OFFLOAD_TIMESTAMP RTE_BIT64(14)
1569 #define RTE_ETH_RX_OFFLOAD_SECURITY RTE_BIT64(15)
1570 #define RTE_ETH_RX_OFFLOAD_KEEP_CRC RTE_BIT64(16)
1571 #define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM RTE_BIT64(17)
1572 #define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(18)
1573 #define RTE_ETH_RX_OFFLOAD_RSS_HASH RTE_BIT64(19)
1574 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT RTE_BIT64(20)
1575 
1576 #define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1577  RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1578  RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1579 #define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1580  RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1581  RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1582  RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1583 
1584 /*
1585  * If new Rx offload capabilities are defined, they also must be
1586  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1587  */
1588 
1592 #define RTE_ETH_TX_OFFLOAD_VLAN_INSERT RTE_BIT64(0)
1593 #define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1594 #define RTE_ETH_TX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1595 #define RTE_ETH_TX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1596 #define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM RTE_BIT64(4)
1597 #define RTE_ETH_TX_OFFLOAD_TCP_TSO RTE_BIT64(5)
1598 #define RTE_ETH_TX_OFFLOAD_UDP_TSO RTE_BIT64(6)
1599 #define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)
1600 #define RTE_ETH_TX_OFFLOAD_QINQ_INSERT RTE_BIT64(8)
1601 #define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO RTE_BIT64(9)
1602 #define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO RTE_BIT64(10)
1603 #define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO RTE_BIT64(11)
1604 #define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO RTE_BIT64(12)
1605 #define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT RTE_BIT64(13)
1606 
1610 #define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE RTE_BIT64(14)
1611 
1612 #define RTE_ETH_TX_OFFLOAD_MULTI_SEGS RTE_BIT64(15)
1613 
1618 #define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE RTE_BIT64(16)
1619 #define RTE_ETH_TX_OFFLOAD_SECURITY RTE_BIT64(17)
1620 
1625 #define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO RTE_BIT64(18)
1626 
1631 #define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO RTE_BIT64(19)
1632 
1633 #define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(20)
1634 
1639 #define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1640 /*
1641  * If new Tx offload capabilities are defined, they also must be
1642  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1643  */
1644 
1649 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1650 
1651 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1652 
1661 #define RTE_ETH_DEV_CAPA_RXQ_SHARE RTE_BIT64(2)
1662 
1663 #define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP RTE_BIT64(3)
1664 
1665 #define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1666 
1668 /*
1669  * Fallback default preferred Rx/Tx port parameters.
1670  * These are used if an application requests default parameters
1671  * but the PMD does not provide preferred values.
1672  */
1673 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1674 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1675 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1676 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1677 
1684  uint16_t burst_size;
1685  uint16_t ring_size;
1686  uint16_t nb_queues;
1687 };
1688 
1693 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1694 
1699  const char *name;
1700  uint16_t domain_id;
1708  uint16_t port_id;
1714  uint16_t rx_domain;
1715 };
1716 
1724  __extension__
1725  uint32_t multi_pools:1;
1726  uint32_t offset_allowed:1;
1727  uint32_t offset_align_log2:4;
1728  uint16_t max_nseg;
1729  uint16_t reserved;
1730 };
1731 
1744 };
1745 
1766 };
1767 
1774  struct rte_device *device;
1775  const char *driver_name;
1776  unsigned int if_index;
1778  uint16_t min_mtu;
1779  uint16_t max_mtu;
1780  const uint32_t *dev_flags;
1782  uint32_t min_rx_bufsize;
1789  uint32_t max_rx_bufsize;
1790  uint32_t max_rx_pktlen;
1793  uint16_t max_rx_queues;
1794  uint16_t max_tx_queues;
1795  uint32_t max_mac_addrs;
1798  uint16_t max_vfs;
1799  uint16_t max_vmdq_pools;
1800  struct rte_eth_rxseg_capa rx_seg_capa;
1810  uint16_t reta_size;
1811  uint8_t hash_key_size;
1812  uint32_t rss_algo_capa;
1815  struct rte_eth_rxconf default_rxconf;
1816  struct rte_eth_txconf default_txconf;
1817  uint16_t vmdq_queue_base;
1818  uint16_t vmdq_queue_num;
1819  uint16_t vmdq_pool_base;
1820  struct rte_eth_desc_lim rx_desc_lim;
1821  struct rte_eth_desc_lim tx_desc_lim;
1822  uint32_t speed_capa;
1824  uint16_t nb_rx_queues;
1825  uint16_t nb_tx_queues;
1834  struct rte_eth_dev_portconf default_rxportconf;
1836  struct rte_eth_dev_portconf default_txportconf;
1838  uint64_t dev_capa;
1843  struct rte_eth_switch_info switch_info;
1845  enum rte_eth_err_handle_mode err_handle_mode;
1846 
1847  uint64_t reserved_64s[2];
1848  void *reserved_ptrs[2];
1849 };
1850 
1852 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1853 #define RTE_ETH_QUEUE_STATE_STARTED 1
1854 #define RTE_ETH_QUEUE_STATE_HAIRPIN 2
/**
 * Ethernet device Rx queue information structure.
 * Used to retrieve information about a configured Rx queue.
 */
struct __rte_cache_min_aligned rte_eth_rxq_info {
	struct rte_mempool *mp;  /**< Mempool used by this Rx queue. */
	struct rte_eth_rxconf conf; /**< Queue configuration parameters. */
	uint8_t scattered_rx;    /**< Non-zero when scattered packets Rx is in use. */
	uint8_t queue_state;     /**< One of RTE_ETH_QUEUE_STATE_* values. */
	uint16_t nb_desc;        /**< Configured number of Rx descriptors. */
	uint16_t rx_buf_size;    /**< Rx buffer size in bytes. */
	/**
	 * Per-queue available descriptors threshold.
	 * NOTE(review): appears to be a percentage of nb_desc — confirm
	 * against rte_eth_rx_avail_thresh_set() documentation.
	 */
	uint8_t avail_thresh;
};
1876 
1882  struct rte_eth_txconf conf;
1883  uint16_t nb_desc;
1884  uint8_t queue_state;
1885 };
1886 
1896  struct rte_mbuf **mbuf_ring;
1897  struct rte_mempool *mp;
1898  uint16_t *refill_head;
1899  uint16_t *receive_tail;
1900  uint16_t mbuf_ring_size;
1909 };
1910 
1911 /* Generic Burst mode flag definition, values can be ORed. */
1912 
1918 #define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1919 
1925  uint64_t flags;
1927 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1928  char info[RTE_ETH_BURST_MODE_INFO_SIZE];
1929 };
1930 
1932 #define RTE_ETH_XSTATS_NAME_SIZE 64
1933 
1944  uint64_t id;
1945  uint64_t value;
1946 };
1947 
1964 };
1965 
1966 #define RTE_ETH_DCB_NUM_TCS 8
1967 #define RTE_ETH_MAX_VMDQ_POOL 64
1968 
1975  struct {
1976  uint16_t base;
1977  uint16_t nb_queue;
1978  } tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1980  struct {
1981  uint16_t base;
1982  uint16_t nb_queue;
1983  } tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1984 };
1985 
1991  uint8_t nb_tcs;
1993  uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS];
1996 };
1997 
2008 };
2009 
2010 /* Translate from FEC mode to FEC capa */
2011 #define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)
2012 
2013 /* This macro indicates FEC capa mask */
2014 #define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)
2015 
/* A structure used to get capabilities per link speed */
struct rte_eth_fec_capa {
	uint32_t speed; /**< Link speed (RTE_ETH_SPEED_NUM_*) this entry applies to. */
	uint32_t capa;  /**< Bitmask of supported FEC modes (see RTE_ETH_FEC_MODE_TO_CAPA). */
};
2021 
2022 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
2023 
2024 /* Macros to check for valid port */
2025 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
2026  if (!rte_eth_dev_is_valid_port(port_id)) { \
2027  RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
2028  return retval; \
2029  } \
2030 } while (0)
2031 
2032 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
2033  if (!rte_eth_dev_is_valid_port(port_id)) { \
2034  RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
2035  return; \
2036  } \
2037 } while (0)
2038 
2061 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
2062  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
2063  void *user_param);
2064 
2085 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
2086  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
2087 
2098 };
2099 
/** SR-IOV related state of an Ethernet device (see RTE_ETH_DEV_SRIOV). */
struct rte_eth_dev_sriov {
	uint8_t active;         /**< Non-zero when SR-IOV is enabled — presumably holds the SR-IOV mode; verify against PMDs. */
	uint8_t nb_q_per_pool;  /**< Number of Rx/Tx queues per pool. */
	uint16_t def_vmdq_idx;  /**< Default pool index — NOTE(review): exact semantics not visible here; confirm. */
	uint16_t def_pool_q_idx; /**< Default queue index within the default pool — TODO confirm. */
};
2106 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
2107 
2108 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2109 
2110 #define RTE_ETH_DEV_NO_OWNER 0
2111 
2112 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
2113 
/**
 * Ethernet port ownership descriptor, used by the
 * rte_eth_dev_owner_new/set/unset/delete/get() API family to claim
 * exclusive management of a port.
 */
struct rte_eth_dev_owner {
	uint64_t id; /**< Owner unique identifier (RTE_ETH_DEV_NO_OWNER means unowned). */
	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< Human-readable owner name (NUL-terminated string). */
};
2118 
2124 #define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE RTE_BIT32(0)
2125 
2126 #define RTE_ETH_DEV_INTR_LSC RTE_BIT32(1)
2127 
2128 #define RTE_ETH_DEV_BONDING_MEMBER RTE_BIT32(2)
2129 
2130 #define RTE_ETH_DEV_INTR_RMV RTE_BIT32(3)
2131 
2132 #define RTE_ETH_DEV_REPRESENTOR RTE_BIT32(4)
2133 
2134 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR RTE_BIT32(5)
2135 
2139 #define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2140 
2153 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2154  const uint64_t owner_id);
2155 
2159 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
2160  for (p = rte_eth_find_next_owned_by(0, o); \
2161  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
2162  p = rte_eth_find_next_owned_by(p + 1, o))
2163 
2172 uint16_t rte_eth_find_next(uint16_t port_id);
2173 
2177 #define RTE_ETH_FOREACH_DEV(p) \
2178  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
2179 
2191 uint16_t
2192 rte_eth_find_next_of(uint16_t port_id_start,
2193  const struct rte_device *parent);
2194 
2203 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
2204  for (port_id = rte_eth_find_next_of(0, parent); \
2205  port_id < RTE_MAX_ETHPORTS; \
2206  port_id = rte_eth_find_next_of(port_id + 1, parent))
2207 
2219 uint16_t
2220 rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2221 
2232 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
2233  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
2234  port_id < RTE_MAX_ETHPORTS; \
2235  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2236 
2247 int rte_eth_dev_owner_new(uint64_t *owner_id);
2248 
2259 int rte_eth_dev_owner_set(const uint16_t port_id,
2260  const struct rte_eth_dev_owner *owner);
2261 
2272 int rte_eth_dev_owner_unset(const uint16_t port_id,
2273  const uint64_t owner_id);
2274 
2283 int rte_eth_dev_owner_delete(const uint64_t owner_id);
2284 
2295 int rte_eth_dev_owner_get(const uint16_t port_id,
2296  struct rte_eth_dev_owner *owner);
2297 
2308 uint16_t rte_eth_dev_count_avail(void);
2309 
2318 uint16_t rte_eth_dev_count_total(void);
2319 
2331 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2332 
2341 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2342 
2351 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2352 
2364 __rte_experimental
2365 const char *rte_eth_dev_capability_name(uint64_t capability);
2366 
2406 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2407  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2408 
2417 int
2418 rte_eth_dev_is_removed(uint16_t port_id);
2419 
2482 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2483  uint16_t nb_rx_desc, unsigned int socket_id,
2484  const struct rte_eth_rxconf *rx_conf,
2485  struct rte_mempool *mb_pool);
2486 
2514 __rte_experimental
2516  (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2517  const struct rte_eth_hairpin_conf *conf);
2518 
2567 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2568  uint16_t nb_tx_desc, unsigned int socket_id,
2569  const struct rte_eth_txconf *tx_conf);
2570 
2596 __rte_experimental
2598  (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2599  const struct rte_eth_hairpin_conf *conf);
2600 
2627 __rte_experimental
2628 int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2629  size_t len, uint32_t direction);
2630 
2653 __rte_experimental
2654 int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2655 
2680 __rte_experimental
2681 int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2682 
2698 __rte_experimental
2699 int rte_eth_dev_count_aggr_ports(uint16_t port_id);
2700 
2728 __rte_experimental
2729 int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
2730  uint8_t affinity);
2731 
2744 int rte_eth_dev_socket_id(uint16_t port_id);
2745 
2755 int rte_eth_dev_is_valid_port(uint16_t port_id);
2756 
2773 __rte_experimental
2774 int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2775 
2792 __rte_experimental
2793 int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2794 
2812 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2813 
2830 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2831 
2849 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2850 
2867 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2868 
2892 int rte_eth_dev_start(uint16_t port_id);
2893 
2907 int rte_eth_dev_stop(uint16_t port_id);
2908 
2921 int rte_eth_dev_set_link_up(uint16_t port_id);
2922 
2932 int rte_eth_dev_set_link_down(uint16_t port_id);
2933 
2944 int rte_eth_dev_close(uint16_t port_id);
2945 
2983 int rte_eth_dev_reset(uint16_t port_id);
2984 
2996 int rte_eth_promiscuous_enable(uint16_t port_id);
2997 
3009 int rte_eth_promiscuous_disable(uint16_t port_id);
3010 
3021 int rte_eth_promiscuous_get(uint16_t port_id);
3022 
3034 int rte_eth_allmulticast_enable(uint16_t port_id);
3035 
3047 int rte_eth_allmulticast_disable(uint16_t port_id);
3048 
3059 int rte_eth_allmulticast_get(uint16_t port_id);
3060 
3078 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
3080 
3095 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
3097 
3111 __rte_experimental
3112 const char *rte_eth_link_speed_to_str(uint32_t link_speed);
3113 
3132 __rte_experimental
3133 int rte_eth_link_to_str(char *str, size_t len,
3134  const struct rte_eth_link *eth_link);
3135 
3156 __rte_experimental
3157 int rte_eth_speed_lanes_get(uint16_t port_id, uint32_t *lanes);
3158 
3180 __rte_experimental
3181 int rte_eth_speed_lanes_set(uint16_t port_id, uint32_t speed_lanes);
3182 
3205 __rte_experimental
3206 int rte_eth_speed_lanes_get_capability(uint16_t port_id,
3207  struct rte_eth_speed_lanes_capa *speed_lanes_capa,
3208  unsigned int num);
3209 
3227 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
3228 
3240 int rte_eth_stats_reset(uint16_t port_id);
3241 
3271 int rte_eth_xstats_get_names(uint16_t port_id,
3272  struct rte_eth_xstat_name *xstats_names,
3273  unsigned int size);
3274 
3308 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3309  unsigned int n);
3310 
3335 int
3336 rte_eth_xstats_get_names_by_id(uint16_t port_id,
3337  struct rte_eth_xstat_name *xstats_names, unsigned int size,
3338  uint64_t *ids);
3339 
3364 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3365  uint64_t *values, unsigned int size);
3366 
3386 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3387  uint64_t *id);
3388 
3401 int rte_eth_xstats_reset(uint16_t port_id);
3402 
3421 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
3422  uint16_t tx_queue_id, uint8_t stat_idx);
3423 
3442 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
3443  uint16_t rx_queue_id,
3444  uint8_t stat_idx);
3445 
3459 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3460 
3481 __rte_experimental
3482 int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3483  unsigned int num);
3484 
3504 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3506 
3522 __rte_experimental
3523 int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3525 
3546 int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3548 
3588 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3589  uint32_t *ptypes, int num)
3591 
3622 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3623  uint32_t *set_ptypes, unsigned int num);
3624 
3637 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3638 
3656 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3657 
3677 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3678 
3697 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3698  int on);
3699 
3716 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3717  enum rte_vlan_type vlan_type,
3718  uint16_t tag_type);
3719 
3737 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3738 
3752 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3753 
3768 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3769 
3795 __rte_experimental
3796 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
3797  uint8_t avail_thresh);
3798 
3825 __rte_experimental
3826 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
3827  uint8_t *avail_thresh);
3828 
3829 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3830  void *userdata);
3831 
3837  buffer_tx_error_fn error_callback;
3838  void *error_userdata;
3839  uint16_t size;
3840  uint16_t length;
3842  struct rte_mbuf *pkts[];
3843 };
3844 
3851 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3852  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3853 
3864 int
3865 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3866 
3891 int
3893  buffer_tx_error_fn callback, void *userdata);
3894 
3917 void
3918 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3919  void *userdata);
3920 
3944 void
3945 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3946  void *userdata);
3947 
3973 int
3974 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3975 
4008 };
4009 
4029 };
4030 
4049  uint64_t metadata;
4050 };
4051 
4089 };
4090 
4115  uint64_t metadata;
4116 };
4117 
4194 };
4195 
4209 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
4210  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
4211 
4229 int rte_eth_dev_callback_register(uint16_t port_id,
4230  enum rte_eth_event_type event,
4231  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4232 
4251 int rte_eth_dev_callback_unregister(uint16_t port_id,
4252  enum rte_eth_event_type event,
4253  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4254 
4276 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
4277 
4298 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
4299 
4317 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
4318 
4340 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4341  int epfd, int op, void *data);
4342 
4357 int
4358 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
4359 
4373 int rte_eth_led_on(uint16_t port_id);
4374 
4388 int rte_eth_led_off(uint16_t port_id);
4389 
4418 __rte_experimental
4419 int rte_eth_fec_get_capability(uint16_t port_id,
4420  struct rte_eth_fec_capa *speed_fec_capa,
4421  unsigned int num);
4422 
4443 __rte_experimental
4444 int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4445 
4469 __rte_experimental
4470 int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
4471 
4486 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4487  struct rte_eth_fc_conf *fc_conf);
4488 
4503 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4504  struct rte_eth_fc_conf *fc_conf);
4505 
4521 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
4522  struct rte_eth_pfc_conf *pfc_conf);
4523 
4542 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4543  uint32_t pool);
4544 
4562 __rte_experimental
4564  struct rte_eth_pfc_queue_info *pfc_queue_info);
4565 
4589 __rte_experimental
4591  struct rte_eth_pfc_queue_conf *pfc_queue_conf);
4592 
4607 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4608  struct rte_ether_addr *mac_addr);
4609 
4627 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
4628  struct rte_ether_addr *mac_addr);
4629 
4647 int rte_eth_dev_rss_reta_update(uint16_t port_id,
4648  struct rte_eth_rss_reta_entry64 *reta_conf,
4649  uint16_t reta_size);
4650 
4669 int rte_eth_dev_rss_reta_query(uint16_t port_id,
4670  struct rte_eth_rss_reta_entry64 *reta_conf,
4671  uint16_t reta_size);
4672 
4692 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4693  uint8_t on);
4694 
4713 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4714 
4731 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4732  uint32_t tx_rate);
4733 
4748 int rte_eth_dev_rss_hash_update(uint16_t port_id,
4749  struct rte_eth_rss_conf *rss_conf);
4750 
4766 int
4767 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4768  struct rte_eth_rss_conf *rss_conf);
4769 
4782 __rte_experimental
4783 const char *
4785 
4802 __rte_experimental
4803 int
4804 rte_eth_find_rss_algo(const char *name, uint32_t *algo);
4805 
4830 int
4831 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4832  struct rte_eth_udp_tunnel *tunnel_udp);
4833 
4853 int
4854 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4855  struct rte_eth_udp_tunnel *tunnel_udp);
4856 
4871 int rte_eth_dev_get_dcb_info(uint16_t port_id,
4872  struct rte_eth_dcb_info *dcb_info);
4873 
4874 struct rte_eth_rxtx_callback;
4875 
4901 const struct rte_eth_rxtx_callback *
4902 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4903  rte_rx_callback_fn fn, void *user_param);
4904 
4931 const struct rte_eth_rxtx_callback *
4932 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4933  rte_rx_callback_fn fn, void *user_param);
4934 
4960 const struct rte_eth_rxtx_callback *
4961 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4962  rte_tx_callback_fn fn, void *user_param);
4963 
4997 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4998  const struct rte_eth_rxtx_callback *user_cb);
4999 
5033 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5034  const struct rte_eth_rxtx_callback *user_cb);
5035 
5055 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5056  struct rte_eth_rxq_info *qinfo);
5057 
5077 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5078  struct rte_eth_txq_info *qinfo);
5079 
5100 __rte_experimental
5101 int rte_eth_recycle_rx_queue_info_get(uint16_t port_id,
5102  uint16_t queue_id,
5103  struct rte_eth_recycle_rxq_info *recycle_rxq_info);
5104 
5123 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5124  struct rte_eth_burst_mode *mode);
5125 
5144 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5145  struct rte_eth_burst_mode *mode);
5146 
5167 __rte_experimental
5168 int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5169  struct rte_power_monitor_cond *pmc);
5170 
5197 __rte_experimental
5198 int rte_eth_dev_get_reg_info_ext(uint16_t port_id, struct rte_dev_reg_info *info);
5199 
5218 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5220 
5233 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
5234 
5251 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5252 
5269 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5270 
5289 __rte_experimental
5290 int
5291 rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
5293 
5313 __rte_experimental
5314 int
5315 rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5317 
5337 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5338  struct rte_ether_addr *mc_addr_set,
5339  uint32_t nb_mc_addr);
5340 
5353 int rte_eth_timesync_enable(uint16_t port_id);
5354 
5367 int rte_eth_timesync_disable(uint16_t port_id);
5368 
5387 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
5388  struct timespec *timestamp, uint32_t flags);
5389 
5405 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5406  struct timespec *timestamp);
5407 
5425 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
5426 
5467 __rte_experimental
5468 int rte_eth_timesync_adjust_freq(uint16_t port_id, int64_t ppm);
5469 
5485 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
5486 
5505 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
5506 
5552 __rte_experimental
5553 int
5554 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
5555 
5571 int
5572 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
5573 
5590 int
5591 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5592 
5609 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5610  uint16_t *nb_rx_desc,
5611  uint16_t *nb_tx_desc);
5612 
5627 int
5628 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5629 
5639 void *
5640 rte_eth_dev_get_sec_ctx(uint16_t port_id);
5641 
5657 __rte_experimental
5658 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5659  struct rte_eth_hairpin_cap *cap);
5660 
5670  int pf;
5671  __extension__
5672  union {
5673  int vf;
5674  int sf;
5675  };
5676  uint32_t id_base;
5677  uint32_t id_end;
5678  char name[RTE_DEV_NAME_MAX_LEN];
5679 };
5680 
5688  uint16_t controller;
5689  uint16_t pf;
5690  uint32_t nb_ranges_alloc;
5691  uint32_t nb_ranges;
5692  struct rte_eth_representor_range ranges[];
5693 };
5694 
5718 __rte_experimental
5719 int rte_eth_representor_info_get(uint16_t port_id,
5720  struct rte_eth_representor_info *info);
5721 
5723 #define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5724 
5726 #define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5727 
5729 #define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5730 
5770 int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
5771 
5773 #define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
5774 
5775 #define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))
5776 
5787  uint32_t timeout_ms;
5789  uint16_t max_frags;
5794  uint16_t flags;
5795 };
5796 
5817 __rte_experimental
5818 int rte_eth_ip_reassembly_capability_get(uint16_t port_id,
5819  struct rte_eth_ip_reassembly_params *capa);
5820 
5842 __rte_experimental
5843 int rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5844  struct rte_eth_ip_reassembly_params *conf);
5845 
5875 __rte_experimental
5876 int rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5877  const struct rte_eth_ip_reassembly_params *conf);
5878 
5886 typedef struct {
5893  uint16_t time_spent;
5895  uint16_t nb_frags;
5897 
5916 __rte_experimental
5917 int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file);
5918 
5942 __rte_experimental
5943 int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5944  uint16_t offset, uint16_t num, FILE *file);
5945 
5969 __rte_experimental
5970 int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5971  uint16_t offset, uint16_t num, FILE *file);
5972 
5973 
5974 /* Congestion management */
5975 
5985 };
5986 
6003  uint64_t objs_supported;
6008  uint8_t rsvd[8];
6009 };
6010 
6021  enum rte_cman_mode mode;
6022  union {
6029  uint16_t rx_queue;
6036  uint8_t rsvd_obj_params[4];
6037  } obj_param;
6038  union {
6051  uint8_t rsvd_mode_params[4];
6052  } mode_param;
6053 };
6054 
6072 __rte_experimental
6073 int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info);
6074 
6092 __rte_experimental
6093 int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config);
6094 
6111 __rte_experimental
6112 int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config);
6113 
6134 __rte_experimental
6135 int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config);
6136 
6137 #ifdef __cplusplus
6138 }
6139 #endif
6140 
6141 #include <rte_ethdev_core.h>
6142 
6143 #ifdef __cplusplus
6144 extern "C" {
6145 #endif
6146 
6170 uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
6171  struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
6172  void *opaque);
6173 
6261 static inline uint16_t
6262 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
6263  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
6264 {
6265  uint16_t nb_rx;
6266  struct rte_eth_fp_ops *p;
6267  void *qd;
6268 
6269 #ifdef RTE_ETHDEV_DEBUG_RX
6270  if (port_id >= RTE_MAX_ETHPORTS ||
6271  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6272  RTE_ETHDEV_LOG_LINE(ERR,
6273  "Invalid port_id=%u or queue_id=%u",
6274  port_id, queue_id);
6275  return 0;
6276  }
6277 #endif
6278 
6279  /* fetch pointer to queue data */
6280  p = &rte_eth_fp_ops[port_id];
6281  qd = p->rxq.data[queue_id];
6282 
6283 #ifdef RTE_ETHDEV_DEBUG_RX
6284  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6285 
6286  if (qd == NULL) {
6287  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
6288  queue_id, port_id);
6289  return 0;
6290  }
6291 #endif
6292 
6293  nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
6294 
6295 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6296  {
6297  void *cb;
6298 
6299  /* rte_memory_order_release memory order was used when the
6300  * call back was inserted into the list.
6301  * Since there is a clear dependency between loading
6302  * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6303  * not required.
6304  */
6305  cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
6306  rte_memory_order_relaxed);
6307  if (unlikely(cb != NULL))
6308  nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
6309  rx_pkts, nb_rx, nb_pkts, cb);
6310  }
6311 #endif
6312 
6313  if (unlikely(nb_rx))
6314  rte_ethdev_trace_rx_burst_nonempty(port_id, queue_id, (void **)rx_pkts, nb_rx);
6315  else
6316  rte_ethdev_trace_rx_burst_empty(port_id, queue_id, (void **)rx_pkts);
6317  return nb_rx;
6318 }
6319 
6337 static inline int
6338 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
6339 {
6340  struct rte_eth_fp_ops *p;
6341  void *qd;
6342 
6343 #ifdef RTE_ETHDEV_DEBUG_RX
6344  if (port_id >= RTE_MAX_ETHPORTS ||
6345  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6346  RTE_ETHDEV_LOG_LINE(ERR,
6347  "Invalid port_id=%u or queue_id=%u",
6348  port_id, queue_id);
6349  return -EINVAL;
6350  }
6351 #endif
6352 
6353  /* fetch pointer to queue data */
6354  p = &rte_eth_fp_ops[port_id];
6355  qd = p->rxq.data[queue_id];
6356 
6357 #ifdef RTE_ETHDEV_DEBUG_RX
6358  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6359  if (qd == NULL)
6360  return -EINVAL;
6361 #endif
6362 
6363  if (*p->rx_queue_count == NULL)
6364  return -ENOTSUP;
6365  return (int)(*p->rx_queue_count)(qd);
6366 }
6367 
6371 #define RTE_ETH_RX_DESC_AVAIL 0
6372 #define RTE_ETH_RX_DESC_DONE 1
6373 #define RTE_ETH_RX_DESC_UNAVAIL 2
6409 static inline int
6410 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
6411  uint16_t offset)
6412 {
6413  struct rte_eth_fp_ops *p;
6414  void *qd;
6415 
6416 #ifdef RTE_ETHDEV_DEBUG_RX
6417  if (port_id >= RTE_MAX_ETHPORTS ||
6418  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6419  RTE_ETHDEV_LOG_LINE(ERR,
6420  "Invalid port_id=%u or queue_id=%u",
6421  port_id, queue_id);
6422  return -EINVAL;
6423  }
6424 #endif
6425 
6426  /* fetch pointer to queue data */
6427  p = &rte_eth_fp_ops[port_id];
6428  qd = p->rxq.data[queue_id];
6429 
6430 #ifdef RTE_ETHDEV_DEBUG_RX
6431  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6432  if (qd == NULL)
6433  return -ENODEV;
6434 #endif
6435  if (*p->rx_descriptor_status == NULL)
6436  return -ENOTSUP;
6437  return (*p->rx_descriptor_status)(qd, offset);
6438 }
6439 
6443 #define RTE_ETH_TX_DESC_FULL 0
6444 #define RTE_ETH_TX_DESC_DONE 1
6445 #define RTE_ETH_TX_DESC_UNAVAIL 2
6481 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
6482  uint16_t queue_id, uint16_t offset)
6483 {
6484  struct rte_eth_fp_ops *p;
6485  void *qd;
6486 
6487 #ifdef RTE_ETHDEV_DEBUG_TX
6488  if (port_id >= RTE_MAX_ETHPORTS ||
6489  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6490  RTE_ETHDEV_LOG_LINE(ERR,
6491  "Invalid port_id=%u or queue_id=%u",
6492  port_id, queue_id);
6493  return -EINVAL;
6494  }
6495 #endif
6496 
6497  /* fetch pointer to queue data */
6498  p = &rte_eth_fp_ops[port_id];
6499  qd = p->txq.data[queue_id];
6500 
6501 #ifdef RTE_ETHDEV_DEBUG_TX
6502  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6503  if (qd == NULL)
6504  return -ENODEV;
6505 #endif
6506  if (*p->tx_descriptor_status == NULL)
6507  return -ENOTSUP;
6508  return (*p->tx_descriptor_status)(qd, offset);
6509 }
6510 
6530 uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
6531  struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
6532 
6604 static inline uint16_t
6605 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
6606  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6607 {
6608  struct rte_eth_fp_ops *p;
6609  void *qd;
6610 
6611 #ifdef RTE_ETHDEV_DEBUG_TX
6612  if (port_id >= RTE_MAX_ETHPORTS ||
6613  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6614  RTE_ETHDEV_LOG_LINE(ERR,
6615  "Invalid port_id=%u or queue_id=%u",
6616  port_id, queue_id);
6617  return 0;
6618  }
6619 #endif
6620 
6621  /* fetch pointer to queue data */
6622  p = &rte_eth_fp_ops[port_id];
6623  qd = p->txq.data[queue_id];
6624 
6625 #ifdef RTE_ETHDEV_DEBUG_TX
6626  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6627 
6628  if (qd == NULL) {
6629  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6630  queue_id, port_id);
6631  return 0;
6632  }
6633 #endif
6634 
6635 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6636  {
6637  void *cb;
6638 
6639  /* rte_memory_order_release memory order was used when the
6640  * call back was inserted into the list.
6641  * Since there is a clear dependency between loading
6642  * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6643  * not required.
6644  */
6645  cb = rte_atomic_load_explicit(&p->txq.clbk[queue_id],
6646  rte_memory_order_relaxed);
6647  if (unlikely(cb != NULL))
6648  nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
6649  tx_pkts, nb_pkts, cb);
6650  }
6651 #endif
6652 
6653  nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
6654 
6655  rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
6656  return nb_pkts;
6657 }
6658 
6712 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
6713 
6714 static inline uint16_t
6715 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
6716  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6717 {
6718  struct rte_eth_fp_ops *p;
6719  void *qd;
6720 
6721 #ifdef RTE_ETHDEV_DEBUG_TX
6722  if (port_id >= RTE_MAX_ETHPORTS ||
6723  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6724  RTE_ETHDEV_LOG_LINE(ERR,
6725  "Invalid port_id=%u or queue_id=%u",
6726  port_id, queue_id);
6727  rte_errno = ENODEV;
6728  return 0;
6729  }
6730 #endif
6731 
6732  /* fetch pointer to queue data */
6733  p = &rte_eth_fp_ops[port_id];
6734  qd = p->txq.data[queue_id];
6735 
6736 #ifdef RTE_ETHDEV_DEBUG_TX
6737  if (!rte_eth_dev_is_valid_port(port_id)) {
6738  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx port_id=%u", port_id);
6739  rte_errno = ENODEV;
6740  return 0;
6741  }
6742  if (qd == NULL) {
6743  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6744  queue_id, port_id);
6745  rte_errno = EINVAL;
6746  return 0;
6747  }
6748 #endif
6749 
6750  if (!p->tx_pkt_prepare)
6751  return nb_pkts;
6752 
6753  return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
6754 }
6755 
6756 #else
6757 
/*
 * Native no-op implementation for compilation targets which do not require
 * any preparation steps, where a functional no-op could introduce an
 * unnecessary performance drop.
 *
 * Enabling this globally is generally a bad idea, and it must not be used
 * if the behavior of tx_preparation may change.
 */
6766 
6767 static inline uint16_t
6768 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
6769  __rte_unused uint16_t queue_id,
6770  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6771 {
6772  return nb_pkts;
6773 }
6774 
6775 #endif
6776 
6799 static inline uint16_t
6800 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
6801  struct rte_eth_dev_tx_buffer *buffer)
6802 {
6803  uint16_t sent;
6804  uint16_t to_send = buffer->length;
6805 
6806  if (to_send == 0)
6807  return 0;
6808 
6809  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
6810 
6811  buffer->length = 0;
6812 
6813  /* All packets sent, or to be dealt with by callback below */
6814  if (unlikely(sent != to_send))
6815  buffer->error_callback(&buffer->pkts[sent],
6816  (uint16_t)(to_send - sent),
6817  buffer->error_userdata);
6818 
6819  return sent;
6820 }
6821 
6852 static __rte_always_inline uint16_t
6853 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
6854  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
6855 {
6856  buffer->pkts[buffer->length++] = tx_pkt;
6857  if (buffer->length < buffer->size)
6858  return 0;
6859 
6860  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
6861 }
6862 
6916 __rte_experimental
6917 static inline uint16_t
6918 rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
6919  uint16_t tx_port_id, uint16_t tx_queue_id,
6920  struct rte_eth_recycle_rxq_info *recycle_rxq_info)
6921 {
6922  struct rte_eth_fp_ops *p1, *p2;
6923  void *qd1, *qd2;
6924  uint16_t nb_mbufs;
6925 
6926 #ifdef RTE_ETHDEV_DEBUG_TX
6927  if (tx_port_id >= RTE_MAX_ETHPORTS ||
6928  tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6929  RTE_ETHDEV_LOG_LINE(ERR,
6930  "Invalid tx_port_id=%u or tx_queue_id=%u",
6931  tx_port_id, tx_queue_id);
6932  return 0;
6933  }
6934 #endif
6935 
6936  /* fetch pointer to Tx queue data */
6937  p1 = &rte_eth_fp_ops[tx_port_id];
6938  qd1 = p1->txq.data[tx_queue_id];
6939 
6940 #ifdef RTE_ETHDEV_DEBUG_TX
6941  RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port_id, 0);
6942 
6943  if (qd1 == NULL) {
6944  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6945  tx_queue_id, tx_port_id);
6946  return 0;
6947  }
6948 #endif
6949  if (p1->recycle_tx_mbufs_reuse == NULL)
6950  return 0;
6951 
6952 #ifdef RTE_ETHDEV_DEBUG_RX
6953  if (rx_port_id >= RTE_MAX_ETHPORTS ||
6954  rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6955  RTE_ETHDEV_LOG_LINE(ERR, "Invalid rx_port_id=%u or rx_queue_id=%u",
6956  rx_port_id, rx_queue_id);
6957  return 0;
6958  }
6959 #endif
6960 
6961  /* fetch pointer to Rx queue data */
6962  p2 = &rte_eth_fp_ops[rx_port_id];
6963  qd2 = p2->rxq.data[rx_queue_id];
6964 
6965 #ifdef RTE_ETHDEV_DEBUG_RX
6966  RTE_ETH_VALID_PORTID_OR_ERR_RET(rx_port_id, 0);
6967 
6968  if (qd2 == NULL) {
6969  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
6970  rx_queue_id, rx_port_id);
6971  return 0;
6972  }
6973 #endif
6974  if (p2->recycle_rx_descriptors_refill == NULL)
6975  return 0;
6976 
6977  /* Copy used *rte_mbuf* buffer pointers from Tx mbuf ring
6978  * into Rx mbuf ring.
6979  */
6980  nb_mbufs = p1->recycle_tx_mbufs_reuse(qd1, recycle_rxq_info);
6981 
6982  /* If no recycling mbufs, return 0. */
6983  if (nb_mbufs == 0)
6984  return 0;
6985 
6986  /* Replenish the Rx descriptors with the recycling
6987  * into Rx mbuf ring.
6988  */
6989  p2->recycle_rx_descriptors_refill(qd2, nb_mbufs);
6990 
6991  return nb_mbufs;
6992 }
6993 
7022 __rte_experimental
7023 int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
7025 
7060 __rte_experimental
7061 static inline int
7062 rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
7063 {
7064  struct rte_eth_fp_ops *fops;
7065  void *qd;
7066  int rc;
7067 
7068 #ifdef RTE_ETHDEV_DEBUG_TX
7069  if (port_id >= RTE_MAX_ETHPORTS || !rte_eth_dev_is_valid_port(port_id)) {
7070  RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id);
7071  rc = -ENODEV;
7072  goto out;
7073  }
7074 
7075  if (queue_id >= RTE_MAX_QUEUES_PER_PORT) {
7076  RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
7077  queue_id, port_id);
7078  rc = -EINVAL;
7079  goto out;
7080  }
7081 #endif
7082 
7083  /* Fetch pointer to Tx queue data */
7084  fops = &rte_eth_fp_ops[port_id];
7085  qd = fops->txq.data[queue_id];
7086 
7087 #ifdef RTE_ETHDEV_DEBUG_TX
7088  if (qd == NULL) {
7089  RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
7090  queue_id, port_id);
7091  rc = -EINVAL;
7092  goto out;
7093  }
7094 #endif
7095  if (fops->tx_queue_count == NULL) {
7096  rc = -ENOTSUP;
7097  goto out;
7098  }
7099 
7100  rc = fops->tx_queue_count(qd);
7101 
7102 out:
7103  rte_eth_trace_tx_queue_count(port_id, queue_id, rc);
7104  return rc;
7105 }
7106 
7107 #ifdef __cplusplus
7108 }
7109 #endif
7110 
7111 #endif /* _RTE_ETHDEV_H_ */
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1824
int rte_eth_dev_stop(uint16_t port_id)
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_promiscuous_disable(uint16_t port_id)
__extension__ uint32_t multi_pools
Definition: rte_ethdev.h:1725
int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
__rte_experimental int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
#define __rte_always_inline
Definition: rte_common.h:413
#define RTE_ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:852
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:1185
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
uint32_t mtu
Definition: rte_ethdev.h:430
uint16_t nb_desc
Definition: rte_ethdev.h:1883
rte_eth_event_macsec_type
Definition: rte_ethdev.h:4014
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1802
const uint32_t * dev_flags
Definition: rte_ethdev.h:1780
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
__rte_experimental int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6715
struct rte_device * device
Definition: rte_ethdev.h:1774
rte_eth_nb_tcs
Definition: rte_ethdev.h:913
__rte_experimental int rte_eth_speed_lanes_get(uint16_t port_id, uint32_t *lanes)
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:285
static __rte_experimental int rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:7062
uint16_t rte_eth_find_next(uint16_t port_id)
__rte_experimental int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
rte_cman_mode
Definition: rte_cman.h:16
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) __rte_warn_unused_result
int rte_eth_led_off(uint16_t port_id)
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:6410
uint64_t imissed
Definition: rte_ethdev.h:271
uint32_t low_water
Definition: rte_ethdev.h:1379
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
uint8_t rss_key_len
Definition: rte_ethdev.h:508
uint32_t max_rx_bufsize
Definition: rte_ethdev.h:1789
__rte_experimental int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id, uint8_t affinity)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
static __rte_experimental uint16_t rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id, uint16_t tx_port_id, uint16_t tx_queue_id, struct rte_eth_recycle_rxq_info *recycle_rxq_info)
Definition: rte_ethdev.h:6918
uint8_t hthresh
Definition: rte_ethdev.h:375
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
__rte_experimental int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1806
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
__rte_experimental int rte_eth_dev_count_aggr_ports(uint16_t port_id)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
uint16_t reta_size
Definition: rte_ethdev.h:1810
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint32_t lpbk_mode
Definition: rte_ethdev.h:1521
#define rte_errno
Definition: rte_errno.h:29
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link) __rte_warn_unused_result
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint32_t link_speeds
Definition: rte_ethdev.h:1512
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1808
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:417
rte_eth_fc_mode
Definition: rte_ethdev.h:1365
uint8_t enable_default_pool
Definition: rte_ethdev.h:965
uint32_t max_hash_mac_addrs
Definition: rte_ethdev.h:1797
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:1359
struct rte_mempool * mp
Definition: rte_ethdev.h:1897
int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
#define __rte_unused
Definition: rte_common.h:171
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:283
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
uint64_t opackets
Definition: rte_ethdev.h:264
rte_eth_cman_obj
Definition: rte_ethdev.h:5977
uint8_t hash_key_size
Definition: rte_ethdev.h:1811
int rte_eth_dev_close(uint16_t port_id)
struct rte_mempool * mp
Definition: rte_ethdev.h:1093
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
__rte_experimental int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, uint8_t avail_thresh)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
__rte_experimental const char * rte_eth_dev_rss_algo_name(enum rte_eth_hash_function rss_algo)
int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
struct rte_mempool * mp
Definition: rte_ethdev.h:1862
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1545
__rte_experimental int rte_eth_timesync_adjust_freq(uint16_t port_id, int64_t ppm)
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:281
const char * name
Definition: rte_ethdev.h:1699
uint8_t queue_state
Definition: rte_ethdev.h:1884
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
int rte_eth_dev_set_link_up(uint16_t port_id)
#define RTE_BIT32(nr)
Definition: rte_bitops.h:44
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
__rte_experimental int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
__rte_experimental int rte_eth_speed_lanes_get_capability(uint16_t port_id, struct rte_eth_speed_lanes_capa *speed_lanes_capa, unsigned int num)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1818
int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link) __rte_warn_unused_result
uint16_t share_qid
Definition: rte_ethdev.h:1138
uint8_t rx_deferred_start
Definition: rte_ethdev.h:1129
__rte_experimental int rte_eth_dev_get_reg_info_ext(uint16_t port_id, struct rte_dev_reg_info *info)
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:3842
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:4209
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:277
uint32_t high_water
Definition: rte_ethdev.h:1378
#define RTE_ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:382
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1932
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
__rte_experimental int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, unsigned int num)
int rte_eth_timesync_disable(uint16_t port_id)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint64_t offloads
Definition: rte_ethdev.h:1144
__rte_experimental int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, uint8_t *avail_thresh)
__rte_experimental int rte_eth_representor_info_get(uint16_t port_id, struct rte_eth_representor_info *info)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, struct rte_eth_pfc_queue_conf *pfc_queue_conf)
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
__rte_experimental int rte_eth_speed_lanes_set(uint16_t port_id, uint32_t speed_lanes)
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num)
uint16_t send_xon
Definition: rte_ethdev.h:1381
struct rte_mempool ** rx_mempools
Definition: rte_ethdev.h:1173
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_allmulticast_enable(uint16_t port_id)
uint32_t offset_allowed
Definition: rte_ethdev.h:1726
__rte_experimental int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, struct rte_power_monitor_cond *pmc)
#define unlikely(x)
uint64_t ibytes
Definition: rte_ethdev.h:265
uint32_t offset_align_log2
Definition: rte_ethdev.h:1727
uint8_t avail_thresh
Definition: rte_ethdev.h:1874
uint64_t offloads
Definition: rte_ethdev.h:1195
int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
uint16_t max_nb_queues
Definition: rte_ethdev.h:1231
uint64_t oerrors
Definition: rte_ethdev.h:273
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
#define __rte_cache_min_aligned
Definition: rte_common.h:630
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo) __rte_warn_unused_result
uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
uint16_t max_mtu
Definition: rte_ethdev.h:1779
uint64_t offloads
Definition: rte_ethdev.h:438
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
uint16_t tx_free_thresh
Definition: rte_ethdev.h:1186
uint16_t nb_desc
Definition: rte_ethdev.h:1866
uint64_t modes_supported
Definition: rte_ethdev.h:5998
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:6262
int rte_eth_allmulticast_disable(uint16_t port_id)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
rte_eth_hash_function
Definition: rte_ethdev.h:466
int rte_eth_dev_start(uint16_t port_id)
__rte_experimental int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config)
uint16_t rx_buf_size
Definition: rte_ethdev.h:1867
__rte_experimental int rte_eth_ip_reassembly_capability_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *capa)
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1799
uint8_t scattered_rx
Definition: rte_ethdev.h:1864
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
#define RTE_ETH_MQ_RX_DCB_FLAG
Definition: rte_ethdev.h:383
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) __rte_warn_unused_result
uint64_t offloads
Definition: rte_ethdev.h:1017
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1819
__rte_experimental int rte_eth_ip_reassembly_conf_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *conf)
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1804
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
__rte_experimental int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num) __rte_warn_unused_result
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:279
uint16_t min_mtu
Definition: rte_ethdev.h:1778
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_is_removed(uint16_t port_id)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:2085
uint64_t obytes
Definition: rte_ethdev.h:266
__rte_experimental int rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
uint8_t enable_loop_back
Definition: rte_ethdev.h:998
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
rte_eth_err_handle_mode
Definition: rte_ethdev.h:1752
__rte_experimental int rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_recycle_rxq_info *recycle_rxq_info)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, struct rte_eth_pfc_queue_info *pfc_queue_info)
int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
rte_eth_fec_mode
Definition: rte_ethdev.h:2002
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
uint16_t max_tx_queues
Definition: rte_ethdev.h:1794
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_xstats_reset(uint16_t port_id)
rte_eth_dev_state
Definition: rte_ethdev.h:2091
uint16_t rx_free_thresh
Definition: rte_ethdev.h:1127
uint64_t dev_capa
Definition: rte_ethdev.h:1838
uint64_t ierrors
Definition: rte_ethdev.h:272
#define RTE_ETH_MQ_RX_VMDQ_FLAG
Definition: rte_ethdev.h:384
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction)
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1814
#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:851
uint16_t rte_eth_dev_count_total(void)
int rte_eth_promiscuous_enable(uint16_t port_id)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
__rte_experimental int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) __rte_warn_unused_result
union rte_eth_rxseg * rx_seg
Definition: rte_ethdev.h:1152
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num) __rte_warn_unused_result
int rte_eth_dev_owner_new(uint64_t *owner_id)
rte_vlan_type
Definition: rte_ethdev.h:448
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) __rte_warn_unused_result
__rte_experimental int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
uint16_t nb_seg_max
Definition: rte_ethdev.h:1346
uint64_t ipackets
Definition: rte_ethdev.h:263
uint16_t max_vfs
Definition: rte_ethdev.h:1798
uint16_t pause_time
Definition: rte_ethdev.h:1380
int rte_eth_dev_owner_delete(const uint64_t owner_id)
uint64_t rx_nombuf
Definition: rte_ethdev.h:274
rte_eth_event_macsec_subtype
Definition: rte_ethdev.h:3980
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:6853
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:1022
uint8_t queue_state
Definition: rte_ethdev.h:1865
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
uint32_t use_locked_device_memory
Definition: rte_ethdev.h:1298
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1817
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:4056
rte_eth_nb_pools
Definition: rte_ethdev.h:922
uint16_t nb_align
Definition: rte_ethdev.h:1336
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:391
__rte_experimental const char * rte_eth_link_speed_to_str(uint32_t link_speed)
const char * driver_name
Definition: rte_ethdev.h:1775
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:6338
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
__rte_experimental int rte_eth_find_rss_algo(const char *name, uint32_t *algo)
int rte_eth_dev_socket_id(uint16_t port_id)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint32_t tx_rate)
struct rte_mbuf ** mbuf_ring
Definition: rte_ethdev.h:1896
uint8_t enable_default_pool
Definition: rte_ethdev.h:996
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1825
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1795
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
rte_eth_tunnel_type
Definition: rte_ethdev.h:1454
__rte_experimental const char * rte_eth_dev_capability_name(uint64_t capability)
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint64_t value
Definition: rte_ethdev.h:1945
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition: rte_ethdev.h:676
__rte_experimental int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
__rte_experimental int rte_eth_ip_reassembly_conf_set(uint16_t port_id, const struct rte_eth_ip_reassembly_params *conf)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
__rte_experimental int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config)
int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1790
uint64_t rss_hf
Definition: rte_ethdev.h:513
uint64_t id
Definition: rte_ethdev.h:1944
__rte_experimental int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
unsigned int if_index
Definition: rte_ethdev.h:1776
__rte_experimental int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:1383
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:2061
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) __rte_warn_unused_result
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
uint8_t * rss_key
Definition: rte_ethdev.h:507
uint8_t tx_deferred_start
Definition: rte_ethdev.h:1189
uint8_t wthresh
Definition: rte_ethdev.h:376
uint16_t max_rx_queues
Definition: rte_ethdev.h:1793
uint16_t max_rx_mempools
Definition: rte_ethdev.h:1832
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
rte_eth_representor_type
Definition: rte_ethdev.h:1739
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:432
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
int rte_eth_allmulticast_get(uint16_t port_id)
uint8_t rx_drop_en
Definition: rte_ethdev.h:1128
int rte_eth_dev_is_valid_port(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint16_t rx_nseg
Definition: rte_ethdev.h:1130
#define __rte_warn_unused_result
Definition: rte_common.h:404
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1792
int rte_eth_timesync_enable(uint16_t port_id)
uint8_t pthresh
Definition: rte_ethdev.h:374
uint16_t share_group
Definition: rte_ethdev.h:1137
uint32_t speed_capa
Definition: rte_ethdev.h:1822
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6605
__rte_experimental int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info)
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_experimental int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
uint64_t objs_supported
Definition: rte_ethdev.h:6003
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1782
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:6800
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
rte_eth_event_type
Definition: rte_ethdev.h:4121