DPDK  24.07.0
rte_ethdev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
148 #ifdef __cplusplus
149 extern "C" {
150 #endif
151 
152 #include <stdint.h>
153 
154 /* Use this macro to check if LRO API is supported */
155 #define RTE_ETHDEV_HAS_LRO_SUPPORT
156 
157 /* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
158 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
159 #define RTE_ETHDEV_DEBUG_RX
160 #define RTE_ETHDEV_DEBUG_TX
161 #endif
162 
163 #include <rte_cman.h>
164 #include <rte_compat.h>
165 #include <rte_log.h>
166 #include <rte_interrupts.h>
167 #include <rte_dev.h>
168 #include <rte_devargs.h>
169 #include <rte_bitops.h>
170 #include <rte_errno.h>
171 #include <rte_common.h>
172 #include <rte_config.h>
173 #include <rte_power_intrinsics.h>
174 
175 #include "rte_ethdev_trace_fp.h"
176 #include "rte_dev_info.h"
177 
178 extern int rte_eth_dev_logtype;
179 #define RTE_LOGTYPE_ETHDEV rte_eth_dev_logtype
180 
181 #define RTE_ETHDEV_LOG_LINE(level, ...) \
182  RTE_LOG_LINE(level, ETHDEV, "" __VA_ARGS__)
183 
184 struct rte_mbuf;
185 
202 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
203 
218 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
219 
232 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
233 
/**
 * Iterate over all Ethernet port IDs matching a devargs pattern.
 *
 * Initializes @p iter from @p devargs, then advances @p id with
 * rte_eth_iterator_next() until it returns RTE_MAX_ETHPORTS (no more
 * matching ports).
 *
 * NOTE(review): the loop itself never calls rte_eth_iterator_cleanup();
 * presumably the iterator releases its resources once exhausted — confirm
 * against rte_eth_iterator_next()/rte_eth_iterator_cleanup() docs when
 * breaking out of the loop early.
 */
#define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
	for (rte_eth_iterator_init(iter, devargs), \
	     id = rte_eth_iterator_next(iter); \
	     id != RTE_MAX_ETHPORTS; \
	     id = rte_eth_iterator_next(iter))
252 
263  uint64_t ipackets;
264  uint64_t opackets;
265  uint64_t ibytes;
266  uint64_t obytes;
271  uint64_t imissed;
272  uint64_t ierrors;
273  uint64_t oerrors;
274  uint64_t rx_nombuf;
275  /* Queue stats are limited to max 256 queues */
277  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
279  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
281  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
283  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
285  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
286 };
287 
291 #define RTE_ETH_LINK_SPEED_AUTONEG 0
292 #define RTE_ETH_LINK_SPEED_FIXED RTE_BIT32(0)
293 #define RTE_ETH_LINK_SPEED_10M_HD RTE_BIT32(1)
294 #define RTE_ETH_LINK_SPEED_10M RTE_BIT32(2)
295 #define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)
296 #define RTE_ETH_LINK_SPEED_100M RTE_BIT32(4)
297 #define RTE_ETH_LINK_SPEED_1G RTE_BIT32(5)
298 #define RTE_ETH_LINK_SPEED_2_5G RTE_BIT32(6)
299 #define RTE_ETH_LINK_SPEED_5G RTE_BIT32(7)
300 #define RTE_ETH_LINK_SPEED_10G RTE_BIT32(8)
301 #define RTE_ETH_LINK_SPEED_20G RTE_BIT32(9)
302 #define RTE_ETH_LINK_SPEED_25G RTE_BIT32(10)
303 #define RTE_ETH_LINK_SPEED_40G RTE_BIT32(11)
304 #define RTE_ETH_LINK_SPEED_50G RTE_BIT32(12)
305 #define RTE_ETH_LINK_SPEED_56G RTE_BIT32(13)
306 #define RTE_ETH_LINK_SPEED_100G RTE_BIT32(14)
307 #define RTE_ETH_LINK_SPEED_200G RTE_BIT32(15)
308 #define RTE_ETH_LINK_SPEED_400G RTE_BIT32(16)
314 #define RTE_ETH_SPEED_NUM_NONE 0
315 #define RTE_ETH_SPEED_NUM_10M 10
316 #define RTE_ETH_SPEED_NUM_100M 100
317 #define RTE_ETH_SPEED_NUM_1G 1000
318 #define RTE_ETH_SPEED_NUM_2_5G 2500
319 #define RTE_ETH_SPEED_NUM_5G 5000
320 #define RTE_ETH_SPEED_NUM_10G 10000
321 #define RTE_ETH_SPEED_NUM_20G 20000
322 #define RTE_ETH_SPEED_NUM_25G 25000
323 #define RTE_ETH_SPEED_NUM_40G 40000
324 #define RTE_ETH_SPEED_NUM_50G 50000
325 #define RTE_ETH_SPEED_NUM_56G 56000
326 #define RTE_ETH_SPEED_NUM_100G 100000
327 #define RTE_ETH_SPEED_NUM_200G 200000
328 #define RTE_ETH_SPEED_NUM_400G 400000
329 #define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX
/**
 * Link state of an Ethernet port.
 *
 * Speed, duplex, autonegotiation and status are packed into a single
 * 64-bit word so the whole state can be read or written atomically
 * through @c val64.
 */
struct rte_eth_link {
	union {
		RTE_ATOMIC(uint64_t) val64; /**< Entire link word, for atomic access. */
		__extension__
		struct {
			uint32_t link_speed;       /**< Speed in Mbps (RTE_ETH_SPEED_NUM_*). */
			uint16_t link_duplex : 1;  /**< RTE_ETH_LINK_HALF_DUPLEX or RTE_ETH_LINK_FULL_DUPLEX */
			uint16_t link_autoneg : 1; /**< RTE_ETH_LINK_FIXED or RTE_ETH_LINK_AUTONEG */
			uint16_t link_status : 1;  /**< RTE_ETH_LINK_DOWN or RTE_ETH_LINK_UP */
		};
	};
};
347 
351 #define RTE_ETH_LINK_HALF_DUPLEX 0
352 #define RTE_ETH_LINK_FULL_DUPLEX 1
353 #define RTE_ETH_LINK_DOWN 0
354 #define RTE_ETH_LINK_UP 1
355 #define RTE_ETH_LINK_FIXED 0
356 #define RTE_ETH_LINK_AUTONEG 1
357 #define RTE_ETH_LINK_MAX_STR_LEN 40
/**
 * Ring threshold registers of an Rx/Tx queue.
 * Exact semantics are hardware specific — consult the PMD/NIC datasheet.
 */
struct rte_eth_thresh {
	uint8_t pthresh; /**< Ring prefetch threshold. */
	uint8_t hthresh; /**< Ring host threshold. */
	uint8_t wthresh; /**< Ring writeback threshold. */
};
369 
373 #define RTE_ETH_MQ_RX_RSS_FLAG RTE_BIT32(0)
374 #define RTE_ETH_MQ_RX_DCB_FLAG RTE_BIT32(1)
375 #define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2)
382 enum rte_eth_rx_mq_mode {
383 
385 
392 
402 };
403 
413 };
414 
420  enum rte_eth_rx_mq_mode mq_mode;
421  uint32_t mtu;
429  uint64_t offloads;
430 
431  uint64_t reserved_64s[2];
432  void *reserved_ptrs[2];
433 };
434 
440  RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
443  RTE_ETH_VLAN_TYPE_MAX,
444 };
445 
451  uint64_t ids[64];
452 };
453 
475  RTE_ETH_HASH_FUNCTION_MAX,
476 };
477 
478 #define RTE_ETH_HASH_ALGO_TO_CAPA(x) RTE_BIT32(x)
479 #define RTE_ETH_HASH_ALGO_CAPA_MASK(x) RTE_BIT32(RTE_ETH_HASH_FUNCTION_ ## x)
480 
498  uint8_t *rss_key;
499  uint8_t rss_key_len;
504  uint64_t rss_hf;
505  enum rte_eth_hash_function algorithm;
506 };
507 
508 /*
509  * A packet can be identified by hardware as different flow types. Different
510  * NIC hardware may support different flow types.
511  * Basically, the NIC hardware identifies the flow type as deep protocol as
512  * possible, and exclusively. For example, if a packet is identified as
513  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
514  * though it is an actual IPV4 packet.
515  */
516 #define RTE_ETH_FLOW_UNKNOWN 0
517 #define RTE_ETH_FLOW_RAW 1
518 #define RTE_ETH_FLOW_IPV4 2
519 #define RTE_ETH_FLOW_FRAG_IPV4 3
520 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
521 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
522 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
523 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
524 #define RTE_ETH_FLOW_IPV6 8
525 #define RTE_ETH_FLOW_FRAG_IPV6 9
526 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
527 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
528 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
529 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
530 #define RTE_ETH_FLOW_L2_PAYLOAD 14
531 #define RTE_ETH_FLOW_IPV6_EX 15
532 #define RTE_ETH_FLOW_IPV6_TCP_EX 16
533 #define RTE_ETH_FLOW_IPV6_UDP_EX 17
534 
535 #define RTE_ETH_FLOW_PORT 18
536 #define RTE_ETH_FLOW_VXLAN 19
537 #define RTE_ETH_FLOW_GENEVE 20
538 #define RTE_ETH_FLOW_NVGRE 21
539 #define RTE_ETH_FLOW_VXLAN_GPE 22
540 #define RTE_ETH_FLOW_GTPU 23
541 #define RTE_ETH_FLOW_MAX 24
542 
543 /*
544  * Below macros are defined for RSS offload types, they can be used to
545  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
546  */
547 #define RTE_ETH_RSS_IPV4 RTE_BIT64(2)
548 #define RTE_ETH_RSS_FRAG_IPV4 RTE_BIT64(3)
549 #define RTE_ETH_RSS_NONFRAG_IPV4_TCP RTE_BIT64(4)
550 #define RTE_ETH_RSS_NONFRAG_IPV4_UDP RTE_BIT64(5)
551 #define RTE_ETH_RSS_NONFRAG_IPV4_SCTP RTE_BIT64(6)
552 #define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
553 #define RTE_ETH_RSS_IPV6 RTE_BIT64(8)
554 #define RTE_ETH_RSS_FRAG_IPV6 RTE_BIT64(9)
555 #define RTE_ETH_RSS_NONFRAG_IPV6_TCP RTE_BIT64(10)
556 #define RTE_ETH_RSS_NONFRAG_IPV6_UDP RTE_BIT64(11)
557 #define RTE_ETH_RSS_NONFRAG_IPV6_SCTP RTE_BIT64(12)
558 #define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
559 #define RTE_ETH_RSS_L2_PAYLOAD RTE_BIT64(14)
560 #define RTE_ETH_RSS_IPV6_EX RTE_BIT64(15)
561 #define RTE_ETH_RSS_IPV6_TCP_EX RTE_BIT64(16)
562 #define RTE_ETH_RSS_IPV6_UDP_EX RTE_BIT64(17)
563 #define RTE_ETH_RSS_PORT RTE_BIT64(18)
564 #define RTE_ETH_RSS_VXLAN RTE_BIT64(19)
565 #define RTE_ETH_RSS_GENEVE RTE_BIT64(20)
566 #define RTE_ETH_RSS_NVGRE RTE_BIT64(21)
567 #define RTE_ETH_RSS_GTPU RTE_BIT64(23)
568 #define RTE_ETH_RSS_ETH RTE_BIT64(24)
569 #define RTE_ETH_RSS_S_VLAN RTE_BIT64(25)
570 #define RTE_ETH_RSS_C_VLAN RTE_BIT64(26)
571 #define RTE_ETH_RSS_ESP RTE_BIT64(27)
572 #define RTE_ETH_RSS_AH RTE_BIT64(28)
573 #define RTE_ETH_RSS_L2TPV3 RTE_BIT64(29)
574 #define RTE_ETH_RSS_PFCP RTE_BIT64(30)
575 #define RTE_ETH_RSS_PPPOE RTE_BIT64(31)
576 #define RTE_ETH_RSS_ECPRI RTE_BIT64(32)
577 #define RTE_ETH_RSS_MPLS RTE_BIT64(33)
578 #define RTE_ETH_RSS_IPV4_CHKSUM RTE_BIT64(34)
579 
592 #define RTE_ETH_RSS_L4_CHKSUM RTE_BIT64(35)
593 
594 #define RTE_ETH_RSS_L2TPV2 RTE_BIT64(36)
595 #define RTE_ETH_RSS_IPV6_FLOW_LABEL RTE_BIT64(37)
596 
597 /*
598  * We use the following macros to combine with above RTE_ETH_RSS_* for
599  * more specific input set selection. These bits are defined starting
600  * from the high end of the 64 bits.
601  * Note: If we use above RTE_ETH_RSS_* without SRC/DST_ONLY, it represents
602  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
603  * the same level are used simultaneously, it is the same case as none of
604  * them are added.
605  */
606 #define RTE_ETH_RSS_L3_SRC_ONLY RTE_BIT64(63)
607 #define RTE_ETH_RSS_L3_DST_ONLY RTE_BIT64(62)
608 #define RTE_ETH_RSS_L4_SRC_ONLY RTE_BIT64(61)
609 #define RTE_ETH_RSS_L4_DST_ONLY RTE_BIT64(60)
610 #define RTE_ETH_RSS_L2_SRC_ONLY RTE_BIT64(59)
611 #define RTE_ETH_RSS_L2_DST_ONLY RTE_BIT64(58)
612 
613 /*
614  * Only select IPV6 address prefix as RSS input set according to
615  * https://tools.ietf.org/html/rfc6052
616  * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
617  * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
618  */
619 #define RTE_ETH_RSS_L3_PRE32 RTE_BIT64(57)
620 #define RTE_ETH_RSS_L3_PRE40 RTE_BIT64(56)
621 #define RTE_ETH_RSS_L3_PRE48 RTE_BIT64(55)
622 #define RTE_ETH_RSS_L3_PRE56 RTE_BIT64(54)
623 #define RTE_ETH_RSS_L3_PRE64 RTE_BIT64(53)
624 #define RTE_ETH_RSS_L3_PRE96 RTE_BIT64(52)
625 
626 /*
627  * Use the following macros to combine with the above layers
628  * to choose inner and outer layers or both for RSS computation.
629  * Bits 50 and 51 are reserved for this.
630  */
631 
639 #define RTE_ETH_RSS_LEVEL_PMD_DEFAULT (UINT64_C(0) << 50)
640 
645 #define RTE_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
646 
651 #define RTE_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
652 #define RTE_ETH_RSS_LEVEL_MASK (UINT64_C(3) << 50)
653 
654 #define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
655 
666 static inline uint64_t
667 rte_eth_rss_hf_refine(uint64_t rss_hf)
668 {
669  if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
670  rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
671 
672  if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
673  rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
674 
675  return rss_hf;
676 }
677 
678 #define RTE_ETH_RSS_IPV6_PRE32 ( \
679  RTE_ETH_RSS_IPV6 | \
680  RTE_ETH_RSS_L3_PRE32)
681 
682 #define RTE_ETH_RSS_IPV6_PRE40 ( \
683  RTE_ETH_RSS_IPV6 | \
684  RTE_ETH_RSS_L3_PRE40)
685 
686 #define RTE_ETH_RSS_IPV6_PRE48 ( \
687  RTE_ETH_RSS_IPV6 | \
688  RTE_ETH_RSS_L3_PRE48)
689 
690 #define RTE_ETH_RSS_IPV6_PRE56 ( \
691  RTE_ETH_RSS_IPV6 | \
692  RTE_ETH_RSS_L3_PRE56)
693 
694 #define RTE_ETH_RSS_IPV6_PRE64 ( \
695  RTE_ETH_RSS_IPV6 | \
696  RTE_ETH_RSS_L3_PRE64)
697 
698 #define RTE_ETH_RSS_IPV6_PRE96 ( \
699  RTE_ETH_RSS_IPV6 | \
700  RTE_ETH_RSS_L3_PRE96)
701 
702 #define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
703  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
704  RTE_ETH_RSS_L3_PRE32)
705 
706 #define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
707  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
708  RTE_ETH_RSS_L3_PRE40)
709 
710 #define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
711  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
712  RTE_ETH_RSS_L3_PRE48)
713 
714 #define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
715  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
716  RTE_ETH_RSS_L3_PRE56)
717 
718 #define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
719  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
720  RTE_ETH_RSS_L3_PRE64)
721 
722 #define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
723  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
724  RTE_ETH_RSS_L3_PRE96)
725 
726 #define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
727  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
728  RTE_ETH_RSS_L3_PRE32)
729 
730 #define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
731  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
732  RTE_ETH_RSS_L3_PRE40)
733 
734 #define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
735  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
736  RTE_ETH_RSS_L3_PRE48)
737 
738 #define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
739  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
740  RTE_ETH_RSS_L3_PRE56)
741 
742 #define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
743  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
744  RTE_ETH_RSS_L3_PRE64)
745 
746 #define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
747  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
748  RTE_ETH_RSS_L3_PRE96)
749 
750 #define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
751  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
752  RTE_ETH_RSS_L3_PRE32)
753 
754 #define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
755  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
756  RTE_ETH_RSS_L3_PRE40)
757 
758 #define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
759  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
760  RTE_ETH_RSS_L3_PRE48)
761 
762 #define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
763  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
764  RTE_ETH_RSS_L3_PRE56)
765 
766 #define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
767  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
768  RTE_ETH_RSS_L3_PRE64)
769 
770 #define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
771  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
772  RTE_ETH_RSS_L3_PRE96)
773 
774 #define RTE_ETH_RSS_IP ( \
775  RTE_ETH_RSS_IPV4 | \
776  RTE_ETH_RSS_FRAG_IPV4 | \
777  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
778  RTE_ETH_RSS_IPV6 | \
779  RTE_ETH_RSS_FRAG_IPV6 | \
780  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
781  RTE_ETH_RSS_IPV6_EX)
782 
783 #define RTE_ETH_RSS_UDP ( \
784  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
785  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
786  RTE_ETH_RSS_IPV6_UDP_EX)
787 
788 #define RTE_ETH_RSS_TCP ( \
789  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
790  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
791  RTE_ETH_RSS_IPV6_TCP_EX)
792 
793 #define RTE_ETH_RSS_SCTP ( \
794  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
795  RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
796 
797 #define RTE_ETH_RSS_TUNNEL ( \
798  RTE_ETH_RSS_VXLAN | \
799  RTE_ETH_RSS_GENEVE | \
800  RTE_ETH_RSS_NVGRE)
801 
802 #define RTE_ETH_RSS_VLAN ( \
803  RTE_ETH_RSS_S_VLAN | \
804  RTE_ETH_RSS_C_VLAN)
805 
807 #define RTE_ETH_RSS_PROTO_MASK ( \
808  RTE_ETH_RSS_IPV4 | \
809  RTE_ETH_RSS_FRAG_IPV4 | \
810  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
811  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
812  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
813  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
814  RTE_ETH_RSS_IPV6 | \
815  RTE_ETH_RSS_FRAG_IPV6 | \
816  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
817  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
818  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
819  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
820  RTE_ETH_RSS_L2_PAYLOAD | \
821  RTE_ETH_RSS_IPV6_EX | \
822  RTE_ETH_RSS_IPV6_TCP_EX | \
823  RTE_ETH_RSS_IPV6_UDP_EX | \
824  RTE_ETH_RSS_PORT | \
825  RTE_ETH_RSS_VXLAN | \
826  RTE_ETH_RSS_GENEVE | \
827  RTE_ETH_RSS_NVGRE | \
828  RTE_ETH_RSS_MPLS)
829 
830 /*
831  * Definitions used for redirection table entry size.
832  * Some RSS RETA sizes may not be supported by some drivers, check the
833  * documentation or the description of relevant functions for more details.
834  */
835 #define RTE_ETH_RSS_RETA_SIZE_64 64
836 #define RTE_ETH_RSS_RETA_SIZE_128 128
837 #define RTE_ETH_RSS_RETA_SIZE_256 256
838 #define RTE_ETH_RSS_RETA_SIZE_512 512
839 #define RTE_ETH_RETA_GROUP_SIZE 64
840 
842 #define RTE_ETH_VMDQ_MAX_VLAN_FILTERS 64
843 #define RTE_ETH_DCB_NUM_USER_PRIORITIES 8
844 #define RTE_ETH_VMDQ_DCB_NUM_QUEUES 128
845 #define RTE_ETH_DCB_NUM_QUEUES 128
849 #define RTE_ETH_DCB_PG_SUPPORT RTE_BIT32(0)
850 #define RTE_ETH_DCB_PFC_SUPPORT RTE_BIT32(1)
854 #define RTE_ETH_VLAN_STRIP_OFFLOAD 0x0001
855 #define RTE_ETH_VLAN_FILTER_OFFLOAD 0x0002
856 #define RTE_ETH_VLAN_EXTEND_OFFLOAD 0x0004
857 #define RTE_ETH_QINQ_STRIP_OFFLOAD 0x0008
859 #define RTE_ETH_VLAN_STRIP_MASK 0x0001
860 #define RTE_ETH_VLAN_FILTER_MASK 0x0002
861 #define RTE_ETH_VLAN_EXTEND_MASK 0x0004
862 #define RTE_ETH_QINQ_STRIP_MASK 0x0008
863 #define RTE_ETH_VLAN_ID_MAX 0x0FFF
866 /* Definitions used for receive MAC address */
867 #define RTE_ETH_NUM_RECEIVE_MAC_ADDR 128
869 /* Definitions used for unicast hash */
870 #define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128
876 #define RTE_ETH_VMDQ_ACCEPT_UNTAG RTE_BIT32(0)
877 
878 #define RTE_ETH_VMDQ_ACCEPT_HASH_MC RTE_BIT32(1)
879 
880 #define RTE_ETH_VMDQ_ACCEPT_HASH_UC RTE_BIT32(2)
881 
882 #define RTE_ETH_VMDQ_ACCEPT_BROADCAST RTE_BIT32(3)
883 
884 #define RTE_ETH_VMDQ_ACCEPT_MULTICAST RTE_BIT32(4)
885 
895  uint64_t mask;
897  uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
898 };
899 
907 };
908 
918 };
919 
920 /* This structure may be extended in future. */
/* This structure may be extended in future. */
/** DCB configuration for the Rx side of an Ethernet port. */
struct rte_eth_dcb_rx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Number of traffic classes in use. */
	/** Traffic class each of the 8 user priorities is mapped to. */
	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
};
926 
/** Combined VMDq+DCB configuration for the Tx side of an Ethernet port. */
struct rte_eth_vmdq_dcb_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq transmit pools. */
	/** Traffic class each of the 8 user priorities is mapped to. */
	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
};
932 
/** DCB configuration for the Tx side of an Ethernet port. */
struct rte_eth_dcb_tx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Number of traffic classes in use. */
	/** Traffic class each of the 8 user priorities is mapped to. */
	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
};
938 
/** VMDq configuration for the Tx side of an Ethernet port. */
struct rte_eth_vmdq_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq transmit pools. */
};
942 
955  enum rte_eth_nb_pools nb_queue_pools;
957  uint8_t default_pool;
958  uint8_t nb_pool_maps;
959  struct {
960  uint16_t vlan_id;
961  uint64_t pools;
962  } pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS];
965 };
966 
986  enum rte_eth_nb_pools nb_queue_pools;
988  uint8_t default_pool;
990  uint8_t nb_pool_maps;
991  uint32_t rx_mode;
992  struct {
993  uint16_t vlan_id;
994  uint64_t pools;
995  } pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS];
996 };
997 
1002  enum rte_eth_tx_mq_mode mq_mode;
1008  uint64_t offloads;
1009 
1010  uint16_t pvid;
1011  __extension__
1012  uint8_t
1013  hw_vlan_reject_tagged : 1,
1017  hw_vlan_insert_pvid : 1;
1018 
1019  uint64_t reserved_64s[2];
1020  void *reserved_ptrs[2];
1021 };
1022 
1084  struct rte_mempool *mp;
1085  uint16_t length;
1086  uint16_t offset;
1098  uint32_t proto_hdr;
1099 };
1100 
1108  /* The settings for buffer split offload. */
1109  struct rte_eth_rxseg_split split;
1110  /* The other features settings should be added here. */
1111 };
1112 
1117  struct rte_eth_thresh rx_thresh;
1118  uint16_t rx_free_thresh;
1119  uint8_t rx_drop_en;
1121  uint16_t rx_nseg;
1128  uint16_t share_group;
1129  uint16_t share_qid;
1135  uint64_t offloads;
1144 
1165  uint16_t rx_nmempool;
1167  uint64_t reserved_64s[2];
1168  void *reserved_ptrs[2];
1169 };
1170 
1175  struct rte_eth_thresh tx_thresh;
1176  uint16_t tx_rs_thresh;
1177  uint16_t tx_free_thresh;
1186  uint64_t offloads;
1187 
1188  uint64_t reserved_64s[2];
1189  void *reserved_ptrs[2];
1190 };
1191 
1204 
1209  uint32_t rte_memory:1;
1210 
1211  uint32_t reserved:30;
1212 };
1213 
1222  uint16_t max_nb_queues;
1224  uint16_t max_rx_2_tx;
1226  uint16_t max_tx_2_rx;
1227  uint16_t max_nb_desc;
1230 };
1231 
1232 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
1233 
1241  uint16_t port;
1242  uint16_t queue;
1243 };
1244 
1252  uint32_t peer_count:16;
1263  uint32_t tx_explicit:1;
1264 
1276  uint32_t manual_bind:1;
1277 
1290 
1302  uint32_t use_rte_memory:1;
1303 
1314  uint32_t force_memory:1;
1315 
1316  uint32_t reserved:11;
1318  struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1319 };
1320 
1325  uint16_t nb_max;
1326  uint16_t nb_min;
1327  uint16_t nb_align;
1337  uint16_t nb_seg_max;
1338 
1350  uint16_t nb_mtu_seg_max;
1351 };
1352 
1361 };
1362 
1369  uint32_t high_water;
1370  uint32_t low_water;
1371  uint16_t pause_time;
1372  uint16_t send_xon;
1373  enum rte_eth_fc_mode mode;
1375  uint8_t autoneg;
1376 };
1377 
1384  struct rte_eth_fc_conf fc;
1385  uint8_t priority;
1386 };
1387 
1398  uint8_t tc_max;
1400  enum rte_eth_fc_mode mode_capa;
1401 };
1402 
1421  enum rte_eth_fc_mode mode;
1423  struct {
1424  uint16_t tx_qid;
1428  uint8_t tc;
1429  } rx_pause; /* Valid when (mode == FC_RX_PAUSE || mode == FC_FULL) */
1430 
1431  struct {
1432  uint16_t pause_time;
1433  uint16_t rx_qid;
1437  uint8_t tc;
1438  } tx_pause; /* Valid when (mode == FC_TX_PAUSE || mode == FC_FULL) */
1439 };
1440 
1446  RTE_ETH_TUNNEL_TYPE_NONE = 0,
1447  RTE_ETH_TUNNEL_TYPE_VXLAN,
1448  RTE_ETH_TUNNEL_TYPE_GENEVE,
1449  RTE_ETH_TUNNEL_TYPE_TEREDO,
1450  RTE_ETH_TUNNEL_TYPE_NVGRE,
1451  RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1452  RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1453  RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1454  RTE_ETH_TUNNEL_TYPE_ECPRI,
1455  RTE_ETH_TUNNEL_TYPE_MAX,
1456 };
1457 
1458 /* Deprecated API file for rte_eth_dev_filter_* functions */
1459 #include "rte_eth_ctrl.h"
1460 
1471  uint16_t udp_port;
1472  uint8_t prot_type;
1473 };
1474 
1480  uint32_t lsc:1;
1482  uint32_t rxq:1;
1484  uint32_t rmv:1;
1485 };
1486 
1487 #define rte_intr_conf rte_eth_intr_conf
1488 
1495  uint32_t link_speeds;
1502  struct rte_eth_rxmode rxmode;
1503  struct rte_eth_txmode txmode;
1504  uint32_t lpbk_mode;
1509  struct {
1510  struct rte_eth_rss_conf rss_conf;
1512  struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
1514  struct rte_eth_dcb_rx_conf dcb_rx_conf;
1516  struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
1517  } rx_adv_conf;
1518  union {
1520  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1522  struct rte_eth_dcb_tx_conf dcb_tx_conf;
1524  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1525  } tx_adv_conf;
1529  struct rte_eth_intr_conf intr_conf;
1530 };
1531 
1535 #define RTE_ETH_RX_OFFLOAD_VLAN_STRIP RTE_BIT64(0)
1536 #define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1537 #define RTE_ETH_RX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1538 #define RTE_ETH_RX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1539 #define RTE_ETH_RX_OFFLOAD_TCP_LRO RTE_BIT64(4)
1540 #define RTE_ETH_RX_OFFLOAD_QINQ_STRIP RTE_BIT64(5)
1541 #define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1542 #define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP RTE_BIT64(7)
1543 #define RTE_ETH_RX_OFFLOAD_VLAN_FILTER RTE_BIT64(9)
1544 #define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND RTE_BIT64(10)
1545 #define RTE_ETH_RX_OFFLOAD_SCATTER RTE_BIT64(13)
1546 
1551 #define RTE_ETH_RX_OFFLOAD_TIMESTAMP RTE_BIT64(14)
1552 #define RTE_ETH_RX_OFFLOAD_SECURITY RTE_BIT64(15)
1553 #define RTE_ETH_RX_OFFLOAD_KEEP_CRC RTE_BIT64(16)
1554 #define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM RTE_BIT64(17)
1555 #define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(18)
1556 #define RTE_ETH_RX_OFFLOAD_RSS_HASH RTE_BIT64(19)
1557 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT RTE_BIT64(20)
1558 
1559 #define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1560  RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1561  RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1562 #define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1563  RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1564  RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1565  RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1566 
1567 /*
1568  * If new Rx offload capabilities are defined, they also must be
1569  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1570  */
1571 
1575 #define RTE_ETH_TX_OFFLOAD_VLAN_INSERT RTE_BIT64(0)
1576 #define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1577 #define RTE_ETH_TX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1578 #define RTE_ETH_TX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1579 #define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM RTE_BIT64(4)
1580 #define RTE_ETH_TX_OFFLOAD_TCP_TSO RTE_BIT64(5)
1581 #define RTE_ETH_TX_OFFLOAD_UDP_TSO RTE_BIT64(6)
1582 #define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)
1583 #define RTE_ETH_TX_OFFLOAD_QINQ_INSERT RTE_BIT64(8)
1584 #define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO RTE_BIT64(9)
1585 #define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO RTE_BIT64(10)
1586 #define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO RTE_BIT64(11)
1587 #define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO RTE_BIT64(12)
1588 #define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT RTE_BIT64(13)
1589 
1593 #define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE RTE_BIT64(14)
1594 
1595 #define RTE_ETH_TX_OFFLOAD_MULTI_SEGS RTE_BIT64(15)
1596 
1601 #define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE RTE_BIT64(16)
1602 #define RTE_ETH_TX_OFFLOAD_SECURITY RTE_BIT64(17)
1603 
1608 #define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO RTE_BIT64(18)
1609 
1614 #define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO RTE_BIT64(19)
1615 
1616 #define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(20)
1617 
1622 #define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1623 /*
1624  * If new Tx offload capabilities are defined, they also must be
1625  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1626  */
1627 
1632 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1633 
1634 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1635 
1644 #define RTE_ETH_DEV_CAPA_RXQ_SHARE RTE_BIT64(2)
1645 
1646 #define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP RTE_BIT64(3)
1647 
1648 #define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1649 
1651 /*
1652  * Fallback default preferred Rx/Tx port parameters.
1653  * These are used if an application requests default parameters
1654  * but the PMD does not provide preferred values.
1655  */
1656 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1657 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1658 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1659 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1660 
1667  uint16_t burst_size;
1668  uint16_t ring_size;
1669  uint16_t nb_queues;
1670 };
1671 
1676 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1677 
1682  const char *name;
1683  uint16_t domain_id;
1691  uint16_t port_id;
1697  uint16_t rx_domain;
1698 };
1699 
1707  __extension__
1708  uint32_t multi_pools:1;
1709  uint32_t offset_allowed:1;
1710  uint32_t offset_align_log2:4;
1711  uint16_t max_nseg;
1712  uint16_t reserved;
1713 };
1714 
1727 };
1728 
1749 };
1750 
1757  struct rte_device *device;
1758  const char *driver_name;
1759  unsigned int if_index;
1761  uint16_t min_mtu;
1762  uint16_t max_mtu;
1763  const uint32_t *dev_flags;
1765  uint32_t min_rx_bufsize;
1772  uint32_t max_rx_bufsize;
1773  uint32_t max_rx_pktlen;
1776  uint16_t max_rx_queues;
1777  uint16_t max_tx_queues;
1778  uint32_t max_mac_addrs;
1781  uint16_t max_vfs;
1782  uint16_t max_vmdq_pools;
1783  struct rte_eth_rxseg_capa rx_seg_capa;
1793  uint16_t reta_size;
1794  uint8_t hash_key_size;
1795  uint32_t rss_algo_capa;
1798  struct rte_eth_rxconf default_rxconf;
1799  struct rte_eth_txconf default_txconf;
1800  uint16_t vmdq_queue_base;
1801  uint16_t vmdq_queue_num;
1802  uint16_t vmdq_pool_base;
1803  struct rte_eth_desc_lim rx_desc_lim;
1804  struct rte_eth_desc_lim tx_desc_lim;
1805  uint32_t speed_capa;
1807  uint16_t nb_rx_queues;
1808  uint16_t nb_tx_queues;
1817  struct rte_eth_dev_portconf default_rxportconf;
1819  struct rte_eth_dev_portconf default_txportconf;
1821  uint64_t dev_capa;
1826  struct rte_eth_switch_info switch_info;
1828  enum rte_eth_err_handle_mode err_handle_mode;
1829 
1830  uint64_t reserved_64s[2];
1831  void *reserved_ptrs[2];
1832 };
1833 
1835 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1836 #define RTE_ETH_QUEUE_STATE_STARTED 1
1837 #define RTE_ETH_QUEUE_STATE_HAIRPIN 2
/**
 * Information snapshot of a configured Rx queue,
 * cache-aligned via __rte_cache_min_aligned.
 */
struct __rte_cache_min_aligned rte_eth_rxq_info {
	struct rte_mempool *mp;     /**< Mempool the queue allocates mbufs from. */
	struct rte_eth_rxconf conf; /**< Configuration the queue was set up with. */
	uint8_t scattered_rx;       /**< Nonzero when scattered Rx is enabled. */
	uint8_t queue_state;        /**< One of RTE_ETH_QUEUE_STATE_*. */
	uint16_t nb_desc;           /**< Number of configured descriptors. */
	uint16_t rx_buf_size;       /**< Size of an Rx buffer — presumably bytes; verify in PMD docs. */
	/** Availability threshold — NOTE(review): units (descriptors vs percent) not visible here; confirm. */
	uint8_t avail_thresh;
};
1859 
1865  struct rte_eth_txconf conf;
1866  uint16_t nb_desc;
1867  uint8_t queue_state;
1868 };
1869 
1879  struct rte_mbuf **mbuf_ring;
1880  struct rte_mempool *mp;
1881  uint16_t *refill_head;
1882  uint16_t *receive_tail;
1883  uint16_t mbuf_ring_size;
1892 };
1893 
1894 /* Generic Burst mode flag definition, values can be ORed. */
1895 
1901 #define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1902 
1908  uint64_t flags;
1910 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1911  char info[RTE_ETH_BURST_MODE_INFO_SIZE];
1912 };
1913 
1915 #define RTE_ETH_XSTATS_NAME_SIZE 64
1916 
1927  uint64_t id;
1928  uint64_t value;
1929 };
1930 
1947 };
1948 
1949 #define RTE_ETH_DCB_NUM_TCS 8
1950 #define RTE_ETH_MAX_VMDQ_POOL 64
1951 
1958  struct {
1959  uint16_t base;
1960  uint16_t nb_queue;
1961  } tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1963  struct {
1964  uint16_t base;
1965  uint16_t nb_queue;
1966  } tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1967 };
1968 
1974  uint8_t nb_tcs;
1976  uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS];
1979 };
1980 
1991 };
1992 
1993 /* Translate from FEC mode to FEC capa */
1994 #define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)
1995 
1996 /* This macro indicates FEC capa mask */
1997 #define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)
1998 
/* A structure used to get capabilities per link speed */
struct rte_eth_fec_capa {
	uint32_t speed; /**< Link speed (RTE_ETH_SPEED_NUM_*). */
	uint32_t capa;  /**< FEC capability bitmask, built with RTE_ETH_FEC_MODE_TO_CAPA(). */
};
2004 
2005 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
2006 
2007 /* Macros to check for valid port */
/**
 * Return @p retval from the enclosing function when @p port_id is not a
 * valid Ethernet port; an error line is logged first.
 */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
		return retval; \
	} \
} while (0)
2014 
/**
 * Return (void) from the enclosing function when @p port_id is not a
 * valid Ethernet port; an error line is logged first.
 */
#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
		return; \
	} \
} while (0)
2021 
/**
 * User callback type invoked on an Rx burst of @p queue on @p port_id.
 * @p pkts holds @p nb_pkts received mbufs in an array sized for
 * @p max_pkts; @p user_param is the opaque pointer registered with the
 * callback. Returns the number of packets kept in @p pkts — presumably
 * the callback may drop/replace entries; confirm against
 * rte_eth_add_rx_callback() documentation.
 */
typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
	void *user_param);
2047 
/**
 * User callback type invoked on a Tx burst of @p queue on @p port_id.
 * @p pkts holds the @p nb_pkts mbufs about to be transmitted;
 * @p user_param is the opaque pointer registered with the callback.
 * Returns the number of packets to pass on — confirm exact contract in
 * rte_eth_add_tx_callback() documentation.
 */
typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
2070 
2081 };
2082 
/** Per-device SR-IOV information. */
struct rte_eth_dev_sriov {
	uint8_t active;          /**< Nonzero when SR-IOV is active — NOTE(review): confirm whether the value encodes the pool count. */
	uint8_t nb_q_per_pool;   /**< Number of queues per pool. */
	uint16_t def_vmdq_idx;   /**< Default VMDq pool index — presumably the PF's pool; verify. */
	uint16_t def_pool_q_idx; /**< Default queue index within the default pool. */
};
2089 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
2090 
2091 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2092 
2093 #define RTE_ETH_DEV_NO_OWNER 0
2094 
2095 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
2096 
/** Identity of an Ethernet port owner (see rte_eth_dev_owner_*()). */
struct rte_eth_dev_owner {
	uint64_t id; /**< Unique owner identifier (RTE_ETH_DEV_NO_OWNER means unowned). */
	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< Human-readable owner name. */
};
2101 
2107 #define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE RTE_BIT32(0)
2108 
2109 #define RTE_ETH_DEV_INTR_LSC RTE_BIT32(1)
2110 
2111 #define RTE_ETH_DEV_BONDING_MEMBER RTE_BIT32(2)
2112 
2113 #define RTE_ETH_DEV_INTR_RMV RTE_BIT32(3)
2114 
2115 #define RTE_ETH_DEV_REPRESENTOR RTE_BIT32(4)
2116 
2117 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR RTE_BIT32(5)
2118 
2122 #define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2123 
2136 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2137  const uint64_t owner_id);
2138 
/**
 * Iterate over all valid port IDs @p p owned by owner ID @p o,
 * starting the scan at port 0. The unsigned casts guard the bound
 * check against a signed @p p variable.
 */
#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
	for (p = rte_eth_find_next_owned_by(0, o); \
	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
	     p = rte_eth_find_next_owned_by(p + 1, o))
2146 
2155 uint16_t rte_eth_find_next(uint16_t port_id);
2156 
2160 #define RTE_ETH_FOREACH_DEV(p) \
2161  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
2162 
2174 uint16_t
2175 rte_eth_find_next_of(uint16_t port_id_start,
2176  const struct rte_device *parent);
2177 
2186 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
2187  for (port_id = rte_eth_find_next_of(0, parent); \
2188  port_id < RTE_MAX_ETHPORTS; \
2189  port_id = rte_eth_find_next_of(port_id + 1, parent))
2190 
2202 uint16_t
2203 rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2204 
2215 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
2216  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
2217  port_id < RTE_MAX_ETHPORTS; \
2218  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2219 
2230 int rte_eth_dev_owner_new(uint64_t *owner_id);
2231 
2242 int rte_eth_dev_owner_set(const uint16_t port_id,
2243  const struct rte_eth_dev_owner *owner);
2244 
2255 int rte_eth_dev_owner_unset(const uint16_t port_id,
2256  const uint64_t owner_id);
2257 
2266 int rte_eth_dev_owner_delete(const uint64_t owner_id);
2267 
2278 int rte_eth_dev_owner_get(const uint16_t port_id,
2279  struct rte_eth_dev_owner *owner);
2280 
2291 uint16_t rte_eth_dev_count_avail(void);
2292 
2301 uint16_t rte_eth_dev_count_total(void);
2302 
2314 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2315 
2324 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2325 
2334 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2335 
2347 __rte_experimental
2348 const char *rte_eth_dev_capability_name(uint64_t capability);
2349 
2389 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2390  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2391 
2400 int
2401 rte_eth_dev_is_removed(uint16_t port_id);
2402 
2465 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2466  uint16_t nb_rx_desc, unsigned int socket_id,
2467  const struct rte_eth_rxconf *rx_conf,
2468  struct rte_mempool *mb_pool);
2469 
2497 __rte_experimental
2499  (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2500  const struct rte_eth_hairpin_conf *conf);
2501 
2550 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2551  uint16_t nb_tx_desc, unsigned int socket_id,
2552  const struct rte_eth_txconf *tx_conf);
2553 
2579 __rte_experimental
2581  (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2582  const struct rte_eth_hairpin_conf *conf);
2583 
2610 __rte_experimental
2611 int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2612  size_t len, uint32_t direction);
2613 
2636 __rte_experimental
2637 int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2638 
2663 __rte_experimental
2664 int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2665 
2681 __rte_experimental
2682 int rte_eth_dev_count_aggr_ports(uint16_t port_id);
2683 
2711 __rte_experimental
2712 int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
2713  uint8_t affinity);
2714 
2727 int rte_eth_dev_socket_id(uint16_t port_id);
2728 
2738 int rte_eth_dev_is_valid_port(uint16_t port_id);
2739 
2756 __rte_experimental
2757 int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2758 
2775 __rte_experimental
2776 int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2777 
2795 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2796 
2813 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2814 
2832 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2833 
2850 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2851 
2875 int rte_eth_dev_start(uint16_t port_id);
2876 
2890 int rte_eth_dev_stop(uint16_t port_id);
2891 
2904 int rte_eth_dev_set_link_up(uint16_t port_id);
2905 
2915 int rte_eth_dev_set_link_down(uint16_t port_id);
2916 
2927 int rte_eth_dev_close(uint16_t port_id);
2928 
2966 int rte_eth_dev_reset(uint16_t port_id);
2967 
2979 int rte_eth_promiscuous_enable(uint16_t port_id);
2980 
2992 int rte_eth_promiscuous_disable(uint16_t port_id);
2993 
3004 int rte_eth_promiscuous_get(uint16_t port_id);
3005 
3017 int rte_eth_allmulticast_enable(uint16_t port_id);
3018 
3030 int rte_eth_allmulticast_disable(uint16_t port_id);
3031 
3042 int rte_eth_allmulticast_get(uint16_t port_id);
3043 
3061 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
3062 
3077 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
3078 
3092 __rte_experimental
3093 const char *rte_eth_link_speed_to_str(uint32_t link_speed);
3094 
3113 __rte_experimental
3114 int rte_eth_link_to_str(char *str, size_t len,
3115  const struct rte_eth_link *eth_link);
3116 
3134 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
3135 
3147 int rte_eth_stats_reset(uint16_t port_id);
3148 
3178 int rte_eth_xstats_get_names(uint16_t port_id,
3179  struct rte_eth_xstat_name *xstats_names,
3180  unsigned int size);
3181 
3215 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3216  unsigned int n);
3217 
3242 int
3243 rte_eth_xstats_get_names_by_id(uint16_t port_id,
3244  struct rte_eth_xstat_name *xstats_names, unsigned int size,
3245  uint64_t *ids);
3246 
3271 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3272  uint64_t *values, unsigned int size);
3273 
3293 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3294  uint64_t *id);
3295 
3308 int rte_eth_xstats_reset(uint16_t port_id);
3309 
3328 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
3329  uint16_t tx_queue_id, uint8_t stat_idx);
3330 
3349 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
3350  uint16_t rx_queue_id,
3351  uint8_t stat_idx);
3352 
3366 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3367 
3388 __rte_experimental
3389 int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3390  unsigned int num);
3391 
3411 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
3412 
3428 __rte_experimental
3429 int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf);
3430 
3451 int rte_eth_dev_fw_version_get(uint16_t port_id,
3452  char *fw_version, size_t fw_size);
3453 
3493 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3494  uint32_t *ptypes, int num);
3525 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3526  uint32_t *set_ptypes, unsigned int num);
3527 
3540 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3541 
3559 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3560 
3580 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3581 
3600 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3601  int on);
3602 
3619 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3620  enum rte_vlan_type vlan_type,
3621  uint16_t tag_type);
3622 
3640 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3641 
3655 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3656 
3671 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3672 
3698 __rte_experimental
3699 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
3700  uint8_t avail_thresh);
3701 
3728 __rte_experimental
3729 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
3730  uint8_t *avail_thresh);
3731 
3732 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3733  void *userdata);
3734 
3740  buffer_tx_error_fn error_callback;
3741  void *error_userdata;
3742  uint16_t size;
3743  uint16_t length;
3745  struct rte_mbuf *pkts[];
3746 };
3747 
3754 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3755  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3756 
3767 int
3768 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3769 
3794 int
3796  buffer_tx_error_fn callback, void *userdata);
3797 
3820 void
3821 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3822  void *userdata);
3823 
3847 void
3848 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3849  void *userdata);
3850 
3876 int
3877 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3878 
3911 };
3912 
3932 };
3933 
3952  uint64_t metadata;
3953 };
3954 
3992 };
3993 
4018  uint64_t metadata;
4019 };
4020 
4097 };
4098 
4112 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
4113  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
4114 
4132 int rte_eth_dev_callback_register(uint16_t port_id,
4133  enum rte_eth_event_type event,
4134  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4135 
4154 int rte_eth_dev_callback_unregister(uint16_t port_id,
4155  enum rte_eth_event_type event,
4156  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4157 
4179 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
4180 
4201 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
4202 
4220 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
4221 
4243 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4244  int epfd, int op, void *data);
4245 
4260 int
4261 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
4262 
4276 int rte_eth_led_on(uint16_t port_id);
4277 
4291 int rte_eth_led_off(uint16_t port_id);
4292 
4321 __rte_experimental
4322 int rte_eth_fec_get_capability(uint16_t port_id,
4323  struct rte_eth_fec_capa *speed_fec_capa,
4324  unsigned int num);
4325 
4346 __rte_experimental
4347 int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4348 
4372 __rte_experimental
4373 int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
4374 
4389 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4390  struct rte_eth_fc_conf *fc_conf);
4391 
4406 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4407  struct rte_eth_fc_conf *fc_conf);
4408 
4424 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
4425  struct rte_eth_pfc_conf *pfc_conf);
4426 
4445 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4446  uint32_t pool);
4447 
4465 __rte_experimental
4467  struct rte_eth_pfc_queue_info *pfc_queue_info);
4468 
4492 __rte_experimental
4494  struct rte_eth_pfc_queue_conf *pfc_queue_conf);
4495 
4510 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4511  struct rte_ether_addr *mac_addr);
4512 
4530 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
4531  struct rte_ether_addr *mac_addr);
4532 
4550 int rte_eth_dev_rss_reta_update(uint16_t port_id,
4551  struct rte_eth_rss_reta_entry64 *reta_conf,
4552  uint16_t reta_size);
4553 
4572 int rte_eth_dev_rss_reta_query(uint16_t port_id,
4573  struct rte_eth_rss_reta_entry64 *reta_conf,
4574  uint16_t reta_size);
4575 
4595 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4596  uint8_t on);
4597 
4616 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4617 
4634 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4635  uint32_t tx_rate);
4636 
4651 int rte_eth_dev_rss_hash_update(uint16_t port_id,
4652  struct rte_eth_rss_conf *rss_conf);
4653 
4669 int
4670 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4671  struct rte_eth_rss_conf *rss_conf);
4672 
4685 __rte_experimental
4686 const char *
4688 
4705 __rte_experimental
4706 int
4707 rte_eth_find_rss_algo(const char *name, uint32_t *algo);
4708 
4733 int
4734 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4735  struct rte_eth_udp_tunnel *tunnel_udp);
4736 
4756 int
4757 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4758  struct rte_eth_udp_tunnel *tunnel_udp);
4759 
4774 int rte_eth_dev_get_dcb_info(uint16_t port_id,
4775  struct rte_eth_dcb_info *dcb_info);
4776 
4777 struct rte_eth_rxtx_callback;
4778 
4804 const struct rte_eth_rxtx_callback *
4805 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4806  rte_rx_callback_fn fn, void *user_param);
4807 
4834 const struct rte_eth_rxtx_callback *
4835 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4836  rte_rx_callback_fn fn, void *user_param);
4837 
4863 const struct rte_eth_rxtx_callback *
4864 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4865  rte_tx_callback_fn fn, void *user_param);
4866 
4900 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4901  const struct rte_eth_rxtx_callback *user_cb);
4902 
4936 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4937  const struct rte_eth_rxtx_callback *user_cb);
4938 
4958 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4959  struct rte_eth_rxq_info *qinfo);
4960 
4980 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4981  struct rte_eth_txq_info *qinfo);
4982 
5003 __rte_experimental
5004 int rte_eth_recycle_rx_queue_info_get(uint16_t port_id,
5005  uint16_t queue_id,
5006  struct rte_eth_recycle_rxq_info *recycle_rxq_info);
5007 
5026 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5027  struct rte_eth_burst_mode *mode);
5028 
5047 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5048  struct rte_eth_burst_mode *mode);
5049 
5070 __rte_experimental
5071 int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5072  struct rte_power_monitor_cond *pmc);
5073 
5092 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
5093 
5106 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
5107 
5124 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5125 
5142 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5143 
5162 __rte_experimental
5163 int
5164 rte_eth_dev_get_module_info(uint16_t port_id,
5165  struct rte_eth_dev_module_info *modinfo);
5166 
5186 __rte_experimental
5187 int
5188 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5189  struct rte_dev_eeprom_info *info);
5190 
5210 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5211  struct rte_ether_addr *mc_addr_set,
5212  uint32_t nb_mc_addr);
5213 
5226 int rte_eth_timesync_enable(uint16_t port_id);
5227 
5240 int rte_eth_timesync_disable(uint16_t port_id);
5241 
5260 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
5261  struct timespec *timestamp, uint32_t flags);
5262 
5278 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5279  struct timespec *timestamp);
5280 
5298 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
5299 
5315 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
5316 
5335 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
5336 
5382 __rte_experimental
5383 int
5384 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
5385 
5401 int
5402 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
5403 
5420 int
5421 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5422 
5439 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5440  uint16_t *nb_rx_desc,
5441  uint16_t *nb_tx_desc);
5442 
5457 int
5458 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5459 
5469 void *
5470 rte_eth_dev_get_sec_ctx(uint16_t port_id);
5471 
5487 __rte_experimental
5488 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5489  struct rte_eth_hairpin_cap *cap);
5490 
5500  int pf;
5501  __extension__
5502  union {
5503  int vf;
5504  int sf;
5505  };
5506  uint32_t id_base;
5507  uint32_t id_end;
5508  char name[RTE_DEV_NAME_MAX_LEN];
5509 };
5510 
5518  uint16_t controller;
5519  uint16_t pf;
5520  uint32_t nb_ranges_alloc;
5521  uint32_t nb_ranges;
5522  struct rte_eth_representor_range ranges[];
5523 };
5524 
5548 __rte_experimental
5549 int rte_eth_representor_info_get(uint16_t port_id,
5550  struct rte_eth_representor_info *info);
5551 
5553 #define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5554 
5556 #define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5557 
5559 #define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5560 
5600 int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
5601 
5603 #define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
5604 
5605 #define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))
5606 
5617  uint32_t timeout_ms;
5619  uint16_t max_frags;
5624  uint16_t flags;
5625 };
5626 
5647 __rte_experimental
5648 int rte_eth_ip_reassembly_capability_get(uint16_t port_id,
5649  struct rte_eth_ip_reassembly_params *capa);
5650 
5672 __rte_experimental
5673 int rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5674  struct rte_eth_ip_reassembly_params *conf);
5675 
5705 __rte_experimental
5706 int rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5707  const struct rte_eth_ip_reassembly_params *conf);
5708 
5716 typedef struct {
5723  uint16_t time_spent;
5725  uint16_t nb_frags;
5727 
5746 __rte_experimental
5747 int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file);
5748 
5772 __rte_experimental
5773 int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5774  uint16_t offset, uint16_t num, FILE *file);
5775 
5799 __rte_experimental
5800 int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5801  uint16_t offset, uint16_t num, FILE *file);
5802 
5803 
5804 /* Congestion management */
5805 
5815 };
5816 
5833  uint64_t objs_supported;
5838  uint8_t rsvd[8];
5839 };
5840 
5851  enum rte_cman_mode mode;
5852  union {
5859  uint16_t rx_queue;
5866  uint8_t rsvd_obj_params[4];
5867  } obj_param;
5868  union {
5881  uint8_t rsvd_mode_params[4];
5882  } mode_param;
5883 };
5884 
5902 __rte_experimental
5903 int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info);
5904 
5922 __rte_experimental
5923 int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config);
5924 
5941 __rte_experimental
5942 int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config);
5943 
5964 __rte_experimental
5965 int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config);
5966 
5967 #include <rte_ethdev_core.h>
5968 
5992 uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
5993  struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
5994  void *opaque);
5995 
6083 static inline uint16_t
6084 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
6085  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
6086 {
6087  uint16_t nb_rx;
6088  struct rte_eth_fp_ops *p;
6089  void *qd;
6090 
6091 #ifdef RTE_ETHDEV_DEBUG_RX
6092  if (port_id >= RTE_MAX_ETHPORTS ||
6093  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6094  RTE_ETHDEV_LOG_LINE(ERR,
6095  "Invalid port_id=%u or queue_id=%u",
6096  port_id, queue_id);
6097  return 0;
6098  }
6099 #endif
6100 
6101  /* fetch pointer to queue data */
6102  p = &rte_eth_fp_ops[port_id];
6103  qd = p->rxq.data[queue_id];
6104 
6105 #ifdef RTE_ETHDEV_DEBUG_RX
6106  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6107 
6108  if (qd == NULL) {
6109  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
6110  queue_id, port_id);
6111  return 0;
6112  }
6113 #endif
6114 
6115  nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
6116 
6117 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6118  {
6119  void *cb;
6120 
6121  /* rte_memory_order_release memory order was used when the
6122  * call back was inserted into the list.
6123  * Since there is a clear dependency between loading
6124  * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6125  * not required.
6126  */
6127  cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
6128  rte_memory_order_relaxed);
6129  if (unlikely(cb != NULL))
6130  nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
6131  rx_pkts, nb_rx, nb_pkts, cb);
6132  }
6133 #endif
6134 
6135  rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
6136  return nb_rx;
6137 }
6138 
6156 static inline int
6157 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
6158 {
6159  struct rte_eth_fp_ops *p;
6160  void *qd;
6161 
6162 #ifdef RTE_ETHDEV_DEBUG_RX
6163  if (port_id >= RTE_MAX_ETHPORTS ||
6164  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6165  RTE_ETHDEV_LOG_LINE(ERR,
6166  "Invalid port_id=%u or queue_id=%u",
6167  port_id, queue_id);
6168  return -EINVAL;
6169  }
6170 #endif
6171 
6172  /* fetch pointer to queue data */
6173  p = &rte_eth_fp_ops[port_id];
6174  qd = p->rxq.data[queue_id];
6175 
6176 #ifdef RTE_ETHDEV_DEBUG_RX
6177  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6178  if (qd == NULL)
6179  return -EINVAL;
6180 #endif
6181 
6182  if (*p->rx_queue_count == NULL)
6183  return -ENOTSUP;
6184  return (int)(*p->rx_queue_count)(qd);
6185 }
6186 
6190 #define RTE_ETH_RX_DESC_AVAIL 0
6191 #define RTE_ETH_RX_DESC_DONE 1
6192 #define RTE_ETH_RX_DESC_UNAVAIL 2
6228 static inline int
6229 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
6230  uint16_t offset)
6231 {
6232  struct rte_eth_fp_ops *p;
6233  void *qd;
6234 
6235 #ifdef RTE_ETHDEV_DEBUG_RX
6236  if (port_id >= RTE_MAX_ETHPORTS ||
6237  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6238  RTE_ETHDEV_LOG_LINE(ERR,
6239  "Invalid port_id=%u or queue_id=%u",
6240  port_id, queue_id);
6241  return -EINVAL;
6242  }
6243 #endif
6244 
6245  /* fetch pointer to queue data */
6246  p = &rte_eth_fp_ops[port_id];
6247  qd = p->rxq.data[queue_id];
6248 
6249 #ifdef RTE_ETHDEV_DEBUG_RX
6250  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6251  if (qd == NULL)
6252  return -ENODEV;
6253 #endif
6254  if (*p->rx_descriptor_status == NULL)
6255  return -ENOTSUP;
6256  return (*p->rx_descriptor_status)(qd, offset);
6257 }
6258 
6262 #define RTE_ETH_TX_DESC_FULL 0
6263 #define RTE_ETH_TX_DESC_DONE 1
6264 #define RTE_ETH_TX_DESC_UNAVAIL 2
6300 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
6301  uint16_t queue_id, uint16_t offset)
6302 {
6303  struct rte_eth_fp_ops *p;
6304  void *qd;
6305 
6306 #ifdef RTE_ETHDEV_DEBUG_TX
6307  if (port_id >= RTE_MAX_ETHPORTS ||
6308  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6309  RTE_ETHDEV_LOG_LINE(ERR,
6310  "Invalid port_id=%u or queue_id=%u",
6311  port_id, queue_id);
6312  return -EINVAL;
6313  }
6314 #endif
6315 
6316  /* fetch pointer to queue data */
6317  p = &rte_eth_fp_ops[port_id];
6318  qd = p->txq.data[queue_id];
6319 
6320 #ifdef RTE_ETHDEV_DEBUG_TX
6321  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6322  if (qd == NULL)
6323  return -ENODEV;
6324 #endif
6325  if (*p->tx_descriptor_status == NULL)
6326  return -ENOTSUP;
6327  return (*p->tx_descriptor_status)(qd, offset);
6328 }
6329 
6349 uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
6350  struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
6351 
6423 static inline uint16_t
6424 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
6425  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6426 {
6427  struct rte_eth_fp_ops *p;
6428  void *qd;
6429 
6430 #ifdef RTE_ETHDEV_DEBUG_TX
6431  if (port_id >= RTE_MAX_ETHPORTS ||
6432  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6433  RTE_ETHDEV_LOG_LINE(ERR,
6434  "Invalid port_id=%u or queue_id=%u",
6435  port_id, queue_id);
6436  return 0;
6437  }
6438 #endif
6439 
6440  /* fetch pointer to queue data */
6441  p = &rte_eth_fp_ops[port_id];
6442  qd = p->txq.data[queue_id];
6443 
6444 #ifdef RTE_ETHDEV_DEBUG_TX
6445  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6446 
6447  if (qd == NULL) {
6448  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6449  queue_id, port_id);
6450  return 0;
6451  }
6452 #endif
6453 
6454 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6455  {
6456  void *cb;
6457 
6458  /* rte_memory_order_release memory order was used when the
6459  * call back was inserted into the list.
6460  * Since there is a clear dependency between loading
6461  * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6462  * not required.
6463  */
6464  cb = rte_atomic_load_explicit(&p->txq.clbk[queue_id],
6465  rte_memory_order_relaxed);
6466  if (unlikely(cb != NULL))
6467  nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
6468  tx_pkts, nb_pkts, cb);
6469  }
6470 #endif
6471 
6472  nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
6473 
6474  rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
6475  return nb_pkts;
6476 }
6477 
6531 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
6532 
6533 static inline uint16_t
6534 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
6535  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6536 {
6537  struct rte_eth_fp_ops *p;
6538  void *qd;
6539 
6540 #ifdef RTE_ETHDEV_DEBUG_TX
6541  if (port_id >= RTE_MAX_ETHPORTS ||
6542  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6543  RTE_ETHDEV_LOG_LINE(ERR,
6544  "Invalid port_id=%u or queue_id=%u",
6545  port_id, queue_id);
6546  rte_errno = ENODEV;
6547  return 0;
6548  }
6549 #endif
6550 
6551  /* fetch pointer to queue data */
6552  p = &rte_eth_fp_ops[port_id];
6553  qd = p->txq.data[queue_id];
6554 
6555 #ifdef RTE_ETHDEV_DEBUG_TX
6556  if (!rte_eth_dev_is_valid_port(port_id)) {
6557  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx port_id=%u", port_id);
6558  rte_errno = ENODEV;
6559  return 0;
6560  }
6561  if (qd == NULL) {
6562  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6563  queue_id, port_id);
6564  rte_errno = EINVAL;
6565  return 0;
6566  }
6567 #endif
6568 
6569  if (!p->tx_pkt_prepare)
6570  return nb_pkts;
6571 
6572  return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
6573 }
6574 
6575 #else
6576 
6577 /*
6578  * Native NOOP operation for compilation targets which don't require any
6579  * preparation steps, and where a functional NOOP may introduce an
6580  * unnecessary performance drop.
6581  *
6582  * Generally it is not a good idea to turn this on globally, and it should
6583  * not be used if the behavior of tx_preparation can change.
6584  */
6585 
6586 static inline uint16_t
6587 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
6588  __rte_unused uint16_t queue_id,
6589  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6590 {
6591  return nb_pkts;
6592 }
6593 
6594 #endif
6595 
6618 static inline uint16_t
6619 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
6620  struct rte_eth_dev_tx_buffer *buffer)
6621 {
6622  uint16_t sent;
6623  uint16_t to_send = buffer->length;
6624 
6625  if (to_send == 0)
6626  return 0;
6627 
6628  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
6629 
6630  buffer->length = 0;
6631 
6632  /* All packets sent, or to be dealt with by callback below */
6633  if (unlikely(sent != to_send))
6634  buffer->error_callback(&buffer->pkts[sent],
6635  (uint16_t)(to_send - sent),
6636  buffer->error_userdata);
6637 
6638  return sent;
6639 }
6640 
6671 static __rte_always_inline uint16_t
6672 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
6673  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
6674 {
6675  buffer->pkts[buffer->length++] = tx_pkt;
6676  if (buffer->length < buffer->size)
6677  return 0;
6678 
6679  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
6680 }
6681 
6735 __rte_experimental
6736 static inline uint16_t
6737 rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
6738  uint16_t tx_port_id, uint16_t tx_queue_id,
6739  struct rte_eth_recycle_rxq_info *recycle_rxq_info)
6740 {
6741  struct rte_eth_fp_ops *p1, *p2;
6742  void *qd1, *qd2;
6743  uint16_t nb_mbufs;
6744 
6745 #ifdef RTE_ETHDEV_DEBUG_TX
6746  if (tx_port_id >= RTE_MAX_ETHPORTS ||
6747  tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6748  RTE_ETHDEV_LOG_LINE(ERR,
6749  "Invalid tx_port_id=%u or tx_queue_id=%u",
6750  tx_port_id, tx_queue_id);
6751  return 0;
6752  }
6753 #endif
6754 
6755  /* fetch pointer to Tx queue data */
6756  p1 = &rte_eth_fp_ops[tx_port_id];
6757  qd1 = p1->txq.data[tx_queue_id];
6758 
6759 #ifdef RTE_ETHDEV_DEBUG_TX
6760  RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port_id, 0);
6761 
6762  if (qd1 == NULL) {
6763  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6764  tx_queue_id, tx_port_id);
6765  return 0;
6766  }
6767 #endif
6768  if (p1->recycle_tx_mbufs_reuse == NULL)
6769  return 0;
6770 
6771 #ifdef RTE_ETHDEV_DEBUG_RX
6772  if (rx_port_id >= RTE_MAX_ETHPORTS ||
6773  rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6774  RTE_ETHDEV_LOG_LINE(ERR, "Invalid rx_port_id=%u or rx_queue_id=%u",
6775  rx_port_id, rx_queue_id);
6776  return 0;
6777  }
6778 #endif
6779 
6780  /* fetch pointer to Rx queue data */
6781  p2 = &rte_eth_fp_ops[rx_port_id];
6782  qd2 = p2->rxq.data[rx_queue_id];
6783 
6784 #ifdef RTE_ETHDEV_DEBUG_RX
6785  RTE_ETH_VALID_PORTID_OR_ERR_RET(rx_port_id, 0);
6786 
6787  if (qd2 == NULL) {
6788  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
6789  rx_queue_id, rx_port_id);
6790  return 0;
6791  }
6792 #endif
6793  if (p2->recycle_rx_descriptors_refill == NULL)
6794  return 0;
6795 
6796  /* Copy used *rte_mbuf* buffer pointers from Tx mbuf ring
6797  * into Rx mbuf ring.
6798  */
6799  nb_mbufs = p1->recycle_tx_mbufs_reuse(qd1, recycle_rxq_info);
6800 
6801  /* If no recycling mbufs, return 0. */
6802  if (nb_mbufs == 0)
6803  return 0;
6804 
6805  /* Replenish the Rx descriptors with the recycling
6806  * into Rx mbuf ring.
6807  */
6808  p2->recycle_rx_descriptors_refill(qd2, nb_mbufs);
6809 
6810  return nb_mbufs;
6811 }
6812 
6841 __rte_experimental
6842 int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num);
6843 
6878 __rte_experimental
6879 static inline int
6880 rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
6881 {
6882  struct rte_eth_fp_ops *fops;
6883  void *qd;
6884  int rc;
6885 
6886 #ifdef RTE_ETHDEV_DEBUG_TX
6887  if (port_id >= RTE_MAX_ETHPORTS || !rte_eth_dev_is_valid_port(port_id)) {
6888  RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id);
6889  rc = -ENODEV;
6890  goto out;
6891  }
6892 
6893  if (queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6894  RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
6895  queue_id, port_id);
6896  rc = -EINVAL;
6897  goto out;
6898  }
6899 #endif
6900 
6901  /* Fetch pointer to Tx queue data */
6902  fops = &rte_eth_fp_ops[port_id];
6903  qd = fops->txq.data[queue_id];
6904 
6905 #ifdef RTE_ETHDEV_DEBUG_TX
6906  if (qd == NULL) {
6907  RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
6908  queue_id, port_id);
6909  rc = -EINVAL;
6910  goto out;
6911  }
6912 #endif
6913  if (fops->tx_queue_count == NULL) {
6914  rc = -ENOTSUP;
6915  goto out;
6916  }
6917 
6918  rc = fops->tx_queue_count(qd);
6919 
6920 out:
6921  rte_eth_trace_tx_queue_count(port_id, queue_id, rc);
6922  return rc;
6923 }
6924 
6925 #ifdef __cplusplus
6926 }
6927 #endif
6928 
6929 #endif /* _RTE_ETHDEV_H_ */
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1807
int rte_eth_dev_stop(uint16_t port_id)
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_promiscuous_disable(uint16_t port_id)
__extension__ uint32_t multi_pools
Definition: rte_ethdev.h:1708
int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
__rte_experimental int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
#define __rte_always_inline
Definition: rte_common.h:370
#define RTE_ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:843
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:1176
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
uint32_t mtu
Definition: rte_ethdev.h:421
uint16_t nb_desc
Definition: rte_ethdev.h:1866
rte_eth_event_macsec_type
Definition: rte_ethdev.h:3917
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1785
const uint32_t * dev_flags
Definition: rte_ethdev.h:1763
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
__rte_experimental int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6534
struct rte_device * device
Definition: rte_ethdev.h:1757
rte_eth_nb_tcs
Definition: rte_ethdev.h:904
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:285
static __rte_experimental int rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:6880
uint16_t rte_eth_find_next(uint16_t port_id)
__rte_experimental int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
rte_cman_mode
Definition: rte_cman.h:20
int rte_eth_led_off(uint16_t port_id)
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:6229
uint64_t imissed
Definition: rte_ethdev.h:271
uint32_t low_water
Definition: rte_ethdev.h:1370
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
uint8_t rss_key_len
Definition: rte_ethdev.h:499
uint32_t max_rx_bufsize
Definition: rte_ethdev.h:1772
__rte_experimental int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id, uint8_t affinity)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
static __rte_experimental uint16_t rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id, uint16_t tx_port_id, uint16_t tx_queue_id, struct rte_eth_recycle_rxq_info *recycle_rxq_info)
Definition: rte_ethdev.h:6737
uint8_t hthresh
Definition: rte_ethdev.h:366
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
__rte_experimental int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1789
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
__rte_experimental int rte_eth_dev_count_aggr_ports(uint16_t port_id)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
uint16_t reta_size
Definition: rte_ethdev.h:1793
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint32_t lpbk_mode
Definition: rte_ethdev.h:1504
#define rte_errno
Definition: rte_errno.h:29
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint32_t link_speeds
Definition: rte_ethdev.h:1495
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1791
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:408
rte_eth_fc_mode
Definition: rte_ethdev.h:1356
uint8_t enable_default_pool
Definition: rte_ethdev.h:956
uint32_t max_hash_mac_addrs
Definition: rte_ethdev.h:1780
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:1350
struct rte_mempool * mp
Definition: rte_ethdev.h:1880
int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
#define __rte_unused
Definition: rte_common.h:171
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:283
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
uint64_t opackets
Definition: rte_ethdev.h:264
rte_eth_cman_obj
Definition: rte_ethdev.h:5807
uint8_t hash_key_size
Definition: rte_ethdev.h:1794
int rte_eth_dev_close(uint16_t port_id)
struct rte_mempool * mp
Definition: rte_ethdev.h:1084
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
__rte_experimental int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, uint8_t avail_thresh)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
__rte_experimental const char * rte_eth_dev_rss_algo_name(enum rte_eth_hash_function rss_algo)
int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
struct rte_mempool * mp
Definition: rte_ethdev.h:1845
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1528
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:281
__rte_experimental int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
const char * name
Definition: rte_ethdev.h:1682
uint8_t queue_state
Definition: rte_ethdev.h:1867
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
int rte_eth_dev_set_link_up(uint16_t port_id)
#define RTE_BIT32(nr)
Definition: rte_bitops.h:40
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
__rte_experimental int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1801
int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
uint16_t share_qid
Definition: rte_ethdev.h:1129
uint8_t rx_deferred_start
Definition: rte_ethdev.h:1120
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:3745
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:4112
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:277
uint32_t high_water
Definition: rte_ethdev.h:1369
#define RTE_ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:373
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1915
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
__rte_experimental int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, unsigned int num)
int rte_eth_timesync_disable(uint16_t port_id)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint64_t offloads
Definition: rte_ethdev.h:1135
__rte_experimental int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, uint8_t *avail_thresh)
__rte_experimental int rte_eth_representor_info_get(uint16_t port_id, struct rte_eth_representor_info *info)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, struct rte_eth_pfc_queue_conf *pfc_queue_conf)
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num)
uint16_t send_xon
Definition: rte_ethdev.h:1372
struct rte_mempool ** rx_mempools
Definition: rte_ethdev.h:1164
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_allmulticast_enable(uint16_t port_id)
uint32_t offset_allowed
Definition: rte_ethdev.h:1709
__rte_experimental int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, struct rte_power_monitor_cond *pmc)
#define unlikely(x)
uint64_t ibytes
Definition: rte_ethdev.h:265
uint32_t offset_align_log2
Definition: rte_ethdev.h:1710
uint8_t avail_thresh
Definition: rte_ethdev.h:1857
uint64_t offloads
Definition: rte_ethdev.h:1186
int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
uint16_t max_nb_queues
Definition: rte_ethdev.h:1222
uint64_t oerrors
Definition: rte_ethdev.h:273
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
#define __rte_cache_min_aligned
Definition: rte_common.h:571
__rte_experimental int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
uint16_t max_mtu
Definition: rte_ethdev.h:1762
uint64_t offloads
Definition: rte_ethdev.h:429
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
uint16_t tx_free_thresh
Definition: rte_ethdev.h:1177
uint16_t nb_desc
Definition: rte_ethdev.h:1849
uint64_t modes_supported
Definition: rte_ethdev.h:5828
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:6084
int rte_eth_allmulticast_disable(uint16_t port_id)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
rte_eth_hash_function
Definition: rte_ethdev.h:457
int rte_eth_dev_start(uint16_t port_id)
__rte_experimental int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config)
uint16_t rx_buf_size
Definition: rte_ethdev.h:1850
__rte_experimental int rte_eth_ip_reassembly_capability_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *capa)
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1782
uint8_t scattered_rx
Definition: rte_ethdev.h:1847
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
#define RTE_ETH_MQ_RX_DCB_FLAG
Definition: rte_ethdev.h:374
uint64_t offloads
Definition: rte_ethdev.h:1008
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1802
__rte_experimental int rte_eth_ip_reassembly_conf_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *conf)
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1787
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:279
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
uint16_t min_mtu
Definition: rte_ethdev.h:1761
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_is_removed(uint16_t port_id)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:2068
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
uint64_t obytes
Definition: rte_ethdev.h:266
__rte_experimental int rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
uint8_t enable_loop_back
Definition: rte_ethdev.h:989
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
rte_eth_err_handle_mode
Definition: rte_ethdev.h:1735
__rte_experimental int rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_recycle_rxq_info *recycle_rxq_info)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, struct rte_eth_pfc_queue_info *pfc_queue_info)
int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
rte_eth_fec_mode
Definition: rte_ethdev.h:1985
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
uint16_t max_tx_queues
Definition: rte_ethdev.h:1777
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_xstats_reset(uint16_t port_id)
rte_eth_dev_state
Definition: rte_ethdev.h:2074
uint16_t rx_free_thresh
Definition: rte_ethdev.h:1118
uint64_t dev_capa
Definition: rte_ethdev.h:1821
uint64_t ierrors
Definition: rte_ethdev.h:272
#define RTE_ETH_MQ_RX_VMDQ_FLAG
Definition: rte_ethdev.h:375
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction)
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1797
#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:842
uint16_t rte_eth_dev_count_total(void)
int rte_eth_promiscuous_enable(uint16_t port_id)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
union rte_eth_rxseg * rx_seg
Definition: rte_ethdev.h:1143
int rte_eth_dev_owner_new(uint64_t *owner_id)
rte_vlan_type
Definition: rte_ethdev.h:439
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
__rte_experimental int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
uint16_t nb_seg_max
Definition: rte_ethdev.h:1337
uint64_t ipackets
Definition: rte_ethdev.h:263
uint16_t max_vfs
Definition: rte_ethdev.h:1781
uint16_t pause_time
Definition: rte_ethdev.h:1371
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_owner_delete(const uint64_t owner_id)
uint64_t rx_nombuf
Definition: rte_ethdev.h:274
rte_eth_event_macsec_subtype
Definition: rte_ethdev.h:3883
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:6672
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:1013
uint8_t queue_state
Definition: rte_ethdev.h:1848
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
uint32_t use_locked_device_memory
Definition: rte_ethdev.h:1289
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1800
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:3959
rte_eth_nb_pools
Definition: rte_ethdev.h:913
uint16_t nb_align
Definition: rte_ethdev.h:1327
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:382
__rte_experimental const char * rte_eth_link_speed_to_str(uint32_t link_speed)
const char * driver_name
Definition: rte_ethdev.h:1758
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:6157
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
__rte_experimental int rte_eth_find_rss_algo(const char *name, uint32_t *algo)
int rte_eth_dev_socket_id(uint16_t port_id)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint32_t tx_rate)
struct rte_mbuf ** mbuf_ring
Definition: rte_ethdev.h:1879
uint8_t enable_default_pool
Definition: rte_ethdev.h:987
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1808
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1778
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
rte_eth_tunnel_type
Definition: rte_ethdev.h:1445
__rte_experimental const char * rte_eth_dev_capability_name(uint64_t capability)
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint64_t value
Definition: rte_ethdev.h:1928
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition: rte_ethdev.h:667
__rte_experimental int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
__rte_experimental int rte_eth_ip_reassembly_conf_set(uint16_t port_id, const struct rte_eth_ip_reassembly_params *conf)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
__rte_experimental int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config)
int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1773
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
uint64_t rss_hf
Definition: rte_ethdev.h:504
uint64_t id
Definition: rte_ethdev.h:1927
__rte_experimental int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
unsigned int if_index
Definition: rte_ethdev.h:1759
__rte_experimental int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:1374
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:2044
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
uint8_t * rss_key
Definition: rte_ethdev.h:498
uint8_t tx_deferred_start
Definition: rte_ethdev.h:1180
uint8_t wthresh
Definition: rte_ethdev.h:367
uint16_t max_rx_queues
Definition: rte_ethdev.h:1776
uint16_t max_rx_mempools
Definition: rte_ethdev.h:1815
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
rte_eth_representor_type
Definition: rte_ethdev.h:1722
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:423
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
int rte_eth_allmulticast_get(uint16_t port_id)
uint8_t rx_drop_en
Definition: rte_ethdev.h:1119
int rte_eth_dev_is_valid_port(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint16_t rx_nseg
Definition: rte_ethdev.h:1121
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1775
int rte_eth_timesync_enable(uint16_t port_id)
uint8_t pthresh
Definition: rte_ethdev.h:365
uint16_t share_group
Definition: rte_ethdev.h:1128
uint32_t speed_capa
Definition: rte_ethdev.h:1805
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6424
__rte_experimental int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info)
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_experimental int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
uint64_t objs_supported
Definition: rte_ethdev.h:5833
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1765
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:6619
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
rte_eth_event_type
Definition: rte_ethdev.h:4024