DPDK  23.11.0
rte_ethdev.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
148 #ifdef __cplusplus
149 extern "C" {
150 #endif
151 
152 #include <stdint.h>
153 
154 /* Use this macro to check if LRO API is supported */
155 #define RTE_ETHDEV_HAS_LRO_SUPPORT
156 
157 /* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
158 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
159 #define RTE_ETHDEV_DEBUG_RX
160 #define RTE_ETHDEV_DEBUG_TX
161 #endif
162 
163 #include <rte_cman.h>
164 #include <rte_compat.h>
165 #include <rte_log.h>
166 #include <rte_interrupts.h>
167 #include <rte_dev.h>
168 #include <rte_devargs.h>
169 #include <rte_bitops.h>
170 #include <rte_errno.h>
171 #include <rte_common.h>
172 #include <rte_config.h>
173 #include <rte_power_intrinsics.h>
174 
175 #include "rte_ethdev_trace_fp.h"
176 #include "rte_dev_info.h"
177 
178 extern int rte_eth_dev_logtype;
179 
180 #define RTE_ETHDEV_LOG(level, ...) \
181  rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
182 
183 struct rte_mbuf;
184 
201 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
202 
217 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
218 
231 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
232 
246 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
247  for (rte_eth_iterator_init(iter, devargs), \
248  id = rte_eth_iterator_next(iter); \
249  id != RTE_MAX_ETHPORTS; \
250  id = rte_eth_iterator_next(iter))
251 
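/*
 * Illustrative usage sketch (not from the original source; the helper
 * name is hypothetical and <stdio.h> is assumed): print every port ID
 * matching a devargs string. If the loop is exited early (e.g. by
 * break), rte_eth_iterator_cleanup() must be called on the iterator.
 */
static void
example_list_matching_ports(const char *devargs)
{
    struct rte_dev_iterator iterator;
    uint16_t port_id;

    RTE_ETH_FOREACH_MATCHING_DEV(port_id, devargs, &iterator)
        printf("matched port %u\n", port_id);
}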
261 struct rte_eth_stats {
262  uint64_t ipackets;
263  uint64_t opackets;
264  uint64_t ibytes;
265  uint64_t obytes;
270  uint64_t imissed;
271  uint64_t ierrors;
272  uint64_t oerrors;
273  uint64_t rx_nombuf;
274  /* Queue stats are limited to max 256 queues */
276  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
278  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
280  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
282  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
284  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
285 };
286 
290 #define RTE_ETH_LINK_SPEED_AUTONEG 0
291 #define RTE_ETH_LINK_SPEED_FIXED RTE_BIT32(0)
292 #define RTE_ETH_LINK_SPEED_10M_HD RTE_BIT32(1)
293 #define RTE_ETH_LINK_SPEED_10M RTE_BIT32(2)
294 #define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)
295 #define RTE_ETH_LINK_SPEED_100M RTE_BIT32(4)
296 #define RTE_ETH_LINK_SPEED_1G RTE_BIT32(5)
297 #define RTE_ETH_LINK_SPEED_2_5G RTE_BIT32(6)
298 #define RTE_ETH_LINK_SPEED_5G RTE_BIT32(7)
299 #define RTE_ETH_LINK_SPEED_10G RTE_BIT32(8)
300 #define RTE_ETH_LINK_SPEED_20G RTE_BIT32(9)
301 #define RTE_ETH_LINK_SPEED_25G RTE_BIT32(10)
302 #define RTE_ETH_LINK_SPEED_40G RTE_BIT32(11)
303 #define RTE_ETH_LINK_SPEED_50G RTE_BIT32(12)
304 #define RTE_ETH_LINK_SPEED_56G RTE_BIT32(13)
305 #define RTE_ETH_LINK_SPEED_100G RTE_BIT32(14)
306 #define RTE_ETH_LINK_SPEED_200G RTE_BIT32(15)
307 #define RTE_ETH_LINK_SPEED_400G RTE_BIT32(16)
313 #define RTE_ETH_SPEED_NUM_NONE 0
314 #define RTE_ETH_SPEED_NUM_10M 10
315 #define RTE_ETH_SPEED_NUM_100M 100
316 #define RTE_ETH_SPEED_NUM_1G 1000
317 #define RTE_ETH_SPEED_NUM_2_5G 2500
318 #define RTE_ETH_SPEED_NUM_5G 5000
319 #define RTE_ETH_SPEED_NUM_10G 10000
320 #define RTE_ETH_SPEED_NUM_20G 20000
321 #define RTE_ETH_SPEED_NUM_25G 25000
322 #define RTE_ETH_SPEED_NUM_40G 40000
323 #define RTE_ETH_SPEED_NUM_50G 50000
324 #define RTE_ETH_SPEED_NUM_56G 56000
325 #define RTE_ETH_SPEED_NUM_100G 100000
326 #define RTE_ETH_SPEED_NUM_200G 200000
327 #define RTE_ETH_SPEED_NUM_400G 400000
328 #define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX
334 __extension__
335 struct rte_eth_link {
336  uint32_t link_speed;
337  uint16_t link_duplex : 1;
338  uint16_t link_autoneg : 1;
339  uint16_t link_status : 1;
340 } __rte_aligned(8);
345 #define RTE_ETH_LINK_HALF_DUPLEX 0
346 #define RTE_ETH_LINK_FULL_DUPLEX 1
347 #define RTE_ETH_LINK_DOWN 0
348 #define RTE_ETH_LINK_UP 1
349 #define RTE_ETH_LINK_FIXED 0
350 #define RTE_ETH_LINK_AUTONEG 1
351 #define RTE_ETH_LINK_MAX_STR_LEN 40
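/*
 * Illustrative usage sketch (hypothetical helper, assumes <stdio.h>):
 * query the link without waiting and format it with
 * rte_eth_link_to_str(), declared later in this file.
 */
static void
example_print_link(uint16_t port_id)
{
    struct rte_eth_link link;
    char text[RTE_ETH_LINK_MAX_STR_LEN];

    if (rte_eth_link_get_nowait(port_id, &link) == 0) {
        rte_eth_link_to_str(text, sizeof(text), &link);
        printf("Port %u: %s\n", port_id, text);
    }
}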
358 struct rte_eth_thresh {
359  uint8_t pthresh;
360  uint8_t hthresh;
361  uint8_t wthresh;
362 };
363 
367 #define RTE_ETH_MQ_RX_RSS_FLAG RTE_BIT32(0)
368 #define RTE_ETH_MQ_RX_DCB_FLAG RTE_BIT32(1)
369 #define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2)
376 enum rte_eth_rx_mq_mode {
378  RTE_ETH_MQ_RX_NONE = 0,
380  RTE_ETH_MQ_RX_RSS = RTE_ETH_MQ_RX_RSS_FLAG,
382  RTE_ETH_MQ_RX_DCB = RTE_ETH_MQ_RX_DCB_FLAG,
384  RTE_ETH_MQ_RX_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
388  RTE_ETH_MQ_RX_VMDQ_ONLY = RTE_ETH_MQ_RX_VMDQ_FLAG,
390  RTE_ETH_MQ_RX_VMDQ_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG,
392  RTE_ETH_MQ_RX_VMDQ_DCB = RTE_ETH_MQ_RX_VMDQ_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
394  RTE_ETH_MQ_RX_VMDQ_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG |
395   RTE_ETH_MQ_RX_DCB_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG,
396 };
397 
401 enum rte_eth_tx_mq_mode {
402  RTE_ETH_MQ_TX_NONE = 0,
404  RTE_ETH_MQ_TX_DCB,
405  RTE_ETH_MQ_TX_VMDQ_DCB,
406  RTE_ETH_MQ_TX_VMDQ_ONLY,
407 };
408 
413 struct rte_eth_rxmode {
414  enum rte_eth_rx_mq_mode mq_mode;
415  uint32_t mtu;
422  uint32_t max_lro_pkt_size;
423  uint64_t offloads;
424 
425  uint64_t reserved_64s[2];
426  void *reserved_ptrs[2];
427 };
428 
433 enum rte_vlan_type {
434  RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
435  RTE_ETH_VLAN_TYPE_INNER,
436  RTE_ETH_VLAN_TYPE_OUTER,
437  RTE_ETH_VLAN_TYPE_MAX,
438 };
439 
444 struct rte_eth_vlan_filter_conf {
445  uint64_t ids[64];
446 };
447 
463 enum rte_eth_hash_function {
464  RTE_ETH_HASH_FUNCTION_DEFAULT = 0,
465  RTE_ETH_HASH_FUNCTION_TOEPLITZ,
466  RTE_ETH_HASH_FUNCTION_SIMPLE_XOR,
467  RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ,
468  RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT,
469  RTE_ETH_HASH_FUNCTION_MAX,
470 };
471 
472 #define RTE_ETH_HASH_ALGO_TO_CAPA(x) RTE_BIT32(x)
473 #define RTE_ETH_HASH_ALGO_CAPA_MASK(x) RTE_BIT32(RTE_ETH_HASH_FUNCTION_ ## x)
474 
491 struct rte_eth_rss_conf {
492  uint8_t *rss_key;
493  uint8_t rss_key_len;
498  uint64_t rss_hf;
499  enum rte_eth_hash_function algorithm;
500 };
501 
502 /*
503  * A packet can be identified by hardware as different flow types. Different
504  * NIC hardware may support different flow types.
505  * The NIC hardware classifies each packet as the deepest protocol it can
506  * recognize, and each packet matches exactly one flow type. For example, a
507  * packet identified as 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP' will not match any
508  * other flow type, even though it is also an IPv4 packet.
509  */
510 #define RTE_ETH_FLOW_UNKNOWN 0
511 #define RTE_ETH_FLOW_RAW 1
512 #define RTE_ETH_FLOW_IPV4 2
513 #define RTE_ETH_FLOW_FRAG_IPV4 3
514 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
515 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
516 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
517 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
518 #define RTE_ETH_FLOW_IPV6 8
519 #define RTE_ETH_FLOW_FRAG_IPV6 9
520 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
521 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
522 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
523 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
524 #define RTE_ETH_FLOW_L2_PAYLOAD 14
525 #define RTE_ETH_FLOW_IPV6_EX 15
526 #define RTE_ETH_FLOW_IPV6_TCP_EX 16
527 #define RTE_ETH_FLOW_IPV6_UDP_EX 17
528 
529 #define RTE_ETH_FLOW_PORT 18
530 #define RTE_ETH_FLOW_VXLAN 19
531 #define RTE_ETH_FLOW_GENEVE 20
532 #define RTE_ETH_FLOW_NVGRE 21
533 #define RTE_ETH_FLOW_VXLAN_GPE 22
534 #define RTE_ETH_FLOW_GTPU 23
535 #define RTE_ETH_FLOW_MAX 24
536 
537 /*
538  * Below macros are defined for RSS offload types, they can be used to
539  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
540  */
541 #define RTE_ETH_RSS_IPV4 RTE_BIT64(2)
542 #define RTE_ETH_RSS_FRAG_IPV4 RTE_BIT64(3)
543 #define RTE_ETH_RSS_NONFRAG_IPV4_TCP RTE_BIT64(4)
544 #define RTE_ETH_RSS_NONFRAG_IPV4_UDP RTE_BIT64(5)
545 #define RTE_ETH_RSS_NONFRAG_IPV4_SCTP RTE_BIT64(6)
546 #define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
547 #define RTE_ETH_RSS_IPV6 RTE_BIT64(8)
548 #define RTE_ETH_RSS_FRAG_IPV6 RTE_BIT64(9)
549 #define RTE_ETH_RSS_NONFRAG_IPV6_TCP RTE_BIT64(10)
550 #define RTE_ETH_RSS_NONFRAG_IPV6_UDP RTE_BIT64(11)
551 #define RTE_ETH_RSS_NONFRAG_IPV6_SCTP RTE_BIT64(12)
552 #define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
553 #define RTE_ETH_RSS_L2_PAYLOAD RTE_BIT64(14)
554 #define RTE_ETH_RSS_IPV6_EX RTE_BIT64(15)
555 #define RTE_ETH_RSS_IPV6_TCP_EX RTE_BIT64(16)
556 #define RTE_ETH_RSS_IPV6_UDP_EX RTE_BIT64(17)
557 #define RTE_ETH_RSS_PORT RTE_BIT64(18)
558 #define RTE_ETH_RSS_VXLAN RTE_BIT64(19)
559 #define RTE_ETH_RSS_GENEVE RTE_BIT64(20)
560 #define RTE_ETH_RSS_NVGRE RTE_BIT64(21)
561 #define RTE_ETH_RSS_GTPU RTE_BIT64(23)
562 #define RTE_ETH_RSS_ETH RTE_BIT64(24)
563 #define RTE_ETH_RSS_S_VLAN RTE_BIT64(25)
564 #define RTE_ETH_RSS_C_VLAN RTE_BIT64(26)
565 #define RTE_ETH_RSS_ESP RTE_BIT64(27)
566 #define RTE_ETH_RSS_AH RTE_BIT64(28)
567 #define RTE_ETH_RSS_L2TPV3 RTE_BIT64(29)
568 #define RTE_ETH_RSS_PFCP RTE_BIT64(30)
569 #define RTE_ETH_RSS_PPPOE RTE_BIT64(31)
570 #define RTE_ETH_RSS_ECPRI RTE_BIT64(32)
571 #define RTE_ETH_RSS_MPLS RTE_BIT64(33)
572 #define RTE_ETH_RSS_IPV4_CHKSUM RTE_BIT64(34)
573 
586 #define RTE_ETH_RSS_L4_CHKSUM RTE_BIT64(35)
587 
588 #define RTE_ETH_RSS_L2TPV2 RTE_BIT64(36)
589 
590 /*
591  * We use the following macros to combine with above RTE_ETH_RSS_* for
592  * more specific input set selection. These bits are defined starting
593  * from the high end of the 64 bits.
594  * Note: if an RTE_ETH_RSS_* flag above is used without SRC_ONLY or
595  * DST_ONLY, both the source and the destination fields are taken into
596  * account. Setting SRC_ONLY and DST_ONLY of the same level simultaneously
597  * is equivalent to setting neither of them.
598  */
599 #define RTE_ETH_RSS_L3_SRC_ONLY RTE_BIT64(63)
600 #define RTE_ETH_RSS_L3_DST_ONLY RTE_BIT64(62)
601 #define RTE_ETH_RSS_L4_SRC_ONLY RTE_BIT64(61)
602 #define RTE_ETH_RSS_L4_DST_ONLY RTE_BIT64(60)
603 #define RTE_ETH_RSS_L2_SRC_ONLY RTE_BIT64(59)
604 #define RTE_ETH_RSS_L2_DST_ONLY RTE_BIT64(58)
605 
606 /*
607  * Only select IPV6 address prefix as RSS input set according to
608  * https://tools.ietf.org/html/rfc6052
609  * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
610  * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
611  */
612 #define RTE_ETH_RSS_L3_PRE32 RTE_BIT64(57)
613 #define RTE_ETH_RSS_L3_PRE40 RTE_BIT64(56)
614 #define RTE_ETH_RSS_L3_PRE48 RTE_BIT64(55)
615 #define RTE_ETH_RSS_L3_PRE56 RTE_BIT64(54)
616 #define RTE_ETH_RSS_L3_PRE64 RTE_BIT64(53)
617 #define RTE_ETH_RSS_L3_PRE96 RTE_BIT64(52)
618 
619 /*
620  * Use the following macros to combine with the above layers
621  * to choose inner and outer layers or both for RSS computation.
622  * Bits 50 and 51 are reserved for this.
623  */
624 
632 #define RTE_ETH_RSS_LEVEL_PMD_DEFAULT (UINT64_C(0) << 50)
633 
638 #define RTE_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
639 
644 #define RTE_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
645 #define RTE_ETH_RSS_LEVEL_MASK (UINT64_C(3) << 50)
646 
647 #define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
648 
659 static inline uint64_t
660 rte_eth_rss_hf_refine(uint64_t rss_hf)
661 {
662  if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
663  rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
664 
665  if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
666  rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
667 
668  return rss_hf;
669 }
670 
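/*
 * Illustration (added note): requesting both the SRC_ONLY and the
 * DST_ONLY bit of the same level is equivalent to requesting neither,
 * so the refine helper clears the redundant pair:
 *
 *   uint64_t hf = RTE_ETH_RSS_IPV4 |
 *                 RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY;
 *   hf = rte_eth_rss_hf_refine(hf);   // hf == RTE_ETH_RSS_IPV4
 */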
671 #define RTE_ETH_RSS_IPV6_PRE32 ( \
672  RTE_ETH_RSS_IPV6 | \
673  RTE_ETH_RSS_L3_PRE32)
674 
675 #define RTE_ETH_RSS_IPV6_PRE40 ( \
676  RTE_ETH_RSS_IPV6 | \
677  RTE_ETH_RSS_L3_PRE40)
678 
679 #define RTE_ETH_RSS_IPV6_PRE48 ( \
680  RTE_ETH_RSS_IPV6 | \
681  RTE_ETH_RSS_L3_PRE48)
682 
683 #define RTE_ETH_RSS_IPV6_PRE56 ( \
684  RTE_ETH_RSS_IPV6 | \
685  RTE_ETH_RSS_L3_PRE56)
686 
687 #define RTE_ETH_RSS_IPV6_PRE64 ( \
688  RTE_ETH_RSS_IPV6 | \
689  RTE_ETH_RSS_L3_PRE64)
690 
691 #define RTE_ETH_RSS_IPV6_PRE96 ( \
692  RTE_ETH_RSS_IPV6 | \
693  RTE_ETH_RSS_L3_PRE96)
694 
695 #define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
696  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
697  RTE_ETH_RSS_L3_PRE32)
698 
699 #define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
700  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
701  RTE_ETH_RSS_L3_PRE40)
702 
703 #define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
704  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
705  RTE_ETH_RSS_L3_PRE48)
706 
707 #define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
708  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
709  RTE_ETH_RSS_L3_PRE56)
710 
711 #define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
712  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
713  RTE_ETH_RSS_L3_PRE64)
714 
715 #define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
716  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
717  RTE_ETH_RSS_L3_PRE96)
718 
719 #define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
720  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
721  RTE_ETH_RSS_L3_PRE32)
722 
723 #define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
724  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
725  RTE_ETH_RSS_L3_PRE40)
726 
727 #define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
728  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
729  RTE_ETH_RSS_L3_PRE48)
730 
731 #define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
732  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
733  RTE_ETH_RSS_L3_PRE56)
734 
735 #define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
736  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
737  RTE_ETH_RSS_L3_PRE64)
738 
739 #define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
740  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
741  RTE_ETH_RSS_L3_PRE96)
742 
743 #define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
744  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
745  RTE_ETH_RSS_L3_PRE32)
746 
747 #define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
748  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
749  RTE_ETH_RSS_L3_PRE40)
750 
751 #define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
752  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
753  RTE_ETH_RSS_L3_PRE48)
754 
755 #define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
756  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
757  RTE_ETH_RSS_L3_PRE56)
758 
759 #define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
760  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
761  RTE_ETH_RSS_L3_PRE64)
762 
763 #define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
764  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
765  RTE_ETH_RSS_L3_PRE96)
766 
767 #define RTE_ETH_RSS_IP ( \
768  RTE_ETH_RSS_IPV4 | \
769  RTE_ETH_RSS_FRAG_IPV4 | \
770  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
771  RTE_ETH_RSS_IPV6 | \
772  RTE_ETH_RSS_FRAG_IPV6 | \
773  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
774  RTE_ETH_RSS_IPV6_EX)
775 
776 #define RTE_ETH_RSS_UDP ( \
777  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
778  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
779  RTE_ETH_RSS_IPV6_UDP_EX)
780 
781 #define RTE_ETH_RSS_TCP ( \
782  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
783  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
784  RTE_ETH_RSS_IPV6_TCP_EX)
785 
786 #define RTE_ETH_RSS_SCTP ( \
787  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
788  RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
789 
790 #define RTE_ETH_RSS_TUNNEL ( \
791  RTE_ETH_RSS_VXLAN | \
792  RTE_ETH_RSS_GENEVE | \
793  RTE_ETH_RSS_NVGRE)
794 
795 #define RTE_ETH_RSS_VLAN ( \
796  RTE_ETH_RSS_S_VLAN | \
797  RTE_ETH_RSS_C_VLAN)
798 
800 #define RTE_ETH_RSS_PROTO_MASK ( \
801  RTE_ETH_RSS_IPV4 | \
802  RTE_ETH_RSS_FRAG_IPV4 | \
803  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
804  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
805  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
806  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
807  RTE_ETH_RSS_IPV6 | \
808  RTE_ETH_RSS_FRAG_IPV6 | \
809  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
810  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
811  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
812  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
813  RTE_ETH_RSS_L2_PAYLOAD | \
814  RTE_ETH_RSS_IPV6_EX | \
815  RTE_ETH_RSS_IPV6_TCP_EX | \
816  RTE_ETH_RSS_IPV6_UDP_EX | \
817  RTE_ETH_RSS_PORT | \
818  RTE_ETH_RSS_VXLAN | \
819  RTE_ETH_RSS_GENEVE | \
820  RTE_ETH_RSS_NVGRE | \
821  RTE_ETH_RSS_MPLS)
822 
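/*
 * Illustrative sketch (added note): a typical rss_hf request hashing on
 * L3 addresses and L4 ports of the innermost headers of tunnelled
 * packets. Bits the device does not support must be masked out first
 * (see flow_type_rss_offloads in struct rte_eth_dev_info below):
 *
 *   uint64_t rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
 *                     RTE_ETH_RSS_UDP | RTE_ETH_RSS_LEVEL_INNERMOST;
 */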
823 /*
824  * Definitions used for redirection table entry size.
825  * Some RSS RETA sizes may not be supported by some drivers, check the
826  * documentation or the description of relevant functions for more details.
827  */
828 #define RTE_ETH_RSS_RETA_SIZE_64 64
829 #define RTE_ETH_RSS_RETA_SIZE_128 128
830 #define RTE_ETH_RSS_RETA_SIZE_256 256
831 #define RTE_ETH_RSS_RETA_SIZE_512 512
832 #define RTE_ETH_RETA_GROUP_SIZE 64
833 
835 #define RTE_ETH_VMDQ_MAX_VLAN_FILTERS 64
836 #define RTE_ETH_DCB_NUM_USER_PRIORITIES 8
837 #define RTE_ETH_VMDQ_DCB_NUM_QUEUES 128
838 #define RTE_ETH_DCB_NUM_QUEUES 128
842 #define RTE_ETH_DCB_PG_SUPPORT RTE_BIT32(0)
843 #define RTE_ETH_DCB_PFC_SUPPORT RTE_BIT32(1)
847 #define RTE_ETH_VLAN_STRIP_OFFLOAD 0x0001
848 #define RTE_ETH_VLAN_FILTER_OFFLOAD 0x0002
849 #define RTE_ETH_VLAN_EXTEND_OFFLOAD 0x0004
850 #define RTE_ETH_QINQ_STRIP_OFFLOAD 0x0008
852 #define RTE_ETH_VLAN_STRIP_MASK 0x0001
853 #define RTE_ETH_VLAN_FILTER_MASK 0x0002
854 #define RTE_ETH_VLAN_EXTEND_MASK 0x0004
855 #define RTE_ETH_QINQ_STRIP_MASK 0x0008
856 #define RTE_ETH_VLAN_ID_MAX 0x0FFF
859 /* Definitions used for receive MAC address */
860 #define RTE_ETH_NUM_RECEIVE_MAC_ADDR 128
862 /* Definitions used for unicast hash */
863 #define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128
869 #define RTE_ETH_VMDQ_ACCEPT_UNTAG RTE_BIT32(0)
870 
871 #define RTE_ETH_VMDQ_ACCEPT_HASH_MC RTE_BIT32(1)
872 
873 #define RTE_ETH_VMDQ_ACCEPT_HASH_UC RTE_BIT32(2)
874 
875 #define RTE_ETH_VMDQ_ACCEPT_BROADCAST RTE_BIT32(3)
876 
877 #define RTE_ETH_VMDQ_ACCEPT_MULTICAST RTE_BIT32(4)
878 
887 struct rte_eth_rss_reta_entry64 {
888  uint64_t mask;
890  uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
891 };
892 
896 enum rte_eth_nb_tcs {
897  RTE_ETH_4_TCS = 4,
899  RTE_ETH_8_TCS = 8,
900 };
901 
902 enum rte_eth_nb_pools {
904  RTE_ETH_8_POOLS = 8,
906  RTE_ETH_16_POOLS = 16,
908  RTE_ETH_32_POOLS = 32,
910  RTE_ETH_64_POOLS = 64,
911 };
912 
913 /* This structure may be extended in future. */
914 struct rte_eth_dcb_rx_conf {
915  enum rte_eth_nb_tcs nb_tcs;
917  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
918 };
919 
920 struct rte_eth_vmdq_dcb_tx_conf {
921  enum rte_eth_nb_pools nb_queue_pools;
923  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
924 };
925 
926 struct rte_eth_dcb_tx_conf {
927  enum rte_eth_nb_tcs nb_tcs;
929  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
930 };
931 
932 struct rte_eth_vmdq_tx_conf {
933  enum rte_eth_nb_pools nb_queue_pools;
934 };
935 
947 struct rte_eth_vmdq_dcb_conf {
948  enum rte_eth_nb_pools nb_queue_pools;
949  uint8_t enable_default_pool;
950  uint8_t default_pool;
951  uint8_t nb_pool_maps;
952  struct {
953  uint16_t vlan_id;
954  uint64_t pools;
955  } pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS];
957  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
958 };
959 
978 struct rte_eth_vmdq_rx_conf {
979  enum rte_eth_nb_pools nb_queue_pools;
980  uint8_t enable_default_pool;
981  uint8_t default_pool;
982  uint8_t enable_loop_back;
983  uint8_t nb_pool_maps;
984  uint32_t rx_mode;
985  struct {
986  uint16_t vlan_id;
987  uint64_t pools;
988  } pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS];
989 };
990 
994 struct rte_eth_txmode {
995  enum rte_eth_tx_mq_mode mq_mode;
1001  uint64_t offloads;
1002 
1003  uint16_t pvid;
1004  __extension__
1005  uint8_t
1006  hw_vlan_reject_tagged : 1,
1008  hw_vlan_reject_untagged : 1,
1010  hw_vlan_insert_pvid : 1;
1011 
1012  uint64_t reserved_64s[2];
1013  void *reserved_ptrs[2];
1014 };
1015 
1076 struct rte_eth_rxseg_split {
1077  struct rte_mempool *mp;
1078  uint16_t length;
1079  uint16_t offset;
1091  uint32_t proto_hdr;
1092 };
1093 
1100 union rte_eth_rxseg {
1101  /* The settings for buffer split offload. */
1102  struct rte_eth_rxseg_split split;
1103  /* Other feature settings should be added here. */
1104 };
1105 
1109 struct rte_eth_rxconf {
1110  struct rte_eth_thresh rx_thresh;
1111  uint16_t rx_free_thresh;
1112  uint8_t rx_drop_en;
1113  uint8_t rx_deferred_start;
1114  uint16_t rx_nseg;
1121  uint16_t share_group;
1122  uint16_t share_qid;
1128  uint64_t offloads;
1136  union rte_eth_rxseg *rx_seg;
1137 
1157  struct rte_mempool **rx_mempools;
1158  uint16_t rx_nmempool;
1160  uint64_t reserved_64s[2];
1161  void *reserved_ptrs[2];
1162 };
1163 
1167 struct rte_eth_txconf {
1168  struct rte_eth_thresh tx_thresh;
1169  uint16_t tx_rs_thresh;
1170  uint16_t tx_free_thresh;
1178  uint8_t tx_deferred_start;
1179  uint64_t offloads;
1180 
1181  uint64_t reserved_64s[2];
1182  void *reserved_ptrs[2];
1183 };
1184 
1196 struct rte_eth_hairpin_queue_cap {
1201  uint32_t locked_device_memory:1;
1202  uint32_t rte_memory:1;
1203 
1204  uint32_t reserved:30;
1205 };
1206 
1214 struct rte_eth_hairpin_cap {
1215  uint16_t max_nb_queues;
1217  uint16_t max_rx_2_tx;
1219  uint16_t max_tx_2_rx;
1220  uint16_t max_nb_desc;
1221  struct rte_eth_hairpin_queue_cap rx_cap;
1222  struct rte_eth_hairpin_queue_cap tx_cap;
1223 };
1224 
1225 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
1226 
1233 struct rte_eth_hairpin_peer {
1234  uint16_t port;
1235  uint16_t queue;
1236 };
1237 
1244 struct rte_eth_hairpin_conf {
1245  uint32_t peer_count:16;
1256  uint32_t tx_explicit:1;
1257 
1269  uint32_t manual_bind:1;
1270 
1282  uint32_t use_locked_device_memory:1;
1283 
1295  uint32_t use_rte_memory:1;
1296 
1307  uint32_t force_memory:1;
1308 
1309  uint32_t reserved:11;
1311  struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1312 };
1313 
1317 struct rte_eth_desc_lim {
1318  uint16_t nb_max;
1319  uint16_t nb_min;
1320  uint16_t nb_align;
1330  uint16_t nb_seg_max;
1331 
1343  uint16_t nb_mtu_seg_max;
1344 };
1345 
1349 enum rte_eth_fc_mode {
1350  RTE_ETH_FC_NONE = 0,
1351  RTE_ETH_FC_RX_PAUSE,
1352  RTE_ETH_FC_TX_PAUSE,
1353  RTE_ETH_FC_FULL,
1354 };
1355 
1361 struct rte_eth_fc_conf {
1362  uint32_t high_water;
1363  uint32_t low_water;
1364  uint16_t pause_time;
1365  uint16_t send_xon;
1366  enum rte_eth_fc_mode mode;
1368  uint8_t autoneg;
1369 };
1370 
1376 struct rte_eth_pfc_conf {
1377  struct rte_eth_fc_conf fc;
1378  uint8_t priority;
1379 };
1380 
1390 struct rte_eth_pfc_queue_info {
1391  uint8_t tc_max;
1393  enum rte_eth_fc_mode mode_capa;
1394 };
1395 
1413 struct rte_eth_pfc_queue_conf {
1414  enum rte_eth_fc_mode mode;
1416  struct {
1417  uint16_t tx_qid;
1421  uint8_t tc;
1422  } rx_pause; /* Valid when (mode == FC_RX_PAUSE || mode == FC_FULL) */
1423 
1424  struct {
1425  uint16_t pause_time;
1426  uint16_t rx_qid;
1430  uint8_t tc;
1431  } tx_pause; /* Valid when (mode == FC_TX_PAUSE || mode == FC_FULL) */
1432 };
1433 
1438 enum rte_eth_tunnel_type {
1439  RTE_ETH_TUNNEL_TYPE_NONE = 0,
1440  RTE_ETH_TUNNEL_TYPE_VXLAN,
1441  RTE_ETH_TUNNEL_TYPE_GENEVE,
1442  RTE_ETH_TUNNEL_TYPE_TEREDO,
1443  RTE_ETH_TUNNEL_TYPE_NVGRE,
1444  RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1445  RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1446  RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1447  RTE_ETH_TUNNEL_TYPE_ECPRI,
1448  RTE_ETH_TUNNEL_TYPE_MAX,
1449 };
1450 
1451 /* Deprecated API file for rte_eth_dev_filter_* functions */
1452 #include "rte_eth_ctrl.h"
1453 
1463 struct rte_eth_udp_tunnel {
1464  uint16_t udp_port;
1465  uint8_t prot_type;
1466 };
1467 
1472 struct rte_eth_intr_conf {
1473  uint32_t lsc:1;
1475  uint32_t rxq:1;
1477  uint32_t rmv:1;
1478 };
1479 
1480 #define rte_intr_conf rte_eth_intr_conf
1481 
1487 struct rte_eth_conf {
1488  uint32_t link_speeds;
1495  struct rte_eth_rxmode rxmode;
1496  struct rte_eth_txmode txmode;
1497  uint32_t lpbk_mode;
1502  struct {
1503  struct rte_eth_rss_conf rss_conf;
1505  struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
1507  struct rte_eth_dcb_rx_conf dcb_rx_conf;
1509  struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
1510  } rx_adv_conf;
1511  union {
1513  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1515  struct rte_eth_dcb_tx_conf dcb_tx_conf;
1517  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1518  } tx_adv_conf;
1520  uint32_t dcb_capability_en;
1522  struct rte_eth_intr_conf intr_conf;
1523 };
1524 
1528 #define RTE_ETH_RX_OFFLOAD_VLAN_STRIP RTE_BIT64(0)
1529 #define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1530 #define RTE_ETH_RX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1531 #define RTE_ETH_RX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1532 #define RTE_ETH_RX_OFFLOAD_TCP_LRO RTE_BIT64(4)
1533 #define RTE_ETH_RX_OFFLOAD_QINQ_STRIP RTE_BIT64(5)
1534 #define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1535 #define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP RTE_BIT64(7)
1536 #define RTE_ETH_RX_OFFLOAD_VLAN_FILTER RTE_BIT64(9)
1537 #define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND RTE_BIT64(10)
1538 #define RTE_ETH_RX_OFFLOAD_SCATTER RTE_BIT64(13)
1539 
1544 #define RTE_ETH_RX_OFFLOAD_TIMESTAMP RTE_BIT64(14)
1545 #define RTE_ETH_RX_OFFLOAD_SECURITY RTE_BIT64(15)
1546 #define RTE_ETH_RX_OFFLOAD_KEEP_CRC RTE_BIT64(16)
1547 #define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM RTE_BIT64(17)
1548 #define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(18)
1549 #define RTE_ETH_RX_OFFLOAD_RSS_HASH RTE_BIT64(19)
1550 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT RTE_BIT64(20)
1551 
1552 #define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1553  RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1554  RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1555 #define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1556  RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1557  RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1558  RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1559 
1560 /*
1561  * If new Rx offload capabilities are defined, they also must be
1562  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1563  */
1564 
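/*
 * Illustrative usage sketch (hypothetical helper): request Rx checksum
 * offloads at configure time, keeping only the bits the device reports
 * as supported. struct rte_eth_dev_info and rte_eth_dev_info_get() are
 * declared later in this file.
 */
static int
example_request_rx_checksum(uint16_t port_id, struct rte_eth_conf *conf)
{
    struct rte_eth_dev_info dev_info;
    int ret;

    ret = rte_eth_dev_info_get(port_id, &dev_info);
    if (ret != 0)
        return ret;

    conf->rxmode.offloads |=
        RTE_ETH_RX_OFFLOAD_CHECKSUM & dev_info.rx_offload_capa;
    return 0;
}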
1568 #define RTE_ETH_TX_OFFLOAD_VLAN_INSERT RTE_BIT64(0)
1569 #define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1570 #define RTE_ETH_TX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1571 #define RTE_ETH_TX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1572 #define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM RTE_BIT64(4)
1573 #define RTE_ETH_TX_OFFLOAD_TCP_TSO RTE_BIT64(5)
1574 #define RTE_ETH_TX_OFFLOAD_UDP_TSO RTE_BIT64(6)
1575 #define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)
1576 #define RTE_ETH_TX_OFFLOAD_QINQ_INSERT RTE_BIT64(8)
1577 #define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO RTE_BIT64(9)
1578 #define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO RTE_BIT64(10)
1579 #define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO RTE_BIT64(11)
1580 #define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO RTE_BIT64(12)
1581 #define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT RTE_BIT64(13)
1582 
1586 #define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE RTE_BIT64(14)
1587 
1588 #define RTE_ETH_TX_OFFLOAD_MULTI_SEGS RTE_BIT64(15)
1589 
1594 #define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE RTE_BIT64(16)
1595 #define RTE_ETH_TX_OFFLOAD_SECURITY RTE_BIT64(17)
1596 
1601 #define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO RTE_BIT64(18)
1602 
1607 #define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO RTE_BIT64(19)
1608 
1609 #define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(20)
1610 
1615 #define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1616 /*
1617  * If new Tx offload capabilities are defined, they also must be
1618  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1619  */
1620 
1625 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1626 
1627 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1628 
1637 #define RTE_ETH_DEV_CAPA_RXQ_SHARE RTE_BIT64(2)
1638 
1639 #define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP RTE_BIT64(3)
1640 
1641 #define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1642 
1644 /*
1645  * Fallback default preferred Rx/Tx port parameters.
1646  * These are used if an application requests default parameters
1647  * but the PMD does not provide preferred values.
1648  */
1649 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1650 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1651 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1652 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1653 
1659 struct rte_eth_dev_portconf {
1660  uint16_t burst_size;
1661  uint16_t ring_size;
1662  uint16_t nb_queues;
1663 };
1664 
1669 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1670 
1674 struct rte_eth_switch_info {
1675  const char *name;
1676  uint16_t domain_id;
1684  uint16_t port_id;
1690  uint16_t rx_domain;
1691 };
1692 
1699 struct rte_eth_rxseg_capa {
1700  __extension__
1701  uint32_t multi_pools:1;
1702  uint32_t offset_allowed:1;
1703  uint32_t offset_align_log2:4;
1704  uint16_t max_nseg;
1705  uint16_t reserved;
1706 };
1707 
1713 enum rte_eth_representor_type {
1715  RTE_ETH_REPRESENTOR_NONE,
1717  RTE_ETH_REPRESENTOR_VF,
1718  RTE_ETH_REPRESENTOR_SF,
1719  RTE_ETH_REPRESENTOR_PF,
1720 };
1721 
1728 enum rte_eth_err_handle_mode {
1732  RTE_ETH_ERROR_HANDLE_MODE_NONE,
1736  RTE_ETH_ERROR_HANDLE_MODE_PASSIVE,
1740  RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE,
1742 };
1743 
1749 struct rte_eth_dev_info {
1750  struct rte_device *device;
1751  const char *driver_name;
1752  unsigned int if_index;
1754  uint16_t min_mtu;
1755  uint16_t max_mtu;
1756  const uint32_t *dev_flags;
1758  uint32_t min_rx_bufsize;
1765  uint32_t max_rx_bufsize;
1766  uint32_t max_rx_pktlen;
1768  uint32_t max_lro_pkt_size;
1769  uint16_t max_rx_queues;
1770  uint16_t max_tx_queues;
1771  uint32_t max_mac_addrs;
1772  uint32_t max_hash_mac_addrs;
1774  uint16_t max_vfs;
1775  uint16_t max_vmdq_pools;
1776  struct rte_eth_rxseg_capa rx_seg_capa;
1778  uint64_t rx_offload_capa;
1780  uint64_t tx_offload_capa;
1782  uint64_t rx_queue_offload_capa;
1784  uint64_t tx_queue_offload_capa;
1786  uint16_t reta_size;
1787  uint8_t hash_key_size;
1788  uint32_t rss_algo_capa;
1790  uint64_t flow_type_rss_offloads;
1791  struct rte_eth_rxconf default_rxconf;
1792  struct rte_eth_txconf default_txconf;
1793  uint16_t vmdq_queue_base;
1794  uint16_t vmdq_queue_num;
1795  uint16_t vmdq_pool_base;
1796  struct rte_eth_desc_lim rx_desc_lim;
1797  struct rte_eth_desc_lim tx_desc_lim;
1798  uint32_t speed_capa;
1800  uint16_t nb_rx_queues;
1801  uint16_t nb_tx_queues;
1810  struct rte_eth_dev_portconf default_rxportconf;
1812  struct rte_eth_dev_portconf default_txportconf;
1814  uint64_t dev_capa;
1819  struct rte_eth_switch_info switch_info;
1821  enum rte_eth_err_handle_mode err_handle_mode;
1822 
1823  uint64_t reserved_64s[2];
1824  void *reserved_ptrs[2];
1825 };
1826 
1828 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1829 #define RTE_ETH_QUEUE_STATE_STARTED 1
1830 #define RTE_ETH_QUEUE_STATE_HAIRPIN 2
1837 struct rte_eth_rxq_info {
1838  struct rte_mempool *mp;
1839  struct rte_eth_rxconf conf;
1840  uint8_t scattered_rx;
1841  uint8_t queue_state;
1842  uint16_t nb_desc;
1843  uint16_t rx_buf_size;
1850  uint8_t avail_thresh;
1851 } __rte_cache_min_aligned;
1852 
1857 struct rte_eth_txq_info {
1858  struct rte_eth_txconf conf;
1859  uint16_t nb_desc;
1860  uint8_t queue_state;
1861 } __rte_cache_min_aligned;
1862 
1871 struct rte_eth_recycle_rxq_info {
1872  struct rte_mbuf **mbuf_ring;
1873  struct rte_mempool *mp;
1874  uint16_t *refill_head;
1875  uint16_t *receive_tail;
1876  uint16_t mbuf_ring_size;
1884  uint16_t refill_requirement;
1885 } __rte_cache_min_aligned;
1886 
1887 /* Generic Burst mode flag definition, values can be ORed. */
1888 
1894 #define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1895 
1900 struct rte_eth_burst_mode {
1901  uint64_t flags;
1903 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1904  char info[RTE_ETH_BURST_MODE_INFO_SIZE];
1905 };
1906 
1908 #define RTE_ETH_XSTATS_NAME_SIZE 64
1909 
1919 struct rte_eth_xstat {
1920  uint64_t id;
1921  uint64_t value;
1922 };
1923 
1937 struct rte_eth_xstat_name {
1938  char name[RTE_ETH_XSTATS_NAME_SIZE];
1940 };
1941 
1942 #define RTE_ETH_DCB_NUM_TCS 8
1943 #define RTE_ETH_MAX_VMDQ_POOL 64
1944 
1950 struct rte_eth_dcb_tc_queue_mapping {
1951  struct {
1952  uint16_t base;
1953  uint16_t nb_queue;
1954  } tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1956  struct {
1957  uint16_t base;
1958  uint16_t nb_queue;
1959  } tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1960 };
1961 
1966 struct rte_eth_dcb_info {
1967  uint8_t nb_tcs;
1968  uint8_t prio_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
1969  uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS];
1971  struct rte_eth_dcb_tc_queue_mapping tc_queue;
1972 };
1973 
1977 enum rte_eth_fec_mode {
1978  RTE_ETH_FEC_NOFEC = 0,
1980  RTE_ETH_FEC_AUTO,
1981  RTE_ETH_FEC_BASER,
1982  RTE_ETH_FEC_RS,
1983  RTE_ETH_FEC_LLRS,
1984 };
1985 
1986 /* Translate from FEC mode to FEC capa */
1987 #define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)
1988 
1989 /* This macro indicates FEC capa mask */
1990 #define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)
1991 
1992 /* A structure used to get capabilities per link speed */
1993 struct rte_eth_fec_capa {
1994  uint32_t speed;
1995  uint32_t capa;
1996 };
1997 
1998 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
1999 
2000 /* Macros to check for valid port */
2001 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
2002  if (!rte_eth_dev_is_valid_port(port_id)) { \
2003  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
2004  return retval; \
2005  } \
2006 } while (0)
2007 
2008 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
2009  if (!rte_eth_dev_is_valid_port(port_id)) { \
2010  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
2011  return; \
2012  } \
2013 } while (0)
2014 
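/*
 * Illustrative usage sketch (hypothetical helper, assumes <errno.h>):
 * typical use of the validation macro in a function returning a
 * negative errno value. rte_eth_dev_is_valid_port() is declared later
 * in this file.
 */
static int
example_do_something(uint16_t port_id)
{
    RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
    /* ... port-specific work ... */
    return 0;
}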
2037 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
2038  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
2039  void *user_param);
2040 
2061 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
2062  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
2063 
2068 enum rte_eth_dev_state {
2070  RTE_ETH_DEV_UNUSED = 0,
2072  RTE_ETH_DEV_ATTACHED,
2073  RTE_ETH_DEV_REMOVED,
2074 };
2075 
2076 struct rte_eth_dev_sriov {
2077  uint8_t active;
2078  uint8_t nb_q_per_pool;
2079  uint16_t def_vmdq_idx;
2080  uint16_t def_pool_q_idx;
2081 };
2082 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
2083 
2084 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2085 
2086 #define RTE_ETH_DEV_NO_OWNER 0
2087 
2088 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
2089 
2090 struct rte_eth_dev_owner {
2091  uint64_t id;
2092  char name[RTE_ETH_MAX_OWNER_NAME_LEN];
2093 };
2094 
2100 #define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE RTE_BIT32(0)
2101 
2102 #define RTE_ETH_DEV_INTR_LSC RTE_BIT32(1)
2103 
2104 #define RTE_ETH_DEV_BONDING_MEMBER RTE_BIT32(2)
2105 
2106 #define RTE_ETH_DEV_INTR_RMV RTE_BIT32(3)
2107 
2108 #define RTE_ETH_DEV_REPRESENTOR RTE_BIT32(4)
2109 
2110 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR RTE_BIT32(5)
2111 
2115 #define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2116 
2129 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2130  const uint64_t owner_id);
2131 
2135 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
2136  for (p = rte_eth_find_next_owned_by(0, o); \
2137  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
2138  p = rte_eth_find_next_owned_by(p + 1, o))
2139 
2148 uint16_t rte_eth_find_next(uint16_t port_id);
2149 
2153 #define RTE_ETH_FOREACH_DEV(p) \
2154  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
2155 
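/*
 * Illustrative usage sketch (hypothetical helper, assumes <stdio.h>):
 * walk every valid port not claimed by any owner.
 */
static void
example_print_ports(void)
{
    uint16_t port_id;

    RTE_ETH_FOREACH_DEV(port_id)
        printf("port %u is valid\n", port_id);
}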
2167 uint16_t
2168 rte_eth_find_next_of(uint16_t port_id_start,
2169  const struct rte_device *parent);
2170 
2179 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
2180  for (port_id = rte_eth_find_next_of(0, parent); \
2181  port_id < RTE_MAX_ETHPORTS; \
2182  port_id = rte_eth_find_next_of(port_id + 1, parent))
2183 
2195 uint16_t
2196 rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2197 
2208 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
2209  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
2210  port_id < RTE_MAX_ETHPORTS; \
2211  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2212 
2223 int rte_eth_dev_owner_new(uint64_t *owner_id);
2224 
2235 int rte_eth_dev_owner_set(const uint16_t port_id,
2236  const struct rte_eth_dev_owner *owner);
2237 
2248 int rte_eth_dev_owner_unset(const uint16_t port_id,
2249  const uint64_t owner_id);
2250 
2259 int rte_eth_dev_owner_delete(const uint64_t owner_id);
2260 
2271 int rte_eth_dev_owner_get(const uint16_t port_id,
2272  struct rte_eth_dev_owner *owner);
2273 
2284 uint16_t rte_eth_dev_count_avail(void);
2285 
2294 uint16_t rte_eth_dev_count_total(void);
2295 
2307 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2308 
2317 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2318 
2327 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2328 
2340 __rte_experimental
2341 const char *rte_eth_dev_capability_name(uint64_t capability);
2342 
2382 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2383  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2384 
2393 int
2394 rte_eth_dev_is_removed(uint16_t port_id);
2395 
2458 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2459  uint16_t nb_rx_desc, unsigned int socket_id,
2460  const struct rte_eth_rxconf *rx_conf,
2461  struct rte_mempool *mb_pool);
2462 
2490 __rte_experimental
2491 int rte_eth_rx_hairpin_queue_setup
2492  (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2493  const struct rte_eth_hairpin_conf *conf);
2494 
2543 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2544  uint16_t nb_tx_desc, unsigned int socket_id,
2545  const struct rte_eth_txconf *tx_conf);
2546 
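/*
 * Illustrative usage sketch (hypothetical helper): minimal single-queue
 * port bring-up. The descriptor count of 1024 is arbitrary;
 * rte_eth_dev_socket_id() and rte_eth_dev_start() are declared later in
 * this file.
 */
static int
example_port_init(uint16_t port_id, struct rte_mempool *mb_pool)
{
    struct rte_eth_conf conf = {0};
    int ret;

    ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
    if (ret != 0)
        return ret;
    ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
            rte_eth_dev_socket_id(port_id), NULL, mb_pool);
    if (ret < 0)
        return ret;
    ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
            rte_eth_dev_socket_id(port_id), NULL);
    if (ret < 0)
        return ret;
    return rte_eth_dev_start(port_id);
}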
2572 __rte_experimental
2573 int rte_eth_tx_hairpin_queue_setup
2574  (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2575  const struct rte_eth_hairpin_conf *conf);
2576 
2603 __rte_experimental
2604 int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2605  size_t len, uint32_t direction);
2606 
2629 __rte_experimental
2630 int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2631 
2656 __rte_experimental
2657 int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2658 
2674 __rte_experimental
2675 int rte_eth_dev_count_aggr_ports(uint16_t port_id);
2676 
2704 __rte_experimental
2705 int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
2706  uint8_t affinity);
2707 
2720 int rte_eth_dev_socket_id(uint16_t port_id);
2721 
2731 int rte_eth_dev_is_valid_port(uint16_t port_id);
2732 
2749 __rte_experimental
2750 int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2751 
2768 __rte_experimental
2769 int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2770 
2788 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2789 
2806 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2807 
2825 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2826 
2843 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2844 
2868 int rte_eth_dev_start(uint16_t port_id);
2869 
2883 int rte_eth_dev_stop(uint16_t port_id);
2884 
2897 int rte_eth_dev_set_link_up(uint16_t port_id);
2898 
2908 int rte_eth_dev_set_link_down(uint16_t port_id);
2909 
2920 int rte_eth_dev_close(uint16_t port_id);
2921 
2959 int rte_eth_dev_reset(uint16_t port_id);
2960 
2972 int rte_eth_promiscuous_enable(uint16_t port_id);
2973 
2985 int rte_eth_promiscuous_disable(uint16_t port_id);
2986 
2997 int rte_eth_promiscuous_get(uint16_t port_id);
2998 
3010 int rte_eth_allmulticast_enable(uint16_t port_id);
3011 
3023 int rte_eth_allmulticast_disable(uint16_t port_id);
3024 
3035 int rte_eth_allmulticast_get(uint16_t port_id);
3036 
3054 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
3055 
3070 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
3071 
3085 __rte_experimental
3086 const char *rte_eth_link_speed_to_str(uint32_t link_speed);
3087 
3106 __rte_experimental
3107 int rte_eth_link_to_str(char *str, size_t len,
3108  const struct rte_eth_link *eth_link);
3109 
3127 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
3128 
3140 int rte_eth_stats_reset(uint16_t port_id);
3141 
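/*
 * Illustrative usage sketch (hypothetical helper, assumes <stdio.h> and
 * <inttypes.h>): read, print and clear the basic counters.
 */
static void
example_dump_stats(uint16_t port_id)
{
    struct rte_eth_stats stats;

    if (rte_eth_stats_get(port_id, &stats) == 0) {
        printf("rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64 "\n",
                stats.ipackets, stats.opackets, stats.imissed);
        rte_eth_stats_reset(port_id);
    }
}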
3171 int rte_eth_xstats_get_names(uint16_t port_id,
3172  struct rte_eth_xstat_name *xstats_names,
3173  unsigned int size);
3174 
3208 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3209  unsigned int n);
3210 
3235 int
3236 rte_eth_xstats_get_names_by_id(uint16_t port_id,
3237  struct rte_eth_xstat_name *xstats_names, unsigned int size,
3238  uint64_t *ids);
3239 
3264 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3265  uint64_t *values, unsigned int size);
3266 
3286 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3287  uint64_t *id);
3288 
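/*
 * Illustrative usage sketch (hypothetical helper, assumes <stdio.h> and
 * <inttypes.h>): read a single extended statistic by name.
 */
static void
example_read_xstat(uint16_t port_id, const char *name)
{
    uint64_t id, value;

    if (rte_eth_xstats_get_id_by_name(port_id, name, &id) == 0 &&
        rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
        printf("%s = %" PRIu64 "\n", name, value);
}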
3301 int rte_eth_xstats_reset(uint16_t port_id);
3302 
3321 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
3322  uint16_t tx_queue_id, uint8_t stat_idx);
3323 
3342 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
3343  uint16_t rx_queue_id,
3344  uint8_t stat_idx);
3345 
3359 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3360 
3381 __rte_experimental
3382 int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3383  unsigned int num);
3384 
3404 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
3405 
3421 __rte_experimental
3422 int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf);
3423 
3444 int rte_eth_dev_fw_version_get(uint16_t port_id,
3445  char *fw_version, size_t fw_size);
3446 
3486 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3487  uint32_t *ptypes, int num);
3518 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3519  uint32_t *set_ptypes, unsigned int num);
3520 
3533 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3534 
3552 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3553 
3573 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3574 
3593 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3594  int on);
3595 
3612 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3613  enum rte_vlan_type vlan_type,
3614  uint16_t tag_type);
3615 
3633 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3634 
3648 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3649 
3664 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3665 
3691 __rte_experimental
3692 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
3693  uint8_t avail_thresh);
3694 
3721 __rte_experimental
3722 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
3723  uint8_t *avail_thresh);
3724 
3725 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3726  void *userdata);
3727 
3732 struct rte_eth_dev_tx_buffer {
3733  buffer_tx_error_fn error_callback;
3734  void *error_userdata;
3735  uint16_t size;
3736  uint16_t length;
3738  struct rte_mbuf *pkts[];
3739 };
3740 
3747 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3748  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3749 
3760 int
3761 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3762 
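/*
 * Illustrative usage sketch (hypothetical helper, assumes <stdlib.h>):
 * allocate and initialise a Tx buffer sized for up to 32 packets.
 */
static struct rte_eth_dev_tx_buffer *
example_tx_buffer_alloc(void)
{
    struct rte_eth_dev_tx_buffer *buffer;

    buffer = calloc(1, RTE_ETH_TX_BUFFER_SIZE(32));
    if (buffer != NULL && rte_eth_tx_buffer_init(buffer, 32) != 0) {
        free(buffer);
        buffer = NULL;
    }
    return buffer;
}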
3787 int
3788 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
3789  buffer_tx_error_fn callback, void *userdata);
3790 
3813 void
3814 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3815  void *userdata);
3816 
3840 void
3841 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3842  void *userdata);
3843 
3869 int
3870 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3871 
3904 };
3905 
3925 };
3926 
3941 struct rte_eth_event_macsec_desc {
3942  enum rte_eth_event_macsec_type type;
3943  enum rte_eth_event_macsec_subtype subtype;
3945  uint64_t metadata;
3946 };
3947 
3985 };
3986 
4007 struct rte_eth_event_ipsec_desc {
4008  enum rte_eth_event_ipsec_subtype subtype;
4011  uint64_t metadata;
4012 };
4013 
4090 };
4091 
4093 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
4094  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
4095 
4113 int rte_eth_dev_callback_register(uint16_t port_id,
4114  enum rte_eth_event_type event,
4115  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4116 
4135 int rte_eth_dev_callback_unregister(uint16_t port_id,
4136  enum rte_eth_event_type event,
4137  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4138 
4160 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
4161 
4182 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
4183 
4201 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
4202 
4224 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4225  int epfd, int op, void *data);
4226 
4241 int
4242 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
4243 
4257 int rte_eth_led_on(uint16_t port_id);
4258 
4272 int rte_eth_led_off(uint16_t port_id);
4273 
4302 __rte_experimental
4303 int rte_eth_fec_get_capability(uint16_t port_id,
4304  struct rte_eth_fec_capa *speed_fec_capa,
4305  unsigned int num);
4306 
4327 __rte_experimental
4328 int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4329 
4353 __rte_experimental
4354 int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
4355 
4370 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4371  struct rte_eth_fc_conf *fc_conf);
4372 
4387 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4388  struct rte_eth_fc_conf *fc_conf);
4389 
4405 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
4406  struct rte_eth_pfc_conf *pfc_conf);
4407 
4426 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4427  uint32_t pool);
4428 
4446 __rte_experimental
4447 int rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
4448  struct rte_eth_pfc_queue_info *pfc_queue_info);
4449 
4473 __rte_experimental
4474 int rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
4475  struct rte_eth_pfc_queue_conf *pfc_queue_conf);
4476 
4491 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4492  struct rte_ether_addr *mac_addr);
4493 
4511 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
4512  struct rte_ether_addr *mac_addr);
4513 
4531 int rte_eth_dev_rss_reta_update(uint16_t port_id,
4532  struct rte_eth_rss_reta_entry64 *reta_conf,
4533  uint16_t reta_size);
4534 
4553 int rte_eth_dev_rss_reta_query(uint16_t port_id,
4554  struct rte_eth_rss_reta_entry64 *reta_conf,
4555  uint16_t reta_size);
4556 
4576 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4577  uint8_t on);
4578 
4597 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4598 
4615 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4616  uint32_t tx_rate);
4617 
4632 int rte_eth_dev_rss_hash_update(uint16_t port_id,
4633  struct rte_eth_rss_conf *rss_conf);
4634 
4650 int
4651 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4652  struct rte_eth_rss_conf *rss_conf);
4653 
4666 __rte_experimental
4667 const char *
4668 rte_eth_dev_rss_algo_name(enum rte_eth_hash_function rss_algo);
4669 
4694 int
4695 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4696  struct rte_eth_udp_tunnel *tunnel_udp);
4697 
4717 int
4718 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4719  struct rte_eth_udp_tunnel *tunnel_udp);
4720 
4735 int rte_eth_dev_get_dcb_info(uint16_t port_id,
4736  struct rte_eth_dcb_info *dcb_info);
4737 
4738 struct rte_eth_rxtx_callback;
4739 
4765 const struct rte_eth_rxtx_callback *
4766 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4767  rte_rx_callback_fn fn, void *user_param);
4768 
4795 const struct rte_eth_rxtx_callback *
4796 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4797  rte_rx_callback_fn fn, void *user_param);
4798 
4824 const struct rte_eth_rxtx_callback *
4825 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4826  rte_tx_callback_fn fn, void *user_param);
4827 
4861 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4862  const struct rte_eth_rxtx_callback *user_cb);
4863 
4897 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4898  const struct rte_eth_rxtx_callback *user_cb);
4899 
4919 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4920  struct rte_eth_rxq_info *qinfo);
4921 
4941 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4942  struct rte_eth_txq_info *qinfo);
4943 
4964 __rte_experimental
4965 int rte_eth_recycle_rx_queue_info_get(uint16_t port_id,
4966  uint16_t queue_id,
4967  struct rte_eth_recycle_rxq_info *recycle_rxq_info);
4968 
4987 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4988  struct rte_eth_burst_mode *mode);
4989 
5008 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5009  struct rte_eth_burst_mode *mode);
5010 
5031 __rte_experimental
5032 int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5033  struct rte_power_monitor_cond *pmc);
5034 
5053 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
5054 
5067 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
5068 
5085 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5086 
5103 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5104 
5123 __rte_experimental
5124 int
5125 rte_eth_dev_get_module_info(uint16_t port_id,
5126  struct rte_eth_dev_module_info *modinfo);
5127 
5147 __rte_experimental
5148 int
5149 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5150  struct rte_dev_eeprom_info *info);
5151 
5171 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5172  struct rte_ether_addr *mc_addr_set,
5173  uint32_t nb_mc_addr);
5174 
5187 int rte_eth_timesync_enable(uint16_t port_id);
5188 
5201 int rte_eth_timesync_disable(uint16_t port_id);
5202 
5221 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
5222  struct timespec *timestamp, uint32_t flags);
5223 
5239 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5240  struct timespec *timestamp);
5241 
5259 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
5260 
5276 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
5277 
5296 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
5297 
5343 __rte_experimental
5344 int
5345 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
5346 
5362 int
5363 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
5364 
5381 int
5382 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5383 
5400 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5401  uint16_t *nb_rx_desc,
5402  uint16_t *nb_tx_desc);
5403 
5418 int
5419 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5420 
5430 void *
5431 rte_eth_dev_get_sec_ctx(uint16_t port_id);
5432 
5448 __rte_experimental
5449 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5450  struct rte_eth_hairpin_cap *cap);
5451 
5457 struct rte_eth_representor_range {
5458  enum rte_eth_representor_type type;
5460  int controller;
5461  int pf;
5462  __extension__
5463  union {
5464  int vf;
5465  int sf;
5466  };
5467  uint32_t id_base;
5468  uint32_t id_end;
5469  char name[RTE_DEV_NAME_MAX_LEN];
5470 };
5471 
5478 struct rte_eth_representor_info {
5479  uint16_t controller;
5480  uint16_t pf;
5481  uint32_t nb_ranges_alloc;
5482  uint32_t nb_ranges;
5483  struct rte_eth_representor_range ranges[];
5484 };
5485 
5509 __rte_experimental
5510 int rte_eth_representor_info_get(uint16_t port_id,
5511  struct rte_eth_representor_info *info);
5512 
5514 #define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5515 
5517 #define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5518 
5520 #define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5521 
5561 int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
5562 
5564 #define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
5565 
5566 #define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))
5567 
5577 struct rte_eth_ip_reassembly_params {
5578  uint32_t timeout_ms;
5580  uint16_t max_frags;
5585  uint16_t flags;
5586 };
5587 
5608 __rte_experimental
5609 int rte_eth_ip_reassembly_capability_get(uint16_t port_id,
5610  struct rte_eth_ip_reassembly_params *capa);
5611 
5633 __rte_experimental
5634 int rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5635  struct rte_eth_ip_reassembly_params *conf);
5636 
5666 __rte_experimental
5667 int rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5668  const struct rte_eth_ip_reassembly_params *conf);
5669 
5677 typedef struct {
5684  uint16_t time_spent;
5686  uint16_t nb_frags;
5687 } rte_eth_ip_reassembly_dynfield_t;
5688 
5707 __rte_experimental
5708 int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file);
5709 
5733 __rte_experimental
5734 int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5735  uint16_t offset, uint16_t num, FILE *file);
5736 
5760 __rte_experimental
5761 int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5762  uint16_t offset, uint16_t num, FILE *file);
5763 
5764 
5765 /* Congestion management */
5766 
5770 enum rte_eth_cman_obj {
5772  RTE_ETH_CMAN_OBJ_RX_QUEUE = RTE_BIT32(0),
5775  RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL = RTE_BIT32(1),
5776 };
5777 
5790 struct rte_eth_cman_info {
5792  uint64_t modes_supported;
5794  uint64_t objs_supported;
5799  uint8_t rsvd[8];
5800 };
5801 
5811 struct rte_eth_cman_config {
5812  enum rte_cman_mode mode;
5813  union {
5820  uint16_t rx_queue;
5827  uint8_t rsvd_obj_params[4];
5828  } obj_param;
5829  union {
5836  struct rte_cman_red_params red;
5842  uint8_t rsvd_mode_params[4];
5843  } mode_param;
5844 };
5845 
5863 __rte_experimental
5864 int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info);
5865 
5883 __rte_experimental
5884 int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config);
5885 
5902 __rte_experimental
5903 int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config);
5904 
5925 __rte_experimental
5926 int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config);
5927 
5928 #include <rte_ethdev_core.h>
5929 
5953 uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
5954  struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
5955  void *opaque);
5956 
6044 static inline uint16_t
6045 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
6046  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
6047 {
6048  uint16_t nb_rx;
6049  struct rte_eth_fp_ops *p;
6050  void *qd;
6051 
6052 #ifdef RTE_ETHDEV_DEBUG_RX
6053  if (port_id >= RTE_MAX_ETHPORTS ||
6054  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6055  RTE_ETHDEV_LOG(ERR,
6056  "Invalid port_id=%u or queue_id=%u\n",
6057  port_id, queue_id);
6058  return 0;
6059  }
6060 #endif
6061 
6062  /* fetch pointer to queue data */
6063  p = &rte_eth_fp_ops[port_id];
6064  qd = p->rxq.data[queue_id];
6065 
6066 #ifdef RTE_ETHDEV_DEBUG_RX
6067  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6068 
6069  if (qd == NULL) {
6070  RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u for port_id=%u\n",
6071  queue_id, port_id);
6072  return 0;
6073  }
6074 #endif
6075 
6076  nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
6077 
6078 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6079  {
6080  void *cb;
6081 
6082  /* rte_memory_order_release memory order was used when the
6083  * callback was inserted into the list.
6084  * Since there is a clear dependency between loading
6085  * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6086  * not required.
6087  */
6088  cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
6089  rte_memory_order_relaxed);
6090  if (unlikely(cb != NULL))
6091  nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
6092  rx_pkts, nb_rx, nb_pkts, cb);
6093  }
6094 #endif
6095 
6096  rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
6097  return nb_rx;
6098 }
6099 
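/*
 * Illustrative usage sketch (hypothetical helper, assumes <rte_mbuf.h>):
 * the canonical receive loop. Each received mbuf must eventually be
 * freed or transmitted by the application; here it is simply freed.
 */
#define EXAMPLE_MAX_PKT_BURST 32
static void
example_rx_loop(uint16_t port_id, uint16_t queue_id)
{
    struct rte_mbuf *pkts[EXAMPLE_MAX_PKT_BURST];
    uint16_t nb_rx, i;

    for (;;) {
        nb_rx = rte_eth_rx_burst(port_id, queue_id,
                pkts, EXAMPLE_MAX_PKT_BURST);
        for (i = 0; i < nb_rx; i++)
            rte_pktmbuf_free(pkts[i]); /* or process the packet */
    }
}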
6117 static inline int
6118 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
6119 {
6120  struct rte_eth_fp_ops *p;
6121  void *qd;
6122 
6123 #ifdef RTE_ETHDEV_DEBUG_RX
6124  if (port_id >= RTE_MAX_ETHPORTS ||
6125  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6126  RTE_ETHDEV_LOG(ERR,
6127  "Invalid port_id=%u or queue_id=%u\n",
6128  port_id, queue_id);
6129  return -EINVAL;
6130  }
6131 #endif
6132 
6133  /* fetch pointer to queue data */
6134  p = &rte_eth_fp_ops[port_id];
6135  qd = p->rxq.data[queue_id];
6136 
6137 #ifdef RTE_ETHDEV_DEBUG_RX
6138  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6139  if (qd == NULL)
6140  return -EINVAL;
6141 #endif
6142 
6143  if (*p->rx_queue_count == NULL)
6144  return -ENOTSUP;
6145  return (int)(*p->rx_queue_count)(qd);
6146 }
6147 
6151 #define RTE_ETH_RX_DESC_AVAIL 0
6152 #define RTE_ETH_RX_DESC_DONE 1
6153 #define RTE_ETH_RX_DESC_UNAVAIL 2
6189 static inline int
6190 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
6191  uint16_t offset)
6192 {
6193  struct rte_eth_fp_ops *p;
6194  void *qd;
6195 
6196 #ifdef RTE_ETHDEV_DEBUG_RX
6197  if (port_id >= RTE_MAX_ETHPORTS ||
6198  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6199  RTE_ETHDEV_LOG(ERR,
6200  "Invalid port_id=%u or queue_id=%u\n",
6201  port_id, queue_id);
6202  return -EINVAL;
6203  }
6204 #endif
6205 
6206  /* fetch pointer to queue data */
6207  p = &rte_eth_fp_ops[port_id];
6208  qd = p->rxq.data[queue_id];
6209 
6210 #ifdef RTE_ETHDEV_DEBUG_RX
6211  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6212  if (qd == NULL)
6213  return -ENODEV;
6214 #endif
6215  if (*p->rx_descriptor_status == NULL)
6216  return -ENOTSUP;
6217  return (*p->rx_descriptor_status)(qd, offset);
6218 }
6219 
6223 #define RTE_ETH_TX_DESC_FULL 0
6224 #define RTE_ETH_TX_DESC_DONE 1
6225 #define RTE_ETH_TX_DESC_UNAVAIL 2
6261 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
6262  uint16_t queue_id, uint16_t offset)
6263 {
6264  struct rte_eth_fp_ops *p;
6265  void *qd;
6266 
6267 #ifdef RTE_ETHDEV_DEBUG_TX
6268  if (port_id >= RTE_MAX_ETHPORTS ||
6269  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6270  RTE_ETHDEV_LOG(ERR,
6271  "Invalid port_id=%u or queue_id=%u\n",
6272  port_id, queue_id);
6273  return -EINVAL;
6274  }
6275 #endif
6276 
6277  /* fetch pointer to queue data */
6278  p = &rte_eth_fp_ops[port_id];
6279  qd = p->txq.data[queue_id];
6280 
6281 #ifdef RTE_ETHDEV_DEBUG_TX
6282  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6283  if (qd == NULL)
6284  return -ENODEV;
6285 #endif
6286  if (*p->tx_descriptor_status == NULL)
6287  return -ENOTSUP;
6288  return (*p->tx_descriptor_status)(qd, offset);
6289 }
6290 
6310 uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
6311  struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
6312 
6384 static inline uint16_t
6385 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
6386  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6387 {
6388  struct rte_eth_fp_ops *p;
6389  void *qd;
6390 
6391 #ifdef RTE_ETHDEV_DEBUG_TX
6392  if (port_id >= RTE_MAX_ETHPORTS ||
6393  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6394  RTE_ETHDEV_LOG(ERR,
6395  "Invalid port_id=%u or queue_id=%u\n",
6396  port_id, queue_id);
6397  return 0;
6398  }
6399 #endif
6400 
6401  /* fetch pointer to queue data */
6402  p = &rte_eth_fp_ops[port_id];
6403  qd = p->txq.data[queue_id];
6404 
6405 #ifdef RTE_ETHDEV_DEBUG_TX
6406  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6407 
6408  if (qd == NULL) {
6409  RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
6410  queue_id, port_id);
6411  return 0;
6412  }
6413 #endif
6414 
6415 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6416  {
6417  void *cb;
6418 
6419  /* rte_memory_order_release memory order was used when the
6420  * callback was inserted into the list.
6421  * Since there is a clear dependency between loading
6422  * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6423  * not required.
6424  */
6425  cb = rte_atomic_load_explicit(&p->txq.clbk[queue_id],
6426  rte_memory_order_relaxed);
6427  if (unlikely(cb != NULL))
6428  nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
6429  tx_pkts, nb_pkts, cb);
6430  }
6431 #endif
6432 
6433  nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
6434 
6435  rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
6436  return nb_pkts;
6437 }
6438 
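/*
 * Illustrative usage sketch (hypothetical helper, assumes <rte_mbuf.h>):
 * transmit a burst and free whatever the driver did not accept.
 */
static void
example_tx_flush(uint16_t port_id, uint16_t queue_id,
        struct rte_mbuf **pkts, uint16_t nb_pkts)
{
    uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

    while (nb_tx < nb_pkts)
        rte_pktmbuf_free(pkts[nb_tx++]);
}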
6492 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
6493 
6494 static inline uint16_t
6495 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
6496  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6497 {
6498  struct rte_eth_fp_ops *p;
6499  void *qd;
6500 
6501 #ifdef RTE_ETHDEV_DEBUG_TX
6502  if (port_id >= RTE_MAX_ETHPORTS ||
6503  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6504  RTE_ETHDEV_LOG(ERR,
6505  "Invalid port_id=%u or queue_id=%u\n",
6506  port_id, queue_id);
6507  rte_errno = ENODEV;
6508  return 0;
6509  }
6510 #endif
6511 
6512  /* fetch pointer to queue data */
6513  p = &rte_eth_fp_ops[port_id];
6514  qd = p->txq.data[queue_id];
6515 
6516 #ifdef RTE_ETHDEV_DEBUG_TX
6517  if (!rte_eth_dev_is_valid_port(port_id)) {
6518  RTE_ETHDEV_LOG(ERR, "Invalid Tx port_id=%u\n", port_id);
6519  rte_errno = ENODEV;
6520  return 0;
6521  }
6522  if (qd == NULL) {
6523  RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
6524  queue_id, port_id);
6525  rte_errno = EINVAL;
6526  return 0;
6527  }
6528 #endif
6529 
6530  if (!p->tx_pkt_prepare)
6531  return nb_pkts;
6532 
6533  return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
6534 }
6535 
6536 #else
6537 
6538 /*
6539  * Native NOOP operation for compilation targets which don't require any
6540  * preparation steps, and where a functional NOOP may introduce an
6541  * unnecessary performance drop.
6542  *
6543  * Generally it is not a good idea to enable this globally, and it should
6544  * not be used if the behavior of tx_preparation can change.
6545  */
6546 
6547 static inline uint16_t
6548 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
6549  __rte_unused uint16_t queue_id,
6550  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6551 {
6552  return nb_pkts;
6553 }
6554 
6555 #endif
6556 
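/*
 * Illustrative usage sketch (hypothetical helper): validate offload
 * metadata with rte_eth_tx_prepare() before handing the burst to the
 * driver; on a partial prepare, rte_errno reports why pkts[nb_prep]
 * was rejected.
 */
static uint16_t
example_tx_prepare_and_burst(uint16_t port_id, uint16_t queue_id,
        struct rte_mbuf **pkts, uint16_t nb_pkts)
{
    uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
            pkts, nb_pkts);

    return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}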
6579 static inline uint16_t
6580 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
6581  struct rte_eth_dev_tx_buffer *buffer)
6582 {
6583  uint16_t sent;
6584  uint16_t to_send = buffer->length;
6585 
6586  if (to_send == 0)
6587  return 0;
6588 
6589  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
6590 
6591  buffer->length = 0;
6592 
6593  /* All packets sent, or to be dealt with by callback below */
6594  if (unlikely(sent != to_send))
6595  buffer->error_callback(&buffer->pkts[sent],
6596  (uint16_t)(to_send - sent),
6597  buffer->error_userdata);
6598 
6599  return sent;
6600 }
6601 
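One place an explicit flush commonly sits, sketched with assumed names: the end of each poll iteration, so packets buffered below the flush threshold do not stall while traffic is idle. Unsent packets are handed to the buffer's error callback inside the flush itself.

static void
poll_iter_end(uint16_t port, uint16_t queue,
		struct rte_eth_dev_tx_buffer *buf)
{
	/* Send whatever accumulated since the last automatic flush;
	 * the return value could feed per-queue Tx statistics.
	 */
	(void)rte_eth_tx_buffer_flush(port, queue, buf);
}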
6632 static __rte_always_inline uint16_t
6633 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
6634  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
6635 {
6636  buffer->pkts[buffer->length++] = tx_pkt;
6637  if (buffer->length < buffer->size)
6638  return 0;
6639 
6640  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
6641 }
6642 
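Setup and per-packet use, as a sketch with an assumed buffer depth: the buffer is allocated with RTE_ETH_TX_BUFFER_SIZE(), initialized once, and given the stock count-drops callback; rte_eth_tx_buffer() then flushes automatically whenever the buffer fills.

#include <rte_ethdev.h>
#include <rte_malloc.h>

#define BURST_SZ 32	/* assumed buffer depth */

static struct rte_eth_dev_tx_buffer *
tx_buffer_create(unsigned int socket, uint64_t *drops)
{
	struct rte_eth_dev_tx_buffer *buf;

	buf = rte_zmalloc_socket("tx_buffer",
			RTE_ETH_TX_BUFFER_SIZE(BURST_SZ), 0, socket);
	if (buf == NULL)
		return NULL;

	rte_eth_tx_buffer_init(buf, BURST_SZ);
	/* Stock callback: frees unsent mbufs and adds their count to *drops. */
	rte_eth_tx_buffer_set_err_callback(buf,
			rte_eth_tx_buffer_count_callback, drops);
	return buf;
}

Each packet is then queued with rte_eth_tx_buffer(port, queue, buf, pkt); a non-zero return value means that particular call triggered a flush.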
6696 __rte_experimental
6697 static inline uint16_t
6698 rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
6699  uint16_t tx_port_id, uint16_t tx_queue_id,
6700  struct rte_eth_recycle_rxq_info *recycle_rxq_info)
6701 {
6702  struct rte_eth_fp_ops *p1, *p2;
6703  void *qd1, *qd2;
6704  uint16_t nb_mbufs;
6705 
6706 #ifdef RTE_ETHDEV_DEBUG_TX
6707  if (tx_port_id >= RTE_MAX_ETHPORTS ||
6708  tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6709  RTE_ETHDEV_LOG(ERR,
6710  "Invalid tx_port_id=%u or tx_queue_id=%u\n",
6711  tx_port_id, tx_queue_id);
6712  return 0;
6713  }
6714 #endif
6715 
6716  /* fetch pointer to Tx queue data */
6717  p1 = &rte_eth_fp_ops[tx_port_id];
6718  qd1 = p1->txq.data[tx_queue_id];
6719 
6720 #ifdef RTE_ETHDEV_DEBUG_TX
6721  RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port_id, 0);
6722 
6723  if (qd1 == NULL) {
6724  RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
6725  tx_queue_id, tx_port_id);
6726  return 0;
6727  }
6728 #endif
6729  if (p1->recycle_tx_mbufs_reuse == NULL)
6730  return 0;
6731 
6732 #ifdef RTE_ETHDEV_DEBUG_RX
6733  if (rx_port_id >= RTE_MAX_ETHPORTS ||
6734  rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6735  RTE_ETHDEV_LOG(ERR, "Invalid rx_port_id=%u or rx_queue_id=%u\n",
6736  rx_port_id, rx_queue_id);
6737  return 0;
6738  }
6739 #endif
6740 
6741  /* fetch pointer to Rx queue data */
6742  p2 = &rte_eth_fp_ops[rx_port_id];
6743  qd2 = p2->rxq.data[rx_queue_id];
6744 
6745 #ifdef RTE_ETHDEV_DEBUG_RX
6746  RTE_ETH_VALID_PORTID_OR_ERR_RET(rx_port_id, 0);
6747 
6748  if (qd2 == NULL) {
6749  RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u for port_id=%u\n",
6750  rx_queue_id, rx_port_id);
6751  return 0;
6752  }
6753 #endif
6754  if (p2->recycle_rx_descriptors_refill == NULL)
6755  return 0;
6756 
6757  /* Copy used *rte_mbuf* buffer pointers from Tx mbuf ring
6758  * into Rx mbuf ring.
6759  */
6760  nb_mbufs = p1->recycle_tx_mbufs_reuse(qd1, recycle_rxq_info);
6761 
6762  /* If there are no mbufs to recycle, return 0. */
6763  if (nb_mbufs == 0)
6764  return 0;
6765 
6766  /* Replenish the Rx descriptors with the mbufs recycled
6767  * into the Rx mbuf ring.
6768  */
6769  p2->recycle_rx_descriptors_refill(qd2, nb_mbufs);
6770 
6771  return nb_mbufs;
6772 }
6773 
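A hedged sketch of where the recycle call sits in a forwarding loop (queue ids and burst size are illustrative; recycle_rxq_info must have been filled beforehand via rte_eth_recycle_rx_queue_info_get(), and both queues must be serviced by the same lcore):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
fwd_iteration(uint16_t rx_port, uint16_t tx_port,
		struct rte_eth_recycle_rxq_info *info)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx, nb_tx, i;

	/* Move mbufs the Tx queue has finished with straight back into
	 * the Rx ring, skipping the mempool round trip when supported.
	 */
	rte_eth_recycle_mbufs(rx_port, 0, tx_port, 0, info);

	nb_rx = rte_eth_rx_burst(rx_port, 0, pkts, 32);
	nb_tx = rte_eth_tx_burst(tx_port, 0, pkts, nb_rx);
	for (i = nb_tx; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);
}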
6802 __rte_experimental
6803 int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num);
6804 
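For the declaration above, a sketch of querying the split-capable header ptypes (helper name and array size assumed):

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void
dump_split_hdr_ptypes(uint16_t port_id)
{
	uint32_t ptypes[16];
	int i, ret;

	ret = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id,
			ptypes, RTE_DIM(ptypes));
	if (ret < 0) {	/* e.g. -ENOTSUP if the PMD lacks the capability */
		printf("port %u: no buffer-split ptype info (%d)\n",
				port_id, ret);
		return;
	}
	for (i = 0; i < ret && i < (int)RTE_DIM(ptypes); i++)
		printf("port %u supports split at ptype 0x%08" PRIx32 "\n",
				port_id, ptypes[i]);
}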
6805 #ifdef __cplusplus
6806 }
6807 #endif
6808 
6809 #endif /* _RTE_ETHDEV_H_ */