DPDK  23.07.0
rte_ethdev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
148 #ifdef __cplusplus
149 extern "C" {
150 #endif
151 
152 #include <stdint.h>
153 
154 /* Use this macro to check if LRO API is supported */
155 #define RTE_ETHDEV_HAS_LRO_SUPPORT
156 
157 /* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
158 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
159 #define RTE_ETHDEV_DEBUG_RX
160 #define RTE_ETHDEV_DEBUG_TX
161 #endif
162 
163 #include <rte_cman.h>
164 #include <rte_compat.h>
165 #include <rte_log.h>
166 #include <rte_interrupts.h>
167 #include <rte_dev.h>
168 #include <rte_devargs.h>
169 #include <rte_bitops.h>
170 #include <rte_errno.h>
171 #include <rte_common.h>
172 #include <rte_config.h>
173 #include <rte_power_intrinsics.h>
174 
175 #include "rte_ethdev_trace_fp.h"
176 #include "rte_dev_info.h"
177 
178 extern int rte_eth_dev_logtype;
179 
/*
 * Log through the ethdev component's registered logtype at RTE_LOG_<level>.
 * The empty string literal prepended to __VA_ARGS__ relies on compile-time
 * string-literal concatenation, so the first variadic argument must be a
 * string literal — this rejects non-literal format strings at compile time.
 */
180 #define RTE_ETHDEV_LOG(level, ...) \
181  rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
182 
183 struct rte_mbuf;
184 
201 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
202 
217 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
218 
231 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
232 
/*
 * Iterate `id` over all Ethernet ports matching the devargs string.
 * The loop ends when rte_eth_iterator_next() returns RTE_MAX_ETHPORTS
 * (no more matches). NOTE(review): if the loop body breaks out early,
 * the iterator presumably must be released with rte_eth_iterator_cleanup()
 * (declared above) — confirm against the iterator API contract.
 */
246 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
247  for (rte_eth_iterator_init(iter, devargs), \
248  id = rte_eth_iterator_next(iter); \
249  id != RTE_MAX_ETHPORTS; \
250  id = rte_eth_iterator_next(iter))
251 
262  uint64_t ipackets;
263  uint64_t opackets;
264  uint64_t ibytes;
265  uint64_t obytes;
270  uint64_t imissed;
271  uint64_t ierrors;
272  uint64_t oerrors;
273  uint64_t rx_nombuf;
274  /* Queue stats are limited to max 256 queues */
276  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
278  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
280  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
282  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
284  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
285 };
286 
290 #define RTE_ETH_LINK_SPEED_AUTONEG 0
291 #define RTE_ETH_LINK_SPEED_FIXED RTE_BIT32(0)
292 #define RTE_ETH_LINK_SPEED_10M_HD RTE_BIT32(1)
293 #define RTE_ETH_LINK_SPEED_10M RTE_BIT32(2)
294 #define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)
295 #define RTE_ETH_LINK_SPEED_100M RTE_BIT32(4)
296 #define RTE_ETH_LINK_SPEED_1G RTE_BIT32(5)
297 #define RTE_ETH_LINK_SPEED_2_5G RTE_BIT32(6)
298 #define RTE_ETH_LINK_SPEED_5G RTE_BIT32(7)
299 #define RTE_ETH_LINK_SPEED_10G RTE_BIT32(8)
300 #define RTE_ETH_LINK_SPEED_20G RTE_BIT32(9)
301 #define RTE_ETH_LINK_SPEED_25G RTE_BIT32(10)
302 #define RTE_ETH_LINK_SPEED_40G RTE_BIT32(11)
303 #define RTE_ETH_LINK_SPEED_50G RTE_BIT32(12)
304 #define RTE_ETH_LINK_SPEED_56G RTE_BIT32(13)
305 #define RTE_ETH_LINK_SPEED_100G RTE_BIT32(14)
306 #define RTE_ETH_LINK_SPEED_200G RTE_BIT32(15)
307 #define RTE_ETH_LINK_SPEED_400G RTE_BIT32(16)
313 #define RTE_ETH_SPEED_NUM_NONE 0
314 #define RTE_ETH_SPEED_NUM_10M 10
315 #define RTE_ETH_SPEED_NUM_100M 100
316 #define RTE_ETH_SPEED_NUM_1G 1000
317 #define RTE_ETH_SPEED_NUM_2_5G 2500
318 #define RTE_ETH_SPEED_NUM_5G 5000
319 #define RTE_ETH_SPEED_NUM_10G 10000
320 #define RTE_ETH_SPEED_NUM_20G 20000
321 #define RTE_ETH_SPEED_NUM_25G 25000
322 #define RTE_ETH_SPEED_NUM_40G 40000
323 #define RTE_ETH_SPEED_NUM_50G 50000
324 #define RTE_ETH_SPEED_NUM_56G 56000
325 #define RTE_ETH_SPEED_NUM_100G 100000
326 #define RTE_ETH_SPEED_NUM_200G 200000
327 #define RTE_ETH_SPEED_NUM_400G 400000
328 #define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX
/*
 * Link status of an Ethernet port.
 * 8-byte alignment presumably allows the whole struct to be read/written
 * as a single atomic 64-bit access — TODO confirm in link-update code.
 */
334 __extension__
335 struct rte_eth_link {
336  uint32_t link_speed; /**< Link speed in Mbps: RTE_ETH_SPEED_NUM_* (see above). */
337  uint16_t link_duplex : 1; /**< RTE_ETH_LINK_HALF_DUPLEX or RTE_ETH_LINK_FULL_DUPLEX. */
338  uint16_t link_autoneg : 1; /**< RTE_ETH_LINK_FIXED or RTE_ETH_LINK_AUTONEG. */
339  uint16_t link_status : 1; /**< RTE_ETH_LINK_DOWN or RTE_ETH_LINK_UP. */
340 } __rte_aligned(8);
345 #define RTE_ETH_LINK_HALF_DUPLEX 0
346 #define RTE_ETH_LINK_FULL_DUPLEX 1
347 #define RTE_ETH_LINK_DOWN 0
348 #define RTE_ETH_LINK_UP 1
349 #define RTE_ETH_LINK_FIXED 0
350 #define RTE_ETH_LINK_AUTONEG 1
351 #define RTE_ETH_LINK_MAX_STR_LEN 40
/*
 * Ring threshold parameters for Rx/Tx queue configuration.
 * Exact hardware semantics are driver-specific; names suggest
 * prefetch/host/write-back thresholds — confirm per PMD.
 */
358 struct rte_eth_thresh {
359  uint8_t pthresh; /**< Ring prefetch threshold. */
360  uint8_t hthresh; /**< Ring host threshold. */
361  uint8_t wthresh; /**< Ring write-back threshold. */
362 };
363 
367 #define RTE_ETH_MQ_RX_RSS_FLAG RTE_BIT32(0)
368 #define RTE_ETH_MQ_RX_DCB_FLAG RTE_BIT32(1)
369 #define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2)
376 enum rte_eth_rx_mq_mode {
377 
379 
386 
396 };
397 
407 };
408 
414  enum rte_eth_rx_mq_mode mq_mode;
415  uint32_t mtu;
423  uint64_t offloads;
424 
425  uint64_t reserved_64s[2];
426  void *reserved_ptrs[2];
427 };
428 
434  RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
437  RTE_ETH_VLAN_TYPE_MAX,
438 };
439 
445  uint64_t ids[64];
446 };
447 
466  uint8_t *rss_key;
467  uint8_t rss_key_len;
468  uint64_t rss_hf;
469 };
470 
471 /*
472  * A packet can be identified by hardware as different flow types. Different
473  * NIC hardware may support different flow types.
474  * Basically, the NIC hardware identifies the flow type as deep protocol as
475  * possible, and exclusively. For example, if a packet is identified as
476  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
477  * though it is an actual IPV4 packet.
478  */
479 #define RTE_ETH_FLOW_UNKNOWN 0
480 #define RTE_ETH_FLOW_RAW 1
481 #define RTE_ETH_FLOW_IPV4 2
482 #define RTE_ETH_FLOW_FRAG_IPV4 3
483 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
484 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
485 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
486 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
487 #define RTE_ETH_FLOW_IPV6 8
488 #define RTE_ETH_FLOW_FRAG_IPV6 9
489 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
490 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
491 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
492 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
493 #define RTE_ETH_FLOW_L2_PAYLOAD 14
494 #define RTE_ETH_FLOW_IPV6_EX 15
495 #define RTE_ETH_FLOW_IPV6_TCP_EX 16
496 #define RTE_ETH_FLOW_IPV6_UDP_EX 17
497 
498 #define RTE_ETH_FLOW_PORT 18
499 #define RTE_ETH_FLOW_VXLAN 19
500 #define RTE_ETH_FLOW_GENEVE 20
501 #define RTE_ETH_FLOW_NVGRE 21
502 #define RTE_ETH_FLOW_VXLAN_GPE 22
503 #define RTE_ETH_FLOW_GTPU 23
504 #define RTE_ETH_FLOW_MAX 24
505 
506 /*
507  * Below macros are defined for RSS offload types, they can be used to
508  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
509  */
510 #define RTE_ETH_RSS_IPV4 RTE_BIT64(2)
511 #define RTE_ETH_RSS_FRAG_IPV4 RTE_BIT64(3)
512 #define RTE_ETH_RSS_NONFRAG_IPV4_TCP RTE_BIT64(4)
513 #define RTE_ETH_RSS_NONFRAG_IPV4_UDP RTE_BIT64(5)
514 #define RTE_ETH_RSS_NONFRAG_IPV4_SCTP RTE_BIT64(6)
515 #define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
516 #define RTE_ETH_RSS_IPV6 RTE_BIT64(8)
517 #define RTE_ETH_RSS_FRAG_IPV6 RTE_BIT64(9)
518 #define RTE_ETH_RSS_NONFRAG_IPV6_TCP RTE_BIT64(10)
519 #define RTE_ETH_RSS_NONFRAG_IPV6_UDP RTE_BIT64(11)
520 #define RTE_ETH_RSS_NONFRAG_IPV6_SCTP RTE_BIT64(12)
521 #define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
522 #define RTE_ETH_RSS_L2_PAYLOAD RTE_BIT64(14)
523 #define RTE_ETH_RSS_IPV6_EX RTE_BIT64(15)
524 #define RTE_ETH_RSS_IPV6_TCP_EX RTE_BIT64(16)
525 #define RTE_ETH_RSS_IPV6_UDP_EX RTE_BIT64(17)
526 #define RTE_ETH_RSS_PORT RTE_BIT64(18)
527 #define RTE_ETH_RSS_VXLAN RTE_BIT64(19)
528 #define RTE_ETH_RSS_GENEVE RTE_BIT64(20)
529 #define RTE_ETH_RSS_NVGRE RTE_BIT64(21)
530 #define RTE_ETH_RSS_GTPU RTE_BIT64(23)
531 #define RTE_ETH_RSS_ETH RTE_BIT64(24)
532 #define RTE_ETH_RSS_S_VLAN RTE_BIT64(25)
533 #define RTE_ETH_RSS_C_VLAN RTE_BIT64(26)
534 #define RTE_ETH_RSS_ESP RTE_BIT64(27)
535 #define RTE_ETH_RSS_AH RTE_BIT64(28)
536 #define RTE_ETH_RSS_L2TPV3 RTE_BIT64(29)
537 #define RTE_ETH_RSS_PFCP RTE_BIT64(30)
538 #define RTE_ETH_RSS_PPPOE RTE_BIT64(31)
539 #define RTE_ETH_RSS_ECPRI RTE_BIT64(32)
540 #define RTE_ETH_RSS_MPLS RTE_BIT64(33)
541 #define RTE_ETH_RSS_IPV4_CHKSUM RTE_BIT64(34)
542 
555 #define RTE_ETH_RSS_L4_CHKSUM RTE_BIT64(35)
556 
557 #define RTE_ETH_RSS_L2TPV2 RTE_BIT64(36)
558 
559 /*
560  * We use the following macros to combine with above RTE_ETH_RSS_* for
561  * more specific input set selection. These bits are defined starting
562  * from the high end of the 64 bits.
563  * Note: If we use above RTE_ETH_RSS_* without SRC/DST_ONLY, it represents
564  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
565  * the same level are used simultaneously, it is the same case as none of
566  * them are added.
567  */
568 #define RTE_ETH_RSS_L3_SRC_ONLY RTE_BIT64(63)
569 #define RTE_ETH_RSS_L3_DST_ONLY RTE_BIT64(62)
570 #define RTE_ETH_RSS_L4_SRC_ONLY RTE_BIT64(61)
571 #define RTE_ETH_RSS_L4_DST_ONLY RTE_BIT64(60)
572 #define RTE_ETH_RSS_L2_SRC_ONLY RTE_BIT64(59)
573 #define RTE_ETH_RSS_L2_DST_ONLY RTE_BIT64(58)
574 
575 /*
576  * Only select IPV6 address prefix as RSS input set according to
577  * https://tools.ietf.org/html/rfc6052
578  * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
579  * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
580  */
581 #define RTE_ETH_RSS_L3_PRE32 RTE_BIT64(57)
582 #define RTE_ETH_RSS_L3_PRE40 RTE_BIT64(56)
583 #define RTE_ETH_RSS_L3_PRE48 RTE_BIT64(55)
584 #define RTE_ETH_RSS_L3_PRE56 RTE_BIT64(54)
585 #define RTE_ETH_RSS_L3_PRE64 RTE_BIT64(53)
586 #define RTE_ETH_RSS_L3_PRE96 RTE_BIT64(52)
587 
588 /*
589  * Use the following macros to combine with the above layers
590  * to choose inner and outer layers or both for RSS computation.
591  * Bits 50 and 51 are reserved for this.
592  */
593 
601 #define RTE_ETH_RSS_LEVEL_PMD_DEFAULT (UINT64_C(0) << 50)
602 
607 #define RTE_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
608 
613 #define RTE_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
/** Mask covering the 2-bit RSS level field (bits 50-51 of rss_hf). */
#define RTE_ETH_RSS_LEVEL_MASK (UINT64_C(3) << 50)

/**
 * Extract the RSS level from an rss_hf bitmask:
 * 0 = PMD default, 1 = outermost, 2 = innermost (see macros above).
 * Fix: the macro argument is parenthesized so that a compound expression
 * such as `a | b` is masked as a whole instead of binding to `&` first
 * (`&` has higher precedence than `|`).
 */
#define RTE_ETH_RSS_LEVEL(rss_hf) (((rss_hf) & RTE_ETH_RSS_LEVEL_MASK) >> 50)
617 
628 static inline uint64_t
629 rte_eth_rss_hf_refine(uint64_t rss_hf)
630 {
631  if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
632  rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
633 
634  if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
635  rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
636 
637  return rss_hf;
638 }
639 
640 #define RTE_ETH_RSS_IPV6_PRE32 ( \
641  RTE_ETH_RSS_IPV6 | \
642  RTE_ETH_RSS_L3_PRE32)
643 
644 #define RTE_ETH_RSS_IPV6_PRE40 ( \
645  RTE_ETH_RSS_IPV6 | \
646  RTE_ETH_RSS_L3_PRE40)
647 
648 #define RTE_ETH_RSS_IPV6_PRE48 ( \
649  RTE_ETH_RSS_IPV6 | \
650  RTE_ETH_RSS_L3_PRE48)
651 
652 #define RTE_ETH_RSS_IPV6_PRE56 ( \
653  RTE_ETH_RSS_IPV6 | \
654  RTE_ETH_RSS_L3_PRE56)
655 
656 #define RTE_ETH_RSS_IPV6_PRE64 ( \
657  RTE_ETH_RSS_IPV6 | \
658  RTE_ETH_RSS_L3_PRE64)
659 
660 #define RTE_ETH_RSS_IPV6_PRE96 ( \
661  RTE_ETH_RSS_IPV6 | \
662  RTE_ETH_RSS_L3_PRE96)
663 
664 #define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
665  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
666  RTE_ETH_RSS_L3_PRE32)
667 
668 #define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
669  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
670  RTE_ETH_RSS_L3_PRE40)
671 
672 #define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
673  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
674  RTE_ETH_RSS_L3_PRE48)
675 
676 #define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
677  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
678  RTE_ETH_RSS_L3_PRE56)
679 
680 #define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
681  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
682  RTE_ETH_RSS_L3_PRE64)
683 
684 #define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
685  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
686  RTE_ETH_RSS_L3_PRE96)
687 
688 #define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
689  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
690  RTE_ETH_RSS_L3_PRE32)
691 
692 #define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
693  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
694  RTE_ETH_RSS_L3_PRE40)
695 
696 #define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
697  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
698  RTE_ETH_RSS_L3_PRE48)
699 
700 #define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
701  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
702  RTE_ETH_RSS_L3_PRE56)
703 
704 #define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
705  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
706  RTE_ETH_RSS_L3_PRE64)
707 
708 #define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
709  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
710  RTE_ETH_RSS_L3_PRE96)
711 
712 #define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
713  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
714  RTE_ETH_RSS_L3_PRE32)
715 
716 #define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
717  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
718  RTE_ETH_RSS_L3_PRE40)
719 
720 #define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
721  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
722  RTE_ETH_RSS_L3_PRE48)
723 
724 #define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
725  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
726  RTE_ETH_RSS_L3_PRE56)
727 
728 #define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
729  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
730  RTE_ETH_RSS_L3_PRE64)
731 
732 #define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
733  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
734  RTE_ETH_RSS_L3_PRE96)
735 
736 #define RTE_ETH_RSS_IP ( \
737  RTE_ETH_RSS_IPV4 | \
738  RTE_ETH_RSS_FRAG_IPV4 | \
739  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
740  RTE_ETH_RSS_IPV6 | \
741  RTE_ETH_RSS_FRAG_IPV6 | \
742  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
743  RTE_ETH_RSS_IPV6_EX)
744 
745 #define RTE_ETH_RSS_UDP ( \
746  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
747  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
748  RTE_ETH_RSS_IPV6_UDP_EX)
749 
750 #define RTE_ETH_RSS_TCP ( \
751  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
752  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
753  RTE_ETH_RSS_IPV6_TCP_EX)
754 
755 #define RTE_ETH_RSS_SCTP ( \
756  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
757  RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
758 
759 #define RTE_ETH_RSS_TUNNEL ( \
760  RTE_ETH_RSS_VXLAN | \
761  RTE_ETH_RSS_GENEVE | \
762  RTE_ETH_RSS_NVGRE)
763 
764 #define RTE_ETH_RSS_VLAN ( \
765  RTE_ETH_RSS_S_VLAN | \
766  RTE_ETH_RSS_C_VLAN)
767 
769 #define RTE_ETH_RSS_PROTO_MASK ( \
770  RTE_ETH_RSS_IPV4 | \
771  RTE_ETH_RSS_FRAG_IPV4 | \
772  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
773  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
774  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
775  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
776  RTE_ETH_RSS_IPV6 | \
777  RTE_ETH_RSS_FRAG_IPV6 | \
778  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
779  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
780  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
781  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
782  RTE_ETH_RSS_L2_PAYLOAD | \
783  RTE_ETH_RSS_IPV6_EX | \
784  RTE_ETH_RSS_IPV6_TCP_EX | \
785  RTE_ETH_RSS_IPV6_UDP_EX | \
786  RTE_ETH_RSS_PORT | \
787  RTE_ETH_RSS_VXLAN | \
788  RTE_ETH_RSS_GENEVE | \
789  RTE_ETH_RSS_NVGRE | \
790  RTE_ETH_RSS_MPLS)
791 
792 /*
793  * Definitions used for redirection table entry size.
794  * Some RSS RETA sizes may not be supported by some drivers, check the
795  * documentation or the description of relevant functions for more details.
796  */
797 #define RTE_ETH_RSS_RETA_SIZE_64 64
798 #define RTE_ETH_RSS_RETA_SIZE_128 128
799 #define RTE_ETH_RSS_RETA_SIZE_256 256
800 #define RTE_ETH_RSS_RETA_SIZE_512 512
801 #define RTE_ETH_RETA_GROUP_SIZE 64
802 
804 #define RTE_ETH_VMDQ_MAX_VLAN_FILTERS 64
805 #define RTE_ETH_DCB_NUM_USER_PRIORITIES 8
806 #define RTE_ETH_VMDQ_DCB_NUM_QUEUES 128
807 #define RTE_ETH_DCB_NUM_QUEUES 128
811 #define RTE_ETH_DCB_PG_SUPPORT RTE_BIT32(0)
812 #define RTE_ETH_DCB_PFC_SUPPORT RTE_BIT32(1)
816 #define RTE_ETH_VLAN_STRIP_OFFLOAD 0x0001
817 #define RTE_ETH_VLAN_FILTER_OFFLOAD 0x0002
818 #define RTE_ETH_VLAN_EXTEND_OFFLOAD 0x0004
819 #define RTE_ETH_QINQ_STRIP_OFFLOAD 0x0008
821 #define RTE_ETH_VLAN_STRIP_MASK 0x0001
822 #define RTE_ETH_VLAN_FILTER_MASK 0x0002
823 #define RTE_ETH_VLAN_EXTEND_MASK 0x0004
824 #define RTE_ETH_QINQ_STRIP_MASK 0x0008
825 #define RTE_ETH_VLAN_ID_MAX 0x0FFF
828 /* Definitions used for receive MAC address */
829 #define RTE_ETH_NUM_RECEIVE_MAC_ADDR 128
831 /* Definitions used for unicast hash */
832 #define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128
838 #define RTE_ETH_VMDQ_ACCEPT_UNTAG RTE_BIT32(0)
839 
840 #define RTE_ETH_VMDQ_ACCEPT_HASH_MC RTE_BIT32(1)
841 
842 #define RTE_ETH_VMDQ_ACCEPT_HASH_UC RTE_BIT32(2)
843 
844 #define RTE_ETH_VMDQ_ACCEPT_BROADCAST RTE_BIT32(3)
845 
846 #define RTE_ETH_VMDQ_ACCEPT_MULTICAST RTE_BIT32(4)
847 
857  uint64_t mask;
859  uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
860 };
861 
869 };
870 
880 };
881 
882 /* This structure may be extended in future. */
/* DCB receive configuration. This structure may be extended in future. */
883 struct rte_eth_dcb_rx_conf {
884  enum rte_eth_nb_tcs nb_tcs; /**< Number of DCB traffic classes. */
/** Traffic class each user priority is mapped to (index = user priority). */
886  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
887 };
888 
/* VMDq+DCB transmit configuration. */
889 struct rte_eth_vmdq_dcb_tx_conf {
890  enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq transmit pools. */
/** Traffic class each user priority is mapped to (index = user priority). */
892  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
893 };
894 
/* DCB transmit configuration. */
895 struct rte_eth_dcb_tx_conf {
896  enum rte_eth_nb_tcs nb_tcs; /**< Number of DCB traffic classes. */
/** Traffic class each user priority is mapped to (index = user priority). */
898  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
899 };
900 
/* VMDq transmit configuration. */
901 struct rte_eth_vmdq_tx_conf {
902  enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq transmit pools. */
903 };
904 
917  enum rte_eth_nb_pools nb_queue_pools;
919  uint8_t default_pool;
920  uint8_t nb_pool_maps;
921  struct {
922  uint16_t vlan_id;
923  uint64_t pools;
924  } pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS];
927 };
928 
948  enum rte_eth_nb_pools nb_queue_pools;
950  uint8_t default_pool;
952  uint8_t nb_pool_maps;
953  uint32_t rx_mode;
954  struct {
955  uint16_t vlan_id;
956  uint64_t pools;
957  } pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS];
958 };
959 
964  enum rte_eth_tx_mq_mode mq_mode;
970  uint64_t offloads;
971 
972  uint16_t pvid;
973  __extension__
974  uint8_t
975  hw_vlan_reject_tagged : 1,
979  hw_vlan_insert_pvid : 1;
980 
981  uint64_t reserved_64s[2];
982  void *reserved_ptrs[2];
983 };
984 
1046  struct rte_mempool *mp;
1047  uint16_t length;
1048  uint16_t offset;
1060  uint32_t proto_hdr;
1061 };
1062 
1070  /* The settings for buffer split offload. */
1071  struct rte_eth_rxseg_split split;
1072  /* The other features settings should be added here. */
1073 };
1074 
1079  struct rte_eth_thresh rx_thresh;
1080  uint16_t rx_free_thresh;
1081  uint8_t rx_drop_en;
1083  uint16_t rx_nseg;
1090  uint16_t share_group;
1091  uint16_t share_qid;
1097  uint64_t offloads;
1106 
1127  uint16_t rx_nmempool;
1129  uint64_t reserved_64s[2];
1130  void *reserved_ptrs[2];
1131 };
1132 
1137  struct rte_eth_thresh tx_thresh;
1138  uint16_t tx_rs_thresh;
1139  uint16_t tx_free_thresh;
1148  uint64_t offloads;
1149 
1150  uint64_t reserved_64s[2];
1151  void *reserved_ptrs[2];
1152 };
1153 
1166 
1171  uint32_t rte_memory:1;
1172 
1173  uint32_t reserved:30;
1174 };
1175 
1184  uint16_t max_nb_queues;
1186  uint16_t max_rx_2_tx;
1188  uint16_t max_tx_2_rx;
1189  uint16_t max_nb_desc;
1192 };
1193 
1194 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
1195 
1203  uint16_t port;
1204  uint16_t queue;
1205 };
1206 
1214  uint32_t peer_count:16;
1225  uint32_t tx_explicit:1;
1226 
1238  uint32_t manual_bind:1;
1239 
1252 
1264  uint32_t use_rte_memory:1;
1265 
1276  uint32_t force_memory:1;
1277 
1278  uint32_t reserved:11;
1280  struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1281 };
1282 
1287  uint16_t nb_max;
1288  uint16_t nb_min;
1289  uint16_t nb_align;
1299  uint16_t nb_seg_max;
1300 
1312  uint16_t nb_mtu_seg_max;
1313 };
1314 
1323 };
1324 
1331  uint32_t high_water;
1332  uint32_t low_water;
1333  uint16_t pause_time;
1334  uint16_t send_xon;
1335  enum rte_eth_fc_mode mode;
1337  uint8_t autoneg;
1338 };
1339 
1346  struct rte_eth_fc_conf fc;
1347  uint8_t priority;
1348 };
1349 
1360  uint8_t tc_max;
1362  enum rte_eth_fc_mode mode_capa;
1363 };
1364 
1383  enum rte_eth_fc_mode mode;
1385  struct {
1386  uint16_t tx_qid;
1390  uint8_t tc;
1391  } rx_pause; /* Valid when (mode == FC_RX_PAUSE || mode == FC_FULL) */
1392 
1393  struct {
1394  uint16_t pause_time;
1395  uint16_t rx_qid;
1399  uint8_t tc;
1400  } tx_pause; /* Valid when (mode == FC_TX_PAUSE || mode == FC_FULL) */
1401 };
1402 
1408  RTE_ETH_TUNNEL_TYPE_NONE = 0,
1409  RTE_ETH_TUNNEL_TYPE_VXLAN,
1410  RTE_ETH_TUNNEL_TYPE_GENEVE,
1411  RTE_ETH_TUNNEL_TYPE_TEREDO,
1412  RTE_ETH_TUNNEL_TYPE_NVGRE,
1413  RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1414  RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1415  RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1416  RTE_ETH_TUNNEL_TYPE_ECPRI,
1417  RTE_ETH_TUNNEL_TYPE_MAX,
1418 };
1419 
1420 /* Deprecated API file for rte_eth_dev_filter_* functions */
1421 #include "rte_eth_ctrl.h"
1422 
1433  uint16_t udp_port;
1434  uint8_t prot_type;
1435 };
1436 
1442  uint32_t lsc:1;
1444  uint32_t rxq:1;
1446  uint32_t rmv:1;
1447 };
1448 
1449 #define rte_intr_conf rte_eth_intr_conf
1450 
1457  uint32_t link_speeds;
1464  struct rte_eth_rxmode rxmode;
1465  struct rte_eth_txmode txmode;
1466  uint32_t lpbk_mode;
1471  struct {
1472  struct rte_eth_rss_conf rss_conf;
1474  struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
1476  struct rte_eth_dcb_rx_conf dcb_rx_conf;
1478  struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
1479  } rx_adv_conf;
1480  union {
1482  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1484  struct rte_eth_dcb_tx_conf dcb_tx_conf;
1486  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1487  } tx_adv_conf;
1491  struct rte_eth_intr_conf intr_conf;
1492 };
1493 
1497 #define RTE_ETH_RX_OFFLOAD_VLAN_STRIP RTE_BIT64(0)
1498 #define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1499 #define RTE_ETH_RX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1500 #define RTE_ETH_RX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1501 #define RTE_ETH_RX_OFFLOAD_TCP_LRO RTE_BIT64(4)
1502 #define RTE_ETH_RX_OFFLOAD_QINQ_STRIP RTE_BIT64(5)
1503 #define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1504 #define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP RTE_BIT64(7)
1505 #define RTE_ETH_RX_OFFLOAD_VLAN_FILTER RTE_BIT64(9)
1506 #define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND RTE_BIT64(10)
1507 #define RTE_ETH_RX_OFFLOAD_SCATTER RTE_BIT64(13)
1508 
1513 #define RTE_ETH_RX_OFFLOAD_TIMESTAMP RTE_BIT64(14)
1514 #define RTE_ETH_RX_OFFLOAD_SECURITY RTE_BIT64(15)
1515 #define RTE_ETH_RX_OFFLOAD_KEEP_CRC RTE_BIT64(16)
1516 #define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM RTE_BIT64(17)
1517 #define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(18)
1518 #define RTE_ETH_RX_OFFLOAD_RSS_HASH RTE_BIT64(19)
1519 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT RTE_BIT64(20)
1520 
1521 #define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1522  RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1523  RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1524 #define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1525  RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1526  RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1527  RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1528 
1529 /*
1530  * If new Rx offload capabilities are defined, they also must be
1531  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1532  */
1533 
1537 #define RTE_ETH_TX_OFFLOAD_VLAN_INSERT RTE_BIT64(0)
1538 #define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1539 #define RTE_ETH_TX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1540 #define RTE_ETH_TX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1541 #define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM RTE_BIT64(4)
1542 #define RTE_ETH_TX_OFFLOAD_TCP_TSO RTE_BIT64(5)
1543 #define RTE_ETH_TX_OFFLOAD_UDP_TSO RTE_BIT64(6)
1544 #define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)
1545 #define RTE_ETH_TX_OFFLOAD_QINQ_INSERT RTE_BIT64(8)
1546 #define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO RTE_BIT64(9)
1547 #define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO RTE_BIT64(10)
1548 #define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO RTE_BIT64(11)
1549 #define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO RTE_BIT64(12)
1550 #define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT RTE_BIT64(13)
1551 
1555 #define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE RTE_BIT64(14)
1556 
1557 #define RTE_ETH_TX_OFFLOAD_MULTI_SEGS RTE_BIT64(15)
1558 
1563 #define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE RTE_BIT64(16)
1564 #define RTE_ETH_TX_OFFLOAD_SECURITY RTE_BIT64(17)
1565 
1570 #define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO RTE_BIT64(18)
1571 
1576 #define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO RTE_BIT64(19)
1577 
1578 #define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(20)
1579 
1584 #define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1585 /*
1586  * If new Tx offload capabilities are defined, they also must be
1587  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1588  */
1589 
1594 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1595 
1596 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1597 
1606 #define RTE_ETH_DEV_CAPA_RXQ_SHARE RTE_BIT64(2)
1607 
1608 #define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP RTE_BIT64(3)
1609 
1610 #define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1611 
1613 /*
1614  * Fallback default preferred Rx/Tx port parameters.
1615  * These are used if an application requests default parameters
1616  * but the PMD does not provide preferred values.
1617  */
1618 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1619 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1620 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1621 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1622 
1629  uint16_t burst_size;
1630  uint16_t ring_size;
1631  uint16_t nb_queues;
1632 };
1633 
1638 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1639 
1644  const char *name;
1645  uint16_t domain_id;
1653  uint16_t port_id;
1659  uint16_t rx_domain;
1660 };
1661 
1669  __extension__
1670  uint32_t multi_pools:1;
1671  uint32_t offset_allowed:1;
1672  uint32_t offset_align_log2:4;
1673  uint16_t max_nseg;
1674  uint16_t reserved;
1675 };
1676 
1689 };
1690 
1711 };
1712 
1719  struct rte_device *device;
1720  const char *driver_name;
1721  unsigned int if_index;
1723  uint16_t min_mtu;
1724  uint16_t max_mtu;
1725  const uint32_t *dev_flags;
1726  uint32_t min_rx_bufsize;
1727  uint32_t max_rx_pktlen;
1730  uint16_t max_rx_queues;
1731  uint16_t max_tx_queues;
1732  uint32_t max_mac_addrs;
1735  uint16_t max_vfs;
1736  uint16_t max_vmdq_pools;
1737  struct rte_eth_rxseg_capa rx_seg_capa;
1747  uint16_t reta_size;
1748  uint8_t hash_key_size;
1751  struct rte_eth_rxconf default_rxconf;
1752  struct rte_eth_txconf default_txconf;
1753  uint16_t vmdq_queue_base;
1754  uint16_t vmdq_queue_num;
1755  uint16_t vmdq_pool_base;
1756  struct rte_eth_desc_lim rx_desc_lim;
1757  struct rte_eth_desc_lim tx_desc_lim;
1758  uint32_t speed_capa;
1760  uint16_t nb_rx_queues;
1761  uint16_t nb_tx_queues;
1770  struct rte_eth_dev_portconf default_rxportconf;
1772  struct rte_eth_dev_portconf default_txportconf;
1774  uint64_t dev_capa;
1779  struct rte_eth_switch_info switch_info;
1781  enum rte_eth_err_handle_mode err_handle_mode;
1782 
1783  uint64_t reserved_64s[2];
1784  void *reserved_ptrs[2];
1785 };
1786 
1788 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1789 #define RTE_ETH_QUEUE_STATE_STARTED 1
1790 #define RTE_ETH_QUEUE_STATE_HAIRPIN 2
1797 struct rte_eth_rxq_info {
1798  struct rte_mempool *mp;
1799  struct rte_eth_rxconf conf;
1800  uint8_t scattered_rx;
1801  uint8_t queue_state;
1802  uint16_t nb_desc;
1803  uint16_t rx_buf_size;
1810  uint8_t avail_thresh;
1812 
1818  struct rte_eth_txconf conf;
1819  uint16_t nb_desc;
1820  uint8_t queue_state;
1822 
1823 /* Generic Burst mode flag definition, values can be ORed. */
1824 
1830 #define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1831 
1837  uint64_t flags;
1839 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1840  char info[RTE_ETH_BURST_MODE_INFO_SIZE];
1841 };
1842 
1844 #define RTE_ETH_XSTATS_NAME_SIZE 64
1845 
1856  uint64_t id;
1857  uint64_t value;
1858 };
1859 
1876 };
1877 
1878 #define RTE_ETH_DCB_NUM_TCS 8
1879 #define RTE_ETH_MAX_VMDQ_POOL 64
1880 
1887  struct {
1888  uint16_t base;
1889  uint16_t nb_queue;
1890  } tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1892  struct {
1893  uint16_t base;
1894  uint16_t nb_queue;
1895  } tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1896 };
1897 
1903  uint8_t nb_tcs;
1905  uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS];
1908 };
1909 
1920 };
1921 
1922 /* Translate from FEC mode to FEC capa */
1923 #define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)
1924 
1925 /* This macro indicates FEC capa mask */
1926 #define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)
1927 
1928 /* A structure used to get capabilities per link speed */
1928 /* A structure used to get capabilities per link speed */
1929 struct rte_eth_fec_capa {
1930  uint32_t speed; /**< Link speed in Mbps (RTE_ETH_SPEED_NUM_*). */
1931  uint32_t capa; /**< FEC capability bitmask, bits built with RTE_ETH_FEC_MODE_TO_CAPA(). */
1932 };
1933 
1934 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
1935 
1936 /* Macros to check for valid port */
/*
 * If port_id is not a valid port, log an error and make the CALLING
 * function return `retval`. For use inside non-void ethdev API functions.
 */
1937 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1938  if (!rte_eth_dev_is_valid_port(port_id)) { \
1939  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1940  return retval; \
1941  } \
1942 } while (0)
1943 
/*
 * If port_id is not a valid port, log an error and make the CALLING
 * function return. For use inside void ethdev API functions.
 */
1944 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1945  if (!rte_eth_dev_is_valid_port(port_id)) { \
1946  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1947  return; \
1948  } \
1949 } while (0)
1950 
/*
 * User callback invoked on a burst of received packets for (port_id, queue).
 * pkts[] holds nb_pkts received mbufs; max_pkts is the array capacity.
 * NOTE(review): the uint16_t return is presumably the number of packets
 * left in pkts[] for the application — confirm against the add-callback API.
 */
1973 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
1974  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1975  void *user_param);
1976 
/*
 * User callback invoked on a burst of packets about to be transmitted on
 * (port_id, queue); pkts[] holds nb_pkts mbufs. NOTE(review): the return
 * value is presumably the number of packets to actually transmit — confirm
 * against the add-callback API.
 */
1997 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
1998  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1999 
2010 };
2011 
/*
 * Per-device SR-IOV state, accessed via RTE_ETH_DEV_SRIOV(dev) below.
 * Field semantics are not visible from this header — hedged notes follow.
 */
2012 struct rte_eth_dev_sriov {
2013  uint8_t active; /**< Non-zero when SR-IOV is active — TODO confirm encoding. */
2014  uint8_t nb_q_per_pool; /**< Presumably Rx/Tx queue count per pool — verify. */
2015  uint16_t def_vmdq_idx; /**< Presumably default VMDq pool index for the PF — verify. */
2016  uint16_t def_pool_q_idx; /**< Presumably default pool's starting queue index — verify. */
2017 };
2018 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
2019 
2020 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2021 
2022 #define RTE_ETH_DEV_NO_OWNER 0
2023 
2024 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
2025 
/* Identity of an ethdev port owner (see rte_eth_dev_owner_* API below). */
2026 struct rte_eth_dev_owner {
2027  uint64_t id; /**< Owner unique identifier (RTE_ETH_DEV_NO_OWNER = unowned). */
2028  char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< Human-readable owner name. */
2029 };
2030 
2036 #define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE RTE_BIT32(0)
2037 
2038 #define RTE_ETH_DEV_INTR_LSC RTE_BIT32(1)
2039 
2040 #define RTE_ETH_DEV_BONDING_MEMBER RTE_BIT32(2)
2041 #define RTE_ETH_DEV_BONDED_SLAVE RTE_DEPRECATED(RTE_ETH_DEV_BONDED_SLAVE) RTE_ETH_DEV_BONDING_MEMBER
2042 
2043 #define RTE_ETH_DEV_INTR_RMV RTE_BIT32(3)
2044 
2045 #define RTE_ETH_DEV_REPRESENTOR RTE_BIT32(4)
2046 
2047 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR RTE_BIT32(5)
2048 
2052 #define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2053 
2066 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2067  const uint64_t owner_id);
2068 
2072 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
2073  for (p = rte_eth_find_next_owned_by(0, o); \
2074  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
2075  p = rte_eth_find_next_owned_by(p + 1, o))
2076 
2085 uint16_t rte_eth_find_next(uint16_t port_id);
2086 
2090 #define RTE_ETH_FOREACH_DEV(p) \
2091  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
2092 
2104 uint16_t
2105 rte_eth_find_next_of(uint16_t port_id_start,
2106  const struct rte_device *parent);
2107 
2116 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
2117  for (port_id = rte_eth_find_next_of(0, parent); \
2118  port_id < RTE_MAX_ETHPORTS; \
2119  port_id = rte_eth_find_next_of(port_id + 1, parent))
2120 
2132 uint16_t
2133 rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2134 
2145 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
2146  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
2147  port_id < RTE_MAX_ETHPORTS; \
2148  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2149 
2160 int rte_eth_dev_owner_new(uint64_t *owner_id);
2161 
2172 int rte_eth_dev_owner_set(const uint16_t port_id,
2173  const struct rte_eth_dev_owner *owner);
2174 
2185 int rte_eth_dev_owner_unset(const uint16_t port_id,
2186  const uint64_t owner_id);
2187 
2196 int rte_eth_dev_owner_delete(const uint64_t owner_id);
2197 
2208 int rte_eth_dev_owner_get(const uint16_t port_id,
2209  struct rte_eth_dev_owner *owner);
2210 
2221 uint16_t rte_eth_dev_count_avail(void);
2222 
2231 uint16_t rte_eth_dev_count_total(void);
2232 
2244 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2245 
2254 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2255 
2264 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2265 
2277 __rte_experimental
2278 const char *rte_eth_dev_capability_name(uint64_t capability);
2279 
2319 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2320  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2321 
2330 int
2331 rte_eth_dev_is_removed(uint16_t port_id);
2332 
2395 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2396  uint16_t nb_rx_desc, unsigned int socket_id,
2397  const struct rte_eth_rxconf *rx_conf,
2398  struct rte_mempool *mb_pool);
2399 
2427 __rte_experimental
2429  (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2430  const struct rte_eth_hairpin_conf *conf);
2431 
2480 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2481  uint16_t nb_tx_desc, unsigned int socket_id,
2482  const struct rte_eth_txconf *tx_conf);
2483 
2509 __rte_experimental
2511  (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2512  const struct rte_eth_hairpin_conf *conf);
2513 
2540 __rte_experimental
2541 int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2542  size_t len, uint32_t direction);
2543 
2566 __rte_experimental
2567 int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2568 
2593 __rte_experimental
2594 int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2595 
2611 __rte_experimental
2612 int rte_eth_dev_count_aggr_ports(uint16_t port_id);
2613 
2641 __rte_experimental
2642 int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
2643  uint8_t affinity);
2644 
2657 int rte_eth_dev_socket_id(uint16_t port_id);
2658 
2668 int rte_eth_dev_is_valid_port(uint16_t port_id);
2669 
2686 __rte_experimental
2687 int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2688 
2705 __rte_experimental
2706 int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2707 
2725 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2726 
2743 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2744 
2762 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2763 
2780 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2781 
2802 int rte_eth_dev_start(uint16_t port_id);
2803 
2815 int rte_eth_dev_stop(uint16_t port_id);
2816 
2829 int rte_eth_dev_set_link_up(uint16_t port_id);
2830 
2840 int rte_eth_dev_set_link_down(uint16_t port_id);
2841 
2852 int rte_eth_dev_close(uint16_t port_id);
2853 
2891 int rte_eth_dev_reset(uint16_t port_id);
2892 
2904 int rte_eth_promiscuous_enable(uint16_t port_id);
2905 
2917 int rte_eth_promiscuous_disable(uint16_t port_id);
2918 
2929 int rte_eth_promiscuous_get(uint16_t port_id);
2930 
2942 int rte_eth_allmulticast_enable(uint16_t port_id);
2943 
2955 int rte_eth_allmulticast_disable(uint16_t port_id);
2956 
2967 int rte_eth_allmulticast_get(uint16_t port_id);
2968 
2986 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
2987 
3002 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
3003 
3017 __rte_experimental
3018 const char *rte_eth_link_speed_to_str(uint32_t link_speed);
3019 
3038 __rte_experimental
3039 int rte_eth_link_to_str(char *str, size_t len,
3040  const struct rte_eth_link *eth_link);
3041 
3059 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
3060 
3072 int rte_eth_stats_reset(uint16_t port_id);
3073 
3103 int rte_eth_xstats_get_names(uint16_t port_id,
3104  struct rte_eth_xstat_name *xstats_names,
3105  unsigned int size);
3106 
3140 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3141  unsigned int n);
3142 
3167 int
3168 rte_eth_xstats_get_names_by_id(uint16_t port_id,
3169  struct rte_eth_xstat_name *xstats_names, unsigned int size,
3170  uint64_t *ids);
3171 
3196 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3197  uint64_t *values, unsigned int size);
3198 
3218 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3219  uint64_t *id);
3220 
3233 int rte_eth_xstats_reset(uint16_t port_id);
3234 
3253 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
3254  uint16_t tx_queue_id, uint8_t stat_idx);
3255 
3274 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
3275  uint16_t rx_queue_id,
3276  uint8_t stat_idx);
3277 
3291 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3292 
3313 __rte_experimental
3314 int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3315  unsigned int num);
3316 
3336 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
3337 
3353 __rte_experimental
3354 int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf);
3355 
3376 int rte_eth_dev_fw_version_get(uint16_t port_id,
3377  char *fw_version, size_t fw_size);
3378 
3418 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3419  uint32_t *ptypes, int num);
3450 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3451  uint32_t *set_ptypes, unsigned int num);
3452 
3465 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3466 
3484 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3485 
3505 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3506 
3525 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3526  int on);
3527 
3544 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3545  enum rte_vlan_type vlan_type,
3546  uint16_t tag_type);
3547 
3565 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3566 
3580 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3581 
3596 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3597 
3623 __rte_experimental
3624 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
3625  uint8_t avail_thresh);
3626 
3653 __rte_experimental
3654 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
3655  uint8_t *avail_thresh);
3656 
3657 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3658  void *userdata);
3659 
3665  buffer_tx_error_fn error_callback;
3666  void *error_userdata;
3667  uint16_t size;
3668  uint16_t length;
3670  struct rte_mbuf *pkts[];
3671 };
3672 
3679 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3680  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3681 
3692 int
3693 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3694 
3719 int
3721  buffer_tx_error_fn callback, void *userdata);
3722 
3745 void
3746 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3747  void *userdata);
3748 
3772 void
3773 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3774  void *userdata);
3775 
3801 int
3802 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3803 
3836 };
3837 
3857 };
3858 
3877  uint64_t metadata;
3878 };
3879 
3913 };
3914 
3939  uint64_t metadata;
3940 };
3941 
4018 };
4019 
4021 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
4022  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
4023 
4041 int rte_eth_dev_callback_register(uint16_t port_id,
4042  enum rte_eth_event_type event,
4043  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4044 
4063 int rte_eth_dev_callback_unregister(uint16_t port_id,
4064  enum rte_eth_event_type event,
4065  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4066 
4088 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
4089 
4110 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
4111 
4129 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
4130 
4152 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4153  int epfd, int op, void *data);
4154 
4169 int
4170 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
4171 
4185 int rte_eth_led_on(uint16_t port_id);
4186 
4200 int rte_eth_led_off(uint16_t port_id);
4201 
4230 __rte_experimental
4231 int rte_eth_fec_get_capability(uint16_t port_id,
4232  struct rte_eth_fec_capa *speed_fec_capa,
4233  unsigned int num);
4234 
4255 __rte_experimental
4256 int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4257 
4281 __rte_experimental
4282 int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
4283 
4298 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4299  struct rte_eth_fc_conf *fc_conf);
4300 
4315 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4316  struct rte_eth_fc_conf *fc_conf);
4317 
4333 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
4334  struct rte_eth_pfc_conf *pfc_conf);
4335 
4354 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4355  uint32_t pool);
4356 
4374 __rte_experimental
4376  struct rte_eth_pfc_queue_info *pfc_queue_info);
4377 
4401 __rte_experimental
4403  struct rte_eth_pfc_queue_conf *pfc_queue_conf);
4404 
4419 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4420  struct rte_ether_addr *mac_addr);
4421 
4439 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
4440  struct rte_ether_addr *mac_addr);
4441 
4459 int rte_eth_dev_rss_reta_update(uint16_t port_id,
4460  struct rte_eth_rss_reta_entry64 *reta_conf,
4461  uint16_t reta_size);
4462 
4481 int rte_eth_dev_rss_reta_query(uint16_t port_id,
4482  struct rte_eth_rss_reta_entry64 *reta_conf,
4483  uint16_t reta_size);
4484 
4504 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4505  uint8_t on);
4506 
4525 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4526 
4543 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4544  uint32_t tx_rate);
4545 
4560 int rte_eth_dev_rss_hash_update(uint16_t port_id,
4561  struct rte_eth_rss_conf *rss_conf);
4562 
4578 int
4579 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4580  struct rte_eth_rss_conf *rss_conf);
4581 
4606 int
4607 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4608  struct rte_eth_udp_tunnel *tunnel_udp);
4609 
4629 int
4630 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4631  struct rte_eth_udp_tunnel *tunnel_udp);
4632 
4647 int rte_eth_dev_get_dcb_info(uint16_t port_id,
4648  struct rte_eth_dcb_info *dcb_info);
4649 
4650 struct rte_eth_rxtx_callback;
4651 
4677 const struct rte_eth_rxtx_callback *
4678 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4679  rte_rx_callback_fn fn, void *user_param);
4680 
4707 const struct rte_eth_rxtx_callback *
4708 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4709  rte_rx_callback_fn fn, void *user_param);
4710 
4736 const struct rte_eth_rxtx_callback *
4737 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4738  rte_tx_callback_fn fn, void *user_param);
4739 
4773 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4774  const struct rte_eth_rxtx_callback *user_cb);
4775 
4809 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4810  const struct rte_eth_rxtx_callback *user_cb);
4811 
4831 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4832  struct rte_eth_rxq_info *qinfo);
4833 
4853 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4854  struct rte_eth_txq_info *qinfo);
4855 
4874 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4875  struct rte_eth_burst_mode *mode);
4876 
4895 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4896  struct rte_eth_burst_mode *mode);
4897 
4918 __rte_experimental
4919 int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
4920  struct rte_power_monitor_cond *pmc);
4921 
4940 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
4941 
4954 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
4955 
4972 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4973 
4990 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4991 
5010 __rte_experimental
5011 int
5012 rte_eth_dev_get_module_info(uint16_t port_id,
5013  struct rte_eth_dev_module_info *modinfo);
5014 
5034 __rte_experimental
5035 int
5036 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5037  struct rte_dev_eeprom_info *info);
5038 
5058 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5059  struct rte_ether_addr *mc_addr_set,
5060  uint32_t nb_mc_addr);
5061 
5074 int rte_eth_timesync_enable(uint16_t port_id);
5075 
5088 int rte_eth_timesync_disable(uint16_t port_id);
5089 
5108 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
5109  struct timespec *timestamp, uint32_t flags);
5110 
5126 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5127  struct timespec *timestamp);
5128 
5146 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
5147 
5163 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
5164 
5183 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
5184 
5230 __rte_experimental
5231 int
5232 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
5233 
5249 int
5250 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
5251 
5268 int
5269 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5270 
5287 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5288  uint16_t *nb_rx_desc,
5289  uint16_t *nb_tx_desc);
5290 
5305 int
5306 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5307 
5317 void *
5318 rte_eth_dev_get_sec_ctx(uint16_t port_id);
5319 
5335 __rte_experimental
5336 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5337  struct rte_eth_hairpin_cap *cap);
5338 
5348  int pf;
5349  __extension__
5350  union {
5351  int vf;
5352  int sf;
5353  };
5354  uint32_t id_base;
5355  uint32_t id_end;
5356  char name[RTE_DEV_NAME_MAX_LEN];
5357 };
5358 
5366  uint16_t controller;
5367  uint16_t pf;
5368  uint32_t nb_ranges_alloc;
5369  uint32_t nb_ranges;
5370  struct rte_eth_representor_range ranges[];
5371 };
5372 
5396 __rte_experimental
5397 int rte_eth_representor_info_get(uint16_t port_id,
5398  struct rte_eth_representor_info *info);
5399 
5401 #define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5402 
5404 #define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5405 
5407 #define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5408 
5448 int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
5449 
5451 #define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
5452 
5453 #define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))
5454 
5465  uint32_t timeout_ms;
5467  uint16_t max_frags;
5472  uint16_t flags;
5473 };
5474 
5495 __rte_experimental
5496 int rte_eth_ip_reassembly_capability_get(uint16_t port_id,
5497  struct rte_eth_ip_reassembly_params *capa);
5498 
5520 __rte_experimental
5521 int rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5522  struct rte_eth_ip_reassembly_params *conf);
5523 
5553 __rte_experimental
5554 int rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5555  const struct rte_eth_ip_reassembly_params *conf);
5556 
5564 typedef struct {
5571  uint16_t time_spent;
5573  uint16_t nb_frags;
5575 
5594 __rte_experimental
5595 int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file);
5596 
5620 __rte_experimental
5621 int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5622  uint16_t offset, uint16_t num, FILE *file);
5623 
5647 __rte_experimental
5648 int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5649  uint16_t offset, uint16_t num, FILE *file);
5650 
5651 
5652 /* Congestion management */
5653 
5663 };
5664 
5681  uint64_t objs_supported;
5686  uint8_t rsvd[8];
5687 };
5688 
5699  enum rte_cman_mode mode;
5700  union {
5707  uint16_t rx_queue;
5714  uint8_t rsvd_obj_params[4];
5715  } obj_param;
5716  union {
5729  uint8_t rsvd_mode_params[4];
5730  } mode_param;
5731 };
5732 
5750 __rte_experimental
5751 int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info);
5752 
5770 __rte_experimental
5771 int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config);
5772 
5789 __rte_experimental
5790 int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config);
5791 
5812 __rte_experimental
5813 int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config);
5814 
5815 #include <rte_ethdev_core.h>
5816 
5840 uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
5841  struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
5842  void *opaque);
5843 
5931 static inline uint16_t
5932 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
5933  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
5934 {
5935  uint16_t nb_rx;
5936  struct rte_eth_fp_ops *p;
5937  void *qd;
5938 
5939 #ifdef RTE_ETHDEV_DEBUG_RX
5940  if (port_id >= RTE_MAX_ETHPORTS ||
5941  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
5942  RTE_ETHDEV_LOG(ERR,
5943  "Invalid port_id=%u or queue_id=%u\n",
5944  port_id, queue_id);
5945  return 0;
5946  }
5947 #endif
5948 
5949  /* fetch pointer to queue data */
5950  p = &rte_eth_fp_ops[port_id];
5951  qd = p->rxq.data[queue_id];
5952 
5953 #ifdef RTE_ETHDEV_DEBUG_RX
5954  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
5955 
5956  if (qd == NULL) {
5957  RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u for port_id=%u\n",
5958  queue_id, port_id);
5959  return 0;
5960  }
5961 #endif
5962 
5963  nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
5964 
5965 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
5966  {
5967  void *cb;
5968 
5969  /* __ATOMIC_RELEASE memory order was used when the
5970  * call back was inserted into the list.
5971  * Since there is a clear dependency between loading
5972  * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
5973  * not required.
5974  */
5975  cb = __atomic_load_n((void **)&p->rxq.clbk[queue_id],
5976  __ATOMIC_RELAXED);
5977  if (unlikely(cb != NULL))
5978  nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
5979  rx_pkts, nb_rx, nb_pkts, cb);
5980  }
5981 #endif
5982 
5983  rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
5984  return nb_rx;
5985 }
5986 
6004 static inline int
6005 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
6006 {
6007  struct rte_eth_fp_ops *p;
6008  void *qd;
6009 
6010 #ifdef RTE_ETHDEV_DEBUG_RX
6011  if (port_id >= RTE_MAX_ETHPORTS ||
6012  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6013  RTE_ETHDEV_LOG(ERR,
6014  "Invalid port_id=%u or queue_id=%u\n",
6015  port_id, queue_id);
6016  return -EINVAL;
6017  }
6018 #endif
6019 
6020  /* fetch pointer to queue data */
6021  p = &rte_eth_fp_ops[port_id];
6022  qd = p->rxq.data[queue_id];
6023 
6024 #ifdef RTE_ETHDEV_DEBUG_RX
6025  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6026  if (qd == NULL)
6027  return -EINVAL;
6028 #endif
6029 
6030  if (*p->rx_queue_count == NULL)
6031  return -ENOTSUP;
6032  return (int)(*p->rx_queue_count)(qd);
6033 }
6034 
6038 #define RTE_ETH_RX_DESC_AVAIL 0
6039 #define RTE_ETH_RX_DESC_DONE 1
6040 #define RTE_ETH_RX_DESC_UNAVAIL 2
6076 static inline int
6077 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
6078  uint16_t offset)
6079 {
6080  struct rte_eth_fp_ops *p;
6081  void *qd;
6082 
6083 #ifdef RTE_ETHDEV_DEBUG_RX
6084  if (port_id >= RTE_MAX_ETHPORTS ||
6085  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6086  RTE_ETHDEV_LOG(ERR,
6087  "Invalid port_id=%u or queue_id=%u\n",
6088  port_id, queue_id);
6089  return -EINVAL;
6090  }
6091 #endif
6092 
6093  /* fetch pointer to queue data */
6094  p = &rte_eth_fp_ops[port_id];
6095  qd = p->rxq.data[queue_id];
6096 
6097 #ifdef RTE_ETHDEV_DEBUG_RX
6098  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6099  if (qd == NULL)
6100  return -ENODEV;
6101 #endif
6102  if (*p->rx_descriptor_status == NULL)
6103  return -ENOTSUP;
6104  return (*p->rx_descriptor_status)(qd, offset);
6105 }
6106 
6110 #define RTE_ETH_TX_DESC_FULL 0
6111 #define RTE_ETH_TX_DESC_DONE 1
6112 #define RTE_ETH_TX_DESC_UNAVAIL 2
6148 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
6149  uint16_t queue_id, uint16_t offset)
6150 {
6151  struct rte_eth_fp_ops *p;
6152  void *qd;
6153 
6154 #ifdef RTE_ETHDEV_DEBUG_TX
6155  if (port_id >= RTE_MAX_ETHPORTS ||
6156  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6157  RTE_ETHDEV_LOG(ERR,
6158  "Invalid port_id=%u or queue_id=%u\n",
6159  port_id, queue_id);
6160  return -EINVAL;
6161  }
6162 #endif
6163 
6164  /* fetch pointer to queue data */
6165  p = &rte_eth_fp_ops[port_id];
6166  qd = p->txq.data[queue_id];
6167 
6168 #ifdef RTE_ETHDEV_DEBUG_TX
6169  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6170  if (qd == NULL)
6171  return -ENODEV;
6172 #endif
6173  if (*p->tx_descriptor_status == NULL)
6174  return -ENOTSUP;
6175  return (*p->tx_descriptor_status)(qd, offset);
6176 }
6177 
6197 uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
6198  struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
6199 
6271 static inline uint16_t
6272 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
6273  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6274 {
6275  struct rte_eth_fp_ops *p;
6276  void *qd;
6277 
6278 #ifdef RTE_ETHDEV_DEBUG_TX
6279  if (port_id >= RTE_MAX_ETHPORTS ||
6280  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6281  RTE_ETHDEV_LOG(ERR,
6282  "Invalid port_id=%u or queue_id=%u\n",
6283  port_id, queue_id);
6284  return 0;
6285  }
6286 #endif
6287 
6288  /* fetch pointer to queue data */
6289  p = &rte_eth_fp_ops[port_id];
6290  qd = p->txq.data[queue_id];
6291 
6292 #ifdef RTE_ETHDEV_DEBUG_TX
6293  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6294 
6295  if (qd == NULL) {
6296  RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
6297  queue_id, port_id);
6298  return 0;
6299  }
6300 #endif
6301 
6302 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6303  {
6304  void *cb;
6305 
6306  /* __ATOMIC_RELEASE memory order was used when the
6307  * call back was inserted into the list.
6308  * Since there is a clear dependency between loading
6309  * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
6310  * not required.
6311  */
6312  cb = __atomic_load_n((void **)&p->txq.clbk[queue_id],
6313  __ATOMIC_RELAXED);
6314  if (unlikely(cb != NULL))
6315  nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
6316  tx_pkts, nb_pkts, cb);
6317  }
6318 #endif
6319 
6320  nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
6321 
6322  rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
6323  return nb_pkts;
6324 }
6325 
6379 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
6380 
6381 static inline uint16_t
6382 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
6383  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6384 {
6385  struct rte_eth_fp_ops *p;
6386  void *qd;
6387 
6388 #ifdef RTE_ETHDEV_DEBUG_TX
6389  if (port_id >= RTE_MAX_ETHPORTS ||
6390  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6391  RTE_ETHDEV_LOG(ERR,
6392  "Invalid port_id=%u or queue_id=%u\n",
6393  port_id, queue_id);
6394  rte_errno = ENODEV;
6395  return 0;
6396  }
6397 #endif
6398 
6399  /* fetch pointer to queue data */
6400  p = &rte_eth_fp_ops[port_id];
6401  qd = p->txq.data[queue_id];
6402 
6403 #ifdef RTE_ETHDEV_DEBUG_TX
6404  if (!rte_eth_dev_is_valid_port(port_id)) {
6405  RTE_ETHDEV_LOG(ERR, "Invalid Tx port_id=%u\n", port_id);
6406  rte_errno = ENODEV;
6407  return 0;
6408  }
6409  if (qd == NULL) {
6410  RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
6411  queue_id, port_id);
6412  rte_errno = EINVAL;
6413  return 0;
6414  }
6415 #endif
6416 
6417  if (!p->tx_pkt_prepare)
6418  return nb_pkts;
6419 
6420  return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
6421 }
6422 
6423 #else
6424 
6425 /*
6426  * Native NOOP operation for compilation targets which doesn't require any
6427  * preparations steps, and functional NOOP may introduce unnecessary performance
6428  * drop.
6429  *
6430  * Generally this is not a good idea to turn it on globally and didn't should
6431  * be used if behavior of tx_preparation can change.
6432  */
6433 
6434 static inline uint16_t
6435 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
6436  __rte_unused uint16_t queue_id,
6437  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6438 {
6439  return nb_pkts;
6440 }
6441 
6442 #endif
6443 
6466 static inline uint16_t
6467 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
6468  struct rte_eth_dev_tx_buffer *buffer)
6469 {
6470  uint16_t sent;
6471  uint16_t to_send = buffer->length;
6472 
6473  if (to_send == 0)
6474  return 0;
6475 
6476  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
6477 
6478  buffer->length = 0;
6479 
6480  /* All packets sent, or to be dealt with by callback below */
6481  if (unlikely(sent != to_send))
6482  buffer->error_callback(&buffer->pkts[sent],
6483  (uint16_t)(to_send - sent),
6484  buffer->error_userdata);
6485 
6486  return sent;
6487 }
6488 
6519 static __rte_always_inline uint16_t
6520 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
6521  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
6522 {
6523  buffer->pkts[buffer->length++] = tx_pkt;
6524  if (buffer->length < buffer->size)
6525  return 0;
6526 
6527  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
6528 }
6529 
6558 __rte_experimental
6559 int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num);
6560 
6561 #ifdef __cplusplus
6562 }
6563 #endif
6564 
6565 #endif /* _RTE_ETHDEV_H_ */
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1760
int rte_eth_dev_stop(uint16_t port_id)
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_promiscuous_disable(uint16_t port_id)
__extension__ uint32_t multi_pools
Definition: rte_ethdev.h:1670
int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
uint16_t link_duplex
Definition: rte_ethdev.h:336
__rte_experimental int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
#define __rte_always_inline
Definition: rte_common.h:255
#define RTE_ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:805
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:1138
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
uint32_t mtu
Definition: rte_ethdev.h:415
uint16_t nb_desc
Definition: rte_ethdev.h:1819
rte_eth_event_macsec_type
Definition: rte_ethdev.h:3842
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1739
const uint32_t * dev_flags
Definition: rte_ethdev.h:1725
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
__rte_experimental int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
#define __rte_cache_min_aligned
Definition: rte_common.h:443
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6382
struct rte_device * device
Definition: rte_ethdev.h:1719
rte_eth_nb_tcs
Definition: rte_ethdev.h:866
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:284
uint16_t rte_eth_find_next(uint16_t port_id)
__rte_experimental int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
rte_cman_mode
Definition: rte_cman.h:20
int rte_eth_led_off(uint16_t port_id)
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:6077
uint64_t imissed
Definition: rte_ethdev.h:270
uint32_t low_water
Definition: rte_ethdev.h:1332
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
uint8_t rss_key_len
Definition: rte_ethdev.h:467
__rte_experimental int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id, uint8_t affinity)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
uint8_t hthresh
Definition: rte_ethdev.h:360
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
__rte_experimental int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1743
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
__rte_experimental int rte_eth_dev_count_aggr_ports(uint16_t port_id)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
uint16_t reta_size
Definition: rte_ethdev.h:1747
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint32_t lpbk_mode
Definition: rte_ethdev.h:1466
#define rte_errno
Definition: rte_errno.h:29
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint32_t link_speeds
Definition: rte_ethdev.h:1457
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1745
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:402
rte_eth_fc_mode
Definition: rte_ethdev.h:1318
uint8_t enable_default_pool
Definition: rte_ethdev.h:918
uint32_t max_hash_mac_addrs
Definition: rte_ethdev.h:1734
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:1312
int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
#define __rte_unused
Definition: rte_common.h:120
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:282
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
uint64_t opackets
Definition: rte_ethdev.h:263
rte_eth_cman_obj
Definition: rte_ethdev.h:5655
uint8_t hash_key_size
Definition: rte_ethdev.h:1748
int rte_eth_dev_close(uint16_t port_id)
struct rte_mempool * mp
Definition: rte_ethdev.h:1046
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
__rte_experimental int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, uint8_t avail_thresh)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
struct rte_mempool * mp
Definition: rte_ethdev.h:1798
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1490
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:280
__rte_experimental int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
const char * name
Definition: rte_ethdev.h:1644
uint8_t queue_state
Definition: rte_ethdev.h:1820
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
int rte_eth_dev_set_link_up(uint16_t port_id)
#define RTE_BIT32(nr)
Definition: rte_bitops.h:38
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
__rte_experimental int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1754
int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
uint16_t share_qid
Definition: rte_ethdev.h:1091
uint8_t rx_deferred_start
Definition: rte_ethdev.h:1082
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:3670
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:4021
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:276
uint32_t high_water
Definition: rte_ethdev.h:1331
#define RTE_ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:367
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1844
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
__rte_experimental int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, unsigned int num)
int rte_eth_timesync_disable(uint16_t port_id)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint64_t offloads
Definition: rte_ethdev.h:1097
uint32_t link_speed
Definition: rte_ethdev.h:335
__rte_experimental int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, uint8_t *avail_thresh)
__rte_experimental int rte_eth_representor_info_get(uint16_t port_id, struct rte_eth_representor_info *info)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, struct rte_eth_pfc_queue_conf *pfc_queue_conf)
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num)
uint16_t send_xon
Definition: rte_ethdev.h:1334
struct rte_mempool ** rx_mempools
Definition: rte_ethdev.h:1126
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_allmulticast_enable(uint16_t port_id)
uint32_t offset_allowed
Definition: rte_ethdev.h:1671
__rte_experimental int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, struct rte_power_monitor_cond *pmc)
#define unlikely(x)
uint64_t ibytes
Definition: rte_ethdev.h:264
uint32_t offset_align_log2
Definition: rte_ethdev.h:1672
uint8_t avail_thresh
Definition: rte_ethdev.h:1810
uint64_t offloads
Definition: rte_ethdev.h:1148
int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
uint16_t max_nb_queues
Definition: rte_ethdev.h:1184
uint64_t oerrors
Definition: rte_ethdev.h:272
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
__rte_experimental int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
uint16_t max_mtu
Definition: rte_ethdev.h:1724
uint64_t offloads
Definition: rte_ethdev.h:423
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
uint16_t link_autoneg
Definition: rte_ethdev.h:337
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
uint16_t tx_free_thresh
Definition: rte_ethdev.h:1139
uint16_t nb_desc
Definition: rte_ethdev.h:1802
uint64_t modes_supported
Definition: rte_ethdev.h:5676
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:5932
int rte_eth_allmulticast_disable(uint16_t port_id)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_start(uint16_t port_id)
__rte_experimental int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config)
uint16_t rx_buf_size
Definition: rte_ethdev.h:1803
__rte_experimental int rte_eth_ip_reassembly_capability_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *capa)
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1736
uint8_t scattered_rx
Definition: rte_ethdev.h:1800
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
#define RTE_ETH_MQ_RX_DCB_FLAG
Definition: rte_ethdev.h:368
uint64_t offloads
Definition: rte_ethdev.h:970
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1755
__rte_experimental int rte_eth_ip_reassembly_conf_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *conf)
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1741
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:278
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
uint16_t min_mtu
Definition: rte_ethdev.h:1723
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_is_removed(uint16_t port_id)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:1997
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
uint64_t obytes
Definition: rte_ethdev.h:265
__rte_experimental int rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
uint8_t enable_loop_back
Definition: rte_ethdev.h:951
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
rte_eth_err_handle_mode
Definition: rte_ethdev.h:1697
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, struct rte_eth_pfc_queue_info *pfc_queue_info)
int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
rte_eth_fec_mode
Definition: rte_ethdev.h:1914
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
uint16_t max_tx_queues
Definition: rte_ethdev.h:1731
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_xstats_reset(uint16_t port_id)
rte_eth_dev_state
Definition: rte_ethdev.h:2003
uint16_t rx_free_thresh
Definition: rte_ethdev.h:1080
uint64_t dev_capa
Definition: rte_ethdev.h:1774
uint64_t ierrors
Definition: rte_ethdev.h:271
#define RTE_ETH_MQ_RX_VMDQ_FLAG
Definition: rte_ethdev.h:369
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction)
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1750
#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:804
uint16_t rte_eth_dev_count_total(void)
int rte_eth_promiscuous_enable(uint16_t port_id)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
union rte_eth_rxseg * rx_seg
Definition: rte_ethdev.h:1105
int rte_eth_dev_owner_new(uint64_t *owner_id)
rte_vlan_type
Definition: rte_ethdev.h:433
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
__rte_experimental int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
uint16_t nb_seg_max
Definition: rte_ethdev.h:1299
uint64_t ipackets
Definition: rte_ethdev.h:262
uint16_t max_vfs
Definition: rte_ethdev.h:1735
uint16_t pause_time
Definition: rte_ethdev.h:1333
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_owner_delete(const uint64_t owner_id)
uint64_t rx_nombuf
Definition: rte_ethdev.h:273
rte_eth_event_macsec_subtype
Definition: rte_ethdev.h:3808
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:6520
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:975
uint8_t queue_state
Definition: rte_ethdev.h:1801
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
uint32_t use_locked_device_memory
Definition: rte_ethdev.h:1251
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1753
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:3884
rte_eth_nb_pools
Definition: rte_ethdev.h:875
uint16_t nb_align
Definition: rte_ethdev.h:1289
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:376
__rte_experimental const char * rte_eth_link_speed_to_str(uint32_t link_speed)
uint16_t rsvd
const char * driver_name
Definition: rte_ethdev.h:1720
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:6005
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_socket_id(uint16_t port_id)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint32_t tx_rate)
uint8_t enable_default_pool
Definition: rte_ethdev.h:949
__extension__ struct rte_eth_link __rte_aligned(8)
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1761
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1732
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
rte_eth_tunnel_type
Definition: rte_ethdev.h:1407
__rte_experimental const char * rte_eth_dev_capability_name(uint64_t capability)
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint64_t value
Definition: rte_ethdev.h:1857
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition: rte_ethdev.h:629
__rte_experimental int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
__rte_experimental int rte_eth_ip_reassembly_conf_set(uint16_t port_id, const struct rte_eth_ip_reassembly_params *conf)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
__rte_experimental int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config)
int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1727
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
uint64_t rss_hf
Definition: rte_ethdev.h:468
uint64_t id
Definition: rte_ethdev.h:1856
__rte_experimental int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
unsigned int if_index
Definition: rte_ethdev.h:1721
__rte_experimental int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:1336
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:1973
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
uint8_t * rss_key
Definition: rte_ethdev.h:466
uint8_t tx_deferred_start
Definition: rte_ethdev.h:1142
uint8_t wthresh
Definition: rte_ethdev.h:361
uint16_t max_rx_queues
Definition: rte_ethdev.h:1730
uint16_t max_rx_mempools
Definition: rte_ethdev.h:1768
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
rte_eth_representor_type
Definition: rte_ethdev.h:1684
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:417
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
int rte_eth_allmulticast_get(uint16_t port_id)
uint8_t rx_drop_en
Definition: rte_ethdev.h:1081
int rte_eth_dev_is_valid_port(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint16_t rx_nseg
Definition: rte_ethdev.h:1083
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1729
int rte_eth_timesync_enable(uint16_t port_id)
uint8_t pthresh
Definition: rte_ethdev.h:359
uint16_t share_group
Definition: rte_ethdev.h:1090
uint32_t speed_capa
Definition: rte_ethdev.h:1758
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6272
__rte_experimental int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info)
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_experimental int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
uint64_t objs_supported
Definition: rte_ethdev.h:5681
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1726
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:6467
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
rte_eth_event_type
Definition: rte_ethdev.h:3945