DPDK  22.11.0
rte_ethdev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
148 #ifdef __cplusplus
149 extern "C" {
150 #endif
151 
152 #include <stdint.h>
153 
154 /* Use this macro to check if LRO API is supported */
155 #define RTE_ETHDEV_HAS_LRO_SUPPORT
156 
157 /* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
158 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
159 #define RTE_ETHDEV_DEBUG_RX
160 #define RTE_ETHDEV_DEBUG_TX
161 #endif
162 
163 #include <rte_cman.h>
164 #include <rte_compat.h>
165 #include <rte_log.h>
166 #include <rte_interrupts.h>
167 #include <rte_dev.h>
168 #include <rte_devargs.h>
169 #include <rte_bitops.h>
170 #include <rte_errno.h>
171 #include <rte_common.h>
172 #include <rte_config.h>
173 #include <rte_power_intrinsics.h>
174 
175 #include "rte_ethdev_trace_fp.h"
176 #include "rte_dev_info.h"
177 
178 extern int rte_eth_dev_logtype;
179 
/*
 * Ethdev logging helper: emit a message at RTE_LOG_<level> severity on the
 * ethdev-specific logtype (rte_eth_dev_logtype, declared above).
 * The empty "" literal is string-concatenated with the first variadic
 * argument, so the format argument must be a string literal — this rejects
 * non-literal (attacker-controlled) format strings at compile time.
 */
180 #define RTE_ETHDEV_LOG(level, ...) \
181  rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
182 
183 struct rte_mbuf;
184 
201 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
202 
217 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
218 
231 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
232 
/*
 * Iterate over all ethdev port ids matching a devargs specification.
 * Initializes `iter` from `devargs`, then assigns each matching port id to
 * `id` in turn; the loop terminates when rte_eth_iterator_next() returns
 * RTE_MAX_ETHPORTS (no more matches).
 * NOTE(review): on a `break` before exhaustion the iterator is presumably
 * left holding resources — confirm whether rte_eth_iterator_cleanup() must
 * be called by the caller in that case.
 */
246 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
247  for (rte_eth_iterator_init(iter, devargs), \
248  id = rte_eth_iterator_next(iter); \
249  id != RTE_MAX_ETHPORTS; \
250  id = rte_eth_iterator_next(iter))
251 
262  uint64_t ipackets;
263  uint64_t opackets;
264  uint64_t ibytes;
265  uint64_t obytes;
270  uint64_t imissed;
271  uint64_t ierrors;
272  uint64_t oerrors;
273  uint64_t rx_nombuf;
274  /* Queue stats are limited to max 256 queues */
276  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
278  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
280  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
282  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
284  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
285 };
286 
290 #define RTE_ETH_LINK_SPEED_AUTONEG 0
291 #define RTE_ETH_LINK_SPEED_FIXED RTE_BIT32(0)
292 #define RTE_ETH_LINK_SPEED_10M_HD RTE_BIT32(1)
293 #define RTE_ETH_LINK_SPEED_10M RTE_BIT32(2)
294 #define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)
295 #define RTE_ETH_LINK_SPEED_100M RTE_BIT32(4)
296 #define RTE_ETH_LINK_SPEED_1G RTE_BIT32(5)
297 #define RTE_ETH_LINK_SPEED_2_5G RTE_BIT32(6)
298 #define RTE_ETH_LINK_SPEED_5G RTE_BIT32(7)
299 #define RTE_ETH_LINK_SPEED_10G RTE_BIT32(8)
300 #define RTE_ETH_LINK_SPEED_20G RTE_BIT32(9)
301 #define RTE_ETH_LINK_SPEED_25G RTE_BIT32(10)
302 #define RTE_ETH_LINK_SPEED_40G RTE_BIT32(11)
303 #define RTE_ETH_LINK_SPEED_50G RTE_BIT32(12)
304 #define RTE_ETH_LINK_SPEED_56G RTE_BIT32(13)
305 #define RTE_ETH_LINK_SPEED_100G RTE_BIT32(14)
306 #define RTE_ETH_LINK_SPEED_200G RTE_BIT32(15)
312 #define RTE_ETH_SPEED_NUM_NONE 0
313 #define RTE_ETH_SPEED_NUM_10M 10
314 #define RTE_ETH_SPEED_NUM_100M 100
315 #define RTE_ETH_SPEED_NUM_1G 1000
316 #define RTE_ETH_SPEED_NUM_2_5G 2500
317 #define RTE_ETH_SPEED_NUM_5G 5000
318 #define RTE_ETH_SPEED_NUM_10G 10000
319 #define RTE_ETH_SPEED_NUM_20G 20000
320 #define RTE_ETH_SPEED_NUM_25G 25000
321 #define RTE_ETH_SPEED_NUM_40G 40000
322 #define RTE_ETH_SPEED_NUM_50G 50000
323 #define RTE_ETH_SPEED_NUM_56G 56000
324 #define RTE_ETH_SPEED_NUM_100G 100000
325 #define RTE_ETH_SPEED_NUM_200G 200000
326 #define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX
332 __extension__
333 struct rte_eth_link {
334  uint32_t link_speed;
335  uint16_t link_duplex : 1;
336  uint16_t link_autoneg : 1;
337  uint16_t link_status : 1;
338 } __rte_aligned(8);
343 #define RTE_ETH_LINK_HALF_DUPLEX 0
344 #define RTE_ETH_LINK_FULL_DUPLEX 1
345 #define RTE_ETH_LINK_DOWN 0
346 #define RTE_ETH_LINK_UP 1
347 #define RTE_ETH_LINK_FIXED 0
348 #define RTE_ETH_LINK_AUTONEG 1
349 #define RTE_ETH_LINK_MAX_STR_LEN 40
356 struct rte_eth_thresh {
357  uint8_t pthresh;
358  uint8_t hthresh;
359  uint8_t wthresh;
360 };
361 
365 #define RTE_ETH_MQ_RX_RSS_FLAG RTE_BIT32(0)
366 #define RTE_ETH_MQ_RX_DCB_FLAG RTE_BIT32(1)
367 #define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2)
374 enum rte_eth_rx_mq_mode {
375 
377 
384 
394 };
395 
405 };
406 
412  enum rte_eth_rx_mq_mode mq_mode;
413  uint32_t mtu;
421  uint64_t offloads;
422 
423  uint64_t reserved_64s[2];
424  void *reserved_ptrs[2];
425 };
426 
432  RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
435  RTE_ETH_VLAN_TYPE_MAX,
436 };
437 
443  uint64_t ids[64];
444 };
445 
464  uint8_t *rss_key;
465  uint8_t rss_key_len;
466  uint64_t rss_hf;
467 };
468 
469 /*
470  * A packet can be identified by hardware as different flow types. Different
471  * NIC hardware may support different flow types.
472  * Basically, the NIC hardware identifies the flow type as deep protocol as
473  * possible, and exclusively. For example, if a packet is identified as
474  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
475  * though it is an actual IPV4 packet.
476  */
477 #define RTE_ETH_FLOW_UNKNOWN 0
478 #define RTE_ETH_FLOW_RAW 1
479 #define RTE_ETH_FLOW_IPV4 2
480 #define RTE_ETH_FLOW_FRAG_IPV4 3
481 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
482 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
483 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
484 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
485 #define RTE_ETH_FLOW_IPV6 8
486 #define RTE_ETH_FLOW_FRAG_IPV6 9
487 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
488 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
489 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
490 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
491 #define RTE_ETH_FLOW_L2_PAYLOAD 14
492 #define RTE_ETH_FLOW_IPV6_EX 15
493 #define RTE_ETH_FLOW_IPV6_TCP_EX 16
494 #define RTE_ETH_FLOW_IPV6_UDP_EX 17
495 
496 #define RTE_ETH_FLOW_PORT 18
497 #define RTE_ETH_FLOW_VXLAN 19
498 #define RTE_ETH_FLOW_GENEVE 20
499 #define RTE_ETH_FLOW_NVGRE 21
500 #define RTE_ETH_FLOW_VXLAN_GPE 22
501 #define RTE_ETH_FLOW_GTPU 23
502 #define RTE_ETH_FLOW_MAX 24
503 
504 /*
505  * Below macros are defined for RSS offload types, they can be used to
506  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
507  */
508 #define RTE_ETH_RSS_IPV4 RTE_BIT64(2)
509 #define RTE_ETH_RSS_FRAG_IPV4 RTE_BIT64(3)
510 #define RTE_ETH_RSS_NONFRAG_IPV4_TCP RTE_BIT64(4)
511 #define RTE_ETH_RSS_NONFRAG_IPV4_UDP RTE_BIT64(5)
512 #define RTE_ETH_RSS_NONFRAG_IPV4_SCTP RTE_BIT64(6)
513 #define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
514 #define RTE_ETH_RSS_IPV6 RTE_BIT64(8)
515 #define RTE_ETH_RSS_FRAG_IPV6 RTE_BIT64(9)
516 #define RTE_ETH_RSS_NONFRAG_IPV6_TCP RTE_BIT64(10)
517 #define RTE_ETH_RSS_NONFRAG_IPV6_UDP RTE_BIT64(11)
518 #define RTE_ETH_RSS_NONFRAG_IPV6_SCTP RTE_BIT64(12)
519 #define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
520 #define RTE_ETH_RSS_L2_PAYLOAD RTE_BIT64(14)
521 #define RTE_ETH_RSS_IPV6_EX RTE_BIT64(15)
522 #define RTE_ETH_RSS_IPV6_TCP_EX RTE_BIT64(16)
523 #define RTE_ETH_RSS_IPV6_UDP_EX RTE_BIT64(17)
524 #define RTE_ETH_RSS_PORT RTE_BIT64(18)
525 #define RTE_ETH_RSS_VXLAN RTE_BIT64(19)
526 #define RTE_ETH_RSS_GENEVE RTE_BIT64(20)
527 #define RTE_ETH_RSS_NVGRE RTE_BIT64(21)
528 #define RTE_ETH_RSS_GTPU RTE_BIT64(23)
529 #define RTE_ETH_RSS_ETH RTE_BIT64(24)
530 #define RTE_ETH_RSS_S_VLAN RTE_BIT64(25)
531 #define RTE_ETH_RSS_C_VLAN RTE_BIT64(26)
532 #define RTE_ETH_RSS_ESP RTE_BIT64(27)
533 #define RTE_ETH_RSS_AH RTE_BIT64(28)
534 #define RTE_ETH_RSS_L2TPV3 RTE_BIT64(29)
535 #define RTE_ETH_RSS_PFCP RTE_BIT64(30)
536 #define RTE_ETH_RSS_PPPOE RTE_BIT64(31)
537 #define RTE_ETH_RSS_ECPRI RTE_BIT64(32)
538 #define RTE_ETH_RSS_MPLS RTE_BIT64(33)
539 #define RTE_ETH_RSS_IPV4_CHKSUM RTE_BIT64(34)
540 
553 #define RTE_ETH_RSS_L4_CHKSUM RTE_BIT64(35)
554 
555 #define RTE_ETH_RSS_L2TPV2 RTE_BIT64(36)
556 
557 /*
558  * We use the following macros to combine with above RTE_ETH_RSS_* for
559  * more specific input set selection. These bits are defined starting
560  * from the high end of the 64 bits.
561  * Note: If we use above RTE_ETH_RSS_* without SRC/DST_ONLY, it represents
562  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
563  * the same level are used simultaneously, it is the same case as none of
564  * them are added.
565  */
566 #define RTE_ETH_RSS_L3_SRC_ONLY RTE_BIT64(63)
567 #define RTE_ETH_RSS_L3_DST_ONLY RTE_BIT64(62)
568 #define RTE_ETH_RSS_L4_SRC_ONLY RTE_BIT64(61)
569 #define RTE_ETH_RSS_L4_DST_ONLY RTE_BIT64(60)
570 #define RTE_ETH_RSS_L2_SRC_ONLY RTE_BIT64(59)
571 #define RTE_ETH_RSS_L2_DST_ONLY RTE_BIT64(58)
572 
573 /*
574  * Only select IPV6 address prefix as RSS input set according to
575  * https://tools.ietf.org/html/rfc6052
576  * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
577  * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
578  */
579 #define RTE_ETH_RSS_L3_PRE32 RTE_BIT64(57)
580 #define RTE_ETH_RSS_L3_PRE40 RTE_BIT64(56)
581 #define RTE_ETH_RSS_L3_PRE48 RTE_BIT64(55)
582 #define RTE_ETH_RSS_L3_PRE56 RTE_BIT64(54)
583 #define RTE_ETH_RSS_L3_PRE64 RTE_BIT64(53)
584 #define RTE_ETH_RSS_L3_PRE96 RTE_BIT64(52)
585 
586 /*
587  * Use the following macros to combine with the above layers
588  * to choose inner and outer layers or both for RSS computation.
589  * Bits 50 and 51 are reserved for this.
590  */
591 
599 #define RTE_ETH_RSS_LEVEL_PMD_DEFAULT (UINT64_C(0) << 50)
600 
605 #define RTE_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
606 
611 #define RTE_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
612 #define RTE_ETH_RSS_LEVEL_MASK (UINT64_C(3) << 50)
613 
614 #define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
615 
626 static inline uint64_t
627 rte_eth_rss_hf_refine(uint64_t rss_hf)
628 {
629  if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
630  rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
631 
632  if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
633  rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
634 
635  return rss_hf;
636 }
637 
638 #define RTE_ETH_RSS_IPV6_PRE32 ( \
639  RTE_ETH_RSS_IPV6 | \
640  RTE_ETH_RSS_L3_PRE32)
641 
642 #define RTE_ETH_RSS_IPV6_PRE40 ( \
643  RTE_ETH_RSS_IPV6 | \
644  RTE_ETH_RSS_L3_PRE40)
645 
646 #define RTE_ETH_RSS_IPV6_PRE48 ( \
647  RTE_ETH_RSS_IPV6 | \
648  RTE_ETH_RSS_L3_PRE48)
649 
650 #define RTE_ETH_RSS_IPV6_PRE56 ( \
651  RTE_ETH_RSS_IPV6 | \
652  RTE_ETH_RSS_L3_PRE56)
653 
654 #define RTE_ETH_RSS_IPV6_PRE64 ( \
655  RTE_ETH_RSS_IPV6 | \
656  RTE_ETH_RSS_L3_PRE64)
657 
658 #define RTE_ETH_RSS_IPV6_PRE96 ( \
659  RTE_ETH_RSS_IPV6 | \
660  RTE_ETH_RSS_L3_PRE96)
661 
662 #define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
663  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
664  RTE_ETH_RSS_L3_PRE32)
665 
666 #define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
667  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
668  RTE_ETH_RSS_L3_PRE40)
669 
670 #define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
671  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
672  RTE_ETH_RSS_L3_PRE48)
673 
674 #define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
675  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
676  RTE_ETH_RSS_L3_PRE56)
677 
678 #define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
679  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
680  RTE_ETH_RSS_L3_PRE64)
681 
682 #define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
683  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
684  RTE_ETH_RSS_L3_PRE96)
685 
686 #define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
687  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
688  RTE_ETH_RSS_L3_PRE32)
689 
690 #define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
691  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
692  RTE_ETH_RSS_L3_PRE40)
693 
694 #define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
695  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
696  RTE_ETH_RSS_L3_PRE48)
697 
698 #define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
699  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
700  RTE_ETH_RSS_L3_PRE56)
701 
702 #define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
703  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
704  RTE_ETH_RSS_L3_PRE64)
705 
706 #define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
707  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
708  RTE_ETH_RSS_L3_PRE96)
709 
710 #define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
711  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
712  RTE_ETH_RSS_L3_PRE32)
713 
714 #define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
715  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
716  RTE_ETH_RSS_L3_PRE40)
717 
718 #define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
719  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
720  RTE_ETH_RSS_L3_PRE48)
721 
722 #define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
723  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
724  RTE_ETH_RSS_L3_PRE56)
725 
726 #define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
727  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
728  RTE_ETH_RSS_L3_PRE64)
729 
730 #define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
731  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
732  RTE_ETH_RSS_L3_PRE96)
733 
734 #define RTE_ETH_RSS_IP ( \
735  RTE_ETH_RSS_IPV4 | \
736  RTE_ETH_RSS_FRAG_IPV4 | \
737  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
738  RTE_ETH_RSS_IPV6 | \
739  RTE_ETH_RSS_FRAG_IPV6 | \
740  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
741  RTE_ETH_RSS_IPV6_EX)
742 
743 #define RTE_ETH_RSS_UDP ( \
744  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
745  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
746  RTE_ETH_RSS_IPV6_UDP_EX)
747 
748 #define RTE_ETH_RSS_TCP ( \
749  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
750  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
751  RTE_ETH_RSS_IPV6_TCP_EX)
752 
753 #define RTE_ETH_RSS_SCTP ( \
754  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
755  RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
756 
757 #define RTE_ETH_RSS_TUNNEL ( \
758  RTE_ETH_RSS_VXLAN | \
759  RTE_ETH_RSS_GENEVE | \
760  RTE_ETH_RSS_NVGRE)
761 
762 #define RTE_ETH_RSS_VLAN ( \
763  RTE_ETH_RSS_S_VLAN | \
764  RTE_ETH_RSS_C_VLAN)
765 
/*
 * Mask of every protocol-selection RSS offload flag defined above
 * (flow-type bits only — excludes the SRC/DST_ONLY, L3_PRE* and LEVEL
 * modifier bits at the high end of the 64-bit word).
 */
767 #define RTE_ETH_RSS_PROTO_MASK ( \
768  RTE_ETH_RSS_IPV4 | \
769  RTE_ETH_RSS_FRAG_IPV4 | \
770  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
771  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
772  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
773  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
774  RTE_ETH_RSS_IPV6 | \
775  RTE_ETH_RSS_FRAG_IPV6 | \
776  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
777  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
778  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
779  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
780  RTE_ETH_RSS_L2_PAYLOAD | \
781  RTE_ETH_RSS_IPV6_EX | \
782  RTE_ETH_RSS_IPV6_TCP_EX | \
783  RTE_ETH_RSS_IPV6_UDP_EX | \
784  RTE_ETH_RSS_PORT | \
785  RTE_ETH_RSS_VXLAN | \
786  RTE_ETH_RSS_GENEVE | \
787  RTE_ETH_RSS_NVGRE | \
788  RTE_ETH_RSS_MPLS)
789 
790 /*
791  * Definitions used for redirection table entry size.
792  * Some RSS RETA sizes may not be supported by some drivers, check the
793  * documentation or the description of relevant functions for more details.
794  */
795 #define RTE_ETH_RSS_RETA_SIZE_64 64
796 #define RTE_ETH_RSS_RETA_SIZE_128 128
797 #define RTE_ETH_RSS_RETA_SIZE_256 256
798 #define RTE_ETH_RSS_RETA_SIZE_512 512
799 #define RTE_ETH_RETA_GROUP_SIZE 64
800 
802 #define RTE_ETH_VMDQ_MAX_VLAN_FILTERS 64
803 #define RTE_ETH_DCB_NUM_USER_PRIORITIES 8
804 #define RTE_ETH_VMDQ_DCB_NUM_QUEUES 128
805 #define RTE_ETH_DCB_NUM_QUEUES 128
809 #define RTE_ETH_DCB_PG_SUPPORT RTE_BIT32(0)
810 #define RTE_ETH_DCB_PFC_SUPPORT RTE_BIT32(1)
814 #define RTE_ETH_VLAN_STRIP_OFFLOAD 0x0001
815 #define RTE_ETH_VLAN_FILTER_OFFLOAD 0x0002
816 #define RTE_ETH_VLAN_EXTEND_OFFLOAD 0x0004
817 #define RTE_ETH_QINQ_STRIP_OFFLOAD 0x0008
819 #define RTE_ETH_VLAN_STRIP_MASK 0x0001
820 #define RTE_ETH_VLAN_FILTER_MASK 0x0002
821 #define RTE_ETH_VLAN_EXTEND_MASK 0x0004
822 #define RTE_ETH_QINQ_STRIP_MASK 0x0008
823 #define RTE_ETH_VLAN_ID_MAX 0x0FFF
826 /* Definitions used for receive MAC address */
827 #define RTE_ETH_NUM_RECEIVE_MAC_ADDR 128
829 /* Definitions used for unicast hash */
830 #define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128
836 #define RTE_ETH_VMDQ_ACCEPT_UNTAG RTE_BIT32(0)
837 
838 #define RTE_ETH_VMDQ_ACCEPT_HASH_MC RTE_BIT32(1)
839 
840 #define RTE_ETH_VMDQ_ACCEPT_HASH_UC RTE_BIT32(2)
841 
842 #define RTE_ETH_VMDQ_ACCEPT_BROADCAST RTE_BIT32(3)
843 
844 #define RTE_ETH_VMDQ_ACCEPT_MULTICAST RTE_BIT32(4)
845 
855  uint64_t mask;
857  uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
858 };
859 
867 };
868 
878 };
879 
880 /* This structure may be extended in future. */
881 struct rte_eth_dcb_rx_conf {
882  enum rte_eth_nb_tcs nb_tcs;
884  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
885 };
886 
887 struct rte_eth_vmdq_dcb_tx_conf {
888  enum rte_eth_nb_pools nb_queue_pools;
890  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
891 };
892 
893 struct rte_eth_dcb_tx_conf {
894  enum rte_eth_nb_tcs nb_tcs;
896  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
897 };
898 
899 struct rte_eth_vmdq_tx_conf {
900  enum rte_eth_nb_pools nb_queue_pools;
901 };
902 
915  enum rte_eth_nb_pools nb_queue_pools;
917  uint8_t default_pool;
918  uint8_t nb_pool_maps;
919  struct {
920  uint16_t vlan_id;
921  uint64_t pools;
922  } pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS];
925 };
926 
946  enum rte_eth_nb_pools nb_queue_pools;
948  uint8_t default_pool;
950  uint8_t nb_pool_maps;
951  uint32_t rx_mode;
952  struct {
953  uint16_t vlan_id;
954  uint64_t pools;
955  } pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS];
956 };
957 
962  enum rte_eth_tx_mq_mode mq_mode;
968  uint64_t offloads;
969 
970  uint16_t pvid;
971  __extension__
972  uint8_t
973  hw_vlan_reject_tagged : 1,
977  hw_vlan_insert_pvid : 1;
978 
979  uint64_t reserved_64s[2];
980  void *reserved_ptrs[2];
981 };
982 
1044  struct rte_mempool *mp;
1045  uint16_t length;
1046  uint16_t offset;
1058  uint32_t proto_hdr;
1059 };
1060 
1068  /* The settings for buffer split offload. */
1069  struct rte_eth_rxseg_split split;
1070  /* The other features settings should be added here. */
1071 };
1072 
1077  struct rte_eth_thresh rx_thresh;
1078  uint16_t rx_free_thresh;
1079  uint8_t rx_drop_en;
1081  uint16_t rx_nseg;
1088  uint16_t share_group;
1089  uint16_t share_qid;
1095  uint64_t offloads;
1104 
1125  uint16_t rx_nmempool;
1127  uint64_t reserved_64s[2];
1128  void *reserved_ptrs[2];
1129 };
1130 
1135  struct rte_eth_thresh tx_thresh;
1136  uint16_t tx_rs_thresh;
1137  uint16_t tx_free_thresh;
1146  uint64_t offloads;
1147 
1148  uint64_t reserved_64s[2];
1149  void *reserved_ptrs[2];
1150 };
1151 
1164 
1169  uint32_t rte_memory:1;
1170 
1171  uint32_t reserved:30;
1172 };
1173 
1182  uint16_t max_nb_queues;
1184  uint16_t max_rx_2_tx;
1186  uint16_t max_tx_2_rx;
1187  uint16_t max_nb_desc;
1190 };
1191 
1192 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
1193 
1201  uint16_t port;
1202  uint16_t queue;
1203 };
1204 
1212  uint32_t peer_count:16;
1223  uint32_t tx_explicit:1;
1224 
1236  uint32_t manual_bind:1;
1237 
1250 
1262  uint32_t use_rte_memory:1;
1263 
1274  uint32_t force_memory:1;
1275 
1276  uint32_t reserved:11;
1278  struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1279 };
1280 
1285  uint16_t nb_max;
1286  uint16_t nb_min;
1287  uint16_t nb_align;
1297  uint16_t nb_seg_max;
1298 
1310  uint16_t nb_mtu_seg_max;
1311 };
1312 
1321 };
1322 
1329  uint32_t high_water;
1330  uint32_t low_water;
1331  uint16_t pause_time;
1332  uint16_t send_xon;
1333  enum rte_eth_fc_mode mode;
1335  uint8_t autoneg;
1336 };
1337 
1344  struct rte_eth_fc_conf fc;
1345  uint8_t priority;
1346 };
1347 
1358  uint8_t tc_max;
1360  enum rte_eth_fc_mode mode_capa;
1361 };
1362 
1381  enum rte_eth_fc_mode mode;
1383  struct {
1384  uint16_t tx_qid;
1388  uint8_t tc;
1389  } rx_pause; /* Valid when (mode == FC_RX_PAUSE || mode == FC_FULL) */
1390 
1391  struct {
1392  uint16_t pause_time;
1393  uint16_t rx_qid;
1397  uint8_t tc;
1398  } tx_pause; /* Valid when (mode == FC_TX_PAUSE || mode == FC_FULL) */
1399 };
1400 
1406  RTE_ETH_TUNNEL_TYPE_NONE = 0,
1407  RTE_ETH_TUNNEL_TYPE_VXLAN,
1408  RTE_ETH_TUNNEL_TYPE_GENEVE,
1409  RTE_ETH_TUNNEL_TYPE_TEREDO,
1410  RTE_ETH_TUNNEL_TYPE_NVGRE,
1411  RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1412  RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1413  RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1414  RTE_ETH_TUNNEL_TYPE_ECPRI,
1415  RTE_ETH_TUNNEL_TYPE_MAX,
1416 };
1417 
1418 /* Deprecated API file for rte_eth_dev_filter_* functions */
1419 #include "rte_eth_ctrl.h"
1420 
1431  uint16_t udp_port;
1432  uint8_t prot_type;
1433 };
1434 
1440  uint32_t lsc:1;
1442  uint32_t rxq:1;
1444  uint32_t rmv:1;
1445 };
1446 
1447 #define rte_intr_conf rte_eth_intr_conf
1448 
1455  uint32_t link_speeds;
1462  struct rte_eth_rxmode rxmode;
1463  struct rte_eth_txmode txmode;
1464  uint32_t lpbk_mode;
1469  struct {
1470  struct rte_eth_rss_conf rss_conf;
1472  struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
1474  struct rte_eth_dcb_rx_conf dcb_rx_conf;
1476  struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
1477  } rx_adv_conf;
1478  union {
1480  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1482  struct rte_eth_dcb_tx_conf dcb_tx_conf;
1484  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1485  } tx_adv_conf;
1489  struct rte_eth_intr_conf intr_conf;
1490 };
1491 
1495 #define RTE_ETH_RX_OFFLOAD_VLAN_STRIP RTE_BIT64(0)
1496 #define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1497 #define RTE_ETH_RX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1498 #define RTE_ETH_RX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1499 #define RTE_ETH_RX_OFFLOAD_TCP_LRO RTE_BIT64(4)
1500 #define RTE_ETH_RX_OFFLOAD_QINQ_STRIP RTE_BIT64(5)
1501 #define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1502 #define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP RTE_BIT64(7)
1503 #define RTE_ETH_RX_OFFLOAD_VLAN_FILTER RTE_BIT64(9)
1504 #define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND RTE_BIT64(10)
1505 #define RTE_ETH_RX_OFFLOAD_SCATTER RTE_BIT64(13)
1506 
1511 #define RTE_ETH_RX_OFFLOAD_TIMESTAMP RTE_BIT64(14)
1512 #define RTE_ETH_RX_OFFLOAD_SECURITY RTE_BIT64(15)
1513 #define RTE_ETH_RX_OFFLOAD_KEEP_CRC RTE_BIT64(16)
1514 #define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM RTE_BIT64(17)
1515 #define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(18)
1516 #define RTE_ETH_RX_OFFLOAD_RSS_HASH RTE_BIT64(19)
1517 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT RTE_BIT64(20)
1518 
/* Convenience group: all L3/L4 Rx checksum offloads (IPv4 + UDP + TCP). */
1519 #define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1520  RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1521  RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
/* Convenience group: all VLAN-related Rx offloads (strip/filter/extend/QinQ). */
1522 #define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1523  RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1524  RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1525  RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1526 
1527 /*
1528  * If new Rx offload capabilities are defined, they also must be
1529  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1530  */
1531 
1535 #define RTE_ETH_TX_OFFLOAD_VLAN_INSERT RTE_BIT64(0)
1536 #define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1537 #define RTE_ETH_TX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1538 #define RTE_ETH_TX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1539 #define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM RTE_BIT64(4)
1540 #define RTE_ETH_TX_OFFLOAD_TCP_TSO RTE_BIT64(5)
1541 #define RTE_ETH_TX_OFFLOAD_UDP_TSO RTE_BIT64(6)
1542 #define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)
1543 #define RTE_ETH_TX_OFFLOAD_QINQ_INSERT RTE_BIT64(8)
1544 #define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO RTE_BIT64(9)
1545 #define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO RTE_BIT64(10)
1546 #define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO RTE_BIT64(11)
1547 #define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO RTE_BIT64(12)
1548 #define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT RTE_BIT64(13)
1549 
1553 #define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE RTE_BIT64(14)
1554 
1555 #define RTE_ETH_TX_OFFLOAD_MULTI_SEGS RTE_BIT64(15)
1556 
1561 #define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE RTE_BIT64(16)
1562 #define RTE_ETH_TX_OFFLOAD_SECURITY RTE_BIT64(17)
1563 
1568 #define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO RTE_BIT64(18)
1569 
1574 #define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO RTE_BIT64(19)
1575 
1576 #define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(20)
1577 
1582 #define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1583 /*
1584  * If new Tx offload capabilities are defined, they also must be
1585  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1586  */
1587 
1592 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1593 
1594 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1595 
1604 #define RTE_ETH_DEV_CAPA_RXQ_SHARE RTE_BIT64(2)
1605 
1606 #define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP RTE_BIT64(3)
1607 
1608 #define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1609 
1611 /*
1612  * Fallback default preferred Rx/Tx port parameters.
1613  * These are used if an application requests default parameters
1614  * but the PMD does not provide preferred values.
1615  */
1616 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1617 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1618 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1619 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1620 
1627  uint16_t burst_size;
1628  uint16_t ring_size;
1629  uint16_t nb_queues;
1630 };
1631 
1636 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1637 
1642  const char *name;
1643  uint16_t domain_id;
1651  uint16_t port_id;
1657  uint16_t rx_domain;
1658 };
1659 
1667  __extension__
1668  uint32_t multi_pools:1;
1669  uint32_t offset_allowed:1;
1670  uint32_t offset_align_log2:4;
1671  uint16_t max_nseg;
1672  uint16_t reserved;
1673 };
1674 
1687 };
1688 
1709 };
1710 
1717  struct rte_device *device;
1718  const char *driver_name;
1719  unsigned int if_index;
1721  uint16_t min_mtu;
1722  uint16_t max_mtu;
1723  const uint32_t *dev_flags;
1724  uint32_t min_rx_bufsize;
1725  uint32_t max_rx_pktlen;
1728  uint16_t max_rx_queues;
1729  uint16_t max_tx_queues;
1730  uint32_t max_mac_addrs;
1733  uint16_t max_vfs;
1734  uint16_t max_vmdq_pools;
1735  struct rte_eth_rxseg_capa rx_seg_capa;
1745  uint16_t reta_size;
1746  uint8_t hash_key_size;
1749  struct rte_eth_rxconf default_rxconf;
1750  struct rte_eth_txconf default_txconf;
1751  uint16_t vmdq_queue_base;
1752  uint16_t vmdq_queue_num;
1753  uint16_t vmdq_pool_base;
1754  struct rte_eth_desc_lim rx_desc_lim;
1755  struct rte_eth_desc_lim tx_desc_lim;
1756  uint32_t speed_capa;
1758  uint16_t nb_rx_queues;
1759  uint16_t nb_tx_queues;
1768  struct rte_eth_dev_portconf default_rxportconf;
1770  struct rte_eth_dev_portconf default_txportconf;
1772  uint64_t dev_capa;
1777  struct rte_eth_switch_info switch_info;
1779  enum rte_eth_err_handle_mode err_handle_mode;
1780 
1781  uint64_t reserved_64s[2];
1782  void *reserved_ptrs[2];
1783 };
1784 
1786 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1787 #define RTE_ETH_QUEUE_STATE_STARTED 1
1788 #define RTE_ETH_QUEUE_STATE_HAIRPIN 2
1795 struct rte_eth_rxq_info {
1796  struct rte_mempool *mp;
1797  struct rte_eth_rxconf conf;
1798  uint8_t scattered_rx;
1799  uint8_t queue_state;
1800  uint16_t nb_desc;
1801  uint16_t rx_buf_size;
1808  uint8_t avail_thresh;
1810 
1816  struct rte_eth_txconf conf;
1817  uint16_t nb_desc;
1818  uint8_t queue_state;
1820 
1821 /* Generic Burst mode flag definition, values can be ORed. */
1822 
1828 #define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1829 
1835  uint64_t flags;
1837 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1838  char info[RTE_ETH_BURST_MODE_INFO_SIZE];
1839 };
1840 
1842 #define RTE_ETH_XSTATS_NAME_SIZE 64
1843 
1854  uint64_t id;
1855  uint64_t value;
1856 };
1857 
1874 };
1875 
1876 #define RTE_ETH_DCB_NUM_TCS 8
1877 #define RTE_ETH_MAX_VMDQ_POOL 64
1878 
1885  struct {
1886  uint16_t base;
1887  uint16_t nb_queue;
1888  } tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1890  struct {
1891  uint16_t base;
1892  uint16_t nb_queue;
1893  } tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1894 };
1895 
1901  uint8_t nb_tcs;
1903  uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS];
1906 };
1907 
1917 };
1918 
1919 /* Translate from FEC mode to FEC capa */
1920 #define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)
1921 
1922 /* This macro indicates FEC capa mask */
1923 #define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)
1924 
1925 /* A structure used to get capabilities per link speed */
1926 struct rte_eth_fec_capa {
1927  uint32_t speed;
1928  uint32_t capa;
1929 };
1930 
1931 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
1932 
1933 /* Macros to check for valid port */
/*
 * Guard macro: if `port_id` is not a valid ethdev port, log an error and
 * return `retval` from the enclosing function. For use in functions that
 * return a value; see RTE_ETH_VALID_PORTID_OR_RET for void functions.
 */
1934 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1935  if (!rte_eth_dev_is_valid_port(port_id)) { \
1936  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1937  return retval; \
1938  } \
1939 } while (0)
1940 
/*
 * Guard macro: if `port_id` is not a valid ethdev port, log an error and
 * return from the enclosing void function.
 */
1941 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1942  if (!rte_eth_dev_is_valid_port(port_id)) { \
1943  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1944  return; \
1945  } \
1946 } while (0)
1947 
1970 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
1971  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1972  void *user_param);
1973 
1994 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
1995  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1996 
2007 };
2008 
2009 struct rte_eth_dev_sriov {
2010  uint8_t active;
2011  uint8_t nb_q_per_pool;
2012  uint16_t def_vmdq_idx;
2013  uint16_t def_pool_q_idx;
2014 };
2015 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
2016 
2017 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2018 
2019 #define RTE_ETH_DEV_NO_OWNER 0
2020 
2021 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
2022 
2023 struct rte_eth_dev_owner {
2024  uint64_t id;
2025  char name[RTE_ETH_MAX_OWNER_NAME_LEN];
2026 };
2027 
2033 #define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE RTE_BIT32(0)
2034 
2035 #define RTE_ETH_DEV_INTR_LSC RTE_BIT32(1)
2036 
2037 #define RTE_ETH_DEV_BONDED_SLAVE RTE_BIT32(2)
2038 
2039 #define RTE_ETH_DEV_INTR_RMV RTE_BIT32(3)
2040 
2041 #define RTE_ETH_DEV_REPRESENTOR RTE_BIT32(4)
2042 
2043 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR RTE_BIT32(5)
2044 
2048 #define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2049 
2062 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2063  const uint64_t owner_id);
2064 
/*
 * Iterate over all valid port ids `p` owned by owner id `o`, using
 * rte_eth_find_next_owned_by() declared above. The unsigned casts in the
 * loop condition keep the comparison well-defined if `p` is declared as a
 * signed type by the caller.
 */
2068 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
2069  for (p = rte_eth_find_next_owned_by(0, o); \
2070  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
2071  p = rte_eth_find_next_owned_by(p + 1, o))
2072 
2081 uint16_t rte_eth_find_next(uint16_t port_id);
2082 
2086 #define RTE_ETH_FOREACH_DEV(p) \
2087  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
2088 
2100 uint16_t
2101 rte_eth_find_next_of(uint16_t port_id_start,
2102  const struct rte_device *parent);
2103 
/*
 * Iterate over all valid port ids belonging to the rte_device `parent`,
 * using rte_eth_find_next_of() declared above; the loop ends once
 * RTE_MAX_ETHPORTS is returned (no further match).
 */
2112 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
2113  for (port_id = rte_eth_find_next_of(0, parent); \
2114  port_id < RTE_MAX_ETHPORTS; \
2115  port_id = rte_eth_find_next_of(port_id + 1, parent))
2116 
2128 uint16_t
2129 rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2130 
/*
 * Iterate over all valid port ids sharing the same underlying device as
 * `ref_port_id`, via rte_eth_find_next_sibling() declared above.
 * NOTE(review): whether `ref_port_id` itself is included in the iteration
 * depends on rte_eth_find_next_sibling() — not visible here; confirm.
 */
2141 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
2142  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
2143  port_id < RTE_MAX_ETHPORTS; \
2144  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2145 
2156 int rte_eth_dev_owner_new(uint64_t *owner_id);
2157 
2168 int rte_eth_dev_owner_set(const uint16_t port_id,
2169  const struct rte_eth_dev_owner *owner);
2170 
2181 int rte_eth_dev_owner_unset(const uint16_t port_id,
2182  const uint64_t owner_id);
2183 
2192 int rte_eth_dev_owner_delete(const uint64_t owner_id);
2193 
2204 int rte_eth_dev_owner_get(const uint16_t port_id,
2205  struct rte_eth_dev_owner *owner);
2206 
2217 uint16_t rte_eth_dev_count_avail(void);
2218 
2227 uint16_t rte_eth_dev_count_total(void);
2228 
2240 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2241 
2250 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2251 
2260 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2261 
2273 __rte_experimental
2274 const char *rte_eth_dev_capability_name(uint64_t capability);
2275 
2315 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2316  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2317 
2326 int
2327 rte_eth_dev_is_removed(uint16_t port_id);
2328 
2391 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2392  uint16_t nb_rx_desc, unsigned int socket_id,
2393  const struct rte_eth_rxconf *rx_conf,
2394  struct rte_mempool *mb_pool);
2395 
2423 __rte_experimental
2425  (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2426  const struct rte_eth_hairpin_conf *conf);
2427 
2476 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2477  uint16_t nb_tx_desc, unsigned int socket_id,
2478  const struct rte_eth_txconf *tx_conf);
2479 
2505 __rte_experimental
2507  (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2508  const struct rte_eth_hairpin_conf *conf);
2509 
2536 __rte_experimental
2537 int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2538  size_t len, uint32_t direction);
2539 
2562 __rte_experimental
2563 int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2564 
2589 __rte_experimental
2590 int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2591 
2604 int rte_eth_dev_socket_id(uint16_t port_id);
2605 
2615 int rte_eth_dev_is_valid_port(uint16_t port_id);
2616 
2634 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2635 
2652 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2653 
2671 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2672 
2689 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2690 
2711 int rte_eth_dev_start(uint16_t port_id);
2712 
2724 int rte_eth_dev_stop(uint16_t port_id);
2725 
2738 int rte_eth_dev_set_link_up(uint16_t port_id);
2739 
2749 int rte_eth_dev_set_link_down(uint16_t port_id);
2750 
2761 int rte_eth_dev_close(uint16_t port_id);
2762 
2800 int rte_eth_dev_reset(uint16_t port_id);
2801 
2813 int rte_eth_promiscuous_enable(uint16_t port_id);
2814 
2826 int rte_eth_promiscuous_disable(uint16_t port_id);
2827 
2838 int rte_eth_promiscuous_get(uint16_t port_id);
2839 
2851 int rte_eth_allmulticast_enable(uint16_t port_id);
2852 
2864 int rte_eth_allmulticast_disable(uint16_t port_id);
2865 
2876 int rte_eth_allmulticast_get(uint16_t port_id);
2877 
2895 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
2896 
2911 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
2912 
2926 __rte_experimental
2927 const char *rte_eth_link_speed_to_str(uint32_t link_speed);
2928 
2947 __rte_experimental
2948 int rte_eth_link_to_str(char *str, size_t len,
2949  const struct rte_eth_link *eth_link);
2950 
2968 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
2969 
2981 int rte_eth_stats_reset(uint16_t port_id);
2982 
3012 int rte_eth_xstats_get_names(uint16_t port_id,
3013  struct rte_eth_xstat_name *xstats_names,
3014  unsigned int size);
3015 
3049 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3050  unsigned int n);
3051 
3076 int
3077 rte_eth_xstats_get_names_by_id(uint16_t port_id,
3078  struct rte_eth_xstat_name *xstats_names, unsigned int size,
3079  uint64_t *ids);
3080 
3105 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3106  uint64_t *values, unsigned int size);
3107 
3127 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3128  uint64_t *id);
3129 
3142 int rte_eth_xstats_reset(uint16_t port_id);
3143 
3162 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
3163  uint16_t tx_queue_id, uint8_t stat_idx);
3164 
3183 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
3184  uint16_t rx_queue_id,
3185  uint8_t stat_idx);
3186 
3200 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3201 
3222 __rte_experimental
3223 int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3224  unsigned int num);
3225 
3269 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
3270 
3286 __rte_experimental
3287 int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf);
3288 
3309 int rte_eth_dev_fw_version_get(uint16_t port_id,
3310  char *fw_version, size_t fw_size);
3311 
3351 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3352  uint32_t *ptypes, int num);
3383 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3384  uint32_t *set_ptypes, unsigned int num);
3385 
3398 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3399 
3417 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3418 
3438 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3439 
3458 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3459  int on);
3460 
3477 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3478  enum rte_vlan_type vlan_type,
3479  uint16_t tag_type);
3480 
3498 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3499 
3513 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3514 
3529 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3530 
3556 __rte_experimental
3557 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
3558  uint8_t avail_thresh);
3559 
3586 __rte_experimental
3587 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
3588  uint8_t *avail_thresh);
3589 
3590 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3591  void *userdata);
3592 
3598  buffer_tx_error_fn error_callback;
3599  void *error_userdata;
3600  uint16_t size;
3601  uint16_t length;
3603  struct rte_mbuf *pkts[];
3604 };
3605 
3612 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3613  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3614 
3625 int
3626 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3627 
3652 int
3654  buffer_tx_error_fn callback, void *userdata);
3655 
3678 void
3679 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3680  void *userdata);
3681 
3705 void
3706 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3707  void *userdata);
3708 
3734 int
3735 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3736 
3769 };
3770 
3790 };
3791 
3810  uint64_t metadata;
3811 };
3812 
3846 };
3847 
3872  uint64_t metadata;
3873 };
3874 
3951 };
3952 
3954 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
3955  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
3956 
3974 int rte_eth_dev_callback_register(uint16_t port_id,
3975  enum rte_eth_event_type event,
3976  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3977 
3996 int rte_eth_dev_callback_unregister(uint16_t port_id,
3997  enum rte_eth_event_type event,
3998  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3999 
4021 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
4022 
4043 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
4044 
4062 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
4063 
4085 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4086  int epfd, int op, void *data);
4087 
4102 int
4103 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
4104 
4118 int rte_eth_led_on(uint16_t port_id);
4119 
4133 int rte_eth_led_off(uint16_t port_id);
4134 
4163 __rte_experimental
4164 int rte_eth_fec_get_capability(uint16_t port_id,
4165  struct rte_eth_fec_capa *speed_fec_capa,
4166  unsigned int num);
4167 
4191 __rte_experimental
4192 int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4193 
4214 __rte_experimental
4215 int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
4216 
4231 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4232  struct rte_eth_fc_conf *fc_conf);
4233 
4248 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4249  struct rte_eth_fc_conf *fc_conf);
4250 
4266 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
4267  struct rte_eth_pfc_conf *pfc_conf);
4268 
4287 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4288  uint32_t pool);
4289 
4307 __rte_experimental
4309  struct rte_eth_pfc_queue_info *pfc_queue_info);
4310 
4334 __rte_experimental
4336  struct rte_eth_pfc_queue_conf *pfc_queue_conf);
4337 
4352 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4353  struct rte_ether_addr *mac_addr);
4354 
4368 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
4369  struct rte_ether_addr *mac_addr);
4370 
4388 int rte_eth_dev_rss_reta_update(uint16_t port_id,
4389  struct rte_eth_rss_reta_entry64 *reta_conf,
4390  uint16_t reta_size);
4391 
4410 int rte_eth_dev_rss_reta_query(uint16_t port_id,
4411  struct rte_eth_rss_reta_entry64 *reta_conf,
4412  uint16_t reta_size);
4413 
4433 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4434  uint8_t on);
4435 
4454 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4455 
4472 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4473  uint32_t tx_rate);
4474 
4489 int rte_eth_dev_rss_hash_update(uint16_t port_id,
4490  struct rte_eth_rss_conf *rss_conf);
4491 
4507 int
4508 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4509  struct rte_eth_rss_conf *rss_conf);
4510 
4535 int
4536 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4537  struct rte_eth_udp_tunnel *tunnel_udp);
4538 
4558 int
4559 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4560  struct rte_eth_udp_tunnel *tunnel_udp);
4561 
4576 int rte_eth_dev_get_dcb_info(uint16_t port_id,
4577  struct rte_eth_dcb_info *dcb_info);
4578 
4579 struct rte_eth_rxtx_callback;
4580 
4606 const struct rte_eth_rxtx_callback *
4607 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4608  rte_rx_callback_fn fn, void *user_param);
4609 
4636 const struct rte_eth_rxtx_callback *
4637 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4638  rte_rx_callback_fn fn, void *user_param);
4639 
4665 const struct rte_eth_rxtx_callback *
4666 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4667  rte_tx_callback_fn fn, void *user_param);
4668 
4702 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4703  const struct rte_eth_rxtx_callback *user_cb);
4704 
4738 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4739  const struct rte_eth_rxtx_callback *user_cb);
4740 
4760 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4761  struct rte_eth_rxq_info *qinfo);
4762 
4782 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4783  struct rte_eth_txq_info *qinfo);
4784 
4803 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4804  struct rte_eth_burst_mode *mode);
4805 
4824 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4825  struct rte_eth_burst_mode *mode);
4826 
4847 __rte_experimental
4848 int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
4849  struct rte_power_monitor_cond *pmc);
4850 
4869 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
4870 
4883 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
4884 
4901 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4902 
4919 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4920 
4939 __rte_experimental
4940 int
4941 rte_eth_dev_get_module_info(uint16_t port_id,
4942  struct rte_eth_dev_module_info *modinfo);
4943 
4963 __rte_experimental
4964 int
4965 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4966  struct rte_dev_eeprom_info *info);
4967 
4987 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4988  struct rte_ether_addr *mc_addr_set,
4989  uint32_t nb_mc_addr);
4990 
5003 int rte_eth_timesync_enable(uint16_t port_id);
5004 
5017 int rte_eth_timesync_disable(uint16_t port_id);
5018 
5037 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
5038  struct timespec *timestamp, uint32_t flags);
5039 
5055 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5056  struct timespec *timestamp);
5057 
5075 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
5076 
5092 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
5093 
5112 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
5113 
5159 __rte_experimental
5160 int
5161 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
5162 
5178 int
5179 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
5180 
5196 int
5197 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5198 
5215 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5216  uint16_t *nb_rx_desc,
5217  uint16_t *nb_tx_desc);
5218 
5233 int
5234 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5235 
5245 void *
5246 rte_eth_dev_get_sec_ctx(uint16_t port_id);
5247 
5263 __rte_experimental
5264 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5265  struct rte_eth_hairpin_cap *cap);
5266 
5276  int pf;
5277  __extension__
5278  union {
5279  int vf;
5280  int sf;
5281  };
5282  uint32_t id_base;
5283  uint32_t id_end;
5284  char name[RTE_DEV_NAME_MAX_LEN];
5285 };
5286 
5294  uint16_t controller;
5295  uint16_t pf;
5296  uint32_t nb_ranges_alloc;
5297  uint32_t nb_ranges;
5298  struct rte_eth_representor_range ranges[];
5299 };
5300 
5324 __rte_experimental
5325 int rte_eth_representor_info_get(uint16_t port_id,
5326  struct rte_eth_representor_info *info);
5327 
5329 #define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5330 
5332 #define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5333 
5335 #define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5336 
5376 int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
5377 
5379 #define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
5380 
5381 #define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))
5382 
5393  uint32_t timeout_ms;
5395  uint16_t max_frags;
5400  uint16_t flags;
5401 };
5402 
5423 __rte_experimental
5424 int rte_eth_ip_reassembly_capability_get(uint16_t port_id,
5425  struct rte_eth_ip_reassembly_params *capa);
5426 
5448 __rte_experimental
5449 int rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5450  struct rte_eth_ip_reassembly_params *conf);
5451 
5481 __rte_experimental
5482 int rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5483  const struct rte_eth_ip_reassembly_params *conf);
5484 
5492 typedef struct {
5499  uint16_t time_spent;
5501  uint16_t nb_frags;
5503 
5522 __rte_experimental
5523 int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file);
5524 
5548 __rte_experimental
5549 int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5550  uint16_t offset, uint16_t num, FILE *file);
5551 
5575 __rte_experimental
5576 int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5577  uint16_t offset, uint16_t num, FILE *file);
5578 
5579 
5580 /* Congestion management */
5581 
5591 };
5592 
5609  uint64_t objs_supported;
5614  uint8_t rsvd[8];
5615 };
5616 
5627  enum rte_cman_mode mode;
5628  union {
5635  uint16_t rx_queue;
5642  uint8_t rsvd_obj_params[4];
5643  } obj_param;
5644  union {
5657  uint8_t rsvd_mode_params[4];
5658  } mode_param;
5659 };
5660 
5678 __rte_experimental
5679 int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info);
5680 
5698 __rte_experimental
5699 int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config);
5700 
5717 __rte_experimental
5718 int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config);
5719 
5740 __rte_experimental
5741 int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config);
5742 
5743 #include <rte_ethdev_core.h>
5744 
5768 uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
5769  struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
5770  void *opaque);
5771 
5859 static inline uint16_t
5860 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
5861  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
5862 {
5863  uint16_t nb_rx;
5864  struct rte_eth_fp_ops *p;
5865  void *qd;
5866 
5867 #ifdef RTE_ETHDEV_DEBUG_RX
5868  if (port_id >= RTE_MAX_ETHPORTS ||
5869  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
5870  RTE_ETHDEV_LOG(ERR,
5871  "Invalid port_id=%u or queue_id=%u\n",
5872  port_id, queue_id);
5873  return 0;
5874  }
5875 #endif
5876 
5877  /* fetch pointer to queue data */
5878  p = &rte_eth_fp_ops[port_id];
5879  qd = p->rxq.data[queue_id];
5880 
5881 #ifdef RTE_ETHDEV_DEBUG_RX
5882  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
5883 
5884  if (qd == NULL) {
5885  RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u for port_id=%u\n",
5886  queue_id, port_id);
5887  return 0;
5888  }
5889 #endif
5890 
5891  nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
5892 
5893 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
5894  {
5895  void *cb;
5896 
5897  /* __ATOMIC_RELEASE memory order was used when the
5898  * call back was inserted into the list.
5899  * Since there is a clear dependency between loading
5900  * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
5901  * not required.
5902  */
5903  cb = __atomic_load_n((void **)&p->rxq.clbk[queue_id],
5904  __ATOMIC_RELAXED);
5905  if (unlikely(cb != NULL))
5906  nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
5907  rx_pkts, nb_rx, nb_pkts, cb);
5908  }
5909 #endif
5910 
5911  rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
5912  return nb_rx;
5913 }
5914 
5932 static inline int
5933 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
5934 {
5935  struct rte_eth_fp_ops *p;
5936  void *qd;
5937 
5938 #ifdef RTE_ETHDEV_DEBUG_RX
5939  if (port_id >= RTE_MAX_ETHPORTS ||
5940  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
5941  RTE_ETHDEV_LOG(ERR,
5942  "Invalid port_id=%u or queue_id=%u\n",
5943  port_id, queue_id);
5944  return -EINVAL;
5945  }
5946 #endif
5947 
5948  /* fetch pointer to queue data */
5949  p = &rte_eth_fp_ops[port_id];
5950  qd = p->rxq.data[queue_id];
5951 
5952 #ifdef RTE_ETHDEV_DEBUG_RX
5953  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5954  if (qd == NULL)
5955  return -EINVAL;
5956 #endif
5957 
5958  if (*p->rx_queue_count == NULL)
5959  return -ENOTSUP;
5960  return (int)(*p->rx_queue_count)(qd);
5961 }
5962 
5966 #define RTE_ETH_RX_DESC_AVAIL 0
5967 #define RTE_ETH_RX_DESC_DONE 1
5968 #define RTE_ETH_RX_DESC_UNAVAIL 2
6004 static inline int
6005 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
6006  uint16_t offset)
6007 {
6008  struct rte_eth_fp_ops *p;
6009  void *qd;
6010 
6011 #ifdef RTE_ETHDEV_DEBUG_RX
6012  if (port_id >= RTE_MAX_ETHPORTS ||
6013  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6014  RTE_ETHDEV_LOG(ERR,
6015  "Invalid port_id=%u or queue_id=%u\n",
6016  port_id, queue_id);
6017  return -EINVAL;
6018  }
6019 #endif
6020 
6021  /* fetch pointer to queue data */
6022  p = &rte_eth_fp_ops[port_id];
6023  qd = p->rxq.data[queue_id];
6024 
6025 #ifdef RTE_ETHDEV_DEBUG_RX
6026  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6027  if (qd == NULL)
6028  return -ENODEV;
6029 #endif
6030  if (*p->rx_descriptor_status == NULL)
6031  return -ENOTSUP;
6032  return (*p->rx_descriptor_status)(qd, offset);
6033 }
6034 
6038 #define RTE_ETH_TX_DESC_FULL 0
6039 #define RTE_ETH_TX_DESC_DONE 1
6040 #define RTE_ETH_TX_DESC_UNAVAIL 2
6076 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
6077  uint16_t queue_id, uint16_t offset)
6078 {
6079  struct rte_eth_fp_ops *p;
6080  void *qd;
6081 
6082 #ifdef RTE_ETHDEV_DEBUG_TX
6083  if (port_id >= RTE_MAX_ETHPORTS ||
6084  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6085  RTE_ETHDEV_LOG(ERR,
6086  "Invalid port_id=%u or queue_id=%u\n",
6087  port_id, queue_id);
6088  return -EINVAL;
6089  }
6090 #endif
6091 
6092  /* fetch pointer to queue data */
6093  p = &rte_eth_fp_ops[port_id];
6094  qd = p->txq.data[queue_id];
6095 
6096 #ifdef RTE_ETHDEV_DEBUG_TX
6097  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6098  if (qd == NULL)
6099  return -ENODEV;
6100 #endif
6101  if (*p->tx_descriptor_status == NULL)
6102  return -ENOTSUP;
6103  return (*p->tx_descriptor_status)(qd, offset);
6104 }
6105 
6125 uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
6126  struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
6127 
6199 static inline uint16_t
6200 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
6201  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6202 {
6203  struct rte_eth_fp_ops *p;
6204  void *qd;
6205 
6206 #ifdef RTE_ETHDEV_DEBUG_TX
6207  if (port_id >= RTE_MAX_ETHPORTS ||
6208  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6209  RTE_ETHDEV_LOG(ERR,
6210  "Invalid port_id=%u or queue_id=%u\n",
6211  port_id, queue_id);
6212  return 0;
6213  }
6214 #endif
6215 
6216  /* fetch pointer to queue data */
6217  p = &rte_eth_fp_ops[port_id];
6218  qd = p->txq.data[queue_id];
6219 
6220 #ifdef RTE_ETHDEV_DEBUG_TX
6221  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6222 
6223  if (qd == NULL) {
6224  RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
6225  queue_id, port_id);
6226  return 0;
6227  }
6228 #endif
6229 
6230 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6231  {
6232  void *cb;
6233 
6234  /* __ATOMIC_RELEASE memory order was used when the
6235  * call back was inserted into the list.
6236  * Since there is a clear dependency between loading
6237  * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
6238  * not required.
6239  */
6240  cb = __atomic_load_n((void **)&p->txq.clbk[queue_id],
6241  __ATOMIC_RELAXED);
6242  if (unlikely(cb != NULL))
6243  nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
6244  tx_pkts, nb_pkts, cb);
6245  }
6246 #endif
6247 
6248  nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
6249 
6250  rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
6251  return nb_pkts;
6252 }
6253 
6308 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
6309 
6310 static inline uint16_t
6311 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
6312  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6313 {
6314  struct rte_eth_fp_ops *p;
6315  void *qd;
6316 
6317 #ifdef RTE_ETHDEV_DEBUG_TX
6318  if (port_id >= RTE_MAX_ETHPORTS ||
6319  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6320  RTE_ETHDEV_LOG(ERR,
6321  "Invalid port_id=%u or queue_id=%u\n",
6322  port_id, queue_id);
6323  rte_errno = ENODEV;
6324  return 0;
6325  }
6326 #endif
6327 
6328  /* fetch pointer to queue data */
6329  p = &rte_eth_fp_ops[port_id];
6330  qd = p->txq.data[queue_id];
6331 
6332 #ifdef RTE_ETHDEV_DEBUG_TX
6333  if (!rte_eth_dev_is_valid_port(port_id)) {
6334  RTE_ETHDEV_LOG(ERR, "Invalid Tx port_id=%u\n", port_id);
6335  rte_errno = ENODEV;
6336  return 0;
6337  }
6338  if (qd == NULL) {
6339  RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
6340  queue_id, port_id);
6341  rte_errno = EINVAL;
6342  return 0;
6343  }
6344 #endif
6345 
6346  if (!p->tx_pkt_prepare)
6347  return nb_pkts;
6348 
6349  return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
6350 }
6351 
6352 #else
6353 
6354 /*
6355  * Native NOOP operation for compilation targets which do not require any
6356  * preparation steps, where a functional NOOP may introduce an unnecessary
6357  * performance drop.
6358  *
6359  * Generally it is not a good idea to turn this on globally, and it should
6360  * not be used if the behavior of tx_preparation can change.
6361  */
6362 
6363 static inline uint16_t
6364 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
6365  __rte_unused uint16_t queue_id,
6366  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6367 {
6368  return nb_pkts;
6369 }
6370 
6371 #endif
6372 
6395 static inline uint16_t
6396 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
6397  struct rte_eth_dev_tx_buffer *buffer)
6398 {
6399  uint16_t sent;
6400  uint16_t to_send = buffer->length;
6401 
6402  if (to_send == 0)
6403  return 0;
6404 
6405  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
6406 
6407  buffer->length = 0;
6408 
6409  /* All packets sent, or to be dealt with by callback below */
6410  if (unlikely(sent != to_send))
6411  buffer->error_callback(&buffer->pkts[sent],
6412  (uint16_t)(to_send - sent),
6413  buffer->error_userdata);
6414 
6415  return sent;
6416 }
6417 
6448 static __rte_always_inline uint16_t
6449 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
6450  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
6451 {
6452  buffer->pkts[buffer->length++] = tx_pkt;
6453  if (buffer->length < buffer->size)
6454  return 0;
6455 
6456  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
6457 }
6458 
6487 __rte_experimental
6488 int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num);
6489 
6490 #ifdef __cplusplus
6491 }
6492 #endif
6493 
6494 #endif /* _RTE_ETHDEV_H_ */
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1758
int rte_eth_dev_stop(uint16_t port_id)
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_promiscuous_disable(uint16_t port_id)
__extension__ uint32_t multi_pools
Definition: rte_ethdev.h:1668
int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
uint16_t link_duplex
Definition: rte_ethdev.h:334
__rte_experimental int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
#define __rte_always_inline
Definition: rte_common.h:255
#define RTE_ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:803
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:1136
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
uint32_t mtu
Definition: rte_ethdev.h:413
uint16_t nb_desc
Definition: rte_ethdev.h:1817
rte_eth_event_macsec_type
Definition: rte_ethdev.h:3775
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1737
const uint32_t * dev_flags
Definition: rte_ethdev.h:1723
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
__rte_experimental int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
#define __rte_cache_min_aligned
Definition: rte_common.h:443
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6311
struct rte_device * device
Definition: rte_ethdev.h:1717
rte_eth_nb_tcs
Definition: rte_ethdev.h:864
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:284
uint16_t rte_eth_find_next(uint16_t port_id)
__rte_experimental int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
rte_cman_mode
Definition: rte_cman.h:20
int rte_eth_led_off(uint16_t port_id)
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:6005
uint64_t imissed
Definition: rte_ethdev.h:270
uint32_t low_water
Definition: rte_ethdev.h:1330
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
uint8_t rss_key_len
Definition: rte_ethdev.h:465
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
uint8_t hthresh
Definition: rte_ethdev.h:358
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
__rte_experimental int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1741
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
uint16_t reta_size
Definition: rte_ethdev.h:1745
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint32_t lpbk_mode
Definition: rte_ethdev.h:1464
#define rte_errno
Definition: rte_errno.h:29
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint32_t link_speeds
Definition: rte_ethdev.h:1455
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1743
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:400
rte_eth_fc_mode
Definition: rte_ethdev.h:1316
uint8_t enable_default_pool
Definition: rte_ethdev.h:916
uint32_t max_hash_mac_addrs
Definition: rte_ethdev.h:1732
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:1310
int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
#define __rte_unused
Definition: rte_common.h:120
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:282
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
uint64_t opackets
Definition: rte_ethdev.h:263
rte_eth_cman_obj
Definition: rte_ethdev.h:5583
uint8_t hash_key_size
Definition: rte_ethdev.h:1746
int rte_eth_dev_close(uint16_t port_id)
struct rte_mempool * mp
Definition: rte_ethdev.h:1044
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
__rte_experimental int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, uint8_t avail_thresh)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
struct rte_mempool * mp
Definition: rte_ethdev.h:1796
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1488
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:280
__rte_experimental int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
const char * name
Definition: rte_ethdev.h:1642
uint8_t queue_state
Definition: rte_ethdev.h:1818
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
int rte_eth_dev_set_link_up(uint16_t port_id)
#define RTE_BIT32(nr)
Definition: rte_bitops.h:38
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1752
int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
uint16_t share_qid
Definition: rte_ethdev.h:1089
uint8_t rx_deferred_start
Definition: rte_ethdev.h:1080
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:3603
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:3954
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:276
uint32_t high_water
Definition: rte_ethdev.h:1329
#define RTE_ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:365
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1842
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
__rte_experimental int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, unsigned int num)
int rte_eth_timesync_disable(uint16_t port_id)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint64_t offloads
Definition: rte_ethdev.h:1095
uint32_t link_speed
Definition: rte_ethdev.h:333
__rte_experimental int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, uint8_t *avail_thresh)
__rte_experimental int rte_eth_representor_info_get(uint16_t port_id, struct rte_eth_representor_info *info)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, struct rte_eth_pfc_queue_conf *pfc_queue_conf)
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num)
uint16_t send_xon
Definition: rte_ethdev.h:1332
struct rte_mempool ** rx_mempools
Definition: rte_ethdev.h:1124
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_allmulticast_enable(uint16_t port_id)
uint32_t offset_allowed
Definition: rte_ethdev.h:1669
__rte_experimental int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, struct rte_power_monitor_cond *pmc)
#define unlikely(x)
uint64_t ibytes
Definition: rte_ethdev.h:264
uint32_t offset_align_log2
Definition: rte_ethdev.h:1670
uint8_t avail_thresh
Definition: rte_ethdev.h:1808
uint64_t offloads
Definition: rte_ethdev.h:1146
int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
uint16_t max_nb_queues
Definition: rte_ethdev.h:1182
uint64_t oerrors
Definition: rte_ethdev.h:272
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
__rte_experimental int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
uint16_t max_mtu
Definition: rte_ethdev.h:1722
uint64_t offloads
Definition: rte_ethdev.h:421
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
uint16_t link_autoneg
Definition: rte_ethdev.h:335
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
uint16_t tx_free_thresh
Definition: rte_ethdev.h:1137
uint16_t nb_desc
Definition: rte_ethdev.h:1800
uint64_t modes_supported
Definition: rte_ethdev.h:5604
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:5860
int rte_eth_allmulticast_disable(uint16_t port_id)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_start(uint16_t port_id)
__rte_experimental int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config)
uint16_t rx_buf_size
Definition: rte_ethdev.h:1801
__rte_experimental int rte_eth_ip_reassembly_capability_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *capa)
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1734
uint8_t scattered_rx
Definition: rte_ethdev.h:1798
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
#define RTE_ETH_MQ_RX_DCB_FLAG
Definition: rte_ethdev.h:366
uint64_t offloads
Definition: rte_ethdev.h:968
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1753
__rte_experimental int rte_eth_ip_reassembly_conf_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *conf)
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1739
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:278
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
uint16_t min_mtu
Definition: rte_ethdev.h:1721
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_is_removed(uint16_t port_id)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:1994
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
uint64_t obytes
Definition: rte_ethdev.h:265
__rte_experimental int rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
uint8_t enable_loop_back
Definition: rte_ethdev.h:949
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
rte_eth_err_handle_mode
Definition: rte_ethdev.h:1695
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, struct rte_eth_pfc_queue_info *pfc_queue_info)
int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
rte_eth_fec_mode
Definition: rte_ethdev.h:1912
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
uint16_t max_tx_queues
Definition: rte_ethdev.h:1729
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_xstats_reset(uint16_t port_id)
rte_eth_dev_state
Definition: rte_ethdev.h:2000
uint16_t rx_free_thresh
Definition: rte_ethdev.h:1078
uint64_t dev_capa
Definition: rte_ethdev.h:1772
uint64_t ierrors
Definition: rte_ethdev.h:271
#define RTE_ETH_MQ_RX_VMDQ_FLAG
Definition: rte_ethdev.h:367
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction)
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1748
#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:802
uint16_t rte_eth_dev_count_total(void)
int rte_eth_promiscuous_enable(uint16_t port_id)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
union rte_eth_rxseg * rx_seg
Definition: rte_ethdev.h:1103
int rte_eth_dev_owner_new(uint64_t *owner_id)
rte_vlan_type
Definition: rte_ethdev.h:431
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
__rte_experimental int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
uint16_t nb_seg_max
Definition: rte_ethdev.h:1297
uint64_t ipackets
Definition: rte_ethdev.h:262
uint16_t max_vfs
Definition: rte_ethdev.h:1733
uint16_t pause_time
Definition: rte_ethdev.h:1331
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_owner_delete(const uint64_t owner_id)
uint64_t rx_nombuf
Definition: rte_ethdev.h:273
rte_eth_event_macsec_subtype
Definition: rte_ethdev.h:3741
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:6449
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:973
uint8_t queue_state
Definition: rte_ethdev.h:1799
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
uint32_t use_locked_device_memory
Definition: rte_ethdev.h:1249
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1751
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:3817
rte_eth_nb_pools
Definition: rte_ethdev.h:873
uint16_t nb_align
Definition: rte_ethdev.h:1287
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:374
__rte_experimental const char * rte_eth_link_speed_to_str(uint32_t link_speed)
uint16_t rsvd
const char * driver_name
Definition: rte_ethdev.h:1718
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:5933
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_socket_id(uint16_t port_id)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint32_t tx_rate)
uint8_t enable_default_pool
Definition: rte_ethdev.h:947
__extension__ struct rte_eth_link __rte_aligned(8)
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1759
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1730
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
rte_eth_tunnel_type
Definition: rte_ethdev.h:1405
__rte_experimental const char * rte_eth_dev_capability_name(uint64_t capability)
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint64_t value
Definition: rte_ethdev.h:1855
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition: rte_ethdev.h:627
__rte_experimental int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
__rte_experimental int rte_eth_ip_reassembly_conf_set(uint16_t port_id, const struct rte_eth_ip_reassembly_params *conf)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
__rte_experimental int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config)
int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1725
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
uint64_t rss_hf
Definition: rte_ethdev.h:466
uint64_t id
Definition: rte_ethdev.h:1854
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
unsigned int if_index
Definition: rte_ethdev.h:1719
__rte_experimental int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:1334
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:1970
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
uint8_t * rss_key
Definition: rte_ethdev.h:464
uint8_t tx_deferred_start
Definition: rte_ethdev.h:1140
uint8_t wthresh
Definition: rte_ethdev.h:359
uint16_t max_rx_queues
Definition: rte_ethdev.h:1728
uint16_t max_rx_mempools
Definition: rte_ethdev.h:1766
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
rte_eth_representor_type
Definition: rte_ethdev.h:1682
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:415
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
int rte_eth_allmulticast_get(uint16_t port_id)
uint8_t rx_drop_en
Definition: rte_ethdev.h:1079
int rte_eth_dev_is_valid_port(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint16_t rx_nseg
Definition: rte_ethdev.h:1081
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1727
int rte_eth_timesync_enable(uint16_t port_id)
uint8_t pthresh
Definition: rte_ethdev.h:357
uint16_t share_group
Definition: rte_ethdev.h:1088
uint32_t speed_capa
Definition: rte_ethdev.h:1756
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6200
__rte_experimental int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info)
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_experimental int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
uint64_t objs_supported
Definition: rte_ethdev.h:5609
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1724
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:6396
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
rte_eth_event_type
Definition: rte_ethdev.h:3878