DPDK 25.07.0
rte_ethdev.h — API documentation extract (generated listing; the numbers fused to the start of each code line below are the original header's line numbers).
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
3 */
4
5#ifndef _RTE_ETHDEV_H_
6#define _RTE_ETHDEV_H_
7
148#include <stdint.h>
149
150/* Use this macro to check if LRO API is supported */
151#define RTE_ETHDEV_HAS_LRO_SUPPORT
152
153/* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
154#ifdef RTE_LIBRTE_ETHDEV_DEBUG
155#define RTE_ETHDEV_DEBUG_RX
156#define RTE_ETHDEV_DEBUG_TX
157#endif
158
159#include <rte_cman.h>
160#include <rte_compat.h>
161#include <rte_log.h>
162#include <rte_interrupts.h>
163#include <rte_dev.h>
164#include <rte_devargs.h>
165#include <rte_bitops.h>
166#include <rte_errno.h>
167#include <rte_common.h>
168#include <rte_config.h>
169#include <rte_power_intrinsics.h>
170
171#include "rte_ethdev_trace_fp.h"
172#include "rte_dev_info.h"
173
174#ifdef __cplusplus
175extern "C" {
176#endif
177
178extern int rte_eth_dev_logtype;
179#define RTE_LOGTYPE_ETHDEV rte_eth_dev_logtype
180
181#define RTE_ETHDEV_LOG_LINE(level, ...) \
182 RTE_LOG_LINE(level, ETHDEV, "" __VA_ARGS__)
183
184struct rte_mbuf;
185
202int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
203
219
233
/*
 * Iterate over every Ethernet port ID matching a devargs filter string.
 * Initializes @p iter from @p devargs via rte_eth_iterator_init(), then
 * advances with rte_eth_iterator_next(); the loop terminates when the
 * iterator returns RTE_MAX_ETHPORTS (no further match). @p id is the
 * loop variable receiving each matching port ID in turn.
 */
247#define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
248 for (rte_eth_iterator_init(iter, devargs), \
249 id = rte_eth_iterator_next(iter); \
250 id != RTE_MAX_ETHPORTS; \
251 id = rte_eth_iterator_next(iter))
252
263 uint64_t ipackets;
264 uint64_t opackets;
265 uint64_t ibytes;
266 uint64_t obytes;
271 uint64_t imissed;
272 uint64_t ierrors;
273 uint64_t oerrors;
274 uint64_t rx_nombuf;
275 /* Queue stats are limited to max 256 queues */
277 uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
279 uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
281 uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
283 uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
285 uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
286};
287
291#define RTE_ETH_LINK_SPEED_AUTONEG 0
292#define RTE_ETH_LINK_SPEED_FIXED RTE_BIT32(0)
293#define RTE_ETH_LINK_SPEED_10M_HD RTE_BIT32(1)
294#define RTE_ETH_LINK_SPEED_10M RTE_BIT32(2)
295#define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)
296#define RTE_ETH_LINK_SPEED_100M RTE_BIT32(4)
297#define RTE_ETH_LINK_SPEED_1G RTE_BIT32(5)
298#define RTE_ETH_LINK_SPEED_2_5G RTE_BIT32(6)
299#define RTE_ETH_LINK_SPEED_5G RTE_BIT32(7)
300#define RTE_ETH_LINK_SPEED_10G RTE_BIT32(8)
301#define RTE_ETH_LINK_SPEED_20G RTE_BIT32(9)
302#define RTE_ETH_LINK_SPEED_25G RTE_BIT32(10)
303#define RTE_ETH_LINK_SPEED_40G RTE_BIT32(11)
304#define RTE_ETH_LINK_SPEED_50G RTE_BIT32(12)
305#define RTE_ETH_LINK_SPEED_56G RTE_BIT32(13)
306#define RTE_ETH_LINK_SPEED_100G RTE_BIT32(14)
307#define RTE_ETH_LINK_SPEED_200G RTE_BIT32(15)
308#define RTE_ETH_LINK_SPEED_400G RTE_BIT32(16)
314#define RTE_ETH_SPEED_NUM_NONE 0
315#define RTE_ETH_SPEED_NUM_10M 10
316#define RTE_ETH_SPEED_NUM_100M 100
317#define RTE_ETH_SPEED_NUM_1G 1000
318#define RTE_ETH_SPEED_NUM_2_5G 2500
319#define RTE_ETH_SPEED_NUM_5G 5000
320#define RTE_ETH_SPEED_NUM_10G 10000
321#define RTE_ETH_SPEED_NUM_20G 20000
322#define RTE_ETH_SPEED_NUM_25G 25000
323#define RTE_ETH_SPEED_NUM_40G 40000
324#define RTE_ETH_SPEED_NUM_50G 50000
325#define RTE_ETH_SPEED_NUM_56G 56000
326#define RTE_ETH_SPEED_NUM_100G 100000
327#define RTE_ETH_SPEED_NUM_200G 200000
328#define RTE_ETH_SPEED_NUM_400G 400000
329#define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX
336 union {
337 RTE_ATOMIC(uint64_t) val64;
338 __extension__
339 struct {
340 uint32_t link_speed;
341 uint16_t link_duplex : 1;
342 uint16_t link_autoneg : 1;
343 uint16_t link_status : 1;
344 };
345 };
346};
347
351#define RTE_ETH_LINK_HALF_DUPLEX 0
352#define RTE_ETH_LINK_FULL_DUPLEX 1
353#define RTE_ETH_LINK_DOWN 0
354#define RTE_ETH_LINK_UP 1
355#define RTE_ETH_LINK_FIXED 0
356#define RTE_ETH_LINK_AUTONEG 1
357#define RTE_ETH_LINK_MAX_STR_LEN 40
361#define RTE_ETH_SPEED_LANES_TO_CAPA(x) RTE_BIT32(x)
362
365 uint32_t speed;
366 uint32_t capa;
367};
368
374 uint8_t pthresh;
375 uint8_t hthresh;
376 uint8_t wthresh;
377};
378
382#define RTE_ETH_MQ_RX_RSS_FLAG RTE_BIT32(0)
383#define RTE_ETH_MQ_RX_DCB_FLAG RTE_BIT32(1)
384#define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2)
394
401
411};
412
422};
423
430 uint32_t mtu;
438 uint64_t offloads;
439
440 uint64_t reserved_64s[2];
441 void *reserved_ptrs[2];
442};
443
449 RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
452 RTE_ETH_VLAN_TYPE_MAX,
453};
454
460 uint64_t ids[64];
461};
462
484 RTE_ETH_HASH_FUNCTION_MAX,
485};
486
487#define RTE_ETH_HASH_ALGO_TO_CAPA(x) RTE_BIT32(x)
488#define RTE_ETH_HASH_ALGO_CAPA_MASK(x) RTE_BIT32(RTE_ETH_HASH_FUNCTION_ ## x)
489
507 uint8_t *rss_key;
508 uint8_t rss_key_len;
513 uint64_t rss_hf;
515};
516
517/*
518 * A packet can be identified by hardware as different flow types. Different
519 * NIC hardware may support different flow types.
520 * Basically, the NIC hardware identifies the flow type as deep protocol as
521 * possible, and exclusively. For example, if a packet is identified as
522 * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
523 * though it is an actual IPV4 packet.
524 */
525#define RTE_ETH_FLOW_UNKNOWN 0
526#define RTE_ETH_FLOW_RAW 1
527#define RTE_ETH_FLOW_IPV4 2
528#define RTE_ETH_FLOW_FRAG_IPV4 3
529#define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
530#define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
531#define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
532#define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
533#define RTE_ETH_FLOW_IPV6 8
534#define RTE_ETH_FLOW_FRAG_IPV6 9
535#define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
536#define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
537#define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
538#define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
539#define RTE_ETH_FLOW_L2_PAYLOAD 14
540#define RTE_ETH_FLOW_IPV6_EX 15
541#define RTE_ETH_FLOW_IPV6_TCP_EX 16
542#define RTE_ETH_FLOW_IPV6_UDP_EX 17
544#define RTE_ETH_FLOW_PORT 18
545#define RTE_ETH_FLOW_VXLAN 19
546#define RTE_ETH_FLOW_GENEVE 20
547#define RTE_ETH_FLOW_NVGRE 21
548#define RTE_ETH_FLOW_VXLAN_GPE 22
549#define RTE_ETH_FLOW_GTPU 23
550#define RTE_ETH_FLOW_MAX 24
551
552/*
553 * Below macros are defined for RSS offload types, they can be used to
554 * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
555 */
556#define RTE_ETH_RSS_IPV4 RTE_BIT64(2)
557#define RTE_ETH_RSS_FRAG_IPV4 RTE_BIT64(3)
558#define RTE_ETH_RSS_NONFRAG_IPV4_TCP RTE_BIT64(4)
559#define RTE_ETH_RSS_NONFRAG_IPV4_UDP RTE_BIT64(5)
560#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP RTE_BIT64(6)
561#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
562#define RTE_ETH_RSS_IPV6 RTE_BIT64(8)
563#define RTE_ETH_RSS_FRAG_IPV6 RTE_BIT64(9)
564#define RTE_ETH_RSS_NONFRAG_IPV6_TCP RTE_BIT64(10)
565#define RTE_ETH_RSS_NONFRAG_IPV6_UDP RTE_BIT64(11)
566#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP RTE_BIT64(12)
567#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
568#define RTE_ETH_RSS_L2_PAYLOAD RTE_BIT64(14)
569#define RTE_ETH_RSS_IPV6_EX RTE_BIT64(15)
570#define RTE_ETH_RSS_IPV6_TCP_EX RTE_BIT64(16)
571#define RTE_ETH_RSS_IPV6_UDP_EX RTE_BIT64(17)
572#define RTE_ETH_RSS_PORT RTE_BIT64(18)
573#define RTE_ETH_RSS_VXLAN RTE_BIT64(19)
574#define RTE_ETH_RSS_GENEVE RTE_BIT64(20)
575#define RTE_ETH_RSS_NVGRE RTE_BIT64(21)
576#define RTE_ETH_RSS_GTPU RTE_BIT64(23)
577#define RTE_ETH_RSS_ETH RTE_BIT64(24)
578#define RTE_ETH_RSS_S_VLAN RTE_BIT64(25)
579#define RTE_ETH_RSS_C_VLAN RTE_BIT64(26)
580#define RTE_ETH_RSS_ESP RTE_BIT64(27)
581#define RTE_ETH_RSS_AH RTE_BIT64(28)
582#define RTE_ETH_RSS_L2TPV3 RTE_BIT64(29)
583#define RTE_ETH_RSS_PFCP RTE_BIT64(30)
584#define RTE_ETH_RSS_PPPOE RTE_BIT64(31)
585#define RTE_ETH_RSS_ECPRI RTE_BIT64(32)
586#define RTE_ETH_RSS_MPLS RTE_BIT64(33)
587#define RTE_ETH_RSS_IPV4_CHKSUM RTE_BIT64(34)
588
601#define RTE_ETH_RSS_L4_CHKSUM RTE_BIT64(35)
602
603#define RTE_ETH_RSS_L2TPV2 RTE_BIT64(36)
604#define RTE_ETH_RSS_IPV6_FLOW_LABEL RTE_BIT64(37)
605
607#define RTE_ETH_RSS_IB_BTH RTE_BIT64(38)
608
609/*
610 * We use the following macros to combine with above RTE_ETH_RSS_* for
611 * more specific input set selection. These bits are defined starting
612 * from the high end of the 64 bits.
613 * Note: If we use above RTE_ETH_RSS_* without SRC/DST_ONLY, it represents
614 * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
615 * the same level are used simultaneously, it is the same case as none of
616 * them are added.
617 */
618#define RTE_ETH_RSS_L3_SRC_ONLY RTE_BIT64(63)
619#define RTE_ETH_RSS_L3_DST_ONLY RTE_BIT64(62)
620#define RTE_ETH_RSS_L4_SRC_ONLY RTE_BIT64(61)
621#define RTE_ETH_RSS_L4_DST_ONLY RTE_BIT64(60)
622#define RTE_ETH_RSS_L2_SRC_ONLY RTE_BIT64(59)
623#define RTE_ETH_RSS_L2_DST_ONLY RTE_BIT64(58)
624
625/*
626 * Only select IPV6 address prefix as RSS input set according to
627 * https://tools.ietf.org/html/rfc6052
628 * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
629 * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
630 */
631#define RTE_ETH_RSS_L3_PRE32 RTE_BIT64(57)
632#define RTE_ETH_RSS_L3_PRE40 RTE_BIT64(56)
633#define RTE_ETH_RSS_L3_PRE48 RTE_BIT64(55)
634#define RTE_ETH_RSS_L3_PRE56 RTE_BIT64(54)
635#define RTE_ETH_RSS_L3_PRE64 RTE_BIT64(53)
636#define RTE_ETH_RSS_L3_PRE96 RTE_BIT64(52)
637
638/*
639 * Use the following macros to combine with the above layers
640 * to choose inner and outer layers or both for RSS computation.
641 * Bits 50 and 51 are reserved for this.
642 */
643
651#define RTE_ETH_RSS_LEVEL_PMD_DEFAULT (UINT64_C(0) << 50)
652
657#define RTE_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
658
663#define RTE_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
664#define RTE_ETH_RSS_LEVEL_MASK (UINT64_C(3) << 50)
665
666#define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
667
678static inline uint64_t
679rte_eth_rss_hf_refine(uint64_t rss_hf)
680{
681 if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
682 rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
683
684 if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
685 rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
686
687 return rss_hf;
688}
689
690#define RTE_ETH_RSS_IPV6_PRE32 ( \
691 RTE_ETH_RSS_IPV6 | \
692 RTE_ETH_RSS_L3_PRE32)
693
694#define RTE_ETH_RSS_IPV6_PRE40 ( \
695 RTE_ETH_RSS_IPV6 | \
696 RTE_ETH_RSS_L3_PRE40)
697
698#define RTE_ETH_RSS_IPV6_PRE48 ( \
699 RTE_ETH_RSS_IPV6 | \
700 RTE_ETH_RSS_L3_PRE48)
701
702#define RTE_ETH_RSS_IPV6_PRE56 ( \
703 RTE_ETH_RSS_IPV6 | \
704 RTE_ETH_RSS_L3_PRE56)
705
706#define RTE_ETH_RSS_IPV6_PRE64 ( \
707 RTE_ETH_RSS_IPV6 | \
708 RTE_ETH_RSS_L3_PRE64)
709
710#define RTE_ETH_RSS_IPV6_PRE96 ( \
711 RTE_ETH_RSS_IPV6 | \
712 RTE_ETH_RSS_L3_PRE96)
713
714#define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
715 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
716 RTE_ETH_RSS_L3_PRE32)
717
718#define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
719 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
720 RTE_ETH_RSS_L3_PRE40)
721
722#define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
723 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
724 RTE_ETH_RSS_L3_PRE48)
725
726#define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
727 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
728 RTE_ETH_RSS_L3_PRE56)
729
730#define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
731 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
732 RTE_ETH_RSS_L3_PRE64)
733
734#define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
735 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
736 RTE_ETH_RSS_L3_PRE96)
737
738#define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
739 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
740 RTE_ETH_RSS_L3_PRE32)
741
742#define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
743 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
744 RTE_ETH_RSS_L3_PRE40)
745
746#define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
747 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
748 RTE_ETH_RSS_L3_PRE48)
749
750#define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
751 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
752 RTE_ETH_RSS_L3_PRE56)
753
754#define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
755 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
756 RTE_ETH_RSS_L3_PRE64)
757
758#define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
759 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
760 RTE_ETH_RSS_L3_PRE96)
761
762#define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
763 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
764 RTE_ETH_RSS_L3_PRE32)
765
766#define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
767 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
768 RTE_ETH_RSS_L3_PRE40)
769
770#define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
771 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
772 RTE_ETH_RSS_L3_PRE48)
773
774#define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
775 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
776 RTE_ETH_RSS_L3_PRE56)
777
778#define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
779 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
780 RTE_ETH_RSS_L3_PRE64)
781
782#define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
783 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
784 RTE_ETH_RSS_L3_PRE96)
785
786#define RTE_ETH_RSS_IP ( \
787 RTE_ETH_RSS_IPV4 | \
788 RTE_ETH_RSS_FRAG_IPV4 | \
789 RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
790 RTE_ETH_RSS_IPV6 | \
791 RTE_ETH_RSS_FRAG_IPV6 | \
792 RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
793 RTE_ETH_RSS_IPV6_EX)
794
795#define RTE_ETH_RSS_UDP ( \
796 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
797 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
798 RTE_ETH_RSS_IPV6_UDP_EX)
799
800#define RTE_ETH_RSS_TCP ( \
801 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
802 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
803 RTE_ETH_RSS_IPV6_TCP_EX)
804
805#define RTE_ETH_RSS_SCTP ( \
806 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
807 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
808
809#define RTE_ETH_RSS_TUNNEL ( \
810 RTE_ETH_RSS_VXLAN | \
811 RTE_ETH_RSS_GENEVE | \
812 RTE_ETH_RSS_NVGRE)
813
814#define RTE_ETH_RSS_VLAN ( \
815 RTE_ETH_RSS_S_VLAN | \
816 RTE_ETH_RSS_C_VLAN)
817
819#define RTE_ETH_RSS_PROTO_MASK ( \
820 RTE_ETH_RSS_IPV4 | \
821 RTE_ETH_RSS_FRAG_IPV4 | \
822 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
823 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
824 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
825 RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
826 RTE_ETH_RSS_IPV6 | \
827 RTE_ETH_RSS_FRAG_IPV6 | \
828 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
829 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
830 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
831 RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
832 RTE_ETH_RSS_L2_PAYLOAD | \
833 RTE_ETH_RSS_IPV6_EX | \
834 RTE_ETH_RSS_IPV6_TCP_EX | \
835 RTE_ETH_RSS_IPV6_UDP_EX | \
836 RTE_ETH_RSS_PORT | \
837 RTE_ETH_RSS_VXLAN | \
838 RTE_ETH_RSS_GENEVE | \
839 RTE_ETH_RSS_NVGRE | \
840 RTE_ETH_RSS_MPLS)
841
842/*
843 * Definitions used for redirection table entry size.
844 * Some RSS RETA sizes may not be supported by some drivers, check the
845 * documentation or the description of relevant functions for more details.
846 */
847#define RTE_ETH_RSS_RETA_SIZE_64 64
848#define RTE_ETH_RSS_RETA_SIZE_128 128
849#define RTE_ETH_RSS_RETA_SIZE_256 256
850#define RTE_ETH_RSS_RETA_SIZE_512 512
851#define RTE_ETH_RETA_GROUP_SIZE 64
852
854#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS 64
855#define RTE_ETH_DCB_NUM_USER_PRIORITIES 8
856#define RTE_ETH_VMDQ_DCB_NUM_QUEUES 128
857#define RTE_ETH_DCB_NUM_QUEUES 128
861#define RTE_ETH_DCB_PG_SUPPORT RTE_BIT32(0)
862#define RTE_ETH_DCB_PFC_SUPPORT RTE_BIT32(1)
866#define RTE_ETH_VLAN_STRIP_OFFLOAD 0x0001
867#define RTE_ETH_VLAN_FILTER_OFFLOAD 0x0002
868#define RTE_ETH_VLAN_EXTEND_OFFLOAD 0x0004
869#define RTE_ETH_QINQ_STRIP_OFFLOAD 0x0008
871#define RTE_ETH_VLAN_STRIP_MASK 0x0001
872#define RTE_ETH_VLAN_FILTER_MASK 0x0002
873#define RTE_ETH_VLAN_EXTEND_MASK 0x0004
874#define RTE_ETH_QINQ_STRIP_MASK 0x0008
875#define RTE_ETH_VLAN_ID_MAX 0x0FFF
878/* Definitions used for receive MAC address */
879#define RTE_ETH_NUM_RECEIVE_MAC_ADDR 128
881/* Definitions used for unicast hash */
882#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128
888#define RTE_ETH_VMDQ_ACCEPT_UNTAG RTE_BIT32(0)
890#define RTE_ETH_VMDQ_ACCEPT_HASH_MC RTE_BIT32(1)
892#define RTE_ETH_VMDQ_ACCEPT_HASH_UC RTE_BIT32(2)
894#define RTE_ETH_VMDQ_ACCEPT_BROADCAST RTE_BIT32(3)
896#define RTE_ETH_VMDQ_ACCEPT_MULTICAST RTE_BIT32(4)
907 uint64_t mask;
909 uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
910};
911
918 RTE_ETH_8_TCS = 8
920
929 RTE_ETH_64_POOLS = 64
931
932/* This structure may be extended in future. */
/* Rx configuration for DCB (Data Center Bridging). */
933struct rte_eth_dcb_rx_conf {
934 enum rte_eth_nb_tcs nb_tcs; /**< number of traffic classes (see rte_eth_nb_tcs) */
936 uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]; /**< one entry per user priority; presumably maps priority to TC — confirm against DPDK docs */
937};
938
/* Tx configuration for combined VMDq + DCB operation. */
939struct rte_eth_vmdq_dcb_tx_conf {
940 enum rte_eth_nb_pools nb_queue_pools; /**< number of VMDq queue pools (see rte_eth_nb_pools) */
942 uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]; /**< one entry per user priority; presumably maps priority to TC — confirm against DPDK docs */
943};
944
/* Tx configuration for DCB (Data Center Bridging). */
945struct rte_eth_dcb_tx_conf {
946 enum rte_eth_nb_tcs nb_tcs; /**< number of traffic classes (see rte_eth_nb_tcs) */
948 uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]; /**< one entry per user priority; presumably maps priority to TC — confirm against DPDK docs */
949};
950
/* Tx configuration for VMDq operation. */
951struct rte_eth_vmdq_tx_conf {
952 enum rte_eth_nb_pools nb_queue_pools; /**< number of VMDq queue pools (see rte_eth_nb_pools) */
953};
954
969 uint8_t default_pool;
970 uint8_t nb_pool_maps;
971 struct {
972 uint16_t vlan_id;
973 uint64_t pools;
977};
978
1003 uint32_t rx_mode;
1004 struct {
1005 uint16_t vlan_id;
1006 uint64_t pools;
1008};
1009
1020 uint64_t offloads;
1021
1022 uint16_t pvid;
1023 __extension__
1024 uint8_t
1030
1031 uint64_t reserved_64s[2];
1032 void *reserved_ptrs[2];
1033};
1034
1096 struct rte_mempool *mp;
1097 uint16_t length;
1098 uint16_t offset;
1110 uint32_t proto_hdr;
1111};
1112
1120 /* The settings for buffer split offload. */
1121 struct rte_eth_rxseg_split split;
1122 /* The other features settings should be added here. */
1123};
1124
1131 uint8_t rx_drop_en;
1133 uint16_t rx_nseg;
1140 uint16_t share_group;
1141 uint16_t share_qid;
1147 uint64_t offloads;
1156
1177 uint16_t rx_nmempool;
1179 uint64_t reserved_64s[2];
1180 void *reserved_ptrs[2];
1181};
1182
1188 uint16_t tx_rs_thresh;
1198 uint64_t offloads;
1199
1200 uint64_t reserved_64s[2];
1201 void *reserved_ptrs[2];
1202};
1203
1216
1221 uint32_t rte_memory:1;
1222
1223 uint32_t reserved:30;
1224};
1225
1236 uint16_t max_rx_2_tx;
1238 uint16_t max_tx_2_rx;
1239 uint16_t max_nb_desc;
1242};
1243
1244#define RTE_ETH_MAX_HAIRPIN_PEERS 32
1245
1253 uint16_t port;
1254 uint16_t queue;
1255};
1256
1264 uint32_t peer_count:16;
1275 uint32_t tx_explicit:1;
1276
1288 uint32_t manual_bind:1;
1289
1302
1314 uint32_t use_rte_memory:1;
1315
1326 uint32_t force_memory:1;
1327
1328 uint32_t reserved:11;
1330 struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1331};
1332
1337 uint16_t nb_max;
1338 uint16_t nb_min;
1339 uint16_t nb_align;
1349 uint16_t nb_seg_max;
1350
1363};
1364
1374
1381 uint32_t high_water;
1382 uint32_t low_water;
1383 uint16_t pause_time;
1384 uint16_t send_xon;
1387 uint8_t autoneg;
1388};
1389
1397 uint8_t priority;
1398};
1399
1410 uint8_t tc_max;
1413};
1414
1435 struct {
1436 uint16_t tx_qid;
1440 uint8_t tc;
1441 } rx_pause; /* Valid when (mode == FC_RX_PAUSE || mode == FC_FULL) */
1442
1443 struct {
1444 uint16_t pause_time;
1445 uint16_t rx_qid;
1449 uint8_t tc;
1450 } tx_pause; /* Valid when (mode == FC_TX_PAUSE || mode == FC_FULL) */
1451};
1452
1458 RTE_ETH_TUNNEL_TYPE_NONE = 0,
1459 RTE_ETH_TUNNEL_TYPE_VXLAN,
1460 RTE_ETH_TUNNEL_TYPE_GENEVE,
1461 RTE_ETH_TUNNEL_TYPE_TEREDO,
1462 RTE_ETH_TUNNEL_TYPE_NVGRE,
1463 RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1464 RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1465 RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1466 RTE_ETH_TUNNEL_TYPE_ECPRI,
1467 RTE_ETH_TUNNEL_TYPE_MAX,
1468};
1469
1470#ifdef __cplusplus
1471}
1472#endif
1473
1474/* Deprecated API file for rte_eth_dev_filter_* functions */
1475#include "rte_eth_ctrl.h"
1476
1477#ifdef __cplusplus
1478extern "C" {
1479#endif
1480
1491 uint16_t udp_port;
1492 uint8_t prot_type;
1493};
1494
1500 uint32_t lsc:1;
1502 uint32_t rxq:1;
1504 uint32_t rmv:1;
1505};
1506
1507#define rte_intr_conf rte_eth_intr_conf
1508
1515 uint32_t link_speeds;
1524 uint32_t lpbk_mode;
1529 struct {
1534 struct rte_eth_dcb_rx_conf dcb_rx_conf;
1538 union {
1540 struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1542 struct rte_eth_dcb_tx_conf dcb_tx_conf;
1544 struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1550};
1551
1555#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP RTE_BIT64(0)
1556#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1557#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1558#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1559#define RTE_ETH_RX_OFFLOAD_TCP_LRO RTE_BIT64(4)
1560#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP RTE_BIT64(5)
1561#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1562#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP RTE_BIT64(7)
1563#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER RTE_BIT64(9)
1564#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND RTE_BIT64(10)
1565#define RTE_ETH_RX_OFFLOAD_SCATTER RTE_BIT64(13)
1571#define RTE_ETH_RX_OFFLOAD_TIMESTAMP RTE_BIT64(14)
1572#define RTE_ETH_RX_OFFLOAD_SECURITY RTE_BIT64(15)
1573#define RTE_ETH_RX_OFFLOAD_KEEP_CRC RTE_BIT64(16)
1574#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM RTE_BIT64(17)
1575#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(18)
1576#define RTE_ETH_RX_OFFLOAD_RSS_HASH RTE_BIT64(19)
1577#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT RTE_BIT64(20)
1578
1579#define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1580 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1581 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1582#define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1583 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1584 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1585 RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1586
1587/*
1588 * If new Rx offload capabilities are defined, they also must be
1589 * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1590 */
1591
1595#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT RTE_BIT64(0)
1596#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1597#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1598#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1599#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM RTE_BIT64(4)
1600#define RTE_ETH_TX_OFFLOAD_TCP_TSO RTE_BIT64(5)
1601#define RTE_ETH_TX_OFFLOAD_UDP_TSO RTE_BIT64(6)
1602#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)
1603#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT RTE_BIT64(8)
1604#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO RTE_BIT64(9)
1605#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO RTE_BIT64(10)
1606#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO RTE_BIT64(11)
1607#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO RTE_BIT64(12)
1608#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT RTE_BIT64(13)
1613#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE RTE_BIT64(14)
1615#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS RTE_BIT64(15)
1623#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE RTE_BIT64(16)
1624#define RTE_ETH_TX_OFFLOAD_SECURITY RTE_BIT64(17)
1630#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO RTE_BIT64(18)
1636#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO RTE_BIT64(19)
1638#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(20)
1644#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1645/*
1646 * If new Tx offload capabilities are defined, they also must be
1647 * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1648 */
1649
1654#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1656#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1666#define RTE_ETH_DEV_CAPA_RXQ_SHARE RTE_BIT64(2)
1668#define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP RTE_BIT64(3)
1670#define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1673/*
1674 * Fallback default preferred Rx/Tx port parameters.
1675 * These are used if an application requests default parameters
1676 * but the PMD does not provide preferred values.
1677 */
1678#define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1679#define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1680#define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1681#define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1682
1689 uint16_t burst_size;
1690 uint16_t ring_size;
1691 uint16_t nb_queues;
1692};
1693
1698#define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1699
1704 const char *name;
1705 uint16_t domain_id;
1713 uint16_t port_id;
1719 uint16_t rx_domain;
1720};
1721
1729 __extension__
1730 uint32_t multi_pools:1;
1731 uint32_t offset_allowed:1;
1733 uint16_t max_nseg;
1734 uint16_t reserved;
1735};
1736
1749};
1750
1771};
1772
1779 struct rte_device *device;
1780 const char *driver_name;
1781 unsigned int if_index;
1783 uint16_t min_mtu;
1784 uint16_t max_mtu;
1785 const uint32_t *dev_flags;
1795 uint32_t max_rx_pktlen;
1798 uint16_t max_rx_queues;
1799 uint16_t max_tx_queues;
1800 uint32_t max_mac_addrs;
1803 uint16_t max_vfs;
1815 uint16_t reta_size;
1817 uint32_t rss_algo_capa;
1827 uint32_t speed_capa;
1829 uint16_t nb_rx_queues;
1830 uint16_t nb_tx_queues;
1843 uint64_t dev_capa;
1851
1852 uint64_t reserved_64s[2];
1853 void *reserved_ptrs[2];
1854};
1855
1857#define RTE_ETH_QUEUE_STATE_STOPPED 0
1858#define RTE_ETH_QUEUE_STATE_STARTED 1
1859#define RTE_ETH_QUEUE_STATE_HAIRPIN 2
1867 struct rte_mempool *mp;
1868 struct rte_eth_rxconf conf;
1870 uint8_t queue_state;
1871 uint16_t nb_desc;
1872 uint16_t rx_buf_size;
1880};
1881
1887 struct rte_eth_txconf conf;
1888 uint16_t nb_desc;
1889 uint8_t queue_state;
1890};
1891
1902 struct rte_mempool *mp;
1903 uint16_t *refill_head;
1904 uint16_t *receive_tail;
1914};
1915
1916/* Generic Burst mode flag definition, values can be ORed. */
1917
1923#define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1924
1930 uint64_t flags;
1932#define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1934};
1935
1937#define RTE_ETH_XSTATS_NAME_SIZE 64
1938
1949 uint64_t id;
1950 uint64_t value;
1951};
1952
1969};
1970
1971#define RTE_ETH_DCB_NUM_TCS 8
1972#define RTE_ETH_MAX_VMDQ_POOL 64
1973
1980 struct {
1981 uint16_t base;
1982 uint16_t nb_queue;
1983 } tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1985 struct {
1986 uint16_t base;
1987 uint16_t nb_queue;
1988 } tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1989};
1990
1996 uint8_t nb_tcs;
1998 uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS];
2001};
2002
2013};
2014
2015/* Translate from FEC mode to FEC capa */
2016#define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)
2017
2018/* This macro indicates FEC capa mask */
2019#define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)
2020
2021/* A structure used to get capabilities per link speed */
2022struct rte_eth_fec_capa {
2023 uint32_t speed; /**< link speed — presumably an RTE_ETH_SPEED_NUM_* value in Mbps; confirm */
2024 uint32_t capa; /**< bitmask of FEC capabilities, bits built with RTE_ETH_FEC_MODE_TO_CAPA() */
2025};
2026
2027#define RTE_ETH_ALL RTE_MAX_ETHPORTS
2028
2029/* Macros to check for valid port */
/*
 * Validate @p port_id; if invalid, log an error and return @p retval
 * from the calling function. For use in functions returning a value.
 */
2030#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
2031 if (!rte_eth_dev_is_valid_port(port_id)) { \
2032 RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
2033 return retval; \
2034 } \
2035} while (0)
2036
/*
 * Same validity check for void functions: log an error and plain-return
 * when @p port_id is invalid.
 */
2037#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
2038 if (!rte_eth_dev_is_valid_port(port_id)) { \
2039 RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
2040 return; \
2041 } \
2042} while (0)
2043
2066typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
2067 struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
2068 void *user_param);
2069
2090typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
2091 struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
2092
2103};
2104
/* Per-device SR-IOV state; retrieved via RTE_ETH_DEV_SRIOV(dev). */
2105struct rte_eth_dev_sriov {
2106 uint8_t active; /* nonzero when SR-IOV is active — NOTE(review): inferred from name, confirm */
2107 uint8_t nb_q_per_pool; /* queues per pool — NOTE(review): inferred from name, confirm */
2108 uint16_t def_vmdq_idx; /* default VMDq pool index — inferred from name */
2109 uint16_t def_pool_q_idx; /* default queue index within the pool — inferred from name */
2110};
2111#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
2112
2113#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2114
2115#define RTE_ETH_DEV_NO_OWNER 0
2116
2117#define RTE_ETH_MAX_OWNER_NAME_LEN 64
2118
/*
 * Identity of an ethdev port owner: a numeric owner ID plus a
 * human-readable name of at most RTE_ETH_MAX_OWNER_NAME_LEN bytes
 * (including the terminating NUL).
 */
2119struct rte_eth_dev_owner {
2120 uint64_t id; /**< owner identifier; RTE_ETH_DEV_NO_OWNER (0) presumably means unowned — confirm */
2121 char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< owner name */
2122};
2123
2129#define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE RTE_BIT32(0)
2131#define RTE_ETH_DEV_INTR_LSC RTE_BIT32(1)
2133#define RTE_ETH_DEV_BONDING_MEMBER RTE_BIT32(2)
2135#define RTE_ETH_DEV_INTR_RMV RTE_BIT32(3)
2137#define RTE_ETH_DEV_REPRESENTOR RTE_BIT32(4)
2139#define RTE_ETH_DEV_NOLIVE_MAC_ADDR RTE_BIT32(5)
2144#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2158uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2159 const uint64_t owner_id);
2160
2164#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
2165 for (p = rte_eth_find_next_owned_by(0, o); \
2166 (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
2167 p = rte_eth_find_next_owned_by(p + 1, o))
2168
2177uint16_t rte_eth_find_next(uint16_t port_id);
2178
2182#define RTE_ETH_FOREACH_DEV(p) \
2183 RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
2184
2196uint16_t
2197rte_eth_find_next_of(uint16_t port_id_start,
2198 const struct rte_device *parent);
2199
2208#define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
2209 for (port_id = rte_eth_find_next_of(0, parent); \
2210 port_id < RTE_MAX_ETHPORTS; \
2211 port_id = rte_eth_find_next_of(port_id + 1, parent))
2212
2224uint16_t
2225rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2226
2237#define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
2238 for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
2239 port_id < RTE_MAX_ETHPORTS; \
2240 port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2241
2252int rte_eth_dev_owner_new(uint64_t *owner_id);
2253
2264int rte_eth_dev_owner_set(const uint16_t port_id,
2265 const struct rte_eth_dev_owner *owner);
2266
2277int rte_eth_dev_owner_unset(const uint16_t port_id,
2278 const uint64_t owner_id);
2279
2288int rte_eth_dev_owner_delete(const uint64_t owner_id);
2289
2300int rte_eth_dev_owner_get(const uint16_t port_id,
2301 struct rte_eth_dev_owner *owner);
2302
2314
2324
2336uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2337
2346const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2347
2356const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2357
2369__rte_experimental
2370const char *rte_eth_dev_capability_name(uint64_t capability);
2371
2411int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2412 uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2413
2422int
2423rte_eth_dev_is_removed(uint16_t port_id);
2424
2487int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2488 uint16_t nb_rx_desc, unsigned int socket_id,
2489 const struct rte_eth_rxconf *rx_conf,
2490 struct rte_mempool *mb_pool);
2491
2519__rte_experimental
2521 (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2522 const struct rte_eth_hairpin_conf *conf);
2523
2572int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2573 uint16_t nb_tx_desc, unsigned int socket_id,
2574 const struct rte_eth_txconf *tx_conf);
2575
2601__rte_experimental
2603 (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2604 const struct rte_eth_hairpin_conf *conf);
2605
2632__rte_experimental
2633int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2634 size_t len, uint32_t direction);
2635
2658__rte_experimental
2659int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2660
2685__rte_experimental
2686int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2687
2703__rte_experimental
2704int rte_eth_dev_count_aggr_ports(uint16_t port_id);
2705
2733__rte_experimental
2734int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
2735 uint8_t affinity);
2736
2749int rte_eth_dev_socket_id(uint16_t port_id);
2750
2760int rte_eth_dev_is_valid_port(uint16_t port_id);
2761
2778__rte_experimental
2779int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2780
2797__rte_experimental
2798int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2799
2817int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2818
2835int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2836
2854int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2855
2872int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2873
2897int rte_eth_dev_start(uint16_t port_id);
2898
2912int rte_eth_dev_stop(uint16_t port_id);
2913
2926int rte_eth_dev_set_link_up(uint16_t port_id);
2927
2937int rte_eth_dev_set_link_down(uint16_t port_id);
2938
2949int rte_eth_dev_close(uint16_t port_id);
2950
2988int rte_eth_dev_reset(uint16_t port_id);
2989
3001int rte_eth_promiscuous_enable(uint16_t port_id);
3002
3014int rte_eth_promiscuous_disable(uint16_t port_id);
3015
3026int rte_eth_promiscuous_get(uint16_t port_id);
3027
3039int rte_eth_allmulticast_enable(uint16_t port_id);
3040
3052int rte_eth_allmulticast_disable(uint16_t port_id);
3053
3064int rte_eth_allmulticast_get(uint16_t port_id);
3065
3083int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
3085
3100int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
3102
3116__rte_experimental
3117const char *rte_eth_link_speed_to_str(uint32_t link_speed);
3118
3137__rte_experimental
3138int rte_eth_link_to_str(char *str, size_t len,
3139 const struct rte_eth_link *eth_link);
3140
3161__rte_experimental
3162int rte_eth_speed_lanes_get(uint16_t port_id, uint32_t *lanes);
3163
3185__rte_experimental
3186int rte_eth_speed_lanes_set(uint16_t port_id, uint32_t speed_lanes);
3187
3210__rte_experimental
3212 struct rte_eth_speed_lanes_capa *speed_lanes_capa,
3213 unsigned int num);
3214
3232int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
3233
3245int rte_eth_stats_reset(uint16_t port_id);
3246
3276int rte_eth_xstats_get_names(uint16_t port_id,
3277 struct rte_eth_xstat_name *xstats_names,
3278 unsigned int size);
3279
3313int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3314 unsigned int n);
3315
3340int
3342 struct rte_eth_xstat_name *xstats_names, unsigned int size,
3343 uint64_t *ids);
3344
3369int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3370 uint64_t *values, unsigned int size);
3371
3391int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3392 uint64_t *id);
3393
3408__rte_experimental
3409int rte_eth_xstats_set_counter(uint16_t port_id, uint64_t id, int on_off);
3410
3422__rte_experimental
3423int rte_eth_xstats_query_state(uint16_t port_id, uint64_t id);
3424
3437int rte_eth_xstats_reset(uint16_t port_id);
3438
3458 uint16_t tx_queue_id, uint8_t stat_idx);
3459
3479 uint16_t rx_queue_id,
3480 uint8_t stat_idx);
3481
3495int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3496
3517__rte_experimental
3518int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3519 unsigned int num);
3520
3540int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3542
3558__rte_experimental
3559int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3561
3582int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3584
3624int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3625 uint32_t *ptypes, int num)
3627
3658int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3659 uint32_t *set_ptypes, unsigned int num);
3660
3673int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3674
3692int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3693
3713int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3714
3733int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3734 int on);
3735
3753 enum rte_vlan_type vlan_type,
3754 uint16_t tag_type);
3755
3773int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3774
3788int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3789
3804int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3805
3831__rte_experimental
3832int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
3833 uint8_t avail_thresh);
3834
3861__rte_experimental
3862int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
3863 uint8_t *avail_thresh);
3864
3865typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3866 void *userdata);
3867
3873 buffer_tx_error_fn error_callback;
3874 void *error_userdata;
3875 uint16_t size;
3876 uint16_t length;
3878 struct rte_mbuf *pkts[];
3879};
3880
3887#define RTE_ETH_TX_BUFFER_SIZE(sz) \
3888 (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3889
3900int
3901rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3902
3927int
3929 buffer_tx_error_fn callback, void *userdata);
3930
3953void
3954rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3955 void *userdata);
3956
3980void
3981rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3982 void *userdata);
3983
4009int
4010rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
4011
4044};
4045
4065};
4066
4085 uint64_t metadata;
4086};
4087
4126
4151 uint64_t metadata;
4152};
4153
4237
4251typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
4252 enum rte_eth_event_type event, void *cb_arg, void *ret_param);
4253
4272 enum rte_eth_event_type event,
4273 rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4274
4294 enum rte_eth_event_type event,
4295 rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4296
4318int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
4319
4340int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
4341
4359int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
4360
4382int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4383 int epfd, int op, void *data);
4384
4399int
4400rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
4401
4415int rte_eth_led_on(uint16_t port_id);
4416
4430int rte_eth_led_off(uint16_t port_id);
4431
4460__rte_experimental
4461int rte_eth_fec_get_capability(uint16_t port_id,
4462 struct rte_eth_fec_capa *speed_fec_capa,
4463 unsigned int num);
4464
4485__rte_experimental
4486int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4487
4511__rte_experimental
4512int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
4513
4528int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4529 struct rte_eth_fc_conf *fc_conf);
4530
4545int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4546 struct rte_eth_fc_conf *fc_conf);
4547
4564 struct rte_eth_pfc_conf *pfc_conf);
4565
4584int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4585 uint32_t pool);
4586
4604__rte_experimental
4606 struct rte_eth_pfc_queue_info *pfc_queue_info);
4607
4631__rte_experimental
4633 struct rte_eth_pfc_queue_conf *pfc_queue_conf);
4634
4649int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4650 struct rte_ether_addr *mac_addr);
4651
4670 struct rte_ether_addr *mac_addr);
4671
4689int rte_eth_dev_rss_reta_update(uint16_t port_id,
4690 struct rte_eth_rss_reta_entry64 *reta_conf,
4691 uint16_t reta_size);
4692
4711int rte_eth_dev_rss_reta_query(uint16_t port_id,
4712 struct rte_eth_rss_reta_entry64 *reta_conf,
4713 uint16_t reta_size);
4714
4734int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4735 uint8_t on);
4736
4755int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4756
4773int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4774 uint32_t tx_rate);
4775
4790int rte_eth_dev_rss_hash_update(uint16_t port_id,
4791 struct rte_eth_rss_conf *rss_conf);
4792
4808int
4810 struct rte_eth_rss_conf *rss_conf);
4811
4824__rte_experimental
4825const char *
4827
4844__rte_experimental
4845int
4846rte_eth_find_rss_algo(const char *name, uint32_t *algo);
4847
4872int
4874 struct rte_eth_udp_tunnel *tunnel_udp);
4875
4895int
4897 struct rte_eth_udp_tunnel *tunnel_udp);
4898
4913int rte_eth_dev_get_dcb_info(uint16_t port_id,
4914 struct rte_eth_dcb_info *dcb_info);
4915
4916struct rte_eth_rxtx_callback;
4917
4943const struct rte_eth_rxtx_callback *
4944rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4945 rte_rx_callback_fn fn, void *user_param);
4946
4973const struct rte_eth_rxtx_callback *
4974rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4975 rte_rx_callback_fn fn, void *user_param);
4976
5002const struct rte_eth_rxtx_callback *
5003rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
5004 rte_tx_callback_fn fn, void *user_param);
5005
5039int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
5040 const struct rte_eth_rxtx_callback *user_cb);
5041
5075int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5076 const struct rte_eth_rxtx_callback *user_cb);
5077
5097int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5098 struct rte_eth_rxq_info *qinfo);
5099
5119int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5120 struct rte_eth_txq_info *qinfo);
5121
5142__rte_experimental
5144 uint16_t queue_id,
5145 struct rte_eth_recycle_rxq_info *recycle_rxq_info);
5146
5165int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5166 struct rte_eth_burst_mode *mode);
5167
5186int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5187 struct rte_eth_burst_mode *mode);
5188
5209__rte_experimental
5210int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5211 struct rte_power_monitor_cond *pmc);
5212
5239__rte_experimental
5240int rte_eth_dev_get_reg_info_ext(uint16_t port_id, struct rte_dev_reg_info *info);
5241
5260int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5262
5275int rte_eth_dev_get_eeprom_length(uint16_t port_id);
5276
5293int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5294
5311int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5312
5331__rte_experimental
5332int
5335
5355__rte_experimental
5356int
5357rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5359
5379int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5380 struct rte_ether_addr *mc_addr_set,
5381 uint32_t nb_mc_addr);
5382
5395int rte_eth_timesync_enable(uint16_t port_id);
5396
5409int rte_eth_timesync_disable(uint16_t port_id);
5410
5430 struct timespec *timestamp, uint32_t flags);
5431
5448 struct timespec *timestamp);
5449
5467int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
5468
5509__rte_experimental
5510int rte_eth_timesync_adjust_freq(uint16_t port_id, int64_t ppm);
5511
5527int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
5528
5547int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
5548
5594__rte_experimental
5595int
5596rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
5597
5613int
5614rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
5615
5632int
5633rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5634
5652 uint16_t *nb_rx_desc,
5653 uint16_t *nb_tx_desc);
5654
5669int
5670rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5671
5681void *
5682rte_eth_dev_get_sec_ctx(uint16_t port_id);
5683
5699__rte_experimental
5701 struct rte_eth_hairpin_cap *cap);
5702
5712 int pf;
5713 __extension__
5714 union {
5715 int vf;
5716 int sf;
5717 };
5718 uint32_t id_base;
5719 uint32_t id_end;
5720 char name[RTE_DEV_NAME_MAX_LEN];
5721};
5722
5730 uint16_t controller;
5731 uint16_t pf;
5733 uint32_t nb_ranges;
5735};
5736
5760__rte_experimental
5761int rte_eth_representor_info_get(uint16_t port_id,
5762 struct rte_eth_representor_info *info);
5763
5765#define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5766
5768#define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5769
5771#define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5772
5812int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
5813
5815#define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
5817#define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))
5818
5829 uint32_t timeout_ms;
5831 uint16_t max_frags;
5836 uint16_t flags;
5837};
5838
5859__rte_experimental
5861 struct rte_eth_ip_reassembly_params *capa);
5862
5884__rte_experimental
5886 struct rte_eth_ip_reassembly_params *conf);
5887
5917__rte_experimental
5919 const struct rte_eth_ip_reassembly_params *conf);
5920
5928typedef struct {
5935 uint16_t time_spent;
5937 uint16_t nb_frags;
5939
5958__rte_experimental
5959int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file);
5960
5984__rte_experimental
5985int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5986 uint16_t offset, uint16_t num, FILE *file);
5987
6011__rte_experimental
6012int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
6013 uint16_t offset, uint16_t num, FILE *file);
6014
6015
6016/* Congestion management */
6017
6027};
6028
6050 uint8_t rsvd[8];
6051};
6052
6064 union {
6071 uint16_t rx_queue;
6079 } obj_param;
6080 union {
6094 } mode_param;
6095};
6096
6114__rte_experimental
6115int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info);
6116
6134__rte_experimental
6135int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config);
6136
6153__rte_experimental
6154int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config);
6155
6176__rte_experimental
6177int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config);
6178
6179#ifdef __cplusplus
6180}
6181#endif
6182
6183#include <rte_ethdev_core.h>
6184
6185#ifdef __cplusplus
6186extern "C" {
6187#endif
6188
6212uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
6213 struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
6214 void *opaque);
6215
6303static inline uint16_t
6304rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
6305 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
6306{
6307 uint16_t nb_rx;
6308 struct rte_eth_fp_ops *p;
6309 void *qd;
6310
6311#ifdef RTE_ETHDEV_DEBUG_RX
6312 if (port_id >= RTE_MAX_ETHPORTS ||
6313 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6314 RTE_ETHDEV_LOG_LINE(ERR,
6315 "Invalid port_id=%u or queue_id=%u",
6316 port_id, queue_id);
6317 return 0;
6318 }
6319#endif
6320
6321 /* fetch pointer to queue data */
6322 p = &rte_eth_fp_ops[port_id];
6323 qd = p->rxq.data[queue_id];
6324
6325#ifdef RTE_ETHDEV_DEBUG_RX
6326 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6327
6328 if (qd == NULL) {
6329 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
6330 queue_id, port_id);
6331 return 0;
6332 }
6333#endif
6334
6335 nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
6336
6337#ifdef RTE_ETHDEV_RXTX_CALLBACKS
6338 {
6339 void *cb;
6340
6341 /* rte_memory_order_release memory order was used when the
6342 * call back was inserted into the list.
6343 * Since there is a clear dependency between loading
6344 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6345 * not required.
6346 */
6347 cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
6348 rte_memory_order_relaxed);
6349 if (unlikely(cb != NULL))
6350 nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
6351 rx_pkts, nb_rx, nb_pkts, cb);
6352 }
6353#endif
6354
6355 if (unlikely(nb_rx))
6356 rte_ethdev_trace_rx_burst_nonempty(port_id, queue_id, (void **)rx_pkts, nb_rx);
6357 else
6358 rte_ethdev_trace_rx_burst_empty(port_id, queue_id, (void **)rx_pkts);
6359 return nb_rx;
6360}
6361
6379static inline int
6380rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
6381{
6382 struct rte_eth_fp_ops *p;
6383 void *qd;
6384
6385#ifdef RTE_ETHDEV_DEBUG_RX
6386 if (port_id >= RTE_MAX_ETHPORTS ||
6387 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6388 RTE_ETHDEV_LOG_LINE(ERR,
6389 "Invalid port_id=%u or queue_id=%u",
6390 port_id, queue_id);
6391 return -EINVAL;
6392 }
6393#endif
6394
6395 /* fetch pointer to queue data */
6396 p = &rte_eth_fp_ops[port_id];
6397 qd = p->rxq.data[queue_id];
6398
6399#ifdef RTE_ETHDEV_DEBUG_RX
6400 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6401 if (qd == NULL)
6402 return -EINVAL;
6403#endif
6404
6405 if (p->rx_queue_count == NULL)
6406 return -ENOTSUP;
6407 return (int)p->rx_queue_count(qd);
6408}
6409
/** Rx descriptor states reported by rte_eth_rx_descriptor_status(). */
#define RTE_ETH_RX_DESC_AVAIL    0 /**< Descriptor available for the HW. */
#define RTE_ETH_RX_DESC_DONE     1 /**< Descriptor done, filled by the HW. */
#define RTE_ETH_RX_DESC_UNAVAIL  2 /**< Descriptor unavailable. */
6451static inline int
6452rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
6453 uint16_t offset)
6454{
6455 struct rte_eth_fp_ops *p;
6456 void *qd;
6457
6458#ifdef RTE_ETHDEV_DEBUG_RX
6459 if (port_id >= RTE_MAX_ETHPORTS ||
6460 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6461 RTE_ETHDEV_LOG_LINE(ERR,
6462 "Invalid port_id=%u or queue_id=%u",
6463 port_id, queue_id);
6464 return -EINVAL;
6465 }
6466#endif
6467
6468 /* fetch pointer to queue data */
6469 p = &rte_eth_fp_ops[port_id];
6470 qd = p->rxq.data[queue_id];
6471
6472#ifdef RTE_ETHDEV_DEBUG_RX
6473 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6474 if (qd == NULL)
6475 return -ENODEV;
6476#endif
6477 if (p->rx_descriptor_status == NULL)
6478 return -ENOTSUP;
6479 return p->rx_descriptor_status(qd, offset);
6480}
6481
/** Tx descriptor states reported by rte_eth_tx_descriptor_status(). */
#define RTE_ETH_TX_DESC_FULL     0 /**< Descriptor in use, not freed yet. */
#define RTE_ETH_TX_DESC_DONE     1 /**< Descriptor done, can be reused. */
#define RTE_ETH_TX_DESC_UNAVAIL  2 /**< Descriptor unavailable. */
6523static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
6524 uint16_t queue_id, uint16_t offset)
6525{
6526 struct rte_eth_fp_ops *p;
6527 void *qd;
6528
6529#ifdef RTE_ETHDEV_DEBUG_TX
6530 if (port_id >= RTE_MAX_ETHPORTS ||
6531 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6532 RTE_ETHDEV_LOG_LINE(ERR,
6533 "Invalid port_id=%u or queue_id=%u",
6534 port_id, queue_id);
6535 return -EINVAL;
6536 }
6537#endif
6538
6539 /* fetch pointer to queue data */
6540 p = &rte_eth_fp_ops[port_id];
6541 qd = p->txq.data[queue_id];
6542
6543#ifdef RTE_ETHDEV_DEBUG_TX
6544 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6545 if (qd == NULL)
6546 return -ENODEV;
6547#endif
6548 if (p->tx_descriptor_status == NULL)
6549 return -ENOTSUP;
6550 return p->tx_descriptor_status(qd, offset);
6551}
6552
6572uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
6573 struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
6574
6646static inline uint16_t
6647rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
6648 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6649{
6650 struct rte_eth_fp_ops *p;
6651 void *qd;
6652
6653#ifdef RTE_ETHDEV_DEBUG_TX
6654 if (port_id >= RTE_MAX_ETHPORTS ||
6655 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6656 RTE_ETHDEV_LOG_LINE(ERR,
6657 "Invalid port_id=%u or queue_id=%u",
6658 port_id, queue_id);
6659 return 0;
6660 }
6661#endif
6662
6663 /* fetch pointer to queue data */
6664 p = &rte_eth_fp_ops[port_id];
6665 qd = p->txq.data[queue_id];
6666
6667#ifdef RTE_ETHDEV_DEBUG_TX
6668 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6669
6670 if (qd == NULL) {
6671 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6672 queue_id, port_id);
6673 return 0;
6674 }
6675#endif
6676
6677#ifdef RTE_ETHDEV_RXTX_CALLBACKS
6678 {
6679 void *cb;
6680
6681 /* rte_memory_order_release memory order was used when the
6682 * call back was inserted into the list.
6683 * Since there is a clear dependency between loading
6684 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6685 * not required.
6686 */
6687 cb = rte_atomic_load_explicit(&p->txq.clbk[queue_id],
6688 rte_memory_order_relaxed);
6689 if (unlikely(cb != NULL))
6690 nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
6691 tx_pkts, nb_pkts, cb);
6692 }
6693#endif
6694
6695 nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
6696
6697 rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
6698 return nb_pkts;
6699}
6700
6754#ifndef RTE_ETHDEV_TX_PREPARE_NOOP
6755
6756static inline uint16_t
6757rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
6758 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6759{
6760 struct rte_eth_fp_ops *p;
6761 void *qd;
6762
6763#ifdef RTE_ETHDEV_DEBUG_TX
6764 if (port_id >= RTE_MAX_ETHPORTS ||
6765 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6766 RTE_ETHDEV_LOG_LINE(ERR,
6767 "Invalid port_id=%u or queue_id=%u",
6768 port_id, queue_id);
6769 rte_errno = ENODEV;
6770 return 0;
6771 }
6772#endif
6773
6774 /* fetch pointer to queue data */
6775 p = &rte_eth_fp_ops[port_id];
6776 qd = p->txq.data[queue_id];
6777
6778#ifdef RTE_ETHDEV_DEBUG_TX
6779 if (!rte_eth_dev_is_valid_port(port_id)) {
6780 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx port_id=%u", port_id);
6781 rte_errno = ENODEV;
6782 return 0;
6783 }
6784 if (qd == NULL) {
6785 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6786 queue_id, port_id);
6787 rte_errno = EINVAL;
6788 return 0;
6789 }
6790#endif
6791
6792 if (!p->tx_pkt_prepare)
6793 return nb_pkts;
6794
6795 return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
6796}
6797
6798#else
6799
6800/*
6801 * Native NOOP operation for compilation targets which doesn't require any
6802 * preparations steps, and functional NOOP may introduce unnecessary performance
6803 * drop.
6804 *
6805 * Generally this is not a good idea to turn it on globally and didn't should
6806 * be used if behavior of tx_preparation can change.
6807 */
6808
6809static inline uint16_t
6810rte_eth_tx_prepare(__rte_unused uint16_t port_id,
6811 __rte_unused uint16_t queue_id,
6812 __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6813{
6814 return nb_pkts;
6815}
6816
6817#endif
6818
6841static inline uint16_t
6842rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
6843 struct rte_eth_dev_tx_buffer *buffer)
6844{
6845 uint16_t sent;
6846 uint16_t to_send = buffer->length;
6847
6848 if (to_send == 0)
6849 return 0;
6850
6851 sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
6852
6853 buffer->length = 0;
6854
6855 /* All packets sent, or to be dealt with by callback below */
6856 if (unlikely(sent != to_send))
6857 buffer->error_callback(&buffer->pkts[sent],
6858 (uint16_t)(to_send - sent),
6859 buffer->error_userdata);
6860
6861 return sent;
6862}
6863
6894static __rte_always_inline uint16_t
6895rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
6896 struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
6897{
6898 buffer->pkts[buffer->length++] = tx_pkt;
6899 if (buffer->length < buffer->size)
6900 return 0;
6901
6902 return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
6903}
6904
6958__rte_experimental
6959static inline uint16_t
6960rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
6961 uint16_t tx_port_id, uint16_t tx_queue_id,
6962 struct rte_eth_recycle_rxq_info *recycle_rxq_info)
6963{
6964 struct rte_eth_fp_ops *p1, *p2;
6965 void *qd1, *qd2;
6966 uint16_t nb_mbufs;
6967
6968#ifdef RTE_ETHDEV_DEBUG_TX
6969 if (tx_port_id >= RTE_MAX_ETHPORTS ||
6970 tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6971 RTE_ETHDEV_LOG_LINE(ERR,
6972 "Invalid tx_port_id=%u or tx_queue_id=%u",
6973 tx_port_id, tx_queue_id);
6974 return 0;
6975 }
6976#endif
6977
6978 /* fetch pointer to Tx queue data */
6979 p1 = &rte_eth_fp_ops[tx_port_id];
6980 qd1 = p1->txq.data[tx_queue_id];
6981
6982#ifdef RTE_ETHDEV_DEBUG_TX
6983 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port_id, 0);
6984
6985 if (qd1 == NULL) {
6986 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6987 tx_queue_id, tx_port_id);
6988 return 0;
6989 }
6990#endif
6991 if (p1->recycle_tx_mbufs_reuse == NULL)
6992 return 0;
6993
6994#ifdef RTE_ETHDEV_DEBUG_RX
6995 if (rx_port_id >= RTE_MAX_ETHPORTS ||
6996 rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6997 RTE_ETHDEV_LOG_LINE(ERR, "Invalid rx_port_id=%u or rx_queue_id=%u",
6998 rx_port_id, rx_queue_id);
6999 return 0;
7000 }
7001#endif
7002
7003 /* fetch pointer to Rx queue data */
7004 p2 = &rte_eth_fp_ops[rx_port_id];
7005 qd2 = p2->rxq.data[rx_queue_id];
7006
7007#ifdef RTE_ETHDEV_DEBUG_RX
7008 RTE_ETH_VALID_PORTID_OR_ERR_RET(rx_port_id, 0);
7009
7010 if (qd2 == NULL) {
7011 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
7012 rx_queue_id, rx_port_id);
7013 return 0;
7014 }
7015#endif
7016 if (p2->recycle_rx_descriptors_refill == NULL)
7017 return 0;
7018
7019 /* Copy used *rte_mbuf* buffer pointers from Tx mbuf ring
7020 * into Rx mbuf ring.
7021 */
7022 nb_mbufs = p1->recycle_tx_mbufs_reuse(qd1, recycle_rxq_info);
7023
7024 /* If no recycling mbufs, return 0. */
7025 if (nb_mbufs == 0)
7026 return 0;
7027
7028 /* Replenish the Rx descriptors with the recycling
7029 * into Rx mbuf ring.
7030 */
7031 p2->recycle_rx_descriptors_refill(qd2, nb_mbufs);
7032
7033 return nb_mbufs;
7034}
7035
7064__rte_experimental
7065int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
7067
7102__rte_experimental
7103static inline int
7104rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
7105{
7106 struct rte_eth_fp_ops *fops;
7107 void *qd;
7108 int rc;
7109
7110#ifdef RTE_ETHDEV_DEBUG_TX
7111 if (port_id >= RTE_MAX_ETHPORTS || !rte_eth_dev_is_valid_port(port_id)) {
7112 RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id);
7113 rc = -ENODEV;
7114 goto out;
7115 }
7116
7117 if (queue_id >= RTE_MAX_QUEUES_PER_PORT) {
7118 RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
7119 queue_id, port_id);
7120 rc = -EINVAL;
7121 goto out;
7122 }
7123#endif
7124
7125 /* Fetch pointer to Tx queue data */
7126 fops = &rte_eth_fp_ops[port_id];
7127 qd = fops->txq.data[queue_id];
7128
7129#ifdef RTE_ETHDEV_DEBUG_TX
7130 if (qd == NULL) {
7131 RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
7132 queue_id, port_id);
7133 rc = -EINVAL;
7134 goto out;
7135 }
7136#endif
7137 if (fops->tx_queue_count == NULL) {
7138 rc = -ENOTSUP;
7139 goto out;
7140 }
7141
7142 rc = fops->tx_queue_count(qd);
7143
7144out:
7145 rte_eth_trace_tx_queue_count(port_id, queue_id, rc);
7146 return rc;
7147}
7148
7149#ifdef __cplusplus
7150}
7151#endif
7152
7153#endif /* _RTE_ETHDEV_H_ */
#define RTE_BIT32(nr)
Definition: rte_bitops.h:44
#define unlikely(x)
rte_cman_mode
Definition: rte_cman.h:16
#define __rte_cache_min_aligned
Definition: rte_common.h:742
#define __rte_unused
Definition: rte_common.h:248
#define __rte_always_inline
Definition: rte_common.h:490
#define __rte_warn_unused_result
Definition: rte_common.h:481
#define rte_errno
Definition: rte_errno.h:29
rte_eth_nb_pools
Definition: rte_ethdev.h:925
@ RTE_ETH_64_POOLS
Definition: rte_ethdev.h:929
@ RTE_ETH_32_POOLS
Definition: rte_ethdev.h:928
@ RTE_ETH_8_POOLS
Definition: rte_ethdev.h:926
@ RTE_ETH_16_POOLS
Definition: rte_ethdev.h:927
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:4092
@ RTE_ETH_EVENT_IPSEC_PMD_ERROR_END
Definition: rte_ethdev.h:4096
@ RTE_ETH_EVENT_IPSEC_UNKNOWN
Definition: rte_ethdev.h:4098
@ RTE_ETH_EVENT_IPSEC_MAX
Definition: rte_ethdev.h:4124
@ RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY
Definition: rte_ethdev.h:4112
@ RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW
Definition: rte_ethdev.h:4100
@ RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY
Definition: rte_ethdev.h:4117
@ RTE_ETH_EVENT_IPSEC_PMD_ERROR_START
Definition: rte_ethdev.h:4094
@ RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY
Definition: rte_ethdev.h:4107
@ RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY
Definition: rte_ethdev.h:4102
@ RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY
Definition: rte_ethdev.h:4122
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
__rte_experimental int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config)
int rte_eth_dev_is_removed(uint16_t port_id)
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition: rte_ethdev.h:679
__rte_experimental int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id, uint8_t affinity)
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:6895
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
int rte_eth_dev_set_link_down(uint16_t port_id)
rte_eth_event_macsec_subtype
Definition: rte_ethdev.h:4016
@ RTE_ETH_SUBEVENT_MACSEC_UNKNOWN
Definition: rte_ethdev.h:4018
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_E_EQ0_C_EQ1
Definition: rte_ethdev.h:4028
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SL_GTE48
Definition: rte_ethdev.h:4033
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SC_EQ1_SCB_EQ1
Definition: rte_ethdev.h:4043
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_ES_EQ1_SC_EQ1
Definition: rte_ethdev.h:4038
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_V_EQ1
Definition: rte_ethdev.h:4023
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
rte_eth_event_type
Definition: rte_ethdev.h:4157
@ RTE_ETH_EVENT_RECOVERY_FAILED
Definition: rte_ethdev.h:4234
@ RTE_ETH_EVENT_UNKNOWN
Definition: rte_ethdev.h:4158
@ RTE_ETH_EVENT_VF_MBOX
Definition: rte_ethdev.h:4164
@ RTE_ETH_EVENT_IPSEC
Definition: rte_ethdev.h:4175
@ RTE_ETH_EVENT_INTR_RESET
Definition: rte_ethdev.h:4163
@ RTE_ETH_EVENT_INTR_RMV
Definition: rte_ethdev.h:4166
@ RTE_ETH_EVENT_ERR_RECOVERING
Definition: rte_ethdev.h:4198
@ RTE_ETH_EVENT_MACSEC
Definition: rte_ethdev.h:4165
@ RTE_ETH_EVENT_RECOVERY_SUCCESS
Definition: rte_ethdev.h:4229
@ RTE_ETH_EVENT_DESTROY
Definition: rte_ethdev.h:4174
@ RTE_ETH_EVENT_FLOW_AGED
Definition: rte_ethdev.h:4176
@ RTE_ETH_EVENT_QUEUE_STATE
Definition: rte_ethdev.h:4161
@ RTE_ETH_EVENT_INTR_LSC
Definition: rte_ethdev.h:4159
@ RTE_ETH_EVENT_MAX
Definition: rte_ethdev.h:4235
@ RTE_ETH_EVENT_RX_AVAIL_THRESH
Definition: rte_ethdev.h:4181
@ RTE_ETH_EVENT_NEW
Definition: rte_ethdev.h:4173
int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
int rte_eth_dev_is_valid_port(uint16_t port_id)
rte_eth_cman_obj
Definition: rte_ethdev.h:6019
@ RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL
Definition: rte_ethdev.h:6026
@ RTE_ETH_CMAN_OBJ_RX_QUEUE
Definition: rte_ethdev.h:6021
#define RTE_ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:855
__rte_experimental int rte_eth_speed_lanes_get_capability(uint16_t port_id, struct rte_eth_speed_lanes_capa *speed_lanes_capa, unsigned int num)
__rte_experimental int rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_recycle_rxq_info *recycle_rxq_info)
__rte_experimental int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
int rte_eth_dev_reset(uint16_t port_id)
#define RTE_ETH_BURST_MODE_INFO_SIZE
Definition: rte_ethdev.h:1932
__rte_experimental const char * rte_eth_dev_capability_name(uint64_t capability)
__rte_experimental int rte_eth_speed_lanes_get(uint16_t port_id, uint32_t *lanes)
int rte_eth_allmulticast_disable(uint16_t port_id)
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
rte_eth_dev_state
Definition: rte_ethdev.h:2096
@ RTE_ETH_DEV_ATTACHED
Definition: rte_ethdev.h:2100
@ RTE_ETH_DEV_UNUSED
Definition: rte_ethdev.h:2098
@ RTE_ETH_DEV_REMOVED
Definition: rte_ethdev.h:2102
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
__rte_experimental int rte_eth_dev_count_aggr_ports(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:6304
rte_eth_fec_mode
Definition: rte_ethdev.h:2007
@ RTE_ETH_FEC_NOFEC
Definition: rte_ethdev.h:2008
@ RTE_ETH_FEC_BASER
Definition: rte_ethdev.h:2010
@ RTE_ETH_FEC_AUTO
Definition: rte_ethdev.h:2009
@ RTE_ETH_FEC_RS
Definition: rte_ethdev.h:2011
@ RTE_ETH_FEC_LLRS
Definition: rte_ethdev.h:2012
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) __rte_warn_unused_result
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
rte_eth_err_handle_mode
Definition: rte_ethdev.h:1757
@ RTE_ETH_ERROR_HANDLE_MODE_PASSIVE
Definition: rte_ethdev.h:1764
@ RTE_ETH_ERROR_HANDLE_MODE_NONE
Definition: rte_ethdev.h:1759
@ RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE
Definition: rte_ethdev.h:1770
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) __rte_warn_unused_result
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:417
@ RTE_ETH_MQ_TX_DCB
Definition: rte_ethdev.h:419
@ RTE_ETH_MQ_TX_VMDQ_DCB
Definition: rte_ethdev.h:420
@ RTE_ETH_MQ_TX_VMDQ_ONLY
Definition: rte_ethdev.h:421
@ RTE_ETH_MQ_TX_NONE
Definition: rte_ethdev.h:418
int rte_eth_promiscuous_get(uint16_t port_id)
__rte_experimental int rte_eth_xstats_set_counter(uint16_t port_id, uint64_t id, int on_off)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
int rte_eth_led_off(uint16_t port_id)
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
__rte_experimental int rte_eth_speed_lanes_set(uint16_t port_id, uint32_t speed_lanes)
int rte_eth_dev_set_link_up(uint16_t port_id)
__rte_experimental int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
uint16_t rte_eth_find_next(uint16_t port_id)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:391
@ RTE_ETH_MQ_RX_DCB_RSS
Definition: rte_ethdev.h:400
@ RTE_ETH_MQ_RX_VMDQ_DCB_RSS
Definition: rte_ethdev.h:409
@ RTE_ETH_MQ_RX_DCB
Definition: rte_ethdev.h:398
@ RTE_ETH_MQ_RX_VMDQ_DCB
Definition: rte_ethdev.h:407
@ RTE_ETH_MQ_RX_VMDQ_RSS
Definition: rte_ethdev.h:405
@ RTE_ETH_MQ_RX_NONE
Definition: rte_ethdev.h:393
@ RTE_ETH_MQ_RX_RSS
Definition: rte_ethdev.h:396
@ RTE_ETH_MQ_RX_VMDQ_ONLY
Definition: rte_ethdev.h:403
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link) __rte_warn_unused_result
int rte_eth_allmulticast_get(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
int rte_eth_allmulticast_enable(uint16_t port_id)
int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
int rte_eth_promiscuous_enable(uint16_t port_id)
rte_eth_representor_type
Definition: rte_ethdev.h:1744
@ RTE_ETH_REPRESENTOR_PF
Definition: rte_ethdev.h:1748
@ RTE_ETH_REPRESENTOR_VF
Definition: rte_ethdev.h:1746
@ RTE_ETH_REPRESENTOR_SF
Definition: rte_ethdev.h:1747
@ RTE_ETH_REPRESENTOR_NONE
Definition: rte_ethdev.h:1745
__rte_experimental int rte_eth_timesync_adjust_freq(uint16_t port_id, int64_t ppm)
int rte_eth_timesync_enable(uint16_t port_id)
__rte_experimental int rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:854
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_experimental int rte_eth_ip_reassembly_capability_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *capa)
__rte_experimental int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) __rte_warn_unused_result
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
int rte_eth_dev_stop(uint16_t port_id)
int rte_eth_timesync_disable(uint16_t port_id)
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) __rte_warn_unused_result
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:2066
__rte_experimental int rte_eth_find_rss_algo(const char *name, uint32_t *algo)
__rte_experimental int rte_eth_dev_get_reg_info_ext(uint16_t port_id, struct rte_dev_reg_info *info)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6757
rte_eth_tunnel_type
Definition: rte_ethdev.h:1457
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
int rte_eth_promiscuous_disable(uint16_t port_id)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_dev_owner_delete(const uint64_t owner_id)
__rte_experimental int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6647
int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
__rte_experimental int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
__rte_experimental int rte_eth_xstats_query_state(uint16_t port_id, uint64_t id)
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:4251
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, struct rte_eth_pfc_queue_info *pfc_queue_info)
static __rte_experimental int rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:7104
#define RTE_ETH_MQ_RX_DCB_FLAG
Definition: rte_ethdev.h:383
uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num) __rte_warn_unused_result
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
__rte_experimental int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo) __rte_warn_unused_result
rte_eth_fc_mode
Definition: rte_ethdev.h:1368
@ RTE_ETH_FC_TX_PAUSE
Definition: rte_ethdev.h:1371
@ RTE_ETH_FC_RX_PAUSE
Definition: rte_ethdev.h:1370
@ RTE_ETH_FC_NONE
Definition: rte_ethdev.h:1369
@ RTE_ETH_FC_FULL
Definition: rte_ethdev.h:1372
rte_eth_event_macsec_type
Definition: rte_ethdev.h:4050
@ RTE_ETH_EVENT_MACSEC_RX_SA_PN_HARD_EXP
Definition: rte_ethdev.h:4056
@ RTE_ETH_EVENT_MACSEC_SA_NOT_VALID
Definition: rte_ethdev.h:4064
@ RTE_ETH_EVENT_MACSEC_RX_SA_PN_SOFT_EXP
Definition: rte_ethdev.h:4058
@ RTE_ETH_EVENT_MACSEC_UNKNOWN
Definition: rte_ethdev.h:4052
@ RTE_ETH_EVENT_MACSEC_TX_SA_PN_HARD_EXP
Definition: rte_ethdev.h:4060
@ RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR
Definition: rte_ethdev.h:4054
@ RTE_ETH_EVENT_MACSEC_TX_SA_PN_SOFT_EXP
Definition: rte_ethdev.h:4062
int rte_eth_led_on(uint16_t port_id)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
__rte_experimental int rte_eth_ip_reassembly_conf_set(uint16_t port_id, const struct rte_eth_ip_reassembly_params *conf)
__rte_experimental int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, uint8_t avail_thresh)
__rte_experimental int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
__rte_experimental int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, struct rte_eth_pfc_queue_conf *pfc_queue_conf)
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction)
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
#define RTE_ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:382
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link) __rte_warn_unused_result
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_close(uint16_t port_id)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
__rte_experimental int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
__rte_experimental int rte_eth_ip_reassembly_conf_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *conf)
__rte_experimental int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, struct rte_power_monitor_cond *pmc)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
#define RTE_ETH_MQ_RX_VMDQ_FLAG
Definition: rte_ethdev.h:384
__rte_experimental int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:6842
int rte_eth_dev_socket_id(uint16_t port_id)
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:6452
__rte_experimental int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
static int rte_eth_tx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:6523
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
static __rte_experimental uint16_t rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id, uint16_t tx_port_id, uint16_t tx_queue_id, struct rte_eth_recycle_rxq_info *recycle_rxq_info)
Definition: rte_ethdev.h:6960
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
int rte_eth_dev_owner_new(uint64_t *owner_id)
int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
__rte_experimental int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, uint8_t *avail_thresh)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
__rte_experimental const char * rte_eth_link_speed_to_str(uint32_t link_speed)
int rte_eth_xstats_reset(uint16_t port_id)
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint32_t tx_rate)
__rte_experimental const char * rte_eth_dev_rss_algo_name(enum rte_eth_hash_function rss_algo)
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) __rte_warn_unused_result
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
rte_vlan_type
Definition: rte_ethdev.h:448
@ RTE_ETH_VLAN_TYPE_OUTER
Definition: rte_ethdev.h:451
@ RTE_ETH_VLAN_TYPE_INNER
Definition: rte_ethdev.h:450
__rte_experimental int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, unsigned int num)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:2090
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
rte_eth_hash_function
Definition: rte_ethdev.h:466
@ RTE_ETH_HASH_FUNCTION_DEFAULT
Definition: rte_ethdev.h:468
@ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT
Definition: rte_ethdev.h:483
@ RTE_ETH_HASH_FUNCTION_SIMPLE_XOR
Definition: rte_ethdev.h:470
@ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ
Definition: rte_ethdev.h:476
@ RTE_ETH_HASH_FUNCTION_TOEPLITZ
Definition: rte_ethdev.h:469
uint16_t rte_eth_dev_count_total(void)
__rte_experimental int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num) __rte_warn_unused_result
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1937
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:6380
__rte_experimental int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config)
rte_eth_nb_tcs
Definition: rte_ethdev.h:916
@ RTE_ETH_4_TCS
Definition: rte_ethdev.h:917
@ RTE_ETH_8_TCS
Definition: rte_ethdev.h:918
__rte_experimental int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config)
__rte_experimental int rte_eth_representor_info_get(uint16_t port_id, struct rte_eth_representor_info *info)
int rte_eth_dev_start(uint16_t port_id)
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num)
char info[RTE_ETH_BURST_MODE_INFO_SIZE]
Definition: rte_ethdev.h:1933
uint8_t rsvd_mode_params[4]
Definition: rte_ethdev.h:6093
enum rte_eth_cman_obj obj
Definition: rte_ethdev.h:6061
struct rte_cman_red_params red
Definition: rte_ethdev.h:6086
uint8_t rsvd_obj_params[4]
Definition: rte_ethdev.h:6078
enum rte_cman_mode mode
Definition: rte_ethdev.h:6063
uint8_t rsvd[8]
Definition: rte_ethdev.h:6050
uint64_t modes_supported
Definition: rte_ethdev.h:6040
uint64_t objs_supported
Definition: rte_ethdev.h:6045
struct rte_eth_intr_conf intr_conf
Definition: rte_ethdev.h:1549
struct rte_eth_vmdq_rx_conf vmdq_rx_conf
Definition: rte_ethdev.h:1536
struct rte_eth_txmode txmode
Definition: rte_ethdev.h:1523
struct rte_eth_rxmode rxmode
Definition: rte_ethdev.h:1522
struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf
Definition: rte_ethdev.h:1532
uint32_t lpbk_mode
Definition: rte_ethdev.h:1524
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1548
struct rte_eth_conf::@151 rx_adv_conf
union rte_eth_conf::@152 tx_adv_conf
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf
Definition: rte_ethdev.h:1540
uint32_t link_speeds
Definition: rte_ethdev.h:1515
struct rte_eth_rss_conf rss_conf
Definition: rte_ethdev.h:1530
struct rte_eth_dcb_tx_conf dcb_tx_conf
Definition: rte_ethdev.h:1542
struct rte_eth_dcb_rx_conf dcb_rx_conf
Definition: rte_ethdev.h:1534
struct rte_eth_vmdq_tx_conf vmdq_tx_conf
Definition: rte_ethdev.h:1544
uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS]
Definition: rte_ethdev.h:1998
uint8_t prio_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:1997
struct rte_eth_dcb_tc_queue_mapping tc_queue
Definition: rte_ethdev.h:2000
struct rte_eth_dcb_tc_queue_mapping::@154 tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS]
struct rte_eth_dcb_tc_queue_mapping::@153 tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS]
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:1362
uint16_t nb_seg_max
Definition: rte_ethdev.h:1349
uint16_t nb_align
Definition: rte_ethdev.h:1339
uint32_t max_rx_bufsize
Definition: rte_ethdev.h:1794
uint32_t max_hash_mac_addrs
Definition: rte_ethdev.h:1802
struct rte_eth_desc_lim rx_desc_lim
Definition: rte_ethdev.h:1825
unsigned int if_index
Definition: rte_ethdev.h:1781
uint16_t max_rx_queues
Definition: rte_ethdev.h:1798
uint64_t dev_capa
Definition: rte_ethdev.h:1843
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1823
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1787
uint16_t max_tx_queues
Definition: rte_ethdev.h:1799
struct rte_eth_txconf default_txconf
Definition: rte_ethdev.h:1821
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1804
struct rte_device * device
Definition: rte_ethdev.h:1779
struct rte_eth_rxconf default_rxconf
Definition: rte_ethdev.h:1820
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1830
enum rte_eth_err_handle_mode err_handle_mode
Definition: rte_ethdev.h:1850
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1795
uint16_t max_mtu
Definition: rte_ethdev.h:1784
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1797
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1822
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1853
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1852
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1813
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1824
uint16_t min_mtu
Definition: rte_ethdev.h:1783
uint16_t reta_size
Definition: rte_ethdev.h:1815
struct rte_eth_desc_lim tx_desc_lim
Definition: rte_ethdev.h:1826
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1819
uint16_t max_rx_mempools
Definition: rte_ethdev.h:1837
uint16_t max_vfs
Definition: rte_ethdev.h:1803
struct rte_eth_dev_portconf default_txportconf
Definition: rte_ethdev.h:1841
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1809
const char * driver_name
Definition: rte_ethdev.h:1780
uint8_t hash_key_size
Definition: rte_ethdev.h:1816
uint32_t speed_capa
Definition: rte_ethdev.h:1827
struct rte_eth_dev_portconf default_rxportconf
Definition: rte_ethdev.h:1839
struct rte_eth_switch_info switch_info
Definition: rte_ethdev.h:1848
struct rte_eth_rxseg_capa rx_seg_capa
Definition: rte_ethdev.h:1805
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1811
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1807
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1829
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1800
const uint32_t * dev_flags
Definition: rte_ethdev.h:1785
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:3878
enum rte_eth_event_ipsec_subtype subtype
Definition: rte_ethdev.h:4133
enum rte_eth_event_macsec_type type
Definition: rte_ethdev.h:4073
enum rte_eth_event_macsec_subtype subtype
Definition: rte_ethdev.h:4075
uint32_t low_water
Definition: rte_ethdev.h:1382
uint16_t send_xon
Definition: rte_ethdev.h:1384
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:1385
uint32_t high_water
Definition: rte_ethdev.h:1381
uint16_t pause_time
Definition: rte_ethdev.h:1383
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:1386
uint16_t max_nb_queues
Definition: rte_ethdev.h:1234
struct rte_eth_hairpin_queue_cap tx_cap
Definition: rte_ethdev.h:1241
struct rte_eth_hairpin_queue_cap rx_cap
Definition: rte_ethdev.h:1240
uint32_t use_locked_device_memory
Definition: rte_ethdev.h:1301
struct rte_eth_fc_conf fc
Definition: rte_ethdev.h:1396
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:1433
enum rte_eth_fc_mode mode_capa
Definition: rte_ethdev.h:1412
struct rte_mempool * mp
Definition: rte_ethdev.h:1902
struct rte_mbuf ** mbuf_ring
Definition: rte_ethdev.h:1901
struct rte_eth_representor_range ranges[]
Definition: rte_ethdev.h:5734
enum rte_eth_representor_type type
Definition: rte_ethdev.h:5710
char name[RTE_DEV_NAME_MAX_LEN]
Definition: rte_ethdev.h:5720
uint8_t * rss_key
Definition: rte_ethdev.h:507
uint8_t rss_key_len
Definition: rte_ethdev.h:508
enum rte_eth_hash_function algorithm
Definition: rte_ethdev.h:514
uint64_t rss_hf
Definition: rte_ethdev.h:513
uint16_t reta[RTE_ETH_RETA_GROUP_SIZE]
Definition: rte_ethdev.h:909
struct rte_eth_thresh rx_thresh
Definition: rte_ethdev.h:1129
uint64_t offloads
Definition: rte_ethdev.h:1147
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1180
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1179
uint8_t rx_deferred_start
Definition: rte_ethdev.h:1132
uint16_t share_group
Definition: rte_ethdev.h:1140
uint8_t rx_drop_en
Definition: rte_ethdev.h:1131
uint16_t share_qid
Definition: rte_ethdev.h:1141
union rte_eth_rxseg * rx_seg
Definition: rte_ethdev.h:1155
struct rte_mempool ** rx_mempools
Definition: rte_ethdev.h:1176
uint16_t rx_nseg
Definition: rte_ethdev.h:1133
uint16_t rx_free_thresh
Definition: rte_ethdev.h:1130
uint32_t mtu
Definition: rte_ethdev.h:430
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:432
uint64_t offloads
Definition: rte_ethdev.h:438
void * reserved_ptrs[2]
Definition: rte_ethdev.h:441
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:440
enum rte_eth_rx_mq_mode mq_mode
Definition: rte_ethdev.h:429
uint8_t scattered_rx
Definition: rte_ethdev.h:1869
struct rte_mempool * mp
Definition: rte_ethdev.h:1867
uint8_t queue_state
Definition: rte_ethdev.h:1870
uint8_t avail_thresh
Definition: rte_ethdev.h:1879
uint16_t nb_desc
Definition: rte_ethdev.h:1871
uint16_t rx_buf_size
Definition: rte_ethdev.h:1872
__extension__ uint32_t multi_pools
Definition: rte_ethdev.h:1730
uint32_t offset_allowed
Definition: rte_ethdev.h:1731
uint32_t offset_align_log2
Definition: rte_ethdev.h:1732
struct rte_mempool * mp
Definition: rte_ethdev.h:1096
uint64_t imissed
Definition: rte_ethdev.h:271
uint64_t obytes
Definition: rte_ethdev.h:266
uint64_t opackets
Definition: rte_ethdev.h:264
uint64_t rx_nombuf
Definition: rte_ethdev.h:274
uint64_t ibytes
Definition: rte_ethdev.h:265
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:281
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:279
uint64_t ierrors
Definition: rte_ethdev.h:272
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:285
uint64_t ipackets
Definition: rte_ethdev.h:263
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:277
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:283
uint64_t oerrors
Definition: rte_ethdev.h:273
const char * name
Definition: rte_ethdev.h:1704
uint8_t hthresh
Definition: rte_ethdev.h:375
uint8_t pthresh
Definition: rte_ethdev.h:374
uint8_t wthresh
Definition: rte_ethdev.h:376
uint8_t tx_deferred_start
Definition: rte_ethdev.h:1192
uint64_t offloads
Definition: rte_ethdev.h:1198
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1201
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1200
struct rte_eth_thresh tx_thresh
Definition: rte_ethdev.h:1187
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:1188
uint16_t tx_free_thresh
Definition: rte_ethdev.h:1189
uint64_t offloads
Definition: rte_ethdev.h:1020
__extension__ uint8_t hw_vlan_insert_pvid
Definition: rte_ethdev.h:1029
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1032
__extension__ uint8_t hw_vlan_reject_tagged
Definition: rte_ethdev.h:1025
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1031
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:1027
enum rte_eth_tx_mq_mode mq_mode
Definition: rte_ethdev.h:1014
uint8_t queue_state
Definition: rte_ethdev.h:1889
uint16_t nb_desc
Definition: rte_ethdev.h:1888
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:967
uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:976
struct rte_eth_vmdq_dcb_conf::@147 pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]
uint8_t enable_default_pool
Definition: rte_ethdev.h:968
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:998
uint8_t enable_default_pool
Definition: rte_ethdev.h:999
struct rte_eth_vmdq_rx_conf::@148 pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]
char name[RTE_ETH_XSTATS_NAME_SIZE]
Definition: rte_ethdev.h:1968
uint64_t value
Definition: rte_ethdev.h:1950
uint64_t id
Definition: rte_ethdev.h:1949