DPDK 25.03.0-rc1
rte_ethdev.h
Go to the documentation of this file.
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
3 */
4
5#ifndef _RTE_ETHDEV_H_
6#define _RTE_ETHDEV_H_
7
148#include <stdint.h>
149
150/* Use this macro to check if LRO API is supported */
151#define RTE_ETHDEV_HAS_LRO_SUPPORT
152
153/* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
154#ifdef RTE_LIBRTE_ETHDEV_DEBUG
155#define RTE_ETHDEV_DEBUG_RX
156#define RTE_ETHDEV_DEBUG_TX
157#endif
158
159#include <rte_cman.h>
160#include <rte_compat.h>
161#include <rte_log.h>
162#include <rte_interrupts.h>
163#include <rte_dev.h>
164#include <rte_devargs.h>
165#include <rte_bitops.h>
166#include <rte_errno.h>
167#include <rte_common.h>
168#include <rte_config.h>
169#include <rte_power_intrinsics.h>
170
171#include "rte_ethdev_trace_fp.h"
172#include "rte_dev_info.h"
173
174#ifdef __cplusplus
175extern "C" {
176#endif
177
178extern int rte_eth_dev_logtype;
179#define RTE_LOGTYPE_ETHDEV rte_eth_dev_logtype
180
181#define RTE_ETHDEV_LOG_LINE(level, ...) \
182 RTE_LOG_LINE(level, ETHDEV, "" __VA_ARGS__)
183
184struct rte_mbuf;
185
202int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
203
219
233
/**
 * Iterate over Ethernet ports matching a devargs string.
 *
 * Initializes @p iter via rte_eth_iterator_init() (its return value is
 * deliberately ignored here) and then walks matching port ids with
 * rte_eth_iterator_next(); the loop ends when the iterator yields
 * RTE_MAX_ETHPORTS.
 *
 * @param id      uint16_t lvalue receiving each matching port id.
 * @param devargs Device arguments string used to match ports.
 * @param iter    Pointer to a struct rte_dev_iterator used as loop state.
 */
#define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
	for (rte_eth_iterator_init(iter, devargs), \
	     id = rte_eth_iterator_next(iter); \
	     id != RTE_MAX_ETHPORTS; \
	     id = rte_eth_iterator_next(iter))
252
263 uint64_t ipackets;
264 uint64_t opackets;
265 uint64_t ibytes;
266 uint64_t obytes;
271 uint64_t imissed;
272 uint64_t ierrors;
273 uint64_t oerrors;
274 uint64_t rx_nombuf;
275 /* Queue stats are limited to max 256 queues */
277 uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
279 uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
281 uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
283 uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
285 uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
286};
287
291#define RTE_ETH_LINK_SPEED_AUTONEG 0
292#define RTE_ETH_LINK_SPEED_FIXED RTE_BIT32(0)
293#define RTE_ETH_LINK_SPEED_10M_HD RTE_BIT32(1)
294#define RTE_ETH_LINK_SPEED_10M RTE_BIT32(2)
295#define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)
296#define RTE_ETH_LINK_SPEED_100M RTE_BIT32(4)
297#define RTE_ETH_LINK_SPEED_1G RTE_BIT32(5)
298#define RTE_ETH_LINK_SPEED_2_5G RTE_BIT32(6)
299#define RTE_ETH_LINK_SPEED_5G RTE_BIT32(7)
300#define RTE_ETH_LINK_SPEED_10G RTE_BIT32(8)
301#define RTE_ETH_LINK_SPEED_20G RTE_BIT32(9)
302#define RTE_ETH_LINK_SPEED_25G RTE_BIT32(10)
303#define RTE_ETH_LINK_SPEED_40G RTE_BIT32(11)
304#define RTE_ETH_LINK_SPEED_50G RTE_BIT32(12)
305#define RTE_ETH_LINK_SPEED_56G RTE_BIT32(13)
306#define RTE_ETH_LINK_SPEED_100G RTE_BIT32(14)
307#define RTE_ETH_LINK_SPEED_200G RTE_BIT32(15)
308#define RTE_ETH_LINK_SPEED_400G RTE_BIT32(16)
314#define RTE_ETH_SPEED_NUM_NONE 0
315#define RTE_ETH_SPEED_NUM_10M 10
316#define RTE_ETH_SPEED_NUM_100M 100
317#define RTE_ETH_SPEED_NUM_1G 1000
318#define RTE_ETH_SPEED_NUM_2_5G 2500
319#define RTE_ETH_SPEED_NUM_5G 5000
320#define RTE_ETH_SPEED_NUM_10G 10000
321#define RTE_ETH_SPEED_NUM_20G 20000
322#define RTE_ETH_SPEED_NUM_25G 25000
323#define RTE_ETH_SPEED_NUM_40G 40000
324#define RTE_ETH_SPEED_NUM_50G 50000
325#define RTE_ETH_SPEED_NUM_56G 56000
326#define RTE_ETH_SPEED_NUM_100G 100000
327#define RTE_ETH_SPEED_NUM_200G 200000
328#define RTE_ETH_SPEED_NUM_400G 400000
329#define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX
336 union {
337 RTE_ATOMIC(uint64_t) val64;
338 __extension__
339 struct {
340 uint32_t link_speed;
341 uint16_t link_duplex : 1;
342 uint16_t link_autoneg : 1;
343 uint16_t link_status : 1;
344 };
345 };
346};
347
351#define RTE_ETH_LINK_HALF_DUPLEX 0
352#define RTE_ETH_LINK_FULL_DUPLEX 1
353#define RTE_ETH_LINK_DOWN 0
354#define RTE_ETH_LINK_UP 1
355#define RTE_ETH_LINK_FIXED 0
356#define RTE_ETH_LINK_AUTONEG 1
357#define RTE_ETH_LINK_MAX_STR_LEN 40
361#define RTE_ETH_SPEED_LANES_TO_CAPA(x) RTE_BIT32(x)
362
365 uint32_t speed;
366 uint32_t capa;
367};
368
374 uint8_t pthresh;
375 uint8_t hthresh;
376 uint8_t wthresh;
377};
378
382#define RTE_ETH_MQ_RX_RSS_FLAG RTE_BIT32(0)
383#define RTE_ETH_MQ_RX_DCB_FLAG RTE_BIT32(1)
384#define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2)
394
401
411};
412
422};
423
430 uint32_t mtu;
438 uint64_t offloads;
439
440 uint64_t reserved_64s[2];
441 void *reserved_ptrs[2];
442};
443
449 RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
452 RTE_ETH_VLAN_TYPE_MAX,
453};
454
460 uint64_t ids[64];
461};
462
484 RTE_ETH_HASH_FUNCTION_MAX,
485};
486
487#define RTE_ETH_HASH_ALGO_TO_CAPA(x) RTE_BIT32(x)
488#define RTE_ETH_HASH_ALGO_CAPA_MASK(x) RTE_BIT32(RTE_ETH_HASH_FUNCTION_ ## x)
489
507 uint8_t *rss_key;
508 uint8_t rss_key_len;
513 uint64_t rss_hf;
515};
516
517/*
518 * A packet can be identified by hardware as different flow types. Different
519 * NIC hardware may support different flow types.
520 * Basically, the NIC hardware identifies the flow type as deep protocol as
521 * possible, and exclusively. For example, if a packet is identified as
522 * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
523 * though it is an actual IPV4 packet.
524 */
525#define RTE_ETH_FLOW_UNKNOWN 0
526#define RTE_ETH_FLOW_RAW 1
527#define RTE_ETH_FLOW_IPV4 2
528#define RTE_ETH_FLOW_FRAG_IPV4 3
529#define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
530#define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
531#define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
532#define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
533#define RTE_ETH_FLOW_IPV6 8
534#define RTE_ETH_FLOW_FRAG_IPV6 9
535#define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
536#define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
537#define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
538#define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
539#define RTE_ETH_FLOW_L2_PAYLOAD 14
540#define RTE_ETH_FLOW_IPV6_EX 15
541#define RTE_ETH_FLOW_IPV6_TCP_EX 16
542#define RTE_ETH_FLOW_IPV6_UDP_EX 17
544#define RTE_ETH_FLOW_PORT 18
545#define RTE_ETH_FLOW_VXLAN 19
546#define RTE_ETH_FLOW_GENEVE 20
547#define RTE_ETH_FLOW_NVGRE 21
548#define RTE_ETH_FLOW_VXLAN_GPE 22
549#define RTE_ETH_FLOW_GTPU 23
550#define RTE_ETH_FLOW_MAX 24
551
552/*
553 * Below macros are defined for RSS offload types, they can be used to
554 * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
555 */
556#define RTE_ETH_RSS_IPV4 RTE_BIT64(2)
557#define RTE_ETH_RSS_FRAG_IPV4 RTE_BIT64(3)
558#define RTE_ETH_RSS_NONFRAG_IPV4_TCP RTE_BIT64(4)
559#define RTE_ETH_RSS_NONFRAG_IPV4_UDP RTE_BIT64(5)
560#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP RTE_BIT64(6)
561#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
562#define RTE_ETH_RSS_IPV6 RTE_BIT64(8)
563#define RTE_ETH_RSS_FRAG_IPV6 RTE_BIT64(9)
564#define RTE_ETH_RSS_NONFRAG_IPV6_TCP RTE_BIT64(10)
565#define RTE_ETH_RSS_NONFRAG_IPV6_UDP RTE_BIT64(11)
566#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP RTE_BIT64(12)
567#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
568#define RTE_ETH_RSS_L2_PAYLOAD RTE_BIT64(14)
569#define RTE_ETH_RSS_IPV6_EX RTE_BIT64(15)
570#define RTE_ETH_RSS_IPV6_TCP_EX RTE_BIT64(16)
571#define RTE_ETH_RSS_IPV6_UDP_EX RTE_BIT64(17)
572#define RTE_ETH_RSS_PORT RTE_BIT64(18)
573#define RTE_ETH_RSS_VXLAN RTE_BIT64(19)
574#define RTE_ETH_RSS_GENEVE RTE_BIT64(20)
575#define RTE_ETH_RSS_NVGRE RTE_BIT64(21)
576#define RTE_ETH_RSS_GTPU RTE_BIT64(23)
577#define RTE_ETH_RSS_ETH RTE_BIT64(24)
578#define RTE_ETH_RSS_S_VLAN RTE_BIT64(25)
579#define RTE_ETH_RSS_C_VLAN RTE_BIT64(26)
580#define RTE_ETH_RSS_ESP RTE_BIT64(27)
581#define RTE_ETH_RSS_AH RTE_BIT64(28)
582#define RTE_ETH_RSS_L2TPV3 RTE_BIT64(29)
583#define RTE_ETH_RSS_PFCP RTE_BIT64(30)
584#define RTE_ETH_RSS_PPPOE RTE_BIT64(31)
585#define RTE_ETH_RSS_ECPRI RTE_BIT64(32)
586#define RTE_ETH_RSS_MPLS RTE_BIT64(33)
587#define RTE_ETH_RSS_IPV4_CHKSUM RTE_BIT64(34)
588
601#define RTE_ETH_RSS_L4_CHKSUM RTE_BIT64(35)
602
603#define RTE_ETH_RSS_L2TPV2 RTE_BIT64(36)
604#define RTE_ETH_RSS_IPV6_FLOW_LABEL RTE_BIT64(37)
605
606/*
607 * We use the following macros to combine with above RTE_ETH_RSS_* for
608 * more specific input set selection. These bits are defined starting
609 * from the high end of the 64 bits.
610 * Note: If we use above RTE_ETH_RSS_* without SRC/DST_ONLY, it represents
611 * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
612 * the same level are used simultaneously, it is the same case as none of
613 * them are added.
614 */
615#define RTE_ETH_RSS_L3_SRC_ONLY RTE_BIT64(63)
616#define RTE_ETH_RSS_L3_DST_ONLY RTE_BIT64(62)
617#define RTE_ETH_RSS_L4_SRC_ONLY RTE_BIT64(61)
618#define RTE_ETH_RSS_L4_DST_ONLY RTE_BIT64(60)
619#define RTE_ETH_RSS_L2_SRC_ONLY RTE_BIT64(59)
620#define RTE_ETH_RSS_L2_DST_ONLY RTE_BIT64(58)
621
622/*
623 * Only select IPV6 address prefix as RSS input set according to
624 * https://tools.ietf.org/html/rfc6052
625 * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
626 * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
627 */
628#define RTE_ETH_RSS_L3_PRE32 RTE_BIT64(57)
629#define RTE_ETH_RSS_L3_PRE40 RTE_BIT64(56)
630#define RTE_ETH_RSS_L3_PRE48 RTE_BIT64(55)
631#define RTE_ETH_RSS_L3_PRE56 RTE_BIT64(54)
632#define RTE_ETH_RSS_L3_PRE64 RTE_BIT64(53)
633#define RTE_ETH_RSS_L3_PRE96 RTE_BIT64(52)
634
635/*
636 * Use the following macros to combine with the above layers
637 * to choose inner and outer layers or both for RSS computation.
638 * Bits 50 and 51 are reserved for this.
639 */
640
648#define RTE_ETH_RSS_LEVEL_PMD_DEFAULT (UINT64_C(0) << 50)
649
654#define RTE_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
655
660#define RTE_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
661#define RTE_ETH_RSS_LEVEL_MASK (UINT64_C(3) << 50)
662
663#define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
664
675static inline uint64_t
676rte_eth_rss_hf_refine(uint64_t rss_hf)
677{
678 if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
679 rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
680
681 if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
682 rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
683
684 return rss_hf;
685}
686
687#define RTE_ETH_RSS_IPV6_PRE32 ( \
688 RTE_ETH_RSS_IPV6 | \
689 RTE_ETH_RSS_L3_PRE32)
690
691#define RTE_ETH_RSS_IPV6_PRE40 ( \
692 RTE_ETH_RSS_IPV6 | \
693 RTE_ETH_RSS_L3_PRE40)
694
695#define RTE_ETH_RSS_IPV6_PRE48 ( \
696 RTE_ETH_RSS_IPV6 | \
697 RTE_ETH_RSS_L3_PRE48)
698
699#define RTE_ETH_RSS_IPV6_PRE56 ( \
700 RTE_ETH_RSS_IPV6 | \
701 RTE_ETH_RSS_L3_PRE56)
702
703#define RTE_ETH_RSS_IPV6_PRE64 ( \
704 RTE_ETH_RSS_IPV6 | \
705 RTE_ETH_RSS_L3_PRE64)
706
707#define RTE_ETH_RSS_IPV6_PRE96 ( \
708 RTE_ETH_RSS_IPV6 | \
709 RTE_ETH_RSS_L3_PRE96)
710
711#define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
712 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
713 RTE_ETH_RSS_L3_PRE32)
714
715#define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
716 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
717 RTE_ETH_RSS_L3_PRE40)
718
719#define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
720 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
721 RTE_ETH_RSS_L3_PRE48)
722
723#define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
724 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
725 RTE_ETH_RSS_L3_PRE56)
726
727#define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
728 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
729 RTE_ETH_RSS_L3_PRE64)
730
731#define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
732 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
733 RTE_ETH_RSS_L3_PRE96)
734
735#define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
736 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
737 RTE_ETH_RSS_L3_PRE32)
738
739#define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
740 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
741 RTE_ETH_RSS_L3_PRE40)
742
743#define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
744 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
745 RTE_ETH_RSS_L3_PRE48)
746
747#define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
748 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
749 RTE_ETH_RSS_L3_PRE56)
750
751#define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
752 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
753 RTE_ETH_RSS_L3_PRE64)
754
755#define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
756 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
757 RTE_ETH_RSS_L3_PRE96)
758
759#define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
760 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
761 RTE_ETH_RSS_L3_PRE32)
762
763#define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
764 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
765 RTE_ETH_RSS_L3_PRE40)
766
767#define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
768 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
769 RTE_ETH_RSS_L3_PRE48)
770
771#define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
772 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
773 RTE_ETH_RSS_L3_PRE56)
774
775#define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
776 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
777 RTE_ETH_RSS_L3_PRE64)
778
779#define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
780 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
781 RTE_ETH_RSS_L3_PRE96)
782
783#define RTE_ETH_RSS_IP ( \
784 RTE_ETH_RSS_IPV4 | \
785 RTE_ETH_RSS_FRAG_IPV4 | \
786 RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
787 RTE_ETH_RSS_IPV6 | \
788 RTE_ETH_RSS_FRAG_IPV6 | \
789 RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
790 RTE_ETH_RSS_IPV6_EX)
791
792#define RTE_ETH_RSS_UDP ( \
793 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
794 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
795 RTE_ETH_RSS_IPV6_UDP_EX)
796
797#define RTE_ETH_RSS_TCP ( \
798 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
799 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
800 RTE_ETH_RSS_IPV6_TCP_EX)
801
802#define RTE_ETH_RSS_SCTP ( \
803 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
804 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
805
806#define RTE_ETH_RSS_TUNNEL ( \
807 RTE_ETH_RSS_VXLAN | \
808 RTE_ETH_RSS_GENEVE | \
809 RTE_ETH_RSS_NVGRE)
810
811#define RTE_ETH_RSS_VLAN ( \
812 RTE_ETH_RSS_S_VLAN | \
813 RTE_ETH_RSS_C_VLAN)
814
816#define RTE_ETH_RSS_PROTO_MASK ( \
817 RTE_ETH_RSS_IPV4 | \
818 RTE_ETH_RSS_FRAG_IPV4 | \
819 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
820 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
821 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
822 RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
823 RTE_ETH_RSS_IPV6 | \
824 RTE_ETH_RSS_FRAG_IPV6 | \
825 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
826 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
827 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
828 RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
829 RTE_ETH_RSS_L2_PAYLOAD | \
830 RTE_ETH_RSS_IPV6_EX | \
831 RTE_ETH_RSS_IPV6_TCP_EX | \
832 RTE_ETH_RSS_IPV6_UDP_EX | \
833 RTE_ETH_RSS_PORT | \
834 RTE_ETH_RSS_VXLAN | \
835 RTE_ETH_RSS_GENEVE | \
836 RTE_ETH_RSS_NVGRE | \
837 RTE_ETH_RSS_MPLS)
838
839/*
840 * Definitions used for redirection table entry size.
841 * Some RSS RETA sizes may not be supported by some drivers, check the
842 * documentation or the description of relevant functions for more details.
843 */
844#define RTE_ETH_RSS_RETA_SIZE_64 64
845#define RTE_ETH_RSS_RETA_SIZE_128 128
846#define RTE_ETH_RSS_RETA_SIZE_256 256
847#define RTE_ETH_RSS_RETA_SIZE_512 512
848#define RTE_ETH_RETA_GROUP_SIZE 64
849
851#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS 64
852#define RTE_ETH_DCB_NUM_USER_PRIORITIES 8
853#define RTE_ETH_VMDQ_DCB_NUM_QUEUES 128
854#define RTE_ETH_DCB_NUM_QUEUES 128
858#define RTE_ETH_DCB_PG_SUPPORT RTE_BIT32(0)
859#define RTE_ETH_DCB_PFC_SUPPORT RTE_BIT32(1)
863#define RTE_ETH_VLAN_STRIP_OFFLOAD 0x0001
864#define RTE_ETH_VLAN_FILTER_OFFLOAD 0x0002
865#define RTE_ETH_VLAN_EXTEND_OFFLOAD 0x0004
866#define RTE_ETH_QINQ_STRIP_OFFLOAD 0x0008
868#define RTE_ETH_VLAN_STRIP_MASK 0x0001
869#define RTE_ETH_VLAN_FILTER_MASK 0x0002
870#define RTE_ETH_VLAN_EXTEND_MASK 0x0004
871#define RTE_ETH_QINQ_STRIP_MASK 0x0008
872#define RTE_ETH_VLAN_ID_MAX 0x0FFF
875/* Definitions used for receive MAC address */
876#define RTE_ETH_NUM_RECEIVE_MAC_ADDR 128
878/* Definitions used for unicast hash */
879#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128
885#define RTE_ETH_VMDQ_ACCEPT_UNTAG RTE_BIT32(0)
887#define RTE_ETH_VMDQ_ACCEPT_HASH_MC RTE_BIT32(1)
889#define RTE_ETH_VMDQ_ACCEPT_HASH_UC RTE_BIT32(2)
891#define RTE_ETH_VMDQ_ACCEPT_BROADCAST RTE_BIT32(3)
893#define RTE_ETH_VMDQ_ACCEPT_MULTICAST RTE_BIT32(4)
904 uint64_t mask;
906 uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
907};
908
915 RTE_ETH_8_TCS = 8
917
926 RTE_ETH_64_POOLS = 64
928
/* This structure may be extended in future. */

/** Rx DCB (Data Center Bridging) configuration. */
struct rte_eth_dcb_rx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Number of traffic classes, e.g. RTE_ETH_8_TCS. */
	/**
	 * Traffic class assigned to each of the RTE_ETH_DCB_NUM_USER_PRIORITIES
	 * entries. NOTE(review): the index is presumably the VLAN user
	 * priority — confirm against the ethdev implementation.
	 */
	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
};
935
/** VMDq + DCB Tx configuration. */
struct rte_eth_vmdq_dcb_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq pools, e.g. RTE_ETH_64_POOLS. */
	/**
	 * Traffic class per user-priority slot (RTE_ETH_DCB_NUM_USER_PRIORITIES
	 * entries). NOTE(review): index presumably the VLAN user priority — confirm.
	 */
	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
};
941
/** Tx DCB (Data Center Bridging) configuration. */
struct rte_eth_dcb_tx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Number of traffic classes, e.g. RTE_ETH_8_TCS. */
	/**
	 * Traffic class per user-priority slot (RTE_ETH_DCB_NUM_USER_PRIORITIES
	 * entries). NOTE(review): index presumably the VLAN user priority — confirm.
	 */
	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
};
947
/** VMDq Tx configuration. */
struct rte_eth_vmdq_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq transmit pools, e.g. RTE_ETH_64_POOLS. */
};
951
966 uint8_t default_pool;
967 uint8_t nb_pool_maps;
968 struct {
969 uint16_t vlan_id;
970 uint64_t pools;
974};
975
997 uint8_t default_pool;
999 uint8_t nb_pool_maps;
1000 uint32_t rx_mode;
1001 struct {
1002 uint16_t vlan_id;
1003 uint64_t pools;
1005};
1006
1017 uint64_t offloads;
1018
1019 uint16_t pvid;
1020 __extension__
1021 uint8_t
1027
1028 uint64_t reserved_64s[2];
1029 void *reserved_ptrs[2];
1030};
1031
1093 struct rte_mempool *mp;
1094 uint16_t length;
1095 uint16_t offset;
1107 uint32_t proto_hdr;
1108};
1109
1117 /* The settings for buffer split offload. */
1118 struct rte_eth_rxseg_split split;
1119 /* The other features settings should be added here. */
1120};
1121
1128 uint8_t rx_drop_en;
1130 uint16_t rx_nseg;
1137 uint16_t share_group;
1138 uint16_t share_qid;
1144 uint64_t offloads;
1153
1174 uint16_t rx_nmempool;
1176 uint64_t reserved_64s[2];
1177 void *reserved_ptrs[2];
1178};
1179
1185 uint16_t tx_rs_thresh;
1195 uint64_t offloads;
1196
1197 uint64_t reserved_64s[2];
1198 void *reserved_ptrs[2];
1199};
1200
1213
1218 uint32_t rte_memory:1;
1219
1220 uint32_t reserved:30;
1221};
1222
1233 uint16_t max_rx_2_tx;
1235 uint16_t max_tx_2_rx;
1236 uint16_t max_nb_desc;
1239};
1240
1241#define RTE_ETH_MAX_HAIRPIN_PEERS 32
1242
1250 uint16_t port;
1251 uint16_t queue;
1252};
1253
1261 uint32_t peer_count:16;
1272 uint32_t tx_explicit:1;
1273
1285 uint32_t manual_bind:1;
1286
1299
1311 uint32_t use_rte_memory:1;
1312
1323 uint32_t force_memory:1;
1324
1325 uint32_t reserved:11;
1327 struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1328};
1329
1334 uint16_t nb_max;
1335 uint16_t nb_min;
1336 uint16_t nb_align;
1346 uint16_t nb_seg_max;
1347
1360};
1361
1371
1378 uint32_t high_water;
1379 uint32_t low_water;
1380 uint16_t pause_time;
1381 uint16_t send_xon;
1384 uint8_t autoneg;
1385};
1386
1394 uint8_t priority;
1395};
1396
1407 uint8_t tc_max;
1410};
1411
1432 struct {
1433 uint16_t tx_qid;
1437 uint8_t tc;
1438 } rx_pause; /* Valid when (mode == FC_RX_PAUSE || mode == FC_FULL) */
1439
1440 struct {
1441 uint16_t pause_time;
1442 uint16_t rx_qid;
1446 uint8_t tc;
1447 } tx_pause; /* Valid when (mode == FC_TX_PAUSE || mode == FC_FULL) */
1448};
1449
1455 RTE_ETH_TUNNEL_TYPE_NONE = 0,
1456 RTE_ETH_TUNNEL_TYPE_VXLAN,
1457 RTE_ETH_TUNNEL_TYPE_GENEVE,
1458 RTE_ETH_TUNNEL_TYPE_TEREDO,
1459 RTE_ETH_TUNNEL_TYPE_NVGRE,
1460 RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1461 RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1462 RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1463 RTE_ETH_TUNNEL_TYPE_ECPRI,
1464 RTE_ETH_TUNNEL_TYPE_MAX,
1465};
1466
1467#ifdef __cplusplus
1468}
1469#endif
1470
1471/* Deprecated API file for rte_eth_dev_filter_* functions */
1472#include "rte_eth_ctrl.h"
1473
1474#ifdef __cplusplus
1475extern "C" {
1476#endif
1477
1488 uint16_t udp_port;
1489 uint8_t prot_type;
1490};
1491
1497 uint32_t lsc:1;
1499 uint32_t rxq:1;
1501 uint32_t rmv:1;
1502};
1503
1504#define rte_intr_conf rte_eth_intr_conf
1505
1512 uint32_t link_speeds;
1521 uint32_t lpbk_mode;
1526 struct {
1531 struct rte_eth_dcb_rx_conf dcb_rx_conf;
1535 union {
1537 struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1539 struct rte_eth_dcb_tx_conf dcb_tx_conf;
1541 struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1547};
1548
1552#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP RTE_BIT64(0)
1553#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1554#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1555#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1556#define RTE_ETH_RX_OFFLOAD_TCP_LRO RTE_BIT64(4)
1557#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP RTE_BIT64(5)
1558#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1559#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP RTE_BIT64(7)
1560#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER RTE_BIT64(9)
1561#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND RTE_BIT64(10)
1562#define RTE_ETH_RX_OFFLOAD_SCATTER RTE_BIT64(13)
1568#define RTE_ETH_RX_OFFLOAD_TIMESTAMP RTE_BIT64(14)
1569#define RTE_ETH_RX_OFFLOAD_SECURITY RTE_BIT64(15)
1570#define RTE_ETH_RX_OFFLOAD_KEEP_CRC RTE_BIT64(16)
1571#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM RTE_BIT64(17)
1572#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(18)
1573#define RTE_ETH_RX_OFFLOAD_RSS_HASH RTE_BIT64(19)
1574#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT RTE_BIT64(20)
1575
1576#define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1577 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1578 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1579#define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1580 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1581 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1582 RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1583
1584/*
1585 * If new Rx offload capabilities are defined, they also must be
1586 * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1587 */
1588
1592#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT RTE_BIT64(0)
1593#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1594#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1595#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1596#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM RTE_BIT64(4)
1597#define RTE_ETH_TX_OFFLOAD_TCP_TSO RTE_BIT64(5)
1598#define RTE_ETH_TX_OFFLOAD_UDP_TSO RTE_BIT64(6)
1599#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)
1600#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT RTE_BIT64(8)
1601#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO RTE_BIT64(9)
1602#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO RTE_BIT64(10)
1603#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO RTE_BIT64(11)
1604#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO RTE_BIT64(12)
1605#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT RTE_BIT64(13)
1610#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE RTE_BIT64(14)
1612#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS RTE_BIT64(15)
1620#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE RTE_BIT64(16)
1621#define RTE_ETH_TX_OFFLOAD_SECURITY RTE_BIT64(17)
1627#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO RTE_BIT64(18)
1633#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO RTE_BIT64(19)
1635#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(20)
1641#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1642/*
1643 * If new Tx offload capabilities are defined, they also must be
1644 * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1645 */
1646
1651#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1653#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1663#define RTE_ETH_DEV_CAPA_RXQ_SHARE RTE_BIT64(2)
1665#define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP RTE_BIT64(3)
1667#define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1670/*
1671 * Fallback default preferred Rx/Tx port parameters.
1672 * These are used if an application requests default parameters
1673 * but the PMD does not provide preferred values.
1674 */
1675#define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1676#define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1677#define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1678#define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1679
1686 uint16_t burst_size;
1687 uint16_t ring_size;
1688 uint16_t nb_queues;
1689};
1690
1695#define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1696
1701 const char *name;
1702 uint16_t domain_id;
1710 uint16_t port_id;
1716 uint16_t rx_domain;
1717};
1718
1726 __extension__
1727 uint32_t multi_pools:1;
1728 uint32_t offset_allowed:1;
1730 uint16_t max_nseg;
1731 uint16_t reserved;
1732};
1733
1746};
1747
1768};
1769
1776 struct rte_device *device;
1777 const char *driver_name;
1778 unsigned int if_index;
1780 uint16_t min_mtu;
1781 uint16_t max_mtu;
1782 const uint32_t *dev_flags;
1792 uint32_t max_rx_pktlen;
1795 uint16_t max_rx_queues;
1796 uint16_t max_tx_queues;
1797 uint32_t max_mac_addrs;
1800 uint16_t max_vfs;
1812 uint16_t reta_size;
1814 uint32_t rss_algo_capa;
1824 uint32_t speed_capa;
1826 uint16_t nb_rx_queues;
1827 uint16_t nb_tx_queues;
1840 uint64_t dev_capa;
1848
1849 uint64_t reserved_64s[2];
1850 void *reserved_ptrs[2];
1851};
1852
1854#define RTE_ETH_QUEUE_STATE_STOPPED 0
1855#define RTE_ETH_QUEUE_STATE_STARTED 1
1856#define RTE_ETH_QUEUE_STATE_HAIRPIN 2
1864 struct rte_mempool *mp;
1865 struct rte_eth_rxconf conf;
1867 uint8_t queue_state;
1868 uint16_t nb_desc;
1869 uint16_t rx_buf_size;
1877};
1878
1884 struct rte_eth_txconf conf;
1885 uint16_t nb_desc;
1886 uint8_t queue_state;
1887};
1888
1899 struct rte_mempool *mp;
1900 uint16_t *refill_head;
1901 uint16_t *receive_tail;
1911};
1912
1913/* Generic Burst mode flag definition, values can be ORed. */
1914
1920#define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1921
1927 uint64_t flags;
1929#define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1931};
1932
1934#define RTE_ETH_XSTATS_NAME_SIZE 64
1935
1946 uint64_t id;
1947 uint64_t value;
1948};
1949
1966};
1967
1968#define RTE_ETH_DCB_NUM_TCS 8
1969#define RTE_ETH_MAX_VMDQ_POOL 64
1970
1977 struct {
1978 uint16_t base;
1979 uint16_t nb_queue;
1980 } tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1982 struct {
1983 uint16_t base;
1984 uint16_t nb_queue;
1985 } tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1986};
1987
1993 uint8_t nb_tcs;
1995 uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS];
1998};
1999
2010};
2011
/* Translate from a FEC mode (bit position) to a FEC capability bit. */
#define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)

/* Build a FEC capability bit from a RTE_ETH_FEC_* enumerator suffix. */
#define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)

/* A structure used to get FEC capabilities per link speed. */
struct rte_eth_fec_capa {
	uint32_t speed; /**< Link speed in Mbps (a RTE_ETH_SPEED_NUM_* value). */
	uint32_t capa;  /**< Bitmask of supported FEC modes, bits per RTE_ETH_FEC_MODE_TO_CAPA(). */
};
2023
2024#define RTE_ETH_ALL RTE_MAX_ETHPORTS
2025
/* Macros to check for valid port */

/**
 * Validate @p port_id; on failure log an error and make the calling
 * function return @p retval.
 */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
		return retval; \
	} \
} while (0)

/**
 * Validate @p port_id; on failure log an error and make the calling
 * (void) function return.
 */
#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
		return; \
	} \
} while (0)
2040
2063typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
2064 struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
2065 void *user_param);
2066
2087typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
2088 struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
2089
2100};
2101
/** Per-device SR-IOV bookkeeping. Field semantics are driver-managed. */
struct rte_eth_dev_sriov {
	uint8_t active;          /**< Non-zero when SR-IOV is active — TODO confirm exact encoding. */
	uint8_t nb_q_per_pool;   /**< NOTE(review): presumably queues per VF/VMDq pool — confirm. */
	uint16_t def_vmdq_idx;   /**< NOTE(review): presumably the default VMDq pool index — confirm. */
	uint16_t def_pool_q_idx; /**< NOTE(review): presumably the default queue index in that pool — confirm. */
};

/** Accessor for the SR-IOV state stored in a device's data. */
#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
2109
2110#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2111
/** Reserved owner id meaning "not owned by anyone". */
#define RTE_ETH_DEV_NO_OWNER 0

/** Size of the owner name buffer. */
#define RTE_ETH_MAX_OWNER_NAME_LEN 64

/** Identity of an ethdev port owner. */
struct rte_eth_dev_owner {
	uint64_t id; /**< Owner unique identifier; RTE_ETH_DEV_NO_OWNER (0) means unowned. */
	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< Human-readable owner name. */
};
2120
2126#define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE RTE_BIT32(0)
2128#define RTE_ETH_DEV_INTR_LSC RTE_BIT32(1)
2130#define RTE_ETH_DEV_BONDING_MEMBER RTE_BIT32(2)
2132#define RTE_ETH_DEV_INTR_RMV RTE_BIT32(3)
2134#define RTE_ETH_DEV_REPRESENTOR RTE_BIT32(4)
2136#define RTE_ETH_DEV_NOLIVE_MAC_ADDR RTE_BIT32(5)
2141#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2155uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2156 const uint64_t owner_id);
2157
/**
 * Iterate over all valid port ids owned by owner id @p o.
 * Starts the scan at port 0 and stops once the id returned by
 * rte_eth_find_next_owned_by() reaches RTE_MAX_ETHPORTS.
 */
#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
	for (p = rte_eth_find_next_owned_by(0, o); \
	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
	     p = rte_eth_find_next_owned_by(p + 1, o))

/**
 * Get the next valid port id starting from @p port_id (inclusive).
 * NOTE(review): presumably returns RTE_MAX_ETHPORTS when there is no
 * further valid port — confirm against the implementation.
 */
uint16_t rte_eth_find_next(uint16_t port_id);

/** Iterate over all valid ports that have no specific owner. */
#define RTE_ETH_FOREACH_DEV(p) \
	RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
2181
2193uint16_t
2194rte_eth_find_next_of(uint16_t port_id_start,
2195 const struct rte_device *parent);
2196
2205#define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
2206 for (port_id = rte_eth_find_next_of(0, parent); \
2207 port_id < RTE_MAX_ETHPORTS; \
2208 port_id = rte_eth_find_next_of(port_id + 1, parent))
2209
2221uint16_t
2222rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2223
2234#define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
2235 for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
2236 port_id < RTE_MAX_ETHPORTS; \
2237 port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2238
2249int rte_eth_dev_owner_new(uint64_t *owner_id);
2250
2261int rte_eth_dev_owner_set(const uint16_t port_id,
2262 const struct rte_eth_dev_owner *owner);
2263
2274int rte_eth_dev_owner_unset(const uint16_t port_id,
2275 const uint64_t owner_id);
2276
2285int rte_eth_dev_owner_delete(const uint64_t owner_id);
2286
2297int rte_eth_dev_owner_get(const uint16_t port_id,
2298 struct rte_eth_dev_owner *owner);
2299
2311
2321
2333uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2334
2343const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2344
2353const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2354
2366__rte_experimental
2367const char *rte_eth_dev_capability_name(uint64_t capability);
2368
2408int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2409 uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2410
2419int
2420rte_eth_dev_is_removed(uint16_t port_id);
2421
2484int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2485 uint16_t nb_rx_desc, unsigned int socket_id,
2486 const struct rte_eth_rxconf *rx_conf,
2487 struct rte_mempool *mb_pool);
2488
2516__rte_experimental
2518 (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2519 const struct rte_eth_hairpin_conf *conf);
2520
2569int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2570 uint16_t nb_tx_desc, unsigned int socket_id,
2571 const struct rte_eth_txconf *tx_conf);
2572
2598__rte_experimental
2600 (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2601 const struct rte_eth_hairpin_conf *conf);
2602
2629__rte_experimental
2630int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2631 size_t len, uint32_t direction);
2632
2655__rte_experimental
2656int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2657
2682__rte_experimental
2683int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2684
2700__rte_experimental
2701int rte_eth_dev_count_aggr_ports(uint16_t port_id);
2702
2730__rte_experimental
2731int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
2732 uint8_t affinity);
2733
2746int rte_eth_dev_socket_id(uint16_t port_id);
2747
2757int rte_eth_dev_is_valid_port(uint16_t port_id);
2758
2775__rte_experimental
2776int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2777
2794__rte_experimental
2795int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2796
2814int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2815
2832int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2833
2851int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2852
2869int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2870
2894int rte_eth_dev_start(uint16_t port_id);
2895
2909int rte_eth_dev_stop(uint16_t port_id);
2910
2923int rte_eth_dev_set_link_up(uint16_t port_id);
2924
2934int rte_eth_dev_set_link_down(uint16_t port_id);
2935
2946int rte_eth_dev_close(uint16_t port_id);
2947
2985int rte_eth_dev_reset(uint16_t port_id);
2986
2998int rte_eth_promiscuous_enable(uint16_t port_id);
2999
3011int rte_eth_promiscuous_disable(uint16_t port_id);
3012
3023int rte_eth_promiscuous_get(uint16_t port_id);
3024
3036int rte_eth_allmulticast_enable(uint16_t port_id);
3037
3049int rte_eth_allmulticast_disable(uint16_t port_id);
3050
3061int rte_eth_allmulticast_get(uint16_t port_id);
3062
3080int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
3082
3097int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
3099
3113__rte_experimental
3114const char *rte_eth_link_speed_to_str(uint32_t link_speed);
3115
3134__rte_experimental
3135int rte_eth_link_to_str(char *str, size_t len,
3136 const struct rte_eth_link *eth_link);
3137
3158__rte_experimental
3159int rte_eth_speed_lanes_get(uint16_t port_id, uint32_t *lanes);
3160
3182__rte_experimental
3183int rte_eth_speed_lanes_set(uint16_t port_id, uint32_t speed_lanes);
3184
3207__rte_experimental
3209 struct rte_eth_speed_lanes_capa *speed_lanes_capa,
3210 unsigned int num);
3211
3229int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
3230
3242int rte_eth_stats_reset(uint16_t port_id);
3243
3273int rte_eth_xstats_get_names(uint16_t port_id,
3274 struct rte_eth_xstat_name *xstats_names,
3275 unsigned int size);
3276
3310int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3311 unsigned int n);
3312
3337int
3339 struct rte_eth_xstat_name *xstats_names, unsigned int size,
3340 uint64_t *ids);
3341
3366int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3367 uint64_t *values, unsigned int size);
3368
3388int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3389 uint64_t *id);
3390
3405__rte_experimental
3406int rte_eth_xstats_set_counter(uint16_t port_id, uint64_t id, int on_off);
3407
3419__rte_experimental
3420int rte_eth_xstats_query_state(uint16_t port_id, uint64_t id);
3421
3434int rte_eth_xstats_reset(uint16_t port_id);
3435
3455 uint16_t tx_queue_id, uint8_t stat_idx);
3456
3476 uint16_t rx_queue_id,
3477 uint8_t stat_idx);
3478
3492int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3493
3514__rte_experimental
3515int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3516 unsigned int num);
3517
3537int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3539
3555__rte_experimental
3556int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3558
3579int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3581
3621int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3622 uint32_t *ptypes, int num)
3624
3655int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3656 uint32_t *set_ptypes, unsigned int num);
3657
3670int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3671
3689int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3690
3710int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3711
3730int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3731 int on);
3732
3750 enum rte_vlan_type vlan_type,
3751 uint16_t tag_type);
3752
3770int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3771
3785int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3786
3801int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3802
3828__rte_experimental
3829int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
3830 uint8_t avail_thresh);
3831
3858__rte_experimental
3859int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
3860 uint8_t *avail_thresh);
3861
3862typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3863 void *userdata);
3864
3870 buffer_tx_error_fn error_callback;
3871 void *error_userdata;
3872 uint16_t size;
3873 uint16_t length;
3875 struct rte_mbuf *pkts[];
3876};
3877
3884#define RTE_ETH_TX_BUFFER_SIZE(sz) \
3885 (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3886
3897int
3898rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3899
3924int
3926 buffer_tx_error_fn callback, void *userdata);
3927
3950void
3951rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3952 void *userdata);
3953
3977void
3978rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3979 void *userdata);
3980
4006int
4007rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
4008
4041};
4042
4062};
4063
4082 uint64_t metadata;
4083};
4084
4123
4148 uint64_t metadata;
4149};
4150
4234
4248typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
4249 enum rte_eth_event_type event, void *cb_arg, void *ret_param);
4250
4269 enum rte_eth_event_type event,
4270 rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4271
4291 enum rte_eth_event_type event,
4292 rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4293
4315int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
4316
4337int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
4338
4356int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
4357
4379int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4380 int epfd, int op, void *data);
4381
4396int
4397rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
4398
4412int rte_eth_led_on(uint16_t port_id);
4413
4427int rte_eth_led_off(uint16_t port_id);
4428
4457__rte_experimental
4458int rte_eth_fec_get_capability(uint16_t port_id,
4459 struct rte_eth_fec_capa *speed_fec_capa,
4460 unsigned int num);
4461
4482__rte_experimental
4483int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4484
4508__rte_experimental
4509int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
4510
4525int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4526 struct rte_eth_fc_conf *fc_conf);
4527
4542int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4543 struct rte_eth_fc_conf *fc_conf);
4544
4561 struct rte_eth_pfc_conf *pfc_conf);
4562
4581int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4582 uint32_t pool);
4583
4601__rte_experimental
4603 struct rte_eth_pfc_queue_info *pfc_queue_info);
4604
4628__rte_experimental
4630 struct rte_eth_pfc_queue_conf *pfc_queue_conf);
4631
4646int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4647 struct rte_ether_addr *mac_addr);
4648
4667 struct rte_ether_addr *mac_addr);
4668
4686int rte_eth_dev_rss_reta_update(uint16_t port_id,
4687 struct rte_eth_rss_reta_entry64 *reta_conf,
4688 uint16_t reta_size);
4689
4708int rte_eth_dev_rss_reta_query(uint16_t port_id,
4709 struct rte_eth_rss_reta_entry64 *reta_conf,
4710 uint16_t reta_size);
4711
4731int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4732 uint8_t on);
4733
4752int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4753
4770int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4771 uint32_t tx_rate);
4772
4787int rte_eth_dev_rss_hash_update(uint16_t port_id,
4788 struct rte_eth_rss_conf *rss_conf);
4789
4805int
4807 struct rte_eth_rss_conf *rss_conf);
4808
4821__rte_experimental
4822const char *
4824
4841__rte_experimental
4842int
4843rte_eth_find_rss_algo(const char *name, uint32_t *algo);
4844
4869int
4871 struct rte_eth_udp_tunnel *tunnel_udp);
4872
4892int
4894 struct rte_eth_udp_tunnel *tunnel_udp);
4895
4910int rte_eth_dev_get_dcb_info(uint16_t port_id,
4911 struct rte_eth_dcb_info *dcb_info);
4912
4913struct rte_eth_rxtx_callback;
4914
4940const struct rte_eth_rxtx_callback *
4941rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4942 rte_rx_callback_fn fn, void *user_param);
4943
4970const struct rte_eth_rxtx_callback *
4971rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4972 rte_rx_callback_fn fn, void *user_param);
4973
4999const struct rte_eth_rxtx_callback *
5000rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
5001 rte_tx_callback_fn fn, void *user_param);
5002
5036int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
5037 const struct rte_eth_rxtx_callback *user_cb);
5038
5072int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5073 const struct rte_eth_rxtx_callback *user_cb);
5074
5094int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5095 struct rte_eth_rxq_info *qinfo);
5096
5116int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5117 struct rte_eth_txq_info *qinfo);
5118
5139__rte_experimental
5141 uint16_t queue_id,
5142 struct rte_eth_recycle_rxq_info *recycle_rxq_info);
5143
5162int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5163 struct rte_eth_burst_mode *mode);
5164
5183int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5184 struct rte_eth_burst_mode *mode);
5185
5206__rte_experimental
5207int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5208 struct rte_power_monitor_cond *pmc);
5209
5236__rte_experimental
5237int rte_eth_dev_get_reg_info_ext(uint16_t port_id, struct rte_dev_reg_info *info);
5238
5257int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5259
5272int rte_eth_dev_get_eeprom_length(uint16_t port_id);
5273
5290int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5291
5308int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5309
5328__rte_experimental
5329int
5332
5352__rte_experimental
5353int
5354rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5356
5376int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5377 struct rte_ether_addr *mc_addr_set,
5378 uint32_t nb_mc_addr);
5379
5392int rte_eth_timesync_enable(uint16_t port_id);
5393
5406int rte_eth_timesync_disable(uint16_t port_id);
5407
5427 struct timespec *timestamp, uint32_t flags);
5428
5445 struct timespec *timestamp);
5446
5464int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
5465
5506__rte_experimental
5507int rte_eth_timesync_adjust_freq(uint16_t port_id, int64_t ppm);
5508
5524int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
5525
5544int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
5545
5591__rte_experimental
5592int
5593rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
5594
5610int
5611rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
5612
5629int
5630rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5631
5649 uint16_t *nb_rx_desc,
5650 uint16_t *nb_tx_desc);
5651
5666int
5667rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5668
5678void *
5679rte_eth_dev_get_sec_ctx(uint16_t port_id);
5680
5696__rte_experimental
5698 struct rte_eth_hairpin_cap *cap);
5699
5709 int pf;
5710 __extension__
5711 union {
5712 int vf;
5713 int sf;
5714 };
5715 uint32_t id_base;
5716 uint32_t id_end;
5717 char name[RTE_DEV_NAME_MAX_LEN];
5718};
5719
5727 uint16_t controller;
5728 uint16_t pf;
5730 uint32_t nb_ranges;
5732};
5733
5757__rte_experimental
5758int rte_eth_representor_info_get(uint16_t port_id,
5759 struct rte_eth_representor_info *info);
5760
5762#define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5763
5765#define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5766
5768#define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5769
5809int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
5810
5812#define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
5814#define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))
5815
5826 uint32_t timeout_ms;
5828 uint16_t max_frags;
5833 uint16_t flags;
5834};
5835
5856__rte_experimental
5858 struct rte_eth_ip_reassembly_params *capa);
5859
5881__rte_experimental
5883 struct rte_eth_ip_reassembly_params *conf);
5884
5914__rte_experimental
5916 const struct rte_eth_ip_reassembly_params *conf);
5917
5925typedef struct {
5932 uint16_t time_spent;
5934 uint16_t nb_frags;
5936
5955__rte_experimental
5956int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file);
5957
5981__rte_experimental
5982int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5983 uint16_t offset, uint16_t num, FILE *file);
5984
6008__rte_experimental
6009int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
6010 uint16_t offset, uint16_t num, FILE *file);
6011
6012
6013/* Congestion management */
6014
6024};
6025
6047 uint8_t rsvd[8];
6048};
6049
6061 union {
6068 uint16_t rx_queue;
6076 } obj_param;
6077 union {
6091 } mode_param;
6092};
6093
6111__rte_experimental
6112int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info);
6113
6131__rte_experimental
6132int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config);
6133
6150__rte_experimental
6151int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config);
6152
6173__rte_experimental
6174int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config);
6175
6176#ifdef __cplusplus
6177}
6178#endif
6179
6180#include <rte_ethdev_core.h>
6181
6182#ifdef __cplusplus
6183extern "C" {
6184#endif
6185
6209uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
6210 struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
6211 void *opaque);
6212
6300static inline uint16_t
6301rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
6302 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
6303{
6304 uint16_t nb_rx;
6305 struct rte_eth_fp_ops *p;
6306 void *qd;
6307
6308#ifdef RTE_ETHDEV_DEBUG_RX
6309 if (port_id >= RTE_MAX_ETHPORTS ||
6310 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6311 RTE_ETHDEV_LOG_LINE(ERR,
6312 "Invalid port_id=%u or queue_id=%u",
6313 port_id, queue_id);
6314 return 0;
6315 }
6316#endif
6317
6318 /* fetch pointer to queue data */
6319 p = &rte_eth_fp_ops[port_id];
6320 qd = p->rxq.data[queue_id];
6321
6322#ifdef RTE_ETHDEV_DEBUG_RX
6323 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6324
6325 if (qd == NULL) {
6326 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
6327 queue_id, port_id);
6328 return 0;
6329 }
6330#endif
6331
6332 nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
6333
6334#ifdef RTE_ETHDEV_RXTX_CALLBACKS
6335 {
6336 void *cb;
6337
6338 /* rte_memory_order_release memory order was used when the
6339 * call back was inserted into the list.
6340 * Since there is a clear dependency between loading
6341 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6342 * not required.
6343 */
6344 cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
6345 rte_memory_order_relaxed);
6346 if (unlikely(cb != NULL))
6347 nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
6348 rx_pkts, nb_rx, nb_pkts, cb);
6349 }
6350#endif
6351
6352 if (unlikely(nb_rx))
6353 rte_ethdev_trace_rx_burst_nonempty(port_id, queue_id, (void **)rx_pkts, nb_rx);
6354 else
6355 rte_ethdev_trace_rx_burst_empty(port_id, queue_id, (void **)rx_pkts);
6356 return nb_rx;
6357}
6358
6376static inline int
6377rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
6378{
6379 struct rte_eth_fp_ops *p;
6380 void *qd;
6381
6382#ifdef RTE_ETHDEV_DEBUG_RX
6383 if (port_id >= RTE_MAX_ETHPORTS ||
6384 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6385 RTE_ETHDEV_LOG_LINE(ERR,
6386 "Invalid port_id=%u or queue_id=%u",
6387 port_id, queue_id);
6388 return -EINVAL;
6389 }
6390#endif
6391
6392 /* fetch pointer to queue data */
6393 p = &rte_eth_fp_ops[port_id];
6394 qd = p->rxq.data[queue_id];
6395
6396#ifdef RTE_ETHDEV_DEBUG_RX
6397 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6398 if (qd == NULL)
6399 return -EINVAL;
6400#endif
6401
6402 if (*p->rx_queue_count == NULL)
6403 return -ENOTSUP;
6404 return (int)(*p->rx_queue_count)(qd);
6405}
6406
6410#define RTE_ETH_RX_DESC_AVAIL 0
6411#define RTE_ETH_RX_DESC_DONE 1
6412#define RTE_ETH_RX_DESC_UNAVAIL 2
6448static inline int
6449rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
6450 uint16_t offset)
6451{
6452 struct rte_eth_fp_ops *p;
6453 void *qd;
6454
6455#ifdef RTE_ETHDEV_DEBUG_RX
6456 if (port_id >= RTE_MAX_ETHPORTS ||
6457 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6458 RTE_ETHDEV_LOG_LINE(ERR,
6459 "Invalid port_id=%u or queue_id=%u",
6460 port_id, queue_id);
6461 return -EINVAL;
6462 }
6463#endif
6464
6465 /* fetch pointer to queue data */
6466 p = &rte_eth_fp_ops[port_id];
6467 qd = p->rxq.data[queue_id];
6468
6469#ifdef RTE_ETHDEV_DEBUG_RX
6470 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6471 if (qd == NULL)
6472 return -ENODEV;
6473#endif
6474 if (*p->rx_descriptor_status == NULL)
6475 return -ENOTSUP;
6476 return (*p->rx_descriptor_status)(qd, offset);
6477}
6478
6482#define RTE_ETH_TX_DESC_FULL 0
6483#define RTE_ETH_TX_DESC_DONE 1
6484#define RTE_ETH_TX_DESC_UNAVAIL 2
6520static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
6521 uint16_t queue_id, uint16_t offset)
6522{
6523 struct rte_eth_fp_ops *p;
6524 void *qd;
6525
6526#ifdef RTE_ETHDEV_DEBUG_TX
6527 if (port_id >= RTE_MAX_ETHPORTS ||
6528 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6529 RTE_ETHDEV_LOG_LINE(ERR,
6530 "Invalid port_id=%u or queue_id=%u",
6531 port_id, queue_id);
6532 return -EINVAL;
6533 }
6534#endif
6535
6536 /* fetch pointer to queue data */
6537 p = &rte_eth_fp_ops[port_id];
6538 qd = p->txq.data[queue_id];
6539
6540#ifdef RTE_ETHDEV_DEBUG_TX
6541 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6542 if (qd == NULL)
6543 return -ENODEV;
6544#endif
6545 if (*p->tx_descriptor_status == NULL)
6546 return -ENOTSUP;
6547 return (*p->tx_descriptor_status)(qd, offset);
6548}
6549
6569uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
6570 struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
6571
6643static inline uint16_t
6644rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
6645 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6646{
6647 struct rte_eth_fp_ops *p;
6648 void *qd;
6649
6650#ifdef RTE_ETHDEV_DEBUG_TX
6651 if (port_id >= RTE_MAX_ETHPORTS ||
6652 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6653 RTE_ETHDEV_LOG_LINE(ERR,
6654 "Invalid port_id=%u or queue_id=%u",
6655 port_id, queue_id);
6656 return 0;
6657 }
6658#endif
6659
6660 /* fetch pointer to queue data */
6661 p = &rte_eth_fp_ops[port_id];
6662 qd = p->txq.data[queue_id];
6663
6664#ifdef RTE_ETHDEV_DEBUG_TX
6665 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6666
6667 if (qd == NULL) {
6668 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6669 queue_id, port_id);
6670 return 0;
6671 }
6672#endif
6673
6674#ifdef RTE_ETHDEV_RXTX_CALLBACKS
6675 {
6676 void *cb;
6677
6678 /* rte_memory_order_release memory order was used when the
6679 * call back was inserted into the list.
6680 * Since there is a clear dependency between loading
6681 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6682 * not required.
6683 */
6684 cb = rte_atomic_load_explicit(&p->txq.clbk[queue_id],
6685 rte_memory_order_relaxed);
6686 if (unlikely(cb != NULL))
6687 nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
6688 tx_pkts, nb_pkts, cb);
6689 }
6690#endif
6691
6692 nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
6693
6694 rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
6695 return nb_pkts;
6696}
6697
6751#ifndef RTE_ETHDEV_TX_PREPARE_NOOP
6752
6753static inline uint16_t
6754rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
6755 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6756{
6757 struct rte_eth_fp_ops *p;
6758 void *qd;
6759
6760#ifdef RTE_ETHDEV_DEBUG_TX
6761 if (port_id >= RTE_MAX_ETHPORTS ||
6762 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6763 RTE_ETHDEV_LOG_LINE(ERR,
6764 "Invalid port_id=%u or queue_id=%u",
6765 port_id, queue_id);
6766 rte_errno = ENODEV;
6767 return 0;
6768 }
6769#endif
6770
6771 /* fetch pointer to queue data */
6772 p = &rte_eth_fp_ops[port_id];
6773 qd = p->txq.data[queue_id];
6774
6775#ifdef RTE_ETHDEV_DEBUG_TX
6776 if (!rte_eth_dev_is_valid_port(port_id)) {
6777 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx port_id=%u", port_id);
6778 rte_errno = ENODEV;
6779 return 0;
6780 }
6781 if (qd == NULL) {
6782 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6783 queue_id, port_id);
6784 rte_errno = EINVAL;
6785 return 0;
6786 }
6787#endif
6788
6789 if (!p->tx_pkt_prepare)
6790 return nb_pkts;
6791
6792 return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
6793}
6794
6795#else
6796
/*
 * Native NOOP operation for compilation targets which do not require any
 * preparation steps, and where a functional NOOP may introduce an
 * unnecessary performance drop.
 *
 * Generally it is not a good idea to turn this on globally, and it should
 * not be used if the behavior of tx_preparation can change.
 */
6805
6806static inline uint16_t
6807rte_eth_tx_prepare(__rte_unused uint16_t port_id,
6808 __rte_unused uint16_t queue_id,
6809 __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6810{
6811 return nb_pkts;
6812}
6813
6814#endif
6815
6838static inline uint16_t
6839rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
6840 struct rte_eth_dev_tx_buffer *buffer)
6841{
6842 uint16_t sent;
6843 uint16_t to_send = buffer->length;
6844
6845 if (to_send == 0)
6846 return 0;
6847
6848 sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
6849
6850 buffer->length = 0;
6851
6852 /* All packets sent, or to be dealt with by callback below */
6853 if (unlikely(sent != to_send))
6854 buffer->error_callback(&buffer->pkts[sent],
6855 (uint16_t)(to_send - sent),
6856 buffer->error_userdata);
6857
6858 return sent;
6859}
6860
6891static __rte_always_inline uint16_t
6892rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
6893 struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
6894{
6895 buffer->pkts[buffer->length++] = tx_pkt;
6896 if (buffer->length < buffer->size)
6897 return 0;
6898
6899 return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
6900}
6901
6955__rte_experimental
6956static inline uint16_t
6957rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
6958 uint16_t tx_port_id, uint16_t tx_queue_id,
6959 struct rte_eth_recycle_rxq_info *recycle_rxq_info)
6960{
6961 struct rte_eth_fp_ops *p1, *p2;
6962 void *qd1, *qd2;
6963 uint16_t nb_mbufs;
6964
6965#ifdef RTE_ETHDEV_DEBUG_TX
6966 if (tx_port_id >= RTE_MAX_ETHPORTS ||
6967 tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6968 RTE_ETHDEV_LOG_LINE(ERR,
6969 "Invalid tx_port_id=%u or tx_queue_id=%u",
6970 tx_port_id, tx_queue_id);
6971 return 0;
6972 }
6973#endif
6974
6975 /* fetch pointer to Tx queue data */
6976 p1 = &rte_eth_fp_ops[tx_port_id];
6977 qd1 = p1->txq.data[tx_queue_id];
6978
6979#ifdef RTE_ETHDEV_DEBUG_TX
6980 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port_id, 0);
6981
6982 if (qd1 == NULL) {
6983 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6984 tx_queue_id, tx_port_id);
6985 return 0;
6986 }
6987#endif
6988 if (p1->recycle_tx_mbufs_reuse == NULL)
6989 return 0;
6990
6991#ifdef RTE_ETHDEV_DEBUG_RX
6992 if (rx_port_id >= RTE_MAX_ETHPORTS ||
6993 rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6994 RTE_ETHDEV_LOG_LINE(ERR, "Invalid rx_port_id=%u or rx_queue_id=%u",
6995 rx_port_id, rx_queue_id);
6996 return 0;
6997 }
6998#endif
6999
7000 /* fetch pointer to Rx queue data */
7001 p2 = &rte_eth_fp_ops[rx_port_id];
7002 qd2 = p2->rxq.data[rx_queue_id];
7003
7004#ifdef RTE_ETHDEV_DEBUG_RX
7005 RTE_ETH_VALID_PORTID_OR_ERR_RET(rx_port_id, 0);
7006
7007 if (qd2 == NULL) {
7008 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
7009 rx_queue_id, rx_port_id);
7010 return 0;
7011 }
7012#endif
7013 if (p2->recycle_rx_descriptors_refill == NULL)
7014 return 0;
7015
7016 /* Copy used *rte_mbuf* buffer pointers from Tx mbuf ring
7017 * into Rx mbuf ring.
7018 */
7019 nb_mbufs = p1->recycle_tx_mbufs_reuse(qd1, recycle_rxq_info);
7020
7021 /* If no recycling mbufs, return 0. */
7022 if (nb_mbufs == 0)
7023 return 0;
7024
7025 /* Replenish the Rx descriptors with the recycling
7026 * into Rx mbuf ring.
7027 */
7028 p2->recycle_rx_descriptors_refill(qd2, nb_mbufs);
7029
7030 return nb_mbufs;
7031}
7032
7061__rte_experimental
7062int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
7064
7099__rte_experimental
7100static inline int
7101rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
7102{
7103 struct rte_eth_fp_ops *fops;
7104 void *qd;
7105 int rc;
7106
7107#ifdef RTE_ETHDEV_DEBUG_TX
7108 if (port_id >= RTE_MAX_ETHPORTS || !rte_eth_dev_is_valid_port(port_id)) {
7109 RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id);
7110 rc = -ENODEV;
7111 goto out;
7112 }
7113
7114 if (queue_id >= RTE_MAX_QUEUES_PER_PORT) {
7115 RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
7116 queue_id, port_id);
7117 rc = -EINVAL;
7118 goto out;
7119 }
7120#endif
7121
7122 /* Fetch pointer to Tx queue data */
7123 fops = &rte_eth_fp_ops[port_id];
7124 qd = fops->txq.data[queue_id];
7125
7126#ifdef RTE_ETHDEV_DEBUG_TX
7127 if (qd == NULL) {
7128 RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
7129 queue_id, port_id);
7130 rc = -EINVAL;
7131 goto out;
7132 }
7133#endif
7134 if (fops->tx_queue_count == NULL) {
7135 rc = -ENOTSUP;
7136 goto out;
7137 }
7138
7139 rc = fops->tx_queue_count(qd);
7140
7141out:
7142 rte_eth_trace_tx_queue_count(port_id, queue_id, rc);
7143 return rc;
7144}
7145
7146#ifdef __cplusplus
7147}
7148#endif
7149
7150#endif /* _RTE_ETHDEV_H_ */
#define RTE_BIT32(nr)
Definition: rte_bitops.h:44
#define unlikely(x)
rte_cman_mode
Definition: rte_cman.h:16
#define __rte_cache_min_aligned
Definition: rte_common.h:704
#define __rte_unused
Definition: rte_common.h:210
#define __rte_always_inline
Definition: rte_common.h:452
#define __rte_warn_unused_result
Definition: rte_common.h:443
#define rte_errno
Definition: rte_errno.h:29
rte_eth_nb_pools
Definition: rte_ethdev.h:922
@ RTE_ETH_64_POOLS
Definition: rte_ethdev.h:926
@ RTE_ETH_32_POOLS
Definition: rte_ethdev.h:925
@ RTE_ETH_8_POOLS
Definition: rte_ethdev.h:923
@ RTE_ETH_16_POOLS
Definition: rte_ethdev.h:924
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:4089
@ RTE_ETH_EVENT_IPSEC_PMD_ERROR_END
Definition: rte_ethdev.h:4093
@ RTE_ETH_EVENT_IPSEC_UNKNOWN
Definition: rte_ethdev.h:4095
@ RTE_ETH_EVENT_IPSEC_MAX
Definition: rte_ethdev.h:4121
@ RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY
Definition: rte_ethdev.h:4109
@ RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW
Definition: rte_ethdev.h:4097
@ RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY
Definition: rte_ethdev.h:4114
@ RTE_ETH_EVENT_IPSEC_PMD_ERROR_START
Definition: rte_ethdev.h:4091
@ RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY
Definition: rte_ethdev.h:4104
@ RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY
Definition: rte_ethdev.h:4099
@ RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY
Definition: rte_ethdev.h:4119
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
__rte_experimental int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config)
int rte_eth_dev_is_removed(uint16_t port_id)
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition: rte_ethdev.h:676
__rte_experimental int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id, uint8_t affinity)
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:6892
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
int rte_eth_dev_set_link_down(uint16_t port_id)
rte_eth_event_macsec_subtype
Definition: rte_ethdev.h:4013
@ RTE_ETH_SUBEVENT_MACSEC_UNKNOWN
Definition: rte_ethdev.h:4015
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_E_EQ0_C_EQ1
Definition: rte_ethdev.h:4025
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SL_GTE48
Definition: rte_ethdev.h:4030
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SC_EQ1_SCB_EQ1
Definition: rte_ethdev.h:4040
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_ES_EQ1_SC_EQ1
Definition: rte_ethdev.h:4035
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_V_EQ1
Definition: rte_ethdev.h:4020
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
rte_eth_event_type
Definition: rte_ethdev.h:4154
@ RTE_ETH_EVENT_RECOVERY_FAILED
Definition: rte_ethdev.h:4231
@ RTE_ETH_EVENT_UNKNOWN
Definition: rte_ethdev.h:4155
@ RTE_ETH_EVENT_VF_MBOX
Definition: rte_ethdev.h:4161
@ RTE_ETH_EVENT_IPSEC
Definition: rte_ethdev.h:4172
@ RTE_ETH_EVENT_INTR_RESET
Definition: rte_ethdev.h:4160
@ RTE_ETH_EVENT_INTR_RMV
Definition: rte_ethdev.h:4163
@ RTE_ETH_EVENT_ERR_RECOVERING
Definition: rte_ethdev.h:4195
@ RTE_ETH_EVENT_MACSEC
Definition: rte_ethdev.h:4162
@ RTE_ETH_EVENT_RECOVERY_SUCCESS
Definition: rte_ethdev.h:4226
@ RTE_ETH_EVENT_DESTROY
Definition: rte_ethdev.h:4171
@ RTE_ETH_EVENT_FLOW_AGED
Definition: rte_ethdev.h:4173
@ RTE_ETH_EVENT_QUEUE_STATE
Definition: rte_ethdev.h:4158
@ RTE_ETH_EVENT_INTR_LSC
Definition: rte_ethdev.h:4156
@ RTE_ETH_EVENT_MAX
Definition: rte_ethdev.h:4232
@ RTE_ETH_EVENT_RX_AVAIL_THRESH
Definition: rte_ethdev.h:4178
@ RTE_ETH_EVENT_NEW
Definition: rte_ethdev.h:4170
int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
int rte_eth_dev_is_valid_port(uint16_t port_id)
rte_eth_cman_obj
Definition: rte_ethdev.h:6016
@ RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL
Definition: rte_ethdev.h:6023
@ RTE_ETH_CMAN_OBJ_RX_QUEUE
Definition: rte_ethdev.h:6018
#define RTE_ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:852
__rte_experimental int rte_eth_speed_lanes_get_capability(uint16_t port_id, struct rte_eth_speed_lanes_capa *speed_lanes_capa, unsigned int num)
__rte_experimental int rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_recycle_rxq_info *recycle_rxq_info)
__rte_experimental int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
int rte_eth_dev_reset(uint16_t port_id)
#define RTE_ETH_BURST_MODE_INFO_SIZE
Definition: rte_ethdev.h:1929
__rte_experimental const char * rte_eth_dev_capability_name(uint64_t capability)
__rte_experimental int rte_eth_speed_lanes_get(uint16_t port_id, uint32_t *lanes)
int rte_eth_allmulticast_disable(uint16_t port_id)
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
rte_eth_dev_state
Definition: rte_ethdev.h:2093
@ RTE_ETH_DEV_ATTACHED
Definition: rte_ethdev.h:2097
@ RTE_ETH_DEV_UNUSED
Definition: rte_ethdev.h:2095
@ RTE_ETH_DEV_REMOVED
Definition: rte_ethdev.h:2099
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
__rte_experimental int rte_eth_dev_count_aggr_ports(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:6301
rte_eth_fec_mode
Definition: rte_ethdev.h:2004
@ RTE_ETH_FEC_NOFEC
Definition: rte_ethdev.h:2005
@ RTE_ETH_FEC_BASER
Definition: rte_ethdev.h:2007
@ RTE_ETH_FEC_AUTO
Definition: rte_ethdev.h:2006
@ RTE_ETH_FEC_RS
Definition: rte_ethdev.h:2008
@ RTE_ETH_FEC_LLRS
Definition: rte_ethdev.h:2009
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) __rte_warn_unused_result
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
rte_eth_err_handle_mode
Definition: rte_ethdev.h:1754
@ RTE_ETH_ERROR_HANDLE_MODE_PASSIVE
Definition: rte_ethdev.h:1761
@ RTE_ETH_ERROR_HANDLE_MODE_NONE
Definition: rte_ethdev.h:1756
@ RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE
Definition: rte_ethdev.h:1767
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) __rte_warn_unused_result
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:417
@ RTE_ETH_MQ_TX_DCB
Definition: rte_ethdev.h:419
@ RTE_ETH_MQ_TX_VMDQ_DCB
Definition: rte_ethdev.h:420
@ RTE_ETH_MQ_TX_VMDQ_ONLY
Definition: rte_ethdev.h:421
@ RTE_ETH_MQ_TX_NONE
Definition: rte_ethdev.h:418
int rte_eth_promiscuous_get(uint16_t port_id)
__rte_experimental int rte_eth_xstats_set_counter(uint16_t port_id, uint64_t id, int on_off)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
int rte_eth_led_off(uint16_t port_id)
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
__rte_experimental int rte_eth_speed_lanes_set(uint16_t port_id, uint32_t speed_lanes)
int rte_eth_dev_set_link_up(uint16_t port_id)
__rte_experimental int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
uint16_t rte_eth_find_next(uint16_t port_id)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:391
@ RTE_ETH_MQ_RX_DCB_RSS
Definition: rte_ethdev.h:400
@ RTE_ETH_MQ_RX_VMDQ_DCB_RSS
Definition: rte_ethdev.h:409
@ RTE_ETH_MQ_RX_DCB
Definition: rte_ethdev.h:398
@ RTE_ETH_MQ_RX_VMDQ_DCB
Definition: rte_ethdev.h:407
@ RTE_ETH_MQ_RX_VMDQ_RSS
Definition: rte_ethdev.h:405
@ RTE_ETH_MQ_RX_NONE
Definition: rte_ethdev.h:393
@ RTE_ETH_MQ_RX_RSS
Definition: rte_ethdev.h:396
@ RTE_ETH_MQ_RX_VMDQ_ONLY
Definition: rte_ethdev.h:403
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link) __rte_warn_unused_result
int rte_eth_allmulticast_get(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
int rte_eth_allmulticast_enable(uint16_t port_id)
int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
int rte_eth_promiscuous_enable(uint16_t port_id)
rte_eth_representor_type
Definition: rte_ethdev.h:1741
@ RTE_ETH_REPRESENTOR_PF
Definition: rte_ethdev.h:1745
@ RTE_ETH_REPRESENTOR_VF
Definition: rte_ethdev.h:1743
@ RTE_ETH_REPRESENTOR_SF
Definition: rte_ethdev.h:1744
@ RTE_ETH_REPRESENTOR_NONE
Definition: rte_ethdev.h:1742
__rte_experimental int rte_eth_timesync_adjust_freq(uint16_t port_id, int64_t ppm)
int rte_eth_timesync_enable(uint16_t port_id)
__rte_experimental int rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:851
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_experimental int rte_eth_ip_reassembly_capability_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *capa)
__rte_experimental int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) __rte_warn_unused_result
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
int rte_eth_dev_stop(uint16_t port_id)
int rte_eth_timesync_disable(uint16_t port_id)
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) __rte_warn_unused_result
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:2063
__rte_experimental int rte_eth_find_rss_algo(const char *name, uint32_t *algo)
__rte_experimental int rte_eth_dev_get_reg_info_ext(uint16_t port_id, struct rte_dev_reg_info *info)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6754
rte_eth_tunnel_type
Definition: rte_ethdev.h:1454
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
int rte_eth_promiscuous_disable(uint16_t port_id)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_dev_owner_delete(const uint64_t owner_id)
__rte_experimental int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6644
int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
__rte_experimental int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
__rte_experimental int rte_eth_xstats_query_state(uint16_t port_id, uint64_t id)
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:4248
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, struct rte_eth_pfc_queue_info *pfc_queue_info)
static __rte_experimental int rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:7101
#define RTE_ETH_MQ_RX_DCB_FLAG
Definition: rte_ethdev.h:383
uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num) __rte_warn_unused_result
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
__rte_experimental int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo) __rte_warn_unused_result
rte_eth_fc_mode
Definition: rte_ethdev.h:1365
@ RTE_ETH_FC_TX_PAUSE
Definition: rte_ethdev.h:1368
@ RTE_ETH_FC_RX_PAUSE
Definition: rte_ethdev.h:1367
@ RTE_ETH_FC_NONE
Definition: rte_ethdev.h:1366
@ RTE_ETH_FC_FULL
Definition: rte_ethdev.h:1369
rte_eth_event_macsec_type
Definition: rte_ethdev.h:4047
@ RTE_ETH_EVENT_MACSEC_RX_SA_PN_HARD_EXP
Definition: rte_ethdev.h:4053
@ RTE_ETH_EVENT_MACSEC_SA_NOT_VALID
Definition: rte_ethdev.h:4061
@ RTE_ETH_EVENT_MACSEC_RX_SA_PN_SOFT_EXP
Definition: rte_ethdev.h:4055
@ RTE_ETH_EVENT_MACSEC_UNKNOWN
Definition: rte_ethdev.h:4049
@ RTE_ETH_EVENT_MACSEC_TX_SA_PN_HARD_EXP
Definition: rte_ethdev.h:4057
@ RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR
Definition: rte_ethdev.h:4051
@ RTE_ETH_EVENT_MACSEC_TX_SA_PN_SOFT_EXP
Definition: rte_ethdev.h:4059
int rte_eth_led_on(uint16_t port_id)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
__rte_experimental int rte_eth_ip_reassembly_conf_set(uint16_t port_id, const struct rte_eth_ip_reassembly_params *conf)
__rte_experimental int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, uint8_t avail_thresh)
__rte_experimental int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
__rte_experimental int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, struct rte_eth_pfc_queue_conf *pfc_queue_conf)
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction)
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
#define RTE_ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:382
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link) __rte_warn_unused_result
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_close(uint16_t port_id)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
__rte_experimental int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
__rte_experimental int rte_eth_ip_reassembly_conf_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *conf)
__rte_experimental int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, struct rte_power_monitor_cond *pmc)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
#define RTE_ETH_MQ_RX_VMDQ_FLAG
Definition: rte_ethdev.h:384
__rte_experimental int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:6839
int rte_eth_dev_socket_id(uint16_t port_id)
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:6449
__rte_experimental int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
static int rte_eth_tx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:6520
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
static __rte_experimental uint16_t rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id, uint16_t tx_port_id, uint16_t tx_queue_id, struct rte_eth_recycle_rxq_info *recycle_rxq_info)
Definition: rte_ethdev.h:6957
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
int rte_eth_dev_owner_new(uint64_t *owner_id)
int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
__rte_experimental int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, uint8_t *avail_thresh)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
__rte_experimental const char * rte_eth_link_speed_to_str(uint32_t link_speed)
int rte_eth_xstats_reset(uint16_t port_id)
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint32_t tx_rate)
__rte_experimental const char * rte_eth_dev_rss_algo_name(enum rte_eth_hash_function rss_algo)
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) __rte_warn_unused_result
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
rte_vlan_type
Definition: rte_ethdev.h:448
@ RTE_ETH_VLAN_TYPE_OUTER
Definition: rte_ethdev.h:451
@ RTE_ETH_VLAN_TYPE_INNER
Definition: rte_ethdev.h:450
__rte_experimental int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, unsigned int num)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:2087
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
rte_eth_hash_function
Definition: rte_ethdev.h:466
@ RTE_ETH_HASH_FUNCTION_DEFAULT
Definition: rte_ethdev.h:468
@ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT
Definition: rte_ethdev.h:483
@ RTE_ETH_HASH_FUNCTION_SIMPLE_XOR
Definition: rte_ethdev.h:470
@ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ
Definition: rte_ethdev.h:476
@ RTE_ETH_HASH_FUNCTION_TOEPLITZ
Definition: rte_ethdev.h:469
uint16_t rte_eth_dev_count_total(void)
__rte_experimental int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num) __rte_warn_unused_result
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1934
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:6377
__rte_experimental int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config)
rte_eth_nb_tcs
Definition: rte_ethdev.h:913
@ RTE_ETH_4_TCS
Definition: rte_ethdev.h:914
@ RTE_ETH_8_TCS
Definition: rte_ethdev.h:915
__rte_experimental int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config)
__rte_experimental int rte_eth_representor_info_get(uint16_t port_id, struct rte_eth_representor_info *info)
int rte_eth_dev_start(uint16_t port_id)
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num)
char info[RTE_ETH_BURST_MODE_INFO_SIZE]
Definition: rte_ethdev.h:1930
uint8_t rsvd_mode_params[4]
Definition: rte_ethdev.h:6090
enum rte_eth_cman_obj obj
Definition: rte_ethdev.h:6058
struct rte_cman_red_params red
Definition: rte_ethdev.h:6083
uint8_t rsvd_obj_params[4]
Definition: rte_ethdev.h:6075
enum rte_cman_mode mode
Definition: rte_ethdev.h:6060
uint8_t rsvd[8]
Definition: rte_ethdev.h:6047
uint64_t modes_supported
Definition: rte_ethdev.h:6037
uint64_t objs_supported
Definition: rte_ethdev.h:6042
struct rte_eth_intr_conf intr_conf
Definition: rte_ethdev.h:1546
struct rte_eth_vmdq_rx_conf vmdq_rx_conf
Definition: rte_ethdev.h:1533
struct rte_eth_txmode txmode
Definition: rte_ethdev.h:1520
struct rte_eth_rxmode rxmode
Definition: rte_ethdev.h:1519
struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf
Definition: rte_ethdev.h:1529
uint32_t lpbk_mode
Definition: rte_ethdev.h:1521
union rte_eth_conf::@150 tx_adv_conf
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1545
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf
Definition: rte_ethdev.h:1537
uint32_t link_speeds
Definition: rte_ethdev.h:1512
struct rte_eth_rss_conf rss_conf
Definition: rte_ethdev.h:1527
struct rte_eth_conf::@149 rx_adv_conf
struct rte_eth_dcb_tx_conf dcb_tx_conf
Definition: rte_ethdev.h:1539
struct rte_eth_dcb_rx_conf dcb_rx_conf
Definition: rte_ethdev.h:1531
struct rte_eth_vmdq_tx_conf vmdq_tx_conf
Definition: rte_ethdev.h:1541
uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS]
Definition: rte_ethdev.h:1995
uint8_t prio_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:1994
struct rte_eth_dcb_tc_queue_mapping tc_queue
Definition: rte_ethdev.h:1997
struct rte_eth_dcb_tc_queue_mapping::@152 tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS]
struct rte_eth_dcb_tc_queue_mapping::@151 tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS]
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:1359
uint16_t nb_seg_max
Definition: rte_ethdev.h:1346
uint16_t nb_align
Definition: rte_ethdev.h:1336
uint32_t max_rx_bufsize
Definition: rte_ethdev.h:1791
uint32_t max_hash_mac_addrs
Definition: rte_ethdev.h:1799
struct rte_eth_desc_lim rx_desc_lim
Definition: rte_ethdev.h:1822
unsigned int if_index
Definition: rte_ethdev.h:1778
uint16_t max_rx_queues
Definition: rte_ethdev.h:1795
uint64_t dev_capa
Definition: rte_ethdev.h:1840
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1820
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1784
uint16_t max_tx_queues
Definition: rte_ethdev.h:1796
struct rte_eth_txconf default_txconf
Definition: rte_ethdev.h:1818
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1801
struct rte_device * device
Definition: rte_ethdev.h:1776
struct rte_eth_rxconf default_rxconf
Definition: rte_ethdev.h:1817
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1827
enum rte_eth_err_handle_mode err_handle_mode
Definition: rte_ethdev.h:1847
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1792
uint16_t max_mtu
Definition: rte_ethdev.h:1781
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1794
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1819
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1850
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1849
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1810
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1821
uint16_t min_mtu
Definition: rte_ethdev.h:1780
uint16_t reta_size
Definition: rte_ethdev.h:1812
struct rte_eth_desc_lim tx_desc_lim
Definition: rte_ethdev.h:1823
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1816
uint16_t max_rx_mempools
Definition: rte_ethdev.h:1834
uint16_t max_vfs
Definition: rte_ethdev.h:1800
struct rte_eth_dev_portconf default_txportconf
Definition: rte_ethdev.h:1838
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1806
const char * driver_name
Definition: rte_ethdev.h:1777
uint8_t hash_key_size
Definition: rte_ethdev.h:1813
uint32_t speed_capa
Definition: rte_ethdev.h:1824
struct rte_eth_dev_portconf default_rxportconf
Definition: rte_ethdev.h:1836
struct rte_eth_switch_info switch_info
Definition: rte_ethdev.h:1845
struct rte_eth_rxseg_capa rx_seg_capa
Definition: rte_ethdev.h:1802
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1808
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1804
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1826
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1797
const uint32_t * dev_flags
Definition: rte_ethdev.h:1782
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:3875
enum rte_eth_event_ipsec_subtype subtype
Definition: rte_ethdev.h:4130
enum rte_eth_event_macsec_type type
Definition: rte_ethdev.h:4070
enum rte_eth_event_macsec_subtype subtype
Definition: rte_ethdev.h:4072
uint32_t low_water
Definition: rte_ethdev.h:1379
uint16_t send_xon
Definition: rte_ethdev.h:1381
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:1382
uint32_t high_water
Definition: rte_ethdev.h:1378
uint16_t pause_time
Definition: rte_ethdev.h:1380
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:1383
uint16_t max_nb_queues
Definition: rte_ethdev.h:1231
struct rte_eth_hairpin_queue_cap tx_cap
Definition: rte_ethdev.h:1238
struct rte_eth_hairpin_queue_cap rx_cap
Definition: rte_ethdev.h:1237
uint32_t use_locked_device_memory
Definition: rte_ethdev.h:1298
struct rte_eth_fc_conf fc
Definition: rte_ethdev.h:1393
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:1430
enum rte_eth_fc_mode mode_capa
Definition: rte_ethdev.h:1409
struct rte_mempool * mp
Definition: rte_ethdev.h:1899
struct rte_mbuf ** mbuf_ring
Definition: rte_ethdev.h:1898
struct rte_eth_representor_range ranges[]
Definition: rte_ethdev.h:5731
enum rte_eth_representor_type type
Definition: rte_ethdev.h:5707
char name[RTE_DEV_NAME_MAX_LEN]
Definition: rte_ethdev.h:5717
uint8_t * rss_key
Definition: rte_ethdev.h:507
uint8_t rss_key_len
Definition: rte_ethdev.h:508
enum rte_eth_hash_function algorithm
Definition: rte_ethdev.h:514
uint64_t rss_hf
Definition: rte_ethdev.h:513
uint16_t reta[RTE_ETH_RETA_GROUP_SIZE]
Definition: rte_ethdev.h:906
struct rte_eth_thresh rx_thresh
Definition: rte_ethdev.h:1126
uint64_t offloads
Definition: rte_ethdev.h:1144
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1177
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1176
uint8_t rx_deferred_start
Definition: rte_ethdev.h:1129
uint16_t share_group
Definition: rte_ethdev.h:1137
uint8_t rx_drop_en
Definition: rte_ethdev.h:1128
uint16_t share_qid
Definition: rte_ethdev.h:1138
union rte_eth_rxseg * rx_seg
Definition: rte_ethdev.h:1152
struct rte_mempool ** rx_mempools
Definition: rte_ethdev.h:1173
uint16_t rx_nseg
Definition: rte_ethdev.h:1130
uint16_t rx_free_thresh
Definition: rte_ethdev.h:1127
uint32_t mtu
Definition: rte_ethdev.h:430
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:432
uint64_t offloads
Definition: rte_ethdev.h:438
void * reserved_ptrs[2]
Definition: rte_ethdev.h:441
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:440
enum rte_eth_rx_mq_mode mq_mode
Definition: rte_ethdev.h:429
uint8_t scattered_rx
Definition: rte_ethdev.h:1866
struct rte_mempool * mp
Definition: rte_ethdev.h:1864
uint8_t queue_state
Definition: rte_ethdev.h:1867
uint8_t avail_thresh
Definition: rte_ethdev.h:1876
uint16_t nb_desc
Definition: rte_ethdev.h:1868
uint16_t rx_buf_size
Definition: rte_ethdev.h:1869
__extension__ uint32_t multi_pools
Definition: rte_ethdev.h:1727
uint32_t offset_allowed
Definition: rte_ethdev.h:1728
uint32_t offset_align_log2
Definition: rte_ethdev.h:1729
struct rte_mempool * mp
Definition: rte_ethdev.h:1093
uint64_t imissed
Definition: rte_ethdev.h:271
uint64_t obytes
Definition: rte_ethdev.h:266
uint64_t opackets
Definition: rte_ethdev.h:264
uint64_t rx_nombuf
Definition: rte_ethdev.h:274
uint64_t ibytes
Definition: rte_ethdev.h:265
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:281
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:279
uint64_t ierrors
Definition: rte_ethdev.h:272
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:285
uint64_t ipackets
Definition: rte_ethdev.h:263
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:277
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:283
uint64_t oerrors
Definition: rte_ethdev.h:273
const char * name
Definition: rte_ethdev.h:1701
uint8_t hthresh
Definition: rte_ethdev.h:375
uint8_t pthresh
Definition: rte_ethdev.h:374
uint8_t wthresh
Definition: rte_ethdev.h:376
uint8_t tx_deferred_start
Definition: rte_ethdev.h:1189
uint64_t offloads
Definition: rte_ethdev.h:1195
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1198
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1197
struct rte_eth_thresh tx_thresh
Definition: rte_ethdev.h:1184
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:1185
uint16_t tx_free_thresh
Definition: rte_ethdev.h:1186
uint64_t offloads
Definition: rte_ethdev.h:1017
__extension__ uint8_t hw_vlan_insert_pvid
Definition: rte_ethdev.h:1026
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1029
__extension__ uint8_t hw_vlan_reject_tagged
Definition: rte_ethdev.h:1022
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1028
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:1024
enum rte_eth_tx_mq_mode mq_mode
Definition: rte_ethdev.h:1011
uint8_t queue_state
Definition: rte_ethdev.h:1886
uint16_t nb_desc
Definition: rte_ethdev.h:1885
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:964
uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:973
uint8_t enable_default_pool
Definition: rte_ethdev.h:965
struct rte_eth_vmdq_dcb_conf::@145 pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:995
struct rte_eth_vmdq_rx_conf::@146 pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]
uint8_t enable_default_pool
Definition: rte_ethdev.h:996
uint8_t enable_loop_back
Definition: rte_ethdev.h:998
char name[RTE_ETH_XSTATS_NAME_SIZE]
Definition: rte_ethdev.h:1965
uint64_t value
Definition: rte_ethdev.h:1947
uint64_t id
Definition: rte_ethdev.h:1946