DPDK 21.11.9
rte_ethdev.h
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
3 */
4
5#ifndef _RTE_ETHDEV_H_
6#define _RTE_ETHDEV_H_
7
148#ifdef __cplusplus
149extern "C" {
150#endif
151
152#include <stdint.h>
153
154/* Use this macro to check if LRO API is supported */
155#define RTE_ETHDEV_HAS_LRO_SUPPORT
156
157/* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
158#ifdef RTE_LIBRTE_ETHDEV_DEBUG
159#define RTE_ETHDEV_DEBUG_RX
160#define RTE_ETHDEV_DEBUG_TX
161#endif
162
163#include <rte_compat.h>
164#include <rte_log.h>
165#include <rte_interrupts.h>
166#include <rte_dev.h>
167#include <rte_devargs.h>
168#include <rte_bitops.h>
169#include <rte_errno.h>
170#include <rte_common.h>
171#include <rte_config.h>
172#include <rte_ether.h>
173#include <rte_power_intrinsics.h>
174
175#include "rte_ethdev_trace_fp.h"
176#include "rte_dev_info.h"
177
178extern int rte_eth_dev_logtype;
179
180#define RTE_ETHDEV_LOG(level, ...) \
181 rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
182
183struct rte_mbuf;
184
201int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
202
218
232
246#define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
247 for (rte_eth_iterator_init(iter, devargs), \
248 id = rte_eth_iterator_next(iter); \
249 id != RTE_MAX_ETHPORTS; \
250 id = rte_eth_iterator_next(iter))
251
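A minimal usage sketch of the matching-device iterator above, assuming EAL is already initialized; the devargs string and helper name are illustrative:

#include <stdio.h>
#include <rte_ethdev.h>

/* Print every ethdev port that matches a devargs string. */
static void
list_matching_ports(const char *devargs)
{
	struct rte_dev_iterator iterator;
	uint16_t port_id;

	RTE_ETH_FOREACH_MATCHING_DEV(port_id, devargs, &iterator)
		printf("matched port %u\n", port_id);
}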
262 uint64_t ipackets;
263 uint64_t opackets;
264 uint64_t ibytes;
265 uint64_t obytes;
270 uint64_t imissed;
271 uint64_t ierrors;
272 uint64_t oerrors;
273 uint64_t rx_nombuf;
274 /* Queue stats are limited to max 256 queues */
276 uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
278 uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
280 uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
282 uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
284 uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
285};
286
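The counters above are filled by rte_eth_stats_get(), declared further down in this file. A short sketch, assuming the port is already started; the helper name is illustrative:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Dump the basic counters of a started port. */
static int
dump_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;
	int ret = rte_eth_stats_get(port_id, &stats);

	if (ret != 0)
		return ret;
	printf("port %u: rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64
	       " rx_nombuf=%" PRIu64 "\n", port_id, stats.ipackets,
	       stats.opackets, stats.imissed, stats.rx_nombuf);
	return 0;
}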
290#define RTE_ETH_LINK_SPEED_AUTONEG 0
291#define ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG
292#define RTE_ETH_LINK_SPEED_FIXED RTE_BIT32(0)
293#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED
294#define RTE_ETH_LINK_SPEED_10M_HD RTE_BIT32(1)
295#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD
296#define RTE_ETH_LINK_SPEED_10M RTE_BIT32(2)
297#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M
298#define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)
299#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD
300#define RTE_ETH_LINK_SPEED_100M RTE_BIT32(4)
301#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M
302#define RTE_ETH_LINK_SPEED_1G RTE_BIT32(5)
303#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G
304#define RTE_ETH_LINK_SPEED_2_5G RTE_BIT32(6)
305#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G
306#define RTE_ETH_LINK_SPEED_5G RTE_BIT32(7)
307#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G
308#define RTE_ETH_LINK_SPEED_10G RTE_BIT32(8)
309#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G
310#define RTE_ETH_LINK_SPEED_20G RTE_BIT32(9)
311#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G
312#define RTE_ETH_LINK_SPEED_25G RTE_BIT32(10)
313#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G
314#define RTE_ETH_LINK_SPEED_40G RTE_BIT32(11)
315#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G
316#define RTE_ETH_LINK_SPEED_50G RTE_BIT32(12)
317#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G
318#define RTE_ETH_LINK_SPEED_56G RTE_BIT32(13)
319#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G
320#define RTE_ETH_LINK_SPEED_100G RTE_BIT32(14)
321#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G
322#define RTE_ETH_LINK_SPEED_200G RTE_BIT32(15)
323#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G
329#define RTE_ETH_SPEED_NUM_NONE 0
330#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE
331#define RTE_ETH_SPEED_NUM_10M 10
332#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M
333#define RTE_ETH_SPEED_NUM_100M 100
334#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M
335#define RTE_ETH_SPEED_NUM_1G 1000
336#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G
337#define RTE_ETH_SPEED_NUM_2_5G 2500
338#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G
339#define RTE_ETH_SPEED_NUM_5G 5000
340#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G
341#define RTE_ETH_SPEED_NUM_10G 10000
342#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G
343#define RTE_ETH_SPEED_NUM_20G 20000
344#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G
345#define RTE_ETH_SPEED_NUM_25G 25000
346#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G
347#define RTE_ETH_SPEED_NUM_40G 40000
348#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G
349#define RTE_ETH_SPEED_NUM_50G 50000
350#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G
351#define RTE_ETH_SPEED_NUM_56G 56000
352#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G
353#define RTE_ETH_SPEED_NUM_100G 100000
354#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
355#define RTE_ETH_SPEED_NUM_200G 200000
356#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
357#define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX
358#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN
364__extension__
366 uint32_t link_speed;
367 uint16_t link_duplex : 1;
368 uint16_t link_autoneg : 1;
369 uint16_t link_status : 1;
370} __rte_aligned(8);
375#define RTE_ETH_LINK_HALF_DUPLEX 0
376#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX
377#define RTE_ETH_LINK_FULL_DUPLEX 1
378#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
379#define RTE_ETH_LINK_DOWN 0
380#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN
381#define RTE_ETH_LINK_UP 1
382#define ETH_LINK_UP RTE_ETH_LINK_UP
383#define RTE_ETH_LINK_FIXED 0
384#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED
385#define RTE_ETH_LINK_AUTONEG 1
386#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG
387#define RTE_ETH_LINK_MAX_STR_LEN 40
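A sketch of reading the link state and formatting it with the helpers declared later in this file (rte_eth_link_get_nowait() and rte_eth_link_to_str()); the helper name is illustrative:

#include <stdio.h>
#include <rte_ethdev.h>

/* Print the current link state of a port in human-readable form. */
static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;
	char text[RTE_ETH_LINK_MAX_STR_LEN];

	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
		rte_eth_link_to_str(text, sizeof(text), &link);
		printf("port %u: %s\n", port_id, text);
	}
}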
395 uint8_t pthresh;
396 uint8_t hthresh;
397 uint8_t wthresh;
398};
399
403#define RTE_ETH_MQ_RX_RSS_FLAG RTE_BIT32(0)
404#define ETH_MQ_RX_RSS_FLAG RTE_ETH_MQ_RX_RSS_FLAG
405#define RTE_ETH_MQ_RX_DCB_FLAG RTE_BIT32(1)
406#define ETH_MQ_RX_DCB_FLAG RTE_ETH_MQ_RX_DCB_FLAG
407#define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2)
408#define ETH_MQ_RX_VMDQ_FLAG RTE_ETH_MQ_RX_VMDQ_FLAG
418
425
435};
436
437#define ETH_MQ_RX_NONE RTE_ETH_MQ_RX_NONE
438#define ETH_MQ_RX_RSS RTE_ETH_MQ_RX_RSS
439#define ETH_MQ_RX_DCB RTE_ETH_MQ_RX_DCB
440#define ETH_MQ_RX_DCB_RSS RTE_ETH_MQ_RX_DCB_RSS
441#define ETH_MQ_RX_VMDQ_ONLY RTE_ETH_MQ_RX_VMDQ_ONLY
442#define ETH_MQ_RX_VMDQ_RSS RTE_ETH_MQ_RX_VMDQ_RSS
443#define ETH_MQ_RX_VMDQ_DCB RTE_ETH_MQ_RX_VMDQ_DCB
444#define ETH_MQ_RX_VMDQ_DCB_RSS RTE_ETH_MQ_RX_VMDQ_DCB_RSS
445
455};
456#define ETH_MQ_TX_NONE RTE_ETH_MQ_TX_NONE
457#define ETH_MQ_TX_DCB RTE_ETH_MQ_TX_DCB
458#define ETH_MQ_TX_VMDQ_DCB RTE_ETH_MQ_TX_VMDQ_DCB
459#define ETH_MQ_TX_VMDQ_ONLY RTE_ETH_MQ_TX_VMDQ_ONLY
460
467 uint32_t mtu;
470 uint16_t split_hdr_size;
476 uint64_t offloads;
477
478 uint64_t reserved_64s[2];
479 void *reserved_ptrs[2];
480};
481
487 RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
490 RTE_ETH_VLAN_TYPE_MAX,
491};
492
493#define ETH_VLAN_TYPE_UNKNOWN RTE_ETH_VLAN_TYPE_UNKNOWN
494#define ETH_VLAN_TYPE_INNER RTE_ETH_VLAN_TYPE_INNER
495#define ETH_VLAN_TYPE_OUTER RTE_ETH_VLAN_TYPE_OUTER
496#define ETH_VLAN_TYPE_MAX RTE_ETH_VLAN_TYPE_MAX
497
503 uint64_t ids[64];
504};
505
524 uint8_t *rss_key;
525 uint8_t rss_key_len;
526 uint64_t rss_hf;
527};
528
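A sketch of updating the RSS configuration above at runtime through rte_eth_dev_rss_hash_update(), declared later in this file; leaving rss_key NULL keeps the current key, and the requested rss_hf is assumed to be supported by the device:

#include <rte_ethdev.h>

/* Hash on IPv4/IPv6 TCP and UDP only, reusing the current RSS key. */
static int
enable_l4_rss(uint16_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* keep the key already programmed */
		.rss_hf = RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP,
	};

	/* rss_hf must be a subset of dev_info.flow_type_rss_offloads. */
	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}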
529/*
530 * A packet can be identified by hardware as belonging to one of several flow
531 * types; different NIC hardware may support different sets of flow types.
532 * The NIC hardware identifies the flow type at the deepest protocol layer it
533 * can, and exclusively. For example, if a packet is identified as
534 * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be reported as any other flow
535 * type, even though it is also an IPv4 packet.
536 */
537#define RTE_ETH_FLOW_UNKNOWN 0
538#define RTE_ETH_FLOW_RAW 1
539#define RTE_ETH_FLOW_IPV4 2
540#define RTE_ETH_FLOW_FRAG_IPV4 3
541#define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
542#define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
543#define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
544#define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
545#define RTE_ETH_FLOW_IPV6 8
546#define RTE_ETH_FLOW_FRAG_IPV6 9
547#define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
548#define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
549#define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
550#define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
551#define RTE_ETH_FLOW_L2_PAYLOAD 14
552#define RTE_ETH_FLOW_IPV6_EX 15
553#define RTE_ETH_FLOW_IPV6_TCP_EX 16
554#define RTE_ETH_FLOW_IPV6_UDP_EX 17
556#define RTE_ETH_FLOW_PORT 18
557#define RTE_ETH_FLOW_VXLAN 19
558#define RTE_ETH_FLOW_GENEVE 20
559#define RTE_ETH_FLOW_NVGRE 21
560#define RTE_ETH_FLOW_VXLAN_GPE 22
561#define RTE_ETH_FLOW_GTPU 23
562#define RTE_ETH_FLOW_MAX 24
563
564/*
565 * The macros below define RSS offload types; they can be used to
566 * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
567 */
568#define RTE_ETH_RSS_IPV4 RTE_BIT64(2)
569#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4
570#define RTE_ETH_RSS_FRAG_IPV4 RTE_BIT64(3)
571#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4
572#define RTE_ETH_RSS_NONFRAG_IPV4_TCP RTE_BIT64(4)
573#define ETH_RSS_NONFRAG_IPV4_TCP RTE_ETH_RSS_NONFRAG_IPV4_TCP
574#define RTE_ETH_RSS_NONFRAG_IPV4_UDP RTE_BIT64(5)
575#define ETH_RSS_NONFRAG_IPV4_UDP RTE_ETH_RSS_NONFRAG_IPV4_UDP
576#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP RTE_BIT64(6)
577#define ETH_RSS_NONFRAG_IPV4_SCTP RTE_ETH_RSS_NONFRAG_IPV4_SCTP
578#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
579#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER
580#define RTE_ETH_RSS_IPV6 RTE_BIT64(8)
581#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6
582#define RTE_ETH_RSS_FRAG_IPV6 RTE_BIT64(9)
583#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6
584#define RTE_ETH_RSS_NONFRAG_IPV6_TCP RTE_BIT64(10)
585#define ETH_RSS_NONFRAG_IPV6_TCP RTE_ETH_RSS_NONFRAG_IPV6_TCP
586#define RTE_ETH_RSS_NONFRAG_IPV6_UDP RTE_BIT64(11)
587#define ETH_RSS_NONFRAG_IPV6_UDP RTE_ETH_RSS_NONFRAG_IPV6_UDP
588#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP RTE_BIT64(12)
589#define ETH_RSS_NONFRAG_IPV6_SCTP RTE_ETH_RSS_NONFRAG_IPV6_SCTP
590#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
591#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER
592#define RTE_ETH_RSS_L2_PAYLOAD RTE_BIT64(14)
593#define ETH_RSS_L2_PAYLOAD RTE_ETH_RSS_L2_PAYLOAD
594#define RTE_ETH_RSS_IPV6_EX RTE_BIT64(15)
595#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX
596#define RTE_ETH_RSS_IPV6_TCP_EX RTE_BIT64(16)
597#define ETH_RSS_IPV6_TCP_EX RTE_ETH_RSS_IPV6_TCP_EX
598#define RTE_ETH_RSS_IPV6_UDP_EX RTE_BIT64(17)
599#define ETH_RSS_IPV6_UDP_EX RTE_ETH_RSS_IPV6_UDP_EX
600#define RTE_ETH_RSS_PORT RTE_BIT64(18)
601#define ETH_RSS_PORT RTE_ETH_RSS_PORT
602#define RTE_ETH_RSS_VXLAN RTE_BIT64(19)
603#define ETH_RSS_VXLAN RTE_ETH_RSS_VXLAN
604#define RTE_ETH_RSS_GENEVE RTE_BIT64(20)
605#define ETH_RSS_GENEVE RTE_ETH_RSS_GENEVE
606#define RTE_ETH_RSS_NVGRE RTE_BIT64(21)
607#define ETH_RSS_NVGRE RTE_ETH_RSS_NVGRE
608#define RTE_ETH_RSS_GTPU RTE_BIT64(23)
609#define ETH_RSS_GTPU RTE_ETH_RSS_GTPU
610#define RTE_ETH_RSS_ETH RTE_BIT64(24)
611#define ETH_RSS_ETH RTE_ETH_RSS_ETH
612#define RTE_ETH_RSS_S_VLAN RTE_BIT64(25)
613#define ETH_RSS_S_VLAN RTE_ETH_RSS_S_VLAN
614#define RTE_ETH_RSS_C_VLAN RTE_BIT64(26)
615#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN
616#define RTE_ETH_RSS_ESP RTE_BIT64(27)
617#define ETH_RSS_ESP RTE_ETH_RSS_ESP
618#define RTE_ETH_RSS_AH RTE_BIT64(28)
619#define ETH_RSS_AH RTE_ETH_RSS_AH
620#define RTE_ETH_RSS_L2TPV3 RTE_BIT64(29)
621#define ETH_RSS_L2TPV3 RTE_ETH_RSS_L2TPV3
622#define RTE_ETH_RSS_PFCP RTE_BIT64(30)
623#define ETH_RSS_PFCP RTE_ETH_RSS_PFCP
624#define RTE_ETH_RSS_PPPOE RTE_BIT64(31)
625#define ETH_RSS_PPPOE RTE_ETH_RSS_PPPOE
626#define RTE_ETH_RSS_ECPRI RTE_BIT64(32)
627#define ETH_RSS_ECPRI RTE_ETH_RSS_ECPRI
628#define RTE_ETH_RSS_MPLS RTE_BIT64(33)
629#define ETH_RSS_MPLS RTE_ETH_RSS_MPLS
630#define RTE_ETH_RSS_IPV4_CHKSUM RTE_BIT64(34)
631#define ETH_RSS_IPV4_CHKSUM RTE_ETH_RSS_IPV4_CHKSUM
632
645#define RTE_ETH_RSS_L4_CHKSUM RTE_BIT64(35)
646#define ETH_RSS_L4_CHKSUM RTE_ETH_RSS_L4_CHKSUM
647
648/*
649 * The following macros are combined with the RTE_ETH_RSS_* types above to
650 * select a more specific input set. These bits are allocated from the high
651 * end of the 64-bit value.
652 * Note: using an RTE_ETH_RSS_* type without SRC_ONLY/DST_ONLY means that
653 * both the source and destination fields are taken into account. Setting
654 * SRC_ONLY and DST_ONLY of the same level simultaneously is equivalent to
655 * setting neither of them.
656 */
657#define RTE_ETH_RSS_L3_SRC_ONLY RTE_BIT64(63)
658#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY
659#define RTE_ETH_RSS_L3_DST_ONLY RTE_BIT64(62)
660#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY
661#define RTE_ETH_RSS_L4_SRC_ONLY RTE_BIT64(61)
662#define ETH_RSS_L4_SRC_ONLY RTE_ETH_RSS_L4_SRC_ONLY
663#define RTE_ETH_RSS_L4_DST_ONLY RTE_BIT64(60)
664#define ETH_RSS_L4_DST_ONLY RTE_ETH_RSS_L4_DST_ONLY
665#define RTE_ETH_RSS_L2_SRC_ONLY RTE_BIT64(59)
666#define ETH_RSS_L2_SRC_ONLY RTE_ETH_RSS_L2_SRC_ONLY
667#define RTE_ETH_RSS_L2_DST_ONLY RTE_BIT64(58)
668#define ETH_RSS_L2_DST_ONLY RTE_ETH_RSS_L2_DST_ONLY
669
670/*
671 * Select only an IPv6 address prefix as the RSS input set, as described in
672 * https://tools.ietf.org/html/rfc6052.
673 * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
674 * RTE_ETH_RSS_NONFRAG_IPV6_TCP or RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
675 */
676#define RTE_ETH_RSS_L3_PRE32 RTE_BIT64(57)
677#define RTE_ETH_RSS_L3_PRE40 RTE_BIT64(56)
678#define RTE_ETH_RSS_L3_PRE48 RTE_BIT64(55)
679#define RTE_ETH_RSS_L3_PRE56 RTE_BIT64(54)
680#define RTE_ETH_RSS_L3_PRE64 RTE_BIT64(53)
681#define RTE_ETH_RSS_L3_PRE96 RTE_BIT64(52)
682
683/*
684 * Combine the following macros with the types above to choose whether the
685 * inner layers, the outer layers, or both are used for RSS computation.
686 * Bits 50 and 51 are reserved for this.
687 */
688
696#define RTE_ETH_RSS_LEVEL_PMD_DEFAULT (UINT64_C(0) << 50)
697#define ETH_RSS_LEVEL_PMD_DEFAULT RTE_ETH_RSS_LEVEL_PMD_DEFAULT
698
703#define RTE_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
704#define ETH_RSS_LEVEL_OUTERMOST RTE_ETH_RSS_LEVEL_OUTERMOST
705
710#define RTE_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
711#define ETH_RSS_LEVEL_INNERMOST RTE_ETH_RSS_LEVEL_INNERMOST
712#define RTE_ETH_RSS_LEVEL_MASK (UINT64_C(3) << 50)
713#define ETH_RSS_LEVEL_MASK RTE_ETH_RSS_LEVEL_MASK
714
715#define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
716#define ETH_RSS_LEVEL(rss_hf) RTE_ETH_RSS_LEVEL(rss_hf)
717
728static inline uint64_t
729rte_eth_rss_hf_refine(uint64_t rss_hf)
730{
731 if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
732 rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
733
734 if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
735 rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
736
737 return rss_hf;
738}
739
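A small illustration of the SRC_ONLY/DST_ONLY semantics described above: a protocol type is combined with an input-set modifier, and rte_eth_rss_hf_refine() clears the modifiers again when both directions of the same level are requested.

#include <assert.h>
#include <rte_ethdev.h>

static void
rss_input_set_example(void)
{
	/* Hash IPv4/UDP flows on the source L4 port only. */
	uint64_t src_only = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
			    RTE_ETH_RSS_L4_SRC_ONLY;

	/* SRC_ONLY plus DST_ONLY of the same level is equivalent to
	 * requesting neither, so refine() drops both bits.
	 */
	uint64_t both = src_only | RTE_ETH_RSS_L4_DST_ONLY;

	assert(rte_eth_rss_hf_refine(both) == RTE_ETH_RSS_NONFRAG_IPV4_UDP);
	assert(rte_eth_rss_hf_refine(src_only) == src_only);
}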
740#define RTE_ETH_RSS_IPV6_PRE32 ( \
741 RTE_ETH_RSS_IPV6 | \
742 RTE_ETH_RSS_L3_PRE32)
743#define ETH_RSS_IPV6_PRE32 RTE_ETH_RSS_IPV6_PRE32
744
745#define RTE_ETH_RSS_IPV6_PRE40 ( \
746 RTE_ETH_RSS_IPV6 | \
747 RTE_ETH_RSS_L3_PRE40)
748#define ETH_RSS_IPV6_PRE40 RTE_ETH_RSS_IPV6_PRE40
749
750#define RTE_ETH_RSS_IPV6_PRE48 ( \
751 RTE_ETH_RSS_IPV6 | \
752 RTE_ETH_RSS_L3_PRE48)
753#define ETH_RSS_IPV6_PRE48 RTE_ETH_RSS_IPV6_PRE48
754
755#define RTE_ETH_RSS_IPV6_PRE56 ( \
756 RTE_ETH_RSS_IPV6 | \
757 RTE_ETH_RSS_L3_PRE56)
758#define ETH_RSS_IPV6_PRE56 RTE_ETH_RSS_IPV6_PRE56
759
760#define RTE_ETH_RSS_IPV6_PRE64 ( \
761 RTE_ETH_RSS_IPV6 | \
762 RTE_ETH_RSS_L3_PRE64)
763#define ETH_RSS_IPV6_PRE64 RTE_ETH_RSS_IPV6_PRE64
764
765#define RTE_ETH_RSS_IPV6_PRE96 ( \
766 RTE_ETH_RSS_IPV6 | \
767 RTE_ETH_RSS_L3_PRE96)
768#define ETH_RSS_IPV6_PRE96 RTE_ETH_RSS_IPV6_PRE96
769
770#define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
771 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
772 RTE_ETH_RSS_L3_PRE32)
773#define ETH_RSS_IPV6_PRE32_UDP RTE_ETH_RSS_IPV6_PRE32_UDP
774
775#define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
776 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
777 RTE_ETH_RSS_L3_PRE40)
778#define ETH_RSS_IPV6_PRE40_UDP RTE_ETH_RSS_IPV6_PRE40_UDP
779
780#define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
781 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
782 RTE_ETH_RSS_L3_PRE48)
783#define ETH_RSS_IPV6_PRE48_UDP RTE_ETH_RSS_IPV6_PRE48_UDP
784
785#define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
786 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
787 RTE_ETH_RSS_L3_PRE56)
788#define ETH_RSS_IPV6_PRE56_UDP RTE_ETH_RSS_IPV6_PRE56_UDP
789
790#define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
791 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
792 RTE_ETH_RSS_L3_PRE64)
793#define ETH_RSS_IPV6_PRE64_UDP RTE_ETH_RSS_IPV6_PRE64_UDP
794
795#define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
796 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
797 RTE_ETH_RSS_L3_PRE96)
798#define ETH_RSS_IPV6_PRE96_UDP RTE_ETH_RSS_IPV6_PRE96_UDP
799
800#define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
801 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
802 RTE_ETH_RSS_L3_PRE32)
803#define ETH_RSS_IPV6_PRE32_TCP RTE_ETH_RSS_IPV6_PRE32_TCP
804
805#define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
806 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
807 RTE_ETH_RSS_L3_PRE40)
808#define ETH_RSS_IPV6_PRE40_TCP RTE_ETH_RSS_IPV6_PRE40_TCP
809
810#define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
811 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
812 RTE_ETH_RSS_L3_PRE48)
813#define ETH_RSS_IPV6_PRE48_TCP RTE_ETH_RSS_IPV6_PRE48_TCP
814
815#define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
816 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
817 RTE_ETH_RSS_L3_PRE56)
818#define ETH_RSS_IPV6_PRE56_TCP RTE_ETH_RSS_IPV6_PRE56_TCP
819
820#define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
821 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
822 RTE_ETH_RSS_L3_PRE64)
823#define ETH_RSS_IPV6_PRE64_TCP RTE_ETH_RSS_IPV6_PRE64_TCP
824
825#define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
826 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
827 RTE_ETH_RSS_L3_PRE96)
828#define ETH_RSS_IPV6_PRE96_TCP RTE_ETH_RSS_IPV6_PRE96_TCP
829
830#define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
831 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
832 RTE_ETH_RSS_L3_PRE32)
833#define ETH_RSS_IPV6_PRE32_SCTP RTE_ETH_RSS_IPV6_PRE32_SCTP
834
835#define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
836 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
837 RTE_ETH_RSS_L3_PRE40)
838#define ETH_RSS_IPV6_PRE40_SCTP RTE_ETH_RSS_IPV6_PRE40_SCTP
839
840#define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
841 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
842 RTE_ETH_RSS_L3_PRE48)
843#define ETH_RSS_IPV6_PRE48_SCTP RTE_ETH_RSS_IPV6_PRE48_SCTP
844
845#define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
846 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
847 RTE_ETH_RSS_L3_PRE56)
848#define ETH_RSS_IPV6_PRE56_SCTP RTE_ETH_RSS_IPV6_PRE56_SCTP
849
850#define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
851 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
852 RTE_ETH_RSS_L3_PRE64)
853#define ETH_RSS_IPV6_PRE64_SCTP RTE_ETH_RSS_IPV6_PRE64_SCTP
854
855#define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
856 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
857 RTE_ETH_RSS_L3_PRE96)
858#define ETH_RSS_IPV6_PRE96_SCTP RTE_ETH_RSS_IPV6_PRE96_SCTP
859
860#define RTE_ETH_RSS_IP ( \
861 RTE_ETH_RSS_IPV4 | \
862 RTE_ETH_RSS_FRAG_IPV4 | \
863 RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
864 RTE_ETH_RSS_IPV6 | \
865 RTE_ETH_RSS_FRAG_IPV6 | \
866 RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
867 RTE_ETH_RSS_IPV6_EX)
868#define ETH_RSS_IP RTE_ETH_RSS_IP
869
870#define RTE_ETH_RSS_UDP ( \
871 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
872 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
873 RTE_ETH_RSS_IPV6_UDP_EX)
874#define ETH_RSS_UDP RTE_ETH_RSS_UDP
875
876#define RTE_ETH_RSS_TCP ( \
877 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
878 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
879 RTE_ETH_RSS_IPV6_TCP_EX)
880#define ETH_RSS_TCP RTE_ETH_RSS_TCP
881
882#define RTE_ETH_RSS_SCTP ( \
883 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
884 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
885#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP
886
887#define RTE_ETH_RSS_TUNNEL ( \
888 RTE_ETH_RSS_VXLAN | \
889 RTE_ETH_RSS_GENEVE | \
890 RTE_ETH_RSS_NVGRE)
891#define ETH_RSS_TUNNEL RTE_ETH_RSS_TUNNEL
892
893#define RTE_ETH_RSS_VLAN ( \
894 RTE_ETH_RSS_S_VLAN | \
895 RTE_ETH_RSS_C_VLAN)
896#define ETH_RSS_VLAN RTE_ETH_RSS_VLAN
897
899#define RTE_ETH_RSS_PROTO_MASK ( \
900 RTE_ETH_RSS_IPV4 | \
901 RTE_ETH_RSS_FRAG_IPV4 | \
902 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
903 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
904 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
905 RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
906 RTE_ETH_RSS_IPV6 | \
907 RTE_ETH_RSS_FRAG_IPV6 | \
908 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
909 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
910 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
911 RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
912 RTE_ETH_RSS_L2_PAYLOAD | \
913 RTE_ETH_RSS_IPV6_EX | \
914 RTE_ETH_RSS_IPV6_TCP_EX | \
915 RTE_ETH_RSS_IPV6_UDP_EX | \
916 RTE_ETH_RSS_PORT | \
917 RTE_ETH_RSS_VXLAN | \
918 RTE_ETH_RSS_GENEVE | \
919 RTE_ETH_RSS_NVGRE | \
920 RTE_ETH_RSS_MPLS)
921#define ETH_RSS_PROTO_MASK RTE_ETH_RSS_PROTO_MASK
922
923/*
924 * Definitions used for redirection table entry size.
925 * Some RSS RETA sizes may not be supported by some drivers; check the
926 * driver documentation or the description of the relevant functions for details.
927 */
928#define RTE_ETH_RSS_RETA_SIZE_64 64
929#define ETH_RSS_RETA_SIZE_64 RTE_ETH_RSS_RETA_SIZE_64
930#define RTE_ETH_RSS_RETA_SIZE_128 128
931#define ETH_RSS_RETA_SIZE_128 RTE_ETH_RSS_RETA_SIZE_128
932#define RTE_ETH_RSS_RETA_SIZE_256 256
933#define ETH_RSS_RETA_SIZE_256 RTE_ETH_RSS_RETA_SIZE_256
934#define RTE_ETH_RSS_RETA_SIZE_512 512
935#define ETH_RSS_RETA_SIZE_512 RTE_ETH_RSS_RETA_SIZE_512
936#define RTE_ETH_RETA_GROUP_SIZE 64
937#define RTE_RETA_GROUP_SIZE RTE_ETH_RETA_GROUP_SIZE
938
940#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS 64
941#define ETH_VMDQ_MAX_VLAN_FILTERS RTE_ETH_VMDQ_MAX_VLAN_FILTERS
942#define RTE_ETH_DCB_NUM_USER_PRIORITIES 8
943#define ETH_DCB_NUM_USER_PRIORITIES RTE_ETH_DCB_NUM_USER_PRIORITIES
944#define RTE_ETH_VMDQ_DCB_NUM_QUEUES 128
945#define ETH_VMDQ_DCB_NUM_QUEUES RTE_ETH_VMDQ_DCB_NUM_QUEUES
946#define RTE_ETH_DCB_NUM_QUEUES 128
947#define ETH_DCB_NUM_QUEUES RTE_ETH_DCB_NUM_QUEUES
951#define RTE_ETH_DCB_PG_SUPPORT RTE_BIT32(0)
952#define ETH_DCB_PG_SUPPORT RTE_ETH_DCB_PG_SUPPORT
953#define RTE_ETH_DCB_PFC_SUPPORT RTE_BIT32(1)
954#define ETH_DCB_PFC_SUPPORT RTE_ETH_DCB_PFC_SUPPORT
958#define RTE_ETH_VLAN_STRIP_OFFLOAD 0x0001
959#define ETH_VLAN_STRIP_OFFLOAD RTE_ETH_VLAN_STRIP_OFFLOAD
960#define RTE_ETH_VLAN_FILTER_OFFLOAD 0x0002
961#define ETH_VLAN_FILTER_OFFLOAD RTE_ETH_VLAN_FILTER_OFFLOAD
962#define RTE_ETH_VLAN_EXTEND_OFFLOAD 0x0004
963#define ETH_VLAN_EXTEND_OFFLOAD RTE_ETH_VLAN_EXTEND_OFFLOAD
964#define RTE_ETH_QINQ_STRIP_OFFLOAD 0x0008
965#define ETH_QINQ_STRIP_OFFLOAD RTE_ETH_QINQ_STRIP_OFFLOAD
966
967#define RTE_ETH_VLAN_STRIP_MASK 0x0001
968#define ETH_VLAN_STRIP_MASK RTE_ETH_VLAN_STRIP_MASK
969#define RTE_ETH_VLAN_FILTER_MASK 0x0002
970#define ETH_VLAN_FILTER_MASK RTE_ETH_VLAN_FILTER_MASK
971#define RTE_ETH_VLAN_EXTEND_MASK 0x0004
972#define ETH_VLAN_EXTEND_MASK RTE_ETH_VLAN_EXTEND_MASK
973#define RTE_ETH_QINQ_STRIP_MASK 0x0008
974#define ETH_QINQ_STRIP_MASK RTE_ETH_QINQ_STRIP_MASK
975#define RTE_ETH_VLAN_ID_MAX 0x0FFF
976#define ETH_VLAN_ID_MAX RTE_ETH_VLAN_ID_MAX
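A sketch of using the *_MASK bits above with rte_eth_dev_get_vlan_offload()/rte_eth_dev_set_vlan_offload(), both declared later in this file, to enable VLAN stripping without disturbing the other VLAN settings; the helper name is illustrative:

#include <rte_ethdev.h>

/* Enable VLAN stripping, leaving filter/extend/QinQ settings untouched. */
static int
enable_vlan_strip(uint16_t port_id)
{
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (mask < 0)
		return mask;
	return rte_eth_dev_set_vlan_offload(port_id,
					    mask | RTE_ETH_VLAN_STRIP_OFFLOAD);
}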
979/* Definitions used for receive MAC address */
980#define RTE_ETH_NUM_RECEIVE_MAC_ADDR 128
981#define ETH_NUM_RECEIVE_MAC_ADDR RTE_ETH_NUM_RECEIVE_MAC_ADDR
982
983/* Definitions used for unicast hash */
984#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128
985#define ETH_VMDQ_NUM_UC_HASH_ARRAY RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY
986
991#define RTE_ETH_VMDQ_ACCEPT_UNTAG RTE_BIT32(0)
992#define ETH_VMDQ_ACCEPT_UNTAG RTE_ETH_VMDQ_ACCEPT_UNTAG
994#define RTE_ETH_VMDQ_ACCEPT_HASH_MC RTE_BIT32(1)
995#define ETH_VMDQ_ACCEPT_HASH_MC RTE_ETH_VMDQ_ACCEPT_HASH_MC
997#define RTE_ETH_VMDQ_ACCEPT_HASH_UC RTE_BIT32(2)
998#define ETH_VMDQ_ACCEPT_HASH_UC RTE_ETH_VMDQ_ACCEPT_HASH_UC
1000#define RTE_ETH_VMDQ_ACCEPT_BROADCAST RTE_BIT32(3)
1001#define ETH_VMDQ_ACCEPT_BROADCAST RTE_ETH_VMDQ_ACCEPT_BROADCAST
1003#define RTE_ETH_VMDQ_ACCEPT_MULTICAST RTE_BIT32(4)
1004#define ETH_VMDQ_ACCEPT_MULTICAST RTE_ETH_VMDQ_ACCEPT_MULTICAST
1015 uint64_t mask;
1017 uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
1018};
1019
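A sketch of programming the redirection table declared above through rte_eth_dev_rss_reta_update() (declared later in this file), spreading entries round-robin over nb_queues; reta_size is assumed to come from rte_eth_dev_info.reta_size and to be a multiple of RTE_ETH_RETA_GROUP_SIZE:

#include <errno.h>
#include <stdlib.h>
#include <rte_ethdev.h>

/* Point RETA entry i at queue (i % nb_queues). */
static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 *reta_conf;
	unsigned int i, group, slot;
	int ret;

	reta_conf = calloc(reta_size / RTE_ETH_RETA_GROUP_SIZE,
			   sizeof(*reta_conf));
	if (reta_conf == NULL)
		return -ENOMEM;

	for (i = 0; i < reta_size; i++) {
		group = i / RTE_ETH_RETA_GROUP_SIZE;
		slot = i % RTE_ETH_RETA_GROUP_SIZE;
		reta_conf[group].mask |= UINT64_C(1) << slot;
		reta_conf[group].reta[slot] = i % nb_queues;
	}

	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
	free(reta_conf);
	return ret;
}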
1026 RTE_ETH_8_TCS = 8
1028#define ETH_4_TCS RTE_ETH_4_TCS
1029#define ETH_8_TCS RTE_ETH_8_TCS
1030
1039 RTE_ETH_64_POOLS = 64
1041#define ETH_8_POOLS RTE_ETH_8_POOLS
1042#define ETH_16_POOLS RTE_ETH_16_POOLS
1043#define ETH_32_POOLS RTE_ETH_32_POOLS
1044#define ETH_64_POOLS RTE_ETH_64_POOLS
1045
1046/* This structure may be extended in future. */
1047struct rte_eth_dcb_rx_conf {
1048 enum rte_eth_nb_tcs nb_tcs;
1050 uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
1051};
1052
1053struct rte_eth_vmdq_dcb_tx_conf {
1054 enum rte_eth_nb_pools nb_queue_pools;
1056 uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
1057};
1058
1059struct rte_eth_dcb_tx_conf {
1060 enum rte_eth_nb_tcs nb_tcs;
1062 uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
1063};
1064
1065struct rte_eth_vmdq_tx_conf {
1066 enum rte_eth_nb_pools nb_queue_pools;
1067};
1068
1085 struct {
1086 uint16_t vlan_id;
1087 uint64_t pools;
1091};
1092
1117 uint32_t rx_mode;
1118 struct {
1119 uint16_t vlan_id;
1120 uint64_t pools;
1122};
1123
1134 uint64_t offloads;
1135
1136 uint16_t pvid;
1137 __extension__
1138 uint8_t
1144
1145 uint64_t reserved_64s[2];
1146 void *reserved_ptrs[2];
1147};
1148
1186 struct rte_mempool *mp;
1187 uint16_t length;
1188 uint16_t offset;
1189 uint32_t reserved;
1190};
1191
1199 /* The settings for buffer split offload. */
1200 struct rte_eth_rxseg_split split;
1201 /* Settings for other features should be added here. */
1202};
1203
1210 uint8_t rx_drop_en;
1212 uint16_t rx_nseg;
1219 uint16_t share_group;
1220 uint16_t share_qid;
1226 uint64_t offloads;
1235
1236 uint64_t reserved_64s[2];
1237 void *reserved_ptrs[2];
1238};
1239
1245 uint16_t tx_rs_thresh;
1255 uint64_t offloads;
1256
1257 uint64_t reserved_64s[2];
1258 void *reserved_ptrs[2];
1259};
1260
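A condensed bring-up sketch tying rte_eth_conf, rte_eth_rxconf and rte_eth_txconf together with the configure/queue-setup/start calls declared later in this file; mbuf_pool is assumed to exist already, NB_RXD/NB_TXD and the helper name are illustrative, and error handling is reduced to early returns:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define NB_RXD 1024
#define NB_TXD 1024

static int
port_init(uint16_t port_id, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf port_conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_NONE },
	};
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Request fast mbuf free only when the driver reports support. */
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, NB_RXD,
			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
	if (ret < 0)
		return ret;

	txconf = dev_info.default_txconf;
	txconf.offloads = port_conf.txmode.offloads;
	ret = rte_eth_tx_queue_setup(port_id, 0, NB_TXD,
			rte_eth_dev_socket_id(port_id), &txconf);
	if (ret < 0)
		return ret;

	return rte_eth_dev_start(port_id);
}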
1271 uint16_t max_rx_2_tx;
1273 uint16_t max_tx_2_rx;
1274 uint16_t max_nb_desc;
1275};
1276
1277#define RTE_ETH_MAX_HAIRPIN_PEERS 32
1278
1286 uint16_t port;
1287 uint16_t queue;
1288};
1289
1297 uint32_t peer_count:16;
1308 uint32_t tx_explicit:1;
1309
1321 uint32_t manual_bind:1;
1322 uint32_t reserved:14;
1323 struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1324};
1325
1330 uint16_t nb_max;
1331 uint16_t nb_min;
1332 uint16_t nb_align;
1342 uint16_t nb_seg_max;
1343
1356};
1357
1367
1368#define RTE_FC_NONE RTE_ETH_FC_NONE
1369#define RTE_FC_RX_PAUSE RTE_ETH_FC_RX_PAUSE
1370#define RTE_FC_TX_PAUSE RTE_ETH_FC_TX_PAUSE
1371#define RTE_FC_FULL RTE_ETH_FC_FULL
1372
1379 uint32_t high_water;
1380 uint32_t low_water;
1381 uint16_t pause_time;
1382 uint16_t send_xon;
1385 uint8_t autoneg;
1386};
1387
1395 uint8_t priority;
1396};
1397
1403 RTE_ETH_TUNNEL_TYPE_NONE = 0,
1404 RTE_ETH_TUNNEL_TYPE_VXLAN,
1405 RTE_ETH_TUNNEL_TYPE_GENEVE,
1406 RTE_ETH_TUNNEL_TYPE_TEREDO,
1407 RTE_ETH_TUNNEL_TYPE_NVGRE,
1408 RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1409 RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1410 RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1411 RTE_ETH_TUNNEL_TYPE_ECPRI,
1412 RTE_ETH_TUNNEL_TYPE_MAX,
1413};
1414
1415#define RTE_TUNNEL_TYPE_NONE RTE_ETH_TUNNEL_TYPE_NONE
1416#define RTE_TUNNEL_TYPE_VXLAN RTE_ETH_TUNNEL_TYPE_VXLAN
1417#define RTE_TUNNEL_TYPE_GENEVE RTE_ETH_TUNNEL_TYPE_GENEVE
1418#define RTE_TUNNEL_TYPE_TEREDO RTE_ETH_TUNNEL_TYPE_TEREDO
1419#define RTE_TUNNEL_TYPE_NVGRE RTE_ETH_TUNNEL_TYPE_NVGRE
1420#define RTE_TUNNEL_TYPE_IP_IN_GRE RTE_ETH_TUNNEL_TYPE_IP_IN_GRE
1421#define RTE_L2_TUNNEL_TYPE_E_TAG RTE_ETH_L2_TUNNEL_TYPE_E_TAG
1422#define RTE_TUNNEL_TYPE_VXLAN_GPE RTE_ETH_TUNNEL_TYPE_VXLAN_GPE
1423#define RTE_TUNNEL_TYPE_ECPRI RTE_ETH_TUNNEL_TYPE_ECPRI
1424#define RTE_TUNNEL_TYPE_MAX RTE_ETH_TUNNEL_TYPE_MAX
1425
1426/* Deprecated API file for rte_eth_dev_filter_* functions */
1427#include "rte_eth_ctrl.h"
1428
1437};
1438#define rte_fdir_pballoc_type rte_eth_fdir_pballoc_type
1439
1440#define RTE_FDIR_PBALLOC_64K RTE_ETH_FDIR_PBALLOC_64K
1441#define RTE_FDIR_PBALLOC_128K RTE_ETH_FDIR_PBALLOC_128K
1442#define RTE_FDIR_PBALLOC_256K RTE_ETH_FDIR_PBALLOC_256K
1443
1451};
1452
1464 uint8_t drop_queue;
1465 struct rte_eth_fdir_masks mask;
1468};
1469
1470#define rte_fdir_conf rte_eth_fdir_conf
1471
1482 uint16_t udp_port;
1483 uint8_t prot_type;
1484};
1485
1491 uint32_t lsc:1;
1493 uint32_t rxq:1;
1495 uint32_t rmv:1;
1496};
1497
1498#define rte_intr_conf rte_eth_intr_conf
1499
1506 uint32_t link_speeds;
1515 uint32_t lpbk_mode;
1520 struct {
1525 struct rte_eth_dcb_rx_conf dcb_rx_conf;
1529 union {
1531 struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1533 struct rte_eth_dcb_tx_conf dcb_tx_conf;
1535 struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1542};
1543
1547#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP RTE_BIT64(0)
1548#define DEV_RX_OFFLOAD_VLAN_STRIP RTE_ETH_RX_OFFLOAD_VLAN_STRIP
1549#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1550#define DEV_RX_OFFLOAD_IPV4_CKSUM RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
1551#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1552#define DEV_RX_OFFLOAD_UDP_CKSUM RTE_ETH_RX_OFFLOAD_UDP_CKSUM
1553#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1554#define DEV_RX_OFFLOAD_TCP_CKSUM RTE_ETH_RX_OFFLOAD_TCP_CKSUM
1555#define RTE_ETH_RX_OFFLOAD_TCP_LRO RTE_BIT64(4)
1556#define DEV_RX_OFFLOAD_TCP_LRO RTE_ETH_RX_OFFLOAD_TCP_LRO
1557#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP RTE_BIT64(5)
1558#define DEV_RX_OFFLOAD_QINQ_STRIP RTE_ETH_RX_OFFLOAD_QINQ_STRIP
1559#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1560#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
1561#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP RTE_BIT64(7)
1562#define DEV_RX_OFFLOAD_MACSEC_STRIP RTE_ETH_RX_OFFLOAD_MACSEC_STRIP
1563#define RTE_ETH_RX_OFFLOAD_HEADER_SPLIT RTE_BIT64(8)
1564#define DEV_RX_OFFLOAD_HEADER_SPLIT RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
1565#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER RTE_BIT64(9)
1566#define DEV_RX_OFFLOAD_VLAN_FILTER RTE_ETH_RX_OFFLOAD_VLAN_FILTER
1567#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND RTE_BIT64(10)
1568#define DEV_RX_OFFLOAD_VLAN_EXTEND RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
1569#define RTE_ETH_RX_OFFLOAD_SCATTER RTE_BIT64(13)
1570#define DEV_RX_OFFLOAD_SCATTER RTE_ETH_RX_OFFLOAD_SCATTER
1576#define RTE_ETH_RX_OFFLOAD_TIMESTAMP RTE_BIT64(14)
1577#define DEV_RX_OFFLOAD_TIMESTAMP RTE_ETH_RX_OFFLOAD_TIMESTAMP
1578#define RTE_ETH_RX_OFFLOAD_SECURITY RTE_BIT64(15)
1579#define DEV_RX_OFFLOAD_SECURITY RTE_ETH_RX_OFFLOAD_SECURITY
1580#define RTE_ETH_RX_OFFLOAD_KEEP_CRC RTE_BIT64(16)
1581#define DEV_RX_OFFLOAD_KEEP_CRC RTE_ETH_RX_OFFLOAD_KEEP_CRC
1582#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM RTE_BIT64(17)
1583#define DEV_RX_OFFLOAD_SCTP_CKSUM RTE_ETH_RX_OFFLOAD_SCTP_CKSUM
1584#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(18)
1585#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
1586#define RTE_ETH_RX_OFFLOAD_RSS_HASH RTE_BIT64(19)
1587#define DEV_RX_OFFLOAD_RSS_HASH RTE_ETH_RX_OFFLOAD_RSS_HASH
1588#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT RTE_BIT64(20)
1589
1590#define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1591 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1592 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1593#define DEV_RX_OFFLOAD_CHECKSUM RTE_ETH_RX_OFFLOAD_CHECKSUM
1594#define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1595 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1596 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1597 RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1598#define DEV_RX_OFFLOAD_VLAN RTE_ETH_RX_OFFLOAD_VLAN
1599
1600/*
1601 * If new Rx offload capabilities are defined, they must also be
1602 * mentioned in rte_rx_offload_names in the rte_ethdev.c file.
1603 */
1604
1608#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT RTE_BIT64(0)
1609#define DEV_TX_OFFLOAD_VLAN_INSERT RTE_ETH_TX_OFFLOAD_VLAN_INSERT
1610#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1611#define DEV_TX_OFFLOAD_IPV4_CKSUM RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
1612#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1613#define DEV_TX_OFFLOAD_UDP_CKSUM RTE_ETH_TX_OFFLOAD_UDP_CKSUM
1614#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1615#define DEV_TX_OFFLOAD_TCP_CKSUM RTE_ETH_TX_OFFLOAD_TCP_CKSUM
1616#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM RTE_BIT64(4)
1617#define DEV_TX_OFFLOAD_SCTP_CKSUM RTE_ETH_TX_OFFLOAD_SCTP_CKSUM
1618#define RTE_ETH_TX_OFFLOAD_TCP_TSO RTE_BIT64(5)
1619#define DEV_TX_OFFLOAD_TCP_TSO RTE_ETH_TX_OFFLOAD_TCP_TSO
1620#define RTE_ETH_TX_OFFLOAD_UDP_TSO RTE_BIT64(6)
1621#define DEV_TX_OFFLOAD_UDP_TSO RTE_ETH_TX_OFFLOAD_UDP_TSO
1622#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)
1623#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM
1624#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT RTE_BIT64(8)
1625#define DEV_TX_OFFLOAD_QINQ_INSERT RTE_ETH_TX_OFFLOAD_QINQ_INSERT
1626#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO RTE_BIT64(9)
1627#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO
1628#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO RTE_BIT64(10)
1629#define DEV_TX_OFFLOAD_GRE_TNL_TSO RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO
1630#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO RTE_BIT64(11)
1631#define DEV_TX_OFFLOAD_IPIP_TNL_TSO RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO
1632#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO RTE_BIT64(12)
1633#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO
1634#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT RTE_BIT64(13)
1635#define DEV_TX_OFFLOAD_MACSEC_INSERT RTE_ETH_TX_OFFLOAD_MACSEC_INSERT
1640#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE RTE_BIT64(14)
1641#define DEV_TX_OFFLOAD_MT_LOCKFREE RTE_ETH_TX_OFFLOAD_MT_LOCKFREE
1643#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS RTE_BIT64(15)
1644#define DEV_TX_OFFLOAD_MULTI_SEGS RTE_ETH_TX_OFFLOAD_MULTI_SEGS
1650#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE RTE_BIT64(16)
1651#define DEV_TX_OFFLOAD_MBUF_FAST_FREE RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
1652#define RTE_ETH_TX_OFFLOAD_SECURITY RTE_BIT64(17)
1653#define DEV_TX_OFFLOAD_SECURITY RTE_ETH_TX_OFFLOAD_SECURITY
1659#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO RTE_BIT64(18)
1660#define DEV_TX_OFFLOAD_UDP_TNL_TSO RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO
1666#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO RTE_BIT64(19)
1667#define DEV_TX_OFFLOAD_IP_TNL_TSO RTE_ETH_TX_OFFLOAD_IP_TNL_TSO
1669#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(20)
1670#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM
1676#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1677#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP
1678/*
1679 * If new Tx offload capabilities are defined, they must also be
1680 * mentioned in rte_tx_offload_names in the rte_ethdev.c file.
1681 */
1682
1687#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1689#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1699#define RTE_ETH_DEV_CAPA_RXQ_SHARE RTE_BIT64(2)
1701#define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP RTE_BIT64(3)
1703#define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1706/*
1707 * Fallback default preferred Rx/Tx port parameters.
1708 * These are used if an application requests default parameters
1709 * but the PMD does not provide preferred values.
1710 */
1711#define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1712#define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1713#define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1714#define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1715
1722 uint16_t burst_size;
1723 uint16_t ring_size;
1724 uint16_t nb_queues;
1725};
1726
1731#define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1732
1737 const char *name;
1738 uint16_t domain_id;
1746 uint16_t port_id;
1752 uint16_t rx_domain;
1753};
1754
1762 __extension__
1763 uint32_t multi_pools:1;
1764 uint32_t offset_allowed:1;
1766 uint16_t max_nseg;
1767 uint16_t reserved;
1768};
1769
1782};
1783
1791 const char *driver_name;
1792 unsigned int if_index;
1794 uint16_t min_mtu;
1795 uint16_t max_mtu;
1796 const uint32_t *dev_flags;
1798 uint32_t max_rx_pktlen;
1801 uint16_t max_rx_queues;
1802 uint16_t max_tx_queues;
1803 uint32_t max_mac_addrs;
1806 uint16_t max_vfs;
1818 uint16_t reta_size;
1829 uint32_t speed_capa;
1831 uint16_t nb_rx_queues;
1832 uint16_t nb_tx_queues;
1838 uint64_t dev_capa;
1844
1845 uint64_t reserved_64s[2];
1846 void *reserved_ptrs[2];
1847};
1848
1850#define RTE_ETH_QUEUE_STATE_STOPPED 0
1851#define RTE_ETH_QUEUE_STATE_STARTED 1
1852#define RTE_ETH_QUEUE_STATE_HAIRPIN 2
1860 struct rte_mempool *mp;
1863 uint8_t queue_state;
1864 uint16_t nb_desc;
1865 uint16_t rx_buf_size;
1867
1874 uint16_t nb_desc;
1875 uint8_t queue_state;
1877
1878/* Generic burst mode flag definitions; values can be ORed. */
1879
1885#define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1886
1892 uint64_t flags;
1894#define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1896};
1897
1899#define RTE_ETH_XSTATS_NAME_SIZE 64
1900
1911 uint64_t id;
1912 uint64_t value;
1913};
1914
1931};
1932
1933#define RTE_ETH_DCB_NUM_TCS 8
1934#define ETH_DCB_NUM_TCS RTE_ETH_DCB_NUM_TCS
1935#define RTE_ETH_MAX_VMDQ_POOL 64
1936#define ETH_MAX_VMDQ_POOL RTE_ETH_MAX_VMDQ_POOL
1937
1944 struct {
1945 uint16_t base;
1946 uint16_t nb_queue;
1947 } tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1949 struct {
1950 uint16_t base;
1951 uint16_t nb_queue;
1952 } tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1953};
1954
1960 uint8_t nb_tcs;
1962 uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS];
1965};
1966
1976};
1977
1978/* Translate from FEC mode to FEC capa */
1979#define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)
1980
1981/* This macro indicates FEC capa mask */
1982#define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)
1983
1984/* A structure used to get capabilities per link speed */
1985struct rte_eth_fec_capa {
1986 uint32_t speed;
1987 uint32_t capa;
1988};
1989
1990#define RTE_ETH_ALL RTE_MAX_ETHPORTS
1991
1992/* Macros to check for valid port */
1993#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1994 if (!rte_eth_dev_is_valid_port(port_id)) { \
1995 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1996 return retval; \
1997 } \
1998} while (0)
1999
2000#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
2001 if (!rte_eth_dev_is_valid_port(port_id)) { \
2002 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
2003 return; \
2004 } \
2005} while (0)
2006
2029typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
2030 struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
2031 void *user_param);
2032
2053typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
2054 struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
2055
2066};
2067
2068struct rte_eth_dev_sriov {
2069 uint8_t active;
2070 uint8_t nb_q_per_pool;
2071 uint16_t def_vmdq_idx;
2072 uint16_t def_pool_q_idx;
2073};
2074#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
2075
2076#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2077
2078#define RTE_ETH_DEV_NO_OWNER 0
2079
2080#define RTE_ETH_MAX_OWNER_NAME_LEN 64
2081
2082struct rte_eth_dev_owner {
2083 uint64_t id;
2084 char name[RTE_ETH_MAX_OWNER_NAME_LEN];
2085};
2086
2092#define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE RTE_BIT32(0)
2094#define RTE_ETH_DEV_INTR_LSC RTE_BIT32(1)
2096#define RTE_ETH_DEV_BONDED_SLAVE RTE_BIT32(2)
2098#define RTE_ETH_DEV_INTR_RMV RTE_BIT32(3)
2100#define RTE_ETH_DEV_REPRESENTOR RTE_BIT32(4)
2102#define RTE_ETH_DEV_NOLIVE_MAC_ADDR RTE_BIT32(5)
2107#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2121uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2122 const uint64_t owner_id);
2123
2127#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
2128 for (p = rte_eth_find_next_owned_by(0, o); \
2129 (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
2130 p = rte_eth_find_next_owned_by(p + 1, o))
2131
2140uint16_t rte_eth_find_next(uint16_t port_id);
2141
2145#define RTE_ETH_FOREACH_DEV(p) \
2146 RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
2147
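A sketch of walking every valid, unowned port with RTE_ETH_FOREACH_DEV above and printing its name via rte_eth_dev_get_name_by_port(), declared later in this file:

#include <stdio.h>
#include <rte_ethdev.h>

static void
list_ports(void)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV(port_id) {
		if (rte_eth_dev_get_name_by_port(port_id, name) == 0)
			printf("port %u: %s\n", port_id, name);
	}
}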
2159uint16_t
2160rte_eth_find_next_of(uint16_t port_id_start,
2161 const struct rte_device *parent);
2162
2171#define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
2172 for (port_id = rte_eth_find_next_of(0, parent); \
2173 port_id < RTE_MAX_ETHPORTS; \
2174 port_id = rte_eth_find_next_of(port_id + 1, parent))
2175
2187uint16_t
2188rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2189
2200#define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
2201 for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
2202 port_id < RTE_MAX_ETHPORTS; \
2203 port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2204
2218__rte_experimental
2219int rte_eth_dev_owner_new(uint64_t *owner_id);
2220
2234__rte_experimental
2235int rte_eth_dev_owner_set(const uint16_t port_id,
2236 const struct rte_eth_dev_owner *owner);
2237
2251__rte_experimental
2252int rte_eth_dev_owner_unset(const uint16_t port_id,
2253 const uint64_t owner_id);
2254
2266__rte_experimental
2267int rte_eth_dev_owner_delete(const uint64_t owner_id);
2268
2282__rte_experimental
2283int rte_eth_dev_owner_get(const uint16_t port_id,
2284 struct rte_eth_dev_owner *owner);
2285
2297
2307
2319uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2320
2329const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2330
2339const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2340
2352__rte_experimental
2353const char *rte_eth_dev_capability_name(uint64_t capability);
2354
2394int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2395 uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2396
2405int
2406rte_eth_dev_is_removed(uint16_t port_id);
2407
2470int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2471 uint16_t nb_rx_desc, unsigned int socket_id,
2472 const struct rte_eth_rxconf *rx_conf,
2473 struct rte_mempool *mb_pool);
2474
2502__rte_experimental
2504 (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2505 const struct rte_eth_hairpin_conf *conf);
2506
2555int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2556 uint16_t nb_tx_desc, unsigned int socket_id,
2557 const struct rte_eth_txconf *tx_conf);
2558
2584__rte_experimental
2586 (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2587 const struct rte_eth_hairpin_conf *conf);
2588
2615__rte_experimental
2616int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2617 size_t len, uint32_t direction);
2618
2641__rte_experimental
2642int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2643
2668__rte_experimental
2669int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2670
2681int rte_eth_dev_socket_id(uint16_t port_id);
2682
2692int rte_eth_dev_is_valid_port(uint16_t port_id);
2693
2711int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2712
2729int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2730
2748int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2749
2766int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2767
2787int rte_eth_dev_start(uint16_t port_id);
2788
2799int rte_eth_dev_stop(uint16_t port_id);
2800
2813int rte_eth_dev_set_link_up(uint16_t port_id);
2814
2824int rte_eth_dev_set_link_down(uint16_t port_id);
2825
2836int rte_eth_dev_close(uint16_t port_id);
2837
2875int rte_eth_dev_reset(uint16_t port_id);
2876
2888int rte_eth_promiscuous_enable(uint16_t port_id);
2889
2901int rte_eth_promiscuous_disable(uint16_t port_id);
2902
2913int rte_eth_promiscuous_get(uint16_t port_id);
2914
2926int rte_eth_allmulticast_enable(uint16_t port_id);
2927
2939int rte_eth_allmulticast_disable(uint16_t port_id);
2940
2951int rte_eth_allmulticast_get(uint16_t port_id);
2952
2970int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
2971
2986int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
2987
3001__rte_experimental
3003
3022__rte_experimental
3023int rte_eth_link_to_str(char *str, size_t len,
3024 const struct rte_eth_link *eth_link);
3025
3043int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
3044
3056int rte_eth_stats_reset(uint16_t port_id);
3057
3087int rte_eth_xstats_get_names(uint16_t port_id,
3088 struct rte_eth_xstat_name *xstats_names,
3089 unsigned int size);
3090
3120int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3121 unsigned int n);
3122
3147int
3149 struct rte_eth_xstat_name *xstats_names, unsigned int size,
3150 uint64_t *ids);
3151
3176int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3177 uint64_t *values, unsigned int size);
3178
3198int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3199 uint64_t *id);
3200
3213int rte_eth_xstats_reset(uint16_t port_id);
3214
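A sketch of the usual two-call pattern for the extended statistics API above: query the number of entries first, then fetch names and values with matching indexes; the helper name is illustrative:

#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

static int
dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *values = NULL;
	int len, i, ret = 0;

	len = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (len <= 0)
		return len;

	names = calloc(len, sizeof(*names));
	values = calloc(len, sizeof(*values));
	if (names == NULL || values == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	if (rte_eth_xstats_get_names(port_id, names, len) != len ||
	    rte_eth_xstats_get(port_id, values, len) != len) {
		ret = -EIO;	/* the set of statistics changed underneath us */
		goto out;
	}

	for (i = 0; i < len; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, values[i].value);
out:
	free(names);
	free(values);
	return ret;
}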
3234 uint16_t tx_queue_id, uint8_t stat_idx);
3235
3255 uint16_t rx_queue_id,
3256 uint8_t stat_idx);
3257
3271int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3272
3293__rte_experimental
3294int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3295 unsigned int num);
3296
3340int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
3341
3357__rte_experimental
3358int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf);
3359
3380int rte_eth_dev_fw_version_get(uint16_t port_id,
3381 char *fw_version, size_t fw_size);
3382
3422int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3423 uint32_t *ptypes, int num);
3454int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3455 uint32_t *set_ptypes, unsigned int num);
3456
3469int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3470
3488int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3489
3509int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3510
3529int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3530 int on);
3531
3549 enum rte_vlan_type vlan_type,
3550 uint16_t tag_type);
3551
3569int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3570
3584int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3585
3600int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3601
3602typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3603 void *userdata);
3604
3610 buffer_tx_error_fn error_callback;
3611 void *error_userdata;
3612 uint16_t size;
3613 uint16_t length;
3615 struct rte_mbuf *pkts[];
3616};
3617
3624#define RTE_ETH_TX_BUFFER_SIZE(sz) \
3625 (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3626
3637int
3638rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3639
3664int
3666 buffer_tx_error_fn callback, void *userdata);
3667
3690void
3691rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3692 void *userdata);
3693
3717void
3718rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3719 void *userdata);
3720
3746int
3747rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3748
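A sketch of the buffered-Tx helpers above; rte_eth_tx_buffer() and rte_eth_tx_buffer_flush() are inline functions defined further down in this header, BURST_SIZE and the helper name are illustrative, and a failed send is handled by the default callback installed by rte_eth_tx_buffer_init():

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#define BURST_SIZE 32

/* Queue one packet through a Tx buffer and flush whatever is pending. */
static int
buffered_send(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *pkt)
{
	struct rte_eth_dev_tx_buffer *buffer;
	int ret;

	buffer = rte_zmalloc_socket("tx_buffer",
			RTE_ETH_TX_BUFFER_SIZE(BURST_SIZE), 0,
			rte_eth_dev_socket_id(port_id));
	if (buffer == NULL)
		return -ENOMEM;

	ret = rte_eth_tx_buffer_init(buffer, BURST_SIZE);
	if (ret != 0)
		goto out;

	/* Packets accumulate until BURST_SIZE is reached... */
	rte_eth_tx_buffer(port_id, queue_id, buffer, pkt);
	/* ...or until the application flushes explicitly. */
	rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
out:
	rte_free(buffer);
	return ret;
}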
3765
3787 uint64_t metadata;
3788};
3789
3809
3811typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
3812 enum rte_eth_event_type event, void *cb_arg, void *ret_param);
3813
3832 enum rte_eth_event_type event,
3833 rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3834
3854 enum rte_eth_event_type event,
3855 rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3856
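A sketch of registering a link-status-change handler with rte_eth_dev_callback_register(), declared above; the port is assumed to be configured with intr_conf.lsc enabled and to expose the RTE_ETH_DEV_INTR_LSC flag:

#include <stdio.h>
#include <rte_ethdev.h>

/* Invoked from the interrupt thread whenever the link state changes. */
static int
lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
	     void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	printf("link status change on port %u\n", port_id);
	return 0;
}

static int
watch_link(uint16_t port_id)
{
	return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
					     lsc_event_cb, NULL);
}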
3878int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
3879
3900int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
3901
3919int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
3920
3942int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3943 int epfd, int op, void *data);
3944
3959int
3960rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
3961
3975int rte_eth_led_on(uint16_t port_id);
3976
3990int rte_eth_led_off(uint16_t port_id);
3991
4020__rte_experimental
4021int rte_eth_fec_get_capability(uint16_t port_id,
4022 struct rte_eth_fec_capa *speed_fec_capa,
4023 unsigned int num);
4024
4045__rte_experimental
4046int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4047
4071__rte_experimental
4072int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
4073
4088int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4089 struct rte_eth_fc_conf *fc_conf);
4090
4105int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4106 struct rte_eth_fc_conf *fc_conf);
4107
4124 struct rte_eth_pfc_conf *pfc_conf);
4125
4144int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4145 uint32_t pool);
4146
4161int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4162 struct rte_ether_addr *mac_addr);
4163
4182 struct rte_ether_addr *mac_addr);
4183
4201int rte_eth_dev_rss_reta_update(uint16_t port_id,
4202 struct rte_eth_rss_reta_entry64 *reta_conf,
4203 uint16_t reta_size);
4204
4223int rte_eth_dev_rss_reta_query(uint16_t port_id,
4224 struct rte_eth_rss_reta_entry64 *reta_conf,
4225 uint16_t reta_size);
4226
4246int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4247 uint8_t on);
4248
4267int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4268
4285int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4286 uint16_t tx_rate);
4287
4302int rte_eth_dev_rss_hash_update(uint16_t port_id,
4303 struct rte_eth_rss_conf *rss_conf);
4304
4320int
4322 struct rte_eth_rss_conf *rss_conf);
4323
4348int
4350 struct rte_eth_udp_tunnel *tunnel_udp);
4351
4371int
4373 struct rte_eth_udp_tunnel *tunnel_udp);
4374
4389int rte_eth_dev_get_dcb_info(uint16_t port_id,
4390 struct rte_eth_dcb_info *dcb_info);
4391
4392struct rte_eth_rxtx_callback;
4393
4419const struct rte_eth_rxtx_callback *
4420rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4421 rte_rx_callback_fn fn, void *user_param);
4422
4449const struct rte_eth_rxtx_callback *
4450rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4451 rte_rx_callback_fn fn, void *user_param);
4452
4478const struct rte_eth_rxtx_callback *
4479rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4480 rte_tx_callback_fn fn, void *user_param);
4481
4515int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4516 const struct rte_eth_rxtx_callback *user_cb);
4517
4551int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4552 const struct rte_eth_rxtx_callback *user_cb);
4553
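A sketch of attaching a per-queue Rx callback with rte_eth_add_rx_callback() above; the callback simply counts packets, and the names are illustrative:

#include <rte_ethdev.h>

static uint64_t rx_seen;

/* Counts every packet delivered by rte_eth_rx_burst() on this queue. */
static uint16_t
count_rx_cb(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
	    uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	RTE_SET_USED(port_id);
	RTE_SET_USED(queue);
	RTE_SET_USED(pkts);
	RTE_SET_USED(max_pkts);
	RTE_SET_USED(user_param);
	rx_seen += nb_pkts;
	return nb_pkts;
}

static const struct rte_eth_rxtx_callback *
install_rx_counter(uint16_t port_id, uint16_t queue_id)
{
	return rte_eth_add_rx_callback(port_id, queue_id, count_rx_cb, NULL);
}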
4573int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4574 struct rte_eth_rxq_info *qinfo);
4575
4595int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4596 struct rte_eth_txq_info *qinfo);
4597
4616int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4617 struct rte_eth_burst_mode *mode);
4618
4637int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4638 struct rte_eth_burst_mode *mode);
4639
4660__rte_experimental
4661int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
4662 struct rte_power_monitor_cond *pmc);
4663
4682int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
4683
4696int rte_eth_dev_get_eeprom_length(uint16_t port_id);
4697
4714int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4715
4732int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4733
4752__rte_experimental
4753int
4755 struct rte_eth_dev_module_info *modinfo);
4756
4776__rte_experimental
4777int
4779 struct rte_dev_eeprom_info *info);
4780
4800int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4801 struct rte_ether_addr *mc_addr_set,
4802 uint32_t nb_mc_addr);
4803
4816int rte_eth_timesync_enable(uint16_t port_id);
4817
4830int rte_eth_timesync_disable(uint16_t port_id);
4831
4851 struct timespec *timestamp, uint32_t flags);
4852
4869 struct timespec *timestamp);
4870
4888int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
4889
4905int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
4906
4925int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
4926
4972__rte_experimental
4973int
4974rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
4975
4991int
4992rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
4993
5009int
5010rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5011
5029 uint16_t *nb_rx_desc,
5030 uint16_t *nb_tx_desc);
5031
5046int
5047rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5048
5058void *
5059rte_eth_dev_get_sec_ctx(uint16_t port_id);
5060
5076__rte_experimental
5078 struct rte_eth_hairpin_cap *cap);
5079
5089 int pf;
5090 __extension__
5091 union {
5092 int vf;
5093 int sf;
5094 };
5095 uint32_t id_base;
5096 uint32_t id_end;
5097 char name[RTE_DEV_NAME_MAX_LEN];
5098};
5099
5107 uint16_t controller;
5108 uint16_t pf;
5110 uint32_t nb_ranges;
5112};
5113
5137__rte_experimental
5138int rte_eth_representor_info_get(uint16_t port_id,
5139 struct rte_eth_representor_info *info);
5140
5142#define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5143
5145#define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5146
5148#define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5149
5192__rte_experimental
5193int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
5194
5195#include <rte_ethdev_core.h>
5196
5220uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
5221 struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
5222 void *opaque);
5223
5311static inline uint16_t
5312rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
5313 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
5314{
5315 uint16_t nb_rx;
5316 struct rte_eth_fp_ops *p;
5317 void *qd;
5318
5319#ifdef RTE_ETHDEV_DEBUG_RX
5320 if (port_id >= RTE_MAX_ETHPORTS ||
5321 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
5322 RTE_ETHDEV_LOG(ERR,
5323 "Invalid port_id=%u or queue_id=%u\n",
5324 port_id, queue_id);
5325 return 0;
5326 }
5327#endif
5328
5329 /* fetch pointer to queue data */
5330 p = &rte_eth_fp_ops[port_id];
5331 qd = p->rxq.data[queue_id];
5332
5333#ifdef RTE_ETHDEV_DEBUG_RX
5334 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
5335
5336 if (qd == NULL) {
5337 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u for port_id=%u\n",
5338 queue_id, port_id);
5339 return 0;
5340 }
5341#endif
5342
5343 nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
5344
5345#ifdef RTE_ETHDEV_RXTX_CALLBACKS
5346 {
5347 void *cb;
5348
5349 /* __ATOMIC_RELEASE memory order was used when the
5350 * callback was inserted into the list.
5351 * Since there is a clear dependency between loading
5352 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
5353 * not required.
5354 */
5355 cb = __atomic_load_n((void **)&p->rxq.clbk[queue_id],
5356 __ATOMIC_RELAXED);
5357 if (unlikely(cb != NULL))
5358 nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
5359 rx_pkts, nb_rx, nb_pkts, cb);
5360 }
5361#endif
5362
5363 rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
5364 return nb_rx;
5365}
5366
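A minimal receive loop built on rte_eth_rx_burst() above; a real application would process the mbufs rather than free them, and BURST_SIZE is illustrative:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

/* Poll one Rx queue forever, dropping everything it receives. */
static void
rx_loop(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[BURST_SIZE];
	uint16_t nb_rx, i;

	for (;;) {
		nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
		for (i = 0; i < nb_rx; i++)
			rte_pktmbuf_free(pkts[i]);
	}
}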
5380static inline int
5381rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
5382{
5383 struct rte_eth_fp_ops *p;
5384 void *qd;
5385
5386 if (port_id >= RTE_MAX_ETHPORTS ||
5387 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
5388 RTE_ETHDEV_LOG(ERR,
5389 "Invalid port_id=%u or queue_id=%u\n",
5390 port_id, queue_id);
5391 return -EINVAL;
5392 }
5393
5394 /* fetch pointer to queue data */
5395 p = &rte_eth_fp_ops[port_id];
5396 qd = p->rxq.data[queue_id];
5397
5398 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5399 RTE_FUNC_PTR_OR_ERR_RET(*p->rx_queue_count, -ENOTSUP);
5400 if (qd == NULL)
5401 return -EINVAL;
5402
5403 return (int)(*p->rx_queue_count)(qd);
5404}
5405
5409#define RTE_ETH_RX_DESC_AVAIL 0
5410#define RTE_ETH_RX_DESC_DONE 1
5411#define RTE_ETH_RX_DESC_UNAVAIL 2
5447static inline int
5448rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
5449 uint16_t offset)
5450{
5451 struct rte_eth_fp_ops *p;
5452 void *qd;
5453
5454#ifdef RTE_ETHDEV_DEBUG_RX
5455 if (port_id >= RTE_MAX_ETHPORTS ||
5456 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
5457 RTE_ETHDEV_LOG(ERR,
5458 "Invalid port_id=%u or queue_id=%u\n",
5459 port_id, queue_id);
5460 return -EINVAL;
5461 }
5462#endif
5463
5464 /* fetch pointer to queue data */
5465 p = &rte_eth_fp_ops[port_id];
5466 qd = p->rxq.data[queue_id];
5467
5468#ifdef RTE_ETHDEV_DEBUG_RX
5469 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5470 if (qd == NULL)
5471 return -ENODEV;
5472#endif
5473 RTE_FUNC_PTR_OR_ERR_RET(*p->rx_descriptor_status, -ENOTSUP);
5474 return (*p->rx_descriptor_status)(qd, offset);
5475}
5476
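A sketch of using rte_eth_rx_descriptor_status() above to estimate the Rx backlog before polling; not every driver implements the callback, in which case -ENOTSUP is returned:

#include <rte_ethdev.h>

/*
 * Return 1 when more than 'offset' packets are waiting on the queue,
 * 0 when not, or a negative errno when the driver cannot tell.
 */
static int
rx_backlog_exceeds(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	int status = rte_eth_rx_descriptor_status(port_id, queue_id, offset);

	if (status < 0)
		return status;
	return status == RTE_ETH_RX_DESC_DONE;
}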
5480#define RTE_ETH_TX_DESC_FULL 0
5481#define RTE_ETH_TX_DESC_DONE 1
5482#define RTE_ETH_TX_DESC_UNAVAIL 2
5518static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
5519 uint16_t queue_id, uint16_t offset)
5520{
5521 struct rte_eth_fp_ops *p;
5522 void *qd;
5523
5524#ifdef RTE_ETHDEV_DEBUG_TX
5525 if (port_id >= RTE_MAX_ETHPORTS ||
5526 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
5527 RTE_ETHDEV_LOG(ERR,
5528 "Invalid port_id=%u or queue_id=%u\n",
5529 port_id, queue_id);
5530 return -EINVAL;
5531 }
5532#endif
5533
5534 /* fetch pointer to queue data */
5535 p = &rte_eth_fp_ops[port_id];
5536 qd = p->txq.data[queue_id];
5537
5538#ifdef RTE_ETHDEV_DEBUG_TX
5539 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5540 if (qd == NULL)
5541 return -ENODEV;
5542#endif
5543 RTE_FUNC_PTR_OR_ERR_RET(*p->tx_descriptor_status, -ENOTSUP);
5544 return (*p->tx_descriptor_status)(qd, offset);
5545}
5546
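Similarly, a small sketch using rte_eth_tx_descriptor_status() to check whether the Tx descriptor at a given offset has completed and could be reused; tx_desc_is_done is an illustrative helper name.

#include <rte_ethdev.h>

/* Return 1 if the Tx descriptor at 'offset' has completed, 0 if it is
 * still in flight or unavailable, or a negative errno from the API.
 */
static int
tx_desc_is_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	int status = rte_eth_tx_descriptor_status(port_id, queue_id, offset);

	if (status < 0)
		return status;
	return status == RTE_ETH_TX_DESC_DONE;
}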
5566uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
5567 struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
5568
5635static inline uint16_t
5636rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
5637 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
5638{
5639 struct rte_eth_fp_ops *p;
5640 void *qd;
5641
5642#ifdef RTE_ETHDEV_DEBUG_TX
5643 if (port_id >= RTE_MAX_ETHPORTS ||
5644 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
5645 RTE_ETHDEV_LOG(ERR,
5646 "Invalid port_id=%u or queue_id=%u\n",
5647 port_id, queue_id);
5648 return 0;
5649 }
5650#endif
5651
5652 /* fetch pointer to queue data */
5653 p = &rte_eth_fp_ops[port_id];
5654 qd = p->txq.data[queue_id];
5655
5656#ifdef RTE_ETHDEV_DEBUG_TX
5657 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
5658
5659 if (qd == NULL) {
5660 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
5661 queue_id, port_id);
5662 return 0;
5663 }
5664#endif
5665
5666#ifdef RTE_ETHDEV_RXTX_CALLBACKS
5667 {
5668 void *cb;
5669
5670 /* __ATOMIC_RELEASE memory order was used when the
5671 * callback was inserted into the list.
5672 * Since there is a clear dependency between loading
5673 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
5674 * not required.
5675 */
5676 cb = __atomic_load_n((void **)&p->txq.clbk[queue_id],
5677 __ATOMIC_RELAXED);
5678 if (unlikely(cb != NULL))
5679 nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
5680 tx_pkts, nb_pkts, cb);
5681 }
5682#endif
5683
5684 nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
5685
5686 rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
5687 return nb_pkts;
5688}
5689
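Usage sketch (not part of the header): rte_eth_tx_burst() may accept fewer packets than requested, and the caller remains responsible for the mbufs that were not queued. One common pattern is to retry a bounded number of times and then drop the remainder; send_burst and the retry budget of 3 are assumptions of this example.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Transmit a burst with a small retry budget, then free what the queue
 * would not accept. Returns the number of packets handed to the driver.
 */
static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;
	uint16_t i;
	int retries = 3;	/* arbitrary retry budget */

	while (sent < nb_pkts && retries-- > 0)
		sent += rte_eth_tx_burst(port_id, queue_id,
					 pkts + sent, nb_pkts - sent);

	/* Unsent mbufs remain owned by the application and must be freed. */
	for (i = sent; i < nb_pkts; i++)
		rte_pktmbuf_free(pkts[i]);

	return sent;
}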
5744#ifndef RTE_ETHDEV_TX_PREPARE_NOOP
5745
5746static inline uint16_t
5747rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
5748 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
5749{
5750 struct rte_eth_fp_ops *p;
5751 void *qd;
5752
5753#ifdef RTE_ETHDEV_DEBUG_TX
5754 if (port_id >= RTE_MAX_ETHPORTS ||
5755 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
5756 RTE_ETHDEV_LOG(ERR,
5757 "Invalid port_id=%u or queue_id=%u\n",
5758 port_id, queue_id);
5759 rte_errno = ENODEV;
5760 return 0;
5761 }
5762#endif
5763
5764 /* fetch pointer to queue data */
5765 p = &rte_eth_fp_ops[port_id];
5766 qd = p->txq.data[queue_id];
5767
5768#ifdef RTE_ETHDEV_DEBUG_TX
5769 if (!rte_eth_dev_is_valid_port(port_id)) {
5770 RTE_ETHDEV_LOG(ERR, "Invalid Tx port_id=%u\n", port_id);
5771 rte_errno = ENODEV;
5772 return 0;
5773 }
5774 if (qd == NULL) {
5775 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
5776 queue_id, port_id);
5777 rte_errno = EINVAL;
5778 return 0;
5779 }
5780#endif
5781
5782 if (!p->tx_pkt_prepare)
5783 return nb_pkts;
5784
5785 return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
5786}
5787
5788#else
5789
5790/*
5791 * Native NOOP operation for compilation targets which don't require any
5792 * preparation steps, and where a functional NOOP may introduce an
5793 * unnecessary performance drop.
5794 *
5795 * Generally it is not a good idea to turn this on globally, and it should
5796 * not be used if the behavior of tx_preparation can change.
5797 */
5798
5799static inline uint16_t
5800rte_eth_tx_prepare(__rte_unused uint16_t port_id,
5801 __rte_unused uint16_t queue_id,
5802 __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
5803{
5804 return nb_pkts;
5805}
5806
5807#endif
5808
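For illustration, rte_eth_tx_prepare() is typically run on the same port/queue immediately before rte_eth_tx_burst() when Tx offloads need driver-specific fixups. This is a sketch under that assumption; prepare_and_send is a hypothetical helper, not an ethdev API.

#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

/* Run the Tx preparation stage on a burst, then transmit the packets
 * that passed preparation.
 */
static uint16_t
prepare_and_send(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep;

	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	/* If nb_prep < nb_pkts, pkts[nb_prep] is the first rejected packet and
	 * rte_errno tells why; this sketch simply transmits the prepared part.
	 */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}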
5831static inline uint16_t
5832rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
5833 struct rte_eth_dev_tx_buffer *buffer)
5834{
5835 uint16_t sent;
5836 uint16_t to_send = buffer->length;
5837
5838 if (to_send == 0)
5839 return 0;
5840
5841 sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
5842
5843 buffer->length = 0;
5844
5845 /* All packets sent, or to be dealt with by callback below */
5846 if (unlikely(sent != to_send))
5847 buffer->error_callback(&buffer->pkts[sent],
5848 (uint16_t)(to_send - sent),
5849 buffer->error_userdata);
5850
5851 return sent;
5852}
5853
5884static __rte_always_inline uint16_t
5885rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
5886 struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
5887{
5888 buffer->pkts[buffer->length++] = tx_pkt;
5889 if (buffer->length < buffer->size)
5890 return 0;
5891
5892 return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
5893}
5894
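A combined usage sketch for the buffered Tx helpers above (rte_eth_tx_buffer() and rte_eth_tx_buffer_flush()): the buffer is sized with RTE_ETH_TX_BUFFER_SIZE(), packets are queued one at a time, and anything still pending is pushed out explicitly, e.g. at the end of a poll iteration. The helper names, the rte_zmalloc_socket() allocation and the depth of 32 are illustrative choices, not requirements of the API.

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>

#define TX_BUF_PKTS 32	/* illustrative buffer depth */

/* Allocate and initialize a Tx buffer on the given NUMA socket;
 * returns NULL on failure.
 */
static struct rte_eth_dev_tx_buffer *
tx_buffer_create(int socket_id)
{
	struct rte_eth_dev_tx_buffer *buf;

	buf = rte_zmalloc_socket("tx_buffer",
				 RTE_ETH_TX_BUFFER_SIZE(TX_BUF_PKTS), 0,
				 socket_id);
	if (buf != NULL && rte_eth_tx_buffer_init(buf, TX_BUF_PKTS) != 0) {
		rte_free(buf);
		return NULL;
	}
	return buf;
}

/* Queue one packet; transmission happens automatically once the buffer
 * holds TX_BUF_PKTS packets.
 */
static uint16_t
tx_enqueue(uint16_t port_id, uint16_t queue_id,
	   struct rte_eth_dev_tx_buffer *buf, struct rte_mbuf *pkt)
{
	return rte_eth_tx_buffer(port_id, queue_id, buf, pkt);
}

/* Push out whatever is still buffered, e.g. at the end of a poll loop. */
static uint16_t
tx_drain(uint16_t port_id, uint16_t queue_id,
	 struct rte_eth_dev_tx_buffer *buf)
{
	return rte_eth_tx_buffer_flush(port_id, queue_id, buf);
}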
5895#ifdef __cplusplus
5896}
5897#endif
5898
5899#endif /* _RTE_ETHDEV_H_ */