DPDK  24.03.0
rte_ethdev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
148 #ifdef __cplusplus
149 extern "C" {
150 #endif
151 
152 #include <stdint.h>
153 
154 /* Use this macro to check if LRO API is supported */
155 #define RTE_ETHDEV_HAS_LRO_SUPPORT
156 
157 /* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
158 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
159 #define RTE_ETHDEV_DEBUG_RX
160 #define RTE_ETHDEV_DEBUG_TX
161 #endif
162 
163 #include <rte_cman.h>
164 #include <rte_compat.h>
165 #include <rte_log.h>
166 #include <rte_interrupts.h>
167 #include <rte_dev.h>
168 #include <rte_devargs.h>
169 #include <rte_bitops.h>
170 #include <rte_errno.h>
171 #include <rte_common.h>
172 #include <rte_config.h>
173 #include <rte_power_intrinsics.h>
174 
175 #include "rte_ethdev_trace_fp.h"
176 #include "rte_dev_info.h"
177 
178 extern int rte_eth_dev_logtype;
179 #define RTE_LOGTYPE_ETHDEV rte_eth_dev_logtype
180 
181 #define RTE_ETHDEV_LOG_LINE(level, ...) \
182  RTE_LOG_LINE(level, ETHDEV, "" __VA_ARGS__)
183 
184 struct rte_mbuf;
185 
202 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
203 
218 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
219 
232 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
233 
/**
 * Iterate over the port identifiers of all Ethernet devices matching
 * the devargs string, using the rte_eth_iterator_* API declared above.
 * Terminates when rte_eth_iterator_next() returns RTE_MAX_ETHPORTS.
 *
 * NOTE(review): if the loop body exits early (break/return) the iterator
 * presumably still holds resources — rte_eth_iterator_cleanup() should then
 * be called explicitly; confirm against the iterator API contract.
 */
247 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
248  for (rte_eth_iterator_init(iter, devargs), \
249  id = rte_eth_iterator_next(iter); \
250  id != RTE_MAX_ETHPORTS; \
251  id = rte_eth_iterator_next(iter))
252 
263  uint64_t ipackets;
264  uint64_t opackets;
265  uint64_t ibytes;
266  uint64_t obytes;
271  uint64_t imissed;
272  uint64_t ierrors;
273  uint64_t oerrors;
274  uint64_t rx_nombuf;
275  /* Queue stats are limited to max 256 queues */
277  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
279  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
281  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
283  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
285  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
286 };
287 
291 #define RTE_ETH_LINK_SPEED_AUTONEG 0
292 #define RTE_ETH_LINK_SPEED_FIXED RTE_BIT32(0)
293 #define RTE_ETH_LINK_SPEED_10M_HD RTE_BIT32(1)
294 #define RTE_ETH_LINK_SPEED_10M RTE_BIT32(2)
295 #define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)
296 #define RTE_ETH_LINK_SPEED_100M RTE_BIT32(4)
297 #define RTE_ETH_LINK_SPEED_1G RTE_BIT32(5)
298 #define RTE_ETH_LINK_SPEED_2_5G RTE_BIT32(6)
299 #define RTE_ETH_LINK_SPEED_5G RTE_BIT32(7)
300 #define RTE_ETH_LINK_SPEED_10G RTE_BIT32(8)
301 #define RTE_ETH_LINK_SPEED_20G RTE_BIT32(9)
302 #define RTE_ETH_LINK_SPEED_25G RTE_BIT32(10)
303 #define RTE_ETH_LINK_SPEED_40G RTE_BIT32(11)
304 #define RTE_ETH_LINK_SPEED_50G RTE_BIT32(12)
305 #define RTE_ETH_LINK_SPEED_56G RTE_BIT32(13)
306 #define RTE_ETH_LINK_SPEED_100G RTE_BIT32(14)
307 #define RTE_ETH_LINK_SPEED_200G RTE_BIT32(15)
308 #define RTE_ETH_LINK_SPEED_400G RTE_BIT32(16)
314 #define RTE_ETH_SPEED_NUM_NONE 0
315 #define RTE_ETH_SPEED_NUM_10M 10
316 #define RTE_ETH_SPEED_NUM_100M 100
317 #define RTE_ETH_SPEED_NUM_1G 1000
318 #define RTE_ETH_SPEED_NUM_2_5G 2500
319 #define RTE_ETH_SPEED_NUM_5G 5000
320 #define RTE_ETH_SPEED_NUM_10G 10000
321 #define RTE_ETH_SPEED_NUM_20G 20000
322 #define RTE_ETH_SPEED_NUM_25G 25000
323 #define RTE_ETH_SPEED_NUM_40G 40000
324 #define RTE_ETH_SPEED_NUM_50G 50000
325 #define RTE_ETH_SPEED_NUM_56G 56000
326 #define RTE_ETH_SPEED_NUM_100G 100000
327 #define RTE_ETH_SPEED_NUM_200G 200000
328 #define RTE_ETH_SPEED_NUM_400G 400000
329 #define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX
/**
 * A structure used to retrieve link-level information of an Ethernet port.
 * Forced 8-byte alignment so the whole structure occupies a single
 * 64-bit word (all fields fit in 64 bits).
 */
335 __extension__
336 struct __rte_aligned(8) rte_eth_link {
337  uint32_t link_speed; /**< Speed in Mbps, one of RTE_ETH_SPEED_NUM_* */
338  uint16_t link_duplex : 1; /**< RTE_ETH_LINK_HALF_DUPLEX or RTE_ETH_LINK_FULL_DUPLEX */
339  uint16_t link_autoneg : 1; /**< RTE_ETH_LINK_FIXED or RTE_ETH_LINK_AUTONEG */
340  uint16_t link_status : 1; /**< RTE_ETH_LINK_DOWN or RTE_ETH_LINK_UP */
341 };
342 
346 #define RTE_ETH_LINK_HALF_DUPLEX 0
347 #define RTE_ETH_LINK_FULL_DUPLEX 1
348 #define RTE_ETH_LINK_DOWN 0
349 #define RTE_ETH_LINK_UP 1
350 #define RTE_ETH_LINK_FIXED 0
351 #define RTE_ETH_LINK_AUTONEG 1
352 #define RTE_ETH_LINK_MAX_STR_LEN 40
/**
 * A structure used to configure the ring threshold registers of an
 * Rx/Tx queue for an Ethernet port.
 */
359 struct rte_eth_thresh {
360  uint8_t pthresh; /**< Ring prefetch threshold */
361  uint8_t hthresh; /**< Ring host threshold */
362  uint8_t wthresh; /**< Ring writeback threshold */
363 };
364 
368 #define RTE_ETH_MQ_RX_RSS_FLAG RTE_BIT32(0)
369 #define RTE_ETH_MQ_RX_DCB_FLAG RTE_BIT32(1)
370 #define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2)
377 enum rte_eth_rx_mq_mode {
378 
380 
387 
397 };
398 
408 };
409 
415  enum rte_eth_rx_mq_mode mq_mode;
416  uint32_t mtu;
424  uint64_t offloads;
425 
426  uint64_t reserved_64s[2];
427  void *reserved_ptrs[2];
428 };
429 
435  RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
438  RTE_ETH_VLAN_TYPE_MAX,
439 };
440 
446  uint64_t ids[64];
447 };
448 
470  RTE_ETH_HASH_FUNCTION_MAX,
471 };
472 
473 #define RTE_ETH_HASH_ALGO_TO_CAPA(x) RTE_BIT32(x)
474 #define RTE_ETH_HASH_ALGO_CAPA_MASK(x) RTE_BIT32(RTE_ETH_HASH_FUNCTION_ ## x)
475 
493  uint8_t *rss_key;
494  uint8_t rss_key_len;
499  uint64_t rss_hf;
500  enum rte_eth_hash_function algorithm;
501 };
502 
503 /*
504  * A packet can be identified by hardware as different flow types. Different
505  * NIC hardware may support different flow types.
506  * Basically, the NIC hardware identifies the flow type as deep protocol as
507  * possible, and exclusively. For example, if a packet is identified as
508  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
509  * though it is an actual IPV4 packet.
510  */
511 #define RTE_ETH_FLOW_UNKNOWN 0
512 #define RTE_ETH_FLOW_RAW 1
513 #define RTE_ETH_FLOW_IPV4 2
514 #define RTE_ETH_FLOW_FRAG_IPV4 3
515 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
516 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
517 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
518 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
519 #define RTE_ETH_FLOW_IPV6 8
520 #define RTE_ETH_FLOW_FRAG_IPV6 9
521 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
522 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
523 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
524 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
525 #define RTE_ETH_FLOW_L2_PAYLOAD 14
526 #define RTE_ETH_FLOW_IPV6_EX 15
527 #define RTE_ETH_FLOW_IPV6_TCP_EX 16
528 #define RTE_ETH_FLOW_IPV6_UDP_EX 17
529 
530 #define RTE_ETH_FLOW_PORT 18
531 #define RTE_ETH_FLOW_VXLAN 19
532 #define RTE_ETH_FLOW_GENEVE 20
533 #define RTE_ETH_FLOW_NVGRE 21
534 #define RTE_ETH_FLOW_VXLAN_GPE 22
535 #define RTE_ETH_FLOW_GTPU 23
536 #define RTE_ETH_FLOW_MAX 24
537 
538 /*
539  * Below macros are defined for RSS offload types, they can be used to
540  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
541  */
542 #define RTE_ETH_RSS_IPV4 RTE_BIT64(2)
543 #define RTE_ETH_RSS_FRAG_IPV4 RTE_BIT64(3)
544 #define RTE_ETH_RSS_NONFRAG_IPV4_TCP RTE_BIT64(4)
545 #define RTE_ETH_RSS_NONFRAG_IPV4_UDP RTE_BIT64(5)
546 #define RTE_ETH_RSS_NONFRAG_IPV4_SCTP RTE_BIT64(6)
547 #define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
548 #define RTE_ETH_RSS_IPV6 RTE_BIT64(8)
549 #define RTE_ETH_RSS_FRAG_IPV6 RTE_BIT64(9)
550 #define RTE_ETH_RSS_NONFRAG_IPV6_TCP RTE_BIT64(10)
551 #define RTE_ETH_RSS_NONFRAG_IPV6_UDP RTE_BIT64(11)
552 #define RTE_ETH_RSS_NONFRAG_IPV6_SCTP RTE_BIT64(12)
553 #define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
554 #define RTE_ETH_RSS_L2_PAYLOAD RTE_BIT64(14)
555 #define RTE_ETH_RSS_IPV6_EX RTE_BIT64(15)
556 #define RTE_ETH_RSS_IPV6_TCP_EX RTE_BIT64(16)
557 #define RTE_ETH_RSS_IPV6_UDP_EX RTE_BIT64(17)
558 #define RTE_ETH_RSS_PORT RTE_BIT64(18)
559 #define RTE_ETH_RSS_VXLAN RTE_BIT64(19)
560 #define RTE_ETH_RSS_GENEVE RTE_BIT64(20)
561 #define RTE_ETH_RSS_NVGRE RTE_BIT64(21)
562 #define RTE_ETH_RSS_GTPU RTE_BIT64(23)
563 #define RTE_ETH_RSS_ETH RTE_BIT64(24)
564 #define RTE_ETH_RSS_S_VLAN RTE_BIT64(25)
565 #define RTE_ETH_RSS_C_VLAN RTE_BIT64(26)
566 #define RTE_ETH_RSS_ESP RTE_BIT64(27)
567 #define RTE_ETH_RSS_AH RTE_BIT64(28)
568 #define RTE_ETH_RSS_L2TPV3 RTE_BIT64(29)
569 #define RTE_ETH_RSS_PFCP RTE_BIT64(30)
570 #define RTE_ETH_RSS_PPPOE RTE_BIT64(31)
571 #define RTE_ETH_RSS_ECPRI RTE_BIT64(32)
572 #define RTE_ETH_RSS_MPLS RTE_BIT64(33)
573 #define RTE_ETH_RSS_IPV4_CHKSUM RTE_BIT64(34)
574 
587 #define RTE_ETH_RSS_L4_CHKSUM RTE_BIT64(35)
588 
589 #define RTE_ETH_RSS_L2TPV2 RTE_BIT64(36)
590 #define RTE_ETH_RSS_IPV6_FLOW_LABEL RTE_BIT64(37)
591 
592 /*
593  * We use the following macros to combine with above RTE_ETH_RSS_* for
594  * more specific input set selection. These bits are defined starting
595  * from the high end of the 64 bits.
596  * Note: If we use above RTE_ETH_RSS_* without SRC/DST_ONLY, it represents
597  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
598  * the same level are used simultaneously, it is the same case as none of
599  * them are added.
600  */
601 #define RTE_ETH_RSS_L3_SRC_ONLY RTE_BIT64(63)
602 #define RTE_ETH_RSS_L3_DST_ONLY RTE_BIT64(62)
603 #define RTE_ETH_RSS_L4_SRC_ONLY RTE_BIT64(61)
604 #define RTE_ETH_RSS_L4_DST_ONLY RTE_BIT64(60)
605 #define RTE_ETH_RSS_L2_SRC_ONLY RTE_BIT64(59)
606 #define RTE_ETH_RSS_L2_DST_ONLY RTE_BIT64(58)
607 
608 /*
609  * Only select IPV6 address prefix as RSS input set according to
610  * https://tools.ietf.org/html/rfc6052
611  * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
612  * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
613  */
614 #define RTE_ETH_RSS_L3_PRE32 RTE_BIT64(57)
615 #define RTE_ETH_RSS_L3_PRE40 RTE_BIT64(56)
616 #define RTE_ETH_RSS_L3_PRE48 RTE_BIT64(55)
617 #define RTE_ETH_RSS_L3_PRE56 RTE_BIT64(54)
618 #define RTE_ETH_RSS_L3_PRE64 RTE_BIT64(53)
619 #define RTE_ETH_RSS_L3_PRE96 RTE_BIT64(52)
620 
621 /*
622  * Use the following macros to combine with the above layers
623  * to choose inner and outer layers or both for RSS computation.
624  * Bits 50 and 51 are reserved for this.
625  */
626 
634 #define RTE_ETH_RSS_LEVEL_PMD_DEFAULT (UINT64_C(0) << 50)
635 
640 #define RTE_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
641 
646 #define RTE_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
647 #define RTE_ETH_RSS_LEVEL_MASK (UINT64_C(3) << 50)
648 
649 #define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
650 
661 static inline uint64_t
662 rte_eth_rss_hf_refine(uint64_t rss_hf)
663 {
664  if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
665  rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
666 
667  if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
668  rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
669 
670  return rss_hf;
671 }
672 
673 #define RTE_ETH_RSS_IPV6_PRE32 ( \
674  RTE_ETH_RSS_IPV6 | \
675  RTE_ETH_RSS_L3_PRE32)
676 
677 #define RTE_ETH_RSS_IPV6_PRE40 ( \
678  RTE_ETH_RSS_IPV6 | \
679  RTE_ETH_RSS_L3_PRE40)
680 
681 #define RTE_ETH_RSS_IPV6_PRE48 ( \
682  RTE_ETH_RSS_IPV6 | \
683  RTE_ETH_RSS_L3_PRE48)
684 
685 #define RTE_ETH_RSS_IPV6_PRE56 ( \
686  RTE_ETH_RSS_IPV6 | \
687  RTE_ETH_RSS_L3_PRE56)
688 
689 #define RTE_ETH_RSS_IPV6_PRE64 ( \
690  RTE_ETH_RSS_IPV6 | \
691  RTE_ETH_RSS_L3_PRE64)
692 
693 #define RTE_ETH_RSS_IPV6_PRE96 ( \
694  RTE_ETH_RSS_IPV6 | \
695  RTE_ETH_RSS_L3_PRE96)
696 
697 #define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
698  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
699  RTE_ETH_RSS_L3_PRE32)
700 
701 #define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
702  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
703  RTE_ETH_RSS_L3_PRE40)
704 
705 #define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
706  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
707  RTE_ETH_RSS_L3_PRE48)
708 
709 #define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
710  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
711  RTE_ETH_RSS_L3_PRE56)
712 
713 #define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
714  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
715  RTE_ETH_RSS_L3_PRE64)
716 
717 #define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
718  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
719  RTE_ETH_RSS_L3_PRE96)
720 
721 #define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
722  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
723  RTE_ETH_RSS_L3_PRE32)
724 
725 #define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
726  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
727  RTE_ETH_RSS_L3_PRE40)
728 
729 #define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
730  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
731  RTE_ETH_RSS_L3_PRE48)
732 
733 #define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
734  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
735  RTE_ETH_RSS_L3_PRE56)
736 
737 #define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
738  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
739  RTE_ETH_RSS_L3_PRE64)
740 
741 #define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
742  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
743  RTE_ETH_RSS_L3_PRE96)
744 
745 #define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
746  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
747  RTE_ETH_RSS_L3_PRE32)
748 
749 #define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
750  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
751  RTE_ETH_RSS_L3_PRE40)
752 
753 #define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
754  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
755  RTE_ETH_RSS_L3_PRE48)
756 
757 #define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
758  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
759  RTE_ETH_RSS_L3_PRE56)
760 
761 #define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
762  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
763  RTE_ETH_RSS_L3_PRE64)
764 
765 #define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
766  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
767  RTE_ETH_RSS_L3_PRE96)
768 
769 #define RTE_ETH_RSS_IP ( \
770  RTE_ETH_RSS_IPV4 | \
771  RTE_ETH_RSS_FRAG_IPV4 | \
772  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
773  RTE_ETH_RSS_IPV6 | \
774  RTE_ETH_RSS_FRAG_IPV6 | \
775  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
776  RTE_ETH_RSS_IPV6_EX)
777 
778 #define RTE_ETH_RSS_UDP ( \
779  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
780  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
781  RTE_ETH_RSS_IPV6_UDP_EX)
782 
783 #define RTE_ETH_RSS_TCP ( \
784  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
785  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
786  RTE_ETH_RSS_IPV6_TCP_EX)
787 
788 #define RTE_ETH_RSS_SCTP ( \
789  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
790  RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
791 
792 #define RTE_ETH_RSS_TUNNEL ( \
793  RTE_ETH_RSS_VXLAN | \
794  RTE_ETH_RSS_GENEVE | \
795  RTE_ETH_RSS_NVGRE)
796 
797 #define RTE_ETH_RSS_VLAN ( \
798  RTE_ETH_RSS_S_VLAN | \
799  RTE_ETH_RSS_C_VLAN)
800 
802 #define RTE_ETH_RSS_PROTO_MASK ( \
803  RTE_ETH_RSS_IPV4 | \
804  RTE_ETH_RSS_FRAG_IPV4 | \
805  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
806  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
807  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
808  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
809  RTE_ETH_RSS_IPV6 | \
810  RTE_ETH_RSS_FRAG_IPV6 | \
811  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
812  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
813  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
814  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
815  RTE_ETH_RSS_L2_PAYLOAD | \
816  RTE_ETH_RSS_IPV6_EX | \
817  RTE_ETH_RSS_IPV6_TCP_EX | \
818  RTE_ETH_RSS_IPV6_UDP_EX | \
819  RTE_ETH_RSS_PORT | \
820  RTE_ETH_RSS_VXLAN | \
821  RTE_ETH_RSS_GENEVE | \
822  RTE_ETH_RSS_NVGRE | \
823  RTE_ETH_RSS_MPLS)
824 
825 /*
826  * Definitions used for redirection table entry size.
827  * Some RSS RETA sizes may not be supported by some drivers, check the
828  * documentation or the description of relevant functions for more details.
829  */
830 #define RTE_ETH_RSS_RETA_SIZE_64 64
831 #define RTE_ETH_RSS_RETA_SIZE_128 128
832 #define RTE_ETH_RSS_RETA_SIZE_256 256
833 #define RTE_ETH_RSS_RETA_SIZE_512 512
834 #define RTE_ETH_RETA_GROUP_SIZE 64
835 
837 #define RTE_ETH_VMDQ_MAX_VLAN_FILTERS 64
838 #define RTE_ETH_DCB_NUM_USER_PRIORITIES 8
839 #define RTE_ETH_VMDQ_DCB_NUM_QUEUES 128
840 #define RTE_ETH_DCB_NUM_QUEUES 128
844 #define RTE_ETH_DCB_PG_SUPPORT RTE_BIT32(0)
845 #define RTE_ETH_DCB_PFC_SUPPORT RTE_BIT32(1)
849 #define RTE_ETH_VLAN_STRIP_OFFLOAD 0x0001
850 #define RTE_ETH_VLAN_FILTER_OFFLOAD 0x0002
851 #define RTE_ETH_VLAN_EXTEND_OFFLOAD 0x0004
852 #define RTE_ETH_QINQ_STRIP_OFFLOAD 0x0008
854 #define RTE_ETH_VLAN_STRIP_MASK 0x0001
855 #define RTE_ETH_VLAN_FILTER_MASK 0x0002
856 #define RTE_ETH_VLAN_EXTEND_MASK 0x0004
857 #define RTE_ETH_QINQ_STRIP_MASK 0x0008
858 #define RTE_ETH_VLAN_ID_MAX 0x0FFF
861 /* Definitions used for receive MAC address */
862 #define RTE_ETH_NUM_RECEIVE_MAC_ADDR 128
864 /* Definitions used for unicast hash */
865 #define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128
871 #define RTE_ETH_VMDQ_ACCEPT_UNTAG RTE_BIT32(0)
872 
873 #define RTE_ETH_VMDQ_ACCEPT_HASH_MC RTE_BIT32(1)
874 
875 #define RTE_ETH_VMDQ_ACCEPT_HASH_UC RTE_BIT32(2)
876 
877 #define RTE_ETH_VMDQ_ACCEPT_BROADCAST RTE_BIT32(3)
878 
879 #define RTE_ETH_VMDQ_ACCEPT_MULTICAST RTE_BIT32(4)
880 
890  uint64_t mask;
892  uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
893 };
894 
902 };
903 
913 };
914 
915 /* This structure may be extended in future. */
916 struct rte_eth_dcb_rx_conf {
917  enum rte_eth_nb_tcs nb_tcs; /**< Number of DCB traffic classes */
     /** Traffic class each of the 8 user priorities is mapped to. */
919  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
920 };
921 
/** Tx configuration for combined VMDq+DCB mode. */
922 struct rte_eth_vmdq_dcb_tx_conf {
923  enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq transmit pools */
     /** Traffic class each of the 8 user priorities is mapped to. */
925  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
926 };
927 
/** Tx configuration for DCB mode. */
928 struct rte_eth_dcb_tx_conf {
929  enum rte_eth_nb_tcs nb_tcs; /**< Number of DCB traffic classes */
     /** Traffic class each of the 8 user priorities is mapped to. */
931  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
932 };
933 
/** Tx configuration for VMDq mode. */
934 struct rte_eth_vmdq_tx_conf {
935  enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq transmit pools */
936 };
937 
950  enum rte_eth_nb_pools nb_queue_pools;
952  uint8_t default_pool;
953  uint8_t nb_pool_maps;
954  struct {
955  uint16_t vlan_id;
956  uint64_t pools;
957  } pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS];
960 };
961 
981  enum rte_eth_nb_pools nb_queue_pools;
983  uint8_t default_pool;
985  uint8_t nb_pool_maps;
986  uint32_t rx_mode;
987  struct {
988  uint16_t vlan_id;
989  uint64_t pools;
990  } pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS];
991 };
992 
997  enum rte_eth_tx_mq_mode mq_mode;
1003  uint64_t offloads;
1004 
1005  uint16_t pvid;
1006  __extension__
1007  uint8_t
1008  hw_vlan_reject_tagged : 1,
1012  hw_vlan_insert_pvid : 1;
1013 
1014  uint64_t reserved_64s[2];
1015  void *reserved_ptrs[2];
1016 };
1017 
1079  struct rte_mempool *mp;
1080  uint16_t length;
1081  uint16_t offset;
1093  uint32_t proto_hdr;
1094 };
1095 
1103  /* The settings for buffer split offload. */
1104  struct rte_eth_rxseg_split split;
1105  /* The other features settings should be added here. */
1106 };
1107 
1112  struct rte_eth_thresh rx_thresh;
1113  uint16_t rx_free_thresh;
1114  uint8_t rx_drop_en;
1116  uint16_t rx_nseg;
1123  uint16_t share_group;
1124  uint16_t share_qid;
1130  uint64_t offloads;
1139 
1160  uint16_t rx_nmempool;
1162  uint64_t reserved_64s[2];
1163  void *reserved_ptrs[2];
1164 };
1165 
1170  struct rte_eth_thresh tx_thresh;
1171  uint16_t tx_rs_thresh;
1172  uint16_t tx_free_thresh;
1181  uint64_t offloads;
1182 
1183  uint64_t reserved_64s[2];
1184  void *reserved_ptrs[2];
1185 };
1186 
1199 
1204  uint32_t rte_memory:1;
1205 
1206  uint32_t reserved:30;
1207 };
1208 
1217  uint16_t max_nb_queues;
1219  uint16_t max_rx_2_tx;
1221  uint16_t max_tx_2_rx;
1222  uint16_t max_nb_desc;
1225 };
1226 
1227 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
1228 
1236  uint16_t port;
1237  uint16_t queue;
1238 };
1239 
1247  uint32_t peer_count:16;
1258  uint32_t tx_explicit:1;
1259 
1271  uint32_t manual_bind:1;
1272 
1285 
1297  uint32_t use_rte_memory:1;
1298 
1309  uint32_t force_memory:1;
1310 
1311  uint32_t reserved:11;
1313  struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1314 };
1315 
1320  uint16_t nb_max;
1321  uint16_t nb_min;
1322  uint16_t nb_align;
1332  uint16_t nb_seg_max;
1333 
1345  uint16_t nb_mtu_seg_max;
1346 };
1347 
1356 };
1357 
1364  uint32_t high_water;
1365  uint32_t low_water;
1366  uint16_t pause_time;
1367  uint16_t send_xon;
1368  enum rte_eth_fc_mode mode;
1370  uint8_t autoneg;
1371 };
1372 
1379  struct rte_eth_fc_conf fc;
1380  uint8_t priority;
1381 };
1382 
1393  uint8_t tc_max;
1395  enum rte_eth_fc_mode mode_capa;
1396 };
1397 
1416  enum rte_eth_fc_mode mode;
1418  struct {
1419  uint16_t tx_qid;
1423  uint8_t tc;
1424  } rx_pause; /* Valid when (mode == FC_RX_PAUSE || mode == FC_FULL) */
1425 
1426  struct {
1427  uint16_t pause_time;
1428  uint16_t rx_qid;
1432  uint8_t tc;
1433  } tx_pause; /* Valid when (mode == FC_TX_PAUSE || mode == FC_FULL) */
1434 };
1435 
1441  RTE_ETH_TUNNEL_TYPE_NONE = 0,
1442  RTE_ETH_TUNNEL_TYPE_VXLAN,
1443  RTE_ETH_TUNNEL_TYPE_GENEVE,
1444  RTE_ETH_TUNNEL_TYPE_TEREDO,
1445  RTE_ETH_TUNNEL_TYPE_NVGRE,
1446  RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1447  RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1448  RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1449  RTE_ETH_TUNNEL_TYPE_ECPRI,
1450  RTE_ETH_TUNNEL_TYPE_MAX,
1451 };
1452 
1453 /* Deprecated API file for rte_eth_dev_filter_* functions */
1454 #include "rte_eth_ctrl.h"
1455 
1466  uint16_t udp_port;
1467  uint8_t prot_type;
1468 };
1469 
1475  uint32_t lsc:1;
1477  uint32_t rxq:1;
1479  uint32_t rmv:1;
1480 };
1481 
1482 #define rte_intr_conf rte_eth_intr_conf
1483 
1490  uint32_t link_speeds;
1497  struct rte_eth_rxmode rxmode;
1498  struct rte_eth_txmode txmode;
1499  uint32_t lpbk_mode;
1504  struct {
1505  struct rte_eth_rss_conf rss_conf;
1507  struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
1509  struct rte_eth_dcb_rx_conf dcb_rx_conf;
1511  struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
1512  } rx_adv_conf;
1513  union {
1515  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1517  struct rte_eth_dcb_tx_conf dcb_tx_conf;
1519  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1520  } tx_adv_conf;
1524  struct rte_eth_intr_conf intr_conf;
1525 };
1526 
1530 #define RTE_ETH_RX_OFFLOAD_VLAN_STRIP RTE_BIT64(0)
1531 #define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1532 #define RTE_ETH_RX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1533 #define RTE_ETH_RX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1534 #define RTE_ETH_RX_OFFLOAD_TCP_LRO RTE_BIT64(4)
1535 #define RTE_ETH_RX_OFFLOAD_QINQ_STRIP RTE_BIT64(5)
1536 #define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1537 #define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP RTE_BIT64(7)
1538 #define RTE_ETH_RX_OFFLOAD_VLAN_FILTER RTE_BIT64(9)
1539 #define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND RTE_BIT64(10)
1540 #define RTE_ETH_RX_OFFLOAD_SCATTER RTE_BIT64(13)
1541 
1546 #define RTE_ETH_RX_OFFLOAD_TIMESTAMP RTE_BIT64(14)
1547 #define RTE_ETH_RX_OFFLOAD_SECURITY RTE_BIT64(15)
1548 #define RTE_ETH_RX_OFFLOAD_KEEP_CRC RTE_BIT64(16)
1549 #define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM RTE_BIT64(17)
1550 #define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(18)
1551 #define RTE_ETH_RX_OFFLOAD_RSS_HASH RTE_BIT64(19)
1552 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT RTE_BIT64(20)
1553 
1554 #define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1555  RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1556  RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1557 #define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1558  RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1559  RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1560  RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1561 
1562 /*
1563  * If new Rx offload capabilities are defined, they also must be
1564  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1565  */
1566 
1570 #define RTE_ETH_TX_OFFLOAD_VLAN_INSERT RTE_BIT64(0)
1571 #define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1572 #define RTE_ETH_TX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1573 #define RTE_ETH_TX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1574 #define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM RTE_BIT64(4)
1575 #define RTE_ETH_TX_OFFLOAD_TCP_TSO RTE_BIT64(5)
1576 #define RTE_ETH_TX_OFFLOAD_UDP_TSO RTE_BIT64(6)
1577 #define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)
1578 #define RTE_ETH_TX_OFFLOAD_QINQ_INSERT RTE_BIT64(8)
1579 #define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO RTE_BIT64(9)
1580 #define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO RTE_BIT64(10)
1581 #define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO RTE_BIT64(11)
1582 #define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO RTE_BIT64(12)
1583 #define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT RTE_BIT64(13)
1584 
1588 #define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE RTE_BIT64(14)
1589 
1590 #define RTE_ETH_TX_OFFLOAD_MULTI_SEGS RTE_BIT64(15)
1591 
1596 #define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE RTE_BIT64(16)
1597 #define RTE_ETH_TX_OFFLOAD_SECURITY RTE_BIT64(17)
1598 
1603 #define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO RTE_BIT64(18)
1604 
1609 #define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO RTE_BIT64(19)
1610 
1611 #define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(20)
1612 
1617 #define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1618 /*
1619  * If new Tx offload capabilities are defined, they also must be
1620  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1621  */
1622 
1627 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1628 
1629 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1630 
1639 #define RTE_ETH_DEV_CAPA_RXQ_SHARE RTE_BIT64(2)
1640 
1641 #define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP RTE_BIT64(3)
1642 
1643 #define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1644 
1646 /*
1647  * Fallback default preferred Rx/Tx port parameters.
1648  * These are used if an application requests default parameters
1649  * but the PMD does not provide preferred values.
1650  */
1651 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1652 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1653 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1654 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1655 
1662  uint16_t burst_size;
1663  uint16_t ring_size;
1664  uint16_t nb_queues;
1665 };
1666 
1671 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1672 
1677  const char *name;
1678  uint16_t domain_id;
1686  uint16_t port_id;
1692  uint16_t rx_domain;
1693 };
1694 
1702  __extension__
1703  uint32_t multi_pools:1;
1704  uint32_t offset_allowed:1;
1705  uint32_t offset_align_log2:4;
1706  uint16_t max_nseg;
1707  uint16_t reserved;
1708 };
1709 
1722 };
1723 
1744 };
1745 
1752  struct rte_device *device;
1753  const char *driver_name;
1754  unsigned int if_index;
1756  uint16_t min_mtu;
1757  uint16_t max_mtu;
1758  const uint32_t *dev_flags;
1760  uint32_t min_rx_bufsize;
1767  uint32_t max_rx_bufsize;
1768  uint32_t max_rx_pktlen;
1771  uint16_t max_rx_queues;
1772  uint16_t max_tx_queues;
1773  uint32_t max_mac_addrs;
1776  uint16_t max_vfs;
1777  uint16_t max_vmdq_pools;
1778  struct rte_eth_rxseg_capa rx_seg_capa;
1788  uint16_t reta_size;
1789  uint8_t hash_key_size;
1790  uint32_t rss_algo_capa;
1793  struct rte_eth_rxconf default_rxconf;
1794  struct rte_eth_txconf default_txconf;
1795  uint16_t vmdq_queue_base;
1796  uint16_t vmdq_queue_num;
1797  uint16_t vmdq_pool_base;
1798  struct rte_eth_desc_lim rx_desc_lim;
1799  struct rte_eth_desc_lim tx_desc_lim;
1800  uint32_t speed_capa;
1802  uint16_t nb_rx_queues;
1803  uint16_t nb_tx_queues;
1812  struct rte_eth_dev_portconf default_rxportconf;
1814  struct rte_eth_dev_portconf default_txportconf;
1816  uint64_t dev_capa;
1821  struct rte_eth_switch_info switch_info;
1823  enum rte_eth_err_handle_mode err_handle_mode;
1824 
1825  uint64_t reserved_64s[2];
1826  void *reserved_ptrs[2];
1827 };
1828 
1830 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1831 #define RTE_ETH_QUEUE_STATE_STARTED 1
1832 #define RTE_ETH_QUEUE_STATE_HAIRPIN 2
/**
 * Ethernet device Rx queue information structure.
 * Cache-aligned to avoid false sharing when queried per queue.
 */
1839 struct __rte_cache_min_aligned rte_eth_rxq_info {
1840  struct rte_mempool *mp; /**< Mempool used by the queue */
1841  struct rte_eth_rxconf conf; /**< Queue configuration currently in effect */
1842  uint8_t scattered_rx; /**< Non-zero if scattered Rx is in use on this queue */
1843  uint8_t queue_state; /**< One of RTE_ETH_QUEUE_STATE_* defined above */
1844  uint16_t nb_desc; /**< Number of configured descriptors */
1845  uint16_t rx_buf_size; /**< Size of the Rx data buffers */
      /** Available-descriptor threshold; presumably in the units accepted by
       *  rte_eth_rx_avail_thresh_set() — confirm against that API. */
1852  uint8_t avail_thresh;
1853 };
1854 
1860  struct rte_eth_txconf conf;
1861  uint16_t nb_desc;
1862  uint8_t queue_state;
1863 };
1864 
1874  struct rte_mbuf **mbuf_ring;
1875  struct rte_mempool *mp;
1876  uint16_t *refill_head;
1877  uint16_t *receive_tail;
1878  uint16_t mbuf_ring_size;
1887 };
1888 
1889 /* Generic Burst mode flag definition, values can be ORed. */
1890 
1896 #define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1897 
1903  uint64_t flags;
1905 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1906  char info[RTE_ETH_BURST_MODE_INFO_SIZE];
1907 };
1908 
1910 #define RTE_ETH_XSTATS_NAME_SIZE 64
1911 
1922  uint64_t id;
1923  uint64_t value;
1924 };
1925 
1942 };
1943 
1944 #define RTE_ETH_DCB_NUM_TCS 8
1945 #define RTE_ETH_MAX_VMDQ_POOL 64
1946 
1953  struct {
1954  uint16_t base;
1955  uint16_t nb_queue;
1956  } tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1958  struct {
1959  uint16_t base;
1960  uint16_t nb_queue;
1961  } tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1962 };
1963 
1969  uint8_t nb_tcs;
1971  uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS];
1974 };
1975 
1986 };
1987 
1988 /* Translate from FEC mode to FEC capa */
1989 #define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)
1990 
1991 /* This macro indicates FEC capa mask */
1992 #define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)
1993 
1994 /* A structure used to get capabilities per link speed */
1995 struct rte_eth_fec_capa {
1996  uint32_t speed; /**< Link speed in Mbps (RTE_ETH_SPEED_NUM_*) */
1997  uint32_t capa; /**< Bitmask of FEC modes built with RTE_ETH_FEC_MODE_TO_CAPA() */
1998 };
1999 
2000 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
2001 
2002 /* Macros to check for valid port */
/**
 * Validate port_id; on an invalid port, log an error and return `retval`
 * from the enclosing function. For use in functions returning a value.
 */
2003 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
2004  if (!rte_eth_dev_is_valid_port(port_id)) { \
2005  RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
2006  return retval; \
2007  } \
2008 } while (0)
2009 
/**
 * Validate port_id; on an invalid port, log an error and return from the
 * enclosing function. For use in functions returning void.
 */
2010 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
2011  if (!rte_eth_dev_is_valid_port(port_id)) { \
2012  RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
2013  return; \
2014  } \
2015 } while (0)
2016 
/**
 * Function type for user callbacks on a burst of received packets.
 * NOTE(review): presumably invoked after Rx burst with `nb_pkts` packets in
 * `pkts[]` (capacity `max_pkts`), returning the number of packets to hand to
 * the application — confirm against rte_eth_add_rx_callback() documentation.
 */
2039 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
2040  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
2041  void *user_param);
2042 
/**
 * Function type for user callbacks on a burst of packets to be transmitted.
 * NOTE(review): presumably invoked before Tx burst, returning the number of
 * packets to transmit — confirm against rte_eth_add_tx_callback()
 * documentation.
 */
2063 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
2064  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
2065 
2076 };
2077 
/** Per-device SR-IOV state (accessed via RTE_ETH_DEV_SRIOV() below). */
2078 struct rte_eth_dev_sriov {
2079  uint8_t active; /**< Non-zero when SR-IOV is enabled on the device */
2080  uint8_t nb_q_per_pool; /**< Number of queues per pool */
2081  uint16_t def_vmdq_idx; /**< Default VMDq pool index — TODO confirm semantics */
2082  uint16_t def_pool_q_idx; /**< Default queue index within the pool — TODO confirm */
2083 };
2084 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
2085 
2086 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2087 
2088 #define RTE_ETH_DEV_NO_OWNER 0
2089 
2090 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
2091 
/** Identity of a port owner, used by the rte_eth_dev_owner_* API below. */
2092 struct rte_eth_dev_owner {
2093  uint64_t id; /**< Owner identifier; RTE_ETH_DEV_NO_OWNER (0) means unowned */
2094  char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< Human-readable owner name */
2095 };
2096 
2102 #define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE RTE_BIT32(0)
2103 
2104 #define RTE_ETH_DEV_INTR_LSC RTE_BIT32(1)
2105 
2106 #define RTE_ETH_DEV_BONDING_MEMBER RTE_BIT32(2)
2107 
2108 #define RTE_ETH_DEV_INTR_RMV RTE_BIT32(3)
2109 
2110 #define RTE_ETH_DEV_REPRESENTOR RTE_BIT32(4)
2111 
2112 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR RTE_BIT32(5)
2113 
2117 #define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2118 
2131 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2132  const uint64_t owner_id);
2133 
/**
 * Iterate over the port identifiers owned by owner id `o`, starting from
 * port 0. The unsigned casts keep the comparison well-defined regardless of
 * the signedness of the caller-supplied loop variable `p`.
 */
2137 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
2138  for (p = rte_eth_find_next_owned_by(0, o); \
2139  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
2140  p = rte_eth_find_next_owned_by(p + 1, o))
2141 
2150 uint16_t rte_eth_find_next(uint16_t port_id);
2151 
2155 #define RTE_ETH_FOREACH_DEV(p) \
2156  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
2157 
2169 uint16_t
2170 rte_eth_find_next_of(uint16_t port_id_start,
2171  const struct rte_device *parent);
2172 
2181 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
2182  for (port_id = rte_eth_find_next_of(0, parent); \
2183  port_id < RTE_MAX_ETHPORTS; \
2184  port_id = rte_eth_find_next_of(port_id + 1, parent))
2185 
2197 uint16_t
2198 rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2199 
2210 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
2211  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
2212  port_id < RTE_MAX_ETHPORTS; \
2213  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2214 
2225 int rte_eth_dev_owner_new(uint64_t *owner_id);
2226 
2237 int rte_eth_dev_owner_set(const uint16_t port_id,
2238  const struct rte_eth_dev_owner *owner);
2239 
2250 int rte_eth_dev_owner_unset(const uint16_t port_id,
2251  const uint64_t owner_id);
2252 
2261 int rte_eth_dev_owner_delete(const uint64_t owner_id);
2262 
2273 int rte_eth_dev_owner_get(const uint16_t port_id,
2274  struct rte_eth_dev_owner *owner);
2275 
2286 uint16_t rte_eth_dev_count_avail(void);
2287 
2296 uint16_t rte_eth_dev_count_total(void);
2297 
2309 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2310 
2319 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2320 
2329 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2330 
2342 __rte_experimental
2343 const char *rte_eth_dev_capability_name(uint64_t capability);
2344 
2384 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2385  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2386 
2395 int
2396 rte_eth_dev_is_removed(uint16_t port_id);
2397 
2460 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2461  uint16_t nb_rx_desc, unsigned int socket_id,
2462  const struct rte_eth_rxconf *rx_conf,
2463  struct rte_mempool *mb_pool);
2464 
2492 __rte_experimental
2494  (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2495  const struct rte_eth_hairpin_conf *conf);
2496 
2545 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2546  uint16_t nb_tx_desc, unsigned int socket_id,
2547  const struct rte_eth_txconf *tx_conf);
2548 
2574 __rte_experimental
2576  (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2577  const struct rte_eth_hairpin_conf *conf);
2578 
2605 __rte_experimental
2606 int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2607  size_t len, uint32_t direction);
2608 
2631 __rte_experimental
2632 int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2633 
2658 __rte_experimental
2659 int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2660 
2676 __rte_experimental
2677 int rte_eth_dev_count_aggr_ports(uint16_t port_id);
2678 
2706 __rte_experimental
2707 int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
2708  uint8_t affinity);
2709 
2722 int rte_eth_dev_socket_id(uint16_t port_id);
2723 
2733 int rte_eth_dev_is_valid_port(uint16_t port_id);
2734 
2751 __rte_experimental
2752 int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2753 
2770 __rte_experimental
2771 int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2772 
2790 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2791 
2808 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2809 
2827 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2828 
2845 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2846 
2870 int rte_eth_dev_start(uint16_t port_id);
2871 
2885 int rte_eth_dev_stop(uint16_t port_id);
2886 
2899 int rte_eth_dev_set_link_up(uint16_t port_id);
2900 
2910 int rte_eth_dev_set_link_down(uint16_t port_id);
2911 
2922 int rte_eth_dev_close(uint16_t port_id);
2923 
2961 int rte_eth_dev_reset(uint16_t port_id);
2962 
2974 int rte_eth_promiscuous_enable(uint16_t port_id);
2975 
2987 int rte_eth_promiscuous_disable(uint16_t port_id);
2988 
2999 int rte_eth_promiscuous_get(uint16_t port_id);
3000 
3012 int rte_eth_allmulticast_enable(uint16_t port_id);
3013 
3025 int rte_eth_allmulticast_disable(uint16_t port_id);
3026 
3037 int rte_eth_allmulticast_get(uint16_t port_id);
3038 
3056 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
3057 
3072 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
3073 
3087 __rte_experimental
3088 const char *rte_eth_link_speed_to_str(uint32_t link_speed);
3089 
3108 __rte_experimental
3109 int rte_eth_link_to_str(char *str, size_t len,
3110  const struct rte_eth_link *eth_link);
3111 
3129 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
3130 
3142 int rte_eth_stats_reset(uint16_t port_id);
3143 
3173 int rte_eth_xstats_get_names(uint16_t port_id,
3174  struct rte_eth_xstat_name *xstats_names,
3175  unsigned int size);
3176 
3210 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3211  unsigned int n);
3212 
3237 int
3238 rte_eth_xstats_get_names_by_id(uint16_t port_id,
3239  struct rte_eth_xstat_name *xstats_names, unsigned int size,
3240  uint64_t *ids);
3241 
3266 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3267  uint64_t *values, unsigned int size);
3268 
3288 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3289  uint64_t *id);
3290 
3303 int rte_eth_xstats_reset(uint16_t port_id);
3304 
3323 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
3324  uint16_t tx_queue_id, uint8_t stat_idx);
3325 
3344 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
3345  uint16_t rx_queue_id,
3346  uint8_t stat_idx);
3347 
3361 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3362 
3383 __rte_experimental
3384 int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3385  unsigned int num);
3386 
3406 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
3407 
3423 __rte_experimental
3424 int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf);
3425 
3446 int rte_eth_dev_fw_version_get(uint16_t port_id,
3447  char *fw_version, size_t fw_size);
3448 
3488 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3489  uint32_t *ptypes, int num);
3520 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3521  uint32_t *set_ptypes, unsigned int num);
3522 
3535 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3536 
3554 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3555 
3575 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3576 
3595 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3596  int on);
3597 
3614 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3615  enum rte_vlan_type vlan_type,
3616  uint16_t tag_type);
3617 
3635 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3636 
3650 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3651 
3666 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3667 
3693 __rte_experimental
3694 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
3695  uint8_t avail_thresh);
3696 
3723 __rte_experimental
3724 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
3725  uint8_t *avail_thresh);
3726 
3727 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3728  void *userdata);
3729 
3735  buffer_tx_error_fn error_callback;
3736  void *error_userdata;
3737  uint16_t size;
3738  uint16_t length;
3740  struct rte_mbuf *pkts[];
3741 };
3742 
3749 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3750  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3751 
3762 int
3763 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3764 
3789 int
3791  buffer_tx_error_fn callback, void *userdata);
3792 
3815 void
3816 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3817  void *userdata);
3818 
3842 void
3843 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3844  void *userdata);
3845 
3871 int
3872 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3873 
3906 };
3907 
3927 };
3928 
3947  uint64_t metadata;
3948 };
3949 
3987 };
3988 
4013  uint64_t metadata;
4014 };
4015 
4092 };
4093 
4107 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
4108  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
4109 
4127 int rte_eth_dev_callback_register(uint16_t port_id,
4128  enum rte_eth_event_type event,
4129  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4130 
4149 int rte_eth_dev_callback_unregister(uint16_t port_id,
4150  enum rte_eth_event_type event,
4151  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4152 
4174 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
4175 
4196 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
4197 
4215 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
4216 
4238 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4239  int epfd, int op, void *data);
4240 
4255 int
4256 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
4257 
4271 int rte_eth_led_on(uint16_t port_id);
4272 
4286 int rte_eth_led_off(uint16_t port_id);
4287 
4316 __rte_experimental
4317 int rte_eth_fec_get_capability(uint16_t port_id,
4318  struct rte_eth_fec_capa *speed_fec_capa,
4319  unsigned int num);
4320 
4341 __rte_experimental
4342 int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4343 
4367 __rte_experimental
4368 int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
4369 
4384 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4385  struct rte_eth_fc_conf *fc_conf);
4386 
4401 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4402  struct rte_eth_fc_conf *fc_conf);
4403 
4419 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
4420  struct rte_eth_pfc_conf *pfc_conf);
4421 
4440 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4441  uint32_t pool);
4442 
4460 __rte_experimental
4462  struct rte_eth_pfc_queue_info *pfc_queue_info);
4463 
4487 __rte_experimental
4489  struct rte_eth_pfc_queue_conf *pfc_queue_conf);
4490 
4505 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4506  struct rte_ether_addr *mac_addr);
4507 
4525 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
4526  struct rte_ether_addr *mac_addr);
4527 
4545 int rte_eth_dev_rss_reta_update(uint16_t port_id,
4546  struct rte_eth_rss_reta_entry64 *reta_conf,
4547  uint16_t reta_size);
4548 
4567 int rte_eth_dev_rss_reta_query(uint16_t port_id,
4568  struct rte_eth_rss_reta_entry64 *reta_conf,
4569  uint16_t reta_size);
4570 
4590 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4591  uint8_t on);
4592 
4611 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4612 
4629 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4630  uint32_t tx_rate);
4631 
4646 int rte_eth_dev_rss_hash_update(uint16_t port_id,
4647  struct rte_eth_rss_conf *rss_conf);
4648 
4664 int
4665 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4666  struct rte_eth_rss_conf *rss_conf);
4667 
4680 __rte_experimental
4681 const char *
4683 
4700 __rte_experimental
4701 int
4702 rte_eth_find_rss_algo(const char *name, uint32_t *algo);
4703 
4728 int
4729 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4730  struct rte_eth_udp_tunnel *tunnel_udp);
4731 
4751 int
4752 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4753  struct rte_eth_udp_tunnel *tunnel_udp);
4754 
4769 int rte_eth_dev_get_dcb_info(uint16_t port_id,
4770  struct rte_eth_dcb_info *dcb_info);
4771 
4772 struct rte_eth_rxtx_callback;
4773 
4799 const struct rte_eth_rxtx_callback *
4800 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4801  rte_rx_callback_fn fn, void *user_param);
4802 
4829 const struct rte_eth_rxtx_callback *
4830 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4831  rte_rx_callback_fn fn, void *user_param);
4832 
4858 const struct rte_eth_rxtx_callback *
4859 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4860  rte_tx_callback_fn fn, void *user_param);
4861 
4895 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4896  const struct rte_eth_rxtx_callback *user_cb);
4897 
4931 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4932  const struct rte_eth_rxtx_callback *user_cb);
4933 
4953 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4954  struct rte_eth_rxq_info *qinfo);
4955 
4975 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4976  struct rte_eth_txq_info *qinfo);
4977 
4998 __rte_experimental
4999 int rte_eth_recycle_rx_queue_info_get(uint16_t port_id,
5000  uint16_t queue_id,
5001  struct rte_eth_recycle_rxq_info *recycle_rxq_info);
5002 
5021 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5022  struct rte_eth_burst_mode *mode);
5023 
5042 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5043  struct rte_eth_burst_mode *mode);
5044 
5065 __rte_experimental
5066 int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5067  struct rte_power_monitor_cond *pmc);
5068 
5087 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
5088 
5101 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
5102 
5119 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5120 
5137 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5138 
5157 __rte_experimental
5158 int
5159 rte_eth_dev_get_module_info(uint16_t port_id,
5160  struct rte_eth_dev_module_info *modinfo);
5161 
5181 __rte_experimental
5182 int
5183 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5184  struct rte_dev_eeprom_info *info);
5185 
5205 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5206  struct rte_ether_addr *mc_addr_set,
5207  uint32_t nb_mc_addr);
5208 
5221 int rte_eth_timesync_enable(uint16_t port_id);
5222 
5235 int rte_eth_timesync_disable(uint16_t port_id);
5236 
5255 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
5256  struct timespec *timestamp, uint32_t flags);
5257 
5273 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5274  struct timespec *timestamp);
5275 
5293 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
5294 
5310 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
5311 
5330 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
5331 
5377 __rte_experimental
5378 int
5379 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
5380 
5396 int
5397 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
5398 
5415 int
5416 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5417 
5434 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5435  uint16_t *nb_rx_desc,
5436  uint16_t *nb_tx_desc);
5437 
5452 int
5453 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5454 
5464 void *
5465 rte_eth_dev_get_sec_ctx(uint16_t port_id);
5466 
5482 __rte_experimental
5483 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5484  struct rte_eth_hairpin_cap *cap);
5485 
5495  int pf;
5496  __extension__
5497  union {
5498  int vf;
5499  int sf;
5500  };
5501  uint32_t id_base;
5502  uint32_t id_end;
5503  char name[RTE_DEV_NAME_MAX_LEN];
5504 };
5505 
5513  uint16_t controller;
5514  uint16_t pf;
5515  uint32_t nb_ranges_alloc;
5516  uint32_t nb_ranges;
5517  struct rte_eth_representor_range ranges[];
5518 };
5519 
5543 __rte_experimental
5544 int rte_eth_representor_info_get(uint16_t port_id,
5545  struct rte_eth_representor_info *info);
5546 
5548 #define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5549 
5551 #define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5552 
5554 #define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5555 
5595 int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
5596 
5598 #define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
5599 
5600 #define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))
5601 
5612  uint32_t timeout_ms;
5614  uint16_t max_frags;
5619  uint16_t flags;
5620 };
5621 
5642 __rte_experimental
5643 int rte_eth_ip_reassembly_capability_get(uint16_t port_id,
5644  struct rte_eth_ip_reassembly_params *capa);
5645 
5667 __rte_experimental
5668 int rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5669  struct rte_eth_ip_reassembly_params *conf);
5670 
5700 __rte_experimental
5701 int rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5702  const struct rte_eth_ip_reassembly_params *conf);
5703 
5711 typedef struct {
5718  uint16_t time_spent;
5720  uint16_t nb_frags;
5722 
5741 __rte_experimental
5742 int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file);
5743 
5767 __rte_experimental
5768 int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5769  uint16_t offset, uint16_t num, FILE *file);
5770 
5794 __rte_experimental
5795 int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5796  uint16_t offset, uint16_t num, FILE *file);
5797 
5798 
5799 /* Congestion management */
5800 
5810 };
5811 
5828  uint64_t objs_supported;
5833  uint8_t rsvd[8];
5834 };
5835 
5846  enum rte_cman_mode mode;
5847  union {
5854  uint16_t rx_queue;
5861  uint8_t rsvd_obj_params[4];
5862  } obj_param;
5863  union {
5876  uint8_t rsvd_mode_params[4];
5877  } mode_param;
5878 };
5879 
5897 __rte_experimental
5898 int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info);
5899 
5917 __rte_experimental
5918 int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config);
5919 
5936 __rte_experimental
5937 int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config);
5938 
5959 __rte_experimental
5960 int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config);
5961 
5962 #include <rte_ethdev_core.h>
5963 
5987 uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
5988  struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
5989  void *opaque);
5990 
6078 static inline uint16_t
6079 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
6080  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
6081 {
6082  uint16_t nb_rx;
6083  struct rte_eth_fp_ops *p;
6084  void *qd;
6085 
6086 #ifdef RTE_ETHDEV_DEBUG_RX
6087  if (port_id >= RTE_MAX_ETHPORTS ||
6088  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6089  RTE_ETHDEV_LOG_LINE(ERR,
6090  "Invalid port_id=%u or queue_id=%u",
6091  port_id, queue_id);
6092  return 0;
6093  }
6094 #endif
6095 
6096  /* fetch pointer to queue data */
6097  p = &rte_eth_fp_ops[port_id];
6098  qd = p->rxq.data[queue_id];
6099 
6100 #ifdef RTE_ETHDEV_DEBUG_RX
6101  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6102 
6103  if (qd == NULL) {
6104  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
6105  queue_id, port_id);
6106  return 0;
6107  }
6108 #endif
6109 
6110  nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
6111 
6112 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6113  {
6114  void *cb;
6115 
6116  /* rte_memory_order_release memory order was used when the
6117  * call back was inserted into the list.
6118  * Since there is a clear dependency between loading
6119  * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6120  * not required.
6121  */
6122  cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
6123  rte_memory_order_relaxed);
6124  if (unlikely(cb != NULL))
6125  nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
6126  rx_pkts, nb_rx, nb_pkts, cb);
6127  }
6128 #endif
6129 
6130  rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
6131  return nb_rx;
6132 }
6133 
6151 static inline int
6152 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
6153 {
6154  struct rte_eth_fp_ops *p;
6155  void *qd;
6156 
6157 #ifdef RTE_ETHDEV_DEBUG_RX
6158  if (port_id >= RTE_MAX_ETHPORTS ||
6159  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6160  RTE_ETHDEV_LOG_LINE(ERR,
6161  "Invalid port_id=%u or queue_id=%u",
6162  port_id, queue_id);
6163  return -EINVAL;
6164  }
6165 #endif
6166 
6167  /* fetch pointer to queue data */
6168  p = &rte_eth_fp_ops[port_id];
6169  qd = p->rxq.data[queue_id];
6170 
6171 #ifdef RTE_ETHDEV_DEBUG_RX
6172  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6173  if (qd == NULL)
6174  return -EINVAL;
6175 #endif
6176 
6177  if (*p->rx_queue_count == NULL)
6178  return -ENOTSUP;
6179  return (int)(*p->rx_queue_count)(qd);
6180 }
6181 
6185 #define RTE_ETH_RX_DESC_AVAIL 0
6186 #define RTE_ETH_RX_DESC_DONE 1
6187 #define RTE_ETH_RX_DESC_UNAVAIL 2
6223 static inline int
6224 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
6225  uint16_t offset)
6226 {
6227  struct rte_eth_fp_ops *p;
6228  void *qd;
6229 
6230 #ifdef RTE_ETHDEV_DEBUG_RX
6231  if (port_id >= RTE_MAX_ETHPORTS ||
6232  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6233  RTE_ETHDEV_LOG_LINE(ERR,
6234  "Invalid port_id=%u or queue_id=%u",
6235  port_id, queue_id);
6236  return -EINVAL;
6237  }
6238 #endif
6239 
6240  /* fetch pointer to queue data */
6241  p = &rte_eth_fp_ops[port_id];
6242  qd = p->rxq.data[queue_id];
6243 
6244 #ifdef RTE_ETHDEV_DEBUG_RX
6245  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6246  if (qd == NULL)
6247  return -ENODEV;
6248 #endif
6249  if (*p->rx_descriptor_status == NULL)
6250  return -ENOTSUP;
6251  return (*p->rx_descriptor_status)(qd, offset);
6252 }
6253 
6257 #define RTE_ETH_TX_DESC_FULL 0
6258 #define RTE_ETH_TX_DESC_DONE 1
6259 #define RTE_ETH_TX_DESC_UNAVAIL 2
6295 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
6296  uint16_t queue_id, uint16_t offset)
6297 {
6298  struct rte_eth_fp_ops *p;
6299  void *qd;
6300 
6301 #ifdef RTE_ETHDEV_DEBUG_TX
6302  if (port_id >= RTE_MAX_ETHPORTS ||
6303  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6304  RTE_ETHDEV_LOG_LINE(ERR,
6305  "Invalid port_id=%u or queue_id=%u",
6306  port_id, queue_id);
6307  return -EINVAL;
6308  }
6309 #endif
6310 
6311  /* fetch pointer to queue data */
6312  p = &rte_eth_fp_ops[port_id];
6313  qd = p->txq.data[queue_id];
6314 
6315 #ifdef RTE_ETHDEV_DEBUG_TX
6316  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6317  if (qd == NULL)
6318  return -ENODEV;
6319 #endif
6320  if (*p->tx_descriptor_status == NULL)
6321  return -ENOTSUP;
6322  return (*p->tx_descriptor_status)(qd, offset);
6323 }
6324 
6344 uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
6345  struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
6346 
6418 static inline uint16_t
6419 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
6420  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6421 {
6422  struct rte_eth_fp_ops *p;
6423  void *qd;
6424 
6425 #ifdef RTE_ETHDEV_DEBUG_TX
6426  if (port_id >= RTE_MAX_ETHPORTS ||
6427  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6428  RTE_ETHDEV_LOG_LINE(ERR,
6429  "Invalid port_id=%u or queue_id=%u",
6430  port_id, queue_id);
6431  return 0;
6432  }
6433 #endif
6434 
6435  /* fetch pointer to queue data */
6436  p = &rte_eth_fp_ops[port_id];
6437  qd = p->txq.data[queue_id];
6438 
6439 #ifdef RTE_ETHDEV_DEBUG_TX
6440  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6441 
6442  if (qd == NULL) {
6443  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6444  queue_id, port_id);
6445  return 0;
6446  }
6447 #endif
6448 
6449 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6450  {
6451  void *cb;
6452 
6453  /* rte_memory_order_release memory order was used when the
6454  * call back was inserted into the list.
6455  * Since there is a clear dependency between loading
6456  * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6457  * not required.
6458  */
6459  cb = rte_atomic_load_explicit(&p->txq.clbk[queue_id],
6460  rte_memory_order_relaxed);
6461  if (unlikely(cb != NULL))
6462  nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
6463  tx_pkts, nb_pkts, cb);
6464  }
6465 #endif
6466 
6467  nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
6468 
6469  rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
6470  return nb_pkts;
6471 }
6472 
6526 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
6527 
6528 static inline uint16_t
6529 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
6530  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6531 {
6532  struct rte_eth_fp_ops *p;
6533  void *qd;
6534 
6535 #ifdef RTE_ETHDEV_DEBUG_TX
6536  if (port_id >= RTE_MAX_ETHPORTS ||
6537  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6538  RTE_ETHDEV_LOG_LINE(ERR,
6539  "Invalid port_id=%u or queue_id=%u",
6540  port_id, queue_id);
6541  rte_errno = ENODEV;
6542  return 0;
6543  }
6544 #endif
6545 
6546  /* fetch pointer to queue data */
6547  p = &rte_eth_fp_ops[port_id];
6548  qd = p->txq.data[queue_id];
6549 
6550 #ifdef RTE_ETHDEV_DEBUG_TX
6551  if (!rte_eth_dev_is_valid_port(port_id)) {
6552  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx port_id=%u", port_id);
6553  rte_errno = ENODEV;
6554  return 0;
6555  }
6556  if (qd == NULL) {
6557  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6558  queue_id, port_id);
6559  rte_errno = EINVAL;
6560  return 0;
6561  }
6562 #endif
6563 
6564  if (!p->tx_pkt_prepare)
6565  return nb_pkts;
6566 
6567  return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
6568 }
6569 
6570 #else
6571 
6572 /*
6573  * Native NOOP operation for compilation targets which doesn't require any
6574  * preparations steps, and functional NOOP may introduce unnecessary performance
6575  * drop.
6576  *
6577  * Generally this is not a good idea to turn it on globally and didn't should
6578  * be used if behavior of tx_preparation can change.
6579  */
6580 
6581 static inline uint16_t
6582 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
6583  __rte_unused uint16_t queue_id,
6584  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6585 {
6586  return nb_pkts;
6587 }
6588 
6589 #endif
6590 
6613 static inline uint16_t
6614 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
6615  struct rte_eth_dev_tx_buffer *buffer)
6616 {
6617  uint16_t sent;
6618  uint16_t to_send = buffer->length;
6619 
6620  if (to_send == 0)
6621  return 0;
6622 
6623  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
6624 
6625  buffer->length = 0;
6626 
6627  /* All packets sent, or to be dealt with by callback below */
6628  if (unlikely(sent != to_send))
6629  buffer->error_callback(&buffer->pkts[sent],
6630  (uint16_t)(to_send - sent),
6631  buffer->error_userdata);
6632 
6633  return sent;
6634 }
6635 
6666 static __rte_always_inline uint16_t
6667 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
6668  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
6669 {
6670  buffer->pkts[buffer->length++] = tx_pkt;
6671  if (buffer->length < buffer->size)
6672  return 0;
6673 
6674  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
6675 }
6676 
6730 __rte_experimental
6731 static inline uint16_t
6732 rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
6733  uint16_t tx_port_id, uint16_t tx_queue_id,
6734  struct rte_eth_recycle_rxq_info *recycle_rxq_info)
6735 {
6736  struct rte_eth_fp_ops *p1, *p2;
6737  void *qd1, *qd2;
6738  uint16_t nb_mbufs;
6739 
6740 #ifdef RTE_ETHDEV_DEBUG_TX
6741  if (tx_port_id >= RTE_MAX_ETHPORTS ||
6742  tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6743  RTE_ETHDEV_LOG_LINE(ERR,
6744  "Invalid tx_port_id=%u or tx_queue_id=%u",
6745  tx_port_id, tx_queue_id);
6746  return 0;
6747  }
6748 #endif
6749 
6750  /* fetch pointer to Tx queue data */
6751  p1 = &rte_eth_fp_ops[tx_port_id];
6752  qd1 = p1->txq.data[tx_queue_id];
6753 
6754 #ifdef RTE_ETHDEV_DEBUG_TX
6755  RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port_id, 0);
6756 
6757  if (qd1 == NULL) {
6758  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6759  tx_queue_id, tx_port_id);
6760  return 0;
6761  }
6762 #endif
6763  if (p1->recycle_tx_mbufs_reuse == NULL)
6764  return 0;
6765 
6766 #ifdef RTE_ETHDEV_DEBUG_RX
6767  if (rx_port_id >= RTE_MAX_ETHPORTS ||
6768  rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6769  RTE_ETHDEV_LOG_LINE(ERR, "Invalid rx_port_id=%u or rx_queue_id=%u",
6770  rx_port_id, rx_queue_id);
6771  return 0;
6772  }
6773 #endif
6774 
6775  /* fetch pointer to Rx queue data */
6776  p2 = &rte_eth_fp_ops[rx_port_id];
6777  qd2 = p2->rxq.data[rx_queue_id];
6778 
6779 #ifdef RTE_ETHDEV_DEBUG_RX
6780  RTE_ETH_VALID_PORTID_OR_ERR_RET(rx_port_id, 0);
6781 
6782  if (qd2 == NULL) {
6783  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
6784  rx_queue_id, rx_port_id);
6785  return 0;
6786  }
6787 #endif
6788  if (p2->recycle_rx_descriptors_refill == NULL)
6789  return 0;
6790 
6791  /* Copy used *rte_mbuf* buffer pointers from Tx mbuf ring
6792  * into Rx mbuf ring.
6793  */
6794  nb_mbufs = p1->recycle_tx_mbufs_reuse(qd1, recycle_rxq_info);
6795 
6796  /* If no recycling mbufs, return 0. */
6797  if (nb_mbufs == 0)
6798  return 0;
6799 
6800  /* Replenish the Rx descriptors with the recycling
6801  * into Rx mbuf ring.
6802  */
6803  p2->recycle_rx_descriptors_refill(qd2, nb_mbufs);
6804 
6805  return nb_mbufs;
6806 }
6807 
6836 __rte_experimental
6837 int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num);
6838 
6873 __rte_experimental
6874 static inline int
6875 rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
6876 {
6877  struct rte_eth_fp_ops *fops;
6878  void *qd;
6879  int rc;
6880 
6881 #ifdef RTE_ETHDEV_DEBUG_TX
6882  if (port_id >= RTE_MAX_ETHPORTS || !rte_eth_dev_is_valid_port(port_id)) {
6883  RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id);
6884  rc = -ENODEV;
6885  goto out;
6886  }
6887 
6888  if (queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6889  RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
6890  queue_id, port_id);
6891  rc = -EINVAL;
6892  goto out;
6893  }
6894 #endif
6895 
6896  /* Fetch pointer to Tx queue data */
6897  fops = &rte_eth_fp_ops[port_id];
6898  qd = fops->txq.data[queue_id];
6899 
6900 #ifdef RTE_ETHDEV_DEBUG_TX
6901  if (qd == NULL) {
6902  RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
6903  queue_id, port_id);
6904  rc = -EINVAL;
6905  goto out;
6906  }
6907 #endif
6908  if (fops->tx_queue_count == NULL) {
6909  rc = -ENOTSUP;
6910  goto out;
6911  }
6912 
6913  rc = fops->tx_queue_count(qd);
6914 
6915 out:
6916  rte_eth_trace_tx_queue_count(port_id, queue_id, rc);
6917  return rc;
6918 }
6919 
6920 #ifdef __cplusplus
6921 }
6922 #endif
6923 
6924 #endif /* _RTE_ETHDEV_H_ */
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1802
int rte_eth_dev_stop(uint16_t port_id)
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_promiscuous_disable(uint16_t port_id)
__extension__ uint32_t multi_pools
Definition: rte_ethdev.h:1703
int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
__rte_experimental int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
__extension__ struct __rte_aligned(8) rte_eth_link
Definition: rte_ethdev.h:336
#define __rte_always_inline
Definition: rte_common.h:355
#define RTE_ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:838
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:1171
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
uint32_t mtu
Definition: rte_ethdev.h:416
uint16_t nb_desc
Definition: rte_ethdev.h:1861
rte_eth_event_macsec_type
Definition: rte_ethdev.h:3912
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1780
const uint32_t * dev_flags
Definition: rte_ethdev.h:1758
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
__rte_experimental int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6529
struct rte_device * device
Definition: rte_ethdev.h:1752
rte_eth_nb_tcs
Definition: rte_ethdev.h:899
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:285
static __rte_experimental int rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:6875
uint16_t rte_eth_find_next(uint16_t port_id)
__rte_experimental int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
rte_cman_mode
Definition: rte_cman.h:20
int rte_eth_led_off(uint16_t port_id)
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:6224
uint64_t imissed
Definition: rte_ethdev.h:271
uint32_t low_water
Definition: rte_ethdev.h:1365
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
uint8_t rss_key_len
Definition: rte_ethdev.h:494
uint32_t max_rx_bufsize
Definition: rte_ethdev.h:1767
__rte_experimental int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id, uint8_t affinity)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
static __rte_experimental uint16_t rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id, uint16_t tx_port_id, uint16_t tx_queue_id, struct rte_eth_recycle_rxq_info *recycle_rxq_info)
Definition: rte_ethdev.h:6732
uint8_t hthresh
Definition: rte_ethdev.h:361
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
__rte_experimental int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1784
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
__rte_experimental int rte_eth_dev_count_aggr_ports(uint16_t port_id)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
uint16_t reta_size
Definition: rte_ethdev.h:1788
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint32_t lpbk_mode
Definition: rte_ethdev.h:1499
#define rte_errno
Definition: rte_errno.h:29
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint32_t link_speeds
Definition: rte_ethdev.h:1490
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1786
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:403
rte_eth_fc_mode
Definition: rte_ethdev.h:1351
uint8_t enable_default_pool
Definition: rte_ethdev.h:951
uint32_t max_hash_mac_addrs
Definition: rte_ethdev.h:1775
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:1345
struct rte_mempool * mp
Definition: rte_ethdev.h:1875
int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
#define __rte_unused
Definition: rte_common.h:156
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:283
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
uint64_t opackets
Definition: rte_ethdev.h:264
rte_eth_cman_obj
Definition: rte_ethdev.h:5802
uint8_t hash_key_size
Definition: rte_ethdev.h:1789
int rte_eth_dev_close(uint16_t port_id)
struct rte_mempool * mp
Definition: rte_ethdev.h:1079
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
__rte_experimental int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, uint8_t avail_thresh)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
__rte_experimental const char * rte_eth_dev_rss_algo_name(enum rte_eth_hash_function rss_algo)
int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
struct rte_mempool * mp
Definition: rte_ethdev.h:1840
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1523
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:281
__rte_experimental int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
const char * name
Definition: rte_ethdev.h:1677
uint8_t queue_state
Definition: rte_ethdev.h:1862
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
int rte_eth_dev_set_link_up(uint16_t port_id)
#define RTE_BIT32(nr)
Definition: rte_bitops.h:40
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
__rte_experimental int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1796
int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
uint16_t share_qid
Definition: rte_ethdev.h:1124
uint8_t rx_deferred_start
Definition: rte_ethdev.h:1115
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:3740
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:4107
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:277
uint32_t high_water
Definition: rte_ethdev.h:1364
#define RTE_ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:368
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1910
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
__rte_experimental int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, unsigned int num)
int rte_eth_timesync_disable(uint16_t port_id)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint64_t offloads
Definition: rte_ethdev.h:1130
__rte_experimental int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, uint8_t *avail_thresh)
__rte_experimental int rte_eth_representor_info_get(uint16_t port_id, struct rte_eth_representor_info *info)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, struct rte_eth_pfc_queue_conf *pfc_queue_conf)
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num)
uint16_t send_xon
Definition: rte_ethdev.h:1367
struct rte_mempool ** rx_mempools
Definition: rte_ethdev.h:1159
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_allmulticast_enable(uint16_t port_id)
uint32_t offset_allowed
Definition: rte_ethdev.h:1704
__rte_experimental int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, struct rte_power_monitor_cond *pmc)
#define unlikely(x)
uint64_t ibytes
Definition: rte_ethdev.h:265
uint32_t offset_align_log2
Definition: rte_ethdev.h:1705
uint8_t avail_thresh
Definition: rte_ethdev.h:1852
uint64_t offloads
Definition: rte_ethdev.h:1181
int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
uint16_t max_nb_queues
Definition: rte_ethdev.h:1217
uint64_t oerrors
Definition: rte_ethdev.h:273
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
#define __rte_cache_min_aligned
Definition: rte_common.h:556
__rte_experimental int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
uint16_t max_mtu
Definition: rte_ethdev.h:1757
uint64_t offloads
Definition: rte_ethdev.h:424
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
uint16_t tx_free_thresh
Definition: rte_ethdev.h:1172
uint16_t nb_desc
Definition: rte_ethdev.h:1844
uint64_t modes_supported
Definition: rte_ethdev.h:5823
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:6079
int rte_eth_allmulticast_disable(uint16_t port_id)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
rte_eth_hash_function
Definition: rte_ethdev.h:452
int rte_eth_dev_start(uint16_t port_id)
__rte_experimental int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config)
uint16_t rx_buf_size
Definition: rte_ethdev.h:1845
__rte_experimental int rte_eth_ip_reassembly_capability_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *capa)
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1777
uint8_t scattered_rx
Definition: rte_ethdev.h:1842
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
#define RTE_ETH_MQ_RX_DCB_FLAG
Definition: rte_ethdev.h:369
uint64_t offloads
Definition: rte_ethdev.h:1003
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1797
__rte_experimental int rte_eth_ip_reassembly_conf_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *conf)
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1782
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:279
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
uint16_t min_mtu
Definition: rte_ethdev.h:1756
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_is_removed(uint16_t port_id)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:2063
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
uint64_t obytes
Definition: rte_ethdev.h:266
__rte_experimental int rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
uint8_t enable_loop_back
Definition: rte_ethdev.h:984
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
rte_eth_err_handle_mode
Definition: rte_ethdev.h:1730
__rte_experimental int rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_recycle_rxq_info *recycle_rxq_info)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, struct rte_eth_pfc_queue_info *pfc_queue_info)
int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
rte_eth_fec_mode
Definition: rte_ethdev.h:1980
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
uint16_t max_tx_queues
Definition: rte_ethdev.h:1772
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_xstats_reset(uint16_t port_id)
rte_eth_dev_state
Definition: rte_ethdev.h:2069
uint16_t rx_free_thresh
Definition: rte_ethdev.h:1113
uint64_t dev_capa
Definition: rte_ethdev.h:1816
uint64_t ierrors
Definition: rte_ethdev.h:272
#define RTE_ETH_MQ_RX_VMDQ_FLAG
Definition: rte_ethdev.h:370
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction)
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1792
#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:837
uint16_t rte_eth_dev_count_total(void)
int rte_eth_promiscuous_enable(uint16_t port_id)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
union rte_eth_rxseg * rx_seg
Definition: rte_ethdev.h:1138
int rte_eth_dev_owner_new(uint64_t *owner_id)
rte_vlan_type
Definition: rte_ethdev.h:434
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
__rte_experimental int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
uint16_t nb_seg_max
Definition: rte_ethdev.h:1332
uint64_t ipackets
Definition: rte_ethdev.h:263
uint16_t max_vfs
Definition: rte_ethdev.h:1776
uint16_t pause_time
Definition: rte_ethdev.h:1366
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_owner_delete(const uint64_t owner_id)
uint64_t rx_nombuf
Definition: rte_ethdev.h:274
rte_eth_event_macsec_subtype
Definition: rte_ethdev.h:3878
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:6667
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:1008
uint8_t queue_state
Definition: rte_ethdev.h:1843
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
uint32_t use_locked_device_memory
Definition: rte_ethdev.h:1284
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1795
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:3954
rte_eth_nb_pools
Definition: rte_ethdev.h:908
uint16_t nb_align
Definition: rte_ethdev.h:1322
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:377
__rte_experimental const char * rte_eth_link_speed_to_str(uint32_t link_speed)
const char * driver_name
Definition: rte_ethdev.h:1753
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:6152
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
__rte_experimental int rte_eth_find_rss_algo(const char *name, uint32_t *algo)
int rte_eth_dev_socket_id(uint16_t port_id)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint32_t tx_rate)
struct rte_mbuf ** mbuf_ring
Definition: rte_ethdev.h:1874
uint8_t enable_default_pool
Definition: rte_ethdev.h:982
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1803
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1773
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
rte_eth_tunnel_type
Definition: rte_ethdev.h:1440
__rte_experimental const char * rte_eth_dev_capability_name(uint64_t capability)
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint64_t value
Definition: rte_ethdev.h:1923
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition: rte_ethdev.h:662
__rte_experimental int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
__rte_experimental int rte_eth_ip_reassembly_conf_set(uint16_t port_id, const struct rte_eth_ip_reassembly_params *conf)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
__rte_experimental int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config)
int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1768
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
uint64_t rss_hf
Definition: rte_ethdev.h:499
uint64_t id
Definition: rte_ethdev.h:1922
__rte_experimental int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
unsigned int if_index
Definition: rte_ethdev.h:1754
__rte_experimental int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:1369
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:2039
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
uint8_t * rss_key
Definition: rte_ethdev.h:493
uint8_t tx_deferred_start
Definition: rte_ethdev.h:1175
uint8_t wthresh
Definition: rte_ethdev.h:362
uint16_t max_rx_queues
Definition: rte_ethdev.h:1771
uint16_t max_rx_mempools
Definition: rte_ethdev.h:1810
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
rte_eth_representor_type
Definition: rte_ethdev.h:1717
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:418
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
int rte_eth_allmulticast_get(uint16_t port_id)
uint8_t rx_drop_en
Definition: rte_ethdev.h:1114
int rte_eth_dev_is_valid_port(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint16_t rx_nseg
Definition: rte_ethdev.h:1116
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1770
int rte_eth_timesync_enable(uint16_t port_id)
uint8_t pthresh
Definition: rte_ethdev.h:360
uint16_t share_group
Definition: rte_ethdev.h:1123
uint32_t speed_capa
Definition: rte_ethdev.h:1800
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6419
__rte_experimental int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info)
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_experimental int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
uint64_t objs_supported
Definition: rte_ethdev.h:5828
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1760
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:6614
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
rte_eth_event_type
Definition: rte_ethdev.h:4019