DPDK  21.02.0
rte_ethdev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
142 #ifdef __cplusplus
143 extern "C" {
144 #endif
145 
146 #include <stdint.h>
147 
148 /* Use this macro to check if LRO API is supported */
149 #define RTE_ETHDEV_HAS_LRO_SUPPORT
150 
151 #include <rte_compat.h>
152 #include <rte_log.h>
153 #include <rte_interrupts.h>
154 #include <rte_dev.h>
155 #include <rte_devargs.h>
156 #include <rte_errno.h>
157 #include <rte_common.h>
158 #include <rte_config.h>
159 #include <rte_ether.h>
160 #include <rte_power_intrinsics.h>
161 
162 #include "rte_ethdev_trace_fp.h"
163 #include "rte_dev_info.h"
164 
165 extern int rte_eth_dev_logtype;
166 
/**
 * Log a message on the ethdev log type at the given level.
 * @p level is token-pasted onto RTE_LOG_ (e.g. ERR, INFO, DEBUG).
 * The leading "" forces __VA_ARGS__ to begin with a string-literal
 * format, so a non-literal format string fails to compile.
 */
#define RTE_ETHDEV_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
169 
170 struct rte_mbuf;
171 
188 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
189 
204 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
205 
218 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
219 
233 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
234  for (rte_eth_iterator_init(iter, devargs), \
235  id = rte_eth_iterator_next(iter); \
236  id != RTE_MAX_ETHPORTS; \
237  id = rte_eth_iterator_next(iter))
238 
249  uint64_t ipackets;
250  uint64_t opackets;
251  uint64_t ibytes;
252  uint64_t obytes;
253  uint64_t imissed;
257  uint64_t ierrors;
258  uint64_t oerrors;
259  uint64_t rx_nombuf;
260  /* Queue stats are limited to max 256 queues */
261  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
263  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
265  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
267  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
269  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
271 };
272 
276 #define ETH_LINK_SPEED_AUTONEG (0 << 0)
277 #define ETH_LINK_SPEED_FIXED (1 << 0)
278 #define ETH_LINK_SPEED_10M_HD (1 << 1)
279 #define ETH_LINK_SPEED_10M (1 << 2)
280 #define ETH_LINK_SPEED_100M_HD (1 << 3)
281 #define ETH_LINK_SPEED_100M (1 << 4)
282 #define ETH_LINK_SPEED_1G (1 << 5)
283 #define ETH_LINK_SPEED_2_5G (1 << 6)
284 #define ETH_LINK_SPEED_5G (1 << 7)
285 #define ETH_LINK_SPEED_10G (1 << 8)
286 #define ETH_LINK_SPEED_20G (1 << 9)
287 #define ETH_LINK_SPEED_25G (1 << 10)
288 #define ETH_LINK_SPEED_40G (1 << 11)
289 #define ETH_LINK_SPEED_50G (1 << 12)
290 #define ETH_LINK_SPEED_56G (1 << 13)
291 #define ETH_LINK_SPEED_100G (1 << 14)
292 #define ETH_LINK_SPEED_200G (1 << 15)
297 #define ETH_SPEED_NUM_NONE 0
298 #define ETH_SPEED_NUM_10M 10
299 #define ETH_SPEED_NUM_100M 100
300 #define ETH_SPEED_NUM_1G 1000
301 #define ETH_SPEED_NUM_2_5G 2500
302 #define ETH_SPEED_NUM_5G 5000
303 #define ETH_SPEED_NUM_10G 10000
304 #define ETH_SPEED_NUM_20G 20000
305 #define ETH_SPEED_NUM_25G 25000
306 #define ETH_SPEED_NUM_40G 40000
307 #define ETH_SPEED_NUM_50G 50000
308 #define ETH_SPEED_NUM_56G 56000
309 #define ETH_SPEED_NUM_100G 100000
310 #define ETH_SPEED_NUM_200G 200000
311 #define ETH_SPEED_NUM_UNKNOWN UINT32_MAX
__extension__
/**
 * A structure describing an Ethernet link (speed, duplex, autoneg, status).
 * Aligned to 8 bytes (__rte_aligned(8)) — NOTE(review): presumably so the
 * whole struct can be read/written as a single 64-bit word; confirm against
 * the link-update implementation.
 */
struct rte_eth_link {
	uint32_t link_speed;        /**< Speed in Mbps; one of ETH_SPEED_NUM_* */
	uint16_t link_duplex : 1;   /**< ETH_LINK_HALF_DUPLEX or ETH_LINK_FULL_DUPLEX */
	uint16_t link_autoneg : 1;  /**< ETH_LINK_FIXED or ETH_LINK_AUTONEG */
	uint16_t link_status : 1;   /**< ETH_LINK_DOWN or ETH_LINK_UP */
} __rte_aligned(8);
324 /* Utility constants */
325 #define ETH_LINK_HALF_DUPLEX 0
326 #define ETH_LINK_FULL_DUPLEX 1
327 #define ETH_LINK_DOWN 0
328 #define ETH_LINK_UP 1
329 #define ETH_LINK_FIXED 0
330 #define ETH_LINK_AUTONEG 1
331 #define RTE_ETH_LINK_MAX_STR_LEN 40
/**
 * A structure used to configure the ring threshold registers of an Rx/Tx
 * queue for an Ethernet port.
 */
struct rte_eth_thresh {
	uint8_t pthresh; /**< Ring prefetch threshold. */
	uint8_t hthresh; /**< Ring host threshold. */
	uint8_t wthresh; /**< Ring writeback threshold. */
};
342 
346 #define ETH_MQ_RX_RSS_FLAG 0x1
347 #define ETH_MQ_RX_DCB_FLAG 0x2
348 #define ETH_MQ_RX_VMDQ_FLAG 0x4
349 
357 
361  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
363  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
364 
366  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
368  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
370  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
373  ETH_MQ_RX_VMDQ_FLAG,
374 };
375 
379 #define ETH_RSS ETH_MQ_RX_RSS
380 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
381 #define ETH_DCB_RX ETH_MQ_RX_DCB
382 
392 };
393 
397 #define ETH_DCB_NONE ETH_MQ_TX_NONE
398 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
399 #define ETH_DCB_TX ETH_MQ_TX_DCB
400 
407  uint32_t max_rx_pkt_len;
410  uint16_t split_hdr_size;
416  uint64_t offloads;
417 
418  uint64_t reserved_64s[2];
419  void *reserved_ptrs[2];
420 };
421 
427  ETH_VLAN_TYPE_UNKNOWN = 0,
430  ETH_VLAN_TYPE_MAX,
431 };
432 
438  uint64_t ids[64];
439 };
440 
459  uint8_t *rss_key;
460  uint8_t rss_key_len;
461  uint64_t rss_hf;
462 };
463 
464 /*
465  * A packet can be identified by hardware as different flow types. Different
466  * NIC hardware may support different flow types.
467  * Basically, the NIC hardware identifies the flow type as deep protocol as
468  * possible, and exclusively. For example, if a packet is identified as
469  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
470  * though it is an actual IPV4 packet.
471  */
472 #define RTE_ETH_FLOW_UNKNOWN 0
473 #define RTE_ETH_FLOW_RAW 1
474 #define RTE_ETH_FLOW_IPV4 2
475 #define RTE_ETH_FLOW_FRAG_IPV4 3
476 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
477 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
478 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
479 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
480 #define RTE_ETH_FLOW_IPV6 8
481 #define RTE_ETH_FLOW_FRAG_IPV6 9
482 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
483 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
484 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
485 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
486 #define RTE_ETH_FLOW_L2_PAYLOAD 14
487 #define RTE_ETH_FLOW_IPV6_EX 15
488 #define RTE_ETH_FLOW_IPV6_TCP_EX 16
489 #define RTE_ETH_FLOW_IPV6_UDP_EX 17
490 #define RTE_ETH_FLOW_PORT 18
491 
492 #define RTE_ETH_FLOW_VXLAN 19
493 #define RTE_ETH_FLOW_GENEVE 20
494 #define RTE_ETH_FLOW_NVGRE 21
495 #define RTE_ETH_FLOW_VXLAN_GPE 22
496 #define RTE_ETH_FLOW_GTPU 23
497 #define RTE_ETH_FLOW_MAX 24
498 
499 /*
500  * Below macros are defined for RSS offload types, they can be used to
501  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
502  */
503 #define ETH_RSS_IPV4 (1ULL << 2)
504 #define ETH_RSS_FRAG_IPV4 (1ULL << 3)
505 #define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << 4)
506 #define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << 5)
507 #define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << 6)
508 #define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
509 #define ETH_RSS_IPV6 (1ULL << 8)
510 #define ETH_RSS_FRAG_IPV6 (1ULL << 9)
511 #define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << 10)
512 #define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << 11)
513 #define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << 12)
514 #define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
515 #define ETH_RSS_L2_PAYLOAD (1ULL << 14)
516 #define ETH_RSS_IPV6_EX (1ULL << 15)
517 #define ETH_RSS_IPV6_TCP_EX (1ULL << 16)
518 #define ETH_RSS_IPV6_UDP_EX (1ULL << 17)
519 #define ETH_RSS_PORT (1ULL << 18)
520 #define ETH_RSS_VXLAN (1ULL << 19)
521 #define ETH_RSS_GENEVE (1ULL << 20)
522 #define ETH_RSS_NVGRE (1ULL << 21)
523 #define ETH_RSS_GTPU (1ULL << 23)
524 #define ETH_RSS_ETH (1ULL << 24)
525 #define ETH_RSS_S_VLAN (1ULL << 25)
526 #define ETH_RSS_C_VLAN (1ULL << 26)
527 #define ETH_RSS_ESP (1ULL << 27)
528 #define ETH_RSS_AH (1ULL << 28)
529 #define ETH_RSS_L2TPV3 (1ULL << 29)
530 #define ETH_RSS_PFCP (1ULL << 30)
531 #define ETH_RSS_PPPOE (1ULL << 31)
532 #define ETH_RSS_ECPRI (1ULL << 32)
533 #define ETH_RSS_MPLS (1ULL << 33)
534 
535 /*
536  * We use the following macros to combine with above ETH_RSS_* for
537  * more specific input set selection. These bits are defined starting
538  * from the high end of the 64 bits.
539  * Note: If we use above ETH_RSS_* without SRC/DST_ONLY, it represents
540  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
541  * the same level are used simultaneously, it is the same case as none of
542  * them are added.
543  */
/*
 * Input-set refinement bits, allocated from the high end of the 64-bit
 * rss_hf word so they do not collide with the ETH_RSS_* protocol bits.
 */
#define ETH_RSS_L3_SRC_ONLY	(1ULL << 63)
#define ETH_RSS_L3_DST_ONLY	(1ULL << 62)
#define ETH_RSS_L4_SRC_ONLY	(1ULL << 61)
#define ETH_RSS_L4_DST_ONLY	(1ULL << 60)
#define ETH_RSS_L2_SRC_ONLY	(1ULL << 59)
#define ETH_RSS_L2_DST_ONLY	(1ULL << 58)

/*
 * Only select IPV6 address prefix as RSS input set according to
 * https://tools.ietf.org/html/rfc6052
 * Must be combined with ETH_RSS_IPV6, ETH_RSS_NONFRAG_IPV6_UDP,
 * ETH_RSS_NONFRAG_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_SCTP.
 */
#define RTE_ETH_RSS_L3_PRE32	(1ULL << 57)
#define RTE_ETH_RSS_L3_PRE40	(1ULL << 56)
#define RTE_ETH_RSS_L3_PRE48	(1ULL << 55)
#define RTE_ETH_RSS_L3_PRE56	(1ULL << 54)
#define RTE_ETH_RSS_L3_PRE64	(1ULL << 53)
#define RTE_ETH_RSS_L3_PRE96	(1ULL << 52)

/*
 * Use the following macros to combine with the above layers
 * to choose inner and outer layers or both for RSS computation.
 * Bits 50 and 51 are reserved for this.
 */

/** Level 0: let the PMD choose the encapsulation level for RSS. */
#define ETH_RSS_LEVEL_PMD_DEFAULT	(0ULL << 50)

/** Level 1: perform RSS on the outermost encapsulation level. */
#define ETH_RSS_LEVEL_OUTERMOST		(1ULL << 50)

/** Level 2: perform RSS on the innermost encapsulation level. */
#define ETH_RSS_LEVEL_INNERMOST		(2ULL << 50)
#define ETH_RSS_LEVEL_MASK		(3ULL << 50)

/**
 * Extract the RSS level (0, 1 or 2) encoded in bits 50-51 of @p rss_hf.
 *
 * Fix: the argument is now fully parenthesized. The previous expansion
 * '(rss_hf & ETH_RSS_LEVEL_MASK)' mis-grouped when the argument was an
 * expression built from operators binding looser than '&' (e.g. 'a ^ b'
 * expanded to 'a ^ (b & mask)').
 */
#define ETH_RSS_LEVEL(rss_hf)	(((rss_hf) & ETH_RSS_LEVEL_MASK) >> 50)

/**
 * Simplify the use of the SRC/DST_ONLY refinement bits: requesting both
 * SRC_ONLY and DST_ONLY of the same layer is, by definition, the same as
 * requesting neither, so drop both bits in that case.
 *
 * @param rss_hf
 *   RSS types with optional SRC/DST_ONLY refinement bits.
 * @return
 *   The refined RSS types.
 */
static inline uint64_t
rte_eth_rss_hf_refine(uint64_t rss_hf)
{
	if ((rss_hf & ETH_RSS_L3_SRC_ONLY) && (rss_hf & ETH_RSS_L3_DST_ONLY))
		rss_hf &= ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);

	if ((rss_hf & ETH_RSS_L4_SRC_ONLY) && (rss_hf & ETH_RSS_L4_DST_ONLY))
		rss_hf &= ~(ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);

	return rss_hf;
}
615 
616 #define ETH_RSS_IPV6_PRE32 ( \
617  ETH_RSS_IPV6 | \
618  RTE_ETH_RSS_L3_PRE32)
619 
620 #define ETH_RSS_IPV6_PRE40 ( \
621  ETH_RSS_IPV6 | \
622  RTE_ETH_RSS_L3_PRE40)
623 
624 #define ETH_RSS_IPV6_PRE48 ( \
625  ETH_RSS_IPV6 | \
626  RTE_ETH_RSS_L3_PRE48)
627 
628 #define ETH_RSS_IPV6_PRE56 ( \
629  ETH_RSS_IPV6 | \
630  RTE_ETH_RSS_L3_PRE56)
631 
632 #define ETH_RSS_IPV6_PRE64 ( \
633  ETH_RSS_IPV6 | \
634  RTE_ETH_RSS_L3_PRE64)
635 
636 #define ETH_RSS_IPV6_PRE96 ( \
637  ETH_RSS_IPV6 | \
638  RTE_ETH_RSS_L3_PRE96)
639 
640 #define ETH_RSS_IPV6_PRE32_UDP ( \
641  ETH_RSS_NONFRAG_IPV6_UDP | \
642  RTE_ETH_RSS_L3_PRE32)
643 
644 #define ETH_RSS_IPV6_PRE40_UDP ( \
645  ETH_RSS_NONFRAG_IPV6_UDP | \
646  RTE_ETH_RSS_L3_PRE40)
647 
648 #define ETH_RSS_IPV6_PRE48_UDP ( \
649  ETH_RSS_NONFRAG_IPV6_UDP | \
650  RTE_ETH_RSS_L3_PRE48)
651 
652 #define ETH_RSS_IPV6_PRE56_UDP ( \
653  ETH_RSS_NONFRAG_IPV6_UDP | \
654  RTE_ETH_RSS_L3_PRE56)
655 
656 #define ETH_RSS_IPV6_PRE64_UDP ( \
657  ETH_RSS_NONFRAG_IPV6_UDP | \
658  RTE_ETH_RSS_L3_PRE64)
659 
660 #define ETH_RSS_IPV6_PRE96_UDP ( \
661  ETH_RSS_NONFRAG_IPV6_UDP | \
662  RTE_ETH_RSS_L3_PRE96)
663 
664 #define ETH_RSS_IPV6_PRE32_TCP ( \
665  ETH_RSS_NONFRAG_IPV6_TCP | \
666  RTE_ETH_RSS_L3_PRE32)
667 
668 #define ETH_RSS_IPV6_PRE40_TCP ( \
669  ETH_RSS_NONFRAG_IPV6_TCP | \
670  RTE_ETH_RSS_L3_PRE40)
671 
672 #define ETH_RSS_IPV6_PRE48_TCP ( \
673  ETH_RSS_NONFRAG_IPV6_TCP | \
674  RTE_ETH_RSS_L3_PRE48)
675 
676 #define ETH_RSS_IPV6_PRE56_TCP ( \
677  ETH_RSS_NONFRAG_IPV6_TCP | \
678  RTE_ETH_RSS_L3_PRE56)
679 
680 #define ETH_RSS_IPV6_PRE64_TCP ( \
681  ETH_RSS_NONFRAG_IPV6_TCP | \
682  RTE_ETH_RSS_L3_PRE64)
683 
684 #define ETH_RSS_IPV6_PRE96_TCP ( \
685  ETH_RSS_NONFRAG_IPV6_TCP | \
686  RTE_ETH_RSS_L3_PRE96)
687 
688 #define ETH_RSS_IPV6_PRE32_SCTP ( \
689  ETH_RSS_NONFRAG_IPV6_SCTP | \
690  RTE_ETH_RSS_L3_PRE32)
691 
692 #define ETH_RSS_IPV6_PRE40_SCTP ( \
693  ETH_RSS_NONFRAG_IPV6_SCTP | \
694  RTE_ETH_RSS_L3_PRE40)
695 
696 #define ETH_RSS_IPV6_PRE48_SCTP ( \
697  ETH_RSS_NONFRAG_IPV6_SCTP | \
698  RTE_ETH_RSS_L3_PRE48)
699 
700 #define ETH_RSS_IPV6_PRE56_SCTP ( \
701  ETH_RSS_NONFRAG_IPV6_SCTP | \
702  RTE_ETH_RSS_L3_PRE56)
703 
704 #define ETH_RSS_IPV6_PRE64_SCTP ( \
705  ETH_RSS_NONFRAG_IPV6_SCTP | \
706  RTE_ETH_RSS_L3_PRE64)
707 
708 #define ETH_RSS_IPV6_PRE96_SCTP ( \
709  ETH_RSS_NONFRAG_IPV6_SCTP | \
710  RTE_ETH_RSS_L3_PRE96)
711 
712 #define ETH_RSS_IP ( \
713  ETH_RSS_IPV4 | \
714  ETH_RSS_FRAG_IPV4 | \
715  ETH_RSS_NONFRAG_IPV4_OTHER | \
716  ETH_RSS_IPV6 | \
717  ETH_RSS_FRAG_IPV6 | \
718  ETH_RSS_NONFRAG_IPV6_OTHER | \
719  ETH_RSS_IPV6_EX)
720 
721 #define ETH_RSS_UDP ( \
722  ETH_RSS_NONFRAG_IPV4_UDP | \
723  ETH_RSS_NONFRAG_IPV6_UDP | \
724  ETH_RSS_IPV6_UDP_EX)
725 
726 #define ETH_RSS_TCP ( \
727  ETH_RSS_NONFRAG_IPV4_TCP | \
728  ETH_RSS_NONFRAG_IPV6_TCP | \
729  ETH_RSS_IPV6_TCP_EX)
730 
731 #define ETH_RSS_SCTP ( \
732  ETH_RSS_NONFRAG_IPV4_SCTP | \
733  ETH_RSS_NONFRAG_IPV6_SCTP)
734 
735 #define ETH_RSS_TUNNEL ( \
736  ETH_RSS_VXLAN | \
737  ETH_RSS_GENEVE | \
738  ETH_RSS_NVGRE)
739 
740 #define ETH_RSS_VLAN ( \
741  ETH_RSS_S_VLAN | \
742  ETH_RSS_C_VLAN)
743 
745 #define ETH_RSS_PROTO_MASK ( \
746  ETH_RSS_IPV4 | \
747  ETH_RSS_FRAG_IPV4 | \
748  ETH_RSS_NONFRAG_IPV4_TCP | \
749  ETH_RSS_NONFRAG_IPV4_UDP | \
750  ETH_RSS_NONFRAG_IPV4_SCTP | \
751  ETH_RSS_NONFRAG_IPV4_OTHER | \
752  ETH_RSS_IPV6 | \
753  ETH_RSS_FRAG_IPV6 | \
754  ETH_RSS_NONFRAG_IPV6_TCP | \
755  ETH_RSS_NONFRAG_IPV6_UDP | \
756  ETH_RSS_NONFRAG_IPV6_SCTP | \
757  ETH_RSS_NONFRAG_IPV6_OTHER | \
758  ETH_RSS_L2_PAYLOAD | \
759  ETH_RSS_IPV6_EX | \
760  ETH_RSS_IPV6_TCP_EX | \
761  ETH_RSS_IPV6_UDP_EX | \
762  ETH_RSS_PORT | \
763  ETH_RSS_VXLAN | \
764  ETH_RSS_GENEVE | \
765  ETH_RSS_NVGRE | \
766  ETH_RSS_MPLS)
767 
768 /*
769  * Definitions used for redirection table entry size.
770  * Some RSS RETA sizes may not be supported by some drivers, check the
771  * documentation or the description of relevant functions for more details.
772  */
773 #define ETH_RSS_RETA_SIZE_64 64
774 #define ETH_RSS_RETA_SIZE_128 128
775 #define ETH_RSS_RETA_SIZE_256 256
776 #define ETH_RSS_RETA_SIZE_512 512
777 #define RTE_RETA_GROUP_SIZE 64
778 
779 /* Definitions used for VMDQ and DCB functionality */
780 #define ETH_VMDQ_MAX_VLAN_FILTERS 64
781 #define ETH_DCB_NUM_USER_PRIORITIES 8
782 #define ETH_VMDQ_DCB_NUM_QUEUES 128
783 #define ETH_DCB_NUM_QUEUES 128
785 /* DCB capability defines */
786 #define ETH_DCB_PG_SUPPORT 0x00000001
787 #define ETH_DCB_PFC_SUPPORT 0x00000002
789 /* Definitions used for VLAN Offload functionality */
790 #define ETH_VLAN_STRIP_OFFLOAD 0x0001
791 #define ETH_VLAN_FILTER_OFFLOAD 0x0002
792 #define ETH_VLAN_EXTEND_OFFLOAD 0x0004
793 #define ETH_QINQ_STRIP_OFFLOAD 0x0008
795 /* Definitions used for mask VLAN setting */
796 #define ETH_VLAN_STRIP_MASK 0x0001
797 #define ETH_VLAN_FILTER_MASK 0x0002
798 #define ETH_VLAN_EXTEND_MASK 0x0004
799 #define ETH_QINQ_STRIP_MASK 0x0008
800 #define ETH_VLAN_ID_MAX 0x0FFF
802 /* Definitions used for receive MAC address */
803 #define ETH_NUM_RECEIVE_MAC_ADDR 128
805 /* Definitions used for unicast hash */
806 #define ETH_VMDQ_NUM_UC_HASH_ARRAY 128
808 /* Definitions used for VMDQ pool rx mode setting */
809 #define ETH_VMDQ_ACCEPT_UNTAG 0x0001
810 #define ETH_VMDQ_ACCEPT_HASH_MC 0x0002
811 #define ETH_VMDQ_ACCEPT_HASH_UC 0x0004
812 #define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
813 #define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
816 #define ETH_MIRROR_MAX_VLANS 64
817 
818 #define ETH_MIRROR_VIRTUAL_POOL_UP 0x01
819 #define ETH_MIRROR_UPLINK_PORT 0x02
820 #define ETH_MIRROR_DOWNLINK_PORT 0x04
821 #define ETH_MIRROR_VLAN 0x08
822 #define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10
827 struct rte_eth_vlan_mirror {
828  uint64_t vlan_mask;
830  uint16_t vlan_id[ETH_MIRROR_MAX_VLANS];
831 };
832 
837  uint8_t rule_type;
838  uint8_t dst_pool;
839  uint64_t pool_mask;
842 };
843 
851  uint64_t mask;
853  uint16_t reta[RTE_RETA_GROUP_SIZE];
855 };
856 
862  ETH_4_TCS = 4,
864 };
865 
875 };
876 
/* This structure may be extended in future. */
struct rte_eth_dcb_rx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Number of traffic classes (e.g. ETH_4_TCS). */
	/** NOTE(review): presumably maps each user priority (array index) to a
	 * traffic class — confirm against PMD usage. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

struct rte_eth_vmdq_dcb_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq Tx queue pools. */
	/** NOTE(review): presumably maps each user priority (array index) to a
	 * traffic class — confirm against PMD usage. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

struct rte_eth_dcb_tx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Number of traffic classes (e.g. ETH_4_TCS). */
	/** NOTE(review): presumably maps each user priority (array index) to a
	 * traffic class — confirm against PMD usage. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

struct rte_eth_vmdq_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq Tx queue pools. */
};
899 
914  uint8_t default_pool;
915  uint8_t nb_pool_maps;
916  struct {
917  uint16_t vlan_id;
918  uint64_t pools;
922 };
923 
945  uint8_t default_pool;
947  uint8_t nb_pool_maps;
948  uint32_t rx_mode;
949  struct {
950  uint16_t vlan_id;
951  uint64_t pools;
953 };
954 
965  uint64_t offloads;
966 
967  uint16_t pvid;
968  __extension__
969  uint8_t hw_vlan_reject_tagged : 1,
976  uint64_t reserved_64s[2];
977  void *reserved_ptrs[2];
978 };
979 
1017  struct rte_mempool *mp;
1018  uint16_t length;
1019  uint16_t offset;
1020  uint32_t reserved;
1021 };
1022 
1030  /* The settings for buffer split offload. */
1031  struct rte_eth_rxseg_split split;
1032  /* The other features settings should be added here. */
1033 };
1034 
1040  uint16_t rx_free_thresh;
1041  uint8_t rx_drop_en;
1043  uint16_t rx_nseg;
1049  uint64_t offloads;
1058 
1059  uint64_t reserved_64s[2];
1060  void *reserved_ptrs[2];
1061 };
1062 
1068  uint16_t tx_rs_thresh;
1069  uint16_t tx_free_thresh;
1078  uint64_t offloads;
1079 
1080  uint64_t reserved_64s[2];
1081  void *reserved_ptrs[2];
1082 };
1083 
1092  uint16_t max_nb_queues;
1094  uint16_t max_rx_2_tx;
1096  uint16_t max_tx_2_rx;
1097  uint16_t max_nb_desc;
1098 };
1099 
1100 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
1101 
1109  uint16_t port;
1110  uint16_t queue;
1111 };
1112 
1120  uint32_t peer_count:16;
1131  uint32_t tx_explicit:1;
1132 
1144  uint32_t manual_bind:1;
1145  uint32_t reserved:14;
1146  struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1147 };
1148 
1153  uint16_t nb_max;
1154  uint16_t nb_min;
1155  uint16_t nb_align;
1165  uint16_t nb_seg_max;
1166 
1178  uint16_t nb_mtu_seg_max;
1179 };
1180 
1189 };
1190 
1197  uint32_t high_water;
1198  uint32_t low_water;
1199  uint16_t pause_time;
1200  uint16_t send_xon;
1203  uint8_t autoneg;
1204 };
1205 
1213  uint8_t priority;
1214 };
1215 
1221  RTE_TUNNEL_TYPE_NONE = 0,
1222  RTE_TUNNEL_TYPE_VXLAN,
1223  RTE_TUNNEL_TYPE_GENEVE,
1224  RTE_TUNNEL_TYPE_TEREDO,
1225  RTE_TUNNEL_TYPE_NVGRE,
1226  RTE_TUNNEL_TYPE_IP_IN_GRE,
1227  RTE_L2_TUNNEL_TYPE_E_TAG,
1228  RTE_TUNNEL_TYPE_VXLAN_GPE,
1229  RTE_TUNNEL_TYPE_ECPRI,
1230  RTE_TUNNEL_TYPE_MAX,
1231 };
1232 
1233 /* Deprecated API file for rte_eth_dev_filter_* functions */
1234 #include "rte_eth_ctrl.h"
1235 
1244 };
1245 
1253 };
1254 
1266  uint8_t drop_queue;
1267  struct rte_eth_fdir_masks mask;
1270 };
1271 
1282  uint16_t udp_port;
1283  uint8_t prot_type;
1284 };
1285 
1291  uint32_t lsc:1;
1293  uint32_t rxq:1;
1295  uint32_t rmv:1;
1296 };
1297 
1304  uint32_t link_speeds;
1313  uint32_t lpbk_mode;
1318  struct {
1322  struct rte_eth_dcb_rx_conf dcb_rx_conf;
1326  } rx_adv_conf;
1327  union {
1328  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1330  struct rte_eth_dcb_tx_conf dcb_tx_conf;
1332  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1334  } tx_adv_conf;
1340 };
1341 
1345 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
1346 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
1347 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
1348 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
1349 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
1350 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
1351 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
1352 #define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
1353 #define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
1354 #define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
1355 #define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
1356 #define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
1357 #define DEV_RX_OFFLOAD_SCATTER 0x00002000
1358 
1363 #define DEV_RX_OFFLOAD_TIMESTAMP 0x00004000
1364 #define DEV_RX_OFFLOAD_SECURITY 0x00008000
1365 #define DEV_RX_OFFLOAD_KEEP_CRC 0x00010000
1366 #define DEV_RX_OFFLOAD_SCTP_CKSUM 0x00020000
1367 #define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM 0x00040000
1368 #define DEV_RX_OFFLOAD_RSS_HASH 0x00080000
1369 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000
1370 
1371 #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
1372  DEV_RX_OFFLOAD_UDP_CKSUM | \
1373  DEV_RX_OFFLOAD_TCP_CKSUM)
1374 #define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
1375  DEV_RX_OFFLOAD_VLAN_FILTER | \
1376  DEV_RX_OFFLOAD_VLAN_EXTEND | \
1377  DEV_RX_OFFLOAD_QINQ_STRIP)
1378 
1379 /*
1380  * If new Rx offload capabilities are defined, they also must be
1381  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1382  */
1383 
1387 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
1388 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
1389 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
1390 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
1391 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
1392 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
1393 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
1394 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
1395 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
1396 #define DEV_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200
1397 #define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400
1398 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800
1399 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000
1400 #define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
1401 #define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
1402 
1405 #define DEV_TX_OFFLOAD_MULTI_SEGS 0x00008000
1406 
1407 #define DEV_TX_OFFLOAD_MBUF_FAST_FREE 0x00010000
1408 
1412 #define DEV_TX_OFFLOAD_SECURITY 0x00020000
1413 
1418 #define DEV_TX_OFFLOAD_UDP_TNL_TSO 0x00040000
1419 
1424 #define DEV_TX_OFFLOAD_IP_TNL_TSO 0x00080000
1425 
1426 #define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM 0x00100000
1427 
1432 #define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
1433 /*
1434  * If new Tx offload capabilities are defined, they also must be
1435  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1436  */
1437 
1442 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
1443 
1444 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002
1445 
1447 /*
1448  * Fallback default preferred Rx/Tx port parameters.
1449  * These are used if an application requests default parameters
1450  * but the PMD does not provide preferred values.
1451  */
1452 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1453 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1454 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1455 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1456 
1463  uint16_t burst_size;
1464  uint16_t ring_size;
1465  uint16_t nb_queues;
1466 };
1467 
1472 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1473 
1478  const char *name;
1479  uint16_t domain_id;
1480  uint16_t port_id;
1488 };
1489 
1497  __extension__
1498  uint32_t multi_pools:1;
1499  uint32_t offset_allowed:1;
1500  uint32_t offset_align_log2:4;
1501  uint16_t max_nseg;
1502  uint16_t reserved;
1503 };
1504 
1515  struct rte_device *device;
1516  const char *driver_name;
1517  unsigned int if_index;
1519  uint16_t min_mtu;
1520  uint16_t max_mtu;
1521  const uint32_t *dev_flags;
1522  uint32_t min_rx_bufsize;
1523  uint32_t max_rx_pktlen;
1526  uint16_t max_rx_queues;
1527  uint16_t max_tx_queues;
1528  uint32_t max_mac_addrs;
1529  uint32_t max_hash_mac_addrs;
1531  uint16_t max_vfs;
1532  uint16_t max_vmdq_pools;
1542  uint16_t reta_size;
1544  uint8_t hash_key_size;
1549  uint16_t vmdq_queue_base;
1550  uint16_t vmdq_queue_num;
1551  uint16_t vmdq_pool_base;
1554  uint32_t speed_capa;
1556  uint16_t nb_rx_queues;
1557  uint16_t nb_tx_queues;
1563  uint64_t dev_capa;
1569 
1570  uint64_t reserved_64s[2];
1571  void *reserved_ptrs[2];
1572 };
1573 
1579  struct rte_mempool *mp;
1581  uint8_t scattered_rx;
1582  uint16_t nb_desc;
1583  uint16_t rx_buf_size;
1585 
1592  uint16_t nb_desc;
1594 
1595 /* Generic Burst mode flag definition, values can be ORed. */
1596 
1602 #define RTE_ETH_BURST_FLAG_PER_QUEUE (1ULL << 0)
1603 
1609  uint64_t flags;
1611 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1612  char info[RTE_ETH_BURST_MODE_INFO_SIZE];
1613 };
1614 
1616 #define RTE_ETH_XSTATS_NAME_SIZE 64
1617 
1628  uint64_t id;
1629  uint64_t value;
1630 };
1631 
1648 };
1649 
1650 #define ETH_DCB_NUM_TCS 8
1651 #define ETH_MAX_VMDQ_POOL 64
1652 
1659  struct {
1660  uint16_t base;
1661  uint16_t nb_queue;
1662  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1664  struct {
1665  uint16_t base;
1666  uint16_t nb_queue;
1667  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1668 };
1669 
1675  uint8_t nb_tcs;
1676  uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES];
1677  uint8_t tc_bws[ETH_DCB_NUM_TCS];
1680 };
1681 
1691 };
1692 
1693 /* Translate from FEC mode to FEC capa */
1694 #define RTE_ETH_FEC_MODE_TO_CAPA(x) (1U << (x))
1695 
1696 /* This macro indicates FEC capa mask */
1697 #define RTE_ETH_FEC_MODE_CAPA_MASK(x) (1U << (RTE_ETH_FEC_ ## x))
1698 
/* A structure used to get FEC capabilities per link speed */
struct rte_eth_fec_capa {
	uint32_t speed; /**< Link speed (see ETH_SPEED_NUM_*). */
	uint32_t capa;  /**< FEC capability bitmask; bits built via RTE_ETH_FEC_MODE_TO_CAPA(). */
};
1704 
1705 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
1706 
/* Macros to check for valid port */

/**
 * Validate @p port_id at the top of an API function: on an invalid port,
 * log an error and return @p retval from the enclosing function.
 * Note: expands to a 'return' — only usable inside a function body.
 */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return retval; \
	} \
} while (0)

/**
 * Same as RTE_ETH_VALID_PORTID_OR_ERR_RET but for void functions:
 * logs an error and plain-returns on an invalid port.
 */
#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return; \
	} \
} while (0)
1721 
1727 #define ETH_L2_TUNNEL_ENABLE_MASK 0x00000001
1728 
1729 #define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002
1730 
1731 #define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004
1732 
1733 #define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1734 
/**
 * Function type for a user callback run on a burst of received packets for
 * a given (port, queue). Receives the burst in @p pkts (@p nb_pkts valid
 * entries, buffer capacity @p max_pkts) plus the opaque @p user_param
 * registered with the callback; returns the number of packets to hand on.
 */
typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
	void *user_param);

/**
 * Function type for a user callback run on a burst of packets about to be
 * transmitted on a given (port, queue). Same contract as the Rx variant but
 * with no separate buffer-capacity argument; returns the number of packets
 * to hand on to transmission.
 */
typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1783 
1794 };
1795 
/** Per-device SR-IOV state. NOTE(review): field semantics below are inferred
 * from names — confirm against the ethdev/PMD code that fills this in. */
struct rte_eth_dev_sriov {
	uint8_t active;           /**< Non-zero when SR-IOV is active — confirm encoding. */
	uint8_t nb_q_per_pool;    /**< Presumably Rx/Tx queue count per pool. */
	uint16_t def_vmdq_idx;    /**< Presumably default VMDq pool index for the PF. */
	uint16_t def_pool_q_idx;  /**< Presumably first queue index of the default pool. */
};
/** Accessor for the SR-IOV state stored in a device's shared data. */
#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1803 
1804 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
1805 
1806 #define RTE_ETH_DEV_NO_OWNER 0
1807 
1808 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
1809 
/**
 * Ethernet device ownership descriptor, as used by the
 * rte_eth_dev_owner_*() API (set/get/unset/delete below).
 */
struct rte_eth_dev_owner {
	uint64_t id;   /**< Unique owner identifier; RTE_ETH_DEV_NO_OWNER (0) means unowned. */
	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< Human-readable owner name. */
};
1814 
1816 #define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE 0x0001
1817 
1818 #define RTE_ETH_DEV_INTR_LSC 0x0002
1819 
1820 #define RTE_ETH_DEV_BONDED_SLAVE 0x0004
1821 
1822 #define RTE_ETH_DEV_INTR_RMV 0x0008
1823 
1824 #define RTE_ETH_DEV_REPRESENTOR 0x0010
1825 
1826 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR 0x0020
1827 
1831 #define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS 0x0040
1832 
1844 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
1845  const uint64_t owner_id);
1846 
1850 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
1851  for (p = rte_eth_find_next_owned_by(0, o); \
1852  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
1853  p = rte_eth_find_next_owned_by(p + 1, o))
1854 
1863 uint16_t rte_eth_find_next(uint16_t port_id);
1864 
1868 #define RTE_ETH_FOREACH_DEV(p) \
1869  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
1870 
1885 __rte_experimental
1886 uint16_t
1887 rte_eth_find_next_of(uint16_t port_id_start,
1888  const struct rte_device *parent);
1889 
1898 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
1899  for (port_id = rte_eth_find_next_of(0, parent); \
1900  port_id < RTE_MAX_ETHPORTS; \
1901  port_id = rte_eth_find_next_of(port_id + 1, parent))
1902 
1917 __rte_experimental
1918 uint16_t
1919 rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
1920 
1931 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
1932  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
1933  port_id < RTE_MAX_ETHPORTS; \
1934  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
1935 
1949 __rte_experimental
1950 int rte_eth_dev_owner_new(uint64_t *owner_id);
1951 
1965 __rte_experimental
1966 int rte_eth_dev_owner_set(const uint16_t port_id,
1967  const struct rte_eth_dev_owner *owner);
1968 
1982 __rte_experimental
1983 int rte_eth_dev_owner_unset(const uint16_t port_id,
1984  const uint64_t owner_id);
1985 
1997 __rte_experimental
1998 int rte_eth_dev_owner_delete(const uint64_t owner_id);
1999 
2013 __rte_experimental
2014 int rte_eth_dev_owner_get(const uint16_t port_id,
2015  struct rte_eth_dev_owner *owner);
2016 
2027 uint16_t rte_eth_dev_count_avail(void);
2028 
2037 uint16_t rte_eth_dev_count_total(void);
2038 
2050 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2051 
2060 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2061 
2070 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2071 
2111 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2112  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2113 
2125 __rte_experimental
2126 int
2127 rte_eth_dev_is_removed(uint16_t port_id);
2128 
2191 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2192  uint16_t nb_rx_desc, unsigned int socket_id,
2193  const struct rte_eth_rxconf *rx_conf,
2194  struct rte_mempool *mb_pool);
2195 
2223 __rte_experimental
2225  (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2226  const struct rte_eth_hairpin_conf *conf);
2227 
2276 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2277  uint16_t nb_tx_desc, unsigned int socket_id,
2278  const struct rte_eth_txconf *tx_conf);
2279 
2305 __rte_experimental
2307  (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2308  const struct rte_eth_hairpin_conf *conf);
2309 
2336 __rte_experimental
2337 int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2338  size_t len, uint32_t direction);
2339 
2362 __rte_experimental
2363 int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2364 
2389 __rte_experimental
2390 int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2391 
2402 int rte_eth_dev_socket_id(uint16_t port_id);
2403 
2413 int rte_eth_dev_is_valid_port(uint16_t port_id);
2414 
2432 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2433 
2450 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2451 
2469 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2470 
2487 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2488 
2508 int rte_eth_dev_start(uint16_t port_id);
2509 
2520 int rte_eth_dev_stop(uint16_t port_id);
2521 
2534 int rte_eth_dev_set_link_up(uint16_t port_id);
2535 
2545 int rte_eth_dev_set_link_down(uint16_t port_id);
2546 
2557 int rte_eth_dev_close(uint16_t port_id);
2558 
2596 int rte_eth_dev_reset(uint16_t port_id);
2597 
2609 int rte_eth_promiscuous_enable(uint16_t port_id);
2610 
2622 int rte_eth_promiscuous_disable(uint16_t port_id);
2623 
2634 int rte_eth_promiscuous_get(uint16_t port_id);
2635 
2647 int rte_eth_allmulticast_enable(uint16_t port_id);
2648 
2660 int rte_eth_allmulticast_disable(uint16_t port_id);
2661 
2672 int rte_eth_allmulticast_get(uint16_t port_id);
2673 
2690 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
2691 
2705 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
2706 
2720 __rte_experimental
2721 const char *rte_eth_link_speed_to_str(uint32_t link_speed);
2722 
2741 __rte_experimental
2742 int rte_eth_link_to_str(char *str, size_t len,
2743  const struct rte_eth_link *eth_link);
2744 
2762 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
2763 
2775 int rte_eth_stats_reset(uint16_t port_id);
2776 
2806 int rte_eth_xstats_get_names(uint16_t port_id,
2807  struct rte_eth_xstat_name *xstats_names,
2808  unsigned int size);
2809 
2839 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2840  unsigned int n);
2841 
2864 int
2865 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2866  struct rte_eth_xstat_name *xstats_names, unsigned int size,
2867  uint64_t *ids);
2868 
2892 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2893  uint64_t *values, unsigned int size);
2894 
2913 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2914  uint64_t *id);
2915 
2928 int rte_eth_xstats_reset(uint16_t port_id);
2929 
2948 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
2949  uint16_t tx_queue_id, uint8_t stat_idx);
2950 
2969 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
2970  uint16_t rx_queue_id,
2971  uint8_t stat_idx);
2972 
2985 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
2986 
3029 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
3030 
3050 int rte_eth_dev_fw_version_get(uint16_t port_id,
3051  char *fw_version, size_t fw_size);
3052 
3091 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3092  uint32_t *ptypes, int num);
3126 __rte_experimental
3127 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3128  uint32_t *set_ptypes, unsigned int num);
3129 
3141 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3142 
3160 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3161 
3181 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3182 
3201 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3202  int on);
3203 
3220 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3221  enum rte_vlan_type vlan_type,
3222  uint16_t tag_type);
3223 
3241 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3242 
3256 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3257 
3272 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3273 
3274 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3275  void *userdata);
3276 
3282  buffer_tx_error_fn error_callback;
3283  void *error_userdata;
3284  uint16_t size;
3285  uint16_t length;
3286  struct rte_mbuf *pkts[];
3288 };
3289 
3296 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3297  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3298 
3309 int
3310 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3311 
3336 int
3338  buffer_tx_error_fn callback, void *userdata);
3339 
3362 void
3363 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3364  void *userdata);
3365 
3389 void
3390 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3391  void *userdata);
3392 
3418 int
3419 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3420 
3436 };
3437 
3445  uint64_t metadata;
3459 };
3460 
3479 };
3480 
3481 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
3482  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
3502 int rte_eth_dev_callback_register(uint16_t port_id,
3503  enum rte_eth_event_type event,
3504  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3505 
3524 int rte_eth_dev_callback_unregister(uint16_t port_id,
3525  enum rte_eth_event_type event,
3526  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3527 
3549 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
3550 
3571 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
3572 
3590 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
3591 
3613 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3614  int epfd, int op, void *data);
3615 
3633 __rte_experimental
3634 int
3635 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
3636 
3650 int rte_eth_led_on(uint16_t port_id);
3651 
3665 int rte_eth_led_off(uint16_t port_id);
3666 
3695 __rte_experimental
3696 int rte_eth_fec_get_capability(uint16_t port_id,
3697  struct rte_eth_fec_capa *speed_fec_capa,
3698  unsigned int num);
3699 
3723 __rte_experimental
3724 int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
3725 
3746 __rte_experimental
3747 int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
3748 
3762 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
3763  struct rte_eth_fc_conf *fc_conf);
3764 
3779 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
3780  struct rte_eth_fc_conf *fc_conf);
3781 
3797 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3798  struct rte_eth_pfc_conf *pfc_conf);
3799 
3818 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
3819  uint32_t pool);
3820 
3834 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
3835  struct rte_ether_addr *mac_addr);
3836 
3850 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
3851  struct rte_ether_addr *mac_addr);
3852 
3870 int rte_eth_dev_rss_reta_update(uint16_t port_id,
3871  struct rte_eth_rss_reta_entry64 *reta_conf,
3872  uint16_t reta_size);
3873 
3892 int rte_eth_dev_rss_reta_query(uint16_t port_id,
3893  struct rte_eth_rss_reta_entry64 *reta_conf,
3894  uint16_t reta_size);
3895 
3915 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3916  uint8_t on);
3917 
3936 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
3937 
3960 int rte_eth_mirror_rule_set(uint16_t port_id,
3961  struct rte_eth_mirror_conf *mirror_conf,
3962  uint8_t rule_id,
3963  uint8_t on);
3964 
3979 int rte_eth_mirror_rule_reset(uint16_t port_id,
3980  uint8_t rule_id);
3981 
3998 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3999  uint16_t tx_rate);
4000 
4015 int rte_eth_dev_rss_hash_update(uint16_t port_id,
4016  struct rte_eth_rss_conf *rss_conf);
4017 
4032 int
4033 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4034  struct rte_eth_rss_conf *rss_conf);
4035 
4060 int
4061 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4062  struct rte_eth_udp_tunnel *tunnel_udp);
4063 
4083 int
4084 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4085  struct rte_eth_udp_tunnel *tunnel_udp);
4086 
4100 int rte_eth_dev_get_dcb_info(uint16_t port_id,
4101  struct rte_eth_dcb_info *dcb_info);
4102 
4103 struct rte_eth_rxtx_callback;
4104 
4130 const struct rte_eth_rxtx_callback *
4131 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4132  rte_rx_callback_fn fn, void *user_param);
4133 
4160 const struct rte_eth_rxtx_callback *
4161 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4162  rte_rx_callback_fn fn, void *user_param);
4163 
4189 const struct rte_eth_rxtx_callback *
4190 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4191  rte_tx_callback_fn fn, void *user_param);
4192 
4226 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4227  const struct rte_eth_rxtx_callback *user_cb);
4228 
4262 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4263  const struct rte_eth_rxtx_callback *user_cb);
4264 
4284 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4285  struct rte_eth_rxq_info *qinfo);
4286 
4306 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4307  struct rte_eth_txq_info *qinfo);
4308 
4327 __rte_experimental
4328 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4329  struct rte_eth_burst_mode *mode);
4330 
4349 __rte_experimental
4350 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4351  struct rte_eth_burst_mode *mode);
4352 
4373 __rte_experimental
4374 int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
4375  struct rte_power_monitor_cond *pmc);
4376 
4394 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
4395 
4408 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
4409 
4425 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4426 
4442 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4443 
4461 __rte_experimental
4462 int
4463 rte_eth_dev_get_module_info(uint16_t port_id,
4464  struct rte_eth_dev_module_info *modinfo);
4465 
4484 __rte_experimental
4485 int
4486 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4487  struct rte_dev_eeprom_info *info);
4488 
4507 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4508  struct rte_ether_addr *mc_addr_set,
4509  uint32_t nb_mc_addr);
4510 
4523 int rte_eth_timesync_enable(uint16_t port_id);
4524 
4537 int rte_eth_timesync_disable(uint16_t port_id);
4538 
4557 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
4558  struct timespec *timestamp, uint32_t flags);
4559 
4575 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4576  struct timespec *timestamp);
4577 
4595 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
4596 
4611 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
4612 
4631 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
4632 
4677 __rte_experimental
4678 int
4679 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
4680 
4696 int
4697 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
4698 
4714 int
4715 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
4716 
4733 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4734  uint16_t *nb_rx_desc,
4735  uint16_t *nb_tx_desc);
4736 
4751 int
4752 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
4753 
4763 void *
4764 rte_eth_dev_get_sec_ctx(uint16_t port_id);
4765 
4780 __rte_experimental
4781 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
4782  struct rte_eth_hairpin_cap *cap);
4783 
4784 #include <rte_ethdev_core.h>
4785 
/*
 * Fast-path RX: fetch up to nb_pkts packets from the given RX queue of
 * port_id into rx_pkts, returning the number of mbufs actually stored.
 *
 * The call is dispatched through the driver's rx_pkt_burst function
 * pointer.  When RTE_ETHDEV_RXTX_CALLBACKS is compiled in, each post-RX
 * callback registered on this queue is then run in list order and may
 * adjust the burst (nb_rx is replaced by each callback's return value).
 * A trace point is emitted before returning.
 *
 * NOTE(review): port_id/queue_id are validated only under
 * RTE_LIBRTE_ETHDEV_DEBUG; in release builds an out-of-range value
 * indexes rte_eth_devices[] / rx_queues[] unchecked.
 */
4873 static inline uint16_t
4874 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
4875  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
4876 {
4877  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4878  uint16_t nb_rx;
4879 
4880 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4881  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
4882  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
4883 
4884  if (queue_id >= dev->data->nb_rx_queues) {
4885  RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4886  return 0;
4887  }
4888 #endif
/* Hand the burst off to the driver's RX handler for this queue. */
4889  nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
4890  rx_pkts, nb_pkts);
4891 
4892 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
4893  struct rte_eth_rxtx_callback *cb;
4894 
4895  /* __ATOMIC_RELEASE memory order was used when the
4896  * call back was inserted into the list.
4897  * Since there is a clear dependency between loading
4898  * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
4899  * not required.
4900  */
4901  cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id],
4902  __ATOMIC_RELAXED);
4903 
4904  if (unlikely(cb != NULL)) {
4905  do {
4906  nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
4907  nb_pkts, cb->param);
4908  cb = cb->next;
4909  } while (cb != NULL);
4910  }
4911 #endif
4912 
4913  rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
4914  return nb_rx;
4915 }
4916 
/*
 * Query how many descriptors of the given RX queue are currently in use.
 *
 * Returns the driver-reported count (cast to int) on success,
 * -ENODEV for an invalid port, -ENOTSUP when the driver does not
 * implement rx_queue_count, or -EINVAL when queue_id is out of range
 * or the queue has not been set up (NULL entry in rx_queues[]).
 */
4930 static inline int
4931 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
4932 {
4933  struct rte_eth_dev *dev;
4934 
4935  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4936  dev = &rte_eth_devices[port_id];
4937  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_queue_count, -ENOTSUP);
4938  if (queue_id >= dev->data->nb_rx_queues ||
4939  dev->data->rx_queues[queue_id] == NULL)
4940  return -EINVAL;
4941 
4942  return (int)(*dev->rx_queue_count)(dev, queue_id);
4943 }
4944 
/*
 * Deprecated: ask the driver whether the RX descriptor at 'offset' in
 * the given queue has been filled by hardware.  Superseded by
 * rte_eth_rx_descriptor_status() below.
 *
 * Returns the driver's result, -ENODEV for an invalid port, or
 * -ENOTSUP when the driver lacks rx_descriptor_done.
 *
 * NOTE(review): unlike rte_eth_rx_descriptor_status(), queue_id is
 * never bounds-checked here, even in debug builds — an out-of-range
 * queue indexes rx_queues[] unchecked; verify callers.
 */
4960 __rte_deprecated
4961 static inline int
4962 rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
4963 {
4964  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4965  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4966  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_done, -ENOTSUP);
4967  return (*dev->rx_descriptor_done)(dev->data->rx_queues[queue_id], offset);
4968 }
4969 
4970 #define RTE_ETH_RX_DESC_AVAIL 0
4971 #define RTE_ETH_RX_DESC_DONE 1
4972 #define RTE_ETH_RX_DESC_UNAVAIL 2
/*
 * Report the status of the RX descriptor at 'offset' in the given queue:
 * one of RTE_ETH_RX_DESC_AVAIL / DONE / UNAVAIL as returned by the
 * driver, or -ENOTSUP when the driver lacks rx_descriptor_status.
 *
 * Port and queue validation (-ENODEV) is compiled in only under
 * RTE_LIBRTE_ETHDEV_DEBUG; release builds trust the caller.
 */
5007 static inline int
5008 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
5009  uint16_t offset)
5010 {
5011  struct rte_eth_dev *dev;
5012  void *rxq;
5013 
5014 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
5015  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5016 #endif
5017  dev = &rte_eth_devices[port_id];
5018 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
5019  if (queue_id >= dev->data->nb_rx_queues)
5020  return -ENODEV;
5021 #endif
5022  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_status, -ENOTSUP);
5023  rxq = dev->data->rx_queues[queue_id];
5024 
5025  return (*dev->rx_descriptor_status)(rxq, offset);
5026 }
5027 
5028 #define RTE_ETH_TX_DESC_FULL 0
5029 #define RTE_ETH_TX_DESC_DONE 1
5030 #define RTE_ETH_TX_DESC_UNAVAIL 2
/*
 * TX counterpart of rte_eth_rx_descriptor_status(): report the status
 * of the TX descriptor at 'offset' in the given queue — one of
 * RTE_ETH_TX_DESC_FULL / DONE / UNAVAIL as returned by the driver, or
 * -ENOTSUP when the driver lacks tx_descriptor_status.
 *
 * Port and queue validation (-ENODEV) is compiled in only under
 * RTE_LIBRTE_ETHDEV_DEBUG; release builds trust the caller.
 */
5065 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
5066  uint16_t queue_id, uint16_t offset)
5067 {
5068  struct rte_eth_dev *dev;
5069  void *txq;
5070 
5071 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
5072  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5073 #endif
5074  dev = &rte_eth_devices[port_id];
5075 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
5076  if (queue_id >= dev->data->nb_tx_queues)
5077  return -ENODEV;
5078 #endif
5079  RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_descriptor_status, -ENOTSUP);
5080  txq = dev->data->tx_queues[queue_id];
5081 
5082  return (*dev->tx_descriptor_status)(txq, offset);
5083 }
5084 
/*
 * Fast-path TX: send up to nb_pkts packets from tx_pkts on the given
 * TX queue of port_id, returning the number the driver accepted.
 *
 * When RTE_ETHDEV_RXTX_CALLBACKS is compiled in, every pre-TX callback
 * registered on this queue runs first, in list order, and may shrink or
 * modify the burst (nb_pkts is replaced by each callback's return
 * value).  A trace point is emitted, then the driver's tx_pkt_burst
 * handler is invoked.
 *
 * NOTE(review): port_id/queue_id are validated only under
 * RTE_LIBRTE_ETHDEV_DEBUG; in release builds an out-of-range value
 * indexes rte_eth_devices[] / tx_queues[] unchecked.
 */
5151 static inline uint16_t
5152 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
5153  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
5154 {
5155  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5156 
5157 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
5158  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
5159  RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
5160 
5161  if (queue_id >= dev->data->nb_tx_queues) {
5162  RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5163  return 0;
5164  }
5165 #endif
5166 
5167 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
5168  struct rte_eth_rxtx_callback *cb;
5169 
5170  /* __ATOMIC_RELEASE memory order was used when the
5171  * call back was inserted into the list.
5172  * Since there is a clear dependency between loading
5173  * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
5174  * not required.
5175  */
5176  cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id],
5177  __ATOMIC_RELAXED);
5178 
5179  if (unlikely(cb != NULL)) {
5180  do {
5181  nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
5182  cb->param);
5183  cb = cb->next;
5184  } while (cb != NULL);
5185  }
5186 #endif
5187 
5188  rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts,
5189  nb_pkts);
5190  return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
5191 }
5192 
5247 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
5248 
/*
 * Run the driver's tx_pkt_prepare hook over a burst prior to
 * transmission and return the number of packets that are ready.
 *
 * If the driver provides no tx_pkt_prepare hook, the whole burst is
 * reported as ready unchanged.  Under RTE_LIBRTE_ETHDEV_DEBUG, an
 * invalid port_id or queue_id returns 0 with rte_errno set to ENODEV
 * or EINVAL respectively; release builds skip these checks.
 */
5249 static inline uint16_t
5250 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
5251  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
5252 {
5253  struct rte_eth_dev *dev;
5254 
5255 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
5256  if (!rte_eth_dev_is_valid_port(port_id)) {
5257  RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
5258  rte_errno = ENODEV;
5259  return 0;
5260  }
5261 #endif
5262 
5263  dev = &rte_eth_devices[port_id];
5264 
5265 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
5266  if (queue_id >= dev->data->nb_tx_queues) {
5267  RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5268  rte_errno = EINVAL;
5269  return 0;
5270  }
5271 #endif
5272 
/* No prepare hook: every packet is considered ready as-is. */
5273  if (!dev->tx_pkt_prepare)
5274  return nb_pkts;
5275 
5276  return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
5277  tx_pkts, nb_pkts);
5278 }
5279 
5280 #else
5281 
5282 /*
5283  * Native NOOP operation for compilation targets which do not require any
5284  * preparation step, where a functional NOOP could introduce an unnecessary
5285  * performance drop.
5286  *
5287  * Generally it is not a good idea to turn this on globally, and it should
5288  * not be used if the behavior of tx_prepare can change.
5289  */
5290 
/* NOOP variant (RTE_ETHDEV_TX_PREPARE_NOOP defined): report every
 * packet as already prepared without touching the device. */
5291 static inline uint16_t
5292 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
5293  __rte_unused uint16_t queue_id,
5294  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
5295 {
5296  return nb_pkts;
5297 }
5298 
5299 #endif
5300 
/*
 * Transmit everything accumulated in 'buffer' on the given port/queue.
 *
 * Empties the buffer (length is reset to 0 before error handling), and
 * on a partial send hands the unsent tail to the buffer's
 * error_callback together with error_userdata.  Returns the number of
 * packets the driver actually sent — packets passed to the callback
 * are not counted.
 */
5323 static inline uint16_t
5324 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
5325  struct rte_eth_dev_tx_buffer *buffer)
5326 {
5327  uint16_t sent;
5328  uint16_t to_send = buffer->length;
5329 
5330  if (to_send == 0)
5331  return 0;
5332 
5333  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
5334 
5335  buffer->length = 0;
5336 
5337  /* All packets sent, or to be dealt with by callback below */
5338  if (unlikely(sent != to_send))
5339  buffer->error_callback(&buffer->pkts[sent],
5340  (uint16_t)(to_send - sent),
5341  buffer->error_userdata);
5342 
5343  return sent;
5344 }
5345 
/*
 * Append one packet to 'buffer'; when the buffer reaches its configured
 * size, flush it via rte_eth_tx_buffer_flush().
 *
 * Returns 0 when the packet was merely buffered, otherwise the flush's
 * sent-packet count.  The caller must ensure there is room — no bounds
 * check is performed before the pkts[] store.
 */
5376 static __rte_always_inline uint16_t
5377 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
5378  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
5379 {
5380  buffer->pkts[buffer->length++] = tx_pkt;
5381  if (buffer->length < buffer->size)
5382  return 0;
5383 
5384  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
5385 }
5386 
5387 #ifdef __cplusplus
5388 }
5389 #endif
5390 
5391 #endif /* _RTE_ETHDEV_H_ */
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1556
int rte_eth_dev_stop(uint16_t port_id)
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf
Definition: rte_ethdev.h:1328
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_promiscuous_disable(uint16_t port_id)
__extension__ uint32_t multi_pools
Definition: rte_ethdev.h:1498
#define ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:780
struct rte_eth_dev_portconf default_rxportconf
Definition: rte_ethdev.h:1559
struct rte_fdir_conf fdir_conf
Definition: rte_ethdev.h:1338
uint32_t rmv
Definition: rte_ethdev.h:1295
#define __rte_always_inline
Definition: rte_common.h:226
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:1068
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
uint16_t nb_desc
Definition: rte_ethdev.h:1592
char info[RTE_ETH_BURST_MODE_INFO_SIZE]
Definition: rte_ethdev.h:1612
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1534
uint16_t reta[RTE_RETA_GROUP_SIZE]
Definition: rte_ethdev.h:853
const uint32_t * dev_flags
Definition: rte_ethdev.h:1521
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
struct rte_eth_rxseg_capa rx_seg_capa
Definition: rte_ethdev.h:1533
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
__rte_experimental int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
#define __rte_cache_min_aligned
Definition: rte_common.h:403
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:5250
int rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
rte_eth_nb_tcs
Definition: rte_ethdev.h:861
struct rte_eth_vmdq_tx_conf vmdq_tx_conf
Definition: rte_ethdev.h:1332
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:269
__rte_experimental int rte_eth_dev_owner_new(uint64_t *owner_id)
__rte_experimental int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
struct rte_eth_thresh rx_thresh
Definition: rte_ethdev.h:1039
uint16_t rte_eth_find_next(uint16_t port_id)
__rte_experimental int rte_eth_dev_is_removed(uint16_t port_id)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
int rte_eth_led_off(uint16_t port_id)
rte_fdir_pballoc_type
Definition: rte_ethdev.h:1240
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:5008
__rte_experimental uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
uint64_t imissed
Definition: rte_ethdev.h:253
uint32_t low_water
Definition: rte_ethdev.h:1198
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
uint32_t max_rx_pkt_len
Definition: rte_ethdev.h:407
uint8_t rss_key_len
Definition: rte_ethdev.h:460
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint16_t tx_rate)
uint8_t hthresh
Definition: rte_ethdev.h:339
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
__rte_experimental int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1538
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
uint16_t reta_size
Definition: rte_ethdev.h:1542
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint32_t lpbk_mode
Definition: rte_ethdev.h:1313
enum rte_fdir_status_mode status
Definition: rte_ethdev.h:1264
enum rte_eth_tx_mq_mode mq_mode
Definition: rte_ethdev.h:959
#define rte_errno
Definition: rte_errno.h:29
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint32_t link_speeds
Definition: rte_ethdev.h:1304
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1540
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:387
rte_eth_fc_mode
Definition: rte_ethdev.h:1184
int rte_eth_mirror_rule_set(uint16_t port_id, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on)
uint8_t enable_default_pool
Definition: rte_ethdev.h:913
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:1178
#define __rte_unused
Definition: rte_common.h:116
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:267
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
uint64_t opackets
Definition: rte_ethdev.h:250
struct rte_eth_rss_conf rss_conf
Definition: rte_ethdev.h:1319
uint8_t hash_key_size
Definition: rte_ethdev.h:1544
int rte_eth_dev_close(uint16_t port_id)
struct rte_mempool * mp
Definition: rte_ethdev.h:1017
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
uint16_t split_hdr_size
Definition: rte_ethdev.h:410
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
struct rte_mempool * mp
Definition: rte_ethdev.h:1579
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1337
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:265
const char * name
Definition: rte_ethdev.h:1478
struct rte_eth_switch_info switch_info
Definition: rte_ethdev.h:1568
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
struct rte_eth_vmdq_dcb_conf::@151 pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
uint32_t rxq
Definition: rte_ethdev.h:1293
int rte_eth_dev_set_link_up(uint16_t port_id)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
struct rte_eth_thresh tx_thresh
Definition: rte_ethdev.h:1067
struct rte_eth_desc_lim rx_desc_lim
Definition: rte_ethdev.h:1552
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:920
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
__rte_experimental int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
__rte_experimental int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1550
uint8_t rx_deferred_start
Definition: rte_ethdev.h:1042
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:3286
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:3481
struct rte_eth_rxmode rxmode
Definition: rte_ethdev.h:1311
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:261
uint32_t high_water
Definition: rte_ethdev.h:1197
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:912
struct rte_eth_txconf conf
Definition: rte_ethdev.h:1591
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
struct rte_intr_conf intr_conf
Definition: rte_ethdev.h:1339
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1616
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
struct rte_eth_desc_lim tx_desc_lim
Definition: rte_ethdev.h:1553
int rte_eth_timesync_disable(uint16_t port_id)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint64_t offloads
Definition: rte_ethdev.h:1049
uint32_t link_speed
Definition: rte_ethdev.h:317
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num)
uint16_t send_xon
Definition: rte_ethdev.h:1200
int rte_eth_stats_reset(uint16_t port_id)
struct rte_eth_vmdq_rx_conf::@152 pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]
int rte_eth_allmulticast_enable(uint16_t port_id)
struct rte_eth_txconf default_txconf
Definition: rte_ethdev.h:1548
uint32_t offset_allowed
Definition: rte_ethdev.h:1499
__rte_experimental int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, struct rte_power_monitor_cond *pmc)
#define unlikely(x)
uint64_t ibytes
Definition: rte_ethdev.h:251
uint32_t offset_align_log2
Definition: rte_ethdev.h:1500
uint64_t offloads
Definition: rte_ethdev.h:1078
uint16_t max_nb_queues
Definition: rte_ethdev.h:1092
uint64_t oerrors
Definition: rte_ethdev.h:258
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
struct rte_eth_dcb_rx_conf dcb_rx_conf
Definition: rte_ethdev.h:1322
struct rte_eth_vmdq_rx_conf vmdq_rx_conf
Definition: rte_ethdev.h:1324
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
uint16_t max_mtu
Definition: rte_ethdev.h:1520
uint64_t offloads
Definition: rte_ethdev.h:416
enum rte_eth_rx_mq_mode mq_mode
Definition: rte_ethdev.h:406
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:943
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
uint16_t tx_free_thresh
Definition: rte_ethdev.h:1069
uint16_t nb_desc
Definition: rte_ethdev.h:1582
__rte_experimental uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:4874
int rte_eth_allmulticast_disable(uint16_t port_id)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_start(uint16_t port_id)
uint16_t rx_buf_size
Definition: rte_ethdev.h:1583
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1532
uint8_t scattered_rx
Definition: rte_ethdev.h:1581
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
struct rte_eth_dcb_tx_conf dcb_tx_conf
Definition: rte_ethdev.h:1330
uint64_t offloads
Definition: rte_ethdev.h:965
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1551
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1536
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:976
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:263
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
uint16_t min_mtu
Definition: rte_ethdev.h:1519
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:1781
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1571
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
struct rte_eth_conf::@153 rx_adv_conf
uint64_t obytes
Definition: rte_ethdev.h:252
__rte_experimental int rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
uint8_t enable_loop_back
Definition: rte_ethdev.h:946
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:418
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
struct rte_eth_rxconf conf
Definition: rte_ethdev.h:1580
#define ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:781
rte_eth_fec_mode
Definition: rte_ethdev.h:1686
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
uint16_t max_tx_queues
Definition: rte_ethdev.h:1527
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_xstats_reset(uint16_t port_id)
rte_eth_dev_state
Definition: rte_ethdev.h:1787
uint16_t rx_free_thresh
Definition: rte_ethdev.h:1040
struct rte_eth_vlan_mirror vlan
Definition: rte_ethdev.h:841
uint64_t dev_capa
Definition: rte_ethdev.h:1563
uint64_t ierrors
Definition: rte_ethdev.h:257
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction)
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
__extension__ uint8_t hw_vlan_insert_pvid
Definition: rte_ethdev.h:970
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1546
uint16_t rte_eth_dev_count_total(void)
int rte_eth_promiscuous_enable(uint16_t port_id)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
union rte_eth_rxseg * rx_seg
Definition: rte_ethdev.h:1057
struct rte_eth_dev_portconf default_txportconf
Definition: rte_ethdev.h:1561
rte_vlan_type
Definition: rte_ethdev.h:426
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
__rte_experimental int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
uint16_t nb_seg_max
Definition: rte_ethdev.h:1165
uint64_t ipackets
Definition: rte_ethdev.h:249
uint16_t max_vfs
Definition: rte_ethdev.h:1531
uint16_t pause_time
Definition: rte_ethdev.h:1199
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
uint64_t rx_nombuf
Definition: rte_ethdev.h:259
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:5377
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:970
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
#define ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:346
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1549
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:3425
rte_eth_nb_pools
Definition: rte_ethdev.h:870
#define ETH_MIRROR_MAX_VLANS
Definition: rte_ethdev.h:816
uint16_t nb_align
Definition: rte_ethdev.h:1155
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:354
__rte_experimental const char * rte_eth_link_speed_to_str(uint32_t link_speed)
__rte_experimental int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
const char * driver_name
Definition: rte_ethdev.h:1516
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:4931
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_socket_id(uint16_t port_id)
uint8_t enable_default_pool
Definition: rte_ethdev.h:944
__extension__ struct rte_eth_link __rte_aligned(8)
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1557
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
struct rte_eth_fdir_flex_conf flex_conf
Definition: rte_ethdev.h:1268
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1080
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1528
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
rte_eth_tunnel_type
Definition: rte_ethdev.h:1220
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint64_t value
Definition: rte_ethdev.h:1629
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition: rte_ethdev.h:605
__rte_experimental int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
enum rte_fdir_pballoc_type pballoc
Definition: rte_ethdev.h:1263
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
__rte_experimental int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1523
__rte_experimental int rte_eth_dev_owner_delete(const uint64_t owner_id)
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
static __rte_deprecated int rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:4962
uint64_t rss_hf
Definition: rte_ethdev.h:461
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1081
uint64_t id
Definition: rte_ethdev.h:1628
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1570
__extension__ uint8_t hw_vlan_reject_tagged
Definition: rte_ethdev.h:970
union rte_eth_conf::@154 tx_adv_conf
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1059
enum rte_fdir_mode mode
Definition: rte_ethdev.h:1262
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
unsigned int if_index
Definition: rte_ethdev.h:1517
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:1202
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:1757
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:1201
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
rte_fdir_mode
Definition: rte_eth_ctrl.h:425
struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf
Definition: rte_ethdev.h:1320
uint8_t * rss_key
Definition: rte_ethdev.h:459
rte_fdir_status_mode
Definition: rte_ethdev.h:1249
void * reserved_ptrs[2]
Definition: rte_ethdev.h:977
uint8_t tx_deferred_start
Definition: rte_ethdev.h:1072
uint8_t wthresh
Definition: rte_ethdev.h:340
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1060
uint16_t max_rx_queues
Definition: rte_ethdev.h:1526
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:409
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
struct rte_eth_fc_conf fc
Definition: rte_ethdev.h:1212
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
struct rte_eth_txmode txmode
Definition: rte_ethdev.h:1312
int rte_eth_allmulticast_get(uint16_t port_id)
uint8_t rx_drop_en
Definition: rte_ethdev.h:1041
int rte_eth_dev_is_valid_port(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint16_t rx_nseg
Definition: rte_ethdev.h:1043
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1525
void * reserved_ptrs[2]
Definition: rte_ethdev.h:419
int rte_eth_timesync_enable(uint16_t port_id)
uint8_t pthresh
Definition: rte_ethdev.h:338
struct rte_eth_rxconf default_rxconf
Definition: rte_ethdev.h:1547
uint32_t speed_capa
Definition: rte_ethdev.h:1554
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:5152
uint8_t drop_queue
Definition: rte_ethdev.h:1266
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_experimental int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1522
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
__rte_experimental int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
uint32_t lsc
Definition: rte_ethdev.h:1291
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:5324
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
rte_eth_event_type
Definition: rte_ethdev.h:3464