DPDK  21.08.0
rte_ethdev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
142 #ifdef __cplusplus
143 extern "C" {
144 #endif
145 
146 #include <stdint.h>
147 
148 /* Use this macro to check if LRO API is supported */
149 #define RTE_ETHDEV_HAS_LRO_SUPPORT
150 
151 /* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
152 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
153 #define RTE_ETHDEV_DEBUG_RX
154 #define RTE_ETHDEV_DEBUG_TX
155 #endif
156 
157 #include <rte_compat.h>
158 #include <rte_log.h>
159 #include <rte_interrupts.h>
160 #include <rte_dev.h>
161 #include <rte_devargs.h>
162 #include <rte_errno.h>
163 #include <rte_common.h>
164 #include <rte_config.h>
165 #include <rte_ether.h>
166 #include <rte_power_intrinsics.h>
167 
168 #include "rte_ethdev_trace_fp.h"
169 #include "rte_dev_info.h"
170 
171 extern int rte_eth_dev_logtype;
172 
/**
 * Internal ethdev logging helper: emits via rte_log() under the
 * rte_eth_dev_logtype component at the given RTE_LOG_* level.
 * The leading "" lets the variadic part start with the format string.
 */
#define RTE_ETHDEV_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
175 
176 struct rte_mbuf;
177 
194 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
195 
210 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
211 
224 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
225 
/**
 * Iterate over Ethernet ports matching @p devargs.
 * Initializes @p iter, then advances @p id with rte_eth_iterator_next()
 * until it returns RTE_MAX_ETHPORTS (end of iteration).
 */
#define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
	for (rte_eth_iterator_init(iter, devargs), \
	     id = rte_eth_iterator_next(iter); \
	     id != RTE_MAX_ETHPORTS; \
	     id = rte_eth_iterator_next(iter))
244 
255  uint64_t ipackets;
256  uint64_t opackets;
257  uint64_t ibytes;
258  uint64_t obytes;
259  uint64_t imissed;
263  uint64_t ierrors;
264  uint64_t oerrors;
265  uint64_t rx_nombuf;
266  /* Queue stats are limited to max 256 queues */
267  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
269  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
271  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
273  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
275  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
277 };
278 
/* Link-speed capability bitmap (used e.g. in rte_eth_conf.link_speeds). */
#define ETH_LINK_SPEED_AUTONEG  (0 << 0)  /**< Autonegotiate (all speeds) */
#define ETH_LINK_SPEED_FIXED    (1 << 0)  /**< Disable autoneg (fixed speed) */
#define ETH_LINK_SPEED_10M_HD   (1 << 1)  /**< 10 Mbps half-duplex */
#define ETH_LINK_SPEED_10M      (1 << 2)  /**< 10 Mbps full-duplex */
#define ETH_LINK_SPEED_100M_HD  (1 << 3)  /**< 100 Mbps half-duplex */
#define ETH_LINK_SPEED_100M     (1 << 4)  /**< 100 Mbps full-duplex */
#define ETH_LINK_SPEED_1G       (1 << 5)
#define ETH_LINK_SPEED_2_5G     (1 << 6)
#define ETH_LINK_SPEED_5G       (1 << 7)
#define ETH_LINK_SPEED_10G      (1 << 8)
#define ETH_LINK_SPEED_20G      (1 << 9)
#define ETH_LINK_SPEED_25G      (1 << 10)
#define ETH_LINK_SPEED_40G      (1 << 11)
#define ETH_LINK_SPEED_50G      (1 << 12)
#define ETH_LINK_SPEED_56G      (1 << 13)
#define ETH_LINK_SPEED_100G     (1 << 14)
#define ETH_LINK_SPEED_200G     (1 << 15)

/* Numeric link speeds, in Mbps (10 == 10 Mbps, 200000 == 200 Gbps). */
#define ETH_SPEED_NUM_NONE         0
#define ETH_SPEED_NUM_10M         10
#define ETH_SPEED_NUM_100M       100
#define ETH_SPEED_NUM_1G        1000
#define ETH_SPEED_NUM_2_5G      2500
#define ETH_SPEED_NUM_5G        5000
#define ETH_SPEED_NUM_10G      10000
#define ETH_SPEED_NUM_20G      20000
#define ETH_SPEED_NUM_25G      25000
#define ETH_SPEED_NUM_40G      40000
#define ETH_SPEED_NUM_50G      50000
#define ETH_SPEED_NUM_56G      56000
#define ETH_SPEED_NUM_100G    100000
#define ETH_SPEED_NUM_200G    200000
#define ETH_SPEED_NUM_UNKNOWN UINT32_MAX  /**< Speed cannot be determined */
/**
 * Link state of an Ethernet port: numeric speed plus three 1-bit flags
 * packed in a 16-bit field. Kept 8-byte aligned (atomic-friendly size).
 */
__extension__
struct rte_eth_link {
	uint32_t link_speed;        /**< ETH_SPEED_NUM_* value (Mbps) */
	uint16_t link_duplex  : 1;  /**< ETH_LINK_HALF_DUPLEX / ETH_LINK_FULL_DUPLEX */
	uint16_t link_autoneg : 1;  /**< ETH_LINK_FIXED / ETH_LINK_AUTONEG */
	uint16_t link_status  : 1;  /**< ETH_LINK_DOWN / ETH_LINK_UP */
} __rte_aligned(8);
/* Utility constants for the rte_eth_link bit-fields. */
#define ETH_LINK_HALF_DUPLEX     0  /**< Half-duplex connection */
#define ETH_LINK_FULL_DUPLEX     1  /**< Full-duplex connection */
#define ETH_LINK_DOWN            0  /**< Link is down */
#define ETH_LINK_UP              1  /**< Link is up */
#define ETH_LINK_FIXED           0  /**< Speed was fixed, no autoneg */
#define ETH_LINK_AUTONEG         1  /**< Speed was autonegotiated */
#define RTE_ETH_LINK_MAX_STR_LEN 40 /**< Max length of a link status string */
/**
 * Ring prefetch/host/write-back threshold triple used by Rx/Tx queue
 * configuration (exact hardware semantics are driver-specific).
 */
struct rte_eth_thresh {
	uint8_t pthresh;  /**< Ring prefetch threshold */
	uint8_t hthresh;  /**< Ring host threshold */
	uint8_t wthresh;  /**< Ring writeback threshold */
};
348 
/* Component flags combined to form the Rx multi-queue mode values. */
#define ETH_MQ_RX_RSS_FLAG  0x1  /**< RSS enabled */
#define ETH_MQ_RX_DCB_FLAG  0x2  /**< DCB enabled */
#define ETH_MQ_RX_VMDQ_FLAG 0x4  /**< VMDq enabled */
355 
363 
367  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
369  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
370 
372  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
374  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
376  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
379  ETH_MQ_RX_VMDQ_FLAG,
380 };
381 
385 #define ETH_RSS ETH_MQ_RX_RSS
386 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
387 #define ETH_DCB_RX ETH_MQ_RX_DCB
388 
398 };
399 
403 #define ETH_DCB_NONE ETH_MQ_TX_NONE
404 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
405 #define ETH_DCB_TX ETH_MQ_TX_DCB
406 
412  enum rte_eth_rx_mq_mode mq_mode;
413  uint32_t max_rx_pkt_len;
416  uint16_t split_hdr_size;
422  uint64_t offloads;
423 
424  uint64_t reserved_64s[2];
425  void *reserved_ptrs[2];
426 };
427 
433  ETH_VLAN_TYPE_UNKNOWN = 0,
436  ETH_VLAN_TYPE_MAX,
437 };
438 
444  uint64_t ids[64];
445 };
446 
465  uint8_t *rss_key;
466  uint8_t rss_key_len;
467  uint64_t rss_hf;
468 };
469 
470 /*
471  * A packet can be identified by hardware as different flow types. Different
472  * NIC hardware may support different flow types.
473  * Basically, the NIC hardware identifies the flow type as deep protocol as
474  * possible, and exclusively. For example, if a packet is identified as
475  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
476  * though it is an actual IPV4 packet.
477  */
/* Hardware-identified flow types (mutually exclusive, deepest match wins). */
#define RTE_ETH_FLOW_UNKNOWN             0
#define RTE_ETH_FLOW_RAW                 1
#define RTE_ETH_FLOW_IPV4                2
#define RTE_ETH_FLOW_FRAG_IPV4           3
#define RTE_ETH_FLOW_NONFRAG_IPV4_TCP    4
#define RTE_ETH_FLOW_NONFRAG_IPV4_UDP    5
#define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP   6
#define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER  7
#define RTE_ETH_FLOW_IPV6                8
#define RTE_ETH_FLOW_FRAG_IPV6           9
#define RTE_ETH_FLOW_NONFRAG_IPV6_TCP   10
#define RTE_ETH_FLOW_NONFRAG_IPV6_UDP   11
#define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP  12
#define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
#define RTE_ETH_FLOW_L2_PAYLOAD         14
#define RTE_ETH_FLOW_IPV6_EX            15
#define RTE_ETH_FLOW_IPV6_TCP_EX        16
#define RTE_ETH_FLOW_IPV6_UDP_EX        17
#define RTE_ETH_FLOW_PORT               18  /**< Consider device port number as a flow differentiator */
#define RTE_ETH_FLOW_VXLAN              19
#define RTE_ETH_FLOW_GENEVE             20
#define RTE_ETH_FLOW_NVGRE              21
#define RTE_ETH_FLOW_VXLAN_GPE          22
#define RTE_ETH_FLOW_GTPU               23
#define RTE_ETH_FLOW_MAX                24  /**< One past the last valid flow type */
504 
505 /*
506  * Below macros are defined for RSS offload types, they can be used to
507  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
508  */
/* RSS offload type bits for rte_eth_rss_conf.rss_hf / rte_flow_action_rss.types. */
#define ETH_RSS_IPV4               (1ULL << 2)
#define ETH_RSS_FRAG_IPV4          (1ULL << 3)
#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << 4)
#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << 5)
#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << 6)
#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
#define ETH_RSS_IPV6               (1ULL << 8)
#define ETH_RSS_FRAG_IPV6          (1ULL << 9)
#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << 10)
#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << 11)
#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << 12)
#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
#define ETH_RSS_L2_PAYLOAD         (1ULL << 14)
#define ETH_RSS_IPV6_EX            (1ULL << 15)
#define ETH_RSS_IPV6_TCP_EX        (1ULL << 16)
#define ETH_RSS_IPV6_UDP_EX        (1ULL << 17)
#define ETH_RSS_PORT               (1ULL << 18)
#define ETH_RSS_VXLAN              (1ULL << 19)
#define ETH_RSS_GENEVE             (1ULL << 20)
#define ETH_RSS_NVGRE              (1ULL << 21)
/* NOTE(review): bit 22 is unassigned here — confirm it is reserved upstream. */
#define ETH_RSS_GTPU               (1ULL << 23)
#define ETH_RSS_ETH                (1ULL << 24)
#define ETH_RSS_S_VLAN             (1ULL << 25)
#define ETH_RSS_C_VLAN             (1ULL << 26)
#define ETH_RSS_ESP                (1ULL << 27)
#define ETH_RSS_AH                 (1ULL << 28)
#define ETH_RSS_L2TPV3             (1ULL << 29)
#define ETH_RSS_PFCP               (1ULL << 30)
#define ETH_RSS_PPPOE              (1ULL << 31)
#define ETH_RSS_ECPRI              (1ULL << 32)
#define ETH_RSS_MPLS               (1ULL << 33)
540 
/*
 * Modifier bits combined with the ETH_RSS_* protocol bits above to
 * narrow the RSS input set. They are allocated downward from bit 63.
 * A protocol bit used without any SRC/DST_ONLY modifier hashes both
 * source and destination fields; setting both SRC_ONLY and DST_ONLY
 * of the same layer cancels out and is treated as neither being set.
 */
#define ETH_RSS_L3_SRC_ONLY (1ULL << 63)
#define ETH_RSS_L3_DST_ONLY (1ULL << 62)
#define ETH_RSS_L4_SRC_ONLY (1ULL << 61)
#define ETH_RSS_L4_DST_ONLY (1ULL << 60)
#define ETH_RSS_L2_SRC_ONLY (1ULL << 59)
#define ETH_RSS_L2_DST_ONLY (1ULL << 58)

/*
 * Select only an IPv6 address prefix as RSS input set, following the
 * prefix lengths of https://tools.ietf.org/html/rfc6052.
 * Must be combined with ETH_RSS_IPV6, ETH_RSS_NONFRAG_IPV6_UDP,
 * ETH_RSS_NONFRAG_IPV6_TCP or ETH_RSS_NONFRAG_IPV6_SCTP.
 */
#define RTE_ETH_RSS_L3_PRE32 (1ULL << 57)
#define RTE_ETH_RSS_L3_PRE40 (1ULL << 56)
#define RTE_ETH_RSS_L3_PRE48 (1ULL << 55)
#define RTE_ETH_RSS_L3_PRE56 (1ULL << 54)
#define RTE_ETH_RSS_L3_PRE64 (1ULL << 53)
#define RTE_ETH_RSS_L3_PRE96 (1ULL << 52)

/*
 * Bits 50 and 51 select whether RSS is computed on the outermost or the
 * innermost encapsulation layer, or left to the PMD default.
 */
#define ETH_RSS_LEVEL_PMD_DEFAULT (0ULL << 50)
#define ETH_RSS_LEVEL_OUTERMOST   (1ULL << 50)
#define ETH_RSS_LEVEL_INNERMOST   (2ULL << 50)
#define ETH_RSS_LEVEL_MASK        (3ULL << 50)

/*
 * Extract the encapsulation-level field (0..3) from an rss_hf value.
 * The argument is fully parenthesized so expressions such as
 * ETH_RSS_LEVEL(a | b) evaluate correctly ('&' binds tighter than '|').
 */
#define ETH_RSS_LEVEL(rss_hf) (((rss_hf) & ETH_RSS_LEVEL_MASK) >> 50)

/**
 * Simplify an RSS hash-field value: when both the SRC_ONLY and DST_ONLY
 * modifiers of the same layer (L3 or L4) are set, they cancel out, so
 * both bits are cleared (equivalent to hashing both SRC and DST).
 *
 * @param rss_hf
 *   RSS hash-field bitmask to refine.
 * @return
 *   The refined bitmask.
 */
static inline uint64_t
rte_eth_rss_hf_refine(uint64_t rss_hf)
{
	if ((rss_hf & ETH_RSS_L3_SRC_ONLY) && (rss_hf & ETH_RSS_L3_DST_ONLY))
		rss_hf &= ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);

	if ((rss_hf & ETH_RSS_L4_SRC_ONLY) && (rss_hf & ETH_RSS_L4_DST_ONLY))
		rss_hf &= ~(ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);

	return rss_hf;
}
621 
/* Convenience IPv6 prefix combinations of protocol and RTE_ETH_RSS_L3_PRE* bits. */
#define ETH_RSS_IPV6_PRE32      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE32)
#define ETH_RSS_IPV6_PRE40      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE40)
#define ETH_RSS_IPV6_PRE48      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE48)
#define ETH_RSS_IPV6_PRE56      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE56)
#define ETH_RSS_IPV6_PRE64      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE64)
#define ETH_RSS_IPV6_PRE96      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE96)

#define ETH_RSS_IPV6_PRE32_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE32)
#define ETH_RSS_IPV6_PRE40_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE40)
#define ETH_RSS_IPV6_PRE48_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE48)
#define ETH_RSS_IPV6_PRE56_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE56)
#define ETH_RSS_IPV6_PRE64_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE64)
#define ETH_RSS_IPV6_PRE96_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE96)

#define ETH_RSS_IPV6_PRE32_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE32)
#define ETH_RSS_IPV6_PRE40_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE40)
#define ETH_RSS_IPV6_PRE48_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE48)
#define ETH_RSS_IPV6_PRE56_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE56)
#define ETH_RSS_IPV6_PRE64_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE64)
#define ETH_RSS_IPV6_PRE96_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE96)

#define ETH_RSS_IPV6_PRE32_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE32)
#define ETH_RSS_IPV6_PRE40_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE40)
#define ETH_RSS_IPV6_PRE48_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE48)
#define ETH_RSS_IPV6_PRE56_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE56)
#define ETH_RSS_IPV6_PRE64_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE64)
#define ETH_RSS_IPV6_PRE96_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE96)

/* Grouped shorthands over the individual protocol bits. */
#define ETH_RSS_IP ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_IPV6_EX)

#define ETH_RSS_UDP ( \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_IPV6_UDP_EX)

#define ETH_RSS_TCP ( \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_IPV6_TCP_EX)

#define ETH_RSS_SCTP ( \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV6_SCTP)

#define ETH_RSS_TUNNEL ( \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

#define ETH_RSS_VLAN ( \
	ETH_RSS_S_VLAN | \
	ETH_RSS_C_VLAN)

/* Mask of every valid protocol RSS bit. */
#define ETH_RSS_PROTO_MASK ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_NONFRAG_IPV6_SCTP | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_L2_PAYLOAD | \
	ETH_RSS_IPV6_EX | \
	ETH_RSS_IPV6_TCP_EX | \
	ETH_RSS_IPV6_UDP_EX | \
	ETH_RSS_PORT | \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE | \
	ETH_RSS_MPLS)
773 
/*
 * Redirection table (RETA) entry sizes. Not every RETA size is supported
 * by every driver; see the documentation of the relevant functions.
 */
#define ETH_RSS_RETA_SIZE_64  64
#define ETH_RSS_RETA_SIZE_128 128
#define ETH_RSS_RETA_SIZE_256 256
#define ETH_RSS_RETA_SIZE_512 512
#define RTE_RETA_GROUP_SIZE   64  /**< Entries per RETA update group */

/* Definitions used for VMDq and DCB functionality */
#define ETH_VMDQ_MAX_VLAN_FILTERS   64
#define ETH_DCB_NUM_USER_PRIORITIES 8
#define ETH_VMDQ_DCB_NUM_QUEUES     128
#define ETH_DCB_NUM_QUEUES          128

/* DCB capability bits */
#define ETH_DCB_PG_SUPPORT  0x00000001  /**< Priority groups */
#define ETH_DCB_PFC_SUPPORT 0x00000002  /**< Priority flow control */

/* VLAN offload capability bits */
#define ETH_VLAN_STRIP_OFFLOAD  0x0001
#define ETH_VLAN_FILTER_OFFLOAD 0x0002
#define ETH_VLAN_EXTEND_OFFLOAD 0x0004
#define ETH_QINQ_STRIP_OFFLOAD  0x0008

/* Masks selecting which VLAN offload setting to change */
#define ETH_VLAN_STRIP_MASK  0x0001
#define ETH_VLAN_FILTER_MASK 0x0002
#define ETH_VLAN_EXTEND_MASK 0x0004
#define ETH_QINQ_STRIP_MASK  0x0008
#define ETH_VLAN_ID_MAX      0x0FFF  /**< Largest valid 12-bit VLAN id */

/* Receive MAC address table size */
#define ETH_NUM_RECEIVE_MAC_ADDR 128

/* Unicast hash array size */
#define ETH_VMDQ_NUM_UC_HASH_ARRAY 128

/* VMDq pool Rx mode bits */
#define ETH_VMDQ_ACCEPT_UNTAG     0x0001
#define ETH_VMDQ_ACCEPT_HASH_MC   0x0002
#define ETH_VMDQ_ACCEPT_HASH_UC   0x0004
#define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
#define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
#define ETH_MIRROR_MAX_VLANS 64  /**< Max VLAN ids a mirror rule can carry */

/* Mirror rule type bits */
#define ETH_MIRROR_VIRTUAL_POOL_UP   0x01
#define ETH_MIRROR_UPLINK_PORT       0x02
#define ETH_MIRROR_DOWNLINK_PORT     0x04
#define ETH_MIRROR_VLAN              0x08
#define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10

/** Set of VLANs to mirror: a validity bitmask plus the VLAN ids themselves. */
struct rte_eth_vlan_mirror {
	uint64_t vlan_mask;  /**< Bit i set => vlan_id[i] is valid */
	uint16_t vlan_id[ETH_MIRROR_MAX_VLANS];
};
838 
843  uint8_t rule_type;
844  uint8_t dst_pool;
845  uint64_t pool_mask;
847  struct rte_eth_vlan_mirror vlan;
848 };
849 
857  uint64_t mask;
859  uint16_t reta[RTE_RETA_GROUP_SIZE];
861 };
862 
868  ETH_4_TCS = 4,
870 };
871 
881 };
882 
883 /* This structure may be extended in future. */
884 struct rte_eth_dcb_rx_conf {
885  enum rte_eth_nb_tcs nb_tcs;
887  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
888 };
889 
890 struct rte_eth_vmdq_dcb_tx_conf {
891  enum rte_eth_nb_pools nb_queue_pools;
893  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
894 };
895 
896 struct rte_eth_dcb_tx_conf {
897  enum rte_eth_nb_tcs nb_tcs;
899  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
900 };
901 
902 struct rte_eth_vmdq_tx_conf {
903  enum rte_eth_nb_pools nb_queue_pools;
904 };
905 
918  enum rte_eth_nb_pools nb_queue_pools;
920  uint8_t default_pool;
921  uint8_t nb_pool_maps;
922  struct {
923  uint16_t vlan_id;
924  uint64_t pools;
925  } pool_map[ETH_VMDQ_MAX_VLAN_FILTERS];
928 };
929 
949  enum rte_eth_nb_pools nb_queue_pools;
951  uint8_t default_pool;
953  uint8_t nb_pool_maps;
954  uint32_t rx_mode;
955  struct {
956  uint16_t vlan_id;
957  uint64_t pools;
958  } pool_map[ETH_VMDQ_MAX_VLAN_FILTERS];
959 };
960 
965  enum rte_eth_tx_mq_mode mq_mode;
971  uint64_t offloads;
972 
973  uint16_t pvid;
974  __extension__
975  uint8_t hw_vlan_reject_tagged : 1,
977  hw_vlan_reject_untagged : 1,
979  hw_vlan_insert_pvid : 1;
982  uint64_t reserved_64s[2];
983  void *reserved_ptrs[2];
984 };
985 
1023  struct rte_mempool *mp;
1024  uint16_t length;
1025  uint16_t offset;
1026  uint32_t reserved;
1027 };
1028 
1036  /* The settings for buffer split offload. */
1037  struct rte_eth_rxseg_split split;
1038  /* The other features settings should be added here. */
1039 };
1040 
1045  struct rte_eth_thresh rx_thresh;
1046  uint16_t rx_free_thresh;
1047  uint8_t rx_drop_en;
1049  uint16_t rx_nseg;
1055  uint64_t offloads;
1064 
1065  uint64_t reserved_64s[2];
1066  void *reserved_ptrs[2];
1067 };
1068 
1073  struct rte_eth_thresh tx_thresh;
1074  uint16_t tx_rs_thresh;
1075  uint16_t tx_free_thresh;
1084  uint64_t offloads;
1085 
1086  uint64_t reserved_64s[2];
1087  void *reserved_ptrs[2];
1088 };
1089 
1098  uint16_t max_nb_queues;
1100  uint16_t max_rx_2_tx;
1102  uint16_t max_tx_2_rx;
1103  uint16_t max_nb_desc;
1104 };
1105 
1106 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
1107 
1115  uint16_t port;
1116  uint16_t queue;
1117 };
1118 
1126  uint32_t peer_count:16;
1137  uint32_t tx_explicit:1;
1138 
1150  uint32_t manual_bind:1;
1151  uint32_t reserved:14;
1152  struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1153 };
1154 
1159  uint16_t nb_max;
1160  uint16_t nb_min;
1161  uint16_t nb_align;
1171  uint16_t nb_seg_max;
1172 
1184  uint16_t nb_mtu_seg_max;
1185 };
1186 
1195 };
1196 
1203  uint32_t high_water;
1204  uint32_t low_water;
1205  uint16_t pause_time;
1206  uint16_t send_xon;
1207  enum rte_eth_fc_mode mode;
1209  uint8_t autoneg;
1210 };
1211 
1218  struct rte_eth_fc_conf fc;
1219  uint8_t priority;
1220 };
1221 
1227  RTE_TUNNEL_TYPE_NONE = 0,
1228  RTE_TUNNEL_TYPE_VXLAN,
1229  RTE_TUNNEL_TYPE_GENEVE,
1230  RTE_TUNNEL_TYPE_TEREDO,
1231  RTE_TUNNEL_TYPE_NVGRE,
1232  RTE_TUNNEL_TYPE_IP_IN_GRE,
1233  RTE_L2_TUNNEL_TYPE_E_TAG,
1234  RTE_TUNNEL_TYPE_VXLAN_GPE,
1235  RTE_TUNNEL_TYPE_ECPRI,
1236  RTE_TUNNEL_TYPE_MAX,
1237 };
1238 
1239 /* Deprecated API file for rte_eth_dev_filter_* functions */
1240 #include "rte_eth_ctrl.h"
1241 
1250 };
1251 
1259 };
1260 
1268  enum rte_fdir_mode mode;
1269  enum rte_fdir_pballoc_type pballoc;
1270  enum rte_fdir_status_mode status;
1272  uint8_t drop_queue;
1273  struct rte_eth_fdir_masks mask;
1274  struct rte_eth_fdir_flex_conf flex_conf;
1276 };
1277 
1288  uint16_t udp_port;
1289  uint8_t prot_type;
1290 };
1291 
1297  uint32_t lsc:1;
1299  uint32_t rxq:1;
1301  uint32_t rmv:1;
1302 };
1303 
1310  uint32_t link_speeds;
1317  struct rte_eth_rxmode rxmode;
1318  struct rte_eth_txmode txmode;
1319  uint32_t lpbk_mode;
1324  struct {
1325  struct rte_eth_rss_conf rss_conf;
1326  struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
1328  struct rte_eth_dcb_rx_conf dcb_rx_conf;
1330  struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
1332  } rx_adv_conf;
1333  union {
1334  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1336  struct rte_eth_dcb_tx_conf dcb_tx_conf;
1338  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1340  } tx_adv_conf;
1344  struct rte_fdir_conf fdir_conf;
1345  struct rte_intr_conf intr_conf;
1346 };
1347 
1351 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
1352 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
1353 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
1354 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
1355 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
1356 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
1357 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
1358 #define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
1359 #define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
1360 #define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
1361 #define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
1362 #define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
1363 #define DEV_RX_OFFLOAD_SCATTER 0x00002000
1364 
1369 #define DEV_RX_OFFLOAD_TIMESTAMP 0x00004000
1370 #define DEV_RX_OFFLOAD_SECURITY 0x00008000
1371 #define DEV_RX_OFFLOAD_KEEP_CRC 0x00010000
1372 #define DEV_RX_OFFLOAD_SCTP_CKSUM 0x00020000
1373 #define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM 0x00040000
1374 #define DEV_RX_OFFLOAD_RSS_HASH 0x00080000
1375 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000
1376 
1377 #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
1378  DEV_RX_OFFLOAD_UDP_CKSUM | \
1379  DEV_RX_OFFLOAD_TCP_CKSUM)
1380 #define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
1381  DEV_RX_OFFLOAD_VLAN_FILTER | \
1382  DEV_RX_OFFLOAD_VLAN_EXTEND | \
1383  DEV_RX_OFFLOAD_QINQ_STRIP)
1384 
1385 /*
1386  * If new Rx offload capabilities are defined, they also must be
1387  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1388  */
1389 
1393 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
1394 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
1395 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
1396 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
1397 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
1398 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
1399 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
1400 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
1401 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
1402 #define DEV_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200
1403 #define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400
1404 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800
1405 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000
1406 #define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
1407 #define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
1408 
1411 #define DEV_TX_OFFLOAD_MULTI_SEGS 0x00008000
1412 
1413 #define DEV_TX_OFFLOAD_MBUF_FAST_FREE 0x00010000
1414 
1418 #define DEV_TX_OFFLOAD_SECURITY 0x00020000
1419 
1424 #define DEV_TX_OFFLOAD_UDP_TNL_TSO 0x00040000
1425 
1430 #define DEV_TX_OFFLOAD_IP_TNL_TSO 0x00080000
1431 
1432 #define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM 0x00100000
1433 
1438 #define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
1439 /*
1440  * If new Tx offload capabilities are defined, they also must be
1441  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1442  */
1443 
1448 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
1449 
1450 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002
1451 
1453 /*
1454  * Fallback default preferred Rx/Tx port parameters.
1455  * These are used if an application requests default parameters
1456  * but the PMD does not provide preferred values.
1457  */
1458 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1459 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1460 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1461 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1462 
1469  uint16_t burst_size;
1470  uint16_t ring_size;
1471  uint16_t nb_queues;
1472 };
1473 
1478 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1479 
1484  const char *name;
1485  uint16_t domain_id;
1486  uint16_t port_id;
1494 };
1495 
1503  __extension__
1504  uint32_t multi_pools:1;
1505  uint32_t offset_allowed:1;
1506  uint32_t offset_align_log2:4;
1507  uint16_t max_nseg;
1508  uint16_t reserved;
1509 };
1510 
1523 };
1524 
1531  struct rte_device *device;
1532  const char *driver_name;
1533  unsigned int if_index;
1535  uint16_t min_mtu;
1536  uint16_t max_mtu;
1537  const uint32_t *dev_flags;
1538  uint32_t min_rx_bufsize;
1539  uint32_t max_rx_pktlen;
1542  uint16_t max_rx_queues;
1543  uint16_t max_tx_queues;
1544  uint32_t max_mac_addrs;
1545  uint32_t max_hash_mac_addrs;
1547  uint16_t max_vfs;
1548  uint16_t max_vmdq_pools;
1549  struct rte_eth_rxseg_capa rx_seg_capa;
1558  uint16_t reta_size;
1560  uint8_t hash_key_size;
1563  struct rte_eth_rxconf default_rxconf;
1564  struct rte_eth_txconf default_txconf;
1565  uint16_t vmdq_queue_base;
1566  uint16_t vmdq_queue_num;
1567  uint16_t vmdq_pool_base;
1568  struct rte_eth_desc_lim rx_desc_lim;
1569  struct rte_eth_desc_lim tx_desc_lim;
1570  uint32_t speed_capa;
1572  uint16_t nb_rx_queues;
1573  uint16_t nb_tx_queues;
1575  struct rte_eth_dev_portconf default_rxportconf;
1577  struct rte_eth_dev_portconf default_txportconf;
1579  uint64_t dev_capa;
1584  struct rte_eth_switch_info switch_info;
1585 
1586  uint64_t reserved_64s[2];
1587  void *reserved_ptrs[2];
1588 };
1589 
1593 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1594 #define RTE_ETH_QUEUE_STATE_STARTED 1
1595 #define RTE_ETH_QUEUE_STATE_HAIRPIN 2
1596 
1602  struct rte_mempool *mp;
1603  struct rte_eth_rxconf conf;
1604  uint8_t scattered_rx;
1605  uint8_t queue_state;
1606  uint16_t nb_desc;
1607  uint16_t rx_buf_size;
1609 
1615  struct rte_eth_txconf conf;
1616  uint16_t nb_desc;
1617  uint8_t queue_state;
1619 
1620 /* Generic Burst mode flag definition, values can be ORed. */
1621 
1627 #define RTE_ETH_BURST_FLAG_PER_QUEUE (1ULL << 0)
1628 
1634  uint64_t flags;
1636 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1637  char info[RTE_ETH_BURST_MODE_INFO_SIZE];
1638 };
1639 
1641 #define RTE_ETH_XSTATS_NAME_SIZE 64
1642 
1653  uint64_t id;
1654  uint64_t value;
1655 };
1656 
1673 };
1674 
1675 #define ETH_DCB_NUM_TCS 8
1676 #define ETH_MAX_VMDQ_POOL 64
1677 
1684  struct {
1685  uint16_t base;
1686  uint16_t nb_queue;
1687  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1689  struct {
1690  uint16_t base;
1691  uint16_t nb_queue;
1692  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1693 };
1694 
1700  uint8_t nb_tcs;
1701  uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES];
1702  uint8_t tc_bws[ETH_DCB_NUM_TCS];
1705 };
1706 
1716 };
1717 
/* Translate an RTE_ETH_FEC_* mode value into its capability bit. */
#define RTE_ETH_FEC_MODE_TO_CAPA(x) (1U << (x))

/* Build a FEC capability bit from a mode name token (e.g. NOFEC, RS). */
#define RTE_ETH_FEC_MODE_CAPA_MASK(x) (1U << (RTE_ETH_FEC_ ## x))

/** FEC capabilities supported at one particular link speed. */
struct rte_eth_fec_capa {
	uint32_t speed;  /**< Link speed (ETH_SPEED_NUM_*) */
	uint32_t capa;   /**< Bitmask of RTE_ETH_FEC_MODE_TO_CAPA() values */
};
1729 
/** Sentinel meaning "all ports" in APIs that accept a port id. */
#define RTE_ETH_ALL RTE_MAX_ETHPORTS

/*
 * Validity-check helpers: log and bail out of the calling function when
 * port_id does not name a valid port (do/while(0) keeps them
 * statement-safe).
 */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return retval; \
	} \
} while (0)

#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return; \
	} \
} while (0)
1746 
/* L2 tunnel configuration mask bits. */
#define ETH_L2_TUNNEL_ENABLE_MASK     0x00000001
#define ETH_L2_TUNNEL_INSERTION_MASK  0x00000002
#define ETH_L2_TUNNEL_STRIPPING_MASK  0x00000004
#define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1759 
/**
 * User callback invoked on a burst of received packets; may filter or
 * modify the array and returns the number of packets to hand onward
 * (at most @p max_pkts).
 */
typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
	void *user_param);

/**
 * User callback invoked on a burst of packets about to be transmitted;
 * returns the number of packets to actually send.
 */
typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1808 
1819 };
1820 
/** Per-device SR-IOV state (VMDq pool / queue defaults). */
struct rte_eth_dev_sriov {
	uint8_t active;          /**< Non-zero when SR-IOV is enabled */
	uint8_t nb_q_per_pool;   /**< Rx queue count per pool */
	uint16_t def_vmdq_idx;   /**< Default pool index of the PF */
	uint16_t def_pool_q_idx; /**< Default queue index of the pool */
};
#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)

#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
1830 
#define RTE_ETH_DEV_NO_OWNER 0  /**< Owner id meaning "unowned" */

#define RTE_ETH_MAX_OWNER_NAME_LEN 64  /**< Max owner name length, incl. NUL */

/** Identity of an entity that has claimed ownership of a port. */
struct rte_eth_dev_owner {
	uint64_t id;                          /**< Unique owner identifier */
	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< NUL-terminated owner name */
};
1839 
/* Device flag bits (rte_eth_dev_data.dev_flags). */
#define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE   0x0001  /**< PMD flow ops are thread-safe */
#define RTE_ETH_DEV_INTR_LSC               0x0002  /**< Link status change interrupt */
#define RTE_ETH_DEV_BONDED_SLAVE           0x0004  /**< Device is a bonding slave */
#define RTE_ETH_DEV_INTR_RMV               0x0008  /**< Device removal interrupt */
#define RTE_ETH_DEV_REPRESENTOR            0x0010  /**< Device is a port representor */
#define RTE_ETH_DEV_NOLIVE_MAC_ADDR        0x0020  /**< MAC set only before start */
#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS  0x0040  /**< Queue xstats filled by ethdev layer */
1857 
1869 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
1870  const uint64_t owner_id);
1871 
/** Iterate over all valid port ids owned by owner id @p o. */
#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
	for (p = rte_eth_find_next_owned_by(0, o); \
	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
	     p = rte_eth_find_next_owned_by(p + 1, o))
1879 
1888 uint16_t rte_eth_find_next(uint16_t port_id);
1889 
/** Iterate over all valid, unowned port ids. */
#define RTE_ETH_FOREACH_DEV(p) \
	RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
1895 
1910 __rte_experimental
1911 uint16_t
1912 rte_eth_find_next_of(uint16_t port_id_start,
1913  const struct rte_device *parent);
1914 
1923 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
1924  for (port_id = rte_eth_find_next_of(0, parent); \
1925  port_id < RTE_MAX_ETHPORTS; \
1926  port_id = rte_eth_find_next_of(port_id + 1, parent))
1927 
1942 __rte_experimental
1943 uint16_t
1944 rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
1945 
1956 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
1957  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
1958  port_id < RTE_MAX_ETHPORTS; \
1959  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
1960 
1974 __rte_experimental
1975 int rte_eth_dev_owner_new(uint64_t *owner_id);
1976 
1990 __rte_experimental
1991 int rte_eth_dev_owner_set(const uint16_t port_id,
1992  const struct rte_eth_dev_owner *owner);
1993 
2007 __rte_experimental
2008 int rte_eth_dev_owner_unset(const uint16_t port_id,
2009  const uint64_t owner_id);
2010 
2022 __rte_experimental
2023 int rte_eth_dev_owner_delete(const uint64_t owner_id);
2024 
2038 __rte_experimental
2039 int rte_eth_dev_owner_get(const uint16_t port_id,
2040  struct rte_eth_dev_owner *owner);
2041 
2052 uint16_t rte_eth_dev_count_avail(void);
2053 
2062 uint16_t rte_eth_dev_count_total(void);
2063 
2075 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2076 
2085 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2086 
2095 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2096 
2136 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2137  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2138 
2150 __rte_experimental
2151 int
2152 rte_eth_dev_is_removed(uint16_t port_id);
2153 
2216 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2217  uint16_t nb_rx_desc, unsigned int socket_id,
2218  const struct rte_eth_rxconf *rx_conf,
2219  struct rte_mempool *mb_pool);
2220 
2248 __rte_experimental
2250  (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2251  const struct rte_eth_hairpin_conf *conf);
2252 
2301 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2302  uint16_t nb_tx_desc, unsigned int socket_id,
2303  const struct rte_eth_txconf *tx_conf);
2304 
2330 __rte_experimental
2332  (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2333  const struct rte_eth_hairpin_conf *conf);
2334 
2361 __rte_experimental
2362 int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2363  size_t len, uint32_t direction);
2364 
2387 __rte_experimental
2388 int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2389 
2414 __rte_experimental
2415 int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2416 
2427 int rte_eth_dev_socket_id(uint16_t port_id);
2428 
2438 int rte_eth_dev_is_valid_port(uint16_t port_id);
2439 
2457 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2458 
2475 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2476 
2494 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2495 
2512 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2513 
2533 int rte_eth_dev_start(uint16_t port_id);
2534 
2545 int rte_eth_dev_stop(uint16_t port_id);
2546 
2559 int rte_eth_dev_set_link_up(uint16_t port_id);
2560 
2570 int rte_eth_dev_set_link_down(uint16_t port_id);
2571 
2582 int rte_eth_dev_close(uint16_t port_id);
2583 
2621 int rte_eth_dev_reset(uint16_t port_id);
2622 
2634 int rte_eth_promiscuous_enable(uint16_t port_id);
2635 
2647 int rte_eth_promiscuous_disable(uint16_t port_id);
2648 
2659 int rte_eth_promiscuous_get(uint16_t port_id);
2660 
2672 int rte_eth_allmulticast_enable(uint16_t port_id);
2673 
2685 int rte_eth_allmulticast_disable(uint16_t port_id);
2686 
2697 int rte_eth_allmulticast_get(uint16_t port_id);
2698 
2716 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
2717 
2732 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
2733 
2747 __rte_experimental
2748 const char *rte_eth_link_speed_to_str(uint32_t link_speed);
2749 
2768 __rte_experimental
2769 int rte_eth_link_to_str(char *str, size_t len,
2770  const struct rte_eth_link *eth_link);
2771 
2789 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
2790 
2802 int rte_eth_stats_reset(uint16_t port_id);
2803 
2833 int rte_eth_xstats_get_names(uint16_t port_id,
2834  struct rte_eth_xstat_name *xstats_names,
2835  unsigned int size);
2836 
2866 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2867  unsigned int n);
2868 
2891 int
2892 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2893  struct rte_eth_xstat_name *xstats_names, unsigned int size,
2894  uint64_t *ids);
2895 
2919 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2920  uint64_t *values, unsigned int size);
2921 
2941 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2942  uint64_t *id);
2943 
2956 int rte_eth_xstats_reset(uint16_t port_id);
2957 
2976 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
2977  uint16_t tx_queue_id, uint8_t stat_idx);
2978 
2997 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
2998  uint16_t rx_queue_id,
2999  uint8_t stat_idx);
3000 
3014 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3015 
3059 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
3060 
3081 int rte_eth_dev_fw_version_get(uint16_t port_id,
3082  char *fw_version, size_t fw_size);
3083 
3123 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3124  uint32_t *ptypes, int num);
3158 __rte_experimental
3159 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3160  uint32_t *set_ptypes, unsigned int num);
3161 
3174 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3175 
3193 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3194 
3214 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3215 
3234 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3235  int on);
3236 
3253 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3254  enum rte_vlan_type vlan_type,
3255  uint16_t tag_type);
3256 
3274 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3275 
3289 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3290 
3305 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3306 
3307 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3308  void *userdata);
3309 
3315  buffer_tx_error_fn error_callback;
3316  void *error_userdata;
3317  uint16_t size;
3318  uint16_t length;
3319  struct rte_mbuf *pkts[];
3321 };
3322 
3329 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3330  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3331 
3342 int
3343 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3344 
3369 int
3371  buffer_tx_error_fn callback, void *userdata);
3372 
3395 void
3396 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3397  void *userdata);
3398 
3422 void
3423 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3424  void *userdata);
3425 
3451 int
3452 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3453 
3469 };
3470 
3478  uint64_t metadata;
3492 };
3493 
3512 };
3513 
3514 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
3515  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
3535 int rte_eth_dev_callback_register(uint16_t port_id,
3536  enum rte_eth_event_type event,
3537  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3538 
3557 int rte_eth_dev_callback_unregister(uint16_t port_id,
3558  enum rte_eth_event_type event,
3559  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3560 
3582 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
3583 
3604 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
3605 
3623 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
3624 
3646 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3647  int epfd, int op, void *data);
3648 
3666 __rte_experimental
3667 int
3668 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
3669 
3683 int rte_eth_led_on(uint16_t port_id);
3684 
3698 int rte_eth_led_off(uint16_t port_id);
3699 
3728 __rte_experimental
3729 int rte_eth_fec_get_capability(uint16_t port_id,
3730  struct rte_eth_fec_capa *speed_fec_capa,
3731  unsigned int num);
3732 
3756 __rte_experimental
3757 int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
3758 
3779 __rte_experimental
3780 int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
3781 
3796 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
3797  struct rte_eth_fc_conf *fc_conf);
3798 
3813 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
3814  struct rte_eth_fc_conf *fc_conf);
3815 
3831 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3832  struct rte_eth_pfc_conf *pfc_conf);
3833 
3852 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
3853  uint32_t pool);
3854 
3869 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
3870  struct rte_ether_addr *mac_addr);
3871 
3885 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
3886  struct rte_ether_addr *mac_addr);
3887 
3905 int rte_eth_dev_rss_reta_update(uint16_t port_id,
3906  struct rte_eth_rss_reta_entry64 *reta_conf,
3907  uint16_t reta_size);
3908 
3927 int rte_eth_dev_rss_reta_query(uint16_t port_id,
3928  struct rte_eth_rss_reta_entry64 *reta_conf,
3929  uint16_t reta_size);
3930 
3950 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3951  uint8_t on);
3952 
3971 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
3972 
3995 int rte_eth_mirror_rule_set(uint16_t port_id,
3996  struct rte_eth_mirror_conf *mirror_conf,
3997  uint8_t rule_id,
3998  uint8_t on);
3999 
4014 int rte_eth_mirror_rule_reset(uint16_t port_id,
4015  uint8_t rule_id);
4016 
4033 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4034  uint16_t tx_rate);
4035 
4050 int rte_eth_dev_rss_hash_update(uint16_t port_id,
4051  struct rte_eth_rss_conf *rss_conf);
4052 
4068 int
4069 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4070  struct rte_eth_rss_conf *rss_conf);
4071 
4096 int
4097 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4098  struct rte_eth_udp_tunnel *tunnel_udp);
4099 
4119 int
4120 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4121  struct rte_eth_udp_tunnel *tunnel_udp);
4122 
4137 int rte_eth_dev_get_dcb_info(uint16_t port_id,
4138  struct rte_eth_dcb_info *dcb_info);
4139 
4140 struct rte_eth_rxtx_callback;
4141 
4167 const struct rte_eth_rxtx_callback *
4168 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4169  rte_rx_callback_fn fn, void *user_param);
4170 
4197 const struct rte_eth_rxtx_callback *
4198 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4199  rte_rx_callback_fn fn, void *user_param);
4200 
4226 const struct rte_eth_rxtx_callback *
4227 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4228  rte_tx_callback_fn fn, void *user_param);
4229 
4263 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4264  const struct rte_eth_rxtx_callback *user_cb);
4265 
4299 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4300  const struct rte_eth_rxtx_callback *user_cb);
4301 
4321 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4322  struct rte_eth_rxq_info *qinfo);
4323 
4343 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4344  struct rte_eth_txq_info *qinfo);
4345 
4364 __rte_experimental
4365 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4366  struct rte_eth_burst_mode *mode);
4367 
4386 __rte_experimental
4387 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4388  struct rte_eth_burst_mode *mode);
4389 
4410 __rte_experimental
4411 int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
4412  struct rte_power_monitor_cond *pmc);
4413 
4432 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
4433 
4446 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
4447 
4464 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4465 
4482 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4483 
4502 __rte_experimental
4503 int
4504 rte_eth_dev_get_module_info(uint16_t port_id,
4505  struct rte_eth_dev_module_info *modinfo);
4506 
4526 __rte_experimental
4527 int
4528 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4529  struct rte_dev_eeprom_info *info);
4530 
4550 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4551  struct rte_ether_addr *mc_addr_set,
4552  uint32_t nb_mc_addr);
4553 
4566 int rte_eth_timesync_enable(uint16_t port_id);
4567 
4580 int rte_eth_timesync_disable(uint16_t port_id);
4581 
4600 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
4601  struct timespec *timestamp, uint32_t flags);
4602 
4618 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4619  struct timespec *timestamp);
4620 
4638 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
4639 
4655 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
4656 
4675 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
4676 
4722 __rte_experimental
4723 int
4724 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
4725 
4741 int
4742 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
4743 
4759 int
4760 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
4761 
4778 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4779  uint16_t *nb_rx_desc,
4780  uint16_t *nb_tx_desc);
4781 
4796 int
4797 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
4798 
4808 void *
4809 rte_eth_dev_get_sec_ctx(uint16_t port_id);
4810 
4826 __rte_experimental
4827 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
4828  struct rte_eth_hairpin_cap *cap);
4829 
4839  int pf;
4840  __extension__
4841  union {
4842  int vf;
4843  int sf;
4844  };
4845  uint32_t id_base;
4846  uint32_t id_end;
4847  char name[RTE_DEV_NAME_MAX_LEN];
4848 };
4849 
4857  uint16_t controller;
4858  uint16_t pf;
4859  uint32_t nb_ranges_alloc;
4860  uint32_t nb_ranges;
4861  struct rte_eth_representor_range ranges[];
4862 };
4863 
4887 __rte_experimental
4888 int rte_eth_representor_info_get(uint16_t port_id,
4889  struct rte_eth_representor_info *info);
4890 
4891 #include <rte_ethdev_core.h>
4892 
/**
 * Retrieve a burst of input packets from a receive queue of an Ethernet
 * device and run any post-Rx callbacks registered on that queue.
 *
 * @param port_id  Port identifier of the Ethernet device.
 * @param queue_id Index of the receive queue on that port.
 * @param rx_pkts  Array the received rte_mbuf pointers are written into.
 * @param nb_pkts  Maximum number of packets to retrieve.
 * @return
 *   Number of packets actually retrieved (possibly adjusted by callbacks);
 *   0 on debug-build validation failure.
 */
static inline uint16_t
rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	uint16_t nb_rx;

#ifdef RTE_ETHDEV_DEBUG_RX
	/* Parameter validation is compiled in only for debug Rx builds;
	 * release builds trust the caller for fast-path performance.
	 */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
		return 0;
	}
#endif
	/* Dispatch to the driver's receive function for this queue. */
	nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
				     rx_pkts, nb_pkts);

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	struct rte_eth_rxtx_callback *cb;

	/* __ATOMIC_RELEASE memory order was used when the
	 * call back was inserted into the list.
	 * Since there is a clear dependency between loading
	 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
	 * not required.
	 */
	cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id],
			     __ATOMIC_RELAXED);

	/* Walk the post-Rx callback list in order; each callback may
	 * rewrite the packet array and change the returned count.
	 */
	if (unlikely(cb != NULL)) {
		do {
			nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
					  nb_pkts, cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
	return nb_rx;
}
5023 
/**
 * Query the driver's descriptor count for an Rx queue (per the ethdev
 * contract, the number of used descriptors of the queue).
 *
 * @param port_id  Port identifier of the Ethernet device.
 * @param queue_id Index of the receive queue on that port.
 * @return
 *   - The driver-reported count on success.
 *   - (-ENODEV)  if port_id is invalid.
 *   - (-ENOTSUP) if the driver does not implement rx_queue_count.
 *   - (-EINVAL)  if queue_id is out of range or the queue is not set up.
 */
static inline int
rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	/* Validate the port before indexing the global device array. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_queue_count, -ENOTSUP);
	/* Reject out-of-range or unconfigured queues. */
	if (queue_id >= dev->data->nb_rx_queues ||
	    dev->data->rx_queues[queue_id] == NULL)
		return -EINVAL;

	return (int)(*dev->rx_queue_count)(dev, queue_id);
}
5051 
5067 __rte_deprecated
5068 static inline int
5069 rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
5070 {
5071  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5072  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5073  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_done, -ENOTSUP);
5074  return (*dev->rx_descriptor_done)(dev->data->rx_queues[queue_id], offset);
5075 }
5076 
5077 #define RTE_ETH_RX_DESC_AVAIL 0
5078 #define RTE_ETH_RX_DESC_DONE 1
5079 #define RTE_ETH_RX_DESC_UNAVAIL 2
/**
 * Check the status of the Rx descriptor at @p offset in a receive queue.
 *
 * @param port_id  Port identifier of the Ethernet device.
 * @param queue_id Index of the receive queue on that port.
 * @param offset   Descriptor offset within the queue.
 * @return
 *   - A RTE_ETH_RX_DESC_* status code from the driver on success.
 *   - (-ENODEV)  if port_id (or, debug builds only, queue_id) is invalid.
 *   - (-ENOTSUP) if the driver does not implement rx_descriptor_status.
 */
static inline int
rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
	uint16_t offset)
{
	struct rte_eth_dev *dev;
	void *rxq;

#ifdef RTE_ETHDEV_DEBUG_RX
	/* Port validation happens only in debug Rx builds. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
	dev = &rte_eth_devices[port_id];
#ifdef RTE_ETHDEV_DEBUG_RX
	/* Queue bounds are likewise checked only in debug builds. */
	if (queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;
#endif
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_status, -ENOTSUP);
	rxq = dev->data->rx_queues[queue_id];

	return (*dev->rx_descriptor_status)(rxq, offset);
}
5134 
5135 #define RTE_ETH_TX_DESC_FULL 0
5136 #define RTE_ETH_TX_DESC_DONE 1
5137 #define RTE_ETH_TX_DESC_UNAVAIL 2
/**
 * Check the status of the Tx descriptor at @p offset in a transmit queue.
 *
 * @param port_id  Port identifier of the Ethernet device.
 * @param queue_id Index of the transmit queue on that port.
 * @param offset   Descriptor offset within the queue.
 * @return
 *   - A RTE_ETH_TX_DESC_* status code from the driver on success.
 *   - (-ENODEV)  if port_id (or, debug builds only, queue_id) is invalid.
 *   - (-ENOTSUP) if the driver does not implement tx_descriptor_status.
 */
static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
	uint16_t queue_id, uint16_t offset)
{
	struct rte_eth_dev *dev;
	void *txq;

#ifdef RTE_ETHDEV_DEBUG_TX
	/* Port validation happens only in debug Tx builds. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
	dev = &rte_eth_devices[port_id];
#ifdef RTE_ETHDEV_DEBUG_TX
	/* Queue bounds are likewise checked only in debug builds. */
	if (queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;
#endif
	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_descriptor_status, -ENOTSUP);
	txq = dev->data->tx_queues[queue_id];

	return (*dev->tx_descriptor_status)(txq, offset);
}
5191 
/**
 * Send a burst of output packets on a transmit queue of an Ethernet device,
 * running any pre-Tx callbacks registered on that queue first.
 *
 * @param port_id  Port identifier of the Ethernet device.
 * @param queue_id Index of the transmit queue on that port.
 * @param tx_pkts  Array of rte_mbuf pointers to transmit.
 * @param nb_pkts  Number of packets in @p tx_pkts.
 * @return
 *   Number of packets the driver actually accepted for transmission;
 *   0 on debug-build validation failure.
 */
static inline uint16_t
rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_ETHDEV_DEBUG_TX
	/* Parameter validation is compiled in only for debug Tx builds;
	 * release builds trust the caller for fast-path performance.
	 */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
		return 0;
	}
#endif

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	struct rte_eth_rxtx_callback *cb;

	/* __ATOMIC_RELEASE memory order was used when the
	 * call back was inserted into the list.
	 * Since there is a clear dependency between loading
	 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
	 * not required.
	 */
	cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id],
			     __ATOMIC_RELAXED);

	/* Walk the pre-Tx callback list in order; each callback may
	 * rewrite the packet array and change the count passed on.
	 */
	if (unlikely(cb != NULL)) {
		do {
			nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
					    cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts,
				  nb_pkts);
	/* Dispatch to the driver's transmit function for this queue. */
	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
}
5299 
5354 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
5355 
/**
 * Run the driver's transmit-preparation hook over a burst of packets
 * before handing them to rte_eth_tx_burst().
 *
 * @param port_id  Port identifier of the Ethernet device.
 * @param queue_id Index of the transmit queue on that port.
 * @param tx_pkts  Array of rte_mbuf pointers to prepare.
 * @param nb_pkts  Number of packets in @p tx_pkts.
 * @return
 *   Number of packets successfully prepared. In debug Tx builds, returns 0
 *   and sets rte_errno (ENODEV/EINVAL) on invalid port or queue. If the
 *   driver provides no tx_pkt_prepare hook, all packets are accepted as-is.
 */
static inline uint16_t
rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

#ifdef RTE_ETHDEV_DEBUG_TX
	/* Debug-only port validation; failures are reported via rte_errno. */
	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
		rte_errno = ENODEV;
		return 0;
	}
#endif

	dev = &rte_eth_devices[port_id];

#ifdef RTE_ETHDEV_DEBUG_TX
	/* Debug-only queue bounds check, likewise via rte_errno. */
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
		rte_errno = EINVAL;
		return 0;
	}
#endif

	/* Drivers without a prepare hook accept every packet unchanged. */
	if (!dev->tx_pkt_prepare)
		return nb_pkts;

	return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
			tx_pkts, nb_pkts);
}
5386 
5387 #else
5388 
/*
 * Native no-op implementation for compilation targets that do not require
 * any preparation steps, where a functional no-op could introduce an
 * unnecessary performance drop.
 *
 * Generally it is not a good idea to turn this on globally, and it should
 * not be used if the behavior of tx_preparation can change.
 */
5397 
/**
 * No-op variant of rte_eth_tx_prepare(), selected when
 * RTE_ETHDEV_TX_PREPARE_NOOP is defined: performs no validation or
 * driver call and reports every packet as prepared.
 *
 * @return @p nb_pkts unchanged.
 */
static inline uint16_t
rte_eth_tx_prepare(__rte_unused uint16_t port_id,
		__rte_unused uint16_t queue_id,
		__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	return nb_pkts;
}
5405 
5406 #endif
5407 
5430 static inline uint16_t
5431 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
5432  struct rte_eth_dev_tx_buffer *buffer)
5433 {
5434  uint16_t sent;
5435  uint16_t to_send = buffer->length;
5436 
5437  if (to_send == 0)
5438  return 0;
5439 
5440  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
5441 
5442  buffer->length = 0;
5443 
5444  /* All packets sent, or to be dealt with by callback below */
5445  if (unlikely(sent != to_send))
5446  buffer->error_callback(&buffer->pkts[sent],
5447  (uint16_t)(to_send - sent),
5448  buffer->error_userdata);
5449 
5450  return sent;
5451 }
5452 
5483 static __rte_always_inline uint16_t
5484 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
5485  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
5486 {
5487  buffer->pkts[buffer->length++] = tx_pkt;
5488  if (buffer->length < buffer->size)
5489  return 0;
5490 
5491  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
5492 }
5493 
5494 #ifdef __cplusplus
5495 }
5496 #endif
5497 
5498 #endif /* _RTE_ETHDEV_H_ */
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1572
int rte_eth_dev_stop(uint16_t port_id)
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_promiscuous_disable(uint16_t port_id)
__extension__ uint32_t multi_pools
Definition: rte_ethdev.h:1504
uint16_t link_duplex
Definition: rte_ethdev.h:324
#define ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:786
uint32_t rmv
Definition: rte_ethdev.h:1301
#define __rte_always_inline
Definition: rte_common.h:228
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:1074
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
uint16_t nb_desc
Definition: rte_ethdev.h:1616
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1550
const uint32_t * dev_flags
Definition: rte_ethdev.h:1537
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
__rte_experimental int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
#define __rte_cache_min_aligned
Definition: rte_common.h:405
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:5357
int rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
rte_eth_nb_tcs
Definition: rte_ethdev.h:867
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:275
__rte_experimental int rte_eth_dev_owner_new(uint64_t *owner_id)
__rte_experimental int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
uint16_t rte_eth_find_next(uint16_t port_id)
__rte_experimental int rte_eth_dev_is_removed(uint16_t port_id)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
int rte_eth_led_off(uint16_t port_id)
rte_fdir_pballoc_type
Definition: rte_ethdev.h:1246
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:5115
__rte_experimental uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
uint64_t imissed
Definition: rte_ethdev.h:259
uint32_t low_water
Definition: rte_ethdev.h:1204
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
uint32_t max_rx_pkt_len
Definition: rte_ethdev.h:413
uint8_t rss_key_len
Definition: rte_ethdev.h:466
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint16_t tx_rate)
uint8_t hthresh
Definition: rte_ethdev.h:345
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
__rte_experimental int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1554
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
uint16_t reta_size
Definition: rte_ethdev.h:1558
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint32_t lpbk_mode
Definition: rte_ethdev.h:1319
#define rte_errno
Definition: rte_errno.h:29
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint32_t link_speeds
Definition: rte_ethdev.h:1310
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1556
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:393
rte_eth_fc_mode
Definition: rte_ethdev.h:1190
int rte_eth_mirror_rule_set(uint16_t port_id, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on)
uint8_t enable_default_pool
Definition: rte_ethdev.h:919
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:1184
#define __rte_unused
Definition: rte_common.h:118
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:273
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
uint64_t opackets
Definition: rte_ethdev.h:256
uint8_t hash_key_size
Definition: rte_ethdev.h:1560
int rte_eth_dev_close(uint16_t port_id)
struct rte_mempool * mp
Definition: rte_ethdev.h:1023
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
uint16_t split_hdr_size
Definition: rte_ethdev.h:416
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
struct rte_mempool * mp
Definition: rte_ethdev.h:1602
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1343
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:271
const char * name
Definition: rte_ethdev.h:1484
uint8_t queue_state
Definition: rte_ethdev.h:1617
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
uint32_t rxq
Definition: rte_ethdev.h:1299
int rte_eth_dev_set_link_up(uint16_t port_id)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
__rte_experimental int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
__rte_experimental int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1566
uint8_t rx_deferred_start
Definition: rte_ethdev.h:1048
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:3319
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:3514
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:267
uint32_t high_water
Definition: rte_ethdev.h:1203
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1641
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
int rte_eth_timesync_disable(uint16_t port_id)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint64_t offloads
Definition: rte_ethdev.h:1055
uint32_t link_speed
Definition: rte_ethdev.h:323
__rte_experimental int rte_eth_representor_info_get(uint16_t port_id, struct rte_eth_representor_info *info)
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num)
uint16_t send_xon
Definition: rte_ethdev.h:1206
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_allmulticast_enable(uint16_t port_id)
uint32_t offset_allowed
Definition: rte_ethdev.h:1505
__rte_experimental int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, struct rte_power_monitor_cond *pmc)
#define unlikely(x)
uint64_t ibytes
Definition: rte_ethdev.h:257
uint32_t offset_align_log2
Definition: rte_ethdev.h:1506
uint64_t offloads
Definition: rte_ethdev.h:1084
uint16_t max_nb_queues
Definition: rte_ethdev.h:1098
uint64_t oerrors
Definition: rte_ethdev.h:264
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
uint16_t max_mtu
Definition: rte_ethdev.h:1536
uint64_t offloads
Definition: rte_ethdev.h:422
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
uint16_t link_autoneg
Definition: rte_ethdev.h:325
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
uint16_t tx_free_thresh
Definition: rte_ethdev.h:1075
uint16_t nb_desc
Definition: rte_ethdev.h:1606
__rte_experimental uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:4981
int rte_eth_allmulticast_disable(uint16_t port_id)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_start(uint16_t port_id)
uint16_t rx_buf_size
Definition: rte_ethdev.h:1607
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1548
uint8_t scattered_rx
Definition: rte_ethdev.h:1604
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
uint64_t offloads
Definition: rte_ethdev.h:971
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1567
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1552
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:269
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
uint16_t min_mtu
Definition: rte_ethdev.h:1535
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:1806
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
uint64_t obytes
Definition: rte_ethdev.h:258
__rte_experimental int rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
uint8_t enable_loop_back
Definition: rte_ethdev.h:952
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
#define ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:787
rte_eth_fec_mode
Definition: rte_ethdev.h:1711
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
uint16_t max_tx_queues
Definition: rte_ethdev.h:1543
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_xstats_reset(uint16_t port_id)
rte_eth_dev_state
Definition: rte_ethdev.h:1812
uint16_t rx_free_thresh
Definition: rte_ethdev.h:1046
uint64_t dev_capa
Definition: rte_ethdev.h:1579
uint64_t ierrors
Definition: rte_ethdev.h:263
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction)
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1562
uint16_t rte_eth_dev_count_total(void)
int rte_eth_promiscuous_enable(uint16_t port_id)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
union rte_eth_rxseg * rx_seg
Definition: rte_ethdev.h:1063
rte_vlan_type
Definition: rte_ethdev.h:432
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
__rte_experimental int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
uint16_t nb_seg_max
Definition: rte_ethdev.h:1171
uint64_t ipackets
Definition: rte_ethdev.h:255
uint16_t max_vfs
Definition: rte_ethdev.h:1547
uint16_t pause_time
Definition: rte_ethdev.h:1205
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
uint64_t rx_nombuf
Definition: rte_ethdev.h:265
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:5484
uint8_t queue_state
Definition: rte_ethdev.h:1605
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
#define ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:352
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1565
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:3458
rte_eth_nb_pools
Definition: rte_ethdev.h:876
#define ETH_MIRROR_MAX_VLANS
Definition: rte_ethdev.h:822
uint16_t nb_align
Definition: rte_ethdev.h:1161
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:360
__rte_experimental const char * rte_eth_link_speed_to_str(uint32_t link_speed)
__rte_experimental int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
const char * driver_name
Definition: rte_ethdev.h:1532
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:5038
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_socket_id(uint16_t port_id)
uint8_t enable_default_pool
Definition: rte_ethdev.h:950
__extension__ struct rte_eth_link __rte_aligned(8)
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1573
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1544
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
rte_eth_tunnel_type
Definition: rte_ethdev.h:1226
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint64_t value
Definition: rte_ethdev.h:1654
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition: rte_ethdev.h:611
__rte_experimental int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
__rte_experimental int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1539
__rte_experimental int rte_eth_dev_owner_delete(const uint64_t owner_id)
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
static __rte_deprecated int rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:5069
uint64_t rss_hf
Definition: rte_ethdev.h:467
uint64_t id
Definition: rte_ethdev.h:1653
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
unsigned int if_index
Definition: rte_ethdev.h:1533
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:1208
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:1782
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
rte_fdir_mode
Definition: rte_eth_ctrl.h:425
uint8_t * rss_key
Definition: rte_ethdev.h:465
rte_fdir_status_mode
Definition: rte_ethdev.h:1255
uint8_t tx_deferred_start
Definition: rte_ethdev.h:1078
uint8_t wthresh
Definition: rte_ethdev.h:346
uint16_t max_rx_queues
Definition: rte_ethdev.h:1542
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
rte_eth_representor_type
Definition: rte_ethdev.h:1518
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:415
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
int rte_eth_allmulticast_get(uint16_t port_id)
uint8_t rx_drop_en
Definition: rte_ethdev.h:1047
int rte_eth_dev_is_valid_port(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint16_t rx_nseg
Definition: rte_ethdev.h:1049
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1541
int rte_eth_timesync_enable(uint16_t port_id)
uint8_t pthresh
Definition: rte_ethdev.h:344
uint32_t speed_capa
Definition: rte_ethdev.h:1570
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:5259
uint8_t drop_queue
Definition: rte_ethdev.h:1272
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_experimental int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1538
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
__rte_experimental int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
uint32_t lsc
Definition: rte_ethdev.h:1297
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:5431
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
rte_eth_event_type
Definition: rte_ethdev.h:3497