DPDK  20.11.6
rte_ethdev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
142 #ifdef __cplusplus
143 extern "C" {
144 #endif
145 
146 #include <stdint.h>
147 
148 /* Use this macro to check if LRO API is supported */
149 #define RTE_ETHDEV_HAS_LRO_SUPPORT
150 
151 #include <rte_compat.h>
152 #include <rte_log.h>
153 #include <rte_interrupts.h>
154 #include <rte_dev.h>
155 #include <rte_devargs.h>
156 #include <rte_errno.h>
157 #include <rte_common.h>
158 #include <rte_config.h>
159 #include <rte_ether.h>
160 
161 #include "rte_ethdev_trace_fp.h"
162 #include "rte_dev_info.h"
163 
164 extern int rte_eth_dev_logtype;
165 
166 #define RTE_ETHDEV_LOG(level, ...) \
167  rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
168 
169 struct rte_mbuf;
170 
187 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
188 
203 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
204 
217 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
218 
/*
 * Iterate over all ethdev ports matching the devargs pattern: initializes
 * 'iter' once, then advances 'id' with rte_eth_iterator_next() until it
 * returns RTE_MAX_ETHPORTS (the end-of-iteration sentinel).
 * NOTE(review): rte_eth_iterator_cleanup() is presumably not needed after a
 * completed loop, only on early break — confirm against the iterator docs.
 */
232 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
233  for (rte_eth_iterator_init(iter, devargs), \
234  id = rte_eth_iterator_next(iter); \
235  id != RTE_MAX_ETHPORTS; \
236  id = rte_eth_iterator_next(iter))
237 
245  uint64_t ipackets;
246  uint64_t opackets;
247  uint64_t ibytes;
248  uint64_t obytes;
249  uint64_t imissed;
253  uint64_t ierrors;
254  uint64_t oerrors;
255  uint64_t rx_nombuf;
256  /* Queue stats are limited to max 256 queues */
257  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
259  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
261  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
263  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
265  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
267 };
268 
272 #define ETH_LINK_SPEED_AUTONEG (0 << 0)
273 #define ETH_LINK_SPEED_FIXED (1 << 0)
274 #define ETH_LINK_SPEED_10M_HD (1 << 1)
275 #define ETH_LINK_SPEED_10M (1 << 2)
276 #define ETH_LINK_SPEED_100M_HD (1 << 3)
277 #define ETH_LINK_SPEED_100M (1 << 4)
278 #define ETH_LINK_SPEED_1G (1 << 5)
279 #define ETH_LINK_SPEED_2_5G (1 << 6)
280 #define ETH_LINK_SPEED_5G (1 << 7)
281 #define ETH_LINK_SPEED_10G (1 << 8)
282 #define ETH_LINK_SPEED_20G (1 << 9)
283 #define ETH_LINK_SPEED_25G (1 << 10)
284 #define ETH_LINK_SPEED_40G (1 << 11)
285 #define ETH_LINK_SPEED_50G (1 << 12)
286 #define ETH_LINK_SPEED_56G (1 << 13)
287 #define ETH_LINK_SPEED_100G (1 << 14)
288 #define ETH_LINK_SPEED_200G (1 << 15)
293 #define ETH_SPEED_NUM_NONE 0
294 #define ETH_SPEED_NUM_10M 10
295 #define ETH_SPEED_NUM_100M 100
296 #define ETH_SPEED_NUM_1G 1000
297 #define ETH_SPEED_NUM_2_5G 2500
298 #define ETH_SPEED_NUM_5G 5000
299 #define ETH_SPEED_NUM_10G 10000
300 #define ETH_SPEED_NUM_20G 20000
301 #define ETH_SPEED_NUM_25G 25000
302 #define ETH_SPEED_NUM_40G 40000
303 #define ETH_SPEED_NUM_50G 50000
304 #define ETH_SPEED_NUM_56G 56000
305 #define ETH_SPEED_NUM_100G 100000
306 #define ETH_SPEED_NUM_200G 200000
307 #define ETH_SPEED_NUM_UNKNOWN UINT32_MAX
312 __extension__
/*
 * Link status of an Ethernet port.
 * The structure is forced to 8-byte alignment so the whole link state fits
 * in one aligned 64-bit unit (allows it to be read/written as a single word).
 */
313 struct rte_eth_link {
314  uint32_t link_speed; /* ETH_SPEED_NUM_* (Mbps); ETH_SPEED_NUM_UNKNOWN if not known */
315  uint16_t link_duplex : 1; /* ETH_LINK_HALF_DUPLEX (0) or ETH_LINK_FULL_DUPLEX (1) */
316  uint16_t link_autoneg : 1; /* ETH_LINK_FIXED (0) or ETH_LINK_AUTONEG (1) */
317  uint16_t link_status : 1; /* ETH_LINK_DOWN (0) or ETH_LINK_UP (1) */
318 } __rte_aligned(8);
320 /* Utility constants */
321 #define ETH_LINK_HALF_DUPLEX 0
322 #define ETH_LINK_FULL_DUPLEX 1
323 #define ETH_LINK_DOWN 0
324 #define ETH_LINK_UP 1
325 #define ETH_LINK_FIXED 0
326 #define ETH_LINK_AUTONEG 1
327 #define RTE_ETH_LINK_MAX_STR_LEN 40
/*
 * Ring threshold registers for an Rx/Tx descriptor ring.
 * Per the DPDK ethdev documentation these map to the NIC's prefetch,
 * host, and write-back threshold registers; valid ranges are device
 * specific — consult the PMD for the hardware in use.
 */
333 struct rte_eth_thresh {
334  uint8_t pthresh; /* ring prefetch threshold */
335  uint8_t hthresh; /* ring host threshold */
336  uint8_t wthresh; /* ring writeback threshold */
337 };
338 
342 #define ETH_MQ_RX_RSS_FLAG 0x1
343 #define ETH_MQ_RX_DCB_FLAG 0x2
344 #define ETH_MQ_RX_VMDQ_FLAG 0x4
345 
353 
357  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
359  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
360 
362  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
364  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
366  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
369  ETH_MQ_RX_VMDQ_FLAG,
370 };
371 
375 #define ETH_RSS ETH_MQ_RX_RSS
376 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
377 #define ETH_DCB_RX ETH_MQ_RX_DCB
378 
388 };
389 
393 #define ETH_DCB_NONE ETH_MQ_TX_NONE
394 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
395 #define ETH_DCB_TX ETH_MQ_TX_DCB
396 
402  enum rte_eth_rx_mq_mode mq_mode;
403  uint32_t max_rx_pkt_len;
406  uint16_t split_hdr_size;
412  uint64_t offloads;
413 
414  uint64_t reserved_64s[2];
415  void *reserved_ptrs[2];
416 };
417 
423  ETH_VLAN_TYPE_UNKNOWN = 0,
426  ETH_VLAN_TYPE_MAX,
427 };
428 
434  uint64_t ids[64];
435 };
436 
455  uint8_t *rss_key;
456  uint8_t rss_key_len;
457  uint64_t rss_hf;
458 };
459 
460 /*
461  * A packet can be identified by hardware as different flow types. Different
462  * NIC hardware may support different flow types.
463  * Basically, the NIC hardware identifies the flow type as deep protocol as
464  * possible, and exclusively. For example, if a packet is identified as
465  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
466  * though it is an actual IPV4 packet.
467  */
468 #define RTE_ETH_FLOW_UNKNOWN 0
469 #define RTE_ETH_FLOW_RAW 1
470 #define RTE_ETH_FLOW_IPV4 2
471 #define RTE_ETH_FLOW_FRAG_IPV4 3
472 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
473 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
474 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
475 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
476 #define RTE_ETH_FLOW_IPV6 8
477 #define RTE_ETH_FLOW_FRAG_IPV6 9
478 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
479 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
480 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
481 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
482 #define RTE_ETH_FLOW_L2_PAYLOAD 14
483 #define RTE_ETH_FLOW_IPV6_EX 15
484 #define RTE_ETH_FLOW_IPV6_TCP_EX 16
485 #define RTE_ETH_FLOW_IPV6_UDP_EX 17
486 #define RTE_ETH_FLOW_PORT 18
487 
488 #define RTE_ETH_FLOW_VXLAN 19
489 #define RTE_ETH_FLOW_GENEVE 20
490 #define RTE_ETH_FLOW_NVGRE 21
491 #define RTE_ETH_FLOW_VXLAN_GPE 22
492 #define RTE_ETH_FLOW_GTPU 23
493 #define RTE_ETH_FLOW_MAX 24
494 
495 /*
496  * Below macros are defined for RSS offload types, they can be used to
497  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
498  */
499 #define ETH_RSS_IPV4 (1ULL << 2)
500 #define ETH_RSS_FRAG_IPV4 (1ULL << 3)
501 #define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << 4)
502 #define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << 5)
503 #define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << 6)
504 #define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
505 #define ETH_RSS_IPV6 (1ULL << 8)
506 #define ETH_RSS_FRAG_IPV6 (1ULL << 9)
507 #define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << 10)
508 #define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << 11)
509 #define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << 12)
510 #define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
511 #define ETH_RSS_L2_PAYLOAD (1ULL << 14)
512 #define ETH_RSS_IPV6_EX (1ULL << 15)
513 #define ETH_RSS_IPV6_TCP_EX (1ULL << 16)
514 #define ETH_RSS_IPV6_UDP_EX (1ULL << 17)
515 #define ETH_RSS_PORT (1ULL << 18)
516 #define ETH_RSS_VXLAN (1ULL << 19)
517 #define ETH_RSS_GENEVE (1ULL << 20)
518 #define ETH_RSS_NVGRE (1ULL << 21)
519 #define ETH_RSS_GTPU (1ULL << 23)
520 #define ETH_RSS_ETH (1ULL << 24)
521 #define ETH_RSS_S_VLAN (1ULL << 25)
522 #define ETH_RSS_C_VLAN (1ULL << 26)
523 #define ETH_RSS_ESP (1ULL << 27)
524 #define ETH_RSS_AH (1ULL << 28)
525 #define ETH_RSS_L2TPV3 (1ULL << 29)
526 #define ETH_RSS_PFCP (1ULL << 30)
527 #define ETH_RSS_PPPOE (1ULL << 31)
528 #define ETH_RSS_ECPRI (1ULL << 32)
529 
530 /*
531  * We use the following macros to combine with above ETH_RSS_* for
532  * more specific input set selection. These bits are defined starting
533  * from the high end of the 64 bits.
534  * Note: If we use above ETH_RSS_* without SRC/DST_ONLY, it represents
535  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
536  * the same level are used simultaneously, it is the same case as none of
537  * them are added.
538  */
539 #define ETH_RSS_L3_SRC_ONLY (1ULL << 63)
540 #define ETH_RSS_L3_DST_ONLY (1ULL << 62)
541 #define ETH_RSS_L4_SRC_ONLY (1ULL << 61)
542 #define ETH_RSS_L4_DST_ONLY (1ULL << 60)
543 #define ETH_RSS_L2_SRC_ONLY (1ULL << 59)
544 #define ETH_RSS_L2_DST_ONLY (1ULL << 58)
545 
546 /*
547  * Only select IPV6 address prefix as RSS input set according to
548  * https://tools.ietf.org/html/rfc6052
549  * Must be combined with ETH_RSS_IPV6, ETH_RSS_NONFRAG_IPV6_UDP,
550  * ETH_RSS_NONFRAG_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_SCTP.
551  */
552 #define RTE_ETH_RSS_L3_PRE32 (1ULL << 57)
553 #define RTE_ETH_RSS_L3_PRE40 (1ULL << 56)
554 #define RTE_ETH_RSS_L3_PRE48 (1ULL << 55)
555 #define RTE_ETH_RSS_L3_PRE56 (1ULL << 54)
556 #define RTE_ETH_RSS_L3_PRE64 (1ULL << 53)
557 #define RTE_ETH_RSS_L3_PRE96 (1ULL << 52)
558 
559 /*
560  * Use the following macros to combine with the above layers
561  * to choose inner and outer layers or both for RSS computation.
562  * Bits 50 and 51 are reserved for this.
563  */
564 
572 #define ETH_RSS_LEVEL_PMD_DEFAULT (0ULL << 50)
573 
578 #define ETH_RSS_LEVEL_OUTERMOST (1ULL << 50)
579 
584 #define ETH_RSS_LEVEL_INNERMOST (2ULL << 50)
585 #define ETH_RSS_LEVEL_MASK (3ULL << 50)
586 
587 #define ETH_RSS_LEVEL(rss_hf) ((rss_hf & ETH_RSS_LEVEL_MASK) >> 50)
588 
599 static inline uint64_t
600 rte_eth_rss_hf_refine(uint64_t rss_hf)
601 {
602  if ((rss_hf & ETH_RSS_L3_SRC_ONLY) && (rss_hf & ETH_RSS_L3_DST_ONLY))
603  rss_hf &= ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
604 
605  if ((rss_hf & ETH_RSS_L4_SRC_ONLY) && (rss_hf & ETH_RSS_L4_DST_ONLY))
606  rss_hf &= ~(ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
607 
608  return rss_hf;
609 }
610 
611 #define ETH_RSS_IPV6_PRE32 ( \
612  ETH_RSS_IPV6 | \
613  RTE_ETH_RSS_L3_PRE32)
614 
615 #define ETH_RSS_IPV6_PRE40 ( \
616  ETH_RSS_IPV6 | \
617  RTE_ETH_RSS_L3_PRE40)
618 
619 #define ETH_RSS_IPV6_PRE48 ( \
620  ETH_RSS_IPV6 | \
621  RTE_ETH_RSS_L3_PRE48)
622 
623 #define ETH_RSS_IPV6_PRE56 ( \
624  ETH_RSS_IPV6 | \
625  RTE_ETH_RSS_L3_PRE56)
626 
627 #define ETH_RSS_IPV6_PRE64 ( \
628  ETH_RSS_IPV6 | \
629  RTE_ETH_RSS_L3_PRE64)
630 
631 #define ETH_RSS_IPV6_PRE96 ( \
632  ETH_RSS_IPV6 | \
633  RTE_ETH_RSS_L3_PRE96)
634 
635 #define ETH_RSS_IPV6_PRE32_UDP ( \
636  ETH_RSS_NONFRAG_IPV6_UDP | \
637  RTE_ETH_RSS_L3_PRE32)
638 
639 #define ETH_RSS_IPV6_PRE40_UDP ( \
640  ETH_RSS_NONFRAG_IPV6_UDP | \
641  RTE_ETH_RSS_L3_PRE40)
642 
643 #define ETH_RSS_IPV6_PRE48_UDP ( \
644  ETH_RSS_NONFRAG_IPV6_UDP | \
645  RTE_ETH_RSS_L3_PRE48)
646 
647 #define ETH_RSS_IPV6_PRE56_UDP ( \
648  ETH_RSS_NONFRAG_IPV6_UDP | \
649  RTE_ETH_RSS_L3_PRE56)
650 
651 #define ETH_RSS_IPV6_PRE64_UDP ( \
652  ETH_RSS_NONFRAG_IPV6_UDP | \
653  RTE_ETH_RSS_L3_PRE64)
654 
655 #define ETH_RSS_IPV6_PRE96_UDP ( \
656  ETH_RSS_NONFRAG_IPV6_UDP | \
657  RTE_ETH_RSS_L3_PRE96)
658 
659 #define ETH_RSS_IPV6_PRE32_TCP ( \
660  ETH_RSS_NONFRAG_IPV6_TCP | \
661  RTE_ETH_RSS_L3_PRE32)
662 
663 #define ETH_RSS_IPV6_PRE40_TCP ( \
664  ETH_RSS_NONFRAG_IPV6_TCP | \
665  RTE_ETH_RSS_L3_PRE40)
666 
667 #define ETH_RSS_IPV6_PRE48_TCP ( \
668  ETH_RSS_NONFRAG_IPV6_TCP | \
669  RTE_ETH_RSS_L3_PRE48)
670 
671 #define ETH_RSS_IPV6_PRE56_TCP ( \
672  ETH_RSS_NONFRAG_IPV6_TCP | \
673  RTE_ETH_RSS_L3_PRE56)
674 
675 #define ETH_RSS_IPV6_PRE64_TCP ( \
676  ETH_RSS_NONFRAG_IPV6_TCP | \
677  RTE_ETH_RSS_L3_PRE64)
678 
679 #define ETH_RSS_IPV6_PRE96_TCP ( \
680  ETH_RSS_NONFRAG_IPV6_TCP | \
681  RTE_ETH_RSS_L3_PRE96)
682 
683 #define ETH_RSS_IPV6_PRE32_SCTP ( \
684  ETH_RSS_NONFRAG_IPV6_SCTP | \
685  RTE_ETH_RSS_L3_PRE32)
686 
687 #define ETH_RSS_IPV6_PRE40_SCTP ( \
688  ETH_RSS_NONFRAG_IPV6_SCTP | \
689  RTE_ETH_RSS_L3_PRE40)
690 
691 #define ETH_RSS_IPV6_PRE48_SCTP ( \
692  ETH_RSS_NONFRAG_IPV6_SCTP | \
693  RTE_ETH_RSS_L3_PRE48)
694 
695 #define ETH_RSS_IPV6_PRE56_SCTP ( \
696  ETH_RSS_NONFRAG_IPV6_SCTP | \
697  RTE_ETH_RSS_L3_PRE56)
698 
699 #define ETH_RSS_IPV6_PRE64_SCTP ( \
700  ETH_RSS_NONFRAG_IPV6_SCTP | \
701  RTE_ETH_RSS_L3_PRE64)
702 
703 #define ETH_RSS_IPV6_PRE96_SCTP ( \
704  ETH_RSS_NONFRAG_IPV6_SCTP | \
705  RTE_ETH_RSS_L3_PRE96)
706 
707 #define ETH_RSS_IP ( \
708  ETH_RSS_IPV4 | \
709  ETH_RSS_FRAG_IPV4 | \
710  ETH_RSS_NONFRAG_IPV4_OTHER | \
711  ETH_RSS_IPV6 | \
712  ETH_RSS_FRAG_IPV6 | \
713  ETH_RSS_NONFRAG_IPV6_OTHER | \
714  ETH_RSS_IPV6_EX)
715 
716 #define ETH_RSS_UDP ( \
717  ETH_RSS_NONFRAG_IPV4_UDP | \
718  ETH_RSS_NONFRAG_IPV6_UDP | \
719  ETH_RSS_IPV6_UDP_EX)
720 
721 #define ETH_RSS_TCP ( \
722  ETH_RSS_NONFRAG_IPV4_TCP | \
723  ETH_RSS_NONFRAG_IPV6_TCP | \
724  ETH_RSS_IPV6_TCP_EX)
725 
726 #define ETH_RSS_SCTP ( \
727  ETH_RSS_NONFRAG_IPV4_SCTP | \
728  ETH_RSS_NONFRAG_IPV6_SCTP)
729 
730 #define ETH_RSS_TUNNEL ( \
731  ETH_RSS_VXLAN | \
732  ETH_RSS_GENEVE | \
733  ETH_RSS_NVGRE)
734 
735 #define ETH_RSS_VLAN ( \
736  ETH_RSS_S_VLAN | \
737  ETH_RSS_C_VLAN)
738 
740 #define ETH_RSS_PROTO_MASK ( \
741  ETH_RSS_IPV4 | \
742  ETH_RSS_FRAG_IPV4 | \
743  ETH_RSS_NONFRAG_IPV4_TCP | \
744  ETH_RSS_NONFRAG_IPV4_UDP | \
745  ETH_RSS_NONFRAG_IPV4_SCTP | \
746  ETH_RSS_NONFRAG_IPV4_OTHER | \
747  ETH_RSS_IPV6 | \
748  ETH_RSS_FRAG_IPV6 | \
749  ETH_RSS_NONFRAG_IPV6_TCP | \
750  ETH_RSS_NONFRAG_IPV6_UDP | \
751  ETH_RSS_NONFRAG_IPV6_SCTP | \
752  ETH_RSS_NONFRAG_IPV6_OTHER | \
753  ETH_RSS_L2_PAYLOAD | \
754  ETH_RSS_IPV6_EX | \
755  ETH_RSS_IPV6_TCP_EX | \
756  ETH_RSS_IPV6_UDP_EX | \
757  ETH_RSS_PORT | \
758  ETH_RSS_VXLAN | \
759  ETH_RSS_GENEVE | \
760  ETH_RSS_NVGRE)
761 
762 /*
763  * Definitions used for redirection table entry size.
764  * Some RSS RETA sizes may not be supported by some drivers, check the
765  * documentation or the description of relevant functions for more details.
766  */
767 #define ETH_RSS_RETA_SIZE_64 64
768 #define ETH_RSS_RETA_SIZE_128 128
769 #define ETH_RSS_RETA_SIZE_256 256
770 #define ETH_RSS_RETA_SIZE_512 512
771 #define RTE_RETA_GROUP_SIZE 64
772 
773 /* Definitions used for VMDQ and DCB functionality */
774 #define ETH_VMDQ_MAX_VLAN_FILTERS 64
775 #define ETH_DCB_NUM_USER_PRIORITIES 8
776 #define ETH_VMDQ_DCB_NUM_QUEUES 128
777 #define ETH_DCB_NUM_QUEUES 128
779 /* DCB capability defines */
780 #define ETH_DCB_PG_SUPPORT 0x00000001
781 #define ETH_DCB_PFC_SUPPORT 0x00000002
783 /* Definitions used for VLAN Offload functionality */
784 #define ETH_VLAN_STRIP_OFFLOAD 0x0001
785 #define ETH_VLAN_FILTER_OFFLOAD 0x0002
786 #define ETH_VLAN_EXTEND_OFFLOAD 0x0004
787 #define ETH_QINQ_STRIP_OFFLOAD 0x0008
789 /* Definitions used for mask VLAN setting */
790 #define ETH_VLAN_STRIP_MASK 0x0001
791 #define ETH_VLAN_FILTER_MASK 0x0002
792 #define ETH_VLAN_EXTEND_MASK 0x0004
793 #define ETH_QINQ_STRIP_MASK 0x0008
794 #define ETH_VLAN_ID_MAX 0x0FFF
796 /* Definitions used for receive MAC address */
797 #define ETH_NUM_RECEIVE_MAC_ADDR 128
799 /* Definitions used for unicast hash */
800 #define ETH_VMDQ_NUM_UC_HASH_ARRAY 128
802 /* Definitions used for VMDQ pool rx mode setting */
803 #define ETH_VMDQ_ACCEPT_UNTAG 0x0001
804 #define ETH_VMDQ_ACCEPT_HASH_MC 0x0002
805 #define ETH_VMDQ_ACCEPT_HASH_UC 0x0004
806 #define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
807 #define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
810 #define ETH_MIRROR_MAX_VLANS 64
811 
812 #define ETH_MIRROR_VIRTUAL_POOL_UP 0x01
813 #define ETH_MIRROR_UPLINK_PORT 0x02
814 #define ETH_MIRROR_DOWNLINK_PORT 0x04
815 #define ETH_MIRROR_VLAN 0x08
816 #define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10
/*
 * VLAN selection for a mirroring rule (ETH_MIRROR_VLAN type).
 * NOTE(review): vlan_mask is presumably a bitmap marking which entries of
 * vlan_id[] are valid (bit i -> vlan_id[i]) — confirm against the PMDs
 * implementing rte_eth_mirror_rule_set().
 */
821 struct rte_eth_vlan_mirror {
822  uint64_t vlan_mask; /* bitmap of valid entries in vlan_id[] (see note above) */
824  uint16_t vlan_id[ETH_MIRROR_MAX_VLANS]; /* VLAN IDs to mirror, up to 64 */
825 };
826 
831  uint8_t rule_type;
832  uint8_t dst_pool;
833  uint64_t pool_mask;
835  struct rte_eth_vlan_mirror vlan;
836 };
837 
845  uint64_t mask;
847  uint16_t reta[RTE_RETA_GROUP_SIZE];
849 };
850 
856  ETH_4_TCS = 4,
858 };
859 
869 };
870 
871 /* This structure may be extended in future. */
/* Receive-side DCB configuration: traffic-class count and the mapping of
 * each of the 8 user priorities to a traffic class. */
872 struct rte_eth_dcb_rx_conf {
873  enum rte_eth_nb_tcs nb_tcs; /* number of traffic classes (e.g. ETH_4_TCS) */
875  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /* dcb_tc[prio] = traffic class for that user priority */
876 };
877 
/* Transmit-side configuration for combined VMDq+DCB mode. */
878 struct rte_eth_vmdq_dcb_tx_conf {
879  enum rte_eth_nb_pools nb_queue_pools; /* number of VMDq transmit pools */
881  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /* dcb_tc[prio] = traffic class for that user priority */
882 };
883 
/* Transmit-side DCB configuration (DCB only, no VMDq). */
884 struct rte_eth_dcb_tx_conf {
885  enum rte_eth_nb_tcs nb_tcs; /* number of traffic classes (e.g. ETH_4_TCS) */
887  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /* dcb_tc[prio] = traffic class for that user priority */
888 };
889 
/* Transmit-side configuration for VMDq-only mode. */
890 struct rte_eth_vmdq_tx_conf {
891  enum rte_eth_nb_pools nb_queue_pools; /* number of VMDq transmit pools */
892 };
893 
906  enum rte_eth_nb_pools nb_queue_pools;
908  uint8_t default_pool;
909  uint8_t nb_pool_maps;
910  struct {
911  uint16_t vlan_id;
912  uint64_t pools;
913  } pool_map[ETH_VMDQ_MAX_VLAN_FILTERS];
916 };
917 
937  enum rte_eth_nb_pools nb_queue_pools;
939  uint8_t default_pool;
941  uint8_t nb_pool_maps;
942  uint32_t rx_mode;
943  struct {
944  uint16_t vlan_id;
945  uint64_t pools;
946  } pool_map[ETH_VMDQ_MAX_VLAN_FILTERS];
947 };
948 
953  enum rte_eth_tx_mq_mode mq_mode;
959  uint64_t offloads;
960 
961  uint16_t pvid;
962  __extension__
963  uint8_t hw_vlan_reject_tagged : 1,
965  hw_vlan_reject_untagged : 1,
967  hw_vlan_insert_pvid : 1;
970  uint64_t reserved_64s[2];
971  void *reserved_ptrs[2];
972 };
973 
1011  struct rte_mempool *mp;
1012  uint16_t length;
1013  uint16_t offset;
1014  uint32_t reserved;
1015 };
1016 
1024  /* The settings for buffer split offload. */
1025  struct rte_eth_rxseg_split split;
1026  /* The other features settings should be added here. */
1027 };
1028 
1033  struct rte_eth_thresh rx_thresh;
1034  uint16_t rx_free_thresh;
1035  uint8_t rx_drop_en;
1037  uint16_t rx_nseg;
1043  uint64_t offloads;
1052 
1053  uint64_t reserved_64s[2];
1054  void *reserved_ptrs[2];
1055 };
1056 
1061  struct rte_eth_thresh tx_thresh;
1062  uint16_t tx_rs_thresh;
1063  uint16_t tx_free_thresh;
1072  uint64_t offloads;
1073 
1074  uint64_t reserved_64s[2];
1075  void *reserved_ptrs[2];
1076 };
1077 
1086  uint16_t max_nb_queues;
1088  uint16_t max_rx_2_tx;
1090  uint16_t max_tx_2_rx;
1091  uint16_t max_nb_desc;
1092 };
1093 
1094 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
1095 
1103  uint16_t port;
1104  uint16_t queue;
1105 };
1106 
1114  uint32_t peer_count:16;
1125  uint32_t tx_explicit:1;
1126 
1138  uint32_t manual_bind:1;
1139  uint32_t reserved:14;
1140  struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1141 };
1142 
1147  uint16_t nb_max;
1148  uint16_t nb_min;
1149  uint16_t nb_align;
1159  uint16_t nb_seg_max;
1160 
1172  uint16_t nb_mtu_seg_max;
1173 };
1174 
1183 };
1184 
1191  uint32_t high_water;
1192  uint32_t low_water;
1193  uint16_t pause_time;
1194  uint16_t send_xon;
1195  enum rte_eth_fc_mode mode;
1197  uint8_t autoneg;
1198 };
1199 
1206  struct rte_eth_fc_conf fc;
1207  uint8_t priority;
1208 };
1209 
1214  RTE_TUNNEL_TYPE_NONE = 0,
1215  RTE_TUNNEL_TYPE_VXLAN,
1216  RTE_TUNNEL_TYPE_GENEVE,
1217  RTE_TUNNEL_TYPE_TEREDO,
1218  RTE_TUNNEL_TYPE_NVGRE,
1219  RTE_TUNNEL_TYPE_IP_IN_GRE,
1220  RTE_L2_TUNNEL_TYPE_E_TAG,
1221  RTE_TUNNEL_TYPE_VXLAN_GPE,
1222  RTE_TUNNEL_TYPE_MAX,
1223 };
1224 
1225 /* Deprecated API file for rte_eth_dev_filter_* functions */
1226 #include "rte_eth_ctrl.h"
1227 
1236 };
1237 
1245 };
1246 
1254  enum rte_fdir_mode mode;
1255  enum rte_fdir_pballoc_type pballoc;
1256  enum rte_fdir_status_mode status;
1258  uint8_t drop_queue;
1259  struct rte_eth_fdir_masks mask;
1260  struct rte_eth_fdir_flex_conf flex_conf;
1262 };
1263 
1272  uint16_t udp_port;
1273  uint8_t prot_type;
1274 };
1275 
1281  uint32_t lsc:1;
1283  uint32_t rxq:1;
1285  uint32_t rmv:1;
1286 };
1287 
1294  uint32_t link_speeds;
1301  struct rte_eth_rxmode rxmode;
1302  struct rte_eth_txmode txmode;
1303  uint32_t lpbk_mode;
1308  struct {
1309  struct rte_eth_rss_conf rss_conf;
1310  struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
1312  struct rte_eth_dcb_rx_conf dcb_rx_conf;
1314  struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
1316  } rx_adv_conf;
1317  union {
1318  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1320  struct rte_eth_dcb_tx_conf dcb_tx_conf;
1322  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1324  } tx_adv_conf;
1328  struct rte_fdir_conf fdir_conf;
1329  struct rte_intr_conf intr_conf;
1330 };
1331 
1335 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
1336 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
1337 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
1338 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
1339 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
1340 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
1341 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
1342 #define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
1343 #define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
1344 #define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
1345 #define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
1346 #define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
1347 #define DEV_RX_OFFLOAD_SCATTER 0x00002000
1348 
1353 #define DEV_RX_OFFLOAD_TIMESTAMP 0x00004000
1354 #define DEV_RX_OFFLOAD_SECURITY 0x00008000
1355 #define DEV_RX_OFFLOAD_KEEP_CRC 0x00010000
1356 #define DEV_RX_OFFLOAD_SCTP_CKSUM 0x00020000
1357 #define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM 0x00040000
1358 #define DEV_RX_OFFLOAD_RSS_HASH 0x00080000
1359 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000
1360 
1361 #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
1362  DEV_RX_OFFLOAD_UDP_CKSUM | \
1363  DEV_RX_OFFLOAD_TCP_CKSUM)
1364 #define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
1365  DEV_RX_OFFLOAD_VLAN_FILTER | \
1366  DEV_RX_OFFLOAD_VLAN_EXTEND | \
1367  DEV_RX_OFFLOAD_QINQ_STRIP)
1368 
1369 /*
1370  * If new Rx offload capabilities are defined, they also must be
1371  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1372  */
1373 
1377 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
1378 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
1379 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
1380 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
1381 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
1382 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
1383 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
1384 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
1385 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
1386 #define DEV_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200
1387 #define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400
1388 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800
1389 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000
1390 #define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
1391 #define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
1392 
1395 #define DEV_TX_OFFLOAD_MULTI_SEGS 0x00008000
1396 
1397 #define DEV_TX_OFFLOAD_MBUF_FAST_FREE 0x00010000
1398 
1402 #define DEV_TX_OFFLOAD_SECURITY 0x00020000
1403 
1408 #define DEV_TX_OFFLOAD_UDP_TNL_TSO 0x00040000
1409 
1414 #define DEV_TX_OFFLOAD_IP_TNL_TSO 0x00080000
1415 
1416 #define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM 0x00100000
1417 
1422 #define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
1423 /*
1424  * If new Tx offload capabilities are defined, they also must be
1425  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1426  */
1427 
1432 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
1433 
1434 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002
1435 
1437 /*
1438  * Fallback default preferred Rx/Tx port parameters.
1439  * These are used if an application requests default parameters
1440  * but the PMD does not provide preferred values.
1441  */
1442 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1443 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1444 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1445 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1446 
1453  uint16_t burst_size;
1454  uint16_t ring_size;
1455  uint16_t nb_queues;
1456 };
1457 
1462 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1463 
1468  const char *name;
1469  uint16_t domain_id;
1470  uint16_t port_id;
1478 };
1479 
1487  __extension__
1488  uint32_t multi_pools:1;
1489  uint32_t offset_allowed:1;
1490  uint32_t offset_align_log2:4;
1491  uint16_t max_nseg;
1492  uint16_t reserved;
1493 };
1494 
1505  struct rte_device *device;
1506  const char *driver_name;
1507  unsigned int if_index;
1509  uint16_t min_mtu;
1510  uint16_t max_mtu;
1511  const uint32_t *dev_flags;
1512  uint32_t min_rx_bufsize;
1513  uint32_t max_rx_pktlen;
1516  uint16_t max_rx_queues;
1517  uint16_t max_tx_queues;
1518  uint32_t max_mac_addrs;
1521  uint16_t max_vfs;
1522  uint16_t max_vmdq_pools;
1523  struct rte_eth_rxseg_capa rx_seg_capa;
1532  uint16_t reta_size;
1534  uint8_t hash_key_size;
1537  struct rte_eth_rxconf default_rxconf;
1538  struct rte_eth_txconf default_txconf;
1539  uint16_t vmdq_queue_base;
1540  uint16_t vmdq_queue_num;
1541  uint16_t vmdq_pool_base;
1542  struct rte_eth_desc_lim rx_desc_lim;
1543  struct rte_eth_desc_lim tx_desc_lim;
1544  uint32_t speed_capa;
1546  uint16_t nb_rx_queues;
1547  uint16_t nb_tx_queues;
1549  struct rte_eth_dev_portconf default_rxportconf;
1551  struct rte_eth_dev_portconf default_txportconf;
1553  uint64_t dev_capa;
1558  struct rte_eth_switch_info switch_info;
1559 
1560  uint64_t reserved_64s[2];
1561  void *reserved_ptrs[2];
1562 };
1563 
1567 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1568 #define RTE_ETH_QUEUE_STATE_STARTED 1
1569 #define RTE_ETH_QUEUE_STATE_HAIRPIN 2
1570 
1576  struct rte_mempool *mp;
1577  struct rte_eth_rxconf conf;
1578  uint8_t scattered_rx;
1579  uint16_t nb_desc;
1580  uint16_t rx_buf_size;
1582 
1588  struct rte_eth_txconf conf;
1589  uint16_t nb_desc;
1591 
1592 /* Generic Burst mode flag definition, values can be ORed. */
1593 
1599 #define RTE_ETH_BURST_FLAG_PER_QUEUE (1ULL << 0)
1600 
1606  uint64_t flags;
1608 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1609  char info[RTE_ETH_BURST_MODE_INFO_SIZE];
1610 };
1611 
1613 #define RTE_ETH_XSTATS_NAME_SIZE 64
1614 
1625  uint64_t id;
1626  uint64_t value;
1627 };
1628 
1645 };
1646 
1647 #define ETH_DCB_NUM_TCS 8
1648 #define ETH_MAX_VMDQ_POOL 64
1649 
1656  struct {
1657  uint16_t base;
1658  uint16_t nb_queue;
1659  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1661  struct {
1662  uint16_t base;
1663  uint16_t nb_queue;
1664  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1665 };
1666 
1672  uint8_t nb_tcs;
1673  uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES];
1674  uint8_t tc_bws[ETH_DCB_NUM_TCS];
1677 };
1678 
1688 };
1689 
1690 /* Translate from FEC mode to FEC capa */
1691 #define RTE_ETH_FEC_MODE_TO_CAPA(x) (1U << (x))
1692 
1693 /* This macro indicates FEC capa mask */
1694 #define RTE_ETH_FEC_MODE_CAPA_MASK(x) (1U << (RTE_ETH_FEC_ ## x))
1695 
1696 /* A structure used to get capabilities per link speed */
1697 struct rte_eth_fec_capa {
1698  uint32_t speed; /* link speed in Mbps (ETH_SPEED_NUM_*) */
1699  uint32_t capa; /* bitmask of supported FEC modes, bits per RTE_ETH_FEC_MODE_TO_CAPA() */
1700 };
1701 
1702 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
1703 
1704 /* Macros to check for valid port */
/* If port_id is not a valid ethdev port, log an error and make the
 * enclosing function return 'retval'. For use in int-returning APIs. */
1705 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1706  if (!rte_eth_dev_is_valid_port(port_id)) { \
1707  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1708  return retval; \
1709  } \
1710 } while (0)
1711 
/* If port_id is not a valid ethdev port, log an error and make the
 * enclosing function return. For use in void-returning APIs. */
1712 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1713  if (!rte_eth_dev_is_valid_port(port_id)) { \
1714  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1715  return; \
1716  } \
1717 } while (0)
1718 
1724 #define ETH_L2_TUNNEL_ENABLE_MASK 0x00000001
1725 
1726 #define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002
1727 
1728 #define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004
1729 
1730 #define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1731 
1754 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
1755  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1756  void *user_param);
1757 
1778 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
1779  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1780 
1791 };
1792 
/*
 * Per-device SR-IOV state, accessed via RTE_ETH_DEV_SRIOV(dev).
 * NOTE(review): field semantics below are inferred from the names only —
 * this chunk does not show the code that fills them; verify against the
 * ethdev/PMD sources before relying on them.
 */
1793 struct rte_eth_dev_sriov {
1794  uint8_t active; /* presumably nonzero when SR-IOV is enabled — confirm */
1795  uint8_t nb_q_per_pool; /* queues per VMDq pool — confirm */
1796  uint16_t def_vmdq_idx; /* default VMDq pool index for PF — confirm */
1797  uint16_t def_pool_q_idx; /* default queue index within the pool — confirm */
1798 };
1799 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1800 
1801 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
1802 
1803 #define RTE_ETH_DEV_NO_OWNER 0
1804 
1805 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
1806 
/* Identity of an ethdev port owner, as used by rte_eth_dev_owner_set()
 * and friends. An id of RTE_ETH_DEV_NO_OWNER (0) means unowned. */
1807 struct rte_eth_dev_owner {
1808  uint64_t id; /* owner identifier, allocated by rte_eth_dev_owner_new() */
1809  char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /* human-readable owner name (NUL-terminated) */
1810 };
1811 
1813 #define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE 0x0001
1814 
1815 #define RTE_ETH_DEV_INTR_LSC 0x0002
1816 
1817 #define RTE_ETH_DEV_BONDED_SLAVE 0x0004
1818 
1819 #define RTE_ETH_DEV_INTR_RMV 0x0008
1820 
1821 #define RTE_ETH_DEV_REPRESENTOR 0x0010
1822 
1823 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR 0x0020
1824 
1828 #define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS 0x0040
1829 
1841 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
1842  const uint64_t owner_id);
1843 
/* Iterate port ids 'p' over all valid ports owned by owner id 'o',
 * using rte_eth_find_next_owned_by(); ends when p reaches RTE_MAX_ETHPORTS. */
1847 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
1848  for (p = rte_eth_find_next_owned_by(0, o); \
1849  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
1850  p = rte_eth_find_next_owned_by(p + 1, o))
1851 
1860 uint16_t rte_eth_find_next(uint16_t port_id);
1861 
1865 #define RTE_ETH_FOREACH_DEV(p) \
1866  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
1867 
1882 __rte_experimental
1883 uint16_t
1884 rte_eth_find_next_of(uint16_t port_id_start,
1885  const struct rte_device *parent);
1886 
/* Iterate 'port_id' over all valid ports belonging to the rte_device
 * 'parent'; ends when rte_eth_find_next_of() returns RTE_MAX_ETHPORTS. */
1895 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
1896  for (port_id = rte_eth_find_next_of(0, parent); \
1897  port_id < RTE_MAX_ETHPORTS; \
1898  port_id = rte_eth_find_next_of(port_id + 1, parent))
1899 
1914 __rte_experimental
1915 uint16_t
1916 rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
1917 
/* Iterate 'port_id' over all valid ports sharing the same underlying
 * device as 'ref_port_id' (including ref_port_id itself); ends when
 * rte_eth_find_next_sibling() returns RTE_MAX_ETHPORTS. */
1928 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
1929  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
1930  port_id < RTE_MAX_ETHPORTS; \
1931  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
1932 
1946 __rte_experimental
1947 int rte_eth_dev_owner_new(uint64_t *owner_id);
1948 
1962 __rte_experimental
1963 int rte_eth_dev_owner_set(const uint16_t port_id,
1964  const struct rte_eth_dev_owner *owner);
1965 
1979 __rte_experimental
1980 int rte_eth_dev_owner_unset(const uint16_t port_id,
1981  const uint64_t owner_id);
1982 
1994 __rte_experimental
1995 int rte_eth_dev_owner_delete(const uint64_t owner_id);
1996 
2010 __rte_experimental
2011 int rte_eth_dev_owner_get(const uint16_t port_id,
2012  struct rte_eth_dev_owner *owner);
2013 
2024 uint16_t rte_eth_dev_count_avail(void);
2025 
2034 uint16_t rte_eth_dev_count_total(void);
2035 
2047 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2048 
2057 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2058 
2067 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2068 
2108 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2109  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2110 
2122 __rte_experimental
2123 int
2124 rte_eth_dev_is_removed(uint16_t port_id);
2125 
2188 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2189  uint16_t nb_rx_desc, unsigned int socket_id,
2190  const struct rte_eth_rxconf *rx_conf,
2191  struct rte_mempool *mb_pool);
2192 
2220 __rte_experimental
2222  (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2223  const struct rte_eth_hairpin_conf *conf);
2224 
2273 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2274  uint16_t nb_tx_desc, unsigned int socket_id,
2275  const struct rte_eth_txconf *tx_conf);
2276 
2302 __rte_experimental
2304  (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2305  const struct rte_eth_hairpin_conf *conf);
2306 
2333 __rte_experimental
2334 int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2335  size_t len, uint32_t direction);
2336 
2359 __rte_experimental
2360 int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2361 
2386 __rte_experimental
2387 int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2388 
2399 int rte_eth_dev_socket_id(uint16_t port_id);
2400 
2410 int rte_eth_dev_is_valid_port(uint16_t port_id);
2411 
2429 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2430 
2447 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2448 
2466 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2467 
2484 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2485 
2505 int rte_eth_dev_start(uint16_t port_id);
2506 
2517 int rte_eth_dev_stop(uint16_t port_id);
2518 
2531 int rte_eth_dev_set_link_up(uint16_t port_id);
2532 
2542 int rte_eth_dev_set_link_down(uint16_t port_id);
2543 
2554 int rte_eth_dev_close(uint16_t port_id);
2555 
2593 int rte_eth_dev_reset(uint16_t port_id);
2594 
2606 int rte_eth_promiscuous_enable(uint16_t port_id);
2607 
2619 int rte_eth_promiscuous_disable(uint16_t port_id);
2620 
2631 int rte_eth_promiscuous_get(uint16_t port_id);
2632 
2644 int rte_eth_allmulticast_enable(uint16_t port_id);
2645 
2657 int rte_eth_allmulticast_disable(uint16_t port_id);
2658 
2669 int rte_eth_allmulticast_get(uint16_t port_id);
2670 
2687 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
2688 
2702 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
2703 
2717 __rte_experimental
2718 const char *rte_eth_link_speed_to_str(uint32_t link_speed);
2719 
2738 __rte_experimental
2739 int rte_eth_link_to_str(char *str, size_t len,
2740  const struct rte_eth_link *eth_link);
2741 
2759 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
2760 
2772 int rte_eth_stats_reset(uint16_t port_id);
2773 
2803 int rte_eth_xstats_get_names(uint16_t port_id,
2804  struct rte_eth_xstat_name *xstats_names,
2805  unsigned int size);
2806 
2840 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2841  unsigned int n);
2842 
2867 int
2868 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2869  struct rte_eth_xstat_name *xstats_names, unsigned int size,
2870  uint64_t *ids);
2871 
2896 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2897  uint64_t *values, unsigned int size);
2898 
2917 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2918  uint64_t *id);
2919 
2932 int rte_eth_xstats_reset(uint16_t port_id);
2933 
2952 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
2953  uint16_t tx_queue_id, uint8_t stat_idx);
2954 
2973 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
2974  uint16_t rx_queue_id,
2975  uint8_t stat_idx);
2976 
2989 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
2990 
3033 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
3034 
3054 int rte_eth_dev_fw_version_get(uint16_t port_id,
3055  char *fw_version, size_t fw_size);
3056 
3095 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3096  uint32_t *ptypes, int num);
3130 __rte_experimental
3131 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3132  uint32_t *set_ptypes, unsigned int num);
3133 
3145 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3146 
3164 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3165 
3185 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3186 
3205 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3206  int on);
3207 
3224 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3225  enum rte_vlan_type vlan_type,
3226  uint16_t tag_type);
3227 
3245 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3246 
3260 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3261 
3276 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3277 
3278 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3279  void *userdata);
3280 
3285 struct rte_eth_dev_tx_buffer {
3286  buffer_tx_error_fn error_callback;
3287  void *error_userdata;
3288  uint16_t size;
3289  uint16_t length;
3290  struct rte_mbuf *pkts[];
3292 };
3293 
3300 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3301  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3302 
3313 int
3314 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3315 
3340 int
3341 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
3342  buffer_tx_error_fn callback, void *userdata);
3343 
3366 void
3367 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3368  void *userdata);
3369 
3393 void
3394 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3395  void *userdata);
3396 
3422 int
3423 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3424 
3440 };
3441 
3449  uint64_t metadata;
3463 };
3464 
3483 };
3484 
3485 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
3486  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
3506 int rte_eth_dev_callback_register(uint16_t port_id,
3507  enum rte_eth_event_type event,
3508  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3509 
3528 int rte_eth_dev_callback_unregister(uint16_t port_id,
3529  enum rte_eth_event_type event,
3530  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3531 
3553 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
3554 
3575 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
3576 
3594 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
3595 
3617 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3618  int epfd, int op, void *data);
3619 
3637 __rte_experimental
3638 int
3639 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
3640 
3654 int rte_eth_led_on(uint16_t port_id);
3655 
3669 int rte_eth_led_off(uint16_t port_id);
3670 
3699 __rte_experimental
3700 int rte_eth_fec_get_capability(uint16_t port_id,
3701  struct rte_eth_fec_capa *speed_fec_capa,
3702  unsigned int num);
3703 
3727 __rte_experimental
3728 int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
3729 
3750 __rte_experimental
3751 int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
3752 
3766 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
3767  struct rte_eth_fc_conf *fc_conf);
3768 
3783 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
3784  struct rte_eth_fc_conf *fc_conf);
3785 
3801 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3802  struct rte_eth_pfc_conf *pfc_conf);
3803 
3822 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
3823  uint32_t pool);
3824 
3838 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
3839  struct rte_ether_addr *mac_addr);
3840 
3854 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
3855  struct rte_ether_addr *mac_addr);
3856 
3874 int rte_eth_dev_rss_reta_update(uint16_t port_id,
3875  struct rte_eth_rss_reta_entry64 *reta_conf,
3876  uint16_t reta_size);
3877 
3896 int rte_eth_dev_rss_reta_query(uint16_t port_id,
3897  struct rte_eth_rss_reta_entry64 *reta_conf,
3898  uint16_t reta_size);
3899 
3919 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3920  uint8_t on);
3921 
3940 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
3941 
3964 int rte_eth_mirror_rule_set(uint16_t port_id,
3965  struct rte_eth_mirror_conf *mirror_conf,
3966  uint8_t rule_id,
3967  uint8_t on);
3968 
3983 int rte_eth_mirror_rule_reset(uint16_t port_id,
3984  uint8_t rule_id);
3985 
4002 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4003  uint16_t tx_rate);
4004 
4019 int rte_eth_dev_rss_hash_update(uint16_t port_id,
4020  struct rte_eth_rss_conf *rss_conf);
4021 
4036 int
4037 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4038  struct rte_eth_rss_conf *rss_conf);
4039 
4058 int
4059 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4060  struct rte_eth_udp_tunnel *tunnel_udp);
4061 
4081 int
4082 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4083  struct rte_eth_udp_tunnel *tunnel_udp);
4084 
4098 int rte_eth_dev_get_dcb_info(uint16_t port_id,
4099  struct rte_eth_dcb_info *dcb_info);
4100 
4101 struct rte_eth_rxtx_callback;
4102 
4128 const struct rte_eth_rxtx_callback *
4129 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4130  rte_rx_callback_fn fn, void *user_param);
4131 
4158 const struct rte_eth_rxtx_callback *
4159 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4160  rte_rx_callback_fn fn, void *user_param);
4161 
4187 const struct rte_eth_rxtx_callback *
4188 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4189  rte_tx_callback_fn fn, void *user_param);
4190 
4224 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4225  const struct rte_eth_rxtx_callback *user_cb);
4226 
4260 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4261  const struct rte_eth_rxtx_callback *user_cb);
4262 
4282 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4283  struct rte_eth_rxq_info *qinfo);
4284 
4304 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4305  struct rte_eth_txq_info *qinfo);
4306 
4325 __rte_experimental
4326 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4327  struct rte_eth_burst_mode *mode);
4328 
4347 __rte_experimental
4348 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4349  struct rte_eth_burst_mode *mode);
4350 
4369 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
4370 
4383 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
4384 
4401 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4402 
4419 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4420 
4439 __rte_experimental
4440 int
4441 rte_eth_dev_get_module_info(uint16_t port_id,
4442  struct rte_eth_dev_module_info *modinfo);
4443 
4463 __rte_experimental
4464 int
4465 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4466  struct rte_dev_eeprom_info *info);
4467 
4486 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4487  struct rte_ether_addr *mc_addr_set,
4488  uint32_t nb_mc_addr);
4489 
4502 int rte_eth_timesync_enable(uint16_t port_id);
4503 
4516 int rte_eth_timesync_disable(uint16_t port_id);
4517 
4536 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
4537  struct timespec *timestamp, uint32_t flags);
4538 
4554 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4555  struct timespec *timestamp);
4556 
4574 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
4575 
4590 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
4591 
4610 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
4611 
4656 __rte_experimental
4657 int
4658 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
4659 
4675 int
4676 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
4677 
4693 int
4694 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
4695 
4712 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4713  uint16_t *nb_rx_desc,
4714  uint16_t *nb_tx_desc);
4715 
4730 int
4731 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
4732 
4742 void *
4743 rte_eth_dev_get_sec_ctx(uint16_t port_id);
4744 
4759 __rte_experimental
4760 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
4761  struct rte_eth_hairpin_cap *cap);
4762 
4763 #include <rte_ethdev_core.h>
4764 
/**
 * Retrieve a burst of up to @p nb_pkts input packets from the given receive
 * queue of an Ethernet device, storing the mbuf pointers in @p rx_pkts.
 *
 * Fast-path function: with RTE_LIBRTE_ETHDEV_DEBUG disabled, no argument
 * validation is performed, so port_id and queue_id must be valid and the
 * queue must have been set up and started.
 *
 * @param port_id   Port identifier (index into rte_eth_devices[]).
 * @param queue_id  Index of the receive queue to poll.
 * @param rx_pkts   Output array of at least @p nb_pkts rte_mbuf pointers.
 * @param nb_pkts   Maximum number of packets to retrieve.
 * @return Number of packets actually retrieved (0 on debug-check failure).
 */
static inline uint16_t
rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	uint16_t nb_rx;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	/* Debug-only sanity checks; compiled out on release builds to keep
	 * the RX fast path branch-free.
	 */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
		return 0;
	}
#endif
	/* Dispatch to the PMD's receive handler for this queue. */
	nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
				     rx_pkts, nb_pkts);

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	struct rte_eth_rxtx_callback *cb;

	/* __ATOMIC_RELEASE memory order was used when the
	 * call back was inserted into the list.
	 * Since there is a clear dependency between loading
	 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
	 * not required.
	 */
	cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id],
			     __ATOMIC_RELAXED);

	/* Run each registered post-RX callback in list order; a callback may
	 * drop/replace packets, so nb_rx is re-fed through the chain.
	 */
	if (unlikely(cb != NULL)) {
		do {
			nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
					  nb_pkts, cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
	return nb_rx;
}
4895 
/**
 * Get the number of used descriptors of a receive queue.
 *
 * @param port_id   Port identifier of the Ethernet device.
 * @param queue_id  Index of the receive queue to query.
 * @return Number of used descriptors on success; -ENODEV if the port is
 *         invalid, -ENOTSUP if the driver does not implement the query,
 *         -EINVAL if the queue index is out of range or the queue is not
 *         set up.
 */
static inline int
rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	/* Unlike the burst functions, validation here is unconditional. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_queue_count, -ENOTSUP);
	if (queue_id >= dev->data->nb_rx_queues ||
	    dev->data->rx_queues[queue_id] == NULL)
		return -EINVAL;

	return (int)(*dev->rx_queue_count)(dev, queue_id);
}
4923 
4939 __rte_deprecated
4940 static inline int
4941 rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
4942 {
4943  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4944  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4945  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_done, -ENOTSUP);
4946  return (*dev->rx_descriptor_done)(dev->data->rx_queues[queue_id], offset);
4947 }
4948 
4949 #define RTE_ETH_RX_DESC_AVAIL 0
4950 #define RTE_ETH_RX_DESC_DONE 1
4951 #define RTE_ETH_RX_DESC_UNAVAIL 2
/**
 * Check the status (AVAIL/DONE/UNAVAIL) of an RX descriptor in the queue.
 *
 * @param port_id   Port identifier of the Ethernet device.
 * @param queue_id  Index of the receive queue.
 * @param offset    Descriptor offset from the next packet to be received.
 * @return RTE_ETH_RX_DESC_* status on success; -ENODEV for an invalid port
 *         or queue (debug builds only), -ENOTSUP if the driver does not
 *         implement the query. Without RTE_LIBRTE_ETHDEV_DEBUG, port_id and
 *         queue_id are not validated here.
 */
static inline int
rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
	uint16_t offset)
{
	struct rte_eth_dev *dev;
	void *rxq;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
	dev = &rte_eth_devices[port_id];
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	/* Debug-only queue bounds check. */
	if (queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;
#endif
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_status, -ENOTSUP);
	rxq = dev->data->rx_queues[queue_id];

	return (*dev->rx_descriptor_status)(rxq, offset);
}
5006 
5007 #define RTE_ETH_TX_DESC_FULL 0
5008 #define RTE_ETH_TX_DESC_DONE 1
5009 #define RTE_ETH_TX_DESC_UNAVAIL 2
/**
 * Check the status (FULL/DONE/UNAVAIL) of a TX descriptor in the queue.
 *
 * Mirror of rte_eth_rx_descriptor_status() for the transmit side.
 *
 * @param port_id   Port identifier of the Ethernet device.
 * @param queue_id  Index of the transmit queue.
 * @param offset    Descriptor offset from the next packet to be transmitted.
 * @return RTE_ETH_TX_DESC_* status on success; -ENODEV for an invalid port
 *         or queue (debug builds only), -ENOTSUP if the driver does not
 *         implement the query. Without RTE_LIBRTE_ETHDEV_DEBUG, port_id and
 *         queue_id are not validated here.
 */
static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
	uint16_t queue_id, uint16_t offset)
{
	struct rte_eth_dev *dev;
	void *txq;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
	dev = &rte_eth_devices[port_id];
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	/* Debug-only queue bounds check. */
	if (queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;
#endif
	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_descriptor_status, -ENOTSUP);
	txq = dev->data->tx_queues[queue_id];

	return (*dev->tx_descriptor_status)(txq, offset);
}
5063 
/**
 * Send a burst of up to @p nb_pkts output packets on the given transmit
 * queue of an Ethernet device.
 *
 * Fast-path function: with RTE_LIBRTE_ETHDEV_DEBUG disabled, no argument
 * validation is performed. Ownership of successfully transmitted mbufs is
 * transferred to the driver.
 *
 * @param port_id   Port identifier (index into rte_eth_devices[]).
 * @param queue_id  Index of the transmit queue.
 * @param tx_pkts   Array of @p nb_pkts rte_mbuf pointers to transmit.
 * @param nb_pkts   Number of packets to transmit.
 * @return Number of packets actually accepted by the driver
 *         (0 on debug-check failure).
 */
static inline uint16_t
rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	/* Debug-only sanity checks; compiled out on release builds to keep
	 * the TX fast path branch-free.
	 */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
		return 0;
	}
#endif

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	struct rte_eth_rxtx_callback *cb;

	/* __ATOMIC_RELEASE memory order was used when the
	 * call back was inserted into the list.
	 * Since there is a clear dependency between loading
	 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
	 * not required.
	 */
	cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id],
			     __ATOMIC_RELAXED);

	/* Run each registered pre-TX callback in list order; a callback may
	 * filter packets, so nb_pkts is re-fed through the chain before the
	 * driver is invoked.
	 */
	if (unlikely(cb != NULL)) {
		do {
			nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
					    cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts,
		nb_pkts);
	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
}
5171 
5226 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
5227 
/**
 * Prepare a burst of packets for transmission on the given queue: lets the
 * driver adjust/verify offload-related fields before rte_eth_tx_burst().
 *
 * @param port_id   Port identifier of the Ethernet device.
 * @param queue_id  Index of the transmit queue.
 * @param tx_pkts   Array of @p nb_pkts rte_mbuf pointers to prepare.
 * @param nb_pkts   Number of packets to prepare.
 * @return Number of packets successfully prepared. On a debug-check
 *         failure, returns 0 and sets rte_errno (ENODEV for a bad port,
 *         EINVAL for a bad queue). If the driver has no prepare handler,
 *         all packets are reported as ready.
 */
static inline uint16_t
rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
		rte_errno = ENODEV;
		return 0;
	}
#endif

	dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
		rte_errno = EINVAL;
		return 0;
	}
#endif

	/* Drivers without a prepare handler need no preparation: report the
	 * whole burst as ready.
	 */
	if (!dev->tx_pkt_prepare)
		return nb_pkts;

	return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
			tx_pkts, nb_pkts);
}
5258 
5259 #else
5260 
5261 /*
5262  * Native NOOP operation for compilation targets which do not require any
5263  * preparation steps, where a functional NOOP may introduce an unnecessary
5264  * performance drop.
5265  *
5266  * Generally it is not a good idea to turn this on globally, and it should
5267  * not be used if the behavior of tx_prepare can change.
5268  */
5269 
/**
 * NOOP variant of rte_eth_tx_prepare(), selected by defining
 * RTE_ETHDEV_TX_PREPARE_NOOP: performs no validation and no driver call.
 *
 * @return @p nb_pkts unchanged — every packet is reported as ready.
 */
static inline uint16_t
rte_eth_tx_prepare(__rte_unused uint16_t port_id,
		__rte_unused uint16_t queue_id,
		__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	return nb_pkts;
}
5277 
5278 #endif
5279 
5302 static inline uint16_t
5303 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
5304  struct rte_eth_dev_tx_buffer *buffer)
5305 {
5306  uint16_t sent;
5307  uint16_t to_send = buffer->length;
5308 
5309  if (to_send == 0)
5310  return 0;
5311 
5312  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
5313 
5314  buffer->length = 0;
5315 
5316  /* All packets sent, or to be dealt with by callback below */
5317  if (unlikely(sent != to_send))
5318  buffer->error_callback(&buffer->pkts[sent],
5319  (uint16_t)(to_send - sent),
5320  buffer->error_userdata);
5321 
5322  return sent;
5323 }
5324 
5355 static __rte_always_inline uint16_t
5356 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
5357  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
5358 {
5359  buffer->pkts[buffer->length++] = tx_pkt;
5360  if (buffer->length < buffer->size)
5361  return 0;
5362 
5363  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
5364 }
5365 
5366 #ifdef __cplusplus
5367 }
5368 #endif
5369 
5370 #endif /* _RTE_ETHDEV_H_ */
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1546
int rte_eth_dev_stop(uint16_t port_id)
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_promiscuous_disable(uint16_t port_id)
__extension__ uint32_t multi_pools
Definition: rte_ethdev.h:1488
uint16_t link_duplex
Definition: rte_ethdev.h:314
#define ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:774
uint32_t rmv
Definition: rte_ethdev.h:1285
#define __rte_always_inline
Definition: rte_common.h:231
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:1062
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
uint16_t nb_desc
Definition: rte_ethdev.h:1589
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1524
const uint32_t * dev_flags
Definition: rte_ethdev.h:1511
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
__rte_experimental int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
#define __rte_cache_min_aligned
Definition: rte_common.h:408
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:5229
struct rte_device * device
Definition: rte_ethdev.h:1505
int rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
rte_eth_nb_tcs
Definition: rte_ethdev.h:855
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:265
__rte_experimental int rte_eth_dev_owner_new(uint64_t *owner_id)
__rte_experimental int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
uint16_t rte_eth_find_next(uint16_t port_id)
__rte_experimental int rte_eth_dev_is_removed(uint16_t port_id)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
int rte_eth_led_off(uint16_t port_id)
rte_fdir_pballoc_type
Definition: rte_ethdev.h:1232
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:4987
__rte_experimental uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
uint64_t imissed
Definition: rte_ethdev.h:249
uint32_t low_water
Definition: rte_ethdev.h:1192
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
uint32_t max_rx_pkt_len
Definition: rte_ethdev.h:403
uint8_t rss_key_len
Definition: rte_ethdev.h:456
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint16_t tx_rate)
uint8_t hthresh
Definition: rte_ethdev.h:335
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
__rte_experimental int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1528
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
uint16_t reta_size
Definition: rte_ethdev.h:1532
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint32_t lpbk_mode
Definition: rte_ethdev.h:1303
#define rte_errno
Definition: rte_errno.h:29
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint32_t link_speeds
Definition: rte_ethdev.h:1294
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1530
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:383
rte_eth_fc_mode
Definition: rte_ethdev.h:1178
int rte_eth_mirror_rule_set(uint16_t port_id, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on)
uint8_t enable_default_pool
Definition: rte_ethdev.h:907
uint32_t max_hash_mac_addrs
Definition: rte_ethdev.h:1520
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:1172
#define __rte_unused
Definition: rte_common.h:121
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:263
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
uint64_t opackets
Definition: rte_ethdev.h:246
uint8_t hash_key_size
Definition: rte_ethdev.h:1534
int rte_eth_dev_close(uint16_t port_id)
struct rte_mempool * mp
Definition: rte_ethdev.h:1011
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
uint16_t split_hdr_size
Definition: rte_ethdev.h:406
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
struct rte_mempool * mp
Definition: rte_ethdev.h:1576
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1327
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:261
const char * name
Definition: rte_ethdev.h:1468
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
uint32_t rxq
Definition: rte_ethdev.h:1283
int rte_eth_dev_set_link_up(uint16_t port_id)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
__rte_experimental int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
__rte_experimental int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1540
uint8_t rx_deferred_start
Definition: rte_ethdev.h:1036
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:3290
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:3485
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:257
uint32_t high_water
Definition: rte_ethdev.h:1191
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1613
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
int rte_eth_timesync_disable(uint16_t port_id)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint64_t offloads
Definition: rte_ethdev.h:1043
uint32_t link_speed
Definition: rte_ethdev.h:313
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num)
uint16_t send_xon
Definition: rte_ethdev.h:1194
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_allmulticast_enable(uint16_t port_id)
uint32_t offset_allowed
Definition: rte_ethdev.h:1489
#define unlikely(x)
uint64_t ibytes
Definition: rte_ethdev.h:247
uint32_t offset_align_log2
Definition: rte_ethdev.h:1490
uint64_t offloads
Definition: rte_ethdev.h:1072
uint16_t max_nb_queues
Definition: rte_ethdev.h:1086
uint64_t oerrors
Definition: rte_ethdev.h:254
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
uint16_t max_mtu
Definition: rte_ethdev.h:1510
uint64_t offloads
Definition: rte_ethdev.h:412
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
uint16_t link_autoneg
Definition: rte_ethdev.h:315
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
uint16_t tx_free_thresh
Definition: rte_ethdev.h:1063
uint16_t nb_desc
Definition: rte_ethdev.h:1579
__rte_experimental uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:4853
int rte_eth_allmulticast_disable(uint16_t port_id)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_start(uint16_t port_id)
uint16_t rx_buf_size
Definition: rte_ethdev.h:1580
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1522
uint8_t scattered_rx
Definition: rte_ethdev.h:1578
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
uint64_t offloads
Definition: rte_ethdev.h:959
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1541
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1526
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:259
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
uint16_t min_mtu
Definition: rte_ethdev.h:1509
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:1778
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
uint64_t obytes
Definition: rte_ethdev.h:248
__rte_experimental int rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
uint8_t enable_loop_back
Definition: rte_ethdev.h:940
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
#define ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:775
rte_eth_fec_mode
Definition: rte_ethdev.h:1683
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
uint16_t max_tx_queues
Definition: rte_ethdev.h:1517
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_xstats_reset(uint16_t port_id)
rte_eth_dev_state
Definition: rte_ethdev.h:1784
uint16_t rx_free_thresh
Definition: rte_ethdev.h:1034
uint64_t dev_capa
Definition: rte_ethdev.h:1553
uint64_t ierrors
Definition: rte_ethdev.h:253
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction)
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1536
uint16_t rte_eth_dev_count_total(void)
int rte_eth_promiscuous_enable(uint16_t port_id)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
union rte_eth_rxseg * rx_seg
Definition: rte_ethdev.h:1051
rte_vlan_type
Definition: rte_ethdev.h:422
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
__rte_experimental int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
uint16_t nb_seg_max
Definition: rte_ethdev.h:1159
uint64_t ipackets
Definition: rte_ethdev.h:245
uint16_t max_vfs
Definition: rte_ethdev.h:1521
uint16_t pause_time
Definition: rte_ethdev.h:1193
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
uint64_t rx_nombuf
Definition: rte_ethdev.h:255
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:5356
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
#define ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:342
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1539
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:3429
rte_eth_nb_pools
Definition: rte_ethdev.h:864
#define ETH_MIRROR_MAX_VLANS
Definition: rte_ethdev.h:810
uint16_t nb_align
Definition: rte_ethdev.h:1149
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:350
__rte_experimental const char * rte_eth_link_speed_to_str(uint32_t link_speed)
__rte_experimental int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
const char * driver_name
Definition: rte_ethdev.h:1506
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:4910
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_socket_id(uint16_t port_id)
uint8_t enable_default_pool
Definition: rte_ethdev.h:938
__extension__ struct rte_eth_link __rte_aligned(8)
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1547
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1518
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
rte_eth_tunnel_type
Definition: rte_ethdev.h:1213
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint64_t value
Definition: rte_ethdev.h:1626
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition: rte_ethdev.h:600
__rte_experimental int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
__rte_experimental int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1513
__rte_experimental int rte_eth_dev_owner_delete(const uint64_t owner_id)
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
static __rte_deprecated int rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:4941
uint64_t rss_hf
Definition: rte_ethdev.h:457
uint64_t id
Definition: rte_ethdev.h:1625
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
unsigned int if_index
Definition: rte_ethdev.h:1507
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:1196
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:1754
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
rte_fdir_mode
Definition: rte_eth_ctrl.h:425
uint8_t * rss_key
Definition: rte_ethdev.h:455
rte_fdir_status_mode
Definition: rte_ethdev.h:1241
uint8_t tx_deferred_start
Definition: rte_ethdev.h:1066
uint8_t wthresh
Definition: rte_ethdev.h:336
uint16_t max_rx_queues
Definition: rte_ethdev.h:1516
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:405
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
int rte_eth_allmulticast_get(uint16_t port_id)
uint8_t rx_drop_en
Definition: rte_ethdev.h:1035
int rte_eth_dev_is_valid_port(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint16_t rx_nseg
Definition: rte_ethdev.h:1037
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1515
int rte_eth_timesync_enable(uint16_t port_id)
uint8_t pthresh
Definition: rte_ethdev.h:334
uint32_t speed_capa
Definition: rte_ethdev.h:1544
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:5131
uint8_t drop_queue
Definition: rte_ethdev.h:1258
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_experimental int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1512
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
__rte_experimental int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
uint32_t lsc
Definition: rte_ethdev.h:1281
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:5303
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
rte_eth_event_type
Definition: rte_ethdev.h:3468