DPDK 19.05.0
rte_ethdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#ifndef _RTE_ETHDEV_H_
#define _RTE_ETHDEV_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>

/* Use this macro to check if LRO API is supported */
#define RTE_ETHDEV_HAS_LRO_SUPPORT

#include <rte_compat.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_dev.h>
#include <rte_devargs.h>
#include <rte_errno.h>
#include <rte_common.h>
#include <rte_config.h>

#include "rte_ether.h"
#include "rte_dev_info.h"

extern int rte_eth_dev_logtype;

#define RTE_ETHDEV_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)

struct rte_mbuf;

int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);

uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);

void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);

#define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
	for (rte_eth_iterator_init(iter, devargs), \
	     id = rte_eth_iterator_next(iter); \
	     id != RTE_MAX_ETHPORTS; \
	     id = rte_eth_iterator_next(iter))

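/*
 * Illustrative usage sketch (not part of the original header): walk every
 * port matching a devargs string. The devargs value below is only an
 * example and depends on the drivers present on the system.
 *
 * @code
 * struct rte_dev_iterator iterator;
 * uint16_t port_id;
 *
 * RTE_ETH_FOREACH_MATCHING_DEV(port_id, "net_tap0", &iterator)
 *         printf("matched port %u\n", port_id);
 * @endcode
 */
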
/**
 * A structure used to retrieve statistics for an Ethernet port.
 * Not all statistics fields are supported by every type of NIC;
 * unsupported fields read as 0.
 */
struct rte_eth_stats {
	uint64_t ipackets;  /**< Total number of successfully received packets. */
	uint64_t opackets;  /**< Total number of successfully transmitted packets. */
	uint64_t ibytes;    /**< Total number of successfully received bytes. */
	uint64_t obytes;    /**< Total number of successfully transmitted bytes. */
	uint64_t imissed;   /**< Total of Rx packets dropped by the HW (e.g. Rx queues full). */
	uint64_t ierrors;   /**< Total number of erroneous received packets. */
	uint64_t oerrors;   /**< Total number of failed transmitted packets. */
	uint64_t rx_nombuf; /**< Total number of Rx mbuf allocation failures. */
	uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]; /**< Per-queue Rx packets. */
	uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]; /**< Per-queue Tx packets. */
	uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];   /**< Per-queue Rx bytes. */
	uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];   /**< Per-queue Tx bytes. */
	uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];   /**< Per-queue Rx error packets. */
};

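/*
 * Illustrative sketch (not part of the original header): read and print the
 * basic counters of a started port with rte_eth_stats_get(), declared later
 * in this file.
 *
 * @code
 * struct rte_eth_stats stats;
 *
 * if (rte_eth_stats_get(port_id, &stats) == 0)
 *         printf("rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64 "\n",
 *                stats.ipackets, stats.opackets, stats.imissed);
 * @endcode
 */
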
/**
 * Device supported speeds bitmap flags
 */
#define ETH_LINK_SPEED_AUTONEG (0 << 0)  /**< Autonegotiate (all speeds) */
#define ETH_LINK_SPEED_FIXED   (1 << 0)  /**< Disable autoneg (fixed speed) */
#define ETH_LINK_SPEED_10M_HD  (1 << 1)  /**< 10 Mbps half-duplex */
#define ETH_LINK_SPEED_10M     (1 << 2)  /**< 10 Mbps full-duplex */
#define ETH_LINK_SPEED_100M_HD (1 << 3)  /**< 100 Mbps half-duplex */
#define ETH_LINK_SPEED_100M    (1 << 4)  /**< 100 Mbps full-duplex */
#define ETH_LINK_SPEED_1G      (1 << 5)
#define ETH_LINK_SPEED_2_5G    (1 << 6)
#define ETH_LINK_SPEED_5G      (1 << 7)
#define ETH_LINK_SPEED_10G     (1 << 8)
#define ETH_LINK_SPEED_20G     (1 << 9)
#define ETH_LINK_SPEED_25G     (1 << 10)
#define ETH_LINK_SPEED_40G     (1 << 11)
#define ETH_LINK_SPEED_50G     (1 << 12)
#define ETH_LINK_SPEED_56G     (1 << 13)
#define ETH_LINK_SPEED_100G    (1 << 14)

/**
 * Ethernet numeric link speeds in Mbps
 */
#define ETH_SPEED_NUM_NONE 0      /**< Not defined */
#define ETH_SPEED_NUM_10M  10
#define ETH_SPEED_NUM_100M 100
#define ETH_SPEED_NUM_1G   1000
#define ETH_SPEED_NUM_2_5G 2500
#define ETH_SPEED_NUM_5G   5000
#define ETH_SPEED_NUM_10G  10000
#define ETH_SPEED_NUM_20G  20000
#define ETH_SPEED_NUM_25G  25000
#define ETH_SPEED_NUM_40G  40000
#define ETH_SPEED_NUM_50G  50000
#define ETH_SPEED_NUM_56G  56000
#define ETH_SPEED_NUM_100G 100000
/**
 * A structure used to retrieve link-level information of an Ethernet port.
 */
__extension__
struct rte_eth_link {
	uint32_t link_speed;        /**< ETH_SPEED_NUM_ */
	uint16_t link_duplex  : 1;  /**< ETH_LINK_[HALF/FULL]_DUPLEX */
	uint16_t link_autoneg : 1;  /**< ETH_LINK_[AUTONEG/FIXED] */
	uint16_t link_status  : 1;  /**< ETH_LINK_[DOWN/UP] */
} __attribute__((aligned(8)));      /**< aligned for atomic64 read/write */

/* Utility constants */
#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
#define ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
#define ETH_LINK_UP          1 /**< Link is up (see link_status). */
#define ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
#define ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
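/*
 * Illustrative sketch (not part of the original header): poll the link state
 * without blocking, using rte_eth_link_get_nowait() declared later in this
 * file.
 *
 * @code
 * struct rte_eth_link link;
 *
 * rte_eth_link_get_nowait(port_id, &link);
 * if (link.link_status == ETH_LINK_UP)
 *         printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 * @endcode
 */
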
/**
 * A structure used to configure the ring threshold registers of an Rx/Tx
 * queue for an Ethernet port.
 */
struct rte_eth_thresh {
	uint8_t pthresh; /**< Ring prefetch threshold. */
	uint8_t hthresh; /**< Ring host threshold. */
	uint8_t wthresh; /**< Ring writeback threshold. */
};

/**
 * Simple flags to indicate Rx multi-queue mode (RSS, DCB, VMDq).
 */
#define ETH_MQ_RX_RSS_FLAG  0x1
#define ETH_MQ_RX_DCB_FLAG  0x2
#define ETH_MQ_RX_VMDQ_FLAG 0x4

/** A set of values to identify what method is to be used to route
 *  packets to multiple queues. */
enum rte_eth_rx_mq_mode {
	ETH_MQ_RX_NONE = 0, /**< None of DCB, RSS or VMDq mode */
	ETH_MQ_RX_RSS = ETH_MQ_RX_RSS_FLAG, /**< Rx side: only RSS is on */
	ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG, /**< Rx side: only DCB is on */
	ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG, /**< Both DCB and RSS enabled */

	ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG, /**< Only VMDq, no RSS nor DCB */
	ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG, /**< RSS mode with VMDq */
	ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG, /**< VMDq+DCB routing */
	/** Enable both VMDq and DCB in VMDq */
	ETH_MQ_RX_VMDQ_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG |
				 ETH_MQ_RX_VMDQ_FLAG,
};

/* for Rx mq mode backward compatibility */
#define ETH_RSS    ETH_MQ_RX_RSS
#define VMDQ_DCB   ETH_MQ_RX_VMDQ_DCB
#define ETH_DCB_RX ETH_MQ_RX_DCB

/** A set of values to identify what method is to be used to transmit
 *  packets using multi-TCs. */
enum rte_eth_tx_mq_mode {
	ETH_MQ_TX_NONE = 0,  /**< It is in neither DCB nor VT mode. */
	ETH_MQ_TX_DCB,       /**< For Tx side, only DCB is on. */
	ETH_MQ_TX_VMDQ_DCB,  /**< For Tx side, both DCB and VT is on. */
	ETH_MQ_TX_VMDQ_ONLY, /**< Only VT on, no DCB */
};

/* for Tx mq mode backward compatibility */
#define ETH_DCB_NONE    ETH_MQ_TX_NONE
#define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
#define ETH_DCB_TX      ETH_MQ_TX_DCB

/**
 * A structure used to configure the Rx features of an Ethernet port.
 */
struct rte_eth_rxmode {
	/** The multi-queue packet distribution mode to be used, e.g. RSS. */
	enum rte_eth_rx_mq_mode mq_mode;
	uint32_t max_rx_pkt_len; /**< Only used if JUMBO_FRAME enabled. */
	uint16_t split_hdr_size; /**< hdr buf size (header_split enabled). */
	/**
	 * Per-port Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
	 * Only offloads set in the rx_offload_capa field of rte_eth_dev_info
	 * are allowed to be set.
	 */
	uint64_t offloads;
};

/**
 * VLAN types to indicate if it is for single VLAN, inner VLAN or outer VLAN.
 * Note that single VLAN is treated the same as inner VLAN.
 */
enum rte_vlan_type {
	ETH_VLAN_TYPE_UNKNOWN = 0,
	ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
	ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
	ETH_VLAN_TYPE_MAX,
};

/**
 * A structure used to describe a VLAN filter.
 * If the bit corresponding to a VID is set, such VID is on.
 */
struct rte_vlan_filter_conf {
	uint64_t ids[64];
};

/**
 * A structure used to configure the Receive Side Scaling (RSS) feature
 * of an Ethernet port.
 */
struct rte_eth_rss_conf {
	uint8_t *rss_key;    /**< If not NULL, 40-byte hash key. */
	uint8_t rss_key_len; /**< hash key length in bytes. */
	uint64_t rss_hf;     /**< Hash functions to apply - see ETH_RSS_*. */
};

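/*
 * Illustrative sketch (not part of the original header): request IP/TCP RSS
 * hashing at configure time through struct rte_eth_conf, defined later in
 * this file.
 *
 * @code
 * struct rte_eth_conf conf = {
 *         .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *         .rx_adv_conf.rss_conf = {
 *                 .rss_key = NULL,  // let the driver use its default key
 *                 .rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
 *         },
 * };
 * @endcode
 */
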
/*
 * A packet can be identified by hardware as belonging to one of several flow
 * types. Different NIC hardware may support different sets of flow types.
 * The NIC hardware reports the deepest protocol match possible, and the flow
 * types are mutually exclusive. For example, a packet identified as
 * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP' will not be reported as any other flow
 * type, even though it is also an IPv4 packet.
 * Note that the flow types are used to define RSS offload types.
 */
#define RTE_ETH_FLOW_UNKNOWN             0
#define RTE_ETH_FLOW_RAW                 1
#define RTE_ETH_FLOW_IPV4                2
#define RTE_ETH_FLOW_FRAG_IPV4           3
#define RTE_ETH_FLOW_NONFRAG_IPV4_TCP    4
#define RTE_ETH_FLOW_NONFRAG_IPV4_UDP    5
#define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP   6
#define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER  7
#define RTE_ETH_FLOW_IPV6                8
#define RTE_ETH_FLOW_FRAG_IPV6           9
#define RTE_ETH_FLOW_NONFRAG_IPV6_TCP   10
#define RTE_ETH_FLOW_NONFRAG_IPV6_UDP   11
#define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP  12
#define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
#define RTE_ETH_FLOW_L2_PAYLOAD         14
#define RTE_ETH_FLOW_IPV6_EX            15
#define RTE_ETH_FLOW_IPV6_TCP_EX        16
#define RTE_ETH_FLOW_IPV6_UDP_EX        17
/** Consider device port number as a flow differentiator */
#define RTE_ETH_FLOW_PORT               18
#define RTE_ETH_FLOW_VXLAN              19 /**< VXLAN protocol based flow */
#define RTE_ETH_FLOW_GENEVE             20 /**< GENEVE protocol based flow */
#define RTE_ETH_FLOW_NVGRE              21 /**< NVGRE protocol based flow */
#define RTE_ETH_FLOW_VXLAN_GPE          22 /**< VXLAN-GPE protocol based flow */
#define RTE_ETH_FLOW_MAX                23

/*
 * The RSS offload types are defined based on flow types.
 * Different NIC hardware may support different RSS offload
 * types. The supported flow types or RSS offload types can be queried by
 * rte_eth_dev_info_get().
 */
#define ETH_RSS_IPV4               (1ULL << RTE_ETH_FLOW_IPV4)
#define ETH_RSS_FRAG_IPV4          (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
#define ETH_RSS_IPV6               (1ULL << RTE_ETH_FLOW_IPV6)
#define ETH_RSS_FRAG_IPV6          (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
#define ETH_RSS_L2_PAYLOAD         (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
#define ETH_RSS_IPV6_EX            (1ULL << RTE_ETH_FLOW_IPV6_EX)
#define ETH_RSS_IPV6_TCP_EX        (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
#define ETH_RSS_IPV6_UDP_EX        (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
#define ETH_RSS_PORT               (1ULL << RTE_ETH_FLOW_PORT)
#define ETH_RSS_VXLAN              (1ULL << RTE_ETH_FLOW_VXLAN)
#define ETH_RSS_GENEVE             (1ULL << RTE_ETH_FLOW_GENEVE)
#define ETH_RSS_NVGRE              (1ULL << RTE_ETH_FLOW_NVGRE)

#define ETH_RSS_IP ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_IPV6_EX)

#define ETH_RSS_UDP ( \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_IPV6_UDP_EX)

#define ETH_RSS_TCP ( \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_IPV6_TCP_EX)

#define ETH_RSS_SCTP ( \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV6_SCTP)

#define ETH_RSS_TUNNEL ( \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

/**< Mask of valid RSS hash protocols */
#define ETH_RSS_PROTO_MASK ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_NONFRAG_IPV6_SCTP | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_L2_PAYLOAD | \
	ETH_RSS_IPV6_EX | \
	ETH_RSS_IPV6_TCP_EX | \
	ETH_RSS_IPV6_UDP_EX | \
	ETH_RSS_PORT | \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

/*
 * Definitions used for redirection table entry size.
 * Some RSS RETA sizes may not be supported by some drivers, check the
 * documentation or the description of relevant functions for more details.
 */
#define ETH_RSS_RETA_SIZE_64  64
#define ETH_RSS_RETA_SIZE_128 128
#define ETH_RSS_RETA_SIZE_256 256
#define ETH_RSS_RETA_SIZE_512 512
#define RTE_RETA_GROUP_SIZE   64

/* Definitions used for VMDq and DCB functionality */
#define ETH_VMDQ_MAX_VLAN_FILTERS   64  /**< Maximum nb. of VMDq VLAN filters. */
#define ETH_DCB_NUM_USER_PRIORITIES 8   /**< Maximum nb. of DCB priorities. */
#define ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDq DCB queues. */
#define ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */

/* DCB capability defines */
#define ETH_DCB_PG_SUPPORT  0x00000001 /**< Priority Group (ETS) support. */
#define ETH_DCB_PFC_SUPPORT 0x00000002 /**< Priority Flow Control support. */

/* Definitions used for VLAN Offload functionality */
#define ETH_VLAN_STRIP_OFFLOAD  0x0001 /**< VLAN Strip  On/Off */
#define ETH_VLAN_FILTER_OFFLOAD 0x0002 /**< VLAN Filter On/Off */
#define ETH_VLAN_EXTEND_OFFLOAD 0x0004 /**< VLAN Extend On/Off */

/* Definitions used for mask VLAN setting */
#define ETH_VLAN_STRIP_MASK  0x0001 /**< VLAN Strip  setting mask */
#define ETH_VLAN_FILTER_MASK 0x0002 /**< VLAN Filter setting mask */
#define ETH_VLAN_EXTEND_MASK 0x0004 /**< VLAN Extend setting mask */
#define ETH_VLAN_ID_MAX      0x0FFF /**< VLAN ID is in the lower 12 bits */

/* Definitions used for receive MAC address */
#define ETH_NUM_RECEIVE_MAC_ADDR 128 /**< Maximum nb. of receive MAC addresses. */

/* Definitions used for unicast hash */
#define ETH_VMDQ_NUM_UC_HASH_ARRAY 128 /**< Maximum nb. of UC hash array entries. */

/* Definitions used for VMDq pool rx mode setting */
#define ETH_VMDQ_ACCEPT_UNTAG     0x0001 /**< accept untagged packets. */
#define ETH_VMDQ_ACCEPT_HASH_MC   0x0002 /**< accept packets in multicast table. */
#define ETH_VMDQ_ACCEPT_HASH_UC   0x0004 /**< accept packets in unicast table. */
#define ETH_VMDQ_ACCEPT_BROADCAST 0x0008 /**< accept broadcast packets. */
#define ETH_VMDQ_ACCEPT_MULTICAST 0x0010 /**< accept multicast packets. */

/** Maximum nb. of VLANs per mirror rule */
#define ETH_MIRROR_MAX_VLANS 64

#define ETH_MIRROR_VIRTUAL_POOL_UP   0x01 /**< Virtual Pool uplink Mirroring. */
#define ETH_MIRROR_UPLINK_PORT       0x02 /**< Uplink Port Mirroring. */
#define ETH_MIRROR_DOWNLINK_PORT     0x04 /**< Downlink Port Mirroring. */
#define ETH_MIRROR_VLAN              0x08 /**< VLAN Mirroring. */
#define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10 /**< Virtual Pool downlink Mirroring. */
/**
 * A structure used to configure VLAN traffic mirroring of an Ethernet port.
 */
struct rte_eth_vlan_mirror {
	uint64_t vlan_mask; /**< mask for valid VLAN ID. */
	/** VLAN ID list for VLAN mirroring. */
	uint16_t vlan_id[ETH_MIRROR_MAX_VLANS];
};

/**
 * A structure used to configure traffic mirroring of an Ethernet port.
 */
struct rte_eth_mirror_conf {
	uint8_t rule_type;  /**< Mirroring rule type */
	uint8_t dst_pool;   /**< Destination pool for this mirror rule. */
	uint64_t pool_mask; /**< Bitmap of pools for pool mirroring */
	/** VLAN ID setting for VLAN mirroring. */
	struct rte_eth_vlan_mirror vlan;
};

/**
 * A structure used to configure 64 entries of the Redirection Table of the
 * Receive Side Scaling (RSS) feature of an Ethernet port. To configure more
 * than 64 entries supported by hardware, an array of this structure is needed.
 */
struct rte_eth_rss_reta_entry64 {
	uint64_t mask;
	/**< Mask bits indicate which entries in the table to update. */
	uint16_t reta[RTE_RETA_GROUP_SIZE];
	/**< Group of 64 redirection table entries. */
};

/**
 * This enum indicates the possible number of traffic classes
 * in DCB configurations.
 */
enum rte_eth_nb_tcs {
	ETH_4_TCS = 4, /**< 4 TCs with DCB. */
	ETH_8_TCS = 8  /**< 8 TCs with DCB. */
};

/**
 * This enum indicates the possible number of queue pools
 * in VMDq configurations.
 */
enum rte_eth_nb_pools {
	ETH_8_POOLS = 8,   /**< 8 VMDq pools. */
	ETH_16_POOLS = 16, /**< 16 VMDq pools. */
	ETH_32_POOLS = 32, /**< 32 VMDq pools. */
	ETH_64_POOLS = 64  /**< 64 VMDq pools. */
};

/* This structure may be extended in future. */
struct rte_eth_dcb_rx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */
	/** Traffic class each UP mapped to. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

struct rte_eth_vmdq_dcb_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
	/** Traffic class each UP mapped to. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

struct rte_eth_dcb_tx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
	/** Traffic class each UP mapped to. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

struct rte_eth_vmdq_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< VMDq mode, 64 pools. */
};

/**
 * A structure used to configure the VMDq+DCB feature of an Ethernet port.
 * Packets are routed to a pool of queues based on the VLAN ID in the VLAN
 * tag, then to a specific queue within that pool using the user priority
 * field. A default pool may be used to route traffic that does not match
 * the VLAN filter rules.
 */
struct rte_eth_vmdq_dcb_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools */
	uint8_t enable_default_pool; /**< If non-zero, use a default pool */
	uint8_t default_pool; /**< The default pool, if applicable */
	uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */
	struct {
		uint16_t vlan_id; /**< The VLAN ID of the received frame */
		uint64_t pools;   /**< Bitmask of pools for packet Rx */
	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
	/** Selects a queue in a pool */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

/**
 * A structure used to configure the VMDq feature of an Ethernet port when
 * not combined with the DCB feature. Packets are routed to a pool of queues
 * based on the MAC address and, as configured in pool_map, the VLAN ID in
 * the VLAN tag. A default pool may be used to route traffic that does not
 * match the VLAN filter rules or any pool MAC address.
 */
struct rte_eth_vmdq_rx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< VMDq only mode, 8 or 64 pools */
	uint8_t enable_default_pool; /**< If non-zero, use a default pool */
	uint8_t default_pool; /**< The default pool, if applicable */
	uint8_t enable_loop_back; /**< Enable VT loop back */
	uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */
	uint32_t rx_mode; /**< Flags from ETH_VMDQ_ACCEPT_* */
	struct {
		uint16_t vlan_id; /**< The VLAN ID of the received frame */
		uint64_t pools;   /**< Bitmask of pools for packet Rx */
	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
};

/**
 * A structure used to configure the Tx features of an Ethernet port.
 */
struct rte_eth_txmode {
	enum rte_eth_tx_mq_mode mq_mode; /**< Tx multi-queues mode. */
	/**
	 * Per-port Tx offloads to be set using DEV_TX_OFFLOAD_* flags.
	 * Only offloads set in the tx_offload_capa field of rte_eth_dev_info
	 * are allowed to be set.
	 */
	uint64_t offloads;

	/* For i40e specifically */
	uint16_t pvid;
	__extension__
	uint8_t hw_vlan_reject_tagged : 1,
		/**< If set, reject sending out tagged pkts */
		hw_vlan_reject_untagged : 1,
		/**< If set, reject sending out untagged pkts */
		hw_vlan_insert_pvid : 1;
		/**< If set, enable port based VLAN insertion */
};

/**
 * A structure used to configure an Rx ring of an Ethernet port.
 */
struct rte_eth_rxconf {
	struct rte_eth_thresh rx_thresh; /**< Rx ring threshold registers. */
	uint16_t rx_free_thresh; /**< Drives the freeing of Rx descriptors. */
	uint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */
	uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
	/**
	 * Per-queue Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
	 * Only offloads set in the [rx_queue_]offload_capa fields of
	 * rte_eth_dev_info are allowed to be set.
	 */
	uint64_t offloads;
};

/**
 * A structure used to configure a Tx ring of an Ethernet port.
 */
struct rte_eth_txconf {
	struct rte_eth_thresh tx_thresh; /**< Tx ring threshold registers. */
	uint16_t tx_rs_thresh; /**< Drives the setting of the RS bit on TXDs. */
	uint16_t tx_free_thresh; /**< Start freeing Tx buffers if there are
				      fewer free descriptors than this value. */
	uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
	/**
	 * Per-queue Tx offloads to be set using DEV_TX_OFFLOAD_* flags.
	 * Only offloads set in the [tx_queue_]offload_capa fields of
	 * rte_eth_dev_info are allowed to be set.
	 */
	uint64_t offloads;
};

/**
 * A structure containing information about HW descriptor ring limitations.
 */
struct rte_eth_desc_lim {
	uint16_t nb_max;   /**< Max allowed number of descriptors. */
	uint16_t nb_min;   /**< Min allowed number of descriptors. */
	uint16_t nb_align; /**< Number of descriptors should be aligned to. */
	/** Max allowed number of segments per whole packet. */
	uint16_t nb_seg_max;
	/** Max number of segments per one MTU. */
	uint16_t nb_mtu_seg_max;
};

/**
 * This enum indicates the flow control mode.
 */
enum rte_eth_fc_mode {
	RTE_FC_NONE = 0, /**< Disable flow control. */
	RTE_FC_RX_PAUSE, /**< Rx pause frame, enable flowctrl on Tx side. */
	RTE_FC_TX_PAUSE, /**< Tx pause frame, enable flowctrl on Rx side. */
	RTE_FC_FULL      /**< Enable flow control on both sides. */
};

/**
 * A structure used to configure Ethernet flow control parameters.
 */
struct rte_eth_fc_conf {
	uint32_t high_water;  /**< High threshold value to trigger XOFF. */
	uint32_t low_water;   /**< Low threshold value to trigger XON. */
	uint16_t pause_time;  /**< Pause quota in the Pause frame. */
	uint16_t send_xon;    /**< Is XON frame need be sent. */
	enum rte_eth_fc_mode mode;  /**< Link flow control mode. */
	uint8_t mac_ctrl_frame_fwd; /**< Forward MAC control frames. */
	uint8_t autoneg;      /**< Use Pause autoneg. */
};

/**
 * A structure used to configure Ethernet priority flow control parameters.
 */
struct rte_eth_pfc_conf {
	struct rte_eth_fc_conf fc; /**< General flow control parameter. */
	uint8_t priority;          /**< VLAN User Priority. */
};

/**
 * Tunneled type.
 */
enum rte_eth_tunnel_type {
	RTE_TUNNEL_TYPE_NONE = 0,
	RTE_TUNNEL_TYPE_VXLAN,
	RTE_TUNNEL_TYPE_GENEVE,
	RTE_TUNNEL_TYPE_TEREDO,
	RTE_TUNNEL_TYPE_NVGRE,
	RTE_TUNNEL_TYPE_IP_IN_GRE,
	RTE_L2_TUNNEL_TYPE_E_TAG,
	RTE_TUNNEL_TYPE_VXLAN_GPE,
	RTE_TUNNEL_TYPE_MAX,
};

/* Deprecated API file for rte_eth_dev_filter_* functions */
#include "rte_eth_ctrl.h"

/** Memory space that can be configured to store Flow Director filters. */
enum rte_fdir_pballoc_type {
	RTE_FDIR_PBALLOC_64K = 0, /**< 64k. */
	RTE_FDIR_PBALLOC_128K,    /**< 128k. */
	RTE_FDIR_PBALLOC_256K,    /**< 256k. */
};

/** Select report mode of FDIR hash information in Rx descriptors. */
enum rte_fdir_status_mode {
	RTE_FDIR_NO_REPORT_STATUS = 0, /**< Never report FDIR hash. */
	RTE_FDIR_REPORT_STATUS, /**< Only report FDIR hash for matching pkts. */
	RTE_FDIR_REPORT_STATUS_ALWAYS, /**< Always report FDIR hash. */
};

/**
 * A structure used to configure the Flow Director (FDIR) feature
 * of an Ethernet port.
 */
struct rte_fdir_conf {
	enum rte_fdir_mode mode; /**< Flow Director mode. */
	enum rte_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
	enum rte_fdir_status_mode status; /**< How to report FDIR hash. */
	/** Rx queue of packets matching a "drop" filter in perfect mode. */
	uint8_t drop_queue;
	struct rte_eth_fdir_masks mask;
	struct rte_eth_fdir_flex_conf flex_conf; /**< Flex payload configuration. */
};

/**
 * UDP tunneling configuration.
 * Used to configure the UDP port for a type of tunnel;
 * NICs need the UDP port to identify the tunnel type.
 */
struct rte_eth_udp_tunnel {
	uint16_t udp_port; /**< UDP port used for the tunnel. */
	uint8_t prot_type; /**< Tunnel type. Defined in rte_eth_tunnel_type. */
};

/**
 * A structure used to enable/disable specific device interrupts.
 */
struct rte_intr_conf {
	uint32_t lsc:1; /**< enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
	uint32_t rxq:1; /**< enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
	uint32_t rmv:1; /**< enable/disable rmv interrupt. 0 (default) - disable, 1 enable */
};

/**
 * A structure used to configure an Ethernet port.
 * Depending upon the Rx multi-queue mode, extra advanced
 * configuration settings may be needed.
 */
struct rte_eth_conf {
	uint32_t link_speeds; /**< bitmap of ETH_LINK_SPEED_XXX of speeds to be
				used. ETH_LINK_SPEED_FIXED disables link
				autonegotiation, and a unique speed shall be
				set. Otherwise, the bitmap defines the set of
				speeds to be advertised. If the special value
				ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
				supported are advertised. */
	struct rte_eth_rxmode rxmode; /**< Port Rx configuration. */
	struct rte_eth_txmode txmode; /**< Port Tx configuration. */
	uint32_t lpbk_mode; /**< Loopback operation mode. By default (0),
				loopback is disabled; possible non-zero values
				are driver specific. */
	struct {
		struct rte_eth_rss_conf rss_conf; /**< Port RSS configuration. */
		/** Port VMDq+DCB configuration. */
		struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
		/** Port DCB Rx configuration. */
		struct rte_eth_dcb_rx_conf dcb_rx_conf;
		/** Port VMDq Rx configuration. */
		struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
	} rx_adv_conf; /**< Port Rx filtering configuration. */
	union {
		/** Port VMDq+DCB Tx configuration. */
		struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
		/** Port DCB Tx configuration. */
		struct rte_eth_dcb_tx_conf dcb_tx_conf;
		/** Port VMDq Tx configuration. */
		struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
	} tx_adv_conf; /**< Port Tx DCB configuration (union). */
	/** Set to ETH_DCB_PFC_SUPPORT if DCB with Priority Flow Control
	    is needed. */
	uint32_t dcb_capability_en;
	struct rte_fdir_conf fdir_conf; /**< FDIR configuration. */
	struct rte_intr_conf intr_conf; /**< Interrupt mode configuration. */
};

/**
 * Rx offload capabilities of a device.
 */
#define DEV_RX_OFFLOAD_VLAN_STRIP       0x00000001
#define DEV_RX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_RX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_RX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_RX_OFFLOAD_TCP_LRO          0x00000010
#define DEV_RX_OFFLOAD_QINQ_STRIP       0x00000020
#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
#define DEV_RX_OFFLOAD_HEADER_SPLIT     0x00000100
#define DEV_RX_OFFLOAD_VLAN_FILTER      0x00000200
#define DEV_RX_OFFLOAD_VLAN_EXTEND      0x00000400
#define DEV_RX_OFFLOAD_JUMBO_FRAME      0x00000800
#define DEV_RX_OFFLOAD_SCATTER          0x00002000
#define DEV_RX_OFFLOAD_TIMESTAMP        0x00004000
#define DEV_RX_OFFLOAD_SECURITY         0x00008000
#define DEV_RX_OFFLOAD_KEEP_CRC         0x00010000
#define DEV_RX_OFFLOAD_SCTP_CKSUM       0x00020000
#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000

#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
				 DEV_RX_OFFLOAD_UDP_CKSUM | \
				 DEV_RX_OFFLOAD_TCP_CKSUM)
#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
			     DEV_RX_OFFLOAD_VLAN_FILTER | \
			     DEV_RX_OFFLOAD_VLAN_EXTEND)

/*
 * If new Rx offload capabilities are defined, they also must be
 * mentioned in rte_rx_offload_names in rte_ethdev.c file.
 */

/**
 * Tx offload capabilities of a device.
 */
#define DEV_TX_OFFLOAD_VLAN_INSERT      0x00000001
#define DEV_TX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_TX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_TX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_TX_OFFLOAD_SCTP_CKSUM       0x00000010
#define DEV_TX_OFFLOAD_TCP_TSO          0x00000020
#define DEV_TX_OFFLOAD_UDP_TSO          0x00000040
#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_QINQ_INSERT      0x00000100
#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
/** Multiple threads can invoke rte_eth_tx_burst() concurrently on the same
 *  Tx queue without SW lock. */
#define DEV_TX_OFFLOAD_MT_LOCKFREE      0x00004000
/** Device supports multi segment send. */
#define DEV_TX_OFFLOAD_MULTI_SEGS       0x00008000
/** Device supports optimization for fast release of mbufs.
 *  When set, the application must guarantee that per-queue all mbufs come
 *  from the same mempool and have refcnt = 1. */
#define DEV_TX_OFFLOAD_MBUF_FAST_FREE   0x00010000
#define DEV_TX_OFFLOAD_SECURITY         0x00020000
/** Device supports generic UDP tunneled packet TSO.
 *  The application must set PKT_TX_TUNNEL_UDP and the other mbuf fields
 *  required for tunnel TSO. */
#define DEV_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
/** Device supports generic IP tunneled packet TSO.
 *  The application must set PKT_TX_TUNNEL_IP and the other mbuf fields
 *  required for tunnel TSO. */
#define DEV_TX_OFFLOAD_IP_TNL_TSO       0x00080000
/** Device supports outer UDP checksum. */
#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
/** Device supports match on metadata Tx offload.
 *  The application must set PKT_TX_METADATA and the mbuf metadata field. */
#define DEV_TX_OFFLOAD_MATCH_METADATA   0x00200000

/** Device supports Rx queue setup after device started. */
#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
/** Device supports Tx queue setup after device started. */
#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002

/*
 * If new Tx offload capabilities are defined, they also must be
 * mentioned in rte_tx_offload_names in rte_ethdev.c file.
 */

/*
 * Fallback default preferred Rx/Tx port parameters.
 * These are used if an application requests default parameters
 * but the PMD does not provide preferred values.
 */
#define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
#define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
#define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
#define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1

/**
 * Preferred Rx/Tx port parameters.
 * There are separate instances of this structure for transmission
 * and reception respectively.
 */
struct rte_eth_dev_portconf {
	uint16_t burst_size; /**< Device-preferred burst size */
	uint16_t ring_size;  /**< Device-preferred size of queue rings */
	uint16_t nb_queues;  /**< Device-preferred number of queues */
};

/**
 * Default value for switch domain id when ethdev does not support switch
 * domain definitions.
 */
#define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (0)

/**
 * Ethernet device associated switch information
 */
struct rte_eth_switch_info {
	const char *name;   /**< switch name */
	uint16_t domain_id; /**< switch domain id */
	/** Mapping to the device's physical switch port, as enumerated from
	 *  the perspective of the embedded interconnect/switch. For an
	 *  SR-IOV enabled device this may correspond to the VF ID. */
	uint16_t port_id;
};

/**
 * Ethernet device information
 */
struct rte_eth_dev_info {
	struct rte_device *device;  /**< Generic device information */
	const char *driver_name;    /**< Device driver name. */
	unsigned int if_index;      /**< Index to bound host interface, or 0 if none.
					 Use if_indextoname() to translate into an interface name. */
	uint16_t min_mtu;           /**< Minimum MTU allowed */
	uint16_t max_mtu;           /**< Maximum MTU allowed */
	const uint32_t *dev_flags;  /**< Device flags */
	uint32_t min_rx_bufsize;    /**< Minimum size of Rx buffer. */
	uint32_t max_rx_pktlen;     /**< Maximum configurable length of Rx pkt. */
	uint16_t max_rx_queues;     /**< Maximum number of Rx queues. */
	uint16_t max_tx_queues;     /**< Maximum number of Tx queues. */
	uint32_t max_mac_addrs;     /**< Maximum number of MAC addresses. */
	/** Maximum number of hash MAC addresses for MTA and UTA. */
	uint32_t max_hash_mac_addrs;
	uint16_t max_vfs;           /**< Maximum number of VFs. */
	uint16_t max_vmdq_pools;    /**< Maximum number of VMDq pools. */
	/** All Rx offload capabilities including all per-queue ones */
	uint64_t rx_offload_capa;
	/** All Tx offload capabilities including all per-queue ones */
	uint64_t tx_offload_capa;
	/** Device per-queue Rx offload capabilities. */
	uint64_t rx_queue_offload_capa;
	/** Device per-queue Tx offload capabilities. */
	uint64_t tx_queue_offload_capa;
	/** Device redirection table size, the total number of entries. */
	uint16_t reta_size;
	uint8_t hash_key_size;      /**< Hash key size in bytes */
	/** Bit mask of RSS offloads; the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;
	struct rte_eth_rxconf default_rxconf; /**< Default Rx configuration */
	struct rte_eth_txconf default_txconf; /**< Default Tx configuration */
	uint16_t vmdq_queue_base;   /**< First queue ID for VMDq pools. */
	uint16_t vmdq_queue_num;    /**< Queue number for VMDq pools. */
	uint16_t vmdq_pool_base;    /**< First ID of VMDq pools. */
	struct rte_eth_desc_lim rx_desc_lim; /**< Rx descriptors limits */
	struct rte_eth_desc_lim tx_desc_lim; /**< Tx descriptors limits */
	uint32_t speed_capa;        /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
	/* Configured number of Rx/Tx queues */
	uint16_t nb_rx_queues;      /**< Number of Rx queues. */
	uint16_t nb_tx_queues;      /**< Number of Tx queues. */
	/** Rx parameter recommendations */
	struct rte_eth_dev_portconf default_rxportconf;
	/** Tx parameter recommendations */
	struct rte_eth_dev_portconf default_txportconf;
	/** Generic device capabilities (RTE_ETH_DEV_CAPA_). */
	uint64_t dev_capa;
	/** Switching information for ports on a device with an embedded
	 *  managed interconnect/switch. */
	struct rte_eth_switch_info switch_info;
};

/**
 * Ethernet device Rx queue information structure.
 * Used to retrieve information about a configured queue.
 */
struct rte_eth_rxq_info {
	struct rte_mempool *mp;     /**< mempool used by that queue. */
	struct rte_eth_rxconf conf; /**< queue config parameters. */
	uint8_t scattered_rx;       /**< scattered packets Rx supported. */
	uint16_t nb_desc;           /**< configured number of RXDs. */
} __rte_cache_min_aligned;

/**
 * Ethernet device Tx queue information structure.
 * Used to retrieve information about a configured queue.
 */
struct rte_eth_txq_info {
	struct rte_eth_txconf conf; /**< queue config parameters. */
	uint16_t nb_desc;           /**< configured number of TXDs. */
} __rte_cache_min_aligned;

/** Maximum name length for extended statistics counters */
#define RTE_ETH_XSTATS_NAME_SIZE 64

/**
 * An Ethernet device extended statistic structure.
 * Used by rte_eth_xstats_get() to provide statistics that are not provided
 * in the generic rte_eth_stats structure. It maps an id, corresponding to
 * an index in the array returned by rte_eth_xstats_get_names(), to a
 * statistic value.
 */
struct rte_eth_xstat {
	uint64_t id;    /**< The index in the xstats name array. */
	uint64_t value; /**< The statistic counter value. */
};

/**
 * A name element for extended statistics. An array of this structure is
 * returned by rte_eth_xstats_get_names(); rte_eth_xstat references these
 * names by their array index.
 */
struct rte_eth_xstat_name {
	char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */
};

#define ETH_DCB_NUM_TCS   8
#define ETH_MAX_VMDQ_POOL 64

/**
 * A structure used to get the information of queue and
 * TC mapping on both Tx and Rx paths.
 */
struct rte_eth_dcb_tc_queue_mapping {
	/** Rx queues assigned to tc per pool */
	struct {
		uint8_t base;
		uint8_t nb_queue;
	} tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
	/** Tx queues assigned to tc per pool */
	struct {
		uint8_t base;
		uint8_t nb_queue;
	} tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
};

/**
 * A structure used to get the information of DCB,
 * including TC-UP mapping and queue-TC mapping.
 */
struct rte_eth_dcb_info {
	uint8_t nb_tcs; /**< number of TCs */
	uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
	uint8_t tc_bws[ETH_DCB_NUM_TCS]; /**< Tx BW percentage for each TC */
	/** Rx/Tx queues assigned to tc */
	struct rte_eth_dcb_tc_queue_mapping tc_queue;
};

/* Rx/Tx queue states */
#define RTE_ETH_QUEUE_STATE_STOPPED 0
#define RTE_ETH_QUEUE_STATE_STARTED 1

#define RTE_ETH_ALL RTE_MAX_ETHPORTS

/* Macros to check for valid port */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return retval; \
	} \
} while (0)

#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return; \
	} \
} while (0)

/* L2 tunnel configuration masks */
#define ETH_L2_TUNNEL_ENABLE_MASK     0x00000001
#define ETH_L2_TUNNEL_INSERTION_MASK  0x00000002
#define ETH_L2_TUNNEL_STRIPPING_MASK  0x00000004
#define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008

/** Function type used for Rx packet processing callbacks, called on Rx with
 *  a burst of packets received on the given port and queue. */
typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
	void *user_param);

/** Function type used for Tx packet processing callbacks, called on Tx
 *  immediately before a burst of packets is handed to the hardware queue. */
typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);

/**
 * Possible states of an ethdev port.
 */
enum rte_eth_dev_state {
	/** Device is unused before being probed. */
	RTE_ETH_DEV_UNUSED = 0,
	/** Device is attached when allocated in probing. */
	RTE_ETH_DEV_ATTACHED,
	/** Device is in removed state when plug-out is detected. */
	RTE_ETH_DEV_REMOVED,
};

struct rte_eth_dev_sriov {
	uint8_t active;          /**< SRIOV is active with 16, 32 or 64 pools */
	uint8_t nb_q_per_pool;   /**< Rx queue number per pool */
	uint16_t def_vmdq_idx;   /**< Default pool num used for PF */
	uint16_t def_pool_q_idx; /**< Default pool queue start reg index */
};
#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)

#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN

#define RTE_ETH_DEV_NO_OWNER 0

#define RTE_ETH_MAX_OWNER_NAME_LEN 64

struct rte_eth_dev_owner {
	uint64_t id; /**< The owner unique identifier. */
	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< The owner name. */
};

/** Port is released (freed) on rte_eth_dev_close(). */
#define RTE_ETH_DEV_CLOSE_REMOVE    0x0001
/** Device supports link state interrupt */
#define RTE_ETH_DEV_INTR_LSC        0x0002
/** Device is a bonded slave */
#define RTE_ETH_DEV_BONDED_SLAVE    0x0004
/** Device supports device removal interrupt */
#define RTE_ETH_DEV_INTR_RMV        0x0008
/** Device is port representor */
#define RTE_ETH_DEV_REPRESENTOR     0x0010
/** Device does not support MAC change after started */
#define RTE_ETH_DEV_NOLIVE_MAC_ADDR 0x0020

uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
		const uint64_t owner_id);

/** Macro to iterate over all enabled ethdev ports owned by a specific owner. */
#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
	for (p = rte_eth_find_next_owned_by(0, o); \
	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
	     p = rte_eth_find_next_owned_by(p + 1, o))

uint16_t rte_eth_find_next(uint16_t port_id);

/** Macro to iterate over all enabled and ownerless ethdev ports. */
#define RTE_ETH_FOREACH_DEV(p) \
	RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)

uint16_t __rte_experimental
rte_eth_find_next_of(uint16_t port_id_start,
		const struct rte_device *parent);

/** Macro to iterate over all ethdev ports of a specified device. */
#define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
	for (port_id = rte_eth_find_next_of(0, parent); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next_of(port_id + 1, parent))

uint16_t __rte_experimental
rte_eth_find_next_sibling(uint16_t port_id_start,
		uint16_t ref_port_id);

/** Macro to iterate over all ethdev ports sharing the same rte_device
 *  as the specified port. */
#define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
	for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))

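/*
 * Illustrative sketch (not part of the original header): iterate over all
 * usable ports and count them.
 *
 * @code
 * uint16_t port_id;
 * unsigned int nb_ports = 0;
 *
 * RTE_ETH_FOREACH_DEV(port_id)
 *         nb_ports++;
 * printf("%u ports available\n", nb_ports);
 * @endcode
 */
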
int __rte_experimental rte_eth_dev_owner_new(uint64_t *owner_id);

int __rte_experimental rte_eth_dev_owner_set(const uint16_t port_id,
		const struct rte_eth_dev_owner *owner);

int __rte_experimental rte_eth_dev_owner_unset(const uint16_t port_id,
		const uint64_t owner_id);

void __rte_experimental rte_eth_dev_owner_delete(const uint64_t owner_id);

int __rte_experimental rte_eth_dev_owner_get(const uint16_t port_id,
		struct rte_eth_dev_owner *owner);

__rte_deprecated
uint16_t rte_eth_dev_count(void);

uint16_t rte_eth_dev_count_avail(void);

uint16_t rte_eth_dev_count_total(void);

uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);

const char *rte_eth_dev_rx_offload_name(uint64_t offload);

const char *rte_eth_dev_tx_offload_name(uint64_t offload);

int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
		uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);

int __rte_experimental
rte_eth_dev_is_removed(uint16_t port_id);

int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);

int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);

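/*
 * Illustrative bring-up sketch (not part of the original header): configure
 * one Rx and one Tx queue and start the port. "mbuf_pool" is an assumed,
 * previously created mempool; error handling is reduced to early returns.
 *
 * @code
 * struct rte_eth_conf port_conf = { 0 };
 * uint16_t nb_rxd = 512, nb_txd = 512;
 *
 * if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
 *         return -1;
 * if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd) < 0)
 *         return -1;
 * if (rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *                            rte_eth_dev_socket_id(port_id), NULL,
 *                            mbuf_pool) < 0)
 *         return -1;
 * if (rte_eth_tx_queue_setup(port_id, 0, nb_txd,
 *                            rte_eth_dev_socket_id(port_id), NULL) < 0)
 *         return -1;
 * if (rte_eth_dev_start(port_id) < 0)
 *         return -1;
 * @endcode
 */
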
int rte_eth_dev_socket_id(uint16_t port_id);

int rte_eth_dev_is_valid_port(uint16_t port_id);

int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);

int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);

int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);

int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);

int rte_eth_dev_start(uint16_t port_id);

void rte_eth_dev_stop(uint16_t port_id);

int rte_eth_dev_set_link_up(uint16_t port_id);

int rte_eth_dev_set_link_down(uint16_t port_id);

void rte_eth_dev_close(uint16_t port_id);

int rte_eth_dev_reset(uint16_t port_id);

void rte_eth_promiscuous_enable(uint16_t port_id);

void rte_eth_promiscuous_disable(uint16_t port_id);

int rte_eth_promiscuous_get(uint16_t port_id);

void rte_eth_allmulticast_enable(uint16_t port_id);

void rte_eth_allmulticast_disable(uint16_t port_id);

int rte_eth_allmulticast_get(uint16_t port_id);

void rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);

void rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);

int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);

int rte_eth_stats_reset(uint16_t port_id);

int rte_eth_xstats_get_names(uint16_t port_id,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);

int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
		unsigned int n);

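/*
 * Illustrative sketch (not part of the original header): dump all extended
 * statistics of a port. The first call sizes the arrays; allocation
 * failures are not handled here.
 *
 * @code
 * int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
 * struct rte_eth_xstat_name *names = calloc(nb, sizeof(*names));
 * struct rte_eth_xstat *xstats = calloc(nb, sizeof(*xstats));
 *
 * rte_eth_xstats_get_names(port_id, names, nb);
 * nb = rte_eth_xstats_get(port_id, xstats, nb);
 * for (int i = 0; i < nb; i++)
 *         printf("%s: %" PRIu64 "\n",
 *                names[xstats[i].id].name, xstats[i].value);
 * free(names);
 * free(xstats);
 * @endcode
 */
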
int
rte_eth_xstats_get_names_by_id(uint16_t port_id,
	struct rte_eth_xstat_name *xstats_names, unsigned int size,
	uint64_t *ids);

int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
			     uint64_t *values, unsigned int size);

int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
		uint64_t *id);

void rte_eth_xstats_reset(uint16_t port_id);

int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
		uint16_t tx_queue_id, uint8_t stat_idx);

int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
					   uint16_t rx_queue_id,
					   uint8_t stat_idx);

void rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr);

void rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);

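/*
 * Illustrative sketch (not part of the original header): query device
 * capabilities and enable an offload only when the port supports it.
 *
 * @code
 * struct rte_eth_dev_info dev_info;
 * struct rte_eth_conf conf = { 0 };
 *
 * rte_eth_dev_info_get(port_id, &dev_info);
 * if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_CHECKSUM)
 *         conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
 * @endcode
 */
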
int rte_eth_dev_fw_version_get(uint16_t port_id,
			       char *fw_version, size_t fw_size);

int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
				     uint32_t *ptypes, int num);

int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);

int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);

int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);

int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
		int on);

int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
				    enum rte_vlan_type vlan_type,
				    uint16_t tag_type);

int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);

int rte_eth_dev_get_vlan_offload(uint16_t port_id);

int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);

typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
		void *userdata);

/**
 * Structure used to buffer packets for future Tx.
 * Used by APIs rte_eth_tx_buffer() and rte_eth_tx_buffer_flush().
 */
struct rte_eth_dev_tx_buffer {
	buffer_tx_error_fn error_callback;
	void *error_userdata;
	uint16_t size;   /**< Size of buffer for buffered Tx */
	uint16_t length; /**< Number of packets in the array */
	struct rte_mbuf *pkts[];
	/**< Pending packets to be sent on explicit flush or when full */
};

/**
 * Calculate the size of the Tx buffer.
 *
 * @param sz
 *   Number of stored packets.
 */
#define RTE_ETH_TX_BUFFER_SIZE(sz) \
	(sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn callback, void *userdata);

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata);

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata);

int
rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);

/**
 * Subtypes for the IPsec offload event (@ref RTE_ETH_EVENT_IPSEC) raised by
 * the eth device.
 */
enum rte_eth_event_ipsec_subtype {
	RTE_ETH_EVENT_IPSEC_UNKNOWN = 0,    /**< Unknown event type */
	RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW,   /**< Sequence number overflow */
	RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY, /**< Soft time expiry of SA */
	RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY, /**< Soft byte expiry of SA */
	RTE_ETH_EVENT_IPSEC_MAX             /**< Max value of this enum */
};

/**
 * Descriptor for the @ref RTE_ETH_EVENT_IPSEC event. Used by the eth device
 * to send extra information about the IPsec offload event.
 */
struct rte_eth_event_ipsec_desc {
	enum rte_eth_event_ipsec_subtype subtype;
	/**< Type of RTE_ETH_EVENT_IPSEC_* event */
	uint64_t metadata;
	/**< Event-specific metadata. */
};

/**
 * The eth device event type for interrupt, and maybe others in the future.
 */
enum rte_eth_event_type {
	RTE_ETH_EVENT_UNKNOWN,  /**< unknown event type */
	RTE_ETH_EVENT_INTR_LSC, /**< lsc interrupt event */
	RTE_ETH_EVENT_QUEUE_STATE,
				/**< queue state event (enabled/disabled) */
	RTE_ETH_EVENT_INTR_RESET,
			/**< reset interrupt event, sent to VF on PF reset */
	RTE_ETH_EVENT_VF_MBOX,  /**< message from the VF received by PF */
	RTE_ETH_EVENT_MACSEC,   /**< MACsec offload related event */
	RTE_ETH_EVENT_INTR_RMV, /**< device removal event */
	RTE_ETH_EVENT_NEW,      /**< port is probed */
	RTE_ETH_EVENT_DESTROY,  /**< port is released */
	RTE_ETH_EVENT_IPSEC,    /**< IPsec offload related event */
	RTE_ETH_EVENT_MAX       /**< max value of this enum */
};

typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
		enum rte_eth_event_type event, void *cb_arg, void *ret_param);

int rte_eth_dev_callback_register(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg);

int rte_eth_dev_callback_unregister(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg);

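/*
 * Illustrative sketch (not part of the original header): register a link
 * status change handler. The handler name and body are examples only.
 *
 * @code
 * static int
 * lsc_handler(uint16_t port_id, enum rte_eth_event_type event,
 *             void *cb_arg, void *ret_param)
 * {
 *         struct rte_eth_link link;
 *
 *         (void)event; (void)cb_arg; (void)ret_param;
 *         rte_eth_link_get_nowait(port_id, &link);
 *         printf("port %u link %s\n", port_id,
 *                link.link_status == ETH_LINK_UP ? "up" : "down");
 *         return 0;
 * }
 *
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                               lsc_handler, NULL);
 * @endcode
 */
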
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);

int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);

int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);

int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
			      int epfd, int op, void *data);

int __rte_experimental
rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);

int rte_eth_led_on(uint16_t port_id);

int rte_eth_led_off(uint16_t port_id);

int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
			      struct rte_eth_fc_conf *fc_conf);

int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
			      struct rte_eth_fc_conf *fc_conf);

int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
				       struct rte_eth_pfc_conf *pfc_conf);

int rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *mac_addr,
				uint32_t pool);

int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *mac_addr);

int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
		struct ether_addr *mac_addr);

int rte_eth_dev_rss_reta_update(uint16_t port_id,
				struct rte_eth_rss_reta_entry64 *reta_conf,
				uint16_t reta_size);

int rte_eth_dev_rss_reta_query(uint16_t port_id,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);

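/*
 * Illustrative sketch (not part of the original header): spread a 128-entry
 * redirection table round-robin across nb_rx_queues (an assumed variable).
 * Assumes the port reports reta_size == 128 in struct rte_eth_dev_info.
 *
 * @code
 * struct rte_eth_rss_reta_entry64 reta_conf[2];
 *
 * for (int g = 0; g < 2; g++) {
 *         reta_conf[g].mask = UINT64_MAX;  // update all 64 entries of group
 *         for (int i = 0; i < RTE_RETA_GROUP_SIZE; i++)
 *                 reta_conf[g].reta[i] =
 *                         (g * RTE_RETA_GROUP_SIZE + i) % nb_rx_queues;
 * }
 * rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 * @endcode
 */
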
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
				  uint8_t on);

int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);

int rte_eth_mirror_rule_set(uint16_t port_id,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t rule_id,
			uint8_t on);

int rte_eth_mirror_rule_reset(uint16_t port_id,
			uint8_t rule_id);

int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
			uint16_t tx_rate);

int rte_eth_dev_rss_hash_update(uint16_t port_id,
				struct rte_eth_rss_conf *rss_conf);

int
rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
			      struct rte_eth_rss_conf *rss_conf);

int
rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
				struct rte_eth_udp_tunnel *tunnel_udp);

int
rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
				   struct rte_eth_udp_tunnel *tunnel_udp);

__rte_deprecated
int rte_eth_dev_filter_supported(uint16_t port_id,
		enum rte_filter_type filter_type);

__rte_deprecated
int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
			enum rte_filter_op filter_op, void *arg);

int rte_eth_dev_get_dcb_info(uint16_t port_id,
			     struct rte_eth_dcb_info *dcb_info);

struct rte_eth_rxtx_callback;

const struct rte_eth_rxtx_callback *
rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param);

const struct rte_eth_rxtx_callback *
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param);

const struct rte_eth_rxtx_callback *
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param);

int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb);

int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb);

int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo);

int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo);

int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);

int rte_eth_dev_get_eeprom_length(uint16_t port_id);

int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);

int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);

int __rte_experimental
rte_eth_dev_get_module_info(uint16_t port_id,
			    struct rte_eth_dev_module_info *modinfo);

int __rte_experimental
rte_eth_dev_get_module_eeprom(uint16_t port_id,
			      struct rte_dev_eeprom_info *info);

int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
				 struct ether_addr *mc_addr_set,
				 uint32_t nb_mc_addr);

int rte_eth_timesync_enable(uint16_t port_id);

int rte_eth_timesync_disable(uint16_t port_id);

int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
		struct timespec *timestamp, uint32_t flags);

int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
		struct timespec *timestamp);

int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);

int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);

int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel);

int
rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en);

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);

int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				     uint16_t *nb_rx_desc,
				     uint16_t *nb_tx_desc);

int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id);


#include <rte_ethdev_core.h>

static inline uint16_t
rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	uint16_t nb_rx;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
		return 0;
	}
#endif
	nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
				     rx_pkts, nb_pkts);

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
		struct rte_eth_rxtx_callback *cb =
				dev->post_rx_burst_cbs[queue_id];

		do {
			nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
					  nb_pkts, cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	return nb_rx;
}

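/*
 * Illustrative sketch (not part of the original header): a typical polling
 * loop around rte_eth_rx_burst(). Received packets are simply freed here;
 * a real application would process them instead.
 *
 * @code
 * #define BURST_SIZE 32
 * struct rte_mbuf *bufs[BURST_SIZE];
 *
 * for (;;) {
 *         uint16_t nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
 *
 *         for (uint16_t i = 0; i < nb_rx; i++)
 *                 rte_pktmbuf_free(bufs[i]);
 * }
 * @endcode
 */
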
/**
 * Get the number of used descriptors of an Rx queue.
 */
static inline int
rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
	if (queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
}

/**
 * Check if the DD bit of the specific Rx descriptor in the queue has been set.
 */
static inline int
rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
	return (*dev->dev_ops->rx_descriptor_done)(
		dev->data->rx_queues[queue_id], offset);
}

#define RTE_ETH_RX_DESC_AVAIL   0 /**< Desc available for hw. */
#define RTE_ETH_RX_DESC_DONE    1 /**< Desc done, filled by hw. */
#define RTE_ETH_RX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */

static inline int
rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
	uint16_t offset)
{
	struct rte_eth_dev *dev;
	void *rxq;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
	dev = &rte_eth_devices[port_id];
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;
#endif
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
	rxq = dev->data->rx_queues[queue_id];

	return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
}

#define RTE_ETH_TX_DESC_FULL    0 /**< Desc filled for hw, waiting xmit. */
#define RTE_ETH_TX_DESC_DONE    1 /**< Desc done, packet is transmitted. */
#define RTE_ETH_TX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */

static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
	uint16_t queue_id, uint16_t offset)
{
	struct rte_eth_dev *dev;
	void *txq;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
	dev = &rte_eth_devices[port_id];
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;
#endif
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
	txq = dev->data->tx_queues[queue_id];

	return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
}

static inline uint16_t
rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
		return 0;
	}
#endif

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];

	if (unlikely(cb != NULL)) {
		do {
			nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
					    cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
				    tx_pkts, nb_pkts);
}

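/*
 * Illustrative sketch (not part of the original header): transmit a burst
 * and free whatever the driver did not accept, since the caller remains
 * responsible for unsent mbufs.
 *
 * @code
 * uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, bufs, nb_rx);
 *
 * for (uint16_t i = nb_tx; i < nb_rx; i++)
 *         rte_pktmbuf_free(bufs[i]);
 * @endcode
 */
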
#ifndef RTE_ETHDEV_TX_PREPARE_NOOP

static inline uint16_t
rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
		rte_errno = EINVAL;
		return 0;
	}
#endif

	dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
		rte_errno = EINVAL;
		return 0;
	}
#endif

	if (!dev->tx_pkt_prepare)
		return nb_pkts;

	return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
			tx_pkts, nb_pkts);
}

#else

/*
 * Native NOOP operation for compilation targets which do not require any
 * preparation steps, and where a functional NOOP would introduce unnecessary
 * performance overhead.
 *
 * Generally it is not a good idea to enable this globally, and it should
 * not be used if the behavior of tx_prepare can change.
 */

static inline uint16_t
rte_eth_tx_prepare(__rte_unused uint16_t port_id,
		__rte_unused uint16_t queue_id,
		__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	return nb_pkts;
}

#endif

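/*
 * Illustrative sketch (not part of the original header): validate and fix up
 * offload metadata with rte_eth_tx_prepare() before handing the burst to
 * rte_eth_tx_burst().
 *
 * @code
 * uint16_t nb_prep = rte_eth_tx_prepare(port_id, 0, bufs, nb_pkts);
 *
 * if (nb_prep != nb_pkts)
 *         printf("invalid packet at %u: %s\n", nb_prep,
 *                rte_strerror(rte_errno));
 * uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, bufs, nb_prep);
 * @endcode
 */
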
static inline uint16_t
rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_dev_tx_buffer *buffer)
{
	uint16_t sent;
	uint16_t to_send = buffer->length;

	if (to_send == 0)
		return 0;

	sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);

	buffer->length = 0;

	/* All packets sent, or to be dealt with by callback below */
	if (unlikely(sent != to_send))
		buffer->error_callback(&buffer->pkts[sent],
				       (uint16_t)(to_send - sent),
				       buffer->error_userdata);

	return sent;
}

static __rte_always_inline uint16_t
rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
{
	buffer->pkts[buffer->length++] = tx_pkt;
	if (buffer->length < buffer->size)
		return 0;

	return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
}

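/*
 * Illustrative sketch (not part of the original header): allocate and use a
 * Tx buffer that batches up to 32 packets per queue before flushing. The
 * allocation uses rte_zmalloc_socket() from rte_malloc.h.
 *
 * @code
 * struct rte_eth_dev_tx_buffer *buffer =
 *         rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *                            rte_eth_dev_socket_id(port_id));
 *
 * rte_eth_tx_buffer_init(buffer, 32);
 * rte_eth_tx_buffer(port_id, 0, buffer, pkt);   // queued, sent when full
 * rte_eth_tx_buffer_flush(port_id, 0, buffer);  // force out any leftovers
 * @endcode
 */
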
#ifdef __cplusplus
}
#endif

#endif /* _RTE_ETHDEV_H_ */