DPDK  19.08.0
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
rte_ethdev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
142 #ifdef __cplusplus
143 extern "C" {
144 #endif
145 
146 #include <stdint.h>
147 
/* Use this macro to check if LRO API is supported */
#define RTE_ETHDEV_HAS_LRO_SUPPORT
150 
151 #include <rte_compat.h>
152 #include <rte_log.h>
153 #include <rte_interrupts.h>
154 #include <rte_dev.h>
155 #include <rte_devargs.h>
156 #include <rte_errno.h>
157 #include <rte_common.h>
158 #include <rte_config.h>
159 
160 #include "rte_ether.h"
161 #include "rte_dev_info.h"
162 
/** Log type used by all ethdev library messages. */
extern int rte_eth_dev_logtype;

/**
 * Emit a log message at the given level through the ethdev log type.
 * The empty string literal concatenated before __VA_ARGS__ forces the
 * first variadic argument to be a string literal (the format string).
 */
#define RTE_ETHDEV_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)

struct rte_mbuf; /* forward declaration only; full type defined outside this header */
169 
/**
 * Initialize an iterator over ethdev ports matching a devargs string.
 *
 * @param iter    Iterator state to initialize.
 * @param devargs Device description string to match against.
 * @return 0 on success, negative value on error (convention inferred from
 *         the int return type -- confirm against library docs).
 */
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);

/**
 * Return the next matching port id. The walk is over when the returned id
 * equals RTE_MAX_ETHPORTS (see the termination test in
 * RTE_ETH_FOREACH_MATCHING_DEV below).
 */
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);

/** Release any resources held by a matching-device iterator. */
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);

/**
 * Iterate over all ethdev port ids matching @p devargs.
 * @p id is assigned each matching port id in turn; @p iter must point to
 * caller-provided iterator storage.
 */
#define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
	for (rte_eth_iterator_init(iter, devargs), \
	     id = rte_eth_iterator_next(iter); \
	     id != RTE_MAX_ETHPORTS; \
	     id = rte_eth_iterator_next(iter))
236 
/* NOTE(review): fragment -- the opening "struct rte_eth_stats {" line lies
 * outside this extracted view. Basic per-device statistics counters;
 * i*/o* prefixes presumably mean ingress (Rx) / egress (Tx) -- confirm
 * against the ethdev API reference. */
244  uint64_t ipackets; /* received packets */
245  uint64_t opackets; /* transmitted packets */
246  uint64_t ibytes; /* received bytes */
247  uint64_t obytes; /* transmitted bytes */
248  uint64_t imissed; /* packets dropped by HW, e.g. Rx queues full -- TODO confirm */
252  uint64_t ierrors; /* erroneous received packets */
253  uint64_t oerrors; /* transmit failures */
254  uint64_t rx_nombuf; /* Rx mbuf allocation failures */
/* Per-queue counters, one slot per queue up to RTE_ETHDEV_QUEUE_STAT_CNTRS. */
255  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
257  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
259  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
261  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
263  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
265 };
266 
/**
 * Link speed capability/selection bitmap. Bit 0 selects fixed vs.
 * autonegotiated speed; the remaining bits each represent one speed.
 */
#define ETH_LINK_SPEED_AUTONEG  (0 <<  0) /**< Autonegotiate (all speeds). */
#define ETH_LINK_SPEED_FIXED    (1 <<  0) /**< Disable autoneg (fixed speed). */
#define ETH_LINK_SPEED_10M_HD   (1 <<  1) /**<  10 Mbps half-duplex. */
#define ETH_LINK_SPEED_10M      (1 <<  2) /**<  10 Mbps full-duplex. */
#define ETH_LINK_SPEED_100M_HD  (1 <<  3) /**< 100 Mbps half-duplex. */
#define ETH_LINK_SPEED_100M     (1 <<  4) /**< 100 Mbps full-duplex. */
#define ETH_LINK_SPEED_1G       (1 <<  5) /**<   1 Gbps. */
#define ETH_LINK_SPEED_2_5G     (1 <<  6) /**< 2.5 Gbps. */
#define ETH_LINK_SPEED_5G       (1 <<  7) /**<   5 Gbps. */
#define ETH_LINK_SPEED_10G      (1 <<  8) /**<  10 Gbps. */
#define ETH_LINK_SPEED_20G      (1 <<  9) /**<  20 Gbps. */
#define ETH_LINK_SPEED_25G      (1 << 10) /**<  25 Gbps. */
#define ETH_LINK_SPEED_40G      (1 << 11) /**<  40 Gbps. */
#define ETH_LINK_SPEED_50G      (1 << 12) /**<  50 Gbps. */
#define ETH_LINK_SPEED_56G      (1 << 13) /**<  56 Gbps. */
#define ETH_LINK_SPEED_100G     (1 << 14) /**< 100 Gbps. */

/** Numeric link speeds, in Mbps (used in rte_eth_link.link_speed). */
#define ETH_SPEED_NUM_NONE      0 /**< Speed not known/defined. */
#define ETH_SPEED_NUM_10M      10
#define ETH_SPEED_NUM_100M    100
#define ETH_SPEED_NUM_1G     1000
#define ETH_SPEED_NUM_2_5G   2500
#define ETH_SPEED_NUM_5G     5000
#define ETH_SPEED_NUM_10G   10000
#define ETH_SPEED_NUM_20G   20000
#define ETH_SPEED_NUM_25G   25000
#define ETH_SPEED_NUM_40G   40000
#define ETH_SPEED_NUM_50G   50000
#define ETH_SPEED_NUM_56G   56000
#define ETH_SPEED_NUM_100G 100000

/**
 * Link state of an Ethernet device.
 * Forced to 8-byte alignment/size; presumably so the whole structure can
 * be exchanged as a single 64-bit word -- confirm against library docs.
 */
__extension__
struct rte_eth_link {
	uint32_t link_speed;        /**< ETH_SPEED_NUM_* value, in Mbps. */
	uint16_t link_duplex  : 1;  /**< ETH_LINK_HALF_DUPLEX / ETH_LINK_FULL_DUPLEX. */
	uint16_t link_autoneg : 1;  /**< ETH_LINK_FIXED / ETH_LINK_AUTONEG. */
	uint16_t link_status  : 1;  /**< ETH_LINK_DOWN / ETH_LINK_UP. */
} __attribute__((aligned(8)));

/* Utility constants */
#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection. */
#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection. */
#define ETH_LINK_DOWN        0 /**< Link is down. */
#define ETH_LINK_UP          1 /**< Link is up. */
#define ETH_LINK_FIXED       0 /**< Speed was not autonegotiated. */
#define ETH_LINK_AUTONEG     1 /**< Speed was autonegotiated. */
/**
 * Ring threshold parameters (p/h/w threshold registers of a queue;
 * exact semantics are hardware-specific -- see the PMD documentation).
 */
struct rte_eth_thresh {
	uint8_t pthresh; /**< Prefetch threshold -- TODO confirm per PMD. */
	uint8_t hthresh; /**< Host threshold -- TODO confirm per PMD. */
	uint8_t wthresh; /**< Write-back threshold -- TODO confirm per PMD. */
};

/** Simple flags to indicate Rx multi-queue mode components. */
#define ETH_MQ_RX_RSS_FLAG  0x1
#define ETH_MQ_RX_DCB_FLAG  0x2
#define ETH_MQ_RX_VMDQ_FLAG 0x4
/* NOTE(review): fragment of the Rx multi-queue mode enum -- its opening line
 * (and at least the first enumerators) lie outside this extracted view.
 * Values combine the ETH_MQ_RX_*_FLAG bits defined above. */
347 
351  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG, /* DCB only */
353  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG, /* DCB + RSS */
354 
356  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG, /* VMDq only */
358  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG, /* VMDq + RSS */
360  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG, /* VMDq + DCB */
/* NOTE(review): the line below looks like the orphaned tail of a wrapped
 * initializer (presumably ETH_MQ_RX_VMDQ_DCB_RSS = RSS|DCB| the flag below);
 * the leading part of the line is missing from this view -- do not edit. */
363  ETH_MQ_RX_VMDQ_FLAG,
364 };
365 
/* Backward-compatible aliases for the Rx multi-queue modes above. */
369 #define ETH_RSS ETH_MQ_RX_RSS
370 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
371 #define ETH_DCB_RX ETH_MQ_RX_DCB
372 
/* NOTE(review): closing brace of the Tx multi-queue mode enum; its
 * enumerators (ETH_MQ_TX_* used just below) are outside this view. */
382 };
383 
/* Backward-compatible aliases for the Tx multi-queue modes. */
387 #define ETH_DCB_NONE ETH_MQ_TX_NONE
388 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
389 #define ETH_DCB_TX ETH_MQ_TX_DCB
390 
/* NOTE(review): fragment of the Rx mode configuration struct -- opening line
 * and several members are outside this extracted view. */
397  uint32_t max_rx_pkt_len; /* only used if jumbo frames enabled -- TODO confirm */
398  uint16_t split_hdr_size; /* header split buffer size -- TODO confirm */
404  uint64_t offloads; /* bitmask, presumably DEV_RX_OFFLOAD_* flags defined below */
405 };
406 
/* NOTE(review): fragment of the VLAN type enum (opening line missing). */
412  ETH_VLAN_TYPE_UNKNOWN = 0,
415  ETH_VLAN_TYPE_MAX,
416 };
417 
/* NOTE(review): fragment -- a 64*64 = 4096-entry bitmap struct, matching the
 * 12-bit VLAN ID space (ETH_VLAN_ID_MAX = 0x0FFF); opening line missing. */
423  uint64_t ids[64];
424 };
425 
/* NOTE(review): fragment of the RSS configuration struct (opening missing). */
444  uint8_t *rss_key; /* hash key bytes, length given below */
445  uint8_t rss_key_len; /* length of rss_key in bytes */
446  uint64_t rss_hf; /* hash functions to apply, presumably ETH_RSS_* bits */
447 };
448 
/*
 * A packet can be identified by hardware as different flow types. Different
 * NIC hardware may support different flow types.
 * Basically, the NIC hardware identifies the flow type as deep protocol as
 * possible, and exclusively. For example, if a packet is identified as
 * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
 * though it is an actual IPV4 packet.
 * Note that the flow types are used to define RSS offload types.
 */
#define RTE_ETH_FLOW_UNKNOWN             0
#define RTE_ETH_FLOW_RAW                 1
#define RTE_ETH_FLOW_IPV4                2
#define RTE_ETH_FLOW_FRAG_IPV4           3
#define RTE_ETH_FLOW_NONFRAG_IPV4_TCP    4
#define RTE_ETH_FLOW_NONFRAG_IPV4_UDP    5
#define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP   6
#define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER  7
#define RTE_ETH_FLOW_IPV6                8
#define RTE_ETH_FLOW_FRAG_IPV6           9
#define RTE_ETH_FLOW_NONFRAG_IPV6_TCP   10
#define RTE_ETH_FLOW_NONFRAG_IPV6_UDP   11
#define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP  12
#define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
#define RTE_ETH_FLOW_L2_PAYLOAD         14
#define RTE_ETH_FLOW_IPV6_EX            15
#define RTE_ETH_FLOW_IPV6_TCP_EX        16
#define RTE_ETH_FLOW_IPV6_UDP_EX        17
#define RTE_ETH_FLOW_PORT               18

#define RTE_ETH_FLOW_VXLAN              19
#define RTE_ETH_FLOW_GENEVE             20
#define RTE_ETH_FLOW_NVGRE              21
#define RTE_ETH_FLOW_VXLAN_GPE          22
#define RTE_ETH_FLOW_MAX                23 /**< Exclusive upper bound / count. */

/*
 * The RSS offload types are defined based on flow types.
 * Different NIC hardware may support different RSS offload
 * types. The supported flow types or RSS offload types can be queried by
 * rte_eth_dev_info_get().
 */
#define ETH_RSS_IPV4               (1ULL << RTE_ETH_FLOW_IPV4)
#define ETH_RSS_FRAG_IPV4          (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
#define ETH_RSS_IPV6               (1ULL << RTE_ETH_FLOW_IPV6)
#define ETH_RSS_FRAG_IPV6          (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
#define ETH_RSS_L2_PAYLOAD         (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
#define ETH_RSS_IPV6_EX            (1ULL << RTE_ETH_FLOW_IPV6_EX)
#define ETH_RSS_IPV6_TCP_EX        (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
#define ETH_RSS_IPV6_UDP_EX        (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
#define ETH_RSS_PORT               (1ULL << RTE_ETH_FLOW_PORT)
#define ETH_RSS_VXLAN              (1ULL << RTE_ETH_FLOW_VXLAN)
#define ETH_RSS_GENEVE             (1ULL << RTE_ETH_FLOW_GENEVE)
#define ETH_RSS_NVGRE              (1ULL << RTE_ETH_FLOW_NVGRE)

/** Convenience group: all IP (L3) hash types. */
#define ETH_RSS_IP ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_IPV6_EX)

/** Convenience group: all UDP hash types. */
#define ETH_RSS_UDP ( \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_IPV6_UDP_EX)

/** Convenience group: all TCP hash types. */
#define ETH_RSS_TCP ( \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_IPV6_TCP_EX)

/** Convenience group: all SCTP hash types. */
#define ETH_RSS_SCTP ( \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV6_SCTP)

/** Convenience group: all tunnel hash types. */
#define ETH_RSS_TUNNEL ( \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

/** Mask of every RSS offload type defined above. */
#define ETH_RSS_PROTO_MASK ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_NONFRAG_IPV6_SCTP | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_L2_PAYLOAD | \
	ETH_RSS_IPV6_EX | \
	ETH_RSS_IPV6_TCP_EX | \
	ETH_RSS_IPV6_UDP_EX | \
	ETH_RSS_PORT | \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)
561 
/*
 * Definitions used for redirection table entry size.
 * Some RSS RETA sizes may not be supported by some drivers, check the
 * documentation or the description of relevant functions for more details.
 */
#define ETH_RSS_RETA_SIZE_64  64
#define ETH_RSS_RETA_SIZE_128 128
#define ETH_RSS_RETA_SIZE_256 256
#define ETH_RSS_RETA_SIZE_512 512
#define RTE_RETA_GROUP_SIZE   64 /**< Entries per rte_eth_rss_reta_entry64 group. */

/* Definitions used for VMDQ and DCB functionality */
#define ETH_VMDQ_MAX_VLAN_FILTERS   64  /**< Maximum VMDq VLAN filters. */
#define ETH_DCB_NUM_USER_PRIORITIES 8   /**< Maximum DCB priorities. */
#define ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum VMDq+DCB queues. */
#define ETH_DCB_NUM_QUEUES          128 /**< Maximum DCB queues. */

/* DCB capability defines */
#define ETH_DCB_PG_SUPPORT  0x00000001 /**< Priority-group (ETS) support. */
#define ETH_DCB_PFC_SUPPORT 0x00000002 /**< Priority flow control support. */

/* Definitions used for VLAN Offload functionality */
#define ETH_VLAN_STRIP_OFFLOAD  0x0001 /**< VLAN strip on. */
#define ETH_VLAN_FILTER_OFFLOAD 0x0002 /**< VLAN filter on. */
#define ETH_VLAN_EXTEND_OFFLOAD 0x0004 /**< VLAN extend on. */
#define ETH_QINQ_STRIP_OFFLOAD  0x0008 /**< QinQ strip on. */

/* Definitions used for mask VLAN setting */
#define ETH_VLAN_STRIP_MASK  0x0001 /**< Bit mask for VLAN strip. */
#define ETH_VLAN_FILTER_MASK 0x0002 /**< Bit mask for VLAN filter. */
#define ETH_VLAN_EXTEND_MASK 0x0004 /**< Bit mask for VLAN extend. */
#define ETH_QINQ_STRIP_MASK  0x0008 /**< Bit mask for QinQ strip. */
#define ETH_VLAN_ID_MAX      0x0FFF /**< VLAN ID is a 12-bit field (802.1Q). */

/* Definitions used for receive MAC address */
#define ETH_NUM_RECEIVE_MAC_ADDR 128 /**< Maximum MAC addresses per port. */

/* Definitions used for unicast hash */
#define ETH_VMDQ_NUM_UC_HASH_ARRAY 128 /**< Maximum unicast hash array entries. */

/* Definitions used for VMDQ pool rx mode setting */
#define ETH_VMDQ_ACCEPT_UNTAG     0x0001 /**< Accept untagged packets. */
#define ETH_VMDQ_ACCEPT_HASH_MC   0x0002 /**< Accept multicast by hash. */
#define ETH_VMDQ_ACCEPT_HASH_UC   0x0004 /**< Accept unicast by hash. */
#define ETH_VMDQ_ACCEPT_BROADCAST 0x0008 /**< Accept broadcast packets. */
#define ETH_VMDQ_ACCEPT_MULTICAST 0x0010 /**< Accept multicast packets. */

/** Maximum number of VLAN entries in a mirror rule. */
#define ETH_MIRROR_MAX_VLANS 64

/* Mirror rule type flags. */
#define ETH_MIRROR_VIRTUAL_POOL_UP   0x01 /**< Virtual pool uplink mirroring. */
#define ETH_MIRROR_UPLINK_PORT       0x02 /**< Uplink port mirroring. */
#define ETH_MIRROR_DOWNLINK_PORT     0x04 /**< Downlink port mirroring. */
#define ETH_MIRROR_VLAN              0x08 /**< VLAN mirroring. */
#define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10 /**< Virtual pool downlink mirroring. */

/** Set of VLANs to mirror: a selection bitmask plus the VLAN ids themselves. */
struct rte_eth_vlan_mirror {
	uint64_t vlan_mask; /**< Bit i set => vlan_id[i] below is valid. */
	uint16_t vlan_id[ETH_MIRROR_MAX_VLANS]; /**< VLAN IDs to mirror. */
};
626 
/* NOTE(review): fragment of the mirror-rule configuration struct
 * (opening line outside this view). */
631  uint8_t rule_type; /* combination of ETH_MIRROR_* flags above */
632  uint8_t dst_pool; /* destination pool for mirrored traffic -- TODO confirm */
633  uint64_t pool_mask; /* pools selected for pool-type mirroring -- TODO confirm */
636 };
637 
/* NOTE(review): fragment of the 64-entry RSS RETA group struct
 * (opening line outside this view). */
645  uint64_t mask; /* bit i set => reta[i] below is to be read/updated */
647  uint16_t reta[RTE_RETA_GROUP_SIZE]; /* redirection-table entries */
649 };
650 
/* NOTE(review): fragment of the traffic-class count enum; only one
 * enumerator is visible here. */
656  ETH_4_TCS = 4,
658 };
659 
/* NOTE(review): closing brace of the pool-count enum whose enumerators
 * are outside this view. */
669 };
670 
671 /* This structure may be extended in future. */
672 struct rte_eth_dcb_rx_conf {
673  enum rte_eth_nb_tcs nb_tcs;
675  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
676 };
677 
678 struct rte_eth_vmdq_dcb_tx_conf {
679  enum rte_eth_nb_pools nb_queue_pools;
681  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
682 };
683 
684 struct rte_eth_dcb_tx_conf {
685  enum rte_eth_nb_tcs nb_tcs;
687  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
688 };
689 
690 struct rte_eth_vmdq_tx_conf {
691  enum rte_eth_nb_pools nb_queue_pools;
692 };
693 
/* NOTE(review): fragment of the VMDq+DCB Rx configuration struct
 * (opening line and some members outside this view). */
708  uint8_t default_pool; /* pool for unmatched packets -- TODO confirm */
709  uint8_t nb_pool_maps; /* number of valid entries in the map array below */
710  struct {
711  uint16_t vlan_id; /* VLAN to match */
712  uint64_t pools; /* bitmask of pools receiving this VLAN */
/* NOTE(review): the array declarator closing this anonymous struct member
 * is outside this view; the brace below closes the outer struct. */
716 };
717 
/* NOTE(review): fragment of the VMDq Rx configuration struct (opening
 * line and some members outside this view). */
739  uint8_t default_pool;
741  uint8_t nb_pool_maps;
742  uint32_t rx_mode; /* combination of ETH_VMDQ_ACCEPT_* flags -- TODO confirm */
743  struct {
744  uint16_t vlan_id;
745  uint64_t pools;
747 };
748 
/* NOTE(review): fragment of the Tx mode configuration struct. */
759  uint64_t offloads; /* bitmask, presumably DEV_TX_OFFLOAD_* flags defined below */
760 
761  /* For i40e specifically */
762  uint16_t pvid; /* port-based VLAN id */
763  __extension__
/* NOTE(review): first of several 1-bit flags; the remaining bit-field
 * declarators of this wrapped declaration are outside this view. */
764  uint8_t hw_vlan_reject_tagged : 1,
770 };
771 
771 
/* NOTE(review): fragment of the per-queue Rx configuration struct
 * (opening line and some members outside this view). */
777  uint16_t rx_free_thresh; /* free Rx descriptors before HW refill trigger -- TODO confirm */
778  uint8_t rx_drop_en; /* drop packets when no descriptors available -- TODO confirm */
785  uint64_t offloads; /* per-queue Rx offload bitmask */
786 };
787 
/* NOTE(review): fragment of the per-queue Tx configuration struct. */
793  uint16_t tx_rs_thresh; /* descriptors before setting RS bit -- TODO confirm */
794  uint16_t tx_free_thresh; /* used descriptors before freeing Tx mbufs -- TODO confirm */
803  uint64_t offloads; /* per-queue Tx offload bitmask */
804 };
805 
/* NOTE(review): fragment of the descriptor-limits struct. */
810  uint16_t nb_max; /* maximum number of ring descriptors */
811  uint16_t nb_min; /* minimum number of ring descriptors */
812  uint16_t nb_align; /* descriptor count alignment requirement */
822  uint16_t nb_seg_max; /* max segments per whole packet -- TODO confirm */
823 
835  uint16_t nb_mtu_seg_max; /* max segments per non-TSO packet -- TODO confirm */
836 };
837 
837 
/* NOTE(review): closing brace of the flow-control mode enum whose
 * enumerators are outside this view. */
846 };
847 
/* NOTE(review): fragment of the flow-control configuration struct
 * (opening line outside this view). */
854  uint32_t high_water; /* high threshold to trigger XOFF -- TODO confirm */
855  uint32_t low_water; /* low threshold to trigger XON -- TODO confirm */
856  uint16_t pause_time; /* pause quota in the Pause frame */
857  uint16_t send_xon; /* whether to send XON frames */
860  uint8_t autoneg; /* flow-control autonegotiation flag */
861 };
862 
/* NOTE(review): fragment of the priority flow-control configuration
 * struct; presumably embeds the struct above plus this priority field. */
870  uint8_t priority; /* VLAN user priority the rule applies to */
871 };
872 
/* NOTE(review): tunnel-type enum; its opening line is outside this view. */
877  RTE_TUNNEL_TYPE_NONE = 0,
878  RTE_TUNNEL_TYPE_VXLAN,
879  RTE_TUNNEL_TYPE_GENEVE,
880  RTE_TUNNEL_TYPE_TEREDO,
881  RTE_TUNNEL_TYPE_NVGRE,
882  RTE_TUNNEL_TYPE_IP_IN_GRE,
883  RTE_L2_TUNNEL_TYPE_E_TAG,
884  RTE_TUNNEL_TYPE_VXLAN_GPE,
885  RTE_TUNNEL_TYPE_MAX,
886 };
887 
887 
888 /* Deprecated API file for rte_eth_dev_filter_* functions */
889 #include "rte_eth_ctrl.h"
890 
/* NOTE(review): closing braces of two flow-director enums whose
 * enumerators are outside this view. */
899 };
900 
908 };
909 
/* NOTE(review): fragment of the flow-director configuration struct. */
921  uint8_t drop_queue; /* Rx queue receiving matched-then-dropped packets -- TODO confirm */
922  struct rte_eth_fdir_masks mask; /* field masks (type from rte_eth_ctrl.h) */
925 };
926 
/* NOTE(review): fragment of the UDP tunnel configuration struct. */
935  uint16_t udp_port; /* UDP destination port identifying the tunnel */
936  uint8_t prot_type; /* tunnel protocol, presumably RTE_TUNNEL_TYPE_* */
937 };
938 
/* NOTE(review): fragment of the interrupt configuration struct; 1-bit
 * enable flags for device interrupt events. */
944  uint32_t lsc:1; /* link status change interrupt */
946  uint32_t rxq:1; /* Rx queue interrupt */
948  uint32_t rmv:1; /* device removal interrupt */
949 };
950 
950 
/* Top-level device configuration passed to rte_eth_dev_configure().
 * NOTE(review): several members (rxmode/txmode, rss_conf, etc.) are not
 * visible in this extracted view -- this span is incomplete. */
956 struct rte_eth_conf {
957  uint32_t link_speeds; /* ETH_LINK_SPEED_* bitmap; 0 presumably means autoneg */
966  uint32_t lpbk_mode; /* loopback mode, PMD-specific -- TODO confirm */
971  struct {
975  struct rte_eth_dcb_rx_conf dcb_rx_conf; /* used in DCB Rx modes */
979  } rx_adv_conf; /* advanced Rx configuration (more members not visible) */
980  union {
981  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf; /* VMDq+DCB Tx */
983  struct rte_eth_dcb_tx_conf dcb_tx_conf; /* DCB-only Tx */
985  struct rte_eth_vmdq_tx_conf vmdq_tx_conf; /* VMDq-only Tx */
987  } tx_adv_conf; /* advanced Tx configuration, selected by Tx mq mode */
993 };
994 
/**
 * Rx offload capability/configuration flags (rte_eth_rxmode.offloads and
 * rte_eth_dev_info.rx_offload_capa). Note 0x00001000 is intentionally
 * unassigned here.
 */
#define DEV_RX_OFFLOAD_VLAN_STRIP       0x00000001
#define DEV_RX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_RX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_RX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_RX_OFFLOAD_TCP_LRO          0x00000010
#define DEV_RX_OFFLOAD_QINQ_STRIP       0x00000020
#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
#define DEV_RX_OFFLOAD_HEADER_SPLIT     0x00000100
#define DEV_RX_OFFLOAD_VLAN_FILTER      0x00000200
#define DEV_RX_OFFLOAD_VLAN_EXTEND      0x00000400
#define DEV_RX_OFFLOAD_JUMBO_FRAME      0x00000800
#define DEV_RX_OFFLOAD_SCATTER          0x00002000
#define DEV_RX_OFFLOAD_TIMESTAMP        0x00004000
#define DEV_RX_OFFLOAD_SECURITY         0x00008000
#define DEV_RX_OFFLOAD_KEEP_CRC         0x00010000
#define DEV_RX_OFFLOAD_SCTP_CKSUM       0x00020000
#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000

/** All L3/L4 Rx checksum offloads. */
#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
				 DEV_RX_OFFLOAD_UDP_CKSUM | \
				 DEV_RX_OFFLOAD_TCP_CKSUM)
/** All VLAN-related Rx offloads. */
#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
			     DEV_RX_OFFLOAD_VLAN_FILTER | \
			     DEV_RX_OFFLOAD_VLAN_EXTEND | \
			     DEV_RX_OFFLOAD_QINQ_STRIP)

/*
 * If new Rx offload capabilities are defined, they also must be
 * mentioned in rte_rx_offload_names in rte_ethdev.c file.
 */

/**
 * Tx offload capability/configuration flags (rte_eth_txmode.offloads and
 * rte_eth_dev_info.tx_offload_capa).
 */
#define DEV_TX_OFFLOAD_VLAN_INSERT      0x00000001
#define DEV_TX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_TX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_TX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_TX_OFFLOAD_SCTP_CKSUM       0x00000010
#define DEV_TX_OFFLOAD_TCP_TSO          0x00000020
#define DEV_TX_OFFLOAD_UDP_TSO          0x00000040
#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
#define DEV_TX_OFFLOAD_QINQ_INSERT      0x00000100
#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200
#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400
#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800
#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000
#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
#define DEV_TX_OFFLOAD_MT_LOCKFREE      0x00004000

/** Device supports multi-segment mbuf transmission. */
#define DEV_TX_OFFLOAD_MULTI_SEGS       0x00008000

#define DEV_TX_OFFLOAD_MBUF_FAST_FREE   0x00010000

/** Device supports security (e.g. inline crypto) on Tx. */
#define DEV_TX_OFFLOAD_SECURITY         0x00020000

/** Generic UDP tunneled-packet TSO. */
#define DEV_TX_OFFLOAD_UDP_TNL_TSO      0x00040000

/** Generic IP tunneled-packet TSO. */
#define DEV_TX_OFFLOAD_IP_TNL_TSO       0x00080000

#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000

/** Device supports mbuf metadata matching (see flow API). */
#define DEV_TX_OFFLOAD_MATCH_METADATA   0x00200000

/** Device supports Rx queue setup while the port is started. */
#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
/** Device supports Tx queue setup while the port is started. */
#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002

/*
 * If new Tx offload capabilities are defined, they also must be
 * mentioned in rte_tx_offload_names in rte_ethdev.c file.
 */

/*
 * Fallback default preferred Rx/Tx port parameters.
 * These are used if an application requests default parameters
 * but the PMD does not provide preferred values.
 */
#define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
#define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
#define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
#define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1098 
/* NOTE(review): fragment of the preferred port parameters struct
 * (opening line outside this view). */
1105  uint16_t burst_size; /* preferred Rx/Tx burst size */
1106  uint16_t ring_size; /* preferred ring size */
1107  uint16_t nb_queues; /* preferred number of queues */
1108 };
1109 
/** Sentinel: device is not part of any switch domain. */
1114 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (0)
1115 
/* NOTE(review): fragment of the switch/representor info struct
 * (opening line and some members outside this view). */
1120  const char *name; /* switch name */
1121  uint16_t domain_id; /* switch domain id */
1122  uint16_t port_id; /* port id within the switch domain -- TODO confirm */
1130 };
1131 
/* NOTE(review): fragment of struct rte_eth_dev_info, the capability report
 * filled by rte_eth_dev_info_get(); the opening line and several members
 * are outside this extracted view. */
1142  struct rte_device *device; /* generic device backing this port */
1143  const char *driver_name; /* PMD driver name */
1144  unsigned int if_index; /* kernel interface index, if any -- TODO confirm */
1146  uint16_t min_mtu; /* minimum supported MTU */
1147  uint16_t max_mtu; /* maximum supported MTU */
1148  const uint32_t *dev_flags; /* pointer to device flags */
1149  uint32_t min_rx_bufsize; /* minimum Rx buffer size */
1150  uint32_t max_rx_pktlen; /* maximum Rx packet length */
1151  uint16_t max_rx_queues; /* maximum number of Rx queues */
1152  uint16_t max_tx_queues; /* maximum number of Tx queues */
1153  uint32_t max_mac_addrs; /* maximum MAC addresses */
1154  uint32_t max_hash_mac_addrs; /* maximum hash-filtered MAC addresses */
1156  uint16_t max_vfs; /* maximum number of VFs */
1157  uint16_t max_vmdq_pools; /* maximum number of VMDq pools */
1158  uint64_t rx_offload_capa; /* presumably DEV_RX_OFFLOAD_* bits -- confirm */
1160  uint64_t tx_offload_capa; /* presumably DEV_TX_OFFLOAD_* bits -- confirm */
1162  uint64_t rx_queue_offload_capa; /* per-queue-settable Rx offloads */
1164  uint64_t tx_queue_offload_capa; /* per-queue-settable Tx offloads */
1166  uint16_t reta_size; /* RSS redirection table size */
1168  uint8_t hash_key_size; /* RSS hash key size in bytes */
1173  uint16_t vmdq_queue_base; /* first VMDq queue */
1174  uint16_t vmdq_queue_num; /* number of VMDq queues */
1175  uint16_t vmdq_pool_base; /* first VMDq pool */
1178  uint32_t speed_capa; /* presumably ETH_LINK_SPEED_* bits -- confirm */
1180  uint16_t nb_rx_queues; /* number of configured Rx queues */
1181  uint16_t nb_tx_queues; /* number of configured Tx queues */
1187  uint64_t dev_capa; /* RTE_ETH_DEV_CAPA_* bits */
1193 };
1194 
/* NOTE(review): fragment of the Rx queue info struct (opening line and
 * some members outside this view). */
1200  struct rte_mempool *mp; /* mempool feeding this Rx queue */
1202  uint8_t scattered_rx; /* scattered-packet Rx enabled on this queue */
1203  uint16_t nb_desc; /* configured descriptor count */
1205 
/* NOTE(review): fragment of the Tx queue info struct. */
1212  uint16_t nb_desc; /* configured descriptor count */
1214 
/** Maximum length of an extended statistic name, below. */
1216 #define RTE_ETH_XSTATS_NAME_SIZE 64
1217 
/* NOTE(review): fragment of the xstat value struct (opening line missing). */
1228  uint64_t id; /* index into the matching xstats name array */
1229  uint64_t value; /* statistic value */
1230 };
1231 
/* NOTE(review): closing brace of the xstat-name struct; its char array
 * member (sized RTE_ETH_XSTATS_NAME_SIZE) is outside this view. */
1241 };
1242 
1243 #define ETH_DCB_NUM_TCS 8
1244 #define ETH_MAX_VMDQ_POOL 64
1245 
/* NOTE(review): fragment of the DCB traffic-class queue mapping struct
 * (opening line missing). Maps each (pool, TC) pair to a queue range. */
1252  struct {
1253  uint8_t base; /* first queue of the range */
1254  uint8_t nb_queue; /* number of queues in the range */
1255  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1257  struct {
1258  uint8_t base;
1259  uint8_t nb_queue;
1260  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1261 };
1262 
/* NOTE(review): fragment of the DCB info struct (opening line and some
 * members outside this view). */
1268  uint8_t nb_tcs; /* number of traffic classes */
1270  uint8_t tc_bws[ETH_DCB_NUM_TCS]; /* TC bandwidth shares (percent) -- TODO confirm */
1273 };
1274 
1274 
/** Queue states. */
#define RTE_ETH_QUEUE_STATE_STOPPED 0
#define RTE_ETH_QUEUE_STATE_STARTED 1

/** Wildcard meaning "all ports". */
#define RTE_ETH_ALL RTE_MAX_ETHPORTS

/* Macros to check for valid port */
/** Log and return @p retval from the current function when @p port_id is invalid. */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return retval; \
	} \
} while (0)

/** Log and return (void) from the current function when @p port_id is invalid. */
#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return; \
	} \
} while (0)

/* L2 tunnel feature masks (used with rte_eth_dev_l2_tunnel_offload_set). */
#define ETH_L2_TUNNEL_ENABLE_MASK    0x00000001
#define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002
#define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004
#define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1310 
/**
 * User Rx callback, invoked on a burst of received packets.
 * Returns the number of packets kept (callbacks may filter the burst).
 * @p max_pkts is the capacity of the @p pkts array.
 */
typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
	void *user_param);

/**
 * User Tx callback, invoked on a burst of packets about to be transmitted.
 * Returns the number of packets to actually transmit.
 */
typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1359 
/* NOTE(review): closing brace of a definition whose body is entirely
 * outside this extracted view -- do not edit in isolation. */
1370 };
1371 
/** SR-IOV state of a device. */
struct rte_eth_dev_sriov {
	uint8_t active;          /**< Non-zero when SR-IOV is enabled -- TODO confirm encoding. */
	uint8_t nb_q_per_pool;   /**< Rx/Tx queues per pool. */
	uint16_t def_vmdq_idx;   /**< Default pool index for the PF. */
	uint16_t def_pool_q_idx; /**< First queue of the default pool. */
};
/** Accessor for a device's SR-IOV state. */
#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)

/** Maximum ethdev port name length. */
#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN

/** Owner id meaning "no owner". */
#define RTE_ETH_DEV_NO_OWNER 0

/** Maximum length of an owner name, including the terminating NUL. */
#define RTE_ETH_MAX_OWNER_NAME_LEN 64

/** Identity of an entity claiming ownership of an ethdev port. */
struct rte_eth_dev_owner {
	uint64_t id;                             /**< Owner unique id. */
	char name[RTE_ETH_MAX_OWNER_NAME_LEN];   /**< Human-readable owner name. */
};
1390 
/* Device flag bits (exposed via rte_eth_dev_info.dev_flags). */
/** Device resources are fully freed on rte_eth_dev_close(). */
#define RTE_ETH_DEV_CLOSE_REMOVE    0x0001
/** Device supports link-status-change interrupts. */
#define RTE_ETH_DEV_INTR_LSC        0x0002
/** Device is a bonded slave. */
#define RTE_ETH_DEV_BONDED_SLAVE    0x0004
/** Device supports removal interrupts. */
#define RTE_ETH_DEV_INTR_RMV        0x0008
/** Device is a port representor. */
#define RTE_ETH_DEV_REPRESENTOR     0x0010
/** Device does not support MAC change while the link is up -- TODO confirm. */
#define RTE_ETH_DEV_NOLIVE_MAC_ADDR 0x0020
1406 
1418 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
1419  const uint64_t owner_id);
1420 
1424 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
1425  for (p = rte_eth_find_next_owned_by(0, o); \
1426  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
1427  p = rte_eth_find_next_owned_by(p + 1, o))
1428 
1437 uint16_t rte_eth_find_next(uint16_t port_id);
1438 
1442 #define RTE_ETH_FOREACH_DEV(p) \
1443  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
1444 
1459 __rte_experimental
1460 uint16_t
1461 rte_eth_find_next_of(uint16_t port_id_start,
1462  const struct rte_device *parent);
1463 
1472 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
1473  for (port_id = rte_eth_find_next_of(0, parent); \
1474  port_id < RTE_MAX_ETHPORTS; \
1475  port_id = rte_eth_find_next_of(port_id + 1, parent))
1476 
1491 __rte_experimental
1492 uint16_t
1493 rte_eth_find_next_sibling(uint16_t port_id_start,
1494  uint16_t ref_port_id);
1495 
1506 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
1507  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
1508  port_id < RTE_MAX_ETHPORTS; \
1509  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
1510 
1524 __rte_experimental
1525 int rte_eth_dev_owner_new(uint64_t *owner_id);
1526 
1540 __rte_experimental
1541 int rte_eth_dev_owner_set(const uint16_t port_id,
1542  const struct rte_eth_dev_owner *owner);
1543 
1557 __rte_experimental
1558 int rte_eth_dev_owner_unset(const uint16_t port_id,
1559  const uint64_t owner_id);
1560 
1570 __rte_experimental
1571 void rte_eth_dev_owner_delete(const uint64_t owner_id);
1572 
1586 __rte_experimental
1587 int rte_eth_dev_owner_get(const uint16_t port_id,
1588  struct rte_eth_dev_owner *owner);
1589 
1602 __rte_deprecated
1603 uint16_t rte_eth_dev_count(void);
1604 
1615 uint16_t rte_eth_dev_count_avail(void);
1616 
1625 uint16_t rte_eth_dev_count_total(void);
1626 
1638 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
1639 
1648 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
1649 
1658 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
1659 
1699 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
1700  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
1701 
1713 __rte_experimental
1714 int
1715 rte_eth_dev_is_removed(uint16_t port_id);
1716 
1766 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1767  uint16_t nb_rx_desc, unsigned int socket_id,
1768  const struct rte_eth_rxconf *rx_conf,
1769  struct rte_mempool *mb_pool);
1770 
1819 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1820  uint16_t nb_tx_desc, unsigned int socket_id,
1821  const struct rte_eth_txconf *tx_conf);
1822 
1833 int rte_eth_dev_socket_id(uint16_t port_id);
1834 
1844 int rte_eth_dev_is_valid_port(uint16_t port_id);
1845 
1862 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
1863 
1879 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
1880 
1897 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
1898 
1914 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
1915 
1935 int rte_eth_dev_start(uint16_t port_id);
1936 
1944 void rte_eth_dev_stop(uint16_t port_id);
1945 
1958 int rte_eth_dev_set_link_up(uint16_t port_id);
1959 
1969 int rte_eth_dev_set_link_down(uint16_t port_id);
1970 
1979 void rte_eth_dev_close(uint16_t port_id);
1980 
2018 int rte_eth_dev_reset(uint16_t port_id);
2019 
2026 void rte_eth_promiscuous_enable(uint16_t port_id);
2027 
2034 void rte_eth_promiscuous_disable(uint16_t port_id);
2035 
2046 int rte_eth_promiscuous_get(uint16_t port_id);
2047 
2054 void rte_eth_allmulticast_enable(uint16_t port_id);
2055 
2062 void rte_eth_allmulticast_disable(uint16_t port_id);
2063 
2074 int rte_eth_allmulticast_get(uint16_t port_id);
2075 
2087 void rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
2088 
2100 void rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
2101 
2119 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
2120 
2131 int rte_eth_stats_reset(uint16_t port_id);
2132 
2162 int rte_eth_xstats_get_names(uint16_t port_id,
2163  struct rte_eth_xstat_name *xstats_names,
2164  unsigned int size);
2165 
2195 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2196  unsigned int n);
2197 
2220 int
2221 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2222  struct rte_eth_xstat_name *xstats_names, unsigned int size,
2223  uint64_t *ids);
2224 
2248 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2249  uint64_t *values, unsigned int size);
2250 
2269 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2270  uint64_t *id);
2271 
2278 void rte_eth_xstats_reset(uint16_t port_id);
2279 
2297 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
2298  uint16_t tx_queue_id, uint8_t stat_idx);
2299 
2317 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
2318  uint16_t rx_queue_id,
2319  uint8_t stat_idx);
2320 
2330 void rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
2331 
2370 void rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
2371 
2391 int rte_eth_dev_fw_version_get(uint16_t port_id,
2392  char *fw_version, size_t fw_size);
2393 
2432 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2433  uint32_t *ptypes, int num);
2434 
2446 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
2447 
2465 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
2466 
2486 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
2487 
2507 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2508  int on);
2509 
2527 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2528  enum rte_vlan_type vlan_type,
2529  uint16_t tag_type);
2530 
2553 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
2554 
2568 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
2569 
2584 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
2585 
/* Callback invoked for packets that could not be sent from a Tx buffer. */
2586 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
2587  void *userdata);
2588 
/* NOTE(review): fragment of struct rte_eth_dev_tx_buffer (opening line
 * outside this view); a packet-buffering helper for batched Tx. */
2594  buffer_tx_error_fn error_callback; /* called on send failure */
2595  void *error_userdata; /* opaque argument for error_callback */
2596  uint16_t size; /* capacity of pkts[] */
2597  uint16_t length; /* number of packets currently buffered */
2598  struct rte_mbuf *pkts[]; /* flexible array of buffered packets */
2600 };
2601 
/* Bytes needed for a tx buffer holding sz packets (header + pointer array). */
2608 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
2609  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
2610 
/* Initialize a caller-allocated tx buffer of the given capacity. */
2621 int
2622 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
2623 
/* NOTE(review): the first line of this declaration (return type is above;
 * the function name and first parameter line) is outside this view --
 * presumably rte_eth_tx_buffer_set_err_callback(buffer, ...). */
2648 int
2650  buffer_tx_error_fn callback, void *userdata);
2651 
/* Stock error callback: drop (free) unsent packets. */
2674 void
2675 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2676  void *userdata);
2677 
/* Stock error callback: count unsent packets via userdata. */
2701 void
2702 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2703  void *userdata);
2704 
/* Request freeing of up to free_cnt completed Tx mbufs on a queue. */
2730 int
2731 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
2732 
2732 
/* NOTE(review): closing brace of an enum/struct whose body is outside
 * this view. */
2748 };
2749 
/* NOTE(review): fragment of an event-descriptor struct (opening missing). */
2757  uint64_t metadata; /* event-specific metadata -- TODO confirm */
2771 };
2772 
/* NOTE(review): closing brace of enum rte_eth_event_type, whose
 * enumerators are outside this view (the name is established by the
 * typedef below). */
2790 };
2791 
/* Application callback invoked on a device event. */
2792 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
2793  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
/* Register / unregister an event callback for a port. */
2813 int rte_eth_dev_callback_register(uint16_t port_id,
2814  enum rte_eth_event_type event,
2815  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
2816 
2835 int rte_eth_dev_callback_unregister(uint16_t port_id,
2836  enum rte_eth_event_type event,
2837  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
2838 
2838 
2860 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
2861 
2882 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
2883 
2901 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
2902 
2924 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
2925  int epfd, int op, void *data);
2926 
2944 __rte_experimental
2945 int
2946 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
2947 
2961 int rte_eth_led_on(uint16_t port_id);
2962 
2976 int rte_eth_led_off(uint16_t port_id);
2977 
2991 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
2992  struct rte_eth_fc_conf *fc_conf);
2993 
3008 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
3009  struct rte_eth_fc_conf *fc_conf);
3010 
3026 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3027  struct rte_eth_pfc_conf *pfc_conf);
3028 
3048 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
3049  uint32_t pool);
3050 
3064 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
3065  struct rte_ether_addr *mac_addr);
3066 
3080 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
3081  struct rte_ether_addr *mac_addr);
3082 
3099 int rte_eth_dev_rss_reta_update(uint16_t port_id,
3100  struct rte_eth_rss_reta_entry64 *reta_conf,
3101  uint16_t reta_size);
3102 
3120 int rte_eth_dev_rss_reta_query(uint16_t port_id,
3121  struct rte_eth_rss_reta_entry64 *reta_conf,
3122  uint16_t reta_size);
3123 
3143 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3144  uint8_t on);
3145 
3164 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
3165 
3188 int rte_eth_mirror_rule_set(uint16_t port_id,
3189  struct rte_eth_mirror_conf *mirror_conf,
3190  uint8_t rule_id,
3191  uint8_t on);
3192 
3207 int rte_eth_mirror_rule_reset(uint16_t port_id,
3208  uint8_t rule_id);
3209 
3226 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3227  uint16_t tx_rate);
3228 
3243 int rte_eth_dev_rss_hash_update(uint16_t port_id,
3244  struct rte_eth_rss_conf *rss_conf);
3245 
3260 int
3261 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3262  struct rte_eth_rss_conf *rss_conf);
3263 
3282 int
3283 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3284  struct rte_eth_udp_tunnel *tunnel_udp);
3285 
3305 int
3306 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3307  struct rte_eth_udp_tunnel *tunnel_udp);
3308 
3323 __rte_deprecated
3324 int rte_eth_dev_filter_supported(uint16_t port_id,
3325  enum rte_filter_type filter_type);
3326 
3346 __rte_deprecated
3347 int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3348  enum rte_filter_op filter_op, void *arg);
3349 
3363 int rte_eth_dev_get_dcb_info(uint16_t port_id,
3364  struct rte_eth_dcb_info *dcb_info);
3365 
3366 struct rte_eth_rxtx_callback;
3367 
3392 const struct rte_eth_rxtx_callback *
3393 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3394  rte_rx_callback_fn fn, void *user_param);
3395 
3421 const struct rte_eth_rxtx_callback *
3422 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3423  rte_rx_callback_fn fn, void *user_param);
3424 
3449 const struct rte_eth_rxtx_callback *
3450 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3451  rte_tx_callback_fn fn, void *user_param);
3452 
3483 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3484  const struct rte_eth_rxtx_callback *user_cb);
3485 
3516 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3517  const struct rte_eth_rxtx_callback *user_cb);
3518 
3536 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3537  struct rte_eth_rxq_info *qinfo);
3538 
3556 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3557  struct rte_eth_txq_info *qinfo);
3558 
/**
 * Retrieve device register information as described by @p info.
 * @return 0 on success, negative value on error.
 */
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);

/**
 * Get the size of the device EEPROM.
 * @return EEPROM length on success, negative value on error.
 */
int rte_eth_dev_get_eeprom_length(uint16_t port_id);

/**
 * Read EEPROM data from the device as described by @p info.
 * @return 0 on success, negative value on error.
 */
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);

/**
 * Write EEPROM data to the device as described by @p info.
 * @return 0 on success, negative value on error.
 */
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);

/**
 * Retrieve information about the device's plug-in module into @p modinfo.
 * @note Experimental API (marked __rte_experimental).
 */
__rte_experimental
int
rte_eth_dev_get_module_info(uint16_t port_id,
			    struct rte_eth_dev_module_info *modinfo);

/**
 * Read the device's plug-in module EEPROM as described by @p info.
 * @note Experimental API (marked __rte_experimental).
 */
__rte_experimental
int
rte_eth_dev_get_module_eeprom(uint16_t port_id,
			      struct rte_dev_eeprom_info *info);

/**
 * Replace the device's list of multicast addresses to filter on.
 *
 * @param mc_addr_set Array of @p nb_mc_addr multicast Ethernet addresses.
 * @return 0 on success, negative value on error.
 */
int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
				 struct rte_ether_addr *mc_addr_set,
				 uint32_t nb_mc_addr);
3692 
/**
 * Enable hardware time-synchronization support on the device.
 * @return 0 on success, negative value on error.
 */
int rte_eth_timesync_enable(uint16_t port_id);

/**
 * Disable hardware time-synchronization support on the device.
 * @return 0 on success, negative value on error.
 */
int rte_eth_timesync_disable(uint16_t port_id);

/**
 * Read an RX timestamp captured by the device into @p timestamp.
 *
 * @param flags Device-specific selection flags — semantics are
 *              driver-defined; consult the driver documentation.
 * @return 0 on success, negative value on error.
 */
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
		struct timespec *timestamp, uint32_t flags);

/**
 * Read a TX timestamp captured by the device into @p timestamp.
 * @return 0 on success, negative value on error.
 */
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
		struct timespec *timestamp);

/**
 * Adjust the device clock by @p delta (presumably nanoseconds — confirm
 * against driver documentation).
 * @return 0 on success, negative value on error.
 */
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);

/**
 * Read the current time of the device clock into @p time.
 * @return 0 on success, negative value on error.
 */
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);

/**
 * Set the device clock to @p time.
 * @return 0 on success, negative value on error.
 */
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);

/**
 * Read the raw clock counter of the device into @p clock.
 * @note Experimental API (marked __rte_experimental).
 */
__rte_experimental
int
rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
3862 
/**
 * Configure the Ethertype used to recognize the L2 tunnel described by
 * @p l2_tunnel.
 * @return 0 on success, negative value on error.
 */
int
rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel);

/**
 * Enable (@p en non-zero) or disable the L2-tunnel offload features
 * selected by @p mask for the tunnel described by @p l2_tunnel.
 * @return 0 on success, negative value on error.
 */
int
rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en);

/**
 * Look up the port id of the device named @p name.
 *
 * @param port_id Output: matching port id on success.
 * @return 0 on success, negative value on error.
 */
int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);

/**
 * Retrieve the device name for @p port_id into the caller-provided
 * buffer @p name (must be large enough for an ethdev name).
 * @return 0 on success, negative value on error.
 */
int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);

/**
 * Adjust the requested descriptor counts (in/out parameters) to values
 * the device supports.
 *
 * @param nb_rx_desc In: requested RX ring size; out: adjusted value.
 * @param nb_tx_desc In: requested TX ring size; out: adjusted value.
 * @return 0 on success, negative value on error.
 */
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				     uint16_t *nb_rx_desc,
				     uint16_t *nb_tx_desc);

/**
 * Test whether the device supports the mempool ops named @p pool.
 * @return 0 or positive per support level, negative value on error.
 */
int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);

/**
 * Return the device's security context, or NULL if it has none.
 */
void *
rte_eth_dev_get_sec_ctx(uint16_t port_id);
3995 
3996 
3997 #include <rte_ethdev_core.h>
3998 
/**
 * Retrieve a burst of input packets from a receive queue of an Ethernet
 * device into @p rx_pkts.
 *
 * Hot-path inline: port/queue validation happens only when built with
 * RTE_LIBRTE_ETHDEV_DEBUG.
 *
 * @param port_id  Port identifier; must designate a valid device.
 * @param queue_id Index of the receive queue to poll.
 * @param rx_pkts  Output array with room for at least @p nb_pkts mbufs.
 * @param nb_pkts  Maximum number of packets to retrieve.
 * @return Number of packets stored in @p rx_pkts (<= @p nb_pkts);
 *         0 on validation failure in debug builds.
 */
static inline uint16_t
rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	uint16_t nb_rx;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	/* Debug builds only: reject bad port/queue ids with a 0 return. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
		return 0;
	}
#endif
	/* Dispatch to the driver's receive-burst routine. */
	nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
				     rx_pkts, nb_pkts);

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	/* Run every post-RX callback registered on this queue in list
	 * order; each may alter the packet array and returns the new
	 * packet count. */
	if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
		struct rte_eth_rxtx_callback *cb =
			dev->post_rx_burst_cbs[queue_id];

		do {
			nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
					  nb_pkts, cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	return nb_rx;
}
4115 
/**
 * Get the count of used descriptors of an RX queue (as reported by the
 * driver's rx_queue_count hook).
 *
 * @return Non-negative descriptor count on success, or a negative errno
 *         value: -EINVAL for a bad port or queue id, -ENOTSUP when the
 *         driver does not implement rx_queue_count.
 */
static inline int
rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	/* Validated unconditionally (not a debug-only check). */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
	if (queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
}
4141 
4157 static inline int
4158 rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
4159 {
4160  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4161  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4162  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
4163  return (*dev->dev_ops->rx_descriptor_done)( \
4164  dev->data->rx_queues[queue_id], offset);
4165 }
4166 
4167 #define RTE_ETH_RX_DESC_AVAIL 0
4168 #define RTE_ETH_RX_DESC_DONE 1
4169 #define RTE_ETH_RX_DESC_UNAVAIL 2
4204 static inline int
4205 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
4206  uint16_t offset)
4207 {
4208  struct rte_eth_dev *dev;
4209  void *rxq;
4210 
4211 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4212  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4213 #endif
4214  dev = &rte_eth_devices[port_id];
4215 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4216  if (queue_id >= dev->data->nb_rx_queues)
4217  return -ENODEV;
4218 #endif
4219  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
4220  rxq = dev->data->rx_queues[queue_id];
4221 
4222  return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
4223 }
4224 
4225 #define RTE_ETH_TX_DESC_FULL 0
4226 #define RTE_ETH_TX_DESC_DONE 1
4227 #define RTE_ETH_TX_DESC_UNAVAIL 2
4262 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
4263  uint16_t queue_id, uint16_t offset)
4264 {
4265  struct rte_eth_dev *dev;
4266  void *txq;
4267 
4268 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4269  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4270 #endif
4271  dev = &rte_eth_devices[port_id];
4272 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4273  if (queue_id >= dev->data->nb_tx_queues)
4274  return -ENODEV;
4275 #endif
4276  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
4277  txq = dev->data->tx_queues[queue_id];
4278 
4279  return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
4280 }
4281 
/**
 * Send a burst of output packets on a transmit queue of an Ethernet
 * device.
 *
 * Hot-path inline: port/queue validation happens only when built with
 * RTE_LIBRTE_ETHDEV_DEBUG.
 *
 * @param tx_pkts Array of @p nb_pkts mbufs to transmit.
 * @param nb_pkts Number of packets in @p tx_pkts.
 * @return Number of packets the driver actually accepted (<= @p nb_pkts);
 *         0 on validation failure in debug builds.
 */
static inline uint16_t
rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	/* Debug builds only: reject bad port/queue ids with a 0 return. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
		return 0;
	}
#endif

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	/* Run every pre-TX callback registered on this queue in list
	 * order before handing the packets to the driver; each callback
	 * may alter the array and returns the new packet count. */
	struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];

	if (unlikely(cb != NULL)) {
		do {
			nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
					cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	/* Dispatch to the driver's transmit-burst routine. */
	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
}
4378 
4432 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
4433 
/**
 * Process a burst of output packets through the driver's TX preparation
 * hook (tx_pkt_prepare), if any, ahead of actual transmission.
 *
 * Devices without a tx_pkt_prepare hook treat every packet as ready:
 * @p nb_pkts is returned unchanged.
 *
 * @param tx_pkts Array of @p nb_pkts mbufs to prepare.
 * @return Count returned by the driver hook (driver-defined semantics);
 *         in debug builds an invalid port or queue id yields 0 with
 *         rte_errno set to EINVAL.
 */
static inline uint16_t
rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	/* Debug builds only: report invalid ids via rte_errno. */
	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
		rte_errno = EINVAL;
		return 0;
	}
#endif

	dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
		rte_errno = EINVAL;
		return 0;
	}
#endif

	/* No preparation hook: nothing to do, all packets are ready. */
	if (!dev->tx_pkt_prepare)
		return nb_pkts;

	return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
			tx_pkts, nb_pkts);
}
4464 
4465 #else
4466 
/*
 * Native no-op implementation for build targets that require no
 * preparation step, where a functional no-op would introduce an
 * unnecessary performance drop.
 *
 * Enabling this globally is generally a bad idea: it must not be used
 * when the behavior of tx_prepare can change.
 */
4475 
/* No-op variant (selected by RTE_ETHDEV_TX_PREPARE_NOOP): reports every
 * packet as ready without invoking any driver hook. */
static inline uint16_t
rte_eth_tx_prepare(__rte_unused uint16_t port_id,
		__rte_unused uint16_t queue_id,
		__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	return nb_pkts;
}
4483 
4484 #endif
4485 
4508 static inline uint16_t
4509 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
4510  struct rte_eth_dev_tx_buffer *buffer)
4511 {
4512  uint16_t sent;
4513  uint16_t to_send = buffer->length;
4514 
4515  if (to_send == 0)
4516  return 0;
4517 
4518  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
4519 
4520  buffer->length = 0;
4521 
4522  /* All packets sent, or to be dealt with by callback below */
4523  if (unlikely(sent != to_send))
4524  buffer->error_callback(&buffer->pkts[sent],
4525  (uint16_t)(to_send - sent),
4526  buffer->error_userdata);
4527 
4528  return sent;
4529 }
4530 
4561 static __rte_always_inline uint16_t
4562 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
4563  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
4564 {
4565  buffer->pkts[buffer->length++] = tx_pkt;
4566  if (buffer->length < buffer->size)
4567  return 0;
4568 
4569  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
4570 }
4571 
4572 #ifdef __cplusplus
4573 }
4574 #endif
4575 
4576 #endif /* _RTE_ETHDEV_H_ */