DPDK  16.04.0
rte_ethdev.h
1 /*-
2  * BSD LICENSE
3  *
4  * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * * Redistributions of source code must retain the above copyright
12  * notice, this list of conditions and the following disclaimer.
13  * * Redistributions in binary form must reproduce the above copyright
14  * notice, this list of conditions and the following disclaimer in
15  * the documentation and/or other materials provided with the
16  * distribution.
17  * * Neither the name of Intel Corporation nor the names of its
18  * contributors may be used to endorse or promote products derived
19  * from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #ifndef _RTE_ETHDEV_H_
35 #define _RTE_ETHDEV_H_
36 
169 #ifdef __cplusplus
170 extern "C" {
171 #endif
172 
173 #include <stdint.h>
174 
175 #include <rte_dev.h>
176 
177 /* Use this macro to check if LRO API is supported */
178 #define RTE_ETHDEV_HAS_LRO_SUPPORT
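/*
 * Illustrative sketch (not part of this header): an application can test the
 * macro above to compile LRO configuration only when the API is available.
 * The `conf` variable is an assumption of the example.
 *
 *   struct rte_eth_conf conf = { 0 };   // zero-initialized port configuration
 * #ifdef RTE_ETHDEV_HAS_LRO_SUPPORT
 *   conf.rxmode.enable_lro = 1;         // request large receive offload
 * #endif
 */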
179 
180 #include <rte_log.h>
181 #include <rte_interrupts.h>
182 #include <rte_pci.h>
183 #include <rte_dev.h>
184 #include <rte_devargs.h>
185 #include "rte_ether.h"
186 #include "rte_eth_ctrl.h"
187 #include "rte_dev_info.h"
188 
189 struct rte_mbuf;
190 
194 struct rte_eth_stats {
195  uint64_t ipackets;
196  uint64_t opackets;
197  uint64_t ibytes;
198  uint64_t obytes;
199  uint64_t imissed;
203  uint64_t ibadcrc __rte_deprecated;
205  uint64_t ibadlen __rte_deprecated;
207  uint64_t ierrors;
208  uint64_t oerrors;
209  uint64_t imcasts;
211  uint64_t rx_nombuf;
212  uint64_t fdirmatch __rte_deprecated;
214  uint64_t fdirmiss __rte_deprecated;
216  uint64_t tx_pause_xon __rte_deprecated;
218  uint64_t rx_pause_xon __rte_deprecated;
220  uint64_t tx_pause_xoff __rte_deprecated;
222  uint64_t rx_pause_xoff __rte_deprecated;
224  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
226  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
228  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
230  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
232  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
234  uint64_t ilbpackets;
236  uint64_t olbpackets;
238  uint64_t ilbbytes;
240  uint64_t olbbytes;
242 };
243 
247 #define ETH_LINK_SPEED_AUTONEG (0 << 0)
248 #define ETH_LINK_SPEED_FIXED (1 << 0)
249 #define ETH_LINK_SPEED_10M_HD (1 << 1)
250 #define ETH_LINK_SPEED_10M (1 << 2)
251 #define ETH_LINK_SPEED_100M_HD (1 << 3)
252 #define ETH_LINK_SPEED_100M (1 << 4)
253 #define ETH_LINK_SPEED_1G (1 << 5)
254 #define ETH_LINK_SPEED_2_5G (1 << 6)
255 #define ETH_LINK_SPEED_5G (1 << 7)
256 #define ETH_LINK_SPEED_10G (1 << 8)
257 #define ETH_LINK_SPEED_20G (1 << 9)
258 #define ETH_LINK_SPEED_25G (1 << 10)
259 #define ETH_LINK_SPEED_40G (1 << 11)
260 #define ETH_LINK_SPEED_50G (1 << 12)
261 #define ETH_LINK_SPEED_56G (1 << 13)
262 #define ETH_LINK_SPEED_100G (1 << 14)
267 #define ETH_SPEED_NUM_NONE 0
268 #define ETH_SPEED_NUM_10M 10
269 #define ETH_SPEED_NUM_100M 100
270 #define ETH_SPEED_NUM_1G 1000
271 #define ETH_SPEED_NUM_2_5G 2500
272 #define ETH_SPEED_NUM_5G 5000
273 #define ETH_SPEED_NUM_10G 10000
274 #define ETH_SPEED_NUM_20G 20000
275 #define ETH_SPEED_NUM_25G 25000
276 #define ETH_SPEED_NUM_40G 40000
277 #define ETH_SPEED_NUM_50G 50000
278 #define ETH_SPEED_NUM_56G 56000
279 #define ETH_SPEED_NUM_100G 100000
284 struct rte_eth_link {
285  uint32_t link_speed;
286  uint16_t link_duplex : 1;
287  uint16_t link_autoneg : 1;
288  uint16_t link_status : 1;
289 } __attribute__((aligned(8)));
291 /* Utility constants */
292 #define ETH_LINK_HALF_DUPLEX 0
293 #define ETH_LINK_FULL_DUPLEX 1
294 #define ETH_LINK_DOWN 0
295 #define ETH_LINK_UP 1
296 #define ETH_LINK_FIXED 0
297 #define ETH_LINK_AUTONEG 1
303 struct rte_eth_thresh {
304  uint8_t pthresh;
305  uint8_t hthresh;
306  uint8_t wthresh;
307 };
308 
312 #define ETH_MQ_RX_RSS_FLAG 0x1
313 #define ETH_MQ_RX_DCB_FLAG 0x2
314 #define ETH_MQ_RX_VMDQ_FLAG 0x4
315 
323 
327  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
329  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
330 
332  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
334  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
336  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
339  ETH_MQ_RX_VMDQ_FLAG,
340 };
341 
345 #define ETH_RSS ETH_MQ_RX_RSS
346 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
347 #define ETH_DCB_RX ETH_MQ_RX_DCB
348 
358 };
359 
363 #define ETH_DCB_NONE ETH_MQ_TX_NONE
364 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
365 #define ETH_DCB_TX ETH_MQ_TX_DCB
366 
373  uint32_t max_rx_pkt_len;
374  uint16_t split_hdr_size;
375  uint16_t header_split : 1,
376  hw_ip_checksum : 1,
377  hw_vlan_filter : 1,
378  hw_vlan_strip : 1,
379  hw_vlan_extend : 1,
380  jumbo_frame : 1,
381  hw_strip_crc : 1,
382  enable_scatter : 1,
383  enable_lro : 1;
384 };
385 
391  ETH_VLAN_TYPE_UNKNOWN = 0,
394  ETH_VLAN_TYPE_MAX,
395 };
396 
415  uint8_t *rss_key;
416  uint8_t rss_key_len;
417  uint64_t rss_hf;
418 };
419 
420 /*
421  * The RSS offload types are defined based on flow types which are defined
422  * in rte_eth_ctrl.h. Different NIC hardware may support different RSS offload
423  * types. The supported flow types or RSS offload types can be queried by
424  * rte_eth_dev_info_get().
425  */
426 #define ETH_RSS_IPV4 (1ULL << RTE_ETH_FLOW_IPV4)
427 #define ETH_RSS_FRAG_IPV4 (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
428 #define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
429 #define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
430 #define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
431 #define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
432 #define ETH_RSS_IPV6 (1ULL << RTE_ETH_FLOW_IPV6)
433 #define ETH_RSS_FRAG_IPV6 (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
434 #define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
435 #define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
436 #define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
437 #define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
438 #define ETH_RSS_L2_PAYLOAD (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
439 #define ETH_RSS_IPV6_EX (1ULL << RTE_ETH_FLOW_IPV6_EX)
440 #define ETH_RSS_IPV6_TCP_EX (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
441 #define ETH_RSS_IPV6_UDP_EX (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
442 
443 #define ETH_RSS_IP ( \
444  ETH_RSS_IPV4 | \
445  ETH_RSS_FRAG_IPV4 | \
446  ETH_RSS_NONFRAG_IPV4_OTHER | \
447  ETH_RSS_IPV6 | \
448  ETH_RSS_FRAG_IPV6 | \
449  ETH_RSS_NONFRAG_IPV6_OTHER | \
450  ETH_RSS_IPV6_EX)
451 
452 #define ETH_RSS_UDP ( \
453  ETH_RSS_NONFRAG_IPV4_UDP | \
454  ETH_RSS_NONFRAG_IPV6_UDP | \
455  ETH_RSS_IPV6_UDP_EX)
456 
457 #define ETH_RSS_TCP ( \
458  ETH_RSS_NONFRAG_IPV4_TCP | \
459  ETH_RSS_NONFRAG_IPV6_TCP | \
460  ETH_RSS_IPV6_TCP_EX)
461 
462 #define ETH_RSS_SCTP ( \
463  ETH_RSS_NONFRAG_IPV4_SCTP | \
464  ETH_RSS_NONFRAG_IPV6_SCTP)
465 
467 #define ETH_RSS_PROTO_MASK ( \
468  ETH_RSS_IPV4 | \
469  ETH_RSS_FRAG_IPV4 | \
470  ETH_RSS_NONFRAG_IPV4_TCP | \
471  ETH_RSS_NONFRAG_IPV4_UDP | \
472  ETH_RSS_NONFRAG_IPV4_SCTP | \
473  ETH_RSS_NONFRAG_IPV4_OTHER | \
474  ETH_RSS_IPV6 | \
475  ETH_RSS_FRAG_IPV6 | \
476  ETH_RSS_NONFRAG_IPV6_TCP | \
477  ETH_RSS_NONFRAG_IPV6_UDP | \
478  ETH_RSS_NONFRAG_IPV6_SCTP | \
479  ETH_RSS_NONFRAG_IPV6_OTHER | \
480  ETH_RSS_L2_PAYLOAD | \
481  ETH_RSS_IPV6_EX | \
482  ETH_RSS_IPV6_TCP_EX | \
483  ETH_RSS_IPV6_UDP_EX)
484 
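/*
 * Illustrative sketch: request RSS hashing on IP/TCP/UDP traffic at
 * configuration time using the group macros above. Port 0 and the queue
 * counts are assumptions of the example; error handling is omitted.
 *
 *   struct rte_eth_conf conf = { 0 };
 *   conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *   conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP;
 *   rte_eth_dev_configure(0, 4, 4, &conf);   // 4 RX and 4 TX queues
 */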
485 /*
486  * Definitions used for redirection table entry size.
487  * Some RSS RETA sizes may not be supported by some drivers; check the
488  * documentation or the description of relevant functions for more details.
489  */
490 #define ETH_RSS_RETA_SIZE_64 64
491 #define ETH_RSS_RETA_SIZE_128 128
492 #define ETH_RSS_RETA_SIZE_512 512
493 #define RTE_RETA_GROUP_SIZE 64
494 
495 /* Definitions used for VMDQ and DCB functionality */
496 #define ETH_VMDQ_MAX_VLAN_FILTERS 64
497 #define ETH_DCB_NUM_USER_PRIORITIES 8
498 #define ETH_VMDQ_DCB_NUM_QUEUES 128
499 #define ETH_DCB_NUM_QUEUES 128
501 /* DCB capability defines */
502 #define ETH_DCB_PG_SUPPORT 0x00000001
503 #define ETH_DCB_PFC_SUPPORT 0x00000002
505 /* Definitions used for VLAN Offload functionality */
506 #define ETH_VLAN_STRIP_OFFLOAD 0x0001
507 #define ETH_VLAN_FILTER_OFFLOAD 0x0002
508 #define ETH_VLAN_EXTEND_OFFLOAD 0x0004
510 /* Definitions used for mask VLAN setting */
511 #define ETH_VLAN_STRIP_MASK 0x0001
512 #define ETH_VLAN_FILTER_MASK 0x0002
513 #define ETH_VLAN_EXTEND_MASK 0x0004
514 #define ETH_VLAN_ID_MAX 0x0FFF
516 /* Definitions used for receive MAC address */
517 #define ETH_NUM_RECEIVE_MAC_ADDR 128
519 /* Definitions used for unicast hash */
520 #define ETH_VMDQ_NUM_UC_HASH_ARRAY 128
522 /* Definitions used for VMDQ pool rx mode setting */
523 #define ETH_VMDQ_ACCEPT_UNTAG 0x0001
524 #define ETH_VMDQ_ACCEPT_HASH_MC 0x0002
525 #define ETH_VMDQ_ACCEPT_HASH_UC 0x0004
526 #define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
527 #define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
530 #define ETH_MIRROR_MAX_VLANS 64
531 
532 #define ETH_MIRROR_VIRTUAL_POOL_UP 0x01
533 #define ETH_MIRROR_UPLINK_PORT 0x02
534 #define ETH_MIRROR_DOWNLINK_PORT 0x04
535 #define ETH_MIRROR_VLAN 0x08
536 #define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10
541 struct rte_eth_vlan_mirror {
542  uint64_t vlan_mask;
544  uint16_t vlan_id[ETH_MIRROR_MAX_VLANS];
545 };
546 
551  uint8_t rule_type;
552  uint8_t dst_pool;
553  uint64_t pool_mask;
556 };
557 
565  uint64_t mask;
567  uint16_t reta[RTE_RETA_GROUP_SIZE];
569 };
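/*
 * Illustrative sketch: spread a 128-entry redirection table evenly across
 * 4 RX queues. Port 0 and a reta_size of 128 are assumptions of the example.
 *
 *   struct rte_eth_rss_reta_entry64 reta_conf[2];
 *   memset(reta_conf, 0, sizeof(reta_conf));
 *   for (int i = 0; i < 128; i++) {
 *       int idx = i / RTE_RETA_GROUP_SIZE;
 *       int shift = i % RTE_RETA_GROUP_SIZE;
 *       reta_conf[idx].mask |= 1ULL << shift;    // mark entry as valid
 *       reta_conf[idx].reta[shift] = i % 4;      // target RX queue
 *   }
 *   rte_eth_dev_rss_reta_update(0, reta_conf, 128);
 */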
570 
576  ETH_4_TCS = 4,
578 };
579 
589 };
590 
591 /* This structure may be extended in future. */
592 struct rte_eth_dcb_rx_conf {
593  enum rte_eth_nb_tcs nb_tcs;
595  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
596 };
597 
598 struct rte_eth_vmdq_dcb_tx_conf {
599  enum rte_eth_nb_pools nb_queue_pools;
601  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
602 };
603 
604 struct rte_eth_dcb_tx_conf {
605  enum rte_eth_nb_tcs nb_tcs;
607  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
608 };
609 
610 struct rte_eth_vmdq_tx_conf {
611  enum rte_eth_nb_pools nb_queue_pools;
612 };
613 
628  uint8_t default_pool;
629  uint8_t nb_pool_maps;
630  struct {
631  uint16_t vlan_id;
632  uint64_t pools;
636 };
637 
638 struct rte_eth_vmdq_rx_conf {
639  enum rte_eth_nb_pools nb_queue_pools;
640  uint8_t enable_default_pool;
641  uint8_t default_pool;
642  uint8_t enable_loop_back;
643  uint8_t nb_pool_maps;
644  uint32_t rx_mode;
645  struct {
646  uint16_t vlan_id;
647  uint64_t pools;
648  } pool_map[ETH_VMDQ_MAX_VLAN_FILTERS];
649 };
650 
657  /* For i40e specifically */
658  uint16_t pvid;
659  uint8_t hw_vlan_reject_tagged : 1,
665 };
666 
672  uint16_t rx_free_thresh;
673  uint8_t rx_drop_en;
675 };
676 
677 #define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001
678 #define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002
679 #define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004
680 #define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100
681 #define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200
682 #define ETH_TXQ_FLAGS_NOXSUMUDP 0x0400
683 #define ETH_TXQ_FLAGS_NOXSUMTCP 0x0800
684 #define ETH_TXQ_FLAGS_NOOFFLOADS \
685  (ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
686  ETH_TXQ_FLAGS_NOXSUMUDP | ETH_TXQ_FLAGS_NOXSUMTCP)
687 #define ETH_TXQ_FLAGS_NOXSUMS \
688  (ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
689  ETH_TXQ_FLAGS_NOXSUMTCP)
690 
695  uint16_t tx_rs_thresh;
696  uint16_t tx_free_thresh;
699  uint32_t txq_flags;
701 };
702 
707  uint16_t nb_max;
708  uint16_t nb_min;
709  uint16_t nb_align;
710 };
711 
720 };
721 
728  uint32_t high_water;
729  uint32_t low_water;
730  uint16_t pause_time;
731  uint16_t send_xon;
734  uint8_t autoneg;
735 };
736 
744  uint8_t priority;
745 };
746 
755 };
756 
764 };
765 
777  uint8_t drop_queue;
778  struct rte_eth_fdir_masks mask;
781 };
782 
791  uint16_t udp_port;
792  uint8_t prot_type;
793 };
794 
800  uint16_t lsc;
802  uint16_t rxq;
803 };
804 
810 struct rte_eth_conf {
811  uint32_t link_speeds;
820  uint32_t lpbk_mode;
825  struct {
829  struct rte_eth_dcb_rx_conf dcb_rx_conf;
831  struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
833  } rx_adv_conf;
834  union {
835  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
837  struct rte_eth_dcb_tx_conf dcb_tx_conf;
839  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
841  } tx_adv_conf;
847 };
848 
858 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
859 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
860 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
861 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
862 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
863 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
864 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
865 
869 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
870 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
871 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
872 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
873 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
874 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
875 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
876 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
877 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
878 
884  const char *driver_name;
885  unsigned int if_index;
887  uint32_t min_rx_bufsize;
888  uint32_t max_rx_pktlen;
889  uint16_t max_rx_queues;
890  uint16_t max_tx_queues;
891  uint32_t max_mac_addrs;
892  uint32_t max_hash_mac_addrs;
894  uint16_t max_vfs;
895  uint16_t max_vmdq_pools;
896  uint32_t rx_offload_capa;
897  uint32_t tx_offload_capa;
898  uint16_t reta_size;
900  uint8_t hash_key_size;
905  uint16_t vmdq_queue_base;
906  uint16_t vmdq_queue_num;
907  uint16_t vmdq_pool_base;
910  uint32_t speed_capa;
911 };
912 
918  struct rte_mempool *mp;
920  uint8_t scattered_rx;
921  uint16_t nb_desc;
923 
930  uint16_t nb_desc;
932 
934 #define RTE_ETH_XSTATS_NAME_SIZE 64
935 
944  char name[RTE_ETH_XSTATS_NAME_SIZE];
945  uint64_t value;
946 };
947 
948 #define ETH_DCB_NUM_TCS 8
949 #define ETH_MAX_VMDQ_POOL 64
950 
957  struct {
958  uint8_t base;
959  uint8_t nb_queue;
960  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
962  struct {
963  uint8_t base;
964  uint8_t nb_queue;
965  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
966 };
967 
973  uint8_t nb_tcs;
975  uint8_t tc_bws[ETH_DCB_NUM_TCS];
978 };
979 
983 #define RTE_ETH_QUEUE_STATE_STOPPED 0
984 #define RTE_ETH_QUEUE_STATE_STARTED 1
985 
986 struct rte_eth_dev;
987 
988 struct rte_eth_dev_callback;
990 TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
991 
992 
993 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
994 #define RTE_PMD_DEBUG_TRACE(...) \
995  rte_pmd_debug_trace(__func__, __VA_ARGS__)
996 #else
997 #define RTE_PMD_DEBUG_TRACE(...)
998 #endif
999 
1000 
1001 /* Macros to check for valid port */
1002 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1003  if (!rte_eth_dev_is_valid_port(port_id)) { \
1004  RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
1005  return retval; \
1006  } \
1007 } while (0)
1008 
1009 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1010  if (!rte_eth_dev_is_valid_port(port_id)) { \
1011  RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
1012  return; \
1013  } \
1014 } while (0)
1015 
1021 #define ETH_L2_TUNNEL_ENABLE_MASK 0x00000001
1022 
1023 #define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002
1024 
1025 #define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004
1026 
1027 #define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1028 
1029 /*
1030  * Definitions of all functions exported by an Ethernet driver through the
1031  * generic structure of type *eth_dev_ops* supplied in the *rte_eth_dev*
1032  * structure associated with an Ethernet device.
1033  */
1034 
1035 typedef int (*eth_dev_configure_t)(struct rte_eth_dev *dev);
1038 typedef int (*eth_dev_start_t)(struct rte_eth_dev *dev);
1041 typedef void (*eth_dev_stop_t)(struct rte_eth_dev *dev);
1044 typedef int (*eth_dev_set_link_up_t)(struct rte_eth_dev *dev);
1047 typedef int (*eth_dev_set_link_down_t)(struct rte_eth_dev *dev);
1050 typedef void (*eth_dev_close_t)(struct rte_eth_dev *dev);
1053 typedef void (*eth_promiscuous_enable_t)(struct rte_eth_dev *dev);
1056 typedef void (*eth_promiscuous_disable_t)(struct rte_eth_dev *dev);
1059 typedef void (*eth_allmulticast_enable_t)(struct rte_eth_dev *dev);
1062 typedef void (*eth_allmulticast_disable_t)(struct rte_eth_dev *dev);
1065 typedef int (*eth_link_update_t)(struct rte_eth_dev *dev,
1066  int wait_to_complete);
1069 typedef void (*eth_stats_get_t)(struct rte_eth_dev *dev,
1070  struct rte_eth_stats *igb_stats);
1073 typedef void (*eth_stats_reset_t)(struct rte_eth_dev *dev);
1076 typedef int (*eth_xstats_get_t)(struct rte_eth_dev *dev,
1077  struct rte_eth_xstats *stats, unsigned n);
1080 typedef void (*eth_xstats_reset_t)(struct rte_eth_dev *dev);
1083 typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
1084  uint16_t queue_id,
1085  uint8_t stat_idx,
1086  uint8_t is_rx);
1089 typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
1090  struct rte_eth_dev_info *dev_info);
1093 typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(struct rte_eth_dev *dev);
1096 typedef int (*eth_queue_start_t)(struct rte_eth_dev *dev,
1097  uint16_t queue_id);
1100 typedef int (*eth_queue_stop_t)(struct rte_eth_dev *dev,
1101  uint16_t queue_id);
1104 typedef int (*eth_rx_queue_setup_t)(struct rte_eth_dev *dev,
1105  uint16_t rx_queue_id,
1106  uint16_t nb_rx_desc,
1107  unsigned int socket_id,
1108  const struct rte_eth_rxconf *rx_conf,
1109  struct rte_mempool *mb_pool);
1112 typedef int (*eth_tx_queue_setup_t)(struct rte_eth_dev *dev,
1113  uint16_t tx_queue_id,
1114  uint16_t nb_tx_desc,
1115  unsigned int socket_id,
1116  const struct rte_eth_txconf *tx_conf);
1119 typedef int (*eth_rx_enable_intr_t)(struct rte_eth_dev *dev,
1120  uint16_t rx_queue_id);
1123 typedef int (*eth_rx_disable_intr_t)(struct rte_eth_dev *dev,
1124  uint16_t rx_queue_id);
1127 typedef void (*eth_queue_release_t)(void *queue);
1130 typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
1131  uint16_t rx_queue_id);
1134 typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
1137 typedef void (*eth_rxq_info_get_t)(struct rte_eth_dev *dev,
1138  uint16_t rx_queue_id, struct rte_eth_rxq_info *qinfo);
1139 
1140 typedef void (*eth_txq_info_get_t)(struct rte_eth_dev *dev,
1141  uint16_t tx_queue_id, struct rte_eth_txq_info *qinfo);
1142 
1143 typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);
1146 typedef int (*vlan_filter_set_t)(struct rte_eth_dev *dev,
1147  uint16_t vlan_id,
1148  int on);
1151 typedef int (*vlan_tpid_set_t)(struct rte_eth_dev *dev,
1152  enum rte_vlan_type type, uint16_t tpid);
1155 typedef void (*vlan_offload_set_t)(struct rte_eth_dev *dev, int mask);
1158 typedef int (*vlan_pvid_set_t)(struct rte_eth_dev *dev,
1159  uint16_t vlan_id,
1160  int on);
1163 typedef void (*vlan_strip_queue_set_t)(struct rte_eth_dev *dev,
1164  uint16_t rx_queue_id,
1165  int on);
1168 typedef uint16_t (*eth_rx_burst_t)(void *rxq,
1169  struct rte_mbuf **rx_pkts,
1170  uint16_t nb_pkts);
1173 typedef uint16_t (*eth_tx_burst_t)(void *txq,
1174  struct rte_mbuf **tx_pkts,
1175  uint16_t nb_pkts);
1178 typedef int (*flow_ctrl_get_t)(struct rte_eth_dev *dev,
1179  struct rte_eth_fc_conf *fc_conf);
1182 typedef int (*flow_ctrl_set_t)(struct rte_eth_dev *dev,
1183  struct rte_eth_fc_conf *fc_conf);
1186 typedef int (*priority_flow_ctrl_set_t)(struct rte_eth_dev *dev,
1187  struct rte_eth_pfc_conf *pfc_conf);
1190 typedef int (*reta_update_t)(struct rte_eth_dev *dev,
1191  struct rte_eth_rss_reta_entry64 *reta_conf,
1192  uint16_t reta_size);
1195 typedef int (*reta_query_t)(struct rte_eth_dev *dev,
1196  struct rte_eth_rss_reta_entry64 *reta_conf,
1197  uint16_t reta_size);
1200 typedef int (*rss_hash_update_t)(struct rte_eth_dev *dev,
1201  struct rte_eth_rss_conf *rss_conf);
1204 typedef int (*rss_hash_conf_get_t)(struct rte_eth_dev *dev,
1205  struct rte_eth_rss_conf *rss_conf);
1208 typedef int (*eth_dev_led_on_t)(struct rte_eth_dev *dev);
1211 typedef int (*eth_dev_led_off_t)(struct rte_eth_dev *dev);
1214 typedef void (*eth_mac_addr_remove_t)(struct rte_eth_dev *dev, uint32_t index);
1217 typedef void (*eth_mac_addr_add_t)(struct rte_eth_dev *dev,
1218  struct ether_addr *mac_addr,
1219  uint32_t index,
1220  uint32_t vmdq);
1223 typedef void (*eth_mac_addr_set_t)(struct rte_eth_dev *dev,
1224  struct ether_addr *mac_addr);
1227 typedef int (*eth_uc_hash_table_set_t)(struct rte_eth_dev *dev,
1228  struct ether_addr *mac_addr,
1229  uint8_t on);
1232 typedef int (*eth_uc_all_hash_table_set_t)(struct rte_eth_dev *dev,
1233  uint8_t on);
1236 typedef int (*eth_set_vf_rx_mode_t)(struct rte_eth_dev *dev,
1237  uint16_t vf,
1238  uint16_t rx_mode,
1239  uint8_t on);
1242 typedef int (*eth_set_vf_rx_t)(struct rte_eth_dev *dev,
1243  uint16_t vf,
1244  uint8_t on);
1247 typedef int (*eth_set_vf_tx_t)(struct rte_eth_dev *dev,
1248  uint16_t vf,
1249  uint8_t on);
1252 typedef int (*eth_set_vf_vlan_filter_t)(struct rte_eth_dev *dev,
1253  uint16_t vlan,
1254  uint64_t vf_mask,
1255  uint8_t vlan_on);
1258 typedef int (*eth_set_queue_rate_limit_t)(struct rte_eth_dev *dev,
1259  uint16_t queue_idx,
1260  uint16_t tx_rate);
1263 typedef int (*eth_set_vf_rate_limit_t)(struct rte_eth_dev *dev,
1264  uint16_t vf,
1265  uint16_t tx_rate,
1266  uint64_t q_msk);
1269 typedef int (*eth_mirror_rule_set_t)(struct rte_eth_dev *dev,
1270  struct rte_eth_mirror_conf *mirror_conf,
1271  uint8_t rule_id,
1272  uint8_t on);
1275 typedef int (*eth_mirror_rule_reset_t)(struct rte_eth_dev *dev,
1276  uint8_t rule_id);
1279 typedef int (*eth_udp_tunnel_port_add_t)(struct rte_eth_dev *dev,
1280  struct rte_eth_udp_tunnel *tunnel_udp);
1283 typedef int (*eth_udp_tunnel_port_del_t)(struct rte_eth_dev *dev,
1284  struct rte_eth_udp_tunnel *tunnel_udp);
1287 typedef int (*eth_set_mc_addr_list_t)(struct rte_eth_dev *dev,
1288  struct ether_addr *mc_addr_set,
1289  uint32_t nb_mc_addr);
1292 typedef int (*eth_timesync_enable_t)(struct rte_eth_dev *dev);
1295 typedef int (*eth_timesync_disable_t)(struct rte_eth_dev *dev);
1298 typedef int (*eth_timesync_read_rx_timestamp_t)(struct rte_eth_dev *dev,
1299  struct timespec *timestamp,
1300  uint32_t flags);
1303 typedef int (*eth_timesync_read_tx_timestamp_t)(struct rte_eth_dev *dev,
1304  struct timespec *timestamp);
1307 typedef int (*eth_timesync_adjust_time)(struct rte_eth_dev *dev, int64_t);
1310 typedef int (*eth_timesync_read_time)(struct rte_eth_dev *dev,
1311  struct timespec *timestamp);
1314 typedef int (*eth_timesync_write_time)(struct rte_eth_dev *dev,
1315  const struct timespec *timestamp);
1318 typedef int (*eth_get_reg_length_t)(struct rte_eth_dev *dev);
1321 typedef int (*eth_get_reg_t)(struct rte_eth_dev *dev,
1322  struct rte_dev_reg_info *info);
1325 typedef int (*eth_get_eeprom_length_t)(struct rte_eth_dev *dev);
1328 typedef int (*eth_get_eeprom_t)(struct rte_eth_dev *dev,
1329  struct rte_dev_eeprom_info *info);
1332 typedef int (*eth_set_eeprom_t)(struct rte_eth_dev *dev,
1333  struct rte_dev_eeprom_info *info);
1336 typedef int (*eth_l2_tunnel_eth_type_conf_t)
1337  (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
1340 typedef int (*eth_l2_tunnel_offload_set_t)
1341  (struct rte_eth_dev *dev,
1342  struct rte_eth_l2_tunnel_conf *l2_tunnel,
1343  uint32_t mask,
1344  uint8_t en);
1347 #ifdef RTE_NIC_BYPASS
1348 
1349 enum {
1350  RTE_BYPASS_MODE_NONE,
1351  RTE_BYPASS_MODE_NORMAL,
1352  RTE_BYPASS_MODE_BYPASS,
1353  RTE_BYPASS_MODE_ISOLATE,
1354  RTE_BYPASS_MODE_NUM,
1355 };
1356 
1357 #define RTE_BYPASS_MODE_VALID(x) \
1358  ((x) > RTE_BYPASS_MODE_NONE && (x) < RTE_BYPASS_MODE_NUM)
1359 
1360 enum {
1361  RTE_BYPASS_EVENT_NONE,
1362  RTE_BYPASS_EVENT_START,
1363  RTE_BYPASS_EVENT_OS_ON = RTE_BYPASS_EVENT_START,
1364  RTE_BYPASS_EVENT_POWER_ON,
1365  RTE_BYPASS_EVENT_OS_OFF,
1366  RTE_BYPASS_EVENT_POWER_OFF,
1367  RTE_BYPASS_EVENT_TIMEOUT,
1368  RTE_BYPASS_EVENT_NUM
1369 };
1370 
1371 #define RTE_BYPASS_EVENT_VALID(x) \
1372  ((x) > RTE_BYPASS_EVENT_NONE && (x) < RTE_BYPASS_EVENT_NUM)
1373 
1374 enum {
1375  RTE_BYPASS_TMT_OFF, /* timeout disabled. */
1376  RTE_BYPASS_TMT_1_5_SEC, /* timeout for 1.5 seconds */
1377  RTE_BYPASS_TMT_2_SEC, /* timeout for 2 seconds */
1378  RTE_BYPASS_TMT_3_SEC, /* timeout for 3 seconds */
1379  RTE_BYPASS_TMT_4_SEC, /* timeout for 4 seconds */
1380  RTE_BYPASS_TMT_8_SEC, /* timeout for 8 seconds */
1381  RTE_BYPASS_TMT_16_SEC, /* timeout for 16 seconds */
1382  RTE_BYPASS_TMT_32_SEC, /* timeout for 32 seconds */
1383  RTE_BYPASS_TMT_NUM
1384 };
1385 
1386 #define RTE_BYPASS_TMT_VALID(x) \
1387  ((x) == RTE_BYPASS_TMT_OFF || \
1388  ((x) > RTE_BYPASS_TMT_OFF && (x) < RTE_BYPASS_TMT_NUM))
1389 
1390 typedef void (*bypass_init_t)(struct rte_eth_dev *dev);
1391 typedef int32_t (*bypass_state_set_t)(struct rte_eth_dev *dev, uint32_t *new_state);
1392 typedef int32_t (*bypass_state_show_t)(struct rte_eth_dev *dev, uint32_t *state);
1393 typedef int32_t (*bypass_event_set_t)(struct rte_eth_dev *dev, uint32_t state, uint32_t event);
1394 typedef int32_t (*bypass_event_show_t)(struct rte_eth_dev *dev, uint32_t event_shift, uint32_t *event);
1395 typedef int32_t (*bypass_wd_timeout_set_t)(struct rte_eth_dev *dev, uint32_t timeout);
1396 typedef int32_t (*bypass_wd_timeout_show_t)(struct rte_eth_dev *dev, uint32_t *wd_timeout);
1397 typedef int32_t (*bypass_ver_show_t)(struct rte_eth_dev *dev, uint32_t *ver);
1398 typedef int32_t (*bypass_wd_reset_t)(struct rte_eth_dev *dev);
1399 #endif
1400 
1401 typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
1402  enum rte_filter_type filter_type,
1403  enum rte_filter_op filter_op,
1404  void *arg);
1407 typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
1408  struct rte_eth_dcb_info *dcb_info);
1414 struct eth_dev_ops {
1415  eth_dev_configure_t dev_configure;
1416  eth_dev_start_t dev_start;
1417  eth_dev_stop_t dev_stop;
1418  eth_dev_set_link_up_t dev_set_link_up;
1419  eth_dev_set_link_down_t dev_set_link_down;
1420  eth_dev_close_t dev_close;
1421  eth_promiscuous_enable_t promiscuous_enable;
1422  eth_promiscuous_disable_t promiscuous_disable;
1423  eth_allmulticast_enable_t allmulticast_enable;
1424  eth_allmulticast_disable_t allmulticast_disable;
1425  eth_link_update_t link_update;
1426  eth_stats_get_t stats_get;
1427  eth_stats_reset_t stats_reset;
1428  eth_xstats_get_t xstats_get;
1429  eth_xstats_reset_t xstats_reset;
1430  eth_queue_stats_mapping_set_t queue_stats_mapping_set;
1432  eth_dev_infos_get_t dev_infos_get;
1433  eth_dev_supported_ptypes_get_t dev_supported_ptypes_get;
1435  mtu_set_t mtu_set;
1436  vlan_filter_set_t vlan_filter_set;
1437  vlan_tpid_set_t vlan_tpid_set;
1438  vlan_strip_queue_set_t vlan_strip_queue_set;
1439  vlan_offload_set_t vlan_offload_set;
1440  vlan_pvid_set_t vlan_pvid_set;
1441  eth_queue_start_t rx_queue_start;
1442  eth_queue_stop_t rx_queue_stop;
1443  eth_queue_start_t tx_queue_start;
1444  eth_queue_stop_t tx_queue_stop;
1445  eth_rx_queue_setup_t rx_queue_setup;
1446  eth_queue_release_t rx_queue_release;
1447  eth_rx_queue_count_t rx_queue_count;
1448  eth_rx_descriptor_done_t rx_descriptor_done;
1450  eth_rx_enable_intr_t rx_queue_intr_enable;
1452  eth_rx_disable_intr_t rx_queue_intr_disable;
1453  eth_tx_queue_setup_t tx_queue_setup;
1454  eth_queue_release_t tx_queue_release;
1455  eth_dev_led_on_t dev_led_on;
1456  eth_dev_led_off_t dev_led_off;
1457  flow_ctrl_get_t flow_ctrl_get;
1458  flow_ctrl_set_t flow_ctrl_set;
1459  priority_flow_ctrl_set_t priority_flow_ctrl_set;
1460  eth_mac_addr_remove_t mac_addr_remove;
1461  eth_mac_addr_add_t mac_addr_add;
1462  eth_mac_addr_set_t mac_addr_set;
1463  eth_uc_hash_table_set_t uc_hash_table_set;
1464  eth_uc_all_hash_table_set_t uc_all_hash_table_set;
1465  eth_mirror_rule_set_t mirror_rule_set;
1466  eth_mirror_rule_reset_t mirror_rule_reset;
1467  eth_set_vf_rx_mode_t set_vf_rx_mode;
1468  eth_set_vf_rx_t set_vf_rx;
1469  eth_set_vf_tx_t set_vf_tx;
1470  eth_set_vf_vlan_filter_t set_vf_vlan_filter;
1472  eth_udp_tunnel_port_add_t udp_tunnel_port_add;
1474  eth_udp_tunnel_port_del_t udp_tunnel_port_del;
1475  eth_set_queue_rate_limit_t set_queue_rate_limit;
1476  eth_set_vf_rate_limit_t set_vf_rate_limit;
1478  reta_update_t reta_update;
1480  reta_query_t reta_query;
1481 
1482  eth_get_reg_length_t get_reg_length;
1484  eth_get_reg_t get_reg;
1486  eth_get_eeprom_length_t get_eeprom_length;
1488  eth_get_eeprom_t get_eeprom;
1490  eth_set_eeprom_t set_eeprom;
1492  /* bypass control */
1493 #ifdef RTE_NIC_BYPASS
1494  bypass_init_t bypass_init;
1495  bypass_state_set_t bypass_state_set;
1496  bypass_state_show_t bypass_state_show;
1497  bypass_event_set_t bypass_event_set;
1498  bypass_event_show_t bypass_event_show;
1499  bypass_wd_timeout_set_t bypass_wd_timeout_set;
1500  bypass_wd_timeout_show_t bypass_wd_timeout_show;
1501  bypass_ver_show_t bypass_ver_show;
1502  bypass_wd_reset_t bypass_wd_reset;
1503 #endif
1504 
1506  rss_hash_update_t rss_hash_update;
1508  rss_hash_conf_get_t rss_hash_conf_get;
1509  eth_filter_ctrl_t filter_ctrl;
1511  eth_set_mc_addr_list_t set_mc_addr_list;
1512  eth_rxq_info_get_t rxq_info_get;
1514  eth_txq_info_get_t txq_info_get;
1517  eth_timesync_enable_t timesync_enable;
1519  eth_timesync_disable_t timesync_disable;
1521  eth_timesync_read_rx_timestamp_t timesync_read_rx_timestamp;
1523  eth_timesync_read_tx_timestamp_t timesync_read_tx_timestamp;
1524 
1526  eth_get_dcb_info get_dcb_info;
1528  eth_timesync_adjust_time timesync_adjust_time;
1530  eth_timesync_read_time timesync_read_time;
1532  eth_timesync_write_time timesync_write_time;
1534  eth_l2_tunnel_eth_type_conf_t l2_tunnel_eth_type_conf;
1536  eth_l2_tunnel_offload_set_t l2_tunnel_offload_set;
1537 };
1538 
1561 typedef uint16_t (*rte_rx_callback_fn)(uint8_t port, uint16_t queue,
1562  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1563  void *user_param);
1564 
1585 typedef uint16_t (*rte_tx_callback_fn)(uint8_t port, uint16_t queue,
1586  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1587 
1593 struct rte_eth_rxtx_callback {
1594  struct rte_eth_rxtx_callback *next;
1595  union{
1596  rte_rx_callback_fn rx;
1597  rte_tx_callback_fn tx;
1598  } fn;
1599  void *param;
1600 };
1601 
1611 };
1612 
1623 struct rte_eth_dev {
1624  eth_rx_burst_t rx_pkt_burst;
1625  eth_tx_burst_t tx_pkt_burst;
1626  struct rte_eth_dev_data *data;
1627  const struct eth_driver *driver;
1628  const struct eth_dev_ops *dev_ops;
1629  struct rte_pci_device *pci_dev;
1631  struct rte_eth_dev_cb_list link_intr_cbs;
1636  struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
1641  struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
1642  uint8_t attached;
1643  enum rte_eth_dev_type dev_type;
1644 };
1645 
1646 struct rte_eth_dev_sriov {
1647  uint8_t active;
1648  uint8_t nb_q_per_pool;
1649  uint16_t def_vmdq_idx;
1650  uint16_t def_pool_q_idx;
1651 };
1652 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1653 
1654 #define RTE_ETH_NAME_MAX_LEN (32)
1655 
1663 struct rte_eth_dev_data {
1664  char name[RTE_ETH_NAME_MAX_LEN];
1666  void **rx_queues;
1667  void **tx_queues;
1668  uint16_t nb_rx_queues;
1669  uint16_t nb_tx_queues;
1671  struct rte_eth_dev_sriov sriov;
1673  void *dev_private;
1675  struct rte_eth_link dev_link;
1678  struct rte_eth_conf dev_conf;
1679  uint16_t mtu;
1681  uint32_t min_rx_buf_size;
1684  uint64_t rx_mbuf_alloc_failed;
1685  struct ether_addr* mac_addrs;
1686  uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
1688  struct ether_addr* hash_mac_addrs;
1690  uint8_t port_id;
1691  uint8_t promiscuous : 1,
1692  scattered_rx : 1,
1693  all_multicast : 1,
1694  dev_started : 1,
1695  lro : 1;
1696  uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT];
1698  uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];
1700  uint32_t dev_flags;
1701  enum rte_kernel_driver kdrv;
1702  int numa_node;
1703  const char *drv_name;
1704 };
1705 
1707 #define RTE_ETH_DEV_DETACHABLE 0x0001
1708 
1709 #define RTE_ETH_DEV_INTR_LSC 0x0002
1710 
1711 #define RTE_ETH_DEV_BONDED_SLAVE 0x0004
1712 
1718 extern struct rte_eth_dev rte_eth_devices[];
1719 
1733 uint8_t rte_eth_dev_count(void);
1734 
1744 struct rte_eth_dev *rte_eth_dev_allocated(const char *name);
1745 
1756 struct rte_eth_dev *rte_eth_dev_allocate(const char *name,
1757  enum rte_eth_dev_type type);
1758 
1768 int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev);
1769 
1782 int rte_eth_dev_attach(const char *devargs, uint8_t *port_id);
1783 
1796 int rte_eth_dev_detach(uint8_t port_id, char *devname);
1797 
1798 struct eth_driver;
1829 typedef int (*eth_dev_init_t)(struct rte_eth_dev *eth_dev);
1830 
1846 typedef int (*eth_dev_uninit_t)(struct rte_eth_dev *eth_dev);
1847 
1863 struct eth_driver {
1864  struct rte_pci_driver pci_drv;
1865  eth_dev_init_t eth_dev_init;
1866  eth_dev_uninit_t eth_dev_uninit;
1867  unsigned int dev_private_size;
1868 };
1869 
1880 void rte_eth_driver_register(struct eth_driver *eth_drv);
1881 
1893 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
1894 
1924 int rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_queue,
1925  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
1926 
1966 int rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
1967  uint16_t nb_rx_desc, unsigned int socket_id,
1968  const struct rte_eth_rxconf *rx_conf,
1969  struct rte_mempool *mb_pool);
1970 
2014 int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
2015  uint16_t nb_tx_desc, unsigned int socket_id,
2016  const struct rte_eth_txconf *tx_conf);
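/*
 * Illustrative bring-up sketch for one port: configure, set up one RX and one
 * TX queue, then start. Port 0, the descriptor counts and the mempool `mp`
 * are assumptions of the example; error checks are omitted.
 *
 *   struct rte_eth_conf conf = { 0 };
 *   rte_eth_dev_configure(0, 1, 1, &conf);
 *   rte_eth_rx_queue_setup(0, 0, 512, rte_eth_dev_socket_id(0), NULL, mp);
 *   rte_eth_tx_queue_setup(0, 0, 512, rte_eth_dev_socket_id(0), NULL);
 *   rte_eth_dev_start(0);
 */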
2017 
2018 /*
2019  * Return the NUMA socket to which an Ethernet device is connected
2020  *
2021  * @param port_id
2022  * The port identifier of the Ethernet device
2023  * @return
2024  * The NUMA socket id to which the Ethernet device is connected or
2025  * a default of zero if the socket could not be determined.
2026  * -1 is returned if the port_id value is out of range.
2027  */
2028 int rte_eth_dev_socket_id(uint8_t port_id);
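/*
 * Illustrative sketch: create the RX mbuf pool on the NUMA node the port is
 * attached to. Port 0, the pool name and sizing values are assumptions of
 * the example.
 *
 *   int socket = rte_eth_dev_socket_id(0);
 *   struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256,
 *       0, RTE_MBUF_DEFAULT_BUF_SIZE, socket);
 */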
2029 
2030 /*
2031  * Check if port_id of device is attached
2032  *
2033  * @param port_id
2034  * The port identifier of the Ethernet device
2035  * @return
2036  * - 0 if port is out of range or not attached
2037  * - 1 if device is attached
2038  */
2039 int rte_eth_dev_is_valid_port(uint8_t port_id);
2040 
2041 /*
2042  * Allocate mbufs from the mempool, set up their DMA physical addresses
2043  * and then start RX for the specified queue of a port. It is used
2044  * when the rx_deferred_start flag of the specified queue is true.
2045  *
2046  * @param port_id
2047  * The port identifier of the Ethernet device
2048  * @param rx_queue_id
2049  * The index of the rx queue to update the ring.
2050  * The value must be in the range [0, nb_rx_queue - 1] previously supplied
2051  * to rte_eth_dev_configure().
2052  * @return
2053  * - 0: Success, the receive queue is correctly started.
2054  * - -EINVAL: The port_id or the queue_id is out of range.
2055  * - -ENOTSUP: The function is not supported by the PMD.
2056  */
2057 int rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id);
2058 
2059 /*
2060  * Stop specified RX queue of a port
2061  *
2062  * @param port_id
2063  * The port identifier of the Ethernet device
2064  * @param rx_queue_id
2065  * The index of the rx queue to update the ring.
2066  * The value must be in the range [0, nb_rx_queue - 1] previously supplied
2067  * to rte_eth_dev_configure().
2068  * @return
2069  * - 0: Success, the receive queue is correctly stopped.
2070  * - -EINVAL: The port_id or the queue_id is out of range.
2071  * - -ENOTSUP: The function is not supported by the PMD.
2072  */
2073 int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id);
2074 
2075 /*
2076  * Start TX for the specified queue of a port. It is used when the
2077  * tx_deferred_start flag of the specified queue is true.
2078  *
2079  * @param port_id
2080  * The port identifier of the Ethernet device
2081  * @param tx_queue_id
2082  * The index of the tx queue to update the ring.
2083  * The value must be in the range [0, nb_tx_queue - 1] previously supplied
2084  * to rte_eth_dev_configure().
2085  * @return
2086  * - 0: Success, the transmit queue is correctly started.
2087  * - -EINVAL: The port_id or the queue_id is out of range.
2088  * - -ENOTSUP: The function is not supported by the PMD.
2089  */
2090 int rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id);
2091 
2092 /*
2093  * Stop specified TX queue of a port
2094  *
2095  * @param port_id
2096  * The port identifier of the Ethernet device
2097  * @param tx_queue_id
2098  * The index of the tx queue to update the ring.
2099  * The value must be in the range [0, nb_tx_queue - 1] previously supplied
2100  * to rte_eth_dev_configure().
2101  * @return
2102  * - 0: Success, the transmit queue is correctly stopped.
2103  * - -EINVAL: The port_id or the queue_id is out of range.
2104  * - -ENOTSUP: The function is not supported by the PMD.
2105  */
2106 int rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id);
2107 
2108 
2109 
2125 int rte_eth_dev_start(uint8_t port_id);
2126 
2134 void rte_eth_dev_stop(uint8_t port_id);
2135 
2136 
2149 int rte_eth_dev_set_link_up(uint8_t port_id);
2150 
2160 int rte_eth_dev_set_link_down(uint8_t port_id);
2161 
2170 void rte_eth_dev_close(uint8_t port_id);
2171 
2178 void rte_eth_promiscuous_enable(uint8_t port_id);
2179 
2186 void rte_eth_promiscuous_disable(uint8_t port_id);
2187 
2198 int rte_eth_promiscuous_get(uint8_t port_id);
2199 
2206 void rte_eth_allmulticast_enable(uint8_t port_id);
2207 
2214 void rte_eth_allmulticast_disable(uint8_t port_id);
2215 
2226 int rte_eth_allmulticast_get(uint8_t port_id);
2227 
2239 void rte_eth_link_get(uint8_t port_id, struct rte_eth_link *link);
2240 
2252 void rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *link);
2253 
2271 int rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats);
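/*
 * Illustrative sketch: print the basic counters of a port. Port 0 is an
 * assumption of the example.
 *
 *   struct rte_eth_stats st;
 *   if (rte_eth_stats_get(0, &st) == 0)
 *       printf("rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64 "\n",
 *              st.ipackets, st.opackets, st.imissed);
 */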
2272 
2279 void rte_eth_stats_reset(uint8_t port_id);
2280 
2302 int rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
2303  unsigned n);
2304 
2311 void rte_eth_xstats_reset(uint8_t port_id);
2312 
2330 int rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id,
2331  uint16_t tx_queue_id, uint8_t stat_idx);
2332 
2350 int rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id,
2351  uint16_t rx_queue_id,
2352  uint8_t stat_idx);
2353 
2363 void rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr);
2364 
2374 void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info);
2375 
2399 int rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
2400  uint32_t *ptypes, int num);
2401 
2413 int rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu);
2414 
2428 int rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu);
2429 
2448 int rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on);
2449 
2469 int rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id,
2470  int on);
2471 
2488 int rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
2489  enum rte_vlan_type vlan_type,
2490  uint16_t tag_type);
2491 
2512 int rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask);
2513 
2526 int rte_eth_dev_get_vlan_offload(uint8_t port_id);
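/*
 * Illustrative sketch: enable VLAN stripping and filtering at run time by
 * updating the current offload mask. Port 0 is an assumption of the example.
 *
 *   int mask = rte_eth_dev_get_vlan_offload(0);
 *   mask |= ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD;
 *   rte_eth_dev_set_vlan_offload(0, mask);
 */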
2527 
2542 int rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on);
2543 
2626 static inline uint16_t
2627 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2628  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
2629 {
2630  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2631 
2632 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2633  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
2634  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2635 
2636  if (queue_id >= dev->data->nb_rx_queues) {
2637  RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2638  return 0;
2639  }
2640 #endif
2641  int16_t nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2642  rx_pkts, nb_pkts);
2643 
2644 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
2645  struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
2646 
2647  if (unlikely(cb != NULL)) {
2648  do {
2649  nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
2650  nb_pkts, cb->param);
2651  cb = cb->next;
2652  } while (cb != NULL);
2653  }
2654 #endif
2655 
2656  return nb_rx;
2657 }
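/*
 * Illustrative usage sketch: poll one RX queue and release the received
 * mbufs after processing. Port 0, queue 0 and a burst size of 32 are
 * assumptions of the example.
 *
 *   struct rte_mbuf *pkts[32];
 *   uint16_t nb_rx = rte_eth_rx_burst(0, 0, pkts, 32);
 *   for (uint16_t i = 0; i < nb_rx; i++) {
 *       // ... process pkts[i] ...
 *       rte_pktmbuf_free(pkts[i]);
 *   }
 */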
2658 
2671 static inline int
2672 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2673 {
2674  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2675  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2676  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
2677  return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2678 }
2679 
2695 static inline int
2696 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2697 {
2698  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2699  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2700  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2701  return (*dev->dev_ops->rx_descriptor_done)( \
2702  dev->data->rx_queues[queue_id], offset);
2703 }
2704 
2763 static inline uint16_t
2764 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2765  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2766 {
2767  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2768 
2769 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2770  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
2771  RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
2772 
2773  if (queue_id >= dev->data->nb_tx_queues) {
2774  RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2775  return 0;
2776  }
2777 #endif
2778 
2779 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
2780  struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
2781 
2782  if (unlikely(cb != NULL)) {
2783  do {
2784  nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
2785  cb->param);
2786  cb = cb->next;
2787  } while (cb != NULL);
2788  }
2789 #endif
2790 
2791  return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
2792 }
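/*
 * Illustrative usage sketch: transmit a burst and free the packets the
 * driver could not accept. Port 0, queue 0 and the pkts[]/nb variables are
 * assumptions of the example.
 *
 *   uint16_t nb_tx = rte_eth_tx_burst(0, 0, pkts, nb);
 *   while (nb_tx < nb)
 *       rte_pktmbuf_free(pkts[nb_tx++]);
 */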
2793 
2794 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
2795  void *userdata);
2796 
2802  buffer_tx_error_fn error_callback;
2803  void *error_userdata;
2804  uint16_t size;
2805  uint16_t length;
2806  struct rte_mbuf *pkts[];
2808 };
2809 
2816 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
2817  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
2818 
2829 int
2830 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
2831 
2854 static inline uint16_t
2855 rte_eth_tx_buffer_flush(uint8_t port_id, uint16_t queue_id,
2856  struct rte_eth_dev_tx_buffer *buffer)
2857 {
2858  uint16_t sent;
2859  uint16_t to_send = buffer->length;
2860 
2861  if (to_send == 0)
2862  return 0;
2863 
2864  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
2865 
2866  buffer->length = 0;
2867 
2868  /* All packets sent, or to be dealt with by callback below */
2869  if (unlikely(sent != to_send))
2870  buffer->error_callback(&buffer->pkts[sent], to_send - sent,
2871  buffer->error_userdata);
2872 
2873  return sent;
2874 }
2875 
2906 static inline uint16_t __attribute__((always_inline))
2907 rte_eth_tx_buffer(uint8_t port_id, uint16_t queue_id,
2908  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
2909 {
2910  buffer->pkts[buffer->length++] = tx_pkt;
2911  if (buffer->length < buffer->size)
2912  return 0;
2913 
2914  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
2915 }
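/*
 * Illustrative sketch: buffer packets and drain the buffer once per loop
 * iteration. Port 0, queue 0, a 32-packet buffer and rte_zmalloc() placement
 * are assumptions of the example.
 *
 *   struct rte_eth_dev_tx_buffer *buf =
 *       rte_zmalloc("tx_buf", RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *   rte_eth_tx_buffer_init(buf, 32);
 *   ...
 *   rte_eth_tx_buffer(0, 0, buf, pkt);    // queues pkt, flushes when full
 *   rte_eth_tx_buffer_flush(0, 0, buf);   // send whatever is still queued
 */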
2916 
2941 int
2943  buffer_tx_error_fn callback, void *userdata);
2944 
2967 void
2968 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2969  void *userdata);
2970 
2994 void
2995 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2996  void *userdata);
2997 
3009 };
3010 
3011 typedef void (*rte_eth_dev_cb_fn)(uint8_t port_id, \
3012  enum rte_eth_event_type event, void *cb_arg);
3033 int rte_eth_dev_callback_register(uint8_t port_id,
3034  enum rte_eth_event_type event,
3035  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
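/*
 * Illustrative sketch: register a callback that reacts to link state change
 * interrupts. Port 0 and the callback name are assumptions of the example;
 * the port must have been started with the LSC interrupt enabled.
 *
 *   static void on_link_change(uint8_t port_id, enum rte_eth_event_type ev,
 *                              void *arg)
 *   {
 *       struct rte_eth_link link;
 *       rte_eth_link_get_nowait(port_id, &link);
 *       // ... act on link.link_status ...
 *   }
 *
 *   rte_eth_dev_callback_register(0, RTE_ETH_EVENT_INTR_LSC,
 *                                 on_link_change, NULL);
 */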
3036 
3054 int rte_eth_dev_callback_unregister(uint8_t port_id,
3055  enum rte_eth_event_type event,
3056  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3057 
3071 void _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3072  enum rte_eth_event_type event);
3073 
3094 int rte_eth_dev_rx_intr_enable(uint8_t port_id, uint16_t queue_id);
3095 
3115 int rte_eth_dev_rx_intr_disable(uint8_t port_id, uint16_t queue_id);
3116 
3134 int rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data);
3135 
3157 int rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
3158  int epfd, int op, void *data);
3159 
3172 int rte_eth_led_on(uint8_t port_id);
3173 
3186 int rte_eth_led_off(uint8_t port_id);
3187 
3200 int rte_eth_dev_flow_ctrl_get(uint8_t port_id,
3201  struct rte_eth_fc_conf *fc_conf);
3202 
3217 int rte_eth_dev_flow_ctrl_set(uint8_t port_id,
3218  struct rte_eth_fc_conf *fc_conf);
3219 
3235 int rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id,
3236  struct rte_eth_pfc_conf *pfc_conf);
3237 
3256 int rte_eth_dev_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
3257  uint32_t pool);
3258 
3272 int rte_eth_dev_mac_addr_remove(uint8_t port, struct ether_addr *mac_addr);
3273 
3287 int rte_eth_dev_default_mac_addr_set(uint8_t port, struct ether_addr *mac_addr);
3288 
3289 
3305 int rte_eth_dev_rss_reta_update(uint8_t port,
3306  struct rte_eth_rss_reta_entry64 *reta_conf,
3307  uint16_t reta_size);
3308 
3324 int rte_eth_dev_rss_reta_query(uint8_t port,
3325  struct rte_eth_rss_reta_entry64 *reta_conf,
3326  uint16_t reta_size);
3327 
3346 int rte_eth_dev_uc_hash_table_set(uint8_t port,struct ether_addr *addr,
3347  uint8_t on);
3348 
3366 int rte_eth_dev_uc_all_hash_table_set(uint8_t port,uint8_t on);
3367 
3390 int rte_eth_dev_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mode,
3391  uint8_t on);
3392 
3409 int
3410 rte_eth_dev_set_vf_tx(uint8_t port,uint16_t vf, uint8_t on);
3411 
3428 int
3429 rte_eth_dev_set_vf_rx(uint8_t port,uint16_t vf, uint8_t on);
3430 
3450 int
3451 rte_eth_dev_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
3452  uint64_t vf_mask,
3453  uint8_t vlan_on);
3454 
3476 int rte_eth_mirror_rule_set(uint8_t port_id,
3477  struct rte_eth_mirror_conf *mirror_conf,
3478  uint8_t rule_id,
3479  uint8_t on);
3480 
3494 int rte_eth_mirror_rule_reset(uint8_t port_id,
3495  uint8_t rule_id);
3496 
3512 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
3513  uint16_t tx_rate);
3514 
3532 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf,
3533  uint16_t tx_rate, uint64_t q_msk);
3534 
3546 int rte_eth_dev_bypass_init(uint8_t port);
3547 
3563 int rte_eth_dev_bypass_state_show(uint8_t port, uint32_t *state);
3564 
3580 int rte_eth_dev_bypass_state_set(uint8_t port, uint32_t *new_state);
3581 
3604 int rte_eth_dev_bypass_event_show(uint8_t port, uint32_t event, uint32_t *state);
3605 
3628 int rte_eth_dev_bypass_event_store(uint8_t port, uint32_t event, uint32_t state);
3629 
3650 int rte_eth_dev_wd_timeout_store(uint8_t port, uint32_t timeout);
3651 
3664 int rte_eth_dev_bypass_ver_show(uint8_t port, uint32_t *ver);
3665 
3686 int rte_eth_dev_bypass_wd_timeout_show(uint8_t port, uint32_t *wd_timeout);
3687 
3698 int rte_eth_dev_bypass_wd_reset(uint8_t port);
3699 
3713 int rte_eth_dev_rss_hash_update(uint8_t port_id,
3714  struct rte_eth_rss_conf *rss_conf);
3715 
3729 int
3730 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
3731  struct rte_eth_rss_conf *rss_conf);
3732 
3750 int
3751 rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
3752  struct rte_eth_udp_tunnel *tunnel_udp);
3753 
3772 int
3773 rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
3774  struct rte_eth_udp_tunnel *tunnel_udp);
3775 
3789 int rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type);
3790 
3809 int rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
3810  enum rte_filter_op filter_op, void *arg);
3811 
3824 int rte_eth_dev_get_dcb_info(uint8_t port_id,
3825  struct rte_eth_dcb_info *dcb_info);
3826 
3851 void *rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
3852  rte_rx_callback_fn fn, void *user_param);
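/*
 * Illustrative sketch: count received packets with a post-RX callback.
 * Port 0, queue 0 and the callback name are assumptions of the example.
 *
 *   static uint64_t rx_count;
 *
 *   static uint16_t count_cb(uint8_t port, uint16_t queue,
 *           struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
 *           void *user_param)
 *   {
 *       rx_count += nb_pkts;
 *       return nb_pkts;   // pass the burst through unchanged
 *   }
 *
 *   rte_eth_add_rx_callback(0, 0, count_cb, NULL);
 */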
3853 
3878 void *rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
3879  rte_tx_callback_fn fn, void *user_param);
3880 
3911 int rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
3912  struct rte_eth_rxtx_callback *user_cb);
3913 
3944 int rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
3945  struct rte_eth_rxtx_callback *user_cb);
3946 
3964 int rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3965  struct rte_eth_rxq_info *qinfo);
3966 
3984 int rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3985  struct rte_eth_txq_info *qinfo);
3986 
3987 /*
3988  * Retrieve number of available registers for access
3989  *
3990  * @param port_id
3991  * The port identifier of the Ethernet device.
3992  * @return
3993  * - (>=0) number of registers if successful.
3994  * - (-ENOTSUP) if the hardware does not support register access.
3995  * - (-ENODEV) if *port_id* is invalid.
3996  * - other values depend on the specific operation's implementation.
3997  */
3998 int rte_eth_dev_get_reg_length(uint8_t port_id);
3999 
4013 int rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info);
4014 
4026 int rte_eth_dev_get_eeprom_length(uint8_t port_id);
4027 
4042 int rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);
4043 
4058 int rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);
4059 
4077 int rte_eth_dev_set_mc_addr_list(uint8_t port_id,
4078  struct ether_addr *mc_addr_set,
4079  uint32_t nb_mc_addr);
4080 
4092 int rte_eth_timesync_enable(uint8_t port_id);
4093 
4105 int rte_eth_timesync_disable(uint8_t port_id);
4106 
4124 int rte_eth_timesync_read_rx_timestamp(uint8_t port_id,
4125  struct timespec *timestamp, uint32_t flags);
4126 
4141 int rte_eth_timesync_read_tx_timestamp(uint8_t port_id,
4142  struct timespec *timestamp);
4143 
4160 int rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta);
4161 
4176 int rte_eth_timesync_read_time(uint8_t port_id, struct timespec *time);
4177 
4195 int rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *time);
4196 
4208 void rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
4209  struct rte_pci_device *pci_dev);
4210 
4230 const struct rte_memzone *
4231 rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
4232  uint16_t queue_id, size_t size,
4233  unsigned align, int socket_id);
4234 
4249 int
4250 rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
4251  struct rte_eth_l2_tunnel_conf *l2_tunnel);
4252 
4276 int
4277 rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
4278  struct rte_eth_l2_tunnel_conf *l2_tunnel,
4279  uint32_t mask,
4280  uint8_t en);
4281 
4282 #ifdef __cplusplus
4283 }
4284 #endif
4285 
4286 #endif /* _RTE_ETHDEV_H_ */