34 #ifndef _RTE_ETHDEV_H_
35 #define _RTE_ETHDEV_H_
176 #define RTE_ETHDEV_HAS_LRO_SUPPORT
185 #include "rte_dev_info.h"
207 uint64_t
q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
209 uint64_t
q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
211 uint64_t
q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
213 uint64_t
q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
215 uint64_t
q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
/*
 * Link-speed capability bitmap: bit 0 selects fixed vs. autonegotiated
 * speed, every other bit advertises one discrete speed. A value of 0
 * (ETH_LINK_SPEED_AUTONEG) means "autonegotiate all supported speeds".
 */
222 #define ETH_LINK_SPEED_AUTONEG (0 << 0) /**< Autonegotiate (all speeds) */
223 #define ETH_LINK_SPEED_FIXED (1 << 0) /**< Disable autoneg (fixed speed) */
224 #define ETH_LINK_SPEED_10M_HD (1 << 1) /**< 10 Mbps half-duplex (per name) */
225 #define ETH_LINK_SPEED_10M (1 << 2) /**< 10 Mbps */
226 #define ETH_LINK_SPEED_100M_HD (1 << 3) /**< 100 Mbps half-duplex (per name) */
227 #define ETH_LINK_SPEED_100M (1 << 4) /**< 100 Mbps */
228 #define ETH_LINK_SPEED_1G (1 << 5) /**< 1 Gbps */
229 #define ETH_LINK_SPEED_2_5G (1 << 6) /**< 2.5 Gbps */
230 #define ETH_LINK_SPEED_5G (1 << 7) /**< 5 Gbps */
231 #define ETH_LINK_SPEED_10G (1 << 8) /**< 10 Gbps */
232 #define ETH_LINK_SPEED_20G (1 << 9) /**< 20 Gbps */
233 #define ETH_LINK_SPEED_25G (1 << 10) /**< 25 Gbps */
234 #define ETH_LINK_SPEED_40G (1 << 11) /**< 40 Gbps */
235 #define ETH_LINK_SPEED_50G (1 << 12) /**< 50 Gbps */
236 #define ETH_LINK_SPEED_56G (1 << 13) /**< 56 Gbps */
237 #define ETH_LINK_SPEED_100G (1 << 14) /**< 100 Gbps */
/*
 * Numeric link speeds, expressed in Mbps (10M -> 10, 1G -> 1000, ...).
 * Unlike the ETH_LINK_SPEED_* bitmap above, these are plain values,
 * not capability bits.
 */
242 #define ETH_SPEED_NUM_NONE 0 /**< Speed not known / not defined */
243 #define ETH_SPEED_NUM_10M 10
244 #define ETH_SPEED_NUM_100M 100
245 #define ETH_SPEED_NUM_1G 1000
246 #define ETH_SPEED_NUM_2_5G 2500
247 #define ETH_SPEED_NUM_5G 5000
248 #define ETH_SPEED_NUM_10G 10000
249 #define ETH_SPEED_NUM_20G 20000
250 #define ETH_SPEED_NUM_25G 25000
251 #define ETH_SPEED_NUM_40G 40000
252 #define ETH_SPEED_NUM_50G 50000
253 #define ETH_SPEED_NUM_56G 56000
254 #define ETH_SPEED_NUM_100G 100000
265 } __attribute__((aligned(8)));
/* Link duplex, administrative state, and negotiation-mode constants
 * (presumably for the rte_eth_link fields — confirm against full header). */
268 #define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection */
269 #define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection */
270 #define ETH_LINK_DOWN 0 /**< Link is down */
271 #define ETH_LINK_UP 1 /**< Link is up */
272 #define ETH_LINK_FIXED 0 /**< Fixed (no autonegotiation) */
273 #define ETH_LINK_AUTONEG 1 /**< Autonegotiated */
279 struct rte_eth_thresh {
/* Multi-queue RX mode component flags; RX modes are built by OR-ing these. */
288 #define ETH_MQ_RX_RSS_FLAG 0x1 /**< RSS hashing enabled */
289 #define ETH_MQ_RX_DCB_FLAG 0x2 /**< DCB enabled */
290 #define ETH_MQ_RX_VMDQ_FLAG 0x4 /**< VMDq pooling enabled */
/* Backward-compatibility aliases for older ETH_* multi-queue mode names. */
321 #define ETH_RSS ETH_MQ_RX_RSS
322 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
323 #define ETH_DCB_RX ETH_MQ_RX_DCB
/* TX-side backward-compatibility aliases. */
339 #define ETH_DCB_NONE ETH_MQ_TX_NONE
340 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
341 #define ETH_DCB_TX ETH_MQ_TX_DCB
368 ETH_VLAN_TYPE_UNKNOWN = 0,
/*
 * RSS offload types: each define is a 64-bit mask with one bit per
 * RTE_ETH_FLOW_* packet classification, used to select which flow types
 * participate in RSS hashing.
 */
411 #define ETH_RSS_IPV4 (1ULL << RTE_ETH_FLOW_IPV4)
412 #define ETH_RSS_FRAG_IPV4 (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
413 #define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
414 #define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
415 #define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
416 #define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
417 #define ETH_RSS_IPV6 (1ULL << RTE_ETH_FLOW_IPV6)
418 #define ETH_RSS_FRAG_IPV6 (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
419 #define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
420 #define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
421 #define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
422 #define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
423 #define ETH_RSS_L2_PAYLOAD (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
424 #define ETH_RSS_IPV6_EX (1ULL << RTE_ETH_FLOW_IPV6_EX)
425 #define ETH_RSS_IPV6_TCP_EX (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
426 #define ETH_RSS_IPV6_UDP_EX (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
427 #define ETH_RSS_PORT (1ULL << RTE_ETH_FLOW_PORT)
428 #define ETH_RSS_VXLAN (1ULL << RTE_ETH_FLOW_VXLAN)
429 #define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
430 #define ETH_RSS_NVGRE (1ULL << RTE_ETH_FLOW_NVGRE)
/* Convenience combinations of the single-bit masks above
 * (ETH_RSS_IP, _UDP, _TCP, _SCTP group related flow types). */
432 #define ETH_RSS_IP ( \
434 ETH_RSS_FRAG_IPV4 | \
435 ETH_RSS_NONFRAG_IPV4_OTHER | \
437 ETH_RSS_FRAG_IPV6 | \
438 ETH_RSS_NONFRAG_IPV6_OTHER | \
441 #define ETH_RSS_UDP ( \
442 ETH_RSS_NONFRAG_IPV4_UDP | \
443 ETH_RSS_NONFRAG_IPV6_UDP | \
446 #define ETH_RSS_TCP ( \
447 ETH_RSS_NONFRAG_IPV4_TCP | \
448 ETH_RSS_NONFRAG_IPV6_TCP | \
451 #define ETH_RSS_SCTP ( \
452 ETH_RSS_NONFRAG_IPV4_SCTP | \
453 ETH_RSS_NONFRAG_IPV6_SCTP)
/* ETH_RSS_TUNNEL groups tunnel flow types; ETH_RSS_PROTO_MASK is the
 * union of every defined RSS protocol bit. */
455 #define ETH_RSS_TUNNEL ( \
462 #define ETH_RSS_PROTO_MASK ( \
464 ETH_RSS_FRAG_IPV4 | \
465 ETH_RSS_NONFRAG_IPV4_TCP | \
466 ETH_RSS_NONFRAG_IPV4_UDP | \
467 ETH_RSS_NONFRAG_IPV4_SCTP | \
468 ETH_RSS_NONFRAG_IPV4_OTHER | \
470 ETH_RSS_FRAG_IPV6 | \
471 ETH_RSS_NONFRAG_IPV6_TCP | \
472 ETH_RSS_NONFRAG_IPV6_UDP | \
473 ETH_RSS_NONFRAG_IPV6_SCTP | \
474 ETH_RSS_NONFRAG_IPV6_OTHER | \
475 ETH_RSS_L2_PAYLOAD | \
477 ETH_RSS_IPV6_TCP_EX | \
478 ETH_RSS_IPV6_UDP_EX | \
/* Supported RSS redirection-table sizes and the per-group entry count. */
489 #define ETH_RSS_RETA_SIZE_64 64
490 #define ETH_RSS_RETA_SIZE_128 128
491 #define ETH_RSS_RETA_SIZE_256 256
492 #define ETH_RSS_RETA_SIZE_512 512
493 #define RTE_RETA_GROUP_SIZE 64 /**< RETA entries per 64-bit mask group */
/* VMDq / DCB dimensioning constants. */
496 #define ETH_VMDQ_MAX_VLAN_FILTERS 64 /**< Max VMDq VLAN filters */
497 #define ETH_DCB_NUM_USER_PRIORITIES 8 /**< DCB user priorities */
498 #define ETH_VMDQ_DCB_NUM_QUEUES 128 /**< Queues in VMDq+DCB mode */
499 #define ETH_DCB_NUM_QUEUES 128 /**< Queues in DCB mode */
/* DCB capability bits. */
502 #define ETH_DCB_PG_SUPPORT 0x00000001 /**< Priority groups */
503 #define ETH_DCB_PFC_SUPPORT 0x00000002 /**< Priority flow control */
/* VLAN offload enable bits. */
506 #define ETH_VLAN_STRIP_OFFLOAD 0x0001
507 #define ETH_VLAN_FILTER_OFFLOAD 0x0002
508 #define ETH_VLAN_EXTEND_OFFLOAD 0x0004
/* Masks matching the VLAN offload bits above, plus the max VLAN ID. */
511 #define ETH_VLAN_STRIP_MASK 0x0001
512 #define ETH_VLAN_FILTER_MASK 0x0002
513 #define ETH_VLAN_EXTEND_MASK 0x0004
514 #define ETH_VLAN_ID_MAX 0x0FFF /**< 12-bit VLAN ID space */
/* Address-table dimensioning. */
517 #define ETH_NUM_RECEIVE_MAC_ADDR 128 /**< Max receive MAC addresses */
520 #define ETH_VMDQ_NUM_UC_HASH_ARRAY 128 /**< Unicast hash array size */
/* VMDq RX accept-mode bits (which frame classes a pool accepts). */
523 #define ETH_VMDQ_ACCEPT_UNTAG 0x0001
524 #define ETH_VMDQ_ACCEPT_HASH_MC 0x0002
525 #define ETH_VMDQ_ACCEPT_HASH_UC 0x0004
526 #define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
527 #define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
/* Port mirroring: max mirrored VLANs and mirror rule type bits. */
530 #define ETH_MIRROR_MAX_VLANS 64
532 #define ETH_MIRROR_VIRTUAL_POOL_UP 0x01
533 #define ETH_MIRROR_UPLINK_PORT 0x02
534 #define ETH_MIRROR_DOWNLINK_PORT 0x04
535 #define ETH_MIRROR_VLAN 0x08
536 #define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10
541 struct rte_eth_vlan_mirror {
567 uint16_t
reta[RTE_RETA_GROUP_SIZE];
592 struct rte_eth_dcb_rx_conf {
598 struct rte_eth_vmdq_dcb_tx_conf {
604 struct rte_eth_dcb_tx_conf {
610 struct rte_eth_vmdq_tx_conf {
/*
 * Per-TX-queue optimization hints: each "NO*" bit tells the PMD that a
 * feature will not be used on this queue, letting it pick a faster path.
 */
696 #define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001 /**< No multi-segment mbufs */
697 #define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002 /**< Refcount always 1 */
698 #define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004 /**< Single mempool only */
699 #define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100 /**< No VLAN offload */
700 #define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200 /**< No SCTP checksum offload */
701 #define ETH_TXQ_FLAGS_NOXSUMUDP 0x0400 /**< No UDP checksum offload */
702 #define ETH_TXQ_FLAGS_NOXSUMTCP 0x0800 /**< No TCP checksum offload */
/* Disable all TX offloads (VLAN + all checksums). */
703 #define ETH_TXQ_FLAGS_NOOFFLOADS \
704 (ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
705 ETH_TXQ_FLAGS_NOXSUMUDP | ETH_TXQ_FLAGS_NOXSUMTCP)
/* Disable all checksum offloads, keeping VLAN offload available. */
706 #define ETH_TXQ_FLAGS_NOXSUMS \
707 (ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
708 ETH_TXQ_FLAGS_NOXSUMTCP)
/* RX offload capability bits (reported by the device / requested by apps). */
902 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
903 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
904 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
905 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
906 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
907 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
908 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
909 #define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
/* TX offload capability bits (insertion, checksum, and TSO variants). */
914 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
915 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
916 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
917 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
918 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
919 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
920 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
921 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
922 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
923 #define DEV_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200
924 #define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400
925 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800
926 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000
927 #define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
928 #define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
948 uint32_t max_hash_mac_addrs;
/* Max length of an extended-statistics name string. */
993 #define RTE_ETH_XSTATS_NAME_SIZE 64
/* DCB traffic-class and VMDq pool limits (dimension the tc_rxq/tc_txq maps). */
1020 #define ETH_DCB_NUM_TCS 8
1021 #define ETH_MAX_VMDQ_POOL 64
1032 }
tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1037 }
tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
/* Per-queue run states (used in rx_queue_state/tx_queue_state arrays). */
1055 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1056 #define RTE_ETH_QUEUE_STATE_STARTED 1
1060 struct rte_eth_dev_callback;
1062 TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
1065 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1066 if (!rte_eth_dev_is_valid_port(port_id)) { \
1067 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
1072 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1073 if (!rte_eth_dev_is_valid_port(port_id)) { \
1074 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
1079 #define RTE_ETH_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
/* L2-tunnel configuration mask bits, selecting which tunnel feature an
 * l2_tunnel_offload_set call applies to. */
1086 #define ETH_L2_TUNNEL_ENABLE_MASK 0x00000001
1088 #define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002
1090 #define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004
1092 #define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1100 typedef int (*eth_dev_configure_t)(
struct rte_eth_dev *dev);
1103 typedef int (*eth_dev_start_t)(
struct rte_eth_dev *dev);
1106 typedef void (*eth_dev_stop_t)(
struct rte_eth_dev *dev);
1109 typedef int (*eth_dev_set_link_up_t)(
struct rte_eth_dev *dev);
1112 typedef int (*eth_dev_set_link_down_t)(
struct rte_eth_dev *dev);
1115 typedef void (*eth_dev_close_t)(
struct rte_eth_dev *dev);
1118 typedef void (*eth_promiscuous_enable_t)(
struct rte_eth_dev *dev);
1121 typedef void (*eth_promiscuous_disable_t)(
struct rte_eth_dev *dev);
1124 typedef void (*eth_allmulticast_enable_t)(
struct rte_eth_dev *dev);
1127 typedef void (*eth_allmulticast_disable_t)(
struct rte_eth_dev *dev);
1130 typedef int (*eth_link_update_t)(
struct rte_eth_dev *dev,
1131 int wait_to_complete);
1134 typedef void (*eth_stats_get_t)(
struct rte_eth_dev *dev,
1138 typedef void (*eth_stats_reset_t)(
struct rte_eth_dev *dev);
1141 typedef int (*eth_xstats_get_t)(
struct rte_eth_dev *dev,
1145 typedef int (*eth_xstats_get_by_id_t)(
struct rte_eth_dev *dev,
1146 const uint64_t *ids,
1151 typedef void (*eth_xstats_reset_t)(
struct rte_eth_dev *dev);
1154 typedef int (*eth_xstats_get_names_t)(
struct rte_eth_dev *dev,
1158 typedef int (*eth_xstats_get_names_by_id_t)(
struct rte_eth_dev *dev,
1163 typedef int (*eth_queue_stats_mapping_set_t)(
struct rte_eth_dev *dev,
1169 typedef void (*eth_dev_infos_get_t)(
struct rte_eth_dev *dev,
1173 typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(
struct rte_eth_dev *dev);
1176 typedef int (*eth_queue_start_t)(
struct rte_eth_dev *dev,
1180 typedef int (*eth_queue_stop_t)(
struct rte_eth_dev *dev,
1184 typedef int (*eth_rx_queue_setup_t)(
struct rte_eth_dev *dev,
1185 uint16_t rx_queue_id,
1186 uint16_t nb_rx_desc,
1187 unsigned int socket_id,
1192 typedef int (*eth_tx_queue_setup_t)(
struct rte_eth_dev *dev,
1193 uint16_t tx_queue_id,
1194 uint16_t nb_tx_desc,
1195 unsigned int socket_id,
1199 typedef int (*eth_rx_enable_intr_t)(
struct rte_eth_dev *dev,
1200 uint16_t rx_queue_id);
1203 typedef int (*eth_rx_disable_intr_t)(
struct rte_eth_dev *dev,
1204 uint16_t rx_queue_id);
1207 typedef void (*eth_queue_release_t)(
void *queue);
1210 typedef uint32_t (*eth_rx_queue_count_t)(
struct rte_eth_dev *dev,
1211 uint16_t rx_queue_id);
1214 typedef int (*eth_rx_descriptor_done_t)(
void *rxq, uint16_t offset);
1217 typedef int (*eth_rx_descriptor_status_t)(
void *rxq, uint16_t offset);
1220 typedef int (*eth_tx_descriptor_status_t)(
void *txq, uint16_t offset);
1223 typedef int (*eth_fw_version_get_t)(
struct rte_eth_dev *dev,
1224 char *fw_version,
size_t fw_size);
1227 typedef int (*eth_tx_done_cleanup_t)(
void *txq, uint32_t free_cnt);
1230 typedef void (*eth_rxq_info_get_t)(
struct rte_eth_dev *dev,
1233 typedef void (*eth_txq_info_get_t)(
struct rte_eth_dev *dev,
1236 typedef int (*mtu_set_t)(
struct rte_eth_dev *dev, uint16_t mtu);
1239 typedef int (*vlan_filter_set_t)(
struct rte_eth_dev *dev,
1244 typedef int (*vlan_tpid_set_t)(
struct rte_eth_dev *dev,
1248 typedef void (*vlan_offload_set_t)(
struct rte_eth_dev *dev,
int mask);
1251 typedef int (*vlan_pvid_set_t)(
struct rte_eth_dev *dev,
1256 typedef void (*vlan_strip_queue_set_t)(
struct rte_eth_dev *dev,
1257 uint16_t rx_queue_id,
1261 typedef uint16_t (*eth_rx_burst_t)(
void *rxq,
1266 typedef uint16_t (*eth_tx_burst_t)(
void *txq,
1271 typedef uint16_t (*eth_tx_prep_t)(
void *txq,
1276 typedef int (*flow_ctrl_get_t)(
struct rte_eth_dev *dev,
1280 typedef int (*flow_ctrl_set_t)(
struct rte_eth_dev *dev,
1284 typedef int (*priority_flow_ctrl_set_t)(
struct rte_eth_dev *dev,
1288 typedef int (*reta_update_t)(
struct rte_eth_dev *dev,
1290 uint16_t reta_size);
1293 typedef int (*reta_query_t)(
struct rte_eth_dev *dev,
1295 uint16_t reta_size);
1298 typedef int (*rss_hash_update_t)(
struct rte_eth_dev *dev,
1302 typedef int (*rss_hash_conf_get_t)(
struct rte_eth_dev *dev,
1306 typedef int (*eth_dev_led_on_t)(
struct rte_eth_dev *dev);
1309 typedef int (*eth_dev_led_off_t)(
struct rte_eth_dev *dev);
1312 typedef void (*eth_mac_addr_remove_t)(
struct rte_eth_dev *dev, uint32_t index);
1315 typedef int (*eth_mac_addr_add_t)(
struct rte_eth_dev *dev,
1321 typedef void (*eth_mac_addr_set_t)(
struct rte_eth_dev *dev,
1325 typedef int (*eth_uc_hash_table_set_t)(
struct rte_eth_dev *dev,
1330 typedef int (*eth_uc_all_hash_table_set_t)(
struct rte_eth_dev *dev,
1334 typedef int (*eth_set_queue_rate_limit_t)(
struct rte_eth_dev *dev,
1339 typedef int (*eth_mirror_rule_set_t)(
struct rte_eth_dev *dev,
1345 typedef int (*eth_mirror_rule_reset_t)(
struct rte_eth_dev *dev,
1349 typedef int (*eth_udp_tunnel_port_add_t)(
struct rte_eth_dev *dev,
1353 typedef int (*eth_udp_tunnel_port_del_t)(
struct rte_eth_dev *dev,
1357 typedef int (*eth_set_mc_addr_list_t)(
struct rte_eth_dev *dev,
1359 uint32_t nb_mc_addr);
1362 typedef int (*eth_timesync_enable_t)(
struct rte_eth_dev *dev);
1365 typedef int (*eth_timesync_disable_t)(
struct rte_eth_dev *dev);
1368 typedef int (*eth_timesync_read_rx_timestamp_t)(
struct rte_eth_dev *dev,
1369 struct timespec *timestamp,
1373 typedef int (*eth_timesync_read_tx_timestamp_t)(
struct rte_eth_dev *dev,
1374 struct timespec *timestamp);
1377 typedef int (*eth_timesync_adjust_time)(
struct rte_eth_dev *dev, int64_t);
1380 typedef int (*eth_timesync_read_time)(
struct rte_eth_dev *dev,
1381 struct timespec *timestamp);
1384 typedef int (*eth_timesync_write_time)(
struct rte_eth_dev *dev,
1385 const struct timespec *timestamp);
1388 typedef int (*eth_get_reg_t)(
struct rte_eth_dev *dev,
1389 struct rte_dev_reg_info *info);
1392 typedef int (*eth_get_eeprom_length_t)(
struct rte_eth_dev *dev);
1395 typedef int (*eth_get_eeprom_t)(
struct rte_eth_dev *dev,
1396 struct rte_dev_eeprom_info *info);
1399 typedef int (*eth_set_eeprom_t)(
struct rte_eth_dev *dev,
1400 struct rte_dev_eeprom_info *info);
1403 typedef int (*eth_l2_tunnel_eth_type_conf_t)
1407 typedef int (*eth_l2_tunnel_offload_set_t)
1408 (
struct rte_eth_dev *dev,
1415 typedef int (*eth_filter_ctrl_t)(
struct rte_eth_dev *dev,
1421 typedef int (*eth_tm_ops_get_t)(
struct rte_eth_dev *dev,
void *ops);
1424 typedef int (*eth_get_dcb_info)(
struct rte_eth_dev *dev,
/*
 * Function-pointer table a PMD fills in to implement the ethdev API.
 * Each field's type is one of the typedefs declared earlier in this
 * header; the generic rte_eth_* API dispatches through this table.
 */
1431 struct eth_dev_ops {
/* Device lifecycle and link control. */
1432 eth_dev_configure_t dev_configure;
1433 eth_dev_start_t dev_start;
1434 eth_dev_stop_t dev_stop;
1435 eth_dev_set_link_up_t dev_set_link_up;
1436 eth_dev_set_link_down_t dev_set_link_down;
1437 eth_dev_close_t dev_close;
1438 eth_link_update_t link_update;
/* Promiscuous/allmulticast modes and MAC address management. */
1440 eth_promiscuous_enable_t promiscuous_enable;
1441 eth_promiscuous_disable_t promiscuous_disable;
1442 eth_allmulticast_enable_t allmulticast_enable;
1443 eth_allmulticast_disable_t allmulticast_disable;
1444 eth_mac_addr_remove_t mac_addr_remove;
1445 eth_mac_addr_add_t mac_addr_add;
1446 eth_mac_addr_set_t mac_addr_set;
1447 eth_set_mc_addr_list_t set_mc_addr_list;
/* Basic and extended statistics. */
1450 eth_stats_get_t stats_get;
1451 eth_stats_reset_t stats_reset;
1452 eth_xstats_get_t xstats_get;
1453 eth_xstats_reset_t xstats_reset;
1454 eth_xstats_get_names_t xstats_get_names;
1456 eth_queue_stats_mapping_set_t queue_stats_mapping_set;
/* Device/queue information queries. */
1459 eth_dev_infos_get_t dev_infos_get;
1460 eth_rxq_info_get_t rxq_info_get;
1461 eth_txq_info_get_t txq_info_get;
1462 eth_fw_version_get_t fw_version_get;
1463 eth_dev_supported_ptypes_get_t dev_supported_ptypes_get;
/* VLAN filtering and offload configuration. */
1466 vlan_filter_set_t vlan_filter_set;
1467 vlan_tpid_set_t vlan_tpid_set;
1468 vlan_strip_queue_set_t vlan_strip_queue_set;
1469 vlan_offload_set_t vlan_offload_set;
1470 vlan_pvid_set_t vlan_pvid_set;
/* RX/TX queue setup, start/stop, release, and descriptor introspection. */
1472 eth_queue_start_t rx_queue_start;
1473 eth_queue_stop_t rx_queue_stop;
1474 eth_queue_start_t tx_queue_start;
1475 eth_queue_stop_t tx_queue_stop;
1476 eth_rx_queue_setup_t rx_queue_setup;
1477 eth_queue_release_t rx_queue_release;
1478 eth_rx_queue_count_t rx_queue_count;
1480 eth_rx_descriptor_done_t rx_descriptor_done;
1481 eth_rx_descriptor_status_t rx_descriptor_status;
1483 eth_tx_descriptor_status_t tx_descriptor_status;
1485 eth_rx_enable_intr_t rx_queue_intr_enable;
1486 eth_rx_disable_intr_t rx_queue_intr_disable;
1487 eth_tx_queue_setup_t tx_queue_setup;
1488 eth_queue_release_t tx_queue_release;
1489 eth_tx_done_cleanup_t tx_done_cleanup;
/* LED control. */
1491 eth_dev_led_on_t dev_led_on;
1492 eth_dev_led_off_t dev_led_off;
/* Flow control (link-level and per-priority). */
1494 flow_ctrl_get_t flow_ctrl_get;
1495 flow_ctrl_set_t flow_ctrl_set;
1496 priority_flow_ctrl_set_t priority_flow_ctrl_set;
/* Unicast hash filtering. */
1498 eth_uc_hash_table_set_t uc_hash_table_set;
1499 eth_uc_all_hash_table_set_t uc_all_hash_table_set;
/* Port mirroring rules. */
1501 eth_mirror_rule_set_t mirror_rule_set;
1502 eth_mirror_rule_reset_t mirror_rule_reset;
/* UDP/L2 tunnel configuration. */
1504 eth_udp_tunnel_port_add_t udp_tunnel_port_add;
1505 eth_udp_tunnel_port_del_t udp_tunnel_port_del;
1506 eth_l2_tunnel_eth_type_conf_t l2_tunnel_eth_type_conf;
1508 eth_l2_tunnel_offload_set_t l2_tunnel_offload_set;
/* Per-queue TX rate limiting. */
1511 eth_set_queue_rate_limit_t set_queue_rate_limit;
/* RSS hash and redirection-table configuration. */
1513 rss_hash_update_t rss_hash_update;
1514 rss_hash_conf_get_t rss_hash_conf_get;
1515 reta_update_t reta_update;
1516 reta_query_t reta_query;
/* Register dump and EEPROM access. */
1518 eth_get_reg_t get_reg;
1519 eth_get_eeprom_length_t get_eeprom_length;
1520 eth_get_eeprom_t get_eeprom;
1521 eth_set_eeprom_t set_eeprom;
/* Generic filter control entry point. */
1524 eth_filter_ctrl_t filter_ctrl;
/* DCB configuration query. */
1526 eth_get_dcb_info get_dcb_info;
/* IEEE 1588 / timesync operations. */
1528 eth_timesync_enable_t timesync_enable;
1530 eth_timesync_disable_t timesync_disable;
1532 eth_timesync_read_rx_timestamp_t timesync_read_rx_timestamp;
1534 eth_timesync_read_tx_timestamp_t timesync_read_tx_timestamp;
1536 eth_timesync_adjust_time timesync_adjust_time;
1537 eth_timesync_read_time timesync_read_time;
1538 eth_timesync_write_time timesync_write_time;
/* Extended statistics retrieval by ID. */
1540 eth_xstats_get_by_id_t xstats_get_by_id;
1542 eth_xstats_get_names_by_id_t xstats_get_names_by_id;
/* Traffic-management (rte_tm) ops table hook. */
1545 eth_tm_ops_get_t tm_ops_get;
1572 struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1596 struct rte_mbuf *pkts[], uint16_t nb_pkts,
void *user_param);
1603 struct rte_eth_rxtx_callback {
1604 struct rte_eth_rxtx_callback *next;
1616 RTE_ETH_DEV_UNUSED = 0,
1617 RTE_ETH_DEV_ATTACHED,
1618 RTE_ETH_DEV_DEFERRED,
1631 struct rte_eth_dev {
1632 eth_rx_burst_t rx_pkt_burst;
1633 eth_tx_burst_t tx_pkt_burst;
1634 eth_tx_prep_t tx_pkt_prepare;
1635 struct rte_eth_dev_data *data;
1636 const struct eth_dev_ops *dev_ops;
1638 struct rte_intr_handle *intr_handle;
1640 struct rte_eth_dev_cb_list link_intr_cbs;
1645 struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
1650 struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
1654 struct rte_eth_dev_sriov {
1656 uint8_t nb_q_per_pool;
1657 uint16_t def_vmdq_idx;
1658 uint16_t def_pool_q_idx;
1660 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1662 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
1671 struct rte_eth_dev_data {
1672 char name[RTE_ETH_NAME_MAX_LEN];
1676 uint16_t nb_rx_queues;
1677 uint16_t nb_tx_queues;
1679 struct rte_eth_dev_sriov sriov;
1689 uint32_t min_rx_buf_size;
1692 uint64_t rx_mbuf_alloc_failed;
1700 uint8_t promiscuous : 1,
1705 uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT];
1707 uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];
/* Device capability/role flags (presumably stored in dev_flags —
 * confirm against the full rte_eth_dev_data definition). */
1717 #define RTE_ETH_DEV_DETACHABLE 0x0001 /**< Device supports detach */
1719 #define RTE_ETH_DEV_INTR_LSC 0x0002 /**< Link status change interrupt */
1721 #define RTE_ETH_DEV_BONDED_SLAVE 0x0004 /**< Device is a bonding slave */
1723 #define RTE_ETH_DEV_INTR_RMV 0x0008 /**< Device removal interrupt */
1730 extern struct rte_eth_dev rte_eth_devices[];
/* Iterate over all valid port ids: starts at the first valid port via
 * rte_eth_find_next(0) and advances with rte_eth_find_next(p + 1),
 * stopping once p reaches RTE_MAX_ETHPORTS. */
1745 #define RTE_ETH_FOREACH_DEV(p) \
1746 for (p = rte_eth_find_next(0); \
1747 (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
1748 p = rte_eth_find_next(p + 1))
1774 struct rte_eth_dev *rte_eth_dev_allocated(
const char *name);
1786 struct rte_eth_dev *rte_eth_dev_allocate(
const char *name);
1799 struct rte_eth_dev *rte_eth_dev_attach_secondary(
const char *name);
1810 int rte_eth_dev_release_port(
struct rte_eth_dev *eth_dev);
1884 uint16_t nb_tx_queue,
const struct rte_eth_conf *eth_conf);
1897 void _rte_eth_dev_reset(
struct rte_eth_dev *dev);
1939 uint16_t nb_rx_desc,
unsigned int socket_id,
1987 uint16_t nb_tx_desc,
unsigned int socket_id,
2352 uint64_t *values,
unsigned int n);
2400 uint16_t tx_queue_id, uint8_t stat_idx);
2420 uint16_t rx_queue_id,
2464 char *fw_version,
size_t fw_size);
2505 uint32_t *ptypes,
int num);
2732 static inline uint16_t
2734 struct rte_mbuf **rx_pkts,
const uint16_t nb_pkts)
2736 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2738 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2739 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
2740 RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2742 if (queue_id >= dev->data->nb_rx_queues) {
2743 RTE_PMD_DEBUG_TRACE(
"Invalid RX queue_id=%d\n", queue_id);
2747 int16_t nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2750 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
2751 struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
2755 nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
2756 nb_pkts, cb->param);
2758 }
while (cb != NULL);
2780 struct rte_eth_dev *dev;
2782 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2783 dev = &rte_eth_devices[port_id];
2784 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
2785 if (queue_id >= dev->data->nb_rx_queues)
2788 return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2809 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2810 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2811 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2812 return (*dev->dev_ops->rx_descriptor_done)( \
2813 dev->data->rx_queues[queue_id], offset);
/* Return codes for RX descriptor status queries. */
2816 #define RTE_ETH_RX_DESC_AVAIL 0 /**< Descriptor available to the HW */
2817 #define RTE_ETH_RX_DESC_DONE 1 /**< Descriptor done, ready for the app */
2818 #define RTE_ETH_RX_DESC_UNAVAIL 2 /**< Descriptor unavailable */
2857 struct rte_eth_dev *dev;
2860 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2861 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2863 dev = &rte_eth_devices[port_id];
2864 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2865 if (queue_id >= dev->data->nb_rx_queues)
2868 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
2869 rxq = dev->data->rx_queues[queue_id];
2871 return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
/* Return codes for TX descriptor status queries. */
2874 #define RTE_ETH_TX_DESC_FULL 0 /**< Descriptor still in use by the HW */
2875 #define RTE_ETH_TX_DESC_DONE 1 /**< Descriptor transmitted */
2876 #define RTE_ETH_TX_DESC_UNAVAIL 2 /**< Descriptor unavailable */
2911 static inline int rte_eth_tx_descriptor_status(uint8_t port_id,
2912 uint16_t queue_id, uint16_t offset)
2914 struct rte_eth_dev *dev;
2917 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2918 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2920 dev = &rte_eth_devices[port_id];
2921 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2922 if (queue_id >= dev->data->nb_tx_queues)
2925 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
2926 txq = dev->data->tx_queues[queue_id];
2928 return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
2994 static inline uint16_t
2996 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2998 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3000 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3001 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
3002 RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
3004 if (queue_id >= dev->data->nb_tx_queues) {
3005 RTE_PMD_DEBUG_TRACE(
"Invalid TX queue_id=%d\n", queue_id);
3010 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
3011 struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
3015 nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
3018 }
while (cb != NULL);
3022 return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
3081 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
3083 static inline uint16_t
3085 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
3087 struct rte_eth_dev *dev;
3089 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3091 RTE_PMD_DEBUG_TRACE(
"Invalid TX port_id=%d\n", port_id);
3097 dev = &rte_eth_devices[port_id];
3099 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3100 if (queue_id >= dev->data->nb_tx_queues) {
3101 RTE_PMD_DEBUG_TRACE(
"Invalid TX queue_id=%d\n", queue_id);
3107 if (!dev->tx_pkt_prepare)
3110 return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
3125 static inline uint16_t
3134 typedef void (*buffer_tx_error_fn)(
struct rte_mbuf **unsent, uint16_t count,
3142 buffer_tx_error_fn error_callback;
3143 void *error_userdata;
3156 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3157 (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3194 static inline uint16_t
3199 uint16_t to_send = buffer->
length;
3210 buffer->error_callback(&buffer->
pkts[sent], to_send - sent,
3211 buffer->error_userdata);
3283 buffer_tx_error_fn callback,
void *
userdata);
3447 int _rte_eth_dev_callback_process(
struct rte_eth_dev *dev,
3534 int epfd,
int op,
void *data);
3683 uint16_t reta_size);
3702 uint16_t reta_size);
4044 struct rte_eth_rxtx_callback *user_cb);
4077 struct rte_eth_rxtx_callback *user_cb);
4201 uint32_t nb_mc_addr);
4247 struct timespec *timestamp, uint32_t flags);
4264 struct timespec *timestamp);
4340 uint16_t queue_id,
size_t size,
4341 unsigned align,
int socket_id);
4436 uint16_t *nb_rx_desc,
4437 uint16_t *nb_tx_desc);