#ifndef _RTE_ETHDEV_H_
#define _RTE_ETHDEV_H_

#define RTE_ETHDEV_HAS_LRO_SUPPORT

#include "rte_dev_info.h"
	uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
#define ETH_LINK_SPEED_AUTONEG  (0 <<  0)
#define ETH_LINK_SPEED_FIXED    (1 <<  0)
#define ETH_LINK_SPEED_10M_HD   (1 <<  1)
#define ETH_LINK_SPEED_10M      (1 <<  2)
#define ETH_LINK_SPEED_100M_HD  (1 <<  3)
#define ETH_LINK_SPEED_100M     (1 <<  4)
#define ETH_LINK_SPEED_1G       (1 <<  5)
#define ETH_LINK_SPEED_2_5G     (1 <<  6)
#define ETH_LINK_SPEED_5G       (1 <<  7)
#define ETH_LINK_SPEED_10G      (1 <<  8)
#define ETH_LINK_SPEED_20G      (1 <<  9)
#define ETH_LINK_SPEED_25G      (1 << 10)
#define ETH_LINK_SPEED_40G      (1 << 11)
#define ETH_LINK_SPEED_50G      (1 << 12)
#define ETH_LINK_SPEED_56G      (1 << 13)
#define ETH_LINK_SPEED_100G     (1 << 14)
#define ETH_SPEED_NUM_NONE         0
#define ETH_SPEED_NUM_10M         10
#define ETH_SPEED_NUM_100M       100
#define ETH_SPEED_NUM_1G        1000
#define ETH_SPEED_NUM_2_5G      2500
#define ETH_SPEED_NUM_5G        5000
#define ETH_SPEED_NUM_10G      10000
#define ETH_SPEED_NUM_20G      20000
#define ETH_SPEED_NUM_25G      25000
#define ETH_SPEED_NUM_40G      40000
#define ETH_SPEED_NUM_50G      50000
#define ETH_SPEED_NUM_56G      56000
#define ETH_SPEED_NUM_100G    100000
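/*
 * Illustrative usage sketch (not from the original header): the
 * ETH_LINK_SPEED_* values above are bitmap flags for rte_eth_conf.link_speeds,
 * while the ETH_SPEED_NUM_* values are plain speeds in Mbps as reported in
 * rte_eth_link.link_speed. A fixed-speed setup, assuming a valid port_id:
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *
 *	port_conf.link_speeds = ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_10G;
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *
 * Leaving link_speeds as ETH_LINK_SPEED_AUTONEG (0) keeps autonegotiation.
 */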
} __attribute__((aligned(8)));

#define ETH_LINK_HALF_DUPLEX    0
#define ETH_LINK_FULL_DUPLEX    1
#define ETH_LINK_DOWN           0
#define ETH_LINK_UP             1
#define ETH_LINK_FIXED          0
#define ETH_LINK_AUTONEG        1
struct rte_eth_thresh {

#define ETH_MQ_RX_RSS_FLAG  0x1
#define ETH_MQ_RX_DCB_FLAG  0x2
#define ETH_MQ_RX_VMDQ_FLAG 0x4

#define ETH_RSS         ETH_MQ_RX_RSS
#define VMDQ_DCB        ETH_MQ_RX_VMDQ_DCB
#define ETH_DCB_RX      ETH_MQ_RX_DCB

#define ETH_DCB_NONE    ETH_MQ_TX_NONE
#define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
#define ETH_DCB_TX      ETH_MQ_TX_DCB
	ETH_VLAN_TYPE_UNKNOWN = 0,
#define ETH_RSS_IPV4               (1ULL << RTE_ETH_FLOW_IPV4)
#define ETH_RSS_FRAG_IPV4          (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
#define ETH_RSS_IPV6               (1ULL << RTE_ETH_FLOW_IPV6)
#define ETH_RSS_FRAG_IPV6          (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
#define ETH_RSS_L2_PAYLOAD         (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
#define ETH_RSS_IPV6_EX            (1ULL << RTE_ETH_FLOW_IPV6_EX)
#define ETH_RSS_IPV6_TCP_EX        (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
#define ETH_RSS_IPV6_UDP_EX        (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
#define ETH_RSS_PORT               (1ULL << RTE_ETH_FLOW_PORT)
#define ETH_RSS_VXLAN              (1ULL << RTE_ETH_FLOW_VXLAN)
#define ETH_RSS_GENEVE             (1ULL << RTE_ETH_FLOW_GENEVE)
#define ETH_RSS_NVGRE              (1ULL << RTE_ETH_FLOW_NVGRE)
#define ETH_RSS_IP ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_IPV6_EX)

#define ETH_RSS_UDP ( \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_IPV6_UDP_EX)

#define ETH_RSS_TCP ( \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_IPV6_TCP_EX)

#define ETH_RSS_SCTP ( \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV6_SCTP)

#define ETH_RSS_TUNNEL ( \
	ETH_RSS_VXLAN  | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

#define ETH_RSS_PROTO_MASK ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_NONFRAG_IPV6_SCTP | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_L2_PAYLOAD | \
	ETH_RSS_IPV6_EX | \
	ETH_RSS_IPV6_TCP_EX | \
	ETH_RSS_IPV6_UDP_EX | \
	ETH_RSS_PORT | \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)
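/*
 * Illustrative usage sketch (not from the original header): the ETH_RSS_*
 * bits are OR-ed into the rss_hf field of the RSS configuration to select
 * which flow types feed the RSS hash, assuming a valid port_id:
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *
 *	port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *	port_conf.rx_adv_conf.rss_conf.rss_key = NULL;	// use the PMD default key
 *	port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP;
 *	rte_eth_dev_configure(port_id, nb_rx_queues, nb_tx_queues, &port_conf);
 *
 * The device may support only a subset of these bits;
 * rte_eth_dev_info.flow_type_rss_offloads reports which flow types it can hash on.
 */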
#define ETH_RSS_RETA_SIZE_64  64
#define ETH_RSS_RETA_SIZE_128 128
#define ETH_RSS_RETA_SIZE_256 256
#define ETH_RSS_RETA_SIZE_512 512
#define RTE_RETA_GROUP_SIZE   64
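/*
 * Illustrative usage sketch (not from the original header): the redirection
 * table is updated in groups of RTE_RETA_GROUP_SIZE entries, with a per-group
 * mask selecting which entries to write. Assuming port_id and nb_rx_queues:
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
 *						  RTE_RETA_GROUP_SIZE] = { { 0 } };
 *	uint16_t i;
 *
 *	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_rx_queues;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, ETH_RSS_RETA_SIZE_128);
 *
 * The table size actually supported by the device is reported in
 * rte_eth_dev_info.reta_size.
 */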
#define ETH_VMDQ_MAX_VLAN_FILTERS    64
#define ETH_DCB_NUM_USER_PRIORITIES  8
#define ETH_VMDQ_DCB_NUM_QUEUES      128
#define ETH_DCB_NUM_QUEUES           128

#define ETH_DCB_PG_SUPPORT      0x00000001
#define ETH_DCB_PFC_SUPPORT     0x00000002

#define ETH_VLAN_STRIP_OFFLOAD   0x0001
#define ETH_VLAN_FILTER_OFFLOAD  0x0002
#define ETH_VLAN_EXTEND_OFFLOAD  0x0004

#define ETH_VLAN_STRIP_MASK   0x0001
#define ETH_VLAN_FILTER_MASK  0x0002
#define ETH_VLAN_EXTEND_MASK  0x0004
#define ETH_VLAN_ID_MAX       0x0FFF

#define ETH_NUM_RECEIVE_MAC_ADDR  128

#define ETH_VMDQ_NUM_UC_HASH_ARRAY  128

#define ETH_VMDQ_ACCEPT_UNTAG      0x0001
#define ETH_VMDQ_ACCEPT_HASH_MC    0x0002
#define ETH_VMDQ_ACCEPT_HASH_UC    0x0004
#define ETH_VMDQ_ACCEPT_BROADCAST  0x0008
#define ETH_VMDQ_ACCEPT_MULTICAST  0x0010

#define ETH_MIRROR_MAX_VLANS  64

#define ETH_MIRROR_VIRTUAL_POOL_UP    0x01
#define ETH_MIRROR_UPLINK_PORT        0x02
#define ETH_MIRROR_DOWNLINK_PORT      0x04
#define ETH_MIRROR_VLAN               0x08
#define ETH_MIRROR_VIRTUAL_POOL_DOWN  0x10
struct rte_eth_vlan_mirror {

	uint16_t reta[RTE_RETA_GROUP_SIZE];
struct rte_eth_dcb_rx_conf {

struct rte_eth_vmdq_dcb_tx_conf {

struct rte_eth_dcb_tx_conf {

struct rte_eth_vmdq_tx_conf {

struct rte_eth_vmdq_rx_conf {
	uint8_t enable_default_pool;
	uint8_t default_pool;
	uint8_t enable_loop_back;
	uint8_t nb_pool_maps;
#define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001
#define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002
#define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004
#define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100
#define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200
#define ETH_TXQ_FLAGS_NOXSUMUDP  0x0400
#define ETH_TXQ_FLAGS_NOXSUMTCP  0x0800
#define ETH_TXQ_FLAGS_NOOFFLOADS \
		(ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
		 ETH_TXQ_FLAGS_NOXSUMUDP | ETH_TXQ_FLAGS_NOXSUMTCP)
#define ETH_TXQ_FLAGS_NOXSUMS \
		(ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
		 ETH_TXQ_FLAGS_NOXSUMTCP)
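/*
 * Illustrative usage sketch (not from the original header): an application
 * that only transmits single-segment mbufs and needs no TX offloads can set
 * these flags in the per-queue TX configuration, assuming a valid port_id:
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_txconf txconf;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	txconf = dev_info.default_txconf;
 *	txconf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS;
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
 *			&txconf);
 */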
#define DEV_RX_OFFLOAD_VLAN_STRIP       0x00000001
#define DEV_RX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_RX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_RX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_RX_OFFLOAD_TCP_LRO          0x00000010
#define DEV_RX_OFFLOAD_QINQ_STRIP       0x00000020
#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080

#define DEV_TX_OFFLOAD_VLAN_INSERT      0x00000001
#define DEV_TX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_TX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_TX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_TX_OFFLOAD_SCTP_CKSUM       0x00000010
#define DEV_TX_OFFLOAD_TCP_TSO          0x00000020
#define DEV_TX_OFFLOAD_UDP_TSO          0x00000040
#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
#define DEV_TX_OFFLOAD_QINQ_INSERT      0x00000100
#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200
#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400
#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800
#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000
#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
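/*
 * Illustrative usage sketch (not from the original header): the offload
 * capability bitmaps are reported per device and can be tested before an
 * offload is requested, assuming a valid port_id:
 *
 *	struct rte_eth_dev_info dev_info;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
 *		printf("port %u supports TCP TSO\n", port_id);
 *	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
 *		printf("port %u supports LRO\n", port_id);
 */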
	uint32_t max_hash_mac_addrs;

#define RTE_ETH_XSTATS_NAME_SIZE 64

#define ETH_DCB_NUM_TCS    8
#define ETH_MAX_VMDQ_POOL  64

	} tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
	} tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];

#define RTE_ETH_QUEUE_STATE_STOPPED 0
#define RTE_ETH_QUEUE_STATE_STARTED 1
struct rte_eth_dev_callback;
TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);

#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return retval; \
	} \
} while (0)

#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return; \
	} \
} while (0)
#define ETH_L2_TUNNEL_ENABLE_MASK       0x00000001
#define ETH_L2_TUNNEL_INSERTION_MASK    0x00000002
#define ETH_L2_TUNNEL_STRIPPING_MASK    0x00000004
#define ETH_L2_TUNNEL_FORWARDING_MASK   0x00000008
typedef int  (*eth_dev_configure_t)(struct rte_eth_dev *dev);

typedef int  (*eth_dev_start_t)(struct rte_eth_dev *dev);

typedef void (*eth_dev_stop_t)(struct rte_eth_dev *dev);

typedef int  (*eth_dev_set_link_up_t)(struct rte_eth_dev *dev);

typedef int  (*eth_dev_set_link_down_t)(struct rte_eth_dev *dev);

typedef void (*eth_dev_close_t)(struct rte_eth_dev *dev);

typedef void (*eth_promiscuous_enable_t)(struct rte_eth_dev *dev);

typedef void (*eth_promiscuous_disable_t)(struct rte_eth_dev *dev);

typedef void (*eth_allmulticast_enable_t)(struct rte_eth_dev *dev);

typedef void (*eth_allmulticast_disable_t)(struct rte_eth_dev *dev);

typedef int (*eth_link_update_t)(struct rte_eth_dev *dev,
				int wait_to_complete);
typedef void (*eth_stats_get_t)(struct rte_eth_dev *dev,

typedef void (*eth_stats_reset_t)(struct rte_eth_dev *dev);

typedef int (*eth_xstats_get_t)(struct rte_eth_dev *dev,

typedef int (*eth_xstats_get_by_id_t)(struct rte_eth_dev *dev,
				      const uint64_t *ids,

typedef void (*eth_xstats_reset_t)(struct rte_eth_dev *dev);

typedef int (*eth_xstats_get_names_t)(struct rte_eth_dev *dev,

typedef int (*eth_xstats_get_names_by_id_t)(struct rte_eth_dev *dev,

typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,

typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,

typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(struct rte_eth_dev *dev);
typedef int (*eth_queue_start_t)(struct rte_eth_dev *dev,

typedef int (*eth_queue_stop_t)(struct rte_eth_dev *dev,

typedef int (*eth_rx_queue_setup_t)(struct rte_eth_dev *dev,
				    uint16_t rx_queue_id,
				    uint16_t nb_rx_desc,
				    unsigned int socket_id,

typedef int (*eth_tx_queue_setup_t)(struct rte_eth_dev *dev,
				    uint16_t tx_queue_id,
				    uint16_t nb_tx_desc,
				    unsigned int socket_id,

typedef int (*eth_rx_enable_intr_t)(struct rte_eth_dev *dev,
				    uint16_t rx_queue_id);

typedef int (*eth_rx_disable_intr_t)(struct rte_eth_dev *dev,
				     uint16_t rx_queue_id);

typedef void (*eth_queue_release_t)(void *queue);

typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
					 uint16_t rx_queue_id);

typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);

typedef int (*eth_rx_descriptor_status_t)(void *rxq, uint16_t offset);

typedef int (*eth_tx_descriptor_status_t)(void *txq, uint16_t offset);

typedef int (*eth_fw_version_get_t)(struct rte_eth_dev *dev,
				    char *fw_version, size_t fw_size);

typedef int (*eth_tx_done_cleanup_t)(void *txq, uint32_t free_cnt);
typedef void (*eth_rxq_info_get_t)(struct rte_eth_dev *dev,

typedef void (*eth_txq_info_get_t)(struct rte_eth_dev *dev,

typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);

typedef int (*vlan_filter_set_t)(struct rte_eth_dev *dev,

typedef int (*vlan_tpid_set_t)(struct rte_eth_dev *dev,

typedef void (*vlan_offload_set_t)(struct rte_eth_dev *dev, int mask);

typedef int (*vlan_pvid_set_t)(struct rte_eth_dev *dev,

typedef void (*vlan_strip_queue_set_t)(struct rte_eth_dev *dev,
				       uint16_t rx_queue_id,

typedef uint16_t (*eth_rx_burst_t)(void *rxq,

typedef uint16_t (*eth_tx_burst_t)(void *txq,

typedef uint16_t (*eth_tx_prep_t)(void *txq,
typedef int (*flow_ctrl_get_t)(struct rte_eth_dev *dev,

typedef int (*flow_ctrl_set_t)(struct rte_eth_dev *dev,

typedef int (*priority_flow_ctrl_set_t)(struct rte_eth_dev *dev,

typedef int (*reta_update_t)(struct rte_eth_dev *dev,
			     uint16_t reta_size);

typedef int (*reta_query_t)(struct rte_eth_dev *dev,
			    uint16_t reta_size);

typedef int (*rss_hash_update_t)(struct rte_eth_dev *dev,

typedef int (*rss_hash_conf_get_t)(struct rte_eth_dev *dev,

typedef int (*eth_dev_led_on_t)(struct rte_eth_dev *dev);

typedef int (*eth_dev_led_off_t)(struct rte_eth_dev *dev);
typedef void (*eth_mac_addr_remove_t)(struct rte_eth_dev *dev, uint32_t index);

typedef int (*eth_mac_addr_add_t)(struct rte_eth_dev *dev,

typedef void (*eth_mac_addr_set_t)(struct rte_eth_dev *dev,

typedef int (*eth_uc_hash_table_set_t)(struct rte_eth_dev *dev,

typedef int (*eth_uc_all_hash_table_set_t)(struct rte_eth_dev *dev,

typedef int (*eth_set_queue_rate_limit_t)(struct rte_eth_dev *dev,

typedef int (*eth_mirror_rule_set_t)(struct rte_eth_dev *dev,

typedef int (*eth_mirror_rule_reset_t)(struct rte_eth_dev *dev,

typedef int (*eth_udp_tunnel_port_add_t)(struct rte_eth_dev *dev,

typedef int (*eth_udp_tunnel_port_del_t)(struct rte_eth_dev *dev,

typedef int (*eth_set_mc_addr_list_t)(struct rte_eth_dev *dev,
				      uint32_t nb_mc_addr);
typedef int (*eth_timesync_enable_t)(struct rte_eth_dev *dev);

typedef int (*eth_timesync_disable_t)(struct rte_eth_dev *dev);

typedef int (*eth_timesync_read_rx_timestamp_t)(struct rte_eth_dev *dev,
						struct timespec *timestamp,

typedef int (*eth_timesync_read_tx_timestamp_t)(struct rte_eth_dev *dev,
						struct timespec *timestamp);

typedef int (*eth_timesync_adjust_time)(struct rte_eth_dev *dev, int64_t);

typedef int (*eth_timesync_read_time)(struct rte_eth_dev *dev,
				      struct timespec *timestamp);

typedef int (*eth_timesync_write_time)(struct rte_eth_dev *dev,
				       const struct timespec *timestamp);

typedef int (*eth_get_reg_t)(struct rte_eth_dev *dev,
			     struct rte_dev_reg_info *info);

typedef int (*eth_get_eeprom_length_t)(struct rte_eth_dev *dev);

typedef int (*eth_get_eeprom_t)(struct rte_eth_dev *dev,
				struct rte_dev_eeprom_info *info);

typedef int (*eth_set_eeprom_t)(struct rte_eth_dev *dev,
				struct rte_dev_eeprom_info *info);

typedef int (*eth_l2_tunnel_eth_type_conf_t)

typedef int (*eth_l2_tunnel_offload_set_t)
	(struct rte_eth_dev *dev,
#ifdef RTE_NIC_BYPASS

	RTE_BYPASS_MODE_NONE,
	RTE_BYPASS_MODE_NORMAL,
	RTE_BYPASS_MODE_BYPASS,
	RTE_BYPASS_MODE_ISOLATE,
	RTE_BYPASS_MODE_NUM,

#define RTE_BYPASS_MODE_VALID(x)	\
	((x) > RTE_BYPASS_MODE_NONE && (x) < RTE_BYPASS_MODE_NUM)
	RTE_BYPASS_EVENT_NONE,
	RTE_BYPASS_EVENT_START,
	RTE_BYPASS_EVENT_OS_ON = RTE_BYPASS_EVENT_START,
	RTE_BYPASS_EVENT_POWER_ON,
	RTE_BYPASS_EVENT_OS_OFF,
	RTE_BYPASS_EVENT_POWER_OFF,
	RTE_BYPASS_EVENT_TIMEOUT,
	RTE_BYPASS_EVENT_NUM

#define RTE_BYPASS_EVENT_VALID(x)	\
	((x) > RTE_BYPASS_EVENT_NONE && (x) < RTE_BYPASS_EVENT_NUM)
	RTE_BYPASS_TMT_1_5_SEC,
	RTE_BYPASS_TMT_2_SEC,
	RTE_BYPASS_TMT_3_SEC,
	RTE_BYPASS_TMT_4_SEC,
	RTE_BYPASS_TMT_8_SEC,
	RTE_BYPASS_TMT_16_SEC,
	RTE_BYPASS_TMT_32_SEC,

#define RTE_BYPASS_TMT_VALID(x)	\
	((x) == RTE_BYPASS_TMT_OFF || \
	((x) > RTE_BYPASS_TMT_OFF && (x) < RTE_BYPASS_TMT_NUM))
typedef void (*bypass_init_t)(struct rte_eth_dev *dev);
typedef int32_t (*bypass_state_set_t)(struct rte_eth_dev *dev, uint32_t *new_state);
typedef int32_t (*bypass_state_show_t)(struct rte_eth_dev *dev, uint32_t *state);
typedef int32_t (*bypass_event_set_t)(struct rte_eth_dev *dev, uint32_t state, uint32_t event);
typedef int32_t (*bypass_event_show_t)(struct rte_eth_dev *dev, uint32_t event_shift, uint32_t *event);
typedef int32_t (*bypass_wd_timeout_set_t)(struct rte_eth_dev *dev, uint32_t timeout);
typedef int32_t (*bypass_wd_timeout_show_t)(struct rte_eth_dev *dev, uint32_t *wd_timeout);
typedef int32_t (*bypass_ver_show_t)(struct rte_eth_dev *dev, uint32_t *ver);
typedef int32_t (*bypass_wd_reset_t)(struct rte_eth_dev *dev);
typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,

typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
struct eth_dev_ops {
	eth_dev_configure_t        dev_configure;
	eth_dev_start_t            dev_start;
	eth_dev_stop_t             dev_stop;
	eth_dev_set_link_up_t      dev_set_link_up;
	eth_dev_set_link_down_t    dev_set_link_down;
	eth_dev_close_t            dev_close;
	eth_link_update_t          link_update;

	eth_promiscuous_enable_t   promiscuous_enable;
	eth_promiscuous_disable_t  promiscuous_disable;
	eth_allmulticast_enable_t  allmulticast_enable;
	eth_allmulticast_disable_t allmulticast_disable;
	eth_mac_addr_remove_t      mac_addr_remove;
	eth_mac_addr_add_t         mac_addr_add;
	eth_mac_addr_set_t         mac_addr_set;
	eth_set_mc_addr_list_t     set_mc_addr_list;

	eth_stats_get_t            stats_get;
	eth_stats_reset_t          stats_reset;
	eth_xstats_get_t           xstats_get;
	eth_xstats_reset_t         xstats_reset;
	eth_xstats_get_names_t     xstats_get_names;

	eth_queue_stats_mapping_set_t queue_stats_mapping_set;

	eth_dev_infos_get_t        dev_infos_get;
	eth_rxq_info_get_t         rxq_info_get;
	eth_txq_info_get_t         txq_info_get;
	eth_fw_version_get_t       fw_version_get;
	eth_dev_supported_ptypes_get_t dev_supported_ptypes_get;

	vlan_filter_set_t          vlan_filter_set;
	vlan_tpid_set_t            vlan_tpid_set;
	vlan_strip_queue_set_t     vlan_strip_queue_set;
	vlan_offload_set_t         vlan_offload_set;
	vlan_pvid_set_t            vlan_pvid_set;

	eth_queue_start_t          rx_queue_start;
	eth_queue_stop_t           rx_queue_stop;
	eth_queue_start_t          tx_queue_start;
	eth_queue_stop_t           tx_queue_stop;
	eth_rx_queue_setup_t       rx_queue_setup;
	eth_queue_release_t        rx_queue_release;
	eth_rx_queue_count_t       rx_queue_count;
	eth_rx_descriptor_done_t   rx_descriptor_done;
	eth_rx_descriptor_status_t rx_descriptor_status;
	eth_tx_descriptor_status_t tx_descriptor_status;
	eth_rx_enable_intr_t       rx_queue_intr_enable;
	eth_rx_disable_intr_t      rx_queue_intr_disable;
	eth_tx_queue_setup_t       tx_queue_setup;
	eth_queue_release_t        tx_queue_release;
	eth_tx_done_cleanup_t      tx_done_cleanup;

	eth_dev_led_on_t           dev_led_on;
	eth_dev_led_off_t          dev_led_off;

	flow_ctrl_get_t            flow_ctrl_get;
	flow_ctrl_set_t            flow_ctrl_set;
	priority_flow_ctrl_set_t   priority_flow_ctrl_set;

	eth_uc_hash_table_set_t    uc_hash_table_set;
	eth_uc_all_hash_table_set_t uc_all_hash_table_set;

	eth_mirror_rule_set_t      mirror_rule_set;
	eth_mirror_rule_reset_t    mirror_rule_reset;

	eth_udp_tunnel_port_add_t  udp_tunnel_port_add;
	eth_udp_tunnel_port_del_t  udp_tunnel_port_del;
	eth_l2_tunnel_eth_type_conf_t l2_tunnel_eth_type_conf;
	eth_l2_tunnel_offload_set_t l2_tunnel_offload_set;

	eth_set_queue_rate_limit_t set_queue_rate_limit;

	rss_hash_update_t          rss_hash_update;
	rss_hash_conf_get_t        rss_hash_conf_get;
	reta_update_t              reta_update;
	reta_query_t               reta_query;

	eth_get_reg_t              get_reg;
	eth_get_eeprom_length_t    get_eeprom_length;
	eth_get_eeprom_t           get_eeprom;
	eth_set_eeprom_t           set_eeprom;

#ifdef RTE_NIC_BYPASS
	bypass_init_t              bypass_init;
	bypass_state_set_t         bypass_state_set;
	bypass_state_show_t        bypass_state_show;
	bypass_event_set_t         bypass_event_set;
	bypass_event_show_t        bypass_event_show;
	bypass_wd_timeout_set_t    bypass_wd_timeout_set;
	bypass_wd_timeout_show_t   bypass_wd_timeout_show;
	bypass_ver_show_t          bypass_ver_show;
	bypass_wd_reset_t          bypass_wd_reset;

	eth_filter_ctrl_t          filter_ctrl;

	eth_get_dcb_info           get_dcb_info;

	eth_timesync_enable_t      timesync_enable;
	eth_timesync_disable_t     timesync_disable;
	eth_timesync_read_rx_timestamp_t timesync_read_rx_timestamp;
	eth_timesync_read_tx_timestamp_t timesync_read_tx_timestamp;
	eth_timesync_adjust_time   timesync_adjust_time;
	eth_timesync_read_time     timesync_read_time;
	eth_timesync_write_time    timesync_write_time;

	eth_xstats_get_by_id_t     xstats_get_by_id;
	eth_xstats_get_names_by_id_t xstats_get_names_by_id;
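/*
 * Illustrative sketch (hypothetical driver, not from the original header):
 * a PMD fills an eth_dev_ops table with its callbacks and attaches it to the
 * device at probe time; the mydrv_* names below are placeholders.
 *
 *	static const struct eth_dev_ops mydrv_eth_dev_ops = {
 *		.dev_configure  = mydrv_dev_configure,
 *		.dev_start      = mydrv_dev_start,
 *		.dev_stop       = mydrv_dev_stop,
 *		.link_update    = mydrv_link_update,
 *		.dev_infos_get  = mydrv_dev_infos_get,
 *		.rx_queue_setup = mydrv_rx_queue_setup,
 *		.tx_queue_setup = mydrv_tx_queue_setup,
 *	};
 *
 *	eth_dev->dev_ops = &mydrv_eth_dev_ops;
 */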
		struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,

		struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);

struct rte_eth_rxtx_callback {
	struct rte_eth_rxtx_callback *next;

	RTE_ETH_DEV_UNUSED = 0,
	RTE_ETH_DEV_ATTACHED,
struct rte_eth_dev {
	eth_rx_burst_t rx_pkt_burst;
	eth_tx_burst_t tx_pkt_burst;
	eth_tx_prep_t  tx_pkt_prepare;
	struct rte_eth_dev_data *data;
	const struct eth_dev_ops *dev_ops;
	struct rte_intr_handle *intr_handle;
	struct rte_eth_dev_cb_list link_intr_cbs;
	struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
	struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];

struct rte_eth_dev_sriov {
	uint8_t nb_q_per_pool;
	uint16_t def_vmdq_idx;
	uint16_t def_pool_q_idx;

#define RTE_ETH_DEV_SRIOV(dev)  ((dev)->data->sriov)

#define RTE_ETH_NAME_MAX_LEN (32)
struct rte_eth_dev_data {
	char name[RTE_ETH_NAME_MAX_LEN];

	uint16_t nb_rx_queues;
	uint16_t nb_tx_queues;

	struct rte_eth_dev_sriov sriov;

	uint32_t min_rx_buf_size;

	uint64_t rx_mbuf_alloc_failed;

	uint8_t promiscuous : 1,

	uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT];
	uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];

	enum rte_kernel_driver kdrv;
	const char *drv_name;

#define RTE_ETH_DEV_DETACHABLE   0x0001
#define RTE_ETH_DEV_INTR_LSC     0x0002
#define RTE_ETH_DEV_BONDED_SLAVE 0x0004
#define RTE_ETH_DEV_INTR_RMV     0x0008
extern struct rte_eth_dev rte_eth_devices[];

#define RTE_ETH_FOREACH_DEV(p)					\
	for (p = rte_eth_find_next(0);				\
	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS;	\
	     p = rte_eth_find_next(p + 1))
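/*
 * Illustrative usage sketch (not from the original header): iterate over all
 * attached ports and print their link status.
 *
 *	uint8_t port_id;
 *
 *	RTE_ETH_FOREACH_DEV(port_id) {
 *		struct rte_eth_link link;
 *
 *		rte_eth_link_get_nowait(port_id, &link);
 *		printf("port %u: link %s\n", port_id,
 *			link.link_status ? "up" : "down");
 *	}
 */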
struct rte_eth_dev *rte_eth_dev_allocated(const char *name);

struct rte_eth_dev *rte_eth_dev_allocate(const char *name);

struct rte_eth_dev *rte_eth_dev_attach_secondary(const char *name);

int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev);

		uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);

void _rte_eth_dev_reset(struct rte_eth_dev *dev);

		uint16_t nb_rx_desc, unsigned int socket_id,

		uint16_t nb_tx_desc, unsigned int socket_id,

		uint64_t *values, unsigned int n);

		uint16_t tx_queue_id, uint8_t stat_idx);

		uint16_t rx_queue_id,

		char *fw_version, size_t fw_size);

		uint32_t *ptypes, int num);
static inline uint16_t
		 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);

	int16_t nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];

			nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
					  nb_pkts, cb->param);
		} while (cb != NULL);
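/*
 * Illustrative usage sketch (not from the original header): poll one RX
 * queue and release the received packets; a real application would process
 * them instead of freeing them.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_rx, i;
 *
 *	nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	for (i = 0; i < nb_rx; i++)
 *		rte_pktmbuf_free(pkts[i]);
 */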
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
	if (queue_id >= dev->data->nb_rx_queues)

	return (*dev->dev_ops->rx_queue_count)(dev, queue_id);

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
	return (*dev->dev_ops->rx_descriptor_done)( \
		dev->data->rx_queues[queue_id], offset);
#define RTE_ETH_RX_DESC_AVAIL    0
#define RTE_ETH_RX_DESC_DONE     1
#define RTE_ETH_RX_DESC_UNAVAIL  2

	struct rte_eth_dev *dev;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_rx_queues)

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
	rxq = dev->data->rx_queues[queue_id];

	return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);

#define RTE_ETH_TX_DESC_FULL     0
#define RTE_ETH_TX_DESC_DONE     1
#define RTE_ETH_TX_DESC_UNAVAIL  2
static inline int rte_eth_tx_descriptor_status(uint8_t port_id,
	uint16_t queue_id, uint16_t offset)
	struct rte_eth_dev *dev;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_tx_queues)

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
	txq = dev->data->tx_queues[queue_id];

	return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
static inline uint16_t
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];

			nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
		} while (cb != NULL);

	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
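/*
 * Illustrative usage sketch (not from the original header):
 * rte_eth_tx_burst() may accept fewer packets than requested, so the caller
 * must retry or free the unsent mbufs.
 *
 *	uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
 *
 *	while (nb_tx < nb_rx)
 *		rte_pktmbuf_free(pkts[nb_tx++]);
 */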
#ifndef RTE_ETHDEV_TX_PREPARE_NOOP

static inline uint16_t
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
	struct rte_eth_dev *dev;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		RTE_PMD_DEBUG_TRACE("Invalid TX port_id=%d\n", port_id);

	dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);

	if (!dev->tx_pkt_prepare)

	return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
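/*
 * Illustrative usage sketch (not from the original header): when TX offloads
 * such as TSO or checksum are in use, packets can be passed through
 * rte_eth_tx_prepare() on the same queue before rte_eth_tx_burst(); a packet
 * rejected by the PMD stops processing and the cause is left in rte_errno.
 *
 *	uint16_t nb_prep = rte_eth_tx_prepare(port_id, 0, pkts, nb_pkts);
 *
 *	if (nb_prep != nb_pkts)
 *		printf("tx_prepare: %s\n", strerror(rte_errno));
 *	nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_prep);
 */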
static inline uint16_t

typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,

	buffer_tx_error_fn error_callback;
	void *error_userdata;

#define RTE_ETH_TX_BUFFER_SIZE(sz) \
	(sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
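/*
 * Illustrative usage sketch (not from the original header): allocate a TX
 * buffer sized for 32 packets on the port's NUMA socket and initialize it.
 *
 *	struct rte_eth_dev_tx_buffer *buffer;
 *
 *	buffer = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *			rte_eth_dev_socket_id(port_id));
 *	rte_eth_tx_buffer_init(buffer, 32);
 */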
static inline uint16_t
	uint16_t to_send = buffer->length;

		buffer->error_callback(&buffer->pkts[sent], to_send - sent,
				buffer->error_userdata);

static inline uint16_t __attribute__((always_inline))
	buffer->pkts[buffer->length++] = tx_pkt;
	if (buffer->length < buffer->size)
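/*
 * Illustrative usage sketch (not from the original header): queue packets
 * into the buffer; each call may trigger a transmission once the buffer is
 * full, and any remainder is pushed out explicitly with a flush.
 *
 *	sent  = rte_eth_tx_buffer(port_id, 0, buffer, pkt);
 *	...
 *	sent += rte_eth_tx_buffer_flush(port_id, 0, buffer);
 */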
		buffer_tx_error_fn callback, void *userdata);

void _rte_eth_dev_callback_process(struct rte_eth_dev *dev,

		int epfd, int op, void *data);

		uint16_t reta_size);

		uint16_t reta_size);

		struct rte_eth_rxtx_callback *user_cb);

		struct rte_eth_rxtx_callback *user_cb);

		uint32_t nb_mc_addr);

		struct timespec *timestamp, uint32_t flags);

		struct timespec *timestamp);

		uint16_t queue_id, size_t size,
		unsigned align, int socket_id);