#define RTE_ETHDEV_HAS_LRO_SUPPORT

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
#define RTE_ETHDEV_DEBUG_RX
#define RTE_ETHDEV_DEBUG_TX
#endif

#include <rte_compat.h>
#include <rte_config.h>
#include "rte_dev_info.h"

extern int rte_eth_dev_logtype;
#define RTE_ETHDEV_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)

/** Macro to iterate over all ethdev ports matching some devargs. */
#define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
	for (rte_eth_iterator_init(iter, devargs), \
	     id = rte_eth_iterator_next(iter); \
	     id != RTE_MAX_ETHPORTS; \
	     id = rte_eth_iterator_next(iter))

/* Device supported speeds bitmap flags */
#define ETH_LINK_SPEED_AUTONEG (0 <<  0) /**< Autonegotiate (all speeds) */
#define ETH_LINK_SPEED_FIXED   (1 <<  0) /**< Disable autoneg (fixed speed) */
#define ETH_LINK_SPEED_10M_HD  (1 <<  1) /**<  10 Mbps half-duplex */
#define ETH_LINK_SPEED_10M     (1 <<  2) /**<  10 Mbps full-duplex */
#define ETH_LINK_SPEED_100M_HD (1 <<  3) /**< 100 Mbps half-duplex */
#define ETH_LINK_SPEED_100M    (1 <<  4) /**< 100 Mbps full-duplex */
#define ETH_LINK_SPEED_1G      (1 <<  5) /**<   1 Gbps */
#define ETH_LINK_SPEED_2_5G    (1 <<  6) /**< 2.5 Gbps */
#define ETH_LINK_SPEED_5G      (1 <<  7) /**<   5 Gbps */
#define ETH_LINK_SPEED_10G     (1 <<  8) /**<  10 Gbps */
#define ETH_LINK_SPEED_20G     (1 <<  9) /**<  20 Gbps */
#define ETH_LINK_SPEED_25G     (1 << 10) /**<  25 Gbps */
#define ETH_LINK_SPEED_40G     (1 << 11) /**<  40 Gbps */
#define ETH_LINK_SPEED_50G     (1 << 12) /**<  50 Gbps */
#define ETH_LINK_SPEED_56G     (1 << 13) /**<  56 Gbps */
#define ETH_LINK_SPEED_100G    (1 << 14) /**< 100 Gbps */
#define ETH_LINK_SPEED_200G    (1 << 15) /**< 200 Gbps */

/* Ethernet numeric link speeds in Mbps */
#define ETH_SPEED_NUM_NONE         0 /**< Not defined */
#define ETH_SPEED_NUM_10M         10
#define ETH_SPEED_NUM_100M       100
#define ETH_SPEED_NUM_1G        1000
#define ETH_SPEED_NUM_2_5G      2500
#define ETH_SPEED_NUM_5G        5000
#define ETH_SPEED_NUM_10G      10000
#define ETH_SPEED_NUM_20G      20000
#define ETH_SPEED_NUM_25G      25000
#define ETH_SPEED_NUM_40G      40000
#define ETH_SPEED_NUM_50G      50000
#define ETH_SPEED_NUM_56G      56000
#define ETH_SPEED_NUM_100G    100000
#define ETH_SPEED_NUM_200G    200000
#define ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */

/* Utility constants for struct rte_eth_link */
#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection */
#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection */
#define ETH_LINK_DOWN        0 /**< Link is down */
#define ETH_LINK_UP          1 /**< Link is up */
#define ETH_LINK_FIXED       0 /**< No autonegotiation */
#define ETH_LINK_AUTONEG     1 /**< Autonegotiated */
#define RTE_ETH_LINK_MAX_STR_LEN 40 /**< Max length of default link string */

/** A structure used to configure the ring threshold registers of an Rx/Tx
 * queue for an Ethernet port.
 */
struct rte_eth_thresh {
	uint8_t pthresh; /**< Ring prefetch threshold. */
	uint8_t hthresh; /**< Ring host threshold. */
	uint8_t wthresh; /**< Ring writeback threshold. */
};

/* Simple flags to indicate Rx multi-queue modes. */
#define ETH_MQ_RX_RSS_FLAG  0x1
#define ETH_MQ_RX_DCB_FLAG  0x2
#define ETH_MQ_RX_VMDQ_FLAG 0x4

/* Backward-compatible aliases for multi-queue Rx modes. */
#define ETH_RSS    ETH_MQ_RX_RSS
#define VMDQ_DCB   ETH_MQ_RX_VMDQ_DCB
#define ETH_DCB_RX ETH_MQ_RX_DCB

/* Backward-compatible aliases for multi-queue Tx modes. */
#define ETH_DCB_NONE    ETH_MQ_TX_NONE
#define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
#define ETH_DCB_TX      ETH_MQ_TX_DCB

/** A structure used to configure the Rx features of an Ethernet port. */
struct rte_eth_rxmode {
	/* ... (mq_mode, max_rx_pkt_len, split_hdr_size, offloads, ...) */
	uint64_t reserved_64s[2]; /**< Reserved for future fields */
	void *reserved_ptrs[2];   /**< Reserved for future fields */
};

/** VLAN types for setting the Ether type of VLAN (802.1Q / 802.1AD). */
enum rte_vlan_type {
	ETH_VLAN_TYPE_UNKNOWN = 0,
	ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
	ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
	ETH_VLAN_TYPE_MAX,
};
/* Flow types for flow-type based configuration (e.g. RSS, flow director). */
#define RTE_ETH_FLOW_UNKNOWN             0
#define RTE_ETH_FLOW_RAW                 1
#define RTE_ETH_FLOW_IPV4                2
#define RTE_ETH_FLOW_FRAG_IPV4           3
#define RTE_ETH_FLOW_NONFRAG_IPV4_TCP    4
#define RTE_ETH_FLOW_NONFRAG_IPV4_UDP    5
#define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP   6
#define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER  7
#define RTE_ETH_FLOW_IPV6                8
#define RTE_ETH_FLOW_FRAG_IPV6           9
#define RTE_ETH_FLOW_NONFRAG_IPV6_TCP   10
#define RTE_ETH_FLOW_NONFRAG_IPV6_UDP   11
#define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP  12
#define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
#define RTE_ETH_FLOW_L2_PAYLOAD         14
#define RTE_ETH_FLOW_IPV6_EX            15
#define RTE_ETH_FLOW_IPV6_TCP_EX        16
#define RTE_ETH_FLOW_IPV6_UDP_EX        17
#define RTE_ETH_FLOW_PORT               18
#define RTE_ETH_FLOW_VXLAN              19
#define RTE_ETH_FLOW_GENEVE             20
#define RTE_ETH_FLOW_NVGRE              21
#define RTE_ETH_FLOW_VXLAN_GPE          22
#define RTE_ETH_FLOW_GTPU               23
#define RTE_ETH_FLOW_MAX                24

/* The RSS offload types, defined based on the flow types above. */
#define ETH_RSS_IPV4               (1ULL << 2)
#define ETH_RSS_FRAG_IPV4          (1ULL << 3)
#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << 4)
#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << 5)
#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << 6)
#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
#define ETH_RSS_IPV6               (1ULL << 8)
#define ETH_RSS_FRAG_IPV6          (1ULL << 9)
#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << 10)
#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << 11)
#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << 12)
#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
#define ETH_RSS_L2_PAYLOAD         (1ULL << 14)
#define ETH_RSS_IPV6_EX            (1ULL << 15)
#define ETH_RSS_IPV6_TCP_EX        (1ULL << 16)
#define ETH_RSS_IPV6_UDP_EX        (1ULL << 17)
#define ETH_RSS_PORT               (1ULL << 18)
#define ETH_RSS_VXLAN              (1ULL << 19)
#define ETH_RSS_GENEVE             (1ULL << 20)
#define ETH_RSS_NVGRE              (1ULL << 21)
#define ETH_RSS_GTPU               (1ULL << 23)
#define ETH_RSS_ETH                (1ULL << 24)
#define ETH_RSS_S_VLAN             (1ULL << 25)
#define ETH_RSS_C_VLAN             (1ULL << 26)
#define ETH_RSS_ESP                (1ULL << 27)
#define ETH_RSS_AH                 (1ULL << 28)
#define ETH_RSS_L2TPV3             (1ULL << 29)
#define ETH_RSS_PFCP               (1ULL << 30)
#define ETH_RSS_PPPOE              (1ULL << 31)
#define ETH_RSS_ECPRI              (1ULL << 32)
#define ETH_RSS_MPLS               (1ULL << 33)

/*
 * Hash only the selected part of the tuple. If neither the *_SRC_ONLY nor
 * the *_DST_ONLY bit is set for a layer, both fields of that layer are hashed.
 */
#define ETH_RSS_L3_SRC_ONLY        (1ULL << 63)
#define ETH_RSS_L3_DST_ONLY        (1ULL << 62)
#define ETH_RSS_L4_SRC_ONLY        (1ULL << 61)
#define ETH_RSS_L4_DST_ONLY        (1ULL << 60)
#define ETH_RSS_L2_SRC_ONLY        (1ULL << 59)
#define ETH_RSS_L2_DST_ONLY        (1ULL << 58)

/* Hash only the first N bits of an IPv6 address (prefix hashing). */
#define RTE_ETH_RSS_L3_PRE32       (1ULL << 57)
#define RTE_ETH_RSS_L3_PRE40       (1ULL << 56)
#define RTE_ETH_RSS_L3_PRE48       (1ULL << 55)
#define RTE_ETH_RSS_L3_PRE56       (1ULL << 54)
#define RTE_ETH_RSS_L3_PRE64       (1ULL << 53)
#define RTE_ETH_RSS_L3_PRE96       (1ULL << 52)

/* RSS level: which encapsulation layer the hash is computed on. */
#define ETH_RSS_LEVEL_PMD_DEFAULT  (0ULL << 50)
#define ETH_RSS_LEVEL_OUTERMOST    (1ULL << 50)
#define ETH_RSS_LEVEL_INNERMOST    (2ULL << 50)
#define ETH_RSS_LEVEL_MASK         (3ULL << 50)
#define ETH_RSS_LEVEL(rss_hf) ((rss_hf & ETH_RSS_LEVEL_MASK) >> 50)

/**
 * If the SRC_ONLY and DST_ONLY bits of the same layer are both set, they are
 * cleared and the full field is hashed instead.
 */
static inline uint64_t
rte_eth_rss_hf_refine(uint64_t rss_hf)
{
	if ((rss_hf & ETH_RSS_L3_SRC_ONLY) && (rss_hf & ETH_RSS_L3_DST_ONLY))
		rss_hf &= ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);

	if ((rss_hf & ETH_RSS_L4_SRC_ONLY) && (rss_hf & ETH_RSS_L4_DST_ONLY))
		rss_hf &= ~(ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);

	return rss_hf;
}
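/*
 * Illustrative sketch, not part of rte_ethdev.h: build an rss_hf value that
 * hashes only the inner IPv4 source address of IPv4/UDP flows and pass it
 * through rte_eth_rss_hf_refine(). Had both ETH_RSS_L3_SRC_ONLY and
 * ETH_RSS_L3_DST_ONLY been set, refine() would clear both so the whole
 * address pair is hashed. The example_* name is hypothetical.
 */
static uint64_t
example_build_rss_hf(void)
{
	uint64_t rss_hf = ETH_RSS_NONFRAG_IPV4_UDP |
			  ETH_RSS_L3_SRC_ONLY |
			  ETH_RSS_LEVEL_INNERMOST;

	return rte_eth_rss_hf_refine(rss_hf);
}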
/* IPv6 prefix-hashing combinations. */
#define ETH_RSS_IPV6_PRE32      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE32)
#define ETH_RSS_IPV6_PRE40      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE40)
#define ETH_RSS_IPV6_PRE48      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE48)
#define ETH_RSS_IPV6_PRE56      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE56)
#define ETH_RSS_IPV6_PRE64      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE64)
#define ETH_RSS_IPV6_PRE96      (ETH_RSS_IPV6 | RTE_ETH_RSS_L3_PRE96)

#define ETH_RSS_IPV6_PRE32_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE32)
#define ETH_RSS_IPV6_PRE40_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE40)
#define ETH_RSS_IPV6_PRE48_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE48)
#define ETH_RSS_IPV6_PRE56_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE56)
#define ETH_RSS_IPV6_PRE64_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE64)
#define ETH_RSS_IPV6_PRE96_UDP  (ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_PRE96)

#define ETH_RSS_IPV6_PRE32_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE32)
#define ETH_RSS_IPV6_PRE40_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE40)
#define ETH_RSS_IPV6_PRE48_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE48)
#define ETH_RSS_IPV6_PRE56_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE56)
#define ETH_RSS_IPV6_PRE64_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE64)
#define ETH_RSS_IPV6_PRE96_TCP  (ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_PRE96)

#define ETH_RSS_IPV6_PRE32_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE32)
#define ETH_RSS_IPV6_PRE40_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE40)
#define ETH_RSS_IPV6_PRE48_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE48)
#define ETH_RSS_IPV6_PRE56_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE56)
#define ETH_RSS_IPV6_PRE64_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE64)
#define ETH_RSS_IPV6_PRE96_SCTP (ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_PRE96)

#define ETH_RSS_IP ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_IPV6_EX)

#define ETH_RSS_UDP ( \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_IPV6_UDP_EX)

#define ETH_RSS_TCP ( \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_IPV6_TCP_EX)

#define ETH_RSS_SCTP ( \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV6_SCTP)

#define ETH_RSS_TUNNEL ( \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

#define ETH_RSS_VLAN ( \
	ETH_RSS_S_VLAN | \
	ETH_RSS_C_VLAN)

/** Mask of valid RSS hash protocols */
#define ETH_RSS_PROTO_MASK ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_NONFRAG_IPV6_SCTP | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_L2_PAYLOAD | \
	ETH_RSS_IPV6_EX | \
	ETH_RSS_IPV6_TCP_EX | \
	ETH_RSS_IPV6_UDP_EX | \
	ETH_RSS_PORT)

/* Definitions used for redirection table entry size. */
#define ETH_RSS_RETA_SIZE_64  64
#define ETH_RSS_RETA_SIZE_128 128
#define ETH_RSS_RETA_SIZE_256 256
#define ETH_RSS_RETA_SIZE_512 512
#define RTE_RETA_GROUP_SIZE   64

/* Definitions used for VMDq and DCB functionality. */
#define ETH_VMDQ_MAX_VLAN_FILTERS   64
#define ETH_DCB_NUM_USER_PRIORITIES 8
#define ETH_VMDQ_DCB_NUM_QUEUES     128
#define ETH_DCB_NUM_QUEUES          128

/* DCB capability defines */
#define ETH_DCB_PG_SUPPORT  0x00000001
#define ETH_DCB_PFC_SUPPORT 0x00000002

/* Definitions used for VLAN offload functionality */
#define ETH_VLAN_STRIP_OFFLOAD  0x0001
#define ETH_VLAN_FILTER_OFFLOAD 0x0002
#define ETH_VLAN_EXTEND_OFFLOAD 0x0004
#define ETH_QINQ_STRIP_OFFLOAD  0x0008

/* Definitions used for VLAN mask setting */
#define ETH_VLAN_STRIP_MASK  0x0001
#define ETH_VLAN_FILTER_MASK 0x0002
#define ETH_VLAN_EXTEND_MASK 0x0004
#define ETH_QINQ_STRIP_MASK  0x0008
#define ETH_VLAN_ID_MAX      0x0FFF

/* Definitions used for receive MAC address */
#define ETH_NUM_RECEIVE_MAC_ADDR 128

/* Definitions used for unicast hash */
#define ETH_VMDQ_NUM_UC_HASH_ARRAY 128

/* Definitions used for VMDq pool Rx mode setting */
#define ETH_VMDQ_ACCEPT_UNTAG     0x0001
#define ETH_VMDQ_ACCEPT_HASH_MC   0x0002
#define ETH_VMDQ_ACCEPT_HASH_UC   0x0004
#define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
#define ETH_VMDQ_ACCEPT_MULTICAST 0x0010

/** Maximum number of VLANs per mirror rule */
#define ETH_MIRROR_MAX_VLANS 64

#define ETH_MIRROR_VIRTUAL_POOL_UP   0x01
#define ETH_MIRROR_UPLINK_PORT       0x02
#define ETH_MIRROR_DOWNLINK_PORT     0x04
#define ETH_MIRROR_VLAN              0x08
#define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10

/** A structure used to configure VLAN traffic mirroring of an Ethernet port. */
struct rte_eth_vlan_mirror {
	uint64_t vlan_mask; /**< Mask of valid VLAN ID entries. */
	uint16_t vlan_id[ETH_MIRROR_MAX_VLANS]; /**< VLAN ID list for mirroring. */
};

/** A structure used to configure 64 entries of the redirection table. */
struct rte_eth_rss_reta_entry64 {
	uint64_t mask; /**< Bits indicating which entries need to be updated. */
	uint16_t reta[RTE_RETA_GROUP_SIZE]; /**< Group of 64 redirection table entries. */
};
/** A structure used to configure DCB in Rx. */
struct rte_eth_dcb_rx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< TC each UP is mapped to. */
};

/** A structure used to configure VMDq+DCB in Tx. */
struct rte_eth_vmdq_dcb_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< TC each UP is mapped to. */
};

/** A structure used to configure DCB in Tx. */
struct rte_eth_dcb_tx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< TC each UP is mapped to. */
};

/** A structure used to configure VMDq in Tx. */
struct rte_eth_vmdq_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< VMDq mode, 64 pools. */
};
/** A structure used to configure the Tx features of an Ethernet port. */
struct rte_eth_txmode {
	/* ... (mq_mode, offloads, pvid) */
	__extension__
	uint8_t hw_vlan_reject_tagged : 1,
		/**< If set, reject sending out tagged packets. */
		hw_vlan_reject_untagged : 1,
		/**< If set, reject sending out untagged packets. */
		hw_vlan_insert_pvid : 1;
		/**< If set, enable port-based VLAN insertion. */

	uint64_t reserved_64s[2]; /**< Reserved for future fields */
	void *reserved_ptrs[2];   /**< Reserved for future fields */
};
/** A structure used to configure an Rx ring of an Ethernet port. */
struct rte_eth_rxconf {
	/* ... (rx_thresh, rx_free_thresh, rx_drop_en, rx_deferred_start,
	 * rx_nseg, offloads, rx_seg) */
	uint64_t reserved_64s[2]; /**< Reserved for future fields */
	void *reserved_ptrs[2];   /**< Reserved for future fields */
};

/** A structure used to configure a Tx ring of an Ethernet port. */
struct rte_eth_txconf {
	/* ... (tx_thresh, tx_rs_thresh, tx_free_thresh, tx_deferred_start,
	 * offloads) */
	uint64_t reserved_64s[2]; /**< Reserved for future fields */
	void *reserved_ptrs[2];   /**< Reserved for future fields */
};
#define RTE_ETH_MAX_HAIRPIN_PEERS 32

/** Tunnel type for device-specific classifier configuration. */
enum rte_eth_tunnel_type {
	RTE_TUNNEL_TYPE_NONE = 0,
	RTE_TUNNEL_TYPE_VXLAN,
	RTE_TUNNEL_TYPE_GENEVE,
	RTE_TUNNEL_TYPE_TEREDO,
	RTE_TUNNEL_TYPE_NVGRE,
	RTE_TUNNEL_TYPE_IP_IN_GRE,
	RTE_L2_TUNNEL_TYPE_E_TAG,
	RTE_TUNNEL_TYPE_VXLAN_GPE,
	RTE_TUNNEL_TYPE_ECPRI,
	RTE_TUNNEL_TYPE_MAX,
};
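/*
 * Illustrative sketch, not part of rte_ethdev.h: register UDP destination
 * port 4789 as VXLAN so the PMD can recognise the encapsulation, using the
 * tunnel type above together with rte_eth_dev_udp_tunnel_port_add() (declared
 * further down). The example_* name is hypothetical; port_id is assumed to
 * refer to a configured port.
 */
static int
example_add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel_udp = {
		.udp_port = 4789,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
}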
/** A structure used to configure an Ethernet port. */
struct rte_eth_conf {
	/* ... (link_speeds, rxmode, txmode, lpbk_mode, ...) */
	struct {
		/* ... (rss_conf, vmdq_dcb_conf, ...) */
		struct rte_eth_dcb_rx_conf dcb_rx_conf;
		/**< Port DCB Rx configuration. */
		/* ... (vmdq_rx_conf) */
	} rx_adv_conf; /**< Port Rx filtering configuration. */
	union {
		struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
		/**< Port VMDq+DCB Tx configuration. */
		struct rte_eth_dcb_tx_conf dcb_tx_conf;
		/**< Port DCB Tx configuration. */
		struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
		/**< Port VMDq Tx configuration. */
	} tx_adv_conf; /**< Port Tx DCB configuration (union). */
	/* ... (dcb_capability_en, fdir_conf, intr_conf) */
};
/* Rx offload capability flags. */
#define DEV_RX_OFFLOAD_VLAN_STRIP       0x00000001
#define DEV_RX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_RX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_RX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_RX_OFFLOAD_TCP_LRO          0x00000010
#define DEV_RX_OFFLOAD_QINQ_STRIP       0x00000020
#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
#define DEV_RX_OFFLOAD_HEADER_SPLIT     0x00000100
#define DEV_RX_OFFLOAD_VLAN_FILTER      0x00000200
#define DEV_RX_OFFLOAD_VLAN_EXTEND      0x00000400
#define DEV_RX_OFFLOAD_JUMBO_FRAME      0x00000800
#define DEV_RX_OFFLOAD_SCATTER          0x00002000
#define DEV_RX_OFFLOAD_TIMESTAMP        0x00004000
#define DEV_RX_OFFLOAD_SECURITY         0x00008000
#define DEV_RX_OFFLOAD_KEEP_CRC         0x00010000
#define DEV_RX_OFFLOAD_SCTP_CKSUM       0x00020000
#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
#define DEV_RX_OFFLOAD_RSS_HASH         0x00080000
#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000

#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
				 DEV_RX_OFFLOAD_UDP_CKSUM | \
				 DEV_RX_OFFLOAD_TCP_CKSUM)
#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
			     DEV_RX_OFFLOAD_VLAN_FILTER | \
			     DEV_RX_OFFLOAD_VLAN_EXTEND | \
			     DEV_RX_OFFLOAD_QINQ_STRIP)

/* Tx offload capability flags. */
#define DEV_TX_OFFLOAD_VLAN_INSERT      0x00000001
#define DEV_TX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_TX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_TX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_TX_OFFLOAD_SCTP_CKSUM       0x00000010
#define DEV_TX_OFFLOAD_TCP_TSO          0x00000020
#define DEV_TX_OFFLOAD_UDP_TSO          0x00000040
#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
#define DEV_TX_OFFLOAD_QINQ_INSERT      0x00000100
#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200
#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400
#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800
#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000
#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
#define DEV_TX_OFFLOAD_MT_LOCKFREE      0x00004000
#define DEV_TX_OFFLOAD_MULTI_SEGS       0x00008000
#define DEV_TX_OFFLOAD_MBUF_FAST_FREE   0x00010000
#define DEV_TX_OFFLOAD_SECURITY         0x00020000
#define DEV_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
#define DEV_TX_OFFLOAD_IP_TNL_TSO       0x00080000
#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000

/* Device capability flags. */
#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002

/* Fallback default ring/queue parameters. */
#define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
#define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
#define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
#define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1

#define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)

/** Ethernet device information (excerpt). */
struct rte_eth_dev_info {
	/* ... (device, driver_name, MTU and queue limits, offload
	 * capabilities, default configurations, ...) */
	uint32_t max_hash_mac_addrs;
	/**< Maximum number of hash MAC addresses for MTA and UTA. */
	/* ... */
	uint64_t reserved_64s[2]; /**< Reserved for future fields */
	void *reserved_ptrs[2];   /**< Reserved for future fields */
};
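/*
 * Illustrative sketch, not part of rte_ethdev.h: only request an Rx offload
 * after checking it against the capabilities reported by
 * rte_eth_dev_info_get(). The example_* name is hypothetical; <errno.h> is
 * assumed for ENOTSUP.
 */
static int
example_request_rx_checksum(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* All three checksum offloads must be supported for the combined flag. */
	if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_CHECKSUM) !=
			DEV_RX_OFFLOAD_CHECKSUM)
		return -ENOTSUP;

	conf->rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}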
/* Queue states. */
#define RTE_ETH_QUEUE_STATE_STOPPED 0
#define RTE_ETH_QUEUE_STATE_STARTED 1
#define RTE_ETH_QUEUE_STATE_HAIRPIN 2

#define RTE_ETH_BURST_FLAG_PER_QUEUE (1ULL << 0)

/** Ethernet device Rx/Tx queue packet burst mode information. */
struct rte_eth_burst_mode {
	uint64_t flags; /**< The ORed values of RTE_ETH_BURST_FLAG_*. */
#define RTE_ETH_BURST_MODE_INFO_SIZE 1024
	char info[RTE_ETH_BURST_MODE_INFO_SIZE]; /**< Burst mode information. */
};

#define RTE_ETH_XSTATS_NAME_SIZE 64

#define ETH_DCB_NUM_TCS   8
#define ETH_MAX_VMDQ_POOL 64

/** A structure used to get the queue-to-TC mapping on both Tx and Rx paths. */
struct rte_eth_dcb_tc_queue_mapping {
	/** Rx queues assigned to TC per pool. */
	struct {
		uint16_t base;
		uint16_t nb_queue;
	} tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
	/** Tx queues assigned to TC per pool. */
	struct {
		uint16_t base;
		uint16_t nb_queue;
	} tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
};

/** A structure used to get the DCB information of an Ethernet port. */
struct rte_eth_dcb_info {
	uint8_t nb_tcs; /**< Number of TCs. */
	uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to TC mapping. */
	uint8_t tc_bws[ETH_DCB_NUM_TCS]; /**< Tx bandwidth percentage per TC. */
	struct rte_eth_dcb_tc_queue_mapping tc_queue; /**< Queues assigned to TCs. */
};

/* Translate from FEC mode to FEC capa */
#define RTE_ETH_FEC_MODE_TO_CAPA(x) (1U << (x))

/* This macro indicates FEC capa mask */
#define RTE_ETH_FEC_MODE_CAPA_MASK(x) (1U << (RTE_ETH_FEC_ ## x))

/** A structure used to get FEC capabilities per link speed. */
struct rte_eth_fec_capa {
	uint32_t speed; /**< Link speed (see ETH_SPEED_NUM_*). */
	uint32_t capa;  /**< FEC capabilities bitmask. */
};
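/*
 * Illustrative sketch, not part of rte_ethdev.h: query the FEC capabilities
 * per link speed and, if automatic FEC selection is reported anywhere,
 * request it. The example_* name is hypothetical; RTE_ETH_FEC_AUTO comes from
 * enum rte_eth_fec_mode (declared elsewhere in this header), RTE_DIM from
 * rte_common.h and ENOTSUP from <errno.h>.
 */
static int
example_enable_auto_fec(uint16_t port_id)
{
	struct rte_eth_fec_capa speed_fec_capa[8];
	int num, i;

	num = rte_eth_fec_get_capability(port_id, speed_fec_capa,
					 RTE_DIM(speed_fec_capa));
	if (num < 0)
		return num;

	for (i = 0; i < num; i++)
		if (speed_fec_capa[i].capa & RTE_ETH_FEC_MODE_CAPA_MASK(AUTO))
			return rte_eth_fec_set(port_id,
					RTE_ETH_FEC_MODE_CAPA_MASK(AUTO));

	return -ENOTSUP;
}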
#define RTE_ETH_ALL RTE_MAX_ETHPORTS

/* Macros to check for a valid port on both debug and non-debug builds. */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return retval; \
	} \
} while (0)

#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return; \
	} \
} while (0)

/* L2 tunnel configuration masks. */
#define ETH_L2_TUNNEL_ENABLE_MASK     0x00000001
#define ETH_L2_TUNNEL_INSERTION_MASK  0x00000002
#define ETH_L2_TUNNEL_STRIPPING_MASK  0x00000004
#define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008

/** Function type used for Rx packet processing callbacks. */
typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
	void *user_param);

/** Function type used for Tx packet processing callbacks. */
typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
/** A structure used to retrieve the SR-IOV configuration of a port. */
struct rte_eth_dev_sriov {
	uint8_t active;          /**< SR-IOV is active with 16, 32 or 64 pools. */
	uint8_t nb_q_per_pool;   /**< Rx queue number per pool. */
	uint16_t def_vmdq_idx;   /**< Default pool number used for PF. */
	uint16_t def_pool_q_idx; /**< Default pool queue start register index. */
};
#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)

#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN

#define RTE_ETH_DEV_NO_OWNER 0

#define RTE_ETH_MAX_OWNER_NAME_LEN 64

/** A structure describing an Ethernet device owner. */
struct rte_eth_dev_owner {
	uint64_t id; /**< The owner unique identifier. */
	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< The owner name. */
};

/* Device flags. */
#define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE  0x0001
#define RTE_ETH_DEV_INTR_LSC              0x0002
#define RTE_ETH_DEV_BONDED_SLAVE          0x0004
#define RTE_ETH_DEV_INTR_RMV              0x0008
#define RTE_ETH_DEV_REPRESENTOR           0x0010
#define RTE_ETH_DEV_NOLIVE_MAC_ADDR       0x0020
#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS 0x0040

uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
		const uint64_t owner_id);

/** Macro to iterate over all enabled ethdev ports owned by a specific owner. */
#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
	for (p = rte_eth_find_next_owned_by(0, o); \
	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
	     p = rte_eth_find_next_owned_by(p + 1, o))

/** Macro to iterate over all enabled and ownerless ethdev ports. */
#define RTE_ETH_FOREACH_DEV(p) \
	RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)

/** Macro to iterate over all ethdev ports of a specified device. */
#define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
	for (port_id = rte_eth_find_next_of(0, parent); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next_of(port_id + 1, parent))

/** Macro to iterate over all ethdev ports sharing the same device as the reference port. */
#define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
	for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))

__rte_experimental
int rte_eth_dev_owner_set(const uint16_t port_id,
		const struct rte_eth_dev_owner *owner);
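/*
 * Illustrative sketch, not part of rte_ethdev.h: walk every usable, ownerless
 * port with RTE_ETH_FOREACH_DEV and print its name. The example_* name is
 * hypothetical; <stdio.h> is assumed for printf().
 */
static void
example_list_ports(void)
{
	uint16_t port_id;
	char name[RTE_ETH_NAME_MAX_LEN];

	RTE_ETH_FOREACH_DEV(port_id) {
		if (rte_eth_dev_get_name_by_port(port_id, name) == 0)
			printf("port %u: %s\n", port_id, name);
	}
}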
__rte_experimental
int rte_eth_dev_owner_unset(const uint16_t port_id,
		const uint64_t owner_id);

__rte_experimental
int rte_eth_dev_owner_get(const uint16_t port_id,
		struct rte_eth_dev_owner *owner);

int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
		uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);

int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);

__rte_experimental
int rte_eth_rx_hairpin_queue_setup
	(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
	 const struct rte_eth_hairpin_conf *conf);

int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);

__rte_experimental
int rte_eth_tx_hairpin_queue_setup
	(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
	 const struct rte_eth_hairpin_conf *conf);

__rte_experimental
int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
		size_t len, uint32_t direction);
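/*
 * Illustrative bring-up sketch, not part of rte_ethdev.h: configure a port
 * with one Rx and one Tx queue using default queue settings, then start it.
 * The example_* name is hypothetical; mb_pool is assumed to have been created
 * with rte_pktmbuf_pool_create() elsewhere, and 1024 descriptors per ring is
 * an arbitrary choice.
 */
static int
example_port_init(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf port_conf = { 0 };
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mb_pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
				     rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;

	return rte_eth_dev_start(port_id);
}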
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
		uint64_t *values, unsigned int size);
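/*
 * Illustrative sketch, not part of rte_ethdev.h: resolve one extended
 * statistic by name and read its current value through the by-id interface.
 * The example_* name is hypothetical; "rx_good_packets" is a commonly exposed
 * xstat but its availability depends on the PMD.
 */
static int
example_read_xstat(uint16_t port_id, uint64_t *value)
{
	uint64_t id;
	int ret;

	ret = rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id);
	if (ret != 0)
		return ret;

	return rte_eth_xstats_get_by_id(port_id, &id, value, 1);
}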
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
		uint16_t tx_queue_id, uint8_t stat_idx);

int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
		uint16_t rx_queue_id, uint8_t stat_idx);

int rte_eth_dev_fw_version_get(uint16_t port_id,
		char *fw_version, size_t fw_size);

int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
		uint32_t *ptypes, int num);

__rte_experimental
int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
		uint32_t *set_ptypes, unsigned int num);
/** Callback function invoked for packets that could not be buffered/sent. */
typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent,
		uint16_t count, void *userdata);

/** Structure used to buffer packets for future Tx. Used by APIs
 * rte_eth_tx_buffer() and rte_eth_tx_buffer_flush().
 */
struct rte_eth_dev_tx_buffer {
	buffer_tx_error_fn error_callback;
	void *error_userdata;
	uint16_t size;   /**< Size of buffer for buffered Tx. */
	uint16_t length; /**< Number of packets currently in the buffer. */
	struct rte_mbuf *pkts[];
	/**< Pending packets, sent on explicit flush or when the buffer is full. */
};

/** Calculate the size of the Tx buffer needed to hold sz packets. */
#define RTE_ETH_TX_BUFFER_SIZE(sz) \
	(sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))

int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn callback, void *userdata);
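/*
 * Illustrative sketch, not part of rte_ethdev.h: allocate and initialise a Tx
 * buffer for up to 32 packets, and count (rather than silently drop) packets
 * that could not be sent by registering the library-provided
 * rte_eth_tx_buffer_count_callback(). The example_* name is hypothetical;
 * <rte_malloc.h> is assumed for rte_zmalloc()/rte_free().
 */
static struct rte_eth_dev_tx_buffer *
example_tx_buffer_create(uint64_t *drop_counter)
{
	struct rte_eth_dev_tx_buffer *buffer;

	buffer = rte_zmalloc("example_tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0);
	if (buffer == NULL)
		return NULL;

	if (rte_eth_tx_buffer_init(buffer, 32) != 0 ||
	    rte_eth_tx_buffer_set_err_callback(buffer,
			rte_eth_tx_buffer_count_callback, drop_counter) != 0) {
		rte_free(buffer);
		return NULL;
	}

	return buffer;
}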
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
		int epfd, int op, void *data);

__rte_experimental
int rte_eth_fec_get_capability(uint16_t port_id,
		struct rte_eth_fec_capa *speed_fec_capa, unsigned int num);

int rte_eth_dev_rss_reta_update(uint16_t port_id,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);

int rte_eth_dev_rss_reta_query(uint16_t port_id,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);

/** Opaque structure describing an Rx/Tx packet callback. */
struct rte_eth_rxtx_callback;

const struct rte_eth_rxtx_callback *
rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param);

const struct rte_eth_rxtx_callback *
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param);

const struct rte_eth_rxtx_callback *
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param);

int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb);

int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb);
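/*
 * Illustrative sketch, not part of rte_ethdev.h: count received packets on
 * queue 0 with a post-Rx callback and then remove it again. The example_*
 * names are hypothetical. Note that removal does not synchronise with
 * data-path threads; in real code the callback memory may only be reused once
 * no lcore can still be executing it.
 */
static uint16_t
example_count_cb(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	uint64_t *counter = user_param;

	RTE_SET_USED(port_id);
	RTE_SET_USED(queue);
	RTE_SET_USED(pkts);
	RTE_SET_USED(max_pkts);

	*counter += nb_pkts;
	return nb_pkts;
}

static void
example_use_rx_callback(uint16_t port_id, uint64_t *counter)
{
	const struct rte_eth_rxtx_callback *cb;

	cb = rte_eth_add_rx_callback(port_id, 0, example_count_cb, counter);
	if (cb != NULL)
		rte_eth_remove_rx_callback(port_id, 0, cb);
}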
__rte_experimental
int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
		struct rte_power_monitor_cond *pmc);

__rte_experimental
int rte_eth_dev_get_module_eeprom(uint16_t port_id,
		struct rte_dev_eeprom_info *info);

int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
		struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr);

int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
		struct timespec *timestamp, uint32_t flags);

int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
		struct timespec *timestamp);

int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
		uint16_t *nb_rx_desc, uint16_t *nb_tx_desc);

char name[RTE_DEV_NAME_MAX_LEN];
static inline uint16_t
rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	uint16_t nb_rx;

#ifdef RTE_ETHDEV_DEBUG_RX
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
		return 0;
	}
#endif
	nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
				     rx_pkts, nb_pkts);

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	struct rte_eth_rxtx_callback *cb;

	/* The callback list is stored with release ordering; the data
	 * dependency on cb makes a relaxed load sufficient here. */
	cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id],
			     __ATOMIC_RELAXED);
	if (unlikely(cb != NULL)) {
		do {
			nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
					  nb_pkts, cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
	return nb_rx;
}
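/*
 * Illustrative sketch, not part of rte_ethdev.h: drain one Rx queue with a
 * single burst of up to 32 packets, accumulate the received byte count and
 * free the mbufs. The example_* name is hypothetical; <rte_mbuf.h> is assumed
 * for rte_pktmbuf_pkt_len() and rte_pktmbuf_free().
 */
static uint64_t
example_rx_drain(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint64_t bytes = 0;
	uint16_t nb_rx, i;

	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, RTE_DIM(pkts));
	for (i = 0; i < nb_rx; i++) {
		bytes += rte_pktmbuf_pkt_len(pkts[i]);
		rte_pktmbuf_free(pkts[i]);
	}

	return bytes;
}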
static inline int
rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_queue_count, -ENOTSUP);
	if (queue_id >= dev->data->nb_rx_queues ||
	    dev->data->rx_queues[queue_id] == NULL)
		return -EINVAL;

	return (int)(*dev->rx_queue_count)(dev, queue_id);
}
__rte_deprecated
static inline int
rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_done, -ENOTSUP);
	return (*dev->rx_descriptor_done)(dev->data->rx_queues[queue_id], offset);
}

#define RTE_ETH_RX_DESC_AVAIL   0 /**< Descriptor available for the hardware. */
#define RTE_ETH_RX_DESC_DONE    1 /**< Descriptor done, filled by the hardware. */
#define RTE_ETH_RX_DESC_UNAVAIL 2 /**< Descriptor used by the driver or hardware. */

static inline int
rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
	uint16_t offset)
{
	struct rte_eth_dev *dev;
	void *rxq;

#ifdef RTE_ETHDEV_DEBUG_RX
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
	dev = &rte_eth_devices[port_id];
#ifdef RTE_ETHDEV_DEBUG_RX
	if (queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;
#endif
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_status, -ENOTSUP);
	rxq = dev->data->rx_queues[queue_id];

	return (*dev->rx_descriptor_status)(rxq, offset);
}

#define RTE_ETH_TX_DESC_FULL    0 /**< Descriptor filled, waiting for transmit. */
#define RTE_ETH_TX_DESC_DONE    1 /**< Descriptor done, packet transmitted. */
#define RTE_ETH_TX_DESC_UNAVAIL 2 /**< Descriptor used by the driver or hardware. */

static inline int
rte_eth_tx_descriptor_status(uint16_t port_id,
	uint16_t queue_id, uint16_t offset)
{
	struct rte_eth_dev *dev;
	void *txq;

#ifdef RTE_ETHDEV_DEBUG_TX
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
	dev = &rte_eth_devices[port_id];
#ifdef RTE_ETHDEV_DEBUG_TX
	if (queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;
#endif
	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_descriptor_status, -ENOTSUP);
	txq = dev->data->tx_queues[queue_id];

	return (*dev->tx_descriptor_status)(txq, offset);
}
static inline uint16_t
rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_ETHDEV_DEBUG_TX
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
		return 0;
	}
#endif

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	struct rte_eth_rxtx_callback *cb;

	/* The callback list is stored with release ordering; the data
	 * dependency on cb makes a relaxed load sufficient here. */
	cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id],
			     __ATOMIC_RELAXED);
	if (unlikely(cb != NULL)) {
		do {
			nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
					    cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
}
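/*
 * Illustrative sketch, not part of rte_ethdev.h: transmit a burst, retrying
 * the tail the driver did not accept, and freeing anything still unsent after
 * a few retries. The example_* name is hypothetical; <rte_mbuf.h> is assumed
 * for rte_pktmbuf_free(), and the retry count is arbitrary.
 */
static void
example_tx_all(uint16_t port_id, uint16_t queue_id,
	       struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;
	int retries = 3;

	while (sent < nb_pkts && retries-- > 0)
		sent += rte_eth_tx_burst(port_id, queue_id,
					 pkts + sent, nb_pkts - sent);

	while (sent < nb_pkts)
		rte_pktmbuf_free(pkts[sent++]);
}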
#ifndef RTE_ETHDEV_TX_PREPARE_NOOP

static inline uint16_t
rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

#ifdef RTE_ETHDEV_DEBUG_TX
	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
		rte_errno = ENODEV;
		return 0;
	}
#endif

	dev = &rte_eth_devices[port_id];

#ifdef RTE_ETHDEV_DEBUG_TX
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
		rte_errno = EINVAL;
		return 0;
	}
#endif

	if (!dev->tx_pkt_prepare)
		return nb_pkts;

	return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
			tx_pkts, nb_pkts);
}

#else

/* No-op variant used when RTE_ETHDEV_TX_PREPARE_NOOP is defined. */
static inline uint16_t
rte_eth_tx_prepare(__rte_unused uint16_t port_id,
		__rte_unused uint16_t queue_id,
		__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	return nb_pkts;
}

#endif
static inline uint16_t
rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_dev_tx_buffer *buffer)
{
	uint16_t sent;
	uint16_t to_send = buffer->length;

	if (to_send == 0)
		return 0;

	sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);

	buffer->length = 0;

	/* All packets sent, or remainder handed to the error callback. */
	if (unlikely(sent != to_send))
		buffer->error_callback(&buffer->pkts[sent],
				       (uint16_t)(to_send - sent),
				       buffer->error_userdata);

	return sent;
}
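/*
 * Illustrative sketch, not part of rte_ethdev.h: queue single packets through
 * rte_eth_tx_buffer() and push any remainder out explicitly with
 * rte_eth_tx_buffer_flush(), for example at the end of a polling iteration.
 * The example_* name is hypothetical; buffer is assumed to have been set up
 * with rte_eth_tx_buffer_init().
 */
static void
example_buffered_tx(uint16_t port_id, uint16_t queue_id,
		    struct rte_eth_dev_tx_buffer *buffer,
		    struct rte_mbuf *pkt)
{
	/* Packets are sent automatically once the buffer fills up. */
	rte_eth_tx_buffer(port_id, queue_id, buffer, pkt);

	/* Force out whatever is still pending. */
	rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
}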
int rte_eth_dev_stop(uint16_t port_id)
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_promiscuous_disable(uint16_t port_id)
__extension__ uint32_t multi_pools
#define ETH_VMDQ_MAX_VLAN_FILTERS
#define __rte_always_inline
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
const uint32_t * dev_flags
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
__rte_experimental int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
#define __rte_cache_min_aligned
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
int rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
__rte_experimental int rte_eth_dev_owner_new(uint64_t *owner_id)
__rte_experimental int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
uint16_t rte_eth_find_next(uint16_t port_id)
__rte_experimental int rte_eth_dev_is_removed(uint16_t port_id)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
int rte_eth_led_off(uint16_t port_id)
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
__rte_experimental uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint16_t tx_rate)
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
__rte_experimental int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
uint64_t rx_queue_offload_capa
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint64_t tx_queue_offload_capa
int rte_eth_mirror_rule_set(uint16_t port_id, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on)
uint8_t enable_default_pool
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
int rte_eth_dev_close(uint16_t port_id)
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
uint32_t dcb_capability_en
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
int rte_eth_dev_set_link_up(uint16_t port_id)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
__rte_experimental int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
__rte_experimental int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint8_t rx_deferred_start
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
#define RTE_ETH_XSTATS_NAME_SIZE
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
int rte_eth_timesync_disable(uint16_t port_id)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
__rte_experimental int rte_eth_representor_info_get(uint16_t port_id, struct rte_eth_representor_info *info)
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num)
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_allmulticast_enable(uint16_t port_id)
__rte_experimental int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, struct rte_power_monitor_cond *pmc)
uint32_t offset_align_log2
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
__rte_experimental uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
int rte_eth_allmulticast_disable(uint16_t port_id)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_start(uint16_t port_id)
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
__rte_experimental int rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
#define ETH_DCB_NUM_USER_PRIORITIES
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_xstats_reset(uint16_t port_id)
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction)
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
uint64_t flow_type_rss_offloads
uint16_t rte_eth_dev_count_total(void)
int rte_eth_promiscuous_enable(uint16_t port_id)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
union rte_eth_rxseg * rx_seg
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
__rte_experimental int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
#define ETH_MQ_RX_RSS_FLAG
rte_eth_event_ipsec_subtype
#define ETH_MIRROR_MAX_VLANS
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
__rte_experimental const char * rte_eth_link_speed_to_str(uint32_t link_speed)
__rte_experimental int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_socket_id(uint16_t port_id)
uint8_t enable_default_pool
__extension__ struct rte_eth_link __rte_aligned(8)
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
__rte_experimental int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
__rte_experimental int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
__rte_experimental int rte_eth_dev_owner_delete(const uint64_t owner_id)
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
static __rte_deprecated int rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
uint8_t mac_ctrl_frame_fwd
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
uint8_t tx_deferred_start
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
uint32_t max_lro_pkt_size
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
int rte_eth_allmulticast_get(uint16_t port_id)
int rte_eth_dev_is_valid_port(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
uint32_t max_lro_pkt_size
int rte_eth_timesync_enable(uint16_t port_id)
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_experimental int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
__rte_experimental int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
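/*
 * Illustrative sketch using the link query helpers listed above, not part of
 * rte_ethdev.h: read the link state without waiting for auto-negotiation to
 * complete and format it for logging. The example_* name is hypothetical.
 */
static void
example_log_link(uint16_t port_id)
{
	struct rte_eth_link link;
	char text[RTE_ETH_LINK_MAX_STR_LEN];

	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
		rte_eth_link_to_str(text, sizeof(text), &link);
		RTE_ETHDEV_LOG(INFO, "port %u: %s\n", port_id, text);
	}
}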