#define RTE_ETHDEV_HAS_LRO_SUPPORT

#include <rte_compat.h>
#include <rte_config.h>
#include "rte_dev_info.h"
	uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
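/*
 * Usage sketch (illustrative, not part of this header): the q_* arrays above
 * are the per-queue counters of struct rte_eth_stats.  A hypothetical reader
 * of the first RX queue's counters could look like this, assuming the port
 * has already been started:
 *
 * @code
 * struct rte_eth_stats stats;
 *
 * if (rte_eth_stats_get(port_id, &stats) == 0)
 *         printf("q0: %" PRIu64 " pkts, %" PRIu64 " errors\n",
 *                stats.q_ipackets[0], stats.q_errors[0]);
 * @endcode
 */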
#define ETH_LINK_SPEED_AUTONEG  (0 <<  0)
#define ETH_LINK_SPEED_FIXED    (1 <<  0)
#define ETH_LINK_SPEED_10M_HD   (1 <<  1)
#define ETH_LINK_SPEED_10M      (1 <<  2)
#define ETH_LINK_SPEED_100M_HD  (1 <<  3)
#define ETH_LINK_SPEED_100M     (1 <<  4)
#define ETH_LINK_SPEED_1G       (1 <<  5)
#define ETH_LINK_SPEED_2_5G     (1 <<  6)
#define ETH_LINK_SPEED_5G       (1 <<  7)
#define ETH_LINK_SPEED_10G      (1 <<  8)
#define ETH_LINK_SPEED_20G      (1 <<  9)
#define ETH_LINK_SPEED_25G      (1 << 10)
#define ETH_LINK_SPEED_40G      (1 << 11)
#define ETH_LINK_SPEED_50G      (1 << 12)
#define ETH_LINK_SPEED_56G      (1 << 13)
#define ETH_LINK_SPEED_100G     (1 << 14)

#define ETH_SPEED_NUM_NONE      0
#define ETH_SPEED_NUM_10M      10
#define ETH_SPEED_NUM_100M    100
#define ETH_SPEED_NUM_1G     1000
#define ETH_SPEED_NUM_2_5G   2500
#define ETH_SPEED_NUM_5G     5000
#define ETH_SPEED_NUM_10G   10000
#define ETH_SPEED_NUM_20G   20000
#define ETH_SPEED_NUM_25G   25000
#define ETH_SPEED_NUM_40G   40000
#define ETH_SPEED_NUM_50G   50000
#define ETH_SPEED_NUM_56G   56000
#define ETH_SPEED_NUM_100G 100000
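/*
 * Usage sketch (illustrative, not part of this header): ETH_LINK_SPEED_* are
 * bitmap values for rte_eth_conf.link_speeds, while ETH_SPEED_NUM_* are the
 * plain Mbps numbers reported back in rte_eth_link.link_speed.  Forcing a
 * fixed 10G link instead of autonegotiation might look like:
 *
 * @code
 * struct rte_eth_conf conf = { 0 };
 *
 * conf.link_speeds = ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_10G;
 * rte_eth_dev_configure(port_id, 1, 1, &conf);
 * @endcode
 */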
} __attribute__((aligned(8)));

#define ETH_LINK_HALF_DUPLEX 0
#define ETH_LINK_FULL_DUPLEX 1
#define ETH_LINK_DOWN        0
#define ETH_LINK_UP          1
#define ETH_LINK_FIXED       0
#define ETH_LINK_AUTONEG     1
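/*
 * Usage sketch (illustrative, not part of this header): reading the link
 * state without blocking and comparing against the constants above.
 *
 * @code
 * struct rte_eth_link link;
 *
 * rte_eth_link_get_nowait(port_id, &link);
 * if (link.link_status == ETH_LINK_UP)
 *         printf("link up at %u Mbps, %s-duplex\n", link.link_speed,
 *                link.link_duplex == ETH_LINK_FULL_DUPLEX ? "full" : "half");
 * @endcode
 */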
struct rte_eth_thresh {

#define ETH_MQ_RX_RSS_FLAG  0x1
#define ETH_MQ_RX_DCB_FLAG  0x2
#define ETH_MQ_RX_VMDQ_FLAG 0x4

#define ETH_RSS    ETH_MQ_RX_RSS
#define VMDQ_DCB   ETH_MQ_RX_VMDQ_DCB
#define ETH_DCB_RX ETH_MQ_RX_DCB

#define ETH_DCB_NONE    ETH_MQ_TX_NONE
#define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
#define ETH_DCB_TX      ETH_MQ_TX_DCB

	ETH_VLAN_TYPE_UNKNOWN = 0,

#define ETH_RSS_IPV4               (1ULL << RTE_ETH_FLOW_IPV4)
#define ETH_RSS_FRAG_IPV4          (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
#define ETH_RSS_IPV6               (1ULL << RTE_ETH_FLOW_IPV6)
#define ETH_RSS_FRAG_IPV6          (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
#define ETH_RSS_L2_PAYLOAD         (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
#define ETH_RSS_IPV6_EX            (1ULL << RTE_ETH_FLOW_IPV6_EX)
#define ETH_RSS_IPV6_TCP_EX        (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
#define ETH_RSS_IPV6_UDP_EX        (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
#define ETH_RSS_PORT               (1ULL << RTE_ETH_FLOW_PORT)
#define ETH_RSS_VXLAN              (1ULL << RTE_ETH_FLOW_VXLAN)
#define ETH_RSS_GENEVE             (1ULL << RTE_ETH_FLOW_GENEVE)
#define ETH_RSS_NVGRE              (1ULL << RTE_ETH_FLOW_NVGRE)
#define ETH_RSS_IP ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_IPV6_EX)

#define ETH_RSS_UDP ( \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_IPV6_UDP_EX)

#define ETH_RSS_TCP ( \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_IPV6_TCP_EX)

#define ETH_RSS_SCTP ( \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV6_SCTP)

#define ETH_RSS_TUNNEL ( \
	ETH_RSS_VXLAN  | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

#define ETH_RSS_PROTO_MASK ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_NONFRAG_IPV6_SCTP | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_L2_PAYLOAD | \
	ETH_RSS_IPV6_EX | \
	ETH_RSS_IPV6_TCP_EX | \
	ETH_RSS_IPV6_UDP_EX | \
	ETH_RSS_PORT | \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)
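/*
 * Usage sketch (illustrative, not part of this header): enabling RSS for IP
 * and UDP traffic at configure time.  The hash key is left NULL so the PMD
 * keeps its default key.
 *
 * @code
 * struct rte_eth_conf conf = { 0 };
 *
 * conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 * conf.rx_adv_conf.rss_conf.rss_key = NULL;
 * conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_UDP;
 * rte_eth_dev_configure(port_id, nb_rx_queues, nb_tx_queues, &conf);
 * @endcode
 */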
#define ETH_RSS_RETA_SIZE_64  64
#define ETH_RSS_RETA_SIZE_128 128
#define ETH_RSS_RETA_SIZE_256 256
#define ETH_RSS_RETA_SIZE_512 512
#define RTE_RETA_GROUP_SIZE   64
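/*
 * Usage sketch (illustrative, not part of this header): spreading a
 * 128-entry redirection table round-robin over two queues.  The table is
 * carried in groups of RTE_RETA_GROUP_SIZE entries, with a per-group mask
 * selecting which entries to update.
 *
 * @code
 * struct rte_eth_rss_reta_entry64 reta_conf[2];
 * int i;
 *
 * memset(reta_conf, 0, sizeof(reta_conf));
 * for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
 *         reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                 1ULL << (i % RTE_RETA_GROUP_SIZE);
 *         reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] = i % 2;
 * }
 * rte_eth_dev_reta_update(port_id, reta_conf, ETH_RSS_RETA_SIZE_128);
 * @endcode
 */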
#define ETH_VMDQ_MAX_VLAN_FILTERS   64
#define ETH_DCB_NUM_USER_PRIORITIES 8
#define ETH_VMDQ_DCB_NUM_QUEUES     128
#define ETH_DCB_NUM_QUEUES          128

#define ETH_DCB_PG_SUPPORT  0x00000001
#define ETH_DCB_PFC_SUPPORT 0x00000002

#define ETH_VLAN_STRIP_OFFLOAD  0x0001
#define ETH_VLAN_FILTER_OFFLOAD 0x0002
#define ETH_VLAN_EXTEND_OFFLOAD 0x0004

#define ETH_VLAN_STRIP_MASK  0x0001
#define ETH_VLAN_FILTER_MASK 0x0002
#define ETH_VLAN_EXTEND_MASK 0x0004
#define ETH_VLAN_ID_MAX      0x0FFF
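/*
 * Usage sketch (illustrative, not part of this header): toggling VLAN
 * stripping at runtime using the offload bits above.
 *
 * @code
 * int vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
 *
 * vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;   // enable stripping
 * vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; // leave filtering off
 * rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
 * @endcode
 */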
#define ETH_NUM_RECEIVE_MAC_ADDR   128

#define ETH_VMDQ_NUM_UC_HASH_ARRAY 128

#define ETH_VMDQ_ACCEPT_UNTAG     0x0001
#define ETH_VMDQ_ACCEPT_HASH_MC   0x0002
#define ETH_VMDQ_ACCEPT_HASH_UC   0x0004
#define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
#define ETH_VMDQ_ACCEPT_MULTICAST 0x0010

#define ETH_MIRROR_MAX_VLANS 64

#define ETH_MIRROR_VIRTUAL_POOL_UP   0x01
#define ETH_MIRROR_UPLINK_PORT       0x02
#define ETH_MIRROR_DOWNLINK_PORT     0x04
#define ETH_MIRROR_VLAN              0x08
#define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10

struct rte_eth_vlan_mirror {

	uint16_t reta[RTE_RETA_GROUP_SIZE];

struct rte_eth_dcb_rx_conf {

struct rte_eth_vmdq_dcb_tx_conf {

struct rte_eth_dcb_tx_conf {

struct rte_eth_vmdq_tx_conf {
#define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001
#define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002
#define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004
#define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100
#define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200
#define ETH_TXQ_FLAGS_NOXSUMUDP  0x0400
#define ETH_TXQ_FLAGS_NOXSUMTCP  0x0800
#define ETH_TXQ_FLAGS_NOOFFLOADS \
		(ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
		 ETH_TXQ_FLAGS_NOXSUMUDP | ETH_TXQ_FLAGS_NOXSUMTCP)
#define ETH_TXQ_FLAGS_NOXSUMS \
		(ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
		 ETH_TXQ_FLAGS_NOXSUMTCP)

#define ETH_TXQ_FLAGS_IGNORE 0x8000
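/*
 * Usage sketch (illustrative, not part of this header): a TX queue set up
 * with the per-queue offload field.  ETH_TXQ_FLAGS_IGNORE tells the PMD to
 * take the offloads value instead of the legacy txq_flags bits.  Assumes
 * dev_info was filled in by rte_eth_dev_info_get() beforehand.
 *
 * @code
 * struct rte_eth_txconf txconf = dev_info.default_txconf;
 *
 * txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
 * txconf.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 * rte_eth_tx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
 *                        &txconf);
 * @endcode
 */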
#define DEV_RX_OFFLOAD_VLAN_STRIP       0x00000001
#define DEV_RX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_RX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_RX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_RX_OFFLOAD_TCP_LRO          0x00000010
#define DEV_RX_OFFLOAD_QINQ_STRIP       0x00000020
#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
#define DEV_RX_OFFLOAD_HEADER_SPLIT     0x00000100
#define DEV_RX_OFFLOAD_VLAN_FILTER      0x00000200
#define DEV_RX_OFFLOAD_VLAN_EXTEND      0x00000400
#define DEV_RX_OFFLOAD_JUMBO_FRAME      0x00000800
#define DEV_RX_OFFLOAD_CRC_STRIP        0x00001000
#define DEV_RX_OFFLOAD_SCATTER          0x00002000
#define DEV_RX_OFFLOAD_TIMESTAMP        0x00004000
#define DEV_RX_OFFLOAD_SECURITY         0x00008000
#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
				 DEV_RX_OFFLOAD_UDP_CKSUM | \
				 DEV_RX_OFFLOAD_TCP_CKSUM)
#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
			     DEV_RX_OFFLOAD_VLAN_FILTER | \
			     DEV_RX_OFFLOAD_VLAN_EXTEND)
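/*
 * Usage sketch (illustrative, not part of this header): requesting RX
 * checksum and VLAN stripping only if the port advertises them.  Assumes
 * dev_info was filled in by rte_eth_dev_info_get() beforehand.
 *
 * @code
 * uint64_t wanted = DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_VLAN_STRIP;
 *
 * if ((dev_info.rx_offload_capa & wanted) == wanted)
 *         conf.rxmode.offloads |= wanted;
 * @endcode
 */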
#define DEV_TX_OFFLOAD_VLAN_INSERT      0x00000001
#define DEV_TX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_TX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_TX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_TX_OFFLOAD_SCTP_CKSUM       0x00000010
#define DEV_TX_OFFLOAD_TCP_TSO          0x00000020
#define DEV_TX_OFFLOAD_UDP_TSO          0x00000040
#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
#define DEV_TX_OFFLOAD_QINQ_INSERT      0x00000100
#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200
#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400
#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800
#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000
#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
#define DEV_TX_OFFLOAD_MT_LOCKFREE      0x00004000
#define DEV_TX_OFFLOAD_MULTI_SEGS       0x00008000
#define DEV_TX_OFFLOAD_MBUF_FAST_FREE   0x00010000
#define DEV_TX_OFFLOAD_SECURITY         0x00020000
#define DEV_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
#define DEV_TX_OFFLOAD_IP_TNL_TSO       0x00080000
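/*
 * Usage sketch (illustrative, not part of this header): enabling TX checksum
 * offloads at the port level, again guarded by the advertised capabilities.
 *
 * @code
 * uint64_t wanted = DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
 *
 * if ((dev_info.tx_offload_capa & wanted) == wanted)
 *         conf.txmode.offloads |= wanted;
 * @endcode
 */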
#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002

struct rte_pci_device;

#define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
#define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
#define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
#define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1

#define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (0)

	uint32_t max_hash_mac_addrs;

#define RTE_ETH_XSTATS_NAME_SIZE 64
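/*
 * Usage sketch (illustrative, not part of this header): dumping all extended
 * statistics of a port by pairing names with values.
 *
 * @code
 * int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 * struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 * struct rte_eth_xstat *xstats = calloc(n, sizeof(*xstats));
 * int i;
 *
 * rte_eth_xstats_get_names(port_id, names, n);
 * n = rte_eth_xstats_get(port_id, xstats, n);
 * for (i = 0; i < n; i++)
 *         printf("%s: %" PRIu64 "\n", names[xstats[i].id].name, xstats[i].value);
 * @endcode
 */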
#define ETH_DCB_NUM_TCS   8
#define ETH_MAX_VMDQ_POOL 64

	} tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];

	} tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];

#define RTE_ETH_QUEUE_STATE_STOPPED 0
#define RTE_ETH_QUEUE_STATE_STARTED 1

#define RTE_ETH_ALL RTE_MAX_ETHPORTS
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return retval; \
	} \
} while (0)
#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return; \
	} \
} while (0)
#define ETH_L2_TUNNEL_ENABLE_MASK     0x00000001
#define ETH_L2_TUNNEL_INSERTION_MASK  0x00000002
#define ETH_L2_TUNNEL_STRIPPING_MASK  0x00000004
#define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008

	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,

	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);

struct rte_eth_dev_sriov {
	uint8_t nb_q_per_pool;
	uint16_t def_vmdq_idx;
	uint16_t def_pool_q_idx;

#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)

#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN

#define RTE_ETH_DEV_NO_OWNER 0

#define RTE_ETH_MAX_OWNER_NAME_LEN 64

struct rte_eth_dev_owner {
	char name[RTE_ETH_MAX_OWNER_NAME_LEN];

#define RTE_ETH_DEV_INTR_LSC     0x0002
#define RTE_ETH_DEV_BONDED_SLAVE 0x0004
#define RTE_ETH_DEV_INTR_RMV     0x0008
#define RTE_ETH_DEV_REPRESENTOR  0x0010

		const uint64_t owner_id);

#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
	for (p = rte_eth_find_next_owned_by(0, o); \
	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
	     p = rte_eth_find_next_owned_by(p + 1, o))

#define RTE_ETH_FOREACH_DEV(p) \
	RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
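/*
 * Usage sketch (illustrative, not part of this header): iterating over all
 * valid, unowned ports.
 *
 * @code
 * uint16_t port_id;
 *
 * RTE_ETH_FOREACH_DEV(port_id) {
 *         printf("found port %u\n", port_id);
 * }
 * @endcode
 */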
		const struct rte_eth_dev_owner *owner);

		const uint64_t owner_id);

		struct rte_eth_dev_owner *owner);

		uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);

int __rte_experimental

		uint16_t nb_rx_desc, unsigned int socket_id,

		uint16_t nb_tx_desc, unsigned int socket_id,
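/*
 * Usage sketch (illustrative, not part of this header): minimal single-queue
 * port bring-up with rte_eth_dev_configure(), the queue setup calls whose
 * parameter fragments appear above, and rte_eth_dev_start().  The mempool
 * name and sizes are arbitrary; NULL queue configs mean driver defaults.
 *
 * @code
 * struct rte_eth_conf conf = { 0 };
 * struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *                 RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 * rte_eth_dev_configure(port_id, 1, 1, &conf);
 * rte_eth_rx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
 *                 NULL, mp);
 * rte_eth_tx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
 *                 NULL);
 * rte_eth_dev_start(port_id);
 * @endcode
 */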
		uint64_t *values, unsigned int size);

		uint16_t tx_queue_id, uint8_t stat_idx);

		uint16_t rx_queue_id,

		char *fw_version, size_t fw_size);

		uint32_t *ptypes, int num);

typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,

	buffer_tx_error_fn error_callback;
	void *error_userdata;

#define RTE_ETH_TX_BUFFER_SIZE(sz) \
	(sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
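/*
 * Usage sketch (illustrative, not part of this header): allocating and
 * initializing a TX buffer able to hold 32 packets.
 *
 * @code
 * struct rte_eth_dev_tx_buffer *txb;
 *
 * txb = rte_zmalloc("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0);
 * if (txb != NULL)
 *         rte_eth_tx_buffer_init(txb, 32);
 * @endcode
 */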
		buffer_tx_error_fn callback, void *userdata);

		int epfd, int op, void *data);

		uint16_t reta_size);

		uint16_t reta_size);

struct rte_eth_rxtx_callback;

const struct rte_eth_rxtx_callback *

const struct rte_eth_rxtx_callback *

const struct rte_eth_rxtx_callback *

		const struct rte_eth_rxtx_callback *user_cb);

		const struct rte_eth_rxtx_callback *user_cb);

int __rte_experimental

int __rte_experimental

		struct rte_dev_eeprom_info *info);

		uint32_t nb_mc_addr);

		struct timespec *timestamp, uint32_t flags);

		struct timespec *timestamp);

		uint16_t *nb_rx_desc, uint16_t *nb_tx_desc);
static inline uint16_t
rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);

	nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
		struct rte_eth_rxtx_callback *cb =
				dev->post_rx_burst_cbs[queue_id];

			nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
					nb_pkts, cb->param);
		} while (cb != NULL);
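/*
 * Usage sketch (illustrative, not part of this header): a typical receive
 * loop built on rte_eth_rx_burst(), the inline fragment above.  Burst size 32
 * is arbitrary; packets are simply freed here.
 *
 * @code
 * struct rte_mbuf *pkts[32];
 * uint16_t i, nb_rx;
 *
 * nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
 * for (i = 0; i < nb_rx; i++)
 *         rte_pktmbuf_free(pkts[i]);
 * @endcode
 */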
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
	if (queue_id >= dev->data->nb_rx_queues)

	return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
	return (*dev->dev_ops->rx_descriptor_done)( \
		dev->data->rx_queues[queue_id], offset);

#define RTE_ETH_RX_DESC_AVAIL   0
#define RTE_ETH_RX_DESC_DONE    1
#define RTE_ETH_RX_DESC_UNAVAIL 2
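/*
 * Usage sketch (illustrative, not part of this header): probing the state of
 * a descriptor in RX queue 0 with rte_eth_rx_descriptor_status() and
 * comparing it against the constants above.
 *
 * @code
 * int rc = rte_eth_rx_descriptor_status(port_id, 0, offset);
 *
 * if (rc == RTE_ETH_RX_DESC_DONE)
 *         ; // a packet is ready at this offset but not yet retrieved
 * else if (rc == RTE_ETH_RX_DESC_AVAIL)
 *         ; // descriptor is still available for the hardware to fill
 * @endcode
 */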
	struct rte_eth_dev *dev;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_rx_queues)

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
	rxq = dev->data->rx_queues[queue_id];

	return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);

#define RTE_ETH_TX_DESC_FULL    0
#define RTE_ETH_TX_DESC_DONE    1
#define RTE_ETH_TX_DESC_UNAVAIL 2

static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
	uint16_t queue_id, uint16_t offset)

	struct rte_eth_dev *dev;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_tx_queues)

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
	txq = dev->data->tx_queues[queue_id];

	return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
static inline uint16_t
rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];

			nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
		} while (cb != NULL);

	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
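/*
 * Usage sketch (illustrative, not part of this header): transmitting a burst
 * with rte_eth_tx_burst() and freeing whatever the driver could not enqueue.
 *
 * @code
 * uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_pkts);
 *
 * while (nb_tx < nb_pkts)
 *         rte_pktmbuf_free(pkts[nb_tx++]);
 * @endcode
 */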
#ifndef RTE_ETHDEV_TX_PREPARE_NOOP

static inline uint16_t
rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)

	struct rte_eth_dev *dev;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		RTE_PMD_DEBUG_TRACE("Invalid TX port_id=%d\n", port_id);

	dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);

	if (!dev->tx_pkt_prepare)
	return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
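/*
 * Usage sketch (illustrative, not part of this header): running
 * rte_eth_tx_prepare() before rte_eth_tx_burst() so that checksum/TSO
 * metadata is fixed up for packets requesting TX offloads.
 *
 * @code
 * uint16_t nb_prep = rte_eth_tx_prepare(port_id, 0, pkts, nb_pkts);
 *
 * if (nb_prep != nb_pkts)
 *         ; // rte_errno describes why pkts[nb_prep] was rejected
 * rte_eth_tx_burst(port_id, 0, pkts, nb_prep);
 * @endcode
 */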
static inline uint16_t

static inline uint16_t

	uint16_t to_send = buffer->length;

		buffer->error_callback(&buffer->pkts[sent],
				(uint16_t)(to_send - sent),
				buffer->error_userdata);
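/*
 * Usage sketch (illustrative, not part of this header): queueing packets into
 * the TX buffer and flushing the remainder at the end of the cycle.  txb is
 * the buffer initialized with rte_eth_tx_buffer_init() above.
 *
 * @code
 * uint16_t i, sent = 0;
 *
 * for (i = 0; i < nb_rx; i++)
 *         sent += rte_eth_tx_buffer(port_id, 0, txb, pkts[i]);
 * sent += rte_eth_tx_buffer_flush(port_id, 0, txb);
 * @endcode
 */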