147 #define RTE_ETHDEV_HAS_LRO_SUPPORT
149 #include <rte_compat.h>
156 #include <rte_config.h>
160 #include "rte_dev_info.h"
182 uint64_t
q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
184 uint64_t
q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
186 uint64_t
q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
188 uint64_t
q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
190 uint64_t
q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
/*
 * Link-speed capability bitmap: one flag bit per advertised speed.
 * Bit 0 selects the negotiation mode (0 = autonegotiation, 1 = fixed);
 * names with an _HD suffix are the half-duplex variants.
 */
197 #define ETH_LINK_SPEED_AUTONEG (0 << 0)
198 #define ETH_LINK_SPEED_FIXED (1 << 0)
199 #define ETH_LINK_SPEED_10M_HD (1 << 1)
200 #define ETH_LINK_SPEED_10M (1 << 2)
201 #define ETH_LINK_SPEED_100M_HD (1 << 3)
202 #define ETH_LINK_SPEED_100M (1 << 4)
203 #define ETH_LINK_SPEED_1G (1 << 5)
204 #define ETH_LINK_SPEED_2_5G (1 << 6)
205 #define ETH_LINK_SPEED_5G (1 << 7)
206 #define ETH_LINK_SPEED_10G (1 << 8)
207 #define ETH_LINK_SPEED_20G (1 << 9)
208 #define ETH_LINK_SPEED_25G (1 << 10)
209 #define ETH_LINK_SPEED_40G (1 << 11)
210 #define ETH_LINK_SPEED_50G (1 << 12)
211 #define ETH_LINK_SPEED_56G (1 << 13)
212 #define ETH_LINK_SPEED_100G (1 << 14)
/* Numeric link speeds, expressed in Mbps (e.g. _1G -> 1000; NONE = 0). */
217 #define ETH_SPEED_NUM_NONE 0
218 #define ETH_SPEED_NUM_10M 10
219 #define ETH_SPEED_NUM_100M 100
220 #define ETH_SPEED_NUM_1G 1000
221 #define ETH_SPEED_NUM_2_5G 2500
222 #define ETH_SPEED_NUM_5G 5000
223 #define ETH_SPEED_NUM_10G 10000
224 #define ETH_SPEED_NUM_20G 20000
225 #define ETH_SPEED_NUM_25G 25000
226 #define ETH_SPEED_NUM_40G 40000
227 #define ETH_SPEED_NUM_50G 50000
228 #define ETH_SPEED_NUM_56G 56000
229 #define ETH_SPEED_NUM_100G 100000
240 } __attribute__((aligned(8)));
/* Link duplex (half/full), link status (down/up) and negotiation
 * mode (fixed/autoneg) constants. */
243 #define ETH_LINK_HALF_DUPLEX 0
244 #define ETH_LINK_FULL_DUPLEX 1
245 #define ETH_LINK_DOWN 0
246 #define ETH_LINK_UP 1
247 #define ETH_LINK_FIXED 0
248 #define ETH_LINK_AUTONEG 1
254 struct rte_eth_thresh {
/* Receive multi-queue capability flags; distinct bits so they can be OR-ed. */
263 #define ETH_MQ_RX_RSS_FLAG 0x1
264 #define ETH_MQ_RX_DCB_FLAG 0x2
265 #define ETH_MQ_RX_VMDQ_FLAG 0x4
/* Backward-compatibility aliases for the RX and TX multi-queue mode
 * enum values. */
296 #define ETH_RSS ETH_MQ_RX_RSS
297 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
298 #define ETH_DCB_RX ETH_MQ_RX_DCB
314 #define ETH_DCB_NONE ETH_MQ_TX_NONE
315 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
316 #define ETH_DCB_TX ETH_MQ_TX_DCB
366 ETH_VLAN_TYPE_UNKNOWN = 0,
/* RSS hash offload type bits: one 64-bit flag per flow type, positioned
 * by the corresponding RTE_ETH_FLOW_ enum value. */
409 #define ETH_RSS_IPV4 (1ULL << RTE_ETH_FLOW_IPV4)
410 #define ETH_RSS_FRAG_IPV4 (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
411 #define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
412 #define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
413 #define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
414 #define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
415 #define ETH_RSS_IPV6 (1ULL << RTE_ETH_FLOW_IPV6)
416 #define ETH_RSS_FRAG_IPV6 (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
417 #define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
418 #define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
419 #define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
420 #define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
421 #define ETH_RSS_L2_PAYLOAD (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
422 #define ETH_RSS_IPV6_EX (1ULL << RTE_ETH_FLOW_IPV6_EX)
423 #define ETH_RSS_IPV6_TCP_EX (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
424 #define ETH_RSS_IPV6_UDP_EX (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
425 #define ETH_RSS_PORT (1ULL << RTE_ETH_FLOW_PORT)
426 #define ETH_RSS_VXLAN (1ULL << RTE_ETH_FLOW_VXLAN)
427 #define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
428 #define ETH_RSS_NVGRE (1ULL << RTE_ETH_FLOW_NVGRE)
430 #define ETH_RSS_IP ( \
432 ETH_RSS_FRAG_IPV4 | \
433 ETH_RSS_NONFRAG_IPV4_OTHER | \
435 ETH_RSS_FRAG_IPV6 | \
436 ETH_RSS_NONFRAG_IPV6_OTHER | \
439 #define ETH_RSS_UDP ( \
440 ETH_RSS_NONFRAG_IPV4_UDP | \
441 ETH_RSS_NONFRAG_IPV6_UDP | \
444 #define ETH_RSS_TCP ( \
445 ETH_RSS_NONFRAG_IPV4_TCP | \
446 ETH_RSS_NONFRAG_IPV6_TCP | \
449 #define ETH_RSS_SCTP ( \
450 ETH_RSS_NONFRAG_IPV4_SCTP | \
451 ETH_RSS_NONFRAG_IPV6_SCTP)
453 #define ETH_RSS_TUNNEL ( \
459 #define ETH_RSS_PROTO_MASK ( \
461 ETH_RSS_FRAG_IPV4 | \
462 ETH_RSS_NONFRAG_IPV4_TCP | \
463 ETH_RSS_NONFRAG_IPV4_UDP | \
464 ETH_RSS_NONFRAG_IPV4_SCTP | \
465 ETH_RSS_NONFRAG_IPV4_OTHER | \
467 ETH_RSS_FRAG_IPV6 | \
468 ETH_RSS_NONFRAG_IPV6_TCP | \
469 ETH_RSS_NONFRAG_IPV6_UDP | \
470 ETH_RSS_NONFRAG_IPV6_SCTP | \
471 ETH_RSS_NONFRAG_IPV6_OTHER | \
472 ETH_RSS_L2_PAYLOAD | \
474 ETH_RSS_IPV6_TCP_EX | \
475 ETH_RSS_IPV6_UDP_EX | \
/* Supported sizes, in entries, of the RSS redirection table (RETA). */
486 #define ETH_RSS_RETA_SIZE_64 64
487 #define ETH_RSS_RETA_SIZE_128 128
488 #define ETH_RSS_RETA_SIZE_256 256
489 #define ETH_RSS_RETA_SIZE_512 512
/* Number of RETA entries per group (see the reta[] array sized by it). */
490 #define RTE_RETA_GROUP_SIZE 64
/* VMDq / DCB dimensioning limits. */
493 #define ETH_VMDQ_MAX_VLAN_FILTERS 64
494 #define ETH_DCB_NUM_USER_PRIORITIES 8
495 #define ETH_VMDQ_DCB_NUM_QUEUES 128
496 #define ETH_DCB_NUM_QUEUES 128
/* DCB capability bits: priority groups and priority flow control. */
499 #define ETH_DCB_PG_SUPPORT 0x00000001
500 #define ETH_DCB_PFC_SUPPORT 0x00000002
/* VLAN offload enable bits (strip / filter / extend). */
503 #define ETH_VLAN_STRIP_OFFLOAD 0x0001
504 #define ETH_VLAN_FILTER_OFFLOAD 0x0002
505 #define ETH_VLAN_EXTEND_OFFLOAD 0x0004
/* Corresponding mask bits used when setting/getting the VLAN offload config. */
508 #define ETH_VLAN_STRIP_MASK 0x0001
509 #define ETH_VLAN_FILTER_MASK 0x0002
510 #define ETH_VLAN_EXTEND_MASK 0x0004
/* Maximum valid VLAN ID (12-bit field). */
511 #define ETH_VLAN_ID_MAX 0x0FFF
/* Maximum number of receive MAC address filters. */
514 #define ETH_NUM_RECEIVE_MAC_ADDR 128
/* Size of the VMDq unicast hash array. */
517 #define ETH_VMDQ_NUM_UC_HASH_ARRAY 128
/* ETH_VMDQ_ACCEPT_ flags: frame classes accepted by a VMDq pool; OR-able. */
520 #define ETH_VMDQ_ACCEPT_UNTAG 0x0001
521 #define ETH_VMDQ_ACCEPT_HASH_MC 0x0002
522 #define ETH_VMDQ_ACCEPT_HASH_UC 0x0004
523 #define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
524 #define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
/* Port mirroring: max VLANs per mirror rule, and mirror rule type bits. */
527 #define ETH_MIRROR_MAX_VLANS 64
529 #define ETH_MIRROR_VIRTUAL_POOL_UP 0x01
530 #define ETH_MIRROR_UPLINK_PORT 0x02
531 #define ETH_MIRROR_DOWNLINK_PORT 0x04
532 #define ETH_MIRROR_VLAN 0x08
533 #define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10
538 struct rte_eth_vlan_mirror {
564 uint16_t
reta[RTE_RETA_GROUP_SIZE];
589 struct rte_eth_dcb_rx_conf {
595 struct rte_eth_vmdq_dcb_tx_conf {
601 struct rte_eth_dcb_tx_conf {
607 struct rte_eth_vmdq_tx_conf {
/* Legacy per-TX-queue flag bits; each NO* bit disables the named
 * feature or offload for that queue. */
705 #define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001
706 #define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002
707 #define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004
708 #define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100
709 #define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200
710 #define ETH_TXQ_FLAGS_NOXSUMUDP 0x0400
711 #define ETH_TXQ_FLAGS_NOXSUMTCP 0x0800
/* Aggregate: disable VLAN insertion and every checksum offload. */
712 #define ETH_TXQ_FLAGS_NOOFFLOADS \
713 (ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
714 ETH_TXQ_FLAGS_NOXSUMUDP | ETH_TXQ_FLAGS_NOXSUMTCP)
/* Aggregate: disable all checksum offloads only. */
715 #define ETH_TXQ_FLAGS_NOXSUMS \
716 (ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
717 ETH_TXQ_FLAGS_NOXSUMTCP)
/* NOTE(review): appears to request that txq_flags be ignored in favor of
 * the offloads API — confirm against rte_eth_txconf documentation. */
725 #define ETH_TXQ_FLAGS_IGNORE 0x8000
/* DEV_RX_OFFLOAD_ bits: per-port RX offload capability/enable flags. */
926 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
927 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
928 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
929 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
930 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
931 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
932 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
933 #define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
934 #define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
935 #define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
936 #define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
937 #define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
938 #define DEV_RX_OFFLOAD_CRC_STRIP 0x00001000
939 #define DEV_RX_OFFLOAD_SCATTER 0x00002000
940 #define DEV_RX_OFFLOAD_TIMESTAMP 0x00004000
941 #define DEV_RX_OFFLOAD_SECURITY 0x00008000
/* Convenience aggregates of related RX offload bits. */
942 #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
943 DEV_RX_OFFLOAD_UDP_CKSUM | \
944 DEV_RX_OFFLOAD_TCP_CKSUM)
945 #define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
946 DEV_RX_OFFLOAD_VLAN_FILTER | \
947 DEV_RX_OFFLOAD_VLAN_EXTEND)
/* DEV_TX_OFFLOAD_ bits: per-port TX offload capability/enable flags. */
957 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
958 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
959 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
960 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
961 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
962 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
963 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
964 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
965 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
/* Tunnel TSO variants (VXLAN / GRE / IP-in-IP / GENEVE). */
966 #define DEV_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200
967 #define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400
968 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800
969 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000
970 #define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
971 #define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
975 #define DEV_TX_OFFLOAD_MULTI_SEGS 0x00008000
977 #define DEV_TX_OFFLOAD_MBUF_FAST_FREE 0x00010000
982 #define DEV_TX_OFFLOAD_SECURITY 0x00020000
989 struct rte_pci_device;
1004 uint32_t max_hash_mac_addrs;
1055 #define RTE_ETH_XSTATS_NAME_SIZE 64
1082 #define ETH_DCB_NUM_TCS 8
1083 #define ETH_MAX_VMDQ_POOL 64
1094 }
tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1099 }
tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1117 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1118 #define RTE_ETH_QUEUE_STATE_STARTED 1
1120 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
1123 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1124 if (!rte_eth_dev_is_valid_port(port_id)) { \
1125 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
1130 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1131 if (!rte_eth_dev_is_valid_port(port_id)) { \
1132 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
1142 #define ETH_L2_TUNNEL_ENABLE_MASK 0x00000001
1144 #define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002
1146 #define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004
1148 #define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1173 struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1197 struct rte_mbuf *pkts[], uint16_t nb_pkts,
void *user_param);
1213 struct rte_eth_dev_sriov {
1215 uint8_t nb_q_per_pool;
1216 uint16_t def_vmdq_idx;
1217 uint16_t def_pool_q_idx;
1219 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1221 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
1223 #define RTE_ETH_DEV_NO_OWNER 0
1225 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
1227 struct rte_eth_dev_owner {
1229 char name[RTE_ETH_MAX_OWNER_NAME_LEN];
1233 #define RTE_ETH_DEV_INTR_LSC 0x0002
1235 #define RTE_ETH_DEV_BONDED_SLAVE 0x0004
1237 #define RTE_ETH_DEV_INTR_RMV 0x0008
1254 const uint64_t owner_id);
1259 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
1260 for (p = rte_eth_find_next_owned_by(0, o); \
1261 (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
1262 p = rte_eth_find_next_owned_by(p + 1, o))
1277 #define RTE_ETH_FOREACH_DEV(p) \
1278 RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
1310 const struct rte_eth_dev_owner *owner);
1326 const uint64_t owner_id);
1353 struct rte_eth_dev_owner *owner);
1470 uint16_t nb_tx_queue,
const struct rte_eth_conf *eth_conf);
1483 int __rte_experimental
1529 uint16_t nb_rx_desc,
unsigned int socket_id,
1582 uint16_t nb_tx_desc,
unsigned int socket_id,
1991 uint64_t *values,
unsigned int size);
2040 uint16_t tx_queue_id, uint8_t stat_idx);
2060 uint16_t rx_queue_id,
2105 char *fw_version,
size_t fw_size);
2146 uint32_t *ptypes,
int num);
2295 typedef void (*buffer_tx_error_fn)(
struct rte_mbuf **unsent, uint16_t count,
2303 buffer_tx_error_fn error_callback;
2304 void *error_userdata;
2317 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
2318 (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
2359 buffer_tx_error_fn callback,
void *
userdata);
2593 int epfd,
int op,
void *data);
2747 uint16_t reta_size);
2767 uint16_t reta_size);
3091 struct rte_eth_rxtx_callback;
3124 struct rte_eth_rxtx_callback *user_cb);
3157 struct rte_eth_rxtx_callback *user_cb);
3286 uint32_t nb_mc_addr);
3335 struct timespec *timestamp, uint32_t flags);
3353 struct timespec *timestamp);
3505 uint16_t *nb_rx_desc,
3506 uint16_t *nb_tx_desc);
3622 static inline uint16_t
3624 struct rte_mbuf **rx_pkts,
const uint16_t nb_pkts)
3626 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3629 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3630 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
3631 RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
3633 if (queue_id >= dev->data->nb_rx_queues) {
3634 RTE_PMD_DEBUG_TRACE(
"Invalid RX queue_id=%d\n", queue_id);
3638 nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
3641 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
3642 if (
unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
3643 struct rte_eth_rxtx_callback *cb =
3644 dev->post_rx_burst_cbs[queue_id];
3647 nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
3648 nb_pkts, cb->param);
3650 }
while (cb != NULL);
3672 struct rte_eth_dev *dev;
3674 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3675 dev = &rte_eth_devices[port_id];
3676 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
3677 if (queue_id >= dev->data->nb_rx_queues)
3680 return (
int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
3701 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3702 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3703 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
3704 return (*dev->dev_ops->rx_descriptor_done)( \
3705 dev->data->rx_queues[queue_id], offset);
/* Return codes for RX descriptor status queries. */
3708 #define RTE_ETH_RX_DESC_AVAIL 0
3709 #define RTE_ETH_RX_DESC_DONE 1
3710 #define RTE_ETH_RX_DESC_UNAVAIL 2
3749 struct rte_eth_dev *dev;
3752 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3753 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3755 dev = &rte_eth_devices[port_id];
3756 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3757 if (queue_id >= dev->data->nb_rx_queues)
3760 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
3761 rxq = dev->data->rx_queues[queue_id];
3763 return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
/* Return codes for TX descriptor status queries. */
3766 #define RTE_ETH_TX_DESC_FULL 0
3767 #define RTE_ETH_TX_DESC_DONE 1
3768 #define RTE_ETH_TX_DESC_UNAVAIL 2
3803 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
3804 uint16_t queue_id, uint16_t offset)
3806 struct rte_eth_dev *dev;
3809 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3810 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3812 dev = &rte_eth_devices[port_id];
3813 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3814 if (queue_id >= dev->data->nb_tx_queues)
3817 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
3818 txq = dev->data->tx_queues[queue_id];
3820 return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
3889 static inline uint16_t
3891 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
3893 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3895 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3896 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
3897 RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
3899 if (queue_id >= dev->data->nb_tx_queues) {
3900 RTE_PMD_DEBUG_TRACE(
"Invalid TX queue_id=%d\n", queue_id);
3905 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
3906 struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
3910 nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
3913 }
while (cb != NULL);
3917 return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
3976 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
3978 static inline uint16_t
3980 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
3982 struct rte_eth_dev *dev;
3984 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3986 RTE_PMD_DEBUG_TRACE(
"Invalid TX port_id=%d\n", port_id);
3992 dev = &rte_eth_devices[port_id];
3994 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3995 if (queue_id >= dev->data->nb_tx_queues) {
3996 RTE_PMD_DEBUG_TRACE(
"Invalid TX queue_id=%d\n", queue_id);
4002 if (!dev->tx_pkt_prepare)
4005 return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
4020 static inline uint16_t
4052 static inline uint16_t
4057 uint16_t to_send = buffer->
length;
4068 buffer->error_callback(&buffer->
pkts[sent],
4069 (uint16_t)(to_send - sent),
4070 buffer->error_userdata);