147 #define RTE_ETHDEV_HAS_LRO_SUPPORT
149 #include <rte_compat.h>
156 #include <rte_config.h>
160 #include "rte_dev_info.h"
162 extern int rte_eth_dev_logtype;
/*
 * ethdev logging helper: token-pastes `level` onto RTE_LOG_ to form the
 * log-level constant and logs under the ethdev-specific log type.
 * The leading "" literal is string-concatenated with the first vararg,
 * so passing a non-literal (or no) format string fails at compile time.
 */
164 #define RTE_ETHDEV_LOG(level, ...) \
165 rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
187 uint64_t
q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
189 uint64_t
q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
191 uint64_t
q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
193 uint64_t
q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
195 uint64_t
q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
/*
 * Link-speed capability bitmap. Bit 0 selects the negotiation mode
 * (0 = autonegotiate, 1 = fixed speed); each higher bit advertises one
 * discrete speed, with _HD suffixes marking half-duplex variants.
 * NOTE(review): the numeral at the start of each line appears to be an
 * original line number fused in by text extraction, not valid C -- confirm
 * against the upstream header.
 */
202 #define ETH_LINK_SPEED_AUTONEG (0 << 0) /* autonegotiation (default) */
203 #define ETH_LINK_SPEED_FIXED (1 << 0) /* disable autoneg, use fixed speed */
204 #define ETH_LINK_SPEED_10M_HD (1 << 1)
205 #define ETH_LINK_SPEED_10M (1 << 2)
206 #define ETH_LINK_SPEED_100M_HD (1 << 3)
207 #define ETH_LINK_SPEED_100M (1 << 4)
208 #define ETH_LINK_SPEED_1G (1 << 5)
209 #define ETH_LINK_SPEED_2_5G (1 << 6)
210 #define ETH_LINK_SPEED_5G (1 << 7)
211 #define ETH_LINK_SPEED_10G (1 << 8)
212 #define ETH_LINK_SPEED_20G (1 << 9)
213 #define ETH_LINK_SPEED_25G (1 << 10)
214 #define ETH_LINK_SPEED_40G (1 << 11)
215 #define ETH_LINK_SPEED_50G (1 << 12)
216 #define ETH_LINK_SPEED_56G (1 << 13)
217 #define ETH_LINK_SPEED_100G (1 << 14)
/*
 * Numeric link speeds. The scaling (10M -> 10, 1G -> 1000) shows the unit
 * is megabits per second; NONE (0) denotes no/unknown link speed.
 */
222 #define ETH_SPEED_NUM_NONE 0
223 #define ETH_SPEED_NUM_10M 10
224 #define ETH_SPEED_NUM_100M 100
225 #define ETH_SPEED_NUM_1G 1000
226 #define ETH_SPEED_NUM_2_5G 2500
227 #define ETH_SPEED_NUM_5G 5000
228 #define ETH_SPEED_NUM_10G 10000
229 #define ETH_SPEED_NUM_20G 20000
230 #define ETH_SPEED_NUM_25G 25000
231 #define ETH_SPEED_NUM_40G 40000
232 #define ETH_SPEED_NUM_50G 50000
233 #define ETH_SPEED_NUM_56G 56000
234 #define ETH_SPEED_NUM_100G 100000
245 } __attribute__((aligned(8)));
/*
 * Binary values for the link duplex, link status and autoneg fields
 * (presumably of the 8-byte-aligned link struct ending just above --
 * the struct body is not visible here; confirm against upstream).
 */
248 #define ETH_LINK_HALF_DUPLEX 0
249 #define ETH_LINK_FULL_DUPLEX 1
250 #define ETH_LINK_DOWN 0
251 #define ETH_LINK_UP 1
252 #define ETH_LINK_FIXED 0
253 #define ETH_LINK_AUTONEG 1
259 struct rte_eth_thresh {
/*
 * Component bits combined to form the Rx multi-queue modes; names suggest
 * RSS / DCB / VMDq features (enum combining them is not visible here).
 */
268 #define ETH_MQ_RX_RSS_FLAG 0x1
269 #define ETH_MQ_RX_DCB_FLAG 0x2
270 #define ETH_MQ_RX_VMDQ_FLAG 0x4
301 #define ETH_RSS ETH_MQ_RX_RSS
302 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
303 #define ETH_DCB_RX ETH_MQ_RX_DCB
319 #define ETH_DCB_NONE ETH_MQ_TX_NONE
320 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
321 #define ETH_DCB_TX ETH_MQ_TX_DCB
344 ETH_VLAN_TYPE_UNKNOWN = 0,
387 #define ETH_RSS_IPV4 (1ULL << RTE_ETH_FLOW_IPV4)
388 #define ETH_RSS_FRAG_IPV4 (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
389 #define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
390 #define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
391 #define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
392 #define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
393 #define ETH_RSS_IPV6 (1ULL << RTE_ETH_FLOW_IPV6)
394 #define ETH_RSS_FRAG_IPV6 (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
395 #define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
396 #define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
397 #define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
398 #define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
399 #define ETH_RSS_L2_PAYLOAD (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
400 #define ETH_RSS_IPV6_EX (1ULL << RTE_ETH_FLOW_IPV6_EX)
401 #define ETH_RSS_IPV6_TCP_EX (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
402 #define ETH_RSS_IPV6_UDP_EX (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
403 #define ETH_RSS_PORT (1ULL << RTE_ETH_FLOW_PORT)
404 #define ETH_RSS_VXLAN (1ULL << RTE_ETH_FLOW_VXLAN)
405 #define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
406 #define ETH_RSS_NVGRE (1ULL << RTE_ETH_FLOW_NVGRE)
408 #define ETH_RSS_IP ( \
410 ETH_RSS_FRAG_IPV4 | \
411 ETH_RSS_NONFRAG_IPV4_OTHER | \
413 ETH_RSS_FRAG_IPV6 | \
414 ETH_RSS_NONFRAG_IPV6_OTHER | \
417 #define ETH_RSS_UDP ( \
418 ETH_RSS_NONFRAG_IPV4_UDP | \
419 ETH_RSS_NONFRAG_IPV6_UDP | \
422 #define ETH_RSS_TCP ( \
423 ETH_RSS_NONFRAG_IPV4_TCP | \
424 ETH_RSS_NONFRAG_IPV6_TCP | \
427 #define ETH_RSS_SCTP ( \
428 ETH_RSS_NONFRAG_IPV4_SCTP | \
429 ETH_RSS_NONFRAG_IPV6_SCTP)
431 #define ETH_RSS_TUNNEL ( \
437 #define ETH_RSS_PROTO_MASK ( \
439 ETH_RSS_FRAG_IPV4 | \
440 ETH_RSS_NONFRAG_IPV4_TCP | \
441 ETH_RSS_NONFRAG_IPV4_UDP | \
442 ETH_RSS_NONFRAG_IPV4_SCTP | \
443 ETH_RSS_NONFRAG_IPV4_OTHER | \
445 ETH_RSS_FRAG_IPV6 | \
446 ETH_RSS_NONFRAG_IPV6_TCP | \
447 ETH_RSS_NONFRAG_IPV6_UDP | \
448 ETH_RSS_NONFRAG_IPV6_SCTP | \
449 ETH_RSS_NONFRAG_IPV6_OTHER | \
450 ETH_RSS_L2_PAYLOAD | \
452 ETH_RSS_IPV6_TCP_EX | \
453 ETH_RSS_IPV6_UDP_EX | \
/*
 * Supported RSS redirection-table (RETA) sizes, and the number of entries
 * carried per group (matches the uint16_t reta[RTE_RETA_GROUP_SIZE] array
 * declared further below).
 */
464 #define ETH_RSS_RETA_SIZE_64 64
465 #define ETH_RSS_RETA_SIZE_128 128
466 #define ETH_RSS_RETA_SIZE_256 256
467 #define ETH_RSS_RETA_SIZE_512 512
468 #define RTE_RETA_GROUP_SIZE 64
471 #define ETH_VMDQ_MAX_VLAN_FILTERS 64
472 #define ETH_DCB_NUM_USER_PRIORITIES 8
473 #define ETH_VMDQ_DCB_NUM_QUEUES 128
474 #define ETH_DCB_NUM_QUEUES 128
477 #define ETH_DCB_PG_SUPPORT 0x00000001
478 #define ETH_DCB_PFC_SUPPORT 0x00000002
/*
 * VLAN offload enable bits and the bit-for-bit identical masks used to
 * select which offloads an operation affects. ETH_VLAN_ID_MAX reflects
 * the 12-bit VLAN identifier field (0x0FFF).
 */
481 #define ETH_VLAN_STRIP_OFFLOAD 0x0001
482 #define ETH_VLAN_FILTER_OFFLOAD 0x0002
483 #define ETH_VLAN_EXTEND_OFFLOAD 0x0004
486 #define ETH_VLAN_STRIP_MASK 0x0001
487 #define ETH_VLAN_FILTER_MASK 0x0002
488 #define ETH_VLAN_EXTEND_MASK 0x0004
489 #define ETH_VLAN_ID_MAX 0x0FFF
492 #define ETH_NUM_RECEIVE_MAC_ADDR 128
495 #define ETH_VMDQ_NUM_UC_HASH_ARRAY 128
498 #define ETH_VMDQ_ACCEPT_UNTAG 0x0001
499 #define ETH_VMDQ_ACCEPT_HASH_MC 0x0002
500 #define ETH_VMDQ_ACCEPT_HASH_UC 0x0004
501 #define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
502 #define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
505 #define ETH_MIRROR_MAX_VLANS 64
507 #define ETH_MIRROR_VIRTUAL_POOL_UP 0x01
508 #define ETH_MIRROR_UPLINK_PORT 0x02
509 #define ETH_MIRROR_DOWNLINK_PORT 0x04
510 #define ETH_MIRROR_VLAN 0x08
511 #define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10
516 struct rte_eth_vlan_mirror {
542 uint16_t
reta[RTE_RETA_GROUP_SIZE];
567 struct rte_eth_dcb_rx_conf {
573 struct rte_eth_vmdq_dcb_tx_conf {
579 struct rte_eth_dcb_tx_conf {
585 struct rte_eth_vmdq_tx_conf {
/*
 * Rx offload capability bits (one feature per bit). CHECKSUM and VLAN at
 * the end are convenience aggregates of the individual bits defined above
 * them.
 */
875 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
876 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
877 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
878 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
879 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
880 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
881 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
882 #define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
883 #define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
884 #define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
885 #define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
886 #define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
887 #define DEV_RX_OFFLOAD_CRC_STRIP 0x00001000
888 #define DEV_RX_OFFLOAD_SCATTER 0x00002000
889 #define DEV_RX_OFFLOAD_TIMESTAMP 0x00004000
890 #define DEV_RX_OFFLOAD_SECURITY 0x00008000
896 #define DEV_RX_OFFLOAD_KEEP_CRC 0x00010000
897 #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
898 DEV_RX_OFFLOAD_UDP_CKSUM | \
899 DEV_RX_OFFLOAD_TCP_CKSUM)
900 #define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
901 DEV_RX_OFFLOAD_VLAN_FILTER | \
902 DEV_RX_OFFLOAD_VLAN_EXTEND)
/*
 * Tx offload capability bits (one feature per bit): insertion, checksum,
 * TSO/tunnel-TSO variants, lock-free multi-thread Tx, multi-segment
 * support, fast mbuf free, and inline security processing.
 */
912 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
913 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
914 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
915 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
916 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
917 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
918 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
919 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
920 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
921 #define DEV_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200
922 #define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400
923 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800
924 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000
925 #define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
926 #define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
930 #define DEV_TX_OFFLOAD_MULTI_SEGS 0x00008000
932 #define DEV_TX_OFFLOAD_MBUF_FAST_FREE 0x00010000
937 #define DEV_TX_OFFLOAD_SECURITY 0x00020000
943 #define DEV_TX_OFFLOAD_UDP_TNL_TSO 0x00040000
949 #define DEV_TX_OFFLOAD_IP_TNL_TSO 0x00080000
951 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
953 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002
966 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
967 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
968 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
969 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
986 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (0)
1024 uint32_t max_hash_mac_addrs;
1086 #define RTE_ETH_XSTATS_NAME_SIZE 64
1113 #define ETH_DCB_NUM_TCS 8
1114 #define ETH_MAX_VMDQ_POOL 64
1125 }
tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1130 }
tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
/* Per-queue state values, plus the sentinel port id meaning "all ports". */
1148 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1149 #define RTE_ETH_QUEUE_STATE_STARTED 1
1151 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
1154 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1155 if (!rte_eth_dev_is_valid_port(port_id)) { \
1156 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1161 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1162 if (!rte_eth_dev_is_valid_port(port_id)) { \
1163 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1173 #define ETH_L2_TUNNEL_ENABLE_MASK 0x00000001
1175 #define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002
1177 #define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004
1179 #define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1204 struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1228 struct rte_mbuf *pkts[], uint16_t nb_pkts,
void *user_param);
1244 struct rte_eth_dev_sriov {
1246 uint8_t nb_q_per_pool;
1247 uint16_t def_vmdq_idx;
1248 uint16_t def_pool_q_idx;
1250 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1252 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
1254 #define RTE_ETH_DEV_NO_OWNER 0
1256 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
1258 struct rte_eth_dev_owner {
1260 char name[RTE_ETH_MAX_OWNER_NAME_LEN];
1264 #define RTE_ETH_DEV_INTR_LSC 0x0002
1266 #define RTE_ETH_DEV_BONDED_SLAVE 0x0004
1268 #define RTE_ETH_DEV_INTR_RMV 0x0008
1270 #define RTE_ETH_DEV_REPRESENTOR 0x0010
1272 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR 0x0020
1286 const uint64_t owner_id);
/*
 * Iterate `p` over every valid port id owned by owner id `o`, starting from
 * port 0. rte_eth_find_next_owned_by() returns a value >= RTE_MAX_ETHPORTS
 * when no further owned port exists, which terminates the loop; the casts
 * keep the comparison unsigned.
 */
1291 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
1292 for (p = rte_eth_find_next_owned_by(0, o); \
1293 (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
1294 p = rte_eth_find_next_owned_by(p + 1, o))
/* Iterate `p` over every valid unowned port (owner == RTE_ETH_DEV_NO_OWNER). */
1309 #define RTE_ETH_FOREACH_DEV(p) \
1310 RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
1342 const struct rte_eth_dev_owner *owner);
1358 const uint64_t owner_id);
1385 struct rte_eth_dev_owner *owner);
1534 uint16_t nb_tx_queue,
const struct rte_eth_conf *eth_conf);
1547 int __rte_experimental
1600 uint16_t nb_rx_desc,
unsigned int socket_id,
1653 uint16_t nb_tx_desc,
unsigned int socket_id,
2082 uint64_t *values,
unsigned int size);
2131 uint16_t tx_queue_id, uint8_t stat_idx);
2151 uint16_t rx_queue_id,
2196 char *fw_version,
size_t fw_size);
2237 uint32_t *ptypes,
int num);
2386 typedef void (*buffer_tx_error_fn)(
struct rte_mbuf **unsent, uint16_t count,
2394 buffer_tx_error_fn error_callback;
2395 void *error_userdata;
/*
 * Bytes required for a struct rte_eth_dev_tx_buffer able to hold `sz`
 * mbuf pointers: the fixed header plus the trailing pointer array.
 */
2408 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
2409 (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
2450 buffer_tx_error_fn callback,
void *
userdata);
2725 int epfd,
int op,
void *data);
2879 uint16_t reta_size);
2899 uint16_t reta_size);
3141 struct rte_eth_rxtx_callback;
3167 const struct rte_eth_rxtx_callback *
3196 const struct rte_eth_rxtx_callback *
3224 const struct rte_eth_rxtx_callback *
3259 const struct rte_eth_rxtx_callback *user_cb);
3292 const struct rte_eth_rxtx_callback *user_cb);
3418 int __rte_experimental
3440 int __rte_experimental
3442 struct rte_dev_eeprom_info *info);
3464 uint32_t nb_mc_addr);
3513 struct timespec *timestamp, uint32_t flags);
3531 struct timespec *timestamp);
3689 uint16_t *nb_rx_desc,
3690 uint16_t *nb_tx_desc);
3806 static inline uint16_t
3808 struct rte_mbuf **rx_pkts,
const uint16_t nb_pkts)
3810 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3813 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3814 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
3815 RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
3817 if (queue_id >= dev->data->nb_rx_queues) {
3818 RTE_ETHDEV_LOG(ERR,
"Invalid RX queue_id=%u\n", queue_id);
3822 nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
3825 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
3826 if (
unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
3827 struct rte_eth_rxtx_callback *cb =
3828 dev->post_rx_burst_cbs[queue_id];
3831 nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
3832 nb_pkts, cb->param);
3834 }
while (cb != NULL);
3856 struct rte_eth_dev *dev;
3858 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3859 dev = &rte_eth_devices[port_id];
3860 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
3861 if (queue_id >= dev->data->nb_rx_queues)
3864 return (
int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
3885 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3886 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3887 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
3888 return (*dev->dev_ops->rx_descriptor_done)( \
3889 dev->data->rx_queues[queue_id], offset);
/* Rx descriptor status values: available to HW, done (filled), or out of range/unavailable. */
3892 #define RTE_ETH_RX_DESC_AVAIL 0
3893 #define RTE_ETH_RX_DESC_DONE 1
3894 #define RTE_ETH_RX_DESC_UNAVAIL 2
3933 struct rte_eth_dev *dev;
3936 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3937 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3939 dev = &rte_eth_devices[port_id];
3940 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3941 if (queue_id >= dev->data->nb_rx_queues)
3944 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
3945 rxq = dev->data->rx_queues[queue_id];
3947 return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
/* Tx descriptor status values: still in use (full), transmitted (done), or out of range/unavailable. */
3950 #define RTE_ETH_TX_DESC_FULL 0
3951 #define RTE_ETH_TX_DESC_DONE 1
3952 #define RTE_ETH_TX_DESC_UNAVAIL 2
3987 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
3988 uint16_t queue_id, uint16_t offset)
3990 struct rte_eth_dev *dev;
3993 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3994 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3996 dev = &rte_eth_devices[port_id];
3997 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3998 if (queue_id >= dev->data->nb_tx_queues)
4001 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
4002 txq = dev->data->tx_queues[queue_id];
4004 return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
4073 static inline uint16_t
4075 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4077 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4079 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4080 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
4081 RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
4083 if (queue_id >= dev->data->nb_tx_queues) {
4084 RTE_ETHDEV_LOG(ERR,
"Invalid TX queue_id=%u\n", queue_id);
4089 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
4090 struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
4094 nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
4097 }
while (cb != NULL);
4101 return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
4160 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
4162 static inline uint16_t
4164 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4166 struct rte_eth_dev *dev;
4168 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4170 RTE_ETHDEV_LOG(ERR,
"Invalid TX port_id=%u\n", port_id);
4176 dev = &rte_eth_devices[port_id];
4178 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4179 if (queue_id >= dev->data->nb_tx_queues) {
4180 RTE_ETHDEV_LOG(ERR,
"Invalid TX queue_id=%u\n", queue_id);
4186 if (!dev->tx_pkt_prepare)
4189 return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
4204 static inline uint16_t
4236 static inline uint16_t
4241 uint16_t to_send = buffer->
length;
4252 buffer->error_callback(&buffer->
pkts[sent],
4253 (uint16_t)(to_send - sent),
4254 buffer->error_userdata);