#ifndef _RTE_IOAT_RAWDEV_FNS_H_
#define _RTE_IOAT_RAWDEV_FNS_H_

#include <x86intrin.h>

/* Identify the data path in use; must be the first field of both the
 * rte_ioat_rawdev and rte_idxd_rawdev structures so it can be read
 * through the generic dev_private pointer.
 */
enum rte_ioat_dev_type {
    RTE_IOAT_DEV,
    RTE_IDXD_DEV,
};
/* Statistics tracked per device; the counters below are updated by the
 * inline datapath functions in this file.
 */
struct rte_ioat_xstats {
    uint64_t enqueue_failed;
    uint64_t enqueued;
    uint64_t started;
    uint64_t completed;
};
/* Hardware descriptor as written into the ring */
struct rte_ioat_generic_hw_desc {
    uint32_t size;
    union {
        uint32_t control_raw;
        struct {
            uint32_t int_enable: 1;
            uint32_t src_snoop_disable: 1;
            uint32_t dest_snoop_disable: 1;
            uint32_t completion_update: 1;
            uint32_t fence: 1;
            uint32_t reserved2: 1;
            uint32_t src_page_break: 1;
            uint32_t dest_page_break: 1;
            /* bundle, dest_dca and hint bits elided */
            uint32_t reserved: 13;
#define IOAT_COMP_UPDATE_SHIFT 3
#define IOAT_CMD_OP_SHIFT 24
            uint32_t op: 8;
        } control;
    } u;
    uint64_t src_addr;
    uint64_t dest_addr;
    uint64_t next;
    uint64_t op_specific[4];
};
/* Internal state for an IOAT rawdev instance */
struct rte_ioat_rawdev {
    /* common fields at the top - must match those in rte_idxd_rawdev */
    enum rte_ioat_dev_type type;
    struct rte_ioat_xstats xstats;

    struct rte_rawdev *rawdev;
    volatile uint16_t *doorbell;
    phys_addr_t ring_addr;
    unsigned short ring_size;
    bool hdls_disable;
    struct rte_ioat_generic_hw_desc *desc_ring;
    __m128i *hdls; /* completion handles returned to the user */
    unsigned short next_read;
    unsigned short next_write;
    volatile uint64_t status; /* device writes completion status here */
    volatile struct rte_ioat_registers *regs; /* register bar */
};
#define RTE_IOAT_CHANSTS_IDLE       0x1
#define RTE_IOAT_CHANSTS_SUSPENDED  0x2
#define RTE_IOAT_CHANSTS_HALTED     0x3
#define RTE_IOAT_CHANSTS_ARMED      0x4

static __rte_always_inline uint16_t
__ioat_burst_capacity(int dev_id)
{
    struct rte_ioat_rawdev *ioat =
            (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
    unsigned short size = ioat->ring_size - 1;
    unsigned short read = ioat->next_read;
    unsigned short write = ioat->next_write;
    unsigned short space = size - (write - read);

    return space;
}
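/*
 * Illustrative note, not part of the original header: the free-space
 * calculation relies on unsigned 16-bit wraparound, so it remains correct
 * after next_write wraps past next_read.  For a ring of 256 descriptors
 * (size == 255), read == 65530 and write == 4:
 *
 *   (unsigned short)(write - read) == 10   ->  10 descriptors in flight
 *   space == 255 - 10 == 245               -> 245 slots still free
 */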
static __rte_always_inline int
__ioat_write_desc(int dev_id, uint32_t op, uint64_t src, phys_addr_t dst,
        unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
    struct rte_ioat_rawdev *ioat =
            (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
    unsigned short read = ioat->next_read;
    unsigned short write = ioat->next_write;
    unsigned short mask = ioat->ring_size - 1;
    unsigned short space = mask + read - write;
    struct rte_ioat_generic_hw_desc *desc;

    if (space == 0) {
        ioat->xstats.enqueue_failed++;
        return 0;
    }

    ioat->next_write = write + 1;
    write &= mask;

    desc = &ioat->desc_ring[write];
    desc->size = length;
    /* set descriptor write-back every 16th descriptor */
    desc->u.control_raw = (uint32_t)((op << IOAT_CMD_OP_SHIFT) |
            (!(write & 0xF) << IOAT_COMP_UPDATE_SHIFT));
    desc->src_addr = src;
    desc->dest_addr = dst;

    if (!ioat->hdls_disable)
        ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
                (int64_t)src_hdl);
    rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);

    ioat->xstats.enqueued++;
    return 1;
}
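/*
 * Illustrative note, not part of the original header: !(write & 0xF) is 1
 * only when the slot index is a multiple of 16, so a completion update is
 * requested roughly once per 16 descriptors rather than after every
 * operation.  __ioat_perform_ops() additionally forces the bit on the last
 * descriptor of a batch so the tail of the ring is always reported.
 */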
static __rte_always_inline int
__ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
        unsigned int length, uintptr_t dst_hdl)
{
    static const uintptr_t null_hdl;

    return __ioat_write_desc(dev_id, ioat_op_fill, pattern, dst, length,
            null_hdl, dst_hdl);
}

/* Enqueue a copy operation onto the ioat device */
static __rte_always_inline int
__ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
        unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
    return __ioat_write_desc(dev_id, ioat_op_copy, src, dst, length,
            src_hdl, dst_hdl);
}
/* Add a fence to the last written descriptor */
static __rte_always_inline int
__ioat_fence(int dev_id)
{
    struct rte_ioat_rawdev *ioat =
            (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
    unsigned short write = ioat->next_write;
    unsigned short mask = ioat->ring_size - 1;
    struct rte_ioat_generic_hw_desc *desc;

    write = (write - 1) & mask;
    desc = &ioat->desc_ring[write];

    desc->u.control.fence = 1;
    return 0;
}
/* Trigger hardware to begin performing the enqueued operations */
static __rte_always_inline int
__ioat_perform_ops(int dev_id)
{
    struct rte_ioat_rawdev *ioat =
            (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;

    ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
            .control.completion_update = 1;
    rte_compiler_barrier();
    *ioat->doorbell = ioat->next_write;
    ioat->xstats.started = ioat->xstats.enqueued;

    return 0;
}
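/*
 * Illustrative note, not part of the original header: the compiler barrier
 * keeps the descriptor stores above from being reordered past the doorbell
 * write; once *ioat->doorbell is updated to next_write, the device may
 * fetch and execute every descriptor up to that index.
 */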
/* Returns the ring index of the last completed operation */
static __rte_always_inline int
__ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
{
    uint64_t status = ioat->status;

    /* lower bits of the status word hold the channel state; report the
     * suspended/halted/armed states as an error to the caller */
    *error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
    return (status - ioat->ring_addr) >> 6;
}
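/*
 * Illustrative note, not part of the original header: the low bits of the
 * status word carry the channel-state flags checked above, while the rest
 * of the word is the bus address of the most recently completed descriptor.
 * Each rte_ioat_generic_hw_desc is 64 bytes, so (status - ring_addr) >> 6
 * turns that byte offset into a ring index, e.g. ring_addr + 0x180 -> 6.
 */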
/* Returns details of operations that have been completed */
static __rte_always_inline int
__ioat_completed_ops(int dev_id, uint8_t max_copies,
        uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
    struct rte_ioat_rawdev *ioat =
            (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
    unsigned short mask = (ioat->ring_size - 1);
    unsigned short read = ioat->next_read;
    unsigned short end_read, count;
    int error;
    int i = 0;

    end_read = (__ioat_get_last_completed(ioat, &error) + 1) & mask;
    count = (end_read - (read & mask)) & mask;

    if (error) {
        rte_errno = EIO;
        return -1;
    }

    if (ioat->hdls_disable) {
        read += count;
        goto end;
    }

    if (count > max_copies)
        count = max_copies;

    for (; i < count - 1; i += 2, read += 2) {
        __m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
        __m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);

        _mm_storeu_si128((__m128i *)&src_hdls[i],
                _mm_unpacklo_epi64(hdls0, hdls1));
        _mm_storeu_si128((__m128i *)&dst_hdls[i],
                _mm_unpackhi_epi64(hdls0, hdls1));
    }
    for (; i < count; i++, read++) {
        uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
        src_hdls[i] = hdls[0];
        dst_hdls[i] = hdls[1];
    }

end:
    ioat->next_read = read;
    ioat->xstats.completed += count;
    return count;
}
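/*
 * Illustrative note, not part of the original header: each hdls[] slot is
 * one __m128i holding the source handle in its low 64 bits and the
 * destination handle in its high 64 bits (see _mm_set_epi64x(dst_hdl,
 * src_hdl) in __ioat_write_desc).  For two consecutive slots,
 * _mm_unpacklo_epi64 therefore gathers the two source handles and
 * _mm_unpackhi_epi64 the two destination handles, matching what the
 * scalar tail loop does one element at a time.
 */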
static inline uint16_t
rte_ioat_burst_capacity(int dev_id)
{
    enum rte_ioat_dev_type *type =
            (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;

    if (*type == RTE_IDXD_DEV)
        return __idxd_burst_capacity(dev_id);
    else
        return __ioat_burst_capacity(dev_id);
}
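/*
 * Illustrative note, not part of the original header: the value returned
 * above is the number of operations that can still be enqueued without
 * failure, so an application can check it once before attempting a whole
 * burst of enqueues instead of testing each enqueue's return value.
 */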
/* Enqueue a fill operation onto the device ring */
static inline int
rte_ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
        unsigned int len, uintptr_t dst_hdl)
{
    enum rte_ioat_dev_type *type =
            (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;

    if (*type == RTE_IDXD_DEV)
        return __idxd_enqueue_fill(dev_id, pattern, dst, len, dst_hdl);
    else
        return __ioat_enqueue_fill(dev_id, pattern, dst, len, dst_hdl);
}
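/*
 * Illustrative note, not part of the original header: the 64-bit 'pattern'
 * is repeated across the 'len' bytes at the destination address, so a
 * pattern of 0 zeroes the target region.
 */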
/* Enqueue a copy operation onto the device ring */
static inline int
rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
        unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
    enum rte_ioat_dev_type *type =
            (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;

    if (*type == RTE_IDXD_DEV)
        return __idxd_enqueue_copy(dev_id, src, dst, length,
                src_hdl, dst_hdl);
    else
        return __ioat_enqueue_copy(dev_id, src, dst, length,
                src_hdl, dst_hdl);
}
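/*
 * Illustrative note, not part of the original header: src_hdl and dst_hdl
 * are opaque handle values stored alongside the descriptor and handed back
 * by rte_ioat_completed_ops(); applications commonly pass mbuf pointers
 * here so that completed buffers can be freed or forwarded afterwards.
 */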
/* Add a fence between operations */
static inline int
rte_ioat_fence(int dev_id)
{
    enum rte_ioat_dev_type *type =
            (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;

    if (*type == RTE_IDXD_DEV)
        return __idxd_fence(dev_id);
    else
        return __ioat_fence(dev_id);
}
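/*
 * Illustrative note, not part of the original header: the fence bit is set
 * on the most recently enqueued descriptor, preventing later operations
 * from starting until it has completed.  This is needed, for example, when
 * a subsequent copy reads from the destination of an earlier one.
 */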
/* Trigger hardware to begin performing the enqueued operations */
static inline int
rte_ioat_perform_ops(int dev_id)
{
    enum rte_ioat_dev_type *type =
            (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;

    if (*type == RTE_IDXD_DEV)
        return __idxd_perform_ops(dev_id);
    else
        return __ioat_perform_ops(dev_id);
}
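/*
 * Usage sketch, not part of the original header: enqueue a burst of copies
 * and ring the doorbell once for the whole batch.  The address arrays,
 * 'len' and 'n' are hypothetical caller state; the physical addresses are
 * assumed to have been obtained already, e.g. with rte_mem_virt2iova() or
 * rte_pktmbuf_iova().
 */
static inline int
example_ioat_copy_burst(int dev_id, const phys_addr_t *src,
        const phys_addr_t *dst, unsigned int len, unsigned int n)
{
    unsigned int i;

    if (rte_ioat_burst_capacity(dev_id) < n)
        return -1;  /* not enough free descriptors for the full burst */

    for (i = 0; i < n; i++)
        /* the handles (here just the index) come back from
         * rte_ioat_completed_ops() once the copy has finished */
        if (rte_ioat_enqueue_copy(dev_id, src[i], dst[i], len,
                (uintptr_t)i, (uintptr_t)i) != 1)
            return -1;

    /* submit the whole batch to hardware with one doorbell write */
    return rte_ioat_perform_ops(dev_id);
}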
/* Return details of operations that have completed */
static inline int
rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
        uint32_t *status, uint8_t *num_unsuccessful,
        uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
    enum rte_ioat_dev_type *type =
            (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
    uint8_t tmp; /* used so the callees need not check for NULL */

    if (num_unsuccessful == NULL)
        num_unsuccessful = &tmp;

    *num_unsuccessful = 0;
    if (*type == RTE_IDXD_DEV)
        return __idxd_completed_ops(dev_id, max_copies, status,
                num_unsuccessful, src_hdls, dst_hdls);
    else
        return __ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
}
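/*
 * Usage sketch, not part of the original header: poll for completions and
 * recover the handles passed at enqueue time.  EXAMPLE_BURST_SIZE is a
 * hypothetical application constant; a negative return indicates a device
 * error, and num_unsuccessful/status[] report per-operation failures on
 * devices that support them (the IDXD path in this header).
 */
#define EXAMPLE_BURST_SIZE 32

static inline void
example_ioat_gather_completions(int dev_id)
{
    uintptr_t src_hdls[EXAMPLE_BURST_SIZE], dst_hdls[EXAMPLE_BURST_SIZE];
    uint32_t status[EXAMPLE_BURST_SIZE];
    uint8_t num_unsuccessful = 0;
    int i, n;

    n = rte_ioat_completed_ops(dev_id, EXAMPLE_BURST_SIZE, status,
            &num_unsuccessful, src_hdls, dst_hdls);
    if (n < 0)
        return; /* device reported an error state */

    for (i = 0; i < n; i++) {
        /* e.g. free or forward the mbufs whose pointers were used
         * as src/dst handles at enqueue time */
        (void)src_hdls[i];
        (void)dst_hdls[i];
    }
}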
/* Deprecated wrappers, kept for backward compatibility */
static inline void
__rte_deprecated_msg("use rte_ioat_perform_ops() instead")
rte_ioat_do_copies(int dev_id) { __ioat_perform_ops(dev_id); }

static inline int
__rte_deprecated_msg("use rte_ioat_completed_ops() instead")
rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
        uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
    return __ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
}

#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */