#ifndef _RTE_IOAT_RAWDEV_FNS_H_
#define _RTE_IOAT_RAWDEV_FNS_H_

#include <x86intrin.h>
#include <rte_rawdev.h>
#include <rte_memzone.h>
#include <rte_prefetch.h>
/* Structure representing a device descriptor */
struct rte_ioat_generic_hw_desc {
        uint32_t size;
        union {
                uint32_t control_raw;
                struct {
                        uint32_t int_enable: 1;
                        uint32_t src_snoop_disable: 1;
                        uint32_t dest_snoop_disable: 1;
                        uint32_t completion_update: 1;
                        uint32_t fence: 1;
                        uint32_t reserved2: 1;
                        uint32_t src_page_break: 1;
                        uint32_t dest_page_break: 1;
                        uint32_t bundle: 1;
                        uint32_t dest_dca: 1;
                        uint32_t hint: 1;
                        uint32_t reserved: 13;
                        uint32_t op: 8;
                } control;
        } u;
        uint64_t src_addr;
        uint64_t dest_addr;
        uint64_t next;
        uint64_t op_specific[4];
};
/*
 * Identify the data path to use.
 * Must be the first field of both rte_ioat_rawdev and rte_idxd_rawdev.
 */
enum rte_ioat_dev_type {
        RTE_IOAT_DEV,
        RTE_IDXD_DEV,
};
/* some statistics for tracking, if added/changed update xstats fns */
struct rte_ioat_xstats {
        uint64_t enqueue_failed;
        uint64_t enqueued;
        uint64_t started;
        uint64_t completed;
};
/* Structure representing an IOAT device instance */
struct rte_ioat_rawdev {
        /* common fields at the top - see enum rte_ioat_dev_type */
        enum rte_ioat_dev_type type;
        struct rte_ioat_xstats xstats;

        struct rte_rawdev *rawdev;
        const struct rte_memzone *mz;
        const struct rte_memzone *desc_mz;

        volatile uint16_t *doorbell __rte_cache_aligned;
        phys_addr_t status_addr;
        phys_addr_t ring_addr;

        unsigned short ring_size;
        bool hdls_disable;
        struct rte_ioat_generic_hw_desc *desc_ring;
        __m128i *hdls; /* completion handles for returning to user */

        unsigned short next_read;
        unsigned short next_write;

        /* to report completions, the device will write status back here */
        volatile uint64_t status __rte_cache_aligned;

        /* pointer to the register bar */
        volatile struct rte_ioat_registers *regs;
};
#define RTE_IOAT_CHANSTS_IDLE           0x1
#define RTE_IOAT_CHANSTS_SUSPENDED      0x2
#define RTE_IOAT_CHANSTS_HALTED         0x3
#define RTE_IOAT_CHANSTS_ARMED          0x4
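/*
 * Note (informal sketch of how the values above are used): the hardware
 * writes a status value back to rte_ioat_rawdev.status; its low bits hold
 * one of the channel states above, while the remaining bits hold the address
 * of the last completed descriptor. __ioat_get_last_completed() below flags
 * any state other than active or idle as an error, and converts the
 * descriptor address into a ring index by subtracting ring_addr and dividing
 * by the 64-byte descriptor size.
 */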
/* defines used in the data path for interacting with IDXD hardware */
#define IDXD_CMD_OP_SHIFT 24
enum rte_idxd_ops {
        idxd_op_nop = 0,
        idxd_op_batch,
        idxd_op_drain,
        idxd_op_memmove,
        idxd_op_fill
};
#define IDXD_FLAG_FENCE                 (1 << 0)
#define IDXD_FLAG_COMPLETION_ADDR_VALID (1 << 2)
#define IDXD_FLAG_REQUEST_COMPLETION    (1 << 3)
#define IDXD_FLAG_CACHE_CONTROL         (1 << 8)
#define IOAT_COMP_UPDATE_SHIFT  3
#define IOAT_CMD_OP_SHIFT       24
enum rte_ioat_ops {
        ioat_op_copy = 0,       /* Standard DMA Operation */
        ioat_op_fill            /* Block Fill */
};
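/*
 * Example (sketch): __ioat_write_desc() below composes the descriptor control
 * word from these values, e.g. a copy descriptor with a completion update
 * requested would use:
 *
 *      uint32_t ctrl = (ioat_op_copy << IOAT_CMD_OP_SHIFT) |
 *                      (1 << IOAT_COMP_UPDATE_SHIFT);
 */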
/*
 * Hardware descriptor used by DSA hardware, for both bursts and
 * for individual operations.
 */
struct rte_idxd_hw_desc {
        uint32_t pasid;
        uint32_t op_flags;
        rte_iova_t completion;

        RTE_STD_C11
        union {
                rte_iova_t src;      /* source address for copy ops etc. */
                rte_iova_t desc_addr; /* descriptor pointer for batch */
        };
        rte_iova_t dst;

        uint32_t size;    /* length of data for op, or batch size */

        /* 28 bytes of padding here */
} __rte_aligned(64);

/* Completion record structure written back by DSA */
struct rte_idxd_completion {
        uint8_t status;
        uint8_t result;
        /* 16-bits pad here */
        uint32_t completed_size; /* data length, or descriptors for batch */

        rte_iova_t fault_address;
        uint32_t invalid_flags;
} __rte_aligned(32);
#define BATCH_SIZE 64

/*
 * Structure used inside the driver for building up and submitting
 * a batch of operations to the DSA hardware.
 */
struct rte_idxd_desc_batch {
        struct rte_idxd_completion comp; /* the completion record for batch */

        uint16_t submitted;
        uint16_t op_count;
        uint16_t hdl_end;

        struct rte_idxd_hw_desc batch_desc;

        /* batches must always have 2 descriptors, so put a null at the start */
        struct rte_idxd_hw_desc null_desc;
        struct rte_idxd_hw_desc ops[BATCH_SIZE];
};

/*
 * Structure used to save the "handles" provided by the user to be
 * returned to the user on job completion.
 */
struct rte_idxd_user_hdl {
        uint64_t src;
        uint64_t dst;
};
/* Structure representing an IDXD device instance */
struct rte_idxd_rawdev {
        enum rte_ioat_dev_type type;
        struct rte_ioat_xstats xstats;

        void *portal; /* address to write the batch descriptor */

        /* counters to track the batches and the individual op handles */
        uint16_t batch_ring_sz;  /* size of batch ring */
        uint16_t hdl_ring_sz;    /* size of the user hdl ring */

        uint16_t next_batch;     /* where the next batch of ops is built */
        uint16_t next_completed; /* batch where we read completions */
        uint16_t next_ret_hdl;   /* the next user hdl to return */
        uint16_t last_completed_hdl; /* the last user hdl that has completed */
        uint16_t next_free_hdl;  /* where the handle for next op will go */
        uint16_t hdls_disable;   /* disable tracking completion handles */

        struct rte_idxd_user_hdl *hdl_ring;
        struct rte_idxd_desc_batch *batch_ring;
};
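/*
 * Informal sketch of the IDXD data path implemented below: enqueue calls
 * append descriptors to the batch at next_batch and save the user handles in
 * hdl_ring; __idxd_perform_ops() submits the whole batch to the device portal
 * with a single movdir64b write; __idxd_completed_ops() then walks the batch
 * ring checking each batch's completion record and returns the saved handles
 * for the operations that have finished.
 */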
/*
 * Write a copy or fill descriptor onto the ioat device ring
 */
static __rte_always_inline int
__ioat_write_desc(int dev_id, uint32_t op, uint64_t src, phys_addr_t dst,
                unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
        unsigned short read = ioat->next_read;
        unsigned short write = ioat->next_write;
        unsigned short mask = ioat->ring_size - 1;
        unsigned short space = mask + read - write;
        struct rte_ioat_generic_hw_desc *desc;

        if (space == 0) {
                ioat->xstats.enqueue_failed++;
                return 0;
        }

        ioat->next_write = write + 1;
        write &= mask;

        desc = &ioat->desc_ring[write];
        desc->size = length;
        /* set descriptor write-back every 16th descriptor */
        desc->u.control_raw = (uint32_t)((op << IOAT_CMD_OP_SHIFT) |
                        (!(write & 0xF) << IOAT_COMP_UPDATE_SHIFT));
        desc->src_addr = src;
        desc->dest_addr = dst;

        if (!ioat->hdls_disable)
                ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
                                        (int64_t)src_hdl);
        rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);

        ioat->xstats.enqueued++;
        return 1;
}
/*
 * Enqueue a fill operation onto the ioat device
 */
static __rte_always_inline int
__ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
                unsigned int length, uintptr_t dst_hdl)
{
        static const uintptr_t null_hdl;

        return __ioat_write_desc(dev_id, ioat_op_fill, pattern, dst, length,
                        null_hdl, dst_hdl);
}

/*
 * Enqueue a copy operation onto the ioat device
 */
static __rte_always_inline int
__ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
                unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
        return __ioat_write_desc(dev_id, ioat_op_copy, src, dst, length,
                        src_hdl, dst_hdl);
}
/*
 * Add a fence to the last written descriptor
 */
static __rte_always_inline int
__ioat_fence(int dev_id)
{
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
        unsigned short write = ioat->next_write;
        unsigned short mask = ioat->ring_size - 1;
        struct rte_ioat_generic_hw_desc *desc;

        write = (write - 1) & mask;
        desc = &ioat->desc_ring[write];

        desc->u.control.fence = 1;
        return 0;
}
/*
 * Trigger hardware to begin performing enqueued operations
 */
static __rte_always_inline void
__ioat_perform_ops(int dev_id)
{
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
        ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
                        .control.completion_update = 1;
        rte_compiler_barrier();
        *ioat->doorbell = ioat->next_write;
        ioat->xstats.started = ioat->xstats.enqueued;
}
/*
 * Returns the index of the last completed operation
 */
static __rte_always_inline int
__ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
{
        uint64_t status = ioat->status;

        /* lower 3 bits indicate "transfer status": active, idle, halted.
         * We can ignore bit 0.
         */
        *error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
        return (status - ioat->ring_addr) >> 6;
}
/*
 * Returns details of operations that have been completed
 */
static __rte_always_inline int
__ioat_completed_ops(int dev_id, uint8_t max_copies,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
        unsigned short mask = (ioat->ring_size - 1);
        unsigned short read = ioat->next_read;
        unsigned short end_read, count;
        int error;
        int i = 0;

        end_read = (__ioat_get_last_completed(ioat, &error) + 1) & mask;
        count = (end_read - (read & mask)) & mask;

        if (error) {
                rte_errno = EIO;
                return -1;
        }

        if (ioat->hdls_disable) {
                read += count;
                goto end;
        }

        if (count > max_copies)
                count = max_copies;

        for (; i < count - 1; i += 2, read += 2) {
                __m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
                __m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);

                _mm_storeu_si128((__m128i *)&src_hdls[i],
                                _mm_unpacklo_epi64(hdls0, hdls1));
                _mm_storeu_si128((__m128i *)&dst_hdls[i],
                                _mm_unpackhi_epi64(hdls0, hdls1));
        }
        for (; i < count; i++, read++) {
                uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
                src_hdls[i] = hdls[0];
                dst_hdls[i] = hdls[1];
        }

end:
        ioat->next_read = read;
        ioat->xstats.completed += count;
        return count;
}
/*
 * Write a descriptor and its user handle into the current IDXD batch
 */
static __rte_always_inline int
__idxd_write_desc(int dev_id, const struct rte_idxd_hw_desc *desc,
                const struct rte_idxd_user_hdl *hdl)
{
        struct rte_idxd_rawdev *idxd =
                        (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
        struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];

        /* check for room in the handle ring */
        if (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl)
                goto failed;

        /* check for space in the current batch */
        if (b->op_count >= BATCH_SIZE)
                goto failed;

        /* check that the current batch can actually be used */
        if (b->submitted)
                goto failed;

        /* write the descriptor */
        b->ops[b->op_count++] = *desc;

        /* store the completion details */
        if (!idxd->hdls_disable)
                idxd->hdl_ring[idxd->next_free_hdl] = *hdl;
        if (++idxd->next_free_hdl == idxd->hdl_ring_sz)
                idxd->next_free_hdl = 0;

        idxd->xstats.enqueued++;
        return 1;

failed:
        idxd->xstats.enqueue_failed++;
        rte_errno = ENOSPC;
        return 0;
}
static __rte_always_inline int
__idxd_enqueue_fill(int dev_id, uint64_t pattern, rte_iova_t dst,
                unsigned int length, uintptr_t dst_hdl)
{
        const struct rte_idxd_hw_desc desc = {
                        .op_flags = (idxd_op_fill << IDXD_CMD_OP_SHIFT) |
                                IDXD_FLAG_CACHE_CONTROL,
                        .src = pattern,
                        .dst = dst,
                        .size = length
        };
        const struct rte_idxd_user_hdl hdl = {
                        .dst = dst_hdl
        };
        return __idxd_write_desc(dev_id, &desc, &hdl);
}

static __rte_always_inline int
__idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
                unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
        const struct rte_idxd_hw_desc desc = {
                        .op_flags = (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |
                                IDXD_FLAG_CACHE_CONTROL,
                        .src = src,
                        .dst = dst,
                        .size = length
        };
        const struct rte_idxd_user_hdl hdl = {
                        .src = src_hdl,
                        .dst = dst_hdl
        };
        return __idxd_write_desc(dev_id, &desc, &hdl);
}
static __rte_always_inline int
__idxd_fence(int dev_id)
{
        /* only the op_flags field needs filling - zero src, dst and length */
        const struct rte_idxd_hw_desc fence = {
                        .op_flags = IDXD_FLAG_FENCE
        };
        static const struct rte_idxd_user_hdl null_hdl;
        return __idxd_write_desc(dev_id, &fence, &null_hdl);
}
static __rte_always_inline void
__idxd_movdir64b(volatile void *dst, const void *src)
{
        /* the .byte sequence encodes the movdir64b instruction: a 64-byte
         * direct store from the memory at src to the address held in dst
         */
        asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
                        :
                        : "a" (dst), "d" (src));
}
static __rte_always_inline void
__idxd_perform_ops(int dev_id)
{
        struct rte_idxd_rawdev *idxd =
                        (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
        struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];

        if (b->submitted || b->op_count == 0)
                return;
        b->hdl_end = idxd->next_free_hdl;
        b->comp.status = 0;
        b->submitted = 1;
        b->batch_desc.size = b->op_count + 1;
        __idxd_movdir64b(idxd->portal, &b->batch_desc);

        if (++idxd->next_batch == idxd->batch_ring_sz)
                idxd->next_batch = 0;
        idxd->xstats.started = idxd->xstats.enqueued;
}
static __rte_always_inline int
__idxd_completed_ops(int dev_id, uint8_t max_ops,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
        struct rte_idxd_rawdev *idxd =
                        (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
        struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_completed];
        uint16_t h_idx = idxd->next_ret_hdl;
        int n = 0;

        while (b->submitted && b->comp.status != 0) {
                idxd->last_completed_hdl = b->hdl_end;
                b->submitted = 0;
                b->op_count = 0;
                if (++idxd->next_completed == idxd->batch_ring_sz)
                        idxd->next_completed = 0;
                b = &idxd->batch_ring[idxd->next_completed];
        }

        if (!idxd->hdls_disable)
                for (n = 0; n < max_ops && h_idx != idxd->last_completed_hdl; n++) {
                        src_hdls[n] = idxd->hdl_ring[h_idx].src;
                        dst_hdls[n] = idxd->hdl_ring[h_idx].dst;
                        if (++h_idx == idxd->hdl_ring_sz)
                                h_idx = 0;
                }
        else
                while (h_idx != idxd->last_completed_hdl) {
                        n++;
                        if (++h_idx == idxd->hdl_ring_sz)
                                h_idx = 0;
                }

        idxd->next_ret_hdl = h_idx;

        idxd->xstats.completed += n;
        return n;
}
/*
 * Enqueue a fill operation onto the device
 */
static inline int
__rte_experimental
rte_ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
                unsigned int len, uintptr_t dst_hdl)
{
        enum rte_ioat_dev_type *type =
                        (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
        if (*type == RTE_IDXD_DEV)
                return __idxd_enqueue_fill(dev_id, pattern, dst, len, dst_hdl);
        else
                return __ioat_enqueue_fill(dev_id, pattern, dst, len, dst_hdl);
}
/*
 * Enqueue a copy operation onto the device
 */
static inline int
__rte_experimental
rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
                unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
        enum rte_ioat_dev_type *type =
                        (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
        if (*type == RTE_IDXD_DEV)
                return __idxd_enqueue_copy(dev_id, src, dst, length,
                                src_hdl, dst_hdl);
        else
                return __ioat_enqueue_copy(dev_id, src, dst, length,
                                src_hdl, dst_hdl);
}
/*
 * Add a fence between operations
 */
static inline int
__rte_experimental
rte_ioat_fence(int dev_id)
{
        enum rte_ioat_dev_type *type =
                        (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
        if (*type == RTE_IDXD_DEV)
                return __idxd_fence(dev_id);
        else
                return __ioat_fence(dev_id);
}
/*
 * Trigger hardware to begin performing enqueued operations
 */
static inline void
__rte_experimental
rte_ioat_perform_ops(int dev_id)
{
        enum rte_ioat_dev_type *type =
                        (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
        if (*type == RTE_IDXD_DEV)
                return __idxd_perform_ops(dev_id);
        else
                return __ioat_perform_ops(dev_id);
}
/*
 * Returns details of operations that have been completed
 */
static inline int
__rte_experimental
rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
        enum rte_ioat_dev_type *type =
                        (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
        if (*type == RTE_IDXD_DEV)
                return __idxd_completed_ops(dev_id, max_copies,
                                src_hdls, dst_hdls);
        else
                return __ioat_completed_ops(dev_id, max_copies,
                                src_hdls, dst_hdls);
}
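/*
 * Usage sketch (illustrative only; assumes dev_id identifies an ioat/idxd
 * rawdev that has already been configured and started, and that the buffers
 * are DMA-able with known IOVAs):
 *
 *      if (rte_ioat_enqueue_copy(dev_id, src_iova, dst_iova, len,
 *                      (uintptr_t)src_buf, (uintptr_t)dst_buf) != 1)
 *              // ring or batch full - retry later
 *      rte_ioat_perform_ops(dev_id);   // kick off the enqueued work
 *      ...
 *      uintptr_t srcs[32], dsts[32];
 *      int done = rte_ioat_completed_ops(dev_id, 32, srcs, dsts);
 *      // srcs[i]/dsts[i] hold the handles passed at enqueue time
 */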
static inline void
__rte_deprecated_msg("use rte_ioat_perform_ops() instead")
rte_ioat_do_copies(int dev_id) { __ioat_perform_ops(dev_id); }

static inline int
__rte_deprecated_msg("use rte_ioat_completed_ops() instead")
rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
        return __ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
}

#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */