#ifndef _RTE_IDXD_RAWDEV_FNS_H_
#define _RTE_IDXD_RAWDEV_FNS_H_

/*
 * Inline data-path functions for DSA ("idxd") hardware. This header is not
 * meant to be included directly: the public header "rte_ioat_rawdev.h" pulls
 * it in and supplies the remaining type and API definitions used below.
 */

#include <stdint.h>
#include <x86intrin.h>
#include <rte_errno.h>

/* opcode-field shift and descriptor flag bits used when building descriptors */
#define IDXD_CMD_OP_SHIFT 24
enum rte_idxd_ops {
	idxd_op_nop = 0,
	idxd_op_batch,
	idxd_op_drain,
	idxd_op_memmove,
	idxd_op_fill
};

#define IDXD_FLAG_FENCE                 (1 << 0)
#define IDXD_FLAG_COMPLETION_ADDR_VALID (1 << 2)
#define IDXD_FLAG_REQUEST_COMPLETION    (1 << 3)
#define IDXD_FLAG_CACHE_CONTROL         (1 << 8)

#define IOAT_COMP_UPDATE_SHIFT 3
#define IOAT_CMD_OP_SHIFT 24

/* hardware descriptor used by DSA, for both batches and individual ops */
struct rte_idxd_hw_desc {
	uint32_t pasid;
	uint32_t op_flags;
	rte_iova_t completion;

	union {
		rte_iova_t src;		/* source address for copy/fill ops */
		rte_iova_t desc_addr;	/* descriptor-ring address for a batch */
	};
	rte_iova_t dst;

	uint32_t size;			/* data length for the op, or batch size */
	/* ... */
	uint16_t __reserved[13];	/* remaining 26 bytes are reserved */
} __rte_aligned(64);

/* completion record written back by the hardware */
struct rte_idxd_completion {
	uint8_t status;
	/* ... */
	uint32_t completed_size;	/* data length, or descriptor count for a batch */
	/* ... */
	uint32_t invalid_flags;
};

/* "handles" saved at enqueue time and returned to the app on completion */
struct rte_idxd_user_hdl {
	uint64_t src;
	uint64_t dst;
};
/* @internal per-device state for an IDXD rawdev instance */
struct rte_idxd_rawdev {
	enum rte_ioat_dev_type type;
	struct rte_ioat_xstats xstats;

	void *portal;			/* address to write the batch descriptor to */

	struct rte_ioat_rawdev_config cfg;
	rte_iova_t desc_iova;		/* base address of desc ring, needed for completions */

	/* counters to track the batches */
	unsigned short max_batches;
	unsigned short batch_idx_read;
	unsigned short batch_idx_write;
	unsigned short *batch_idx_ring;	/* store where each batch ends */

	/* track descriptors and handles */
	unsigned short desc_ring_mask;
	unsigned short hdls_avail;	/* handles for ops completed */
	unsigned short hdls_read;	/* the read pointer for hdls/desc rings */
	unsigned short batch_start;	/* start + size == write pointer for hdls/desc */
	unsigned short batch_size;

	struct rte_idxd_hw_desc *desc_ring;
	struct rte_idxd_user_hdl *hdl_ring;
	/* flags to indicate handle validity; upper 8 bits hold the error code, if any */
	uint16_t *hdl_ring_flags;
};
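/*
 * Ring bookkeeping: the descriptor and handle rings have a power-of-two size,
 * so desc_ring_mask is ring_size - 1 and indices wrap by masking (e.g. a
 * 1024-entry ring gives a mask of 1023). batch_start + batch_size is the
 * current write position, hdls_read is the read position, and batch_idx_ring
 * records the index at which each submitted batch ends so that completions
 * can be matched back to the user handles for that batch.
 */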
#define RTE_IDXD_HDL_NORMAL     0
#define RTE_IDXD_HDL_INVALID    (1 << 0) /* no handle stored for this element */
#define RTE_IDXD_HDL_OP_FAILED  (1 << 1) /* return failure for this one */
#define RTE_IDXD_HDL_OP_SKIPPED (1 << 2) /* this op was skipped */

static __rte_always_inline uint16_t
__idxd_burst_capacity(int dev_id)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	uint16_t write_idx = idxd->batch_start + idxd->batch_size;
	uint16_t used_space, free_space;

	/* check for space in the batch ring */
	if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
			idxd->batch_idx_write + 1 == idxd->batch_idx_read)
		return 0;

	/* for descriptors, check for wrap-around on write but not read */
	if (idxd->hdls_read > write_idx)
		write_idx += idxd->desc_ring_mask + 1;
	used_space = write_idx - idxd->hdls_read;

	/* return the free space in the descriptor ring, less one slot for the
	 * batch descriptor and one for a possible null descriptor
	 */
	free_space = idxd->desc_ring_mask - used_space;
	if (free_space < 2)
		return 0;
	return free_space - 2;
}
static __rte_always_inline rte_iova_t
__desc_idx_to_iova(struct rte_idxd_rawdev *idxd, uint16_t n)
{
	return idxd->desc_iova + (n * sizeof(struct rte_idxd_hw_desc));
}
static __rte_always_inline int
__idxd_write_desc(int dev_id,
		const uint32_t op_flags,
		const rte_iova_t src,
		const rte_iova_t dst,
		const uint32_t size,
		const struct rte_idxd_user_hdl *hdl)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	uint16_t write_idx = idxd->batch_start + idxd->batch_size;
	uint16_t mask = idxd->desc_ring_mask;

	/* first check batch ring space, then desc ring space */
	if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
			idxd->batch_idx_write + 1 == idxd->batch_idx_read)
		goto failed;
	if (((write_idx + 2) & mask) == idxd->hdls_read ||
			((write_idx + 1) & mask) == idxd->hdls_read)
		goto failed;

	/* write desc and handle. Note, descriptors don't wrap */
	idxd->desc_ring[write_idx].pasid = 0;
	idxd->desc_ring[write_idx].op_flags = op_flags | IDXD_FLAG_COMPLETION_ADDR_VALID;
	idxd->desc_ring[write_idx].completion = __desc_idx_to_iova(idxd, write_idx & mask);
	idxd->desc_ring[write_idx].src = src;
	idxd->desc_ring[write_idx].dst = dst;
	idxd->desc_ring[write_idx].size = size;

	if (hdl == NULL)
		idxd->hdl_ring_flags[write_idx & mask] = RTE_IDXD_HDL_INVALID;
	else
		idxd->hdl_ring[write_idx & mask] = *hdl;
	idxd->batch_size++;

	idxd->xstats.enqueued++;

	rte_prefetch0_write(&idxd->desc_ring[write_idx + 1]);
	return 1;

failed:
	idxd->xstats.enqueue_failed++;
	rte_errno = ENOSPC;
	return 0;
}
static __rte_always_inline int
__idxd_enqueue_fill(int dev_id, uint64_t pattern, rte_iova_t dst,
		unsigned int length, uintptr_t dst_hdl)
{
	const struct rte_idxd_user_hdl hdl = {
		.dst = dst_hdl
	};
	return __idxd_write_desc(dev_id,
			(idxd_op_fill << IDXD_CMD_OP_SHIFT) | IDXD_FLAG_CACHE_CONTROL,
			pattern, dst, length, &hdl);
}

static __rte_always_inline int
__idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	const struct rte_idxd_user_hdl hdl = {
		.src = src_hdl,
		.dst = dst_hdl
	};
	return __idxd_write_desc(dev_id,
			(idxd_op_memmove << IDXD_CMD_OP_SHIFT) | IDXD_FLAG_CACHE_CONTROL,
			src, dst, length, &hdl);
}
static __rte_always_inline int
__idxd_enqueue_nop(int dev_id)
{
	/* only the op field needs filling - zero src, dst and length */
	return __idxd_write_desc(dev_id, idxd_op_nop << IDXD_CMD_OP_SHIFT,
			0, 0, 0, NULL);
}

static __rte_always_inline int
__idxd_fence(int dev_id)
{
	/* only the fence flag needs setting - zero src, dst and length */
	return __idxd_write_desc(dev_id, IDXD_FLAG_FENCE, 0, 0, 0, NULL);
}
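/*
 * Illustrative sketch (not part of the original header): one way the fence
 * above could be used, assuming the usual rte_ioat_fence() semantics that
 * operations enqueued after a fence are ordered after those enqueued before
 * it. The guard macro, function name and parameters are hypothetical.
 */
#ifdef IDXD_FNS_USAGE_SKETCH
static inline int
example_dependent_copies(int dev_id, rte_iova_t a, rte_iova_t b, rte_iova_t c,
		unsigned int len)
{
	/* copy a -> b, then (after a fence) copy b -> c, all in one batch */
	if (__idxd_enqueue_copy(dev_id, a, b, len, 0, 0) != 1)
		return -1;
	if (__idxd_fence(dev_id) != 1)
		return -1;
	if (__idxd_enqueue_copy(dev_id, b, c, len, 0, 0) != 1)
		return -1;
	return 0;	/* caller still needs to call __idxd_perform_ops() */
}
#endif /* IDXD_FNS_USAGE_SKETCH */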
static __rte_always_inline void
__idxd_movdir64b(volatile void *dst, const struct rte_idxd_hw_desc *src)
{
	/* MOVDIR64B: atomically move the 64-byte descriptor at *src to the
	 * device portal at dst
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
			:
			: "a" (dst), "d" (src)
			: "memory");
}
static __rte_always_inline int
__idxd_perform_ops(int dev_id)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;

	if (!idxd->cfg.no_prefetch_completions)
		rte_prefetch1(&idxd->desc_ring[idxd->batch_idx_ring[idxd->batch_idx_read]]);

	if (idxd->batch_size == 0)
		return 0;

	if (idxd->batch_size == 1)
		/* use a NOP as a null descriptor, so batch_size >= 2 */
		if (__idxd_enqueue_nop(dev_id) != 1)
			return -1;

	/* write completion beyond the last desc in the batch */
	uint16_t comp_idx = (idxd->batch_start + idxd->batch_size) & idxd->desc_ring_mask;
	*((uint64_t *)&idxd->desc_ring[comp_idx]) = 0; /* clear any stale completion status */
	idxd->hdl_ring_flags[comp_idx] = RTE_IDXD_HDL_INVALID;

	const struct rte_idxd_hw_desc batch_desc = {
			.op_flags = (idxd_op_batch << IDXD_CMD_OP_SHIFT) |
				IDXD_FLAG_COMPLETION_ADDR_VALID |
				IDXD_FLAG_REQUEST_COMPLETION,
			.desc_addr = __desc_idx_to_iova(idxd, idxd->batch_start),
			.completion = __desc_idx_to_iova(idxd, comp_idx),
			.size = idxd->batch_size,
	};

	_mm_sfence(); /* fence before writing desc to device */
	__idxd_movdir64b(idxd->portal, &batch_desc);
	idxd->xstats.started += idxd->batch_size;

	idxd->batch_start += idxd->batch_size + 1;
	idxd->batch_start &= idxd->desc_ring_mask;
	idxd->batch_size = 0;

	idxd->batch_idx_ring[idxd->batch_idx_write++] = comp_idx;
	if (idxd->batch_idx_write > idxd->max_batches)
		idxd->batch_idx_write = 0;

	return 0;
}
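/*
 * Illustrative sketch (not part of the original header): a typical submission
 * flow built from the internals above - size the burst with
 * __idxd_burst_capacity(), enqueue copies, then kick the hardware with
 * __idxd_perform_ops(). The guard macro, function name and parameters are
 * hypothetical; real applications use the public rte_ioat_* wrappers from
 * rte_ioat_rawdev.h.
 */
#ifdef IDXD_FNS_USAGE_SKETCH
static inline uint16_t
example_submit_copies(int dev_id, const rte_iova_t *srcs, const rte_iova_t *dsts,
		unsigned int len, const uintptr_t *hdls, uint16_t n_ops)
{
	uint16_t i, room = __idxd_burst_capacity(dev_id);

	if (n_ops > room)
		n_ops = room;	/* don't exceed descriptor/batch ring space */
	for (i = 0; i < n_ops; i++)
		if (__idxd_enqueue_copy(dev_id, srcs[i], dsts[i], len,
				hdls[i], hdls[i]) != 1)
			break;
	if (i > 0)
		__idxd_perform_ops(dev_id);	/* submit the batch to hardware */
	return i;	/* number of copies actually enqueued */
}
#endif /* IDXD_FNS_USAGE_SKETCH */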
static __rte_always_inline int
__idxd_completed_ops(int dev_id, uint8_t max_ops, uint32_t *status, uint8_t *num_unsuccessful,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short n, h_idx;

	/* gather up completed batches, flagging any failed or skipped ops */
	while (idxd->batch_idx_read != idxd->batch_idx_write) {
		uint16_t idx_to_chk = idxd->batch_idx_ring[idxd->batch_idx_read];
		volatile struct rte_idxd_completion *comp_to_chk =
				(struct rte_idxd_completion *)&idxd->desc_ring[idx_to_chk];
		uint8_t batch_status = comp_to_chk->status;
		if (batch_status == 0)
			break;
		comp_to_chk->status = 0;
		if (unlikely(batch_status > 1)) {
			/* error occurred somewhere in batch, start where last checked */
			uint16_t desc_count = comp_to_chk->completed_size;
			uint16_t batch_start = idxd->hdls_avail;
			uint16_t batch_end = idx_to_chk;

			if (batch_start > batch_end)
				batch_end += idxd->desc_ring_mask + 1;
			/* go through each batch entry and check its status */
			for (n = 0; n < desc_count; n++) {
				uint16_t idx = (batch_start + n) & idxd->desc_ring_mask;
				volatile struct rte_idxd_completion *comp =
						(struct rte_idxd_completion *)&idxd->desc_ring[idx];
				if (comp->status != 0 &&
						idxd->hdl_ring_flags[idx] == RTE_IDXD_HDL_NORMAL) {
					idxd->hdl_ring_flags[idx] = RTE_IDXD_HDL_OP_FAILED;
					idxd->hdl_ring_flags[idx] |= (comp->status << 8);
					comp->status = 0; /* clear error for next time */
				}
			}
			/* if batch is incomplete, mark the rest as skipped */
			for ( ; n < batch_end - batch_start; n++) {
				uint16_t idx = (batch_start + n) & idxd->desc_ring_mask;
				if (idxd->hdl_ring_flags[idx] == RTE_IDXD_HDL_NORMAL)
					idxd->hdl_ring_flags[idx] = RTE_IDXD_HDL_OP_SKIPPED;
			}
		}
		/* avail points to one after the last one written */
		idxd->hdls_avail = (idx_to_chk + 1) & idxd->desc_ring_mask;
		idxd->batch_idx_read++;
		if (idxd->batch_idx_read > idxd->max_batches)
			idxd->batch_idx_read = 0;
	}

	/* then return up to max_ops handles to the caller */
	n = 0;
	h_idx = idxd->hdls_read;
	while (h_idx != idxd->hdls_avail) {
		uint16_t flag = idxd->hdl_ring_flags[h_idx];
		if (flag != RTE_IDXD_HDL_INVALID) {
			if (!idxd->cfg.hdls_disable) {
				src_hdls[n] = idxd->hdl_ring[h_idx].src;
				dst_hdls[n] = idxd->hdl_ring[h_idx].dst;
			}
			if (unlikely(flag != RTE_IDXD_HDL_NORMAL)) {
				if (status != NULL)
					status[n] = flag == RTE_IDXD_HDL_OP_SKIPPED ?
							RTE_IOAT_OP_SKIPPED :
							/* failure case, return error code */
							idxd->hdl_ring_flags[h_idx] >> 8;
				if (num_unsuccessful != NULL)
					*num_unsuccessful += 1;
			}
			n++;
		}
		idxd->hdl_ring_flags[h_idx] = RTE_IDXD_HDL_NORMAL;
		if (++h_idx > idxd->desc_ring_mask)
			h_idx = 0;
		if (n >= max_ops)
			break;
	}

	/* skip over any remaining blank elements, e.g. batch completion */
	while (idxd->hdl_ring_flags[h_idx] == RTE_IDXD_HDL_INVALID && h_idx != idxd->hdls_avail) {
		idxd->hdl_ring_flags[h_idx] = RTE_IDXD_HDL_NORMAL;
		if (++h_idx > idxd->desc_ring_mask)
			h_idx = 0;
	}
	idxd->hdls_read = h_idx;

	idxd->xstats.completed += n;
	return n;
}
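/*
 * Illustrative sketch (not part of the original header): polling for
 * completions with __idxd_completed_ops(). Note that the status array is only
 * written for unsuccessful ops and num_unsuccessful is only incremented, so
 * the caller should zero both beforehand. The guard macro, function name and
 * array size are hypothetical.
 */
#ifdef IDXD_FNS_USAGE_SKETCH
static inline int
example_poll_completions(int dev_id)
{
	uint32_t status[32] = {0};	/* only written for failed/skipped ops */
	uint8_t errors = 0;		/* must be zeroed; only incremented below */
	uintptr_t src_hdls[32], dst_hdls[32];
	int n = __idxd_completed_ops(dev_id, 32, status, &errors,
			src_hdls, dst_hdls);

	if (errors != 0) {
		/* inspect status[] for RTE_IOAT_OP_SKIPPED or a hardware error code */
	}
	return n;	/* handles for n completed ops are now in src_hdls/dst_hdls */
}
#endif /* IDXD_FNS_USAGE_SKETCH */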
#endif /* _RTE_IDXD_RAWDEV_FNS_H_ */