DPDK 21.02.0
rte_ioat_rawdev_fns.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */
#ifndef _RTE_IOAT_RAWDEV_FNS_H_
#define _RTE_IOAT_RAWDEV_FNS_H_

#include <x86intrin.h>
#include <rte_rawdev.h>
#include <rte_memzone.h>
#include <rte_prefetch.h>

/* Hardware descriptor format used by IOAT hardware */
struct rte_ioat_generic_hw_desc {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct {
			uint32_t int_enable: 1;
			uint32_t src_snoop_disable: 1;
			uint32_t dest_snoop_disable: 1;
			uint32_t completion_update: 1;
			uint32_t fence: 1;
			uint32_t reserved2: 1;
			uint32_t src_page_break: 1;
			uint32_t dest_page_break: 1;
			uint32_t bundle: 1;
			uint32_t dest_dca: 1;
			uint32_t hint: 1;
			uint32_t reserved: 13;
			uint32_t op: 8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t dest_addr;
	uint64_t next;
	uint64_t op_specific[4];
};

/* Identifies which flavour of hardware a rawdev instance is driving */
enum rte_ioat_dev_type {
	RTE_IOAT_DEV,
	RTE_IDXD_DEV,
};

/* Extended statistics kept by the driver for each device instance */
struct rte_ioat_xstats {
	uint64_t enqueue_failed;
	uint64_t enqueued;
	uint64_t started;
	uint64_t completed;
};

/* Per-device state for an IOAT rawdev instance */
struct rte_ioat_rawdev {
	/* common fields at the top - match those in rte_idxd_rawdev */
	enum rte_ioat_dev_type type;
	struct rte_ioat_xstats xstats;

	struct rte_rawdev *rawdev;
	const struct rte_memzone *mz;
	const struct rte_memzone *desc_mz;

	volatile uint16_t *doorbell __rte_cache_aligned;
	phys_addr_t status_addr;
	phys_addr_t ring_addr;

	unsigned short ring_size;
	bool hdls_disable;
	struct rte_ioat_generic_hw_desc *desc_ring;
	__m128i *hdls; /* completion handles for returning to user */

	unsigned short next_read;
	unsigned short next_write;

	/* to report completions, the device will write status back here */
	volatile uint64_t status __rte_cache_aligned;

	/* pointer to the register bar */
	volatile struct rte_ioat_registers *regs;
};

#define RTE_IOAT_CHANSTS_IDLE 0x1
#define RTE_IOAT_CHANSTS_SUSPENDED 0x2
#define RTE_IOAT_CHANSTS_HALTED 0x3
#define RTE_IOAT_CHANSTS_ARMED 0x4

/*
 * Defines used in the data path for interacting with hardware.
 */
#define IDXD_CMD_OP_SHIFT 24
enum rte_idxd_ops {
	idxd_op_nop = 0,
	idxd_op_batch,
	idxd_op_drain,
	idxd_op_memmove,
	idxd_op_fill
};

#define IDXD_FLAG_FENCE (1 << 0)
#define IDXD_FLAG_COMPLETION_ADDR_VALID (1 << 2)
#define IDXD_FLAG_REQUEST_COMPLETION (1 << 3)
#define IDXD_FLAG_CACHE_CONTROL (1 << 8)

#define IOAT_COMP_UPDATE_SHIFT 3
#define IOAT_CMD_OP_SHIFT 24
enum rte_ioat_ops {
	ioat_op_copy = 0, /* Standard DMA Operation */
	ioat_op_fill /* Block Fill */
};

/* Hardware descriptor format used by DSA (idxd) devices */
struct rte_idxd_hw_desc {
	uint32_t pasid;
	uint32_t op_flags;
	rte_iova_t completion;

	RTE_STD_C11
	union {
		rte_iova_t src; /* source address for copy ops etc. */
		rte_iova_t desc_addr; /* descriptor pointer for batch */
	};
	rte_iova_t dst;

	uint32_t size; /* length of data for op, or batch size */

	/* 28 bytes of padding here */
} __rte_aligned(64);

/* Completion record written back by the DSA hardware */
struct rte_idxd_completion {
	uint8_t status;
	uint8_t result;
	/* 16-bits pad here */
	uint32_t completed_size; /* data length, or descriptors for batch */

	rte_iova_t fault_address;
	uint32_t invalid_flags;
} __rte_aligned(32);

#define BATCH_SIZE 64

/* Batch of operations, plus its completion record, built up by the driver */
struct rte_idxd_desc_batch {
	struct rte_idxd_completion comp; /* the completion record for batch */

	uint16_t submitted;
	uint16_t op_count;
	uint16_t hdl_end;

	struct rte_idxd_hw_desc batch_desc;

	/* batches must always have 2 descriptors, so put a null at the start */
	struct rte_idxd_hw_desc null_desc;
	struct rte_idxd_hw_desc ops[BATCH_SIZE];
};

/* User-supplied handles saved at enqueue time and returned on completion */
struct rte_idxd_user_hdl {
	uint64_t src;
	uint64_t dst;
};

/* Per-device state for a DSA (idxd) rawdev instance */
struct rte_idxd_rawdev {
	enum rte_ioat_dev_type type;
	struct rte_ioat_xstats xstats;

	void *portal; /* address to write the batch descriptor */

	/* counters to track the batches and the individual op handles */
	uint16_t batch_ring_sz; /* size of batch ring */
	uint16_t hdl_ring_sz; /* size of the user hdl ring */

	uint16_t next_batch; /* where we write descriptor ops */
	uint16_t next_completed; /* batch where we read completions */
	uint16_t next_ret_hdl; /* the next user hdl to return */
	uint16_t last_completed_hdl; /* the last user hdl that has completed */
	uint16_t next_free_hdl; /* where the handle for next op will go */
	uint16_t hdls_disable; /* disable tracking completion handles */

	struct rte_idxd_user_hdl *hdl_ring;
	struct rte_idxd_desc_batch *batch_ring;
};

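/*
 * Write a hardware descriptor (copy or fill) into the next free slot of
 * the IOAT descriptor ring and record the user handles so they can be
 * returned once the operation completes.
 */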
static __rte_always_inline int
__ioat_write_desc(int dev_id, uint32_t op, uint64_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	struct rte_ioat_rawdev *ioat =
		(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short read = ioat->next_read;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	unsigned short space = mask + read - write;
	struct rte_ioat_generic_hw_desc *desc;

	if (space == 0) {
		ioat->xstats.enqueue_failed++;
		return 0;
	}

	ioat->next_write = write + 1;
	write &= mask;

	desc = &ioat->desc_ring[write];
	desc->size = length;
	/* set descriptor write-back every 16th descriptor */
	desc->u.control_raw = (uint32_t)((op << IOAT_CMD_OP_SHIFT) |
			(!(write & 0xF) << IOAT_COMP_UPDATE_SHIFT));
	desc->src_addr = src;
	desc->dest_addr = dst;

	if (!ioat->hdls_disable)
		ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
					(int64_t)src_hdl);
	rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);

	ioat->xstats.enqueued++;
	return 1;
}

static __rte_always_inline int
__ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
		unsigned int length, uintptr_t dst_hdl)
{
	static const uintptr_t null_hdl;

	return __ioat_write_desc(dev_id, ioat_op_fill, pattern, dst, length,
			null_hdl, dst_hdl);
}

/*
 * Enqueue a copy operation onto the ioat device
 */
static __rte_always_inline int
__ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	return __ioat_write_desc(dev_id, ioat_op_copy, src, dst, length,
			src_hdl, dst_hdl);
}

/* add fence to last written descriptor */
static __rte_always_inline int
__ioat_fence(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
		(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	struct rte_ioat_generic_hw_desc *desc;

	write = (write - 1) & mask;
	desc = &ioat->desc_ring[write];

	desc->u.control.fence = 1;
	return 0;
}

/*
 * Trigger hardware to begin performing enqueued operations
 */
static __rte_always_inline void
__ioat_perform_ops(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
		(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
			.control.completion_update = 1;
	rte_compiler_barrier();
	*ioat->doorbell = ioat->next_write;
	ioat->xstats.started = ioat->xstats.enqueued;
}

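/*
 * The device writes the address of the last completed descriptor into
 * ioat->status; each descriptor is 64 bytes, so subtracting the ring base
 * address and shifting right by 6 converts that address to a ring index.
 */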
static __rte_always_inline int
__ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
{
	uint64_t status = ioat->status;

	/* lower 3 bits indicate "transfer status" : active, idle, halted.
	 * We can ignore bit 0.
	 */
	*error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
	return (status - ioat->ring_addr) >> 6;
}

/*
 * Returns details of operations that have been completed
 */
static __rte_always_inline int
__ioat_completed_ops(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_ioat_rawdev *ioat =
		(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short mask = (ioat->ring_size - 1);
	unsigned short read = ioat->next_read;
	unsigned short end_read, count;
	int error;
	int i = 0;

	end_read = (__ioat_get_last_completed(ioat, &error) + 1) & mask;
	count = (end_read - (read & mask)) & mask;

	if (error) {
		rte_errno = EIO;
		return -1;
	}

	if (ioat->hdls_disable) {
		read += count;
		goto end;
	}

	if (count > max_copies)
		count = max_copies;

	for (; i < count - 1; i += 2, read += 2) {
		__m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
		__m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);

		_mm_storeu_si128((__m128i *)&src_hdls[i],
				_mm_unpacklo_epi64(hdls0, hdls1));
		_mm_storeu_si128((__m128i *)&dst_hdls[i],
				_mm_unpackhi_epi64(hdls0, hdls1));
	}
	for (; i < count; i++, read++) {
		uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
		src_hdls[i] = hdls[0];
		dst_hdls[i] = hdls[1];
	}

end:
	ioat->next_read = read;
	ioat->xstats.completed += count;
	return count;
}

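/*
 * Append a descriptor to the batch currently being built and, unless
 * handle tracking is disabled, save the user handle for return on
 * completion.
 */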
static __rte_always_inline int
__idxd_write_desc(int dev_id, const struct rte_idxd_hw_desc *desc,
		const struct rte_idxd_user_hdl *hdl)
{
	struct rte_idxd_rawdev *idxd =
		(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];

	/* check for room in the handle ring */
	if (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl)
		goto failed;

	/* check for space in current batch */
	if (b->op_count >= BATCH_SIZE)
		goto failed;

	/* check that we can actually use the current batch */
	if (b->submitted)
		goto failed;

	/* write the descriptor */
	b->ops[b->op_count++] = *desc;

	/* store the completion details */
	if (!idxd->hdls_disable)
		idxd->hdl_ring[idxd->next_free_hdl] = *hdl;
	if (++idxd->next_free_hdl == idxd->hdl_ring_sz)
		idxd->next_free_hdl = 0;

	idxd->xstats.enqueued++;
	return 1;

failed:
	idxd->xstats.enqueue_failed++;
	rte_errno = ENOSPC;
	return 0;
}

static __rte_always_inline int
__idxd_enqueue_fill(int dev_id, uint64_t pattern, rte_iova_t dst,
		unsigned int length, uintptr_t dst_hdl)
{
	const struct rte_idxd_hw_desc desc = {
		.op_flags = (idxd_op_fill << IDXD_CMD_OP_SHIFT) |
			IDXD_FLAG_CACHE_CONTROL,
		.src = pattern,
		.dst = dst,
		.size = length
	};
	const struct rte_idxd_user_hdl hdl = {
		.dst = dst_hdl
	};
	return __idxd_write_desc(dev_id, &desc, &hdl);
}

static __rte_always_inline int
__idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	const struct rte_idxd_hw_desc desc = {
		.op_flags = (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |
			IDXD_FLAG_CACHE_CONTROL,
		.src = src,
		.dst = dst,
		.size = length
	};
	const struct rte_idxd_user_hdl hdl = {
		.src = src_hdl,
		.dst = dst_hdl
	};
	return __idxd_write_desc(dev_id, &desc, &hdl);
}

static __rte_always_inline int
__idxd_fence(int dev_id)
{
	static const struct rte_idxd_hw_desc fence = {
		.op_flags = IDXD_FLAG_FENCE
	};
	static const struct rte_idxd_user_hdl null_hdl;
	return __idxd_write_desc(dev_id, &fence, &null_hdl);
}

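/*
 * The byte sequence below encodes the MOVDIR64B instruction (66 0F 38 F8)
 * with rax holding the destination address and [rdx] the source, pushing a
 * 64-byte descriptor to the device portal as a single 64-byte store.
 */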
static __rte_always_inline void
__idxd_movdir64b(volatile void *dst, const void *src)
{
	asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
			:
			: "a" (dst), "d" (src));
}

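/*
 * Close off the batch currently being built and submit it to the device
 * through the portal; the batch size of op_count + 1 accounts for the
 * leading null descriptor.
 */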
static __rte_always_inline void
__idxd_perform_ops(int dev_id)
{
	struct rte_idxd_rawdev *idxd =
		(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];

	if (b->submitted || b->op_count == 0)
		return;
	b->hdl_end = idxd->next_free_hdl;
	b->comp.status = 0;
	b->submitted = 1;
	b->batch_desc.size = b->op_count + 1;
	__idxd_movdir64b(idxd->portal, &b->batch_desc);

	if (++idxd->next_batch == idxd->batch_ring_sz)
		idxd->next_batch = 0;
	idxd->xstats.started = idxd->xstats.enqueued;
}

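/*
 * Gather completed batches from the batch ring, then return up to max_ops
 * of the stored user handles (or simply count them when handle tracking
 * is disabled).
 */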
static __rte_always_inline int
__idxd_completed_ops(int dev_id, uint8_t max_ops,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_idxd_rawdev *idxd =
		(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_completed];
	uint16_t h_idx = idxd->next_ret_hdl;
	int n = 0;

	while (b->submitted && b->comp.status != 0) {
		idxd->last_completed_hdl = b->hdl_end;
		b->submitted = 0;
		b->op_count = 0;
		if (++idxd->next_completed == idxd->batch_ring_sz)
			idxd->next_completed = 0;
		b = &idxd->batch_ring[idxd->next_completed];
	}

	if (!idxd->hdls_disable)
		for (n = 0; n < max_ops && h_idx != idxd->last_completed_hdl; n++) {
			src_hdls[n] = idxd->hdl_ring[h_idx].src;
			dst_hdls[n] = idxd->hdl_ring[h_idx].dst;
			if (++h_idx == idxd->hdl_ring_sz)
				h_idx = 0;
		}
	else
		while (h_idx != idxd->last_completed_hdl) {
			n++;
			if (++h_idx == idxd->hdl_ring_sz)
				h_idx = 0;
		}

	idxd->next_ret_hdl = h_idx;

	idxd->xstats.completed += n;
	return n;
}

static inline int
rte_ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
		unsigned int len, uintptr_t dst_hdl)
{
	enum rte_ioat_dev_type *type =
		(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_enqueue_fill(dev_id, pattern, dst, len, dst_hdl);
	else
		return __ioat_enqueue_fill(dev_id, pattern, dst, len, dst_hdl);
}

static inline int
rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	enum rte_ioat_dev_type *type =
		(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_enqueue_copy(dev_id, src, dst, length,
				src_hdl, dst_hdl);
	else
		return __ioat_enqueue_copy(dev_id, src, dst, length,
				src_hdl, dst_hdl);
}

static inline int
rte_ioat_fence(int dev_id)
{
	enum rte_ioat_dev_type *type =
		(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_fence(dev_id);
	else
		return __ioat_fence(dev_id);
}

static inline void
rte_ioat_perform_ops(int dev_id)
{
	enum rte_ioat_dev_type *type =
		(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_perform_ops(dev_id);
	else
		return __ioat_perform_ops(dev_id);
}

static inline int
rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	enum rte_ioat_dev_type *type =
		(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_completed_ops(dev_id, max_copies,
				src_hdls, dst_hdls);
	else
		return __ioat_completed_ops(dev_id, max_copies,
				src_hdls, dst_hdls);
}

static inline void
__rte_deprecated_msg("use rte_ioat_perform_ops() instead")
rte_ioat_do_copies(int dev_id) { rte_ioat_perform_ops(dev_id); }

static inline int
__rte_deprecated_msg("use rte_ioat_completed_ops() instead")
rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	return rte_ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
}

#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */
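For reference, a minimal usage sketch of the public data-path wrappers above, assuming a rawdev identified by dev_id that has already been configured and started as an ioat/idxd device, and two mbufs with valid data and IOVA addresses. The function and variable names here are illustrative only, not part of the API.

#include <errno.h>
#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_ioat_rawdev.h>

/* Offload one packet copy and reap any completions (illustrative). */
static int
copy_one_packet(int dev_id, struct rte_mbuf *src, struct rte_mbuf *dst)
{
	uintptr_t completed_src[8], completed_dst[8];
	int rc;

	/* 1. enqueue the copy, passing the mbufs back as completion handles */
	if (rte_ioat_enqueue_copy(dev_id,
			rte_pktmbuf_iova(src), rte_pktmbuf_iova(dst),
			rte_pktmbuf_data_len(src),
			(uintptr_t)src, (uintptr_t)dst) != 1)
		return -ENOSPC; /* ring or batch full, retry later */

	/* 2. ring the doorbell so the device starts the copy */
	rte_ioat_perform_ops(dev_id);

	/* 3. later (e.g. on the next poll-loop iteration), reap completions */
	rc = rte_ioat_completed_ops(dev_id, RTE_DIM(completed_src),
			completed_src, completed_dst);
	if (rc < 0)
		return -EIO; /* rte_errno holds the cause */

	/* completed_src[0..rc-1] / completed_dst[0..rc-1] now hold the
	 * handles (here: mbuf pointers) passed at enqueue time */
	return rc;
}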