DPDK 22.07.0
rte_ioat_rawdev_fns.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */
#ifndef _RTE_IOAT_RAWDEV_FNS_H_
#define _RTE_IOAT_RAWDEV_FNS_H_

#include <x86intrin.h>
#include <rte_rawdev.h>
#include <rte_memzone.h>
#include <rte_prefetch.h>

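/*
 * Identifies the type of hardware behind a rawdev instance, so that the
 * inline wrappers further down can dispatch to either the IOAT or the
 * IDXD implementation.
 */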
enum rte_ioat_dev_type {
	RTE_IOAT_DEV,
	RTE_IDXD_DEV,
};

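/* Extended statistics counters maintained per device instance. */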
struct rte_ioat_xstats {
	uint64_t enqueue_failed;
	uint64_t enqueued;
	uint64_t started;
	uint64_t completed;
};

#include "rte_idxd_rawdev_fns.h"

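/*
 * Layout of a hardware descriptor as written into the descriptor ring,
 * generic across the operation types supported by the device.
 */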
struct rte_ioat_generic_hw_desc {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct {
			uint32_t int_enable: 1;
			uint32_t src_snoop_disable: 1;
			uint32_t dest_snoop_disable: 1;
			uint32_t completion_update: 1;
			uint32_t fence: 1;
			uint32_t reserved2: 1;
			uint32_t src_page_break: 1;
			uint32_t dest_page_break: 1;
			uint32_t bundle: 1;
			uint32_t dest_dca: 1;
			uint32_t hint: 1;
			uint32_t reserved: 13;
			uint32_t op: 8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t dest_addr;
	uint64_t next;
	uint64_t op_specific[4];
};

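/*
 * Per-device software state for an IOAT rawdev instance. The leading
 * fields match those of struct rte_idxd_rawdev so the type field can be
 * read through either structure.
 */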
struct rte_ioat_rawdev {
	/* common fields at the top - match those in rte_idxd_rawdev */
	enum rte_ioat_dev_type type;
	struct rte_ioat_xstats xstats;

	struct rte_rawdev *rawdev;
	const struct rte_memzone *mz;
	const struct rte_memzone *desc_mz;

	volatile uint16_t *doorbell __rte_cache_aligned;
	phys_addr_t status_addr;
	phys_addr_t ring_addr;

	unsigned short ring_size;
	bool hdls_disable;
	struct rte_ioat_generic_hw_desc *desc_ring;
	__m128i *hdls; /* completion handles for returning to user */

	unsigned short next_read;
	unsigned short next_write;

	/* to report completions, the device will write status back here */
	volatile uint64_t status __rte_cache_aligned;

	/* pointer to the register bar */
	volatile struct rte_ioat_registers *regs;
};

#define RTE_IOAT_CHANSTS_IDLE		0x1
#define RTE_IOAT_CHANSTS_SUSPENDED	0x2
#define RTE_IOAT_CHANSTS_HALTED		0x3
#define RTE_IOAT_CHANSTS_ARMED		0x4

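/*
 * Return how many more descriptors can be enqueued on the ring before it
 * is full; one slot is always kept free.
 */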
static __rte_always_inline uint16_t
__ioat_burst_capacity(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short size = ioat->ring_size - 1;
	unsigned short read = ioat->next_read;
	unsigned short write = ioat->next_write;
	unsigned short space = size - (write - read);

	return space;
}

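/*
 * Write a single descriptor into the ring and, unless handle tracking is
 * disabled, record the source/destination handles for return on
 * completion. Shared by the fill and copy enqueue helpers below.
 */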
static __rte_always_inline int
__ioat_write_desc(int dev_id, uint32_t op, uint64_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short read = ioat->next_read;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	unsigned short space = mask + read - write;
	struct rte_ioat_generic_hw_desc *desc;

	if (space == 0) {
		ioat->xstats.enqueue_failed++;
		return 0;
	}

	ioat->next_write = write + 1;
	write &= mask;

	desc = &ioat->desc_ring[write];
	desc->size = length;
	/* set descriptor write-back every 16th descriptor */
	desc->u.control_raw = (uint32_t)((op << IOAT_CMD_OP_SHIFT) |
			(!(write & 0xF) << IOAT_COMP_UPDATE_SHIFT));
	desc->src_addr = src;
	desc->dest_addr = dst;

	if (!ioat->hdls_disable)
		ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
					(int64_t)src_hdl);
	rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);

	ioat->xstats.enqueued++;
	return 1;
}

static __rte_always_inline int
__ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
		unsigned int length, uintptr_t dst_hdl)
{
	static const uintptr_t null_hdl;

	return __ioat_write_desc(dev_id, ioat_op_fill, pattern, dst, length,
			null_hdl, dst_hdl);
}

/*
 * Enqueue a copy operation onto the ioat device
 */
static __rte_always_inline int
__ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	return __ioat_write_desc(dev_id, ioat_op_copy, src, dst, length,
			src_hdl, dst_hdl);
}

/* add fence to last written descriptor */
static __rte_always_inline int
__ioat_fence(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short write = ioat->next_write;
	unsigned short mask = ioat->ring_size - 1;
	struct rte_ioat_generic_hw_desc *desc;

	write = (write - 1) & mask;
	desc = &ioat->desc_ring[write];

	desc->u.control.fence = 1;
	return 0;
}

/*
 * Trigger hardware to begin performing enqueued operations
 */
static __rte_always_inline int
__ioat_perform_ops(int dev_id)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
			.control.completion_update = 1;
	rte_compiler_barrier();
	*ioat->doorbell = ioat->next_write;
	ioat->xstats.started = ioat->xstats.enqueued;

	return 0;
}

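/*
 * Read the device status write-back to determine the ring index of the
 * last completed descriptor, reporting any non-active channel state via
 * *error.
 */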
static __rte_always_inline int
__ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
{
	uint64_t status = ioat->status;

	/* lower 3 bits indicate "transfer status" : active, idle, halted.
	 * We can ignore bit 0.
	 */
	*error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
	return (status - ioat->ring_addr) >> 6;
}

/*
 * Returns details of operations that have been completed
 */
static __rte_always_inline int
__ioat_completed_ops(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	struct rte_ioat_rawdev *ioat =
			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
	unsigned short mask = (ioat->ring_size - 1);
	unsigned short read = ioat->next_read;
	unsigned short end_read, count;
	int error;
	int i = 0;

	end_read = (__ioat_get_last_completed(ioat, &error) + 1) & mask;
	count = (end_read - (read & mask)) & mask;

	if (error) {
		rte_errno = EIO;
		return -1;
	}

	if (ioat->hdls_disable) {
		read += count;
		goto end;
	}

	if (count > max_copies)
		count = max_copies;

	for (; i < count - 1; i += 2, read += 2) {
		__m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
		__m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);

		_mm_storeu_si128((__m128i *)&src_hdls[i],
				_mm_unpacklo_epi64(hdls0, hdls1));
		_mm_storeu_si128((__m128i *)&dst_hdls[i],
				_mm_unpackhi_epi64(hdls0, hdls1));
	}
	for (; i < count; i++, read++) {
		uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
		src_hdls[i] = hdls[0];
		dst_hdls[i] = hdls[1];
	}

end:
	ioat->next_read = read;
	ioat->xstats.completed += count;
	return count;
}

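/*
 * Public inline API: each wrapper below reads the device type from the
 * rawdev private data and dispatches to the matching IOAT or IDXD
 * implementation.
 */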
static inline uint16_t
rte_ioat_burst_capacity(int dev_id)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_burst_capacity(dev_id);
	else
		return __ioat_burst_capacity(dev_id);
}

static inline int
rte_ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
		unsigned int len, uintptr_t dst_hdl)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_enqueue_fill(dev_id, pattern, dst, len, dst_hdl);
	else
		return __ioat_enqueue_fill(dev_id, pattern, dst, len, dst_hdl);
}

static inline int
rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_enqueue_copy(dev_id, src, dst, length,
				src_hdl, dst_hdl);
	else
		return __ioat_enqueue_copy(dev_id, src, dst, length,
				src_hdl, dst_hdl);
}

static inline int
rte_ioat_fence(int dev_id)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_fence(dev_id);
	else
		return __ioat_fence(dev_id);
}

static inline int
rte_ioat_perform_ops(int dev_id)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	if (*type == RTE_IDXD_DEV)
		return __idxd_perform_ops(dev_id);
	else
		return __ioat_perform_ops(dev_id);
}

static inline int
rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
		uint32_t *status, uint8_t *num_unsuccessful,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	enum rte_ioat_dev_type *type =
			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
	uint8_t tmp; /* used so functions don't need to check for null parameter */

	if (num_unsuccessful == NULL)
		num_unsuccessful = &tmp;

	*num_unsuccessful = 0;
	if (*type == RTE_IDXD_DEV)
		return __idxd_completed_ops(dev_id, max_copies, status, num_unsuccessful,
				src_hdls, dst_hdls);
	else
		return __ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
}

static inline void
__rte_deprecated_msg("use rte_ioat_perform_ops() instead")
rte_ioat_do_copies(int dev_id) { rte_ioat_perform_ops(dev_id); }

static inline int
__rte_deprecated_msg("use rte_ioat_completed_ops() instead")
rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
		uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
	return rte_ioat_completed_ops(dev_id, max_copies, NULL, NULL,
			src_hdls, dst_hdls);
}

#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */
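
For orientation, the snippet below is a minimal usage sketch of the public inline API declared in this file. It assumes a rawdev identified by dev_id has already been configured and started with the ioat driver, and that src_iova and dst_iova are valid IOVA addresses obtained elsewhere; the helper name copy_one_buffer and its parameters are illustrative only and not part of the DPDK API.

#include <errno.h>
#include <rte_ioat_rawdev.h>

/* Illustrative helper (not part of DPDK): enqueue one copy operation,
 * start the hardware, then busy-poll until the copy completes.
 */
static int
copy_one_buffer(int dev_id, phys_addr_t src_iova, phys_addr_t dst_iova,
		unsigned int length)
{
	uintptr_t src_hdls[1], dst_hdls[1];
	int ret;

	/* returns 1 if the descriptor was enqueued, 0 if the ring is full */
	if (rte_ioat_enqueue_copy(dev_id, src_iova, dst_iova, length,
			0 /* src_hdl */, 0 /* dst_hdl */) != 1)
		return -ENOSPC;

	/* ring the doorbell so the hardware starts on the enqueued work */
	rte_ioat_perform_ops(dev_id);

	/* poll for completion; a negative return indicates an error
	 * (rte_errno is set by the driver) */
	do {
		ret = rte_ioat_completed_ops(dev_id, 1, NULL, NULL,
				src_hdls, dst_hdls);
	} while (ret == 0);

	return ret < 0 ? -EIO : 0;
}

In a real application the enqueue/perform/completed calls would normally be batched: many enqueues per doorbell write, and completions harvested periodically rather than busy-waited on.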