DPDK  20.11.10
rte_vhost.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#ifndef _RTE_VHOST_H_
#define _RTE_VHOST_H_

#include <stdbool.h>
#include <stdint.h>
#include <sys/eventfd.h>

#include <rte_memory.h>
#include <rte_mempool.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef __cplusplus
/* These are not C++-aware. */
#include <linux/vhost.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_net.h>
#endif

#define RTE_VHOST_USER_CLIENT (1ULL << 0)
#define RTE_VHOST_USER_NO_RECONNECT (1ULL << 1)
#define RTE_VHOST_USER_RESERVED_1 (1ULL << 2)
#define RTE_VHOST_USER_IOMMU_SUPPORT (1ULL << 3)
#define RTE_VHOST_USER_POSTCOPY_SUPPORT (1ULL << 4)
/* support mbuf with external buffer attached */
#define RTE_VHOST_USER_EXTBUF_SUPPORT (1ULL << 5)
/* support only linear buffers (no chained mbufs) */
#define RTE_VHOST_USER_LINEARBUF_SUPPORT (1ULL << 6)
#define RTE_VHOST_USER_ASYNC_COPY (1ULL << 7)
#define RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS (1ULL << 8)

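/*
 * Illustrative sketch (not part of the original header): the socket flags
 * above are OR-ed together and passed to rte_vhost_driver_register(). The
 * socket path used here is a hypothetical example.
 *
 *	uint64_t flags = RTE_VHOST_USER_CLIENT | RTE_VHOST_USER_IOMMU_SUPPORT;
 *
 *	if (rte_vhost_driver_register("/tmp/vhost-user0.sock", flags) != 0)
 *		rte_exit(EXIT_FAILURE, "failed to register vhost socket\n");
 */
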
/* Features. */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
 #define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif

#ifndef VIRTIO_NET_F_MQ
 #define VIRTIO_NET_F_MQ 22
#endif

#ifndef VIRTIO_NET_F_MTU
 #define VIRTIO_NET_F_MTU 3
#endif

#ifndef VIRTIO_F_ANY_LAYOUT
 #define VIRTIO_F_ANY_LAYOUT 27
#endif

#ifndef VHOST_USER_PROTOCOL_F_MQ
#define VHOST_USER_PROTOCOL_F_MQ 0
#endif

#ifndef VHOST_USER_PROTOCOL_F_LOG_SHMFD
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#endif

#ifndef VHOST_USER_PROTOCOL_F_RARP
#define VHOST_USER_PROTOCOL_F_RARP 2
#endif

#ifndef VHOST_USER_PROTOCOL_F_REPLY_ACK
#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
#endif

#ifndef VHOST_USER_PROTOCOL_F_NET_MTU
#define VHOST_USER_PROTOCOL_F_NET_MTU 4
#endif

#ifndef VHOST_USER_PROTOCOL_F_SLAVE_REQ
#define VHOST_USER_PROTOCOL_F_SLAVE_REQ 5
#endif

#ifndef VHOST_USER_PROTOCOL_F_CRYPTO_SESSION
#define VHOST_USER_PROTOCOL_F_CRYPTO_SESSION 7
#endif

#ifndef VHOST_USER_PROTOCOL_F_PAGEFAULT
#define VHOST_USER_PROTOCOL_F_PAGEFAULT 8
#endif

#ifndef VHOST_USER_PROTOCOL_F_CONFIG
#define VHOST_USER_PROTOCOL_F_CONFIG 9
#endif

#ifndef VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD
#define VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD 10
#endif

#ifndef VHOST_USER_PROTOCOL_F_HOST_NOTIFIER
#define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11
#endif

#ifndef VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD
#define VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD 12
#endif

#ifndef VHOST_USER_PROTOCOL_F_STATUS
#define VHOST_USER_PROTOCOL_F_STATUS 16
#endif

#ifndef VHOST_USER_F_PROTOCOL_FEATURES
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#endif

struct rte_vdpa_device;

/*
 * Information relating to memory regions including offsets to
 * addresses in QEMUs memory file.
 */
struct rte_vhost_mem_region {
	uint64_t guest_phys_addr;
	uint64_t guest_user_addr;
	uint64_t host_user_addr;
	uint64_t size;
	void *mmap_addr;
	uint64_t mmap_size;
	int fd;
};

/*
 * Memory structure includes region and mapping information.
 */
struct rte_vhost_memory {
	uint32_t nregions;
	struct rte_vhost_mem_region regions[];
};

struct rte_vhost_inflight_desc_split {
	uint8_t inflight;
	uint8_t padding[5];
	uint16_t next;
	uint64_t counter;
};

struct rte_vhost_inflight_info_split {
	uint64_t features;
	uint16_t version;
	uint16_t desc_num;
	uint16_t last_inflight_io;
	uint16_t used_idx;
	struct rte_vhost_inflight_desc_split desc[0];
};

struct rte_vhost_inflight_desc_packed {
	uint8_t inflight;
	uint8_t padding;
	uint16_t next;
	uint16_t last;
	uint16_t num;
	uint64_t counter;
	uint16_t id;
	uint16_t flags;
	uint32_t len;
	uint64_t addr;
};

struct rte_vhost_inflight_info_packed {
	uint64_t features;
	uint16_t version;
	uint16_t desc_num;
	uint16_t free_head;
	uint16_t old_free_head;
	uint16_t used_idx;
	uint16_t old_used_idx;
	uint8_t used_wrap_counter;
	uint8_t old_used_wrap_counter;
	uint8_t padding[7];
	struct rte_vhost_inflight_desc_packed desc[0];
};

struct rte_vhost_resubmit_desc {
	uint16_t index;
	uint64_t counter;
};

struct rte_vhost_resubmit_info {
	struct rte_vhost_resubmit_desc *resubmit_list;
	uint16_t resubmit_num;
};

struct rte_vhost_ring_inflight {
	union {
		struct rte_vhost_inflight_info_split *inflight_split;
		struct rte_vhost_inflight_info_packed *inflight_packed;
	};

	struct rte_vhost_resubmit_info *resubmit_inflight;
};

struct rte_vhost_vring {
	union {
		struct vring_desc *desc;
		struct vring_packed_desc *desc_packed;
	};
	union {
		struct vring_avail *avail;
		struct vring_packed_desc_event *driver_event;
	};
	union {
		struct vring_used *used;
		struct vring_packed_desc_event *device_event;
	};
	uint64_t log_guest_addr;

	int callfd;

	int kickfd;
	uint16_t size;
};

/* The possible values returned by a vhost-user message handler. */
enum rte_vhost_msg_result {
	/* Message handling failed */
	RTE_VHOST_MSG_RESULT_ERR = -1,
	/* Message handling successful */
	RTE_VHOST_MSG_RESULT_OK = 0,
	/* Message handling successful and reply prepared */
	RTE_VHOST_MSG_RESULT_REPLY = 1,
	/* Message not handled */
	RTE_VHOST_MSG_RESULT_NOT_HANDLED,
};

/*
 * Function prototype for external message handlers: called by the vhost
 * library before and/or after the built-in handling of a vhost-user message.
 */
typedef enum rte_vhost_msg_result (*rte_vhost_msg_handle)(int vid, void *msg);

/* Optional external message handlers. */
struct rte_vhost_user_extern_ops {
	/* Called prior to the master message handling. */
	rte_vhost_msg_handle pre_msg_handle;
	/* Called after the master message handling. */
	rte_vhost_msg_handle post_msg_handle;
};
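
/*
 * Illustrative sketch (not part of the original header): an external message
 * handler that lets every message fall through to the built-in handling.
 * my_pre_handler, my_extern_ops and my_ctx are hypothetical names.
 *
 *	static enum rte_vhost_msg_result
 *	my_pre_handler(int vid, void *msg)
 *	{
 *		// Inspect or consume the raw vhost-user message here if needed.
 *		RTE_SET_USED(vid);
 *		RTE_SET_USED(msg);
 *		return RTE_VHOST_MSG_RESULT_NOT_HANDLED;
 *	}
 *
 *	static const struct rte_vhost_user_extern_ops my_extern_ops = {
 *		.pre_msg_handle = my_pre_handler,
 *		.post_msg_handle = NULL,
 *	};
 *
 *	// Registered once a vid exists, e.g. from the new_connection() callback:
 *	// rte_vhost_extern_callback_register(vid, &my_extern_ops, my_ctx);
 */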

/* Device and vring operations. */
struct vhost_device_ops {
	int (*new_device)(int vid);
	void (*destroy_device)(int vid);

	int (*vring_state_changed)(int vid, uint16_t queue_id, int enable);

	/*
	 * Called when the negotiated features change, e.g. VHOST_F_LOG_ALL is
	 * set/cleared at the start/end of live migration.
	 */
	int (*features_changed)(int vid, uint64_t features);

	int (*new_connection)(int vid);
	void (*destroy_connection)(int vid);

	/*
	 * Called each time a guest gets notified about waiting packets
	 * (the eventfd_write() on callfd).
	 */
	void (*guest_notified)(int vid);

	void *reserved[1];
};
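
/*
 * Illustrative sketch (not part of the original header): a minimal set of
 * device callbacks. The function and variable names are hypothetical.
 *
 *	static int
 *	new_device_cb(int vid)
 *	{
 *		// The device is ready: vrings are set up and can be processed.
 *		printf("vhost device %d is ready\n", vid);
 *		return 0;
 *	}
 *
 *	static void
 *	destroy_device_cb(int vid)
 *	{
 *		printf("vhost device %d is removed\n", vid);
 *	}
 *
 *	static const struct vhost_device_ops my_vhost_ops = {
 *		.new_device = new_device_cb,
 *		.destroy_device = destroy_device_cb,
 *	};
 *
 *	// Registered before rte_vhost_driver_start():
 *	// rte_vhost_driver_callback_register("/tmp/vhost-user0.sock", &my_vhost_ops);
 */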

/*
 * Convert guest physical address to host virtual address.
 *
 * Deprecated: this function does not check that the full requested length is
 * mapped inside the region; use rte_vhost_va_from_guest_pa() instead.
 */
__rte_deprecated
static __rte_always_inline uint64_t
rte_vhost_gpa_to_vva(struct rte_vhost_memory *mem, uint64_t gpa)
{
	struct rte_vhost_mem_region *reg;
	uint32_t i;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];
		if (gpa >= reg->guest_phys_addr &&
		    gpa < reg->guest_phys_addr + reg->size) {
			return gpa - reg->guest_phys_addr +
			       reg->host_user_addr;
		}
	}

	return 0;
}

/*
 * Convert guest physical address to host virtual address safely.
 *
 * This variant also truncates *len to the length that is actually mapped
 * contiguously within the matching memory region.
 */
__rte_experimental
static __rte_always_inline uint64_t
rte_vhost_va_from_guest_pa(struct rte_vhost_memory *mem,
	uint64_t gpa, uint64_t *len)
{
	struct rte_vhost_mem_region *r;
	uint32_t i;

	for (i = 0; i < mem->nregions; i++) {
		r = &mem->regions[i];
		if (gpa >= r->guest_phys_addr &&
		    gpa < r->guest_phys_addr + r->size) {

			if (unlikely(*len > r->guest_phys_addr + r->size - gpa))
				*len = r->guest_phys_addr + r->size - gpa;

			return gpa - r->guest_phys_addr +
			       r->host_user_addr;
		}
	}
	*len = 0;

	return 0;
}
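
/*
 * Illustrative sketch (not part of the original header): translating a guest
 * physical buffer address to a host virtual address while checking that the
 * whole buffer is contiguously mapped. desc_addr and desc_len stand for a
 * descriptor's guest physical address and length.
 *
 *	uint64_t len = desc_len;
 *	uint64_t vva = rte_vhost_va_from_guest_pa(mem, desc_addr, &len);
 *
 *	if (vva == 0 || len < desc_len) {
 *		// Unmapped, or the buffer crosses a region boundary:
 *		// either fail the request or translate it chunk by chunk.
 *	}
 */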

#define RTE_VHOST_NEED_LOG(features) ((features) & (1ULL << VHOST_F_LOG_ALL))

/* Log the memory write start with given address and length. */
void rte_vhost_log_write(int vid, uint64_t addr, uint64_t len);

/* Log the used ring update start at given offset and log length. */
void rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
	uint64_t offset, uint64_t len);
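
/*
 * Illustrative sketch (not part of the original header): a backend that
 * writes into guest memory directly should log those writes while live
 * migration is in progress, so dirty pages get transferred. gpa, len,
 * vring_idx, used_offset and used_len are hypothetical values.
 *
 *	uint64_t features;
 *
 *	if (rte_vhost_get_negotiated_features(vid, &features) == 0 &&
 *			RTE_VHOST_NEED_LOG(features)) {
 *		// Log both the payload write and the used ring update.
 *		rte_vhost_log_write(vid, gpa, len);
 *		rte_vhost_log_used_vring(vid, vring_idx, used_offset, used_len);
 *	}
 */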

/* Enable or disable guest notifications for the given queue. */
int rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable);

/* Register vhost driver. path could be different for multiple instance support. */
int rte_vhost_driver_register(const char *path, uint64_t flags);

/* Unregister vhost driver. This is only meaningful to vhost user. */
int rte_vhost_driver_unregister(const char *path);

/* Set the vDPA device used by this vhost-user socket. */
int
rte_vhost_driver_attach_vdpa_device(const char *path,
	struct rte_vdpa_device *dev);

/* Unset the vDPA device used by this vhost-user socket. */
int
rte_vhost_driver_detach_vdpa_device(const char *path);

/* Get the vDPA device attached to this vhost-user socket, if any. */
struct rte_vdpa_device *
rte_vhost_driver_get_vdpa_device(const char *path);

/* Set the feature bits the vhost-user driver supports. */
int rte_vhost_driver_set_features(const char *path, uint64_t features);

/* Enable vhost-user driver features. */
int rte_vhost_driver_enable_features(const char *path, uint64_t features);

/* Disable vhost-user driver features. */
int rte_vhost_driver_disable_features(const char *path, uint64_t features);

/* Get the feature bits before feature negotiation. */
int rte_vhost_driver_get_features(const char *path, uint64_t *features);

/* Set the protocol feature bits before feature negotiation. */
__rte_experimental
int
rte_vhost_driver_set_protocol_features(const char *path,
	uint64_t protocol_features);

/* Get the protocol feature bits before feature negotiation. */
__rte_experimental
int
rte_vhost_driver_get_protocol_features(const char *path,
	uint64_t *protocol_features);

__rte_experimental
int
rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num);

/* Get the feature bits after negotiation. */
int rte_vhost_get_negotiated_features(int vid, uint64_t *features);

/* Register callbacks. */
int rte_vhost_driver_callback_register(const char *path,
	struct vhost_device_ops const * const ops);

/* Start the vhost-user driver for the given socket path. */
int rte_vhost_driver_start(const char *path);

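/*
 * Illustrative sketch (not part of the original header): the usual
 * registration sequence for a vhost-user backend. The socket path and
 * my_vhost_ops are hypothetical, and error handling is shortened.
 *
 *	const char *path = "/tmp/vhost-user0.sock";
 *
 *	if (rte_vhost_driver_register(path, 0) != 0)
 *		return -1;
 *
 *	// Optionally trim the advertised feature set before negotiation,
 *	// e.g. drop multiqueue support:
 *	rte_vhost_driver_disable_features(path, 1ULL << VIRTIO_NET_F_MQ);
 *
 *	if (rte_vhost_driver_callback_register(path, &my_vhost_ops) != 0 ||
 *			rte_vhost_driver_start(path) != 0) {
 *		rte_vhost_driver_unregister(path);
 *		return -1;
 *	}
 */
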
/* Get the MTU value of the device if set in QEMU. */
int rte_vhost_get_mtu(int vid, uint16_t *mtu);

/* Get the NUMA node from which the virtio net device's memory was allocated. */
int rte_vhost_get_numa_node(int vid);

/* Deprecated: use rte_vhost_get_vring_num() instead. */
__rte_deprecated
uint32_t rte_vhost_get_queue_num(int vid);

/* Get the number of vrings the device supports. */
uint16_t rte_vhost_get_vring_num(int vid);

/* Get the virtio net device's ifname, which is the vhost-user socket path. */
int rte_vhost_get_ifname(int vid, char *buf, size_t len);

/* Get how many avail entries are left in the queue. */
uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id);

struct rte_mbuf;
struct rte_mempool;

/*
 * This function adds buffers to the virtio devices RX virtqueue. Buffers can
 * only be added to the RX virtqueue. Returns the number of packets that were
 * successfully added.
 */
uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
	struct rte_mbuf **pkts, uint16_t count);

/*
 * This function gets guest buffers from the virtio device TX virtqueue,
 * constructs host mbufs, copies the guest buffer content into them and
 * stores them in pkts to be processed.
 */
uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);

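/*
 * Illustrative sketch (not part of the original header): a simple polling
 * datapath that drains the guest TX ring and echoes the packets back into
 * the guest RX ring. mbuf_pool is a hypothetical mempool created by the
 * application; queue 1 is the guest TX ring and queue 0 the guest RX ring
 * of the first queue pair.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_rx, i;
 *
 *	nb_rx = rte_vhost_dequeue_burst(vid, 1, mbuf_pool, pkts, 32);
 *	if (nb_rx == 0)
 *		return;
 *
 *	rte_vhost_enqueue_burst(vid, 0, pkts, nb_rx);
 *
 *	// rte_vhost_enqueue_burst() copies packet data into guest buffers and
 *	// does not take mbuf ownership, so the mbufs are freed here.
 *	for (i = 0; i < nb_rx; i++)
 *		rte_pktmbuf_free(pkts[i]);
 */
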
/*
 * Get guest mem table: a list of memory regions. An rte_vhost_memory object
 * is allocated internally to hold the regions; the caller is responsible for
 * freeing it.
 */
int rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);

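/*
 * Illustrative sketch (not part of the original header): dumping the guest
 * memory layout. This assumes the 20.11 behaviour where the returned table
 * is heap-allocated by the library and owned by the caller.
 *
 *	struct rte_vhost_memory *mem = NULL;
 *	uint32_t i;
 *
 *	if (rte_vhost_get_mem_table(vid, &mem) != 0)
 *		return;
 *
 *	for (i = 0; i < mem->nregions; i++)
 *		printf("region %u: gpa 0x%" PRIx64 " size 0x%" PRIx64 "\n",
 *			i, mem->regions[i].guest_phys_addr,
 *			mem->regions[i].size);
 *
 *	free(mem);
 */
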
/* Get guest vring info, including the vring address, vring size, etc. */
int rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
	struct rte_vhost_vring *vring);

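/*
 * Illustrative sketch (not part of the original header): inspecting a vring.
 * For split rings the desc/avail/used members of the unions are meaningful;
 * for packed rings use desc_packed/driver_event/device_event instead.
 *
 *	struct rte_vhost_vring vring;
 *
 *	if (rte_vhost_get_vhost_vring(vid, 0, &vring) != 0)
 *		return;
 *
 *	printf("vring 0: size %u, callfd %d, kickfd %d\n",
 *		vring.size, vring.callfd, vring.kickfd);
 */
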
/* Get guest inflight vring info, including the inflight ring and resubmit list. */
__rte_experimental
int
rte_vhost_get_vhost_ring_inflight(int vid, uint16_t vring_idx,
	struct rte_vhost_ring_inflight *vring);

/* Record a split ring descriptor (taken from the avail ring) as inflight. */
__rte_experimental
int
rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
	uint16_t idx);

/* Record a packed ring descriptor chain as inflight and return its entry. */
__rte_experimental
int
rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
	uint16_t head, uint16_t last, uint16_t *inflight_entry);

/* Save the last inflight I/O index for a split ring. */
__rte_experimental
int
rte_vhost_set_last_inflight_io_split(int vid,
	uint16_t vring_idx, uint16_t idx);

/* Update the inflight record after a packed ring descriptor is used. */
__rte_experimental
int
rte_vhost_set_last_inflight_io_packed(int vid,
	uint16_t vring_idx, uint16_t head);

/* Clear the inflight status of a split ring descriptor after it is used. */
__rte_experimental
int
rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
	uint16_t last_used_idx, uint16_t idx);

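/*
 * Illustrative sketch (not part of the original header): how a backend that
 * processes split-ring requests by itself (rather than through
 * rte_vhost_enqueue/dequeue_burst) can use the inflight API, assuming the
 * VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD protocol feature is negotiated. The
 * variables are hypothetical; the ordering follows the pattern used by
 * DPDK's vhost_blk example.
 *
 *	// When a descriptor chain with head desc_idx is taken from the
 *	// avail ring:
 *	rte_vhost_set_inflight_desc_split(vid, vring_idx, desc_idx);
 *
 *	// When the request completes, just before returning it to the guest:
 *	rte_vhost_set_last_inflight_io_split(vid, vring_idx, desc_idx);
 *
 *	used->ring[used->idx & (vring_size - 1)].id = desc_idx;
 *	used->ring[used->idx & (vring_size - 1)].len = used_len;
 *	// (a write barrier belongs between the ring update and the index update)
 *	used->idx++;
 *
 *	rte_vhost_clr_inflight_desc_split(vid, vring_idx, used->idx, desc_idx);
 *	rte_vhost_vring_call(vid, vring_idx);
 */
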
/* Clear the inflight status of a packed ring descriptor after it is used. */
__rte_experimental
int
rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
	uint16_t head);

/*
 * Notify the guest that used descriptors have been added to the vring. This
 * function acts as a memory barrier.
 */
int rte_vhost_vring_call(int vid, uint16_t vring_idx);

/* Non-blocking variant of rte_vhost_vring_call(). */
__rte_experimental
int rte_vhost_vring_call_nonblock(int vid, uint16_t vring_idx);

/* Get vhost RX queue avail count. */
uint32_t rte_vhost_rx_queue_count(int vid, uint16_t qid);

/* Get the log base and log size of the vhost device. */
int
rte_vhost_get_log_base(int vid, uint64_t *log_base, uint64_t *log_size);

/* Get last_avail/used_idx of the vhost virtqueue. */
int
rte_vhost_get_vring_base(int vid, uint16_t queue_id,
	uint16_t *last_avail_idx, uint16_t *last_used_idx);

/* Get last_avail/used_idx of the vhost virtqueue from the inflight ring. */
__rte_experimental
int
rte_vhost_get_vring_base_from_inflight(int vid,
	uint16_t queue_id, uint16_t *last_avail_idx, uint16_t *last_used_idx);

/* Set last_avail/used_idx of the vhost virtqueue. */
int
rte_vhost_set_vring_base(int vid, uint16_t queue_id,
	uint16_t last_avail_idx, uint16_t last_used_idx);

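/*
 * Illustrative sketch (not part of the original header): saving and restoring
 * virtqueue indexes, e.g. across a backend restart or for live migration.
 * saved_avail and saved_used are hypothetical storage locations.
 *
 *	// When the device is torn down: remember where processing stopped.
 *	rte_vhost_get_vring_base(vid, queue_id, &saved_avail, &saved_used);
 *
 *	// After reconnection: resume processing from the saved position.
 *	rte_vhost_set_vring_base(vid, queue_id, saved_avail, saved_used);
 */
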
/* Register external message handling callbacks. */
__rte_experimental
int
rte_vhost_extern_callback_register(int vid,
	struct rte_vhost_user_extern_ops const * const ops, void *ctx);

/* Get the vDPA device handle for the given vhost device. */
struct rte_vdpa_device *
rte_vhost_get_vdpa_device(int vid);

/* Notify the guest that it should get the virtio configuration space from the backend. */
__rte_experimental
int
rte_vhost_slave_config_change(int vid, bool need_reply);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_VHOST_H_ */