// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
 * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
 *
 * virtio ring implementation
 */

#include <common.h>
#include <dm.h>
#include <malloc.h>
#include <virtio_types.h>
#include <virtio.h>
#include <virtio_ring.h>
#include <linux/compat.h>
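
/*
 * Note on the ring layout (per the virtio 1.0 "split virtqueue" format):
 * a vring is one contiguous region holding the descriptor table, the
 * driver-written available ring and the device-written used ring. The
 * functions below only ever touch it through the struct vring view set
 * up by vring_init().
 */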

int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[],
		  unsigned int out_sgs, unsigned int in_sgs)
{
	struct vring_desc *desc;
	unsigned int total_sg = out_sgs + in_sgs;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev);
	int head;

	WARN_ON(total_sg == 0);

	head = vq->free_head;

	desc = vq->vring.desc;
	i = head;
	descs_used = total_sg;

	if (vq->num_free < descs_used) {
		debug("Can't add buf len %i - avail = %i\n",
		      descs_used, vq->num_free);
		/*
		 * FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer. Presumably the
		 * host should service the ring ASAP.
		 */
		if (out_sgs)
			virtio_notify(vq->vdev, vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		struct virtio_sg *sg = sgs[n];

		desc[i].flags = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT);
		desc[i].addr = cpu_to_virtio64(vq->vdev,
					       (u64)(uintptr_t)sg->addr);
		desc[i].len = cpu_to_virtio32(vq->vdev, sg->length);

		prev = i;
		i = virtio16_to_cpu(vq->vdev, desc[i].next);
	}
	for (; n < (out_sgs + in_sgs); n++) {
		struct virtio_sg *sg = sgs[n];

		desc[i].flags = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT |
						VRING_DESC_F_WRITE);
		desc[i].addr = cpu_to_virtio64(vq->vdev,
					       (u64)(uintptr_t)sg->addr);
		desc[i].len = cpu_to_virtio32(vq->vdev, sg->length);

		prev = i;
		i = virtio16_to_cpu(vq->vdev, desc[i].next);
	}
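
	/*
	 * Note: the split into two loops keeps all device-readable (out)
	 * descriptors ahead of the device-writable (VRING_DESC_F_WRITE, in)
	 * descriptors in the chain, as the virtio spec requires.
	 */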

	/* Last one doesn't continue */
	desc[prev].flags &= cpu_to_virtio16(vq->vdev, ~VRING_DESC_F_NEXT);

	/* We're using some buffers from the free list. */
	vq->num_free -= descs_used;

	/* Update free pointer */
	vq->free_head = i;

	/*
	 * Put entry in available array (but don't update avail->idx
	 * until they do sync).
	 */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(vq->vdev, head);

	/*
	 * Descriptors and available array need to be set before we expose the
	 * new available array entries.
	 */
	virtio_wmb();

	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	/*
	 * This is very unlikely, but theoretically possible.
	 * Kick just in case.
	 */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(vq);

	return 0;
}
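
/*
 * Illustrative use only (not part of this file): a virtio-blk style
 * request chains one driver-written buffer followed by two device-written
 * buffers. The req/buf/len/status names below are hypothetical driver
 * state.
 *
 *	struct virtio_sg hdr_sg = { &req, sizeof(req) };
 *	struct virtio_sg data_sg = { buf, len };
 *	struct virtio_sg status_sg = { &status, sizeof(status) };
 *	struct virtio_sg *sgs[] = { &hdr_sg, &data_sg, &status_sg };
 *
 *	virtqueue_add(vq, sgs, 1, 2);
 *	virtqueue_kick(vq);
 */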

static bool virtqueue_kick_prepare(struct virtqueue *vq)
{
	u16 new, old;
	bool needs_kick;

	/*
	 * We need to expose available array entries before checking
	 * avail event.
	 */
	virtio_mb();

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

	if (vq->event)
		needs_kick = vring_need_event(virtio16_to_cpu(vq->vdev,
				vring_avail_event(&vq->vring)), new, old);
	else
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(vq->vdev,
				VRING_USED_F_NO_NOTIFY));

	return needs_kick;
}
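
/*
 * For reference, vring_need_event() (from virtio_ring.h) requests a kick
 * only when the device's event index falls inside the window of entries
 * published since the last kick: (u16)(new - event_idx - 1) < (u16)(new - old).
 * E.g. with old = 5 and new = 8, entries 5..7 were just published; a
 * device event index of 6 falls inside that window, so a notification is
 * needed.
 */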

void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtio_notify(vq->vdev, vq);
}

static void detach_buf(struct virtqueue *vq, unsigned int head)
{
	unsigned int i;
	__virtio16 nextflag = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT);

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;
	while (vq->vring.desc[i].flags & nextflag) {
		i = virtio16_to_cpu(vq->vdev, vq->vring.desc[i].next);
		vq->num_free++;
	}

	vq->vring.desc[i].next = cpu_to_virtio16(vq->vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->num_free++;
}

static inline bool more_used(const struct virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vdev,
			vq->vring.used->idx);
}

void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
{
	unsigned int i;
	u16 last_used;

	if (!more_used(vq)) {
		debug("(%s.%d): No more buffers in queue\n",
		      vq->vdev->name, vq->index);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host */
	virtio_rmb();

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(vq->vdev, vq->vring.used->ring[last_used].id);
	if (len) {
		*len = virtio32_to_cpu(vq->vdev,
				       vq->vring.used->ring[last_used].len);
		debug("(%s.%d): last used idx %u with len %u\n",
		      vq->vdev->name, vq->index, i, *len);
	}

	if (unlikely(i >= vq->vring.num)) {
		printf("(%s.%d): id %u out of range\n",
		       vq->vdev->name, vq->index, i);
		return NULL;
	}

	detach_buf(vq, i);
	vq->last_used_idx++;

	/*
	 * If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call.
	 */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(&vring_used_event(&vq->vring),
				cpu_to_virtio16(vq->vdev, vq->last_used_idx));

	return (void *)(uintptr_t)virtio64_to_cpu(vq->vdev,
						  vq->vring.desc[i].addr);
}
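
/*
 * Sketch of the usual completion pattern in a U-Boot driver (polling,
 * since there are no interrupts here); vq, buf, len and any timeout
 * policy are the caller's:
 *
 *	void *buf;
 *	unsigned int len;
 *
 *	virtqueue_kick(vq);
 *	do {
 *		buf = virtqueue_get_buf(vq, &len);
 *	} while (!buf);
 */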

static struct virtqueue *__vring_new_virtqueue(unsigned int index,
					       struct vring vring,
					       struct udevice *udev)
{
	unsigned int i;
	struct virtqueue *vq;
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
	struct udevice *vdev = uc_priv->vdev;

	vq = malloc(sizeof(*vq));
	if (!vq)
		return NULL;

	vq->vdev = vdev;
	vq->index = index;
	vq->num_free = vring.num;
	vq->vring = vring;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->list, &uc_priv->vqs);

	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* Tell other side not to bother us */
	vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
	if (!vq->event)
		vq->vring.avail->flags = cpu_to_virtio16(vdev,
				vq->avail_flags_shadow);

	/* Put everything in free lists */
	vq->free_head = 0;
	for (i = 0; i < vring.num - 1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);

	return vq;
}
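
/*
 * Note: when VIRTIO_RING_F_EVENT_IDX has been negotiated the device
 * ignores avail->flags and watches used_event instead, which is why the
 * VRING_AVAIL_F_NO_INTERRUPT hint above is only written out in the
 * non-event-idx case.
 */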

struct virtqueue *vring_create_virtqueue(unsigned int index, unsigned int num,
					 unsigned int vring_align,
					 struct udevice *udev)
{
	struct virtqueue *vq;
	void *queue = NULL;
	struct vring vring;

	/* We assume num is a power of 2 */
	if (num & (num - 1)) {
		printf("Bad virtqueue length %u\n", num);
		return NULL;
	}
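
	/*
	 * A power-of-2 num keeps ring index wrapping cheap elsewhere in
	 * this file: idx & (num - 1) substitutes for idx % num.
	 */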

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = memalign(PAGE_SIZE, vring_size(num, vring_align));
		if (queue)
			break;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = memalign(PAGE_SIZE, vring_size(num, vring_align));
	}
	if (!queue)
		return NULL;

	memset(queue, 0, vring_size(num, vring_align));
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, udev);
	if (!vq) {
		free(queue);
		return NULL;
	}

	debug("(%s): created vring @ %p for vq @ %p with num %u\n", udev->name,
	      queue, vq, num);

	return vq;
}
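
/*
 * Note: the descriptor table, avail ring and used ring all live in the
 * single contiguous allocation made above, so freeing vq->vring.desc
 * below releases the whole ring.
 */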

void vring_del_virtqueue(struct virtqueue *vq)
{
	free(vq->vring.desc);
	list_del(&vq->list);
	free(vq);
}

unsigned int virtqueue_get_vring_size(struct virtqueue *vq)
{
	return vq->vring.num;
}

ulong virtqueue_get_desc_addr(struct virtqueue *vq)
{
	return (ulong)vq->vring.desc;
}

ulong virtqueue_get_avail_addr(struct virtqueue *vq)
{
	return (ulong)vq->vring.desc +
	       ((char *)vq->vring.avail - (char *)vq->vring.desc);
}

ulong virtqueue_get_used_addr(struct virtqueue *vq)
{
	return (ulong)vq->vring.desc +
	       ((char *)vq->vring.used - (char *)vq->vring.desc);
}

bool virtqueue_poll(struct virtqueue *vq, u16 last_used_idx)
{
	virtio_mb();

	return last_used_idx != virtio16_to_cpu(vq->vdev, vq->vring.used->idx);
}
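
/*
 * Sketch of a caller-side wait built on virtqueue_poll() (the snapshot
 * variable and the loop policy are the caller's, not part of this API):
 *
 *	u16 snap = vq->last_used_idx;
 *
 *	while (!virtqueue_poll(vq, snap))
 *		;
 *	buf = virtqueue_get_buf(vq, &len);
 */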

void virtqueue_dump(struct virtqueue *vq)
{
	unsigned int i;

	printf("virtqueue %p for dev %s:\n", vq, vq->vdev->name);
	printf("\tindex %u, phys addr %p num %u\n",
	       vq->index, vq->vring.desc, vq->vring.num);
	printf("\tfree_head %u, num_added %u, num_free %u\n",
	       vq->free_head, vq->num_added, vq->num_free);
	printf("\tlast_used_idx %u, avail_flags_shadow %u, avail_idx_shadow %u\n",
	       vq->last_used_idx, vq->avail_flags_shadow, vq->avail_idx_shadow);

	printf("Descriptor dump:\n");
	for (i = 0; i < vq->vring.num; i++) {
		printf("\tdesc[%u] = { 0x%llx, len %u, flags %u, next %u }\n",
		       i, vq->vring.desc[i].addr, vq->vring.desc[i].len,
		       vq->vring.desc[i].flags, vq->vring.desc[i].next);
	}

	printf("Avail ring dump:\n");
	printf("\tflags %u, idx %u\n",
	       vq->vring.avail->flags, vq->vring.avail->idx);
	for (i = 0; i < vq->vring.num; i++) {
		printf("\tavail[%u] = %u\n",
		       i, vq->vring.avail->ring[i]);
	}

	printf("Used ring dump:\n");
	printf("\tflags %u, idx %u\n",
	       vq->vring.used->flags, vq->vring.used->idx);
	for (i = 0; i < vq->vring.num; i++) {
		printf("\tused[%u] = { %u, %u }\n", i,
		       vq->vring.used->ring[i].id, vq->vring.used->ring[i].len);
	}
}