RE: [RFCv4,04/35] plat/virtio: Add new virtio_mmio device/driver on arm64
> -----Original Message-----
> From: Razvan Virtan <virtanrazvan@xxxxxxxxx>
> Sent: Saturday, April 3, 2021 10:45 PM
> To: Justin He <Justin.He@xxxxxxx>
> Cc: minios-devel@xxxxxxxxxxxxx
> Subject: Re: [RFCv4,04/35] plat/virtio: Add new virtio_mmio device/driver on arm64
>
> Hi, Justin.
> This patch looks good, I've only found some minor typos. I'll leave them
> here as inline comments.
>
> diff --git a/plat/drivers/virtio/virtio_mmio.c b/plat/drivers/virtio/virtio_mmio.c
> new file mode 100644
> index 0000000..6716153
> --- /dev/null
> +++ b/plat/drivers/virtio/virtio_mmio.c
> @@ -0,0 +1,480 @@
> +/* SPDX-License-Identifier: BSD-3-Clause */
> +/*
> + * Authors: Jia He <justin.he@xxxxxxx>
> + *
> + * Copyright (c) 2020, Arm Ltd. All rights reserved.
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + *
> + * 1. Redistributions of source code must retain the above copyright
> + *    notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + *    notice, this list of conditions and the following disclaimer in the
> + *    documentation and/or other materials provided with the distribution.
> + * 3. Neither the name of the copyright holder nor the names of its
> + *    contributors may be used to endorse or promote products derived from
> + *    this software without specific prior written permission.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
> + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
> + * POSSIBILITY OF SUCH DAMAGE.
> + *
> + * THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY.
> + */
> +
> +#include <uk/config.h>
> +#include <uk/arch/types.h>
> +#include <errno.h>
> +#include <string.h>
> +#include <uk/alloc.h>
> +#include <uk/print.h>
> +#include <uk/plat/lcpu.h>
> +#include <uk/plat/irq.h>
> +#include <uk/bus.h>
> +#include <uk/bitops.h>
> +
> +#include <platform_bus.h>
> +#include <virtio/virtio_config.h>
> +#include <virtio/virtio_bus.h>
> +#include <virtio/virtqueue.h>
> +#include <virtio/virtio_mmio.h>
> +
> +/* The alignment to use between consumer and producer parts of vring.
> + * Currently hardcoded to the page size.
> + */
> +#define VIRTIO_MMIO_VRING_ALIGN __PAGE_SIZE
> +
> +static struct uk_alloc *a;
> +struct virtio_mmio_device_id {
> +        uint16_t device_id;
> +        uint32_t vendor;
> +};
> +
> +struct virtio_mmio_device {
> +        struct virtio_dev vdev;
> +        char *name;
> +        struct virtio_mmio_device_id id;
> +        struct virtio_mmio_driver *drv;
> +        unsigned long version;
> +        unsigned long irq;
> +        void *base;
> +        struct pf_device *pfdev;
> +};
> +
> +#define to_virtio_mmio_device(_dev) \
> +        __containerof(_dev, struct virtio_mmio_device, vdev)
> +
> +struct virtio_mmio_vq_info {
> +        /* the actual virtqueue */
> +        struct virtqueue *vq;
> +};
> +typedef void vq_callback_t(struct virtqueue *);
> +
> +/* Configuration interface */
> +
> +static __u64 vm_get_features(struct virtio_dev *vdev)
> +{
> +        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> +        __u64 features = 0;
> +
> +        virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 1);
> +        features = virtio_cread32(vm_dev->base, VIRTIO_MMIO_DEVICE_FEATURES);
> +        features <<= 32;
> +
> +        virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 0);
> +        features |= virtio_cread32(vm_dev->base, VIRTIO_MMIO_DEVICE_FEATURES);
> +
> +        return features;
> +}
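> A side note for readers following along: the feature word is 64 bits
> wide, but virtio-mmio registers are only 32 bits, so vm_get_features()
> reads the two halves through the DEVICE_FEATURES_SEL bank switch and
> stitches them together. A self-contained toy model of that reassembly
> (the regs[]/read32() names are made up for illustration, not part of
> this patch):
>
>     #include <stdint.h>
>     #include <stdio.h>
>
>     /* regs[0] = low half, regs[1] = high half; bit 0 of the high half
>      * is feature bit 32, i.e. VIRTIO_F_VERSION_1. */
>     static uint32_t regs[2] = { 0x79u, 0x1u };
>     static uint32_t read32(int sel) { return regs[sel]; }
>
>     int main(void)
>     {
>         uint64_t features;
>
>         features = (uint64_t)read32(1) << 32; /* SEL = 1: high 32 bits */
>         features |= read32(0);                /* SEL = 0: low 32 bits  */
>         printf("features = 0x%016llx\n",
>                (unsigned long long)features); /* 0x0000000100000079 */
>         return 0;
>     }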
> +
> +static void vm_set_features(struct virtio_dev *vdev,
> +                            __u64 features)
> +{
> +        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> +
> +        /* Give virtio_ring a chance to accept features. */
> +        virtqueue_feature_negotiate(features);
>
> "Make sure there are no mixed devices"
>
> +        /* Make sure there is are no mixed devices */
> +        if (vm_dev->version == 2 &&
> +            !uk_test_bit(VIRTIO_F_VERSION_1, &vdev->features)) {
> +                uk_pr_err("New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
> +                return;
> +        }
> +
> +        virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 1);
> +        virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_DRIVER_FEATURES,
> +                        (__u32)(vdev->features >> 32));
> +
> +        virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 0);
> +        virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_DRIVER_FEATURES,
> +                        (__u32)vdev->features);
> +
> +        return;
> +}
> +
> +static int vm_get(struct virtio_dev *vdev, __u16 offset,
> +                  void *buf, __u32 len, __u8 type_len __unused)
> +{
> +        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> +        void *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
> +        __u8 b;
> +        __u16 w;
> +        __u32 l;
> +
> +        if (vm_dev->version == 1) {
> +                __u8 *ptr = buf;
> +                unsigned i;
> +
> +                for (i = 0; i < len; i++)
> +                        ptr[i] = virtio_cread8(base, offset + i);
> +                return len;
> +        }
> +
> +        switch (len) {
> +        case 1:
> +                b = virtio_cread8(base, offset);
> +                memcpy(buf, &b, sizeof b);
> +                break;
> +        case 2:
> +                w = (virtio_cread16(base, offset));
> +                memcpy(buf, &w, sizeof w);
> +                break;
> +        case 4:
> +                l = (virtio_cread32(base, offset));
> +                memcpy(buf, &l, sizeof l);
> +                break;
> +        case 8:
> +                l = (virtio_cread32(base, offset));
> +                memcpy(buf, &l, sizeof l);
> +                l = (virtio_cread32(base, offset + sizeof l));
> +                memcpy(buf + sizeof l, &l, sizeof l);
> +                break;
> +        default:
> +                uk_pr_err("Not supported length(%d) for io read\n", len);
> +                UK_BUG();
> +        }
> +
> +        return len;
> +}
> +
> +static int vm_set(struct virtio_dev *vdev, __u16 offset,
> +                  const void *buf, __u32 len)
> +{
> +        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> +        void *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
> +        __u8 b;
> +        __u16 w;
> +        __u32 l;
> +
> +        if (vm_dev->version == 1) {
> +                const __u8 *ptr = buf;
> +                __u32 i;
> +
> +                for (i = 0; i < len; i++)
> +                        virtio_cwrite8(base, offset + i, ptr[i]);
> +                return 0;
> +        }
> +
> +        switch (len) {
> +        case 1:
> +                memcpy(&b, buf, sizeof b);
> +                virtio_cwrite8(base, offset, b);
> +                break;
> +        case 2:
> +                memcpy(&w, buf, sizeof w);
> +                virtio_cwrite16(base, offset, w);
> +                break;
> +        case 4:
> +                memcpy(&l, buf, sizeof l);
> +                virtio_cwrite32(base, offset, l);
> +                break;
> +        case 8:
> +                memcpy(&l, buf, sizeof l);
> +                virtio_cwrite32(base, offset, l);
> +                memcpy(&l, buf + sizeof l, sizeof(l));
> +                virtio_cwrite32(base, offset + sizeof l, l);
> +                break;
> +        default:
> +                uk_pr_err("Not supported length(%d) for io write\n", len);
> +                UK_BUG();
> +        }
> +
> +        return 0;
> +}
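> Related background, in case it helps other readers: for version >= 2
> devices the config space is accessed with width-matched reads, so an
> 8-byte field is fetched as two 32-bit reads stitched back together, as
> in the "case 8" branch of vm_get() above. A standalone sketch of that
> reassembly (the cfg/buf names are invented for the example):
>
>     #include <stdint.h>
>     #include <stdio.h>
>     #include <string.h>
>
>     int main(void)
>     {
>         uint64_t cfg = 0x1122334455667788ull; /* pretend config field */
>         uint32_t l;
>         uint8_t buf[8];
>
>         memcpy(&l, (uint8_t *)&cfg, sizeof l);            /* read32 @ offset     */
>         memcpy(buf, &l, sizeof l);
>         memcpy(&l, (uint8_t *)&cfg + sizeof l, sizeof l); /* read32 @ offset + 4 */
>         memcpy(buf + sizeof l, &l, sizeof l);
>
>         printf("match: %d\n", memcmp(buf, &cfg, sizeof buf) == 0); /* match: 1 */
>         return 0;
>     }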
> +
> +static __u8 vm_get_status(struct virtio_dev *vdev)
> +{
> +        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> +
> +        return virtio_cread32(vm_dev->base, VIRTIO_MMIO_STATUS) & 0xff;
> +}
> +
> +static void vm_set_status(struct virtio_dev *vdev, __u8 status)
> +{
> +        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> +
> +        /* We should never be setting status to 0. */
> +        UK_BUGON(status == 0);
> +
> +        virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_STATUS, status);
> +}
> +
> +static void vm_reset(struct virtio_dev *vdev)
> +{
> +        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> +
> +        /* 0 status means a reset. */
> +        virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_STATUS, 0);
> +}
> +
> +/* Transport interface */
> +
> +/* the notify function used when creating a virt queue */
> +static int vm_notify(struct virtio_dev *vdev, __u16 queue_id)
> +{
> +        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> +
> +        /* We write the queue's selector into the notification register
> +         * to signal the other end
> +         */
> +        virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_NOTIFY, queue_id);
> +        return 1;
> +}
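> For context on why vm_set_status() refuses a zero status: in the
> generic virtio initialization handshake, zero is only ever written via
> device_reset. A sketch of the handshake these ops get plugged into
> (the bit values are from the virtio spec; each |= step below would be
> one status_set call in practice):
>
>     #include <stdio.h>
>
>     #define VIRTIO_ACKNOWLEDGE 1u /* guest noticed the device      */
>     #define VIRTIO_DRIVER      2u /* guest has a driver for it     */
>     #define VIRTIO_DRIVER_OK   4u /* device is live                */
>     #define VIRTIO_FEATURES_OK 8u /* feature negotiation completed */
>
>     int main(void)
>     {
>         unsigned status = 0;          /* device_reset: status <- 0 */
>
>         status |= VIRTIO_ACKNOWLEDGE;
>         status |= VIRTIO_DRIVER;
>         status |= VIRTIO_FEATURES_OK;
>         status |= VIRTIO_DRIVER_OK;
>
>         printf("final status: 0x%x\n", status); /* 0xf */
>         return 0;
>     }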
> +
> +/* Notify all virtqueues on an interrupt. */
> +static int vm_interrupt(void *opaque)
> +{
> +        struct virtio_mmio_device *vm_dev = opaque;
> +        unsigned long status;
> +        unsigned long flags;
> +        int rc = 0;
> +        struct virtqueue *vq;
> +
> +        /* Read and acknowledge interrupts */
> +        status = virtio_cread32(vm_dev->base, VIRTIO_MMIO_INTERRUPT_STATUS);
> +        virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_INTERRUPT_ACK, status);
> +
> +        if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)) {
> +                uk_pr_warn("Unsupported config change interrupt received on virtio-mmio device %p\n",
> +                           vm_dev);
> +        }
> +
> +        if (likely(status & VIRTIO_MMIO_INT_VRING)) {
> +                flags = ukplat_lcpu_save_irqf();
> +                UK_TAILQ_FOREACH(vq, &vm_dev->vdev.vqs, next) {
> +                        rc |= virtqueue_ring_interrupt(vq);
> +                }
> +                ukplat_lcpu_restore_irqf(flags);
> +        }
> +
> +        return rc;
> +}
> +
> +static struct virtqueue *vm_setup_vq(struct virtio_dev *vdev,
> +                                     __u16 queue_id,
> +                                     __u16 num_desc,
> +                                     virtqueue_callback_t callback,
> +                                     struct uk_alloc *a)
> +{
> +        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> +        struct virtqueue *vq;
> +        unsigned long flags;
> +
> +        /* Create the vring */
> +        vq = virtqueue_create(queue_id, num_desc, VIRTIO_MMIO_VRING_ALIGN,
> +                              callback, vm_notify, vdev, a);
> +
> +        if (PTRISERR(vq)) {
> +                uk_pr_err("Failed to create the virtqueue: %d\n",
> +                          PTR2ERR(vq));
> +
> +                goto err_exit;
> +        }
> +
> +        /* Select the queue we're interested in */
> +        virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_SEL, queue_id);
> +
> +        /* Activate the queue */
> +        virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_NUM,
> +                        (__u32)virtqueue_vring_get_num(vq));
> +        if (vm_dev->version == 1) {
> +
> +                virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_ALIGN,
> +                                __PAGE_SIZE);
> +                virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_PFN,
> +                                virtqueue_physaddr(vq) >> __PAGE_SHIFT);
> +        } else {
> +                __u64 addr;
> +
> +                addr = virtqueue_physaddr(vq);
> +                virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_DESC_LOW,
> +                                (__u32)addr);
> +                virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_DESC_HIGH,
> +                                (__u32)(addr >> 32));
> +
> +                addr = virtqueue_get_avail_addr(vq);
> +                virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_AVAIL_LOW,
> +                                (__u32)addr);
> +                virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_AVAIL_HIGH,
> +                                (__u32)(addr >> 32));
> +
> +                addr = virtqueue_get_used_addr(vq);
> +                virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_USED_LOW,
> +                                (__u32)addr);
> +                virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_USED_HIGH,
> +                                (__u32)(addr >> 32));
> +
> +                virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_READY, 1);
> +        }
> +
> +        flags = ukplat_lcpu_save_irqf();
> +        UK_TAILQ_INSERT_TAIL(&vm_dev->vdev.vqs, vq, next);
> +        ukplat_lcpu_restore_irqf(flags);
> +
> +err_exit:
> +        return vq;
> +}
> +
> +static int vm_find_vqs(struct virtio_dev *vdev, __u16 num_vqs, __u16 *qdesc_size)
> +{
> +        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> +        unsigned int irq = vm_dev->pfdev->irq;
> +        int i, err;
> +        int vq_cnt = 0;
> +
> +        err = ukplat_irq_register(irq, vm_interrupt, vm_dev);
> +        if (err)
> +                return err;
> +
> +        for (i = 0; i < num_vqs; ++i) {
> +                /* Select the queue we're interested in */
> +                virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_SEL, i);
> +
> +                /* Queue shouldn't already be set up. */
> +                if (virtio_cread32(vm_dev->base, (vm_dev->version == 1 ?
> +                                   VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
> +                        uk_pr_err("vm_find_vqs error mmio queue not ready\n");
> +                        err = -ENOENT;
> +                        goto error_exit;
> +                }
> +
> +                qdesc_size[i] = virtio_cread32(vm_dev->base, VIRTIO_MMIO_QUEUE_NUM_MAX);
> +                if (qdesc_size[i] == 0) {
> +                        err = -ENOENT;
> +                        goto error_exit;
> +                }
> +
> +                vq_cnt++;
> +        }
> +
> +        return vq_cnt;
> +error_exit:
> +        uk_pr_err("err in vm_find_vqs :%d\n", err);
> +        return err;
> +}
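> One more aside, on the version split in vm_setup_vq() above: a modern
> (version 2) device takes the full 64-bit ring addresses split across
> LOW/HIGH register pairs, while a legacy (version 1) device takes a
> single 32-bit page frame number, which caps ring memory at 2^44 with
> 4K pages. A toy sketch of the two encodings (write32() is a made-up
> stand-in for virtio_cwrite32()):
>
>     #include <stdint.h>
>     #include <stdio.h>
>
>     static void write32(const char *reg, uint32_t v)
>     {
>         printf("%-15s <- 0x%08x\n", reg, v);
>     }
>
>     int main(void)
>     {
>         uint64_t pa = 0x00000001234f0000ull; /* pretend vring address */
>
>         /* version 2: full 64-bit address, low/high halves */
>         write32("QUEUE_DESC_LOW", (uint32_t)pa);
>         write32("QUEUE_DESC_HIGH", (uint32_t)(pa >> 32));
>
>         /* version 1: one page frame number (4K pages assumed) */
>         write32("QUEUE_PFN", (uint32_t)(pa >> 12));
>         return 0;
>     }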
> +
> +static struct virtio_config_ops virtio_mmio_config_ops = {
> +        .config_get = vm_get,
> +        .config_set = vm_set,
> +        .status_get = vm_get_status,
> +        .status_set = vm_set_status,
> +        .device_reset = vm_reset,
> +        .features_get = vm_get_features,
> +        .features_set = vm_set_features,
> +        .vqs_find = vm_find_vqs,
> +        .vq_setup = vm_setup_vq,
> +};
> +
> +static int virtio_mmio_add_dev(struct pf_device *pfdev)
> +{
> +        struct virtio_mmio_device *vm_dev;
> +        unsigned int magic;
> +        int rc;
> +
> +        UK_ASSERT(pfdev != NULL);
> +        uk_pr_info("virtio_mmio_add dev\n");
> +
> +        vm_dev = uk_malloc(a, sizeof(*vm_dev));
> +        if (!vm_dev) {
>
> "Failed to allocate virtio-mmio...." (instead of virtio-pci)
>
> +                uk_pr_err("Failed to allocate virtio-pci device\n");
> +                return -ENOMEM;
> +        }
> +
> +        /* Fetch Pf Device information */
> +        vm_dev->pfdev = pfdev;
> +        vm_dev->base = (void *)pfdev->base;
> +        vm_dev->vdev.cops = &virtio_mmio_config_ops;
> +        vm_dev->name = "virtio_mmio";
> +
> +        if (vm_dev->base == NULL)
> +                return -EFAULT;
> +
> +        magic = virtio_cread32(vm_dev->base, VIRTIO_MMIO_MAGIC_VALUE);
> +        if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
> +                uk_pr_err("Wrong magic value 0x%x!\n", magic);
> +                return -ENODEV;
> +        }
> +
> +        /* Check device version */
> +        vm_dev->version = virtio_cread32(vm_dev->base, VIRTIO_MMIO_VERSION);
> +        if (vm_dev->version < 1 || vm_dev->version > 2) {
> +                uk_pr_err("Version %ld not supported!\n", vm_dev->version);
> +                return -ENXIO;
> +        }
> +
> +        vm_dev->vdev.id.virtio_device_id = virtio_cread32(vm_dev->base, VIRTIO_MMIO_DEVICE_ID);
> +        if (vm_dev->vdev.id.virtio_device_id == 0) {
> +                /*
> +                 * virtio-mmio device with an ID 0 is a (dummy) placeholder
> +                 * with no function. End probing now with no error reported.
> +                 */
> +                uk_pr_err("virtio_device_id is 0\n");
> +
> +                return -ENODEV;
> +        }
> +        vm_dev->id.vendor = virtio_cread32(vm_dev->base, VIRTIO_MMIO_VENDOR_ID);
> +
> +        virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_GUEST_PAGE_SIZE, __PAGE_SIZE);
> +
> +        rc = virtio_bus_register_device(&vm_dev->vdev);
> +        if (rc != 0) {
> +                uk_pr_err("Failed to register the virtio device: %d\n", rc);
> +                goto free_pf_dev;
> +        }
> +
> +        uk_pr_info("finish add a virtio mmio dev\n");
> +
> +        return rc;
> +
> +free_pf_dev:
> +        uk_free(a, vm_dev);
> +
> +        return 0;
> +}
> +
> +static int virtio_mmio_drv_init(struct uk_alloc *drv_allocator)
> +{
> +        /* driver initialization */
> +        if (!drv_allocator)
> +                return -EINVAL;
> +
> +        a = drv_allocator;
> +
> +        return 0;
> +}
> +
> +static const struct pf_device_id virtio_mmio_ids = {
> +        .device_id = VIRTIO_MMIO_ID
> +};
> +
> +static struct pf_driver virtio_mmio_drv = {
> +        .device_ids = &virtio_mmio_ids,
> +        .init = virtio_mmio_drv_init,
> +        .add_dev = virtio_mmio_add_dev
> +};
> +
> +PF_REGISTER_DRIVER(&virtio_mmio_drv);
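> And a last note on the probe: the magic check builds the ASCII string
> "virt" packed little-endian into one 32-bit word. A quick standalone
> check of the constant:
>
>     #include <stdio.h>
>
>     int main(void)
>     {
>         /* Same expression as the probe's magic comparison. */
>         unsigned magic = 'v' | 'i' << 8 | 'r' << 16 | 't' << 24;
>
>         printf("0x%08x\n", magic); /* prints 0x74726976 */
>         return 0;
>     }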
>
> Reviewed-by: Razvan Virtan <virtanrazvan@xxxxxxxxx>

Thanks, will update the comments as you suggested.

--
Cheers,
Justin (Jia He)