+ * @param obj
+ * Reference to the virtqueue.
+ *
+ * @return int
+ * 0, Interrupt was not for this virtqueue.
+ * 1, Virtqueue has handled the interrupt.
+ */
+int virtqueue_ring_interrupt(void *obj);
+
+/**
* Negotiate with the virtqueue features.
* @param feature_set
 * The feature set the device requests.
@@ -93,6 +104,17 @@ __phys_addr virtqueue_physaddr(struct virtqueue *vq);
__u64 virtqueue_feature_negotiate(__u64 feature_set);
/**
+ * Check if host notification is enabled.
+ *
+ * @param vq
+ * Reference to the virtqueue.
+ * @return
+ * 1, if the host needs to be notified of new descriptors.
+ * 0, otherwise.
+ */
+int virtqueue_notify_enabled(struct virtqueue *vq);
+
+/**
* Allocate a virtqueue.
* @param queue_id
* The virtqueue hw id.
@@ -138,6 +160,36 @@ int virtqueue_is_full(struct virtqueue *vq);
*/
void virtqueue_destroy(struct virtqueue *vq, struct uk_alloc *a);
+/**
+ * Disable interrupts on the virtqueue.
+ * @param vq
+ * Reference to the virtqueue.
+ */
+void virtqueue_intr_disable(struct virtqueue *vq);
+
+/**
+ * Enable interrupts on the virtqueue.
+ * @param vq
+ * Reference to the virtqueue.
+ * @return
+ * 0, On successful enabling of interrupts.
+ * 1, More packets remain in the ring to be processed.
+ */
+int virtqueue_intr_enable(struct virtqueue *vq);
+
+/**
+ * Notify the host of an event.
+ * Kicks the host only if the device registered a notify callback and the
+ * host has not suppressed notifications for this queue.
+ * @param vq
+ * Reference to the virtual queue.
+ */
+static inline void virtqueue_host_notify(struct virtqueue *vq)
+{
+ UK_ASSERT(vq);
+
+ /* Skip the (expensive) device kick when the host set
+  * VRING_USED_F_NO_NOTIFY (checked by virtqueue_notify_enabled).
+  */
+ if (vq->vq_notify_host && virtqueue_notify_enabled(vq))
+ vq->vq_notify_host(vq->vdev, vq->queue_id);
+}
+
#ifdef __cplusplus
}
#endif /* __cplusplus */
diff --git a/plat/drivers/virtio/virtio_pci.c b/plat/drivers/virtio/virtio_pci.c
index d07b011..82f4249 100644
--- a/plat/drivers/virtio/virtio_pci.c
+++ b/plat/drivers/virtio/virtio_pci.c
@@ -97,6 +97,8 @@ static struct virtqueue *vpci_legacy_vq_setup(struct
virtio_dev *vdev,
static inline void virtio_device_id_add(struct virtio_dev *vdev,
__u16 pci_dev_id,
__u16 vpci_dev_id_start);
+static int virtio_pci_handle(void *arg);
+static int vpci_legacy_notify(struct virtio_dev *vdev, __u16 queue_id);
static int virtio_pci_legacy_add_dev(struct pci_device *pci_dev,
struct virtio_pci_dev *vpci_dev);
@@ -115,6 +117,43 @@ static struct virtio_config_ops vpci_legacy_ops = {
.vq_setup = vpci_legacy_vq_setup,
};
+/**
+ * Notify the host that new descriptors are available on a virtqueue.
+ * Writes the queue index to the legacy virtio-pci notify register.
+ * @param vdev
+ * Reference to the virtio device.
+ * @param queue_id
+ * The hw id of the virtqueue to kick.
+ * @return
+ * Always 0.
+ */
+static int vpci_legacy_notify(struct virtio_dev *vdev, __u16 queue_id)
+{
+ struct virtio_pci_dev *vpdev;
+
+ UK_ASSERT(vdev);
+ vpdev = to_virtiopcidev(vdev);
+ /* Writing the queue index to VIRTIO_PCI_QUEUE_NOTIFY kicks the host */
+ virtio_cwrite16((void *)(unsigned long) vpdev->pci_base_addr,
+ VIRTIO_PCI_QUEUE_NOTIFY, queue_id);
+
+ return 0;
+}
+
+/**
+ * Interrupt handler for a legacy virtio-pci device.
+ * Reads (and thereby acknowledges) the ISR status register and dispatches
+ * ring interrupts to every virtqueue of the device.
+ * @param arg
+ * Reference to the virtio pci device (struct virtio_pci_dev *).
+ * @return
+ * Non-zero if at least one virtqueue handled the interrupt, 0 otherwise.
+ */
+static int virtio_pci_handle(void *arg)
+{
+ struct virtio_pci_dev *d = (struct virtio_pci_dev *) arg;
+ uint8_t isr_status;
+ struct virtqueue *vq;
+ int rc = 0;
+
+ UK_ASSERT(arg);
+
+ /* Reading the isr status is used to acknowledge the interrupt */
+ isr_status = virtio_cread8((void *)(unsigned long)d->pci_isr_addr, 0);
+ /* We don't support configuration interrupt on the device */
+ if (isr_status & VIRTIO_PCI_ISR_CONFIG) {
+ uk_pr_warn("Unsupported config change interrupt received on virtio-pci device %p\n",
+ d);
+ }
+
+ if (isr_status & VIRTIO_PCI_ISR_HAS_INTR) {
+ UK_TAILQ_FOREACH(vq, &d->vdev.vqs, next) {
+ rc |= virtqueue_ring_interrupt(vq);
+ }
+ }
+ return rc;
+}
+
static struct virtqueue *vpci_legacy_vq_setup(struct virtio_dev *vdev,
__u16 queue_id,
__u16 num_desc,
@@ -130,7 +169,7 @@ static struct virtqueue *vpci_legacy_vq_setup(struct
virtio_dev *vdev,
vpdev = to_virtiopcidev(vdev);
vq = virtqueue_create(queue_id, num_desc, VIRTIO_PCI_VRING_ALIGN,
- callback, NULL, vdev, a);
+ callback, vpci_legacy_notify, vdev, a);
if (PTRISERR(vq)) {
uk_pr_err("Failed to create the virtqueue: %d\n",
PTR2ERR(vq));
@@ -163,6 +202,9 @@ static int vpci_legacy_pci_vq_find(struct virtio_dev *vdev,
__u16 num_vqs,
UK_ASSERT(vdev);
vpdev = to_virtiopcidev(vdev);
+ /* Registering the interrupt for the queue */
+ ukplat_irq_register(vpdev->pdev->irq, virtio_pci_handle, vpdev);
+
for (i = 0; i < num_vqs; i++) {
virtio_cwrite16((void *) (unsigned long)vpdev->pci_base_addr,
VIRTIO_PCI_QUEUE_SEL, i);
diff --git a/plat/drivers/virtio/virtio_ring.c
b/plat/drivers/virtio/virtio_ring.c
index ba91594..6012bd2 100644
--- a/plat/drivers/virtio/virtio_ring.c
+++ b/plat/drivers/virtio/virtio_ring.c
@@ -74,9 +74,74 @@ struct virtqueue_vring {
/**
* Static function Declaration(s).
*/
+static inline int virtqueue_hasdata(struct virtqueue_vring *vrq);
static void virtqueue_vring_init(struct virtqueue_vring *vrq, __u16 nr_desc,
__u16 align);
+/**
+ * Disable interrupts on the virtqueue.
+ * Sets VRING_AVAIL_F_NO_INTERRUPT in the avail ring flags; this is only a
+ * hint to the host, so a late interrupt may still arrive.
+ * @param vq
+ * Reference to the virtqueue.
+ */
+void virtqueue_intr_disable(struct virtqueue *vq)
+{
+ struct virtqueue_vring *vrq;
+
+ UK_ASSERT(vq);
+
+ vrq = to_virtqueue_vring(vq);
+ vrq->vring.avail->flags |= (VRING_AVAIL_F_NO_INTERRUPT);
+}
+
+/**
+ * Enable interrupts on the virtqueue.
+ * @param vq
+ * Reference to the virtqueue.
+ * @return
+ * 0, On successful enabling of interrupts.
+ * 1, More packets remain in the ring to be processed.
+ */
+int virtqueue_intr_enable(struct virtqueue *vq)
+{
+ struct virtqueue_vring *vrq;
+ int rc = 0;
+
+ UK_ASSERT(vq);
+
+ vrq = to_virtqueue_vring(vq);
+ /* Check if there are no more packets pending in the queue */
+ if (!virtqueue_hasdata(vrq)) {
+ /* Bitwise AND tests whether interrupts are currently
+  * disabled; the previous '|' made this condition always
+  * true.
+  */
+ if (vrq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
+ vrq->vring.avail->flags &=
+ (~VRING_AVAIL_F_NO_INTERRUPT);
+ /**
+ * We enabled the interrupts. We ensure it using the
+ * memory barrier and check if there are any further
+ * data available in the queue. The check for data
+ * after enabling the interrupt is to make sure we do
+ * not miss any interrupt while transitioning to enable
+ * interrupt. This is inline with the requirement from
+ * virtio specification section 3.2.2
+ */
+ mb();
+ /* Check if there are further descriptors */
+ if (virtqueue_hasdata(vrq)) {
+ virtqueue_intr_disable(vq);
+ rc = 1;
+ }
+ }
+ } else {
+ /**
+ * There are more packets in the virtqueue to be processed
+ * while the interrupt was disabled.
+ */
+ rc = 1;
+ }
+ return rc;
+}
+
+/**
+ * Check if host notification is enabled.
+ * The host sets VRING_USED_F_NO_NOTIFY in the used ring flags when it does
+ * not want to be kicked on new descriptors.
+ * @param vq
+ * Reference to the virtqueue.
+ * @return
+ * 1 if the host needs notification on new descriptors, 0 otherwise.
+ */
+int virtqueue_notify_enabled(struct virtqueue *vq)
+{
+ struct virtqueue_vring *vrq;
+
+ UK_ASSERT(vq);
+ vrq = to_virtqueue_vring(vq);
+
+ return ((vrq->vring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
+}
+
+/**
+ * Check whether the used ring holds entries not yet consumed by the driver,
+ * i.e. the host's used index has advanced past our last seen index.
+ */
+static inline int virtqueue_hasdata(struct virtqueue_vring *vrq)
+{
+ return (vrq->last_used_desc_idx != vrq->vring.used->idx);
+}
+
/**
* Driver implementation
*/
@@ -92,6 +157,23 @@ __u64 virtqueue_feature_negotiate(__u64 feature_set)
return feature;
}
+/**
+ * Handle an interrupt on behalf of a virtqueue.
+ * @param obj
+ * Reference to the virtqueue (void * to match the irq handler signature).
+ * @return
+ * 0, Interrupt was not for this virtqueue (no new used entries).
+ * Otherwise, the return value of the queue's callback.
+ */
+int virtqueue_ring_interrupt(void *obj)
+{
+ struct virtqueue_vring *vrq = NULL;
+ struct virtqueue *vq = (struct virtqueue *)obj;
+ int rc = 0;
+
+ UK_ASSERT(vq);
+
+ vrq = to_virtqueue_vring(vq);
+ /* Nothing new in the used ring: the interrupt was not for us */
+ if (!virtqueue_hasdata(vrq))
+ return rc;
+
+ /* Callback is expected to be set; likely() marks the hot path */
+ if (likely(vq->vq_callback))
+ rc = vq->vq_callback(vq, vq->priv);
+ return rc;
+}
+
__phys_addr virtqueue_physaddr(struct virtqueue *vq)
{
struct virtqueue_vring *vrq = NULL;