
[Xen-devel] [PATCH v2 3/6] ioreq-server: create basic ioreq server abstraction.



Collect together the data structures concerning device emulation into a
new struct hvm_ioreq_server.

Code that deals with the shared and buffered ioreq pages is extracted from
functions such as hvm_domain_initialise, hvm_vcpu_initialise and do_hvm_op,
and consolidated into a set of hvm_ioreq_server manipulation functions.

This patch also adds missing emacs boilerplate in the places where I
needed it.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c           |  322 ++++++++++++++++++++++++++------------
 xen/include/asm-x86/hvm/domain.h |   18 ++-
 xen/include/asm-x86/hvm/vcpu.h   |   12 +-
 3 files changed, 248 insertions(+), 104 deletions(-)
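
For reviewers: a minimal sketch of how the new abstraction is intended to be
driven, end to end. This is illustrative only — it assumes the static
functions in hvm.c introduced below, the wrapper example_lifecycle() is
hypothetical and not part of the patch, and error unwinding is simplified:

/* Illustrative sketch of the ioreq server lifecycle (not part of the patch). */
static int example_lifecycle(struct domain *d, struct vcpu *v,
                             domid_t dm_domid, unsigned long ioreq_pfn,
                             unsigned long buf_ioreq_pfn)
{
    struct hvm_ioreq_server *s;
    int rc;

    /* hvm_domain_initialise(): allocate and attach the per-domain server. */
    rc = hvm_init_ioreq_server(d);
    if ( rc != 0 )
        return rc;
    s = d->arch.hvm_domain.ioreq_server;

    /* hvm_vcpu_initialise(): bind the ioreq (and, for vcpu 0, the
     * bufioreq) event channels for each vcpu. */
    rc = hvm_ioreq_server_add_vcpu(s, v);
    if ( rc < 0 )
        goto fail;

    /* do_hvm_op(HVMOP_set_param): point the server at the shared and
     * buffered ioreq pages and at the emulating domain. */
    rc = hvm_set_ioreq_server_pfn(s, ioreq_pfn);
    if ( rc == 0 )
        rc = hvm_set_ioreq_server_buf_pfn(s, buf_ioreq_pfn);
    if ( rc == 0 )
        rc = hvm_set_ioreq_server_domid(s, dm_domid);
    if ( rc < 0 )
        goto fail;

    return 0;

 fail:
    /* hvm_domain_relinquish_resources(): unmap the pages and free the
     * server (per-vcpu event channels go via hvm_ioreq_server_remove_vcpu()
     * from hvm_vcpu_destroy()). */
    hvm_deinit_ioreq_server(d);
    return rc;
}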

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e07cae3..d9586b2 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -345,28 +345,32 @@ void hvm_migrate_pirqs(struct vcpu *v)
     spin_unlock(&d->event_lock);
 }
 
-static ioreq_t *get_ioreq(struct vcpu *v)
+static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, int id)
 {
-    struct domain *d = v->domain;
-    shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
+    shared_iopage_t *p = s->ioreq.va;
 
-    ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
+    ASSERT(p != NULL);
 
-    return p ? &p->vcpu_ioreq[v->vcpu_id] : NULL;
+    return &p->vcpu_ioreq[id];
 }
 
 bool_t hvm_io_pending(struct vcpu *v)
 {
+    struct domain *d = v->domain;
+    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
     ioreq_t *p;
 
-    if ( !(p = get_ioreq(v)) )
-         return 0;
+    if ( !s )
+        return 0;
 
+    p = get_ioreq(s, v->vcpu_id);
     return ( p->state != STATE_IOREQ_NONE );
 }
 
 void hvm_do_resume(struct vcpu *v)
 {
+    struct domain *d = v->domain;
+    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
     ioreq_t *p;
 
     check_wakeup_from_wait();
@@ -374,10 +378,11 @@ void hvm_do_resume(struct vcpu *v)
     if ( is_hvm_vcpu(v) )
         pt_restore_timer(v);
 
-    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
-    if ( !(p = get_ioreq(v)) )
+    if ( !s )
         goto check_inject_trap;
 
+    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
+    p = get_ioreq(s, v->vcpu_id);
     while ( p->state != STATE_IOREQ_NONE )
     {
         switch ( p->state )
@@ -387,7 +392,7 @@ void hvm_do_resume(struct vcpu *v)
             break;
         case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
         case STATE_IOREQ_INPROCESS:
-            wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
+            wait_on_xen_event_channel(p->vp_eport,
                                       (p->state != STATE_IOREQ_READY) &&
                                       (p->state != STATE_IOREQ_INPROCESS));
             break;
@@ -410,7 +415,6 @@ void hvm_do_resume(struct vcpu *v)
 static void hvm_init_ioreq_page(
     struct domain *d, struct hvm_ioreq_page *iorp)
 {
-    memset(iorp, 0, sizeof(*iorp));
     spin_lock_init(&iorp->lock);
     domain_pause(d);
 }
@@ -553,6 +557,167 @@ static int handle_pvh_io(
     return X86EMUL_OKAY;
 }
 
+static int hvm_init_ioreq_server(struct domain *d)
+{
+    struct hvm_ioreq_server *s;
+    int i;
+
+    s = xzalloc(struct hvm_ioreq_server);
+    if ( !s )
+        return -ENOMEM;
+
+    s->domain = d;
+
+    for ( i = 0; i < MAX_HVM_VCPUS; i++ )
+        s->ioreq_evtchn[i] = -1;
+    s->buf_ioreq_evtchn = -1;
+
+    hvm_init_ioreq_page(d, &s->ioreq);
+    hvm_init_ioreq_page(d, &s->buf_ioreq);
+
+    d->arch.hvm_domain.ioreq_server = s;
+    return 0;
+}
+
+static void hvm_deinit_ioreq_server(struct domain *d)
+{
+    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+
+    hvm_destroy_ioreq_page(d, &s->ioreq);
+    hvm_destroy_ioreq_page(d, &s->buf_ioreq);
+
+    xfree(s);
+}
+
+static void hvm_update_ioreq_server_evtchn(struct hvm_ioreq_server *s)
+{
+    struct domain *d = s->domain;
+
+    if ( s->ioreq.va != NULL )
+    {
+        shared_iopage_t *p = s->ioreq.va;
+        struct vcpu *v;
+
+        for_each_vcpu ( d, v )
+            p->vcpu_ioreq[v->vcpu_id].vp_eport = s->ioreq_evtchn[v->vcpu_id];
+    }
+}
+
+static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s, struct vcpu *v)
+{
+    int rc;
+
+    /* Create ioreq event channel. */
+    rc = alloc_unbound_xen_event_channel(v, s->domid, NULL);
+    if ( rc < 0 )
+        goto done;
+
+    /* Register ioreq event channel. */
+    s->ioreq_evtchn[v->vcpu_id] = rc;
+
+    if ( v->vcpu_id == 0 )
+    {
+        /* Create bufioreq event channel. */
+        rc = alloc_unbound_xen_event_channel(v, s->domid, NULL);
+        if ( rc < 0 )
+            goto done;
+
+        s->buf_ioreq_evtchn = rc;
+    }
+
+    hvm_update_ioreq_server_evtchn(s);
+    rc = 0;
+
+done:
+    return rc;
+}
+
+static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s, struct vcpu *v)
+{
+    if ( v->vcpu_id == 0 )
+    {
+        if ( s->buf_ioreq_evtchn >= 0 )
+        {
+            free_xen_event_channel(v, s->buf_ioreq_evtchn);
+            s->buf_ioreq_evtchn = -1;
+        }
+    }
+
+    if ( s->ioreq_evtchn[v->vcpu_id] >= 0 )
+    {
+        free_xen_event_channel(v, s->ioreq_evtchn[v->vcpu_id]);
+        s->ioreq_evtchn[v->vcpu_id] = -1;
+    }
+}
+
+static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
+                                     int *p_port)
+{
+    int old_port, new_port;
+
+    new_port = alloc_unbound_xen_event_channel(v, remote_domid, NULL);
+    if ( new_port < 0 )
+        return new_port;
+
+    /* xchg() ensures that only we call free_xen_event_channel(). */
+    old_port = xchg(p_port, new_port);
+    free_xen_event_channel(v, old_port);
+    return 0;
+}
+
+static int hvm_set_ioreq_server_domid(struct hvm_ioreq_server *s, domid_t domid)
+{
+    struct domain *d = s->domain;
+    struct vcpu *v;
+    int rc = 0;
+
+    domain_pause(d);
+
+    if ( d->vcpu[0] )
+    {
+        rc = hvm_replace_event_channel(d->vcpu[0], domid, &s->buf_ioreq_evtchn);
+        if ( rc < 0 )
+            goto done;
+    }
+
+    for_each_vcpu ( d, v )
+    {
+        rc = hvm_replace_event_channel(v, domid, &s->ioreq_evtchn[v->vcpu_id]);
+        if ( rc < 0 )
+            goto done;
+    }
+
+    hvm_update_ioreq_server_evtchn(s);
+
+    s->domid = domid;
+
+done:
+    domain_unpause(d);
+
+    return rc;
+}
+
+static int hvm_set_ioreq_server_pfn(struct hvm_ioreq_server *s, unsigned long pfn)
+{
+    struct domain *d = s->domain;
+    int rc;
+
+    rc = hvm_set_ioreq_page(d, &s->ioreq, pfn);
+    if ( rc < 0 )
+        return rc;
+
+    hvm_update_ioreq_server_evtchn(s);
+
+    return 0;
+}
+
+static int hvm_set_ioreq_server_buf_pfn(struct hvm_ioreq_server *s, unsigned long pfn)
+{
+    struct domain *d = s->domain;
+
+    return hvm_set_ioreq_page(d, &s->buf_ioreq, pfn);
+}
+
 int hvm_domain_initialise(struct domain *d)
 {
     int rc;
@@ -620,17 +785,20 @@ int hvm_domain_initialise(struct domain *d)
 
     rtc_init(d);
 
-    hvm_init_ioreq_page(d, &d->arch.hvm_domain.ioreq);
-    hvm_init_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
+    rc = hvm_init_ioreq_server(d);
+    if ( rc != 0 )
+        goto fail2;
 
     register_portio_handler(d, 0xe9, 1, hvm_print_line);
 
     rc = hvm_funcs.domain_initialise(d);
     if ( rc != 0 )
-        goto fail2;
+        goto fail3;
 
     return 0;
 
+ fail3:
+    hvm_deinit_ioreq_server(d);
  fail2:
     rtc_deinit(d);
     stdvga_deinit(d);
@@ -654,8 +822,7 @@ void hvm_domain_relinquish_resources(struct domain *d)
     if ( hvm_funcs.nhvm_domain_relinquish_resources )
         hvm_funcs.nhvm_domain_relinquish_resources(d);
 
-    hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.ioreq);
-    hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
+    hvm_deinit_ioreq_server(d);
 
     msixtbl_pt_cleanup(d);
 
@@ -1287,7 +1454,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
     struct domain *d = v->domain;
-    domid_t dm_domid;
+    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
 
     hvm_asid_flush_vcpu(v);
 
@@ -1330,30 +1497,10 @@ int hvm_vcpu_initialise(struct vcpu *v)
          && (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
         goto fail5;
 
-    dm_domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
-
-    /* Create ioreq event channel. */
-    rc = alloc_unbound_xen_event_channel(v, dm_domid, NULL); /* teardown: none */
+    rc = hvm_ioreq_server_add_vcpu(s, v);
     if ( rc < 0 )
         goto fail6;
 
-    /* Register ioreq event channel. */
-    v->arch.hvm_vcpu.xen_port = rc;
-
-    if ( v->vcpu_id == 0 )
-    {
-        /* Create bufioreq event channel. */
-        rc = alloc_unbound_xen_event_channel(v, dm_domid, NULL); /* teardown: none */
-        if ( rc < 0 )
-            goto fail6;
-        d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] = rc;
-    }
-
-    spin_lock(&d->arch.hvm_domain.ioreq.lock);
-    if ( d->arch.hvm_domain.ioreq.va != NULL )
-        get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
-    spin_unlock(&d->arch.hvm_domain.ioreq.lock);
-
     if ( v->vcpu_id == 0 )
     {
         /* NB. All these really belong in hvm_domain_initialise(). */
@@ -1387,6 +1534,11 @@ int hvm_vcpu_initialise(struct vcpu *v)
 
 void hvm_vcpu_destroy(struct vcpu *v)
 {
+    struct domain *d = v->domain;
+    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+
+    hvm_ioreq_server_remove_vcpu(s, v);
+
     nestedhvm_vcpu_destroy(v);
 
     free_compat_arg_xlat(v);
@@ -1398,9 +1550,6 @@ void hvm_vcpu_destroy(struct vcpu *v)
         vlapic_destroy(v);
 
     hvm_funcs.vcpu_destroy(v);
-
-    /* Event channel is already freed by evtchn_destroy(). */
-    /*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
 }
 
 void hvm_vcpu_down(struct vcpu *v)
@@ -1430,8 +1579,10 @@ void hvm_vcpu_down(struct vcpu *v)
 int hvm_buffered_io_send(ioreq_t *p)
 {
     struct vcpu *v = current;
-    struct hvm_ioreq_page *iorp = &v->domain->arch.hvm_domain.buf_ioreq;
-    buffered_iopage_t *pg = iorp->va;
+    struct domain *d = v->domain;
+    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+    struct hvm_ioreq_page *iorp;
+    buffered_iopage_t *pg;
     buf_ioreq_t bp;
     /* Timeoffset sends 64b data, but no address. Use two consecutive slots. */
     int qw = 0;
@@ -1439,6 +1590,12 @@ int hvm_buffered_io_send(ioreq_t *p)
     /* Ensure buffered_iopage fits in a page */
     BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
 
+    if ( !s )
+        return 0;
+
+    iorp = &s->buf_ioreq;
+    pg = iorp->va;
+
     /*
      * Return 0 for the cases we can't deal with:
      *  - 'addr' is only a 20-bit field, so we cannot address beyond 1MB
@@ -1499,8 +1656,7 @@ int hvm_buffered_io_send(ioreq_t *p)
     wmb();
     pg->write_pointer += qw ? 2 : 1;
 
-    notify_via_xen_event_channel(v->domain,
-            v->domain->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN]);
+    notify_via_xen_event_channel(d, s->buf_ioreq_evtchn);
     spin_unlock(&iorp->lock);
     
     return 1;
@@ -1508,19 +1664,23 @@ int hvm_buffered_io_send(ioreq_t *p)
 
 bool_t hvm_send_assist_req(struct vcpu *v, ioreq_t *proto_p)
 {
+    struct domain *d = v->domain;
+    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
     ioreq_t *p;
 
     if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
         return 0; /* implicitly bins the i/o operation */
 
-    if ( !(p = get_ioreq(v)) )
+    if ( !s )
         return 0;
 
+    p = get_ioreq(s, v->vcpu_id);
+
     if ( unlikely(p->state != STATE_IOREQ_NONE) )
     {
         /* This indicates a bug in the device model. Crash the domain. */
         gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
-        domain_crash(v->domain);
+        domain_crash(d);
         return 0;
     }
 
@@ -1533,14 +1693,14 @@ bool_t hvm_send_assist_req(struct vcpu *v, ioreq_t *proto_p)
     p->df = proto_p->df;
     p->data = proto_p->data;
 
-    prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);
+    prepare_wait_on_xen_event_channel(p->vp_eport);
 
     /*
      * Following happens /after/ blocking and setting up ioreq contents.
      * prepare_wait_on_xen_event_channel() is an implicit barrier.
      */
     p->state = STATE_IOREQ_READY;
-    notify_via_xen_event_channel(v->domain, v->arch.hvm_vcpu.xen_port);
+    notify_via_xen_event_channel(d, p->vp_eport);
 
     return 1;
 }
@@ -4133,21 +4293,6 @@ static int hvmop_flush_tlb_all(void)
     return 0;
 }
 
-static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
-                                     int *p_port)
-{
-    int old_port, new_port;
-
-    new_port = alloc_unbound_xen_event_channel(v, remote_domid, NULL);
-    if ( new_port < 0 )
-        return new_port;
-
-    /* xchg() ensures that only we call free_xen_event_channel(). */
-    old_port = xchg(p_port, new_port);
-    free_xen_event_channel(v, old_port);
-    return 0;
-}
-
 long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
 
 {
@@ -4160,7 +4305,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
     case HVMOP_get_param:
     {
         struct xen_hvm_param a;
-        struct hvm_ioreq_page *iorp;
         struct domain *d;
         struct vcpu *v;
 
@@ -4193,19 +4337,12 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             switch ( a.index )
             {
             case HVM_PARAM_IOREQ_PFN:
-                iorp = &d->arch.hvm_domain.ioreq;
-                if ( (rc = hvm_set_ioreq_page(d, iorp, a.value)) != 0 )
-                    break;
-                spin_lock(&iorp->lock);
-                if ( iorp->va != NULL )
-                    /* Initialise evtchn port info if VCPUs already created. */
-                    for_each_vcpu ( d, v )
-                        get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
-                spin_unlock(&iorp->lock);
+                rc = hvm_set_ioreq_server_pfn(d->arch.hvm_domain.ioreq_server,
+                                              a.value);
                 break;
             case HVM_PARAM_BUFIOREQ_PFN: 
-                iorp = &d->arch.hvm_domain.buf_ioreq;
-                rc = hvm_set_ioreq_page(d, iorp, a.value);
+                rc = hvm_set_ioreq_server_buf_pfn(d->arch.hvm_domain.ioreq_server,
+                                                  a.value);
                 break;
             case HVM_PARAM_CALLBACK_IRQ:
                 hvm_set_callback_via(d, a.value);
@@ -4260,31 +4397,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
                 if ( a.value == DOMID_SELF )
                     a.value = curr_d->domain_id;
 
-                rc = 0;
-                domain_pause(d); /* safe to change per-vcpu xen_port */
-                if ( d->vcpu[0] )
-                    rc = hvm_replace_event_channel(d->vcpu[0], a.value,
-                             (int *)&d->vcpu[0]->domain->arch.hvm_domain.params
-                                     [HVM_PARAM_BUFIOREQ_EVTCHN]);
-                if ( rc )
-                {
-                    domain_unpause(d);
-                    break;
-                }
-                iorp = &d->arch.hvm_domain.ioreq;
-                for_each_vcpu ( d, v )
-                {
-                    rc = hvm_replace_event_channel(v, a.value,
-                                                   &v->arch.hvm_vcpu.xen_port);
-                    if ( rc )
-                        break;
-
-                    spin_lock(&iorp->lock);
-                    if ( iorp->va != NULL )
-                        get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
-                    spin_unlock(&iorp->lock);
-                }
-                domain_unpause(d);
+                rc = hvm_set_ioreq_server_domid(d->arch.hvm_domain.ioreq_server,
+                                                a.value);
                 break;
             case HVM_PARAM_ACPI_S_STATE:
                 /* Not reflexive, as we must domain_pause(). */
@@ -4379,6 +4493,12 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
         {
             switch ( a.index )
             {
+            case HVM_PARAM_BUFIOREQ_EVTCHN: {
+                struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+
+                a.value = s->buf_ioreq_evtchn;
+                break;
+            }
             case HVM_PARAM_ACPI_S_STATE:
                 a.value = d->arch.hvm_domain.is_s3_suspended ? 3 : 0;
                 break;
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index b1e3187..a77b83d 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -41,10 +41,17 @@ struct hvm_ioreq_page {
     void *va;
 };
 
-struct hvm_domain {
+struct hvm_ioreq_server {
+    struct domain          *domain;
+    domid_t                domid;
     struct hvm_ioreq_page  ioreq;
+    int                    ioreq_evtchn[MAX_HVM_VCPUS];
     struct hvm_ioreq_page  buf_ioreq;
+    int                    buf_ioreq_evtchn;
+};
 
+struct hvm_domain {
+    struct hvm_ioreq_server *ioreq_server;
     struct pl_time         pl_time;
 
     struct hvm_io_handler *io_handler;
@@ -100,3 +107,12 @@ struct hvm_domain {
 
 #endif /* __ASM_X86_HVM_DOMAIN_H__ */
 
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 122ab0d..08e98fb 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -138,8 +138,6 @@ struct hvm_vcpu {
     spinlock_t          tm_lock;
     struct list_head    tm_list;
 
-    int                 xen_port;
-
     bool_t              flag_dr_dirty;
     bool_t              debug_state_latch;
     bool_t              single_step;
@@ -186,3 +184,13 @@ struct hvm_vcpu {
 };
 
 #endif /* __ASM_X86_HVM_VCPU_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
-- 
1.7.10.4

