[Xen-devel] [PATCH 2/2] x86: latch current->domain in do_physdev_op()



... and drop the function-wide latching of current, which is needed
more than once only in PHYSDEVOP_set_iobitmap, and not at all in
almost all other cases.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
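
As background on the idiom (illustrative only, not part of the patch):
in Xen, current is a macro wrapping a per-CPU lookup (get_current()),
so every use of current->domain re-evaluates that lookup before
dereferencing. Latching the domain pointer into a local up front avoids
the repeated per-CPU reads and keeps the lines shorter. A minimal,
self-contained sketch of the pattern, using hypothetical stand-ins for
the Xen definitions:

#include <errno.h>
#include <stdio.h>

struct domain { int nr_pirqs; };
struct vcpu   { struct domain *domain; };

/* Stand-in for Xen's get_current(); the real one reads per-CPU state. */
static struct vcpu *get_current(void)
{
    static struct domain dom = { .nr_pirqs = 256 };
    static struct vcpu vc = { .domain = &dom };
    return &vc;
}
#define current (get_current())

static int do_op(int pirq)
{
    struct domain *currd = current->domain;    /* latch once up front */

    if ( pirq < 0 || pirq >= currd->nr_pirqs ) /* reuse the local copy */
        return -EINVAL;
    return 0;
}

int main(void)
{
    printf("%d %d\n", do_op(100), do_op(300)); /* e.g. "0 -22" on Linux */
    return 0;
}

The patch itself keeps a vcpu-level latch (curr) only in
PHYSDEVOP_set_iobitmap, the one case that uses the vcpu more than once;
everywhere else the domain latch (currd) suffices.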

--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -291,7 +291,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
 {
     int irq;
     ret_t ret;
-    struct vcpu *v = current;
+    struct domain *currd = current->domain;
 
     switch ( cmd )
     {
@@ -303,32 +303,31 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
         if ( copy_from_guest(&eoi, arg, 1) != 0 )
             break;
         ret = -EINVAL;
-        if ( eoi.irq >= v->domain->nr_pirqs )
+        if ( eoi.irq >= currd->nr_pirqs )
             break;
-        spin_lock(&v->domain->event_lock);
-        pirq = pirq_info(v->domain, eoi.irq);
+        spin_lock(&currd->event_lock);
+        pirq = pirq_info(currd, eoi.irq);
         if ( !pirq ) {
-            spin_unlock(&v->domain->event_lock);
+            spin_unlock(&currd->event_lock);
             break;
         }
-        if ( v->domain->arch.auto_unmask )
+        if ( currd->arch.auto_unmask )
             evtchn_unmask(pirq->evtchn);
-        if ( is_pv_domain(v->domain) ||
-             domain_pirq_to_irq(v->domain, eoi.irq) > 0 )
+        if ( is_pv_domain(currd) || domain_pirq_to_irq(currd, eoi.irq) > 0 )
             pirq_guest_eoi(pirq);
-        if ( is_hvm_domain(v->domain) &&
-                domain_pirq_to_emuirq(v->domain, eoi.irq) > 0 )
+        if ( is_hvm_domain(currd) &&
+             domain_pirq_to_emuirq(currd, eoi.irq) > 0 )
         {
-            struct hvm_irq *hvm_irq = &v->domain->arch.hvm_domain.irq;
-            int gsi = domain_pirq_to_emuirq(v->domain, eoi.irq);
+            struct hvm_irq *hvm_irq = &currd->arch.hvm_domain.irq;
+            int gsi = domain_pirq_to_emuirq(currd, eoi.irq);
 
             /* if this is a level irq and count > 0, send another
              * notification */ 
             if ( gsi >= NR_ISAIRQS /* ISA irqs are edge triggered */
                     && hvm_irq->gsi_assert_count[gsi] )
-                send_guest_pirq(v->domain, pirq);
+                send_guest_pirq(currd, pirq);
         }
-        spin_unlock(&v->domain->event_lock);
+        spin_unlock(&currd->event_lock);
         ret = 0;
         break;
     }
@@ -352,7 +351,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
             break;
         }
 
-        if ( cmpxchg(&v->domain->arch.pirq_eoi_map_mfn,
+        if ( cmpxchg(&currd->arch.pirq_eoi_map_mfn,
                      0, page_to_mfn(page)) != 0 )
         {
             put_page_and_type(page);
@@ -360,16 +359,16 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
             break;
         }
 
-        v->domain->arch.pirq_eoi_map = __map_domain_page_global(page);
-        if ( v->domain->arch.pirq_eoi_map == NULL )
+        currd->arch.pirq_eoi_map = __map_domain_page_global(page);
+        if ( currd->arch.pirq_eoi_map == NULL )
         {
-            v->domain->arch.pirq_eoi_map_mfn = 0;
+            currd->arch.pirq_eoi_map_mfn = 0;
             put_page_and_type(page);
             ret = -ENOSPC;
             break;
         }
         if ( cmd == PHYSDEVOP_pirq_eoi_gmfn_v1 )
-            v->domain->arch.auto_unmask = 1;
+            currd->arch.auto_unmask = 1;
 
         ret = 0;
         break;
@@ -377,7 +376,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
 
     /* Legacy since 0x00030202. */
     case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
-        ret = pirq_guest_unmask(v->domain);
+        ret = pirq_guest_unmask(currd);
         break;
     }
 
@@ -388,12 +387,12 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
             break;
         irq = irq_status_query.irq;
         ret = -EINVAL;
-        if ( (irq < 0) || (irq >= v->domain->nr_pirqs) )
+        if ( (irq < 0) || (irq >= currd->nr_pirqs) )
             break;
         irq_status_query.flags = 0;
-        if ( is_hvm_domain(v->domain) &&
-             domain_pirq_to_irq(v->domain, irq) <= 0 &&
-             domain_pirq_to_emuirq(v->domain, irq) == IRQ_UNBOUND )
+        if ( is_hvm_domain(currd) &&
+             domain_pirq_to_irq(currd, irq) <= 0 &&
+             domain_pirq_to_emuirq(currd, irq) == IRQ_UNBOUND )
         {
             ret = -EINVAL;
             break;
@@ -408,7 +407,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
          * then dom0 is probably modern anyway.
          */
         irq_status_query.flags |= XENIRQSTAT_needs_eoi;
-        if ( pirq_shared(v->domain, irq) )
+        if ( pirq_shared(currd, irq) )
             irq_status_query.flags |= XENIRQSTAT_shared;
         ret = __copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
         break;
@@ -469,7 +468,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
         ret = -EFAULT;
         if ( copy_from_guest(&apic, arg, 1) != 0 )
             break;
-        ret = xsm_apic(XSM_PRIV, v->domain, cmd);
+        ret = xsm_apic(XSM_PRIV, currd, cmd);
         if ( ret )
             break;
         ret = ioapic_guest_read(apic.apic_physbase, apic.reg, &apic.value);
@@ -483,7 +482,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
         ret = -EFAULT;
         if ( copy_from_guest(&apic, arg, 1) != 0 )
             break;
-        ret = xsm_apic(XSM_PRIV, v->domain, cmd);
+        ret = xsm_apic(XSM_PRIV, currd, cmd);
         if ( ret )
             break;
         ret = ioapic_guest_write(apic.apic_physbase, apic.reg, apic.value);
@@ -499,7 +498,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
 
         /* Use the APIC check since this dummy hypercall should still only
          * be called by the domain with access to program the ioapic */
-        ret = xsm_apic(XSM_PRIV, v->domain, cmd);
+        ret = xsm_apic(XSM_PRIV, currd, cmd);
         if ( ret )
             break;
 
@@ -529,15 +528,16 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
         if ( set_iopl.iopl > 3 )
             break;
         ret = 0;
-        v->arch.pv_vcpu.iopl = set_iopl.iopl;
+        current->arch.pv_vcpu.iopl = set_iopl.iopl;
         break;
     }
 
     case PHYSDEVOP_set_iobitmap: {
+        struct vcpu *curr = current;
         struct physdev_set_iobitmap set_iobitmap;
 
         ret = -ENOSYS;
-        if ( is_pvh_vcpu(current) )
+        if ( is_pvh_vcpu(curr) )
             break;
 
         ret = -EFAULT;
@@ -549,11 +549,12 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
             break;
         ret = 0;
 #ifndef COMPAT
-        v->arch.pv_vcpu.iobmp = set_iobitmap.bitmap;
+        curr->arch.pv_vcpu.iobmp = set_iobitmap.bitmap;
 #else
-        guest_from_compat_handle(v->arch.pv_vcpu.iobmp, set_iobitmap.bitmap);
+        guest_from_compat_handle(curr->arch.pv_vcpu.iobmp,
+                                 set_iobitmap.bitmap);
 #endif
-        v->arch.pv_vcpu.iobmp_limit = set_iobitmap.nr_ports;
+        curr->arch.pv_vcpu.iobmp_limit = set_iobitmap.nr_ports;
         break;
     }
 
@@ -713,18 +714,17 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
     }
     case PHYSDEVOP_get_free_pirq: {
         struct physdev_get_free_pirq out;
-        struct domain *d = v->domain;
 
         ret = -EFAULT;
         if ( copy_from_guest(&out, arg, 1) != 0 )
             break;
 
-        spin_lock(&d->event_lock);
+        spin_lock(&currd->event_lock);
 
-        ret = get_free_pirq(d, out.type);
+        ret = get_free_pirq(currd, out.type);
         if ( ret >= 0 )
         {
-            struct pirq *info = pirq_get_info(d, ret);
+            struct pirq *info = pirq_get_info(currd, ret);
 
             if ( info )
                 info->arch.irq = PIRQ_ALLOCATED;
@@ -732,7 +732,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
                 ret = -ENOMEM;
         }
 
-        spin_unlock(&d->event_lock);
+        spin_unlock(&currd->event_lock);
 
         if ( ret >= 0 )
         {
@@ -746,7 +746,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
     case PHYSDEVOP_dbgp_op: {
         struct physdev_dbgp_op op;
 
-        if ( !is_hardware_domain(v->domain) )
+        if ( !is_hardware_domain(currd) )
             ret = -EPERM;
         else if ( copy_from_guest(&op, arg, 1) )
             ret = -EFAULT;

