
[Xen-changelog] Rework hvm_wait_io() -- now tries to be a polite user of event channels



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 4a9a39d08a065edc829bbc887b56c9c0509f1eb2
# Parent  0828f5f18b5609139d171ad64d5119c79ae02fdb
Rework hvm_wait_io() -- it now tries to be a polite user of
event channels by re-setting the selector and master pending
flags when it exits. This should give better behaviour when
other users share the event channels.

This needs some testing to be sure it doesn't break
anything or trigger latent bugs.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
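
[For readers less familiar with the event-channel layout this patch
manipulates, here is a minimal standalone C sketch -- not part of the
patch, and the structures are simplified stand-ins for the real
shared_info/vcpu_info layouts -- of the three-level pending hierarchy
and of the "polite" re-arming step hvm_wait_io() now performs on exit:

    #include <limits.h>

    #define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)
    #define NR_EVENT_WORDS  8   /* arbitrary size for the sketch */

    /* Simplified stand-in for the shared-info page. */
    struct sketch_shared_info {
        unsigned long evtchn_pending[NR_EVENT_WORDS]; /* one bit per port */
    };

    /* Simplified stand-in for the per-vcpu info block. */
    struct sketch_vcpu_info {
        unsigned long evtchn_pending_sel;    /* one bit per pending word */
        unsigned char evtchn_upcall_pending; /* master "anything pending" flag */
    };

    /*
     * After consuming only our own port, re-arm the selector and master
     * flags if any events (ours or others') are still pending, so that
     * other users of the event channels still receive their upcall.
     * The real code uses atomic bit operations; plain stores suffice here.
     */
    static void rearm_after_consume(struct sketch_shared_info *s,
                                    struct sketch_vcpu_info *v,
                                    int port)
    {
        if (s->evtchn_pending[port / BITS_PER_LONG])
            v->evtchn_pending_sel |= 1UL << (port / BITS_PER_LONG);
        if (v->evtchn_pending_sel)
            v->evtchn_upcall_pending = 1;
    }

The key point is the ordering: the per-port event bit lives in the
shared bitmap, the selector summarises which bitmap words are
non-empty, and the master flag summarises the selector. hvm_wait_io()
clears them top-down before checking for work (avoiding a lost-wakeup
race) and re-sets them bottom-up on exit so that notifications
belonging to other consumers are not swallowed.]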

diff -r 0828f5f18b56 -r 4a9a39d08a06 xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c     Fri Feb 10 16:57:13 2006
+++ b/xen/arch/x86/hvm/io.c     Sat Feb 11 12:06:49 2006
@@ -690,62 +690,41 @@
     }
 }
 
-int hvm_clear_pending_io_event(struct vcpu *v)
-{
-    struct domain *d = v->domain;
-    int port = iopacket_port(d);
-
-    /* evtchn_pending_sel bit is shared by other event channels. */
-    if (!d->shared_info->evtchn_pending[port/BITS_PER_LONG])
-        clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
-
-    /* Note: HVM domains may need upcalls as well. */
-    if (!v->vcpu_info->evtchn_pending_sel)
-        clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
-
-    /* Clear the pending bit for port. */
-    return test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]);
-}
-
 /*
- * Because we've cleared the pending events first, we need to guarantee that
- * all events to be handled by xen for HVM domains are taken care of here.
- *
- * interrupts are guaranteed to be checked before resuming guest.
- * HVM upcalls have been already arranged for if necessary.
- */
-void hvm_check_events(struct vcpu *v)
-{
-    /*
-     * Clear the event *before* checking for work. This should
-     * avoid the set-and-check races
-     */
-    if (hvm_clear_pending_io_event(current))
-        hvm_io_assist(v);
-}
-
-/*
- * On exit from hvm_wait_io, we're guaranteed to have a I/O response
- * from the device model.
+ * On exit from hvm_wait_io, we're guaranteed not to be waiting on
+ * I/O response from the device model.
  */
 void hvm_wait_io(void)
 {
-    int port = iopacket_port(current->domain);
-
-    do {
-        if (!test_bit(port, &current->domain->shared_info->evtchn_pending[0]))
-           do_sched_op(SCHEDOP_block, 0);
-
-        hvm_check_events(current);
-        if (!test_bit(ARCH_HVM_IO_WAIT, &current->arch.hvm_vcpu.ioflags))
-            break;
-        /*
-        * Events other than IOPACKET_PORT might have woken us up.
-        * In that case, safely go back to sleep.
-        */
-        clear_bit(port/BITS_PER_LONG, &current->vcpu_info->evtchn_pending_sel);
-        clear_bit(0, &current->vcpu_info->evtchn_upcall_pending);
-    } while(1);
+    struct vcpu *v = current;
+    struct domain *d = v->domain;    
+    int port = iopacket_port(d);
+
+    for ( ; ; )
+    {
+        /* Clear master flag, selector flag, event flag each in turn. */
+        v->vcpu_info->evtchn_upcall_pending = 0;
+        smp_mb__before_clear_bit();
+        clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
+        smp_mb__after_clear_bit();
+        if ( test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]) )
+            hvm_io_assist(v);
+
+        /* Need to wait for I/O responses? */
+        if ( !test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
+            break;
+
+        do_sched_op(SCHEDOP_block, 0);
+    }
+
+    /*
+     * Re-set the selector and master flags in case any other notifications
+     * are pending.
+     */
+    if ( d->shared_info->evtchn_pending[port/BITS_PER_LONG] )
+        set_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
+    if ( v->vcpu_info->evtchn_pending_sel )
+        v->vcpu_info->evtchn_upcall_pending = 1;
 }
 
 /*
diff -r 0828f5f18b56 -r 4a9a39d08a06 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Fri Feb 10 16:57:13 2006
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Sat Feb 11 12:06:49 2006
@@ -489,13 +489,8 @@
 {
     struct hvm_virpit *vpit = &v->domain->arch.hvm_domain.vpit;
     
-    if (event_pending(v)) 
-    {
-        hvm_check_events(v);
-
-        if (test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags))
-            hvm_wait_io();
-    }
+    if ( event_pending(v) )
+        hvm_wait_io();
 
     /* pick up the elapsed PIT ticks and re-enable pit_timer */
     if ( vpit->first_injected ) {
diff -r 0828f5f18b56 -r 4a9a39d08a06 xen/arch/x86/hvm/vmx/io.c
--- a/xen/arch/x86/hvm/vmx/io.c Fri Feb 10 16:57:13 2006
+++ b/xen/arch/x86/hvm/vmx/io.c Sat Feb 11 12:06:49 2006
@@ -177,17 +177,13 @@
 
     vmx_stts();
 
-    if (event_pending(v)) {
-        hvm_check_events(v);
-
-        if (test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags))
-            hvm_wait_io();
-    }
+    if ( event_pending(v) )
+        hvm_wait_io();
+
     /* pick up the elapsed PIT ticks and re-enable pit_timer */
-    if ( vpit->first_injected ) {
+    if ( vpit->first_injected )
         pickup_deactive_ticks(vpit);
-    }
-    vmx_set_tsc_shift(v,vpit);
+    vmx_set_tsc_shift(v, vpit);
 
     /* We can't resume the guest if we're waiting on I/O */
     ASSERT(!test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags));
diff -r 0828f5f18b56 -r 4a9a39d08a06 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Fri Feb 10 16:57:13 2006
+++ b/xen/include/asm-x86/hvm/support.h Sat Feb 11 12:06:49 2006
@@ -141,7 +141,6 @@
 extern void hvm_setup_platform(struct domain* d);
 extern int hvm_mmio_intercept(ioreq_t *p);
 extern int hvm_io_intercept(ioreq_t *p, int type);
-extern void hvm_check_events(struct vcpu *v);
 extern void hvm_hooks_assist(struct vcpu *v);
 extern void hvm_print_line(struct vcpu *v, const char c);
 extern void hlt_timer_fn(void *data);

