
[win-pv-devel] [PATCH 12/15] Use a DPC per CPU for EVTCHN Trigger

Using a single DPC means potentially re-affinitizing it for each use, and
because KeSetTargetProcessorDpc() is not atomic with KeInsertQueueDpc()
Windows could end up trying to insert the same DPC onto the DPC queues of
multiple CPUs at the same time, which probably won't end well. Using a
DPC per CPU, each targeted once at initialization, is a lot safer.
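
To illustrate (a minimal sketch of the two patterns, with hypothetical
names; not the driver code verbatim):

  /* Racy: one shared DPC, re-targeted on every trigger. Nothing
   * orders the re-target against a concurrent insert from another
   * channel, so the DPC can be sitting on one CPU's queue while
   * being pointed at another. */
  KeSetTargetProcessorDpc(&SharedDpc, (CCHAR)Channel->Cpu);
  KeInsertQueueDpc(&SharedDpc, Channel, NULL);

  /* Safe: one DPC per CPU, each targeted exactly once at
   * initialization. A trigger just queues the pre-targeted DPC for
   * the channel's CPU; KeInsertQueueDpc() simply returns FALSE if
   * that DPC is already queued. */
  KeInsertQueueDpc(&DpcArray[Channel->Cpu], Channel, NULL);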

Also flush queued DPCs in EvtchnTeardown() before the KDPC structures are
zeroed, so that no callback can still be queued or running when its DPC
is wiped.
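
For reference, KeFlushQueuedDpcs() must be called at PASSIVE_LEVEL and
does not return until every DPC already queued has run to completion on
every CPU, which is what makes the subsequent RtlZeroMemory() safe (the
ordering, as in the teardown hunk below):

  ASSERT3U(KeGetCurrentIrql(), ==, PASSIVE_LEVEL);
  KeFlushQueuedDpcs();    /* wait out any in-flight EvtchnCallback */
  RtlZeroMemory(&Context->Dpc, sizeof (KDPC) * MAXIMUM_PROCESSORS);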

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
 src/xenbus/evtchn.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/src/xenbus/evtchn.c b/src/xenbus/evtchn.c
index 4cb00eb..81f97e4 100644
--- a/src/xenbus/evtchn.c
+++ b/src/xenbus/evtchn.c
@@ -105,7 +105,7 @@ struct _XENBUS_EVTCHN_CONTEXT {
     BOOLEAN                         UseEvtchnFifoAbi;
     PXENBUS_HASH_TABLE              Table;
     LIST_ENTRY                      List;
-    KDPC                            Dpc;
+    KDPC                            Dpc[MAXIMUM_PROCESSORS];
 };
 
 #define XENBUS_EVTCHN_TAG  'CTVE'
@@ -471,14 +471,14 @@ EvtchnTrigger(
     )
 {
     PXENBUS_EVTCHN_CONTEXT      Context = Interface->Context;
-    PKDPC                       Dpc = &Context->Dpc;
+    PKDPC                       Dpc;
     KIRQL                       Irql;
 
     ASSERT3U(Channel->Magic, ==, XENBUS_EVTCHN_CHANNEL_MAGIC);
 
     KeAcquireSpinLock(&Channel->Lock, &Irql);
 
-    KeSetTargetProcessorDpc(Dpc, (CCHAR)Channel->Cpu);
+    Dpc = &Context->Dpc[Channel->Cpu];
     KeInsertQueueDpc(Dpc, Channel, NULL);
 
     KeReleaseSpinLock(&Channel->Lock, Irql);
@@ -1213,6 +1213,7 @@ EvtchnInitialize(
 {
     HANDLE                      ParametersKey;
     ULONG                       UseEvtchnFifoAbi;
+    ULONG                       Cpu;
     NTSTATUS                    status;
 
     Trace("====>\n");
@@ -1269,7 +1270,13 @@ EvtchnInitialize(
 
     InitializeListHead(&(*Context)->List);
     KeInitializeSpinLock(&(*Context)->Lock);
-    KeInitializeDpc(&(*Context)->Dpc, EvtchnCallback, Context);
+
+    for (Cpu = 0; Cpu < MAXIMUM_PROCESSORS; Cpu++) {
+        PKDPC   Dpc = &(*Context)->Dpc[Cpu];
+
+        KeInitializeDpc(Dpc, EvtchnCallback, *Context);
+        KeSetTargetProcessorDpc(Dpc, (CCHAR)Cpu);
+    }
 
     (*Context)->Fdo = Fdo;
 
@@ -1380,9 +1387,12 @@ EvtchnTeardown(
 {
     Trace("====>\n");
 
+    ASSERT3U(KeGetCurrentIrql(), ==, PASSIVE_LEVEL);
+    KeFlushQueuedDpcs();
+
     Context->Fdo = NULL;
 
-    RtlZeroMemory(&Context->Dpc, sizeof (KDPC));
+    RtlZeroMemory(&Context->Dpc, sizeof (KDPC) * MAXIMUM_PROCESSORS);
     RtlZeroMemory(&Context->Lock, sizeof (KSPIN_LOCK));
     RtlZeroMemory(&Context->List, sizeof (LIST_ENTRY));
 
-- 
2.1.1

