
[Xen-devel] [RFC PATCH V5 12/14] xen: introduce xen_event_channel_register_3level



Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 drivers/xen/events.c |  172 ++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 161 insertions(+), 11 deletions(-)

diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 270821d..6bb9a47 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -201,6 +201,16 @@ static bool (*pirq_needs_eoi)(unsigned irq);
  */
 #define EVTCHN_WORD_BITORDER (sizeof(xen_ulong_t) == 8 ? 6 : 5)
 /*
+ * With the 3-level event channel ABI and a 64-bit event word size we have
+ * 256k event channels in total; with a 32-bit word size we have 32k event
+ * channels in total. A 4K page can represent 4096 * 8 = 32k event channels,
+ * so the bitmaps for 3-level event channels need 1 page for 32-bit words
+ * and 8 pages for 64-bit words.
+ */
+#define BITMAP_PG_ORDER (BITS_PER_EVTCHN_WORD == 64 ? 3 : 0)
+#define BITMAP_NR_PAGES (BITMAP_PG_ORDER == 3 ? 8 : 1)
+
+/*
  * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
  * array. Primarily to avoid long lines (hence the terse name).
  */
@@ -2146,6 +2156,115 @@ uint64_t xen_event_channel_query_extended_abis(void)
        return query.abis;
 }
 
+static int xen_event_channel_register_3level_bitmaps(void)
+{
+       struct evtchn_register_3level reg;
+       int i;
+       int rc;
+       xen_ulong_t _evtchn_pending[EVTCHN_MAX_L3_PAGES];
+       xen_ulong_t _evtchn_mask[EVTCHN_MAX_L3_PAGES];
+
+       /*
+        * We can only register the 3-level ABI in the following states:
+        * a) no extended ABI is in use
+        * b) we come from the restore path, which already has the ABI set
+        *    and the pages allocated
+        */
+       if (!(xen_evtchn_extended == EVTCHN_EXTENDED_NONE ||
+             (xen_evtchn_extended == EVTCHN_EXTENDED_L3 &&
+              evtchn_pending && evtchn_mask)))
+               return -EINVAL;
+
+       /*
+        * If we come from the restore path, we don't need to allocate
+        * pages.
+        */
+       if (!evtchn_pending && !evtchn_mask) {
+               /* Get zeroed pages */
+               evtchn_pending =
+                       (xen_ulong_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                                       BITMAP_PG_ORDER);
+               evtchn_mask =
+                       (xen_ulong_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                                       BITMAP_PG_ORDER);
+               if (!evtchn_pending || !evtchn_mask) {
+                       free_pages((unsigned long)evtchn_pending, BITMAP_PG_ORDER);
+                       free_pages((unsigned long)evtchn_mask, BITMAP_PG_ORDER);
+                       evtchn_pending = NULL;
+                       evtchn_mask = NULL;
+                       rc = -ENOMEM;
+                       goto err;
+               }
+       }
+
+       memset(&reg, 0, sizeof(reg));
+
+       for (i = 0; i < BITMAP_NR_PAGES; i++) {
+               unsigned long offset = PAGE_SIZE * i;
+               _evtchn_pending[i] =
+                       arbitrary_virt_to_mfn(
+                               (void *)((unsigned long)evtchn_pending+offset));
+               _evtchn_mask[i] =
+                       arbitrary_virt_to_mfn(
+                               (void *)((unsigned long)evtchn_mask+offset));
+       }
+
+       reg.cmd = REGISTER_BITMAPS;
+       reg.u.bitmaps.nr_pages = BITMAP_NR_PAGES;
+       reg.u.bitmaps.evtchn_pending = _evtchn_pending;
+       reg.u.bitmaps.evtchn_mask = _evtchn_mask;
+
+       rc = HYPERVISOR_event_channel_op(EVTCHNOP_register_3level, &reg);
+       if (rc) {
+               free_pages((unsigned long)evtchn_pending, BITMAP_PG_ORDER);
+               free_pages((unsigned long)evtchn_mask, BITMAP_PG_ORDER);
+               evtchn_pending = NULL;
+               evtchn_mask = NULL;
+       }
+
+err:
+       return rc;
+}
+
+int xen_event_channel_register_3level_l2selector(int cpu)
+{
+       struct evtchn_register_3level reg;
+       int rc;
+
+       memset(&reg, 0, sizeof(reg));
+
+       reg.cmd = REGISTER_L2_SELECTOR;
+
+       reg.u.l2_selector.cpu_id = cpu;
+       reg.u.l2_selector.mfn =
+               arbitrary_virt_to_mfn(&per_cpu(evtchn_sel_l2, cpu));
+       reg.u.l2_selector.offset =
+               offset_in_page(&per_cpu(evtchn_sel_l2, cpu));
+
+       rc = HYPERVISOR_event_channel_op(EVTCHNOP_register_3level, &reg);
+
+       if (rc == -EBUSY) /* already registered, this can happen in hotplug */
+               return 0;
+
+       if (!rc)
+               per_cpu(evtchn_sel, cpu)[1] = per_cpu(evtchn_sel_l2, cpu);
+
+       return rc;
+}
+
+static int xen_event_channel_register_3level(void)
+{
+       int rc;
+
+       rc = xen_event_channel_register_3level_bitmaps();
+       if (rc)
+               return rc;
+
+       rc = xen_event_channel_register_3level_l2selector(0);
+
+       return rc;
+}
+
 const struct evtchn_ops evtchn_l2_ops = {
        .unmask = __unmask_local_port_l2,
        .debug_interrupt = xen_debug_interrupt_l2,
@@ -2158,6 +2277,47 @@ const struct evtchn_ops evtchn_l3_ops = {
        .do_upcall = __xen_evtchn_do_upcall_l3
 };
 
+void xen_set_event_channel_extended(uint64_t abi)
+{
+       struct shared_info *s = HYPERVISOR_shared_info;
+       int cpu;
+
+       switch (abi) {
+       case EVTCHN_EXTENDED_NONE:
+               evtchn_pending = s->evtchn_pending;
+               evtchn_mask = s->evtchn_mask;
+               for_each_possible_cpu(cpu) {
+                       struct vcpu_info *vcpu_info = per_cpu(xen_vcpu, cpu);
+                       per_cpu(evtchn_sel, cpu)[0] =
+                               &vcpu_info->evtchn_pending_sel;
+               }
+               xen_evtchn_extended = EVTCHN_EXTENDED_NONE;
+               xen_nr_event_channels = NR_EVENT_CHANNELS_L2;
+               eops = &evtchn_l2_ops;
+               printk(KERN_INFO "Using 2-level event channel ABI.\n");
+               break;
+       case EVTCHN_EXTENDED_L3:
+               /* evtchn_pending/mask already set */
+               for_each_possible_cpu(cpu) {
+                       struct vcpu_info *vcpu_info = per_cpu(xen_vcpu, cpu);
+                       per_cpu(evtchn_sel, cpu)[0] =
+                               &vcpu_info->evtchn_pending_sel;
+                       per_cpu(evtchn_sel, cpu)[1] =
+                               per_cpu(evtchn_sel_l2, cpu);
+               }
+               xen_evtchn_extended = EVTCHN_EXTENDED_L3;
+               xen_nr_event_channels = NR_EVENT_CHANNELS_L3;
+               eops = &evtchn_l3_ops;
+               printk(KERN_INFO "Using 3-level event channel ABI.\n");
+               break;
+       default:
+               printk(KERN_EMERG
+                      "Trying to set unsupported event channel ABI %llx\n",
+                      abi);
+               BUG();
+       }
+}
+
 static int __cpuinit xen_events_notifier_cb(struct notifier_block *self,
                                            unsigned long action,
                                            void *hcpu)
@@ -2197,18 +2357,8 @@ void __init xen_init_IRQ(void)
 {
        int i;
        int cpu;
-       struct shared_info *s = HYPERVISOR_shared_info;
-
-       evtchn_pending = s->evtchn_pending;
-       evtchn_mask = s->evtchn_mask;
-       for_each_possible_cpu(cpu) {
-               struct vcpu_info *vcpu_info = per_cpu(xen_vcpu, cpu);
-               per_cpu(evtchn_sel, cpu)[0] = &vcpu_info->evtchn_pending_sel;
-       }
 
-       xen_evtchn_extended = EVTCHN_EXTENDED_NONE;
-       xen_nr_event_channels = NR_EVENT_CHANNELS_L2;
-       eops = &evtchn_l2_ops;
+       xen_set_event_channel_extended(EVTCHN_EXTENDED_NONE);
 
        evtchn_to_irq = kcalloc(xen_nr_event_channels, sizeof(*evtchn_to_irq),
                                    GFP_KERNEL);
-- 
1.7.10.4
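
(For reference, a quick standalone check of the bitmap sizing arithmetic from
the comment above BITMAP_PG_ORDER. This is an illustrative userspace sketch,
not part of the patch; the constant names merely mirror the ones used above.)

#include <stdio.h>

#define PAGE_SIZE 4096UL

static void show(unsigned int bits_per_word)
{
	/* 3-level ABI: 256k event channels with 64-bit event words,
	 * 32k event channels with 32-bit event words. */
	unsigned long nr_channels = (bits_per_word == 64) ? 256 * 1024
							  : 32 * 1024;
	/* One 4K page holds 4096 * 8 = 32k bits, i.e. 32k channels. */
	unsigned long bits_per_page = PAGE_SIZE * 8;
	unsigned long nr_pages = nr_channels / bits_per_page;

	printf("%u-bit words: %lu channels -> %lu page(s) per bitmap\n",
	       bits_per_word, nr_channels, nr_pages);
}

int main(void)
{
	show(32);	/* prints 1 page,  matching BITMAP_PG_ORDER == 0 */
	show(64);	/* prints 8 pages, matching BITMAP_PG_ORDER == 3 */
	return 0;
}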

