[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v20 01/10] x86: add generic resource (e.g. MSR) access hypercall



Add a generic resource access hypercall for tool stack or other
components, e.g., accessing MSR, port I/O, etc.

The resource is abstracted as a resource address/value pair.
The resource access can be any type of XEN_RESOURCE_OP_* (currently
only MSR accesses are supported, and they are white-listed). The
resource operations always run on the CPU that the caller specified.
If the caller does not care which CPU is used, it should specify the
current CPU to eliminate the IPI overhead.

Batching multiple resource operations in one call is also supported,
but the maximum number is currently limited to 2. The operations in a
batch are non-preemptible and execute in their original order. If a
preemptible batch is desirable, the multicall mechanism can be used.

Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
Signed-off-by: Chao Peng <chao.p.peng@xxxxxxxxxxxxxxx>
---
 xen/arch/x86/platform_hypercall.c        |  155 ++++++++++++++++++++++++++++++
 xen/arch/x86/x86_64/platform_hypercall.c |    4 +
 xen/include/public/platform.h            |   34 +++++++
 xen/include/xlat.lst                     |    1 +
 4 files changed, 194 insertions(+)

diff --git a/xen/arch/x86/platform_hypercall.c 
b/xen/arch/x86/platform_hypercall.c
index 2162811..55554c9 100644
--- a/xen/arch/x86/platform_hypercall.c
+++ b/xen/arch/x86/platform_hypercall.c
@@ -61,6 +61,90 @@ long cpu_down_helper(void *data);
 long core_parking_helper(void *data);
 uint32_t get_cur_idle_nums(void);
 
+#define RESOURCE_ACCESS_MAX_ENTRIES 2
+struct xen_resource_access {
+    unsigned int nr_done;
+    unsigned int nr_entries;
+    xenpf_resource_entry_t *entries;
+};
+
+static bool_t allow_access_msr(unsigned int msr)
+{
+    return 0;
+}
+
+static void check_resource_access(struct xen_resource_access *ra)
+{
+    unsigned int i;
+
+    for ( i = 0; i < ra->nr_entries; i++ )
+    {
+        int ret = 0;
+        xenpf_resource_entry_t *entry = ra->entries + i;
+
+        if ( entry->rsvd )
+        {
+            entry->u.ret = -EINVAL;
+            break;
+        }
+
+        switch ( entry->u.cmd )
+        {
+        case XEN_RESOURCE_OP_MSR_READ:
+        case XEN_RESOURCE_OP_MSR_WRITE:
+            if ( entry->idx >> 32 )
+                ret = -EINVAL;
+            else if ( !allow_access_msr(entry->idx) )
+                ret = -EACCES;
+            break;
+        default:
+            ret = -EOPNOTSUPP;
+            break;
+        }
+
+        if ( ret )
+        {
+           entry->u.ret = ret;
+           break;
+        }
+    }
+
+    ra->nr_done = i;
+}
+
+static void resource_access(void *info)
+{
+    struct xen_resource_access *ra = info;
+    unsigned int i;
+
+    for ( i = 0; i < ra->nr_done; i++ )
+    {
+        int ret;
+        xenpf_resource_entry_t *entry = ra->entries + i;
+
+        switch ( entry->u.cmd )
+        {
+        case XEN_RESOURCE_OP_MSR_READ:
+            ret = rdmsr_safe(entry->idx, entry->val);
+            break;
+        case XEN_RESOURCE_OP_MSR_WRITE:
+            ret = wrmsr_safe(entry->idx, entry->val);
+            break;
+        default:
+            BUG();
+            break;
+        }
+
+        if ( ret )
+        {
+            entry->u.ret = ret;
+            break;
+        }
+    }
+
+    ra->nr_done = i;
+}
+
 ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op)
 {
     ret_t ret = 0;
@@ -601,6 +685,77 @@ ret_t 
do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op)
     }
     break;
 
+    case XENPF_resource_op:
+    {
+        struct xen_resource_access ra;
+        unsigned int cpu;
+        XEN_GUEST_HANDLE(xenpf_resource_entry_t) guest_entries;
+
+        ra.nr_entries = op->u.resource_op.nr_entries;
+        if ( ra.nr_entries == 0 )
+        {
+            ret = 0;
+            break;
+        }
+        if ( ra.nr_entries > RESOURCE_ACCESS_MAX_ENTRIES )
+        {
+            ret = -EINVAL;
+            break;
+        }
+
+        ra.entries = xmalloc_array(xenpf_resource_entry_t, ra.nr_entries);
+        if ( !ra.entries )
+        {
+            ret = -ENOMEM;
+            break;
+        }
+
+        guest_from_compat_handle(guest_entries, op->u.resource_op.entries);
+
+        if ( copy_from_guest(ra.entries, guest_entries, ra.nr_entries) )
+        {
+            xfree(ra.entries);
+            ret = -EFAULT;
+            break;
+        }
+
+        /* Do sanity check earlier to omit the potential IPI overhead. */
+        check_resource_access(&ra);
+        if ( ra.nr_done == 0 )
+        {
+            /* Copy the return value for entry 0 if it failed. */
+            if ( __copy_to_guest(guest_entries, ra.entries, 1) )
+                ret = -EFAULT;
+            else
+                ret = 0;
+
+            xfree(ra.entries);
+            break;
+        }
+
+        cpu = op->u.resource_op.cpu;
+        if ( (cpu >= nr_cpu_ids) || !cpu_online(cpu) )
+        {
+            xfree(ra.entries);
+            ret = -ENODEV;
+            break;
+        }
+        if ( cpu == smp_processor_id() )
+            resource_access(&ra);
+        else
+            on_selected_cpus(cpumask_of(cpu), resource_access, &ra, 1);
+
+        /* Copy all if succeeded or up to the failed entry. */
+        if ( __copy_to_guest(guest_entries, ra.entries,
+                ra.nr_done < ra.nr_entries ? ra.nr_done + 1 : ra.nr_entries) )
+            ret = -EFAULT;
+        else
+            ret = ra.nr_done;
+
+        xfree(ra.entries);
+    }
+    break;
+
     default:
         ret = -ENOSYS;
         break;
diff --git a/xen/arch/x86/x86_64/platform_hypercall.c 
b/xen/arch/x86/x86_64/platform_hypercall.c
index b6f380e..ccfd30d 100644
--- a/xen/arch/x86/x86_64/platform_hypercall.c
+++ b/xen/arch/x86/x86_64/platform_hypercall.c
@@ -32,6 +32,10 @@ CHECK_pf_pcpu_version;
 CHECK_pf_enter_acpi_sleep;
 #undef xen_pf_enter_acpi_sleep
 
+#define xen_pf_resource_entry xenpf_resource_entry
+CHECK_pf_resource_entry;
+#undef xen_pf_resource_entry
+
 #define COMPAT
 #define _XEN_GUEST_HANDLE(t) XEN_GUEST_HANDLE(t)
 #define _XEN_GUEST_HANDLE_PARAM(t) XEN_GUEST_HANDLE_PARAM(t)
diff --git a/xen/include/public/platform.h b/xen/include/public/platform.h
index 053b9fa..97aa3c3c 100644
--- a/xen/include/public/platform.h
+++ b/xen/include/public/platform.h
@@ -528,6 +528,39 @@ typedef struct xenpf_core_parking xenpf_core_parking_t;
 DEFINE_XEN_GUEST_HANDLE(xenpf_core_parking_t);
 
 /*
+ * Access generic platform resources (e.g., MSR accesses, port I/O, etc.)
+ * in unified way. Batch resource operations in one call are supported and
+ * they are always non-preemptible and executed in their original order.
+ * The batch itself returns a negative integer for general errors, or a
+ * non-negative integer for the number of successful operations. For the latter
+ * case, the @ret in the failed entry (if any) indicates the exact error.
+ */
+#define XENPF_resource_op   61
+
+#define XEN_RESOURCE_OP_MSR_READ  0
+#define XEN_RESOURCE_OP_MSR_WRITE 1
+
+struct xenpf_resource_entry {
+    union {
+        uint32_t cmd;   /* IN: XEN_RESOURCE_OP_* */
+        int32_t  ret;   /* OUT: return value for failed entry */
+    } u;
+    uint32_t rsvd;      /* IN: padding and must be zero */
+    uint64_t idx;       /* IN: resource address to access */
+    uint64_t val;       /* IN/OUT: resource value to set/get */
+};
+typedef struct xenpf_resource_entry xenpf_resource_entry_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_resource_entry_t);
+
+struct xenpf_resource_op {
+    uint32_t nr_entries;    /* number of resource entries */
+    uint32_t cpu;           /* which cpu to run */
+    XEN_GUEST_HANDLE(xenpf_resource_entry_t) entries;
+};
+typedef struct xenpf_resource_op xenpf_resource_op_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_resource_op_t);
+
+/*
  * ` enum neg_errnoval
  * ` HYPERVISOR_platform_op(const struct xen_platform_op*);
  */
@@ -553,6 +586,7 @@ struct xen_platform_op {
         struct xenpf_cpu_hotadd        cpu_add;
         struct xenpf_mem_hotadd        mem_add;
         struct xenpf_core_parking      core_parking;
+        struct xenpf_resource_op       resource_op;
         uint8_t                        pad[128];
     } u;
 };
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 9a35dd7..234b668 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -88,6 +88,7 @@
 ?      xenpf_enter_acpi_sleep          platform.h
 ?      xenpf_pcpuinfo                  platform.h
 ?      xenpf_pcpu_version              platform.h
+?      xenpf_resource_entry            platform.h
 !      sched_poll                      sched.h
 ?      sched_remote_shutdown           sched.h
 ?      sched_shutdown                  sched.h
-- 
1.7.9.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.