
[Xen-devel] [PATCH v2] domctl: tighten XEN_DOMCTL_*_permission



With proper permission (and, for the I/O port case, wrap-around) checks
added (note that for the I/O port case a count of zero is now
disallowed, in line with the I/O memory handling), the first two of the
operations below can be dropped from the list in docs/misc/xsm-flask.txt:

XEN_DOMCTL_irq_permission:
XEN_DOMCTL_ioport_permission:

 There are only reasonably small numbers of both IRQs and I/O ports, so
 there's no excessive resource consumption involved here. Additionally,
 both have a specialized XSM hook associated with them.

XEN_DOMCTL_iomem_permission:

 While this also has a specialized XSM hook associated with it (just
 like XEN_DOMCTL_{irq,ioport}_permission), it's not clear whether it's
 reasonable to expect XSM to restrict, via this hook, the number of
 ranges associated with a domain (which is the main resource
 consumption concern here).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Fix off-by-one in XEN_DOMCTL_ioport_permission bounds checking.
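
As an illustration (not part of the patch), here is a minimal standalone
sketch of the range validation now applied in the I/O port case; the
helper name is made up for the example. It rejects a zero count,
wrap-around, and ranges extending past the 0x10000-port I/O space,
matching the (fp + np) checks in the x86 hunk below:

  #include <stdbool.h>
  #include <stdio.h>

  #define MAX_IOPORTS 0x10000

  /*
   * Illustrative only: a range [fp, fp + np) is acceptable iff np is
   * non-zero, fp + np doesn't wrap, and the range stays within the
   * 0x10000-port I/O space.
   */
  static bool ioport_range_valid(unsigned int fp, unsigned int np)
  {
      /* np == 0 makes fp + np == fp; wrap-around makes fp + np <= fp too. */
      return (fp + np) > fp && (fp + np) <= MAX_IOPORTS;
  }

  int main(void)
  {
      printf("%d\n", ioport_range_valid(0xfff0, 0x10));   /* 1: ends at 0x10000 */
      printf("%d\n", ioport_range_valid(0xfff0, 0x11));   /* 0: past the I/O space */
      printf("%d\n", ioport_range_valid(0x10, 0));        /* 0: zero count */
      printf("%d\n", ioport_range_valid(0xffffffffu, 2)); /* 0: wrap-around */
      return 0;
  }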

--- a/docs/misc/xsm-flask.txt
+++ b/docs/misc/xsm-flask.txt
@@ -72,9 +72,7 @@ __HYPERVISOR_domctl (xen/include/public/
  * XEN_DOMCTL_getvcpucontext
  * XEN_DOMCTL_max_vcpus
  * XEN_DOMCTL_scheduler_op
- * XEN_DOMCTL_irq_permission
  * XEN_DOMCTL_iomem_permission
- * XEN_DOMCTL_ioport_permission
  * XEN_DOMCTL_gethvmcontext
  * XEN_DOMCTL_sethvmcontext
  * XEN_DOMCTL_set_address_size
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -46,6 +46,8 @@ static int gdbsx_guest_mem_io(
     return (iop->remain ? -EFAULT : 0);
 }
 
+#define MAX_IOPORTS 0x10000
+
 long arch_do_domctl(
     struct xen_domctl *domctl, struct domain *d,
     XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
@@ -72,13 +74,11 @@ long arch_do_domctl(
         unsigned int np = domctl->u.ioport_permission.nr_ports;
         int allow = domctl->u.ioport_permission.allow_access;
 
-        ret = -EINVAL;
-        if ( (fp + np) > 65536 )
-            break;
-
-        if ( np == 0 )
-            ret = 0;
-        else if ( xsm_ioport_permission(XSM_HOOK, d, fp, fp + np - 1, allow) )
+        if ( (fp + np) <= fp || (fp + np) > MAX_IOPORTS )
+            ret = -EINVAL;
+        else if ( !ioports_access_permitted(current->domain,
+                                            fp, fp + np - 1) ||
+                  xsm_ioport_permission(XSM_HOOK, d, fp, fp + np - 1, allow) )
             ret = -EPERM;
         else if ( allow )
             ret = ioports_permit_access(d, fp, fp + np - 1);
@@ -719,7 +719,6 @@ long arch_do_domctl(
 
     case XEN_DOMCTL_ioport_mapping:
     {
-#define MAX_IOPORTS    0x10000
         struct hvm_iommu *hd;
         unsigned int fgp = domctl->u.ioport_mapping.first_gport;
         unsigned int fmp = domctl->u.ioport_mapping.first_mport;
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -790,7 +790,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
 
         if ( pirq >= d->nr_pirqs )
             ret = -EINVAL;
-        else if ( xsm_irq_permission(XSM_HOOK, d, pirq, allow) )
+        else if ( !pirq_access_permitted(current->domain, pirq) ||
+                  xsm_irq_permission(XSM_HOOK, d, pirq, allow) )
             ret = -EPERM;
         else if ( allow )
             ret = pirq_permit_access(d, pirq);
@@ -809,7 +810,9 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
         if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
             break;
 
-        if ( xsm_iomem_permission(XSM_HOOK, d, mfn, mfn + nr_mfns - 1, allow) )
+        if ( !iomem_access_permitted(current->domain,
+                                     mfn, mfn + nr_mfns - 1) ||
+             xsm_iomem_permission(XSM_HOOK, d, mfn, mfn + nr_mfns - 1, allow) )
             ret = -EPERM;
         else if ( allow )
             ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
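
For completeness, the common pattern all three hunks introduce, expressed
as a minimal standalone sketch (the struct and helpers below are
hypothetical stand-ins, not Xen's actual rangeset-based implementation):
the calling domain may only grant or revoke access to a resource it is
itself permitted to access.

  #include <errno.h>
  #include <stdbool.h>

  #define NR_PIRQS 256

  /* Hypothetical per-domain permission state, standing in for Xen's rangesets. */
  struct domain {
      bool pirq_access[NR_PIRQS];
  };

  static bool pirq_access_permitted(const struct domain *d, unsigned int pirq)
  {
      return pirq < NR_PIRQS && d->pirq_access[pirq];
  }

  /*
   * Sketch of the tightened XEN_DOMCTL_irq_permission flow: the caller may
   * only delegate (or revoke) access to an IRQ it already holds itself.
   */
  static int irq_permission(struct domain *caller, struct domain *target,
                            unsigned int pirq, bool allow)
  {
      if ( pirq >= NR_PIRQS )
          return -EINVAL;
      if ( !pirq_access_permitted(caller, pirq) )
          return -EPERM;    /* cannot delegate what the caller doesn't hold */
      target->pirq_access[pirq] = allow;
      return 0;
  }

  int main(void)
  {
      struct domain dom0 = {{ false }}, domU = {{ false }};

      dom0.pirq_access[5] = true;

      /* dom0 may delegate IRQ 5, but not IRQ 6, which it doesn't hold. */
      return (irq_permission(&dom0, &domU, 5, true) == 0 &&
              irq_permission(&dom0, &domU, 6, true) == -EPERM) ? 0 : 1;
  }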


