Re: [Xen-devel] [v8][PATCH 03/17] introduce XENMEM_reserved_device_memory_map
On Mon, Dec 01, 2014 at 05:24:21PM +0800, Tiejun Chen wrote:
> From: Jan Beulich <jbeulich@xxxxxxxx>
>
> This is a prerequisite for punching holes into HVM and PVH guests' P2M
> to allow passing through devices that are associated with (on VT-d)
> RMRRs.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
> ---
>  xen/common/compat/memory.c           | 54 ++++++++++++++++++++++++++++++++++++
>  xen/common/memory.c                  | 51 ++++++++++++++++++++++++++++++++++
>  xen/drivers/passthrough/iommu.c      | 10 +++++++
>  xen/drivers/passthrough/vtd/dmar.c   | 17 ++++++++++++
>  xen/drivers/passthrough/vtd/extern.h |  1 +
>  xen/drivers/passthrough/vtd/iommu.c  |  1 +
>  xen/include/public/memory.h          | 24 +++++++++++++++-
>  xen/include/xen/iommu.h              |  4 +++
>  xen/include/xlat.lst                 |  3 +-
>  9 files changed, 163 insertions(+), 2 deletions(-)
>
> diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
> index 06c90be..60512fa 100644
> --- a/xen/common/compat/memory.c
> +++ b/xen/common/compat/memory.c
> @@ -16,6 +16,37 @@ CHECK_TYPE(domid);
>
>  CHECK_mem_access_op;
>
> +#ifdef HAS_PASSTHROUGH
> +struct get_reserved_device_memory {
> +    struct compat_reserved_device_memory_map map;
> +    unsigned int used_entries;
> +};
> +
> +static int get_reserved_device_memory(xen_pfn_t start,
> +                                      xen_ulong_t nr, void *ctxt)
> +{
> +    struct get_reserved_device_memory *grdm = ctxt;
> +
> +    if ( grdm->used_entries < grdm->map.nr_entries )
> +    {
> +        struct compat_reserved_device_memory rdm = {
> +            .start_pfn = start, .nr_pages = nr
> +        };
> +
> +        if ( rdm.start_pfn != start || rdm.nr_pages != nr )
> +            return -ERANGE;
> +
> +        if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
> +                                     &rdm, 1) )
> +            return -EFAULT;
> +    }
> +
> +    ++grdm->used_entries;
> +
> +    return 0;
> +}
> +#endif
> +
>  int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
>  {
>      int split, op = cmd & MEMOP_CMD_MASK;
> @@ -273,6 +304,29 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
>          break;
>      }
>
> +#ifdef HAS_PASSTHROUGH
> +    case XENMEM_reserved_device_memory_map:
> +    {
> +        struct get_reserved_device_memory grdm;
> +
> +        if ( copy_from_guest(&grdm.map, compat, 1) ||
> +             !compat_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
> +            return -EFAULT;
> +
> +        grdm.used_entries = 0;
> +        rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
> +                                              &grdm);
> +
> +        if ( !rc && grdm.map.nr_entries < grdm.used_entries )
> +            rc = -ENOBUFS;
> +        grdm.map.nr_entries = grdm.used_entries;
> +        if ( __copy_to_guest(compat, &grdm.map, 1) )
> +            rc = -EFAULT;
> +
> +        return rc;
> +    }
> +#endif
> +
>      default:
>          return compat_arch_memory_op(cmd, compat);
>      }
> diff --git a/xen/common/memory.c b/xen/common/memory.c
> index 9f21bd3..4788acc 100644
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -692,6 +692,34 @@ out:
>      return rc;
>  }
>
> +#ifdef HAS_PASSTHROUGH
> +struct get_reserved_device_memory {
> +    struct xen_reserved_device_memory_map map;
> +    unsigned int used_entries;
> +};
> +
> +static int get_reserved_device_memory(xen_pfn_t start,
> +                                      xen_ulong_t nr, void *ctxt)
> +{
> +    struct get_reserved_device_memory *grdm = ctxt;
> +
> +    if ( grdm->used_entries < grdm->map.nr_entries )
> +    {
> +        struct xen_reserved_device_memory rdm = {
> +            .start_pfn = start, .nr_pages = nr
> +        };
> +
> +        if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries,
> +                                    &rdm, 1) )
> +            return -EFAULT;
> +    }
> +
> +    ++grdm->used_entries;
> +
> +    return 0;
> +}
> +#endif
> +
>  long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>  {
>      struct domain *d;
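(One note on the compat variant above, mostly for other readers: the
start_pfn/nr_pages round-trip comparison is what catches values that get
truncated when narrowed into the 32-bit compat fields, hence the -ERANGE.
A standalone sketch of that idiom, illustrative only and not part of the
patch:

    #include <stdint.h>

    /* Returns nonzero if 'wide' cannot be represented in 32 bits. */
    static int narrows_lossily(uint64_t wide)
    {
        uint32_t narrow = wide;     /* implicit truncation */
        return narrow != wide;      /* mismatch => bits were lost */
    }

The native variant copies like-for-like types, so it rightly omits the
check.)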
> @@ -1101,6 +1129,29 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>          break;
>      }
>
> +#ifdef HAS_PASSTHROUGH
> +    case XENMEM_reserved_device_memory_map:
> +    {
> +        struct get_reserved_device_memory grdm;
> +
> +        if ( copy_from_guest(&grdm.map, arg, 1) ||
> +             !guest_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
> +            return -EFAULT;
> +

Shouldn't there be an XSM check here?

> +        grdm.used_entries = 0;
> +        rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
> +                                              &grdm);
> +

Also, since we are iterating over possibly many nr_entries, should we
think about returning -EAGAIN to user-space so that it can retry? (As in,
have preemption baked into this hypercall.)

> +        if ( !rc && grdm.map.nr_entries < grdm.used_entries )
> +            rc = -ENOBUFS;
> +        grdm.map.nr_entries = grdm.used_entries;
> +        if ( __copy_to_guest(arg, &grdm.map, 1) )
> +            rc = -EFAULT;
> +
> +        break;
> +    }
> +#endif
> +
>      default:
>          rc = arch_memory_op(cmd, arg);
>          break;
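To make the two questions above concrete (both sketches are mine, not
part of the posted patch):

For the XSM check, something along these lines before any work is done;
the hook name is hypothetical and would need to be added to the XSM
framework:

    rc = xsm_get_reserved_device_memory(XSM_HOOK, current->domain);
    if ( rc )
        return rc;

For preemption, the other XENMEM subops already encode a start offset in
the upper bits of cmd (MEMOP_EXTENT_SHIFT), so a continuation could be
created roughly like this from inside the iteration, with the resumed
invocation skipping the first start_extent entries:

    if ( hypercall_preempt_check() )
        return hypercall_create_continuation(
            __HYPERVISOR_memory_op, "lh",
            op | (grdm.used_entries << MEMOP_EXTENT_SHIFT), arg);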
> diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
> index cc12735..7c17e8d 100644
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -344,6 +344,16 @@ void iommu_crash_shutdown(void)
>      iommu_enabled = iommu_intremap = 0;
>  }
>
> +int iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
> +{
> +    const struct iommu_ops *ops = iommu_get_ops();
> +
> +    if ( !iommu_enabled || !ops->get_reserved_device_memory )
> +        return 0;
> +
> +    return ops->get_reserved_device_memory(func, ctxt);
> +}
> +
>  bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature)
>  {
>      const struct hvm_iommu *hd = domain_hvm_iommu(d);
> diff --git a/xen/drivers/passthrough/vtd/dmar.c b/xen/drivers/passthrough/vtd/dmar.c
> index 5e41e7a..86cfad3 100644
> --- a/xen/drivers/passthrough/vtd/dmar.c
> +++ b/xen/drivers/passthrough/vtd/dmar.c
> @@ -901,3 +901,20 @@ int platform_supports_x2apic(void)
>      unsigned int mask = ACPI_DMAR_INTR_REMAP | ACPI_DMAR_X2APIC_OPT_OUT;
>      return cpu_has_x2apic && ((dmar_flags & mask) == ACPI_DMAR_INTR_REMAP);
>  }
> +
> +int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
> +{
> +    struct acpi_rmrr_unit *rmrr;
> +    int rc = 0;
> +
> +    list_for_each_entry(rmrr, &acpi_rmrr_units, list)
> +    {
> +        rc = func(PFN_DOWN(rmrr->base_address),
> +                  PFN_UP(rmrr->end_address) - PFN_DOWN(rmrr->base_address),
> +                  ctxt);
> +        if ( rc )
> +            break;
> +    }
> +
> +    return rc;
> +}
> diff --git a/xen/drivers/passthrough/vtd/extern.h b/xen/drivers/passthrough/vtd/extern.h
> index 5524dba..f9ee9b0 100644
> --- a/xen/drivers/passthrough/vtd/extern.h
> +++ b/xen/drivers/passthrough/vtd/extern.h
> @@ -75,6 +75,7 @@ int domain_context_mapping_one(struct domain *domain, struct iommu *iommu,
>                                 u8 bus, u8 devfn, const struct pci_dev *);
>  int domain_context_unmap_one(struct domain *domain, struct iommu *iommu,
>                               u8 bus, u8 devfn);
> +int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt);
>
>  unsigned int io_apic_read_remap_rte(unsigned int apic, unsigned int reg);
>  void io_apic_write_remap_rte(unsigned int apic,
> diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
> index 19d8165..a38f201 100644
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -2491,6 +2491,7 @@ const struct iommu_ops intel_iommu_ops = {
>      .crash_shutdown = vtd_crash_shutdown,
>      .iotlb_flush = intel_iommu_iotlb_flush,
>      .iotlb_flush_all = intel_iommu_iotlb_flush_all,
> +    .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
>      .dump_p2m_table = vtd_dump_p2m_table,
>  };
>
> diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
> index 595f953..cee4535 100644
> --- a/xen/include/public/memory.h
> +++ b/xen/include/public/memory.h
> @@ -572,7 +572,29 @@ struct xen_vnuma_topology_info {
>  typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
>
> -/* Next available subop number is 27 */
> +/*
> + * For legacy reasons, some devices must be configured with special memory
> + * regions to function correctly. The guest must avoid using any of these
> + * regions.
> + */
> +#define XENMEM_reserved_device_memory_map   27
> +struct xen_reserved_device_memory {
> +    xen_pfn_t start_pfn;
> +    xen_ulong_t nr_pages;
> +};
> +typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);
> +
> +struct xen_reserved_device_memory_map {
> +    /* IN/OUT */
> +    unsigned int nr_entries;
> +    /* OUT */
> +    XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
> +};
> +typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);
> +
> +/* Next available subop number is 28 */
>
>  #endif /* __XEN_PUBLIC_MEMORY_H__ */
>
> diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
> index 8eb764a..409f6f8 100644
> --- a/xen/include/xen/iommu.h
> +++ b/xen/include/xen/iommu.h
> @@ -120,6 +120,8 @@ void iommu_dt_domain_destroy(struct domain *d);
>
>  struct page_info;
>
> +typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, void *ctxt);
> +
>  struct iommu_ops {
>      int (*init)(struct domain *d);
>      void (*hwdom_init)(struct domain *d);
> @@ -156,12 +158,14 @@ struct iommu_ops {
>      void (*crash_shutdown)(void);
>      void (*iotlb_flush)(struct domain *d, unsigned long gfn, unsigned int page_count);
>      void (*iotlb_flush_all)(struct domain *d);
> +    int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
>      void (*dump_p2m_table)(struct domain *d);
>  };
>
>  void iommu_suspend(void);
>  void iommu_resume(void);
>  void iommu_crash_shutdown(void);
> +int iommu_get_reserved_device_memory(iommu_grdm_t *, void *);
>
>  void iommu_share_p2m_table(struct domain *d);
>
> diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
> index 41b3e35..42229fd 100644
> --- a/xen/include/xlat.lst
> +++ b/xen/include/xlat.lst
> @@ -61,9 +61,10 @@
>  !	memory_exchange			memory.h
>  !	memory_map			memory.h
>  !	memory_reservation		memory.h
> -?	mem_access_op			memory.h
> +?	mem_access_op			memory.h
>  !	pod_target			memory.h
>  !	remove_from_physmap		memory.h
> +!	reserved_device_memory_map	memory.h
>  ?	physdev_eoi			physdev.h
>  ?	physdev_get_free_pirq		physdev.h
>  ?	physdev_irq			physdev.h
> --
> 1.9.1
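One closing remark on the new interface in memory.h: since nr_entries is
IN/OUT and the handler returns -ENOBUFS when the buffer is too small
(while still writing back the required count), a caller can size its
buffer with the usual two-call dance. Rough sketch below; the memory_op()
wrapper stands in for whatever hypercall mechanism the caller has (e.g. a
libxc hypercall buffer), so the names around it are illustrative only:

    struct xen_reserved_device_memory *entries = NULL;
    struct xen_reserved_device_memory_map rdmmap = { .nr_entries = 0 };
    int rc;

    set_xen_guest_handle(rdmmap.buffer, NULL);

    /* First call: learn how many entries the hypervisor would report. */
    rc = memory_op(XENMEM_reserved_device_memory_map, &rdmmap);
    if ( rc == -ENOBUFS && rdmmap.nr_entries )
    {
        entries = calloc(rdmmap.nr_entries, sizeof(*entries));
        set_xen_guest_handle(rdmmap.buffer, entries);
        /* Second call: fetch the actual regions. */
        rc = memory_op(XENMEM_reserved_device_memory_map, &rdmmap);
    }

(For static ACPI tables two calls suffice; if the region count could
change between calls, the caller would need to loop on -ENOBUFS.)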