[Xen-devel] [RFC][v2][PATCH 02/14] introduce XENMEM_reserved_device_memory_map
From: Jan Beulich <jbeulich@xxxxxxxx>

This is a prerequisite for punching holes into HVM and PVH guests' P2M
to allow passing through devices that are associated with (on VT-d)
RMRRs.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Signed-off-by: Tiejun Chen <tiejun.chen@xxxxxxxxx>
---
 xen/common/compat/memory.c           | 66 ++++++++++++++++++++++++++++++++++++
 xen/common/memory.c                  | 64 ++++++++++++++++++++++++++++++++++
 xen/drivers/passthrough/iommu.c      | 10 ++++++
 xen/drivers/passthrough/vtd/dmar.c   | 32 +++++++++++++++++
 xen/drivers/passthrough/vtd/extern.h |  1 +
 xen/drivers/passthrough/vtd/iommu.c  |  1 +
 xen/include/public/memory.h          | 32 ++++++++++++++++-
 xen/include/xen/iommu.h              | 10 ++++++
 xen/include/xen/pci.h                |  2 ++
 xen/include/xlat.lst                 |  3 +-
 10 files changed, 219 insertions(+), 2 deletions(-)

diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
index b258138..b608496 100644
--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -17,6 +17,45 @@ CHECK_TYPE(domid);
 CHECK_mem_access_op;
 CHECK_vmemrange;
 
+#ifdef HAS_PASSTHROUGH
+struct get_reserved_device_memory {
+    struct compat_reserved_device_memory_map map;
+    unsigned int used_entries;
+};
+
+static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
+                                      u32 id, void *ctxt)
+{
+    struct get_reserved_device_memory *grdm = ctxt;
+    u32 sbdf;
+    struct compat_reserved_device_memory rdm = {
+        .start_pfn = start, .nr_pages = nr
+    };
+
+    sbdf = PCI_SBDF2(grdm->map.seg, grdm->map.bus, grdm->map.devfn);
+    if ( (grdm->map.flag & PCI_DEV_RDM_ALL) || (sbdf == id) )
+    {
+        if ( grdm->used_entries < grdm->map.nr_entries )
+        {
+            if ( rdm.start_pfn != start || rdm.nr_pages != nr )
+                return -ERANGE;
+
+            if ( __copy_to_compat_offset(grdm->map.buffer,
+                                         grdm->used_entries,
+                                         &rdm,
+                                         1) )
+            {
+                return -EFAULT;
+            }
+        }
+        ++grdm->used_entries;
+        return 1;
+    }
+
+    return 0;
+}
+#endif
+
 int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
 {
     int split, op = cmd & MEMOP_CMD_MASK;
@@ -303,6 +342,33 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
             break;
         }
 
+#ifdef HAS_PASSTHROUGH
+        case XENMEM_reserved_device_memory_map:
+        {
+            struct get_reserved_device_memory grdm;
+
+            if ( copy_from_guest(&grdm.map, compat, 1) ||
+                 !compat_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
+                return -EFAULT;
+
+            grdm.used_entries = 0;
+            rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
+                                                  &grdm);
+
+            if ( !rc && grdm.map.nr_entries < grdm.used_entries )
+                rc = -ENOBUFS;
+
+            grdm.map.nr_entries = grdm.used_entries;
+            if ( grdm.map.nr_entries )
+            {
+                if ( __copy_to_guest(compat, &grdm.map, 1) )
+                    rc = -EFAULT;
+            }
+
+            return rc;
+        }
+#endif
+
         default:
             return compat_arch_memory_op(cmd, compat);
         }
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 063a1c5..c789f72 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -748,6 +748,43 @@ static int construct_memop_from_reservation(
     return 0;
 }
 
+#ifdef HAS_PASSTHROUGH
+struct get_reserved_device_memory {
+    struct xen_reserved_device_memory_map map;
+    unsigned int used_entries;
+};
+
+static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
+                                      u32 id, void *ctxt)
+{
+    struct get_reserved_device_memory *grdm = ctxt;
+    u32 sbdf;
+
+    sbdf = PCI_SBDF2(grdm->map.seg, grdm->map.bus, grdm->map.devfn);
+    if ( (grdm->map.flag & PCI_DEV_RDM_ALL) || (sbdf == id) )
+    {
+        if ( grdm->used_entries < grdm->map.nr_entries )
+        {
+            struct xen_reserved_device_memory rdm = {
+                .start_pfn = start, .nr_pages = nr
+            };
+
+            if ( __copy_to_guest_offset(grdm->map.buffer,
+                                        grdm->used_entries,
+                                        &rdm,
+                                        1) )
+            {
+                return -EFAULT;
+            }
+        }
+        ++grdm->used_entries;
+        return 1;
+    }
+
+    return 0;
+}
+#endif
+
 long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     struct domain *d;
@@ -1162,6 +1199,33 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         break;
     }
 
+#ifdef HAS_PASSTHROUGH
+    case XENMEM_reserved_device_memory_map:
+    {
+        struct get_reserved_device_memory grdm;
+
+        if ( copy_from_guest(&grdm.map, arg, 1) ||
+             !guest_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
+            return -EFAULT;
+
+        grdm.used_entries = 0;
+        rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
+                                              &grdm);
+
+        if ( !rc && grdm.map.nr_entries < grdm.used_entries )
+            rc = -ENOBUFS;
+
+        grdm.map.nr_entries = grdm.used_entries;
+        if ( grdm.map.nr_entries )
+        {
+            if ( __copy_to_guest(arg, &grdm.map, 1) )
+                rc = -EFAULT;
+        }
+
+        break;
+    }
+#endif
+
     default:
         rc = arch_memory_op(cmd, arg);
         break;
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 06cb38f..0b2ef52 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -375,6 +375,16 @@ void iommu_crash_shutdown(void)
     iommu_enabled = iommu_intremap = 0;
 }
 
+int iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
+{
+    const struct iommu_ops *ops = iommu_get_ops();
+
+    if ( !iommu_enabled || !ops->get_reserved_device_memory )
+        return 0;
+
+    return ops->get_reserved_device_memory(func, ctxt);
+}
+
 bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature)
 {
     const struct hvm_iommu *hd = domain_hvm_iommu(d);
diff --git a/xen/drivers/passthrough/vtd/dmar.c b/xen/drivers/passthrough/vtd/dmar.c
index 18d7903..518cae6 100644
--- a/xen/drivers/passthrough/vtd/dmar.c
+++ b/xen/drivers/passthrough/vtd/dmar.c
@@ -893,3 +893,35 @@ int platform_supports_x2apic(void)
     unsigned int mask = ACPI_DMAR_INTR_REMAP | ACPI_DMAR_X2APIC_OPT_OUT;
     return cpu_has_x2apic && ((dmar_flags & mask) == ACPI_DMAR_INTR_REMAP);
 }
+
+int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
+{
+    struct acpi_rmrr_unit *rmrr, *rmrr_cur = NULL;
+    int rc = 0;
+    unsigned int i;
+    u16 bdf;
+
+    for_each_rmrr_device ( rmrr, bdf, i )
+    {
+        if ( rmrr != rmrr_cur )
+        {
+            rc = func(PFN_DOWN(rmrr->base_address),
+                      PFN_UP(rmrr->end_address) -
+                          PFN_DOWN(rmrr->base_address),
+                      PCI_SBDF(rmrr->segment, bdf),
+                      ctxt);
+
+            if ( unlikely(rc < 0) )
+                return rc;
+
+            if ( !rc )
+                continue;
+
+            /* Just go next. */
+            if ( rc == 1 )
+                rmrr_cur = rmrr;
+        }
+    }
+
+    return 0;
+}
diff --git a/xen/drivers/passthrough/vtd/extern.h b/xen/drivers/passthrough/vtd/extern.h
index 5524dba..f9ee9b0 100644
--- a/xen/drivers/passthrough/vtd/extern.h
+++ b/xen/drivers/passthrough/vtd/extern.h
@@ -75,6 +75,7 @@ int domain_context_mapping_one(struct domain *domain, struct iommu *iommu,
                                u8 bus, u8 devfn, const struct pci_dev *);
 int domain_context_unmap_one(struct domain *domain, struct iommu *iommu,
                              u8 bus, u8 devfn);
+int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt);
 
 unsigned int io_apic_read_remap_rte(unsigned int apic, unsigned int reg);
 void io_apic_write_remap_rte(unsigned int apic,
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 9053a1f..6a37624 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -2491,6 +2491,7 @@ const struct iommu_ops intel_iommu_ops = {
     .crash_shutdown = vtd_crash_shutdown,
     .iotlb_flush = intel_iommu_iotlb_flush,
     .iotlb_flush_all = intel_iommu_iotlb_flush_all,
+    .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
     .dump_p2m_table = vtd_dump_p2m_table,
 };
 
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 832559a..7b25275 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -573,7 +573,37 @@ struct xen_vnuma_topology_info {
 typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
 DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
 
-/* Next available subop number is 27 */
+/*
+ * With some legacy devices, certain guest-physical addresses cannot safely
+ * be used for other purposes, e.g. to map guest RAM.  This hypercall
+ * enumerates those regions so the toolstack can avoid using them.
+ */
+#define XENMEM_reserved_device_memory_map   27
+struct xen_reserved_device_memory {
+    xen_pfn_t start_pfn;
+    xen_ulong_t nr_pages;
+};
+typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
+DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);
+
+struct xen_reserved_device_memory_map {
+    /* IN */
+    /* Currently just one bit to indicate checking all Reserved Device Memory. */
+#define PCI_DEV_RDM_ALL 0x1
+    uint32_t flag;
+    /* IN */
+    uint16_t seg;
+    uint8_t bus;
+    uint8_t devfn;
+    /* IN/OUT */
+    unsigned int nr_entries;
+    /* OUT */
+    XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
+};
+typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);
+
+/* Next available subop number is 28 */
 
 #endif /* __XEN_PUBLIC_MEMORY_H__ */
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index b30bf41..e2f584d 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -126,6 +126,14 @@ int iommu_do_dt_domctl(struct xen_domctl *, struct domain *,
 
 struct page_info;
 
+/*
+ * Any non-zero value returned from callbacks of this type will cause the
+ * function the callback was handed to terminate its iteration.  Assigning
+ * meaning to these non-zero values is left to the top level caller /
+ * callback pair.
+ */
+typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, u32 id, void *ctxt);
+
 struct iommu_ops {
     int (*init)(struct domain *d);
     void (*hwdom_init)(struct domain *d);
@@ -157,12 +165,14 @@ struct iommu_ops {
     void (*crash_shutdown)(void);
     void (*iotlb_flush)(struct domain *d, unsigned long gfn,
                         unsigned int page_count);
     void (*iotlb_flush_all)(struct domain *d);
+    int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
     void (*dump_p2m_table)(struct domain *d);
 };
 
 void iommu_suspend(void);
 void iommu_resume(void);
 void iommu_crash_shutdown(void);
+int iommu_get_reserved_device_memory(iommu_grdm_t *, void *);
 
 void iommu_share_p2m_table(struct domain *d);
diff --git a/xen/include/xen/pci.h b/xen/include/xen/pci.h
index 4377f3e..f891f85 100644
--- a/xen/include/xen/pci.h
+++ b/xen/include/xen/pci.h
@@ -33,6 +33,8 @@
 #define PCI_DEVFN2(bdf) ((bdf) & 0xff)
 #define PCI_BDF(b,d,f)  ((((b) & 0xff) << 8) | PCI_DEVFN(d,f))
 #define PCI_BDF2(b,df)  ((((b) & 0xff) << 8) | ((df) & 0xff))
+#define PCI_SBDF(s,bdf) (((s & 0xffff) << 16) | (bdf & 0xffff))
+#define PCI_SBDF2(s,b,df) (((s & 0xffff) << 16) | PCI_BDF2(b,df))
 
 struct pci_dev_info {
     bool_t is_extfn;
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 9c9fd9a..dd23559 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -61,9 +61,10 @@
 !	memory_exchange			memory.h
 !	memory_map			memory.h
 !	memory_reservation		memory.h
-?	mem_access_op			memory.h
+?	mem_access_op			memory.h
 !	pod_target			memory.h
 !	remove_from_physmap		memory.h
+!	reserved_device_memory_map	memory.h
 ?	vmemrange			memory.h
 !	vnuma_topology_info		memory.h
 ?	physdev_eoi			physdev.h
-- 
1.9.1
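
[Editorial illustration, not part of the patch above.] To show how the new
sub-op is meant to be driven, here is a minimal caller sketch querying the
reserved regions for all devices. It assumes a mini-OS-like Xen guest
environment providing HYPERVISOR_memory_op(), set_xen_guest_handle() and
basic libc; dump_rdm_regions() is a made-up name. The two-pass pattern
follows the semantics implemented above: a first call with nr_entries == 0
returns -ENOBUFS and writes back the number of entries required, and a
second call with a large enough buffer fills it.

/*
 * Illustrative sketch only -- assumes HYPERVISOR_memory_op(),
 * set_xen_guest_handle() and basic libc are available (e.g. mini-OS).
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <xen/xen.h>
#include <xen/memory.h>

static int dump_rdm_regions(void)
{
    struct xen_reserved_device_memory_map map = {
        .flag = PCI_DEV_RDM_ALL,   /* report regions for all devices */
        .nr_entries = 0,           /* first pass: just ask how many */
    };
    struct xen_reserved_device_memory *entries;
    unsigned int i;
    int rc;

    /* First pass: with too small a buffer the hypervisor returns -ENOBUFS
     * and writes the required entry count back into nr_entries. */
    set_xen_guest_handle(map.buffer, NULL);
    rc = HYPERVISOR_memory_op(XENMEM_reserved_device_memory_map, &map);
    if ( rc && rc != -ENOBUFS )
        return rc;
    if ( !map.nr_entries )
        return 0;                  /* no reserved regions reported */

    entries = calloc(map.nr_entries, sizeof(*entries));
    if ( !entries )
        return -ENOMEM;

    /* Second pass: the buffer is now large enough, so rc should be 0. */
    set_xen_guest_handle(map.buffer, entries);
    rc = HYPERVISOR_memory_op(XENMEM_reserved_device_memory_map, &map);
    if ( !rc )
        for ( i = 0; i < map.nr_entries; i++ )
            printf("RDM region: pfn %#lx, %lu pages\n",
                   (unsigned long)entries[i].start_pfn,
                   (unsigned long)entries[i].nr_pages);

    free(entries);
    return rc;
}

A per-device query would instead clear PCI_DEV_RDM_ALL and fill in
seg/bus/devfn, which the hypervisor-side callbacks above match against the
reporting device via PCI_SBDF2().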