Re: [Xen-devel] [v5][PATCH 03/10] xen:x86: define a new hypercall to get RMRR mappings
On 2014/9/1 18:29, Jan Beulich wrote:
> On 01.09.14 at 11:44, <tiejun.chen@xxxxxxxxx> wrote:
>> On 2014/8/29 17:18, Jan Beulich wrote:
>>> This still allocates another instance of structures to create a second
>>> linked list. Did you consider get_device_reserved_memory() to take
>>> a callback function instead?
>>
>> Do you mean we still use this existing type combo, acpi_rmrr_unit and
>> acpi_rmrr_units?

Jan,

I see you're reviewing the other patches in v5, so I really appreciate your
comments. But I will hold off addressing those comments until I can implement
the callback mechanism you expect here, because some comments on the other
patches may need to be rebased on top of this better approach. So I hope to
finish your callback mechanism first, to avoid bringing you potentially
duplicated faults :)

So could you take a look at the following?

xen/vtd: add an iommu op to expose device reserved memory

We need this interface to expose device reserved memory safely in a common
place.

Signed-off-by: Tiejun Chen <tiejun.chen@xxxxxxxxx>
---
 xen/drivers/passthrough/vtd/dmar.c  | 40 +++++++++++++++++++++++++++++++++++++
 xen/drivers/passthrough/vtd/iommu.c | 14 +++++++++++++
 xen/include/asm-x86/iommu.h         |  3 +++
 xen/include/xen/iommu.h             |  1 +
 4 files changed, 58 insertions(+)

diff --git a/xen/drivers/passthrough/vtd/dmar.c b/xen/drivers/passthrough/vtd/dmar.c
index 1152c3a..f46aee2 100644
--- a/xen/drivers/passthrough/vtd/dmar.c
+++ b/xen/drivers/passthrough/vtd/dmar.c
@@ -567,6 +567,44 @@ out:
return ret;
}
+extern get_device_reserved_memory_t get_drm_callback;
+struct xen_mem_reserved_device_memory *get_device_acpi_reserved_memory(unsigned int *nr_entries)
+{
+    struct acpi_rmrr_unit *rmrru;
+    static struct xen_mem_reserved_device_memory *rmrrm = NULL;
+    static unsigned int drm_entries = 0;
+    static unsigned int check_done = 0;
+    unsigned int i = 0;
+
+    *nr_entries = drm_entries;
+    if ( check_done )
+        return rmrrm;
+
+    list_for_each_entry(rmrru, &acpi_rmrr_units, list)
+        drm_entries++;
+
+    if ( drm_entries )
+    {
+        rmrrm = xzalloc_array(struct xen_mem_reserved_device_memory,
+                              drm_entries);
+        if ( !rmrrm )
+            return NULL;
+
+        list_for_each_entry(rmrru, &acpi_rmrr_units, list)
+        {
+            rmrrm[i].start_pfn = rmrru->base_address >> PAGE_SHIFT;
+            rmrrm[i].nr_pages = PAGE_ALIGN(rmrru->end_address -
+                                           rmrru->base_address) /
+                                PAGE_SIZE;
+            i++;
+        }
+    }
+
+    check_done = 1;
+
+    return rmrrm;
+}
+
static int __init
acpi_parse_one_rmrr(struct acpi_dmar_header *header)
{
@@ -678,6 +716,8 @@ acpi_parse_one_rmrr(struct acpi_dmar_header *header)
}
}
+    get_drm_callback = get_device_acpi_reserved_memory;
+
return ret;
}
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 042b882..43ff443 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -2461,6 +2461,19 @@ static void vtd_dump_p2m_table(struct domain *d)
     vtd_dump_p2m_table_level(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw), 0, 0);
 }
+struct xen_mem_reserved_device_memory *dummy_get_drm_callback(unsigned int *nr_entries)
+{
+    *nr_entries = 0;
+    return NULL;
+}
+
+get_device_reserved_memory_t get_drm_callback = dummy_get_drm_callback;
+
+struct xen_mem_reserved_device_memory *get_drm_all(unsigned int *nr_entries)
+{
+    return (get_drm_callback(nr_entries));
+}
+
 const struct iommu_ops intel_iommu_ops = {
     .init = intel_iommu_domain_init,
     .hwdom_init = intel_iommu_hwdom_init,
@@ -2486,6 +2499,7 @@ const struct iommu_ops intel_iommu_ops = {
     .iotlb_flush = intel_iommu_iotlb_flush,
     .iotlb_flush_all = intel_iommu_iotlb_flush_all,
     .dump_p2m_table = vtd_dump_p2m_table,
+    .get_device_reserved_memory = get_drm_all,
 };
/*
diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h
index e7a65da..aead1d7 100644
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -15,6 +15,8 @@
#ifndef __ARCH_X86_IOMMU_H__
#define __ARCH_X86_IOMMU_H__
+#include <public/memory.h>
+
#define MAX_IOMMUS 32
/* Does this domain have a P2M table we can use as its IOMMU pagetable? */
@@ -32,6 +34,7 @@ int iommu_supports_eim(void);
int iommu_enable_x2apic_IR(void);
void iommu_disable_x2apic_IR(void);
+typedef struct xen_mem_reserved_device_memory* (*get_device_reserved_memory_t)(unsigned int *nr_entries);
#endif /* !__ARCH_X86_IOMMU_H__ */
/*
* Local variables:
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 8eb764a..8806ef6 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -149,6 +149,7 @@ struct iommu_ops {
     void (*update_ire_from_apic)(unsigned int apic, unsigned int reg,
                                  unsigned int value);
     unsigned int (*read_apic_from_ire)(unsigned int apic, unsigned int reg);
     int (*setup_hpet_msi)(struct msi_desc *);
+    struct xen_mem_reserved_device_memory* (*get_device_reserved_memory)(unsigned int *nr_entries);
 #endif /* CONFIG_X86 */
     void (*suspend)(void);
     void (*resume)(void);
--
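To summarize the mechanism before looking at the hypercall side: the VT-d code
registers a provider callback while parsing the RMRR entries, while a dummy
default keeps the generic path safe when no provider exists. Below is only a
condensed, standalone illustration of that pattern, not code from the patch;
every name in it (struct reserved_region, vtd_get_reserved(), the example
region data) is made up for the sketch, and only the registration/forwarding
idea mirrors get_drm_callback/get_drm_all above.

/*
 * Standalone sketch of the callback pattern; builds with a plain C compiler.
 * All names and data here are illustrative only.
 */
#include <stdio.h>

struct reserved_region {                 /* stand-in for xen_mem_reserved_device_memory */
    unsigned long start_pfn;
    unsigned long nr_pages;
};

typedef struct reserved_region *(*get_reserved_memory_t)(unsigned int *nr_entries);

/* Default provider: nothing to report until a driver registers its own hook. */
static struct reserved_region *dummy_get_reserved(unsigned int *nr_entries)
{
    *nr_entries = 0;
    return NULL;
}

static get_reserved_memory_t get_reserved_callback = dummy_get_reserved;

/* VT-d-style provider: would be registered while parsing the ACPI RMRR units. */
static struct reserved_region vtd_regions[] = {
    { .start_pfn = 0xab000, .nr_pages = 0x10 },      /* made-up example data */
};

static struct reserved_region *vtd_get_reserved(unsigned int *nr_entries)
{
    *nr_entries = sizeof(vtd_regions) / sizeof(vtd_regions[0]);
    return vtd_regions;
}

int main(void)
{
    unsigned int nr, i;
    struct reserved_region *r;

    get_reserved_callback = vtd_get_reserved;    /* as acpi_parse_one_rmrr() does above */

    r = get_reserved_callback(&nr);              /* as get_drm_all() forwards above */
    for ( i = 0; i < nr; i++ )
        printf("region %u: start_pfn %#lx, nr_pages %#lx\n",
               i, r[i].start_pfn, r[i].nr_pages);

    return 0;
}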
Then, when the hypercall is invoked:
+    case XENMEM_reserved_device_memory_map:
+    {
+        struct xen_mem_reserved_device_memory *xmrdm = NULL;
+        struct xen_mem_reserved_device_memory_map xmrdmm;
+        XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_t) buffer;
+        XEN_GUEST_HANDLE_PARAM(xen_mem_reserved_device_memory_t) buffer_param;
+        const struct iommu_ops *ops = iommu_get_ops();
+        unsigned int nr_entries = 0;
+        unsigned int i = 0;
+
+        xmrdm = ops->get_device_reserved_memory(&nr_entries);
+        if ( !nr_entries )
+            return -ENOENT;
+        if ( nr_entries < 0 )
+            return -EFAULT;
+
+        if ( copy_from_guest(&xmrdmm, arg, 1) )
+            return -EFAULT;
+
+        if ( xmrdmm.nr_entries < nr_entries )
+        {
+            xmrdmm.nr_entries = nr_entries;
+            if ( copy_to_guest(arg, &xmrdmm, 1) )
+                return -EFAULT;
+            return -ENOBUFS;
+        }
+
+        buffer_param = guest_handle_cast(xmrdmm.buffer,
+                                         xen_mem_reserved_device_memory_t);
+        buffer = guest_handle_from_param(buffer_param,
+                                         xen_mem_reserved_device_memory_t);
+        if ( !guest_handle_okay(buffer, xmrdmm.nr_entries) )
+            return -EFAULT;
+
+        for ( i = 0; i < nr_entries; i++ )
+        {
+            if ( copy_to_guest_offset(buffer, i, xmrdm + i, 1) )
+                return -EFAULT;
+        }
+
+        xmrdmm.nr_entries = i;
+
+        if ( copy_to_guest(arg, &xmrdmm, 1) )
+            return -EFAULT;
+
+        return 0;
+    }
+
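For reference, the caller side is the usual two-call negotiation: a first call
with nr_entries = 0 makes the handler report the required number of entries
via -ENOBUFS, and a second call with a large enough buffer fetches them. The
sketch below is not part of this patch; it assumes the new structures from
this series in public/memory.h and a hypothetical xen_memory_op() wrapper that
issues the memory_op hypercall and returns its negative-errno result.

/*
 * Caller-side sketch only (dom0/toolstack).  xen_memory_op() is a
 * hypothetical wrapper; set_xen_guest_handle() and the xen_mem_* types are
 * assumed to come from the Xen public headers of this series.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

extern int xen_memory_op(unsigned int cmd, void *arg);   /* hypothetical wrapper */

static int dump_reserved_device_memory(void)
{
    struct xen_mem_reserved_device_memory *entries;
    struct xen_mem_reserved_device_memory_map xmrdmm = { .nr_entries = 0 };
    unsigned int i;
    int rc;

    /* First call: deliberately too small, so the handler fills in nr_entries. */
    rc = xen_memory_op(XENMEM_reserved_device_memory_map, &xmrdmm);
    if ( rc != -ENOBUFS )
        return rc;           /* -ENOENT: no device reserved memory; else an error */

    entries = calloc(xmrdmm.nr_entries, sizeof(*entries));
    if ( !entries )
        return -ENOMEM;
    set_xen_guest_handle(xmrdmm.buffer, entries);

    /* Second call: the buffer is now large enough, so the entries are copied out. */
    rc = xen_memory_op(XENMEM_reserved_device_memory_map, &xmrdmm);
    if ( rc == 0 )
        for ( i = 0; i < xmrdmm.nr_entries; i++ )
            printf("RMRR %u: start_pfn %#lx, nr_pages %#lx\n", i,
                   (unsigned long)entries[i].start_pfn,
                   (unsigned long)entries[i].nr_pages);

    free(entries);
    return rc;
}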
Thanks
Tiejun