[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Xen-devel] [RFC][v3][PATCH 5/6] tools:libxc: check if mmio BAR is out of RMRR mappings
On 2014/8/15 20:21, Andrew Cooper wrote:
On 15/08/14 09:27, Tiejun Chen wrote:
We need to avoid allocating an MMIO BAR that conflicts with an RMRR range.
Signed-off-by: Tiejun Chen <tiejun.chen@xxxxxxxxx>
---
tools/libxc/xc_domain.c | 26 ++++++++++++++++++++++++++
tools/libxc/xc_hvm_build_x86.c | 23 +++++++++++++++++++++++
tools/libxc/xenctrl.h | 4 ++++
3 files changed, 53 insertions(+)
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index c67ac9a..8d011ef 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -649,6 +649,32 @@ int xc_domain_set_memory_map(xc_interface *xch,
return rc;
}
+
+int xc_get_rmrr_map(xc_interface *xch,
This function name should match the hypercall. Perhaps
xc_reserved_device_memory_map() ?
Okay.
+ struct e820entry entries[],
+ uint32_t max_entries)
This libxc function would be far more use if it took a single
xen_memory_map parameter, rather than splitting the return information
from Xen between one of the parameters and the return value.
I don't see why we should do anything different here. Please take a look
at xc_get_machine_memory_map() — it uses exactly this pattern. Why do you think we should deviate from it?
Thanks
Tiejun
~Andrew
+{
+    int rc;
+    struct xen_memory_map memmap = {
+        .nr_entries = max_entries
+    };
+    DECLARE_HYPERCALL_BOUNCE(entries, sizeof(struct e820entry) * max_entries,
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    /*
+     * Validate plain arguments *before* bouncing the buffer: the original
+     * short-circuit chain ran xc_hypercall_bounce_pre() before checking
+     * max_entries, so a bad max_entries returned -1 without the matching
+     * xc_hypercall_bounce_post(), leaking the bounce buffer.
+     */
+    if ( !entries || max_entries <= 1 )
+        return -1;
+
+    if ( xc_hypercall_bounce_pre(xch, entries) )
+        return -1;
+
+    set_xen_guest_handle(memmap.buffer, entries);
+
+    rc = do_memory_op(xch, XENMEM_reserved_device_memory_map,
+                      &memmap, sizeof(memmap));
+
+    xc_hypercall_bounce_post(xch, entries);
+
+    /* On success the hypercall reports how many entries were written. */
+    return rc ? rc : memmap.nr_entries;
+}
+
int xc_get_machine_memory_map(xc_interface *xch,
struct e820entry entries[],
uint32_t max_entries)
diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c
index c81a25b..2196cdb 100644
--- a/tools/libxc/xc_hvm_build_x86.c
+++ b/tools/libxc/xc_hvm_build_x86.c
@@ -262,6 +262,8 @@ static int setup_guest(xc_interface *xch,
int claim_enabled = args->claim_enabled;
xen_pfn_t special_array[NR_SPECIAL_PAGES];
xen_pfn_t ioreq_server_array[NR_IOREQ_SERVER_PAGES];
+ struct e820entry map[E820MAX];
+ uint64_t rmrr_start = 0, rmrr_end = 0;
if ( nr_pages > target_pages )
pod_mode = XENMEMF_populate_on_demand;
@@ -300,6 +302,27 @@ static int setup_guest(xc_interface *xch,
goto error_out;
}
+ /* We should check if mmio range is out of RMRR mapping. */
+ rc = xc_get_rmrr_map(xch, map, E820MAX);
+ if (rc < 0)
+ {
+ PERROR("Could not get RMRR info on domain");
+ }
+ else if ( rc )
+ {
+ for ( i = 0; i < rc; i++ )
+ {
+ rmrr_start = map[i].addr;
+ rmrr_end = map[i].addr + map[i].size + 1;
+ if ( check_mmio_hole(rmrr_start, map[i].size + 1, mmio_start,
mmio_size) )
+ {
+ PERROR("MMIO: [%lx]<->[%lx] overlap RMRR [%lx]<->[%lx]\n",
+ mmio_start, (mmio_start + mmio_size), rmrr_start,
rmrr_end);
+ goto error_out;
+ }
+ }
+ }
+
for ( i = 0; i < nr_pages; i++ )
page_array[i] = i;
for ( i = mmio_start >> PAGE_SHIFT; i < nr_pages; i++ )
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 1c5d0db..6d3b135 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1270,6 +1270,10 @@ int xc_domain_set_memory_map(xc_interface *xch,
int xc_get_machine_memory_map(xc_interface *xch,
struct e820entry entries[],
uint32_t max_entries);
+
+int xc_get_rmrr_map(xc_interface *xch,
+ struct e820entry entries[],
+ uint32_t max_entries);
#endif
int xc_domain_set_time_offset(xc_interface *xch,
uint32_t domid,
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|