[Xen-devel] [PATCH] x86/shim: Fixes to replace_linear_mapping()
The function replace_va_mapping() has multiple issues:
* It uses linear addresses, not virtual addresses. Fix its name.
* Guest pagetables are allocated from the domheap, not the xenheap, so they
  need to be mapped with map_domain_page() before they can be accessed safely.
* put_page_and_type() should only apply to present mappings.
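
To make the latter two points concrete, a minimal sketch of the required access
pattern might look as follows.  It is illustrative only and not part of the
patch: it reuses helpers visible in the diff below, while the function name
update_l1e_sketch() and its exact signature are hypothetical, and the fragment
is not expected to build outside the Xen tree.

#include <xen/domain_page.h>    /* map_domain_page() / unmap_domain_page() */
#include <asm/page.h>           /* l1_pgentry_t and the l1e_* accessors */

/*
 * Domheap pagetable pages have no permanent virtual mapping inside Xen, so a
 * table must be transiently mapped before its entries can be read or written,
 * and unmapped again afterwards.
 */
static void update_l1e_sketch(l2_pgentry_t l2e, unsigned long linear,
                              mfn_t mfn, unsigned int flags)
{
    /* Map the L1 table referenced by the (present) L2 entry. */
    l1_pgentry_t *l1t = map_l1t_from_l2e(l2e);
    l1_pgentry_t *l1e = l1t + l1_table_offset(linear);

    /* Only drop a reference for a slot which currently maps something. */
    if ( l1e_get_flags(*l1e) & _PAGE_PRESENT )
        put_page_and_type(l1e_get_page(*l1e));

    l1e_write(l1e, l1e_from_mfn(mfn, flags));

    /* Tear the transient mapping back down. */
    unmap_domain_page(l1t);
}

The same map/modify/unmap discipline is what replace_linear_mapping() in the
diff below applies to the L3, L2 and L1 tables.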
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
---
xen/arch/x86/pv/shim.c | 28 ++++++++++++++++------------
1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/xen/arch/x86/pv/shim.c b/xen/arch/x86/pv/shim.c
index d5383dc..fae7818 100644
--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -119,19 +119,23 @@ uint64_t pv_shim_mem(uint64_t avail)
                   _PAGE_GUEST_KERNEL)
 #define COMPAT_L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
 
-static void __init replace_va_mapping(struct domain *d, l4_pgentry_t *l4start,
-                                      unsigned long va, mfn_t mfn)
+static void __init replace_linear_mapping(
+    struct domain *d, l4_pgentry_t *l4t, unsigned long linear, mfn_t mfn)
 {
-    l4_pgentry_t *pl4e = l4start + l4_table_offset(va);
-    l3_pgentry_t *pl3e = l4e_to_l3e(*pl4e) + l3_table_offset(va);
-    l2_pgentry_t *pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(va);
-    l1_pgentry_t *pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
-    struct page_info *page = mfn_to_page(l1e_get_mfn(*pl1e));
+    l4_pgentry_t *l4e = l4t + l4_table_offset(linear);
+    l3_pgentry_t *l3e = map_l3t_from_l4e(*l4e) + l3_table_offset(linear);
+    l2_pgentry_t *l2e = map_l2t_from_l3e(*l3e) + l2_table_offset(linear);
+    l1_pgentry_t *l1e = map_l1t_from_l2e(*l2e) + l1_table_offset(linear);
+    unsigned int flags = is_pv_32bit_domain(d) ? COMPAT_L1_PROT : L1_PROT;
 
-    put_page_and_type(page);
+    if ( l1e_get_flags(*l1e) & _PAGE_PRESENT )
+        put_page_and_type(l1e_get_page(*l1e));
 
-    *pl1e = l1e_from_mfn(mfn, (!is_pv_32bit_domain(d) ? L1_PROT
-                               : COMPAT_L1_PROT));
+    l1e_write(l1e, l1e_from_mfn(mfn, flags));
+
+    unmap_domain_page(l1e);
+    unmap_domain_page(l2e);
+    unmap_domain_page(l3e);
 }
 
 static void evtchn_reserve(struct domain *d, unsigned int port)
@@ -172,7 +176,7 @@ void __init pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
         {                                                                      \
             share_xen_page_with_guest(mfn_to_page(_mfn(param)), d,             \
                                       XENSHARE_writable);                      \
-            replace_va_mapping(d, l4start, va, _mfn(param));                   \
+            replace_linear_mapping(d, l4start, va, _mfn(param));               \
             dom0_update_physmap(d, PFN_DOWN((va) - va_start), param, vphysmap);\
         }                                                                      \
         else                                                                   \
@@ -200,7 +204,7 @@ void __init pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
         si->console.domU.mfn = mfn_x(console_mfn);
         share_xen_page_with_guest(mfn_to_page(console_mfn), d,
                                   XENSHARE_writable);
-        replace_va_mapping(d, l4start, console_va, console_mfn);
+        replace_linear_mapping(d, l4start, console_va, console_mfn);
         dom0_update_physmap(d, (console_va - va_start) >> PAGE_SHIFT,
                             mfn_x(console_mfn), vphysmap);
         consoled_set_ring_addr(page);
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel