[Xen-devel] [PATCH v2 3/6] x86/P2M: simplify write_p2m_entry()
The "table_mfn" parameter really isn't needed anywhere, so it gets
dropped.
The "struct vcpu *" one was always bogus (as was being made up by
paging_write_p2m_entry()), and is not commonly used. It can be easily
enough made up in the one place (sh_unshadow_for_p2m_change()) it is
needed, and we can otherwise pass "struct domain *" instead, properly
reflecting that P2M operations are per-domain.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
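For reference, the shape of the paging-mode hook before and after (a
restatement of the asm-x86/paging.h hunk below, nothing beyond it):

    /* Before */
    void (*write_p2m_entry)(struct vcpu *v, unsigned long gfn,
                            l1_pgentry_t *p, mfn_t table_mfn,
                            l1_pgentry_t new, unsigned int level);

    /* After */
    void (*write_p2m_entry)(struct domain *d, unsigned long gfn,
                            l1_pgentry_t *p, l1_pgentry_t new,
                            unsigned int level);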
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -695,10 +695,9 @@ static void hap_update_paging_modes(stru
}
static void
-hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p,
- mfn_t table_mfn, l1_pgentry_t new, unsigned int level)
+hap_write_p2m_entry(struct domain *d, unsigned long gfn, l1_pgentry_t *p,
+ l1_pgentry_t new, unsigned int level)
{
- struct domain *d = v->domain;
uint32_t old_flags;
bool_t flush_nestedp2m = 0;
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -79,7 +79,7 @@
void
nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
- l1_pgentry_t *p, mfn_t table_mfn, l1_pgentry_t new, unsigned int level)
+ l1_pgentry_t *p, l1_pgentry_t new, unsigned int level)
{
struct domain *d = p2m->domain;
uint32_t old_flags;
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -156,7 +156,7 @@ static void p2m_add_iommu_flags(l1_pgent
/* Returns: 0 for success, -errno for failure */
static int
-p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table,
+p2m_next_level(struct p2m_domain *p2m, void **table,
unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
u32 max, unsigned long type)
{
@@ -185,15 +185,15 @@ p2m_next_level(struct p2m_domain *p2m, m
switch ( type ) {
case PGT_l3_page_table:
p2m_add_iommu_flags(&new_entry, 3,
IOMMUF_readable|IOMMUF_writable);
- p2m->write_p2m_entry(p2m, gfn, p2m_entry, *table_mfn, new_entry, 4);
+ p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 4);
break;
case PGT_l2_page_table:
p2m_add_iommu_flags(&new_entry, 2,
IOMMUF_readable|IOMMUF_writable);
- p2m->write_p2m_entry(p2m, gfn, p2m_entry, *table_mfn, new_entry, 3);
+ p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3);
break;
case PGT_l1_page_table:
p2m_add_iommu_flags(&new_entry, 1,
IOMMUF_readable|IOMMUF_writable);
- p2m->write_p2m_entry(p2m, gfn, p2m_entry, *table_mfn, new_entry, 2);
+ p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
break;
default:
BUG();
@@ -221,14 +221,13 @@ p2m_next_level(struct p2m_domain *p2m, m
{
new_entry = l1e_from_pfn(pfn + (i * L1_PAGETABLE_ENTRIES), flags);
p2m_add_iommu_flags(&new_entry, 1,
IOMMUF_readable|IOMMUF_writable);
- p2m->write_p2m_entry(p2m, gfn,
- l1_entry+i, *table_mfn, new_entry, 2);
+ p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 2);
}
unmap_domain_page(l1_entry);
new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
__PAGE_HYPERVISOR|_PAGE_USER); //disable PSE
p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
- p2m->write_p2m_entry(p2m, gfn, p2m_entry, *table_mfn, new_entry, 3);
+ p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3);
}
@@ -256,20 +255,17 @@ p2m_next_level(struct p2m_domain *p2m, m
{
new_entry = l1e_from_pfn(pfn + i, flags);
p2m_add_iommu_flags(&new_entry, 0, 0);
- p2m->write_p2m_entry(p2m, gfn,
- l1_entry+i, *table_mfn, new_entry, 1);
+ p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 1);
}
unmap_domain_page(l1_entry);
new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
__PAGE_HYPERVISOR|_PAGE_USER);
p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
- p2m->write_p2m_entry(p2m, gfn,
- p2m_entry, *table_mfn, new_entry, 2);
+ p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
}
- *table_mfn = _mfn(l1e_get_pfn(*p2m_entry));
- next = map_domain_page(mfn_x(*table_mfn));
+ next = map_domain_page(l1e_get_pfn(*p2m_entry));
unmap_domain_page(*table);
*table = next;
@@ -282,8 +278,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
{
/* XXX -- this might be able to be faster iff current->domain == d */
- mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
- void *table = map_domain_page(mfn_x(table_mfn));
+ void *table;
unsigned long i, gfn_remainder = gfn;
l1_pgentry_t *p2m_entry;
l1_pgentry_t entry_content;
@@ -312,7 +307,8 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
__trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
}
- rc = p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
+ table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+ rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
L4_PAGETABLE_SHIFT - PAGE_SHIFT,
L4_PAGETABLE_ENTRIES, PGT_l3_page_table);
if ( rc )
@@ -349,7 +345,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
old_mfn = l1e_get_pfn(*p2m_entry);
}
- p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 3);
+ p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
/* NB: paging_write_p2m_entry() handles tlb flushes properly */
/* Free old intermediate tables if necessary */
@@ -358,8 +354,8 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
}
else
{
- rc = p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder,
- gfn, L3_PAGETABLE_SHIFT - PAGE_SHIFT,
+ rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
+ L3_PAGETABLE_SHIFT - PAGE_SHIFT,
L3_PAGETABLE_ENTRIES, PGT_l2_page_table);
if ( rc )
goto out;
@@ -367,7 +363,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
if ( page_order == PAGE_ORDER_4K )
{
- rc = p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
+ rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
L2_PAGETABLE_SHIFT - PAGE_SHIFT,
L2_PAGETABLE_ENTRIES, PGT_l1_page_table);
if ( rc )
@@ -390,7 +386,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
old_mfn = l1e_get_pfn(*p2m_entry);
}
/* level 1 entry */
- p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 1);
+ p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
/* NB: paging_write_p2m_entry() handles tlb flushes properly */
}
else if ( page_order == PAGE_ORDER_2M )
@@ -426,7 +422,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
old_mfn = l1e_get_pfn(*p2m_entry);
}
- p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 2);
+ p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
/* NB: paging_write_p2m_entry() handles tlb flushes properly */
/* Free old intermediate tables if necessary */
@@ -660,7 +656,7 @@ static void p2m_pt_change_entry_type_glo
l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE);
p2m->write_p2m_entry(p2m, gfn,
(l1_pgentry_t *)&l3e[i3],
- l3mfn, l1e_content, 3);
+ l1e_content, 3);
continue;
}
@@ -687,7 +683,7 @@ static void p2m_pt_change_entry_type_glo
l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE);
p2m->write_p2m_entry(p2m, gfn,
(l1_pgentry_t *)&l2e[i2],
- l2mfn, l1e_content, 2);
+ l1e_content, 2);
continue;
}
@@ -706,7 +702,7 @@ static void p2m_pt_change_entry_type_glo
flags = p2m_type_to_flags(nt, _mfn(mfn));
l1e_content = p2m_l1e_from_pfn(mfn, flags);
p2m->write_p2m_entry(p2m, gfn, &l1e[i1],
- l1mfn, l1e_content, 1);
+ l1e_content, 1);
}
unmap_domain_page(l1e);
}
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -724,18 +724,15 @@ void paging_update_nestedmode(struct vcp
}
void paging_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
- l1_pgentry_t *p, mfn_t table_mfn,
- l1_pgentry_t new, unsigned int level)
+ l1_pgentry_t *p, l1_pgentry_t new,
+ unsigned int level)
{
struct domain *d = p2m->domain;
struct vcpu *v = current;
if ( v->domain != d )
v = d->vcpu ? d->vcpu[0] : NULL;
if ( likely(v && paging_mode_enabled(d) && paging_get_hostmode(v) != NULL) )
- {
- return paging_get_hostmode(v)->write_p2m_entry(v, gfn, p, table_mfn,
- new, level);
- }
+ paging_get_hostmode(v)->write_p2m_entry(d, gfn, p, new, level);
else
safe_write_pte(p, new);
}
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3310,11 +3310,14 @@ static int shadow_test_disable(struct do
* shadow processing jobs.
*/
-static void sh_unshadow_for_p2m_change(struct vcpu *v, unsigned long gfn,
- l1_pgentry_t *p, mfn_t table_mfn,
- l1_pgentry_t new, unsigned int level)
+static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
+ l1_pgentry_t *p, l1_pgentry_t new,
+ unsigned int level)
{
- struct domain *d = v->domain;
+ struct vcpu *v = current;
+
+ if ( v->domain != d )
+ v = d->vcpu ? d->vcpu[0] : NULL;
/* The following assertion is to make sure we don't step on 1GB host
* page support of HVM guest. */
@@ -3379,18 +3382,16 @@ static void sh_unshadow_for_p2m_change(s
}
void
-shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn,
- l1_pgentry_t *p, mfn_t table_mfn,
- l1_pgentry_t new, unsigned int level)
+shadow_write_p2m_entry(struct domain *d, unsigned long gfn,
+ l1_pgentry_t *p, l1_pgentry_t new,
+ unsigned int level)
{
- struct domain *d = v->domain;
-
paging_lock(d);
/* If there are any shadows, update them. But if shadow_teardown()
* has already been called then it's not safe to try. */
if ( likely(d->arch.paging.shadow.total_pages != 0) )
- sh_unshadow_for_p2m_change(v, gfn, p, table_mfn, new, level);
+ sh_unshadow_for_p2m_change(d, gfn, p, new, level);
/* Update the entry with new content */
safe_write_pte(p, new);
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -359,9 +359,9 @@ extern int sh_remove_write_access(struct
unsigned long fault_addr);
/* Functions that atomically write PT/P2M entries and update state */
-void shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn,
- l1_pgentry_t *p, mfn_t table_mfn,
- l1_pgentry_t new, unsigned int level);
+void shadow_write_p2m_entry(struct domain *d, unsigned long gfn,
+ l1_pgentry_t *p, l1_pgentry_t new,
+ unsigned int level);
int shadow_write_guest_entry(struct vcpu *v, intpte_t *p,
intpte_t new, mfn_t gmfn);
int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -251,8 +251,7 @@ struct p2m_domain {
void (*write_p2m_entry)(struct p2m_domain *p2m,
unsigned long gfn, l1_pgentry_t *p,
- mfn_t table_mfn, l1_pgentry_t new,
- unsigned int level);
+ l1_pgentry_t new, unsigned int level);
long (*audit_p2m)(struct p2m_domain *p2m);
/* Default P2M access type for each page in the the domain: new pages,
@@ -679,7 +678,7 @@ void p2m_flush(struct vcpu *v, struct p2
void p2m_flush_nestedp2m(struct domain *d);
void nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
- l1_pgentry_t *p, mfn_t table_mfn, l1_pgentry_t new, unsigned int level);
+ l1_pgentry_t *p, l1_pgentry_t new, unsigned int level);
#endif /* _XEN_P2M_H */
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -111,9 +111,8 @@ struct paging_mode {
unsigned int *page_order);
void (*update_cr3 )(struct vcpu *v, int do_locking);
void (*update_paging_modes )(struct vcpu *v);
- void (*write_p2m_entry )(struct vcpu *v, unsigned long gfn,
- l1_pgentry_t *p, mfn_t table_mfn,
- l1_pgentry_t new,
+ void (*write_p2m_entry )(struct domain *d, unsigned long gfn,
+ l1_pgentry_t *p, l1_pgentry_t new,
unsigned int level);
int (*write_guest_entry )(struct vcpu *v, intpte_t *p,
intpte_t new, mfn_t gmfn);
@@ -335,9 +334,9 @@ static inline void safe_write_pte(l1_pge
* we are writing. */
struct p2m_domain;
-void paging_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
- l1_pgentry_t *p, mfn_t table_mfn,
- l1_pgentry_t new, unsigned int level);
+void paging_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
+ l1_pgentry_t *p, l1_pgentry_t new,
+ unsigned int level);
/* Called from the guest to indicate that the a process is being
* torn down and its pagetables will soon be discarded */
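As a side note, the vcpu-from-domain fallback that
paging_write_p2m_entry() used to rely on now lives in
sh_unshadow_for_p2m_change(). A minimal standalone sketch of that idiom
(illustrative only; vcpu_for_domain() is a made-up name, not part of
the patch):

    /* Hypothetical helper mirroring the logic moved into
     * sh_unshadow_for_p2m_change(): recover a vcpu for a per-domain
     * operation that occasionally needs one. */
    static struct vcpu *vcpu_for_domain(struct domain *d)
    {
        /* Prefer the current vcpu when it already belongs to d ... */
        struct vcpu *v = current;

        if ( v->domain != d )
            /* ... otherwise fall back to vcpu 0, if any exists. */
            v = d->vcpu ? d->vcpu[0] : NULL;

        return v;
    }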
Attachment: x86-write_p2m_entry-simplify.patch