[Xen-devel] [PATCH V7 2/5] x86/mm: allocate logdirty_ranges for altp2ms
For now, only do allocation/deallocation; keeping them in sync
will be done in subsequent patches.
Logdirty synchronization will only be done for active altp2ms,
so allocate a logdirty rangeset (copying the host logdirty
rangeset) when an altp2m is activated, and free it when the
altp2m is deactivated.
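For reference, a minimal sketch of what the allocation and the
teardown amount to, assuming p2m_init_logdirty()/p2m_free_logdirty()
(introduced earlier in this series) are thin wrappers around the
rangeset API -- the exact shape below is an assumption, not part of
this patch:

    /*
     * Sketch only: approximate shape of the helpers this patch
     * relies on.  Since an altp2m's rangeset is freshly created
     * (hence empty), rangeset_merge() -- which adds every range of
     * its second argument to its first -- amounts to a copy of the
     * host p2m's rangeset.
     */
    static int p2m_init_logdirty(struct p2m_domain *p2m)
    {
        if ( p2m->logdirty_ranges )
            return 0;

        p2m->logdirty_ranges = rangeset_new(p2m->domain, "log-dirty",
                                            RANGESETF_prettyprint_hex);

        return p2m->logdirty_ranges ? 0 : -ENOMEM;
    }

    static void p2m_free_logdirty(struct p2m_domain *p2m)
    {
        if ( !p2m->logdirty_ranges )
            return;

        rangeset_destroy(p2m->logdirty_ranges);
        p2m->logdirty_ranges = NULL;
    }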
Write a helper function to do altp2m activation (appropriately
handling failures). Also, refactor p2m_reset_altp2m() so that it
can be used to remove redundant codepaths, fixing the locking
while we're at it.
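For illustration, the two reset flavours map onto the existing
callsites like this (sketch distilled from the diff below):

    /* Remapped-entry reset (propagate-change path): wipe the
     * tables, but keep the logdirty rangeset of a still-active
     * altp2m. */
    p2m_reset_altp2m(d, i, ALTP2M_RESET);

    /* Full deactivation (flush/destroy paths): additionally free
     * the logdirty rangeset before the slot is marked free. */
    p2m_reset_altp2m(d, idx, ALTP2M_DEACTIVATE);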
While we're here, switch global_logdirty from bool_t to bool.
Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
---
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
CC: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
---
Changes since V6:
- Replaced the description of the patch with the one suggested
by George.
- p2m_reset_altp2m() now takes an enum altp2m_reset_type param
to make it clearer what it does at callsites.
- The "Uninit and reinit ept to force TLB shootdown" comment
has been moved above the ept_p2m_uninit() call in
p2m_reset_altp2m().
- p2m_init_altp2m_logdirty() has been merged into
p2m_activate_altp2m().
- p2m_activate_altp2m() now takes the p2m lock.
---
xen/arch/x86/mm/p2m.c | 103 ++++++++++++++++++++++++++++++++--------------
xen/include/asm-x86/p2m.h | 2 +-
2 files changed, 74 insertions(+), 31 deletions(-)
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 418ff85..9773495 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2282,6 +2282,36 @@ bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
return 1;
}
+enum altp2m_reset_type {
+ ALTP2M_RESET,
+ ALTP2M_DEACTIVATE
+};
+
+static void p2m_reset_altp2m(struct domain *d, unsigned int idx,
+ enum altp2m_reset_type reset_type)
+{
+ struct p2m_domain *p2m;
+
+ ASSERT(idx < MAX_ALTP2M);
+ p2m = d->arch.altp2m_p2m[idx];
+
+ p2m_lock(p2m);
+
+ p2m_flush_table_locked(p2m);
+
+ if ( reset_type == ALTP2M_DEACTIVATE )
+ p2m_free_logdirty(p2m);
+
+ /* Uninit and reinit ept to force TLB shootdown */
+ ept_p2m_uninit(p2m);
+ ept_p2m_init(p2m);
+
+ p2m->min_remapped_gfn = gfn_x(INVALID_GFN);
+ p2m->max_remapped_gfn = 0;
+
+ p2m_unlock(p2m);
+}
+
void p2m_flush_altp2m(struct domain *d)
{
unsigned int i;
@@ -2290,16 +2320,47 @@ void p2m_flush_altp2m(struct domain *d)
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- p2m_flush_table(d->arch.altp2m_p2m[i]);
- /* Uninit and reinit ept to force TLB shootdown */
- ept_p2m_uninit(d->arch.altp2m_p2m[i]);
- ept_p2m_init(d->arch.altp2m_p2m[i]);
+ p2m_reset_altp2m(d, i, ALTP2M_DEACTIVATE);
d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
}
altp2m_list_unlock(d);
}
+static int p2m_activate_altp2m(struct domain *d, unsigned int idx)
+{
+ struct p2m_domain *hostp2m, *p2m;
+ int rc;
+
+ ASSERT(idx < MAX_ALTP2M);
+
+ p2m = d->arch.altp2m_p2m[idx];
+ hostp2m = p2m_get_hostp2m(d);
+
+ p2m_lock(p2m);
+
+ rc = p2m_init_logdirty(p2m);
+
+ if ( rc )
+ goto out;
+
+ /* The following is really just a rangeset copy. */
+ rc = rangeset_merge(p2m->logdirty_ranges, hostp2m->logdirty_ranges);
+
+ if ( rc )
+ {
+ p2m_free_logdirty(p2m);
+ goto out;
+ }
+
+ p2m_init_altp2m_ept(d, idx);
+
+out:
+ p2m_unlock(p2m);
+
+ return rc;
+}
+
int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
{
int rc = -EINVAL;
@@ -2310,10 +2371,7 @@ int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
altp2m_list_lock(d);
if ( d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
- {
- p2m_init_altp2m_ept(d, idx);
- rc = 0;
- }
+ rc = p2m_activate_altp2m(d, idx);
altp2m_list_unlock(d);
return rc;
@@ -2331,9 +2389,10 @@ int p2m_init_next_altp2m(struct domain *d, uint16_t *idx)
if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
continue;
- p2m_init_altp2m_ept(d, i);
- *idx = i;
- rc = 0;
+ rc = p2m_activate_altp2m(d, i);
+
+ if ( !rc )
+ *idx = i;
break;
}
@@ -2360,10 +2419,7 @@ int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx)
if ( !_atomic_read(p2m->active_vcpus) )
{
- p2m_flush_table(d->arch.altp2m_p2m[idx]);
- /* Uninit and reinit ept to force TLB shootdown */
- ept_p2m_uninit(d->arch.altp2m_p2m[idx]);
- ept_p2m_init(d->arch.altp2m_p2m[idx]);
+ p2m_reset_altp2m(d, idx, ALTP2M_DEACTIVATE);
d->arch.altp2m_eptp[idx] = mfn_x(INVALID_MFN);
rc = 0;
}
@@ -2488,16 +2544,6 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
return rc;
}
-static void p2m_reset_altp2m(struct p2m_domain *p2m)
-{
- p2m_flush_table(p2m);
- /* Uninit and reinit ept to force TLB shootdown */
- ept_p2m_uninit(p2m);
- ept_p2m_init(p2m);
- p2m->min_remapped_gfn = gfn_x(INVALID_GFN);
- p2m->max_remapped_gfn = 0;
-}
-
int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
mfn_t mfn, unsigned int page_order,
p2m_type_t p2mt, p2m_access_t p2ma)
@@ -2531,7 +2577,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
{
if ( !reset_count++ )
{
- p2m_reset_altp2m(p2m);
+ p2m_reset_altp2m(d, i, ALTP2M_RESET);
last_reset_idx = i;
}
else
@@ -2545,10 +2591,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
continue;
- p2m = d->arch.altp2m_p2m[i];
- p2m_lock(p2m);
- p2m_reset_altp2m(p2m);
- p2m_unlock(p2m);
+ p2m_reset_altp2m(d, i, ALTP2M_RESET);
}
ret = 0;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index ac33f50..c7f5710 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -222,7 +222,7 @@ struct p2m_domain {
struct rangeset *logdirty_ranges;
/* Host p2m: Global log-dirty mode enabled for the domain. */
- bool_t global_logdirty;
+ bool global_logdirty;
/* Host p2m: when this flag is set, don't flush all the nested-p2m
* tables on every host-p2m change. The setter of this flag
--
2.7.4