[Xen-changelog] [xen master] x86/hvm: always do SMAP check when updating runstate_guest(v)
commit 31ae587e6f0181bf1f7d196fe1b49357c8922e60
Author: Feng Wu <feng.wu@xxxxxxxxx>
AuthorDate: Fri Aug 1 16:39:17 2014 +0200
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Aug 1 16:39:17 2014 +0200
x86/hvm: always do SMAP check when updating runstate_guest(v)
In the current implementation, we honor the guest's CPL and AC
to determine whether to do the SMAP check for runstate_guest(v).
However, this doesn't work: the VMCS field is invalid when we try
to get the guest's SS via hvm_get_segment_register(), since the
right VMCS has not been loaded for the current VCPU.
In this patch, we always do the SMAP check when updating
runstate_guest(v), provided the guest has SMAP enabled.
Reported-by: Sander Eikelenboom <linux@xxxxxxxxxxxxxx>
Signed-off-by: Feng Wu <feng.wu@xxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
Tested-by: Sander Eikelenboom <linux@xxxxxxxxxxxxxx>
---
 xen/arch/x86/domain.c        |   25 +++++++++++++++++++++----
 xen/arch/x86/mm/guest_walk.c |   39 ++++++++++++++++++++++++++-------------
 xen/include/asm-x86/domain.h |   17 ++++++++++++++++-
 3 files changed, 63 insertions(+), 18 deletions(-)
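
Before the diff itself, a brief illustration of the idea: instead of
unconditionally probing the guest's CPL and AC (which requires a loaded
VMCS), the page-table walker consults a per-vCPU policy. The sketch below
is illustrative only and not part of the patch; smap_fault_applies() is a
hypothetical helper, and its smap_enabled/dpl/eflags_ac parameters stand
in for the hvm_smap_enabled(), SS.DPL and EFLAGS.AC values the real code
reads from the vCPU.

    #include <stdbool.h>

    typedef enum {
        SMAP_CHECK_HONOR_CPL_AC,  /* derive the check from the guest's CPL/AC */
        SMAP_CHECK_ENABLED,       /* force the check on */
        SMAP_CHECK_DISABLED,      /* force the check off */
    } smap_check_policy_t;

    /* Hypothetical helper mirroring the guest_walk_tables() switch below. */
    static bool smap_fault_applies(smap_check_policy_t policy,
                                   bool smap_enabled, unsigned int dpl,
                                   bool eflags_ac)
    {
        switch ( policy )
        {
        case SMAP_CHECK_HONOR_CPL_AC:
            /* Only safe when this vCPU's VMCS is loaded, so SS.DPL is valid. */
            return smap_enabled && (dpl == 3 || !eflags_ac);
        case SMAP_CHECK_ENABLED:
            /* Used while updating runstate_guest(v): no VMCS access needed. */
            return smap_enabled;
        default:  /* SMAP_CHECK_DISABLED */
            return false;
        }
    }
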
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index e896210..f7e0e78 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -180,6 +180,14 @@ void dump_pageframe_info(struct domain *d)
     spin_unlock(&d->page_alloc_lock);
 }
 
+smap_check_policy_t smap_policy_change(struct vcpu *v,
+                                       smap_check_policy_t new_policy)
+{
+    smap_check_policy_t old_policy = v->arch.smap_check_policy;
+    v->arch.smap_check_policy = new_policy;
+    return old_policy;
+}
+
 /*
  * The hole may be at or above the 44-bit boundary, so we need to determine
  * the total bit count until reaching 32 significant (not squashed out) bits
@@ -1349,22 +1357,31 @@ static void paravirt_ctxt_switch_to(struct vcpu *v)
 }
 
 /* Update per-VCPU guest runstate shared memory area (if registered). */
-bool_t update_runstate_area(const struct vcpu *v)
+bool_t update_runstate_area(struct vcpu *v)
 {
+    bool_t rc;
+    smap_check_policy_t smap_policy;
+
     if ( guest_handle_is_null(runstate_guest(v)) )
         return 1;
 
+    smap_policy = smap_policy_change(v, SMAP_CHECK_ENABLED);
+
     if ( has_32bit_shinfo(v->domain) )
     {
         struct compat_vcpu_runstate_info info;
 
         XLAT_vcpu_runstate_info(&info, &v->runstate);
         __copy_to_guest(v->runstate_guest.compat, &info, 1);
-        return 1;
+        rc = 1;
     }
+    else
+        rc = __copy_to_guest(runstate_guest(v), &v->runstate, 1) !=
+             sizeof(v->runstate);
 
-    return __copy_to_guest(runstate_guest(v), &v->runstate, 1) !=
-           sizeof(v->runstate);
+    smap_policy_change(v, smap_policy);
+
+    return rc;
 }
 
 static void _update_runstate_area(struct vcpu *v)
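
As an aside, callers that copy into guest memory while the target vCPU's
VMCS may not be loaded follow the save/override/restore pattern seen in
update_runstate_area() above. A minimal usage sketch, where
copy_runstate_to_guest() is a hypothetical placeholder for the real
__copy_to_guest() call:

    void runstate_update_example(struct vcpu *v)
    {
        /* Force the SMAP check while this vCPU's VMCS may not be loaded. */
        smap_check_policy_t old = smap_policy_change(v, SMAP_CHECK_ENABLED);

        copy_runstate_to_guest(v);  /* guest access with SMAP enforced */

        smap_policy_change(v, old); /* restore the caller's policy */
    }
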
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index bb38fda..1b26175 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -164,25 +164,38 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
         struct segment_register seg;
         const struct cpu_user_regs *regs = guest_cpu_user_regs();
 
-        hvm_get_segment_register(v, x86_seg_ss, &seg);
-
         /* SMEP: kernel-mode instruction fetches from user-mode mappings
          * should fault.  Unlike NX or invalid bits, we're looking for _all_
          * entries in the walk to have _PAGE_USER set, so we need to do the
          * whole walk as if it were a user-mode one and then invert the answer. */
         smep = hvm_smep_enabled(v) && (pfec & PFEC_insn_fetch);
 
-        /*
-         * SMAP: kernel-mode data accesses from user-mode mappings should fault
-         * A fault is considered as a SMAP violation if the following
-         * conditions come true:
-         *   - X86_CR4_SMAP is set in CR4
-         *   - A user page is accessed
-         *   - CPL = 3 or X86_EFLAGS_AC is clear
-         *   - Page fault in kernel mode
-         */
-        smap = hvm_smap_enabled(v) &&
-               ((seg.attr.fields.dpl == 3) || !(regs->eflags & X86_EFLAGS_AC));
+        switch ( v->arch.smap_check_policy )
+        {
+        case SMAP_CHECK_HONOR_CPL_AC:
+            hvm_get_segment_register(v, x86_seg_ss, &seg);
+
+            /*
+             * SMAP: kernel-mode data accesses from user-mode mappings
+             * should fault.
+             * A fault is considered as a SMAP violation if the following
+             * conditions come true:
+             *   - X86_CR4_SMAP is set in CR4
+             *   - A user page is accessed
+             *   - CPL = 3 or X86_EFLAGS_AC is clear
+             *   - Page fault in kernel mode
+             */
+            smap = hvm_smap_enabled(v) &&
+                   ((seg.attr.fields.dpl == 3) ||
+                    !(regs->eflags & X86_EFLAGS_AC));
+            break;
+        case SMAP_CHECK_ENABLED:
+            smap = hvm_smap_enabled(v);
+            break;
+        default:
+            ASSERT(v->arch.smap_check_policy == SMAP_CHECK_DISABLED);
+            break;
+        }
     }
 
     if ( smep || smap )
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index abf55fb..112d0b1 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -382,6 +382,12 @@ struct pv_vcpu
     struct vcpu_time_info pending_system_time;
 };
 
+typedef enum __packed {
+    SMAP_CHECK_HONOR_CPL_AC,    /* honor the guest's CPL and AC */
+    SMAP_CHECK_ENABLED,         /* enable the check */
+    SMAP_CHECK_DISABLED,        /* disable the check */
+} smap_check_policy_t;
+
 struct arch_vcpu
 {
     /*
@@ -438,6 +444,12 @@ struct arch_vcpu
      * and thus should be saved/restored. */
     bool_t nonlazy_xstate_used;
 
+    /*
+     * The SMAP check policy when updating runstate_guest(v) and the
+     * secondary system time.
+     */
+    smap_check_policy_t smap_check_policy;
+
     struct vmce vmce;
 
     struct paging_vcpu    paging;
@@ -448,11 +460,14 @@ struct arch_vcpu
     XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;
 } __cacheline_aligned;
 
+smap_check_policy_t smap_policy_change(struct vcpu *v,
+                                       smap_check_policy_t new_policy);
+
 /* Shorthands to improve code legibility. */
 #define hvm_vmx         hvm_vcpu.u.vmx
 #define hvm_svm         hvm_vcpu.u.svm
 
-bool_t update_runstate_area(const struct vcpu *);
+bool_t update_runstate_area(struct vcpu *);
 bool_t update_secondary_system_time(const struct vcpu *,
                                     struct vcpu_time_info *);
--
generated by git-patchbot for /home/xen/git/xen.git#master
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog