[PATCH v3 6/9] x86/HVM: convert hvm_virtual_to_linear_addr() to be remote-capable
While all present callers want to act on "current", stack dumping for
HVM vCPU-s will require the function to be able to act on a remote vCPU.
To avoid touching all present callers, convert the existing function to
an inline wrapper around the extended new one.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Alternatively the actual dumping patch could avoid using this more
elaborate function and, ignoring access checks, simply add in the SS
segment base itself (if needed in the first place).
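(For illustration only, not part of this patch: assuming the dumping code has
the remote vCPU "v" paused and its register state available in
v->arch.user_regs, that simpler alternative might look roughly like

    struct segment_register ss;
    unsigned long linear = v->arch.user_regs.rsp;

    hvm_get_segment_register(v, x86_seg_ss, &ss);
    /* Outside long mode, add in the SS base and wrap at 4Gb; no access checks. */
    if ( !hvm_long_mode_active(v) )
        linear = (uint32_t)(linear + ss.base);

i.e. it would bypass all of the limit and access-rights handling done by the
function converted below.)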
---
v3: New.
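(For reference only, not part of this patch: a remote caller, such as the
stack dumping code later in this series, would presumably invoke the new
function along the lines of

    struct segment_register cs, ss;
    unsigned long linear;

    hvm_get_segment_register(v, x86_seg_cs, &cs);
    hvm_get_segment_register(v, x86_seg_ss, &ss);
    if ( hvm_vcpu_virtual_to_linear(v, x86_seg_ss, &ss, v->arch.user_regs.rsp,
                                    sizeof(unsigned long), hvm_access_read,
                                    &cs, &linear) )
        /* read the guest stack word at "linear" */;

with "v" being the paused remote vCPU; the variable names here are made up.)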
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2526,7 +2526,8 @@ int hvm_set_cr4(unsigned long value, boo
     return X86EMUL_OKAY;
 }
 
-bool_t hvm_virtual_to_linear_addr(
+bool hvm_vcpu_virtual_to_linear(
+    struct vcpu *v,
     enum x86_segment seg,
     const struct segment_register *reg,
     unsigned long offset,
@@ -2535,8 +2536,9 @@ bool_t hvm_virtual_to_linear_addr(
     const struct segment_register *active_cs,
     unsigned long *linear_addr)
 {
-    const struct vcpu *curr = current;
     unsigned long addr = offset, last_byte;
+    const struct cpu_user_regs *regs = v == current ? guest_cpu_user_regs()
+                                                    : &v->arch.user_regs;
     bool_t okay = 0;
 
     /*
@@ -2547,7 +2549,7 @@ bool_t hvm_virtual_to_linear_addr(
      */
     ASSERT(seg < x86_seg_none);
 
-    if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PE) )
+    if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_PE) )
     {
         /*
         * REAL MODE: Don't bother with segment access checks.
@@ -2555,7 +2557,7 @@ bool_t hvm_virtual_to_linear_addr(
          */
         addr = (uint32_t)(addr + reg->base);
     }
-    else if ( (guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) &&
+    else if ( (regs->eflags & X86_EFLAGS_VM) &&
               is_x86_user_segment(seg) )
     {
         /* VM86 MODE: Fixed 64k limits on all user segments. */
@@ -2564,7 +2566,7 @@ bool_t hvm_virtual_to_linear_addr(
         if ( max(offset, last_byte) >> 16 )
             goto out;
     }
-    else if ( hvm_long_mode_active(curr) &&
+    else if ( hvm_long_mode_active(v) &&
               (is_x86_system_segment(seg) || active_cs->l) )
     {
         /*
@@ -2636,7 +2638,7 @@ bool_t hvm_virtual_to_linear_addr(
         else if ( last_byte > reg->limit )
             goto out; /* last byte is beyond limit */
         else if ( last_byte < offset &&
-                  curr->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD )
+                  v->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD )
             goto out; /* access wraps */
     }
 
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -314,7 +314,9 @@ enum hvm_access_type {
     hvm_access_read,
     hvm_access_write
 };
-bool_t hvm_virtual_to_linear_addr(
+
+bool hvm_vcpu_virtual_to_linear(
+    struct vcpu *v,
     enum x86_segment seg,
     const struct segment_register *reg,
     unsigned long offset,
@@ -323,6 +325,19 @@ bool_t hvm_virtual_to_linear_addr(
     const struct segment_register *active_cs,
     unsigned long *linear_addr);
 
+static inline bool hvm_virtual_to_linear_addr(
+    enum x86_segment seg,
+    const struct segment_register *reg,
+    unsigned long offset,
+    unsigned int bytes,
+    enum hvm_access_type access_type,
+    const struct segment_register *active_cs,
+    unsigned long *linear)
+{
+    return hvm_vcpu_virtual_to_linear(current, seg, reg, offset, bytes,
+                                      access_type, active_cs, linear);
+}
+
 void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent,
                              bool_t *writable);
 void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent);