|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 12/20] PVH xen: create read_descriptor_sel()
This patch changes the read-descriptor functionality to support PVH by
introducing read_descriptor_sel(). Also, emulate_forced_invalid_op() is made
public and suitable for PVH use in the next patch.
Changes in V5: None. This is a new patch (separated out from the previous
patch 10, which was getting large).
Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
---
xen/arch/x86/traps.c | 87 +++++++++++++++++++++++++++++++++------
xen/include/asm-x86/processor.h | 1 +
2 files changed, 75 insertions(+), 13 deletions(-)
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index f68c526..663e351 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -905,7 +905,7 @@ static int emulate_invalid_rdtscp(struct cpu_user_regs *regs)
return EXCRET_fault_fixed;
}
-static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
+int emulate_forced_invalid_op(struct cpu_user_regs *regs)
{
char sig[5], instr[2];
unsigned long eip, rc;
@@ -913,7 +913,7 @@ static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
eip = regs->eip;
/* Check for forced emulation signature: ud2 ; .ascii "xen". */
- if ( (rc = copy_from_user(sig, (char *)eip, sizeof(sig))) != 0 )
+ if ( (rc = raw_copy_from_guest(sig, (char *)eip, sizeof(sig))) != 0 )
{
propagate_page_fault(eip + sizeof(sig) - rc, 0);
return EXCRET_fault_fixed;
@@ -923,7 +923,7 @@ static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
eip += sizeof(sig);
/* We only emulate CPUID. */
- if ( ( rc = copy_from_user(instr, (char *)eip, sizeof(instr))) != 0 )
+ if ( ( rc = raw_copy_from_guest(instr, (char *)eip, sizeof(instr))) != 0 )
{
propagate_page_fault(eip + sizeof(instr) - rc, 0);
return EXCRET_fault_fixed;
@@ -1068,6 +1068,12 @@ void propagate_page_fault(unsigned long addr, u16 error_code)
struct vcpu *v = current;
struct trap_bounce *tb = &v->arch.pv_vcpu.trap_bounce;
+ if ( is_pvh_vcpu(v) )
+ {
+ hvm_inject_page_fault(error_code, addr);
+ return;
+ }
+
v->arch.pv_vcpu.ctrlreg[2] = addr;
arch_set_cr2(v, addr);
@@ -1506,6 +1512,49 @@ static int read_descriptor(unsigned int sel,
return 1;
}
+static int read_descriptor_sel(unsigned int sel,
+ enum x86_segment which_sel,
+ struct vcpu *v,
+ const struct cpu_user_regs *regs,
+ unsigned long *base,
+ unsigned long *limit,
+ unsigned int *ar,
+ unsigned int vm86attr)
+{
+ struct segment_register seg;
+ unsigned int long_mode = 0;
+
+ if ( !is_pvh_vcpu(v) )
+ return read_descriptor(sel, v, regs, base, limit, ar, vm86attr);
+
+ hvm_get_segment_register(v, x86_seg_cs, &seg);
+ long_mode = seg.attr.fields.l;
+
+ if ( which_sel != x86_seg_cs )
+ hvm_get_segment_register(v, which_sel, &seg);
+
+ /* ar is returned packed as in segment_attributes_t. Fix it up */
+ *ar = (unsigned int)seg.attr.bytes;
+ *ar = (*ar & 0xff ) | ((*ar & 0xf00) << 4);
+ *ar = *ar << 8;
+
+ if ( long_mode )
+ {
+ *limit = ~0UL;
+
+ if ( which_sel < x86_seg_fs )
+ {
+ *base = 0UL;
+ return 1;
+ }
+ }
+ else
+ *limit = (unsigned long)seg.limit;
+
+ *base = seg.base;
+ return 1;
+}
+
static int read_gate_descriptor(unsigned int gate_sel,
const struct vcpu *v,
unsigned int *sel,
@@ -1833,6 +1882,7 @@ static int is_cpufreq_controller(struct domain *d)
int emulate_privileged_op(struct cpu_user_regs *regs)
{
+ enum x86_segment which_sel;
struct vcpu *v = current;
unsigned long *reg, eip = regs->eip;
u8 opcode, modrm_reg = 0, modrm_rm = 0, rep_prefix = 0, lock = 0, rex = 0;
@@ -1855,9 +1905,10 @@ int emulate_privileged_op(struct cpu_user_regs *regs)
void (*io_emul)(struct cpu_user_regs *) __attribute__((__regparm__(1)));
uint64_t val, msr_content;
- if ( !read_descriptor(regs->cs, v, regs,
- &code_base, &code_limit, &ar,
- _SEGMENT_CODE|_SEGMENT_S|_SEGMENT_DPL|_SEGMENT_P) )
+ if ( !read_descriptor_sel(regs->cs, x86_seg_cs, v, regs,
+ &code_base, &code_limit, &ar,
+ _SEGMENT_CODE|_SEGMENT_S|
+ _SEGMENT_DPL|_SEGMENT_P) )
goto fail;
op_default = op_bytes = (ar & (_SEGMENT_L|_SEGMENT_DB)) ? 4 : 2;
ad_default = ad_bytes = (ar & _SEGMENT_L) ? 8 : op_default;
@@ -1868,6 +1919,7 @@ int emulate_privileged_op(struct cpu_user_regs *regs)
/* emulating only opcodes not allowing SS to be default */
data_sel = read_segment_register(v, regs, ds);
+ which_sel = x86_seg_ds;
/* Legacy prefixes. */
for ( i = 0; i < 8; i++, rex == opcode || (rex = 0) )
@@ -1883,23 +1935,29 @@ int emulate_privileged_op(struct cpu_user_regs *regs)
continue;
case 0x2e: /* CS override */
data_sel = regs->cs;
+ which_sel = x86_seg_cs;
continue;
case 0x3e: /* DS override */
data_sel = read_segment_register(v, regs, ds);
+ which_sel = x86_seg_ds;
continue;
case 0x26: /* ES override */
data_sel = read_segment_register(v, regs, es);
+ which_sel = x86_seg_es;
continue;
case 0x64: /* FS override */
data_sel = read_segment_register(v, regs, fs);
+ which_sel = x86_seg_fs;
lm_ovr = lm_seg_fs;
continue;
case 0x65: /* GS override */
data_sel = read_segment_register(v, regs, gs);
+ which_sel = x86_seg_gs;
lm_ovr = lm_seg_gs;
continue;
case 0x36: /* SS override */
data_sel = regs->ss;
+ which_sel = x86_seg_ss;
continue;
case 0xf0: /* LOCK */
lock = 1;
@@ -1943,15 +2001,16 @@ int emulate_privileged_op(struct cpu_user_regs *regs)
if ( !(opcode & 2) )
{
data_sel = read_segment_register(v, regs, es);
+ which_sel = x86_seg_es;
lm_ovr = lm_seg_none;
}
if ( !(ar & _SEGMENT_L) )
{
- if ( !read_descriptor(data_sel, v, regs,
- &data_base, &data_limit, &ar,
- _SEGMENT_WR|_SEGMENT_S|_SEGMENT_DPL|
- _SEGMENT_P) )
+ if ( !read_descriptor_sel(data_sel, which_sel, v, regs,
+ &data_base, &data_limit, &ar,
+ _SEGMENT_WR|_SEGMENT_S|_SEGMENT_DPL|
+ _SEGMENT_P) )
goto fail;
if ( !(ar & _SEGMENT_S) ||
!(ar & _SEGMENT_P) ||
@@ -1981,9 +2040,9 @@ int emulate_privileged_op(struct cpu_user_regs *regs)
}
}
else
- read_descriptor(data_sel, v, regs,
- &data_base, &data_limit, &ar,
- 0);
+ read_descriptor_sel(data_sel, which_sel, v, regs,
+ &data_base, &data_limit, &ar,
+ 0);
data_limit = ~0UL;
ar = _SEGMENT_WR|_SEGMENT_S|_SEGMENT_DPL|_SEGMENT_P;
}
@@ -2638,6 +2697,8 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
unsigned long off, eip, opnd_off, base, limit;
int jump;
+ ASSERT(!is_pvh_vcpu(v));
+
/* Check whether this fault is due to the use of a call gate. */
if ( !read_gate_descriptor(regs->error_code, v, &sel, &off, &ar) ||
(((ar >> 13) & 3) < (regs->cs & 3)) ||
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 8c70324..ab15ff0 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -567,6 +567,7 @@ int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void), unsigned long len);
int microcode_resume_cpu(int cpu);
void pv_cpuid(struct cpu_user_regs *regs);
+int emulate_forced_invalid_op(struct cpu_user_regs *regs);
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_X86_PROCESSOR_H */
--
1.7.2.3
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |