Re: [Xen-devel] [PATCH v2 08/16] SVM: use generic instruction decoding
On 28/09/16 09:13, Jan Beulich wrote:
> ... instead of custom handling. To facilitate this break out init code
> from _hvm_emulate_one() into the new hvm_emulate_init(), and make
> hvmemul_insn_fetch() globally available.
> int __get_instruction_length_from_list(struct vcpu *v,
> const enum instruction_index *list, unsigned int list_count)
> {
> struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
> - unsigned int i, j, inst_len = 0;
> - enum instruction_index instr = 0;
> - u8 buf[MAX_INST_LEN];
> - const u8 *opcode = NULL;
> - unsigned long fetch_addr, fetch_limit;
> - unsigned int fetch_len, max_len;
> + struct hvm_emulate_ctxt ctxt;
> + struct x86_emulate_state *state;
> + unsigned int inst_len, j, modrm_rm, modrm_reg;
> + int modrm_mod;
>
Despite knowing how this works, it is still confusing to read. Do you
mind putting in a comment such as:
/* In a debug build, always use x86_decode_insn() and compare with
 * hardware. */
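i.e. so the start of the function ends up reading (a sketch; exact
wording and placement at your discretion):

    /* In a debug build, always use x86_decode_insn() and compare with
     * hardware. */
#ifdef NDEBUG
    if ( (inst_len = svm_nextrip_insn_length(v)) != 0 )
        return inst_len;

    if ( vmcb->exitcode == VMEXIT_IOIO )
        return vmcb->exitinfo2 - vmcb->rip;
#endif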
> +#ifdef NDEBUG
> if ( (inst_len = svm_nextrip_insn_length(v)) != 0 )
> return inst_len;
>
> if ( vmcb->exitcode == VMEXIT_IOIO )
> return vmcb->exitinfo2 - vmcb->rip;
> +#endif
>
> - /* Fetch up to the next page break; we'll fetch from the next page
> - * later if we have to. */
> - fetch_addr = svm_rip2pointer(v, &fetch_limit);
> - if ( vmcb->rip > fetch_limit )
> - return 0;
> - max_len = min(fetch_limit - vmcb->rip + 1, MAX_INST_LEN + 0UL);
> - fetch_len = min_t(unsigned int, max_len,
> - PAGE_SIZE - (fetch_addr & ~PAGE_MASK));
> - if ( !fetch(vmcb, buf, fetch_addr, fetch_len) )
> + ASSERT(v == current);
> + hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
> + hvm_emulate_init(&ctxt, NULL, 0);
> + state = x86_decode_insn(&ctxt.ctxt, hvmemul_insn_fetch);
> + if ( IS_ERR_OR_NULL(state) )
> return 0;
>
> - while ( (inst_len < max_len) && is_prefix(buf[inst_len]) )
> - {
> - inst_len++;
> - if ( inst_len >= fetch_len )
> - {
> - if ( !fetch(vmcb, buf + fetch_len, fetch_addr + fetch_len,
> - max_len - fetch_len) )
> - return 0;
> - fetch_len = max_len;
> - }
> + inst_len = x86_insn_length(state, &ctxt.ctxt);
> + modrm_mod = x86_insn_modrm(state, &modrm_rm, &modrm_reg);
> + x86_emulate_free_state(state);
> +#ifndef NDEBUG
> + if ( vmcb->exitcode == VMEXIT_IOIO )
> + j = vmcb->exitinfo2 - vmcb->rip;
> + else
> + j = svm_nextrip_insn_length(v);
> + if ( j && j != inst_len )
> + {
> + gprintk(XENLOG_WARNING, "insn-len[%02x]=%u (exp %u)\n",
> + ctxt.ctxt.opcode, inst_len, j);
> + return j;
> }
> +#endif
>
> for ( j = 0; j < list_count; j++ )
> {
> - instr = list[j];
> - opcode = opc_bytes[instr];
> + enum instruction_index instr = list[j];
>
> - for ( i = 0; (i < opcode[0]) && ((inst_len + i) < max_len); i++ )
> + ASSERT(instr >= 0 && instr < ARRAY_SIZE(opc_tab));
This is another ASSERT() used as a bounds check, and it will suffer a
build failure on clang: as the enum's representation is unsigned, the
`instr >= 0` half of the condition is tautologically true, which clang
warns about, and which -Werror turns fatal.

You need s/enum instruction_index/unsigned int/ to fix the build
issue. Can I also request the use of
if ( instr >= ARRAY_SIZE(opc_tab) )
{
ASSERT_UNREACHABLE();
return 0;
}
instead?
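Taken together, an (untested) sketch of what I'm after:

    for ( j = 0; j < list_count; j++ )
    {
        unsigned int instr = list[j]; /* not the enum, to keep clang happy */

        if ( instr >= ARRAY_SIZE(opc_tab) )
        {
            ASSERT_UNREACHABLE();
            return 0;
        }
        ...
    }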
> --- a/xen/arch/x86/x86_emulate/x86_emulate.c
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.c
> @@ -5200,3 +5214,89 @@ x86_emulate(
> #undef vex
> #undef override_seg
> #undef ea
> +
> +#ifdef __XEN__
> +
> +#include <xen/err.h>
> +
> +struct x86_emulate_state *
> +x86_decode_insn(
> + struct x86_emulate_ctxt *ctxt,
> + int (*insn_fetch)(
> + enum x86_segment seg, unsigned long offset,
> + void *p_data, unsigned int bytes,
> + struct x86_emulate_ctxt *ctxt))
> +{
> + static DEFINE_PER_CPU(struct x86_emulate_state, state);
> + struct x86_emulate_state *state = &this_cpu(state);
> + const struct x86_emulate_ops ops = {
This can be static, to avoid having it reconstructed on the stack on
each function call.
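i.e. something like this (a sketch only; `decode_ops` and
`fixed_insn_fetch` are illustrative names, and since a static table
needs a compile-time constant initializer, the caller-supplied
insn_fetch pointer would have to be stashed in the state rather than in
the ops table):

    /* Hypothetical: a static table requires the fetch hook to be known
     * at build time, rather than taken as a function parameter. */
    static const struct x86_emulate_ops decode_ops = {
        .insn_fetch = fixed_insn_fetch,
    };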
Otherwise, Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>