|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v8 17/21] xen/x86: allow HVM guests to use hypercalls to bring up vCPUs
>>> On 06.11.15 at 17:05, <roger.pau@xxxxxxxxxx> wrote:
> @@ -1183,6 +1184,301 @@ int arch_set_info_guest(
> #undef c
> }
>
> +static inline int check_segment(struct segment_register reg,
Passed by value?
> + enum x86_segment seg)
> +{
> +
> + if ( reg.attr.fields.pad != 0 )
> + {
> + gprintk(XENLOG_ERR,
> + "Attribute bits 12-15 of the segment are not zero\n");
> + return -EINVAL;
> + }
> +
> + if ( reg.sel == 0 && reg.base == 0 && reg.limit == 0 &&
What's the sel check good for when your only caller only ever calls
you with it being zero? Looking at base or limit here doesn't seem
right either.
> + reg.attr.bytes == 0)
> + {
> + if ( seg == x86_seg_cs || seg == x86_seg_ss )
> + {
> + gprintk(XENLOG_ERR, "Null selector provided for CS or SS\n");
> + return -EINVAL;
> + }
> + return 0;
> + }
> +
> + if ( reg.attr.fields.p != 1 )
This is effectively a boolean field - no need for "!= 1", should be
just !.
> + {
> + gprintk(XENLOG_ERR, "Non-present segment provided\n");
> + return -EINVAL;
> + }
> +
> + if ( seg == x86_seg_cs && !(reg.attr.fields.type & 0x8) )
> + {
> + gprintk(XENLOG_ERR, "CS is missing code type\n");
> + return -EINVAL;
> + }
> +
> + if ( seg != x86_seg_cs && !!(reg.attr.fields.type & 0x8) )
No need for !! here. Also only SS is disallowed to get code segments
loaded. For other selector registers they're okay when readable.
> + {
> + gprintk(XENLOG_ERR, "Non code segment with code type set\n");
> + return -EINVAL;
> + }
SS must be writable.
> +int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
> +{
> + struct cpu_user_regs *uregs = &v->arch.user_regs;
> + struct segment_register cs, ds, ss, es, tr;
> + int rc;
> +
> + if ( ctx->pad != 0 )
> + {
> + gprintk(XENLOG_ERR, "Padding field != 0\n");
Please don't.
> + return -EINVAL;
> + }
> +
> + switch ( ctx->mode )
> + {
> + default:
> + return -EINVAL;
> +
> + case VCPU_HVM_MODE_32B:
> + {
> + const struct vcpu_hvm_x86_32 *regs = &ctx->cpu_regs.x86_32;
> + uint32_t limit;
> +
> + if ( ctx->cpu_regs.x86_32.pad1 != 0 ||
> + ctx->cpu_regs.x86_32.pad2[0] != 0 ||
> + ctx->cpu_regs.x86_32.pad2[1] != 0 ||
> + ctx->cpu_regs.x86_32.pad2[2] != 0 )
> + {
> + gprintk(XENLOG_ERR, "Padding field != 0\n");
> + return -EINVAL;
> + }
> +
> +#define SEG(s, r) \
> + (struct segment_register){ .sel = 0, .base = (r)->s ## _base, \
> + .limit = (r)->s ## _limit, .attr.bytes = (r)->s ## _ar }
> + cs = SEG(cs, regs);
> + ds = SEG(ds, regs);
> + ss = SEG(ss, regs);
> + es = SEG(es, regs);
> + tr = SEG(tr, regs);
> +#undef SEG
> +
> + rc = check_segment(cs, x86_seg_cs);
> + rc |= check_segment(ds, x86_seg_ds);
> + rc |= check_segment(ss, x86_seg_ss);
> + rc |= check_segment(es, x86_seg_es);
> + rc |= check_segment(tr, x86_seg_tr);
> + if ( rc != 0 )
> + return rc;
> +
> + /* Basic sanity checks. */
> + limit = cs.limit;
> + if ( cs.attr.fields.g )
> + limit = (limit << 12) | 0xfff;
> + if ( regs->eip > limit )
> + {
> + gprintk(XENLOG_ERR, "EIP (%#08x) outside CS limit (%#08x)\n",
> + regs->eip, limit);
> + return -EINVAL;
> + }
> +
> + if ( ds.attr.fields.p && ds.attr.fields.dpl > cs.attr.fields.dpl )
> + {
> + gprintk(XENLOG_ERR, "DS.DPL (%u) is greater than CS.DPL (%u)\n",
> + ds.attr.fields.dpl, cs.attr.fields.dpl);
Indentation.
> + return -EINVAL;
> + }
> +
> + if ( ss.attr.fields.p && ss.attr.fields.dpl != cs.attr.fields.dpl )
> + {
> +        gprintk(XENLOG_ERR, "SS.DPL (%u) is different than CS.DPL (%u)\n",
> +                ss.attr.fields.dpl, cs.attr.fields.dpl);
> + return -EINVAL;
> + }
I think this should go ahead of the DS one. Also I don't think
either CS or SS would be okay with the present bit clear, even
less so in 32-bit mode.
> +int arch_initialize_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
> +{
> + struct domain *d = v->domain;
> + int rc;
> +
> + if ( is_hvm_vcpu(v) )
> + {
> + struct vcpu_hvm_context ctxt;
> +
> + if ( copy_from_guest(&ctxt, arg, 1) )
> + return -EFAULT;
> +
> + domain_lock(d);
> + rc = v->is_initialised ? -EEXIST : arch_set_info_hvm_guest(v, &ctxt);
> + domain_unlock(d);
> + }
> + else
> + {
> + struct vcpu_guest_context *ctxt;
> +
> + if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
> + return -ENOMEM;
> +
> + if ( copy_from_guest(ctxt, arg, 1) )
> + {
> + free_vcpu_guest_context(ctxt);
> + return -EFAULT;
> + }
> +
> + domain_lock(d);
> + rc = v->is_initialised ? -EEXIST : arch_set_info_guest(v, ctxt);
> + domain_unlock(d);
> +
> + free_vcpu_guest_context(ctxt);
> + }
This else branch looks suspiciously like the ARM variant, and iirc I
had asked already on an earlier version to have this handled in
common code (with ARM simply using the common function for its
arch_initialize_vcpu()).
Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |