Re: [Xen-devel] [PATCH 1/5] xen/arm: implement do_hvm_op for ARM
We need to decide how hybrid our hybrid ARM guests are going to be. The
particular HVM params you are using here (evtchn port etc.) typically live
in start_info for a PV guest. In principle we could define a start_info for
ARM too, but that leaves the question of how the guest finds it (which
loops back round to hvm_params...). Looking at the rest of start_info, it
carries things like modules (aka ramdisks) and command lines, which ARM
guests get via the normal ARM boot protocol (i.e. the domain builder
handles them), plus a bunch of fields which only seem to make sense for
proper PV guests. So maybe HVM_PARAM is the right answer?

start_info does also have a flags field, containing things like
SIF_INITIAL_DOMAIN. Might we want that? (Rough sketches of both ideas are
at the bottom of this mail, below the quote.)

On Fri, 2012-06-22 at 17:09 +0100, Stefano Stabellini wrote:
> Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
> ---
>  xen/arch/arm/Makefile        |    1 +
>  xen/arch/arm/hvm.c           |   60 ++++++++++++++++++++++++++++++++++++++++++
>  xen/arch/arm/traps.c         |    1 +
>  xen/include/asm-arm/domain.h |    7 +++++
>  4 files changed, 69 insertions(+), 0 deletions(-)
>  create mode 100644 xen/arch/arm/hvm.c
>
> diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
> index 5a87ba6..634b620 100644
> --- a/xen/arch/arm/Makefile
> +++ b/xen/arch/arm/Makefile
> @@ -26,6 +26,7 @@ obj-y += traps.o
>  obj-y += vgic.o
>  obj-y += vtimer.o
>  obj-y += vpl011.o
> +obj-y += hvm.o
>
>  #obj-bin-y += ....o
>
> diff --git a/xen/arch/arm/hvm.c b/xen/arch/arm/hvm.c
> new file mode 100644
> index 0000000..c11378d
> --- /dev/null
> +++ b/xen/arch/arm/hvm.c
> @@ -0,0 +1,60 @@
> +#include <xen/config.h>
> +#include <xen/init.h>
> +#include <xen/lib.h>
> +#include <xen/errno.h>
> +#include <xen/guest_access.h>
> +#include <xen/sched.h>
> +
> +#include <public/xen.h>
> +#include <public/hvm/params.h>
> +#include <public/hvm/hvm_op.h>
> +
> +#include <asm/hypercall.h>
> +
> +long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
> +
> +{
> +    long rc = 0;
> +
> +    switch ( op )
> +    {
> +    case HVMOP_set_param:
> +    case HVMOP_get_param:
> +    {
> +        struct xen_hvm_param a;
> +        struct domain *d;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        if ( a.index >= HVM_NR_PARAMS )
> +            return -EINVAL;
> +
> +        rc = rcu_lock_target_domain_by_id(a.domid, &d);
> +        if ( rc != 0 )
> +            return rc;
> +
> +        if ( op == HVMOP_set_param )
> +        {
> +            d->arch.hvm_domain.params[a.index] = a.value;
> +        }
> +        else
> +        {
> +            a.value = d->arch.hvm_domain.params[a.index];
> +            rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
> +        }
> +
> +        rcu_unlock_domain(d);
> +        break;
> +    }
> +
> +    default:
> +    {
> +        printk("%s: Bad HVM op %ld.\n", __func__, op);
> +        rc = -ENOSYS;
> +        break;
> +    }
> +    }
> +
> +    return rc;
> +}
> diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
> index ec74298..3900545 100644
> --- a/xen/arch/arm/traps.c
> +++ b/xen/arch/arm/traps.c
> @@ -430,6 +430,7 @@ static arm_hypercall_t *arm_hypercall_table[] = {
>      HYPERCALL(memory_op),
>      HYPERCALL(physdev_op),
>      HYPERCALL(sysctl),
> +    HYPERCALL(hvm_op),
>  };
>
>  static void do_debug_trap(struct cpu_user_regs *regs, unsigned int code)
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 2b14545..114a8f6 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -5,6 +5,7 @@
>  #include <xen/cache.h>
>  #include <asm/page.h>
>  #include <asm/p2m.h>
> +#include <public/hvm/params.h>
>
>  /* Represents state corresponding to a block of 32 interrupts */
>  struct vgic_irq_rank {
> @@ -28,9 +29,15 @@ struct pending_irq
>      struct list_head lr_queue;
>  };
>
> +struct hvm_domain
> +{
> +    uint64_t params[HVM_NR_PARAMS];
> +} __cacheline_aligned;
> +
>  struct arch_domain
>  {
>      struct p2m_domain p2m;
> +    struct hvm_domain hvm_domain;
>
>      struct {
>          /*
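
FWIW, here is a rough sketch of what the guest-side consumer of these
params might look like, to illustrate why no start_info page is needed to
bootstrap them. struct xen_hvm_param and HVMOP_get_param are the existing
public interface; the HYPERVISOR_hvm_op stub is assumed (an ARM guest
doesn't have one yet) and get_hvm_param is just an invented helper name:

    #include <xen/interface/xen.h>         /* DOMID_SELF */
    #include <xen/interface/hvm/hvm_op.h>  /* HVMOP_get_param, xen_hvm_param */
    #include <xen/interface/hvm/params.h>  /* HVM_PARAM_* indices */

    /*
     * Fetch one of our own HVM params.  Assumes an ARM
     * HYPERVISOR_hvm_op hypercall stub exists in the guest.
     */
    static int get_hvm_param(uint32_t index, uint64_t *value)
    {
        struct xen_hvm_param xhv = {
            .domid = DOMID_SELF,
            .index = index,
        };
        int rc = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);

        if (rc < 0)
            return rc;
        *value = xhv.value;
        return 0;
    }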
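
And if we do want the SIF_* flags, one purely hypothetical option (rather
than resurrecting start_info just for them) would be to reserve a param to
carry them. HVM_PARAM_SIF and its index are invented here for illustration;
the toolstack would set it via HVMOP_set_param at domain build time, and
SIF_INITIAL_DOMAIN itself comes from public/xen.h:

    /*
     * Invented for illustration only -- a real patch would claim the
     * next free index in public/hvm/params.h.
     */
    #define HVM_PARAM_SIF 34

    static bool is_initial_domain(void)
    {
        uint64_t sif = 0;

        if (get_hvm_param(HVM_PARAM_SIF, &sif) < 0)
            return false;   /* param never set: assume plain domU */
        return sif & SIF_INITIAL_DOMAIN;
    }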