Re: [Xen-devel] [RFC PATCH 4/5] ioreq-server: add support for multiple servers
> -----Original Message-----
[snip]
> > +
> > + if ( max_emulators < 1 )
> > + goto error_out;
>
> Is there a sane upper bound for emulators?
>
I imagine there probably needs to be. I haven't worked it out yet, but no doubt
it will be the point at which the special pages start to run into something else.
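Something along these lines in setup_guest() is probably what's needed (just a
sketch; MAX_EMULATORS is a placeholder constant that would need to be derived
from wherever the special page range tops out):

    #define MAX_EMULATORS 8 /* placeholder; the real limit is TBD */

        /* Each emulator costs two special pages (sync + buffered ioreq),
         * so bound the count before sizing NR_SPECIAL_PAGES(). */
        if ( max_emulators < 1 || max_emulators > MAX_EMULATORS )
            goto error_out;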
> >
> > if ( nr_pages > target_pages )
> > pod_mode = XENMEMF_populate_on_demand;
> > @@ -458,7 +463,8 @@ static int setup_guest(xc_interface *xch,
> > xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
> > HVM_INFO_PFN)) == NULL )
> > goto error_out;
> > - build_hvm_info(hvm_info_page, v_end, mmio_start, mmio_size);
> > + build_hvm_info(hvm_info_page, v_end, mmio_start, mmio_size,
> > + max_emulators);
> > munmap(hvm_info_page, PAGE_SIZE);
> >
> > /* Allocate and clear special pages. */
> > @@ -470,17 +476,18 @@ static int setup_guest(xc_interface *xch,
> > " STORE: %"PRI_xen_pfn"\n"
> > " IDENT_PT: %"PRI_xen_pfn"\n"
> > " CONSOLE: %"PRI_xen_pfn"\n"
> > - " IOREQ: %"PRI_xen_pfn"\n",
> > - NR_SPECIAL_PAGES,
> > + " IOREQ(%02d): %"PRI_xen_pfn"\n",
> > + NR_SPECIAL_PAGES(max_emulators),
> > (xen_pfn_t)special_pfn(SPECIALPAGE_PAGING),
> > (xen_pfn_t)special_pfn(SPECIALPAGE_ACCESS),
> > (xen_pfn_t)special_pfn(SPECIALPAGE_SHARING),
> > (xen_pfn_t)special_pfn(SPECIALPAGE_XENSTORE),
> > (xen_pfn_t)special_pfn(SPECIALPAGE_IDENT_PT),
> > (xen_pfn_t)special_pfn(SPECIALPAGE_CONSOLE),
> > + max_emulators * 2,
> > (xen_pfn_t)special_pfn(SPECIALPAGE_IOREQ));
> >
> > - for ( i = 0; i < NR_SPECIAL_PAGES; i++ )
> > + for ( i = 0; i < NR_SPECIAL_PAGES(max_emulators); i++ )
> > {
> > xen_pfn_t pfn = special_pfn(i);
> > rc = xc_domain_populate_physmap_exact(xch, dom, 1, 0, 0, &pfn);
> > @@ -506,7 +513,9 @@ static int setup_guest(xc_interface *xch,
> > xc_set_hvm_param(xch, dom, HVM_PARAM_IOREQ_PFN,
> > special_pfn(SPECIALPAGE_IOREQ));
> > xc_set_hvm_param(xch, dom, HVM_PARAM_BUFIOREQ_PFN,
> > - special_pfn(SPECIALPAGE_IOREQ) - 1);
> > + special_pfn(SPECIALPAGE_IOREQ) - max_emulators);
> > + xc_set_hvm_param(xch, dom, HVM_PARAM_NR_IOREQ_SERVERS,
> > + max_emulators);
> >
> > /*
> > * Identity-map page table is required for running with CR0.PG=0 when
> > diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
> > index 13f816b..142aaea 100644
> > --- a/tools/libxc/xenctrl.h
> > +++ b/tools/libxc/xenctrl.h
> > @@ -1801,6 +1801,47 @@ void xc_clear_last_error(xc_interface *xch);
> > int xc_set_hvm_param(xc_interface *handle, domid_t dom, int param, unsigned long value);
> > int xc_get_hvm_param(xc_interface *handle, domid_t dom, int param, unsigned long *value);
> >
> > +/*
> > + * IOREQ server API
> > + */
> > +int xc_hvm_create_ioreq_server(xc_interface *xch,
> > + domid_t domid,
> > + ioservid_t *id);
> > +
> > +int xc_hvm_get_ioreq_server_info(xc_interface *xch,
> > + domid_t domid,
> > + ioservid_t id,
> > + xen_pfn_t *pfn,
> > + xen_pfn_t *buf_pfn,
> > + evtchn_port_t *buf_port);
> > +
> > +int xc_hvm_map_io_range_to_ioreq_server(xc_interface *xch,
> > + domid_t domid,
> > + ioservid_t id,
> > + int is_mmio,
> > + uint64_t start,
> > + uint64_t end);
> > +
> > +int xc_hvm_unmap_io_range_from_ioreq_server(xc_interface *xch,
> > + domid_t domid,
> > + ioservid_t id,
> > + int is_mmio,
> > + uint64_t start);
> > +
> > +int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch,
> > + domid_t domid,
> > + ioservid_t id,
> > + uint16_t bdf);
> > +
> > +int xc_hvm_unmap_pcidev_from_ioreq_server(xc_interface *xch,
> > + domid_t domid,
> > + ioservid_t id,
> > + uint16_t bdf);
> > +
> > +int xc_hvm_destroy_ioreq_server(xc_interface *xch,
> > + domid_t domid,
> > + ioservid_t id);
> > +
>
> There are tab/space issues in this hunk.
>
So there are. Probably some missing emacs boilerplate.
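FWIW, the calling sequence I have in mind for a secondary emulator is roughly
as follows (sketch only; the port range and BDF are illustrative, and I'm
assuming the usual (bus << 8 | dev << 3 | func) BDF encoding):

    ioservid_t id;
    xen_pfn_t ioreq_pfn, bufioreq_pfn;
    evtchn_port_t bufioreq_port;

    if ( xc_hvm_create_ioreq_server(xch, domid, &id) < 0 )
        return -1;

    /* Find out which special pages to map and which event channel
     * signals the buffered ring. */
    if ( xc_hvm_get_ioreq_server_info(xch, domid, id, &ioreq_pfn,
                                      &bufioreq_pfn, &bufioreq_port) < 0 )
        goto fail;

    /* Claim the port I/O range and PCI device this emulator handles. */
    if ( xc_hvm_map_io_range_to_ioreq_server(xch, domid, id, 0 /* port I/O */,
                                             0xc000, 0xc00f) < 0 ||
         xc_hvm_map_pcidev_to_ioreq_server(xch, domid, id,
                                           0x0028 /* 00:05.0 */) < 0 )
        goto fail;

    /* ... service requests until the device model shuts down ... */

    xc_hvm_destroy_ioreq_server(xch, domid, id);
    return 0;

 fail:
    xc_hvm_destroy_ioreq_server(xch, domid, id);
    return -1;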
[snip]
> > + case HVM_PARAM_NR_IOREQ_SERVERS:
> > + if ( d == current->domain )
> > + rc = -EPERM;
> > + break;
>
> Is this correct? Security-wise, it should be restricted more.
>
> Having said that, I can't see anything good to come from being able to
> change this value on the fly. Is it possible to make it a domain creation
> parameter?
>
I don't know. Maybe we can have one-time settable params? The other 'legacy'
ioreq params seem quite insecure too.
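i.e. something like the following in the set path would make it write-once
(sketch only; assumes d->arch.hvm_domain.params[] is still what backs the
param on the get side):

        case HVM_PARAM_NR_IOREQ_SERVERS:
            if ( d == current->domain )
                rc = -EPERM;
            /* Hypothetical one-shot rule: reject any second write. */
            else if ( d->arch.hvm_domain.params[a.index] != 0 )
                rc = -EEXIST;
            break;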
> > }
> >
> > if ( rc == 0 )
> > @@ -4483,7 +5251,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
> > case HVM_PARAM_BUFIOREQ_PFN:
> > case HVM_PARAM_BUFIOREQ_EVTCHN:
> > /* May need to create server */
> > - rc = hvm_create_ioreq_server(d, curr_d->domain_id);
> > + rc = hvm_create_ioreq_server(d, 0, curr_d->domain_id);
> > if ( rc != 0 && rc != -EEXIST )
> > goto param_fail;
> >
> > @@ -4492,7 +5260,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
> > case HVM_PARAM_IOREQ_PFN: {
> > xen_pfn_t pfn;
> >
> > - if ( (rc = hvm_get_ioreq_server_pfn(d, 0, &pfn)) < 0 )
> > + if ( (rc = hvm_get_ioreq_server_pfn(d, 0, 0, &pfn)) < 0 )
> > goto param_fail;
> >
> > a.value = pfn;
> > @@ -4501,7 +5269,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
> > case HVM_PARAM_BUFIOREQ_PFN: {
> > xen_pfn_t pfn;
> >
> > - if ( (rc = hvm_get_ioreq_server_pfn(d, 1, &pfn)) < 0 )
> > + if ( (rc = hvm_get_ioreq_server_pfn(d, 0, 1, &pfn)) < 0 )
> > goto param_fail;
> >
> > a.value = pfn;
> > @@ -4510,7 +5278,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
> > case HVM_PARAM_BUFIOREQ_EVTCHN: {
> > evtchn_port_t port;
> >
> > - if ( (rc = hvm_get_ioreq_server_buf_port(d, &port)) < 0 )
> > + if ( (rc = hvm_get_ioreq_server_buf_port(d, 0, &port)) < 0 )
> > goto param_fail;
> >
> > a.value = port;
> > diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
> > index 576641c..a0d76b2 100644
> > --- a/xen/arch/x86/hvm/io.c
> > +++ b/xen/arch/x86/hvm/io.c
> > @@ -78,7 +78,7 @@ void send_invalidate_req(void)
> > p->dir = IOREQ_WRITE;
> > p->data = ~0UL; /* flush all */
> >
> > - (void)hvm_send_assist_req(v, p);
> > + hvm_broadcast_assist_req(v, p);
> > }
> >
> > int handle_mmio(void)
> > diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
> > index e750ef0..93dcec1 100644
> > --- a/xen/include/asm-x86/hvm/domain.h
> > +++ b/xen/include/asm-x86/hvm/domain.h
> > @@ -41,19 +41,38 @@ struct hvm_ioreq_page {
> > void *va;
> > };
> >
> > +struct hvm_io_range {
> > + struct hvm_io_range *next;
> > + uint64_t start, end;
> > +};
> > +
> > +struct hvm_pcidev {
> > + struct hvm_pcidev *next;
> > + uint16_t bdf;
> > +};
> > +
> > struct hvm_ioreq_server {
> > + struct list_head domain_list_entry;
> > + struct list_head vcpu_list_entry[MAX_HVM_VCPUS];
>
> Given that this has to be initialised anyway, would it be better to have
> it dynamically sized on d->max_vcpus, which is almost always far
> smaller?
>
Can vcpu ids be sparse? If not, then that would seem fine.
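If they can't be sparse, then something like this at server creation time
would do (sketch only; field placement and error handling elided):

    /* In struct hvm_ioreq_server, replace the fixed array ... */
    struct list_head *vcpu_list_entry;   /* d->max_vcpus entries */

    /* ... and size it when the server is created. */
    s->vcpu_list_entry = xmalloc_array(struct list_head, d->max_vcpus);
    if ( !s->vcpu_list_entry )
        return -ENOMEM;
    for ( i = 0; i < d->max_vcpus; i++ )
        INIT_LIST_HEAD(&s->vcpu_list_entry[i]);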
Paul
> ~Andrew
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel