[Xen-devel] [QEMU][RFC V2 04/10] xen-hvm: register qemu as ioreq server and retrieve shared pages
With QEMU disaggregation in a Xen environment, each QEMU needs to ask Xen
for an ioreq server id. This id will be used to retrieve its private shared
pages.

Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>
---
 xen-all.c |   80 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 76 insertions(+), 4 deletions(-)

diff --git a/xen-all.c b/xen-all.c
index df6927d..5f05838 100644
--- a/xen-all.c
+++ b/xen-all.c
@@ -36,6 +36,7 @@
 static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
 static MemoryRegion *framebuffer;
+static unsigned int serverid;
 static uint32_t xen_dmid = ~0;
 
 /* Compatibility with older version */
@@ -64,6 +65,67 @@ static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
 #define HVM_PARAM_BUFIOREQ_EVTCHN 26
 #endif
 
+#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040300
+static inline unsigned long xen_buffered_iopage(void)
+{
+    unsigned long pfn;
+
+    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &pfn);
+
+    return pfn;
+}
+
+static inline unsigned long xen_iopage(void)
+{
+    unsigned long pfn;
+
+    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &pfn);
+
+    return pfn;
+}
+
+static inline evtchn_port_or_error_t xen_buffered_channel(void)
+{
+    unsigned long evtchn;
+    int rc;
+
+    rc = xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
+                          &evtchn);
+
+    if (rc < 0) {
+        return rc;
+    } else {
+        return evtchn;
+    }
+}
+#else
+static inline unsigned long xen_buffered_iopage(void)
+{
+    unsigned long pfn;
+
+    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IO_PFN_FIRST, &pfn);
+    pfn += (serverid - 1) * 2 + 2;
+
+    return pfn;
+}
+
+static inline unsigned long xen_iopage(void)
+{
+    unsigned long pfn;
+
+    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IO_PFN_FIRST, &pfn);
+    pfn += (serverid - 1) * 2 + 1;
+
+    return pfn;
+}
+
+static inline evtchn_port_or_error_t xen_buffered_channel(void)
+{
+    return xc_hvm_get_ioreq_server_buf_channel(xen_xc, xen_domid, serverid);
+}
+
+#endif
+
 #define BUFFER_IO_MAX_DELAY  100
 
 typedef struct XenPhysmap {
@@ -1112,7 +1174,15 @@ int xen_hvm_init(void)
     state->suspend.notify = xen_suspend_notifier;
     qemu_register_suspend_notifier(&state->suspend);
 
-    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
+    rc = xen_xc_hvm_register_ioreq_server(xen_xc, xen_domid);
+
+    if (rc < 0) {
+        hw_error("registering ioreq server returned error %d", rc);
+    }
+
+    serverid = rc;
+
+    ioreq_pfn = xen_iopage();
     DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
     state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                               PROT_READ|PROT_WRITE, ioreq_pfn);
@@ -1121,7 +1191,7 @@
                  errno, xen_xc);
     }
 
-    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
+    ioreq_pfn = xen_buffered_iopage();
     DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn);
     state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                                    PROT_READ|PROT_WRITE, ioreq_pfn);
@@ -1142,12 +1212,14 @@
         state->ioreq_local_port[i] = rc;
     }
 
-    rc = xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
-                          &bufioreq_evtchn);
+    rc = xen_buffered_channel();
     if (rc < 0) {
         fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
     }
+
+    bufioreq_evtchn = rc;
+
     rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                     (uint32_t)bufioreq_evtchn);
     if (rc == -1) {
-- 
Julien Grall
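[Editor's note] For readers new to the series, here is a minimal, self-contained
sketch (not part of the patch) of the flow xen_hvm_init() follows on the new
interface version: register an ioreq server, then derive and map that server's
synchronous ioreq page. The helper name map_my_ioreq_page() is made up for
illustration; xen_xc_hvm_register_ioreq_server() and HVM_PARAM_IO_PFN_FIRST are
introduced elsewhere in this patch series, everything else is stock libxenctrl.

/* Sketch only: mirrors the >= 0x00040300 path in the patch above. Assumes
 * the series' xen_xc_hvm_register_ioreq_server() and HVM_PARAM_IO_PFN_FIRST
 * are available. */
#include <stddef.h>
#include <sys/mman.h>   /* PROT_READ, PROT_WRITE */
#include <xenctrl.h>

static void *map_my_ioreq_page(xc_interface *xc, uint32_t domid)
{
    unsigned long base;
    int serverid;

    /* Each QEMU asks Xen for its own ioreq server id (ids start at 1). */
    serverid = xen_xc_hvm_register_ioreq_server(xc, domid);
    if (serverid < 0) {
        return NULL;
    }

    /* A server owns two consecutive pages after HVM_PARAM_IO_PFN_FIRST:
     * first the synchronous ioreq page, then the buffered one. */
    if (xc_get_hvm_param(xc, domid, HVM_PARAM_IO_PFN_FIRST, &base) < 0) {
        return NULL;
    }

    return xc_map_foreign_range(xc, domid, XC_PAGE_SIZE,
                                PROT_READ | PROT_WRITE,
                                base + (serverid - 1) * 2 + 1);
}

The buffered io page would use base + (serverid - 1) * 2 + 2, matching
xen_buffered_iopage() in the patch.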