Re: [Xen-devel] [patch] 32/64-bit hypercall interface revisited
On Fri, 2006-04-14 at 16:15 -0500, Hollis Blanchard wrote:
>
> Third, the tools need to treat GUEST_HANDLEs specially.[4] The attached
> patch does this; I have a ppc32 app successfully making
> DOM0_GETVCPUCONTEXT hcalls to a ppc64 Xen. The patch has only been
> compile-tested on x86-32, so I would appreciate it if people could try
> it out on x86.

I noticed that I'd forgotten the corresponding ia64 and x86_64 changes,
so this is the updated patch. Note that this overlaps with the frameno_t
patch I just sent out; if both are acceptable, I would prefer that one
goes in first and I can update and resend this one.

--
Hollis Blanchard
IBM Linux Technology Center

Introduce wrappers to access GUEST_HANDLES in libxc.

Signed-off-by: Hollis Blanchard <hollisb@xxxxxxxxxx>

diff -r 56e1802303e5 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Wed Apr 12 15:41:55 2006 -0500
+++ b/tools/libxc/xc_domain.c   Wed Apr 19 12:53:37 2006 -0500
@@ -171,7 +171,7 @@ int xc_domain_getinfolist(int xc_handle,
     op.cmd = DOM0_GETDOMAININFOLIST;
     op.u.getdomaininfolist.first_domain = first_domain;
     op.u.getdomaininfolist.max_domains = max_domains;
-    op.u.getdomaininfolist.buffer = info;
+    SET_HANDLE(op.u.getdomaininfolist.buffer, info);

     if ( xc_dom0_op(xc_handle, &op) < 0 )
         ret = -1;
@@ -195,7 +195,7 @@ int xc_vcpu_getcontext(int xc_handle,
     op.cmd = DOM0_GETVCPUCONTEXT;
     op.u.getvcpucontext.domain = (domid_t)domid;
     op.u.getvcpucontext.vcpu = (uint16_t)vcpu;
-    op.u.getvcpucontext.ctxt = ctxt;
+    SET_HANDLE(op.u.getvcpucontext.ctxt, ctxt);

     if ( (rc = mlock(ctxt, sizeof(*ctxt))) != 0 )
         return rc;
@@ -220,7 +220,7 @@ int xc_shadow_control(int xc_handle,
     op.cmd = DOM0_SHADOW_CONTROL;
     op.u.shadow_control.domain = (domid_t)domid;
     op.u.shadow_control.op = sop;
-    op.u.shadow_control.dirty_bitmap = dirty_bitmap;
+    SET_HANDLE(op.u.shadow_control.dirty_bitmap, dirty_bitmap);
     op.u.shadow_control.pages = pages;

     rc = do_dom0_op(xc_handle, &op);
@@ -295,12 +295,12 @@ int xc_domain_memory_increase_reservatio
 {
     int err;
     struct xen_memory_reservation reservation = {
-        .extent_start = extent_start, /* may be NULL */
         .nr_extents = nr_extents,
         .extent_order = extent_order,
         .address_bits = address_bits,
         .domid = domid
     };
+    SET_HANDLE(reservation.extent_start, extent_start); /* may be NULL */

     err = xc_memory_op(xc_handle, XENMEM_increase_reservation, &reservation);
     if ( err == nr_extents )
@@ -326,12 +326,12 @@ int xc_domain_memory_decrease_reservatio
 {
     int err;
     struct xen_memory_reservation reservation = {
-        .extent_start = extent_start,
         .nr_extents = nr_extents,
         .extent_order = extent_order,
         .address_bits = 0,
         .domid = domid
     };
+    SET_HANDLE(reservation.extent_start, extent_start); /* may be NULL */

     if ( extent_start == NULL )
     {
@@ -364,12 +364,12 @@ int xc_domain_memory_populate_physmap(in
 {
     int err;
     struct xen_memory_reservation reservation = {
-        .extent_start = extent_start,
         .nr_extents = nr_extents,
         .extent_order = extent_order,
         .address_bits = address_bits,
         .domid = domid
     };
+    SET_HANDLE(reservation.extent_start, extent_start); /* may be NULL */

     err = xc_memory_op(xc_handle, XENMEM_populate_physmap, &reservation);
     if ( err == nr_extents )
@@ -395,9 +395,9 @@ int xc_domain_translate_gpfn_list(int xc
     struct xen_translate_gpfn_list op = {
         .domid = domid,
         .nr_gpfns = nr_gpfns,
-        .gpfn_list = gpfn_list,
-        .mfn_list = mfn_list
     };
+    SET_HANDLE(op.gpfn_list, gpfn_list);
+    SET_HANDLE(op.mfn_list, mfn_list);

     return xc_memory_op(xc_handle, XENMEM_translate_gpfn_list, &op);
 }
@@ -467,7 +467,7 @@ int xc_vcpu_setcontext(int xc_handle,
     op.cmd = DOM0_SETVCPUCONTEXT;
     op.u.setvcpucontext.domain = domid;
     op.u.setvcpucontext.vcpu = vcpu;
-    op.u.setvcpucontext.ctxt = ctxt;
+    SET_HANDLE(op.u.setvcpucontext.ctxt, ctxt);

     if ( (rc = mlock(ctxt, sizeof(*ctxt))) != 0 )
         return rc;
diff -r 56e1802303e5 tools/libxc/xc_hvm_build.c
--- a/tools/libxc/xc_hvm_build.c   Wed Apr 12 15:41:55 2006 -0500
+++ b/tools/libxc/xc_hvm_build.c   Wed Apr 19 12:53:37 2006 -0500
@@ -440,7 +440,7 @@ static int xc_hvm_build_internal(int xc_

     launch_op.u.setvcpucontext.domain = (domid_t)domid;
     launch_op.u.setvcpucontext.vcpu = 0;
-    launch_op.u.setvcpucontext.ctxt = ctxt;
+    SET_HANDLE(launch_op.u.setvcpucontext.ctxt, ctxt);

     launch_op.cmd = DOM0_SETVCPUCONTEXT;
     rc = xc_dom0_op(xc_handle, &launch_op);
diff -r 56e1802303e5 tools/libxc/xc_linux_restore.c
--- a/tools/libxc/xc_linux_restore.c   Wed Apr 12 15:41:55 2006 -0500
+++ b/tools/libxc/xc_linux_restore.c   Wed Apr 19 12:53:37 2006 -0500
@@ -583,11 +583,11 @@ int xc_linux_restore(int xc_handle, int

     if (count > 0) {
         struct xen_memory_reservation reservation = {
-            .extent_start = pfntab,
             .nr_extents = count,
             .extent_order = 0,
             .domid = dom
         };
+        SET_HANDLE(reservation.extent_start, pfntab);

         if ((rc = xc_memory_op(xc_handle, XENMEM_decrease_reservation,
                                &reservation)) != count) {
@@ -727,7 +727,7 @@ int xc_linux_restore(int xc_handle, int
     op.cmd = DOM0_SETVCPUCONTEXT;
     op.u.setvcpucontext.domain = (domid_t)dom;
     op.u.setvcpucontext.vcpu = 0;
-    op.u.setvcpucontext.ctxt = &ctxt;
+    SET_HANDLE(op.u.setvcpucontext.ctxt, &ctxt);

     rc = xc_dom0_op(xc_handle, &op);
     if (rc != 0) {
diff -r 56e1802303e5 tools/libxc/xc_linux_save.c
--- a/tools/libxc/xc_linux_save.c   Wed Apr 12 15:41:55 2006 -0500
+++ b/tools/libxc/xc_linux_save.c   Wed Apr 19 12:53:37 2006 -0500
@@ -509,16 +509,18 @@ static unsigned long *xc_map_m2p(int xc_
     privcmd_mmap_entry_t *entries;
     unsigned long m2p_chunks, m2p_size;
     unsigned long *m2p;
+    unsigned long *extent_start;
     int i, rc;

     m2p_size = M2P_SIZE(max_mfn);
     m2p_chunks = M2P_CHUNKS(max_mfn);

     xmml.max_extents = m2p_chunks;
-    if (!(xmml.extent_start = malloc(m2p_chunks * sizeof(unsigned long)))) {
+    if (!(extent_start = malloc(m2p_chunks * sizeof(unsigned long)))) {
         ERR("failed to allocate space for m2p mfns");
         return NULL;
     }
+    SET_HANDLE(xmml.extent_start, extent_start);

     if (xc_memory_op(xc_handle, XENMEM_machphys_mfn_list, &xmml) ||
         (xmml.nr_extents != m2p_chunks)) {
@@ -543,7 +545,7 @@ static unsigned long *xc_map_m2p(int xc_

     for (i=0; i < m2p_chunks; i++) {
         entries[i].va = (unsigned long)(((void *)m2p) + (i * M2P_CHUNK_SIZE));
-        entries[i].mfn = xmml.extent_start[i];
+        entries[i].mfn = extent_start[i];
         entries[i].npages = M2P_CHUNK_SIZE >> PAGE_SHIFT;
     }
@@ -552,7 +554,7 @@ static unsigned long *xc_map_m2p(int xc_
         return NULL;
     }

-    free(xmml.extent_start);
+    free(extent_start);
     free(entries);

     return m2p;
diff -r 56e1802303e5 tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c   Wed Apr 12 15:41:55 2006 -0500
+++ b/tools/libxc/xc_misc.c   Wed Apr 19 12:53:37 2006 -0500
@@ -30,7 +30,7 @@ int xc_readconsolering(int xc_handle,
     unsigned int nr_chars = *pnr_chars;

     op.cmd = DOM0_READCONSOLE;
-    op.u.readconsole.buffer = buffer;
+    SET_HANDLE(op.u.readconsole.buffer, buffer);
     op.u.readconsole.count = nr_chars;
     op.u.readconsole.clear = clear;
@@ -39,7 +39,7 @@ int xc_readconsolering(int xc_handle,

     if ( (ret = do_dom0_op(xc_handle, &op)) == 0 )
     {
-        *pbuffer = op.u.readconsole.buffer;
+        GET_HANDLE(*pbuffer, op.u.readconsole.buffer);
         *pnr_chars = op.u.readconsole.count;
     }
@@ -91,7 +91,7 @@ int xc_perfc_control(int xc_handle,
     op.cmd = DOM0_PERFCCONTROL;
     op.u.perfccontrol.op = opcode;
-    op.u.perfccontrol.desc = desc;
+    SET_HANDLE(op.u.perfccontrol.desc, desc);

     rc = do_dom0_op(xc_handle, &op);
diff -r 56e1802303e5 tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c   Wed Apr 12 15:41:55 2006 -0500
+++ b/tools/libxc/xc_private.c   Wed Apr 19 12:53:37 2006 -0500
@@ -71,7 +71,7 @@ int xc_get_pfn_type_batch(int xc_handle,
     op.cmd = DOM0_GETPAGEFRAMEINFO2;
     op.u.getpageframeinfo2.domain = (domid_t)dom;
     op.u.getpageframeinfo2.num = num;
-    op.u.getpageframeinfo2.array = arr;
+    SET_HANDLE(op.u.getpageframeinfo2.array, arr);

     return do_dom0_op(xc_handle, &op);
 }
@@ -191,6 +191,9 @@ int xc_memory_op(int xc_handle,
     struct xen_memory_reservation *reservation = arg;
     struct xen_machphys_mfn_list *xmml = arg;
     struct xen_translate_gpfn_list *trans = arg;
+    unsigned long *extent_start;
+    unsigned long *gpfn_list;
+    unsigned long *mfn_list;
     long ret = -EINVAL;

     hypercall.op = __HYPERVISOR_memory_op;
@@ -207,8 +210,9 @@ int xc_memory_op(int xc_handle,
            PERROR("Could not mlock");
            goto out1;
        }
-        if ( (reservation->extent_start != NULL) &&
-             (mlock(reservation->extent_start,
+        GET_HANDLE(extent_start, reservation->extent_start);
+        if ( (extent_start != NULL) &&
+             (mlock(extent_start,
                     reservation->nr_extents * sizeof(unsigned long)) != 0) )
        {
            PERROR("Could not mlock");
@@ -222,7 +226,8 @@ int xc_memory_op(int xc_handle,
            PERROR("Could not mlock");
            goto out1;
        }
-        if ( mlock(xmml->extent_start,
+        GET_HANDLE(extent_start, reservation->extent_start);
+        if ( mlock(extent_start,
                    xmml->max_extents * sizeof(unsigned long)) != 0 )
        {
            PERROR("Could not mlock");
@@ -243,16 +248,18 @@ int xc_memory_op(int xc_handle,
            PERROR("Could not mlock");
            goto out1;
        }
-        if ( mlock(trans->gpfn_list, trans->nr_gpfns * sizeof(long)) != 0 )
+        GET_HANDLE(gpfn_list, trans->gpfn_list);
+        if ( mlock(gpfn_list, trans->nr_gpfns * sizeof(long)) != 0 )
        {
            PERROR("Could not mlock");
            safe_munlock(trans, sizeof(*trans));
            goto out1;
        }
-        if ( mlock(trans->mfn_list, trans->nr_gpfns * sizeof(long)) != 0 )
-        {
-            PERROR("Could not mlock");
-            safe_munlock(trans->gpfn_list, trans->nr_gpfns * sizeof(long));
+        GET_HANDLE(mfn_list, trans->mfn_list);
+        if ( mlock(mfn_list, trans->nr_gpfns * sizeof(long)) != 0 )
+        {
+            PERROR("Could not mlock");
+            safe_munlock(gpfn_list, trans->nr_gpfns * sizeof(long));
            safe_munlock(trans, sizeof(*trans));
            goto out1;
        }
@@ -267,21 +274,25 @@ int xc_memory_op(int xc_handle,
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        safe_munlock(reservation, sizeof(*reservation));
-        if ( reservation->extent_start != NULL )
-            safe_munlock(reservation->extent_start,
+        GET_HANDLE(extent_start, reservation->extent_start);
+        if ( extent_start != NULL )
+            safe_munlock(extent_start,
                          reservation->nr_extents * sizeof(unsigned long));
        break;
    case XENMEM_machphys_mfn_list:
        safe_munlock(xmml, sizeof(*xmml));
-        safe_munlock(xmml->extent_start,
+        GET_HANDLE(extent_start, reservation->extent_start);
+        safe_munlock(extent_start,
                     xmml->max_extents * sizeof(unsigned long));
        break;
    case XENMEM_add_to_physmap:
        safe_munlock(arg, sizeof(struct xen_add_to_physmap));
        break;
    case XENMEM_translate_gpfn_list:
-        safe_munlock(trans->mfn_list, trans->nr_gpfns * sizeof(long));
-        safe_munlock(trans->gpfn_list, trans->nr_gpfns * sizeof(long));
+        GET_HANDLE(mfn_list, trans->mfn_list);
+        safe_munlock(mfn_list, trans->nr_gpfns * sizeof(long));
+        GET_HANDLE(gpfn_list, trans->gpfn_list);
+        safe_munlock(gpfn_list, trans->nr_gpfns * sizeof(long));
        safe_munlock(trans, sizeof(*trans));
        break;
    }
@@ -317,7 +328,7 @@ int xc_get_pfn_list(int xc_handle,
     op.cmd = DOM0_GETMEMLIST;
     op.u.getmemlist.domain = (domid_t)domid;
     op.u.getmemlist.max_pfns = max_pfns;
-    op.u.getmemlist.buffer = pfn_buf;
+    SET_HANDLE(op.u.getmemlist.buffer, pfn_buf);

 #ifdef VALGRIND
     memset(pfn_buf, 0, max_pfns * sizeof(unsigned long));
diff -r 56e1802303e5 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h   Wed Apr 12 15:41:55 2006 -0500
+++ b/xen/include/public/arch-ia64.h   Wed Apr 19 12:53:37 2006 -0500
@@ -7,16 +7,13 @@
 #ifndef __HYPERVISOR_IF_IA64_H__
 #define __HYPERVISOR_IF_IA64_H__

-#ifdef __XEN__
 #define __DEFINE_GUEST_HANDLE(name, type) \
     typedef struct { type *p; } __guest_handle_ ## name
-#else
-#define __DEFINE_GUEST_HANDLE(name, type) \
-    typedef type * __guest_handle_ ## name
-#endif

 #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
 #define GUEST_HANDLE(name) __guest_handle_ ## name
+#define SET_HANDLE(hnd, val) do { (hnd).p = val; } while (0)
+#define GET_HANDLE(val, hnd) do { val = (hnd).p; } while (0)

 #ifndef __ASSEMBLY__
 /* Guest handles for primitive C types. */
diff -r 56e1802303e5 xen/include/public/arch-x86_32.h
--- a/xen/include/public/arch-x86_32.h   Wed Apr 12 15:41:55 2006 -0500
+++ b/xen/include/public/arch-x86_32.h   Wed Apr 19 12:53:37 2006 -0500
@@ -9,16 +9,13 @@
 #ifndef __XEN_PUBLIC_ARCH_X86_32_H__
 #define __XEN_PUBLIC_ARCH_X86_32_H__

-#ifdef __XEN__
 #define __DEFINE_GUEST_HANDLE(name, type) \
     typedef struct { type *p; } __guest_handle_ ## name
-#else
-#define __DEFINE_GUEST_HANDLE(name, type) \
-    typedef type * __guest_handle_ ## name
-#endif

 #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
 #define GUEST_HANDLE(name) __guest_handle_ ## name
+#define SET_HANDLE(hnd, val) do { (hnd).p = val; } while (0)
+#define GET_HANDLE(val, hnd) do { val = (hnd).p; } while (0)

 #ifndef __ASSEMBLY__
 /* Guest handles for primitive C types. */
diff -r 56e1802303e5 xen/include/public/arch-x86_64.h
--- a/xen/include/public/arch-x86_64.h   Wed Apr 12 15:41:55 2006 -0500
+++ b/xen/include/public/arch-x86_64.h   Wed Apr 19 12:53:37 2006 -0500
@@ -9,16 +9,13 @@
 #ifndef __XEN_PUBLIC_ARCH_X86_64_H__
 #define __XEN_PUBLIC_ARCH_X86_64_H__

-#ifdef __XEN__
 #define __DEFINE_GUEST_HANDLE(name, type) \
     typedef struct { type *p; } __guest_handle_ ## name
-#else
-#define __DEFINE_GUEST_HANDLE(name, type) \
-    typedef type * __guest_handle_ ## name
-#endif

 #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
 #define GUEST_HANDLE(name) __guest_handle_ ## name
+#define SET_HANDLE(hnd, val) do { (hnd).p = val; } while (0)
+#define GET_HANDLE(val, hnd) do { val = (hnd).p; } while (0)

 #ifndef __ASSEMBLY__
 /* Guest handles for primitive C types. */
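
For anyone who has not followed the earlier GUEST_HANDLE discussion, here is a
minimal, self-contained sketch of how the wrapper type and the new
SET_HANDLE/GET_HANDLE accessors fit together. The macro definitions mirror the
ones added to the public headers above; the demo_op structure and main() are
invented purely for illustration and do not exist in Xen or libxc.

/* Standalone sketch, not part of the patch: exercises the GUEST_HANDLE
 * wrapper and the SET_HANDLE/GET_HANDLE accessors with a made-up struct. */
#include <stdio.h>

#define __DEFINE_GUEST_HANDLE(name, type) \
    typedef struct { type *p; } __guest_handle_ ## name
#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
#define GUEST_HANDLE(name) __guest_handle_ ## name
#define SET_HANDLE(hnd, val) do { (hnd).p = val; } while (0)
#define GET_HANDLE(val, hnd) do { val = (hnd).p; } while (0)

__DEFINE_GUEST_HANDLE(ulong, unsigned long);

/* Hypothetical hypercall argument struct, standing in for something like
 * xen_memory_reservation: the buffer is carried as a handle, not as a
 * raw pointer, so the tools go through the accessor macros. */
struct demo_op {
    GUEST_HANDLE(ulong) buffer;
    unsigned int nr_entries;
};

int main(void)
{
    unsigned long pfns[4] = { 1, 2, 3, 4 };
    unsigned long *raw;
    struct demo_op op = { .nr_entries = 4 };

    SET_HANDLE(op.buffer, pfns);   /* tools side: wrap the pointer */
    GET_HANDLE(raw, op.buffer);    /* unwrap, e.g. before mlock()ing it */

    printf("first entry via handle: %lu\n", raw[0]);
    return 0;
}

Because the handle is a one-member struct rather than a bare pointer, a direct
assignment such as op.u.getmemlist.buffer = pfn_buf no longer compiles, which
is what pushes every call site in the tools through the accessor macros.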