[Xen-devel] [PATCH v2 2/3] tools/libxc: Make the mem_access APIs generic
This patch does the following:
1. Add new xc_[sg]et_mem_access APIs.
2. Remove xc_hvm_[sg]et_mem_access() APIs.

Signed-off-by: Aravindh Puthiyaparambil <aravindp@xxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
Changes from version 1 of the patch:
1. Remove unused "gfn" from the parameter list of xc_mem_access_resume().
2. Use structure initialisation.
3. Write "access" back only in the case of do_memory_op returning success.
4. Fix formatting.

Changes from the RFC version of the patch:
1. Remove xc_mem_access_memop() wrapper.
2. Remove xc_hvm_[sg]et_mem_access() APIs.

(An illustrative usage sketch of the new APIs follows after the patch.)

diff --git a/tools/libxc/xc_mem_access.c b/tools/libxc/xc_mem_access.c
index a50c145..f436e69 100644
--- a/tools/libxc/xc_mem_access.c
+++ b/tools/libxc/xc_mem_access.c
@@ -22,7 +22,7 @@
  */

 #include "xc_private.h"
-
+#include <xen/memory.h>

 int xc_mem_access_enable(xc_interface *xch, domid_t domain_id,
                          uint32_t *port)
@@ -47,12 +47,54 @@ int xc_mem_access_disable(xc_interface *xch, domid_t domain_id)
                                 NULL);
 }

-int xc_mem_access_resume(xc_interface *xch, domid_t domain_id, unsigned long gfn)
+int xc_mem_access_resume(xc_interface *xch, domid_t domain_id)
+{
+    xen_mem_access_op_t mao =
+    {
+        .op    = XENMEM_access_op_resume,
+        .domid = domain_id
+    };
+
+    return do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
+}
+
+int xc_set_mem_access(xc_interface *xch,
+                      domid_t domain_id,
+                      xenmem_access_t access,
+                      uint64_t first_pfn,
+                      uint32_t nr)
 {
-    return xc_mem_event_memop(xch, domain_id,
-                              XENMEM_access_op_resume,
-                              XENMEM_access_op,
-                              gfn, NULL);
+    xen_mem_access_op_t mao =
+    {
+        .op     = XENMEM_access_op_set_access,
+        .domid  = domain_id,
+        .access = access,
+        .pfn    = first_pfn,
+        .nr     = nr
+    };
+
+    return do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
+}
+
+int xc_get_mem_access(xc_interface *xch,
+                      domid_t domain_id,
+                      uint64_t pfn,
+                      xenmem_access_t *access)
+{
+    int rc;
+    xen_mem_access_op_t mao =
+    {
+        .op    = XENMEM_access_op_get_access,
+        .domid = domain_id,
+        .pfn   = pfn
+    };
+
+    rc = do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
+
+    if ( rc == 0 )
+        *access = mao.access;
+
+    return rc;
 }

 /*
diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
index 3303454..4143de6 100644
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -593,67 +593,6 @@ int xc_hvm_set_mem_type(
     return rc;
 }

-int xc_hvm_set_mem_access(
-    xc_interface *xch, domid_t dom, hvmmem_access_t mem_access, uint64_t first_pfn, uint64_t nr)
-{
-    DECLARE_HYPERCALL;
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_mem_access, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_set_mem_access hypercall");
-        return -1;
-    }
-
-    arg->domid         = dom;
-    arg->hvmmem_access = mem_access;
-    arg->first_pfn     = first_pfn;
-    arg->nr            = nr;
-
-    hypercall.op     = __HYPERVISOR_hvm_op;
-    hypercall.arg[0] = HVMOP_set_mem_access;
-    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
-
-    rc = do_xen_hypercall(xch, &hypercall);
-
-    xc_hypercall_buffer_free(xch, arg);
-
-    return rc;
-}
-
-int xc_hvm_get_mem_access(
-    xc_interface *xch, domid_t dom, uint64_t pfn, hvmmem_access_t* mem_access)
-{
-    DECLARE_HYPERCALL;
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_get_mem_access, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_get_mem_access hypercall");
-        return -1;
-    }
-
-    arg->domid = dom;
-    arg->pfn   = pfn;
-
-    hypercall.op     = __HYPERVISOR_hvm_op;
-    hypercall.arg[0] = HVMOP_get_mem_access;
-    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
-
-    rc = do_xen_hypercall(xch, &hypercall);
-
-    if ( !rc )
-        *mem_access = arg->hvmmem_access;
-
-    xc_hypercall_buffer_free(xch, arg);
-
-    return rc;
-}
-
 int xc_hvm_inject_trap(
     xc_interface *xch, domid_t dom, int vcpu, uint32_t vector,
     uint32_t type, uint32_t error_code, uint32_t insn_len,
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index e3a32f2..02129f7 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1729,20 +1729,6 @@ int xc_hvm_set_mem_type(
     xc_interface *xch, domid_t dom, hvmmem_type_t memtype, uint64_t first_pfn, uint64_t nr);

 /*
- * Set a range of memory to a specific access.
- * Allowed types are HVMMEM_access_default, HVMMEM_access_n, any combination of
- * HVM_access_ + (rwx), and HVM_access_rx2rw
- */
-int xc_hvm_set_mem_access(
-    xc_interface *xch, domid_t dom, hvmmem_access_t memaccess, uint64_t first_pfn, uint64_t nr);
-
-/*
- * Gets the mem access for the given page (returned in memacess on success)
- */
-int xc_hvm_get_mem_access(
-    xc_interface *xch, domid_t dom, uint64_t pfn, hvmmem_access_t* memaccess);
-
-/*
  * Injects a hardware/software CPU trap, to take effect the next time the HVM
  * resumes.
  */
@@ -2059,8 +2045,22 @@ int xc_mem_paging_load(xc_interface *xch, domid_t domain_id,
  */
 int xc_mem_access_enable(xc_interface *xch, domid_t domain_id, uint32_t *port);
 int xc_mem_access_disable(xc_interface *xch, domid_t domain_id);
-int xc_mem_access_resume(xc_interface *xch, domid_t domain_id,
-                         unsigned long gfn);
+int xc_mem_access_resume(xc_interface *xch, domid_t domain_id);
+
+/*
+ * Set a range of memory to a specific access.
+ * Allowed types are XENMEM_access_default, XENMEM_access_n, any combination of
+ * XENMEM_access_ + (rwx), and XENMEM_access_rx2rw
+ */
+int xc_set_mem_access(xc_interface *xch, domid_t domain_id,
+                      xenmem_access_t access, uint64_t first_pfn,
+                      uint32_t nr);
+
+/*
+ * Gets the mem access for the given page (returned in access on success)
+ */
+int xc_get_mem_access(xc_interface *xch, domid_t domain_id,
+                      uint64_t pfn, xenmem_access_t *access);

 /***
  * Memory sharing operations.
-- 
1.8.3.2
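Not part of the patch itself: to make the reworked calling convention concrete, below is a minimal consumer sketch. The domain ID, starting pfn, and page count are hypothetical values chosen purely for illustration, and error handling is kept to a bare minimum.

/* Illustration only -- not part of the patch. Values are hypothetical. */
#include <inttypes.h>
#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    xenmem_access_t access;
    domid_t domid = 1;           /* hypothetical target domain */
    uint64_t first_pfn = 0x1000; /* hypothetical start of the pfn range */
    uint32_t nr = 16;            /* number of pages to restrict */
    int rc;

    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    if ( xch == NULL )
        return 1;

    /* Permit only reads on the range; write/execute attempts will fault. */
    rc = xc_set_mem_access(xch, domid, XENMEM_access_r, first_pfn, nr);

    /* Read the permission back for the first page of the range. */
    if ( rc == 0 )
        rc = xc_get_mem_access(xch, domid, first_pfn, &access);

    if ( rc == 0 )
        printf("pfn 0x%" PRIx64 " access type %d\n", first_pfn, (int)access);

    xc_interface_close(xch);
    return rc == 0 ? 0 : 1;
}

A real mem_access listener would additionally set up the event ring via xc_mem_access_enable(), process events, and call the now gfn-less xc_mem_access_resume(); the sketch above exercises only the set/get pair reworked by this patch.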