[Xen-devel] [PATCH v2 1/3] x86/mem_access: Make the mem_access ops generic
This patch does the following:
1. Deprecate the HVMOP_[sg]et_mem_access HVM ops.
2. Move the ops under XENMEM_access_op.
3. Rename enums and structs to be more generic rather than HVM specific.
4. Remove the enums and structs associated with the HVM ops.

Signed-off-by: Aravindh Puthiyaparambil <aravindp@xxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
---
Changes from version 1 of the patch:
1. Use MEMOP_CMD_MASK instead of introducing a new mask.
2. Pass "cmd" down from do_memory_op() instead of "op" and "start_extent".
3. Pass typed handle to mem_access_memop() and use __copy_field_to_guest().
4. Use ACCESS() macro to remove ordering dependency.
5. Add compat verification for xen_mem_access_op_t.
6. Fix formatting.

Changes from the RFC version of the patch:
1. Removed pointless braces.
2. Change preemption handling to use upper "cmd" bits from do_memory_op().
3. Delete old interface enum and structs.
4. Remove xenmem_ prefix.
5. Make access uint8_t and place above domid.

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 38c491e..eeaa72e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4589,79 +4589,10 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
     }
 
     case HVMOP_set_mem_access:
-    {
-        struct xen_hvm_set_mem_access a;
-        struct domain *d;
-
-        if ( copy_from_guest(&a, arg, 1) )
-            return -EFAULT;
-
-        rc = rcu_lock_remote_domain_by_id(a.domid, &d);
-        if ( rc != 0 )
-            return rc;
-
-        rc = -EINVAL;
-        if ( !is_hvm_domain(d) )
-            goto param_fail5;
-
-        rc = xsm_hvm_param(XSM_TARGET, d, op);
-        if ( rc )
-            goto param_fail5;
-
-        rc = -EINVAL;
-        if ( (a.first_pfn != ~0ull) &&
-             (a.nr < start_iter ||
-              ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
-              ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d))) )
-            goto param_fail5;
-
-        rc = p2m_set_mem_access(d, a.first_pfn, a.nr, start_iter,
-                                HVMOP_op_mask, a.hvmmem_access);
-        if ( rc > 0 )
-        {
-            start_iter = rc;
-            rc = -EAGAIN;
-        }
-
-    param_fail5:
-        rcu_unlock_domain(d);
-        break;
-    }
-
     case HVMOP_get_mem_access:
     {
-        struct xen_hvm_get_mem_access a;
-        struct domain *d;
-        hvmmem_access_t access;
-
-        if ( copy_from_guest(&a, arg, 1) )
-            return -EFAULT;
-
-        rc = rcu_lock_remote_domain_by_id(a.domid, &d);
-        if ( rc != 0 )
-            return rc;
-
-        rc = -EINVAL;
-        if ( !is_hvm_domain(d) )
-            goto param_fail6;
-
-        rc = xsm_hvm_param(XSM_TARGET, d, op);
-        if ( rc )
-            goto param_fail6;
-
-        rc = -EINVAL;
-        if ( (a.pfn > domain_get_maximum_gpfn(d)) && a.pfn != ~0ull )
-            goto param_fail6;
-
-        rc = p2m_get_mem_access(d, a.pfn, &access);
-        if ( rc != 0 )
-            goto param_fail6;
-
-        a.hvmmem_access = access;
-        rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
-
-    param_fail6:
-        rcu_unlock_domain(d);
+        gdprintk(XENLOG_DEBUG, "Deprecated HVM op %ld.\n", op);
+        rc = -ENOSYS;
         break;
     }
 
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index fdc5ed3..719b255 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -104,6 +104,7 @@
 #include <xen/xmalloc.h>
 #include <xen/efi.h>
 #include <xen/grant_table.h>
+#include <xen/hypercall.h>
 #include <asm/paging.h>
 #include <asm/shadow.h>
 #include <asm/page.h>
@@ -4631,9 +4632,10 @@ int xenmem_add_to_physmap_one(
     return rc;
 }
 
-long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
+long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     int rc;
+    int op = cmd & MEMOP_CMD_MASK;
 
     switch ( op )
     {
@@ -4853,7 +4855,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
     }
 
     default:
-        return subarch_memory_op(op, arg);
+        return subarch_memory_op(cmd, arg);
     }
 
     return 0;
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index 50aaf27..e02e08b 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -21,31 +21,94 @@
  */
 
 
+#include <xen/sched.h>
+#include <xen/guest_access.h>
+#include <xen/hypercall.h>
 #include <asm/p2m.h>
 #include <asm/mem_event.h>
+#include <xsm/xsm.h>
 
 
-int mem_access_memop(struct domain *d, xen_mem_event_op_t *meo)
+int mem_access_memop(unsigned long cmd,
+                     XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
 {
-    int rc;
+    long rc;
+    xen_mem_access_op_t mao;
+    struct domain *d;
+
+    if ( copy_from_guest(&mao, arg, 1) )
+        return -EFAULT;
+
+    rc = rcu_lock_live_remote_domain_by_id(mao.domid, &d);
+    if ( rc )
+        return rc;
+
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
+    rc = xsm_mem_event_op(XSM_TARGET, d, XENMEM_access_op);
+    if ( rc )
+        goto out;
 
     if ( unlikely(!d->mem_event->access.ring_page) )
         return -ENODEV;
 
-    switch( meo->op )
+    switch ( mao.op )
     {
     case XENMEM_access_op_resume:
     {
         p2m_mem_access_resume(d);
         rc = 0;
+        break;
+    }
+
+    case XENMEM_access_op_set_access:
+    {
+        unsigned long start_iter = cmd & ~MEMOP_CMD_MASK;
+
+        rc = -EINVAL;
+        if ( (mao.pfn != ~0ull) &&
+             (mao.nr < start_iter ||
+              ((mao.pfn + mao.nr - 1) < mao.pfn) ||
+              ((mao.pfn + mao.nr - 1) > domain_get_maximum_gpfn(d))) )
+            break;
+
+        rc = p2m_set_mem_access(d, mao.pfn, mao.nr, start_iter,
+                                MEMOP_CMD_MASK, mao.access);
+        if ( rc > 0 )
+        {
+            ASSERT(!(rc & MEMOP_CMD_MASK));
+            rc = hypercall_create_continuation(__HYPERVISOR_memory_op, "lh",
+                                               cmd | rc, arg);
+        }
+        break;
+    }
+
+    case XENMEM_access_op_get_access:
+    {
+        xenmem_access_t access;
+
+        rc = -EINVAL;
+        if ( (mao.pfn > domain_get_maximum_gpfn(d)) && mao.pfn != ~0ull )
+            break;
+
+        rc = p2m_get_mem_access(d, mao.pfn, &access);
+        if ( rc != 0 )
+            break;
+
+        mao.access = access;
+        rc = __copy_field_to_guest(arg, &mao, access) ? -EFAULT : 0;
+
+        break;
     }
-    break;
 
     default:
         rc = -ENOSYS;
         break;
     }
 
+ out:
+    rcu_unlock_domain(d);
     return rc;
 }
diff --git a/xen/arch/x86/mm/mem_event.c b/xen/arch/x86/mm/mem_event.c
index d00e404..36b9dba 100644
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -458,9 +458,6 @@ int do_mem_event_op(int op, uint32_t domain, void *arg)
         case XENMEM_paging_op:
             ret = mem_paging_memop(d, (xen_mem_event_op_t *) arg);
             break;
-        case XENMEM_access_op:
-            ret = mem_access_memop(d, (xen_mem_event_op_t *) arg);
-            break;
         case XENMEM_sharing_op:
             ret = mem_sharing_memop(d, (xen_mem_sharing_op_t *) arg);
             break;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index c38f334..be77d7c 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1350,7 +1350,7 @@ void p2m_mem_access_resume(struct domain *d)
 /* Set access type for a region of pfns.
  * If start_pfn == -1ul, sets the default access type */
 long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
-                        uint32_t start, uint32_t mask, hvmmem_access_t access)
+                        uint32_t start, uint32_t mask, xenmem_access_t access)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     p2m_access_t a, _a;
@@ -1359,7 +1359,7 @@ long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
     long rc = 0;
 
     static const p2m_access_t memaccess[] = {
-#define ACCESS(ac) [HVMMEM_access_##ac] = p2m_access_##ac
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
         ACCESS(n),
         ACCESS(r),
         ACCESS(w),
@@ -1378,7 +1378,7 @@ long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
     case 0 ... ARRAY_SIZE(memaccess) - 1:
         a = memaccess[access];
         break;
-    case HVMMEM_access_default:
+    case XENMEM_access_default:
         a = p2m->default_access;
         break;
     default:
@@ -1416,23 +1416,26 @@ long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
 /* Get access type for a pfn
  * If pfn == -1ul, gets the default access type */
 int p2m_get_mem_access(struct domain *d, unsigned long pfn,
-                       hvmmem_access_t *access)
+                       xenmem_access_t *access)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     p2m_type_t t;
     p2m_access_t a;
     mfn_t mfn;
 
-    static const hvmmem_access_t memaccess[] = {
-        HVMMEM_access_n,
-        HVMMEM_access_r,
-        HVMMEM_access_w,
-        HVMMEM_access_rw,
-        HVMMEM_access_x,
-        HVMMEM_access_rx,
-        HVMMEM_access_wx,
-        HVMMEM_access_rwx,
-        HVMMEM_access_rx2rw
+    static const xenmem_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = XENMEM_access_##ac
+        ACCESS(n),
+        ACCESS(r),
+        ACCESS(w),
+        ACCESS(rw),
+        ACCESS(x),
+        ACCESS(rx),
+        ACCESS(wx),
+        ACCESS(rwx),
+        ACCESS(rx2rw),
+        ACCESS(n2rwx),
+#undef ACCESS
     };
 
     /* If request to get default access */
diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c
index 0a8408b..b768158 100644
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -4,6 +4,7 @@
 #include <compat/xen.h>
 #include <asm/mem_event.h>
 #include <asm/mem_sharing.h>
+#include <asm/mem_access.h>
 
 int compat_set_gdt(XEN_GUEST_HANDLE_PARAM(uint) frame_list, unsigned int entries)
 {
@@ -44,7 +45,7 @@ int compat_update_descriptor(u32 pa_lo, u32 pa_hi, u32 desc_lo, u32 desc_hi)
                             desc_lo | ((u64)desc_hi << 32));
 }
 
-int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
+int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     struct compat_machphys_mfn_list xmml;
     l2_pgentry_t l2e;
@@ -52,6 +53,7 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
     compat_pfn_t mfn;
     unsigned int i;
     int rc = 0;
+    int op = cmd & MEMOP_CMD_MASK;
 
     switch ( op )
     {
@@ -68,7 +70,7 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         XLAT_foreign_memory_map(nat, &cmp);
 #undef XLAT_memory_map_HNDL_buffer
 
-        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
+        rc = arch_memory_op(cmd, guest_handle_from_ptr(nat, void));
 
         break;
     }
@@ -87,7 +89,7 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         XLAT_memory_map(nat, &cmp);
 #undef XLAT_memory_map_HNDL_buffer
 
-        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
+        rc = arch_memory_op(cmd, guest_handle_from_ptr(nat, void));
         if ( rc < 0 )
             break;
 
@@ -111,7 +113,7 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
 
         XLAT_pod_target(nat, &cmp);
 
-        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
+        rc = arch_memory_op(cmd, guest_handle_from_ptr(nat, void));
         if ( rc < 0 )
             break;
 
@@ -185,7 +187,6 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         return mem_sharing_get_nr_shared_mfns();
 
     case XENMEM_paging_op:
-    case XENMEM_access_op:
     {
         xen_mem_event_op_t meo;
         if ( copy_from_guest(&meo, arg, 1) )
@@ -195,6 +196,11 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
             return -EFAULT;
         break;
     }
+
+    case XENMEM_access_op:
+        rc = mem_access_memop(cmd, guest_handle_cast(arg, xen_mem_access_op_t));
+        break;
+
     case XENMEM_sharing_op:
     {
         xen_mem_sharing_op_t mso;
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index f6ea012..264e39f 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -25,6 +25,7 @@
 #include <xen/numa.h>
 #include <xen/nodemask.h>
 #include <xen/guest_access.h>
+#include <xen/hypercall.h>
 #include <asm/current.h>
 #include <asm/asm_defns.h>
 #include <asm/page.h>
@@ -36,6 +37,7 @@
 #include <asm/numa.h>
 #include <asm/mem_event.h>
 #include <asm/mem_sharing.h>
+#include <asm/mem_access.h>
 #include <public/memory.h>
 
 /* Parameters for PFN/MADDR compression. */
@@ -948,7 +950,7 @@ void __init subarch_init_memory(void)
     }
 }
 
-long subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
+long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     struct xen_machphys_mfn_list xmml;
     l3_pgentry_t l3e;
@@ -957,6 +959,7 @@ long subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
    xen_pfn_t mfn, last_mfn;
    unsigned int i;
    long rc = 0;
+    int op = cmd & MEMOP_CMD_MASK;
 
     switch ( op )
     {
@@ -1007,7 +1010,6 @@ long subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         return mem_sharing_get_nr_shared_mfns();
 
     case XENMEM_paging_op:
-    case XENMEM_access_op:
     {
         xen_mem_event_op_t meo;
         if ( copy_from_guest(&meo, arg, 1) )
@@ -1017,6 +1019,11 @@ long subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
             return -EFAULT;
         break;
     }
+
+    case XENMEM_access_op:
+        rc = mem_access_memop(cmd, guest_handle_cast(arg, xen_mem_access_op_t));
+        break;
+
     case XENMEM_sharing_op:
     {
         xen_mem_sharing_op_t mso;
diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
index daa2e04..25dc016 100644
--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -13,6 +13,8 @@ CHECK_TYPE(domid);
 #undef compat_domid_t
 #undef xen_domid_t
 
+CHECK_mem_access_op;
+
 int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
 {
     int split, op = cmd & MEMOP_CMD_MASK;
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 4d6ffee..257f4b0 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -964,7 +964,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         break;
 
     default:
-        rc = arch_memory_op(op, arg);
+        rc = arch_memory_op(cmd, arg);
         break;
     }
 
diff --git a/xen/include/asm-x86/mem_access.h b/xen/include/asm-x86/mem_access.h
index 60c2834..5c7c5fd 100644
--- a/xen/include/asm-x86/mem_access.h
+++ b/xen/include/asm-x86/mem_access.h
@@ -23,7 +23,8 @@
 #ifndef _XEN_ASM_MEM_ACCESS_H
 #define _XEN_ASM_MEM_ACCESS_H
 
-int mem_access_memop(struct domain *d, xen_mem_event_op_t *meo);
+int mem_access_memop(unsigned long cmd,
+                     XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
 int mem_access_send_req(struct domain *d, mem_event_request_t *req);
 
 #endif /* _XEN_ASM_MEM_ACCESS_H */
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index c835f76..7059adc 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -561,9 +561,9 @@ void *do_page_walk(struct vcpu *v, unsigned long addr);
 int __sync_local_execstate(void);
 
 /* Arch-specific portion of memory_op hypercall. */
-long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg);
-long subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg);
-int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));
+long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
+long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
+int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void));
 int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));
 
 int steal_page(
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d644f82..743bb59 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -577,12 +577,12 @@ void p2m_mem_access_resume(struct domain *d);
 /* Set access type for a region of pfns.
  * If start_pfn == -1ul, sets the default access type */
 long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_t nr,
-                        uint32_t start, uint32_t mask, hvmmem_access_t access);
+                        uint32_t start, uint32_t mask, xenmem_access_t access);
 
 /* Get access type for a pfn
  * If pfn == -1ul, gets the default access type */
-int p2m_get_mem_access(struct domain *d, unsigned long pfn, 
-                       hvmmem_access_t *access);
+int p2m_get_mem_access(struct domain *d, unsigned long pfn,
+                       xenmem_access_t *access);
 
 /*
  * Internal functions, only called by other p2m code
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 3204ec4..f00f6d2 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -162,49 +162,11 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);
 /* Following tools-only interfaces may change in future. */
 #if defined(__XEN__) || defined(__XEN_TOOLS__)
 
+/* Deprecated by XENMEM_access_op_set_access */
 #define HVMOP_set_mem_access        12
-typedef enum {
-    HVMMEM_access_n,
-    HVMMEM_access_r,
-    HVMMEM_access_w,
-    HVMMEM_access_rw,
-    HVMMEM_access_x,
-    HVMMEM_access_rx,
-    HVMMEM_access_wx,
-    HVMMEM_access_rwx,
-    HVMMEM_access_rx2rw,       /* Page starts off as r-x, but automatically
-                                * change to r-w on a write */
-    HVMMEM_access_n2rwx,       /* Log access: starts off as n, automatically
-                                * goes to rwx, generating an event without
-                                * pausing the vcpu */
-    HVMMEM_access_default      /* Take the domain default */
-} hvmmem_access_t;
-/* Notify that a region of memory is to have specific access types */
-struct xen_hvm_set_mem_access {
-    /* Domain to be updated. */
-    domid_t domid;
-    /* Memory type */
-    uint16_t hvmmem_access; /* hvm_access_t */
-    /* Number of pages, ignored on setting default access */
-    uint32_t nr;
-    /* First pfn, or ~0ull to set the default access for new pages */
-    uint64_aligned_t first_pfn;
-};
-typedef struct xen_hvm_set_mem_access xen_hvm_set_mem_access_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_t);
 
+/* Deprecated by XENMEM_access_op_get_access */
 #define HVMOP_get_mem_access        13
-/* Get the specific access type for that region of memory */
-struct xen_hvm_get_mem_access {
-    /* Domain to be queried. */
-    domid_t domid;
-    /* Memory type: OUT */
-    uint16_t hvmmem_access; /* hvm_access_t */
-    /* pfn, or ~0ull for default access for new pages.  IN */
-    uint64_aligned_t pfn;
-};
-typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t);
 
 #define HVMOP_inject_trap            14
 /* Inject a trap into a VCPU, which will get taken up on the next
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index f19ac14..5bcd475 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -363,9 +363,6 @@ typedef struct xen_pod_target xen_pod_target_t;
 #define XENMEM_paging_op_evict              1
 #define XENMEM_paging_op_prep               2
 
-#define XENMEM_access_op                    21
-#define XENMEM_access_op_resume             0
-
 struct xen_mem_event_op {
     uint8_t     op;         /* XENMEM_*_op_* */
     domid_t     domain;
@@ -379,6 +376,56 @@ struct xen_mem_event_op {
 typedef struct xen_mem_event_op xen_mem_event_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_mem_event_op_t);
 
+#define XENMEM_access_op                    21
+#define XENMEM_access_op_resume             0
+#define XENMEM_access_op_set_access         1
+#define XENMEM_access_op_get_access         2
+
+typedef enum {
+    XENMEM_access_n,
+    XENMEM_access_r,
+    XENMEM_access_w,
+    XENMEM_access_rw,
+    XENMEM_access_x,
+    XENMEM_access_rx,
+    XENMEM_access_wx,
+    XENMEM_access_rwx,
+    /*
+     * Page starts off as r-x, but automatically
+     * change to r-w on a write
+     */
+    XENMEM_access_rx2rw,
+    /*
+     * Log access: starts off as n, automatically
+     * goes to rwx, generating an event without
+     * pausing the vcpu
+     */
+    XENMEM_access_n2rwx,
+    /* Take the domain default */
+    XENMEM_access_default
+} xenmem_access_t;
+
+struct xen_mem_access_op {
+    /* XENMEM_access_op_* */
+    uint8_t op;
+    /* xenmem_access_t */
+    uint8_t access;
+    domid_t domid;
+    /*
+     * Number of pages for set op
+     * Ignored on setting default access and other ops
+     */
+    uint32_t nr;
+    /*
+     * First pfn for set op
+     * pfn for get op
+     * ~0ull is used to set and get the default access for pages
+     */
+    uint64_aligned_t pfn;
+};
+typedef struct xen_mem_access_op xen_mem_access_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
+
 #define XENMEM_sharing_op                   22
 #define XENMEM_sharing_op_nominate_gfn      0
 #define XENMEM_sharing_op_nominate_gref     1
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 5d354d8..9a35dd7 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -60,6 +60,7 @@
 !	memory_exchange			memory.h
 !	memory_map			memory.h
 !	memory_reservation		memory.h
+?	mem_access_op			memory.h
 !	pod_target			memory.h
 !	remove_from_physmap		memory.h
 ?	physdev_eoi			physdev.h
--
1.8.3.2
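[Editorial illustration] For callers, the net effect of this patch is that memory-access control moves from the two HVM ops to a single XENMEM_access_op memory op taking a xen_mem_access_op structure. The sketch below is illustrative only and not part of the patch: it assumes a caller environment that provides the public memory.h definitions and a HYPERVISOR_memory_op() hypercall wrapper (as typical Xen guest kernels and toolstack libraries do; the header path varies), and it assumes a mem_access listener ring has already been set up for the target domain, since mem_access_memop() above otherwise returns -ENODEV.

    /* Illustrative sketch -- not part of this patch.
     * Assumes the public memory.h definitions and a HYPERVISOR_memory_op()
     * wrapper are available in the caller's environment. */
    #include <xen/memory.h>

    static int set_page_read_only(domid_t domid, uint64_t gfn)
    {
        xen_mem_access_op_t mao = {
            .op     = XENMEM_access_op_set_access,
            .access = XENMEM_access_r,  /* violations raise mem_access events */
            .domid  = domid,
            .nr     = 1,                /* page count; ignored when setting the default */
            .pfn    = gfn,              /* ~0ull changes the default access instead */
        };

        /* Large ranges may be preempted: the hypervisor encodes the restart
         * point in the upper bits of cmd and continues transparently, so the
         * caller only sees the final return value. */
        return HYPERVISOR_memory_op(XENMEM_access_op, &mao);
    }

The get side is symmetric: set .op to XENMEM_access_op_get_access and fill .pfn, and the hypervisor writes the result back into the access field (via __copy_field_to_guest() in the patch).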