
Re: [Xen-devel] [PATCH V5 01/12] xen/mem_event: Cleanup of mem_event structures



On Fri, Feb 13, 2015 at 6:23 PM, Andrew Cooper
<andrew.cooper3@xxxxxxxxxx> wrote:
> On 13/02/15 16:33, Tamas K Lengyel wrote:
>> The public mem_event structures used to communicate with helper applications via
>> shared rings have been used in different settings. However, the variable names
>> within this structure have not reflected this fact, resulting in the reuse of
>> variables to mean different things under different scenarios.
>>
>> This patch remedies the issue by clearly defining the structure members based on
>> the actual context within which the structure is used.
>>
>> Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
>> Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
>> ---
>> v5: Style fixes
>>     Convert gfn to uint32_t
>
> It is perfectly possible to have guests with more memory than is covered
> by 44 bits, or PV guests whose frames reside above the 44bit boundary.
> All gfn values should be 64bits wide.
>
> ~Andrew

Internally Xen handles all gfns as unsigned longs, so depending on the
compiler and target ABI it may be only 32 bits wide. If gfns must be
wider than 32 bits, then we should use unsigned long longs within Xen.
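
As a quick illustration of the truncation hazard (a minimal,
hypothetical standalone C snippet, not Xen code; the example gfn value
is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* A hypothetical gfn whose backing address lies above the
         * 44-bit boundary (2^32 frames * 4K pages). */
        uint64_t gfn64 = (uint64_t)1 << 33;

        /* On an LP64 build unsigned long is 64 bits wide and this
         * assignment is lossless; on an ILP32 build it is 32 bits
         * wide and the frame number is silently truncated. */
        unsigned long gfn = (unsigned long)gfn64;

        if ((uint64_t)gfn != gfn64)
            printf("gfn truncated: %#llx -> %#lx\n",
                   (unsigned long long)gfn64, gfn);
        else
            printf("gfn preserved: %#lx\n", gfn);

        return 0;
    }

On an ILP32 build the truncated branch fires, which is exactly the case
where a 32-bit ring field (or a 32-bit unsigned long) loses information.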

Tamas

>
>> and define mem_access flags bits as we can now save
>>         space on the ring this way
>>     Split non-mem_event flags into access/paging flags
>> v4: Attach mem_event version to each outgoing request directly in mem_event.
>> v3: Add padding to mem_event structures.
>>     Add version field to mem_event structures and checks for it.
>> ---
>>  tools/libxc/xc_mem_event.c          |   2 +-
>>  tools/libxc/xc_private.h            |   2 +-
>>  tools/tests/xen-access/xen-access.c |  45 +++++----
>>  tools/xenpaging/xenpaging.c         |  51 ++++++-----
>>  xen/arch/x86/hvm/hvm.c              | 177 +++++++++++++++++++-----------------
>>  xen/arch/x86/mm/mem_sharing.c       |  16 +++-
>>  xen/arch/x86/mm/p2m.c               | 163 ++++++++++++++++++---------------
>>  xen/common/mem_access.c             |   6 ++
>>  xen/common/mem_event.c              |   2 +
>>  xen/include/public/mem_event.h      | 173 ++++++++++++++++++++++---------
>>  xen/include/public/memory.h         |  11 ++-
>>  11 files changed, 401 insertions(+), 247 deletions(-)
>>
>> diff --git a/tools/libxc/xc_mem_event.c b/tools/libxc/xc_mem_event.c
>> index 8c0be4e..1b5f7c3 100644
>> --- a/tools/libxc/xc_mem_event.c
>> +++ b/tools/libxc/xc_mem_event.c
>> @@ -42,7 +42,7 @@ int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
>>
>>  int xc_mem_event_memop(xc_interface *xch, domid_t domain_id,
>>                          unsigned int op, unsigned int mode,
>> -                        uint64_t gfn, void *buffer)
>> +                        uint32_t gfn, void *buffer)
>>  {
>>      xen_mem_event_op_t meo;
>>
>> diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
>> index 45b8644..bc021b8 100644
>> --- a/tools/libxc/xc_private.h
>> +++ b/tools/libxc/xc_private.h
>> @@ -427,7 +427,7 @@ int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
>>                           unsigned int mode, uint32_t *port);
>>  int xc_mem_event_memop(xc_interface *xch, domid_t domain_id,
>>                          unsigned int op, unsigned int mode,
>> -                        uint64_t gfn, void *buffer);
>> +                        uint32_t gfn, void *buffer);
>>  /*
>>   * Enables mem_event and returns the mapped ring page indicated by param.
>>   * param can be HVM_PARAM_PAGING/ACCESS/SHARING_RING_PFN
>> diff --git a/tools/tests/xen-access/xen-access.c b/tools/tests/xen-access/xen-access.c
>> index 6cb382d..68f05db 100644
>> --- a/tools/tests/xen-access/xen-access.c
>> +++ b/tools/tests/xen-access/xen-access.c
>> @@ -551,13 +551,21 @@ int main(int argc, char *argv[])
>>                  continue;
>>              }
>>
>> +            if ( req.version != MEM_EVENT_INTERFACE_VERSION )
>> +            {
>> +                ERROR("Error: mem_event interface version mismatch!\n");
>> +                interrupted = -1;
>> +                continue;
>> +            }
>> +
>>              memset( &rsp, 0, sizeof (rsp) );
>> +            rsp.version = MEM_EVENT_INTERFACE_VERSION;
>>              rsp.vcpu_id = req.vcpu_id;
>>              rsp.flags = req.flags;
>>
>>              switch (req.reason) {
>> -            case MEM_EVENT_REASON_VIOLATION:
>> -                rc = xc_get_mem_access(xch, domain_id, req.gfn, &access);
>> +            case MEM_EVENT_REASON_MEM_ACCESS:
>> +                rc = xc_get_mem_access(xch, domain_id, req.u.mem_access.gfn, &access);
>>                  if (rc < 0)
>>                  {
>>                      ERROR("Error %d getting mem_access event\n", rc);
>> @@ -565,23 +573,23 @@ int main(int argc, char *argv[])
>>                      continue;
>>                  }
>>
>> -                printf("PAGE ACCESS: %c%c%c for GFN %"PRIx64" (offset %06"
>> +                printf("PAGE ACCESS: %c%c%c for GFN %"PRIx32" (offset %06"
>>                         PRIx64") gla %016"PRIx64" (valid: %c; fault in gpt: %c; fault with gla: %c) (vcpu %u)\n",
>> -                       req.access_r ? 'r' : '-',
>> -                       req.access_w ? 'w' : '-',
>> -                       req.access_x ? 'x' : '-',
>> -                       req.gfn,
>> -                       req.offset,
>> -                       req.gla,
>> -                       req.gla_valid ? 'y' : 'n',
>> -                       req.fault_in_gpt ? 'y' : 'n',
>> -                       req.fault_with_gla ? 'y': 'n',
>> +                       (req.u.mem_access.flags & MEM_ACCESS_R) ? 'r' : '-',
>> +                       (req.u.mem_access.flags & MEM_ACCESS_W) ? 'w' : '-',
>> +                       (req.u.mem_access.flags & MEM_ACCESS_X) ? 'x' : '-',
>> +                       req.u.mem_access.gfn,
>> +                       req.u.mem_access.offset,
>> +                       req.u.mem_access.gla,
>> +                       (req.u.mem_access.flags & MEM_ACCESS_GLA_VALID) ? 'y' : 'n',
>> +                       (req.u.mem_access.flags & MEM_ACCESS_FAULT_IN_GPT) ? 'y' : 'n',
>> +                       (req.u.mem_access.flags & MEM_ACCESS_FAULT_WITH_GLA) ? 'y': 'n',
>>                         req.vcpu_id);
>>
>>                  if ( default_access != after_first_access )
>>                  {
>>                      rc = xc_set_mem_access(xch, domain_id, after_first_access,
>> -                                           req.gfn, 1);
>> +                                           req.u.mem_access.gfn, 1);
>>                      if (rc < 0)
>>                      {
>>                          ERROR("Error %d setting gfn to access_type %d\n", rc,
>> @@ -592,13 +600,12 @@ int main(int argc, char *argv[])
>>                  }
>>
>>
>> -                rsp.gfn = req.gfn;
>> -                rsp.p2mt = req.p2mt;
>> +                rsp.u.mem_access.gfn = req.u.mem_access.gfn;
>>                  break;
>> -            case MEM_EVENT_REASON_INT3:
>> -                printf("INT3: rip=%016"PRIx64", gfn=%"PRIx64" (vcpu %d)\n",
>> -                       req.gla,
>> -                       req.gfn,
>> +            case MEM_EVENT_REASON_SOFTWARE_BREAKPOINT:
>> +                printf("INT3: rip=%016"PRIx64", gfn=%"PRIx32" (vcpu %d)\n",
>> +                       req.regs.x86.rip,
>> +                       req.u.software_breakpoint.gfn,
>>                         req.vcpu_id);
>>
>>                  /* Reinject */
>> diff --git a/tools/xenpaging/xenpaging.c b/tools/xenpaging/xenpaging.c
>> index 82c1ee4..29ca7c7 100644
>> --- a/tools/xenpaging/xenpaging.c
>> +++ b/tools/xenpaging/xenpaging.c
>> @@ -684,9 +684,9 @@ static int xenpaging_resume_page(struct xenpaging *paging, mem_event_response_t
>>           * This allows page-out of these gfns if the target grows again.
>>           */
>>          if (paging->num_paged_out > paging->policy_mru_size)
>> -            policy_notify_paged_in(rsp->gfn);
>> +            policy_notify_paged_in(rsp->u.mem_paging.gfn);
>>          else
>> -            policy_notify_paged_in_nomru(rsp->gfn);
>> +            policy_notify_paged_in_nomru(rsp->u.mem_paging.gfn);
>>
>>         /* Record number of resumed pages */
>>         paging->num_paged_out--;
>> @@ -874,7 +874,8 @@ int main(int argc, char *argv[])
>>      }
>>      xch = paging->xc_handle;
>>
>> -    DPRINTF("starting %s for domain_id %u with pagefile %s\n", argv[0], paging->mem_event.domain_id, filename);
>> +    DPRINTF("starting %s for domain_id %u with pagefile %s\n",
>> +            argv[0], paging->mem_event.domain_id, filename);
>>
>>      /* ensure that if we get a signal, we'll do cleanup, then exit */
>>      act.sa_handler = close_handler;
>> @@ -910,49 +911,52 @@ int main(int argc, char *argv[])
>>
>>              get_request(&paging->mem_event, &req);
>>
>> -            if ( req.gfn > paging->max_pages )
>> +            if ( req.u.mem_paging.gfn > paging->max_pages )
>>              {
>> -                ERROR("Requested gfn %"PRIx64" higher than max_pages %x\n", req.gfn, paging->max_pages);
>> +                ERROR("Requested gfn %"PRIx32" higher than max_pages %x\n",
>> +                      req.u.mem_paging.gfn, paging->max_pages);
>>                  goto out;
>>              }
>>
>>              /* Check if the page has already been paged in */
>> -            if ( test_and_clear_bit(req.gfn, paging->bitmap) )
>> +            if ( test_and_clear_bit(req.u.mem_paging.gfn, paging->bitmap) )
>>              {
>>                  /* Find where in the paging file to read from */
>> -                slot = paging->gfn_to_slot[req.gfn];
>> +                slot = paging->gfn_to_slot[req.u.mem_paging.gfn];
>>
>>                  /* Sanity check */
>> -                if ( paging->slot_to_gfn[slot] != req.gfn )
>> +                if ( paging->slot_to_gfn[slot] != req.u.mem_paging.gfn )
>>                  {
>> -                    ERROR("Expected gfn %"PRIx64" in slot %d, but found gfn %lx\n", req.gfn, slot, paging->slot_to_gfn[slot]);
>> +                    ERROR("Expected gfn %"PRIx32" in slot %d, but found gfn %lx\n",
>> +                          req.u.mem_paging.gfn, slot, paging->slot_to_gfn[slot]);
>>                      goto out;
>>                  }
>>
>> -                if ( req.flags & MEM_EVENT_FLAG_DROP_PAGE )
>> +                if ( req.u.mem_paging.flags & MEM_PAGING_DROP_PAGE )
>>                  {
>> -                    DPRINTF("drop_page ^ gfn %"PRIx64" pageslot %d\n", req.gfn, slot);
>> +                    DPRINTF("drop_page ^ gfn %"PRIx32" pageslot %d\n",
>> +                            req.u.mem_paging.gfn, slot);
>>                      /* Notify policy of page being dropped */
>> -                    policy_notify_dropped(req.gfn);
>> +                    policy_notify_dropped(req.u.mem_paging.gfn);
>>                  }
>>                  else
>>                  {
>>                      /* Populate the page */
>> -                    if ( xenpaging_populate_page(paging, req.gfn, slot) < 0 )
>> +                    if ( xenpaging_populate_page(paging, req.u.mem_paging.gfn, slot) < 0 )
>>                      {
>> -                        ERROR("Error populating page %"PRIx64"", req.gfn);
>> +                        ERROR("Error populating page %"PRIx32"", req.u.mem_paging.gfn);
>>                          goto out;
>>                      }
>>                  }
>>
>>                  /* Prepare the response */
>> -                rsp.gfn = req.gfn;
>> +                rsp.u.mem_paging.gfn = req.u.mem_paging.gfn;
>>                  rsp.vcpu_id = req.vcpu_id;
>>                  rsp.flags = req.flags;
>>
>>                  if ( xenpaging_resume_page(paging, &rsp, 1) < 0 )
>>                  {
>> -                    PERROR("Error resuming page %"PRIx64"", req.gfn);
>> +                    PERROR("Error resuming page %"PRIx32"", req.u.mem_paging.gfn);
>>                      goto out;
>>                  }
>>
>> @@ -965,23 +969,24 @@ int main(int argc, char *argv[])
>>              else
>>              {
>>                  DPRINTF("page %s populated (domain = %d; vcpu = %d;"
>> -                        " gfn = %"PRIx64"; paused = %d; evict_fail = %d)\n",
>> -                        req.flags & MEM_EVENT_FLAG_EVICT_FAIL ? "not" : "already",
>> -                        paging->mem_event.domain_id, req.vcpu_id, req.gfn,
>> +                        " gfn = %"PRIx32"; paused = %d; evict_fail = %d)\n",
>> +                        req.u.mem_paging.flags & MEM_PAGING_EVICT_FAIL ? "not" : "already",
>> +                        paging->mem_event.domain_id, req.vcpu_id, req.u.mem_paging.gfn,
>>                          !!(req.flags & MEM_EVENT_FLAG_VCPU_PAUSED) ,
>> -                        !!(req.flags & MEM_EVENT_FLAG_EVICT_FAIL) );
>> +                        !!(req.u.mem_paging.flags & MEM_PAGING_EVICT_FAIL) );
>>
>>                  /* Tell Xen to resume the vcpu */
>> -                if (( req.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) || ( req.flags & MEM_EVENT_FLAG_EVICT_FAIL ))
>> +                if (( req.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) ||
>> +                    ( req.u.mem_paging.flags & MEM_PAGING_EVICT_FAIL ))
>>                  {
>>                      /* Prepare the response */
>> -                    rsp.gfn = req.gfn;
>> +                    rsp.u.mem_paging.gfn = req.u.mem_paging.gfn;
>>                      rsp.vcpu_id = req.vcpu_id;
>>                      rsp.flags = req.flags;
>>
>>                      if ( xenpaging_resume_page(paging, &rsp, 0) < 0 )
>>                      {
>> -                        PERROR("Error resuming page %"PRIx64"", req.gfn);
>> +                        PERROR("Error resuming page %"PRIx32"", req.u.mem_paging.gfn);
>>                          goto out;
>>                      }
>>                  }
>> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
>> index b03ee4e..fe5f568 100644
>> --- a/xen/arch/x86/hvm/hvm.c
>> +++ b/xen/arch/x86/hvm/hvm.c
>> @@ -6324,48 +6324,42 @@ static void hvm_mem_event_fill_regs(mem_event_request_t *req)
>>      const struct cpu_user_regs *regs = guest_cpu_user_regs();
>>      const struct vcpu *curr = current;
>>
>> -    req->x86_regs.rax = regs->eax;
>> -    req->x86_regs.rcx = regs->ecx;
>> -    req->x86_regs.rdx = regs->edx;
>> -    req->x86_regs.rbx = regs->ebx;
>> -    req->x86_regs.rsp = regs->esp;
>> -    req->x86_regs.rbp = regs->ebp;
>> -    req->x86_regs.rsi = regs->esi;
>> -    req->x86_regs.rdi = regs->edi;
>> -
>> -    req->x86_regs.r8  = regs->r8;
>> -    req->x86_regs.r9  = regs->r9;
>> -    req->x86_regs.r10 = regs->r10;
>> -    req->x86_regs.r11 = regs->r11;
>> -    req->x86_regs.r12 = regs->r12;
>> -    req->x86_regs.r13 = regs->r13;
>> -    req->x86_regs.r14 = regs->r14;
>> -    req->x86_regs.r15 = regs->r15;
>> -
>> -    req->x86_regs.rflags = regs->eflags;
>> -    req->x86_regs.rip    = regs->eip;
>> -
>> -    req->x86_regs.msr_efer = curr->arch.hvm_vcpu.guest_efer;
>> -    req->x86_regs.cr0 = curr->arch.hvm_vcpu.guest_cr[0];
>> -    req->x86_regs.cr3 = curr->arch.hvm_vcpu.guest_cr[3];
>> -    req->x86_regs.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
>> -}
>> -
>> -static int hvm_memory_event_traps(long p, uint32_t reason,
>> -                                  unsigned long value, unsigned long old,
>> -                                  bool_t gla_valid, unsigned long gla)
>> -{
>> -    struct vcpu* v = current;
>> -    struct domain *d = v->domain;
>> -    mem_event_request_t req = { .reason = reason };
>> +    req->regs.x86.rax = regs->eax;
>> +    req->regs.x86.rcx = regs->ecx;
>> +    req->regs.x86.rdx = regs->edx;
>> +    req->regs.x86.rbx = regs->ebx;
>> +    req->regs.x86.rsp = regs->esp;
>> +    req->regs.x86.rbp = regs->ebp;
>> +    req->regs.x86.rsi = regs->esi;
>> +    req->regs.x86.rdi = regs->edi;
>> +
>> +    req->regs.x86.r8  = regs->r8;
>> +    req->regs.x86.r9  = regs->r9;
>> +    req->regs.x86.r10 = regs->r10;
>> +    req->regs.x86.r11 = regs->r11;
>> +    req->regs.x86.r12 = regs->r12;
>> +    req->regs.x86.r13 = regs->r13;
>> +    req->regs.x86.r14 = regs->r14;
>> +    req->regs.x86.r15 = regs->r15;
>> +
>> +    req->regs.x86.rflags = regs->eflags;
>> +    req->regs.x86.rip    = regs->eip;
>> +
>> +    req->regs.x86.msr_efer = curr->arch.hvm_vcpu.guest_efer;
>> +    req->regs.x86.cr0 = curr->arch.hvm_vcpu.guest_cr[0];
>> +    req->regs.x86.cr3 = curr->arch.hvm_vcpu.guest_cr[3];
>> +    req->regs.x86.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
>> +}
>> +
>> +static int hvm_memory_event_traps(uint64_t parameters, mem_event_request_t *req)
>> +{
>>      int rc;
>> +    struct vcpu *v = current;
>> +    struct domain *d = v->domain;
>>
>> -    if ( !(p & HVMPME_MODE_MASK) )
>> +    if ( !(parameters & HVMPME_MODE_MASK) )
>>          return 0;
>>
>> -    if ( (p & HVMPME_onchangeonly) && (value == old) )
>> -        return 1;
>> -
>>      rc = mem_event_claim_slot(d, &d->mem_event->access);
>>      if ( rc == -ENOSYS )
>>      {
>> @@ -6376,85 +6370,106 @@ static int hvm_memory_event_traps(long p, uint32_t reason,
>>      else if ( rc < 0 )
>>          return rc;
>>
>> -    if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
>> +    if ( (parameters & HVMPME_MODE_MASK) == HVMPME_mode_sync )
>>      {
>> -        req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
>> +        req->flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
>>          mem_event_vcpu_pause(v);
>>      }
>>
>> -    req.gfn = value;
>> -    req.vcpu_id = v->vcpu_id;
>> -    if ( gla_valid )
>> -    {
>> -        req.offset = gla & ((1 << PAGE_SHIFT) - 1);
>> -        req.gla = gla;
>> -        req.gla_valid = 1;
>> -    }
>> -    else
>> -    {
>> -        req.gla = old;
>> -    }
>> -
>> -    hvm_mem_event_fill_regs(&req);
>> -    mem_event_put_request(d, &d->mem_event->access, &req);
>> -
>> +    hvm_mem_event_fill_regs(req);
>> +    mem_event_put_request(d, &d->mem_event->access, req);
>> +
>>      return 1;
>>  }
>>
>> +static void hvm_memory_event_cr(uint32_t reason, unsigned long value,
>> +                                unsigned long old)
>> +{
>> +    mem_event_request_t req = {
>> +        .reason = reason,
>> +        .vcpu_id = current->vcpu_id,
>> +        .u.mov_to_cr.new_value = value,
>> +        .u.mov_to_cr.old_value = old
>> +    };
>> +    uint64_t parameters = 0;
>> +
>> +    switch(reason)
>> +    {
>> +    case MEM_EVENT_REASON_MOV_TO_CR0:
>> +        parameters = current->domain->arch.hvm_domain
>> +                      .params[HVM_PARAM_MEMORY_EVENT_CR0];
>> +        break;
>> +    case MEM_EVENT_REASON_MOV_TO_CR3:
>> +        parameters = current->domain->arch.hvm_domain
>> +                      .params[HVM_PARAM_MEMORY_EVENT_CR3];
>> +        break;
>> +    case MEM_EVENT_REASON_MOV_TO_CR4:
>> +        parameters = current->domain->arch.hvm_domain
>> +                      .params[HVM_PARAM_MEMORY_EVENT_CR4];
>> +        break;
>> +    };
>> +
>> +    if ( (parameters & HVMPME_onchangeonly) && (value == old) )
>> +        return;
>> +
>> +    hvm_memory_event_traps(parameters, &req);
>> +}
>> +
>>  void hvm_memory_event_cr0(unsigned long value, unsigned long old)
>>  {
>> -    hvm_memory_event_traps(current->domain->arch.hvm_domain
>> -                             .params[HVM_PARAM_MEMORY_EVENT_CR0],
>> -                           MEM_EVENT_REASON_CR0,
>> -                           value, old, 0, 0);
>> +    hvm_memory_event_cr(MEM_EVENT_REASON_MOV_TO_CR0, value, old);
>>  }
>>
>>  void hvm_memory_event_cr3(unsigned long value, unsigned long old)
>>  {
>> -    hvm_memory_event_traps(current->domain->arch.hvm_domain
>> -                             .params[HVM_PARAM_MEMORY_EVENT_CR3],
>> -                           MEM_EVENT_REASON_CR3,
>> -                           value, old, 0, 0);
>> +    hvm_memory_event_cr(MEM_EVENT_REASON_MOV_TO_CR3, value, old);
>>  }
>>
>>  void hvm_memory_event_cr4(unsigned long value, unsigned long old)
>>  {
>> -    hvm_memory_event_traps(current->domain->arch.hvm_domain
>> -                             .params[HVM_PARAM_MEMORY_EVENT_CR4],
>> -                           MEM_EVENT_REASON_CR4,
>> -                           value, old, 0, 0);
>> +    hvm_memory_event_cr(MEM_EVENT_REASON_MOV_TO_CR4, value, old);
>>  }
>>
>>  void hvm_memory_event_msr(unsigned long msr, unsigned long value)
>>  {
>> +    mem_event_request_t req = {
>> +        .reason = MEM_EVENT_REASON_MOV_TO_MSR,
>> +        .vcpu_id = current->vcpu_id,
>> +        .u.mov_to_msr.msr = msr,
>> +        .u.mov_to_msr.value = value,
>> +    };
>> +
>>      hvm_memory_event_traps(current->domain->arch.hvm_domain
>> -                             .params[HVM_PARAM_MEMORY_EVENT_MSR],
>> -                           MEM_EVENT_REASON_MSR,
>> -                           value, ~value, 1, msr);
>> +                            .params[HVM_PARAM_MEMORY_EVENT_MSR],
>> +                           &req);
>>  }
>>
>>  int hvm_memory_event_int3(unsigned long gla)
>>  {
>>      uint32_t pfec = PFEC_page_present;
>> -    unsigned long gfn;
>> -    gfn = paging_gva_to_gfn(current, gla, &pfec);
>> +    mem_event_request_t req = {
>> +        .reason = MEM_EVENT_REASON_SOFTWARE_BREAKPOINT,
>> +        .vcpu_id = current->vcpu_id,
>> +        .u.software_breakpoint.gfn = paging_gva_to_gfn(current, gla, &pfec)
>> +    };
>>
>>      return hvm_memory_event_traps(current->domain->arch.hvm_domain
>> -                                    .params[HVM_PARAM_MEMORY_EVENT_INT3],
>> -                                  MEM_EVENT_REASON_INT3,
>> -                                  gfn, 0, 1, gla);
>> +                                   .params[HVM_PARAM_MEMORY_EVENT_INT3],
>> +                                  &req);
>>  }
>>
>>  int hvm_memory_event_single_step(unsigned long gla)
>>  {
>>      uint32_t pfec = PFEC_page_present;
>> -    unsigned long gfn;
>> -    gfn = paging_gva_to_gfn(current, gla, &pfec);
>> +    mem_event_request_t req = {
>> +        .reason = MEM_EVENT_REASON_SINGLESTEP,
>> +        .vcpu_id = current->vcpu_id,
>> +        .u.singlestep.gfn = paging_gva_to_gfn(current, gla, &pfec)
>> +    };
>>
>>      return hvm_memory_event_traps(current->domain->arch.hvm_domain
>> -            .params[HVM_PARAM_MEMORY_EVENT_SINGLE_STEP],
>> -            MEM_EVENT_REASON_SINGLESTEP,
>> -            gfn, 0, 1, gla);
>> +                                   .params[HVM_PARAM_MEMORY_EVENT_SINGLE_STEP],
>> +                                  &req);
>>  }
>>
>>  int nhvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
>> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
>> index 7c0fc7d..8a192ef 100644
>> --- a/xen/arch/x86/mm/mem_sharing.c
>> +++ b/xen/arch/x86/mm/mem_sharing.c
>> @@ -559,7 +559,12 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
>>  {
>>      struct vcpu *v = current;
>>      int rc;
>> -    mem_event_request_t req = { .gfn = gfn };
>> +    mem_event_request_t req = {
>> +        .reason = MEM_EVENT_REASON_MEM_SHARING,
>> +        .vcpu_id = v->vcpu_id,
>> +        .u.mem_sharing.gfn = gfn,
>> +        .u.mem_sharing.p2mt = p2m_ram_shared
>> +    };
>>
>>      if ( (rc = __mem_event_claim_slot(d,
>>                          &d->mem_event->share, allow_sleep)) < 0 )
>> @@ -571,9 +576,6 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
>>          mem_event_vcpu_pause(v);
>>      }
>>
>> -    req.p2mt = p2m_ram_shared;
>> -    req.vcpu_id = v->vcpu_id;
>> -
>>      mem_event_put_request(d, &d->mem_event->share, &req);
>>
>>      return 0;
>> @@ -598,6 +600,12 @@ int mem_sharing_sharing_resume(struct domain *d)
>>      {
>>          struct vcpu *v;
>>
>> +        if ( rsp.version != MEM_EVENT_INTERFACE_VERSION )
>> +        {
>> +            printk(XENLOG_G_WARNING "mem_event interface version mismatch\n");
>> +            continue;
>> +        }
>> +
>>          if ( rsp.flags & MEM_EVENT_FLAG_DUMMY )
>>              continue;
>>
>> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
>> index 6a06e9f..339f8fe 100644
>> --- a/xen/arch/x86/mm/p2m.c
>> +++ b/xen/arch/x86/mm/p2m.c
>> @@ -1081,7 +1081,10 @@ int p2m_mem_paging_evict(struct domain *d, unsigned long gfn)
>>  void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
>>                                  p2m_type_t p2mt)
>>  {
>> -    mem_event_request_t req = { .gfn = gfn };
>> +    mem_event_request_t req = {
>> +        .reason = MEM_EVENT_REASON_MEM_PAGING,
>> +        .u.mem_paging.gfn = gfn
>> +    };
>>
>>      /* We allow no ring in this unique case, because it won't affect
>>       * correctness of the guest execution at this point.  If this is the only
>> @@ -1092,14 +1095,14 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
>>          return;
>>
>>      /* Send release notification to pager */
>> -    req.flags = MEM_EVENT_FLAG_DROP_PAGE;
>> +    req.u.mem_paging.flags = MEM_PAGING_DROP_PAGE;
>>
>>      /* Update stats unless the page hasn't yet been evicted */
>>      if ( p2mt != p2m_ram_paging_out )
>>          atomic_dec(&d->paged_pages);
>>      else
>>          /* Evict will fail now, tag this request for pager */
>> -        req.flags |= MEM_EVENT_FLAG_EVICT_FAIL;
>> +        req.u.mem_paging.flags |= MEM_PAGING_EVICT_FAIL;
>>
>>      mem_event_put_request(d, &d->mem_event->paging, &req);
>>  }
>> @@ -1128,7 +1131,10 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
>>  void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
>>  {
>>      struct vcpu *v = current;
>> -    mem_event_request_t req = { .gfn = gfn };
>> +    mem_event_request_t req = {
>> +        .reason = MEM_EVENT_REASON_MEM_PAGING,
>> +        .u.mem_paging.gfn = gfn
>> +    };
>>      p2m_type_t p2mt;
>>      p2m_access_t a;
>>      mfn_t mfn;
>> @@ -1157,7 +1163,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
>>      {
>>          /* Evict will fail now, tag this request for pager */
>>          if ( p2mt == p2m_ram_paging_out )
>> -            req.flags |= MEM_EVENT_FLAG_EVICT_FAIL;
>> +            req.u.mem_paging.flags |= MEM_PAGING_EVICT_FAIL;
>>
>>          p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
>>      }
>> @@ -1178,7 +1184,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
>>      }
>>
>>      /* Send request to pager */
>> -    req.p2mt = p2mt;
>> +    req.u.mem_paging.p2mt = p2mt;
>>      req.vcpu_id = v->vcpu_id;
>>
>>      mem_event_put_request(d, &d->mem_event->paging, &req);
>> @@ -1300,6 +1306,12 @@ void p2m_mem_paging_resume(struct domain *d)
>>      {
>>          struct vcpu *v;
>>
>> +        if ( rsp.version != MEM_EVENT_INTERFACE_VERSION )
>> +        {
>> +            printk(XENLOG_G_WARNING "mem_event interface version mismatch\n");
>> +            continue;
>> +        }
>> +
>>          if ( rsp.flags & MEM_EVENT_FLAG_DUMMY )
>>              continue;
>>
>> @@ -1310,20 +1322,21 @@ void p2m_mem_paging_resume(struct domain *d)
>>          v = d->vcpu[rsp.vcpu_id];
>>
>>          /* Fix p2m entry if the page was not dropped */
>> -        if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
>> +        if ( !(rsp.u.mem_paging.flags & MEM_PAGING_DROP_PAGE) )
>>          {
>> -            gfn_lock(p2m, rsp.gfn, 0);
>> -            mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, 0, NULL);
>> +            uint64_t gfn = rsp.u.mem_access.gfn;
>> +            gfn_lock(p2m, gfn, 0);
>> +            mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL);
>>              /* Allow only pages which were prepared properly, or pages which
>>               * were nominated but not evicted */
>>              if ( mfn_valid(mfn) && (p2mt == p2m_ram_paging_in) )
>>              {
>> -                p2m_set_entry(p2m, rsp.gfn, mfn, PAGE_ORDER_4K,
>> +                p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
>>                                paging_mode_log_dirty(d) ? p2m_ram_logdirty :
>>                                p2m_ram_rw, a);
>> -                set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
>> +                set_gpfn_from_mfn(mfn_x(mfn), gfn);
>>              }
>> -            gfn_unlock(p2m, rsp.gfn, 0);
>> +            gfn_unlock(p2m, gfn, 0);
>>          }
>>          /* Unpause domain */
>>          if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
>> @@ -1341,92 +1354,94 @@ static void p2m_mem_event_fill_regs(mem_event_request_t *req)
>>      /* Architecture-specific vmcs/vmcb bits */
>>      hvm_funcs.save_cpu_ctxt(curr, &ctxt);
>>
>> -    req->x86_regs.rax = regs->eax;
>> -    req->x86_regs.rcx = regs->ecx;
>> -    req->x86_regs.rdx = regs->edx;
>> -    req->x86_regs.rbx = regs->ebx;
>> -    req->x86_regs.rsp = regs->esp;
>> -    req->x86_regs.rbp = regs->ebp;
>> -    req->x86_regs.rsi = regs->esi;
>> -    req->x86_regs.rdi = regs->edi;
>> -
>> -    req->x86_regs.r8  = regs->r8;
>> -    req->x86_regs.r9  = regs->r9;
>> -    req->x86_regs.r10 = regs->r10;
>> -    req->x86_regs.r11 = regs->r11;
>> -    req->x86_regs.r12 = regs->r12;
>> -    req->x86_regs.r13 = regs->r13;
>> -    req->x86_regs.r14 = regs->r14;
>> -    req->x86_regs.r15 = regs->r15;
>> -
>> -    req->x86_regs.rflags = regs->eflags;
>> -    req->x86_regs.rip    = regs->eip;
>> -
>> -    req->x86_regs.dr7 = curr->arch.debugreg[7];
>> -    req->x86_regs.cr0 = ctxt.cr0;
>> -    req->x86_regs.cr2 = ctxt.cr2;
>> -    req->x86_regs.cr3 = ctxt.cr3;
>> -    req->x86_regs.cr4 = ctxt.cr4;
>> -
>> -    req->x86_regs.sysenter_cs = ctxt.sysenter_cs;
>> -    req->x86_regs.sysenter_esp = ctxt.sysenter_esp;
>> -    req->x86_regs.sysenter_eip = ctxt.sysenter_eip;
>> -
>> -    req->x86_regs.msr_efer = ctxt.msr_efer;
>> -    req->x86_regs.msr_star = ctxt.msr_star;
>> -    req->x86_regs.msr_lstar = ctxt.msr_lstar;
>> +    req->regs.x86.rax = regs->eax;
>> +    req->regs.x86.rcx = regs->ecx;
>> +    req->regs.x86.rdx = regs->edx;
>> +    req->regs.x86.rbx = regs->ebx;
>> +    req->regs.x86.rsp = regs->esp;
>> +    req->regs.x86.rbp = regs->ebp;
>> +    req->regs.x86.rsi = regs->esi;
>> +    req->regs.x86.rdi = regs->edi;
>> +
>> +    req->regs.x86.r8  = regs->r8;
>> +    req->regs.x86.r9  = regs->r9;
>> +    req->regs.x86.r10 = regs->r10;
>> +    req->regs.x86.r11 = regs->r11;
>> +    req->regs.x86.r12 = regs->r12;
>> +    req->regs.x86.r13 = regs->r13;
>> +    req->regs.x86.r14 = regs->r14;
>> +    req->regs.x86.r15 = regs->r15;
>> +
>> +    req->regs.x86.rflags = regs->eflags;
>> +    req->regs.x86.rip    = regs->eip;
>> +
>> +    req->regs.x86.dr7 = curr->arch.debugreg[7];
>> +    req->regs.x86.cr0 = ctxt.cr0;
>> +    req->regs.x86.cr2 = ctxt.cr2;
>> +    req->regs.x86.cr3 = ctxt.cr3;
>> +    req->regs.x86.cr4 = ctxt.cr4;
>> +
>> +    req->regs.x86.sysenter_cs = ctxt.sysenter_cs;
>> +    req->regs.x86.sysenter_esp = ctxt.sysenter_esp;
>> +    req->regs.x86.sysenter_eip = ctxt.sysenter_eip;
>> +
>> +    req->regs.x86.msr_efer = ctxt.msr_efer;
>> +    req->regs.x86.msr_star = ctxt.msr_star;
>> +    req->regs.x86.msr_lstar = ctxt.msr_lstar;
>>
>>      hvm_get_segment_register(curr, x86_seg_fs, &seg);
>> -    req->x86_regs.fs_base = seg.base;
>> +    req->regs.x86.fs_base = seg.base;
>>
>>      hvm_get_segment_register(curr, x86_seg_gs, &seg);
>> -    req->x86_regs.gs_base = seg.base;
>> +    req->regs.x86.gs_base = seg.base;
>>
>>      hvm_get_segment_register(curr, x86_seg_cs, &seg);
>> -    req->x86_regs.cs_arbytes = seg.attr.bytes;
>> +    req->regs.x86.cs_arbytes = seg.attr.bytes;
>>  }
>>
>> -void p2m_mem_event_emulate_check(struct vcpu *v, const mem_event_response_t *rsp)
>> +void p2m_mem_event_emulate_check(struct vcpu *v,
>> +                                 const mem_event_response_t *rsp)
>>  {
>>      /* Mark vcpu for skipping one instruction upon rescheduling. */
>> -    if ( rsp->flags & MEM_EVENT_FLAG_EMULATE )
>> +    if ( rsp->flags & MEM_ACCESS_EMULATE )
>>      {
>>          xenmem_access_t access;
>>          bool_t violation = 1;
>> +        const struct mem_event_mem_access *data = &rsp->u.mem_access;
>>
>> -        if ( p2m_get_mem_access(v->domain, rsp->gfn, &access) == 0 )
>> +        if ( p2m_get_mem_access(v->domain, data->gfn, &access) == 0 )
>>          {
>>              switch ( access )
>>              {
>>              case XENMEM_access_n:
>>              case XENMEM_access_n2rwx:
>>              default:
>> -                violation = rsp->access_r || rsp->access_w || rsp->access_x;
>> +                violation = data->flags & MEM_ACCESS_RWX;
>>                  break;
>>
>>              case XENMEM_access_r:
>> -                violation = rsp->access_w || rsp->access_x;
>> +                violation = data->flags & MEM_ACCESS_WX;
>>                  break;
>>
>>              case XENMEM_access_w:
>> -                violation = rsp->access_r || rsp->access_x;
>> +                violation = data->flags & MEM_ACCESS_RX;
>>                  break;
>>
>>              case XENMEM_access_x:
>> -                violation = rsp->access_r || rsp->access_w;
>> +                violation = data->flags & MEM_ACCESS_RW;
>>                  break;
>>
>>              case XENMEM_access_rx:
>>              case XENMEM_access_rx2rw:
>> -                violation = rsp->access_w;
>> +                violation = data->flags & MEM_ACCESS_W;
>>                  break;
>>
>>              case XENMEM_access_wx:
>> -                violation = rsp->access_r;
>> +                violation = data->flags & MEM_ACCESS_R;
>>                  break;
>>
>>              case XENMEM_access_rw:
>> -                violation = rsp->access_x;
>> +                violation = data->flags & MEM_ACCESS_X;
>>                  break;
>>
>>              case XENMEM_access_rwx:
>> @@ -1532,7 +1547,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
>>      if ( v->arch.mem_event.emulate_flags )
>>      {
>>          hvm_mem_event_emulate_one((v->arch.mem_event.emulate_flags &
>> -                                   MEM_EVENT_FLAG_EMULATE_NOWRITE) != 0,
>> +                                   MEM_ACCESS_EMULATE_NOWRITE) != 0,
>>                                    TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
>>
>>          v->arch.mem_event.emulate_flags = 0;
>> @@ -1544,24 +1559,28 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
>>      if ( req )
>>      {
>>          *req_ptr = req;
>> -        req->reason = MEM_EVENT_REASON_VIOLATION;
>> +        req->reason = MEM_EVENT_REASON_MEM_ACCESS;
>>
>>          /* Pause the current VCPU */
>>          if ( p2ma != p2m_access_n2rwx )
>>              req->flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
>>
>>          /* Send request to mem event */
>> -        req->gfn = gfn;
>> -        req->offset = gpa & ((1 << PAGE_SHIFT) - 1);
>> -        req->gla_valid = npfec.gla_valid;
>> -        req->gla = gla;
>> -        if ( npfec.kind == npfec_kind_with_gla )
>> -            req->fault_with_gla = 1;
>> -        else if ( npfec.kind == npfec_kind_in_gpt )
>> -            req->fault_in_gpt = 1;
>> -        req->access_r = npfec.read_access;
>> -        req->access_w = npfec.write_access;
>> -        req->access_x = npfec.insn_fetch;
>> +        req->u.mem_access.gfn = gfn;
>> +        req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
>> +        if ( npfec.gla_valid )
>> +        {
>> +            req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
>> +            req->u.mem_access.gla = gla;
>> +
>> +            if ( npfec.kind == npfec_kind_with_gla )
>> +                req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
>> +            else if ( npfec.kind == npfec_kind_in_gpt )
>> +                req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
>> +        }
>> +        req->u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
>> +        req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
>> +        req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
>>          req->vcpu_id = v->vcpu_id;
>>
>>          p2m_mem_event_fill_regs(req);
>> diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
>> index d8aac5f..9c5b7a6 100644
>> --- a/xen/common/mem_access.c
>> +++ b/xen/common/mem_access.c
>> @@ -38,6 +38,12 @@ void mem_access_resume(struct domain *d)
>>      {
>>          struct vcpu *v;
>>
>> +        if ( rsp.version != MEM_EVENT_INTERFACE_VERSION )
>> +        {
>> +            printk(XENLOG_G_WARNING "mem_event interface version mismatch\n");
>> +            continue;
>> +        }
>> +
>>          if ( rsp.flags & MEM_EVENT_FLAG_DUMMY )
>>              continue;
>>
>> diff --git a/xen/common/mem_event.c b/xen/common/mem_event.c
>> index 7cfbe8e..8ab06ce 100644
>> --- a/xen/common/mem_event.c
>> +++ b/xen/common/mem_event.c
>> @@ -291,6 +291,8 @@ void mem_event_put_request(struct domain *d,
>>  #endif
>>      }
>>
>> +    req->version = MEM_EVENT_INTERFACE_VERSION;
>> +
>>      mem_event_ring_lock(med);
>>
>>      /* Due to the reservations, this step must succeed. */
>> diff --git a/xen/include/public/mem_event.h b/xen/include/public/mem_event.h
>> index 599f9e8..1ef65d3 100644
>> --- a/xen/include/public/mem_event.h
>> +++ b/xen/include/public/mem_event.h
>> @@ -28,39 +28,59 @@
>>  #define _XEN_PUBLIC_MEM_EVENT_H
>>
>>  #include "xen.h"
>> +
>> +#define MEM_EVENT_INTERFACE_VERSION 0x00000001
>> +
>> +#if defined(__XEN__) || defined(__XEN_TOOLS__)
>> +
>>  #include "io/ring.h"
>>
>> -/* Memory event flags */
>> +/*
>> + * Memory event flags
>> + */
>> +
>> +/*
>> + * VCPU_PAUSED in a request signals that the vCPU triggering the event has been
>> + *  paused
>> + * VCPU_PAUSED in a response signals to unpause the vCPU
>> + */
>>  #define MEM_EVENT_FLAG_VCPU_PAUSED     (1 << 0)
>> -#define MEM_EVENT_FLAG_DROP_PAGE       (1 << 1)
>> -#define MEM_EVENT_FLAG_EVICT_FAIL      (1 << 2)
>> -#define MEM_EVENT_FLAG_FOREIGN         (1 << 3)
>> -#define MEM_EVENT_FLAG_DUMMY           (1 << 4)
>> +
>>  /*
>> - * Emulate the fault-causing instruction (if set in the event response flags).
>> - * This will allow the guest to continue execution without lifting the page
>> - * access restrictions.
>> + * Flags to aid debugging mem_event
>> + */
>> +#define MEM_EVENT_FLAG_FOREIGN         (1 << 1)
>> +#define MEM_EVENT_FLAG_DUMMY           (1 << 2)
>> +
>> +/*
>> + * Reasons for the vm event request
>>   */
>> -#define MEM_EVENT_FLAG_EMULATE         (1 << 5)
>> +
>> +/* Default case */
>> +#define MEM_EVENT_REASON_UNKNOWN                 0
>> +/* Memory access violation */
>> +#define MEM_EVENT_REASON_MEM_ACCESS              1
>> +/* Memory sharing event */
>> +#define MEM_EVENT_REASON_MEM_SHARING             2
>> +/* Memory paging event */
>> +#define MEM_EVENT_REASON_MEM_PAGING              3
>> +/* CR0 was updated */
>> +#define MEM_EVENT_REASON_MOV_TO_CR0              4
>> +/* CR3 was updated */
>> +#define MEM_EVENT_REASON_MOV_TO_CR3              5
>> +/* CR4 was updated */
>> +#define MEM_EVENT_REASON_MOV_TO_CR4              6
>> +/* An MSR was updated. Does NOT honour HVMPME_onchangeonly */
>> +#define MEM_EVENT_REASON_MOV_TO_MSR              7
>> +/* Debug operation executed (e.g. int3) */
>> +#define MEM_EVENT_REASON_SOFTWARE_BREAKPOINT     8
>> +/* Single-step (e.g. MTF) */
>> +#define MEM_EVENT_REASON_SINGLESTEP              9
>> +
>>  /*
>> - * Same as MEM_EVENT_FLAG_EMULATE, but with write operations or operations
>> - * potentially having side effects (like memory mapped or port I/O) disabled.
>> + * Using a custom struct (not hvm_hw_cpu) so as to not fill
>> + * the mem_event ring buffer too quickly.
>>   */
>> -#define MEM_EVENT_FLAG_EMULATE_NOWRITE (1 << 6)
>> -
>> -/* Reasons for the memory event request */
>> -#define MEM_EVENT_REASON_UNKNOWN     0    /* typical reason */
>> -#define MEM_EVENT_REASON_VIOLATION   1    /* access violation, GFN is address */
>> -#define MEM_EVENT_REASON_CR0         2    /* CR0 was hit: gfn is new CR0 value, gla is previous */
>> -#define MEM_EVENT_REASON_CR3         3    /* CR3 was hit: gfn is new CR3 value, gla is previous */
>> -#define MEM_EVENT_REASON_CR4         4    /* CR4 was hit: gfn is new CR4 value, gla is previous */
>> -#define MEM_EVENT_REASON_INT3        5    /* int3 was hit: gla/gfn are RIP */
>> -#define MEM_EVENT_REASON_SINGLESTEP  6    /* single step was invoked: gla/gfn are RIP */
>> -#define MEM_EVENT_REASON_MSR         7    /* MSR was hit: gfn is MSR value, gla is MSR address;
>> -                                             does NOT honour HVMPME_onchangeonly */
>> -
>> -/* Using a custom struct (not hvm_hw_cpu) so as to not fill
>> - * the mem_event ring buffer too quickly. */
>>  struct mem_event_regs_x86 {
>>      uint64_t rax;
>>      uint64_t rcx;
>> @@ -97,31 +117,102 @@ struct mem_event_regs_x86 {
>>      uint32_t _pad;
>>  };
>>
>> -typedef struct mem_event_st {
>> -    uint32_t flags;
>> -    uint32_t vcpu_id;
>> +/*
>> + * mem_access flag definitions
>> + *
>> + * These flags are set only as part of a mem_event request.
>> + *
>> + * R/W/X: Defines the type of violation that has triggered the event
>> + *        Multiple types can be set in a single violation!
>> + * GLA_VALID: If the gla field holds a guest VA associated with the event
>> + * FAULT_WITH_GLA: If the violation was triggered by accessing gla
>> + * FAULT_IN_GPT: If the violation was triggered during translating gla
>> + */
>> +#define MEM_ACCESS_R                    (1 << 0)
>> +#define MEM_ACCESS_W                    (1 << 1)
>> +#define MEM_ACCESS_X                    (1 << 2)
>> +#define MEM_ACCESS_RWX                  (MEM_ACCESS_R | MEM_ACCESS_W | MEM_ACCESS_X)
>> +#define MEM_ACCESS_RW                   (MEM_ACCESS_R | MEM_ACCESS_W)
>> +#define MEM_ACCESS_RX                   (MEM_ACCESS_R | MEM_ACCESS_X)
>> +#define MEM_ACCESS_WX                   (MEM_ACCESS_W | MEM_ACCESS_X)
>> +#define MEM_ACCESS_GLA_VALID            (1 << 3)
>> +#define MEM_ACCESS_FAULT_WITH_GLA       (1 << 4)
>> +#define MEM_ACCESS_FAULT_IN_GPT         (1 << 5)
>> +/*
>> + * The following flags can be set in the response.
>> + *
>> + * Emulate the fault-causing instruction (if set in the event response flags).
>> + * This will allow the guest to continue execution without lifting the page
>> + * access restrictions.
>> + */
>> +#define MEM_ACCESS_EMULATE              (1 << 6)
>> +/*
>> + * Same as MEM_ACCESS_EMULATE, but with write operations or operations
>> + * potentially having side effects (like memory mapped or port I/O) disabled.
>> + */
>> +#define MEM_ACCESS_EMULATE_NOWRITE      (1 << 7)
>>
>> -    uint64_t gfn;
>> +struct mem_event_mem_access {
>> +    uint32_t gfn;
>> +    uint32_t flags; /* MEM_ACCESS_* */
>>      uint64_t offset;
>> -    uint64_t gla; /* if gla_valid */
>> +    uint64_t gla;   /* if flags has MEM_ACCESS_GLA_VALID set */
>> +};
>> +
>> +struct mem_event_mov_to_cr {
>> +    uint64_t new_value;
>> +    uint64_t old_value;
>> +};
>>
>> +struct mem_event_debug {
>> +    uint32_t gfn;
>> +    uint32_t _pad;
>> +};
>> +
>> +struct mem_event_mov_to_msr {
>> +    uint64_t msr;
>> +    uint64_t value;
>> +};
>> +
>> +#define MEM_PAGING_DROP_PAGE       (1 << 0)
>> +#define MEM_PAGING_EVICT_FAIL      (1 << 1)
>> +struct mem_event_paging {
>> +    uint32_t gfn;
>> +    uint32_t p2mt;
>> +    uint32_t flags;
>> +    uint32_t _pad;
>> +};
>> +
>> +struct mem_event_sharing {
>> +    uint32_t gfn;
>>      uint32_t p2mt;
>> +};
>> +
>> +typedef struct mem_event_st {
>> +    uint32_t version;   /* MEM_EVENT_INTERFACE_VERSION */
>> +    uint32_t flags;     /* MEM_EVENT_FLAG_* */
>> +    uint32_t reason;    /* MEM_EVENT_REASON_* */
>> +    uint32_t vcpu_id;
>>
>> -    uint16_t access_r:1;
>> -    uint16_t access_w:1;
>> -    uint16_t access_x:1;
>> -    uint16_t gla_valid:1;
>> -    uint16_t fault_with_gla:1;
>> -    uint16_t fault_in_gpt:1;
>> -    uint16_t available:10;
>> +    union {
>> +        struct mem_event_paging                mem_paging;
>> +        struct mem_event_sharing               mem_sharing;
>> +        struct mem_event_mem_access            mem_access;
>> +        struct mem_event_mov_to_cr             mov_to_cr;
>> +        struct mem_event_mov_to_msr            mov_to_msr;
>> +        struct mem_event_debug                 software_breakpoint;
>> +        struct mem_event_debug                 singlestep;
>> +    } u;
>>
>> -    uint16_t reason;
>> -    struct mem_event_regs_x86 x86_regs;
>> +    union {
>> +        struct mem_event_regs_x86 x86;
>> +    } regs;
>>  } mem_event_request_t, mem_event_response_t;
>>
>>  DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t);
>>
>> -#endif
>> +#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
>> +#endif /* _XEN_PUBLIC_MEM_EVENT_H */
>>
>>  /*
>>   * Local variables:
>> diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
>> index 595f953..2ef1728 100644
>> --- a/xen/include/public/memory.h
>> +++ b/xen/include/public/memory.h
>> @@ -380,7 +380,8 @@ struct xen_mem_event_op {
>>      /* PAGING_PREP IN: buffer to immediately fill page in */
>>      uint64_aligned_t    buffer;
>>      /* Other OPs */
>> -    uint64_aligned_t    gfn;           /* IN:  gfn of page being operated on */
>> +    uint32_t    gfn;           /* IN:  gfn of page being operated on */
>> +    uint32_t    _pad;
>>  };
>>  typedef struct xen_mem_event_op xen_mem_event_op_t;
>>  DEFINE_XEN_GUEST_HANDLE(xen_mem_event_op_t);
>> @@ -469,21 +470,21 @@ struct xen_mem_sharing_op {
>>      union {
>>          struct mem_sharing_op_nominate {  /* OP_NOMINATE_xxx           */
>>              union {
>> -                uint64_aligned_t gfn;     /* IN: gfn to nominate       */
>> +                uint32_t      gfn;        /* IN: gfn to nominate       */
>>                  uint32_t      grant_ref;  /* IN: grant ref to nominate */
>>              } u;
>>              uint64_aligned_t  handle;     /* OUT: the handle           */
>>          } nominate;
>>          struct mem_sharing_op_share {     /* OP_SHARE/ADD_PHYSMAP */
>> -            uint64_aligned_t source_gfn;    /* IN: the gfn of the source page */
>> +            uint32_t source_gfn;          /* IN: the gfn of the source page */
>> +            uint32_t client_gfn;          /* IN: the client gfn */
>>              uint64_aligned_t source_handle; /* IN: handle to the source page */
>> -            uint64_aligned_t client_gfn;    /* IN: the client gfn */
>>              uint64_aligned_t client_handle; /* IN: handle to the client page */
>>              domid_t  client_domain; /* IN: the client domain id */
>>          } share;
>>          struct mem_sharing_op_debug {     /* OP_DEBUG_xxx */
>>              union {
>> -                uint64_aligned_t gfn;      /* IN: gfn to debug          */
>> +                uint32_t gfn;              /* IN: gfn to debug          */
>>                  uint64_aligned_t mfn;      /* IN: mfn to debug          */
>>                  uint32_t gref;     /* IN: gref to debug         */
>>              } u;
>

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

