
Re: [Xen-devel] query the page type of a hvm page from within a hvm guest



>>> On 02.05.11 at 18:27, Olaf Hering <olaf@xxxxxxxxx> wrote:
> So what about this version then?

Looks good to me (you may want to replace a few tabs with spaces in
the hypervisor portion of the patch, though).

Jan

> ---
>  unmodified_drivers/linux-2.6/platform-pci/platform-pci.c |   36 +++++++++++++++
>  xen/arch/ia64/vmx/vmx_hypercall.c                        |    1
>  xen/arch/x86/hvm/hvm.c                                   |   27 +++++++++++
>  xen/include/public/hvm/hvm_op.h                          |   25 ++++++++--
>  4 files changed, 84 insertions(+), 5 deletions(-)
> 
> diff -r 2f08c89b767d unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
> --- a/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c        Wed Apr 20 17:13:08 2011 +0100
> +++ b/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c        Mon May 02 17:42:57 2011 +0200
> @@ -349,6 +349,32 @@ static int check_platform_magic(struct d
>       return -ENODEV;
>  }
>  
> +#ifdef HAVE_OLDMEM_PFN_IS_RAM
> +static int xen_oldmem_pfn_is_ram(unsigned long pfn)
> +{
> +     struct xen_hvm_get_mem_type a;
> +     int ret;
> +
> +     a.domid = DOMID_SELF;
> +     a.pfn = pfn;
> +     if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
> +             return -ENXIO;
> +
> +     switch (a.mem_type) {
> +             case HVMMEM_mmio_dm:
> +                     ret = 0;
> +                     break;
> +             case HVMMEM_ram_rw:
> +             case HVMMEM_ram_ro:
> +             default:
> +                     ret = 1;
> +                     break;
> +     }
> +
> +     return ret;
> +}
> +#endif
> +
>  static int __devinit platform_pci_init(struct pci_dev *pdev,
>                                      const struct pci_device_id *ent)
>  {
> @@ -417,6 +443,9 @@ static int __devinit platform_pci_init(s
>       if ((ret = xen_panic_handler_init()))
>               goto out;
>  
> +#ifdef HAVE_OLDMEM_PFN_IS_RAM
> +     register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
> +#endif
>   out:
>       if (ret) {
>               pci_release_region(pdev, 0);
> diff -r 2f08c89b767d xen/arch/ia64/vmx/vmx_hypercall.c
> --- a/xen/arch/ia64/vmx/vmx_hypercall.c       Wed Apr 20 17:13:08 2011 +0100
> +++ b/xen/arch/ia64/vmx/vmx_hypercall.c       Mon May 02 17:42:57 2011 +0200
> @@ -217,6 +217,7 @@ do_hvm_op(unsigned long op, XEN_GUEST_HA
>          break;
>      }
>  
> +    case HVMOP_get_mem_type:
>      case HVMOP_set_mem_type:
>      case HVMOP_set_mem_access:
>      case HVMOP_get_mem_access:
> diff -r 2f08c89b767d xen/arch/x86/hvm/hvm.c
> --- a/xen/arch/x86/hvm/hvm.c  Wed Apr 20 17:13:08 2011 +0100
> +++ b/xen/arch/x86/hvm/hvm.c  Mon May 02 17:42:57 2011 +0200
> @@ -3676,6 +3676,37 @@ long do_hvm_op(unsigned long op, XEN_GUE
>          break;
>      }
>  
> +    case HVMOP_get_mem_type:
> +    {
> +        struct xen_hvm_get_mem_type a;
> +        struct domain *d;
> +        p2m_type_t t;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        rc = rcu_lock_remote_target_domain_by_id(a.domid, &d);
> +        if ( rc != 0 )
> +            return rc;
> +
> +        rc = -EINVAL;
> +        if ( is_hvm_domain(d) )
> +        {
> +            gfn_to_mfn_unshare(p2m_get_hostp2m(d), a.pfn, &t, 0);
> +            if ( p2m_is_mmio(t) )
> +                a.mem_type = HVMMEM_mmio_dm;
> +            else if ( p2m_is_readonly(t) )
> +                a.mem_type = HVMMEM_ram_ro;
> +            else if ( p2m_is_ram(t) )
> +                a.mem_type = HVMMEM_ram_rw;
> +            else
> +                a.mem_type = HVMMEM_mmio_dm;
> +            rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
> +        }
> +        rcu_unlock_domain(d);
> +        break;
> +    }
> +
>      case HVMOP_set_mem_type:
>      {
>          struct xen_hvm_set_mem_type a;
> diff -r 2f08c89b767d xen/include/public/hvm/hvm_op.h
> --- a/xen/include/public/hvm/hvm_op.h Wed Apr 20 17:13:08 2011 +0100
> +++ b/xen/include/public/hvm/hvm_op.h Mon May 02 17:42:57 2011 +0200
> @@ -76,6 +76,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_
>  /* Flushes all VCPU TLBs: @arg must be NULL. */
>  #define HVMOP_flush_tlbs          5
>  
> +typedef enum {
> +    HVMMEM_ram_rw,             /* Normal read/write guest RAM */
> +    HVMMEM_ram_ro,             /* Read-only; writes are discarded */
> +    HVMMEM_mmio_dm,            /* Reads and write go to the device model */
> +} hvmmem_type_t;
> +
>  /* Following tools-only interfaces may change in future. */
>  #if defined(__XEN__) || defined(__XEN_TOOLS__)
>  
> @@ -109,11 +115,6 @@ typedef struct xen_hvm_modified_memory x
>  DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
>  
>  #define HVMOP_set_mem_type    8
> -typedef enum {
> -    HVMMEM_ram_rw,             /* Normal read/write guest RAM */
> -    HVMMEM_ram_ro,             /* Read-only; writes are discarded */
> -    HVMMEM_mmio_dm,            /* Reads and write go to the device model */
> -} hvmmem_type_t;
>  /* Notify that a region of memory is to be treated in a specific way. */
>  struct xen_hvm_set_mem_type {
>      /* Domain to be updated. */
> @@ -223,6 +224,20 @@ struct xen_hvm_inject_trap {
>  typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
>  
> +#define HVMOP_get_mem_type    15
> +/* Return hvmmem_type_t for the specified pfn. */
> +struct xen_hvm_get_mem_type {
> +    /* Domain to be queried. */
> +    domid_t domid;
> +    /* OUT variable. */
> +    uint8_t mem_type;
> +    /* IN variable. */
> +    uint64_t pfn;
> +};
> +typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
> +
> +
>  #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
>  
>  #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
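
For completeness, a guest-side caller uses the new interface much as the
platform-pci.c hunk above does: fill in struct xen_hvm_get_mem_type and
issue the hypercall. A minimal sketch (assuming only the definitions added
by this patch plus the guest kernel's HYPERVISOR_hvm_op wrapper as used in
platform-pci.c; the helper name is illustrative):

    /* Sketch only: query the type of a single guest pfn via
     * HVMOP_get_mem_type, using the struct added to hvm_op.h above. */
    static int query_pfn_type(unsigned long pfn, uint8_t *type)
    {
        struct xen_hvm_get_mem_type a = {
            .domid = DOMID_SELF,   /* query the calling domain itself */
            .pfn   = pfn,          /* IN: pfn to look up */
        };
        int rc = HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a);

        if (rc)
            return rc;             /* hypercall failed, e.g. non-HVM domain */

        *type = a.mem_type;        /* OUT: HVMMEM_ram_rw, _ram_ro or _mmio_dm */
        return 0;
    }

As in xen_oldmem_pfn_is_ram() above, a caller can treat any result other
than HVMMEM_mmio_dm as RAM.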


