[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v9 08/13] Add IOREQ_TYPE_VMWARE_PORT



> -----Original Message-----
> From: xen-devel-bounces@xxxxxxxxxxxxx [mailto:xen-devel-
> bounces@xxxxxxxxxxxxx] On Behalf Of Don Slutz
> Sent: 16 February 2015 23:05
> To: xen-devel@xxxxxxxxxxxxx
> Cc: Kevin Tian; Keir (Xen.org); Ian Campbell; Stefano Stabellini; Jun 
> Nakajima;
> Eddie Dong; Ian Jackson; Don Slutz; Tim (Xen.org); George Dunlap; Aravind
> Gopalakrishnan; Jan Beulich; Andrew Cooper; Boris Ostrovsky; Suravee
> Suthikulpanit
> Subject: [Xen-devel] [PATCH v9 08/13] Add IOREQ_TYPE_VMWARE_PORT
> 
> This adds synchronization of the 6 vcpu registers (only 32bits of
> them) that vmport.c needs between Xen and QEMU.
> 
> This is to avoid a 2nd and 3rd exchange between QEMU and Xen to
> fetch and put these 6 vcpu registers used by the code in vmport.c
> and vmmouse.c
> 
> In the tools, enable usage of QEMU's vmport code.
> 
> The currently most useful VMware port support that QEMU has is the
> VMware mouse support.  Xorg included a VMware mouse support that
> uses absolute mode.  This makes using a mouse in X11 much nicer.
> 
> Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
> ---
> v9:
>   New code was presented as an RFC before this.
> 
>   Paul Durrant suggested I add support for other IOREQ types
>   to HVMOP_map_io_range_to_ioreq_server.
>     I have done this.
> 
>  tools/libxc/xc_hvm_build_x86.c         |   5 +-
>  tools/libxl/libxl_dm.c                 |   4 +
>  xen/arch/x86/hvm/emulate.c             |  48 ++++++++++
>  xen/arch/x86/hvm/hvm.c                 | 159
> ++++++++++++++++++++++++++++++---
>  xen/arch/x86/hvm/io.c                  |  15 ++++
>  xen/arch/x86/hvm/vmware/vmport.c       |   4 +-
>  xen/arch/x86/x86_emulate/x86_emulate.h |   2 +
>  xen/include/asm-x86/hvm/domain.h       |   3 +-
>  xen/include/asm-x86/hvm/hvm.h          |   2 +
>  xen/include/public/hvm/hvm_op.h        |   6 ++
>  xen/include/public/hvm/ioreq.h         |  17 ++++
>  xen/include/public/hvm/params.h        |   4 +-
>  12 files changed, 251 insertions(+), 18 deletions(-)
> 
> diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c
> index c81a25b..e338667 100644
> --- a/tools/libxc/xc_hvm_build_x86.c
> +++ b/tools/libxc/xc_hvm_build_x86.c
> @@ -46,7 +46,8 @@
>  #define SPECIALPAGE_IOREQ    5
>  #define SPECIALPAGE_IDENT_PT 6
>  #define SPECIALPAGE_CONSOLE  7
> -#define NR_SPECIAL_PAGES     8
> +#define SPECIALPAGE_VMPORT_REGS 8
> +#define NR_SPECIAL_PAGES     9
>  #define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x))
> 
>  #define NR_IOREQ_SERVER_PAGES 8
> @@ -493,6 +494,8 @@ static int setup_guest(xc_interface *xch,
>                       special_pfn(SPECIALPAGE_BUFIOREQ));
>      xc_hvm_param_set(xch, dom, HVM_PARAM_IOREQ_PFN,
>                       special_pfn(SPECIALPAGE_IOREQ));
> +    xc_hvm_param_set(xch, dom, HVM_PARAM_VMPORT_REGS_PFN,
> +                     special_pfn(SPECIALPAGE_VMPORT_REGS));
>      xc_hvm_param_set(xch, dom, HVM_PARAM_CONSOLE_PFN,
>                       special_pfn(SPECIALPAGE_CONSOLE));
>      xc_hvm_param_set(xch, dom, HVM_PARAM_PAGING_RING_PFN,
> diff --git a/tools/libxl/libxl_dm.c b/tools/libxl/libxl_dm.c
> index c27f9a4..620013c 100644
> --- a/tools/libxl/libxl_dm.c
> +++ b/tools/libxl/libxl_dm.c
> @@ -765,6 +765,10 @@ static char **
> libxl__build_device_model_args_new(libxl__gc *gc,
>                                              machinearg, max_ram_below_4g);
>              }
>          }
> +        if (libxl_defbool_val(c_info->vmware_port)) {
> +            machinearg = libxl__sprintf(gc, "%s,vmport=on",
> +                                        machinearg);
> +        }
>          flexarray_append(dm_args, machinearg);
>          for (i = 0; b_info->extra_hvm && b_info->extra_hvm[i] != NULL; i++)
>              flexarray_append(dm_args, b_info->extra_hvm[i]);
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index a6a6a5c..799894b 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -240,6 +240,48 @@ static int hvmemul_do_io(
>          }
>          break;
>      }
> +    case X86EMUL_VMPORT_SEND:
> +    {
> +        struct hvm_ioreq_server *s;
> +        vmware_regs_t *vr;
> +
> +        BUILD_BUG_ON(sizeof(ioreq_t) < sizeof(vmware_regs_t));
> +
> +        p.type = IOREQ_TYPE_VMWARE_PORT;
> +        s = hvm_select_ioreq_server(curr->domain, &p);
> +        vr = get_vmport_regs_any(s, curr);
> +
> +        /*
> +         * If there is no suitable backing DM, just ignore accesses.  If
> +         * we do not have access to registers to pass to QEMU, just
> +         * ignore access.
> +         */
> +        if ( !s || !vr )
> +        {
> +            hvm_complete_assist_req(&p);
> +            rc = X86EMUL_OKAY;
> +            vio->io_state = HVMIO_none;
> +        }
> +        else
> +        {
> +            struct cpu_user_regs *regs = guest_cpu_user_regs();
> +
> +            p.data = regs->rax;
> +            vr->ebx = regs->_ebx;
> +            vr->ecx = regs->_ecx;
> +            vr->edx = regs->_edx;
> +            vr->esi = regs->_esi;
> +            vr->edi = regs->_edi;
> +
> +            vio->io_state = HVMIO_handle_pio_awaiting_completion;
> +            if ( !hvm_send_assist_req(s, &p) )
> +            {
> +                rc = X86EMUL_RETRY;
> +                vio->io_state = HVMIO_none;
> +            }
> +        }
> +        break;
> +    }
>      default:
>          BUG();
>      }
> @@ -248,6 +290,12 @@ static int hvmemul_do_io(
>      {
>          if ( ram_page )
>              put_page(ram_page);
> +        /*
> +         * If X86EMUL_VMPORT_SEND, completion in hvm_io_assist()
> +         * with no re-emulation required
> +         */
> +        if ( rc == X86EMUL_VMPORT_SEND )
> +            rc = X86EMUL_OKAY;
>          return rc;
>      }
> 
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 812f880..5b11091 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -392,6 +392,47 @@ static ioreq_t *get_ioreq(struct hvm_ioreq_server
> *s, struct vcpu *v)
>      return &p->vcpu_ioreq[v->vcpu_id];
>  }
> 
> +static vmware_regs_t *get_vmport_regs_one(struct hvm_ioreq_server *s,
> +                                          struct vcpu *v)
> +{
> +    struct hvm_ioreq_vcpu *sv;
> +
> +    list_for_each_entry ( sv,
> +                          &s->ioreq_vcpu_list,
> +                          list_entry )
> +    {
> +        if ( sv->vcpu == v )
> +        {
> +            shared_vmport_iopage_t *p = s->vmport_ioreq.va;
> +            if ( !p )
> +                return NULL;
> +            return &p->vcpu_vmport_regs[v->vcpu_id];
> +        }
> +    }
> +    return NULL;
> +}
> +
> +vmware_regs_t *get_vmport_regs_any(struct hvm_ioreq_server *s, struct
> vcpu *v)
> +{
> +    struct domain *d = v->domain;
> +
> +    ASSERT((v == current) || !vcpu_runnable(v));
> +
> +    if ( s )
> +        return get_vmport_regs_one(s, v);
> +
> +    list_for_each_entry ( s,
> +                          &d->arch.hvm_domain.ioreq_server.list,
> +                          list_entry )
> +    {
> +        vmware_regs_t *ret = get_vmport_regs_one(s, v);
> +
> +        if ( ret )
> +            return ret;
> +    }
> +    return NULL;
> +}
> +
>  bool_t hvm_io_pending(struct vcpu *v)
>  {
>      struct domain *d = v->domain;
> @@ -501,22 +542,50 @@ static void hvm_free_ioreq_gmfn(struct domain
> *d, unsigned long gmfn)
>      clear_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
>  }
> 
> -static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool_t
> buf)
> +static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, int buf)
>  {
> -    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> +    struct hvm_ioreq_page *iorp = NULL;
> +
> +    switch ( buf )
> +    {
> +    case 0:
> +        iorp = &s->ioreq;
> +        break;
> +    case 1:
> +        iorp = &s->bufioreq;
> +        break;
> +    case 2:
> +        iorp = &s->vmport_ioreq;
> +        break;

Now that buf is no longer a bool, could we have #defined values for the types 
rather than magic numbers?

> +    }
> +    ASSERT(iorp);
> 
>      destroy_ring_for_helper(&iorp->va, iorp->page);
>  }
> 
>  static int hvm_map_ioreq_page(
> -    struct hvm_ioreq_server *s, bool_t buf, unsigned long gmfn)
> +    struct hvm_ioreq_server *s, int buf, unsigned long gmfn)
>  {
>      struct domain *d = s->domain;
> -    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> +    struct hvm_ioreq_page *iorp = NULL;
>      struct page_info *page;
>      void *va;
>      int rc;
> 
> +    switch ( buf )
> +    {
> +    case 0:
> +        iorp = &s->ioreq;
> +        break;
> +    case 1:
> +        iorp = &s->bufioreq;
> +        break;
> +    case 2:
> +        iorp = &s->vmport_ioreq;
> +        break;
> +    }
> +    ASSERT(iorp);
> +
>      if ( (rc = prepare_ring_for_helper(d, gmfn, &page, &va)) )
>          return rc;
> 
> @@ -736,6 +805,7 @@ static int hvm_ioreq_server_map_pages(struct
> hvm_ioreq_server *s,
>  {
>      struct domain *d = s->domain;
>      unsigned long ioreq_pfn, bufioreq_pfn;
> +    unsigned long vmport_ioreq_pfn = d-
> >arch.hvm_domain.params[HVM_PARAM_VMPORT_REGS_PFN];
>      int rc;
> 
>      if ( is_default )
> @@ -774,8 +844,16 @@ static int hvm_ioreq_server_map_pages(struct
> hvm_ioreq_server *s,
>              goto fail4;
>      }
> 
> +    rc = hvm_map_ioreq_page(s, 2, vmport_ioreq_pfn);
> +    if ( rc )
> +        goto fail5;
> +
>      return 0;
> 
> +fail5:
> +    if ( handle_bufioreq )
> +        hvm_unmap_ioreq_page(s, 1);
> +
>  fail4:
>      hvm_unmap_ioreq_page(s, 0);
> 
> @@ -796,6 +874,10 @@ static void hvm_ioreq_server_unmap_pages(struct
> hvm_ioreq_server *s,
>  {
>      struct domain *d = s->domain;
>      bool_t handle_bufioreq = ( s->bufioreq.va != NULL );
> +    bool_t handle_vmport_ioreq = ( s->vmport_ioreq.va != NULL );
> +
> +    if ( handle_vmport_ioreq )
> +        hvm_unmap_ioreq_page(s, 2);
> 
>      if ( handle_bufioreq )
>          hvm_unmap_ioreq_page(s, 1);
> @@ -835,12 +917,42 @@ static int hvm_ioreq_server_alloc_rangesets(struct
> hvm_ioreq_server *s,
>      for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
>      {
>          char *name;
> +        char *type_name = NULL;
> +        unsigned int limit;
> 
> -        rc = asprintf(&name, "ioreq_server %d %s", s->id,
> -                      (i == HVMOP_IO_RANGE_PORT) ? "port" :
> -                      (i == HVMOP_IO_RANGE_MEMORY) ? "memory" :
> -                      (i == HVMOP_IO_RANGE_PCI) ? "pci" :
> -                      "");
> +        switch ( i )
> +        {
> +        case HVMOP_IO_RANGE_PORT:
> +            type_name = "port";
> +            limit = MAX_NR_IO_RANGES;
> +            break;
> +        case HVMOP_IO_RANGE_MEMORY:
> +            type_name = "memory";
> +            limit = MAX_NR_IO_RANGES;
> +            break;
> +        case HVMOP_IO_RANGE_PCI:
> +            type_name = "pci";
> +            limit = MAX_NR_IO_RANGES;
> +            break;
> +        case HVMOP_IO_RANGE_VMWARE_PORT:
> +            type_name = "VMware port";
> +            limit = 1;
> +            break;
> +        case HVMOP_IO_RANGE_TIMEOFFSET:
> +            type_name = "timeoffset";
> +            limit = 1;
> +            break;
> +        case HVMOP_IO_RANGE_INVALIDATE:
> +            type_name = "invalidate";
> +            limit = 1;
> +            break;
> +        default:
> +            break;

Thanks for adding the extra types for timeoffset and invalidate, although I 
don't think the invalidate type is needed. This ioreq is actually broadcast, so 
there is no selection required.

  Paul

> +        }
> +        if ( !type_name )
> +            continue;
> +
> +        rc = asprintf(&name, "ioreq_server %d %s", s->id, type_name);
>          if ( rc )
>              goto fail;
> 
> @@ -853,7 +965,12 @@ static int hvm_ioreq_server_alloc_rangesets(struct
> hvm_ioreq_server *s,
>          if ( !s->range[i] )
>              goto fail;
> 
> -        rangeset_limit(s->range[i], MAX_NR_IO_RANGES);
> +        rangeset_limit(s->range[i], limit);
> +
> +        /* VMware port */
> +        if ( i == HVMOP_IO_RANGE_VMWARE_PORT &&
> +            s->domain->arch.hvm_domain.is_vmware_port_enabled )
> +            rc = rangeset_add_range(s->range[i], 1, 1);
>      }
> 
>   done:
> @@ -1155,6 +1272,9 @@ static int
> hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
>              case HVMOP_IO_RANGE_PORT:
>              case HVMOP_IO_RANGE_MEMORY:
>              case HVMOP_IO_RANGE_PCI:
> +            case HVMOP_IO_RANGE_VMWARE_PORT:
> +            case HVMOP_IO_RANGE_TIMEOFFSET:
> +            case HVMOP_IO_RANGE_INVALIDATE:
>                  r = s->range[type];
>                  break;
> 
> @@ -1206,6 +1326,9 @@ static int
> hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
>              case HVMOP_IO_RANGE_PORT:
>              case HVMOP_IO_RANGE_MEMORY:
>              case HVMOP_IO_RANGE_PCI:
> +            case HVMOP_IO_RANGE_VMWARE_PORT:
> +            case HVMOP_IO_RANGE_TIMEOFFSET:
> +            case HVMOP_IO_RANGE_INVALIDATE:
>                  r = s->range[type];
>                  break;
> 
> @@ -2429,9 +2552,6 @@ struct hvm_ioreq_server
> *hvm_select_ioreq_server(struct domain *d,
>      if ( list_empty(&d->arch.hvm_domain.ioreq_server.list) )
>          return NULL;
> 
> -    if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
> -        return d->arch.hvm_domain.default_ioreq_server;
> -
>      cf8 = d->arch.hvm_domain.pci_cf8;
> 
>      if ( p->type == IOREQ_TYPE_PIO &&
> @@ -2474,7 +2594,12 @@ struct hvm_ioreq_server
> *hvm_select_ioreq_server(struct domain *d,
>          BUILD_BUG_ON(IOREQ_TYPE_PIO != HVMOP_IO_RANGE_PORT);
>          BUILD_BUG_ON(IOREQ_TYPE_COPY != HVMOP_IO_RANGE_MEMORY);
>          BUILD_BUG_ON(IOREQ_TYPE_PCI_CONFIG !=
> HVMOP_IO_RANGE_PCI);
> +        BUILD_BUG_ON(IOREQ_TYPE_VMWARE_PORT !=
> HVMOP_IO_RANGE_VMWARE_PORT);
> +        BUILD_BUG_ON(IOREQ_TYPE_TIMEOFFSET !=
> HVMOP_IO_RANGE_TIMEOFFSET);
> +        BUILD_BUG_ON(IOREQ_TYPE_INVALIDATE !=
> HVMOP_IO_RANGE_INVALIDATE);
>          r = s->range[type];
> +        if ( !r )
> +            continue;
> 
>          switch ( type )
>          {
> @@ -2501,6 +2626,13 @@ struct hvm_ioreq_server
> *hvm_select_ioreq_server(struct domain *d,
>              }
> 
>              break;
> +        case IOREQ_TYPE_VMWARE_PORT:
> +        case IOREQ_TYPE_TIMEOFFSET:
> +        case IOREQ_TYPE_INVALIDATE:
> +            if ( rangeset_contains_singleton(r, 1) )
> +                return s;
> +
> +            break;
>          }
>      }
> 
> @@ -2662,6 +2794,7 @@ void hvm_complete_assist_req(ioreq_t *p)
>      case IOREQ_TYPE_PCI_CONFIG:
>          ASSERT_UNREACHABLE();
>          break;
> +    case IOREQ_TYPE_VMWARE_PORT:
>      case IOREQ_TYPE_COPY:
>      case IOREQ_TYPE_PIO:
>          if ( p->dir == IOREQ_READ )
> diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
> index 68fb890..2d06956 100644
> --- a/xen/arch/x86/hvm/io.c
> +++ b/xen/arch/x86/hvm/io.c
> @@ -192,6 +192,21 @@ void hvm_io_assist(ioreq_t *p)
>          (void)handle_mmio();
>          break;
>      case HVMIO_handle_pio_awaiting_completion:
> +        if ( p->type == IOREQ_TYPE_VMWARE_PORT )
> +        {
> +            struct cpu_user_regs *regs = guest_cpu_user_regs();
> +            vmware_regs_t *vr = get_vmport_regs_any(NULL, curr);
> +
> +            if ( vr )
> +            {
> +                /* Only change the 32bit part of the register */
> +                regs->_ebx = vr->ebx;
> +                regs->_ecx = vr->ecx;
> +                regs->_edx = vr->edx;
> +                regs->_esi = vr->esi;
> +                regs->_edi = vr->edi;
> +            }
> +        }
>          if ( vio->io_size == 4 ) /* Needs zero extension. */
>              guest_cpu_user_regs()->rax = (uint32_t)p->data;
>          else
> diff --git a/xen/arch/x86/hvm/vmware/vmport.c
> b/xen/arch/x86/hvm/vmware/vmport.c
> index 2e61682..131bafd 100644
> --- a/xen/arch/x86/hvm/vmware/vmport.c
> +++ b/xen/arch/x86/hvm/vmware/vmport.c
> @@ -130,8 +130,8 @@ int vmport_ioport(int dir, uint32_t port, uint32_t
> bytes, uint32_t *val)
>              regs->_ecx = 1000000;
>              break;
>          default:
> -            new_eax = ~0u;
> -            break;
> +            /* Let backing DM handle */
> +            return X86EMUL_VMPORT_SEND;
>          }
>          if ( dir == IOREQ_READ )
>              *val = new_eax;
> diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h
> b/xen/arch/x86/x86_emulate/x86_emulate.h
> index e8e4413..6bf0176 100644
> --- a/xen/arch/x86/x86_emulate/x86_emulate.h
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.h
> @@ -112,6 +112,8 @@ struct __packed segment_register {
>  #define X86EMUL_RETRY          3
>   /* (cmpxchg accessor): CMPXCHG failed. Maps to X86EMUL_RETRY in caller.
> */
>  #define X86EMUL_CMPXCHG_FAILED 3
> + /* Send part of registers also to DM. */
> +#define X86EMUL_VMPORT_SEND    4
> 
>  /* FPU sub-types which may be requested via ->get_fpu(). */
>  enum x86_emulate_fpu_type {
> diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-
> x86/hvm/domain.h
> index ab0e4cf..ad6ec79 100644
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -48,7 +48,7 @@ struct hvm_ioreq_vcpu {
>      evtchn_port_t    ioreq_evtchn;
>  };
> 
> -#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_PCI + 1)
> +#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_INVALIDATE + 1)
>  #define MAX_NR_IO_RANGES  256
> 
>  struct hvm_ioreq_server {
> @@ -63,6 +63,7 @@ struct hvm_ioreq_server {
>      ioservid_t             id;
>      struct hvm_ioreq_page  ioreq;
>      struct list_head       ioreq_vcpu_list;
> +    struct hvm_ioreq_page  vmport_ioreq;
>      struct hvm_ioreq_page  bufioreq;
> 
>      /* Lock to serialize access to buffered ioreq ring */
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-
> x86/hvm/hvm.h
> index 8ecde89..82fe62a 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -534,6 +534,8 @@ extern bool_t opt_hvm_fep;
>  #define opt_hvm_fep 0
>  #endif
> 
> +vmware_regs_t *get_vmport_regs_any(struct hvm_ioreq_server *s, struct
> vcpu *v);
> +
>  #endif /* __ASM_X86_HVM_HVM_H__ */
> 
>  /*
> diff --git a/xen/include/public/hvm/hvm_op.h
> b/xen/include/public/hvm/hvm_op.h
> index cde3571..19f8c69 100644
> --- a/xen/include/public/hvm/hvm_op.h
> +++ b/xen/include/public/hvm/hvm_op.h
> @@ -314,6 +314,9 @@
> DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);
>   *
>   * NOTE: unless an emulation request falls entirely within a range mapped
>   * by a secondary emulator, it will not be passed to that emulator.
> + *
> + * NOTE: The 'special' range of 1 is what is checked for outside
> + * of the three types of I/O.
>   */
>  #define HVMOP_map_io_range_to_ioreq_server 19
>  #define HVMOP_unmap_io_range_from_ioreq_server 20
> @@ -324,6 +327,9 @@ struct xen_hvm_io_range {
>  # define HVMOP_IO_RANGE_PORT   0 /* I/O port range */
>  # define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
>  # define HVMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
> +# define HVMOP_IO_RANGE_VMWARE_PORT 3 /* VMware port special
> range */
> +# define HVMOP_IO_RANGE_TIMEOFFSET 7 /* TIMEOFFSET special range
> */
> +# define HVMOP_IO_RANGE_INVALIDATE 8 /* INVALIDATE special range */
>      uint64_aligned_t start, end; /* IN - inclusive start and end of range */
>  };
>  typedef struct xen_hvm_io_range xen_hvm_io_range_t;
> diff --git a/xen/include/public/hvm/ioreq.h
> b/xen/include/public/hvm/ioreq.h
> index 5b5fedf..6d5ca06 100644
> --- a/xen/include/public/hvm/ioreq.h
> +++ b/xen/include/public/hvm/ioreq.h
> @@ -35,6 +35,7 @@
>  #define IOREQ_TYPE_PIO          0 /* pio */
>  #define IOREQ_TYPE_COPY         1 /* mmio ops */
>  #define IOREQ_TYPE_PCI_CONFIG   2
> +#define IOREQ_TYPE_VMWARE_PORT  3
>  #define IOREQ_TYPE_TIMEOFFSET   7
>  #define IOREQ_TYPE_INVALIDATE   8 /* mapcache */
> 
> @@ -48,6 +49,8 @@
>   *
>   * 63....48|47..40|39..35|34..32|31........0
>   * SEGMENT |BUS   |DEV   |FN    |OFFSET
> + *
> + * For I/O type IOREQ_TYPE_VMWARE_PORT also use the vmware_regs.
>   */
>  struct ioreq {
>      uint64_t addr;          /* physical address */
> @@ -66,11 +69,25 @@ struct ioreq {
>  };
>  typedef struct ioreq ioreq_t;
> 
> +struct vmware_regs {
> +    uint32_t esi;
> +    uint32_t edi;
> +    uint32_t ebx;
> +    uint32_t ecx;
> +    uint32_t edx;
> +};
> +typedef struct vmware_regs vmware_regs_t;
> +
>  struct shared_iopage {
>      struct ioreq vcpu_ioreq[1];
>  };
>  typedef struct shared_iopage shared_iopage_t;
> 
> +struct shared_vmport_iopage {
> +    struct vmware_regs vcpu_vmport_regs[1];
> +};
> +typedef struct shared_vmport_iopage shared_vmport_iopage_t;
> +
>  struct buf_ioreq {
>      uint8_t  type;   /* I/O type                    */
>      uint8_t  pad:1;
> diff --git a/xen/include/public/hvm/params.h
> b/xen/include/public/hvm/params.h
> index 974d3a4..2f6ccf4 100644
> --- a/xen/include/public/hvm/params.h
> +++ b/xen/include/public/hvm/params.h
> @@ -50,6 +50,8 @@
>  #define HVM_PARAM_PAE_ENABLED  4
> 
>  #define HVM_PARAM_IOREQ_PFN    5
> +/* Extra vmport PFN. */
> +#define HVM_PARAM_VMPORT_REGS_PFN 36
> 
>  #define HVM_PARAM_BUFIOREQ_PFN 6
>  #define HVM_PARAM_BUFIOREQ_EVTCHN 26
> @@ -197,6 +199,6 @@
>  /* emulated VMware Hardware Version */
>  #define HVM_PARAM_VMWARE_HWVER 35
> 
> -#define HVM_NR_PARAMS          36
> +#define HVM_NR_PARAMS          37
> 
>  #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
> --
> 1.8.4
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxx
> http://lists.xen.org/xen-devel

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.