
Re: [Xen-devel] [XEN][RFC PATCH 01/15] hvm: Modify interface to support multiple ioreq server



On Thu, 2012-03-22 at 15:59 +0000, Julien Grall wrote:
> Add a structure to handle ioreq servers. A server can
> handle a range of I/O (MMIO and/or PIO) and emulate a PCI device.
> Each server has its own shared page to receive ioreqs, so
> we have introduced two HVM params to set/get the first and
> the last shared pages used for ioreqs.
> With its id, the server knows which page it must use.

So id is always the page offset within the range? Why not just call it
iobuf_offset then? Is the additional layer of abstraction from calling
it "id" useful if we are just going to peek around it?

> We introduce a new ioreq type, IOREQ_TYPE_PCI_CONFIG,
> which makes it easy to forward PCI config space accesses.
> 
> Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>
> ---
> [...]
>  xen/include/public/hvm/hvm_op.h  |   49 ++++++++++++++++++++++++++++++++++++++
>  xen/include/public/hvm/ioreq.h   |    1 +
>  xen/include/public/hvm/params.h  |    6 ++++-
>  xen/include/public/xen.h         |    1 +

I only looked at the public interface changes so far.

> diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
> index 6a78f75..1e0e27b 100644
> --- a/xen/include/public/hvm/hvm_op.h
> +++ b/xen/include/public/hvm/hvm_op.h
> @@ -24,6 +24,8 @@
>  #include "../xen.h"
>  #include "../trace.h"
>  
> +#include "hvm_info_table.h" /* HVM_MAX_VCPUS */

You don't appear to use HVM_MAX_VCPUS anywhere in your additions?

>  /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
>  #define HVMOP_set_param           0
>  #define HVMOP_get_param           1
> @@ -227,6 +229,53 @@ struct xen_hvm_inject_trap {
>  typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
>  
> +#define HVMOP_register_ioreq_server 20
> +struct xen_hvm_register_ioreq_server {
> +    domid_t domid;          /* IN - domain to be serviced */
> +    unsigned int id;        /* OUT - handle for identifying this server */

elsewhere this is called servid_t?

> +};
> +typedef struct xen_hvm_register_ioreq_server xen_hvm_register_ioreq_server_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_hvm_register_ioreq_server_t);
> +
> +#define HVMOP_get_ioreq_server_buf_channel 21
> +struct xen_hvm_get_ioreq_server_buf_channel {
> +    domid_t domid;       /* IN - domain to be serviced */
> +    servid_t id;         /* IN - handle from HVMOP_register_ioreq_server */
> +    unsigned int channel;   /* OUT - buf ioreq channel */

evtchn_port_t?

> +};
> +typedef struct xen_hvm_get_ioreq_server_buf_channel xen_hvm_get_ioreq_server_buf_channel_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_buf_channel_t);
> +
> +#define HVMOP_map_io_range_to_ioreq_server 22
> +struct xen_hvm_map_io_range_to_ioreq_server {
> +    domid_t domid;          /* IN - domain to be serviced */
> +    uint8_t is_mmio;        /* IN - MMIO or port IO? */
> +    servid_t id;            /* IN - handle from HVMOP_register_ioreq_server */
> +    uint64_aligned_t s, e;  /* IN - inclusive start and end of range */
> +};
> +typedef struct xen_hvm_map_io_range_to_ioreq_server xen_hvm_map_io_range_to_ioreq_server_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_hvm_map_io_range_to_ioreq_server_t);
> +
> +#define HVMOP_unmap_io_range_from_ioreq_server 23
> +struct xen_hvm_unmap_io_range_from_ioreq_server {
> +    domid_t domid;          /* IN - domain to be serviced */
> +    uint8_t is_mmio;        /* IN - MMIO or port IO? */
> +    servid_t id;            /* IN - handle from HVMOP_register_ioreq_server */
> +    uint64_aligned_t addr;  /* IN - address inside the range to remove */
> +};
> +typedef struct xen_hvm_unmap_io_range_from_ioreq_server xen_hvm_unmap_io_range_from_ioreq_server_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_hvm_unmap_io_range_from_ioreq_server_t);
> +
> +#define HVMOP_register_pcidev 24
> +struct xen_hvm_register_pcidev {
> +    domid_t domid;      /* IN - domain to be serviced */
> +    servid_t id;        /* IN - handle from HVMOP_register_ioreq_server */
> +    uint16_t bdf;       /* IN - pci */
> +};
> +typedef struct xen_hvm_register_pcidev xen_hvm_register_pcidev_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_hvm_register_pcidev_t);
> +
> +
>  #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
>  
>  #define HVMOP_get_mem_type    15
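
(Not part of the review as such, but to check my understanding of the flow
a device model would follow with these ops -- hvm_op() below is just a
stand-in for however the hypercall ends up being issued, e.g. a libxc
wrapper added later in the series, and setup_server() is hypothetical:)

    #include <stdint.h>
    #include <xenctrl.h>             /* defines __XEN_TOOLS__ for the headers */
    #include <xen/hvm/hvm_op.h>

    extern int hvm_op(unsigned long op, void *arg);   /* hypothetical */

    /* Register a new ioreq server for 'domid' and claim one inclusive
     * MMIO range for it, using the handle returned by the hypervisor. */
    static int setup_server(domid_t domid, uint64_t start, uint64_t end)
    {
        struct xen_hvm_register_ioreq_server reg = { .domid = domid };
        struct xen_hvm_map_io_range_to_ioreq_server range;

        if ( hvm_op(HVMOP_register_ioreq_server, &reg) )
            return -1;

        range.domid = domid;
        range.id = reg.id;          /* OUT handle from registration */
        range.is_mmio = 1;
        range.s = start;
        range.e = end;              /* inclusive, per the comment above */

        return hvm_op(HVMOP_map_io_range_to_ioreq_server, &range);
    }
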
> diff --git a/xen/include/public/hvm/ioreq.h b/xen/include/public/hvm/ioreq.h
> index 4022a1d..87aacd3 100644
> --- a/xen/include/public/hvm/ioreq.h
> +++ b/xen/include/public/hvm/ioreq.h
> @@ -34,6 +34,7 @@
>  
>  #define IOREQ_TYPE_PIO          0 /* pio */
>  #define IOREQ_TYPE_COPY         1 /* mmio ops */
> +#define IOREQ_TYPE_PCI_CONFIG   2 /* pci config space ops */
>  #define IOREQ_TYPE_TIMEOFFSET   7
>  #define IOREQ_TYPE_INVALIDATE   8 /* mapcache */
>  
> diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h
> index 55c1b57..309ac1b 100644
> --- a/xen/include/public/hvm/params.h
> +++ b/xen/include/public/hvm/params.h
> @@ -147,6 +147,10 @@
>  #define HVM_PARAM_ACCESS_RING_PFN   28
>  #define HVM_PARAM_SHARING_RING_PFN  29
>  
> -#define HVM_NR_PARAMS          30
> +/* Param for ioreq servers */
> +#define HVM_PARAM_IO_PFN_FIRST       30
> +#define HVM_PARAM_IO_PFN_LAST        31
> +
> +#define HVM_NR_PARAMS          32
>  
>  #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
> diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
> index b2f6c50..26d0e9d 100644
> --- a/xen/include/public/xen.h
> +++ b/xen/include/public/xen.h
> @@ -466,6 +466,7 @@ DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
>  #ifndef __ASSEMBLY__
>  
>  typedef uint16_t domid_t;
> +typedef uint32_t servid_t;

ioservid_t?
 
>  /* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
>  #define DOMID_FIRST_RESERVED (0x7FF0U)



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel