Re: [Xen-devel] [PATCH v4 3/3] x86/ioreq server: Add HVMOP to map guest ram with p2m_ioreq_server to an ioreq server.
On 19/05/16 10:05, Yu Zhang wrote:
> A new HVMOP - HVMOP_map_mem_type_to_ioreq_server, is added to
> let one ioreq server claim/disclaim its responsibility for the
> handling of guest pages with p2m type p2m_ioreq_server. Users
> of this HVMOP can specify which kind of operation is supposed
> to be emulated in a parameter named flags. Currently, this HVMOP
> only supports the emulation of write operations, and it can be
> easily extended to support the emulation of read operations if an
> ioreq server has such a requirement in the future.
>
> For now, we only support one ioreq server for this p2m type, so
> once an ioreq server has claimed its ownership, subsequent calls
> of the HVMOP_map_mem_type_to_ioreq_server will fail. Users can also
> disclaim the ownership of guest ram pages with p2m_ioreq_server, by
> triggering this new HVMOP, with ioreq server id set to the current
> owner's and flags parameter set to 0.
>
> Note both HVMOP_map_mem_type_to_ioreq_server and p2m_ioreq_server
> are only supported for HVMs with HAP enabled.
>
> Also note that only after one ioreq server claims its ownership
> of p2m_ioreq_server will the p2m type change to p2m_ioreq_server
> be allowed.
>
> Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> Signed-off-by: Yu Zhang <yu.c.zhang@xxxxxxxxxxxxxxx>
> Acked-by: Tim Deegan <tim@xxxxxxx>
Looks OK to me:
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
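
(For reference only, not part of the patch: below is a minimal sketch of how
a device model could drive the new interface from the toolstack side. The
struct, type and flag names come from the hvm_op.h hunk further down; the
hvmop() helper and claim_and_mark() are purely hypothetical, standing in for
whatever privcmd/xencall plumbing the device model already uses to issue
HVMOPs. xen_hvm_set_mem_type_t is the pre-existing HVMOP_set_mem_type
argument structure.)

#include <stddef.h>
#include <stdint.h>
#include <xenctrl.h>            /* xc_interface; assumed available to the DM */
#include <xen/hvm/hvm_op.h>     /* HVMOP_map_mem_type_to_ioreq_server et al. */

/* Hypothetical helper: issue an HVMOP through privcmd (not in this patch). */
int hvmop(xc_interface *xch, unsigned int op, void *arg, size_t len);

/* Claim write emulation of p2m_ioreq_server pages, then mark one gfn. */
static int claim_and_mark(xc_interface *xch, domid_t dom, ioservid_t srv,
                          uint64_t gfn)
{
    xen_hvm_map_mem_type_to_ioreq_server_t map = {
        .domid = dom,
        .id    = srv,
        .type  = HVMMEM_ioreq_server,
        .flags = HVMOP_IOREQ_MEM_ACCESS_WRITE,  /* forward writes only */
    };
    xen_hvm_set_mem_type_t set = {
        .domid       = dom,
        .hvmmem_type = HVMMEM_ioreq_server,
        .first_pfn   = gfn,
        .nr          = 1,
    };
    int rc = hvmop(xch, HVMOP_map_mem_type_to_ioreq_server, &map, sizeof(map));

    if ( rc )          /* e.g. -EBUSY if another server has already claimed */
        return rc;

    /* Only now does hvm_allow_p2m_type_change() permit this transition. */
    return hvmop(xch, HVMOP_set_mem_type, &set, sizeof(set));
}

Passing flags = 0 in the same structure later would disclaim ownership again,
as described in the commit message above.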
> ---
> Cc: Paul Durrant <paul.durrant@xxxxxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
> Cc: Tim Deegan <tim@xxxxxxx>
>
> changes in v4:
> - According to Paul's advice, add comments around the definition
> of HVMMEM_ioreq_server in hvm_op.h.
> - According to Wei Liu's comments, change the format of the commit
> message.
>
> changes in v3:
> - Only support write emulation in this patch;
> - Remove the code to handle the race condition in hvmemul_do_io();
> - No need to reset the p2m type after an ioreq server has disclaimed
> its ownership of p2m_ioreq_server;
> - Only allow p2m type change to p2m_ioreq_server after an ioreq
> server has claimed its ownership of p2m_ioreq_server;
> - Only allow p2m type change to p2m_ioreq_server from pages with type
> p2m_ram_rw, and vice versa;
> - HVMOP_map_mem_type_to_ioreq_server interface change - use uint16,
> instead of enum to specify the memory type;
> - Function prototype change to p2m_get_ioreq_server();
> - Coding style changes;
> - Commit message changes;
> - Add Tim's Acked-by.
>
> changes in v2:
> - Only support HAP enabled HVMs;
> - Replace p2m_mem_type_changed() with p2m_change_entry_type_global()
> to reset the p2m type, when an ioreq server tries to claim/disclaim
> its ownership of p2m_ioreq_server;
> - Comments changes.
> ---
> xen/arch/x86/hvm/emulate.c | 32 ++++++++++++++++--
> xen/arch/x86/hvm/hvm.c | 63 ++++++++++++++++++++++++++++++++++--
> xen/arch/x86/hvm/ioreq.c | 41 +++++++++++++++++++++++
> xen/arch/x86/mm/hap/nested_hap.c | 2 +-
> xen/arch/x86/mm/p2m-ept.c | 7 +++-
> xen/arch/x86/mm/p2m-pt.c | 23 +++++++++----
> xen/arch/x86/mm/p2m.c | 70 ++++++++++++++++++++++++++++++++++++++++
> xen/arch/x86/mm/shadow/multi.c | 3 +-
> xen/include/asm-x86/hvm/ioreq.h | 2 ++
> xen/include/asm-x86/p2m.h | 30 +++++++++++++++--
> xen/include/public/hvm/hvm_op.h | 35 +++++++++++++++++++-
> 11 files changed, 289 insertions(+), 19 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index b9cac8e..4571294 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -100,6 +100,7 @@ static int hvmemul_do_io(
> uint8_t dir, bool_t df, bool_t data_is_addr, uintptr_t data)
> {
> struct vcpu *curr = current;
> + struct domain *currd = curr->domain;
> struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
> ioreq_t p = {
> .type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
> @@ -141,7 +142,7 @@ static int hvmemul_do_io(
> (p.dir != dir) ||
> (p.df != df) ||
> (p.data_is_ptr != data_is_addr) )
> - domain_crash(curr->domain);
> + domain_crash(currd);
>
> if ( data_is_addr )
> return X86EMUL_UNHANDLEABLE;
> @@ -178,8 +179,33 @@ static int hvmemul_do_io(
> break;
> case X86EMUL_UNHANDLEABLE:
> {
> - struct hvm_ioreq_server *s =
> - hvm_select_ioreq_server(curr->domain, &p);
> + struct hvm_ioreq_server *s;
> + p2m_type_t p2mt;
> +
> + if ( is_mmio )
> + {
> + unsigned long gmfn = paddr_to_pfn(addr);
> +
> + (void) get_gfn_query_unlocked(currd, gmfn, &p2mt);
> +
> + if ( p2mt == p2m_ioreq_server )
> + {
> + unsigned long flags;
> +
> + s = p2m_get_ioreq_server(currd, &flags);
> +
> + if ( dir == IOREQ_WRITE &&
> + !(flags & P2M_IOREQ_HANDLE_WRITE_ACCESS) )
> + s = NULL;
> + }
> + else
> + s = hvm_select_ioreq_server(currd, &p);
> + }
> + else
> + {
> + p2mt = p2m_invalid;
> + s = hvm_select_ioreq_server(currd, &p);
> + }
>
> /* If there is no suitable backing DM, just ignore accesses */
> if ( !s )
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 346da97..23abeb2 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -4719,6 +4719,40 @@ static int hvmop_unmap_io_range_from_ioreq_server(
> return rc;
> }
>
> +static int hvmop_map_mem_type_to_ioreq_server(
> + XEN_GUEST_HANDLE_PARAM(xen_hvm_map_mem_type_to_ioreq_server_t) uop)
> +{
> + xen_hvm_map_mem_type_to_ioreq_server_t op;
> + struct domain *d;
> + int rc;
> +
> + if ( copy_from_guest(&op, uop, 1) )
> + return -EFAULT;
> +
> + rc = rcu_lock_remote_domain_by_id(op.domid, &d);
> + if ( rc != 0 )
> + return rc;
> +
> + rc = -EINVAL;
> + if ( !is_hvm_domain(d) )
> + goto out;
> +
> + /* Only support for HAP enabled hvm */
> + if ( !hap_enabled(d) )
> + goto out;
> +
> + rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d,
> + HVMOP_map_mem_type_to_ioreq_server);
> + if ( rc != 0 )
> + goto out;
> +
> + rc = hvm_map_mem_type_to_ioreq_server(d, op.id, op.type, op.flags);
> +
> + out:
> + rcu_unlock_domain(d);
> + return rc;
> +}
> +
> static int hvmop_set_ioreq_server_state(
> XEN_GUEST_HANDLE_PARAM(xen_hvm_set_ioreq_server_state_t) uop)
> {
> @@ -5352,9 +5386,14 @@ static int hvmop_get_mem_type(
>
> static bool_t hvm_allow_p2m_type_change(p2m_type_t old, p2m_type_t new)
> {
> + if ( new == p2m_ioreq_server )
> + return old == p2m_ram_rw;
> +
> + if ( old == p2m_ioreq_server )
> + return new == p2m_ram_rw;
> +
> if ( p2m_is_ram(old) ||
> - (p2m_is_hole(old) && new == p2m_mmio_dm) ||
> - (old == p2m_ioreq_server && new == p2m_ram_rw) )
> + (p2m_is_hole(old) && new == p2m_mmio_dm) )
> return 1;
>
> return 0;
> @@ -5389,6 +5428,21 @@ static int hvmop_set_mem_type(
> if ( !is_hvm_domain(d) )
> goto out;
>
> + if ( a.hvmmem_type == HVMMEM_ioreq_server )
> + {
> + unsigned long flags;
> + struct hvm_ioreq_server *s;
> +
> + /* HVMMEM_ioreq_server is only supported for HAP enabled hvm. */
> + if ( !hap_enabled(d) )
> + goto out;
> +
> + /* Do not change to HVMMEM_ioreq_server if no ioreq server mapped. */
> + s = p2m_get_ioreq_server(d, &flags);
> + if ( s == NULL )
> + goto out;
> + }
> +
> rc = xsm_hvm_control(XSM_DM_PRIV, d, HVMOP_set_mem_type);
> if ( rc )
> goto out;
> @@ -5490,6 +5544,11 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
> guest_handle_cast(arg, xen_hvm_io_range_t));
> break;
>
> + case HVMOP_map_mem_type_to_ioreq_server:
> + rc = hvmop_map_mem_type_to_ioreq_server(
> + guest_handle_cast(arg, xen_hvm_map_mem_type_to_ioreq_server_t));
> + break;
> +
> case HVMOP_set_ioreq_server_state:
> rc = hvmop_set_ioreq_server_state(
> guest_handle_cast(arg, xen_hvm_set_ioreq_server_state_t));
> diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> index 333ce14..d24e108 100644
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -753,6 +753,8 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
>
> domain_pause(d);
>
> + p2m_destroy_ioreq_server(d, s);
> +
> hvm_ioreq_server_disable(s, 0);
>
> list_del(&s->list_entry);
> @@ -914,6 +916,45 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
> return rc;
> }
>
> +int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
> + uint16_t type, uint32_t flags)
> +{
> + struct hvm_ioreq_server *s;
> + int rc;
> +
> + /* For now, only HVMMEM_ioreq_server is supported. */
> + if ( type != HVMMEM_ioreq_server )
> + return -EINVAL;
> +
> + /* For now, only write emulation is supported. */
> + if ( flags & ~(HVMOP_IOREQ_MEM_ACCESS_WRITE) )
> + return -EINVAL;
> +
> + spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
> +
> + rc = -ENOENT;
> + list_for_each_entry ( s,
> + &d->arch.hvm_domain.ioreq_server.list,
> + list_entry )
> + {
> + if ( s == d->arch.hvm_domain.default_ioreq_server )
> + continue;
> +
> + if ( s->id == id )
> + {
> + rc = p2m_set_ioreq_server(d, flags, s);
> + if ( rc == 0 )
> + dprintk(XENLOG_DEBUG, "%u %s type HVMMEM_ioreq_server.\n",
> + s->id, (flags != 0) ? "mapped to" : "unmapped from");
> +
> + break;
> + }
> + }
> +
> + spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
> + return rc;
> +}
> +
> int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
> bool_t enabled)
> {
> diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
> index d41bb09..aa90a62 100644
> --- a/xen/arch/x86/mm/hap/nested_hap.c
> +++ b/xen/arch/x86/mm/hap/nested_hap.c
> @@ -174,7 +174,7 @@ nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
> if ( *p2mt == p2m_mmio_direct )
> goto direct_mmio_out;
> rc = NESTEDHVM_PAGEFAULT_MMIO;
> - if ( *p2mt == p2m_mmio_dm )
> + if ( *p2mt == p2m_mmio_dm || *p2mt == p2m_ioreq_server )
> goto out;
>
> rc = NESTEDHVM_PAGEFAULT_L0_ERROR;
> diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
> index a45a573..c5d1305 100644
> --- a/xen/arch/x86/mm/p2m-ept.c
> +++ b/xen/arch/x86/mm/p2m-ept.c
> @@ -132,6 +132,12 @@ static void ept_p2m_type_to_flags(struct p2m_domain *p2m, ept_entry_t *entry,
> entry->r = entry->w = entry->x = 1;
> entry->a = entry->d = !!cpu_has_vmx_ept_ad;
> break;
> + case p2m_ioreq_server:
> + entry->r = entry->x = 1;
> + entry->w = !(p2m->ioreq.flags & P2M_IOREQ_HANDLE_WRITE_ACCESS);
> + entry->a = !!cpu_has_vmx_ept_ad;
> + entry->d = entry->w && cpu_has_vmx_ept_ad;
> + break;
> case p2m_mmio_direct:
> entry->r = entry->x = 1;
> entry->w = !rangeset_contains_singleton(mmio_ro_ranges,
> @@ -171,7 +177,6 @@ static void ept_p2m_type_to_flags(struct p2m_domain *p2m, ept_entry_t *entry,
> entry->a = entry->d = !!cpu_has_vmx_ept_ad;
> break;
> case p2m_grant_map_ro:
> - case p2m_ioreq_server:
> entry->r = 1;
> entry->w = entry->x = 0;
> entry->a = !!cpu_has_vmx_ept_ad;
> diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
> index eabd2e3..bf75afa 100644
> --- a/xen/arch/x86/mm/p2m-pt.c
> +++ b/xen/arch/x86/mm/p2m-pt.c
> @@ -72,7 +72,9 @@ static const unsigned long pgt[] = {
> PGT_l3_page_table
> };
>
> -static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn,
> +static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
> + p2m_type_t t,
> + mfn_t mfn,
> unsigned int level)
> {
> unsigned long flags;
> @@ -94,8 +96,16 @@ static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn,
> default:
> return flags | _PAGE_NX_BIT;
> case p2m_grant_map_ro:
> - case p2m_ioreq_server:
> return flags | P2M_BASE_FLAGS | _PAGE_NX_BIT;
> + case p2m_ioreq_server:
> + {
> + flags |= P2M_BASE_FLAGS | _PAGE_RW;
> +
> + if ( p2m->ioreq.flags & P2M_IOREQ_HANDLE_WRITE_ACCESS )
> + return flags & ~_PAGE_RW;
> + else
> + return flags;
> + }
> case p2m_ram_ro:
> case p2m_ram_logdirty:
> case p2m_ram_shared:
> @@ -442,7 +452,8 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
> p2m_type_t p2mt = p2m_is_logdirty_range(p2m, gfn & mask, gfn | ~mask)
> ? p2m_ram_logdirty : p2m_ram_rw;
> unsigned long mfn = l1e_get_pfn(e);
> - unsigned long flags = p2m_type_to_flags(p2mt, _mfn(mfn), level);
> + unsigned long flags = p2m_type_to_flags(p2m, p2mt,
> + _mfn(mfn), level);
>
> if ( level )
> {
> @@ -579,7 +590,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
> ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
> l3e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
> ? l3e_from_pfn(mfn_x(mfn),
> - p2m_type_to_flags(p2mt, mfn, 2) | _PAGE_PSE)
> + p2m_type_to_flags(p2m, p2mt, mfn, 2) | _PAGE_PSE)
> : l3e_empty();
> entry_content.l1 = l3e_content.l3;
>
> @@ -615,7 +626,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
>
> if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
> entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
> - p2m_type_to_flags(p2mt, mfn, 0));
> + p2m_type_to_flags(p2m, p2mt, mfn, 0));
> else
> entry_content = l1e_empty();
>
> @@ -651,7 +662,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
> ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
> if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
> l2e_content = l2e_from_pfn(mfn_x(mfn),
> - p2m_type_to_flags(p2mt, mfn, 1) |
> + p2m_type_to_flags(p2m, p2mt, mfn, 1) |
> _PAGE_PSE);
> else
> l2e_content = l2e_empty();
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index 9b19769..59afa2c 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -83,6 +83,8 @@ static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
> else
> p2m_pt_init(p2m);
>
> + spin_lock_init(&p2m->ioreq.lock);
> +
> return ret;
> }
>
> @@ -289,6 +291,74 @@ void p2m_memory_type_changed(struct domain *d)
> }
> }
>
> +int p2m_set_ioreq_server(struct domain *d,
> + unsigned long flags,
> + struct hvm_ioreq_server *s)
> +{
> + struct p2m_domain *p2m = p2m_get_hostp2m(d);
> + int rc;
> +
> + spin_lock(&p2m->ioreq.lock);
> +
> + if ( flags == 0 )
> + {
> + rc = -EINVAL;
> + if ( p2m->ioreq.server != s )
> + goto out;
> +
> + /* Unmap ioreq server from p2m type by passing flags with 0. */
> + p2m->ioreq.server = NULL;
> + p2m->ioreq.flags = 0;
> + }
> + else
> + {
> + rc = -EBUSY;
> + if ( p2m->ioreq.server != NULL )
> + goto out;
> +
> + p2m->ioreq.server = s;
> + p2m->ioreq.flags = flags;
> + }
> +
> + rc = 0;
> +
> + out:
> + spin_unlock(&p2m->ioreq.lock);
> +
> + return rc;
> +}
> +
> +struct hvm_ioreq_server *p2m_get_ioreq_server(struct domain *d,
> + unsigned long *flags)
> +{
> + struct p2m_domain *p2m = p2m_get_hostp2m(d);
> + struct hvm_ioreq_server *s;
> +
> + spin_lock(&p2m->ioreq.lock);
> +
> + s = p2m->ioreq.server;
> + *flags = p2m->ioreq.flags;
> +
> + spin_unlock(&p2m->ioreq.lock);
> + return s;
> +}
> +
> +void p2m_destroy_ioreq_server(struct domain *d,
> + struct hvm_ioreq_server *s)
> +{
> + struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +
> + spin_lock(&p2m->ioreq.lock);
> +
> + if ( p2m->ioreq.server == s )
> + {
> + p2m->ioreq.server = NULL;
> + p2m->ioreq.flags = 0;
> + }
> +
> + spin_unlock(&p2m->ioreq.lock);
> +}
> +
> void p2m_enable_hardware_log_dirty(struct domain *d)
> {
> struct p2m_domain *p2m = p2m_get_hostp2m(d);
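
(Again just for reference, not part of the patch: a tiny standalone model,
with made-up names and no Xen headers, of the claim/disclaim rules that
p2m_set_ioreq_server() above enforces, to make the expected return values
explicit. set_ioreq_server(), owner, owner_flags and struct server are all
invented stand-ins for the real p2m->ioreq state.)

#include <assert.h>
#include <errno.h>
#include <stddef.h>

struct server;                       /* opaque stand-in for hvm_ioreq_server */
static struct server *owner;         /* models p2m->ioreq.server */
static unsigned long owner_flags;    /* models p2m->ioreq.flags */

static int set_ioreq_server(unsigned long flags, struct server *s)
{
    if ( flags == 0 )                /* disclaim: only the current owner may */
    {
        if ( owner != s )
            return -EINVAL;
        owner = NULL;
        owner_flags = 0;
    }
    else                             /* claim: only one server at a time */
    {
        if ( owner != NULL )
            return -EBUSY;
        owner = s;
        owner_flags = flags;
    }
    return 0;
}

int main(void)
{
    struct server *s1 = (struct server *)0x1, *s2 = (struct server *)0x2;

    assert(set_ioreq_server(1, s1) == 0);       /* first claim succeeds */
    assert(set_ioreq_server(1, s2) == -EBUSY);  /* second claim is refused */
    assert(set_ioreq_server(0, s2) == -EINVAL); /* non-owner cannot disclaim */
    assert(set_ioreq_server(0, s1) == 0);       /* owner disclaims cleanly */
    return 0;
}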
> diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
> index b322293..ae845d2 100644
> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -3225,8 +3225,7 @@ static int sh_page_fault(struct vcpu *v,
> }
>
> /* Need to hand off device-model MMIO to the device model */
> - if ( p2mt == p2m_mmio_dm
> - || (p2mt == p2m_ioreq_server && ft == ft_demand_write) )
> + if ( p2mt == p2m_mmio_dm )
> {
> gpa = guest_walk_to_gpa(&gw);
> goto mmio;
> diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
> index fbf2c74..340ae3e 100644
> --- a/xen/include/asm-x86/hvm/ioreq.h
> +++ b/xen/include/asm-x86/hvm/ioreq.h
> @@ -37,6 +37,8 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
> int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
> uint32_t type, uint64_t start,
> uint64_t end);
> +int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
> + uint16_t type, uint32_t flags);
> int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
> bool_t enabled);
>
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index f3e87d6..3aa0dd7 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -89,7 +89,8 @@ typedef unsigned int p2m_query_t;
> | p2m_to_mask(p2m_ram_paging_out) \
> | p2m_to_mask(p2m_ram_paged) \
> | p2m_to_mask(p2m_ram_paging_in) \
> - | p2m_to_mask(p2m_ram_shared))
> + | p2m_to_mask(p2m_ram_shared) \
> + | p2m_to_mask(p2m_ioreq_server))
>
> /* Types that represent a physmap hole that is ok to replace with a shared
> * entry */
> @@ -111,8 +112,7 @@ typedef unsigned int p2m_query_t;
> #define P2M_RO_TYPES (p2m_to_mask(p2m_ram_logdirty) \
> | p2m_to_mask(p2m_ram_ro) \
> | p2m_to_mask(p2m_grant_map_ro) \
> - | p2m_to_mask(p2m_ram_shared) \
> - | p2m_to_mask(p2m_ioreq_server))
> + | p2m_to_mask(p2m_ram_shared))
>
> /* Write-discard types, which should discard the write operations */
> #define P2M_DISCARD_WRITE_TYPES (p2m_to_mask(p2m_ram_ro) \
> @@ -336,6 +336,24 @@ struct p2m_domain {
> struct ept_data ept;
> /* NPT-equivalent structure could be added here. */
> };
> +
> + struct {
> + spinlock_t lock;
> + /*
> + * ioreq server that is responsible for the emulation of
> + * gfns with a specific p2m type (for now, p2m_ioreq_server).
> + */
> + struct hvm_ioreq_server *server;
> + /*
> + * flags specifies whether read, write or both operations
> + * are to be emulated by an ioreq server.
> + */
> + unsigned int flags;
> +
> +#define P2M_IOREQ_HANDLE_WRITE_ACCESS HVMOP_IOREQ_MEM_ACCESS_WRITE
> +#define P2M_IOREQ_HANDLE_READ_ACCESS HVMOP_IOREQ_MEM_ACCESS_READ
> +
> + } ioreq;
> };
>
> /* get host p2m table */
> @@ -843,6 +861,12 @@ static inline unsigned int p2m_get_iommu_flags(p2m_type_t p2mt)
> return flags;
> }
>
> +int p2m_set_ioreq_server(struct domain *d, unsigned long flags,
> + struct hvm_ioreq_server *s);
> +struct hvm_ioreq_server *p2m_get_ioreq_server(struct domain *d,
> + unsigned long *flags);
> +void p2m_destroy_ioreq_server(struct domain *d, struct hvm_ioreq_server *s);
> +
> #endif /* _XEN_P2M_H */
>
> /*
> diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
> index b3e45cf..22c15a7 100644
> --- a/xen/include/public/hvm/hvm_op.h
> +++ b/xen/include/public/hvm/hvm_op.h
> @@ -89,7 +89,9 @@ typedef enum {
> HVMMEM_unused, /* Placeholder; setting memory to this type
> will fail for code after 4.7.0 */
> #endif
> - HVMMEM_ioreq_server
> + HVMMEM_ioreq_server /* Memory type claimed by an ioreq server; type
> + changes to this value are only allowed after
> + an ioreq server has claimed its ownership */
> } hvmmem_type_t;
>
> /* Following tools-only interfaces may change in future. */
> @@ -383,6 +385,37 @@ struct xen_hvm_set_ioreq_server_state {
> typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
> DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
>
> +/*
> + * HVMOP_map_mem_type_to_ioreq_server : map or unmap the IOREQ Server <id>
> + * to specific memory type <type>
> + * for specific accesses <flags>
> + *
> + * For now, flags only accept the value of HVMOP_IOREQ_MEM_ACCESS_WRITE,
> + * which means only write operations are to be forwarded to an ioreq server.
> + * Support for the emulation of read operations can be added when an ioreq
> + * server has such a requirement in the future.
> + */
> +#define HVMOP_map_mem_type_to_ioreq_server 26
> +struct xen_hvm_map_mem_type_to_ioreq_server {
> + domid_t domid; /* IN - domain to be serviced */
> + ioservid_t id; /* IN - ioreq server id */
> + uint16_t type; /* IN - memory type */
> + uint16_t pad;
> + uint32_t flags; /* IN - types of accesses to be forwarded to the
> + ioreq server. flags with 0 means to unmap the
> + ioreq server */
> +#define _HVMOP_IOREQ_MEM_ACCESS_READ 0
> +#define HVMOP_IOREQ_MEM_ACCESS_READ \
> + (1u << _HVMOP_IOREQ_MEM_ACCESS_READ)
> +
> +#define _HVMOP_IOREQ_MEM_ACCESS_WRITE 1
> +#define HVMOP_IOREQ_MEM_ACCESS_WRITE \
> + (1u << _HVMOP_IOREQ_MEM_ACCESS_WRITE)
> +};
> +typedef struct xen_hvm_map_mem_type_to_ioreq_server
> + xen_hvm_map_mem_type_to_ioreq_server_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_hvm_map_mem_type_to_ioreq_server_t);
> +
> #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
>
> #if defined(__i386__) || defined(__x86_64__)
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel