[Xen-devel] [PATCH v3 1/6] ioreq-server: centralize access to ioreq structures
To simplify creation of the ioreq server abstraction in a
subsequent patch, this patch centralizes all use of the shared
ioreq structure and the buffered ioreq ring in the source module
xen/arch/x86/hvm/hvm.c.

This patch also adds missing emacs boilerplate in the places where
it was needed.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
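Note for reviewers (not part of the commit message): the heart of the
change is that callers no longer reach into the shared ioreq page
directly. They build an ioreq_t on their own stack and hand it to
hvm_send_assist_req(), which copies the fields into the shared page
itself. A minimal sketch of the new calling convention, modelled on the
reworked send_invalidate_req() below -- illustrative only; the function
name example_flush_mapcache and the exact include lines are mine, not
code from this diff:

    #include <xen/sched.h>          /* struct vcpu */
    #include <asm/current.h>        /* current */
    #include <asm/hvm/hvm.h>        /* hvm_send_assist_req(); pulls in ioreq.h */

    static void example_flush_mapcache(void)
    {
        struct vcpu *curr = current;
        ioreq_t p = {
            .type = IOREQ_TYPE_INVALIDATE,
            .size = 4,
            .dir  = IOREQ_WRITE,
            .data = ~0UL,           /* flush all */
        };                          /* remaining fields zero-initialised */

        /* hvm_send_assist_req() copies the proto-request into the shared
         * ioreq page and notifies the device model. */
        (void)hvm_send_assist_req(curr, &p);
    }
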
 xen/arch/x86/hvm/emulate.c        |  38 +++++--------
 xen/arch/x86/hvm/hvm.c            | 113 ++++++++++++++++++++++++++++++++++++-
 xen/arch/x86/hvm/io.c             |  99 ++------------------------------
 xen/arch/x86/hvm/stdvga.c         |   2 +-
 xen/arch/x86/hvm/vmx/vvmx.c       |  13 ++++-
 xen/include/asm-x86/hvm/hvm.h     |  15 ++++-
 xen/include/asm-x86/hvm/io.h      |   2 +-
 xen/include/asm-x86/hvm/support.h |  19 ++++---
8 files changed, 165 insertions(+), 136 deletions(-)
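
The buffered path changes shape in the same way: callers now name the
target domain explicitly and hvm_buffered_io_send() owns all access to
the buffered ring. Again purely illustrative -- example_timeoffset is my
name; the field values mirror send_timeoffset_req() in io.c:

    #include <xen/lib.h>            /* printk */
    #include <xen/sched.h>          /* struct domain */
    #include <asm/hvm/io.h>         /* hvm_buffered_io_send() */

    static void example_timeoffset(struct domain *d, unsigned long timeoff)
    {
        ioreq_t p = {
            .type  = IOREQ_TYPE_TIMEOFFSET,
            .size  = 8,
            .count = 1,
            .dir   = IOREQ_WRITE,
            .data  = timeoff,
            .state = STATE_IOREQ_READY,
        };

        /* Returns 0 if the request cannot be buffered (address above 1MB,
         * data_is_ptr set, count != 1) or if the ring is currently full. */
        if ( !hvm_buffered_io_send(d, &p) )
            printk("Unsuccessful timeoffset update\n");
    }
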
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 868aa1d..0ba2020 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -57,24 +57,11 @@ static int hvmemul_do_io(
int value_is_ptr = (p_data == NULL);
struct vcpu *curr = current;
struct hvm_vcpu_io *vio;
- ioreq_t *p = get_ioreq(curr);
- ioreq_t _ioreq;
+ ioreq_t p[1];
unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
p2m_type_t p2mt;
struct page_info *ram_page;
int rc;
- bool_t has_dm = 1;
-
- /*
- * Domains without a backing DM, don't have an ioreq page. Just
- * point to a struct on the stack, initialising the state as needed.
- */
- if ( !p )
- {
- has_dm = 0;
- p = &_ioreq;
- p->state = STATE_IOREQ_NONE;
- }
/* Check for paged out page */
ram_page = get_page_from_gfn(curr->domain, ram_gfn, &p2mt, P2M_UNSHARE);
@@ -173,15 +160,6 @@ static int hvmemul_do_io(
return X86EMUL_UNHANDLEABLE;
}
- if ( p->state != STATE_IOREQ_NONE )
- {
- gdprintk(XENLOG_WARNING, "WARNING: io already pending (%d)?\n",
- p->state);
- if ( ram_page )
- put_page(ram_page);
- return X86EMUL_UNHANDLEABLE;
- }
-
vio->io_state =
(p_data == NULL) ? HVMIO_dispatched : HVMIO_awaiting_completion;
vio->io_size = size;
@@ -233,7 +211,7 @@ static int hvmemul_do_io(
break;
case X86EMUL_UNHANDLEABLE:
/* If there is no backing DM, just ignore accesses */
- if ( !has_dm )
+ if ( !hvm_has_dm(curr->domain) )
{
rc = X86EMUL_OKAY;
vio->io_state = HVMIO_none;
@@ -241,7 +219,7 @@ static int hvmemul_do_io(
else
{
rc = X86EMUL_RETRY;
- if ( !hvm_send_assist_req(curr) )
+ if ( !hvm_send_assist_req(curr, p) )
vio->io_state = HVMIO_none;
else if ( p_data == NULL )
rc = X86EMUL_OKAY;
@@ -1292,3 +1270,13 @@ struct segment_register *hvmemul_get_seg_reg(
hvm_get_segment_register(current, seg, &hvmemul_ctxt->seg_reg[seg]);
return &hvmemul_ctxt->seg_reg[seg];
}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 9e85c13..0b2e57e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -345,9 +345,27 @@ void hvm_migrate_pirqs(struct vcpu *v)
spin_unlock(&d->event_lock);
}
+static ioreq_t *get_ioreq(struct vcpu *v)
+{
+ struct domain *d = v->domain;
+ shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
+ ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
+ return p ? &p->vcpu_ioreq[v->vcpu_id] : NULL;
+}
+
+bool_t hvm_io_pending(struct vcpu *v)
+{
+ ioreq_t *p = get_ioreq(v);
+
+ if ( !p )
+ return 0;
+
+ return ( p->state != STATE_IOREQ_NONE );
+}
+
void hvm_do_resume(struct vcpu *v)
{
- ioreq_t *p;
+ ioreq_t *p = get_ioreq(v);
check_wakeup_from_wait();
@@ -355,7 +373,7 @@ void hvm_do_resume(struct vcpu *v)
pt_restore_timer(v);
/* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
- if ( !(p = get_ioreq(v)) )
+ if ( !p )
goto check_inject_trap;
while ( p->state != STATE_IOREQ_NONE )
@@ -1407,7 +1425,87 @@ void hvm_vcpu_down(struct vcpu *v)
}
}
-bool_t hvm_send_assist_req(struct vcpu *v)
+int hvm_buffered_io_send(struct domain *d, const ioreq_t *p)
+{
+ struct hvm_ioreq_page *iorp = &d->arch.hvm_domain.buf_ioreq;
+ buffered_iopage_t *pg = iorp->va;
+ buf_ioreq_t bp;
+ /* Timeoffset sends 64b data, but no address. Use two consecutive slots. */
+ int qw = 0;
+
+ /* Ensure buffered_iopage fits in a page */
+ BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
+
+ /*
+ * Return 0 for the cases we can't deal with:
+ * - 'addr' is only a 20-bit field, so we cannot address beyond 1MB
+ * - we cannot buffer accesses to guest memory buffers, as the guest
+ * may expect the memory buffer to be synchronously accessed
+ * - the count field is usually used with data_is_ptr and since we don't
+ * support data_is_ptr we do not waste space for the count field either
+ */
+ if ( (p->addr > 0xffffful) || p->data_is_ptr || (p->count != 1) )
+ return 0;
+
+ bp.type = p->type;
+ bp.dir = p->dir;
+ switch ( p->size )
+ {
+ case 1:
+ bp.size = 0;
+ break;
+ case 2:
+ bp.size = 1;
+ break;
+ case 4:
+ bp.size = 2;
+ break;
+ case 8:
+ bp.size = 3;
+ qw = 1;
+ break;
+ default:
+ gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
+ return 0;
+ }
+
+ bp.data = p->data;
+ bp.addr = p->addr;
+
+ spin_lock(&iorp->lock);
+
+ if ( (pg->write_pointer - pg->read_pointer) >=
+ (IOREQ_BUFFER_SLOT_NUM - qw) )
+ {
+ /* The queue is full: send the iopacket through the normal path. */
+ spin_unlock(&iorp->lock);
+ return 0;
+ }
+
+ pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM] = bp;
+
+ if ( qw )
+ {
+ bp.data = p->data >> 32;
+ pg->buf_ioreq[(pg->write_pointer+1) % IOREQ_BUFFER_SLOT_NUM] = bp;
+ }
+
+ /* Make the ioreq_t visible /before/ write_pointer. */
+ wmb();
+ pg->write_pointer += qw ? 2 : 1;
+
+ notify_via_xen_event_channel(d,
+     d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN]);
+ spin_unlock(&iorp->lock);
+
+ return 1;
+}
+
+bool_t hvm_has_dm(struct domain *d)
+{
+ return !!d->arch.hvm_domain.ioreq.va;
+}
+
+bool_t hvm_send_assist_req(struct vcpu *v, const ioreq_t *proto_p)
{
ioreq_t *p;
@@ -1425,6 +1523,15 @@ bool_t hvm_send_assist_req(struct vcpu *v)
return 0;
}
+ p->dir = proto_p->dir;
+ p->data_is_ptr = proto_p->data_is_ptr;
+ p->type = proto_p->type;
+ p->size = proto_p->size;
+ p->addr = proto_p->addr;
+ p->count = proto_p->count;
+ p->df = proto_p->df;
+ p->data = proto_p->data;
+
prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);
/*
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index bf6309d..ba50c53 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -46,87 +46,9 @@
#include <xen/iocap.h>
#include <public/hvm/ioreq.h>
-int hvm_buffered_io_send(ioreq_t *p)
-{
- struct vcpu *v = current;
- struct hvm_ioreq_page *iorp = &v->domain->arch.hvm_domain.buf_ioreq;
- buffered_iopage_t *pg = iorp->va;
- buf_ioreq_t bp;
- /* Timeoffset sends 64b data, but no address. Use two consecutive slots. */
- int qw = 0;
-
- /* Ensure buffered_iopage fits in a page */
- BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
-
- /*
- * Return 0 for the cases we can't deal with:
- * - 'addr' is only a 20-bit field, so we cannot address beyond 1MB
- * - we cannot buffer accesses to guest memory buffers, as the guest
- * may expect the memory buffer to be synchronously accessed
- * - the count field is usually used with data_is_ptr and since we don't
- * support data_is_ptr we do not waste space for the count field either
- */
- if ( (p->addr > 0xffffful) || p->data_is_ptr || (p->count != 1) )
- return 0;
-
- bp.type = p->type;
- bp.dir = p->dir;
- switch ( p->size )
- {
- case 1:
- bp.size = 0;
- break;
- case 2:
- bp.size = 1;
- break;
- case 4:
- bp.size = 2;
- break;
- case 8:
- bp.size = 3;
- qw = 1;
- break;
- default:
- gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
- return 0;
- }
-
- bp.data = p->data;
- bp.addr = p->addr;
-
- spin_lock(&iorp->lock);
-
- if ( (pg->write_pointer - pg->read_pointer) >=
- (IOREQ_BUFFER_SLOT_NUM - qw) )
- {
- /* The queue is full: send the iopacket through the normal path. */
- spin_unlock(&iorp->lock);
- return 0;
- }
-
- memcpy(&pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
- &bp, sizeof(bp));
-
- if ( qw )
- {
- bp.data = p->data >> 32;
- memcpy(&pg->buf_ioreq[(pg->write_pointer+1) % IOREQ_BUFFER_SLOT_NUM],
- &bp, sizeof(bp));
- }
-
- /* Make the ioreq_t visible /before/ write_pointer. */
- wmb();
- pg->write_pointer += qw ? 2 : 1;
-
- notify_via_xen_event_channel(v->domain,
- v->domain->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN]);
- spin_unlock(&iorp->lock);
-
- return 1;
-}
-
void send_timeoffset_req(unsigned long timeoff)
{
+ struct vcpu *curr = current;
ioreq_t p[1];
if ( timeoff == 0 )
@@ -142,33 +64,22 @@ void send_timeoffset_req(unsigned long timeoff)
p->state = STATE_IOREQ_READY;
- if ( !hvm_buffered_io_send(p) )
+ if ( !hvm_buffered_io_send(curr->domain, p) )
printk("Unsuccessful timeoffset update\n");
}
/* Ask ioemu mapcache to invalidate mappings. */
void send_invalidate_req(void)
{
- struct vcpu *v = current;
- ioreq_t *p = get_ioreq(v);
-
- if ( !p )
- return;
-
- if ( p->state != STATE_IOREQ_NONE )
- {
- gdprintk(XENLOG_ERR, "WARNING: send invalidate req with something "
- "already pending (%d)?\n", p->state);
- domain_crash(v->domain);
- return;
- }
+ struct vcpu *curr = current;
+ ioreq_t p[1];
p->type = IOREQ_TYPE_INVALIDATE;
p->size = 4;
p->dir = IOREQ_WRITE;
p->data = ~0UL; /* flush all */
- (void)hvm_send_assist_req(v);
+ (void)hvm_send_assist_req(curr, p);
}
int handle_mmio(void)
diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index 19e80ed..9e2d28e 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -580,7 +580,7 @@ static int stdvga_intercept_mmio(ioreq_t *p)
buf = (p->dir == IOREQ_WRITE);
}
- rc = (buf && hvm_buffered_io_send(p));
+ rc = (buf && hvm_buffered_io_send(d, p));
spin_unlock(&s->lock);
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 40167d6..0421623 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1394,7 +1394,6 @@ void nvmx_switch_guest(void)
struct vcpu *v = current;
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
struct cpu_user_regs *regs = guest_cpu_user_regs();
- const ioreq_t *ioreq = get_ioreq(v);
/*
* A pending IO emulation may still be not finished. In this case, no
@@ -1404,7 +1403,7 @@ void nvmx_switch_guest(void)
* don't want to continue as this setup is not implemented nor supported
* as of right now.
*/
- if ( !ioreq || ioreq->state != STATE_IOREQ_NONE )
+ if ( hvm_io_pending(v) )
return;
/*
* a softirq may interrupt us between a virtual vmentry is
@@ -2522,3 +2521,13 @@ void nvmx_set_cr_read_shadow(struct vcpu *v, unsigned int cr)
/* nvcpu.guest_cr is what L2 write to cr actually. */
__vmwrite(read_shadow_field, v->arch.hvm_vcpu.nvcpu.guest_cr[cr]);
}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index dcc3483..08a62ea 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -26,6 +26,7 @@
#include <asm/hvm/asid.h>
#include <public/domctl.h>
#include <public/hvm/save.h>
+#include <public/hvm/ioreq.h>
#include <asm/mm.h>
/* Interrupt acknowledgement sources. */
@@ -227,7 +228,7 @@ int prepare_ring_for_helper(struct domain *d, unsigned long gmfn,
struct page_info **_page, void **_va);
void destroy_ring_for_helper(void **_va, struct page_info *page);
-bool_t hvm_send_assist_req(struct vcpu *v);
+bool_t hvm_send_assist_req(struct vcpu *v, const ioreq_t *p);
void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);
@@ -339,6 +340,8 @@ static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v)
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
void hvm_migrate_timers(struct vcpu *v);
+bool_t hvm_has_dm(struct domain *d);
+bool_t hvm_io_pending(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);
void hvm_migrate_pirqs(struct vcpu *v);
@@ -522,3 +525,13 @@ bool_t nhvm_vmcx_hap_enabled(struct vcpu *v);
enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v);
#endif /* __ASM_X86_HVM_HVM_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index 86db58d..bfd28c2 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -92,7 +92,7 @@ static inline int hvm_buffered_io_intercept(ioreq_t *p)
}
int hvm_mmio_intercept(ioreq_t *p);
-int hvm_buffered_io_send(ioreq_t *p);
+int hvm_buffered_io_send(struct domain *d, const ioreq_t *p);
static inline void register_portio_handler(
struct domain *d, unsigned long addr,
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 3529499..05ef5c5 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -22,19 +22,10 @@
#define __ASM_X86_HVM_SUPPORT_H__
#include <xen/types.h>
-#include <public/hvm/ioreq.h>
#include <xen/sched.h>
#include <xen/hvm/save.h>
#include <asm/processor.h>
-static inline ioreq_t *get_ioreq(struct vcpu *v)
-{
- struct domain *d = v->domain;
- shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
- ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
- return p ? &p->vcpu_ioreq[v->vcpu_id] : NULL;
-}
-
#define HVM_DELIVER_NO_ERROR_CODE -1
#ifndef NDEBUG
@@ -142,3 +133,13 @@ int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
#endif /* __ASM_X86_HVM_SUPPORT_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--
1.7.10.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel