Re: [Xen-devel] [PATCH v2 11/11] ioreq: provide support for long-running operations...
> -----Original Message-----
> From: Roger Pau Monne <roger.pau@xxxxxxxxxx>
> Sent: 03 September 2019 17:14
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>; Paul Durrant <Paul.Durrant@xxxxxxxxxx>;
> Jan Beulich <jbeulich@xxxxxxxx>; Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>;
> Wei Liu <wl@xxxxxxx>; George Dunlap <George.Dunlap@xxxxxxxxxx>;
> Ian Jackson <Ian.Jackson@xxxxxxxxxx>; Julien Grall <julien.grall@xxxxxxx>;
> Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>; Stefano Stabellini <sstabellini@xxxxxxxxxx>;
> Tim (Xen.org) <tim@xxxxxxx>
> Subject: [PATCH v2 11/11] ioreq: provide support for long-running operations...
>
> ...and switch vPCI to use this infrastructure for long-running
> physmap modification operations.
>
> This allows getting rid of the vPCI-specific modifications done to
> handle_hvm_io_completion and generalizing the support for
> long-running operations to other internal ioreq servers. Such support
> is implemented as a specific handler that can be registered by internal
> ioreq servers and that will be called to check for pending work.
> Returning true from this handler will prevent the vcpu from running
> until the handler returns false.
>
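As an aside, here is a minimal sketch of what an internal ioreq server
handler following this scheme might look like, based on the
X86EMUL_RETRY convention visible in the handle_hvm_io_completion and
hvm_send_ioreq hunks below. The handler signature matches the vPCI
ioreq_handler in this patch; the state structure and helper names are
hypothetical:

    /*
     * Illustrative sketch only -- not part of this patch.  An internal
     * ioreq server handler signals a long-running operation by returning
     * X86EMUL_RETRY; the vcpu is then kept from resuming guest execution
     * until a later invocation returns X86EMUL_OKAY (or an error).
     */
    static int example_handler(struct vcpu *v, ioreq_t *req, void *data)
    {
        struct example_state *st = data;   /* hypothetical per-server state */

        if ( st->pending )
        {
            /* Do a bounded chunk of the outstanding work (hypothetical helper). */
            example_do_chunk(st);
            return st->pending ? X86EMUL_RETRY : X86EMUL_OKAY;
        }

        /* No long-running work outstanding: handle the access directly. */
        return example_handle_access(req, st) ? X86EMUL_OKAY
                                              : X86EMUL_UNHANDLEABLE;
    }
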
> Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> ---
> xen/arch/x86/hvm/ioreq.c | 55 +++++++++++++++++++++++++-----
> xen/drivers/vpci/header.c | 61 ++++++++++++++++++----------------
> xen/drivers/vpci/vpci.c | 8 ++++-
> xen/include/asm-x86/hvm/vcpu.h | 3 +-
> xen/include/xen/vpci.h | 6 ----
> 5 files changed, 89 insertions(+), 44 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> index 33c56b880c..caa53dfa84 100644
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -239,16 +239,48 @@ bool handle_hvm_io_completion(struct vcpu *v)
> enum hvm_io_completion io_completion;
> unsigned int id;
>
> - if ( has_vpci(d) && vpci_process_pending(v) )
> - {
> - raise_softirq(SCHEDULE_SOFTIRQ);
> - return false;
> - }
> -
> - FOR_EACH_EXTERNAL_IOREQ_SERVER(d, id, s)
> + FOR_EACH_IOREQ_SERVER(d, id, s)
> {
> struct hvm_ioreq_vcpu *sv;
>
> + if ( hvm_ioreq_is_internal(id) )
> + {
I wonder whether it would be neater to say:

if ( !hvm_ioreq_is_internal(id) )
    continue;

here to avoid the extra level of indentation below.
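Roughly, that reshuffle might look as follows (a sketch only, with the
bodies abbreviated; note the external-server handling further down the
hunk still needs to be reachable, so it is kept inside the loop here
rather than being skipped by the continue):

    FOR_EACH_IOREQ_SERVER(d, id, s)
    {
        struct hvm_ioreq_vcpu *sv;

        if ( !hvm_ioreq_is_internal(id) )
        {
            /* Existing external-server handling, unchanged. */
            list_for_each_entry ( sv, &s->ioreq_vcpu_list, list_entry )
            {
                /* ... as in the code below ... */
            }
            continue;
        }

        if ( vio->io_req.state != STATE_IOREQ_INPROCESS )
            continue;

        /* Internal-server handling from the hunk above, one level less nested. */
    }
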
> + if ( vio->io_req.state == STATE_IOREQ_INPROCESS )
> + {
> + ioreq_t req = vio->io_req;
> +
> + /*
> + * Check and convert the PIO/MMIO ioreq to a PCI config space
> + * access.
> + */
> + convert_pci_ioreq(d, &req);
> +
> + if ( s->handler(v, &req, s->data) == X86EMUL_RETRY )
> + {
> + /*
> + * Need to raise a scheduler irq in order to prevent the
> + * guest vcpu from resuming execution.
> + *
> +                 * Note this is not required for external ioreq operations
> +                 * because in that case the vcpu is marked as blocked, but
> + * this cannot be done for long-running internal
> + * operations, since it would prevent the vcpu from being
> + * scheduled and thus the long running operation from
> + * finishing.
> + */
> + raise_softirq(SCHEDULE_SOFTIRQ);
> + return false;
> + }
> +
> + /* Finished processing the ioreq. */
> + if ( hvm_ioreq_needs_completion(&vio->io_req) )
> + vio->io_req.state = STATE_IORESP_READY;
> + else
> + vio->io_req.state = STATE_IOREQ_NONE;
IMO the above is also neater as:

vio->io_req.state = hvm_ioreq_needs_completion(&vio->io_req) ?
                    STATE_IORESP_READY : STATE_IOREQ_NONE;
> + }
> + continue;
> + }
> +
> list_for_each_entry ( sv,
> &s->ioreq_vcpu_list,
> list_entry )
> @@ -1582,7 +1614,14 @@ int hvm_send_ioreq(ioservid_t id, ioreq_t *proto_p, bool buffered)
> return hvm_send_buffered_ioreq(s, proto_p);
>
> if ( hvm_ioreq_is_internal(id) )
> - return s->handler(curr, proto_p, s->data);
> + {
> + int rc = s->handler(curr, proto_p, s->data);
> +
> + if ( rc == X86EMUL_RETRY )
> + curr->arch.hvm.hvm_io.io_req.state = STATE_IOREQ_INPROCESS;
> +
> + return rc;
> + }
>
> if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
> return X86EMUL_RETRY;
> diff --git a/xen/drivers/vpci/header.c b/xen/drivers/vpci/header.c
> index 3c794f486d..f1c1a69492 100644
> --- a/xen/drivers/vpci/header.c
> +++ b/xen/drivers/vpci/header.c
> @@ -129,37 +129,42 @@ static void modify_decoding(const struct pci_dev *pdev, uint16_t cmd,
>
> bool vpci_process_pending(struct vcpu *v)
> {
> - if ( v->vpci.mem )
> + struct map_data data = {
> + .d = v->domain,
> + .map = v->vpci.cmd & PCI_COMMAND_MEMORY,
> + };
> + int rc;
> +
> + if ( !v->vpci.mem )
> {
> - struct map_data data = {
> - .d = v->domain,
> - .map = v->vpci.cmd & PCI_COMMAND_MEMORY,
> - };
> - int rc = rangeset_consume_ranges(v->vpci.mem, map_range, &data);
> -
> - if ( rc == -ERESTART )
> - return true;
> -
> - spin_lock(&v->vpci.pdev->vpci->lock);
> - /* Disable memory decoding unconditionally on failure. */
> - modify_decoding(v->vpci.pdev,
> - rc ? v->vpci.cmd & ~PCI_COMMAND_MEMORY : v->vpci.cmd,
> - !rc && v->vpci.rom_only);
> - spin_unlock(&v->vpci.pdev->vpci->lock);
> -
> - rangeset_destroy(v->vpci.mem);
> - v->vpci.mem = NULL;
> - if ( rc )
> - /*
> - * FIXME: in case of failure remove the device from the domain.
> -             * Note that there might still be leftover mappings. While this is
> - * safe for Dom0, for DomUs the domain will likely need to be
> - * killed in order to avoid leaking stale p2m mappings on
> - * failure.
> - */
> - vpci_remove_device(v->vpci.pdev);
> + ASSERT_UNREACHABLE();
> + return false;
> }
>
> + rc = rangeset_consume_ranges(v->vpci.mem, map_range, &data);
> +
Extraneous blank line?
Paul
> + if ( rc == -ERESTART )
> + return true;
> +
> + spin_lock(&v->vpci.pdev->vpci->lock);
> + /* Disable memory decoding unconditionally on failure. */
> + modify_decoding(v->vpci.pdev,
> + rc ? v->vpci.cmd & ~PCI_COMMAND_MEMORY : v->vpci.cmd,
> + !rc && v->vpci.rom_only);
> + spin_unlock(&v->vpci.pdev->vpci->lock);
> +
> + rangeset_destroy(v->vpci.mem);
> + v->vpci.mem = NULL;
> + if ( rc )
> + /*
> + * FIXME: in case of failure remove the device from the domain.
> + * Note that there might still be leftover mappings. While this is
> + * safe for Dom0, for DomUs the domain will likely need to be
> + * killed in order to avoid leaking stale p2m mappings on
> + * failure.
> + */
> + vpci_remove_device(v->vpci.pdev);
> +
> return false;
> }
>
> diff --git a/xen/drivers/vpci/vpci.c b/xen/drivers/vpci/vpci.c
> index 5664020c2d..6069dff612 100644
> --- a/xen/drivers/vpci/vpci.c
> +++ b/xen/drivers/vpci/vpci.c
> @@ -498,6 +498,12 @@ static int ioreq_handler(struct vcpu *v, ioreq_t *req, void *data)
> return X86EMUL_UNHANDLEABLE;
> }
>
> + if ( v->vpci.mem )
> + {
> + ASSERT(req->state == STATE_IOREQ_INPROCESS);
> + return vpci_process_pending(v) ? X86EMUL_RETRY : X86EMUL_OKAY;
> + }
> +
> sbdf.sbdf = req->addr >> 32;
>
> if ( req->dir )
> @@ -505,7 +511,7 @@ static int ioreq_handler(struct vcpu *v, ioreq_t *req, void *data)
> else
> write(sbdf, req->addr, req->size, req->data);
>
> - return X86EMUL_OKAY;
> + return v->vpci.mem ? X86EMUL_RETRY : X86EMUL_OKAY;
> }
>
> int vpci_register_ioreq(struct domain *d)
> diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
> index 38f5c2bb9b..4563746466 100644
> --- a/xen/include/asm-x86/hvm/vcpu.h
> +++ b/xen/include/asm-x86/hvm/vcpu.h
> @@ -92,7 +92,8 @@ struct hvm_vcpu_io {
>
> static inline bool hvm_ioreq_needs_completion(const ioreq_t *ioreq)
> {
> - return ioreq->state == STATE_IOREQ_READY &&
> + return (ioreq->state == STATE_IOREQ_READY ||
> + ioreq->state == STATE_IOREQ_INPROCESS) &&
> !ioreq->data_is_ptr &&
> (ioreq->type != IOREQ_TYPE_PIO || ioreq->dir != IOREQ_WRITE);
> }
> diff --git a/xen/include/xen/vpci.h b/xen/include/xen/vpci.h
> index 36f435ed5b..a65491e0c9 100644
> --- a/xen/include/xen/vpci.h
> +++ b/xen/include/xen/vpci.h
> @@ -225,12 +225,6 @@ static inline int vpci_register_ioreq(struct domain *d)
> }
>
> static inline void vpci_dump_msi(void) { }
> -
> -static inline bool vpci_process_pending(struct vcpu *v)
> -{
> - ASSERT_UNREACHABLE();
> - return false;
> -}
> #endif
>
> #endif
> --
> 2.22.0