Re: [Xen-devel] [PATCH RFC 17/18] OvmfPkg/XenPvBlkDxe: Add BlockFront client.
On Wed, Jul 16, 2014 at 04:15:46PM +0100, Anthony PERARD wrote:
> This is the code that will do the actual communication between OVMF and
> a PV block backend, where the block device lives.
>
> This implementation originally comes from Mini-OS, a part of the Xen
> Project.
>
> Signed-off-by: Samuel Thibault <samuel.thibault@xxxxxxxxxxxxx>
> Contributed-under: TianoCore Contribution Agreement 1.0
> Signed-off-by: Anthony PERARD <anthony.perard@xxxxxxxxxx>
> ---
> OvmfPkg/XenPvBlkDxe/BlockFront.c | 583 ++++++++++++++++++++++++++++++++++++
> OvmfPkg/XenPvBlkDxe/BlockFront.h | 88 ++++++
> OvmfPkg/XenPvBlkDxe/XenPvBlkDxe.c | 8 +
> OvmfPkg/XenPvBlkDxe/XenPvBlkDxe.inf | 2 +
> 4 files changed, 681 insertions(+)
> create mode 100644 OvmfPkg/XenPvBlkDxe/BlockFront.c
> create mode 100644 OvmfPkg/XenPvBlkDxe/BlockFront.h
>
> diff --git a/OvmfPkg/XenPvBlkDxe/BlockFront.c b/OvmfPkg/XenPvBlkDxe/BlockFront.c
> new file mode 100644
> index 0000000..ff5641c
> --- /dev/null
> +++ b/OvmfPkg/XenPvBlkDxe/BlockFront.c
> @@ -0,0 +1,583 @@
> +/* Minimal block driver for Mini-OS.
> + * Copyright (c) 2007-2008 Samuel Thibault.
> + * Based on netfront.c.
> + */
> +
> +#include <Library/PrintLib.h>
> +#include <Library/DebugLib.h>
> +
> +#include "BlockFront.h"
> +
> +#include <IndustryStandard/Xen/io/protocols.h>
> +#include <IndustryStandard/Xen/io/xenbus.h>
> +
> +#include "inttypes.h"
> +
> +STATIC
> +XENSTORE_STATUS
> +XenbusReadInteger (
> + IN XENBUS_PROTOCOL *This,
> + IN CONST CHAR8 *Node,
> + IN BOOLEAN FromBackend,
> + OUT UINT64 *ValuePtr
> + )
> +{
> + XENSTORE_STATUS Status;
> + CHAR8 *p;
> +
> + if (!FromBackend) {
> + Status = This->XsRead (This, XST_NIL, Node, (VOID**)&p);
> + } else {
> + Status = This->XsBackendRead (This, XST_NIL, Node, (VOID**)&p);
> + }
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + return Status;
> + }
> + // XXX Will assert if p overflow UINT64 ...
> + *ValuePtr = AsciiStrDecimalToUint64 (p);
> + FreePool (p);
> + return Status;
> +}
> +
> +STATIC
> +VOID
> +XenPvBlockFree (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev
> + )
> +{
> + XENBUS_PROTOCOL *XenbusIo = Dev->XenbusIo;
> +
> + if (Dev->RingRef != 0) {
> + XenbusIo->GrantEndAccess (XenbusIo, Dev->RingRef);
> + }
> + if (Dev->Ring.sring != NULL) {
> + FreePages (Dev->Ring.sring, 1);
> + }
> + if (Dev->EventChannel != 0) {
> + XenbusIo->EventChannelClose (XenbusIo, Dev->EventChannel);
> + }
> + FreePool (Dev);
> +}
> +
> +XENSTORE_STATUS
> +XenPvBlkWaitForBackendState (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev,
> + IN XenbusState WaitedState,
Would "ExpectedState" be a clearer name?
> + OUT XenbusState *LastStatePtr OPTIONAL
> + )
> +{
> + XENBUS_PROTOCOL *XenbusIo = Dev->XenbusIo;
> + XenbusState State;
> + UINT64 Value;
> + XENSTORE_STATUS Status;
> +
> + while (TRUE) {
> + Status = XenbusReadInteger (XenbusIo, "state", TRUE, &Value);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + return Status;
> + }
> + if (Value > XenbusStateReconfigured) {
> + return XENSTORE_STATUS_EIO;
> + }
> + State = Value;
> + if (State >= WaitedState) {
Shouldn't this be 'State != WaitedState'?
> + break;
> + }
> + DEBUG ((EFI_D_INFO,
> + "XenPvBlk: waiting backend state %d, current: %d\n",
> + WaitedState, State));
> + XenbusIo->WaitForWatch (XenbusIo, Dev->StateWatchToken);
> + }
> +
> + if (LastStatePtr != NULL) {
> + *LastStatePtr = State;
> + }
> +
> + return XENSTORE_STATUS_SUCCESS;
> +}
> +
> +EFI_STATUS
> +XenPvBlockFrontInitialization (
> + IN XENBUS_PROTOCOL *XenbusIo,
> + IN CONST CHAR8 *NodeName,
> + OUT XEN_BLOCK_FRONT_DEVICE **DevPtr
> + )
> +{
> + XENSTORE_TRANSACTION xbt;
> + CHAR8 *DeviceType;
> + blkif_sring_t *SharedRing;
> + XENSTORE_STATUS Status;
> + XEN_BLOCK_FRONT_DEVICE *Dev;
> + XenbusState State;
> + UINT64 Value;
> +
> + ASSERT (NodeName != NULL);
> +
> + Dev = AllocateZeroPool (sizeof (XEN_BLOCK_FRONT_DEVICE));
> + Dev->Signature = XEN_BLOCK_FRONT_SIGNATURE;
> + Dev->NodeName = NodeName;
> + Dev->XenbusIo = XenbusIo;
> + Dev->DeviceId = XenbusIo->DeviceId;
> +
> + XenbusIo->XsRead (XenbusIo, XST_NIL, "device-type", (VOID**)&DeviceType);
> + if (AsciiStrCmp (DeviceType, "cdrom") == 0) {
> + Dev->MediaInfo.CdRom = TRUE;
> + } else {
> + Dev->MediaInfo.CdRom = FALSE;
> + }
> + FreePool (DeviceType);
> +
> + Status = XenbusReadInteger (XenbusIo, "backend-id", FALSE, &Value);
> + if (Status != XENSTORE_STATUS_SUCCESS || Value > UINT16_MAX) {
> + DEBUG ((EFI_D_ERROR, "XenPvBlk: Failed to get backend-id (%d)\n",
> + Status));
> + goto Error;
> + }
> + Dev->DomainId = Value;
> + XenbusIo->EventChannelAllocate (XenbusIo, Dev->DomainId, &Dev->EventChannel);
> +
> + SharedRing = (blkif_sring_t*) AllocatePages (1);
> + SHARED_RING_INIT (SharedRing);
> + FRONT_RING_INIT (&Dev->Ring, SharedRing, EFI_PAGE_SIZE);
> + XenbusIo->GrantAccess (XenbusIo,
> + Dev->DomainId,
> + (INTN) SharedRing >> EFI_PAGE_SHIFT,
> + FALSE,
> + &Dev->RingRef);
> +
> +Again:
> + Status = XenbusIo->XsTransactionStart (XenbusIo, &xbt);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_WARN, "XenPvBlk: Failed to start transaction, %d\n", Status));
... and do you want to error out here?
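For instance, something along these lines (just a rough sketch; I am assuming
the existing Error label is the right cleanup path at this point, since
XenPvBlockFree already releases the ring page and the event channel):

  Status = XenbusIo->XsTransactionStart (XenbusIo, &xbt);
  if (Status != XENSTORE_STATUS_SUCCESS) {
    DEBUG ((EFI_D_WARN, "XenPvBlk: Failed to start transaction, %d\n", Status));
    //
    // Bail out instead of carrying on with an invalid transaction handle.
    //
    goto Error;
  }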
> + }
> +
> + Status = XenbusIo->XsPrintf (XenbusIo, xbt, NodeName, "ring-ref", "%d",
> + Dev->RingRef);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR, "XenPvBlk: Failed to write ring-ref.\n"));
> + goto AbortTransaction;
> + }
> + Status = XenbusIo->XsPrintf (XenbusIo, xbt, NodeName,
> + "event-channel", "%d", Dev->EventChannel);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR, "XenPvBlk: Failed to write event-channel.\n"));
> + goto AbortTransaction;
> + }
> + Status = XenbusIo->XsPrintf (XenbusIo, xbt, NodeName,
> + "protocol", "%a", XEN_IO_PROTO_ABI_NATIVE);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR, "XenPvBlk: Failed to write protocol.\n"));
> + goto AbortTransaction;
> + }
> +
> + Status = XenbusIo->SetState (XenbusIo, xbt, XenbusStateConnected);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR, "XenPvBlk: Failed switch state.\n"));
> + goto AbortTransaction;
> + }
> +
> + Status = XenbusIo->XsTransactionEnd (XenbusIo, xbt, FALSE);
> + if (Status == XENSTORE_STATUS_EAGAIN) {
> + goto Again;
> + }
> +
> + XenbusIo->RegisterWatchBackend (XenbusIo, "state", &Dev->StateWatchToken);
> +
> + // Waiting backend
> + Status = XenPvBlkWaitForBackendState (Dev, XenbusStateConnected, &State);
> + if (Status != XENSTORE_STATUS_SUCCESS || State != XenbusStateConnected) {
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: backend for %a/%d not available, rc=%d state=%d\n",
> + XenbusIo->Type, XenbusIo->DeviceId, Status, State));
> + goto Error2;
> + }
> +
> + Status = XenbusReadInteger (XenbusIo, "info", TRUE, &Value);
> + if (Status != XENSTORE_STATUS_SUCCESS || Value > UINT32_MAX) {
> + goto Error2;
> + }
> + Dev->MediaInfo.VDiskInfo = Value;
> + if (Dev->MediaInfo.VDiskInfo & VDISK_READONLY) {
> + Dev->MediaInfo.ReadWrite = FALSE;
> + } else {
> + Dev->MediaInfo.ReadWrite = TRUE;
> + }
> +
> + Status = XenbusReadInteger (XenbusIo, "sectors", TRUE, &Dev->MediaInfo.Sectors);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + goto Error2;
> + }
> +
> + Status = XenbusReadInteger (XenbusIo, "sector-size", TRUE, &Value);
> + if (Status != XENSTORE_STATUS_SUCCESS || Value > UINT32_MAX) {
> + goto Error2;
> + }
> + Dev->MediaInfo.SectorSize = Value;
Why not &Dev->MediaInfo.SectorSize in XenbusReadInteger?
Is it because of types? If so, please add a comment stating that.
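E.g. something like this (a sketch only; I am guessing the intermediate UINT64
is there purely because SectorSize is a narrower field than what
XenbusReadInteger outputs):

  //
  // XenbusReadInteger only outputs a UINT64, while MediaInfo.SectorSize is a
  // 32-bit field, so read into the scratch Value first and assign afterwards.
  //
  Status = XenbusReadInteger (XenbusIo, "sector-size", TRUE, &Value);
  if (Status != XENSTORE_STATUS_SUCCESS || Value > UINT32_MAX) {
    goto Error2;
  }
  Dev->MediaInfo.SectorSize = (UINT32)Value;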
> +
> + // Default value
> + Value = 0;
> + Status = XenbusReadInteger (XenbusIo, "feature-barrier", TRUE, &Value);
> + if (Value == 1) {
> + Dev->MediaInfo.FeatureBarrier = TRUE;
> + } else {
> + Dev->MediaInfo.FeatureBarrier = FALSE;
> + }
> +
> + // Default value
> + Value = 0;
> + XenbusReadInteger (XenbusIo, "feature-flush-cache", TRUE, &Value);
> + if (Value == 1) {
> + Dev->MediaInfo.FeatureFlushCache = TRUE;
> + } else {
> + Dev->MediaInfo.FeatureFlushCache = FALSE;
> + }
> +
> + DEBUG ((EFI_D_INFO, "XenPvBlk: New disk with %d sectors of %d bytes\n",
> + Dev->MediaInfo.Sectors, Dev->MediaInfo.SectorSize));
> +
> + *DevPtr = Dev;
> + return EFI_SUCCESS;
> +
> +Error2:
> + XenbusIo->UnregisterWatch (XenbusIo, Dev->StateWatchToken);
> + XenbusIo->XsRemove (XenbusIo, XST_NIL, "ring-ref");
> + XenbusIo->XsRemove (XenbusIo, XST_NIL, "event-channel");
XenbusIo->XsRemove (XenbusIo, XST_NIL, "protocol");
?
> + goto Error;
> +AbortTransaction:
> + XenbusIo->XsTransactionEnd (XenbusIo, xbt, TRUE);
> +Error:
> + XenPvBlockFree (Dev);
> + return EFI_DEVICE_ERROR;
> +}
> +
> +VOID
> +XenPvBlockFrontShutdown (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev
> + )
> +{
> + XENBUS_PROTOCOL *XenbusIo = Dev->XenbusIo;
> + XENSTORE_STATUS Status;
> + UINT64 Value;
> +
> + XenPvBlockSync (Dev);
> +
> + Status = XenbusIo->SetState (XenbusIo, XST_NIL, XenbusStateClosing);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: error while changing state to Closing: %d\n",
> + Status));
> + goto Close;
> + }
> +
> + Status = XenPvBlkWaitForBackendState (Dev, XenbusStateClosing, NULL);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: error while waiting for closing backend state: %d\n",
> + Status));
> + goto Close;
> + }
> +
> + Status = XenbusIo->SetState (XenbusIo, XST_NIL, XenbusStateClosed);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: error while changing state to Closed: %d\n",
> + Status));
> + goto Close;
> + }
> +
> + Status = XenPvBlkWaitForBackendState (Dev, XenbusStateClosed, NULL);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: error while waiting for closed backend state: %d\n",
> + Status));
> + goto Close;
> + }
> +
> + Status = XenbusIo->SetState (XenbusIo, XST_NIL, XenbusStateInitialising);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: error while changing state to initialising: %d\n",
> + Status));
> + goto Close;
> + }
> +
> + while (TRUE) {
> + Status = XenbusReadInteger (XenbusIo, "state", TRUE, &Value);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: error while waiting for new backend state: %d\n",
> + Status));
> + goto Close;
> + }
> + if (Value < XenbusStateInitWait || Value >= XenbusStateClosed) {
> + break;
> + }
> + DEBUG ((EFI_D_INFO,
> + "XenPvBlk: waiting backend state %d, current: %d\n",
> + XenbusStateInitWait, Value));
> + XenbusIo->WaitForWatch (XenbusIo, Dev->StateWatchToken);
> + }
> +
> +Close:
> + XenbusIo->UnregisterWatch (XenbusIo, Dev->StateWatchToken);
> + XenbusIo->XsRemove (XenbusIo, XST_NIL, "ring-ref");
> + XenbusIo->XsRemove (XenbusIo, XST_NIL, "event-channel");
"protocol"
> +
> + XenPvBlockFree (Dev);
> +}
> +
> +STATIC
> +VOID
> +XenPvBlockWaitSlot (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev
> + )
> +{
> + /* Wait for a slot */
> + if (RING_FULL (&Dev->Ring)) {
> + while (TRUE) {
> + XenPvBlockAsyncIoPoll (Dev);
> + if (!RING_FULL(&Dev->Ring)) {
> + break;
> + }
> + /* Really no slot, go to sleep. */
> + // XXX wait for an event on Dev->EventChannel
> + DEBUG ((EFI_D_INFO, "%a %d, waiting\n", __func__, __LINE__));
> + }
> + }
> +}
> +
> +/* Issue an aio */
> +VOID
> +XenPvBlockAsyncIo (
> + IN OUT XEN_BLOCK_FRONT_IO *IoData,
> + IN BOOLEAN IsWrite
> + )
> +{
> + XEN_BLOCK_FRONT_DEVICE *Dev = IoData->Dev;
> + XENBUS_PROTOCOL *XenbusIo = Dev->XenbusIo;
> + blkif_request_t *Request;
> + RING_IDX RingIndex;
> + BOOLEAN Notify;
> + INT32 NumSegments, Index;
> + UINTN Start, End;
> +
> + // Can't io at non-sector-aligned location
> + ASSERT(!(IoData->Offset & (Dev->MediaInfo.SectorSize - 1)));
> + // Can't io non-sector-sized amounts
> + ASSERT(!(IoData->Size & (Dev->MediaInfo.SectorSize - 1)));
> + // Can't io non-sector-aligned buffer
> + ASSERT(!((UINTN) IoData->Buffer & (Dev->MediaInfo.SectorSize - 1)));
> +
> + Start = (UINTN) IoData->Buffer & ~EFI_PAGE_MASK;
> + End = ((UINTN) IoData->Buffer + IoData->Size + EFI_PAGE_SIZE - 1) & ~EFI_PAGE_MASK;
> + IoData->NumRef = NumSegments = (End - Start) / EFI_PAGE_SIZE;
> +
> + // XXX is this comment still true ?
I think it is outdated, as Linux would do max DMA up to the segment
size that is advertised. Ditto for the SCSI stack.
> + /* qemu's IDE max multsect is 16 (8KB) and SCSI max DMA was set to 32KB,
> + * so max 44KB can't happen */
> + ASSERT (NumSegments <= BLKIF_MAX_SEGMENTS_PER_REQUEST);
But this check is OK as we can't truly fit more on the ring.
> +
> + XenPvBlockWaitSlot (Dev);
What happens if there are multiple threads calling this
function? Don't we need a mutex so that only one thread
can grab a RingIndex at a time?
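Just to illustrate the kind of thing I mean (a sketch only;
XenPvBlockRingLock/XenPvBlockRingUnlock are hypothetical helpers, and I am
assuming that raising the TPL via gBS from UefiBootServicesTableLib is an
acceptable way to serialize ring producers here):

  #include <Library/UefiBootServicesTableLib.h>

  //
  // Serialize access to the shared ring's producer index by raising the TPL
  // around the critical section; restore it once req_prod_pvt has been
  // advanced (and the backend notified).
  //
  STATIC
  EFI_TPL
  XenPvBlockRingLock (
    VOID
    )
  {
    return gBS->RaiseTPL (TPL_NOTIFY);
  }

  STATIC
  VOID
  XenPvBlockRingUnlock (
    IN EFI_TPL OldTpl
    )
  {
    gBS->RestoreTPL (OldTpl);
  }

Both XenPvBlockAsyncIo and XenPvBlockPushOperation could then take the lock
before XenPvBlockWaitSlot and release it after bumping req_prod_pvt.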
> + RingIndex = Dev->Ring.req_prod_pvt;
> + Request = RING_GET_REQUEST (&Dev->Ring, RingIndex);
> +
> + Request->operation = IsWrite ? BLKIF_OP_WRITE : BLKIF_OP_READ;
> + Request->nr_segments = NumSegments;
> + Request->handle = Dev->DeviceId;
> + Request->id = (UINTN) IoData;
> + Request->sector_number = IoData->Offset / 512;
> +
> + for (Index = 0; Index < NumSegments; Index++) {
> + Request->seg[Index].first_sect = 0;
Uh?
> + Request->seg[Index].last_sect = EFI_PAGE_SIZE / 512 - 1;
Uh? I thought you wanted to use the real sector values?
> + }
> + Request->seg[0].first_sect = ((UINTN) IoData->Buffer & EFI_PAGE_MASK) / 512;
> + Request->seg[NumSegments - 1].last_sect =
> + (((UINTN) IoData->Buffer + IoData->Size - 1) & EFI_PAGE_MASK) / 512;
What about the segments in between 0 and NumSegments - 1? You should
also fill out those entries with valid values.
> + for (Index = 0; Index < NumSegments; Index++) {
> + UINTN Data = Start + Index * EFI_PAGE_SIZE;
> + if (!IsWrite) {
> + /* Trigger CoW if needed */
Not sure I understand that reference?
> + *(CHAR8 *)(Data + (Request->seg[Index].first_sect << 9)) = 0;
> + MemoryFence ();
> + }
> + XenbusIo->GrantAccess (XenbusIo, Dev->DomainId,
> + Data >> EFI_PAGE_SHIFT, IsWrite,
> + &Request->seg[Index].gref);
> + IoData->GrantRef[Index] = Request->seg[Index].gref;
> + }
> +
> + Dev->Ring.req_prod_pvt = RingIndex + 1;
> +
> + MemoryFence ();
> + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY (&Dev->Ring, Notify);
> +
> + if (Notify) {
> + XenbusIo->EventChannelNotify (XenbusIo, Dev->EventChannel);
> + }
> +}
> +
> +STATIC
> +VOID
> +XenPvBlockAsyncIoCallback (
> + IN OUT XEN_BLOCK_FRONT_IO *IoData,
> + IN EFI_STATUS Status
> + )
> +{
> + IoData->Data = (VOID *) 1;
> + IoData->Callback = NULL;
> +}
> +
> +VOID
> +XenPvBlockIo (
> + IN OUT XEN_BLOCK_FRONT_IO *IoData,
> + IN BOOLEAN IsWrite
> + )
> +{
> + ASSERT (IoData->Callback == NULL);
> + // XXX Signal/Event instead ?
Maybe for later versions. I think this is OK for right now, though
it will mean we can only do one I/O at a time .. and then have to wait
until it is completed.
That limitation should be mentioned in the commit description.
> + IoData->Callback = XenPvBlockAsyncIoCallback;
> + XenPvBlockAsyncIo (IoData, IsWrite);
> + IoData->Data = NULL;
> +
> + while (TRUE) {
> + XenPvBlockAsyncIoPoll (IoData->Dev);
> + if (IoData->Data) {
> + break;
> + }
> + }
> +}
> +
> +STATIC
> +VOID
> +XenPvBlockPushOperation (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev,
> + IN UINT8 Operation,
> + IN UINT64 Id
> + )
> +{
> + INT32 Index;
> + blkif_request_t *Request;
> + BOOLEAN Notify;
> +
> + XenPvBlockWaitSlot (Dev);
How do we make sure that there aren't multiple threads
calling this?
> + Index = Dev->Ring.req_prod_pvt;
> + Request = RING_GET_REQUEST(&Dev->Ring, Index);
> + Request->operation = Operation;
> + Request->nr_segments = 0;
> + Request->handle = Dev->DeviceId;
> + Request->id = Id;
> + /* Not needed anyway, but the backend will check it */
> + Request->sector_number = 0;
> + Dev->Ring.req_prod_pvt = Index + 1;
> + MemoryFence ();
> + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY (&Dev->Ring, Notify);
> + if (Notify) {
> + XENBUS_PROTOCOL *XenbusIo = Dev->XenbusIo;
> + XenbusIo->EventChannelNotify (XenbusIo, Dev->EventChannel);
> + }
> +}
> +
> +VOID
> +XenPvBlockSync (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev
> + )
> +{
> + if (Dev->MediaInfo.ReadWrite) {
> + if (Dev->MediaInfo.FeatureBarrier) {
> + XenPvBlockPushOperation (Dev, BLKIF_OP_WRITE_BARRIER, 0);
> + }
> +
> + if (Dev->MediaInfo.FeatureFlushCache) {
> + XenPvBlockPushOperation (Dev, BLKIF_OP_FLUSH_DISKCACHE, 0);
> + }
> + }
> +
> + /* Note: This won't finish if another thread enqueues requests. */
> + while (TRUE) {
> + XenPvBlockAsyncIoPoll (Dev);
> + if (RING_FREE_REQUESTS (&Dev->Ring) == RING_SIZE (&Dev->Ring)) {
> + break;
> + }
> + }
> +}
> +
> +VOID
> +XenPvBlockAsyncIoPoll (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev
> + )
> +{
> + RING_IDX ProducerIndex, ConsumerIndex;
> + blkif_response_t *Response;
> + INT32 More;
> +
> + do {
> + ProducerIndex = Dev->Ring.sring->rsp_prod;
> + /* Ensure we see queued responses up to 'ProducerIndex'. */
> + MemoryFence ();
> + ConsumerIndex = Dev->Ring.rsp_cons;
> +
> + while (ConsumerIndex != ProducerIndex) {
> + XEN_BLOCK_FRONT_IO *IoData;
> + INT16 Status;
> +
> + Response = RING_GET_RESPONSE (&Dev->Ring, ConsumerIndex);
> +
> + IoData = (VOID *) (UINTN) Response->id;
> + Status = Response->status;
> +
> + switch (Response->operation) {
> + case BLKIF_OP_READ:
> + case BLKIF_OP_WRITE:
> + {
> + INT32 Index;
> +
> + if (Status != BLKIF_RSP_OKAY) {
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: "
> + "%a error %d on %a at offset %Ld, num bytes %Ld\n",
> + Response->operation == BLKIF_OP_READ ? "read" : "write",
> + Status, IoData->Dev->NodeName,
> + IoData->Offset,
> + IoData->Size));
> + }
> +
> + for (Index = 0; Index < IoData->NumRef; Index++) {
> + Dev->XenbusIo->GrantEndAccess (Dev->XenbusIo, IoData->GrantRef[Index]);
> + }
> +
> + break;
> + }
> +
> + case BLKIF_OP_WRITE_BARRIER:
> + if (Status != BLKIF_RSP_OKAY) {
> + DEBUG ((EFI_D_ERROR, "XenPvBlk: write barrier error %d\n", Status));
> + }
> + break;
> + case BLKIF_OP_FLUSH_DISKCACHE:
> + if (Status != BLKIF_RSP_OKAY) {
> + DEBUG ((EFI_D_ERROR, "XenPvBlk: flush error %d\n", Status));
> + }
> + break;
> +
> + default:
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: unrecognized block operation %d response (status
> %d)\n",
> + Response->operation, Status));
> + break;
> + }
> +
> + Dev->Ring.rsp_cons = ++ConsumerIndex;
> + /* Nota: callback frees IoData itself */
> + if (IoData && IoData->Callback) {
> + IoData->Callback (IoData, Status ? EFI_DEVICE_ERROR : EFI_SUCCESS);
> + }
> + if (Dev->Ring.rsp_cons != ConsumerIndex) {
> + /* We reentered, we must not continue here */
Uh, how does that happen?
> + break;
> + }
> + }
> +
> + RING_FINAL_CHECK_FOR_RESPONSES (&Dev->Ring, More);
> + } while (More != 0);
> +}
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel