|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v2 17/18] OvmfPkg/XenPvBlkDxe: Add BlockFront client.
> +/**
> + Helper to read an interger from XenStore.
integer?
> +
> + @param This A pointer to a XENBUS_PROTOCOL instance.
> + @param Node The XenStore node to read from.
> + @param FromBackend Read frontend or backend value.
> + @param ValuePtr Where to put the value.
> +
> + @retval XENSTORE_STATUS_SUCCESS If succefull, will update ValuePtr.
> + @return Any other return value indicate the error,
> + ValuePtr is not updated in this case.
> +**/
> +STATIC
> +XENSTORE_STATUS
> +XenBusReadUint64 (
> + IN XENBUS_PROTOCOL *This,
> + IN CONST CHAR8 *Node,
> + IN BOOLEAN FromBackend,
> + OUT UINT64 *ValuePtr
> + )
> +{
> + XENSTORE_STATUS Status;
> + CHAR8 *p;
> +
> + if (!FromBackend) {
> + Status = This->XsRead (This, XST_NIL, Node, (VOID**)&p);
> + } else {
> + Status = This->XsBackendRead (This, XST_NIL, Node, (VOID**)&p);
> + }
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + return Status;
> + }
> + // FIXME Will ASSERT if p overflow UINT64 ...
Do you just want to add the ASSERT in the code then?
> + *ValuePtr = AsciiStrDecimalToUint64 (p);
> + FreePool (p);
> + return Status;
> +}
> +
> +/**
> + Free an instance of XEN_BLOCK_FRONT_DEVICE.
> +
> + @param Dev The instance to free.
> +**/
> +STATIC
> +VOID
> +XenPvBlockFree (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev
> + )
> +{
> + XENBUS_PROTOCOL *XenBusIo = Dev->XenBusIo;
> +
> + if (Dev->RingRef != 0) {
> + XenBusIo->GrantEndAccess (XenBusIo, Dev->RingRef);
> + }
> + if (Dev->Ring.sring != NULL) {
> + FreePages (Dev->Ring.sring, 1);
> + }
> + if (Dev->EventChannel != 0) {
> + XenBusIo->EventChannelClose (XenBusIo, Dev->EventChannel);
> + }
> + FreePool (Dev);
> +}
> +
> +/**
> + Wait until until the backend have reach the ExpectedState.
s/have reach/has reached/ (the line also has a doubled "until until")
> +
> + @param Dev A XEN_BLOCK_FRONT_DEVICE instance.
> + @param ExpectedState The backend state expected.
> + @param LastStatePtr An optionnal pointer where to right the final state.
s/optionnal/optional/
> +
> + @return Return XENSTORE_STATUS_SUCCESS if the new backend state is
> ExpectedState
> + or return an error otherwise.
> +**/
> +STATIC
> +XENSTORE_STATUS
> +XenPvBlkWaitForBackendState (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev,
> + IN XenbusState ExpectedState,
> + OUT XenbusState *LastStatePtr OPTIONAL
> + )
> +{
> + XENBUS_PROTOCOL *XenBusIo = Dev->XenBusIo;
> + XenbusState State;
> + UINT64 Value;
> + XENSTORE_STATUS Status = XENSTORE_STATUS_SUCCESS;
> +
> + while (TRUE) {
> + Status = XenBusReadUint64 (XenBusIo, "state", TRUE, &Value);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + return Status;
> + }
> + if (Value > XenbusStateReconfigured) {
> + //
> + // Value is not a State value.
> + //
> + return XENSTORE_STATUS_EIO;
> + }
> + State = Value;
> + if (State == ExpectedState) {
> + break;
> + } else if (State > ExpectedState) {
> + Status = XENSTORE_STATUS_FAIL;
> + break;
> + }
> + DEBUG ((EFI_D_INFO,
> + "XenPvBlk: waiting backend state %d, current: %d\n",
> + ExpectedState, State));
> + XenBusIo->WaitForWatch (XenBusIo, Dev->StateWatchToken);
> + }
> +
> + if (LastStatePtr != NULL) {
> + *LastStatePtr = State;
> + }
> +
> + return Status;
> +}
> +
> +EFI_STATUS
> +XenPvBlockFrontInitialization (
> + IN XENBUS_PROTOCOL *XenBusIo,
> + IN CONST CHAR8 *NodeName,
> + OUT XEN_BLOCK_FRONT_DEVICE **DevPtr
> + )
> +{
> + XENSTORE_TRANSACTION xbt;
> + CHAR8 *DeviceType;
> + blkif_sring_t *SharedRing;
> + XENSTORE_STATUS Status;
> + XEN_BLOCK_FRONT_DEVICE *Dev;
> + XenbusState State;
> + UINT64 Value;
> +
> + ASSERT (NodeName != NULL);
> +
> + Dev = AllocateZeroPool (sizeof (XEN_BLOCK_FRONT_DEVICE));
> + Dev->Signature = XEN_BLOCK_FRONT_SIGNATURE;
> + Dev->NodeName = NodeName;
> + Dev->XenBusIo = XenBusIo;
> + Dev->DeviceId = XenBusIo->DeviceId;
> +
> + XenBusIo->XsRead (XenBusIo, XST_NIL, "device-type", (VOID**)&DeviceType);
> + if (AsciiStrCmp (DeviceType, "cdrom") == 0) {
> + Dev->MediaInfo.CdRom = TRUE;
> + } else {
> + Dev->MediaInfo.CdRom = FALSE;
> + }
> + FreePool (DeviceType);
> +
> + Status = XenBusReadUint64 (XenBusIo, "backend-id", FALSE, &Value);
> + if (Status != XENSTORE_STATUS_SUCCESS || Value > UINT16_MAX) {
> + DEBUG ((EFI_D_ERROR, "XenPvBlk: Failed to get backend-id (%d)\n",
> + Status));
> + goto Error;
> + }
> + Dev->DomainId = Value;
> + XenBusIo->EventChannelAllocate (XenBusIo, Dev->DomainId,
> &Dev->EventChannel);
> +
> + SharedRing = (blkif_sring_t*) AllocatePages (1);
> + SHARED_RING_INIT (SharedRing);
> + FRONT_RING_INIT (&Dev->Ring, SharedRing, EFI_PAGE_SIZE);
> + XenBusIo->GrantAccess (XenBusIo,
> + Dev->DomainId,
> + (INTN) SharedRing >> EFI_PAGE_SHIFT,
> + FALSE,
> + &Dev->RingRef);
> +
> +Again:
> + Status = XenBusIo->XsTransactionStart (XenBusIo, &xbt);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_WARN, "XenPvBlk: Failed to start transaction, %d\n",
> Status));
> + goto Error;
> + }
> +
> + Status = XenBusIo->XsPrintf (XenBusIo, xbt, NodeName, "ring-ref", "%d",
> + Dev->RingRef);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR, "XenPvBlk: Failed to write ring-ref.\n"));
> + goto AbortTransaction;
> + }
> + Status = XenBusIo->XsPrintf (XenBusIo, xbt, NodeName,
> + "event-channel", "%d", Dev->EventChannel);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR, "XenPvBlk: Failed to write event-channel.\n"));
> + goto AbortTransaction;
> + }
> + Status = XenBusIo->XsPrintf (XenBusIo, xbt, NodeName,
> + "protocol", "%a", XEN_IO_PROTO_ABI_NATIVE);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR, "XenPvBlk: Failed to write protocol.\n"));
> + goto AbortTransaction;
> + }
> +
> + Status = XenBusIo->SetState (XenBusIo, xbt, XenbusStateConnected);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR, "XenPvBlk: Failed switch state.\n"));
Failed to switch state
> + goto AbortTransaction;
> + }
> +
> + Status = XenBusIo->XsTransactionEnd (XenBusIo, xbt, FALSE);
> + if (Status == XENSTORE_STATUS_EAGAIN) {
> + goto Again;
> + }
> +
> + XenBusIo->RegisterWatchBackend (XenBusIo, "state", &Dev->StateWatchToken);
> +
> + //
> + // Waiting backend
Waiting for backend.
> + //
> + Status = XenPvBlkWaitForBackendState (Dev, XenbusStateConnected, &State);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: backend for %a/%d not available, rc=%d state=%d\n",
> + XenBusIo->Type, XenBusIo->DeviceId, Status, State));
> + goto Error2;
> + }
> +
> + Status = XenBusReadUint64 (XenBusIo, "info", TRUE, &Value);
> + if (Status != XENSTORE_STATUS_SUCCESS || Value > UINT32_MAX) {
> + goto Error2;
> + }
> + Dev->MediaInfo.VDiskInfo = Value;
> + if (Dev->MediaInfo.VDiskInfo & VDISK_READONLY) {
> + Dev->MediaInfo.ReadWrite = FALSE;
> + } else {
> + Dev->MediaInfo.ReadWrite = TRUE;
> + }
> +
> + Status = XenBusReadUint64 (XenBusIo, "sectors", TRUE,
> &Dev->MediaInfo.Sectors);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + goto Error2;
> + }
> +
> + Status = XenBusReadUint64 (XenBusIo, "sector-size", TRUE, &Value);
> + if (Status != XENSTORE_STATUS_SUCCESS || Value > UINT32_MAX) {
> + goto Error2;
> + }
> + Dev->MediaInfo.SectorSize = Value;
> +
> + // Default value
> + Value = 0;
> + Status = XenBusReadUint64 (XenBusIo, "feature-barrier", TRUE, &Value);
> + if (Value == 1) {
> + Dev->MediaInfo.FeatureBarrier = TRUE;
> + } else {
> + Dev->MediaInfo.FeatureBarrier = FALSE;
> + }
> +
> + // Default value
> + Value = 0;
> + XenBusReadUint64 (XenBusIo, "feature-flush-cache", TRUE, &Value);
> + if (Value == 1) {
> + Dev->MediaInfo.FeatureFlushCache = TRUE;
> + } else {
> + Dev->MediaInfo.FeatureFlushCache = FALSE;
> + }
> +
> + DEBUG ((EFI_D_INFO, "XenPvBlk: New disk with %ld sectors of %d bytes\n",
> + Dev->MediaInfo.Sectors, Dev->MediaInfo.SectorSize));
> +
> + *DevPtr = Dev;
> + return EFI_SUCCESS;
> +
> +Error2:
> + XenBusIo->UnregisterWatch (XenBusIo, Dev->StateWatchToken);
> + XenBusIo->XsRemove (XenBusIo, XST_NIL, "ring-ref");
> + XenBusIo->XsRemove (XenBusIo, XST_NIL, "event-channel");
> + XenBusIo->XsRemove (XenBusIo, XST_NIL, "protocol");
> + goto Error;
> +AbortTransaction:
> + XenBusIo->XsTransactionEnd (XenBusIo, xbt, TRUE);
> +Error:
> + XenPvBlockFree (Dev);
> + return EFI_DEVICE_ERROR;
> +}
> +
> +VOID
> +XenPvBlockFrontShutdown (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev
> + )
> +{
> + XENBUS_PROTOCOL *XenBusIo = Dev->XenBusIo;
> + XENSTORE_STATUS Status;
> + UINT64 Value;
> +
> + XenPvBlockSync (Dev);
> +
> + Status = XenBusIo->SetState (XenBusIo, XST_NIL, XenbusStateClosing);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: error while changing state to Closing: %d\n",
> + Status));
> + goto Close;
> + }
> +
> + Status = XenPvBlkWaitForBackendState (Dev, XenbusStateClosing, NULL);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: error while waiting for closing backend state: %d\n",
> + Status));
> + goto Close;
> + }
> +
> + Status = XenBusIo->SetState (XenBusIo, XST_NIL, XenbusStateClosed);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: error while changing state to Closed: %d\n",
> + Status));
> + goto Close;
> + }
> +
> + Status = XenPvBlkWaitForBackendState (Dev, XenbusStateClosed, NULL);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: error while waiting for closed backend state: %d\n",
> + Status));
> + goto Close;
> + }
> +
> + Status = XenBusIo->SetState (XenBusIo, XST_NIL, XenbusStateInitialising);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: error while changing state to initialising: %d\n",
> + Status));
> + goto Close;
> + }
> +
> + while (TRUE) {
> + Status = XenBusReadUint64 (XenBusIo, "state", TRUE, &Value);
> + if (Status != XENSTORE_STATUS_SUCCESS) {
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: error while waiting for new backend state: %d\n",
> + Status));
> + goto Close;
> + }
> + if (Value < XenbusStateInitWait || Value >= XenbusStateClosed) {
> + break;
> + }
> + DEBUG ((EFI_D_INFO,
> + "XenPvBlk: waiting backend state %d, current: %d\n",
> + XenbusStateInitWait, Value));
> + XenBusIo->WaitForWatch (XenBusIo, Dev->StateWatchToken);
> + }
> +
> +Close:
> + XenBusIo->UnregisterWatch (XenBusIo, Dev->StateWatchToken);
> + XenBusIo->XsRemove (XenBusIo, XST_NIL, "ring-ref");
> + XenBusIo->XsRemove (XenBusIo, XST_NIL, "event-channel");
> + XenBusIo->XsRemove (XenBusIo, XST_NIL, "protocol");
> +
> + XenPvBlockFree (Dev);
> +}
> +
> +STATIC
> +VOID
> +XenPvBlockWaitSlot (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev
> + )
> +{
> + /* Wait for a slot */
> + if (RING_FULL (&Dev->Ring)) {
> + while (TRUE) {
> + XenPvBlockAsyncIoPoll (Dev);
> + if (!RING_FULL(&Dev->Ring)) {
^
Missing space
> + break;
> + }
> + /* Really no slot, could wait for an event on Dev->EventChannel. */
> + }
> + }
> +}
> +
> +VOID
> +XenPvBlockAsyncIo (
> + IN OUT XEN_BLOCK_FRONT_IO *IoData,
> + IN BOOLEAN IsWrite
> + )
> +{
> + XEN_BLOCK_FRONT_DEVICE *Dev = IoData->Dev;
> + XENBUS_PROTOCOL *XenBusIo = Dev->XenBusIo;
> + blkif_request_t *Request;
> + RING_IDX RingIndex;
> + BOOLEAN Notify;
> + INT32 NumSegments, Index;
> + UINTN Start, End;
> +
> + // Can't io at non-sector-aligned location
> + ASSERT(!(IoData->Offset & (Dev->MediaInfo.SectorSize - 1)));
> + // Can't io non-sector-sized amounts
> + ASSERT(!(IoData->Size & (Dev->MediaInfo.SectorSize - 1)));
> + // Can't io non-sector-aligned buffer
> + ASSERT(!((UINTN) IoData->Buffer & (Dev->MediaInfo.SectorSize - 1)));
> +
> + Start = (UINTN) IoData->Buffer & ~EFI_PAGE_MASK;
> + End = ((UINTN) IoData->Buffer + IoData->Size + EFI_PAGE_SIZE - 1) &
> ~EFI_PAGE_MASK;
> + IoData->NumRef = NumSegments = (End - Start) / EFI_PAGE_SIZE;
> +
> + ASSERT (NumSegments <= BLKIF_MAX_SEGMENTS_PER_REQUEST);
> +
> + XenPvBlockWaitSlot (Dev);
> + RingIndex = Dev->Ring.req_prod_pvt;
> + Request = RING_GET_REQUEST (&Dev->Ring, RingIndex);
> +
> + Request->operation = IsWrite ? BLKIF_OP_WRITE : BLKIF_OP_READ;
> + Request->nr_segments = NumSegments;
> + Request->handle = Dev->DeviceId;
> + Request->id = (UINTN) IoData;
> + Request->sector_number = IoData->Offset / 512;
Offset? Why not just call it 'LBA' instead of Offset?
> +
> + for (Index = 0; Index < NumSegments; Index++) {
> + Request->seg[Index].first_sect = 0;
> + Request->seg[Index].last_sect = EFI_PAGE_SIZE / 512 - 1;
> + }
> + Request->seg[0].first_sect = ((UINTN) IoData->Buffer & EFI_PAGE_MASK) /
> 512;
> + Request->seg[NumSegments - 1].last_sect =
> + (((UINTN) IoData->Buffer + IoData->Size - 1) & EFI_PAGE_MASK) / 512;
> + for (Index = 0; Index < NumSegments; Index++) {
> + UINTN Data = Start + Index * EFI_PAGE_SIZE;
> + XenBusIo->GrantAccess (XenBusIo, Dev->DomainId,
> + Data >> EFI_PAGE_SHIFT, IsWrite,
> + &Request->seg[Index].gref);
> + IoData->GrantRef[Index] = Request->seg[Index].gref;
> + }
> +
> + Dev->Ring.req_prod_pvt = RingIndex + 1;
> +
> + MemoryFence ();
> + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY (&Dev->Ring, Notify);
> +
> + if (Notify) {
> + XenBusIo->EventChannelNotify (XenBusIo, Dev->EventChannel);
> + }
> +}
> +
> +EFI_STATUS
> +XenPvBlockIo (
> + IN OUT XEN_BLOCK_FRONT_IO *IoData,
> + IN BOOLEAN IsWrite
> + )
> +{
> + //
> + // Status value that correspond to an IO in progress.
> + //
> + IoData->Status = EFI_ALREADY_STARTED;
> + XenPvBlockAsyncIo (IoData, IsWrite);
> +
> + while (IoData->Status == EFI_ALREADY_STARTED) {
> + XenPvBlockAsyncIoPoll (IoData->Dev);
> + }
> +
> + return IoData->Status;
> +}
> +
> +STATIC
> +VOID
> +XenPvBlockPushOperation (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev,
> + IN UINT8 Operation,
> + IN UINT64 Id
> + )
> +{
> + INT32 Index;
> + blkif_request_t *Request;
> + BOOLEAN Notify;
> +
> + XenPvBlockWaitSlot (Dev);
> + Index = Dev->Ring.req_prod_pvt;
> + Request = RING_GET_REQUEST(&Dev->Ring, Index);
> + Request->operation = Operation;
> + Request->nr_segments = 0;
> + Request->handle = Dev->DeviceId;
> + Request->id = Id;
> + /* Not needed anyway, but the backend will check it */
> + Request->sector_number = 0;
> + Dev->Ring.req_prod_pvt = Index + 1;
> + MemoryFence ();
> + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY (&Dev->Ring, Notify);
> + if (Notify) {
> + XENBUS_PROTOCOL *XenBusIo = Dev->XenBusIo;
> + XenBusIo->EventChannelNotify (XenBusIo, Dev->EventChannel);
> + }
> +}
> +
> +VOID
Should you have a comment mentioning that a lock might be needed
by the caller? That way the caller won't try to make this call while
also sending (or reading) data at the same time.
> +XenPvBlockSync (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev
> + )
> +{
> + if (Dev->MediaInfo.ReadWrite) {
> + if (Dev->MediaInfo.FeatureBarrier) {
> + XenPvBlockPushOperation (Dev, BLKIF_OP_WRITE_BARRIER, 0);
> + }
> +
> + if (Dev->MediaInfo.FeatureFlushCache) {
> + XenPvBlockPushOperation (Dev, BLKIF_OP_FLUSH_DISKCACHE, 0);
> + }
> + }
> +
> + /* Note: This won't finish if another thread enqueues requests. */
> + while (TRUE) {
> + XenPvBlockAsyncIoPoll (Dev);
> + if (RING_FREE_REQUESTS (&Dev->Ring) == RING_SIZE (&Dev->Ring)) {
> + break;
> + }
> + }
> +}
> +
> +VOID
> +XenPvBlockAsyncIoPoll (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev
> + )
> +{
> + RING_IDX ProducerIndex, ConsumerIndex;
> + blkif_response_t *Response;
> + INT32 More;
> +
> + do {
> + ProducerIndex = Dev->Ring.sring->rsp_prod;
> + /* Ensure we see queued responses up to 'ProducerIndex'. */
> + MemoryFence ();
> + ConsumerIndex = Dev->Ring.rsp_cons;
> +
> + while (ConsumerIndex != ProducerIndex) {
> + XEN_BLOCK_FRONT_IO *IoData = NULL;
> + INT16 Status;
> +
> + Response = RING_GET_RESPONSE (&Dev->Ring, ConsumerIndex);
> +
> + IoData = (VOID *) (UINTN) Response->id;
> + Status = Response->status;
> +
> + switch (Response->operation) {
> + case BLKIF_OP_READ:
> + case BLKIF_OP_WRITE:
> + {
> + INT32 Index;
> +
> + if (Status != BLKIF_RSP_OKAY) {
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: "
> + "%a error %d on %a at offset %p, num bytes %p\n",
> + Response->operation == BLKIF_OP_READ ? "read" : "write",
> + Status, IoData->Dev->NodeName,
> + IoData->Offset,
> + IoData->Size));
> + }
> +
> + for (Index = 0; Index < IoData->NumRef; Index++) {
> + Dev->XenBusIo->GrantEndAccess (Dev->XenBusIo,
> IoData->GrantRef[Index]);
> + }
> +
> + break;
> + }
> +
> + case BLKIF_OP_WRITE_BARRIER:
> + if (Status != BLKIF_RSP_OKAY) {
> + DEBUG ((EFI_D_ERROR, "XenPvBlk: write barrier error %d\n",
> Status));
> + }
> + break;
> + case BLKIF_OP_FLUSH_DISKCACHE:
> + if (Status != BLKIF_RSP_OKAY) {
> + DEBUG ((EFI_D_ERROR, "XenPvBlk: flush error %d\n", Status));
> + }
> + break;
> +
> + default:
> + DEBUG ((EFI_D_ERROR,
> + "XenPvBlk: unrecognized block operation %d response (status
> %d)\n",
> + Response->operation, Status));
> + break;
> + }
> +
> + Dev->Ring.rsp_cons = ++ConsumerIndex;
> + if (IoData != NULL) {
> + IoData->Status = Status ? EFI_DEVICE_ERROR : EFI_SUCCESS;
> + }
> + if (Dev->Ring.rsp_cons != ConsumerIndex) {
> + /* We reentered, we must not continue here */
> + break;
> + }
> + }
> +
> + RING_FINAL_CHECK_FOR_RESPONSES (&Dev->Ring, More);
> + } while (More != 0);
> +}
> diff --git a/OvmfPkg/XenPvBlkDxe/BlockFront.h
> b/OvmfPkg/XenPvBlkDxe/BlockFront.h
> new file mode 100644
> index 0000000..fb60dbd
> --- /dev/null
> +++ b/OvmfPkg/XenPvBlkDxe/BlockFront.h
> @@ -0,0 +1,87 @@
> +#include "XenPvBlkDxe.h"
> +
> +#include <IndustryStandard/Xen/event_channel.h>
> +#include <IndustryStandard/Xen/io/blkif.h>
> +
> +typedef struct _XEN_BLOCK_FRONT_DEVICE XEN_BLOCK_FRONT_DEVICE;
> +typedef struct _XEN_BLOCK_FRONT_IO XEN_BLOCK_FRONT_IO;
> +
> +struct _XEN_BLOCK_FRONT_IO
> +{
> + XEN_BLOCK_FRONT_DEVICE *Dev;
> + UINT8 *Buffer;
> + UINTN Size;
> + UINTN Offset;
> +
> + grant_ref_t GrantRef[BLKIF_MAX_SEGMENTS_PER_REQUEST];
> + INT32 NumRef;
> +
> + EFI_STATUS Status;
> +};
> +
> +typedef struct
> +{
> + UINT64 Sectors;
> + UINT32 SectorSize;
> + UINT32 VDiskInfo;
> + BOOLEAN ReadWrite;
> + BOOLEAN CdRom;
> + BOOLEAN FeatureBarrier;
> + BOOLEAN FeatureFlushCache;
> +} XEN_BLOCK_FRONT_MEDIA_INFO;
> +
> +#define XEN_BLOCK_FRONT_SIGNATURE SIGNATURE_32 ('X', 'p', 'v', 'B')
> +struct _XEN_BLOCK_FRONT_DEVICE {
> + UINT32 Signature;
> + EFI_BLOCK_IO_PROTOCOL BlockIo;
> + domid_t DomainId;
> +
> + blkif_front_ring_t Ring;
> + grant_ref_t RingRef;
> + evtchn_port_t EventChannel;
> + blkif_vdev_t DeviceId;
> +
> + CONST CHAR8 *NodeName;
> + XEN_BLOCK_FRONT_MEDIA_INFO MediaInfo;
> +
> + VOID *StateWatchToken;
> +
> + XENBUS_PROTOCOL *XenBusIo;
> +};
> +
> +#define XEN_BLOCK_FRONT_FROM_BLOCK_IO(b) \
> + CR (b, XEN_BLOCK_FRONT_DEVICE, BlockIo, XEN_BLOCK_FRONT_SIGNATURE)
> +
> +EFI_STATUS
> +XenPvBlockFrontInitialization (
> + IN XENBUS_PROTOCOL *XenBusIo,
> + IN CONST CHAR8 *NodeName,
> + OUT XEN_BLOCK_FRONT_DEVICE **DevPtr
> + );
> +
> +VOID
> +XenPvBlockFrontShutdown (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev
> + );
> +
> +VOID
> +XenPvBlockAsyncIo (
> + IN OUT XEN_BLOCK_FRONT_IO *IoData,
> + IN BOOLEAN IsWrite
> + );
> +
> +EFI_STATUS
> +XenPvBlockIo (
> + IN OUT XEN_BLOCK_FRONT_IO *IoData,
> + IN BOOLEAN IsWrite
> + );
> +
> +VOID
> +XenPvBlockAsyncIoPoll (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev
> + );
> +
> +VOID
> +XenPvBlockSync (
> + IN XEN_BLOCK_FRONT_DEVICE *Dev
> + );
> diff --git a/OvmfPkg/XenPvBlkDxe/XenPvBlkDxe.c
> b/OvmfPkg/XenPvBlkDxe/XenPvBlkDxe.c
> index 00a8a2c..930333f 100644
> --- a/OvmfPkg/XenPvBlkDxe/XenPvBlkDxe.c
> +++ b/OvmfPkg/XenPvBlkDxe/XenPvBlkDxe.c
> @@ -36,6 +36,8 @@
>
> #include "XenPvBlkDxe.h"
>
> +#include "BlockFront.h"
> +
>
> ///
> /// Driver Binding Protocol instance
> @@ -278,6 +280,7 @@ XenPvBlkDxeDriverBindingStart (
> {
> EFI_STATUS Status;
> XENBUS_PROTOCOL *XenBusIo;
> + XEN_BLOCK_FRONT_DEVICE *Dev;
>
> Status = gBS->OpenProtocol (
> ControllerHandle,
> @@ -291,7 +294,17 @@ XenPvBlkDxeDriverBindingStart (
> return Status;
> }
>
> + Status = XenPvBlockFrontInitialization (XenBusIo, XenBusIo->Node, &Dev);
> + if (EFI_ERROR (Status)) {
> + goto CloseProtocol;
> + }
> +
> return EFI_SUCCESS;
> +
> +CloseProtocol:
> + gBS->CloseProtocol (ControllerHandle, &gXenBusProtocolGuid,
> + This->DriverBindingHandle, ControllerHandle);
> + return Status;
> }
>
> /**
> diff --git a/OvmfPkg/XenPvBlkDxe/XenPvBlkDxe.inf
> b/OvmfPkg/XenPvBlkDxe/XenPvBlkDxe.inf
> index aaa809f..619ed8f 100644
> --- a/OvmfPkg/XenPvBlkDxe/XenPvBlkDxe.inf
> +++ b/OvmfPkg/XenPvBlkDxe/XenPvBlkDxe.inf
> @@ -50,6 +50,8 @@
> XenPvBlkDxe.c
> ComponentName.c
> ComponentName.h
> + BlockFront.c
> + BlockFront.h
>
>
> [LibraryClasses]
> --
> Anthony PERARD
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxx
> http://lists.xen.org/xen-devel
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |