Re: [Xen-devel] [PATCH v2 1/2] libxc: introduce XC_SAVE_ID_TOOLSTACK
On Fri, Jan 20, 2012 at 9:25 AM, Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx> wrote:
Introduce a new save_id to save/restore toolstack specific extra
information.
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
tools/libxc/xc_domain_restore.c | 66 +++++++++++++++++++++++++++-----------
tools/libxc/xc_domain_save.c | 17 ++++++++++
tools/libxc/xenguest.h | 23 +++++++++++++-
tools/libxc/xg_save_restore.h | 1 +
tools/libxl/libxl_dom.c | 2 +-
tools/xcutils/xc_restore.c | 3 +-
6 files changed, 90 insertions(+), 22 deletions(-)
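For reference, the stream framing this adds is the usual negative chunk id followed by a 4-byte length and an opaque blob, which the restore side below reads back. A rough save-side sketch (write_toolstack_record() is an illustrative name, not a function from the patch; write_exact() is the existing libxc helper):

    /* Sketch only: the record layout implied by the restore code below:
     * XC_SAVE_ID_TOOLSTACK marker, a uint32_t length, then the blob. */
    static int write_toolstack_record(xc_interface *xch, int io_fd,
                                      uint8_t *blob, uint32_t len)
    {
        int id = XC_SAVE_ID_TOOLSTACK;

        if ( write_exact(io_fd, &id, sizeof(id)) ||
             write_exact(io_fd, &len, sizeof(len)) ||
             write_exact(io_fd, blob, len) )
        {
            PERROR("Error writing the toolstack record");
            return -1;
        }
        return 0;
    }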
diff --git a/tools/libxc/xc_domain_restore.c b/tools/libxc/xc_domain_restore.c
index 14451d1..72b6d5b 100644
--- a/tools/libxc/xc_domain_restore.c
+++ b/tools/libxc/xc_domain_restore.c
@@ -702,7 +702,8 @@ static void pagebuf_free(pagebuf_t* buf)
}
static int pagebuf_get_one(xc_interface *xch, struct restore_ctx *ctx,
- pagebuf_t* buf, int fd, uint32_t dom)
+ pagebuf_t* buf, int fd, uint32_t dom,
+ struct restore_callbacks* callbacks)
{
int count, countpages, oldcount, i;
void* ptmp;
@@ -725,7 +726,7 @@ static int pagebuf_get_one(xc_interface *xch, struct restore_ctx *ctx,
case XC_SAVE_ID_ENABLE_VERIFY_MODE:
DPRINTF("Entering page verify mode\n");
buf->verify = 1;
- return pagebuf_get_one(xch, ctx, buf, fd, dom);
+ return pagebuf_get_one(xch, ctx, buf, fd, dom, callbacks);
case XC_SAVE_ID_VCPU_INFO:
buf->new_ctxt_format = 1;
@@ -736,7 +737,7 @@ static int pagebuf_get_one(xc_interface *xch, struct restore_ctx *ctx,
return -1;
}
// DPRINTF("Max VCPU ID: %d, vcpumap: %llx\n", buf->max_vcpu_id, buf->vcpumap);
- return pagebuf_get_one(xch, ctx, buf, fd, dom);
+ return pagebuf_get_one(xch, ctx, buf, fd, dom, callbacks);
case XC_SAVE_ID_HVM_IDENT_PT:
/* Skip padding 4 bytes then read the EPT identity PT location. */
@@ -747,7 +748,7 @@ static int pagebuf_get_one(xc_interface *xch, struct restore_ctx *ctx,
return -1;
}
// DPRINTF("EPT identity map address: %llx\n", buf->identpt);
- return pagebuf_get_one(xch, ctx, buf, fd, dom);
+ return pagebuf_get_one(xch, ctx, buf, fd, dom, callbacks);
case XC_SAVE_ID_HVM_VM86_TSS:
/* Skip padding 4 bytes then read the vm86 TSS location. */
@@ -758,7 +759,7 @@ static int pagebuf_get_one(xc_interface *xch, struct restore_ctx *ctx,
return -1;
}
// DPRINTF("VM86 TSS location: %llx\n", buf->vm86_tss);
- return pagebuf_get_one(xch, ctx, buf, fd, dom);
+ return pagebuf_get_one(xch, ctx, buf, fd, dom, callbacks);
case XC_SAVE_ID_TMEM:
DPRINTF("xc_domain_restore start tmem\n");
@@ -766,14 +767,14 @@ static int pagebuf_get_one(xc_interface *xch, struct restore_ctx *ctx,
PERROR("error reading/restoring tmem");
return -1;
}
- return pagebuf_get_one(xch, ctx, buf, fd, dom);
+ return pagebuf_get_one(xch, ctx, buf, fd, dom, callbacks);
case XC_SAVE_ID_TMEM_EXTRA:
if ( xc_tmem_restore_extra(xch, dom, fd) ) {
PERROR("error reading/restoring tmem extra");
return -1;
}
- return pagebuf_get_one(xch, ctx, buf, fd, dom);
+ return pagebuf_get_one(xch, ctx, buf, fd, dom, callbacks);
case XC_SAVE_ID_TSC_INFO:
{
@@ -787,7 +788,7 @@ static int pagebuf_get_one(xc_interface *xch, struct restore_ctx *ctx,
PERROR("error reading/restoring tsc info");
return -1;
}
- return pagebuf_get_one(xch, ctx, buf, fd, dom);
+ return pagebuf_get_one(xch, ctx, buf, fd, dom, callbacks);
}
case XC_SAVE_ID_HVM_CONSOLE_PFN :
@@ -799,12 +800,12 @@ static int pagebuf_get_one(xc_interface *xch, struct restore_ctx *ctx,
return -1;
}
// DPRINTF("console pfn location: %llx\n", buf->console_pfn);
- return pagebuf_get_one(xch, ctx, buf, fd, dom);
+ return pagebuf_get_one(xch, ctx, buf, fd, dom, callbacks);
case XC_SAVE_ID_LAST_CHECKPOINT:
ctx->last_checkpoint = 1;
// DPRINTF("last checkpoint indication received");
- return pagebuf_get_one(xch, ctx, buf, fd, dom);
+ return pagebuf_get_one(xch, ctx, buf, fd, dom, callbacks);
case XC_SAVE_ID_HVM_ACPI_IOPORTS_LOCATION:
/* Skip padding 4 bytes then read the acpi ioport location. */
@@ -814,7 +815,7 @@ static int pagebuf_get_one(xc_interface *xch, struct restore_ctx *ctx,
PERROR("error read the acpi ioport location");
return -1;
}
- return pagebuf_get_one(xch, ctx, buf, fd, dom);
+ return pagebuf_get_one(xch, ctx, buf, fd, dom, callbacks);
case XC_SAVE_ID_HVM_VIRIDIAN:
/* Skip padding 4 bytes then read the acpi ioport location. */
@@ -824,8 +825,33 @@ static int pagebuf_get_one(xc_interface *xch, struct restore_ctx *ctx,
PERROR("error read the viridian flag");
return -1;
}
- return pagebuf_get_one(xch, ctx, buf, fd, dom);
+ return pagebuf_get_one(xch, ctx, buf, fd, dom, callbacks);
+ case XC_SAVE_ID_TOOLSTACK:
+ {
+ uint32_t len;
+ uint8_t *buf2;
+ RDEXACT(fd, &len, sizeof(len));
+ buf2 = (uint8_t*) malloc(len);
+ if ( buf2 == NULL )
+ {
+ PERROR("error memory allocation");
+ return -1;
+ }
+ RDEXACT(fd, buf2, len);
+ if ( callbacks != NULL && callbacks->toolstack_restore != NULL )
+ {
+ if ( callbacks->toolstack_restore(dom,
+ buf2, len, callbacks->data) < 0 )
+ {
pagebuf_get_one() shouldn't be modifying any domain state until the entire memory buffer has been obtained, especially the device state. See below.
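Something along these lines instead, i.e. buffer the blob the way the other records are buffered and apply it later (sketch only; tdata/tdata_len are hypothetical new fields in pagebuf_t, to be freed in pagebuf_free() with the rest of the buffered state):

    case XC_SAVE_ID_TOOLSTACK:
        if ( RDEXACT(fd, &buf->tdata_len, sizeof(buf->tdata_len)) )
        {
            PERROR("error reading toolstack data length");
            return -1;
        }
        buf->tdata = malloc(buf->tdata_len);
        if ( buf->tdata == NULL )
        {
            PERROR("error allocating toolstack data buffer");
            return -1;
        }
        if ( RDEXACT(fd, buf->tdata, buf->tdata_len) )
        {
            PERROR("error reading toolstack data");
            return -1;
        }
        /* No toolstack_restore call here: the blob is only applied once
         * the complete checkpoint has been received. */
        return pagebuf_get_one(xch, ctx, buf, fd, dom, callbacks);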
@@ -1542,7 +1570,7 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
// DPRINTF("Buffered checkpoint\n");
- if ( pagebuf_get(xch, ctx, &pagebuf, io_fd, dom) ) {
+ if ( pagebuf_get(xch, ctx, &pagebuf, io_fd, dom, callbacks) ) {
PERROR("error when buffering batch, finishing");
goto finish;
}
If there is an error in applying the toolstack state, then pagebuf_get returns -1 and the rest of the code would still attempt to resume the domain, with possibly inconsistent device model state.
Also, suppose there was no error in applying the toolstack state. If a network error then occurs while receiving the next XC_SAVE_ID or so, the code following the above snippet would again attempt to resume the domain, this time with a memory state that is inconsistent with the device state.
The right place to call callbacks->toolstack_restore would be after the finish_hvm: label, where the old QEMU device state is currently restored.
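That is, something like the following, assuming the blob was buffered into the pagebuf as sketched above (the tdata/tdata_len names remain hypothetical):

     finish_hvm:
        /* ... existing QEMU device model state handling ... */

        /* Apply the buffered toolstack blob only now that the memory image
         * and device model state are complete. */
        if ( pagebuf.tdata != NULL && callbacks != NULL &&
             callbacks->toolstack_restore != NULL )
        {
            if ( callbacks->toolstack_restore(dom, pagebuf.tdata,
                                              pagebuf.tdata_len,
                                              callbacks->data) < 0 )
            {
                PERROR("error applying toolstack data");
                goto out; /* the function's existing error path */
            }
        }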
shriram
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel