Re: [Xen-devel] [PATCH 4/4] Implement 'xm vcpu-set' command for HVM guest
On Fri, 31 May 2013, Anthony PERARD wrote:
> From: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
>
> Currently Xen has an 'xm vcpu-set' command for PV domains, but it is
> not available for HVM domains. This patch enables the 'xm vcpu-set'
> command for HVM domains. It sets up a vcpu watch on xenstore and, on
> the qemu side, handles vcpu online/offline accordingly. With this
> patch, 'xm vcpu-set' works for both PV and HVM guests with the same
> syntax.
>
> Signed-off-by: Liu, Jinsong <jinsong.liu@xxxxxxxxx>
>
> Port from qemu-xen-traditional to qemu-xen:
> qemu-xen no longer supports commands sent through xenstore, so this
> patch includes the initialisation of a xenstore watch loop.
> An upstream version of this patch would need to go through QMP.
>
> Signed-off-by: Anthony PERARD <anthony.perard@xxxxxxxxxx>
> ---
>  hw/acpi_piix4.c |    5 +++--
>  xen-all.c       |   65 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 68 insertions(+), 2 deletions(-)
>
> diff --git a/hw/acpi_piix4.c b/hw/acpi_piix4.c
> index 49c38d3..4c01aa2 100644
> --- a/hw/acpi_piix4.c
> +++ b/hw/acpi_piix4.c
> @@ -728,8 +728,8 @@ static int disable_processor(PIIX4PMState *g, int cpu)
>
>  void qemu_cpu_add_remove(int cpu, int state)
>  {
> -    if ((cpu <=0) || (cpu >= max_cpus)) {
> -        fprintf(stderr, "vcpu out of range, should be [1~%d]\n", max_cpus - 1);
> +    if ((cpu < 0) || (cpu >= max_cpus)) {
> +        fprintf(stderr, "vcpu out of range, should be [0~%d]\n", max_cpus - 1);
>          return;
>      }
I see where this change is coming from now, but...
> @@ -742,6 +742,7 @@ void qemu_cpu_add_remove(int cpu, int state)
>              return;
>          }
>      }
> +    fprintf(stderr, "%s vcpu %d\n", state ? "Add" : "Remove", cpu);
>
>      pm_update_sci(acpi_state);
>  }
...the pm_update_sci call is still in place.
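
For reference, here is a sketch of the control flow as I read it with both
hunks applied; the enable_processor()/disable_processor() bodies are assumed
from the qemu-xen-traditional version and are not part of the quoted patch:

void qemu_cpu_add_remove(int cpu, int state)
{
    if ((cpu < 0) || (cpu >= max_cpus)) {
        fprintf(stderr, "vcpu out of range, should be [0~%d]\n", max_cpus - 1);
        return;
    }

    if (state) {
        if (!enable_processor(acpi_state, cpu)) {
            return;             /* already online: no message, no SCI */
        }
    } else {
        if (!disable_processor(acpi_state, cpu)) {
            return;             /* already offline: no message, no SCI */
        }
    }
    fprintf(stderr, "%s vcpu %d\n", state ? "Add" : "Remove", cpu);

    pm_update_sci(acpi_state);  /* still raised on every successful change */
}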
> diff --git a/xen-all.c b/xen-all.c
> index daf43b9..04b88a6 100644
> --- a/xen-all.c
> +++ b/xen-all.c
> @@ -99,6 +99,8 @@ typedef struct XenIOState {
>      Notifier suspend;
>  } XenIOState;
>
> +static void xen_xenstore_watch_vcpu_set(XenIOState *state);
> +
>  /* Xen specific function for piix pci */
>
>  int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
> @@ -1170,6 +1172,7 @@ int xen_hvm_init(void)
>      xen_be_register("vkbd", &xen_kbdmouse_ops);
>      xen_be_register("qdisk", &xen_blkdev_ops);
>      xen_read_physmap(state);
> +    xen_xenstore_watch_vcpu_set(state);
>
>      return 0;
>  }
> @@ -1234,3 +1237,65 @@ void xen_modified_memory(ram_addr_t start, ram_addr_t length)
>          }
>      }
>  }
> +
> +/* Xenstore watch init for vcpu-set */
> +
> +static void xenstore_process_vcpu_set_event(char **vec, struct xs_handle *xsh)
> +{
> +    char *act = NULL;
> +    char *vcpustr, *node = vec[XS_WATCH_PATH];
> +    unsigned int vcpu, len;
> +
> +    vcpustr = strstr(node, "cpu/");
> +    if (!vcpustr) {
> +        fprintf(stderr, "vcpu-set: watch node error.\n");
> +        return;
> +    }
> +    sscanf(vcpustr, "cpu/%u", &vcpu);
> +
> +    act = xs_read(xsh, XBT_NULL, node, &len);
> +    if (!act) {
> +        fprintf(stderr, "vcpu-set: no command yet.\n");
> +        return;
> +    }
> +
> +    if (!strncmp(act, "online", len))
> +        qemu_cpu_add_remove(vcpu, 1);
> +    else if (!strncmp(act, "offline", len))
> +        qemu_cpu_add_remove(vcpu, 0);
> +    else
> +        fprintf(stderr, "vcpu-set: command error.\n");
> +
> +    free(act);
> +    return;
> +}
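
As far as I can tell the layout this parser expects is the xend convention,
i.e. /local/domain/<domid>/cpu/<vcpu>/availability holding "online" or
"offline". For anyone trying this out, here is a minimal sketch of the
toolstack-side write that fires the watch (illustrative only, not part of the
patch; build with -lxenstore):

#include <stdio.h>
#include <string.h>
#include <xenstore.h>

/* Assumed layout: /local/domain/<domid>/cpu/<vcpu>/availability */
static int set_vcpu_availability(int domid, int vcpu, const char *state)
{
    char path[64];
    struct xs_handle *xsh = xs_open(0);

    if (!xsh) {
        return -1;
    }
    snprintf(path, sizeof(path),
             "/local/domain/%d/cpu/%d/availability", domid, vcpu);
    /* This write is what triggers the "vcpu-set" watch registered below. */
    if (!xs_write(xsh, XBT_NULL, path, state, strlen(state))) {
        xs_close(xsh);
        return -1;
    }
    xs_close(xsh);
    return 0;
}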
> +
> +static void xenstore_process_event(void *opaque)
> +{
> +    char **vec;
> +    unsigned int num;
> +    struct xs_handle *xsh = opaque;
> +
> +    vec = xs_read_watch(xsh, &num);
> +    if (!vec)
> +        return;
> +
> +    if (!strcmp(vec[XS_WATCH_TOKEN], "vcpu-set")) {
> +        xenstore_process_vcpu_set_event(vec, xsh);
> +        goto out;
> +    }
> +
> + out:
> +    free(vec);
> +}
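
Since the commit message mentions that qemu-xen lost the xenstore command loop
qemu-xen-traditional had, it may help to spell out what qemu_set_fd_handler is
standing in for here. A standalone sketch of the equivalent loop, assuming a
watch has already been registered with token "vcpu-set":

#include <stdlib.h>
#include <string.h>
#include <sys/select.h>
#include <xenstore.h>

static void watch_loop(struct xs_handle *xsh)
{
    int fd = xs_fileno(xsh);

    for (;;) {
        fd_set rfds;
        unsigned int num;
        char **vec;

        FD_ZERO(&rfds);
        FD_SET(fd, &rfds);
        /* Block until the xenstore connection becomes readable. */
        if (select(fd + 1, &rfds, NULL, NULL, NULL) <= 0) {
            break;
        }
        /* vec[XS_WATCH_PATH] is the node that changed,
         * vec[XS_WATCH_TOKEN] the token passed to xs_watch(). */
        vec = xs_read_watch(xsh, &num);
        if (!vec) {
            break;
        }
        if (!strcmp(vec[XS_WATCH_TOKEN], "vcpu-set")) {
            /* dispatch, e.g. xenstore_process_vcpu_set_event(vec, xsh); */
        }
        free(vec);
    }
}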
> +
> +static void xen_xenstore_watch_vcpu_set(XenIOState *state)
> +{
> +    char path[40];
> +    /* Set a watch for vcpu-set */
> +    snprintf(path, sizeof(path), "/local/domain/%d/cpu", xen_domid);
> +    xs_watch(state->xenstore, path, "vcpu-set");
> +
> +    qemu_set_fd_handler(xs_fileno(state->xenstore), xenstore_process_event,
> +                        NULL, state->xenstore);
> +}
> --
> Anthony PERARD
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel