[Xen-devel] [RFC v2 6/6] xen/arm: Implement toolstack for xl restore/save and migrate
From: Jaeyong Yoo <jaeyong.yoo@xxxxxxxxxxx>

This patch implements the xl save/restore operations in xc_arm_migrate.c
and makes them compilable within the existing design. The same code path
is also used by migration.

The overall save process is the following:
 1) save guest parameters (memory map, console and store PFN, etc.)
 2) save memory (if it is live migration, perform dirty-page tracing)
 3) save HVM state (GIC, timer, VCPU, etc.)

Signed-off-by: Alexey Sokolov <sokolov.a@xxxxxxxxxxx>
Signed-off-by: Wei Huang <w1.huang@xxxxxxxxxxx>
---
 config/arm32.mk              |    1 +
 config/arm64.mk              |    1 +
 tools/libxc/Makefile         |    6 +-
 tools/libxc/xc_arm_migrate.c |  702 ++++++++++++++++++++++++++++++++++++++++++
 tools/libxc/xc_dom_arm.c     |    4 +-
 tools/libxl/libxl.h          |    3 -
 tools/misc/Makefile          |    4 +-
 7 files changed, 714 insertions(+), 7 deletions(-)
 create mode 100644 tools/libxc/xc_arm_migrate.c
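(Reviewer's note, not part of the commit message: the save-stream layout
implemented below is summarized here for convenience. This is a sketch
derived from the code in this patch, not a separate specification, and
the field labels in it are illustrative; all records are written raw
with write_exact().)

    /*
     * Stream layout produced by xc_domain_save() and consumed by
     * xc_domain_restore():
     *
     *   guest_params_t params;        /* flags, start/max GPFN,        */
     *                                 /* console/store PFN, max_vcpu_id */
     *   per pre-copy iteration, for each dirty page:
     *     xen_pfn_t gpfn;             /* guest page frame number       */
     *     uint8_t   data[PAGE_SIZE];  /* page contents                 */
     *   xen_pfn_t end_marker;         /* (xen_pfn_t)-1 ends the memory */
     *   uint32_t  rec_size;           /* size of the HVM context blob  */
     *   uint8_t   hvm_buf[rec_size];  /* HVM state (GIC, timer, VCPU)  */
     */

With CONFIG_MIGRATE enabled on ARM this path is exercised by the usual
toolstack commands, e.g.:

    xl save <domid> <checkpoint-file>
    xl restore <checkpoint-file>
    xl migrate <domid> <host>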
diff --git a/config/arm32.mk b/config/arm32.mk
index aa79d22..01374c9 100644
--- a/config/arm32.mk
+++ b/config/arm32.mk
@@ -1,6 +1,7 @@
 CONFIG_ARM := y
 CONFIG_ARM_32 := y
 CONFIG_ARM_$(XEN_OS) := y
+CONFIG_MIGRATE := y
 
 CONFIG_XEN_INSTALL_SUFFIX :=
 
diff --git a/config/arm64.mk b/config/arm64.mk
index 15b57a4..7ac3b65 100644
--- a/config/arm64.mk
+++ b/config/arm64.mk
@@ -1,6 +1,7 @@
 CONFIG_ARM := y
 CONFIG_ARM_64 := y
 CONFIG_ARM_$(XEN_OS) := y
+CONFIG_MIGRATE := y
 
 CONFIG_XEN_INSTALL_SUFFIX :=
 
diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
index 2cca2b2..6b90b1c 100644
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -43,8 +43,13 @@ CTRL_SRCS-$(CONFIG_MiniOS) += xc_minios.c
 GUEST_SRCS-y :=
 GUEST_SRCS-y += xg_private.c xc_suspend.c
 ifeq ($(CONFIG_MIGRATE),y)
+ifeq ($(CONFIG_X86),y)
 GUEST_SRCS-y += xc_domain_restore.c xc_domain_save.c
 GUEST_SRCS-y += xc_offline_page.c xc_compression.c
+endif
+ifeq ($(CONFIG_ARM),y)
+GUEST_SRCS-y += xc_arm_migrate.c
+endif
 else
 GUEST_SRCS-y += xc_nomigrate.c
 endif
@@ -64,7 +69,6 @@ $(patsubst %.c,%.opic,$(ELF_SRCS-y)): CFLAGS += -Wno-pointer-sign
 GUEST_SRCS-y += xc_dom_core.c xc_dom_boot.c
 GUEST_SRCS-y += xc_dom_elfloader.c
 GUEST_SRCS-$(CONFIG_X86) += xc_dom_bzimageloader.c
-GUEST_SRCS-$(CONFIG_X86) += xc_dom_decompress_lz4.c
 GUEST_SRCS-$(CONFIG_ARM) += xc_dom_armzimageloader.c
 GUEST_SRCS-y += xc_dom_binloader.c
 GUEST_SRCS-y += xc_dom_compat_linux.c
diff --git a/tools/libxc/xc_arm_migrate.c b/tools/libxc/xc_arm_migrate.c
new file mode 100644
index 0000000..ab2b94c
--- /dev/null
+++ b/tools/libxc/xc_arm_migrate.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright (c) 2013, Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <inttypes.h>
+#include <errno.h>
+#include <xenctrl.h>
+#include <xenguest.h>
+
+#include <unistd.h>
+#include <xc_private.h>
+#include <xc_dom.h>
+#include "xc_bitops.h"
+#include "xg_private.h"
+
+#define DEF_MAX_ITERS          29 /* limit us to 30 times round loop      */
+#define DEF_MAX_FACTOR          3 /* never send more than 3x p2m_size     */
+#define DEF_MIN_DIRTY_PER_ITER 50 /* dirty page count to define last iter */
+#define DEF_PROGRESS_RATE      50 /* progress bar update rate             */
+
+//#define DISABLE_LIVE_MIGRATION
+
+//#define ARM_MIGRATE_VERBOSE
+
+/*
+ * Guest params to save: used HVM params, save flags, memory map
+ */
+typedef struct guest_params
+{
+    unsigned long console_pfn;
+    unsigned long store_pfn;
+    uint32_t flags;
+    xen_pfn_t start_gpfn;
+    xen_pfn_t max_gpfn;
+    uint32_t max_vcpu_id;
+} guest_params_t;
+
+static int suspend_and_state(int (*suspend)(void*), void *data,
+                             xc_interface *xch, int dom)
+{
+    xc_dominfo_t info;
+    if ( !(*suspend)(data) )
+    {
+        ERROR("Suspend request failed");
+        return -1;
+    }
+
+    if ( (xc_domain_getinfo(xch, dom, 1, &info) != 1) ||
+         !info.shutdown || (info.shutdown_reason != SHUTDOWN_suspend) )
+    {
+        ERROR("Domain is not in suspended state after suspend attempt");
+        return -1;
+    }
+
+    return 0;
+}
+
+static int write_exact_handled(xc_interface *xch, int fd, const void *data,
+                               size_t size)
+{
+    if ( write_exact(fd, data, size) )
+    {
+        ERROR("Write failed, check space");
+        return -1;
+    }
+    return 0;
+}
+
+/* ============ Memory ============= */
+static int save_memory(xc_interface *xch, int io_fd, uint32_t dom,
+                       struct save_callbacks *callbacks,
+                       uint32_t max_iters, uint32_t max_factor,
+                       guest_params_t *params)
+{
+    int live  = !!(params->flags & XCFLAGS_LIVE);
+    int debug = !!(params->flags & XCFLAGS_DEBUG);
+    xen_pfn_t i;
+    char reportbuf[80];
+    int iter = 0;
+    int last_iter = !live;
+    int total_dirty_pages_num = 0;
+    int dirty_pages_on_prev_iter_num = 0;
+    int count = 0;
+    char *page = 0;
+    xen_pfn_t *busy_pages = 0;
+    int busy_pages_count = 0;
+    int busy_pages_max = 256;
+
+    DECLARE_HYPERCALL_BUFFER(unsigned long, to_send);
+
+    xen_pfn_t start = params->start_gpfn;
+    const xen_pfn_t end = params->max_gpfn;
+    const xen_pfn_t mem_size = end - start;
+
+    if ( debug )
+    {
+        IPRINTF("(save mem) start=%llx end=%llx!\n",
+                (unsigned long long)start, (unsigned long long)end);
+    }
+
+    if ( live )
+    {
+        if ( xc_shadow_control(xch, dom, XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
+                               NULL, 0, NULL, 0, NULL) < 0 )
+        {
+            ERROR("Couldn't enable log-dirty mode!\n");
+            return -1;
+        }
+
+        max_iters  = max_iters  ? : DEF_MAX_ITERS;
+        max_factor = max_factor ? : DEF_MAX_FACTOR;
+
+        if ( debug )
+            IPRINTF("Log-dirty mode enabled, max_iters=%d, max_factor=%d!\n",
+                    max_iters, max_factor);
+    }
+
+    to_send = xc_hypercall_buffer_alloc_pages(xch, to_send,
+                                              NRPAGES(bitmap_size(mem_size)));
+    if ( !to_send )
+    {
+        ERROR("Couldn't allocate to_send array!\n");
+        return -1;
+    }
+
+    /* send all pages on first iter */
+    memset(to_send, 0xff, bitmap_size(mem_size));
+
+    for ( ; ; )
+    {
+        int dirty_pages_on_current_iter_num = 0;
+        int frc;
+        iter++;
+
+        snprintf(reportbuf, sizeof(reportbuf),
+                 "Saving memory: iter %d (last sent %u)",
+                 iter, dirty_pages_on_prev_iter_num);
+
+        xc_report_progress_start(xch, reportbuf, mem_size);
+
+        if ( (iter > 1 &&
+              dirty_pages_on_prev_iter_num < DEF_MIN_DIRTY_PER_ITER) ||
+             (iter == max_iters) ||
+             (total_dirty_pages_num >= mem_size*max_factor) )
+        {
+            if ( debug )
+                IPRINTF("Last iteration");
+            last_iter = 1;
+        }
+
+        if ( last_iter )
+        {
+            if ( suspend_and_state(callbacks->suspend, callbacks->data,
+                                   xch, dom) )
+            {
+                ERROR("Domain appears not to have suspended");
+                return -1;
+            }
+        }
+        if ( live && iter > 1 )
+        {
+            frc = xc_shadow_control(xch, dom, XEN_DOMCTL_SHADOW_OP_CLEAN,
+                                    HYPERCALL_BUFFER(to_send), mem_size,
+                                    NULL, 0, NULL);
+            if ( frc != mem_size )
+            {
+                ERROR("Error peeking shadow bitmap");
+                xc_hypercall_buffer_free_pages(xch, to_send,
+                                               NRPAGES(bitmap_size(mem_size)));
+                return -1;
+            }
+        }
+
+        busy_pages = malloc(sizeof(xen_pfn_t) * busy_pages_max);
+
+        for ( i = start; i < end; ++i )
+        {
+            if ( test_bit(i - start, to_send) )
+            {
+                page = xc_map_foreign_range(xch, dom, PAGE_SIZE, PROT_READ, i);
+                if ( !page )
+                {
+                    /* This page is mapped elsewhere, should be resent later */
+                    busy_pages[busy_pages_count] = i;
+                    busy_pages_count++;
+                    if ( busy_pages_count >= busy_pages_max )
+                    {
+                        busy_pages_max += 256;
+                        busy_pages = realloc(busy_pages, sizeof(xen_pfn_t) *
+                                                         busy_pages_max);
+                    }
+                    continue;
+                }
+
+                if ( write_exact_handled(xch, io_fd, &i, sizeof(i)) ||
+                     write_exact_handled(xch, io_fd, page, PAGE_SIZE) )
+                {
+                    munmap(page, PAGE_SIZE);
+                    free(busy_pages);
+                    return -1;
+                }
+                count++;
+                munmap(page, PAGE_SIZE);
+
+                if ( (i % DEF_PROGRESS_RATE) == 0 )
+                    xc_report_progress_step(xch, i - start, mem_size);
+                dirty_pages_on_current_iter_num++;
+            }
+        }
+
+        while ( busy_pages_count )
+        {
+            /* Send busy pages */
+            busy_pages_count--;
+            i = busy_pages[busy_pages_count];
+            if ( test_bit(i - start, to_send) )
+            {
+                page = xc_map_foreign_range(xch, dom, PAGE_SIZE, PROT_READ, i);
+                if ( !page )
+                {
+                    IPRINTF("WARNING: 2nd attempt to save busy page "
+                            "failed, pfn=%llx", (unsigned long long)i);
+                    continue;
+                }
+
+                if ( debug )
+                {
+                    IPRINTF("save mem: resend busy page %llx\n",
+                            (unsigned long long)i);
+                }
+
+                if ( write_exact_handled(xch, io_fd, &i, sizeof(i)) ||
+                     write_exact_handled(xch, io_fd, page, PAGE_SIZE) )
+                {
+                    munmap(page, PAGE_SIZE);
+                    free(busy_pages);
+                    return -1;
+                }
+                count++;
+                munmap(page, PAGE_SIZE);
+                dirty_pages_on_current_iter_num++;
+            }
+        }
+        free(busy_pages);
+
+        if ( debug )
+            IPRINTF("Dirty pages=%d", dirty_pages_on_current_iter_num);
+
+        xc_report_progress_step(xch, mem_size, mem_size);
+
+        dirty_pages_on_prev_iter_num = dirty_pages_on_current_iter_num;
+        total_dirty_pages_num += dirty_pages_on_current_iter_num;
+
+        if ( last_iter )
+        {
+            xc_hypercall_buffer_free_pages(xch, to_send,
+                                           NRPAGES(bitmap_size(mem_size)));
+            if ( live )
+            {
+                if ( xc_shadow_control(xch, dom, XEN_DOMCTL_SHADOW_OP_OFF,
+                                       NULL, 0, NULL, 0, NULL) < 0 )
+                    ERROR("Couldn't disable log-dirty mode");
+            }
+            break;
+        }
+    }
+    if ( debug )
+    {
+        IPRINTF("save mem: pages count = %d\n", count);
+    }
+
+    i = (xen_pfn_t) -1; /* end page marker */
+    return write_exact_handled(xch, io_fd, &i, sizeof(i));
+}
+
+static int restore_memory(xc_interface *xch, int io_fd, uint32_t dom,
+                          guest_params_t *params)
+{
+    xen_pfn_t end = params->max_gpfn;
+    xen_pfn_t gpfn;
+    int debug = !!(params->flags & XCFLAGS_DEBUG);
+    int count = 0;
+    char *page;
+    xen_pfn_t start = params->start_gpfn;
+
+    /* TODO allocate several pages per call */
+    for ( gpfn = start; gpfn < end; ++gpfn )
+    {
+        if ( xc_domain_populate_physmap_exact(xch, dom, 1, 0, 0, &gpfn) )
+        {
+            PERROR("Memory allocation for a new domain failed");
+            return -1;
+        }
+    }
+
+    while ( 1 )
+    {
+        if ( read_exact(io_fd, &gpfn, sizeof(gpfn)) )
+        {
+            PERROR("GPFN read failed during memory transfer, count=%d", count);
+            return -1;
+        }
+        if ( gpfn == (xen_pfn_t) -1 ) break; /* end page marker */
+
+        if ( gpfn < start || gpfn >= end )
+        {
+            ERROR("GPFN %llx doesn't belong to RAM address space, count=%d",
+                  (unsigned long long)gpfn, count);
+            return -1;
+        }
+        page = xc_map_foreign_range(xch, dom, PAGE_SIZE,
+                                    PROT_READ | PROT_WRITE, gpfn);
+        if ( !page )
+        {
+            PERROR("xc_map_foreign_range failed, pfn=%llx",
+                   (unsigned long long)gpfn);
+            return -1;
+        }
+        if ( read_exact(io_fd, page, PAGE_SIZE) )
+        {
+            PERROR("Page data read failed during memory transfer, pfn=%llx",
+                   (unsigned long long)gpfn);
+            return -1;
+        }
+        munmap(page, PAGE_SIZE);
+        count++;
+    }
+
+    if ( debug )
+    {
+        IPRINTF("Memory restored, pages count=%d", count);
+    }
+    return 0;
+}
+
+/* ============ HVM context =========== */
+static int save_armhvm(xc_interface *xch, int io_fd, uint32_t dom, int debug)
+{
+    /* HVM: a buffer for holding HVM context */
+    uint32_t hvm_buf_size = 0;
+    uint8_t *hvm_buf = NULL;
+    uint32_t rec_size;
+    int retval = -1;
+
+    /* Need another buffer for HVM context */
+    hvm_buf_size = xc_domain_hvm_getcontext(xch, dom, 0, 0);
+    if ( hvm_buf_size == -1 )
+    {
+        ERROR("Couldn't get HVM context size from Xen");
+        goto out;
+    }
+    hvm_buf = malloc(hvm_buf_size);
+
+    if ( !hvm_buf )
+    {
+        ERROR("Couldn't allocate memory for hvm buffer");
+        goto out;
+    }
+
+    /* Get HVM context from Xen and save it too */
+    if ( (rec_size = xc_domain_hvm_getcontext(xch, dom, hvm_buf,
+                                              hvm_buf_size)) == -1 )
+    {
+        ERROR("HVM: Could not get HVM buffer");
+        goto out;
+    }
+
+    if ( debug )
+        IPRINTF("HVM save size %d %d", hvm_buf_size, rec_size);
+
+    if ( write_exact_handled(xch, io_fd, &rec_size, sizeof(uint32_t)) )
+        goto out;
+
+    if ( write_exact_handled(xch, io_fd, hvm_buf, rec_size) )
+    {
+        goto out;
+    }
+
+    retval = 0;
+
+out:
+    if ( hvm_buf )
+        free(hvm_buf);
+
+    return retval;
+}
+
+static int restore_armhvm(xc_interface *xch, int io_fd,
+                          uint32_t dom, int debug)
+{
+    uint32_t rec_size;
+    uint32_t hvm_buf_size = 0;
+    uint8_t *hvm_buf = NULL;
+    int frc = 0;
+    int retval = -1;
+
+    if ( read_exact(io_fd, &rec_size, sizeof(uint32_t)) )
+    {
+        PERROR("Could not read HVM size");
+        goto out;
+    }
+
+    if ( !rec_size )
+    {
+        ERROR("Zero HVM size");
+        goto out;
+    }
+
+    hvm_buf_size = xc_domain_hvm_getcontext(xch, dom, 0, 0);
+    if ( hvm_buf_size != rec_size )
+    {
+        ERROR("HVM size for this domain is not the same as stored");
+        goto out;
+    }
+
+    hvm_buf = malloc(hvm_buf_size);
+    if ( !hvm_buf )
+    {
+        ERROR("Couldn't allocate memory");
+        goto out;
+    }
+
+    if ( read_exact(io_fd, hvm_buf, hvm_buf_size) )
+    {
+        PERROR("Could not read HVM context");
+        goto out;
+    }
+
+    frc = xc_domain_hvm_setcontext(xch, dom, hvm_buf, hvm_buf_size);
+    if ( frc )
+    {
+        ERROR("error setting the HVM context");
+        goto out;
+    }
+    retval = 0;
+
+    if ( debug )
+    {
+        IPRINTF("HVM restore size %d %d", hvm_buf_size, rec_size);
+    }
+out:
+    if ( hvm_buf )
+        free(hvm_buf);
+    return retval;
+}
+
+/* ================= Console & Xenstore & Memory map =========== */
+static int save_guest_params(xc_interface *xch, int io_fd,
+                             uint32_t dom, uint32_t flags,
+                             guest_params_t *params)
+{
+    size_t sz = sizeof(guest_params_t);
+    xc_dominfo_t dom_info;
+
+    params->max_gpfn = xc_domain_maximum_gpfn(xch, dom);
+    params->start_gpfn = (GUEST_RAM_BASE >> PAGE_SHIFT);
+
+    if ( flags & XCFLAGS_DEBUG )
+    {
+        IPRINTF("Guest param save size: %d ", (int)sz);
+    }
+
+    if ( xc_get_hvm_param(xch, dom, HVM_PARAM_CONSOLE_PFN,
+                          &params->console_pfn) )
+    {
+        ERROR("Can't get console gpfn");
+        return -1;
+    }
+
+    if ( xc_get_hvm_param(xch, dom, HVM_PARAM_STORE_PFN, &params->store_pfn) )
+    {
+        ERROR("Can't get store gpfn");
+        return -1;
+    }
+
+    if ( xc_domain_getinfo(xch, dom, 1, &dom_info) != 1 )
+    {
+        ERROR("Can't get domain info for dom %d", dom);
+        return -1;
+    }
+    params->max_vcpu_id = dom_info.max_vcpu_id;
+
+    params->flags = flags;
+
+    if ( write_exact_handled(xch, io_fd, params, sz) )
+    {
+        return -1;
+    }
+
+    return 0;
+}
+
+static int restore_guest_params(xc_interface *xch, int io_fd,
+                                uint32_t dom, guest_params_t *params)
+{
+    size_t sz = sizeof(guest_params_t);
+    xen_pfn_t nr_pfns;
+    unsigned int maxmemkb;
+
+    if ( read_exact(io_fd, params, sizeof(guest_params_t)) )
+    {
+        PERROR("Can't read guest params");
+        return -1;
+    }
+
+    nr_pfns = params->max_gpfn - params->start_gpfn;
+    maxmemkb = (unsigned int) nr_pfns << (PAGE_SHIFT - 10);
+
+    if ( params->flags & XCFLAGS_DEBUG )
+    {
+        IPRINTF("Guest param restore size: %d ", (int)sz);
+        IPRINTF("Guest memory size: %d MB", maxmemkb >> 10);
+    }
+
+    if ( xc_domain_setmaxmem(xch, dom, maxmemkb) )
+    {
+        ERROR("Can't set memory map");
+        return -1;
+    }
+
+    /* Set max. number of vcpus as max_vcpu_id + 1 */
+    if ( xc_domain_max_vcpus(xch, dom, params->max_vcpu_id + 1) )
+    {
+        ERROR("Can't set max vcpu number for domain");
+        return -1;
+    }
+
+    return 0;
+}
+
+static int set_guest_params(xc_interface *xch, int io_fd, uint32_t dom,
+                            guest_params_t *params, unsigned int console_evtchn,
+                            domid_t console_domid, unsigned int store_evtchn,
+                            domid_t store_domid)
+{
+    int rc = 0;
+
+    if ( (rc = xc_clear_domain_page(xch, dom, params->console_pfn)) )
+    {
+        ERROR("Can't clear console page");
+        return rc;
+    }
+
+    if ( (rc = xc_clear_domain_page(xch, dom, params->store_pfn)) )
+    {
+        ERROR("Can't clear xenstore page");
+        return rc;
+    }
+
+    if ( (rc = xc_dom_gnttab_hvm_seed(xch, dom, params->console_pfn,
+                                      params->store_pfn, console_domid,
+                                      store_domid)) )
+    {
+        ERROR("Can't grant console and xenstore pages");
+        return rc;
+    }
+
+    if ( (rc = xc_set_hvm_param(xch, dom, HVM_PARAM_CONSOLE_PFN,
+                                params->console_pfn)) )
+    {
+        ERROR("Can't set console gpfn");
+        return rc;
+    }
+
+    if ( (rc = xc_set_hvm_param(xch, dom, HVM_PARAM_STORE_PFN,
+                                params->store_pfn)) )
+    {
+        ERROR("Can't set xenstore gpfn");
+        return rc;
+    }
+
+    if ( (rc = xc_set_hvm_param(xch, dom, HVM_PARAM_CONSOLE_EVTCHN,
+                                console_evtchn)) )
+    {
+        ERROR("Can't set console event channel");
+        return rc;
+    }
+
+    if ( (rc = xc_set_hvm_param(xch, dom, HVM_PARAM_STORE_EVTCHN,
+                                store_evtchn)) )
+    {
+        ERROR("Can't set xenstore event channel");
+        return rc;
+    }
+    return 0;
+}
+
+/* ================== Main ============== */
+int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
+                   uint32_t max_iters, uint32_t max_factor, uint32_t flags,
+                   struct save_callbacks *callbacks, int hvm,
+                   unsigned long vm_generationid_addr)
+{
+    int debug;
+    guest_params_t params;
+
+#ifdef ARM_MIGRATE_VERBOSE
+    flags |= XCFLAGS_DEBUG;
+#endif
+
+#ifdef DISABLE_LIVE_MIGRATION
+    flags &= ~(XCFLAGS_LIVE);
+#endif
+
+    debug = !!(flags & XCFLAGS_DEBUG);
+    if ( save_guest_params(xch, io_fd, dom, flags, &params) )
+    {
+        ERROR("Can't save guest params");
+        return -1;
+    }
+
+    if ( save_memory(xch, io_fd, dom, callbacks, max_iters,
+                     max_factor, &params) )
+    {
+        ERROR("Memory not saved");
+        return -1;
+    }
+
+    if ( save_armhvm(xch, io_fd, dom, debug) )
+    {
+        ERROR("HVM not saved");
+        return -1;
+    }
+
+    if ( debug )
+    {
+        IPRINTF("Domain %d saved", dom);
+    }
+    return 0;
+}
+
+int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
+                      unsigned int store_evtchn, unsigned long *store_gpfn,
+                      domid_t store_domid, unsigned int console_evtchn,
+                      unsigned long *console_gpfn, domid_t console_domid,
+                      unsigned int hvm, unsigned int pae, int superpages,
+                      int no_incr_generationid, int checkpointed_stream,
+                      unsigned long *vm_generationid_addr,
+                      struct restore_callbacks *callbacks)
+{
+    guest_params_t params;
+    int debug = 1;
+
+    if ( restore_guest_params(xch, io_fd, dom, &params) )
+    {
+        ERROR("Can't restore guest params");
+        return -1;
+    }
+    debug = !!(params.flags & XCFLAGS_DEBUG);
+
+    if ( restore_memory(xch, io_fd, dom, &params) )
+    {
+        ERROR("Can't restore memory");
+        return -1;
+    }
+    if ( set_guest_params(xch, io_fd, dom, &params,
+                          console_evtchn, console_domid,
+                          store_evtchn, store_domid) )
+    {
+        ERROR("Can't setup guest params");
+        return -1;
+    }
+
+    /* Report console and store PFNs back to the caller */
+    *console_gpfn = params.console_pfn;
+    *store_gpfn = params.store_pfn;
+
+    if ( restore_armhvm(xch, io_fd, dom, debug) )
+    {
+        ERROR("HVM not restored");
+        return -1;
+    }
+
+    if ( debug )
+    {
+        IPRINTF("Domain %d restored", dom);
+    }
+
+    return 0;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libxc/xc_dom_arm.c b/tools/libxc/xc_dom_arm.c
index f051515..044a8de 100644
--- a/tools/libxc/xc_dom_arm.c
+++ b/tools/libxc/xc_dom_arm.c
@@ -335,7 +335,9 @@ int arch_setup_meminit(struct xc_dom_image *dom)
         modbase += dtb_size;
     }
 
-    return 0;
+    return xc_domain_setmaxmem(dom->xch, dom->guest_domid,
+                               (dom->total_pages + NR_MAGIC_PAGES)
+                               << (PAGE_SHIFT - 10));
 }
 
 int arch_setup_bootearly(struct xc_dom_image *dom)
diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
index b2c3015..e10f4fb 100644
--- a/tools/libxl/libxl.h
+++ b/tools/libxl/libxl.h
@@ -441,9 +441,6 @@
  * - libxl_domain_resume
  * - libxl_domain_remus_start
  */
-#if defined(__arm__) || defined(__aarch64__)
-#define LIBXL_HAVE_NO_SUSPEND_RESUME 1
-#endif
 
 /*
  * LIBXL_HAVE_DEVICE_PCI_SEIZE
diff --git a/tools/misc/Makefile b/tools/misc/Makefile
index 17aeda5..0824100 100644
--- a/tools/misc/Makefile
+++ b/tools/misc/Makefile
@@ -11,7 +11,7 @@ HDRS     = $(wildcard *.h)
 TARGETS-y := xenperf xenpm xen-tmem-list-parse gtraceview gtracestat xenlockprof xenwatchdogd xencov
 TARGETS-$(CONFIG_X86) += xen-detect xen-hvmctx xen-hvmcrash xen-lowmemd xen-mfndump
-TARGETS-$(CONFIG_MIGRATE) += xen-hptool
+TARGETS-$(CONFIG_X86) += xen-hptool
 TARGETS := $(TARGETS-y)
 
 SUBDIRS := $(SUBDIRS-y)
 
@@ -23,7 +23,7 @@ INSTALL_BIN := $(INSTALL_BIN-y)
 INSTALL_SBIN-y := xen-bugtool xen-python-path xenperf xenpm xen-tmem-list-parse gtraceview \
 	gtracestat xenlockprof xenwatchdogd xen-ringwatch xencov
 INSTALL_SBIN-$(CONFIG_X86) += xen-hvmctx xen-hvmcrash xen-lowmemd xen-mfndump
-INSTALL_SBIN-$(CONFIG_MIGRATE) += xen-hptool
+INSTALL_SBIN-$(CONFIG_X86) += xen-hptool
 INSTALL_SBIN := $(INSTALL_SBIN-y)
 
 INSTALL_PRIVBIN-y := xenpvnetboot
-- 
1.7.9.5

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel