[Xen-changelog] [linux-2.6.18-xen] sync Xen public headers to 4.6.0 level
# HG changeset patch
# User Jan Beulich
# Date 1445005454 -7200
# Node ID 288d236ca763e3e889774fcb8a10fda136723130
# Parent 3f51e783c26a52086fb349313fad1758e4e35297
sync Xen public headers to 4.6.0 level
---
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/arch-arm.h
--- a/include/xen/interface/arch-arm.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/arch-arm.h Fri Oct 16 16:24:14 2015 +0200
@@ -87,15 +87,10 @@
* unavailable/unsupported.
*
* HYPERVISOR_memory_op
- * All generic sub-operations.
- *
- * In addition the following arch specific sub-ops:
- * * XENMEM_add_to_physmap
- * * XENMEM_add_to_physmap_batch
+ * All generic sub-operations
*
* HYPERVISOR_domctl
* All generic sub-operations, with the exception of:
- * * XEN_DOMCTL_iomem_permission (not yet implemented)
* * XEN_DOMCTL_irq_permission (not yet implemented)
*
* HYPERVISOR_sched_op
@@ -170,6 +165,7 @@
#define XEN_HYPERCALL_TAG 0XEA1
+#define int64_aligned_t int64_t __attribute__((aligned(8)))
#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
#ifndef __ASSEMBLY__
@@ -183,8 +179,8 @@
* XEN_GUEST_HANDLE represents a guest pointer, when passed as a field
* in a struct in memory. On ARM it is always 8 bytes in size and 8 bytes
* aligned.
- * XEN_GUEST_HANDLE_PARAM represent a guest pointer, when passed as an
- * hypercall argument. It is 4 bytes on aarch and 8 bytes on aarch64.
+ * XEN_GUEST_HANDLE_PARAM represents a guest pointer, when passed as an
+ * hypercall argument. It is 4 bytes on aarch32 and 8 bytes on aarch64.
*/
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
___DEFINE_XEN_GUEST_HANDLE(name, type); \
@@ -192,7 +188,6 @@
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
#define __XEN_GUEST_HANDLE(name) __guest_handle_64_ ## name
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
-/* this is going to be changed on 64 bit */
#define XEN_GUEST_HANDLE_PARAM(name) __guest_handle_ ## name
#define set_xen_guest_handle_raw(hnd, val) \
do { \
@@ -303,7 +298,35 @@ struct vcpu_guest_context {
};
typedef struct vcpu_guest_context vcpu_guest_context_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
-#endif
+
+/*
+ * struct xen_arch_domainconfig's ABI is covered by
+ * XEN_DOMCTL_INTERFACE_VERSION.
+ */
+#define XEN_DOMCTL_CONFIG_GIC_NATIVE 0
+#define XEN_DOMCTL_CONFIG_GIC_V2 1
+#define XEN_DOMCTL_CONFIG_GIC_V3 2
+struct xen_arch_domainconfig {
+ /* IN/OUT */
+ uint8_t gic_version;
+ /* IN */
+ uint32_t nr_spis;
+ /*
+ * OUT
+ * Based on the property clock-frequency in the DT timer node.
+ * The property may be present when the bootloader/firmware doesn't
+ * correctly set CNTFRQ, which holds the timer frequency.
+ *
+ * As it's not possible to trap this register, we have to replicate
+ * the value in the guest DT.
+ *
+ * = 0 => property not present
+ * > 0 => Value of the property
+ *
+ */
+ uint32_t clock_frequency;
+};
+#endif /* __XEN__ || __XEN_TOOLS__ */
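Taken together with the new "config" field added to struct xen_domctl_createdomain later in this patch, an ARM toolstack would fill this in along these lines (a minimal sketch, not part of the patch; the do_domctl() hypercall wrapper and error handling are assumptions):

    struct xen_domctl domctl = { 0 };

    domctl.cmd = XEN_DOMCTL_createdomain;
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    domctl.domain = 0;                                /* IN/OUT: Xen picks the id */
    /* Ask Xen to emulate whatever GIC the host has. */
    domctl.u.createdomain.config.gic_version = XEN_DOMCTL_CONFIG_GIC_NATIVE;
    domctl.u.createdomain.config.nr_spis = 32;        /* virtual SPIs to allocate */

    /* rc = do_domctl(xch, &domctl);  -- hypothetical wrapper */
    /* On return config.gic_version holds the version actually used and
     * config.clock_frequency is non-zero iff the DT property was present. */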
struct arch_vcpu_info {
};
@@ -318,7 +341,7 @@ typedef uint64_t xen_callback_t;
#if defined(__XEN__) || defined(__XEN_TOOLS__)
-/* PSR bits (CPSR, SPSR)*/
+/* PSR bits (CPSR, SPSR) */
#define PSR_THUMB (1<<5) /* Thumb Mode enable */
#define PSR_FIQ_MASK (1<<6) /* Fast Interrupt mask */
@@ -365,7 +388,8 @@ typedef uint64_t xen_callback_t;
/* Physical Address Space */
-/* vGIC mappings: Only one set of mapping is used by the guest.
+/*
+ * vGIC mappings: Only one set of mapping is used by the guest.
* Therefore they can overlap.
*/
@@ -382,10 +406,11 @@ typedef uint64_t xen_callback_t;
#define GUEST_GICV3_RDIST_STRIDE 0x20000ULL
#define GUEST_GICV3_RDIST_REGIONS 1
-#define GUEST_GICV3_GICR0_BASE 0x03020000ULL /* vCPU0 - vCPU7 */
-#define GUEST_GICV3_GICR0_SIZE 0x00100000ULL
+#define GUEST_GICV3_GICR0_BASE 0x03020000ULL /* vCPU0 - vCPU127 */
+#define GUEST_GICV3_GICR0_SIZE 0x01000000ULL
-/* 16MB == 4096 pages reserved for guest to use as a region to map its
+/*
+ * 16MB == 4096 pages reserved for guest to use as a region to map its
* grant table in.
*/
#define GUEST_GNTTAB_BASE 0x38000000ULL
@@ -423,6 +448,11 @@ typedef uint64_t xen_callback_t;
#endif
+#ifndef __ASSEMBLY__
+/* Stub definition of PMU structure */
+typedef struct xen_pmu_arch { uint8_t dummy; } xen_pmu_arch_t;
+#endif
+
#endif /* __XEN_PUBLIC_ARCH_ARM_H__ */
/*
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/arch-x86/cpuid.h
--- a/include/xen/interface/arch-x86/cpuid.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/arch-x86/cpuid.h Fri Oct 16 16:24:14 2015 +0200
@@ -76,13 +76,14 @@
/*
* Leaf 5 (0x40000x04)
* HVM-specific features
+ * EAX: Features
+ * EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag)
*/
-
-/* EAX Features */
#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0) /* Virtualized APIC registers */
#define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1) /* Virtualized x2APIC accesses */
/* Memory mapped from other domains has valid IOMMU entries */
#define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2)
+#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3) /* vcpu id is present in EBX */
#define XEN_CPUID_MAX_NUM_LEAVES 4
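A guest using the new flag would read this leaf roughly as follows (illustrative sketch; the cpuid() helper and the discovered Xen base leaf are assumptions):

    uint32_t eax, ebx, ecx, edx;

    /* Leaf 5 of the Xen range, i.e. base + 4 (0x40000x04 above). */
    cpuid(xen_cpuid_base + 4, &eax, &ebx, &ecx, &edx);

    if ( eax & XEN_HVM_CPUID_VCPU_ID_PRESENT )
        vcpu_id = ebx;          /* EBX carries the vcpu id when flagged */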
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/arch-x86/hvm/save.h
--- a/include/xen/interface/arch-x86/hvm/save.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/arch-x86/hvm/save.h Fri Oct 16 16:24:14 2015 +0200
@@ -569,6 +569,7 @@ struct hvm_viridian_domain_context {
uint64_t hypercall_gpa;
uint64_t guest_os_id;
uint64_t time_ref_count;
+ uint64_t reference_tsc;
};
DECLARE_HVM_SAVE_TYPE(VIRIDIAN_DOMAIN, 15, struct hvm_viridian_domain_context);
@@ -617,3 +618,13 @@ struct hvm_msr {
#define HVM_SAVE_CODE_MAX 20
#endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/arch-x86/pmu.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/include/xen/interface/arch-x86/pmu.h Fri Oct 16 16:24:14 2015 +0200
@@ -0,0 +1,167 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2015 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_PMU_H__
+#define __XEN_PUBLIC_ARCH_X86_PMU_H__
+
+/* x86-specific PMU definitions */
+
+/* AMD PMU registers and structures */
+struct xen_pmu_amd_ctxt {
+ /*
+ * Offsets to counter and control MSRs (relative to xen_pmu_arch.c.amd).
+ * For PV(H) guests these fields are RO.
+ */
+ uint32_t counters;
+ uint32_t ctrls;
+
+ /* Counter MSRs */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ uint64_t regs[];
+#elif defined(__GNUC__)
+ uint64_t regs[0];
+#endif
+};
+typedef struct xen_pmu_amd_ctxt xen_pmu_amd_ctxt_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pmu_amd_ctxt_t);
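Since "counters" and "ctrls" are byte offsets rather than pointers, a consumer locates the MSR arrays by adding them to the context's own address, e.g. (sketch; "pmu" standing for a mapped xen_pmu_arch page is an assumption):

    struct xen_pmu_amd_ctxt *amd = &pmu->c.amd;

    /* Offsets are relative to xen_pmu_arch.c.amd itself. */
    uint64_t *counter_regs = (uint64_t *)((uint8_t *)amd + amd->counters);
    uint64_t *ctrl_regs    = (uint64_t *)((uint8_t *)amd + amd->ctrls);

    uint64_t perf_ctr0 = counter_regs[0];   /* first cached counter MSR */
    uint64_t perf_ctl0 = ctrl_regs[0];      /* first cached control MSR */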
+
+/* Intel PMU registers and structures */
+struct xen_pmu_cntr_pair {
+ uint64_t counter;
+ uint64_t control;
+};
+typedef struct xen_pmu_cntr_pair xen_pmu_cntr_pair_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pmu_cntr_pair_t);
+
+struct xen_pmu_intel_ctxt {
+ /*
+ * Offsets to fixed and architectural counter MSRs (relative to
+ * xen_pmu_arch.c.intel).
+ * For PV(H) guests these fields are RO.
+ */
+ uint32_t fixed_counters;
+ uint32_t arch_counters;
+
+ /* PMU registers */
+ uint64_t global_ctrl;
+ uint64_t global_ovf_ctrl;
+ uint64_t global_status;
+ uint64_t fixed_ctrl;
+ uint64_t ds_area;
+ uint64_t pebs_enable;
+ uint64_t debugctl;
+
+ /* Fixed and architectural counter MSRs */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ uint64_t regs[];
+#elif defined(__GNUC__)
+ uint64_t regs[0];
+#endif
+};
+typedef struct xen_pmu_intel_ctxt xen_pmu_intel_ctxt_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pmu_intel_ctxt_t);
+
+/* Sampled domain's registers */
+struct xen_pmu_regs {
+ uint64_t ip;
+ uint64_t sp;
+ uint64_t flags;
+ uint16_t cs;
+ uint16_t ss;
+ uint8_t cpl;
+ uint8_t pad[3];
+};
+typedef struct xen_pmu_regs xen_pmu_regs_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pmu_regs_t);
+
+/* PMU flags */
+#define PMU_CACHED (1<<0) /* PMU MSRs are cached in the context */
+#define PMU_SAMPLE_USER (1<<1) /* Sample is from user or kernel mode */
+#define PMU_SAMPLE_REAL (1<<2) /* Sample is from realmode */
+#define PMU_SAMPLE_PV (1<<3) /* Sample from a PV guest */
+
+/*
+ * Architecture-specific information describing state of the processor at
+ * the time of PMU interrupt.
+ * Fields of this structure marked as RW for guest should only be written by
+ * the guest when PMU_CACHED bit in pmu_flags is set (which is done by the
+ * hypervisor during PMU interrupt). Hypervisor will read updated data in
+ * XENPMU_flush hypercall and clear PMU_CACHED bit.
+ */
+struct xen_pmu_arch {
+ union {
+ /*
+ * Processor's registers at the time of interrupt.
+ * WO for hypervisor, RO for guests.
+ */
+ struct xen_pmu_regs regs;
+ /* Padding for adding new registers to xen_pmu_regs in the future */
+#define XENPMU_REGS_PAD_SZ 64
+ uint8_t pad[XENPMU_REGS_PAD_SZ];
+ } r;
+
+ /* WO for hypervisor, RO for guest */
+ uint64_t pmu_flags;
+
+ /*
+ * APIC LVTPC register.
+ * RW for both hypervisor and guest.
+ * Only APIC_LVT_MASKED bit is loaded by the hypervisor into hardware
+ * during XENPMU_flush or XENPMU_lvtpc_set.
+ */
+ union {
+ uint32_t lapic_lvtpc;
+ uint64_t pad;
+ } l;
+
+ /*
+ * Vendor-specific PMU registers.
+ * RW for both hypervisor and guest (see exceptions above).
+ * Guest's updates to this field are verified and then loaded by the
+ * hypervisor into hardware during XENPMU_flush
+ */
+ union {
+ struct xen_pmu_amd_ctxt amd;
+ struct xen_pmu_intel_ctxt intel;
+
+ /*
+ * Padding for contexts (fixed parts only, does not include MSR banks
+ * that are specified by offsets)
+ */
+#define XENPMU_CTXT_PAD_SZ 128
+ uint8_t pad[XENPMU_CTXT_PAD_SZ];
+ } c;
+};
+typedef struct xen_pmu_arch xen_pmu_arch_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pmu_arch_t);
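In other words, a PV(H) guest's PMU interrupt handler is expected to consume the cached state and then ask Xen to flush it, roughly as below (sketch only; the shared-page pointer, the sample consumer and the hypercall wrapper name are all assumptions):

    if ( pmu->pmu_flags & PMU_CACHED )
    {
        /* Registers were snapshotted by Xen at interrupt time. */
        uint64_t ip   = pmu->r.regs.ip;
        int      user = !!(pmu->pmu_flags & PMU_SAMPLE_USER);

        record_sample(ip, user);                     /* hypothetical consumer */

        /* Write back any RW fields, then have Xen reload MSRs and
         * clear PMU_CACHED. */
        HYPERVISOR_xenpmu_op(XENPMU_flush, NULL);    /* wrapper name assumed */
    }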
+
+#endif /* __XEN_PUBLIC_ARCH_X86_PMU_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
+
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/arch-x86/xen-x86_32.h
--- a/include/xen/interface/arch-x86/xen-x86_32.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/arch-x86/xen-x86_32.h Fri Oct 16 16:24:14 2015 +0200
@@ -104,6 +104,7 @@
do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \
(hnd).p = val; \
} while ( 0 )
+#define int64_aligned_t int64_t __attribute__((aligned(8)))
#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
#define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name
#define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name)
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/arch-x86/xen.h
--- a/include/xen/interface/arch-x86/xen.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/arch-x86/xen.h Fri Oct 16 16:24:14 2015 +0200
@@ -220,14 +220,58 @@ typedef struct vcpu_guest_context vcpu_g
DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
struct arch_shared_info {
- unsigned long max_pfn; /* max pfn that appears in table */
- /* Frame containing list of mfns containing list of mfns containing p2m. */
+ /*
+ * Number of valid entries in the p2m table(s) anchored at
+ * pfn_to_mfn_frame_list_list and/or p2m_vaddr.
+ */
+ unsigned long max_pfn;
+ /*
+ * Frame containing list of mfns containing list of mfns containing p2m.
+ * A value of 0 indicates it has not yet been set up, ~0 indicates it has
+ * been set to invalid e.g. due to the p2m being too large for the 3-level
+ * p2m tree. In this case the linear mapper p2m list anchored at p2m_vaddr
+ * is to be used.
+ */
xen_pfn_t pfn_to_mfn_frame_list_list;
unsigned long nmi_reason;
- uint64_t pad[32];
+ /*
+ * Following three fields are valid if p2m_cr3 contains a value different
+ * from 0.
+ * p2m_cr3 is the root of the address space where p2m_vaddr is valid.
+ * p2m_cr3 is in the same format as a cr3 value in the vcpu register state
+ * and holds the folded machine frame number (via xen_pfn_to_cr3) of a
+ * L3 or L4 page table.
+ * p2m_vaddr holds the virtual address of the linear p2m list. All entries
+ * in the range [0...max_pfn[ are accessible via this pointer.
+ * p2m_generation will be incremented by the guest before and after each
+ * change of the mappings of the p2m list. p2m_generation starts at 0 and
+ * a value with the least significant bit set indicates that a mapping
+ * update is in progress. This allows guest external software (e.g. in Dom0)
+ * to verify that read mappings are consistent and whether they have changed
+ * since the last check.
+ * Modifying a p2m element in the linear p2m list is allowed via an atomic
+ * write only.
+ */
+ unsigned long p2m_cr3; /* cr3 value of the p2m address space */
+ unsigned long p2m_vaddr; /* virtual address of the p2m list */
+ unsigned long p2m_generation; /* generation count of p2m mapping */
+#ifdef __i386__
+ /* There's no room for this field in the generic structure. */
+ uint32_t wc_sec_hi;
+#endif
};
typedef struct arch_shared_info arch_shared_info_t;
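The generation counter is meant to be used seqlock-style by external readers of the linear p2m list; a consistent lookup would look roughly like this (sketch; the mapping of the list and the barrier/read helpers are assumptions):

    unsigned long gen, mfn;

    do {
        gen = arch->p2m_generation;      /* arch = guest's arch_shared_info */
        read_barrier();                  /* hypothetical rmb() */
        mfn = p2m_list[pfn];             /* read through the mapped list */
        read_barrier();
        /* Odd value: update in progress; changed value: retry the read. */
    } while ( (gen & 1) || gen != arch->p2m_generation );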
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+/*
+ * struct xen_arch_domainconfig's ABI is covered by
+ * XEN_DOMCTL_INTERFACE_VERSION.
+ */
+struct xen_arch_domainconfig {
+ char dummy;
+};
+#endif
+
#endif /* !__ASSEMBLY__ */
/*
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/domctl.h
--- a/include/xen/interface/domctl.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/domctl.h Fri Oct 16 16:24:14 2015 +0200
@@ -37,7 +37,7 @@
#include "hvm/save.h"
#include "memory.h"
-#define XEN_DOMCTL_INTERFACE_VERSION 0x0000000a
+#define XEN_DOMCTL_INTERFACE_VERSION 0x0000000b
/*
* NB. xen_domctl.domain is an IN/OUT parameter for this operation.
@@ -64,23 +64,11 @@ struct xen_domctl_createdomain {
#define _XEN_DOMCTL_CDF_pvh_guest 4
#define XEN_DOMCTL_CDF_pvh_guest (1U<<_XEN_DOMCTL_CDF_pvh_guest)
uint32_t flags;
+ struct xen_arch_domainconfig config;
};
typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
-#if defined(__arm__) || defined(__aarch64__)
-#define XEN_DOMCTL_CONFIG_GIC_DEFAULT 0
-#define XEN_DOMCTL_CONFIG_GIC_V2 1
-#define XEN_DOMCTL_CONFIG_GIC_V3 2
-/* XEN_DOMCTL_configure_domain */
-struct xen_domctl_arm_configuredomain {
- /* IN/OUT parameters */
- uint8_t gic_version;
-};
-typedef struct xen_domctl_arm_configuredomain xen_domctl_arm_configuredomain_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_arm_configuredomain_t);
-#endif
-
/* XEN_DOMCTL_getdomaininfo */
struct xen_domctl_getdomaininfo {
/* OUT variables. */
@@ -161,27 +149,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getme
#define XEN_DOMCTL_PFINFO_BROKEN (0xdU<<28) /* broken page */
#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
-struct xen_domctl_getpageframeinfo {
- /* IN variables. */
- uint64_aligned_t gmfn; /* GMFN to query */
- /* OUT variables. */
- /* Is the page PINNED to a type? */
- uint32_t type; /* see above type defs */
-};
-typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t);
-
-
-/* XEN_DOMCTL_getpageframeinfo2 */
-struct xen_domctl_getpageframeinfo2 {
- /* IN variables. */
- uint64_aligned_t num;
- /* IN/OUT variables. */
- XEN_GUEST_HANDLE_64(uint32) array;
-};
-typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t);
-
/* XEN_DOMCTL_getpageframeinfo3 */
struct xen_domctl_getpageframeinfo3 {
/* IN variables. */
@@ -357,7 +324,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_v
/* XEN_DOMCTL_scheduler_op */
/* Scheduler types. */
-#define XEN_SCHEDULER_SEDF 4
+/* #define XEN_SCHEDULER_SEDF 4 (Removed) */
#define XEN_SCHEDULER_CREDIT 5
#define XEN_SCHEDULER_CREDIT2 6
#define XEN_SCHEDULER_ARINC653 7
@@ -370,13 +337,6 @@ struct xen_domctl_scheduler_op {
uint32_t sched_id; /* XEN_SCHEDULER_* */
uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */
union {
- struct xen_domctl_sched_sedf {
- uint64_aligned_t period;
- uint64_aligned_t slice;
- uint64_aligned_t latency;
- uint32_t extratime;
- uint32_t weight;
- } sedf;
struct xen_domctl_sched_credit {
uint16_t weight;
uint16_t cap;
@@ -469,7 +429,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_
/* XEN_DOMCTL_settimeoffset */
struct xen_domctl_settimeoffset {
- int32_t time_offset_seconds; /* applied to domain wallclock time */
+ int64_aligned_t time_offset_seconds; /* applied to domain wallclock time */
};
typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
@@ -515,12 +475,33 @@ typedef struct xen_domctl_sendtrigger xe
DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t);
-/* Assign PCI device to HVM guest. Sets up IOMMU structures. */
+/* Assign a device to a guest. Sets up IOMMU structures. */
/* XEN_DOMCTL_assign_device */
/* XEN_DOMCTL_test_assign_device */
-/* XEN_DOMCTL_deassign_device */
+/*
+ * XEN_DOMCTL_deassign_device: The behavior of this DOMCTL differs
+ * between the different type of device:
+ * - PCI device (XEN_DOMCTL_DEV_PCI) will be reassigned to DOM0
+ * - DT device (XEN_DOMCTL_DEV_DT) will be left unassigned. DOM0
+ * will have to call XEN_DOMCTL_assign_device in order to use the
+ * device.
+ */
+#define XEN_DOMCTL_DEV_PCI 0
+#define XEN_DOMCTL_DEV_DT 1
struct xen_domctl_assign_device {
- uint32_t machine_sbdf; /* machine PCI ID of assigned device */
+ uint32_t dev; /* XEN_DOMCTL_DEV_* */
+ union {
+ struct {
+ uint32_t machine_sbdf; /* machine PCI ID of assigned device */
+ } pci;
+ struct {
+ uint32_t size; /* Length of the path */
+ XEN_GUEST_HANDLE_64(char) path; /* path to the device tree node */
+ } dt;
+ } u;
+ /* IN */
+#define XEN_DOMCTL_DEV_RDM_RELAXED 1
+ uint32_t flag; /* flag of assigned device */
};
typedef struct xen_domctl_assign_device xen_domctl_assign_device_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t);
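For a PCI device the new union is used like this (sketch; SBDF shown for a hypothetical 0000:03:00.0, do_domctl() wrapper assumed):

    struct xen_domctl domctl = { 0 };

    domctl.cmd = XEN_DOMCTL_assign_device;
    domctl.domain = domid;
    domctl.u.assign_device.dev = XEN_DOMCTL_DEV_PCI;
    /* machine_sbdf = (seg << 16) | (bus << 8) | (dev << 3) | fn */
    domctl.u.assign_device.u.pci.machine_sbdf = (0 << 16) | (3 << 8) | (0 << 3) | 0;
    domctl.u.assign_device.flag = 0;         /* or XEN_DOMCTL_DEV_RDM_RELAXED */

    /* rc = do_domctl(xch, &domctl); */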
@@ -544,6 +525,7 @@ typedef enum pt_irq_type_e {
PT_IRQ_TYPE_ISA,
PT_IRQ_TYPE_MSI,
PT_IRQ_TYPE_MSI_TRANSLATE,
+ PT_IRQ_TYPE_SPI, /* ARM: valid range 32-1019 */
} pt_irq_type_t;
struct xen_domctl_bind_pt_irq {
uint32_t machine_irq;
@@ -564,6 +546,9 @@ struct xen_domctl_bind_pt_irq {
uint32_t gflags;
uint64_aligned_t gtable;
} msi;
+ struct {
+ uint16_t spi;
+ } spi;
} u;
};
typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t;
@@ -571,6 +556,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_
/* Bind machine I/O address range -> HVM address range. */
+/* If this returns -E2BIG lower nr_mfns value. */
/* XEN_DOMCTL_memory_mapping */
#define DPCI_ADD_MAPPING 1
#define DPCI_REMOVE_MAPPING 0
@@ -752,18 +738,13 @@ typedef struct xen_domctl_disable_migrat
/* XEN_DOMCTL_gettscinfo */
/* XEN_DOMCTL_settscinfo */
-struct xen_guest_tsc_info {
+typedef struct xen_domctl_tsc_info {
+ /* IN/OUT */
uint32_t tsc_mode;
uint32_t gtsc_khz;
uint32_t incarnation;
uint32_t pad;
uint64_aligned_t elapsed_nsec;
-};
-typedef struct xen_guest_tsc_info xen_guest_tsc_info_t;
-DEFINE_XEN_GUEST_HANDLE(xen_guest_tsc_info_t);
-typedef struct xen_domctl_tsc_info {
- XEN_GUEST_HANDLE_64(xen_guest_tsc_info_t) out_info; /* OUT */
- xen_guest_tsc_info_t info; /* IN */
} xen_domctl_tsc_info_t;
/* XEN_DOMCTL_gdbsx_guestmemio guest mem io */
@@ -793,10 +774,21 @@ struct xen_domctl_gdbsx_domstatus {
};
/*
- * Memory event operations
+ * VM event operations
*/
-/* XEN_DOMCTL_mem_event_op */
+/* XEN_DOMCTL_vm_event_op */
+
+/*
+ * There are currently three rings available for VM events:
+ * sharing, monitor and paging. This hypercall allows one to
+ * control these rings (enable/disable), as well as to signal
+ * to the hypervisor to pull responses (resume) from the given
+ * ring.
+ */
+#define XEN_VM_EVENT_ENABLE 0
+#define XEN_VM_EVENT_DISABLE 1
+#define XEN_VM_EVENT_RESUME 2
/*
* Domain memory paging
@@ -805,42 +797,38 @@ struct xen_domctl_gdbsx_domstatus {
* pager<->hypervisor interface. Use XENMEM_paging_op*
* to perform per-page operations.
*
- * The XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE domctl returns several
+ * The XEN_VM_EVENT_PAGING_ENABLE domctl returns several
* non-standard error codes to indicate why paging could not be enabled:
* ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
* EMLINK - guest has iommu passthrough enabled
* EXDEV - guest has PoD enabled
* EBUSY - guest has or had paging enabled, ring buffer still active
*/
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING 1
-
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE 0
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE 1
+#define XEN_DOMCTL_VM_EVENT_OP_PAGING 1
/*
- * Access permissions.
+ * Monitor helper.
*
* As with paging, use the domctl for teardown/setup of the
* helper<->hypervisor interface.
*
- * There are HVM hypercalls to set the per-page access permissions of every
- * page in a domain. When one of these permissions--independent, read,
- * write, and execute--is violated, the VCPU is paused and a memory event
- * is sent with what happened. (See public/mem_event.h) .
+ * The monitor interface can be used to register for various VM events. For
+ * example, there are HVM hypercalls to set the per-page access permissions
+ * of every page in a domain. When one of these permissions--independent,
+ * read, write, and execute--is violated, the VCPU is paused and a memory event
+ * is sent with what happened. The memory event handler can then resume the
+ * VCPU and redo the access with a XEN_VM_EVENT_RESUME option.
*
- * The memory event handler can then resume the VCPU and redo the access
- * with a XENMEM_access_op_resume hypercall.
+ * See public/vm_event.h for the list of available events that can be
+ * subscribed to via the monitor interface.
*
- * The XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE domctl returns several
+ * The XEN_VM_EVENT_MONITOR_* domctls return
* non-standard error codes to indicate why access could not be enabled:
* ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
* EBUSY - guest has or had access enabled, ring buffer still active
+ *
*/
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS 2
-
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE 0
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE 1
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE_INTROSPECTION 2
+#define XEN_DOMCTL_VM_EVENT_OP_MONITOR 2
/*
* Sharing ENOMEM helper.
@@ -855,21 +843,18 @@ struct xen_domctl_gdbsx_domstatus {
* Note that sharing can be turned on (as per the domctl below)
* *without* this ring being setup.
*/
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING 3
-
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE 0
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE 1
+#define XEN_DOMCTL_VM_EVENT_OP_SHARING 3
/* Use for teardown/setup of helper<->hypervisor interface for paging,
* access and sharing.*/
-struct xen_domctl_mem_event_op {
- uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_*_* */
- uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_OP_* */
+struct xen_domctl_vm_event_op {
+ uint32_t op; /* XEN_VM_EVENT_* */
+ uint32_t mode; /* XEN_DOMCTL_VM_EVENT_OP_* */
uint32_t port; /* OUT: event channel for ring */
};
-typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t);
+typedef struct xen_domctl_vm_event_op xen_domctl_vm_event_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vm_event_op_t);
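Enabling one of the three rings then reduces to a single domctl, e.g. for the monitor ring (sketch; do_domctl() wrapper assumed):

    struct xen_domctl domctl = { 0 };

    domctl.cmd = XEN_DOMCTL_vm_event_op;
    domctl.domain = domid;
    domctl.u.vm_event_op.op   = XEN_VM_EVENT_ENABLE;
    domctl.u.vm_event_op.mode = XEN_DOMCTL_VM_EVENT_OP_MONITOR;

    /* rc = do_domctl(xch, &domctl);
     * On success, domctl.u.vm_event_op.port is the event channel to bind
     * for ring notifications. */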
/*
* Memory sharing operations
@@ -1001,27 +986,37 @@ typedef struct xen_domctl_vcpu_msrs xen_
DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpu_msrs_t);
#endif
-/*
- * Use in XEN_DOMCTL_setvnumainfo to set
- * vNUMA domain topology.
- */
+/* XEN_DOMCTL_setvnumainfo: specifies a virtual NUMA topology for the guest */
struct xen_domctl_vnuma {
+ /* IN: number of vNUMA nodes to setup. Shall be greater than 0 */
uint32_t nr_vnodes;
+ /* IN: number of memory ranges to setup */
uint32_t nr_vmemranges;
+ /*
+ * IN: number of vCPUs of the domain (used as size of the vcpu_to_vnode
+ * array declared below). Shall be equal to the domain's max_vcpus.
+ */
uint32_t nr_vcpus;
- uint32_t pad;
- XEN_GUEST_HANDLE_64(uint) vdistance;
- XEN_GUEST_HANDLE_64(uint) vcpu_to_vnode;
+ uint32_t pad; /* must be zero */
/*
- * vnodes to physical NUMA nodes mask.
- * This kept on per-domain basis for
- * interested consumers, such as numa aware ballooning.
+ * IN: array for specifying the distances of the vNUMA nodes
+ * between each others. Shall have nr_vnodes*nr_vnodes elements.
+ */
+ XEN_GUEST_HANDLE_64(uint) vdistance;
+ /*
+ * IN: array for specifying to what vNUMA node each vCPU belongs.
+ * Shall have nr_vcpus elements.
+ */
+ XEN_GUEST_HANDLE_64(uint) vcpu_to_vnode;
+ /*
+ * IN: array for specifying on what physical NUMA node each vNUMA
+ * node is placed. Shall have nr_vnodes elements.
*/
XEN_GUEST_HANDLE_64(uint) vnode_to_pnode;
-
/*
- * memory rages for each vNUMA node
+ * IN: array for specifying the memory ranges. Shall have
+ * nr_vmemranges elements.
*/
XEN_GUEST_HANDLE_64(xen_vmemrange_t) vmemrange;
};
@@ -1038,6 +1033,79 @@ struct xen_domctl_psr_cmt_op {
typedef struct xen_domctl_psr_cmt_op xen_domctl_psr_cmt_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cmt_op_t);
+/* XEN_DOMCTL_MONITOR_*
+ *
+ * Enable/disable monitoring various VM events.
+ * This domctl configures what events will be reported to helper apps
+ * via the ring buffer "MONITOR". The ring has to be first enabled
+ * with the domctl XEN_DOMCTL_VM_EVENT_OP_MONITOR.
+ *
+ * GET_CAPABILITIES can be used to determine which of these features is
+ * available on a given platform.
+ *
+ * NOTICE: mem_access events are also delivered via the "MONITOR" ring buffer;
+ * however, enabling/disabling those events is performed with the use of
+ * memory_op hypercalls!
+ */
+#define XEN_DOMCTL_MONITOR_OP_ENABLE 0
+#define XEN_DOMCTL_MONITOR_OP_DISABLE 1
+#define XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES 2
+
+#define XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG 0
+#define XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR 1
+#define XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP 2
+#define XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT 3
+#define XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST 4
+
+struct xen_domctl_monitor_op {
+ uint32_t op; /* XEN_DOMCTL_MONITOR_OP_* */
+
+ /*
+ * When used with ENABLE/DISABLE this has to be set to
+ * the requested XEN_DOMCTL_MONITOR_EVENT_* value.
+ * With GET_CAPABILITIES this field returns a bitmap of
+ * events supported by the platform, in the format
+ * (1 << XEN_DOMCTL_MONITOR_EVENT_*).
+ */
+ uint32_t event;
+
+ /*
+ * Further options when issuing XEN_DOMCTL_MONITOR_OP_ENABLE.
+ */
+ union {
+ struct {
+ /* Which control register */
+ uint8_t index;
+ /* Pause vCPU until response */
+ uint8_t sync;
+ /* Send event only on a change of value */
+ uint8_t onchangeonly;
+ } mov_to_cr;
+
+ struct {
+ /* Enable the capture of an extended set of MSRs */
+ uint8_t extended_capture;
+ } mov_to_msr;
+
+ struct {
+ /* Pause vCPU until response */
+ uint8_t sync;
+ } guest_request;
+ } u;
+};
+typedef struct xen_domctl_monitor_op xen_domctl_monitor_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_monitor_op_t);
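With the ring set up (see xen_domctl_vm_event_op above), individual events are switched on like this (sketch; do_domctl() wrapper assumed):

    struct xen_domctl domctl = { 0 };

    domctl.cmd = XEN_DOMCTL_monitor_op;
    domctl.domain = domid;
    domctl.u.monitor_op.op = XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES;
    /* rc = do_domctl(xch, &domctl);  -- 'event' now holds the capability bitmap */

    if ( domctl.u.monitor_op.event & (1U << XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP) )
    {
        domctl.u.monitor_op.op    = XEN_DOMCTL_MONITOR_OP_ENABLE;
        domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP;
        /* rc = do_domctl(xch, &domctl); */
    }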
+
+struct xen_domctl_psr_cat_op {
+#define XEN_DOMCTL_PSR_CAT_OP_SET_L3_CBM 0
+#define XEN_DOMCTL_PSR_CAT_OP_GET_L3_CBM 1
+ uint32_t cmd; /* IN: XEN_DOMCTL_PSR_CAT_OP_* */
+ uint32_t target; /* IN */
+ uint64_t data; /* IN/OUT */
+};
+typedef struct xen_domctl_psr_cat_op xen_domctl_psr_cat_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cat_op_t);
+
struct xen_domctl {
uint32_t cmd;
#define XEN_DOMCTL_createdomain 1
@@ -1046,8 +1114,8 @@ struct xen_domctl {
#define XEN_DOMCTL_unpausedomain 4
#define XEN_DOMCTL_getdomaininfo 5
#define XEN_DOMCTL_getmemlist 6
-#define XEN_DOMCTL_getpageframeinfo 7
-#define XEN_DOMCTL_getpageframeinfo2 8
+/* #define XEN_DOMCTL_getpageframeinfo 7 Obsolete - use getpageframeinfo3 */
+/* #define XEN_DOMCTL_getpageframeinfo2 8 Obsolete - use getpageframeinfo3 */
#define XEN_DOMCTL_setvcpuaffinity 9
#define XEN_DOMCTL_shadow_op 10
#define XEN_DOMCTL_max_mem 11
@@ -1062,10 +1130,10 @@ struct xen_domctl {
#define XEN_DOMCTL_iomem_permission 20
#define XEN_DOMCTL_ioport_permission 21
#define XEN_DOMCTL_hypercall_init 22
-#define XEN_DOMCTL_arch_setup 23
+#define XEN_DOMCTL_arch_setup 23 /* Obsolete IA64 only */
#define XEN_DOMCTL_settimeoffset 24
#define XEN_DOMCTL_getvcpuaffinity 25
-#define XEN_DOMCTL_real_mode_area 26
+#define XEN_DOMCTL_real_mode_area 26 /* Obsolete PPC only */
#define XEN_DOMCTL_resumedomain 27
#define XEN_DOMCTL_sendtrigger 28
#define XEN_DOMCTL_subscribe 29
@@ -1080,7 +1148,7 @@ struct xen_domctl {
#define XEN_DOMCTL_pin_mem_cacheattr 41
#define XEN_DOMCTL_set_ext_vcpucontext 42
#define XEN_DOMCTL_get_ext_vcpucontext 43
-#define XEN_DOMCTL_set_opt_feature 44
+#define XEN_DOMCTL_set_opt_feature 44 /* Obsolete IA64 only */
#define XEN_DOMCTL_test_assign_device 45
#define XEN_DOMCTL_set_target 46
#define XEN_DOMCTL_deassign_device 47
@@ -1092,7 +1160,7 @@ struct xen_domctl {
#define XEN_DOMCTL_suppress_spurious_page_faults 53
#define XEN_DOMCTL_debug_op 54
#define XEN_DOMCTL_gethvmcontext_partial 55
-#define XEN_DOMCTL_mem_event_op 56
+#define XEN_DOMCTL_vm_event_op 56
#define XEN_DOMCTL_mem_sharing_op 57
#define XEN_DOMCTL_disable_migrate 58
#define XEN_DOMCTL_gettscinfo 59
@@ -1112,7 +1180,8 @@ struct xen_domctl {
#define XEN_DOMCTL_set_vcpu_msrs 73
#define XEN_DOMCTL_setvnumainfo 74
#define XEN_DOMCTL_psr_cmt_op 75
-#define XEN_DOMCTL_arm_configure_domain 76
+#define XEN_DOMCTL_monitor_op 77
+#define XEN_DOMCTL_psr_cat_op 78
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -1126,8 +1195,6 @@ struct xen_domctl {
#endif
struct xen_domctl_getdomaininfo getdomaininfo;
struct xen_domctl_getmemlist getmemlist;
- struct xen_domctl_getpageframeinfo getpageframeinfo;
- struct xen_domctl_getpageframeinfo2 getpageframeinfo2;
struct xen_domctl_getpageframeinfo3 getpageframeinfo3;
struct xen_domctl_nodeaffinity nodeaffinity;
struct xen_domctl_vcpuaffinity vcpuaffinity;
@@ -1163,7 +1230,7 @@ struct xen_domctl {
struct xen_domctl_set_target set_target;
struct xen_domctl_subscribe subscribe;
struct xen_domctl_debug_op debug_op;
- struct xen_domctl_mem_event_op mem_event_op;
+ struct xen_domctl_vm_event_op vm_event_op;
struct xen_domctl_mem_sharing_op mem_sharing_op;
#if defined(__i386__) || defined(__x86_64__)
struct xen_domctl_cpuid cpuid;
@@ -1181,6 +1248,8 @@ struct xen_domctl {
struct xen_domctl_gdbsx_domstatus gdbsx_domstatus;
struct xen_domctl_vnuma vnuma;
struct xen_domctl_psr_cmt_op psr_cmt_op;
+ struct xen_domctl_monitor_op monitor_op;
+ struct xen_domctl_psr_cat_op psr_cat_op;
uint8_t pad[128];
} u;
};
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/errno.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/include/xen/interface/errno.h Fri Oct 16 16:24:14 2015 +0200
@@ -0,0 +1,95 @@
+#ifndef __XEN_PUBLIC_ERRNO_H__
+
+#ifndef __ASSEMBLY__
+
+#define XEN_ERRNO(name, value) XEN_##name = value,
+enum xen_errno {
+
+#else /* !__ASSEMBLY__ */
+
+#define XEN_ERRNO(name, value) .equ XEN_##name, value
+
+#endif /* __ASSEMBLY__ */
+
+/* ` enum neg_errnoval { [ -Efoo for each Efoo in the list below ] } */
+/* ` enum errnoval { */
+
+#endif /* __XEN_PUBLIC_ERRNO_H__ */
+
+#ifdef XEN_ERRNO
+
+/*
+ * Values originating from x86 Linux. Please consider using respective
+ * values when adding new definitions here.
+ *
+ * The set of identifiers to be added here shouldn't extend beyond what
+ * POSIX mandates (see e.g.
+ * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html)
+ * with the exception that we support some optional (XSR) values
+ * specified there (but no new ones should be added).
+ */
+
+XEN_ERRNO(EPERM, 1) /* Operation not permitted */
+XEN_ERRNO(ENOENT, 2) /* No such file or directory */
+XEN_ERRNO(ESRCH, 3) /* No such process */
+#ifdef __XEN__ /* Internal only, should never be exposed to the guest. */
+XEN_ERRNO(EINTR, 4) /* Interrupted system call */
+#endif
+XEN_ERRNO(EIO, 5) /* I/O error */
+XEN_ERRNO(ENXIO, 6) /* No such device or address */
+XEN_ERRNO(E2BIG, 7) /* Arg list too long */
+XEN_ERRNO(ENOEXEC, 8) /* Exec format error */
+XEN_ERRNO(EBADF, 9) /* Bad file number */
+XEN_ERRNO(ECHILD, 10) /* No child processes */
+XEN_ERRNO(EAGAIN, 11) /* Try again */
+XEN_ERRNO(ENOMEM, 12) /* Out of memory */
+XEN_ERRNO(EACCES, 13) /* Permission denied */
+XEN_ERRNO(EFAULT, 14) /* Bad address */
+XEN_ERRNO(EBUSY, 16) /* Device or resource busy */
+XEN_ERRNO(EEXIST, 17) /* File exists */
+XEN_ERRNO(EXDEV, 18) /* Cross-device link */
+XEN_ERRNO(ENODEV, 19) /* No such device */
+XEN_ERRNO(EINVAL, 22) /* Invalid argument */
+XEN_ERRNO(ENFILE, 23) /* File table overflow */
+XEN_ERRNO(EMFILE, 24) /* Too many open files */
+XEN_ERRNO(ENOSPC, 28) /* No space left on device */
+XEN_ERRNO(EMLINK, 31) /* Too many links */
+XEN_ERRNO(EDOM, 33) /* Math argument out of domain of func */
+XEN_ERRNO(ERANGE, 34) /* Math result not representable */
+XEN_ERRNO(EDEADLK, 35) /* Resource deadlock would occur */
+XEN_ERRNO(ENAMETOOLONG, 36) /* File name too long */
+XEN_ERRNO(ENOLCK, 37) /* No record locks available */
+XEN_ERRNO(ENOSYS, 38) /* Function not implemented */
+XEN_ERRNO(ENODATA, 61) /* No data available */
+XEN_ERRNO(ETIME, 62) /* Timer expired */
+XEN_ERRNO(EBADMSG, 74) /* Not a data message */
+XEN_ERRNO(EOVERFLOW, 75) /* Value too large for defined data type */
+XEN_ERRNO(EILSEQ, 84) /* Illegal byte sequence */
+#ifdef __XEN__ /* Internal only, should never be exposed to the guest. */
+XEN_ERRNO(ERESTART, 85) /* Interrupted system call should be restarted */
+#endif
+XEN_ERRNO(ENOTSOCK, 88) /* Socket operation on non-socket */
+XEN_ERRNO(EOPNOTSUPP, 95) /* Operation not supported on transport endpoint */
+XEN_ERRNO(EADDRINUSE, 98) /* Address already in use */
+XEN_ERRNO(EADDRNOTAVAIL, 99) /* Cannot assign requested address */
+XEN_ERRNO(ENOBUFS, 105) /* No buffer space available */
+XEN_ERRNO(EISCONN, 106) /* Transport endpoint is already connected */
+XEN_ERRNO(ENOTCONN, 107) /* Transport endpoint is not connected */
+XEN_ERRNO(ETIMEDOUT, 110) /* Connection timed out */
+
+#undef XEN_ERRNO
+#endif /* XEN_ERRNO */
+
+#ifndef __XEN_PUBLIC_ERRNO_H__
+#define __XEN_PUBLIC_ERRNO_H__
+
+/* ` } */
+
+#ifndef __ASSEMBLY__
+};
+#endif
+
+#define XEN_EWOULDBLOCK XEN_EAGAIN /* Operation would block */
+#define XEN_EDEADLOCK XEN_EDEADLK /* Resource deadlock would occur */
+
+#endif /* __XEN_PUBLIC_ERRNO_H__ */
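The double evaluation above means the same list serves both C and assembly; in a C translation unit the net effect is roughly this (illustration only):

    /* First pass (XEN_ERRNO undefined) opens the enum, the XEN_ERRNO()
     * entries populate it, and the trailing part closes it again:
     *
     *   enum xen_errno {
     *       XEN_EPERM     = 1,
     *       XEN_ENOENT    = 2,
     *       ...
     *       XEN_ETIMEDOUT = 110,
     *   };
     *
     * Under __ASSEMBLY__ each entry instead becomes ".equ XEN_EPERM, 1" etc.
     * Hypercall callers can then test returns such as rc == -XEN_ENOSYS. */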
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/features.h
--- a/include/xen/interface/features.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/features.h Fri Oct 16 16:24:14 2015 +0200
@@ -99,6 +99,9 @@
#define XENFEAT_grant_map_identity 12
*/
+/* Guest can use XENMEMF_vnode to specify virtual node for memory op. */
+#define XENFEAT_memory_op_vnode_supported 13
+
#define XENFEAT_NR_SUBMAPS 1
#endif /* __XEN_PUBLIC_FEATURES_H__ */
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/grant_table.h
--- a/include/xen/interface/grant_table.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/grant_table.h Fri Oct 16 16:24:14 2015 +0200
@@ -134,8 +134,10 @@ struct grant_entry_v1 {
/* The domain being granted foreign privileges. [GST] */
domid_t domid;
/*
- * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
- * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN]
+ * GTF_permit_access: GFN that @domid is allowed to map and access. [GST]
+ * GTF_accept_transfer: GFN that @domid is allowed to transfer into. [GST]
+ * GTF_transfer_completed: MFN whose ownership transferred by @domid
+ * (non-translated guests only). [XEN]
*/
uint32_t frame;
};
@@ -321,7 +323,7 @@ typedef uint32_t grant_handle_t;
/*
* GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
* by devices and/or host CPUs. If successful, <handle> is a tracking number
- * that must be presented later to destroy the mapping(s). On error, <handle>
+ * that must be presented later to destroy the mapping(s). On error, <status>
* is a negative status code.
* NOTES:
* 1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
@@ -453,7 +455,7 @@ DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_
struct gnttab_copy {
/* IN parameters. */
- struct {
+ struct gnttab_copy_ptr {
union {
grant_ref_t ref;
xen_pfn_t gmfn;
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/hvm/e820.h
--- a/include/xen/interface/hvm/e820.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/hvm/e820.h Fri Oct 16 16:24:14 2015 +0200
@@ -1,4 +1,3 @@
-
/*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
@@ -17,6 +16,8 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2006, Keir Fraser
*/
#ifndef __XEN_PUBLIC_HVM_E820_H__
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/hvm/hvm_info_table.h
--- a/include/xen/interface/hvm/hvm_info_table.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/hvm/hvm_info_table.h Fri Oct 16 16:24:14 2015 +0200
@@ -20,6 +20,8 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2006, Keir Fraser
*/
#ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/hvm/hvm_op.h
--- a/include/xen/interface/hvm/hvm_op.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/hvm/hvm_op.h Fri Oct 16 16:24:14 2015 +0200
@@ -16,6 +16,8 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2007, Keir Fraser
*/
#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
@@ -81,6 +83,7 @@ typedef enum {
HVMMEM_ram_rw, /* Normal read/write guest RAM */
HVMMEM_ram_ro, /* Read-only; writes are discarded */
HVMMEM_mmio_dm, /* Reads and write go to the device model */
+ HVMMEM_mmio_write_dm /* Read-only; writes go to the device model */
} hvmmem_type_t;
/* Following tools-only interfaces may change in future. */
@@ -265,6 +268,13 @@ typedef uint16_t ioservid_t;
#define HVMOP_create_ioreq_server 17
struct xen_hvm_create_ioreq_server {
domid_t domid; /* IN - domain to be serviced */
+#define HVM_IOREQSRV_BUFIOREQ_OFF 0
+#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
+/*
+ * Use this when read_pointer gets updated atomically and
+ * the pointer pair gets read atomically:
+ */
+#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
ioservid_t id; /* OUT - server id */
};
@@ -369,6 +379,116 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_iore
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * HVMOP_set_evtchn_upcall_vector: Set a <vector> that should be used for event
+ * channel upcalls on the specified <vcpu>. If set,
+ * this vector will be used in preference to the
+ * domain global callback via (see
+ * HVM_PARAM_CALLBACK_IRQ).
+ */
+#define HVMOP_set_evtchn_upcall_vector 23
+struct xen_hvm_evtchn_upcall_vector {
+ uint32_t vcpu;
+ uint8_t vector;
+};
+typedef struct xen_hvm_evtchn_upcall_vector xen_hvm_evtchn_upcall_vector_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_evtchn_upcall_vector_t);
+
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
+#define HVMOP_guest_request_vm_event 24
+
+/* HVMOP_altp2m: perform altp2m state operations */
+#define HVMOP_altp2m 25
+
+#define HVMOP_ALTP2M_INTERFACE_VERSION 0x00000001
+
+struct xen_hvm_altp2m_domain_state {
+ /* IN or OUT variable on/off */
+ uint8_t state;
+};
+typedef struct xen_hvm_altp2m_domain_state xen_hvm_altp2m_domain_state_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_domain_state_t);
+
+struct xen_hvm_altp2m_vcpu_enable_notify {
+ uint32_t vcpu_id;
+ uint32_t pad;
+ /* #VE info area gfn */
+ uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_vcpu_enable_notify xen_hvm_altp2m_vcpu_enable_notify_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_enable_notify_t);
+
+struct xen_hvm_altp2m_view {
+ /* IN/OUT variable */
+ uint16_t view;
+ /* Create view only: default access type
+ * NOTE: currently ignored */
+ uint16_t hvmmem_default_access; /* xenmem_access_t */
+};
+typedef struct xen_hvm_altp2m_view xen_hvm_altp2m_view_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_view_t);
+
+struct xen_hvm_altp2m_set_mem_access {
+ /* view */
+ uint16_t view;
+ /* Memory type */
+ uint16_t hvmmem_access; /* xenmem_access_t */
+ uint32_t pad;
+ /* gfn */
+ uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_set_mem_access xen_hvm_altp2m_set_mem_access_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_set_mem_access_t);
+
+struct xen_hvm_altp2m_change_gfn {
+ /* view */
+ uint16_t view;
+ uint16_t pad1;
+ uint32_t pad2;
+ /* old gfn */
+ uint64_t old_gfn;
+ /* new gfn, INVALID_GFN (~0UL) means revert */
+ uint64_t new_gfn;
+};
+typedef struct xen_hvm_altp2m_change_gfn xen_hvm_altp2m_change_gfn_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_change_gfn_t);
+
+struct xen_hvm_altp2m_op {
+ uint32_t version; /* HVMOP_ALTP2M_INTERFACE_VERSION */
+ uint32_t cmd;
+/* Get/set the altp2m state for a domain */
+#define HVMOP_altp2m_get_domain_state 1
+#define HVMOP_altp2m_set_domain_state 2
+/* Set the current VCPU to receive altp2m event notifications */
+#define HVMOP_altp2m_vcpu_enable_notify 3
+/* Create a new view */
+#define HVMOP_altp2m_create_p2m 4
+/* Destroy a view */
+#define HVMOP_altp2m_destroy_p2m 5
+/* Switch view for an entire domain */
+#define HVMOP_altp2m_switch_p2m 6
+/* Notify that a page of memory is to have specific access types */
+#define HVMOP_altp2m_set_mem_access 7
+/* Change a p2m entry to have a different gfn->mfn mapping */
+#define HVMOP_altp2m_change_gfn 8
+ domid_t domain;
+ uint16_t pad1;
+ uint32_t pad2;
+ union {
+ struct xen_hvm_altp2m_domain_state domain_state;
+ struct xen_hvm_altp2m_vcpu_enable_notify enable_notify;
+ struct xen_hvm_altp2m_view view;
+ struct xen_hvm_altp2m_set_mem_access set_mem_access;
+ struct xen_hvm_altp2m_change_gfn change_gfn;
+ uint8_t pad[64];
+ } u;
+};
+typedef struct xen_hvm_altp2m_op xen_hvm_altp2m_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_op_t);
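For instance, switching the whole domain to an alternate view would be issued as below (sketch; the HYPERVISOR_hvm_op() wrapper name and the view index variable are assumptions):

    struct xen_hvm_altp2m_op op = { 0 };

    op.version = HVMOP_ALTP2M_INTERFACE_VERSION;
    op.cmd     = HVMOP_altp2m_switch_p2m;
    op.domain  = domid;
    op.u.view.view = altp2m_idx;     /* view previously created with create_p2m */

    /* rc = HYPERVISOR_hvm_op(HVMOP_altp2m, &op); */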
+
#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
/*
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/hvm/hvm_xs_strings.h
--- a/include/xen/interface/hvm/hvm_xs_strings.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/hvm/hvm_xs_strings.h Fri Oct 16 16:24:14 2015 +0200
@@ -20,6 +20,8 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2013, Citrix Systems
*/
#ifndef __XEN_PUBLIC_HVM_HVM_XS_STRINGS_H__
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/hvm/ioreq.h
--- a/include/xen/interface/hvm/ioreq.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/hvm/ioreq.h Fri Oct 16 16:24:14 2015 +0200
@@ -83,8 +83,17 @@ typedef struct buf_ioreq buf_ioreq_t;
#define IOREQ_BUFFER_SLOT_NUM 511 /* 8 bytes each, plus 2 4-byte indexes */
struct buffered_iopage {
- unsigned int read_pointer;
- unsigned int write_pointer;
+#ifdef __XEN__
+ union bufioreq_pointers {
+ struct {
+#endif
+ uint32_t read_pointer;
+ uint32_t write_pointer;
+#ifdef __XEN__
+ };
+ uint64_t full;
+ } ptrs;
+#endif
buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM];
}; /* NB. Size of this structure must be no greater than one page. */
typedef struct buffered_iopage buffered_iopage_t;
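The union exists so that, with HVM_IOREQSRV_BUFIOREQ_ATOMIC (see hvm_op.h above), Xen can snapshot both indexes with one 64-bit load; hypervisor-side consumption would look roughly like this (sketch; "iopage" is an assumption):

    union bufioreq_pointers ptrs;

    /* A single 64-bit read gives a consistent read/write pointer pair. */
    ptrs.full = *(volatile uint64_t *)&iopage->ptrs.full;

    while ( ptrs.read_pointer != ptrs.write_pointer )
    {
        /* consume iopage->buf_ioreq[ptrs.read_pointer % IOREQ_BUFFER_SLOT_NUM] */
        ptrs.read_pointer++;
    }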
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/hvm/params.h
--- a/include/xen/interface/hvm/params.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/hvm/params.h Fri Oct 16 16:24:14 2015 +0200
@@ -16,6 +16,8 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2007, Keir Fraser
*/
#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
@@ -97,10 +99,15 @@
#define _HVMPV_time_ref_count 2
#define HVMPV_time_ref_count (1 << _HVMPV_time_ref_count)
+/* Enable Reference TSC Page (HV_X64_MSR_REFERENCE_TSC) */
+#define _HVMPV_reference_tsc 3
+#define HVMPV_reference_tsc (1 << _HVMPV_reference_tsc)
+
#define HVMPV_feature_mask \
(HVMPV_base_freq | \
HVMPV_no_freq | \
- HVMPV_time_ref_count)
+ HVMPV_time_ref_count | \
+ HVMPV_reference_tsc)
#endif
@@ -162,8 +169,7 @@
*/
#define HVM_PARAM_ACPI_IOPORTS_LOCATION 19
-/* Enable blocking memory events, async or sync (pause vcpu until response)
- * onchangeonly indicates messages only on a change of value */
+/* Deprecated */
#define HVM_PARAM_MEMORY_EVENT_CR0 20
#define HVM_PARAM_MEMORY_EVENT_CR3 21
#define HVM_PARAM_MEMORY_EVENT_CR4 22
@@ -171,18 +177,12 @@
#define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25
#define HVM_PARAM_MEMORY_EVENT_MSR 30
-#define HVMPME_MODE_MASK (3 << 0)
-#define HVMPME_mode_disabled 0
-#define HVMPME_mode_async 1
-#define HVMPME_mode_sync 2
-#define HVMPME_onchangeonly (1 << 2)
-
/* Boolean: Enable nestedhvm (hvm only) */
#define HVM_PARAM_NESTEDHVM 24
/* Params for the mem event rings */
#define HVM_PARAM_PAGING_RING_PFN 27
-#define HVM_PARAM_ACCESS_RING_PFN 28
+#define HVM_PARAM_MONITOR_RING_PFN 28
#define HVM_PARAM_SHARING_RING_PFN 29
/* SHUTDOWN_* action in case of a triple fault */
@@ -194,6 +194,9 @@
/* Location of the VM Generation ID in guest physical address space. */
#define HVM_PARAM_VM_GENERATION_ID_ADDR 34
-#define HVM_NR_PARAMS 35
+/* Boolean: Enable altp2m */
+#define HVM_PARAM_ALTP2M 35
+
+#define HVM_NR_PARAMS 36
#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/io/blkif.h
--- a/include/xen/interface/io/blkif.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/io/blkif.h Fri Oct 16 16:24:14 2015 +0200
@@ -92,6 +92,12 @@
* backend driver to open the backing device. (e.g. the path to the
* file or block device representing the backing store.)
*
+ * physical-device
+ * Values: "MAJOR:MINOR"
+ *
+ * MAJOR and MINOR are the major number and minor number of the
+ * backing device respectively.
+ *
* type
* Values: "file", "phy", "tap"
*
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/io/libxenvchan.h
--- a/include/xen/interface/io/libxenvchan.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/io/libxenvchan.h Fri Oct 16 16:24:14 2015 +0200
@@ -21,8 +21,7 @@
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
*
* @section DESCRIPTION
*
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/io/netif.h
--- a/include/xen/interface/io/netif.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/io/netif.h Fri Oct 16 16:24:14 2015 +0200
@@ -136,14 +136,145 @@
*/
/*
+ * "feature-multicast-control" advertises the capability to filter ethernet
+ * multicast packets in the backend. To enable use of this capability the
+ * frontend must set "request-multicast-control" before moving into the
+ * connected state.
+ *
+ * If "request-multicast-control" is set then the backend transmit side should
+ * no longer flood multicast packets to the frontend, it should instead drop any
+ * multicast packet that does not match in a filter list. The list is
+ * amended by the frontend by sending dummy transmit requests containing
+ * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL} extra-info fragments as specified below.
+ * Once enabled by the frontend, the feature cannot be disabled except by
+ * closing and re-connecting to the backend.
+ */
+
+/*
* This is the 'wire' format for packets:
- * Request 1: netif_tx_request -- NETTXF_* (any flags)
- * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info)
- * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE)
- * Request 4: netif_tx_request -- NETTXF_more_data
- * Request 5: netif_tx_request -- NETTXF_more_data
+ * Request 1: netif_tx_request_t -- NETTXF_* (any flags)
+ * [Request 2: netif_extra_info_t] (only if request 1 has NETTXF_extra_info)
+ * [Request 3: netif_extra_info_t] (only if request 2 has XEN_NETIF_EXTRA_MORE)
+ * Request 4: netif_tx_request_t -- NETTXF_more_data
+ * Request 5: netif_tx_request_t -- NETTXF_more_data
* ...
- * Request N: netif_tx_request -- 0
+ * Request N: netif_tx_request_t -- 0
+ */
+
+/*
+ * Guest transmit
+ * ==============
+ *
+ * Ring slot size is 12 octets, however not all request/response
+ * structs use the full size.
+ *
+ * tx request data (netif_tx_request_t)
+ * ------------------------------------
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | grant ref | offset | flags |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | size |
+ * +-----+-----+-----+-----+
+ *
+ * grant ref: Reference to buffer page.
+ * offset: Offset within buffer page.
+ * flags: NETTXF_*.
+ * id: request identifier, echoed in response.
+ * size: packet size in bytes.
+ *
+ * tx response (netif_tx_response_t)
+ * ---------------------------------
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | status | unused |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | unused |
+ * +-----+-----+-----+-----+
+ *
+ * id: reflects id in transmit request
+ * status: NETIF_RSP_*
+ *
+ * Guest receive
+ * =============
+ *
+ * Ring slot size is 8 octets.
+ *
+ * rx request (netif_rx_request_t)
+ * -------------------------------
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | pad | gref |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * id: request identifier, echoed in response.
+ * gref: reference to incoming granted frame.
+ *
+ * rx response (netif_rx_response_t)
+ * ---------------------------------
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | offset | flags | status |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * id: reflects id in receive request
+ * offset: offset in page of start of received packet
+ * flags: NETRXF_*
+ * status: -ve: NETIF_RSP_*; +ve: Rx'ed pkt size.
+ *
+ * Extra Info
+ * ==========
+ *
+ * Can be present if initial request has NET{T,R}XF_extra_info, or
+ * previous extra request has XEN_NETIF_EXTRA_MORE.
+ *
+ * The struct therefore needs to fit into either a tx or rx slot and
+ * is therefore limited to 8 octets.
+ *
+ * extra info (netif_extra_info_t)
+ * -------------------------------
+ *
+ * General format:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * |type |flags| type specific data |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | padding for tx |
+ * +-----+-----+-----+-----+
+ *
+ * type: XEN_NETIF_EXTRA_TYPE_*
+ * flags: XEN_NETIF_EXTRA_FLAG_*
+ * padding for tx: present only in the tx case due to 8 octet limit
+ * from rx case. Not shown in type specific entries below.
+ *
+ * XEN_NETIF_EXTRA_TYPE_GSO:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * |type |flags| size |type | pad | features |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * type: Must be XEN_NETIF_EXTRA_TYPE_GSO
+ * flags: XEN_NETIF_EXTRA_FLAG_*
+ * size: Maximum payload size of each segment.
+ * type: XEN_NETIF_GSO_TYPE_*
+ * features: XEN_NETIF_GSO_FEAT_*
+ *
+ * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * |type |flags| addr |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * type: Must be XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}
+ * flags: XEN_NETIF_EXTRA_FLAG_*
+ * addr: address to add/remove
*/
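So a frontend that negotiated "request-multicast-control" registers interest in a group by queuing a dummy transmit plus an extra-info slot, roughly as follows (sketch; ring bookkeeping, grant and address variables are assumptions):

    /* Slot 1: zero-length tx request flagged as carrying extra info. */
    struct netif_tx_request *txreq = RING_GET_REQUEST(&tx_ring, prod++);
    txreq->gref   = dummy_gref;      /* any valid grant; no data is read */
    txreq->offset = 0;
    txreq->flags  = NETTXF_extra_info;
    txreq->id     = id;
    txreq->size   = 0;

    /* Slot 2: the MCAST_ADD fragment carrying the group address. */
    struct netif_extra_info *extra =
        (struct netif_extra_info *)RING_GET_REQUEST(&tx_ring, prod++);
    extra->type  = XEN_NETIF_EXTRA_TYPE_MCAST_ADD;
    extra->flags = 0;
    memcpy(extra->u.mcast.addr, mcast_addr, 6);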
/* Protocol checksum field is blank in the packet (hardware offload)? */
@@ -179,7 +310,7 @@ typedef struct netif_tx_request netif_tx
#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_MAX (4)
-/* netif_extra_info flags. */
+/* netif_extra_info_t flags. */
#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
@@ -189,8 +320,8 @@ typedef struct netif_tx_request netif_tx
#define XEN_NETIF_GSO_TYPE_TCPV6 (2)
/*
- * This structure needs to fit within both netif_tx_request and
- * netif_rx_response for compatibility.
+ * This structure needs to fit within both netif_tx_request_t and
+ * netif_rx_response_t for compatibility.
*/
struct netif_extra_info {
uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */
@@ -225,14 +356,6 @@ struct netif_extra_info {
/*
* XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}:
- * Backend advertises availability via 'feature-multicast-control'
- * xenbus node containing value '1'.
- * Frontend requests this feature by advertising
- * 'request-multicast-control' xenbus node containing value '1'.
- * If multicast control is requested then multicast flooding is
- * disabled and the frontend must explicitly register its interest
- * in multicast groups using dummy transmit requests containing
- * MCAST_{ADD,DEL} extra-info fragments.
*/
struct {
uint8_t addr[6]; /* Address to add/remove. */
@@ -251,6 +374,7 @@ typedef struct netif_tx_response netif_t
struct netif_rx_request {
uint16_t id; /* Echoed in response message. */
+ uint16_t pad;
grant_ref_t gref; /* Reference to incoming granted frame */
};
typedef struct netif_rx_request netif_rx_request_t;
@@ -289,7 +413,7 @@ DEFINE_RING_TYPES(netif_rx, struct netif
#define NETIF_RSP_DROPPED -2
#define NETIF_RSP_ERROR -1
#define NETIF_RSP_OKAY 0
-/* No response: used for auxiliary requests (e.g., netif_tx_extra). */
+/* No response: used for auxiliary requests (e.g., netif_extra_info_t). */
#define NETIF_RSP_NULL 1
#endif
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/io/protocols.h
--- a/include/xen/interface/io/protocols.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/io/protocols.h Fri Oct 16 16:24:14 2015 +0200
@@ -18,6 +18,8 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2008, Keir Fraser
*/
#ifndef __XEN_PROTOCOLS_H__
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/io/ring.h
--- a/include/xen/interface/io/ring.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/io/ring.h Fri Oct 16 16:24:14 2015 +0200
@@ -111,7 +111,7 @@ struct __name##_sring {
uint8_t msg; \
} tapif_user; \
uint8_t pvt_pad[4]; \
- } private; \
+ } pvt; \
uint8_t __pad[44]; \
union __name##_sring_entry ring[1]; /* variable-length */ \
}; \
@@ -156,7 +156,7 @@ typedef struct __name##_back_ring __name
#define SHARED_RING_INIT(_s) do { \
(_s)->req_prod = (_s)->rsp_prod = 0; \
(_s)->req_event = (_s)->rsp_event = 1; \
- (void)memset((_s)->private.pvt_pad, 0, sizeof((_s)->private.pvt_pad)); \
+ (void)memset((_s)->pvt.pvt_pad, 0, sizeof((_s)->pvt.pvt_pad)); \
(void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
} while(0)
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/io/usbif.h
--- a/include/xen/interface/io/usbif.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/io/usbif.h Fri Oct 16 16:24:14 2015 +0200
@@ -31,6 +31,76 @@
#include "ring.h"
#include "../grant_table.h"
+/*
+ * Feature and Parameter Negotiation
+ * =================================
+ * The two halves of a Xen pvUSB driver utilize nodes within the XenStore to
+ * communicate capabilities and to negotiate operating parameters. This
+ * section enumerates these nodes which reside in the respective front and
+ * backend portions of the XenStore, following the XenBus convention.
+ *
+ * Any specified default value is in effect if the corresponding XenBus node
+ * is not present in the XenStore.
+ *
+ * XenStore nodes in sections marked "PRIVATE" are solely for use by the
+ * driver side whose XenBus tree contains them.
+ *
+ *****************************************************************************
+ * Backend XenBus Nodes
+ *****************************************************************************
+ *
+ *------------------ Backend Device Identification (PRIVATE) ------------------
+ *
+ * num-ports
+ * Values: unsigned [1...31]
+ *
+ * Number of ports for this (virtual) USB host connector.
+ *
+ * usb-ver
+ * Values: unsigned [1...2]
+ *
+ * USB version of this host connector: 1 = USB 1.1, 2 = USB 2.0.
+ *
+ * port/[1...31]
+ * Values: string
+ *
+ * Physical USB device connected to the given port, e.g. "3-1.5".
+ *
+ *****************************************************************************
+ * Frontend XenBus Nodes
+ *****************************************************************************
+ *
+ *----------------------- Request Transport Parameters -----------------------
+ *
+ * event-channel
+ * Values: unsigned
+ *
+ * The identifier of the Xen event channel used to signal activity
+ * in the ring buffer.
+ *
+ * urb-ring-ref
+ * Values: unsigned
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * the sole page in a single page sized ring buffer. This is the ring
+ * buffer for urb requests.
+ *
+ * conn-ring-ref
+ * Values: unsigned
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * the sole page in a single page sized ring buffer. This is the ring
+ * buffer for connection/disconnection requests.
+ *
+ * protocol
+ * Values: string (XEN_IO_PROTO_ABI_*)
+ * Default Value: XEN_IO_PROTO_ABI_NATIVE
+ *
+ * The machine ABI rules governing the format of all ring request and
+ * response structures.
+ *
+ */
+
enum usb_spec_version {
USB_VER_UNKNOWN = 0,
USB_VER_USB11,
@@ -41,37 +111,65 @@ enum usb_spec_version {
/*
* USB pipe in usbif_request
*
- * bits 0-5 are specific bits for virtual USB driver.
- * bits 7-31 are standard urb pipe.
+ * - port number: bits 0-4
+ * (USB_MAXCHILDREN is 31)
*
- * - port number(NEW): bits 0-4
- * (USB_MAXCHILDREN is 31)
- *
- * - operation flag(NEW): bit 5
- * (0 = submit urb,
- * 1 = unlink urb)
+ * - operation flag: bit 5
+ * (0 = submit urb,
+ * 1 = unlink urb)
*
* - direction: bit 7
- * (0 = Host-to-Device [Out]
- * 1 = Device-to-Host [In])
+ * (0 = Host-to-Device [Out]
+ * 1 = Device-to-Host [In])
*
* - device address: bits 8-14
*
* - endpoint: bits 15-18
*
- * - pipe type: bits 30-31
- * (00 = isochronous, 01 = interrupt,
- * 10 = control, 11 = bulk)
+ * - pipe type: bits 30-31
+ * (00 = isochronous, 01 = interrupt,
+ * 10 = control, 11 = bulk)
*/
-#define usbif_pipeportnum(pipe) ((pipe) & 0x1f)
-#define usbif_setportnum_pipe(pipe, portnum) \
- ((pipe)|(portnum))
-#define usbif_pipeunlink(pipe) ((pipe) & 0x20)
-#define usbif_pipesubmit(pipe) (!usbif_pipeunlink(pipe))
-#define usbif_setunlink_pipe(pipe) ((pipe)|(0x20))
+#define USBIF_PIPE_PORT_MASK 0x0000001f
+#define USBIF_PIPE_UNLINK 0x00000020
+#define USBIF_PIPE_DIR 0x00000080
+#define USBIF_PIPE_DEV_MASK 0x0000007f
+#define USBIF_PIPE_DEV_SHIFT 8
+#define USBIF_PIPE_EP_MASK 0x0000000f
+#define USBIF_PIPE_EP_SHIFT 15
+#define USBIF_PIPE_TYPE_MASK 0x00000003
+#define USBIF_PIPE_TYPE_SHIFT 30
+#define USBIF_PIPE_TYPE_ISOC 0
+#define USBIF_PIPE_TYPE_INT 1
+#define USBIF_PIPE_TYPE_CTRL 2
+#define USBIF_PIPE_TYPE_BULK 3
+
+#define usbif_pipeportnum(pipe) ((pipe) & USBIF_PIPE_PORT_MASK)
+#define usbif_setportnum_pipe(pipe, portnum) ((pipe) | (portnum))
+
+#define usbif_pipeunlink(pipe) ((pipe) & USBIF_PIPE_UNLINK)
+#define usbif_pipesubmit(pipe) (!usbif_pipeunlink(pipe))
+#define usbif_setunlink_pipe(pipe) ((pipe) | USBIF_PIPE_UNLINK)
+
+#define usbif_pipein(pipe) ((pipe) & USBIF_PIPE_DIR)
+#define usbif_pipeout(pipe) (!usbif_pipein(pipe))
+
+#define usbif_pipedevice(pipe) \
+ (((pipe) >> USBIF_PIPE_DEV_SHIFT) & USBIF_PIPE_DEV_MASK)
+
+#define usbif_pipeendpoint(pipe) \
+ (((pipe) >> USBIF_PIPE_EP_SHIFT) & USBIF_PIPE_EP_MASK)
+
+#define usbif_pipetype(pipe) \
+ (((pipe) >> USBIF_PIPE_TYPE_SHIFT) & USBIF_PIPE_TYPE_MASK)
+#define usbif_pipeisoc(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_ISOC)
+#define usbif_pipeint(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_INT)
+#define usbif_pipectrl(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_CTRL)
+#define usbif_pipebulk(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_BULK)
#define USBIF_MAX_SEGMENTS_PER_REQUEST (16)
+#define USBIF_MAX_PORTNR 31
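For illustration only, a minimal, self-contained sketch of how a pipe value can be composed and decomposed with the USBIF_PIPE_* masks and shifts above (the relevant constants are reproduced locally) might look like this:

#include <stdint.h>
#include <stdio.h>

/* Constants reproduced from the header above. */
#define USBIF_PIPE_PORT_MASK  0x0000001f
#define USBIF_PIPE_DIR        0x00000080
#define USBIF_PIPE_DEV_MASK   0x0000007f
#define USBIF_PIPE_DEV_SHIFT  8
#define USBIF_PIPE_EP_MASK    0x0000000f
#define USBIF_PIPE_EP_SHIFT   15
#define USBIF_PIPE_TYPE_MASK  0x00000003
#define USBIF_PIPE_TYPE_SHIFT 30
#define USBIF_PIPE_TYPE_BULK  3

int main(void)
{
    /* Port 3, IN direction, device 5, endpoint 2, bulk transfer. */
    uint32_t pipe = 3 | USBIF_PIPE_DIR |
                    (5u << USBIF_PIPE_DEV_SHIFT) |
                    (2u << USBIF_PIPE_EP_SHIFT) |
                    ((uint32_t)USBIF_PIPE_TYPE_BULK << USBIF_PIPE_TYPE_SHIFT);

    printf("port=%u dev=%u ep=%u type=%u in=%d\n",
           (unsigned)(pipe & USBIF_PIPE_PORT_MASK),
           (unsigned)((pipe >> USBIF_PIPE_DEV_SHIFT) & USBIF_PIPE_DEV_MASK),
           (unsigned)((pipe >> USBIF_PIPE_EP_SHIFT) & USBIF_PIPE_EP_MASK),
           (unsigned)((pipe >> USBIF_PIPE_TYPE_SHIFT) & USBIF_PIPE_TYPE_MASK),
           !!(pipe & USBIF_PIPE_DIR));
    return 0;
}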
/*
* RING for transferring urbs.
@@ -141,6 +239,10 @@ struct usbif_conn_response {
uint16_t id; /* request id */
uint8_t portnum; /* port number */
uint8_t speed; /* usb_device_speed */
+#define USBIF_SPEED_NONE 0
+#define USBIF_SPEED_LOW 1
+#define USBIF_SPEED_FULL 2
+#define USBIF_SPEED_HIGH 3
};
typedef struct usbif_conn_response usbif_conn_response_t;
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/mem_event.h
--- a/include/xen/interface/mem_event.h Fri Oct 16 15:57:20 2015 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,134 +0,0 @@
-/******************************************************************************
- * mem_event.h
- *
- * Memory event common structures.
- *
- * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _XEN_PUBLIC_MEM_EVENT_H
-#define _XEN_PUBLIC_MEM_EVENT_H
-
-#include "xen.h"
-#include "io/ring.h"
-
-/* Memory event flags */
-#define MEM_EVENT_FLAG_VCPU_PAUSED (1 << 0)
-#define MEM_EVENT_FLAG_DROP_PAGE (1 << 1)
-#define MEM_EVENT_FLAG_EVICT_FAIL (1 << 2)
-#define MEM_EVENT_FLAG_FOREIGN (1 << 3)
-#define MEM_EVENT_FLAG_DUMMY (1 << 4)
-/*
- * Emulate the fault-causing instruction (if set in the event response flags).
- * This will allow the guest to continue execution without lifting the page
- * access restrictions.
- */
-#define MEM_EVENT_FLAG_EMULATE (1 << 5)
-/*
- * Same as MEM_EVENT_FLAG_EMULATE, but with write operations or operations
- * potentially having side effects (like memory mapped or port I/O) disabled.
- */
-#define MEM_EVENT_FLAG_EMULATE_NOWRITE (1 << 6)
-
-/* Reasons for the memory event request */
-#define MEM_EVENT_REASON_UNKNOWN 0 /* typical reason */
-#define MEM_EVENT_REASON_VIOLATION 1 /* access violation, GFN is address */
-#define MEM_EVENT_REASON_CR0 2 /* CR0 was hit: gfn is new CR0 value, gla is previous */
-#define MEM_EVENT_REASON_CR3 3 /* CR3 was hit: gfn is new CR3 value, gla is previous */
-#define MEM_EVENT_REASON_CR4 4 /* CR4 was hit: gfn is new CR4 value, gla is previous */
-#define MEM_EVENT_REASON_INT3 5 /* int3 was hit: gla/gfn are RIP */
-#define MEM_EVENT_REASON_SINGLESTEP 6 /* single step was invoked: gla/gfn are RIP */
-#define MEM_EVENT_REASON_MSR 7 /* MSR was hit: gfn is MSR value, gla is MSR address;
- does NOT honour HVMPME_onchangeonly */
-
-/* Using a custom struct (not hvm_hw_cpu) so as to not fill
- * the mem_event ring buffer too quickly. */
-struct mem_event_regs_x86 {
- uint64_t rax;
- uint64_t rcx;
- uint64_t rdx;
- uint64_t rbx;
- uint64_t rsp;
- uint64_t rbp;
- uint64_t rsi;
- uint64_t rdi;
- uint64_t r8;
- uint64_t r9;
- uint64_t r10;
- uint64_t r11;
- uint64_t r12;
- uint64_t r13;
- uint64_t r14;
- uint64_t r15;
- uint64_t rflags;
- uint64_t dr7;
- uint64_t rip;
- uint64_t cr0;
- uint64_t cr2;
- uint64_t cr3;
- uint64_t cr4;
- uint64_t sysenter_cs;
- uint64_t sysenter_esp;
- uint64_t sysenter_eip;
- uint64_t msr_efer;
- uint64_t msr_star;
- uint64_t msr_lstar;
- uint64_t fs_base;
- uint64_t gs_base;
- uint32_t cs_arbytes;
- uint32_t _pad;
-};
-
-typedef struct mem_event_st {
- uint32_t flags;
- uint32_t vcpu_id;
-
- uint64_t gfn;
- uint64_t offset;
- uint64_t gla; /* if gla_valid */
-
- uint32_t p2mt;
-
- uint16_t access_r:1;
- uint16_t access_w:1;
- uint16_t access_x:1;
- uint16_t gla_valid:1;
- uint16_t fault_with_gla:1;
- uint16_t fault_in_gpt:1;
- uint16_t available:10;
-
- uint16_t reason;
- struct mem_event_regs_x86 x86_regs;
-} mem_event_request_t, mem_event_response_t;
-
-DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t);
-
-#endif
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/memory.h
--- a/include/xen/interface/memory.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/memory.h Fri Oct 16 16:24:14 2015 +0200
@@ -28,6 +28,7 @@
#define __XEN_PUBLIC_MEMORY_H__
#include "xen.h"
+#include "physdev.h"
/*
* Increase or decrease the specified domain's memory reservation. Returns the
@@ -55,6 +56,8 @@
/* Flag to request allocation only from the node specified */
#define XENMEMF_exact_node_request (1<<17)
#define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
+/* Flag to indicate the node specified is virtual node */
+#define XENMEMF_vnode (1<<18)
#endif
struct xen_memory_reservation {
@@ -372,23 +375,23 @@ typedef struct xen_pod_target xen_pod_ta
#define XENMEM_paging_op_evict 1
#define XENMEM_paging_op_prep 2
-struct xen_mem_event_op {
- uint8_t op; /* XENMEM_*_op_* */
+struct xen_mem_paging_op {
+ uint8_t op; /* XENMEM_paging_op_* */
domid_t domain;
-
/* PAGING_PREP IN: buffer to immediately fill page in */
uint64_aligned_t buffer;
/* Other OPs */
uint64_aligned_t gfn; /* IN: gfn of page being operated on */
};
-typedef struct xen_mem_event_op xen_mem_event_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_mem_event_op_t);
+typedef struct xen_mem_paging_op xen_mem_paging_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
#define XENMEM_access_op 21
-#define XENMEM_access_op_resume 0
-#define XENMEM_access_op_set_access 1
-#define XENMEM_access_op_get_access 2
+#define XENMEM_access_op_set_access 0
+#define XENMEM_access_op_get_access 1
+#define XENMEM_access_op_enable_emulate 2
+#define XENMEM_access_op_disable_emulate 3
typedef enum {
XENMEM_access_n,
@@ -439,12 +442,11 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_access_o
#define XENMEM_sharing_op_nominate_gfn 0
#define XENMEM_sharing_op_nominate_gref 1
#define XENMEM_sharing_op_share 2
-#define XENMEM_sharing_op_resume 3
-#define XENMEM_sharing_op_debug_gfn 4
-#define XENMEM_sharing_op_debug_mfn 5
-#define XENMEM_sharing_op_debug_gref 6
-#define XENMEM_sharing_op_add_physmap 7
-#define XENMEM_sharing_op_audit 8
+#define XENMEM_sharing_op_debug_gfn 3
+#define XENMEM_sharing_op_debug_mfn 4
+#define XENMEM_sharing_op_debug_gref 5
+#define XENMEM_sharing_op_add_physmap 6
+#define XENMEM_sharing_op_audit 7
#define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10)
#define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9)
@@ -521,6 +523,40 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_
* The zero value is appropriate.
*/
+/*
+ * With some legacy devices, certain guest-physical addresses cannot safely
+ * be used for other purposes, e.g. to map guest RAM. This hypercall
+ * enumerates those regions so the toolstack can avoid using them.
+ */
+#define XENMEM_reserved_device_memory_map 27
+struct xen_reserved_device_memory {
+ xen_pfn_t start_pfn;
+ xen_ulong_t nr_pages;
+};
+typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
+DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);
+
+struct xen_reserved_device_memory_map {
+#define XENMEM_RDM_ALL 1 /* Request all regions (ignore dev union). */
+ /* IN */
+ uint32_t flags;
+ /*
+ * IN/OUT
+ *
+ * Gets set to the required number of entries when too low,
+ * signaled by error code -ERANGE.
+ */
+ unsigned int nr_entries;
+ /* OUT */
+ XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
+ /* IN */
+ union {
+ struct physdev_pci_device pci;
+ } dev;
+};
+typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);
+
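For illustration only, the -ERANGE resizing convention described above could be driven from the toolstack roughly as sketched below; issue_memory_op() is a hypothetical stand-in for the real hypercall wrapper, and the structures are reduced to plain pointers:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Reduced stand-ins for the structures introduced above. */
struct rdm_entry { uint64_t start_pfn; uint64_t nr_pages; };
struct rdm_map   { uint32_t flags; unsigned int nr_entries; struct rdm_entry *buffer; };

/* Hypothetical hypercall wrapper: returns -ERANGE when nr_entries is too low. */
static int issue_memory_op(struct rdm_map *map)
{
    if (map->nr_entries < 2) { map->nr_entries = 2; return -ERANGE; }
    map->buffer[0] = (struct rdm_entry){ 0xad000, 16 };
    map->buffer[1] = (struct rdm_entry){ 0xfe000, 4 };
    map->nr_entries = 2;
    return 0;
}

int main(void)
{
    struct rdm_map map = { .flags = 1 /* XENMEM_RDM_ALL */, .nr_entries = 0 };
    int rc;

    do {
        /* Grow the buffer to whatever size the previous call asked for. */
        map.buffer = realloc(map.buffer, map.nr_entries * sizeof(*map.buffer));
        rc = issue_memory_op(&map);
    } while (rc == -ERANGE);

    for (unsigned int i = 0; rc == 0 && i < map.nr_entries; i++)
        printf("reserved: pfn %#llx, %llu pages\n",
               (unsigned long long)map.buffer[i].start_pfn,
               (unsigned long long)map.buffer[i].nr_pages);
    free(map.buffer);
    return 0;
}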
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
/*
@@ -572,7 +608,7 @@ struct xen_vnuma_topology_info {
typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
-/* Next available subop number is 27 */
+/* Next available subop number is 28 */
#endif /* __XEN_PUBLIC_MEMORY_H__ */
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/physdev.h
--- a/include/xen/interface/physdev.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/physdev.h Fri Oct 16 16:24:14 2015 +0200
@@ -16,6 +16,8 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2006, Keir Fraser
*/
#ifndef __XEN_PUBLIC_PHYSDEV_H__
@@ -293,6 +295,11 @@ struct physdev_pci_device_add {
uint8_t bus;
uint8_t devfn;
} physfn;
+ /*
+ * Optional parameters array.
+ * First element ([0]) is PXM domain associated with the device (if
+ * XEN_PCI_DEV_PXM is set)
+ */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
uint32_t optarr[];
#elif defined(__GNUC__)
@@ -344,9 +351,11 @@ DEFINE_XEN_GUEST_HANDLE(physdev_dbgp_op_
*/
#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
+#if __XEN_INTERFACE_VERSION__ < 0x00040600
/*
* These all-capitals physdev operation names are superseded by the new names
- * (defined above) since interface version 0x00030202.
+ * (defined above) since interface version 0x00030202. The guard above was
+ * added post-4.5 only though and hence shouldn't check for 0x00030202.
*/
#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
@@ -357,6 +366,7 @@ DEFINE_XEN_GUEST_HANDLE(physdev_dbgp_op_
#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
+#endif
#if __XEN_INTERFACE_VERSION__ < 0x00040200
#define PHYSDEVOP_pirq_eoi_gmfn PHYSDEVOP_pirq_eoi_gmfn_v1
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/platform.h
--- a/include/xen/interface/platform.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/platform.h Fri Oct 16 16:24:14 2015 +0200
@@ -35,13 +35,28 @@
* Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
* 1 January, 1970 if the current system time was <system_time>.
*/
-#define XENPF_settime 17
-struct xenpf_settime {
+#define XENPF_settime32 17
+struct xenpf_settime32 {
/* IN variables. */
uint32_t secs;
uint32_t nsecs;
uint64_t system_time;
};
+#define XENPF_settime64 62
+struct xenpf_settime64 {
+ /* IN variables. */
+ uint64_t secs;
+ uint32_t nsecs;
+ uint32_t mbz;
+ uint64_t system_time;
+};
+#if __XEN_INTERFACE_VERSION__ < 0x00040600
+#define XENPF_settime XENPF_settime32
+#define xenpf_settime xenpf_settime32
+#else
+#define XENPF_settime XENPF_settime64
+#define xenpf_settime xenpf_settime64
+#endif
typedef struct xenpf_settime xenpf_settime_t;
DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t);
@@ -126,6 +141,26 @@ DEFINE_XEN_GUEST_HANDLE(xenpf_platform_q
#define XEN_EFI_query_variable_info 9
#define XEN_EFI_query_capsule_capabilities 10
#define XEN_EFI_update_capsule 11
+
+struct xenpf_efi_time {
+ uint16_t year;
+ uint8_t month;
+ uint8_t day;
+ uint8_t hour;
+ uint8_t min;
+ uint8_t sec;
+ uint32_t ns;
+ int16_t tz;
+ uint8_t daylight;
+};
+
+struct xenpf_efi_guid {
+ uint32_t data1;
+ uint16_t data2;
+ uint16_t data3;
+ uint8_t data4[8];
+};
+
struct xenpf_efi_runtime_call {
uint32_t function;
/*
@@ -138,17 +173,7 @@ struct xenpf_efi_runtime_call {
union {
#define XEN_EFI_GET_TIME_SET_CLEARS_NS 0x00000001
struct {
- struct xenpf_efi_time {
- uint16_t year;
- uint8_t month;
- uint8_t day;
- uint8_t hour;
- uint8_t min;
- uint8_t sec;
- uint32_t ns;
- int16_t tz;
- uint8_t daylight;
- } time;
+ struct xenpf_efi_time time;
uint32_t resolution;
uint32_t accuracy;
} get_time;
@@ -170,12 +195,7 @@ struct xenpf_efi_runtime_call {
XEN_GUEST_HANDLE(void) name; /* UCS-2/UTF-16 string */
xen_ulong_t size;
XEN_GUEST_HANDLE(void) data;
- struct xenpf_efi_guid {
- uint32_t data1;
- uint16_t data2;
- uint16_t data3;
- uint8_t data4[8];
- } vendor_guid;
+ struct xenpf_efi_guid vendor_guid;
} get_variable, set_variable;
struct {
@@ -540,6 +560,16 @@ DEFINE_XEN_GUEST_HANDLE(xenpf_core_parki
#define XEN_RESOURCE_OP_MSR_READ 0
#define XEN_RESOURCE_OP_MSR_WRITE 1
+/*
+ * Specially handled MSRs:
+ * - MSR_IA32_TSC
+ * READ: Returns the scaled system time(ns) instead of raw timestamp. In
+ * multiple entry case, if other MSR read is followed by a MSR_IA32_TSC
+ * read, then both reads are guaranteed to be performed atomically (with
+ * IRQ disabled). The return time indicates the point of reading that MSR.
+ * WRITE: Not supported.
+ */
+
struct xenpf_resource_entry {
union {
uint32_t cmd; /* IN: XEN_RESOURCE_OP_* */
@@ -560,6 +590,24 @@ struct xenpf_resource_op {
typedef struct xenpf_resource_op xenpf_resource_op_t;
DEFINE_XEN_GUEST_HANDLE(xenpf_resource_op_t);
+#define XENPF_get_symbol 63
+struct xenpf_symdata {
+ /* IN/OUT variables */
+ uint32_t namelen; /* IN: size of name buffer */
+ /* OUT: strlen(name) of hypervisor symbol (may be */
+ /* larger than what's been copied to guest) */
+ uint32_t symnum; /* IN: Symbol to read */
+ /* OUT: Next available symbol. If same as IN then */
+ /* we reached the end */
+
+ /* OUT variables */
+ XEN_GUEST_HANDLE(char) name;
+ uint64_t address;
+ char type;
+};
+typedef struct xenpf_symdata xenpf_symdata_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_symdata_t);
+
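For illustration only, the symnum-based iteration implied by the field comments above might be driven as in the sketch below, where do_platform_op() is a hypothetical stand-in for the XENPF_get_symbol call and the structure is reduced to a plain-pointer form:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reduced, illustration-only form of struct xenpf_symdata. */
struct symdata {
    uint32_t namelen;   /* IN: size of name buffer, OUT: strlen of symbol */
    uint32_t symnum;    /* IN: symbol to read, OUT: next available symbol */
    char *name;
    uint64_t address;
    char type;
};

/* Hypothetical stand-in for XENPF_get_symbol, backed by a canned table. */
static int do_platform_op(struct symdata *s)
{
    static const struct { const char *n; uint64_t a; char t; } tab[] = {
        { "hypercall_page", 0xffff82d080200000ULL, 'T' },
        { "idle_loop",      0xffff82d08027c000ULL, 't' },
    };
    uint32_t i = s->symnum;

    if (i >= sizeof(tab) / sizeof(tab[0]))
        return 0;                        /* end of table: symnum left unchanged */
    strncpy(s->name, tab[i].n, s->namelen);
    s->namelen = (uint32_t)strlen(tab[i].n);
    s->address = tab[i].a;
    s->type    = tab[i].t;
    s->symnum  = i + 1;                  /* next available symbol */
    return 0;
}

int main(void)
{
    char buf[64];
    struct symdata s = { .namelen = sizeof(buf), .symnum = 0, .name = buf };

    for (;;) {
        uint32_t prev = s.symnum;

        if (do_platform_op(&s) || s.symnum == prev)
            break;                       /* error, or unchanged symnum == end */
        printf("%016llx %c %s\n", (unsigned long long)s.address, s.type, buf);
        s.namelen = sizeof(buf);         /* restore the IN value for the next call */
    }
    return 0;
}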
/*
* ` enum neg_errnoval
* ` HYPERVISOR_platform_op(const struct xen_platform_op*);
@@ -569,6 +617,8 @@ struct xen_platform_op {
uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
union {
struct xenpf_settime settime;
+ struct xenpf_settime32 settime32;
+ struct xenpf_settime64 settime64;
struct xenpf_add_memtype add_memtype;
struct xenpf_del_memtype del_memtype;
struct xenpf_read_memtype read_memtype;
@@ -587,6 +637,7 @@ struct xen_platform_op {
struct xenpf_mem_hotadd mem_add;
struct xenpf_core_parking core_parking;
struct xenpf_resource_op resource_op;
+ struct xenpf_symdata symdata;
uint8_t pad[128];
} u;
};
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/pmu.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/include/xen/interface/pmu.h Fri Oct 16 16:24:14 2015 +0200
@@ -0,0 +1,133 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2015 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef __XEN_PUBLIC_PMU_H__
+#define __XEN_PUBLIC_PMU_H__
+
+#include "xen.h"
+#if defined(__i386__) || defined(__x86_64__)
+#include "arch-x86/pmu.h"
+#elif defined (__arm__) || defined (__aarch64__)
+#include "arch-arm.h"
+#else
+#error "Unsupported architecture"
+#endif
+
+#define XENPMU_VER_MAJ 0
+#define XENPMU_VER_MIN 1
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_xenpmu_op(enum xenpmu_op cmd, struct xenpmu_params *args);
+ *
+ * @cmd == XENPMU_* (PMU operation)
+ * @args == struct xenpmu_params
+ */
+/* ` enum xenpmu_op { */
+#define XENPMU_mode_get 0 /* Also used for getting PMU version */
+#define XENPMU_mode_set 1
+#define XENPMU_feature_get 2
+#define XENPMU_feature_set 3
+#define XENPMU_init 4
+#define XENPMU_finish 5
+#define XENPMU_lvtpc_set 6
+#define XENPMU_flush 7 /* Write cached MSR values to HW */
+/* ` } */
+
+/* Parameters structure for HYPERVISOR_xenpmu_op call */
+struct xen_pmu_params {
+ /* IN/OUT parameters */
+ struct {
+ uint32_t maj;
+ uint32_t min;
+ } version;
+ uint64_t val;
+
+ /* IN parameters */
+ uint32_t vcpu;
+ uint32_t pad;
+};
+typedef struct xen_pmu_params xen_pmu_params_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pmu_params_t);
+
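For illustration only, querying the interface version and current mode via XENPMU_mode_get might look like the sketch below, with xenpmu_op() as a hypothetical stand-in for HYPERVISOR_xenpmu_op():

#include <stdint.h>
#include <stdio.h>

#define XENPMU_VER_MAJ  0
#define XENPMU_VER_MIN  1
#define XENPMU_mode_get 0

/* Mirrors struct xen_pmu_params, reduced for illustration. */
struct pmu_params {
    struct { uint32_t maj, min; } version;
    uint64_t val;
    uint32_t vcpu;
    uint32_t pad;
};

/* Hypothetical stand-in for the hypercall; returns canned values. */
static int xenpmu_op(unsigned int cmd, struct pmu_params *p)
{
    (void)cmd;
    p->version.maj = XENPMU_VER_MAJ;
    p->version.min = XENPMU_VER_MIN;
    p->val = 1;                 /* pretend XENPMU_MODE_SELF */
    return 0;
}

int main(void)
{
    struct pmu_params p = { 0 };

    if (xenpmu_op(XENPMU_mode_get, &p) == 0)
        printf("vpmu interface %u.%u, mode %#llx\n",
               p.version.maj, p.version.min, (unsigned long long)p.val);
    return 0;
}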
+/* PMU modes:
+ * - XENPMU_MODE_OFF: No PMU virtualization
+ * - XENPMU_MODE_SELF: Guests can profile themselves
+ * - XENPMU_MODE_HV: Guests can profile themselves, dom0 profiles
+ * itself and Xen
+ * - XENPMU_MODE_ALL: Only dom0 has access to VPMU and it profiles
+ * everyone: itself, the hypervisor and the guests.
+ */
+#define XENPMU_MODE_OFF 0
+#define XENPMU_MODE_SELF (1<<0)
+#define XENPMU_MODE_HV (1<<1)
+#define XENPMU_MODE_ALL (1<<2)
+
+/*
+ * PMU features:
+ * - XENPMU_FEATURE_INTEL_BTS: Intel BTS support (ignored on AMD)
+ */
+#define XENPMU_FEATURE_INTEL_BTS 1
+
+/*
+ * Shared PMU data between hypervisor and PV(H) domains.
+ *
+ * The hypervisor fills out this structure during PMU interrupt and sends an
+ * interrupt to appropriate VCPU.
+ * Architecture-independent fields of xen_pmu_data are WO for the hypervisor
+ * and RO for the guest but some fields in xen_pmu_arch can be writable
+ * by both the hypervisor and the guest (see arch-$arch/pmu.h).
+ */
+struct xen_pmu_data {
+ /* Interrupted VCPU */
+ uint32_t vcpu_id;
+
+ /*
+ * Physical processor on which the interrupt occurred. On non-privileged
+ * guests set to vcpu_id;
+ */
+ uint32_t pcpu_id;
+
+ /*
+ * Domain that was interrupted. On non-privileged guests set to DOMID_SELF.
+ * On privileged guests can be DOMID_SELF, DOMID_XEN, or, when in
+ * XENPMU_MODE_ALL mode, domain ID of another domain.
+ */
+ domid_t domain_id;
+
+ uint8_t pad[6];
+
+ /* Architecture-specific information */
+ struct xen_pmu_arch pmu;
+};
+
+#endif /* __XEN_PUBLIC_PMU_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/sysctl.h
--- a/include/xen/interface/sysctl.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/sysctl.h Fri Oct 16 16:24:14 2015 +0200
@@ -33,8 +33,10 @@
#include "xen.h"
#include "domctl.h"
+#include "physdev.h"
+#include "tmem.h"
-#define XEN_SYSCTL_INTERFACE_VERSION 0x0000000B
+#define XEN_SYSCTL_INTERFACE_VERSION 0x0000000C
/*
* Read console content from Xen buffer ring.
@@ -462,61 +464,76 @@ struct xen_sysctl_lockprof_op {
typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t);
-/* XEN_SYSCTL_topologyinfo */
-#define INVALID_TOPOLOGY_ID (~0U)
-struct xen_sysctl_topologyinfo {
- /*
- * IN: maximum addressable entry in the caller-provided arrays.
- * OUT: largest cpu identifier in the system.
- * If OUT is greater than IN then the arrays are truncated!
- * If OUT is leass than IN then the array tails are not written by sysctl.
- */
- uint32_t max_cpu_index;
+/* XEN_SYSCTL_cputopoinfo */
+#define XEN_INVALID_CORE_ID (~0U)
+#define XEN_INVALID_SOCKET_ID (~0U)
+#define XEN_INVALID_NODE_ID (~0U)
+
+struct xen_sysctl_cputopo {
+ uint32_t core;
+ uint32_t socket;
+ uint32_t node;
+};
+typedef struct xen_sysctl_cputopo xen_sysctl_cputopo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopo_t);
+
+/*
+ * IN:
+ * - a NULL 'cputopo' handle is a request for maximum 'num_cpus'.
+ * - otherwise it's the number of entries in 'cputopo'
+ *
+ * OUT:
+ * - If 'num_cpus' is less than the number Xen wants to write but the handle
+ * is not a NULL one, partial data gets returned and 'num_cpus' gets
+ * updated to reflect the intended number.
+ * - Otherwise, 'num_cpus' shall indicate the number of entries written, which
+ * may be less than the input value.
+ */
+struct xen_sysctl_cputopoinfo {
+ uint32_t num_cpus;
+ XEN_GUEST_HANDLE_64(xen_sysctl_cputopo_t) cputopo;
+};
+typedef struct xen_sysctl_cputopoinfo xen_sysctl_cputopoinfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopoinfo_t);
+
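For illustration only, the two-call sizing pattern described above (a first call with a NULL handle to learn 'num_cpus', then a second call with a buffer) might be used as sketched below; sysctl_cputopoinfo() is a hypothetical stand-in for the real sysctl wrapper:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Reduced stand-ins for the structures introduced above. */
struct cputopo { uint32_t core, socket, node; };
struct cputopoinfo { uint32_t num_cpus; struct cputopo *cputopo; };

/* Hypothetical stand-in for the XEN_SYSCTL_cputopoinfo sysctl. */
static int sysctl_cputopoinfo(struct cputopoinfo *ti)
{
    if (ti->cputopo == NULL) { ti->num_cpus = 4; return 0; } /* sizing request */
    for (uint32_t i = 0; i < ti->num_cpus; i++)
        ti->cputopo[i] = (struct cputopo){ i % 2, i / 2, 0 };
    return 0;
}

int main(void)
{
    struct cputopoinfo ti = { .num_cpus = 0, .cputopo = NULL };

    /* First call: NULL handle asks for the required number of entries. */
    if (sysctl_cputopoinfo(&ti))
        return 1;

    ti.cputopo = calloc(ti.num_cpus, sizeof(*ti.cputopo));
    if (ti.cputopo == NULL)
        return 1;

    /* Second call: handle provided, num_cpus says how many entries fit. */
    if (sysctl_cputopoinfo(&ti) == 0)
        for (uint32_t i = 0; i < ti.num_cpus; i++)
            printf("cpu%u: core %u socket %u node %u\n", i,
                   ti.cputopo[i].core, ti.cputopo[i].socket, ti.cputopo[i].node);
    free(ti.cputopo);
    return 0;
}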
+/* XEN_SYSCTL_numainfo */
+#define XEN_INVALID_MEM_SZ (~0U)
+#define XEN_INVALID_NODE_DIST (~0U)
+
+struct xen_sysctl_meminfo {
+ uint64_t memsize;
+ uint64_t memfree;
+};
+typedef struct xen_sysctl_meminfo xen_sysctl_meminfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_meminfo_t);
+
+/*
+ * IN:
+ * - Both 'meminfo' and 'distance' handles being null is a request
+ * for maximum value of 'num_nodes'.
+ * - Otherwise it's the number of entries in 'meminfo' and square root
+ * of number of entries in 'distance' (when corresponding handle is
+ * non-null)
+ *
+ * OUT:
+ * - If 'num_nodes' is less than the number Xen wants to write but either
+ * handle is not a NULL one, partial data gets returned and 'num_nodes'
+ * gets updated to reflect the intended number.
+ * - Otherwise, 'num_nodes' shall indicate the number of entries written, which
+ * may be less than the input value.
+ */
+
+struct xen_sysctl_numainfo {
+ uint32_t num_nodes;
+
+ XEN_GUEST_HANDLE_64(xen_sysctl_meminfo_t) meminfo;
/*
- * If not NULL, these arrays are filled with core/socket/node identifier
- * for each cpu.
- * If a cpu has no core/socket/node information (e.g., cpu not present)
- * then the sentinel value ~0u is written to each array.
- * The number of array elements written by the sysctl is:
- * min(@max_cpu_index_IN,@max_cpu_index_OUT)+1
+ * Distance between nodes 'i' and 'j' is stored in index 'i*N + j',
+ * where N is the number of nodes that will be returned in 'num_nodes'
+ * (i.e. not 'num_nodes' provided by the caller)
*/
- XEN_GUEST_HANDLE_64(uint32) cpu_to_core;
- XEN_GUEST_HANDLE_64(uint32) cpu_to_socket;
- XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
-};
-typedef struct xen_sysctl_topologyinfo xen_sysctl_topologyinfo_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_topologyinfo_t);
-
-/* XEN_SYSCTL_numainfo */
-#define INVALID_NUMAINFO_ID (~0U)
-struct xen_sysctl_numainfo {
- /*
- * IN: maximum addressable entry in the caller-provided arrays.
- * OUT: largest node identifier in the system.
- * If OUT is greater than IN then the arrays are truncated!
- */
- uint32_t max_node_index;
-
- /* NB. Entries are 0 if node is not present. */
- XEN_GUEST_HANDLE_64(uint64) node_to_memsize;
- XEN_GUEST_HANDLE_64(uint64) node_to_memfree;
-
- /*
- * Array, of size (max_node_index+1)^2, listing memory access distances
- * between nodes. If an entry has no node distance information (e.g., node
- * not present) then the value ~0u is written.
- *
- * Note that the array rows must be indexed by multiplying by the minimum
- * of the caller-provided max_node_index and the returned value of
- * max_node_index. That is, if the largest node index in the system is
- * smaller than the caller can handle, a smaller 2-d array is constructed
- * within the space provided by the caller. When this occurs, trailing
- * space provided by the caller is not modified. If the largest node index
- * in the system is larger than the caller can handle, then a 2-d array of
- * the maximum size handleable by the caller is constructed.
- */
- XEN_GUEST_HANDLE_64(uint32) node_to_node_distance;
+ XEN_GUEST_HANDLE_64(uint32) distance;
};
typedef struct xen_sysctl_numainfo xen_sysctl_numainfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_numainfo_t);
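For illustration only, the 'i*N + j' indexing of the distance matrix documented above can be exercised with a small self-contained sketch using example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Example 2-node system: distances as they would land in 'distance'. */
    uint32_t num_nodes = 2;                 /* as returned in num_nodes */
    uint32_t distance[] = { 10, 21,
                            21, 10 };       /* row-major, N*N entries */

    for (uint32_t i = 0; i < num_nodes; i++)
        for (uint32_t j = 0; j < num_nodes; j++)
            printf("node%u -> node%u: %u\n", i, j,
                   distance[i * num_nodes + j]);   /* index 'i*N + j' */
    return 0;
}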
@@ -641,6 +658,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cover
/* The L3 cache size is returned in KB unit */
#define XEN_SYSCTL_PSR_CMT_get_l3_cache_size 2
#define XEN_SYSCTL_PSR_CMT_enabled 3
+#define XEN_SYSCTL_PSR_CMT_get_l3_event_mask 4
struct xen_sysctl_psr_cmt_op {
uint32_t cmd; /* IN: XEN_SYSCTL_PSR_CMT_* */
uint32_t flags; /* padding variable, may be extended for future use */
@@ -655,6 +673,97 @@ struct xen_sysctl_psr_cmt_op {
typedef struct xen_sysctl_psr_cmt_op xen_sysctl_psr_cmt_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cmt_op_t);
+/* XEN_SYSCTL_pcitopoinfo */
+#define XEN_INVALID_DEV (XEN_INVALID_NODE_ID - 1)
+struct xen_sysctl_pcitopoinfo {
+ /*
+ * IN: Number of elements in 'devs' and 'nodes' arrays.
+ * OUT: Number of processed elements of those arrays.
+ */
+ uint32_t num_devs;
+
+ /* IN: list of devices for which node IDs are requested. */
+ XEN_GUEST_HANDLE_64(physdev_pci_device_t) devs;
+
+ /*
+ * OUT: node identifier for each device.
+ * If information for a particular device is not available then
+ * corresponding entry will be set to XEN_INVALID_NODE_ID. If
+ * device is not known to the hypervisor then XEN_INVALID_DEV
+ * will be provided.
+ */
+ XEN_GUEST_HANDLE_64(uint32) nodes;
+};
+typedef struct xen_sysctl_pcitopoinfo xen_sysctl_pcitopoinfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_pcitopoinfo_t);
+
+#define XEN_SYSCTL_PSR_CAT_get_l3_info 0
+struct xen_sysctl_psr_cat_op {
+ uint32_t cmd; /* IN: XEN_SYSCTL_PSR_CAT_* */
+ uint32_t target; /* IN */
+ union {
+ struct {
+ uint32_t cbm_len; /* OUT: CBM length */
+ uint32_t cos_max; /* OUT: Maximum COS */
+ } l3_info;
+ } u;
+};
+typedef struct xen_sysctl_psr_cat_op xen_sysctl_psr_cat_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
+
+#define XEN_SYSCTL_TMEM_OP_ALL_CLIENTS 0xFFFFU
+
+#define XEN_SYSCTL_TMEM_OP_THAW 0
+#define XEN_SYSCTL_TMEM_OP_FREEZE 1
+#define XEN_SYSCTL_TMEM_OP_FLUSH 2
+#define XEN_SYSCTL_TMEM_OP_DESTROY 3
+#define XEN_SYSCTL_TMEM_OP_LIST 4
+#define XEN_SYSCTL_TMEM_OP_SET_WEIGHT 5
+#define XEN_SYSCTL_TMEM_OP_SET_CAP 6
+#define XEN_SYSCTL_TMEM_OP_SET_COMPRESS 7
+#define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB 8
+#define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN 10
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION 11
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS 12
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT 13
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_CAP 14
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS 15
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS 16
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES 17
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID 18
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE 19
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV 20
+#define XEN_SYSCTL_TMEM_OP_SAVE_END 21
+#define XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN 30
+#define XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE 32
+#define XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE 33
+
+/*
+ * XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_[PAGE|INV] override the 'buf' in
+ * xen_sysctl_tmem_op with this structure - sometimes with an extra
+ * page tacked on.
+ */
+struct tmem_handle {
+ uint32_t pool_id;
+ uint32_t index;
+ xen_tmem_oid_t oid;
+};
+
+struct xen_sysctl_tmem_op {
+ uint32_t cmd; /* IN: XEN_SYSCTL_TMEM_OP_* . */
+ int32_t pool_id; /* IN: 0 by default unless _SAVE_*, RESTORE_* .*/
+ uint32_t cli_id; /* IN: client id, 0 for XEN_SYSCTL_TMEM_QUERY_FREEABLE_MB
+ for all others can be the domain id or
+ XEN_SYSCTL_TMEM_OP_ALL_CLIENTS for all. */
+ uint32_t arg1; /* IN: If not applicable to command use 0. */
+ uint32_t arg2; /* IN: If not applicable to command use 0. */
+ uint32_t pad; /* Padding so structure is the same under 32 and 64. */
+ xen_tmem_oid_t oid; /* IN: If not applicable to command use 0s. */
+ XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save and restore ops. */
+};
+typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tmem_op_t);
+
struct xen_sysctl {
uint32_t cmd;
#define XEN_SYSCTL_readconsole 1
@@ -671,18 +780,22 @@ struct xen_sysctl {
#define XEN_SYSCTL_pm_op 12
#define XEN_SYSCTL_page_offline_op 14
#define XEN_SYSCTL_lockprof_op 15
-#define XEN_SYSCTL_topologyinfo 16
+#define XEN_SYSCTL_cputopoinfo 16
#define XEN_SYSCTL_numainfo 17
#define XEN_SYSCTL_cpupool_op 18
#define XEN_SYSCTL_scheduler_op 19
#define XEN_SYSCTL_coverage_op 20
#define XEN_SYSCTL_psr_cmt_op 21
+#define XEN_SYSCTL_pcitopoinfo 22
+#define XEN_SYSCTL_psr_cat_op 23
+#define XEN_SYSCTL_tmem_op 24
uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
union {
struct xen_sysctl_readconsole readconsole;
struct xen_sysctl_tbuf_op tbuf_op;
struct xen_sysctl_physinfo physinfo;
- struct xen_sysctl_topologyinfo topologyinfo;
+ struct xen_sysctl_cputopoinfo cputopoinfo;
+ struct xen_sysctl_pcitopoinfo pcitopoinfo;
struct xen_sysctl_numainfo numainfo;
struct xen_sysctl_sched_id sched_id;
struct xen_sysctl_perfc_op perfc_op;
@@ -699,6 +812,8 @@ struct xen_sysctl {
struct xen_sysctl_scheduler_op scheduler_op;
struct xen_sysctl_coverage_op coverage_op;
struct xen_sysctl_psr_cmt_op psr_cmt_op;
+ struct xen_sysctl_psr_cat_op psr_cat_op;
+ struct xen_sysctl_tmem_op tmem_op;
uint8_t pad[128];
} u;
};
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/tmem.h
--- a/include/xen/interface/tmem.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/tmem.h Fri Oct 16 16:24:14 2015 +0200
@@ -33,7 +33,11 @@
#define TMEM_SPEC_VERSION 1
/* Commands to HYPERVISOR_tmem_op() */
-#define TMEM_CONTROL 0
+#ifdef __XEN__
+#define TMEM_CONTROL 0 /* Now called XEN_SYSCTL_tmem_op */
+#else
+#undef TMEM_CONTROL
+#endif
#define TMEM_NEW_POOL 1
#define TMEM_DESTROY_POOL 2
#define TMEM_PUT_PAGE 4
@@ -48,35 +52,9 @@
#endif
/* Privileged commands to HYPERVISOR_tmem_op() */
-#define TMEM_AUTH 101
+#define TMEM_AUTH 101
#define TMEM_RESTORE_NEW 102
-/* Subops for HYPERVISOR_tmem_op(TMEM_CONTROL) */
-#define TMEMC_THAW 0
-#define TMEMC_FREEZE 1
-#define TMEMC_FLUSH 2
-#define TMEMC_DESTROY 3
-#define TMEMC_LIST 4
-#define TMEMC_SET_WEIGHT 5
-#define TMEMC_SET_CAP 6
-#define TMEMC_SET_COMPRESS 7
-#define TMEMC_QUERY_FREEABLE_MB 8
-#define TMEMC_SAVE_BEGIN 10
-#define TMEMC_SAVE_GET_VERSION 11
-#define TMEMC_SAVE_GET_MAXPOOLS 12
-#define TMEMC_SAVE_GET_CLIENT_WEIGHT 13
-#define TMEMC_SAVE_GET_CLIENT_CAP 14
-#define TMEMC_SAVE_GET_CLIENT_FLAGS 15
-#define TMEMC_SAVE_GET_POOL_FLAGS 16
-#define TMEMC_SAVE_GET_POOL_NPAGES 17
-#define TMEMC_SAVE_GET_POOL_UUID 18
-#define TMEMC_SAVE_GET_NEXT_PAGE 19
-#define TMEMC_SAVE_GET_NEXT_INV 20
-#define TMEMC_SAVE_END 21
-#define TMEMC_RESTORE_BEGIN 30
-#define TMEMC_RESTORE_PUT_PAGE 32
-#define TMEMC_RESTORE_FLUSH_PAGE 33
-
/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST 1
#define TMEM_POOL_SHARED 2
@@ -95,6 +73,11 @@
#define EFROZEN 1000
#define EEMPTY 1001
+struct xen_tmem_oid {
+ uint64_t oid[3];
+};
+typedef struct xen_tmem_oid xen_tmem_oid_t;
+DEFINE_XEN_GUEST_HANDLE(xen_tmem_oid_t);
#ifndef __ASSEMBLY__
#if __XEN_INTERFACE_VERSION__ < 0x00040400
@@ -110,17 +93,12 @@ struct tmem_op {
uint32_t flags;
uint32_t arg1;
} creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH, TMEM_RESTORE_NEW */
- struct {
- uint32_t subop;
- uint32_t cli_id;
- uint32_t arg1;
- uint32_t arg2;
+ struct {
+#if __XEN_INTERFACE_VERSION__ < 0x00040600
uint64_t oid[3];
- tmem_cli_va_t buf;
- } ctrl; /* for cmd == TMEM_CONTROL */
- struct {
-
- uint64_t oid[3];
+#else
+ xen_tmem_oid_t oid;
+#endif
uint32_t index;
uint32_t tmem_offset;
uint32_t pfn_offset;
@@ -131,12 +109,6 @@ struct tmem_op {
};
typedef struct tmem_op tmem_op_t;
DEFINE_XEN_GUEST_HANDLE(tmem_op_t);
-
-struct tmem_handle {
- uint32_t pool_id;
- uint32_t index;
- uint64_t oid[3];
-};
#endif
#endif /* __XEN_PUBLIC_TMEM_H__ */
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/trace.h
--- a/include/xen/interface/trace.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/trace.h Fri Oct 16 16:24:14 2015 +0200
@@ -75,7 +75,7 @@
/* Per-scheduler IDs, to identify scheduler specific events */
#define TRC_SCHED_CSCHED 0
#define TRC_SCHED_CSCHED2 1
-#define TRC_SCHED_SEDF 2
+/* #define XEN_SCHEDULER_SEDF 2 (Removed) */
#define TRC_SCHED_ARINC653 3
#define TRC_SCHED_RTDS 4
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/vcpu.h
--- a/include/xen/interface/vcpu.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/vcpu.h Fri Oct 16 16:24:14 2015 +0200
@@ -31,7 +31,7 @@
/*
* Prototype for this hypercall is:
- * int vcpu_op(int cmd, int vcpuid, void *extra_args)
+ * long vcpu_op(int cmd, unsigned int vcpuid, void *extra_args)
* @cmd == VCPUOP_??? (VCPU operation).
* @vcpuid == VCPU to operate on.
* @extra_args == Operation-specific extra arguments (NULL if none).
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/vm_event.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/include/xen/interface/vm_event.h Fri Oct 16 16:24:14 2015 +0200
@@ -0,0 +1,269 @@
+/******************************************************************************
+ * vm_event.h
+ *
+ * Memory event common structures.
+ *
+ * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _XEN_PUBLIC_VM_EVENT_H
+#define _XEN_PUBLIC_VM_EVENT_H
+
+#include "xen.h"
+
+#define VM_EVENT_INTERFACE_VERSION 0x00000001
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+#include "io/ring.h"
+
+/*
+ * Memory event flags
+ */
+
+/*
+ * VCPU_PAUSED in a request signals that the vCPU triggering the event has been
+ * paused
+ * VCPU_PAUSED in a response signals to unpause the vCPU
+ */
+#define VM_EVENT_FLAG_VCPU_PAUSED (1 << 0)
+/* Flags to aid debugging vm_event */
+#define VM_EVENT_FLAG_FOREIGN (1 << 1)
+/*
+ * The following flags can be set in response to a mem_access event.
+ *
+ * Emulate the fault-causing instruction (if set in the event response flags).
+ * This will allow the guest to continue execution without lifting the page
+ * access restrictions.
+ */
+#define VM_EVENT_FLAG_EMULATE (1 << 2)
+/*
+ * Same as VM_EVENT_FLAG_EMULATE, but with write operations or operations
+ * potentially having side effects (like memory mapped or port I/O) disabled.
+ */
+#define VM_EVENT_FLAG_EMULATE_NOWRITE (1 << 3)
+/*
+ * Toggle singlestepping on vm_event response.
+ * Requires the vCPU to be paused already (synchronous events only).
+ */
+#define VM_EVENT_FLAG_TOGGLE_SINGLESTEP (1 << 4)
+/*
+ * Data is being sent back to the hypervisor in the event response, to be
+ * returned by the read function when emulating an instruction.
+ * This flag is only useful when combined with VM_EVENT_FLAG_EMULATE
+ * and takes precedence if combined with VM_EVENT_FLAG_EMULATE_NOWRITE
+ * (i.e. if both VM_EVENT_FLAG_EMULATE_NOWRITE and
+ * VM_EVENT_FLAG_SET_EMUL_READ_DATA are set, only the latter will be honored).
+ */
+#define VM_EVENT_FLAG_SET_EMUL_READ_DATA (1 << 5)
+ /*
+ * Deny completion of the operation that triggered the event.
+ * Currently only useful for MSR, CR0, CR3 and CR4 write events.
+ */
+#define VM_EVENT_FLAG_DENY (1 << 6)
+/*
+ * This flag can be set in a request or a response
+ *
+ * On a request, indicates that the event occurred in the alternate p2m specified by
+ * the altp2m_idx request field.
+ *
+ * On a response, indicates that the VCPU should resume in the alternate p2m specified
+ * by the altp2m_idx response field if possible.
+ */
+#define VM_EVENT_FLAG_ALTERNATE_P2M (1 << 7)
+
+/*
+ * Reasons for the vm event request
+ */
+
+/* Default case */
+#define VM_EVENT_REASON_UNKNOWN 0
+/* Memory access violation */
+#define VM_EVENT_REASON_MEM_ACCESS 1
+/* Memory sharing event */
+#define VM_EVENT_REASON_MEM_SHARING 2
+/* Memory paging event */
+#define VM_EVENT_REASON_MEM_PAGING 3
+/* A control register was updated */
+#define VM_EVENT_REASON_WRITE_CTRLREG 4
+/* An MSR was updated. */
+#define VM_EVENT_REASON_MOV_TO_MSR 5
+/* Debug operation executed (e.g. int3) */
+#define VM_EVENT_REASON_SOFTWARE_BREAKPOINT 6
+/* Single-step (e.g. MTF) */
+#define VM_EVENT_REASON_SINGLESTEP 7
+/* An event has been requested via HVMOP_guest_request_vm_event. */
+#define VM_EVENT_REASON_GUEST_REQUEST 8
+
+/* Supported values for the vm_event_write_ctrlreg index. */
+#define VM_EVENT_X86_CR0 0
+#define VM_EVENT_X86_CR3 1
+#define VM_EVENT_X86_CR4 2
+#define VM_EVENT_X86_XCR0 3
+
+/*
+ * Using a custom struct (not hvm_hw_cpu) so as to not fill
+ * the vm_event ring buffer too quickly.
+ */
+struct vm_event_regs_x86 {
+ uint64_t rax;
+ uint64_t rcx;
+ uint64_t rdx;
+ uint64_t rbx;
+ uint64_t rsp;
+ uint64_t rbp;
+ uint64_t rsi;
+ uint64_t rdi;
+ uint64_t r8;
+ uint64_t r9;
+ uint64_t r10;
+ uint64_t r11;
+ uint64_t r12;
+ uint64_t r13;
+ uint64_t r14;
+ uint64_t r15;
+ uint64_t rflags;
+ uint64_t dr7;
+ uint64_t rip;
+ uint64_t cr0;
+ uint64_t cr2;
+ uint64_t cr3;
+ uint64_t cr4;
+ uint64_t sysenter_cs;
+ uint64_t sysenter_esp;
+ uint64_t sysenter_eip;
+ uint64_t msr_efer;
+ uint64_t msr_star;
+ uint64_t msr_lstar;
+ uint64_t fs_base;
+ uint64_t gs_base;
+ uint32_t cs_arbytes;
+ uint32_t _pad;
+};
+
+/*
+ * mem_access flag definitions
+ *
+ * These flags are set only as part of a mem_event request.
+ *
+ * R/W/X: Defines the type of violation that has triggered the event
+ * Multiple types can be set in a single violation!
+ * GLA_VALID: If the gla field holds a guest VA associated with the event
+ * FAULT_WITH_GLA: If the violation was triggered by accessing gla
+ * FAULT_IN_GPT: If the violation was triggered during translating gla
+ */
+#define MEM_ACCESS_R (1 << 0)
+#define MEM_ACCESS_W (1 << 1)
+#define MEM_ACCESS_X (1 << 2)
+#define MEM_ACCESS_RWX (MEM_ACCESS_R | MEM_ACCESS_W | MEM_ACCESS_X)
+#define MEM_ACCESS_RW (MEM_ACCESS_R | MEM_ACCESS_W)
+#define MEM_ACCESS_RX (MEM_ACCESS_R | MEM_ACCESS_X)
+#define MEM_ACCESS_WX (MEM_ACCESS_W | MEM_ACCESS_X)
+#define MEM_ACCESS_GLA_VALID (1 << 3)
+#define MEM_ACCESS_FAULT_WITH_GLA (1 << 4)
+#define MEM_ACCESS_FAULT_IN_GPT (1 << 5)
+
+struct vm_event_mem_access {
+ uint64_t gfn;
+ uint64_t offset;
+ uint64_t gla; /* if flags has MEM_ACCESS_GLA_VALID set */
+ uint32_t flags; /* MEM_ACCESS_* */
+ uint32_t _pad;
+};
+
+struct vm_event_write_ctrlreg {
+ uint32_t index;
+ uint32_t _pad;
+ uint64_t new_value;
+ uint64_t old_value;
+};
+
+struct vm_event_debug {
+ uint64_t gfn;
+};
+
+struct vm_event_mov_to_msr {
+ uint64_t msr;
+ uint64_t value;
+};
+
+#define MEM_PAGING_DROP_PAGE (1 << 0)
+#define MEM_PAGING_EVICT_FAIL (1 << 1)
+
+struct vm_event_paging {
+ uint64_t gfn;
+ uint32_t p2mt;
+ uint32_t flags;
+};
+
+struct vm_event_sharing {
+ uint64_t gfn;
+ uint32_t p2mt;
+ uint32_t _pad;
+};
+
+struct vm_event_emul_read_data {
+ uint32_t size;
+ /* The struct is used in a union with vm_event_regs_x86. */
+ uint8_t data[sizeof(struct vm_event_regs_x86) - sizeof(uint32_t)];
+};
+
+typedef struct vm_event_st {
+ uint32_t version; /* VM_EVENT_INTERFACE_VERSION */
+ uint32_t flags; /* VM_EVENT_FLAG_* */
+ uint32_t reason; /* VM_EVENT_REASON_* */
+ uint32_t vcpu_id;
+ uint16_t altp2m_idx; /* may be used during request and response */
+ uint16_t _pad[3];
+
+ union {
+ struct vm_event_paging mem_paging;
+ struct vm_event_sharing mem_sharing;
+ struct vm_event_mem_access mem_access;
+ struct vm_event_write_ctrlreg write_ctrlreg;
+ struct vm_event_mov_to_msr mov_to_msr;
+ struct vm_event_debug software_breakpoint;
+ struct vm_event_debug singlestep;
+ } u;
+
+ union {
+ union {
+ struct vm_event_regs_x86 x86;
+ } regs;
+
+ struct vm_event_emul_read_data emul_read_data;
+ } data;
+} vm_event_request_t, vm_event_response_t;
+
+DEFINE_RING_TYPES(vm_event, vm_event_request_t, vm_event_response_t);
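For illustration only, a monitor application replying to a paused vCPU would set VM_EVENT_FLAG_VCPU_PAUSED in its response, roughly as in the reduced sketch below (ring plumbing omitted, structure trimmed to the fields used):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VM_EVENT_INTERFACE_VERSION  0x00000001
#define VM_EVENT_FLAG_VCPU_PAUSED   (1 << 0)
#define VM_EVENT_REASON_MEM_ACCESS  1

/* Reduced, illustration-only form of vm_event_request/response_t. */
struct vm_event {
    uint32_t version, flags, reason, vcpu_id;
};

int main(void)
{
    struct vm_event req = {
        .version = VM_EVENT_INTERFACE_VERSION,
        .flags   = VM_EVENT_FLAG_VCPU_PAUSED,   /* vCPU is paused for us */
        .reason  = VM_EVENT_REASON_MEM_ACCESS,
        .vcpu_id = 0,
    }, rsp;

    /* A monitor typically echoes version/vcpu_id/reason back... */
    memset(&rsp, 0, sizeof(rsp));
    rsp.version = req.version;
    rsp.vcpu_id = req.vcpu_id;
    rsp.reason  = req.reason;
    /* ...and sets VCPU_PAUSED in the response to ask Xen to unpause. */
    rsp.flags   = VM_EVENT_FLAG_VCPU_PAUSED;

    printf("response flags %#x for vcpu %u\n", rsp.flags, rsp.vcpu_id);
    return 0;
}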
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+#endif /* _XEN_PUBLIC_VM_EVENT_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/xen-compat.h
--- a/include/xen/interface/xen-compat.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/xen-compat.h Fri Oct 16 16:24:14 2015 +0200
@@ -27,7 +27,7 @@
#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
#define __XEN_PUBLIC_XEN_COMPAT_H__
-#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040500
+#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040600
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* Xen is built with matching headers and implements the latest interface. */
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/xen.h
--- a/include/xen/interface/xen.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/xen.h Fri Oct 16 16:24:14 2015 +0200
@@ -103,6 +103,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
#define __HYPERVISOR_kexec_op 37
#define __HYPERVISOR_tmem_op 38
#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
+#define __HYPERVISOR_xenpmu_op 40
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
@@ -162,6 +163,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occurred */
#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */
#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
+#define VIRQ_XENPMU 13 /* V. PMC interrupt */
/* Architecture-specific VIRQ definitions. */
#define VIRQ_ARCH_0 16
@@ -488,7 +490,21 @@ DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
/* x86/PAE guests: support PDPTs above 4GB. */
#define VMASST_TYPE_pae_extended_cr3 3
+/*
+ * x86/64 guests: strictly hide M2P from user mode.
+ * This allows the guest to control respective hypervisor behavior:
+ * - when not set, L4 tables get created with the respective slot blank,
+ * and whenever the L4 table gets used as a kernel one the missing
+ * mapping gets inserted,
+ * - when set, L4 tables get created with the respective slot initialized
+ * as before, and whenever the L4 table gets used as a user one the
+ * mapping gets zapped.
+ */
+#define VMASST_TYPE_m2p_strict 32
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040600
#define MAX_VMASST_TYPE 3
+#endif
#ifndef __ASSEMBLY__
@@ -684,6 +700,12 @@ struct shared_info {
uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
+#if !defined(__i386__)
+ uint32_t wc_sec_hi;
+# define xen_wc_sec_hi wc_sec_hi
+#elif !defined(__XEN__) && !defined(__XEN_TOOLS__)
+# define xen_wc_sec_hi arch.wc_sec_hi
+#endif
struct arch_shared_info arch;
@@ -700,24 +722,27 @@ typedef struct shared_info shared_info_t
* 3. This the order of bootstrap elements in the initial virtual region:
* a. relocated kernel image
* b. initial ram disk [mod_start, mod_len]
+ * (may be omitted)
* c. list of allocated page frames [mfn_list, nr_pages]
* (unless relocated due to XEN_ELFNOTE_INIT_P2M)
* d. start_info_t structure [register ESI (x86)]
- * e. bootstrap page tables [pt_base and CR3 (x86)]
- * f. bootstrap stack [register ESP (x86)]
+ * in case of dom0 this page contains the console info, too
+ * e. unless dom0: xenstore ring page
+ * f. unless dom0: console ring page
+ * g. bootstrap page tables [pt_base and CR3 (x86)]
+ * h. bootstrap stack [register ESP (x86)]
* 4. Bootstrap elements are packed together, but each is 4kB-aligned.
- * 5. The initial ram disk may be omitted.
- * 6. The list of page frames forms a contiguous 'pseudo-physical' memory
+ * 5. The list of page frames forms a contiguous 'pseudo-physical' memory
* layout for the domain. In particular, the bootstrap virtual-memory
* region is a 1:1 mapping to the first section of the pseudo-physical map.
- * 7. All bootstrap elements are mapped read-writable for the guest OS. The
+ * 6. All bootstrap elements are mapped read-writable for the guest OS. The
* only exception is the bootstrap page table, which is mapped read-only.
- * 8. There is guaranteed to be at least 512kB padding after the final
+ * 7. There is guaranteed to be at least 512kB padding after the final
* bootstrap element. If necessary, the bootstrap virtual region is
* extended by an extra 4MB to ensure this.
*
* Note: Prior to 25833:bb85bbccb1c9. ("x86/32-on-64 adjust Dom0 initial page
- * table layout") a bug caused the pt_base (3.e above) and cr3 to not point
+ * table layout") a bug caused the pt_base (3.g above) and cr3 to not point
* to the start of the guest page tables (it was offset by two pages).
* This only manifested itself on 32-on-64 dom0 kernels and not 32-on-64 domU
* or 64-bit kernels of any colour. The page tables for a 32-on-64 dom0 got
@@ -773,6 +798,8 @@ typedef struct start_info start_info_t;
#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
+#define SIF_VIRT_P2M_4TOOLS (1<<4) /* Do Xen tools understand a virt. mapped */
+ /* P->M making the 3 level tree obsolete? */
#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
/*
@@ -872,6 +899,9 @@ __DEFINE_XEN_GUEST_HANDLE(uint64, uint64
/* Default definitions for macros used by domctl/sysctl. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
+#ifndef int64_aligned_t
+#define int64_aligned_t int64_t
+#endif
#ifndef uint64_aligned_t
#define uint64_aligned_t uint64_t
#endif
diff -r 3f51e783c26a -r 288d236ca763 include/xen/interface/xsm/flask_op.h
--- a/include/xen/interface/xsm/flask_op.h Fri Oct 16 15:57:20 2015 +0200
+++ b/include/xen/interface/xsm/flask_op.h Fri Oct 16 16:24:14 2015 +0200
@@ -25,6 +25,8 @@
#ifndef __FLASK_OP_H__
#define __FLASK_OP_H__
+#include "../event_channel.h"
+
#define XEN_FLASK_INTERFACE_VERSION 1
struct xen_flask_load {
@@ -148,6 +150,13 @@ struct xen_flask_relabel {
uint32_t sid;
};
+struct xen_flask_devicetree_label {
+ /* IN */
+ uint32_t sid;
+ uint32_t length;
+ XEN_GUEST_HANDLE(char) path;
+};
+
struct xen_flask_op {
uint32_t cmd;
#define FLASK_LOAD 1
@@ -174,6 +183,7 @@ struct xen_flask_op {
#define FLASK_DEL_OCONTEXT 22
#define FLASK_GET_PEER_SID 23
#define FLASK_RELABEL_DOMAIN 24
+#define FLASK_DEVICETREE_LABEL 25
uint32_t interface_version; /* XEN_FLASK_INTERFACE_VERSION */
union {
struct xen_flask_load load;
@@ -193,6 +203,7 @@ struct xen_flask_op {
struct xen_flask_ocontext ocontext;
struct xen_flask_peersid peersid;
struct xen_flask_relabel relabel;
+ struct xen_flask_devicetree_label devicetree_label;
} u;
};
typedef struct xen_flask_op xen_flask_op_t;
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog