[Xen-devel] [PATCH v6 05/12] x86/domctl: Handle ACPI access from domctl
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
Changes in v6:
* Adjustments to patch 4 changes.
* Added a spinlock for VCPU map access
* Return an error on guest trying to write VCPU map
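For context, a minimal toolstack-side sketch of how this new domctl path might
be exercised (the field names and constants follow this series; the surrounding
caller, the max_vcpus variable and any xc_domctl()-style wrapper are
assumptions for illustration only, not part of this patch):

/*
 * Hedged sketch only: describes a write to the VCPU availability map
 * through the new domctl path.
 */
struct xen_domctl_acpi_access access = {
    .rw       = XEN_DOMCTL_ACPI_WRITE,  /* writes are domctl-only; guest I/O
                                           writes get X86EMUL_UNHANDLEABLE */
    .space_id = XEN_ACPI_SYSTEM_IO,     /* the only space handled by
                                           hvm_acpi_domctl_access() */
    .address  = XEN_ACPI_CPU_MAP,       /* must fall inside the VCPU map */
    .width    = (max_vcpus + 7) / 8,    /* one bit per VCPU, rounded up */
};
/* access.val would point at the new map; the handler copies it in 4-byte
 * chunks and updates d->avail_vcpus under acpi_lock. */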
xen/arch/x86/hvm/acpi.c | 57 +++++++++++++++++++++++++++++++++++-----
xen/include/asm-x86/hvm/domain.h | 1 +
2 files changed, 52 insertions(+), 6 deletions(-)
diff --git a/xen/arch/x86/hvm/acpi.c b/xen/arch/x86/hvm/acpi.c
index f0a84f9..9f0578e 100644
--- a/xen/arch/x86/hvm/acpi.c
+++ b/xen/arch/x86/hvm/acpi.c
@@ -7,17 +7,22 @@
#include <xen/lib.h>
#include <xen/sched.h>
+#include <asm/guest_access.h>
+
#include <public/arch-x86/xen.h>
-static int acpi_cpumap_access_common(struct domain *d, bool is_write,
- unsigned int port,
+static int acpi_cpumap_access_common(struct domain *d, bool is_guest_access,
+ bool is_write, unsigned int port,
unsigned int bytes, uint32_t *val)
{
unsigned int first_byte = port - XEN_ACPI_CPU_MAP;
+ int rc = X86EMUL_OKAY;
BUILD_BUG_ON(XEN_ACPI_CPU_MAP + XEN_ACPI_CPU_MAP_LEN
> ACPI_GPE0_BLK_ADDRESS_V1);
+ spin_lock(&d->arch.hvm_domain.acpi_lock);
+
if ( !is_write )
{
uint32_t mask = (bytes < 4) ? ~0U << (bytes * 8) : 0;
@@ -32,23 +37,61 @@ static int acpi_cpumap_access_common(struct domain *d, bool is_write,
memcpy(val, (uint8_t *)d->avail_vcpus + first_byte,
min(bytes, ((d->max_vcpus + 7) / 8) - first_byte));
}
+ else if ( !is_guest_access )
+ memcpy((uint8_t *)d->avail_vcpus + first_byte, val,
+ min(bytes, ((d->max_vcpus + 7) / 8) - first_byte));
else
/* Guests do not write CPU map */
- return X86EMUL_UNHANDLEABLE;
+ rc = X86EMUL_UNHANDLEABLE;
- return X86EMUL_OKAY;
+ spin_unlock(&d->arch.hvm_domain.acpi_lock);
+
+ return rc;
}
int hvm_acpi_domctl_access(struct domain *d,
const struct xen_domctl_acpi_access *access)
{
- return -ENOSYS;
+ unsigned int bytes, i;
+ uint32_t val = 0;
+ uint8_t *ptr = (uint8_t *)&val;
+ int rc;
+ bool is_write = (access->rw == XEN_DOMCTL_ACPI_WRITE) ? true : false;
+
+ if ( has_acpi_dm_ff(d) )
+ return -EOPNOTSUPP;
+
+ if ( access->space_id != XEN_ACPI_SYSTEM_IO )
+ return -EINVAL;
+
+ if ( !((access->address >= XEN_ACPI_CPU_MAP) &&
+ (access->address < XEN_ACPI_CPU_MAP + XEN_ACPI_CPU_MAP_LEN)) )
+ return -ENODEV;
+
+ for ( i = 0; i < access->width; i += sizeof(val) )
+ {
+ bytes = (access->width - i > sizeof(val)) ?
+ sizeof(val) : access->width - i;
+
+ if ( is_write && copy_from_guest_offset(ptr, access->val, i, bytes) )
+ return -EFAULT;
+
+ rc = acpi_cpumap_access_common(d, false, is_write,
+ access->address, bytes, &val);
+ if ( rc )
+ return rc;
+
+ if ( !is_write && copy_to_guest_offset(access->val, i, ptr, bytes) )
+ return -EFAULT;
+ }
+
+ return 0;
}
static int acpi_cpumap_guest_access(int dir, unsigned int port,
unsigned int bytes, uint32_t *val)
{
- return acpi_cpumap_access_common(current->domain,
+ return acpi_cpumap_access_common(current->domain, true,
(dir == IOREQ_WRITE) ? true : false,
port, bytes, val);
}
@@ -148,6 +191,8 @@ void hvm_acpi_init(struct domain *d)
sizeof(d->arch.hvm_domain.acpi.pm1a_sts) +
sizeof(d->arch.hvm_domain.acpi.pm1a_en),
acpi_guest_access);
+
+ spin_lock_init(&d->arch.hvm_domain.acpi_lock);
}
/*
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 07815b6..438ea12 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -111,6 +111,7 @@ struct hvm_domain {
*/
#define hvm_hw_acpi hvm_hw_pmtimer
struct hvm_hw_acpi acpi;
+ spinlock_t acpi_lock;
/* VCPU which is current target for 8259 interrupts. */
struct vcpu *i8259_target;
--
2.7.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel