[Xen-devel] [PATCH 1/3] libx86: Introduce a helper to deserialise cpuid_policy objects
As with the serialise side, Xen's copy_from_guest API is used, with a
compatibility wrapper for the userspace build.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
---
 xen/include/xen/lib/x86/cpuid.h |  21 ++++++++
 xen/lib/x86/cpuid.c             | 106 ++++++++++++++++++++++++++++++++++++++++
 xen/lib/x86/private.h           |  14 ++++++
 3 files changed, 141 insertions(+)
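
Not part of the patch, purely for illustration: a minimal sketch of how a userspace consumer (where cpuid_leaf_buffer_t is a plain array rather than a guest handle) might round-trip a policy through the two helpers. The roundtrip_policy() wrapper, the include paths and the assumption that x86_cpuid_copy_to_buffer() updates *nr_entries to the number of leaves actually written are illustrative only; the -ERANGE/err_* behaviour follows the documentation added below.

/* Illustrative sketch only -- not part of this patch. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#include <xen/lib/x86/cpuid.h>

static int roundtrip_policy(const struct cpuid_policy *src,
                            struct cpuid_policy *dst)
{
    xen_cpuid_leaf_t *leaves;
    uint32_t nr = CPUID_MAX_SERIALISED_LEAVES;
    uint32_t err_leaf = ~0u, err_subleaf = ~0u;
    int rc;

    leaves = calloc(nr, sizeof(*leaves));
    if ( !leaves )
        return -ENOMEM;

    /* Serialise 'src' into the flat leaf array. */
    rc = x86_cpuid_copy_to_buffer(src, leaves, &nr);

    /* Deserialise the same array into a separate policy object. */
    if ( !rc )
        rc = x86_cpuid_copy_from_buffer(dst, leaves, nr,
                                        &err_leaf, &err_subleaf);

    if ( rc == -ERANGE )
        fprintf(stderr, "CPUID leaf %#x, subleaf %#x out of range\n",
                err_leaf, err_subleaf);

    free(leaves);
    return rc;
}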
diff --git a/xen/include/xen/lib/x86/cpuid.h b/xen/include/xen/lib/x86/cpuid.h
index 22d43ef..767a33b 100644
--- a/xen/include/xen/lib/x86/cpuid.h
+++ b/xen/include/xen/lib/x86/cpuid.h
@@ -319,6 +319,27 @@ typedef xen_cpuid_leaf_t cpuid_leaf_buffer_t[];
int x86_cpuid_copy_to_buffer(const struct cpuid_policy *policy,
cpuid_leaf_buffer_t leaves, uint32_t *nr_entries);
+/**
+ * Unserialise a cpuid_policy object from an array of cpuid leaves.
+ *
+ * @param policy The cpuid_policy to unserialise into.
+ * @param leaves The array of leaves to unserialise from.
+ * @param nr_entries The number of entries in 'leaves'.
+ * @param err_leaf Optional hint filled on error.
+ * @param err_subleaf Optional hint filled on error.
+ * @returns -errno
+ *
+ * Reads at most CPUID_MAX_SERIALISED_LEAVES. May return -ERANGE if an
+ * incoming leaf is out of range of cpuid_policy, in which case the optional
+ * err_* pointers are filled to aid diagnostics.
+ *
+ * No content validation of in-range leaves is performed.
+ */
+int x86_cpuid_copy_from_buffer(struct cpuid_policy *policy,
+ const cpuid_leaf_buffer_t leaves,
+ uint32_t nr_entries, uint32_t *err_leaf,
+ uint32_t *err_subleaf);
+
#endif /* !XEN_LIB_X86_CPUID_H */
/*
diff --git a/xen/lib/x86/cpuid.c b/xen/lib/x86/cpuid.c
index 5a3159b..7fc4148 100644
--- a/xen/lib/x86/cpuid.c
+++ b/xen/lib/x86/cpuid.c
@@ -233,6 +233,112 @@ int x86_cpuid_copy_to_buffer(const struct cpuid_policy *p,
return 0;
}
+int x86_cpuid_copy_from_buffer(struct cpuid_policy *p,
+ const cpuid_leaf_buffer_t leaves,
+ uint32_t nr_entries, uint32_t *err_leaf,
+ uint32_t *err_subleaf)
+{
+ unsigned int i;
+ xen_cpuid_leaf_t data;
+ struct cpuid_leaf *l = (void *)&data.a;
+
+ /*
+ * A well-formed caller is expected to pass an array with leaves in order,
+ * and without any repetitions. However, due to per-vendor differences,
+ * and in the case of upgrade or levelled scenarios, we typically expect
+ * fewer than MAX leaves to be passed.
+ *
+ * Detecting repeated entries is prohibitively complicated, so we don't
+ * bother. That said, one way or another, if more than MAX leaves are
+ * passed, something is wrong.
+ */
+ if ( nr_entries > CPUID_MAX_SERIALISED_LEAVES )
+ return -E2BIG;
+
+ for ( i = 0; i < nr_entries; ++i )
+ {
+ if ( copy_from_buffer_offset(&data, leaves, i, 1) )
+ return -EFAULT;
+
+ switch ( data.leaf )
+ {
+ case 0 ... ARRAY_SIZE(p->basic.raw) - 1:
+ switch ( data.leaf )
+ {
+ case 0x4:
+ if ( data.subleaf >= ARRAY_SIZE(p->cache.raw) )
+ goto out_of_range;
+
+ p->cache.raw[data.subleaf] = *l;
+ break;
+
+ case 0x7:
+ if ( data.subleaf >= ARRAY_SIZE(p->feat.raw) )
+ goto out_of_range;
+
+ p->feat.raw[data.subleaf] = *l;
+ break;
+
+ case 0xb:
+ if ( data.subleaf >= ARRAY_SIZE(p->topo.raw) )
+ goto out_of_range;
+
+ p->topo.raw[data.subleaf] = *l;
+ break;
+
+ case 0xd:
+ if ( data.subleaf >= ARRAY_SIZE(p->xstate.raw) )
+ goto out_of_range;
+
+ p->xstate.raw[data.subleaf] = *l;
+ break;
+
+ default:
+ if ( data.subleaf != XEN_CPUID_NO_SUBLEAF )
+ goto out_of_range;
+
+ p->basic.raw[data.leaf] = *l;
+ break;
+ }
+ break;
+
+ case 0x40000000:
+ if ( data.subleaf != XEN_CPUID_NO_SUBLEAF )
+ goto out_of_range;
+
+ p->hv_limit = l->a;
+ break;
+
+ case 0x40000100:
+ if ( data.subleaf != XEN_CPUID_NO_SUBLEAF )
+ goto out_of_range;
+
+ p->hv2_limit = l->a;
+ break;
+
+ case 0x80000000 ... 0x80000000 + ARRAY_SIZE(p->extd.raw) - 1:
+ if ( data.subleaf != XEN_CPUID_NO_SUBLEAF )
+ goto out_of_range;
+
+ p->extd.raw[data.leaf & 0xffff] = *l;
+ break;
+
+ default:
+ goto out_of_range;
+ }
+ }
+
+ return 0;
+
+ out_of_range:
+ if ( err_leaf )
+ *err_leaf = data.leaf;
+ if ( err_subleaf )
+ *err_subleaf = data.subleaf;
+
+ return -ERANGE;
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/lib/x86/private.h b/xen/lib/x86/private.h
index 3ee99aa..e0ff2da 100644
--- a/xen/lib/x86/private.h
+++ b/xen/lib/x86/private.h
@@ -12,6 +12,7 @@
#include <asm/msr-index.h>
#define copy_to_buffer_offset copy_to_guest_offset
+#define copy_from_buffer_offset copy_from_guest_offset
#else
@@ -44,6 +45,19 @@ static inline bool test_bit(unsigned int bit, const void *vaddr)
0; \
})
+/* memcpy(), but with copy_from_guest_offset()'s API. */
+#define copy_from_buffer_offset(dst, src, index, nr) \
+({ \
+ const typeof(*(src)) *src_ = (src); \
+ typeof(*(dst)) *dst_ = (dst); \
+ typeof(index) index_ = (index); \
+ typeof(nr) nr_ = (nr), i_; \
+ \
+ for ( i_ = 0; i_ < nr_; i_++ ) \
+ dst_[i_] = src_[index_ + i_]; \
+ 0; \
+})
+
#endif /* __XEN__ */
#endif /* XEN_LIB_X86_PRIVATE_H */
--
2.1.4