[xen staging] x86/pv: Move segment descriptor infrastructure into PV-only files
commit 89002866bb6c6f26024f015820c8f52012f95cf2
Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Thu Sep 3 19:28:15 2020 +0100
Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Wed Sep 9 17:57:26 2020 +0100
x86/pv: Move segment descriptor infrastructure into PV-only files
... so all segment checking/adjustment logic is co-located.
Perform some trivial style cleanup to check_descriptor() as it moves,
converting types, and cleaning up trailing whitespace.
In particular, this means that check_descriptor() is now excluded from
!CONFIG_PV builds.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
xen/arch/x86/mm.c | 17 ------
xen/arch/x86/pv/descriptor-tables.c | 100 ++++++++++++++++++++++++++++++++++++
xen/arch/x86/x86_64/mm.c | 87 -------------------------------
xen/include/asm-x86/mm.h | 2 -
xen/include/asm-x86/pv/mm.h | 2 +
5 files changed, 102 insertions(+), 106 deletions(-)
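
For anyone tracing the 32-bit base/limit fix-up in the moved check_descriptor(), the arithmetic on the descriptor's 'a' and 'b' words decodes as in the standalone sketch below. This is illustrative only and not part of the patch; SEG_G stands in for Xen's _SEGMENT_G and is assumed to sit at bit 23 of the high word.

/* Decode base/limit from the two 32-bit words of a legacy segment descriptor. */
#include <stdint.h>
#include <stdio.h>

#define SEG_G (1u << 23)   /* granularity bit in word 'b' (assumed position) */

int main(void)
{
    uint32_t a = 0x0000ffff, b = 0x00cf9a00;   /* example: flat 4GiB code segment */
    uint32_t base  = (b & 0xff000000) | ((b & 0xff) << 16) | (a >> 16);
    uint64_t limit = ((b & 0xf0000) | (a & 0xffff)) + 1;   /* limit is inclusive */

    if ( b & SEG_G )
        limit <<= 12;                          /* 4k granularity */

    printf("base %#x, limit %#llx\n", (unsigned int)base, (unsigned long long)limit);
    return 0;
}

The same decode underlies the PAE-Linux special case in the code below: a zero-based segment whose limit reaches past HYPERVISOR_COMPAT_VIRT_START has its limit field maxed out (a |= 0x0000ffff, b |= 0x000f0000) so the segment can reach the read-only M2P table mapped above it.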
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 35ec0e11f6..56bf7add2b 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -585,23 +585,6 @@ const char __section(".bss.page_aligned.const") __aligned(PAGE_SIZE)
zero_page[PAGE_SIZE];
-#ifdef CONFIG_PV
-static int validate_segdesc_page(struct page_info *page)
-{
- const struct domain *owner = page_get_owner(page);
- seg_desc_t *descs = __map_domain_page(page);
- unsigned i;
-
- for ( i = 0; i < 512; i++ )
- if ( unlikely(!check_descriptor(owner, &descs[i])) )
- break;
-
- unmap_domain_page(descs);
-
- return i == 512 ? 0 : -EINVAL;
-}
-#endif
-
static int _get_page_type(struct page_info *page, unsigned long type,
bool preemptible);
diff --git a/xen/arch/x86/pv/descriptor-tables.c b/xen/arch/x86/pv/descriptor-tables.c
index 3856128993..39c1a2311a 100644
--- a/xen/arch/x86/pv/descriptor-tables.c
+++ b/xen/arch/x86/pv/descriptor-tables.c
@@ -185,6 +185,106 @@ int compat_set_gdt(XEN_GUEST_HANDLE_PARAM(uint) frame_list,
return ret;
}
+static bool check_descriptor(const struct domain *dom, seg_desc_t *d)
+{
+ unsigned int a = d->a, b = d->b, cs, dpl;
+
+ /* A not-present descriptor will always fault, so is safe. */
+ if ( !(b & _SEGMENT_P) )
+ return true;
+
+ /* Check and fix up the DPL. */
+ dpl = (b >> 13) & 3;
+ __fixup_guest_selector(dom, dpl);
+ b = (b & ~_SEGMENT_DPL) | (dpl << 13);
+
+ /* All code and data segments are okay. No base/limit checking. */
+ if ( b & _SEGMENT_S )
+ {
+ if ( is_pv_32bit_domain(dom) )
+ {
+ unsigned long base, limit;
+
+ if ( b & _SEGMENT_L )
+ goto bad;
+
+ /*
+ * Older PAE Linux guests use segments which are limited to
+ * 0xf6800000. Extend these to allow access to the larger read-only
+ * M2P table available in 32on64 mode.
+ */
+ base = (b & 0xff000000) | ((b & 0xff) << 16) | (a >> 16);
+
+ limit = (b & 0xf0000) | (a & 0xffff);
+ limit++; /* We add one because limit is inclusive. */
+
+ if ( b & _SEGMENT_G )
+ limit <<= 12;
+
+ if ( (base == 0) && (limit > HYPERVISOR_COMPAT_VIRT_START(dom)) )
+ {
+ a |= 0x0000ffff;
+ b |= 0x000f0000;
+ }
+ }
+
+ goto good;
+ }
+
+ /* Invalid type 0 is harmless. It is used for 2nd half of a call gate. */
+ if ( (b & _SEGMENT_TYPE) == 0x000 )
+ return true;
+
+ /* Everything but a call gate is discarded here. */
+ if ( (b & _SEGMENT_TYPE) != 0xc00 )
+ goto bad;
+
+ /* Validate the target code selector. */
+ cs = a >> 16;
+ if ( !guest_gate_selector_okay(dom, cs) )
+ goto bad;
+ /*
+ * Force DPL to zero, causing a GP fault with its error code indicating
+ * the gate in use, allowing emulation. This is necessary because with
+ * native guests (kernel in ring 3) call gates cannot be used directly
+ * to transition from user to kernel mode (and whether a gate is used
+ * to enter the kernel can only be determined when the gate is being
+ * used), and with compat guests call gates cannot be used at all as
+ * there are only 64-bit ones.
+ * Store the original DPL in the selector's RPL field.
+ */
+ b &= ~_SEGMENT_DPL;
+ cs = (cs & ~3) | dpl;
+ a = (a & 0xffffU) | (cs << 16);
+
+ /* Reserved bits must be zero. */
+ if ( b & (is_pv_32bit_domain(dom) ? 0xe0 : 0xff) )
+ goto bad;
+
+ good:
+ d->a = a;
+ d->b = b;
+ return true;
+
+ bad:
+ return false;
+}
+
+int validate_segdesc_page(struct page_info *page)
+{
+ const struct domain *owner = page_get_owner(page);
+ seg_desc_t *descs = __map_domain_page(page);
+ unsigned i;
+
+ for ( i = 0; i < 512; i++ )
+ if ( unlikely(!check_descriptor(owner, &descs[i])) )
+ break;
+
+ unmap_domain_page(descs);
+
+ return i == 512 ? 0 : -EINVAL;
+}
+
long do_update_descriptor(uint64_t gaddr, seg_desc_t d)
{
struct domain *currd = current->domain;
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 98581dfe5f..1f32062c15 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1010,93 +1010,6 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
return rc;
}
-/* Returns TRUE if given descriptor is valid for GDT or LDT. */
-int check_descriptor(const struct domain *dom, seg_desc_t *d)
-{
- u32 a = d->a, b = d->b;
- u16 cs;
- unsigned int dpl;
-
- /* A not-present descriptor will always fault, so is safe. */
- if ( !(b & _SEGMENT_P) )
- return 1;
-
- /* Check and fix up the DPL. */
- dpl = (b >> 13) & 3;
- __fixup_guest_selector(dom, dpl);
- b = (b & ~_SEGMENT_DPL) | (dpl << 13);
-
- /* All code and data segments are okay. No base/limit checking. */
- if ( (b & _SEGMENT_S) )
- {
- if ( is_pv_32bit_domain(dom) )
- {
- unsigned long base, limit;
-
- if ( b & _SEGMENT_L )
- goto bad;
-
- /*
- * Older PAE Linux guests use segments which are limited to
- * 0xf6800000. Extend these to allow access to the larger read-only
- * M2P table available in 32on64 mode.
- */
- base = (b & 0xff000000) | ((b & 0xff) << 16) | (a >> 16);
-
- limit = (b & 0xf0000) | (a & 0xffff);
- limit++; /* We add one because limit is inclusive. */
-
- if ( (b & _SEGMENT_G) )
- limit <<= 12;
-
- if ( (base == 0) && (limit > HYPERVISOR_COMPAT_VIRT_START(dom)) )
- {
- a |= 0x0000ffff;
- b |= 0x000f0000;
- }
- }
-
- goto good;
- }
-
- /* Invalid type 0 is harmless. It is used for 2nd half of a call gate. */
- if ( (b & _SEGMENT_TYPE) == 0x000 )
- return 1;
-
- /* Everything but a call gate is discarded here. */
- if ( (b & _SEGMENT_TYPE) != 0xc00 )
- goto bad;
-
- /* Validate the target code selector. */
- cs = a >> 16;
- if ( !guest_gate_selector_okay(dom, cs) )
- goto bad;
- /*
- * Force DPL to zero, causing a GP fault with its error code indicating
- * the gate in use, allowing emulation. This is necessary because with
- * native guests (kernel in ring 3) call gates cannot be used directly
- * to transition from user to kernel mode (and whether a gate is used
- * to enter the kernel can only be determined when the gate is being
- * used), and with compat guests call gates cannot be used at all as
- * there are only 64-bit ones.
- * Store the original DPL in the selector's RPL field.
- */
- b &= ~_SEGMENT_DPL;
- cs = (cs & ~3) | dpl;
- a = (a & 0xffffU) | (cs << 16);
-
- /* Reserved bits must be zero. */
- if ( b & (is_pv_32bit_domain(dom) ? 0xe0 : 0xff) )
- goto bad;
-
- good:
- d->a = a;
- d->b = b;
- return 1;
- bad:
- return 0;
-}
-
int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs)
{
struct domain *d = current->domain;
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 632ece1cee..deeba75a1c 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -454,8 +454,6 @@ static inline int get_page_and_type(struct page_info *page,
ASSERT(((_p)->count_info & PGC_count_mask) != 0); \
ASSERT(page_get_owner(_p) == (_d))
-int check_descriptor(const struct domain *d, seg_desc_t *desc);
-
extern paddr_t mem_hotplug;
/******************************************************************************
diff --git a/xen/include/asm-x86/pv/mm.h b/xen/include/asm-x86/pv/mm.h
index 07a12d5c49..9983f8257c 100644
--- a/xen/include/asm-x86/pv/mm.h
+++ b/xen/include/asm-x86/pv/mm.h
@@ -32,6 +32,8 @@ void pv_destroy_gdt(struct vcpu *v);
bool pv_map_ldt_shadow_page(unsigned int off);
bool pv_destroy_ldt(struct vcpu *v);
+int validate_segdesc_page(struct page_info *page);
+
#else
#include <xen/errno.h>
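
For context on how the now-PV-only validate_segdesc_page() is consumed: it keeps the 0 / -EINVAL convention shown above, so a CONFIG_PV caller can treat it as an ordinary validation hook. The wrapper below is a minimal sketch with a hypothetical name; only the return-value handling is taken from the patch.

#include <xen/lib.h>
#include <asm/mm.h>
#include <asm/pv/mm.h>

#ifdef CONFIG_PV
/* Hypothetical wrapper: validate a page holding 512 guest descriptors. */
static int pv_validate_gdt_ldt_frame(struct page_info *page)
{
    int rc = validate_segdesc_page(page);   /* 0, or -EINVAL on a bad entry */

    if ( rc )
        gdprintk(XENLOG_WARNING, "Bad descriptor frame (%d)\n", rc);

    return rc;
}
#endif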
--
generated by git-patchbot for /home/xen/git/xen.git#staging