|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen staging] x86: move / split usercopy.c to / into arch-specific library
commit 7370966d1cb7e53e7bb55fe0d4d16068e9b81a1f
Author: Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed Dec 17 09:02:37 2025 +0100
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Dec 17 09:02:37 2025 +0100
x86: move / split usercopy.c to / into arch-specific library
The file wasn't correctly named for our purposes anyway. Split it into its
"guest" and "unsafe" parts, thus allowing the latter to not be linked in
at all (for presently having no caller). The building of the "guest" part
can then (later) become conditional upon PV=y.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
---
xen/arch/x86/Makefile | 4 --
xen/arch/x86/lib/Makefile | 2 +
xen/arch/x86/lib/copy-guest.c | 152 +++++++++++++++++++++++++++++++++++++++
xen/arch/x86/lib/copy-unsafe.c | 22 ++++++
xen/arch/x86/usercopy.c | 160 -----------------------------------------
5 files changed, 176 insertions(+), 164 deletions(-)
diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
index 61e2293a46..dfb258d7ac 100644
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -72,7 +72,6 @@ obj-y += time.o
obj-y += traps-setup.o
obj-y += traps.o
obj-$(CONFIG_INTEL) += tsx.o
-obj-y += usercopy.o
obj-y += x86_emulate.o
obj-$(CONFIG_TBOOT) += tboot.o
obj-y += hpet.o
@@ -93,9 +92,6 @@ hostprogs-y += efi/mkreloc
$(obj)/efi/mkreloc: HOSTCFLAGS += -I$(srctree)/include
-# Allows usercopy.c to include itself
-$(obj)/usercopy.o: CFLAGS-y += -iquote .
-
ifneq ($(CONFIG_HVM),y)
$(obj)/x86_emulate.o: CFLAGS-y += -Wno-unused-label
endif
diff --git a/xen/arch/x86/lib/Makefile b/xen/arch/x86/lib/Makefile
index ddf7e19bdc..8fe2dfd885 100644
--- a/xen/arch/x86/lib/Makefile
+++ b/xen/arch/x86/lib/Makefile
@@ -1 +1,3 @@
+lib-y += copy-guest.o
+lib-y += copy-unsafe.o
lib-y += generic-hweightl.o
diff --git a/xen/arch/x86/lib/copy-guest.c b/xen/arch/x86/lib/copy-guest.c
new file mode 100644
index 0000000000..73284b3f14
--- /dev/null
+++ b/xen/arch/x86/lib/copy-guest.c
@@ -0,0 +1,152 @@
+/*
+ * User address space access functions.
+ *
+ * Copyright 1997 Andi Kleen <ak@xxxxxx>
+ * Copyright 1997 Linus Torvalds
+ * Copyright 2002 Andi Kleen <ak@xxxxxxx>
+ */
+
+#include <xen/sched.h>
+
+#include <asm/uaccess.h>
+
+#ifndef GUARD
+# define GUARD UA_KEEP
+#endif
+
+unsigned int copy_to_guest_ll(void __user *to, const void *from, unsigned int n)
+{
+ GUARD(unsigned dummy);
+
+ stac();
+ asm_inline volatile (
+ GUARD(
+ " guest_access_mask_ptr %[to], %q[scratch1], %q[scratch2]\n"
+ )
+ "1: rep movsb\n"
+ "2:\n"
+ _ASM_EXTABLE(1b, 2b)
+ : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from)
+ GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
+ :: "memory" );
+ clac();
+
+ return n;
+}
+
+unsigned int copy_from_guest_ll(void *to, const void __user *from, unsigned int n)
+{
+ unsigned dummy;
+
+ stac();
+ asm_inline volatile (
+ GUARD(
+ " guest_access_mask_ptr %[from], %q[scratch1], %q[scratch2]\n"
+ )
+ "1: rep movsb\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "6: mov %[cnt], %k[from]\n"
+ " xchg %%eax, %[aux]\n"
+ " xor %%eax, %%eax\n"
+ " rep stosb\n"
+ " xchg %[aux], %%eax\n"
+ " mov %k[from], %[cnt]\n"
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 6b)
+ : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
+ [aux] "=&r" (dummy)
+ GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
+ :: "memory" );
+ clac();
+
+ return n;
+}
+
+#if GUARD(1) + 0
+
+/**
+ * copy_to_guest_pv: - Copy a block of data into PV guest space.
+ * @to: Destination address, in PV guest space.
+ * @from: Source address, in hypervisor space.
+ * @n: Number of bytes to copy.
+ *
+ * Copy data from hypervisor space to PV guest space.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+unsigned int copy_to_guest_pv(void __user *to, const void *from, unsigned int n)
+{
+ if ( access_ok(to, n) )
+ n = __copy_to_guest_pv(to, from, n);
+ return n;
+}
+
+/**
+ * clear_guest_pv: - Zero a block of memory in PV guest space.
+ * @to: Destination address, in PV guest space.
+ * @n: Number of bytes to zero.
+ *
+ * Zero a block of memory in PV guest space.
+ *
+ * Returns number of bytes that could not be cleared.
+ * On success, this will be zero.
+ */
+unsigned int clear_guest_pv(void __user *to, unsigned int n)
+{
+ if ( access_ok(to, n) )
+ {
+ long dummy;
+
+ stac();
+ asm_inline volatile (
+ " guest_access_mask_ptr %[to], %[scratch1], %[scratch2]\n"
+ "1: rep stosb\n"
+ "2:\n"
+ _ASM_EXTABLE(1b,2b)
+ : [cnt] "+c" (n), [to] "+D" (to), [scratch1] "=&r" (dummy),
+ [scratch2] "=&r" (dummy)
+ : "a" (0) );
+ clac();
+ }
+
+ return n;
+}
+
+/**
+ * copy_from_guest_pv: - Copy a block of data from PV guest space.
+ * @to: Destination address, in hypervisor space.
+ * @from: Source address, in PV guest space.
+ * @n: Number of bytes to copy.
+ *
+ * Copy data from PV guest space to hypervisor space.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ */
+unsigned int copy_from_guest_pv(void *to, const void __user *from,
+ unsigned int n)
+{
+ if ( access_ok(from, n) )
+ n = __copy_from_guest_pv(to, from, n);
+ else
+ memset(to, 0, n);
+ return n;
+}
+
+#endif /* GUARD(1) */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/x86/lib/copy-unsafe.c b/xen/arch/x86/lib/copy-unsafe.c
new file mode 100644
index 0000000000..a51500370f
--- /dev/null
+++ b/xen/arch/x86/lib/copy-unsafe.c
@@ -0,0 +1,22 @@
+/*
+ * "Unsafe" access functions.
+ */
+
+#include <asm/uaccess.h>
+
+#define GUARD UA_DROP
+#define copy_to_guest_ll copy_to_unsafe_ll
+#define copy_from_guest_ll copy_from_unsafe_ll
+#undef __user
+#define __user
+#include "copy-guest.c"
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/x86/usercopy.c b/xen/arch/x86/usercopy.c
deleted file mode 100644
index a24b52cc66..0000000000
--- a/xen/arch/x86/usercopy.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * User address space access functions.
- *
- * Copyright 1997 Andi Kleen <ak@xxxxxx>
- * Copyright 1997 Linus Torvalds
- * Copyright 2002 Andi Kleen <ak@xxxxxxx>
- */
-
-#include <xen/lib.h>
-#include <xen/sched.h>
-#include <asm/uaccess.h>
-
-#ifndef GUARD
-# define GUARD UA_KEEP
-#endif
-
-unsigned int copy_to_guest_ll(void __user *to, const void *from, unsigned int n)
-{
- GUARD(unsigned dummy);
-
- stac();
- asm_inline volatile (
- GUARD(
- " guest_access_mask_ptr %[to], %q[scratch1], %q[scratch2]\n"
- )
- "1: rep movsb\n"
- "2:\n"
- _ASM_EXTABLE(1b, 2b)
- : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from)
- GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
- :: "memory" );
- clac();
-
- return n;
-}
-
-unsigned int copy_from_guest_ll(void *to, const void __user *from, unsigned int n)
-{
- unsigned dummy;
-
- stac();
- asm_inline volatile (
- GUARD(
- " guest_access_mask_ptr %[from], %q[scratch1], %q[scratch2]\n"
- )
- "1: rep movsb\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- "6: mov %[cnt], %k[from]\n"
- " xchg %%eax, %[aux]\n"
- " xor %%eax, %%eax\n"
- " rep stosb\n"
- " xchg %[aux], %%eax\n"
- " mov %k[from], %[cnt]\n"
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 6b)
- : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
- [aux] "=&r" (dummy)
- GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
- :: "memory" );
- clac();
-
- return n;
-}
-
-#if GUARD(1) + 0
-
-/**
- * copy_to_guest_pv: - Copy a block of data into PV guest space.
- * @to: Destination address, in PV guest space.
- * @from: Source address, in hypervisor space.
- * @n: Number of bytes to copy.
- *
- * Copy data from hypervisor space to PV guest space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned int copy_to_guest_pv(void __user *to, const void *from, unsigned int n)
-{
- if ( access_ok(to, n) )
- n = __copy_to_guest_pv(to, from, n);
- return n;
-}
-
-/**
- * clear_guest_pv: - Zero a block of memory in PV guest space.
- * @to: Destination address, in PV guest space.
- * @n: Number of bytes to zero.
- *
- * Zero a block of memory in PV guest space.
- *
- * Returns number of bytes that could not be cleared.
- * On success, this will be zero.
- */
-unsigned int clear_guest_pv(void __user *to, unsigned int n)
-{
- if ( access_ok(to, n) )
- {
- long dummy;
-
- stac();
- asm_inline volatile (
- " guest_access_mask_ptr %[to], %[scratch1], %[scratch2]\n"
- "1: rep stosb\n"
- "2:\n"
- _ASM_EXTABLE(1b,2b)
- : [cnt] "+c" (n), [to] "+D" (to), [scratch1] "=&r" (dummy),
- [scratch2] "=&r" (dummy)
- : "a" (0) );
- clac();
- }
-
- return n;
-}
-
-/**
- * copy_from_guest_pv: - Copy a block of data from PV guest space.
- * @to: Destination address, in hypervisor space.
- * @from: Source address, in PV guest space.
- * @n: Number of bytes to copy.
- *
- * Copy data from PV guest space to hypervisor space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned int copy_from_guest_pv(void *to, const void __user *from,
- unsigned int n)
-{
- if ( access_ok(from, n) )
- n = __copy_from_guest_pv(to, from, n);
- else
- memset(to, 0, n);
- return n;
-}
-
-# undef GUARD
-# define GUARD UA_DROP
-# define copy_to_guest_ll copy_to_unsafe_ll
-# define copy_from_guest_ll copy_from_unsafe_ll
-# undef __user
-# define __user
-# include __FILE__
-
-#endif /* GUARD(1) */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
--
generated by git-patchbot for /home/xen/git/xen.git#staging
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |