As you may know by now, xc_map_foreign* first sets up a region of virtual memory with an mmap call and then issues the mapping ioctl to populate that region. Suppose the mapping operation is broken up into pieces: the current checks in the implementation of PRIVCMD_MMAPBATCH* prevent you from populating the memory region piecemeal.
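
To make the retry pattern concrete, here is a rough user-space sketch (it is not libxc code and not part of the patch below) of populating a premapped region piecemeal through /dev/xen/privcmd: issue the batch once, then resubmit only the frames that came back -ENOENT while the paging helper brings them in. The helper name, the polling interval, and the assumption that the V2 ioctl reports paged-out frames as -ENOENT in err[] are illustrative; xen_pfn_t and domid_t are taken from the Xen public headers.

/* Sketch: retry paged-out frames one page at a time.  The region at
 * vaddr is assumed to have been set up with mmap() on privcmd_fd. */
#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xen/privcmd.h>        /* struct privcmd_mmapbatch_v2 */

static int populate_piecemeal(int privcmd_fd, domid_t domid,
                              unsigned long vaddr, xen_pfn_t *gfns,
                              int *err, unsigned int nr)
{
	struct privcmd_mmapbatch_v2 batch = {
		.num = nr, .dom = domid, .addr = vaddr,
		.arr = gfns, .err = err,
	};
	unsigned int i;

	if (ioctl(privcmd_fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &batch) == 0)
		return 0;
	if (errno != ENOENT)
		return -1;

	/* Some frames were paged out.  Retry just those, each at an
	 * address strictly inside the original VMA. */
	for (i = 0; i < nr; i++) {
		while (err[i] == -ENOENT) {
			struct privcmd_mmapbatch_v2 one = {
				.num = 1, .dom = domid,
				.addr = vaddr + (unsigned long)i * getpagesize(),
				.arr = &gfns[i], .err = &err[i],
			};
			if (ioctl(privcmd_fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &one) == 0)
				break;
			if (errno != ENOENT)
				return -1;
			usleep(1000);	/* paging helper still working */
		}
	}
	return 0;
}

Each retry passes an addr inside, not equal to, vma->vm_start, with a length smaller than the whole VMA; that is exactly what the per-pte singleshot check in the patch permits and the old whole-VMA check rejected.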
When a foreign mapper attempts to map guest frames that are paged out,
the mapper receives an ENOENT response and will have to try again
while a helper process pages the target frame back in.
Gating checks on PRIVCMD_MMAPBATCH* ioctl args were preventing retries
of mapping calls.
Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
---
drivers/xen/privcmd.c | 32 +++++++++++++++++++++++++++-----
1 files changed, 27 insertions(+), 5 deletions(-)
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index f8e5dd7..44a26c6 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -43,9 +43,12 @@ MODULE_LICENSE("GPL");
 
 #define PRIV_VMA_LOCKED ((void *)1)
 
-#ifndef HAVE_ARCH_PRIVCMD_MMAP
 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
-#endif
+
+static int privcmd_enforce_singleshot_mapping_granular(
+		struct vm_area_struct *vma,
+		unsigned long addr,
+		unsigned long nr_pages);
 
 static long privcmd_ioctl_hypercall(void __user *udata)
 {
@@ -422,9 +425,9 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 	vma = find_vma(mm, m.addr);
 	if (!vma ||
 	    vma->vm_ops != &privcmd_vm_ops ||
-	    (m.addr != vma->vm_start) ||
-	    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
-	    !privcmd_enforce_singleshot_mapping(vma)) {
+	    (m.addr < vma->vm_start) ||
+	    ((m.addr + (nr_pages << PAGE_SHIFT)) > vma->vm_end) ||
+	    !privcmd_enforce_singleshot_mapping_granular(vma, m.addr, nr_pages)) {
 		up_write(&mm->mmap_sem);
 		ret = -EINVAL;
 		goto out;
@@ -540,11 +543,30 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 
+/* For MMAP */
 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
 {
 	return !cmpxchg(&vma->vm_private_data, NULL, PRIV_VMA_LOCKED);
 }
 
+/* For MMAPBATCH*. This allows asserting the singleshot mapping
+ * on a per pfn/pte basis. Mapping calls that fail with ENOENT
+ * can then be retried until success. */
+static int enforce_singleshot_mapping_fn(pte_t *pte, struct page *pmd_page,
+					 unsigned long addr, void *data)
+{
+	return pte_none(*pte) ? 0 : -EBUSY;
+}
+
+static int privcmd_enforce_singleshot_mapping_granular(
+		struct vm_area_struct *vma,
+		unsigned long addr,
+		unsigned long nr_pages)
+{
+	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
+				   enforce_singleshot_mapping_fn, NULL) == 0;
+}
+
 const struct file_operations xen_privcmd_fops = {
 	.owner = THIS_MODULE,
 	.unlocked_ioctl = privcmd_ioctl,
--
1.7.1