>From 7ec4da2e1a40963d459dec6c61e810e5badd390a Mon Sep 17 00:00:00 2001
From: Mats Petersson
Date: Wed, 19 Dec 2012 11:58:23 +0000
Subject: [PATCH] Fixed up after IanC's comments.

---
 arch/arm/xen/enlighten.c |  104 +++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 90 insertions(+), 14 deletions(-)

diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 7a32976..2bf8556 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -73,7 +73,7 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
 }
 
 struct remap_data {
-	xen_pfn_t fgmfn; /* foreign domain's gmfn */
+	xen_pfn_t *fgmfn; /* foreign domain's gmfn */
 	pgprot_t prot;
 	domid_t domid;
 	struct vm_area_struct *vma;
@@ -90,38 +90,114 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
 	unsigned long pfn = page_to_pfn(page);
 	pte_t pte = pfn_pte(pfn, info->prot);
 
-	if (map_foreign_page(pfn, info->fgmfn, info->domid))
+	/* TODO: We should really batch these updates */
+	if (map_foreign_page(pfn, *info->fgmfn, info->domid))
 		return -EFAULT;
 	set_pte_at(info->vma->vm_mm, addr, ptep, pte);
+	info->fgmfn++;
 
 	return 0;
 }
 
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
-			       unsigned long addr,
-			       xen_pfn_t mfn, int nr,
-			       pgprot_t prot, unsigned domid,
-			       struct page **pages)
+/* do_remap_mfn() - helper function to map foreign pages
+ * @vma: the vma for the pages to be mapped into
+ * @addr: the address at which to map the pages
+ * @mfn: pointer to array of MFNs to map
+ * @nr: the number entries in the MFN array
+ * @prot: page protection mask
+ * @domid: id of the domain that we are mapping from
+ * @pages: page information.
+ *
+ * This function takes an array of mfns and maps nr pages from that into
+ * this kernel's memory. The owner of the pages is defined by domid. Where the
+ * pages are mapped is determined by addr, and vma is used for "accounting" of
+ * the pages.
+ *
+ * Return value is zero for success, negative for failure.
+ */
+static int do_remap_mfn(struct vm_area_struct *vma,
+			unsigned long addr,
+			xen_pfn_t *mfn, int nr,
+			pgprot_t prot, unsigned domid,
+			struct page **pages)
 {
 	int err;
 	struct remap_data data;
 
-	/* TBD: Batching, current sole caller only does page at a time */
-	if (nr > 1)
-		return -EINVAL;
+	/* Kept here for the purpose of making sure code doesn't break
+	   x86 PVOPS */
+	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
 	data.fgmfn = mfn;
-	data.prot = prot;
+	data.prot  = prot;
 	data.domid = domid;
-	data.vma = vma;
-	data.index = 0;
+	data.vma   = vma;
 	data.pages = pages;
-	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
+	data.index = 0;
+
+	unsigned long range = nr << PAGE_SHIFT;
+
+	err = apply_to_page_range(vma->vm_mm, addr, range,
 				  remap_pte_fn, &data);
+	/* Warning: We do probably need to care about what error we
+	   get here. However, currently, the remap_pte_fn is only
+	   likely to return EFAULT or some other "things are very
+	   bad" error code, which the rest of the calling code won't
+	   be able to fix up. So we just exit with the error we got.
+	*/
 	return err;
 }
+
+/* xen_remap_domain_mfn_array() - Used to map an array of foreign pages
+ * @vma: the vma for the pages to be mapped into
+ * @addr: the address at which to map the pages
+ * @mfn: pointer to array of MFNs to map
+ * @nr: the number entries in the MFN array
+ * @err_ptr: pointer to array of integers, one per MFN, for an error
+ * value for each page. The err_ptr must not be NULL.
+ * @prot: page protection mask
+ * @domid: id of the domain that we are mapping from
+ * @pages: page information
+ *
+ * This function takes an array of mfns and maps nr pages from that into this
+ * kernel's memory. The owner of the pages is defined by domid. Where the pages
+ * are mapped is determined by addr, and vma is used for "accounting" of the
+ * pages. The err_ptr array is filled in on any page that is not successfully
+ * mapped in.
+ *
+ * Return value is zero for success, negative ERRNO value for failure.
+ * Note that the error value -ENOENT is considered a "retry", so when this
+ * error code is seen, another call should be made with the list of pages that
+ * are marked as -ENOENT in the err_ptr array.
+ */
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+			       unsigned long addr,
+			       xen_pfn_t *mfn, int nr,
+			       int *err_ptr, pgprot_t prot,
+			       unsigned domid,
+			       struct page **pages)
+{
+	/* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
+	 * and the consequences later is quite hard to detect what the actual
+	 * cause of "wrong memory was mapped in".
+	 * Note: This variant doesn't actually use err_ptr at the moment.
+	 */
+	BUG_ON(err_ptr == NULL);
+	return do_remap_mfn(vma, addr, mfn, nr, prot, domid, pages);
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+
+/* Not used in ARM. Use xen_remap_domain_mfn_array(). */
+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+			       unsigned long addr,
+			       xen_pfn_t mfn, int nr,
+			       pgprot_t prot, unsigned domid,
+			       struct page **pages)
+{
+	return -ENOSYS;
+}
 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+
 int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, int nr,
 			       struct page **pages)
 {
-- 
1.7.9.5