[Xen-devel] [RFC PATCH 8/8]: PVH: privcmd changes
---
 drivers/xen/privcmd.c |   68 +++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 66 insertions(+), 2 deletions(-)

diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index ccee0f1..0a240ab 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -33,6 +33,7 @@
 #include <xen/features.h>
 #include <xen/page.h>
 #include <xen/xen-ops.h>
+#include <xen/balloon.h>
 
 #include "privcmd.h"
 
@@ -199,6 +200,10 @@ static long privcmd_ioctl_mmap(void __user *udata)
 	if (!xen_initial_domain())
 		return -EPERM;
 
+	/* PVH: TBD/FIXME. For now we only support privcmd_ioctl_mmap_batch */
+	if (xen_pvh_domain())
+		return -ENOSYS;
+
 	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
 		return -EFAULT;
 
@@ -251,6 +256,8 @@ struct mmap_batch_state {
 	xen_pfn_t __user *user;
 };
 
+/* PVH dom0: if domU being created is PV, then mfn is mfn(addr on bus). If
+ * it's PVH then mfn is pfn (input to HAP). */
 static int mmap_batch_fn(void *data, void *state)
 {
 	xen_pfn_t *mfnp = data;
@@ -274,6 +281,40 @@ static int mmap_return_errors(void *data, void *state)
 	return put_user(*mfnp, st->user++);
 }
 
+/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
+ * the vma with the page info to use later.
+ * Returns: 0 if success, otherwise -errno
+ */
+static int pvh_privcmd_resv_pfns(struct vm_area_struct *vma, int numpgs)
+{
+	int rc;
+	struct xen_pvh_sav_pfn_info *savp;
+
+	savp = kzalloc(sizeof(struct xen_pvh_sav_pfn_info), GFP_KERNEL);
+	if (savp == NULL)
+		return -ENOMEM;
+
+	savp->sp_paga = kcalloc(numpgs, sizeof(savp->sp_paga[0]), GFP_KERNEL);
+	if (savp->sp_paga == NULL) {
+		kfree(savp);
+		return -ENOMEM;
+	}
+
+	rc = alloc_xenballooned_pages(numpgs, savp->sp_paga, 0);
+	if (rc != 0) {
+		pr_warn("%s Could not alloc %d pfns rc:%d\n", __FUNCTION__,
+			numpgs, rc);
+		kfree(savp->sp_paga);
+		kfree(savp);
+		return -ENOMEM;
+	}
+	savp->sp_num_pgs = numpgs;
+	BUG_ON(vma->vm_private_data);
+	vma->vm_private_data = savp;
+
+	return 0;
+}
+
 static struct vm_operations_struct privcmd_vm_ops;
 
 static long privcmd_ioctl_mmap_batch(void __user *udata)
@@ -315,6 +356,12 @@ static long privcmd_ioctl_mmap_batch(void __user *udata)
 		goto out;
 	}
 
+	if (xen_pvh_domain()) {
+		if ((ret=pvh_privcmd_resv_pfns(vma, m.num))) {
+			up_write(&mm->mmap_sem);
+			goto out;
+		}
+	}
 	state.domain = m.dom;
 	state.vma = vma;
 	state.va = m.addr;
@@ -365,6 +412,19 @@ static long privcmd_ioctl(struct file *file,
 	return ret;
 }
 
+static void privcmd_close(struct vm_area_struct *vma)
+{
+	struct xen_pvh_sav_pfn_info *savp;
+
+	if (!xen_pvh_domain() || ((savp=vma->vm_private_data) == NULL))
+		return;
+
+	while (savp->sp_next_todo--) {
+		xen_pfn_t pfn = page_to_pfn(savp->sp_paga[savp->sp_next_todo]);
+		pvh_rem_xen_p2m(pfn, 1);
+	}
+}
+
 static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
@@ -375,13 +435,14 @@ static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 
 static struct vm_operations_struct privcmd_vm_ops = {
+	.close = privcmd_close,
 	.fault = privcmd_fault
 };
 
 static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
 {
-	/* Unsupported for auto-translate guests. */
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	/* Unsupported for auto-translate guests unless PVH */
+	if (xen_feature(XENFEAT_auto_translated_physmap) && !xen_pvh_domain())
 		return -ENOSYS;
 
 	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
@@ -395,6 +456,9 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
 
 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
 {
+	if (xen_pvh_domain())
+		return (vma->vm_private_data == NULL);
+
 	return (xchg(&vma->vm_private_data, (void *)1) == NULL);
 }
 
-- 
1.7.2.3
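
A note on the data structure used above: struct xen_pvh_sav_pfn_info is not introduced by this patch and presumably comes from an earlier patch in the PVH series. A minimal sketch consistent with how its fields are accessed here (the field types and comments are inferred, not the definitive definition):

/* Sketch only: inferred from the accesses in pvh_privcmd_resv_pfns() and
 * privcmd_close(); the real definition lives elsewhere in the series. */
struct xen_pvh_sav_pfn_info {
	struct page **sp_paga;	/* ballooned pages backing the foreign mapping */
	int sp_num_pgs;		/* number of entries allocated in sp_paga */
	int sp_next_todo;	/* next slot to fill; walked backwards on close */
};

privcmd_close() walks sp_next_todo backwards and calls pvh_rem_xen_p2m() on each reserved pfn, so whatever advanced sp_next_todo (presumably mmap_batch_fn() as each frame is mapped) gets undone in reverse when the vma goes away.

For reference, the path being changed is normally driven from the toolstack through libxc: xc_map_foreign_batch() mmaps the privcmd device node and issues IOCTL_PRIVCMD_MMAPBATCH, which lands in privcmd_ioctl_mmap_batch() above. A rough user-space sketch, with the domid and frame number as placeholders only:

#include <stdio.h>
#include <sys/mman.h>
#include <xenctrl.h>

int main(void)
{
	xc_interface *xch = xc_interface_open(NULL, NULL, 0);
	xen_pfn_t gmfns[1] = { 0x1234 };	/* frame number is illustrative */
	void *va;

	if (!xch)
		return 1;

	/* Map one foreign frame of domain 5 read-only.  On a PVH dom0 the
	 * kernel side backs this with ballooned pfns reserved by
	 * pvh_privcmd_resv_pfns(). */
	va = xc_map_foreign_batch(xch, 5 /* domid, illustrative */, PROT_READ,
				  gmfns, 1);
	if (va)
		munmap(va, XC_PAGE_SIZE);

	xc_interface_close(xch);
	return 0;
}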