Re: [Xen-devel] [PATCH v2 10/12] x86/altp2m: define and implement alternate p2m HVMOP types.
>>> On 22.06.15 at 20:56, <edmund.h.white@xxxxxxxxx> wrote:
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -6424,6 +6424,222 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
>          break;
>      }
> 
> +    case HVMOP_altp2m_get_domain_state:
> +    {
> +        struct xen_hvm_altp2m_domain_state a;
> +        struct domain *d;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        d = rcu_lock_domain_by_any_id(a.domid);
> +        if ( d == NULL )
> +            return -ESRCH;
> +
> +        rc = -EINVAL;
> +        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() )
> +            goto param_fail9;
> +
> +        a.state = altp2mhvm_active(d);
> +        rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;

__copy_to_guest()

> +
> +    param_fail9:

Can you please avoid introducing further numbered "param_fail" labels?
In the case here I think you could easily get away without any label.

> +    case HVMOP_altp2m_destroy_p2m:
> +    {
> +        struct xen_hvm_altp2m_view a;
> +        struct domain *d;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        d = rcu_lock_domain_by_any_id(a.domid);
> +        if ( d == NULL )
> +            return -ESRCH;
> +
> +        rc = -EINVAL;
> +        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
> +             !d->arch.altp2m_active )
> +            goto param_fail12;
> +
> +        if ( p2m_destroy_altp2m_by_id(d, a.view) )
> +            rc = 0;

This function should have its own return code, which should be assigned
to rc (avoiding all sorts of failures to be reported as -EINVAL).

> +    case HVMOP_altp2m_switch_p2m:
> +    {
> +        struct xen_hvm_altp2m_view a;
> +        struct domain *d;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        d = rcu_lock_domain_by_any_id(a.domid);
> +        if ( d == NULL )
> +            return -ESRCH;
> +
> +        rc = -EINVAL;
> +        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
> +             !d->arch.altp2m_active )
> +            goto param_fail13;
> +
> +        if ( p2m_switch_domain_altp2m_by_id(d, a.view) )
> +            rc = 0;

Same here.

> +    case HVMOP_altp2m_set_mem_access:
> +    {
> +        struct xen_hvm_altp2m_set_mem_access a;
> +        struct domain *d;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        d = rcu_lock_domain_by_any_id(a.domid);
> +        if ( d == NULL )
> +            return -ESRCH;
> +
> +        rc = -EINVAL;
> +        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
> +             !d->arch.altp2m_active )
> +            goto param_fail14;
> +
> +        if ( p2m_set_altp2m_mem_access(d, a.view, a.pfn, a.hvmmem_access) )
> +            rc = 0;

And here.

> +    case HVMOP_altp2m_change_pfn:
> +    {
> +        struct xen_hvm_altp2m_change_pfn a;
> +        struct domain *d;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        d = rcu_lock_domain_by_any_id(a.domid);
> +        if ( d == NULL )
> +            return -ESRCH;
> +
> +        rc = -EINVAL;
> +        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
> +             !d->arch.altp2m_active )
> +            goto param_fail15;
> +
> +        if ( p2m_change_altp2m_pfn(d, a.view, a.old_pfn, a.new_pfn) )
> +            rc = 0;

And again.
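
Purely for illustration (untested, assuming p2m_destroy_altp2m_by_id()
would then return 0 / -errno, and that rcu_unlock_domain() is all the
common exit path needs to do), the first two cases could for example be
shaped like this, with no label, __copy_to_guest(), and the callee's
return value reaching rc:

    case HVMOP_altp2m_get_domain_state:
    {
        struct xen_hvm_altp2m_domain_state a;
        struct domain *d;

        if ( copy_from_guest(&a, arg, 1) )
            return -EFAULT;

        d = rcu_lock_domain_by_any_id(a.domid);
        if ( d == NULL )
            return -ESRCH;

        rc = -EINVAL;
        if ( is_hvm_domain(d) && hvm_altp2m_supported() )
        {
            /* No label needed: fold the success path into the condition. */
            a.state = altp2mhvm_active(d);
            rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
        }

        rcu_unlock_domain(d);
        break;
    }

    case HVMOP_altp2m_destroy_p2m:
    {
        struct xen_hvm_altp2m_view a;
        struct domain *d;

        if ( copy_from_guest(&a, arg, 1) )
            return -EFAULT;

        d = rcu_lock_domain_by_any_id(a.domid);
        if ( d == NULL )
            return -ESRCH;

        rc = -EINVAL;
        if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
             d->arch.altp2m_active )
            /* Whatever the callee returns (0 or -errno) reaches the caller. */
            rc = p2m_destroy_altp2m_by_id(d, a.view);

        rcu_unlock_domain(d);
        break;
    }
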
> --- a/xen/include/public/hvm/hvm_op.h
> +++ b/xen/include/public/hvm/hvm_op.h
> @@ -389,6 +389,75 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_evtchn_upcall_vector_t);
> 
>  #endif /* defined(__i386__) || defined(__x86_64__) */
> 
> +/* Set/get the altp2m state for a domain */

All of the below is being added outside any __XEN__/__XEN_TOOLS__
section, yet as Andrew noted you don't whitelist the ops for guest
access. This needs to be consistent.

> +#define HVMOP_altp2m_set_domain_state 24
> +#define HVMOP_altp2m_get_domain_state 25
> +struct xen_hvm_altp2m_domain_state {
> +    /* Domain to be updated or queried */
> +    domid_t domid;
> +    /* IN or OUT variable on/off */
> +    uint8_t state;
> +};

And if any of these are to be guest accessible, padding fields should be
made explicit, checked to be zero on input, and cleared to zero on
output if not copying back anyway what the guest has provided.

Jan
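
As a purely illustrative sketch of the padding point (the field name and
width below are made up, and padding the structure out to 4 bytes is
just one possible choice):

    struct xen_hvm_altp2m_domain_state {
        /* Domain to be updated or queried */
        domid_t domid;
        /* IN or OUT variable on/off */
        uint8_t state;
        /* IN: must be zero; OUT: written as zero. */
        uint8_t pad;
    };

    /* Input side (e.g. the set-state handler): */
    if ( copy_from_guest(&a, arg, 1) )
        return -EFAULT;
    if ( a.pad )
        return -EINVAL;

    /* Output side (the get-state handler), when not echoing the input: */
    a.pad = 0;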