Re: [Xen-devel] [PATCH v2 12/13] x86/sysctl: Implement XEN_SYSCTL_get_cpu_policy
On Fri, Jul 13, 2018 at 09:03:13PM +0100, Andrew Cooper wrote:
> diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
> index dd7d8a9..ee3ab09 100644
> --- a/tools/libxc/include/xenctrl.h
> +++ b/tools/libxc/include/xenctrl.h
> @@ -2553,6 +2553,12 @@ int xc_get_cpu_levelling_caps(xc_interface *xch,
>                                uint32_t *caps);
>  int xc_get_cpu_featureset(xc_interface *xch, uint32_t index,
>                            uint32_t *nr_features, uint32_t *featureset);
>  
> +int xc_get_cpu_policy_size(xc_interface *xch, uint32_t *nr_leaves,

Nit: I would do s/nr_leaves/nr_cpuid_leaves/.

> +int xc_get_system_cpu_policy(xc_interface *xch, uint32_t index,
> +                             uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
> +                             uint32_t *nr_msrs, xen_msr_entry_t *msrs)
> +{
> +    struct xen_sysctl sysctl = {};
> +    DECLARE_HYPERCALL_BOUNCE(leaves,
> +                             *nr_leaves * sizeof(*leaves),
> +                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
> +    DECLARE_HYPERCALL_BOUNCE(msrs,
> +                             *nr_msrs * sizeof(*msrs),
> +                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
> +    int ret;
> +
> +    if ( xc_hypercall_bounce_pre(xch, leaves) )
> +        return -1;
> +
> +    if ( xc_hypercall_bounce_pre(xch, msrs) )
> +        return -1;

You can join both in a single if:

if ( xc_hypercall_bounce_pre(xch, leaves) ||
     xc_hypercall_bounce_pre(xch, msrs) )
    return -1;

> +
> +    sysctl.cmd = XEN_SYSCTL_get_cpu_policy;
> +    sysctl.u.cpu_policy.index = index;
> +    sysctl.u.cpu_policy.nr_leaves = *nr_leaves;
> +    set_xen_guest_handle(sysctl.u.cpu_policy.cpuid_policy, leaves);
> +    sysctl.u.cpu_policy.nr_msrs = *nr_msrs;
> +    set_xen_guest_handle(sysctl.u.cpu_policy.msr_policy, msrs);

sysctl can be initialized at declaration time instead of zeroing it and
then setting the fields:

struct xen_sysctl sysctl = {
    .cmd = XEN_SYSCTL_get_cpu_policy,
    .u.cpu_policy.index = index,
    .u.cpu_policy.nr_leaves = *nr_leaves,
    ...
};

> @@ -344,7 +377,55 @@ int main(int argc, char **argv)
>          mode = MODE_INTERPRET;
>      }
>  
> -    if ( mode == MODE_INFO || mode == MODE_DETAIL )
> +    if ( mode == MODE_POLICY )
> +    {
> +        static const char *const sys_policies[] = {
> +            [ XEN_SYSCTL_cpu_policy_raw ]         = "Raw",
> +            [ XEN_SYSCTL_cpu_policy_host ]        = "Host",
> +            [ XEN_SYSCTL_cpu_policy_pv_max ]      = "PV Max",
> +            [ XEN_SYSCTL_cpu_policy_hvm_max ]     = "HVM Max",
> +            [ XEN_SYSCTL_cpu_policy_pv_default ]  = "PV Default",
> +            [ XEN_SYSCTL_cpu_policy_hvm_default ] = "HVM Default",
> +        };
> +        xen_cpuid_leaf_t *leaves;
> +        xen_msr_entry_t *msrs;
> +        uint32_t pol, max_leaves, max_msrs;

pol could be a plain unsigned int, and named i.

> @@ -322,6 +323,76 @@ long arch_do_sysctl(
>          break;
>      }
>  
> +    case XEN_SYSCTL_get_cpu_policy:
> +    {
> +        const struct cpu_policy *policy;
> +
> +        /* Bad policy index? */
> +        if ( sysctl->u.cpu_policy.index >= ARRAY_SIZE(system_policies) )
> +        {
> +            ret = -EINVAL;
> +            break;
> +        }
> +        policy = &system_policies[
> +            array_index_nospec(sysctl->u.cpu_policy.index,
> +                               ARRAY_SIZE(system_policies))];
> +
> +        /* Request for maximum number of leaves/MSRs? */
> +        if ( guest_handle_is_null(sysctl->u.cpu_policy.cpuid_policy) )
> +        {
> +            sysctl->u.cpu_policy.nr_leaves = CPUID_MAX_SERIALISED_LEAVES;
> +            if ( __copy_field_to_guest(u_sysctl, sysctl,
> +                                       u.cpu_policy.nr_leaves) )
> +            {
> +                ret = -EFAULT;
> +                break;
> +            }
> +        }
> +        if ( guest_handle_is_null(sysctl->u.cpu_policy.msr_policy) )
> +        {
> +            sysctl->u.cpu_policy.nr_msrs = MSR_MAX_SERIALISED_ENTRIES;
> +            if ( __copy_field_to_guest(u_sysctl, sysctl,
> +                                       u.cpu_policy.nr_msrs) )
> +            {
> +                ret = -EFAULT;
> +                break;
> +            }
> +        }
> +
> +        /* Serialise the information the caller wants. */
> +        if ( !guest_handle_is_null(sysctl->u.cpu_policy.cpuid_policy) )
> +        {
> +            if ( (ret = x86_cpuid_copy_to_buffer(
> +                      policy->cpuid,
> +                      sysctl->u.cpu_policy.cpuid_policy,
> +                      &sysctl->u.cpu_policy.nr_leaves)) )
> +                break;

You could have this better aligned by first assigning the result value
to ret and then checking for errors.

Roger.
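[Editorial sketch, not part of the original review or the committed patch:
the two libxc suggestions above, the single combined bounce check and the
declaration-time initialization, could come together roughly as below. The
tail of the function (the do_sysctl() call, the bounce_post cleanup and the
write-back of the counts) is not quoted in the review, so that part is an
assumption; set_xen_guest_handle() is an assignment macro, so the guest
handles are still set after the declaration.]

int xc_get_system_cpu_policy(xc_interface *xch, uint32_t index,
                             uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
                             uint32_t *nr_msrs, xen_msr_entry_t *msrs)
{
    /* Declaration-time initialization, as suggested in the review. */
    struct xen_sysctl sysctl = {
        .cmd = XEN_SYSCTL_get_cpu_policy,
        .u.cpu_policy.index = index,
        .u.cpu_policy.nr_leaves = *nr_leaves,
        .u.cpu_policy.nr_msrs = *nr_msrs,
    };
    DECLARE_HYPERCALL_BOUNCE(leaves, *nr_leaves * sizeof(*leaves),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(msrs, *nr_msrs * sizeof(*msrs),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    int ret;

    /* Single combined check, as suggested in the review. */
    if ( xc_hypercall_bounce_pre(xch, leaves) ||
         xc_hypercall_bounce_pre(xch, msrs) )
        return -1;

    /* The handles are assigned, so they cannot go in the initializer. */
    set_xen_guest_handle(sysctl.u.cpu_policy.cpuid_policy, leaves);
    set_xen_guest_handle(sysctl.u.cpu_policy.msr_policy, msrs);

    /* Assumed remainder, mirroring other libxc sysctl wrappers. */
    ret = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, leaves);
    xc_hypercall_bounce_post(xch, msrs);

    if ( !ret )
    {
        *nr_leaves = sysctl.u.cpu_policy.nr_leaves;
        *nr_msrs = sysctl.u.cpu_policy.nr_msrs;
    }

    return ret;
}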
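[Editorial sketch: Roger's final point, assigning the result to ret before
checking it, could restructure the quoted arch_do_sysctl() fragment roughly
as below. Only the portion quoted in the review is shown; the rest of the
case is unchanged.]

        /* Serialise the information the caller wants. */
        if ( !guest_handle_is_null(sysctl->u.cpu_policy.cpuid_policy) )
        {
            /* Assign first, then check, for better alignment. */
            ret = x86_cpuid_copy_to_buffer(
                policy->cpuid, sysctl->u.cpu_policy.cpuid_policy,
                &sysctl->u.cpu_policy.nr_leaves);
            if ( ret )
                break;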