[Xen-devel] [PATCH 10/13] x86/domctl: Implement XEN_DOMCTL_get_cpumsr_policy
From: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>

This finally (after literally years of work!) marks the point where the
toolstack can ask the hypervisor for the current CPUID configuration of a
specific domain.

Also extend xen-cpuid's --policy mode to be able to take a domid and dump a
specific domain's CPUID and MSR policy.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
CC: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
---
 tools/libxc/include/xenctrl.h       |  3 ++
 tools/libxc/xc_cpuid_x86.c          | 40 +++++++++++++++++++++++
 tools/misc/xen-cpuid.c              | 64 +++++++++++++++++++++++++++++++------
 xen/arch/x86/domctl.c               | 46 ++++++++++++++++++++++++++
 xen/include/public/domctl.h         | 18 +++++++++++
 xen/xsm/flask/hooks.c               |  1 +
 xen/xsm/flask/policy/access_vectors |  1 +
 7 files changed, 164 insertions(+), 9 deletions(-)
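
For illustration, a minimal toolstack caller of the new interface might look
like this.  This is a sketch rather than part of the patch: it assumes the
xc_get_cpumsr_policy_size() helper from earlier in the series to size the
buffers, targets domid 1 arbitrarily, and elides cleanup on the error paths:

    #include <err.h>
    #include <stdio.h>
    #include <stdlib.h>

    #include <xenctrl.h>

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        uint32_t nr_leaves, nr_msrs;
        xen_cpuid_leaf_t *leaves;
        xen_msr_entry_t *msrs;

        if ( !xch )
            err(1, "xc_interface_open");

        /* Upper bounds for any policy, as reported by Xen. */
        if ( xc_get_cpumsr_policy_size(xch, &nr_leaves, &nr_msrs) )
            err(1, "xc_get_cpumsr_policy_size");

        leaves = calloc(nr_leaves, sizeof(*leaves));
        msrs = calloc(nr_msrs, sizeof(*msrs));
        if ( !leaves || !msrs )
            err(1, "calloc");

        /* nr_leaves/nr_msrs are IN/OUT: on success they hold the number
         * of entries actually written for the domain. */
        if ( xc_get_domain_cpumsr_policy(xch, 1 /* domid */, &nr_leaves,
                                         leaves, &nr_msrs, msrs) )
            err(1, "xc_get_domain_cpumsr_policy");

        printf("Domain 1: %u leaves, %u MSRs\n", nr_leaves, nr_msrs);

        free(leaves);
        free(msrs);
        xc_interface_close(xch);

        return 0;
    }

The same dump is available from the command line via the extended xen-cpuid
tool, e.g. "xen-cpuid --policy 1" or "xen-cpuid -p 1".
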
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index d1f0925c..15d2b92 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2541,6 +2541,9 @@ int xc_get_cpumsr_policy_size(xc_interface *xch, uint32_t *nr_leaves,
 int xc_get_system_cpumsr_policy(xc_interface *xch, uint32_t index,
                                 uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
                                 uint32_t *nr_msrs, xen_msr_entry_t *msrs);
+int xc_get_domain_cpumsr_policy(xc_interface *xch, uint32_t domid,
+                                uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
+                                uint32_t *nr_msrs, xen_msr_entry_t *msrs);
 
 uint32_t xc_get_cpu_featureset_size(void);
 
diff --git a/tools/libxc/xc_cpuid_x86.c b/tools/libxc/xc_cpuid_x86.c
index ce2a584..efbac77 100644
--- a/tools/libxc/xc_cpuid_x86.c
+++ b/tools/libxc/xc_cpuid_x86.c
@@ -193,6 +193,46 @@ int xc_get_system_cpumsr_policy(xc_interface *xch, uint32_t index,
     return ret;
 }
 
+int xc_get_domain_cpumsr_policy(xc_interface *xch, uint32_t domid,
+                                uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
+                                uint32_t *nr_msrs, xen_msr_entry_t *msrs)
+{
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(leaves,
+                             *nr_leaves * sizeof(*leaves),
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_HYPERCALL_BOUNCE(msrs,
+                             *nr_msrs * sizeof(*msrs),
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    int ret;
+
+    if ( xc_hypercall_bounce_pre(xch, leaves) )
+        return -1;
+
+    if ( xc_hypercall_bounce_pre(xch, msrs) )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_get_cpumsr_policy;
+    domctl.domain = domid;
+    domctl.u.cpumsr_policy.nr_leaves = *nr_leaves;
+    set_xen_guest_handle(domctl.u.cpumsr_policy.cpuid_policy, leaves);
+    domctl.u.cpumsr_policy.nr_msrs = *nr_msrs;
+    set_xen_guest_handle(domctl.u.cpumsr_policy.msr_policy, msrs);
+
+    ret = do_domctl(xch, &domctl);
+
+    xc_hypercall_bounce_post(xch, leaves);
+    xc_hypercall_bounce_post(xch, msrs);
+
+    if ( !ret )
+    {
+        *nr_leaves = domctl.u.cpumsr_policy.nr_leaves;
+        *nr_msrs = domctl.u.cpumsr_policy.nr_msrs;
+    }
+
+    return ret;
+}
+
 struct cpuid_domain_info
 {
     enum
diff --git a/tools/misc/xen-cpuid.c b/tools/misc/xen-cpuid.c
index a5b3004..52a3694 100644
--- a/tools/misc/xen-cpuid.c
+++ b/tools/misc/xen-cpuid.c
@@ -3,6 +3,8 @@
 #include <err.h>
 #include <getopt.h>
 #include <string.h>
+#include <errno.h>
+#include <limits.h>
 
 #include <xenctrl.h>
 
@@ -308,11 +310,13 @@ int main(int argc, char **argv)
 {
     enum { MODE_UNKNOWN, MODE_INFO, MODE_DETAIL, MODE_INTERPRET, MODE_POLICY }
         mode = MODE_UNKNOWN;
+    int domid = -1;
 
     nr_features = xc_get_cpu_featureset_size();
 
     for ( ;; )
     {
+        const char *tmp_optarg;
         int option_index = 0, c;
         static struct option long_options[] =
         {
@@ -320,11 +324,11 @@
             { "info", no_argument, NULL, 'i' },
             { "detail", no_argument, NULL, 'd' },
             { "verbose", no_argument, NULL, 'v' },
-            { "policy", no_argument, NULL, 'p' },
+            { "policy", optional_argument, NULL, 'p' },
             { NULL, 0, NULL, 0 },
         };
 
-        c = getopt_long(argc, argv, "hidvp", long_options, &option_index);
+        c = getopt_long(argc, argv, "hidvp::", long_options, &option_index);
 
         if ( c == -1 )
             break;
@@ -344,6 +348,28 @@
 
         case 'p':
             mode = MODE_POLICY;
+
+            tmp_optarg = optarg;
+
+            /* Make "--policy $DOMID" and "-p $DOMID" work. */
+            if ( !optarg && optind < argc &&
+                 argv[optind] != NULL && argv[optind][0] != '\0' &&
+                 argv[optind][0] != '-' )
+                tmp_optarg = argv[optind++];
+
+            if ( tmp_optarg )
+            {
+                char *endptr;
+
+                errno = 0;
+                domid = strtol(tmp_optarg, &endptr, 0);
+
+                if ( (errno == ERANGE &&
+                      (domid == LONG_MAX || domid == LONG_MIN)) ||
+                     (errno != 0 && domid == 0) ||
+                     endptr == tmp_optarg )
+                    err(1, "strtol(%s,,)", tmp_optarg);
+            }
             break;
 
         case 'd':
@@ -397,8 +423,9 @@ int main(int argc, char **argv)
         if ( xc_get_cpumsr_policy_size(xch, &max_leaves, &max_msrs) )
             err(1, "xc_get_cpumsr_policy_size(...)");
 
-        printf("Xen reports there are maximum %u leaves and %u MSRs\n",
-               max_leaves, max_msrs);
+        if ( domid == -1 )
+            printf("Xen reports there are maximum %u leaves and %u MSRs\n",
+                   max_leaves, max_msrs);
 
         leaves = calloc(max_leaves, sizeof(xen_cpuid_leaf_t));
         if ( !leaves )
@@ -407,17 +434,36 @@ int main(int argc, char **argv)
         if ( !msrs )
             err(1, "calloc(max_msrs)");
 
-        for ( pol = 0; pol < ARRAY_SIZE(sys_policies); ++pol )
+        if ( domid != -1 )
         {
+            char name[20];
             uint32_t nr_leaves = max_leaves;
             uint32_t nr_msrs = max_msrs;
 
-            if ( xc_get_system_cpumsr_policy(xch, pol, &nr_leaves, leaves,
+            if ( xc_get_domain_cpumsr_policy(xch, domid, &nr_leaves, leaves,
                                              &nr_msrs, msrs) )
-                err(1, "xc_get_system_cpumsr_policy(, %s,,)",
-                    sys_policies[pol]);
+                err(1, "xc_get_domain_cpumsr_policy(, %d, %d,, %d,)",
+                    domid, nr_leaves, nr_msrs);
 
-            print_policy(sys_policies[pol], leaves, nr_leaves, msrs, nr_msrs);
+            snprintf(name, sizeof(name), "Domain %d", domid);
+            print_policy(name, leaves, nr_leaves, msrs, nr_msrs);
+        }
+        else
+        {
+            /* Get system policies */
+            for ( pol = 0; pol < ARRAY_SIZE(sys_policies); ++pol )
+            {
+                uint32_t nr_leaves = max_leaves;
+                uint32_t nr_msrs = max_msrs;
+
+                if ( xc_get_system_cpumsr_policy(xch, pol, &nr_leaves, leaves,
+                                                 &nr_msrs, msrs) )
+                    err(1, "xc_get_system_cpumsr_policy(, %s,,)",
+                        sys_policies[pol]);
+
+                print_policy(sys_policies[pol], leaves, nr_leaves,
+                             msrs, nr_msrs);
+            }
         }
 
         free(leaves);
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 3e9580b..8b48349 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1524,6 +1524,52 @@ long arch_do_domctl(
         recalculate_cpuid_policy(d);
         break;
 
+    case XEN_DOMCTL_get_cpumsr_policy:
+        if ( d == currd || /* No domain_pause() */
+             d->max_vcpus == 0 ) /* No vcpus yet. */
+        {
+            ret = -EINVAL;
+            break;
+        }
+
+        domain_pause(d);
+
+        if ( !guest_handle_is_null(domctl->u.cpumsr_policy.cpuid_policy) )
+        {
+            if ( (ret = x86_cpuid_copy_to_buffer(
+                      d->arch.cpuid,
+                      domctl->u.cpumsr_policy.cpuid_policy,
+                      &domctl->u.cpumsr_policy.nr_leaves)) )
+                goto get_cpumsr_policy_out;
+
+            if ( __copy_field_to_guest(u_domctl, domctl,
+                                       u.cpumsr_policy.nr_leaves) )
+            {
+                ret = -EFAULT;
+                goto get_cpumsr_policy_out;
+            }
+        }
+
+        if ( !guest_handle_is_null(domctl->u.cpumsr_policy.msr_policy) )
+        {
+            if ( (ret = x86_msr_copy_to_buffer(
+                      d->arch.msr, d->vcpu[0]->arch.msr,
+                      domctl->u.cpumsr_policy.msr_policy,
+                      &domctl->u.cpumsr_policy.nr_msrs)) )
+                goto get_cpumsr_policy_out;
+
+            if ( __copy_field_to_guest(u_domctl, domctl,
+                                       u.cpumsr_policy.nr_msrs) )
+            {
+                ret = -EFAULT;
+                goto get_cpumsr_policy_out;
+            }
+        }
+
+    get_cpumsr_policy_out:
+        domain_unpause(d);
+        break;
+
     default:
         ret = iommu_do_domctl(domctl, d, u_domctl);
         break;
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 0535da8..1ca41bd 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -635,6 +635,22 @@ struct xen_domctl_cpuid {
     uint32_t ecx;
     uint32_t edx;
 };
+
+/*
+ * XEN_DOMCTL_{get,set}_cpumsr_policy (x86 specific)
+ *
+ * Query or set the CPUID and MSR policies for a specific domain.
+ */
+struct xen_domctl_cpumsr_policy {
+    uint32_t nr_leaves; /* IN/OUT: Number of leaves in/written to
+                         * 'cpuid_policy'. */
+    uint32_t nr_msrs;   /* IN/OUT: Number of MSRs in/written to
+                         * 'msr_policy'. */
+    XEN_GUEST_HANDLE_64(xen_cpuid_leaf_t) cpuid_policy; /* IN/OUT */
+    XEN_GUEST_HANDLE_64(xen_msr_entry_t) msr_policy;    /* IN/OUT */
+};
+typedef struct xen_domctl_cpumsr_policy xen_domctl_cpumsr_policy_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpumsr_policy_t);
 #endif
 
 /*
@@ -1172,6 +1188,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_soft_reset                    79
 #define XEN_DOMCTL_set_gnttab_limits             80
 #define XEN_DOMCTL_vuart_op                      81
+#define XEN_DOMCTL_get_cpumsr_policy             82
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -1216,6 +1233,7 @@ struct xen_domctl {
         struct xen_domctl_mem_sharing_op    mem_sharing_op;
 #if defined(__i386__) || defined(__x86_64__)
         struct xen_domctl_cpuid             cpuid;
+        struct xen_domctl_cpumsr_policy     cpumsr_policy;
         struct xen_domctl_vcpuextstate      vcpuextstate;
         struct xen_domctl_vcpu_msrs         vcpu_msrs;
 #endif
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 1d30b0e..245fcfd 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -717,6 +717,7 @@ static int flask_domctl(struct domain *d, int cmd)
         return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__SET_VIRQ_HANDLER);
 
     case XEN_DOMCTL_set_cpuid:
+    case XEN_DOMCTL_get_cpumsr_policy:
        return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_CPUID);
 
     case XEN_DOMCTL_gettscinfo:
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index b5bc7a2..c92bea3 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -213,6 +213,7 @@ class domain2
 # target = the new target domain
     set_as_target
 # XEN_DOMCTL_set_cpuid
+# XEN_DOMCTL_get_cpumsr_policy
     set_cpuid
 # XEN_DOMCTL_gettscinfo
     gettsc
-- 
2.1.4