[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v12 6/9] x86: collect global QoS monitoring information
This implementation tries to put all policies into user space, thus some global QoS monitoring information needs to be exposed, such as the total RMID count, L3 upscaling factor, etc. Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx> --- xen/arch/x86/sysctl.c | 56 +++++++++++++++++++++++++++++++++++++++++++ xen/include/public/sysctl.h | 14 +++++++++++ 2 files changed, 70 insertions(+) diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c index 15d4b91..0a21ad2 100644 --- a/xen/arch/x86/sysctl.c +++ b/xen/arch/x86/sysctl.c @@ -28,6 +28,7 @@ #include <xen/nodemask.h> #include <xen/cpu.h> #include <xsm/xsm.h> +#include <asm/pqos.h> #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) @@ -70,6 +71,7 @@ long arch_do_sysctl( struct xen_sysctl *sysctl, XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl) { long ret = 0; + bool_t copyback = 0; switch ( sysctl->cmd ) { @@ -101,11 +103,65 @@ long arch_do_sysctl( } break; + case XEN_SYSCTL_pqos_monitor_op: + if ( !pqos_monitor_enabled() ) + return -ENODEV; + + switch ( sysctl->u.pqos_monitor_op.cmd ) + { + case XEN_SYSCTL_PQOS_MONITOR_cqm_enabled: + sysctl->u.pqos_monitor_op.data = + (pqosm->qm_features & QOS_MONITOR_TYPE_L3) && + (pqosm->l3m.l3_features & L3_FEATURE_OCCUPANCY); + break; + case XEN_SYSCTL_PQOS_MONITOR_get_total_rmid: + sysctl->u.pqos_monitor_op.data = + pqosm->rmid_max - pqosm->rmid_min + 1; + break; + case XEN_SYSCTL_PQOS_MONITOR_get_l3_upscaling_factor: + sysctl->u.pqos_monitor_op.data = pqosm->l3m.upscaling_factor; + break; + case XEN_SYSCTL_PQOS_MONITOR_get_l3_cache_size: + sysctl->u.pqos_monitor_op.data = boot_cpu_data.x86_cache_size; + break; + case XEN_SYSCTL_PQOS_MONITOR_get_socket_cpu: + { + unsigned int i, cpu; + unsigned int socket = sysctl->u.pqos_monitor_op.data; + + for ( i = 0; i < nr_cpu_ids; i++ ) + { + if ( cpu_to_socket(i) < 0 || cpu_to_socket(i) != socket ) + continue; + cpu = cpumask_any(per_cpu(cpu_core_mask, i)); + if ( cpu < nr_cpu_ids ) + { + sysctl->u.pqos_monitor_op.data = cpu; + break; + } + } + + if ( i == nr_cpu_ids ) + ret = -ESRCH; + } + break; + + default: + sysctl->u.pqos_monitor_op.data = 0; + ret = -ENOSYS; + break; + } + copyback = 1; + break; + default: ret = -ENOSYS; break; } + if ( copyback && __copy_to_guest(u_sysctl, sysctl, 1) ) + ret = -EFAULT; + return ret; } diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h index 3588698..5ec1212 100644 --- a/xen/include/public/sysctl.h +++ b/xen/include/public/sysctl.h @@ -636,6 +636,18 @@ struct xen_sysctl_coverage_op { typedef struct xen_sysctl_coverage_op xen_sysctl_coverage_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_coverage_op_t); +#define XEN_SYSCTL_PQOS_MONITOR_get_total_rmid 0 +#define XEN_SYSCTL_PQOS_MONITOR_get_l3_upscaling_factor 1 +#define XEN_SYSCTL_PQOS_MONITOR_get_l3_cache_size 2 +#define XEN_SYSCTL_PQOS_MONITOR_get_socket_cpu 3 +#define XEN_SYSCTL_PQOS_MONITOR_cqm_enabled 4 +struct xen_sysctl_pqos_monitor_op { + uint32_t cmd; + uint64_aligned_t data; +}; +typedef struct xen_sysctl_pqos_monitor_op xen_sysctl_pqos_monitor_op_t; +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_pqos_monitor_op_t); + struct xen_sysctl { uint32_t cmd; @@ -658,6 +670,7 @@ struct xen_sysctl { #define XEN_SYSCTL_cpupool_op 18 #define XEN_SYSCTL_scheduler_op 19 #define XEN_SYSCTL_coverage_op 20 +#define XEN_SYSCTL_pqos_monitor_op 21 uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */ union { struct xen_sysctl_readconsole readconsole; @@ -679,6 +692,7 @@ struct xen_sysctl { struct xen_sysctl_cpupool_op cpupool_op; struct xen_sysctl_scheduler_op scheduler_op; struct xen_sysctl_coverage_op coverage_op; + struct xen_sysctl_pqos_monitor_op pqos_monitor_op; uint8_t pad[128]; } u; }; -- 1.7.9.5 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |