Re: [Xen-devel] [RFC PATCH v3 11/12] xen: arm: implement XEN_SYSCTL_cpufreq_op
On Sun, Oct 26, 2014 at 7:41 PM, Stefano Stabellini
<stefano.stabellini@xxxxxxxxxxxxx> wrote:
> On Thu, 23 Oct 2014, Oleksandr Dmytryshyn wrote:
>> Kernel uses this op to get some parameters for the
>> xen-cpufreq driver to change CPUs frequency.
>
> The commit message doesn't describe many of the changes introduced by
> this patch.
> For example it fails to mention that it is
> introducing notify_cpufreq_domains.
>
> We need a better commit message.
I'll extend the commit message in the next patch-set.
> Also doesn't notify_cpufreq_domains belong to the previous patch anyway?
I'll move this function to the previous patch in the next patch-set.
>> Signed-off-by: Oleksandr Dmytryshyn <oleksandr.dmytryshyn@xxxxxxxxxxxxxxx>
>> ---
>> xen/common/sysctl.c | 8 ++++++
>> xen/drivers/cpufreq/hwdom-cpufreq.c | 52 +++++++++++++++++++++++++++++++++++++
>> xen/include/public/sysctl.h | 19 ++++++++++++++
>> xen/include/xen/cpufreq.h | 2 ++
>> 4 files changed, 81 insertions(+)
>>
>> diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
>> index 0dcf06a..fd0cd0d 100644
>> --- a/xen/common/sysctl.c
>> +++ b/xen/common/sysctl.c
>> @@ -27,6 +27,7 @@
>> #include <xsm/xsm.h>
>> #include <xen/pmstat.h>
>> #include <xen/gcov.h>
>> +#include <xen/cpufreq.h>
>>
>> long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
>> {
>> @@ -362,6 +363,13 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
>> break;
>> #endif
>>
>> +#ifdef HAS_HWDOM_CPUFREQ
>> + case XEN_SYSCTL_cpufreq_op:
>> + ret = sysctl_cpufreq_op(&op->u.cpufreq_op);
>> + copyback = 1;
>> + break;
>> +#endif
>> +
>> default:
>> ret = arch_do_sysctl(op, u_sysctl);
>> copyback = 0;
>> diff --git a/xen/drivers/cpufreq/hwdom-cpufreq.c b/xen/drivers/cpufreq/hwdom-cpufreq.c
>> index 67c9e1d..cc97f37 100644
>> --- a/xen/drivers/cpufreq/hwdom-cpufreq.c
>> +++ b/xen/drivers/cpufreq/hwdom-cpufreq.c
>> @@ -34,12 +34,53 @@ struct hwdom_cpufreq_data {
>> };
>>
>> static struct hwdom_cpufreq_data *hwdom_cpufreq_drv_data[NR_CPUS];
>> +static DEFINE_SPINLOCK(sysctl_cpufreq_lock);
>> +
>> +struct sysctl_cpufreq_data {
>> + uint32_t cpu;
>> + uint32_t freq;
>> + uint32_t relation;
>> + int32_t result;
>> +};
>> +
>> +static struct sysctl_cpufreq_data sysctl_cpufreq_data;
>>
>> int cpufreq_cpu_init(unsigned int cpuid)
>> {
>> return cpufreq_add_cpu(cpuid);
>> }
>>
>> +static void notify_cpufreq_domains(void)
>> +{
>> + send_global_virq(VIRQ_CPUFREQ);
>> +}
>> +
>> +int sysctl_cpufreq_op(xen_sysctl_cpufreq_op_t *op)
>> +{
>> + int ret = 0;
>> + switch ( op->cmd )
>> + {
>> + case XEN_SYSCTL_CPUFREQ_get_target:
>> + spin_lock(&sysctl_cpufreq_lock);
>> + op->u.target.cpu = sysctl_cpufreq_data.cpu;
>> + op->u.target.freq = sysctl_cpufreq_data.freq;
>> + op->u.target.relation = sysctl_cpufreq_data.relation;
>> + spin_unlock(&sysctl_cpufreq_lock);
>> + break;
>> +
>> + case XEN_SYSCTL_CPUFREQ_set_result:
>> + spin_lock(&sysctl_cpufreq_lock);
>> + sysctl_cpufreq_data.result = op->u.result;
>> + spin_unlock(&sysctl_cpufreq_lock);
>> + break;
>> +
>> + default:
>> + return -EOPNOTSUPP;
>> + break;
>> + }
>> + return ret;
>> +}
>> +
>> static int hwdom_cpufreq_verify(struct cpufreq_policy *policy)
>> {
>> struct hwdom_cpufreq_data *data;
>> @@ -97,6 +138,17 @@ static int hwdom_cpufreq_target(struct cpufreq_policy *policy,
>> freqs.old = perf->states[perf->state].core_frequency * 1000;
>> freqs.new = data->freq_table[next_state].frequency;
>>
>> + /* Do send cmd for Dom0 */
>> + spin_lock(&sysctl_cpufreq_lock);
>> + /* return previous result */
>> + ret = sysctl_cpufreq_data.result;
>> +
>> + sysctl_cpufreq_data.cpu = policy->cpu;
>> + sysctl_cpufreq_data.freq = freqs.new;
>> + sysctl_cpufreq_data.relation = (uint32_t)relation;
>> + spin_unlock(&sysctl_cpufreq_lock);
>> + notify_cpufreq_domains();
>> +
>> for_each_cpu( j, &online_policy_cpus )
>> cpufreq_statistic_update(j, perf->state, next_perf_state);
>>
>> diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
>> index 8437d31..ecd4674 100644
>> --- a/xen/include/public/sysctl.h
>> +++ b/xen/include/public/sysctl.h
>> @@ -632,6 +632,23 @@ struct xen_sysctl_coverage_op {
>> typedef struct xen_sysctl_coverage_op xen_sysctl_coverage_op_t;
>> DEFINE_XEN_GUEST_HANDLE(xen_sysctl_coverage_op_t);
>>
>> +#define XEN_SYSCTL_CPUFREQ_get_target 0
>> +#define XEN_SYSCTL_CPUFREQ_set_result 1
>> +
>> +struct xen_sysctl_cpufreq_op {
>> + uint32_t cmd;
>> + union {
>> + struct {
>> + uint32_t cpu;
>> + uint32_t freq;
>> + uint32_t relation;
>> + } target;
>> + uint32_t result;
>> + } u;
>> +};
>> +typedef struct xen_sysctl_cpufreq_op xen_sysctl_cpufreq_op_t;
>> +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpufreq_op_t);
>> +
>>
>> struct xen_sysctl {
>> uint32_t cmd;
>> @@ -654,6 +671,7 @@ struct xen_sysctl {
>> #define XEN_SYSCTL_cpupool_op 18
>> #define XEN_SYSCTL_scheduler_op 19
>> #define XEN_SYSCTL_coverage_op 20
>> +#define XEN_SYSCTL_cpufreq_op 21
>> uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
>> union {
>> struct xen_sysctl_readconsole readconsole;
>> @@ -675,6 +693,7 @@ struct xen_sysctl {
>> struct xen_sysctl_cpupool_op cpupool_op;
>> struct xen_sysctl_scheduler_op scheduler_op;
>> struct xen_sysctl_coverage_op coverage_op;
>> + struct xen_sysctl_cpufreq_op cpufreq_op;
>> uint8_t pad[128];
>> } u;
>> };
>> diff --git a/xen/include/xen/cpufreq.h b/xen/include/xen/cpufreq.h
>> index d7b6c34..0c8c19d 100644
>> --- a/xen/include/xen/cpufreq.h
>> +++ b/xen/include/xen/cpufreq.h
>> @@ -264,4 +264,6 @@ int write_userspace_scaling_setspeed(unsigned int cpu, unsigned int freq);
>> void cpufreq_dbs_timer_suspend(void);
>> void cpufreq_dbs_timer_resume(void);
>>
>> +int sysctl_cpufreq_op(xen_sysctl_cpufreq_op_t *op);
>> +
>> #endif /* __XEN_CPUFREQ_PM_H__ */
>> --
>> 1.9.1
>>
>>
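For reference, this is roughly how the hardware domain (Dom0) side is expected to
consume the new op: wait for VIRQ_CPUFREQ, fetch the pending request with
XEN_SYSCTL_CPUFREQ_get_target, program the clock, then report the outcome with
XEN_SYSCTL_CPUFREQ_set_result. The sketch below is illustrative only and not part
of this patch; do_cpufreq_sysctl() and set_cpu_frequency() are placeholders for the
kernel's actual hypercall and clock-framework plumbing.

/*
 * Untested illustration only: Dom0-side handler run when VIRQ_CPUFREQ fires.
 * do_cpufreq_sysctl() stands in for the real XEN_SYSCTL_cpufreq_op hypercall
 * wrapper; set_cpu_frequency() stands in for the kernel's clock code.
 */
static int handle_virq_cpufreq(void)
{
    struct xen_sysctl_cpufreq_op op;
    int err;

    /* 1. Fetch the request stored by hwdom_cpufreq_target() in Xen. */
    op.cmd = XEN_SYSCTL_CPUFREQ_get_target;
    err = do_cpufreq_sysctl(&op);
    if ( err )
        return err;

    /* 2. Program the clock for the requested CPU/frequency/relation. */
    err = set_cpu_frequency(op.u.target.cpu, op.u.target.freq,
                            op.u.target.relation);

    /* 3. Report the outcome; Xen hands it back on the next target request. */
    op.cmd = XEN_SYSCTL_CPUFREQ_set_result;
    op.u.result = err;
    return do_cpufreq_sysctl(&op);
}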
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel