|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v5 16/18] tools: drop "has_num" condition check for cppc mode
In `xenpm get-cpufreq-para <cpuid>`, the ->freq_num and ->cpu_num checks are
tied together via the variable "has_num", while ->freq_num only has a non-zero
value when the cpufreq driver is in legacy P-states mode.
So we drop the "has_num" condition check and instead mirror the ->gov_num
check for both ->freq_num and ->cpu_num in xc_get_cpufreq_para().
Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx>
---
v3 -> v4:
- drop the "has_num" condition check
---
v4 -> v5:
- refactor title and commit
- make all three pieces (xc_hypercall_bounce_pre()) be as similar as possible
---
tools/libs/ctrl/xc_pm.c | 43 +++++++++++++++++++++++------------------
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/tools/libs/ctrl/xc_pm.c b/tools/libs/ctrl/xc_pm.c
index 3c9e272aee..cdc072e757 100644
--- a/tools/libs/ctrl/xc_pm.c
+++ b/tools/libs/ctrl/xc_pm.c
@@ -212,34 +212,41 @@ int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_governors,
user_para->scaling_available_governors,
user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char),
XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
- bool has_num = user_para->cpu_num && user_para->freq_num;
- if ( has_num )
+ if ( (user_para->cpu_num && !user_para->affected_cpus) ||
+ (user_para->freq_num && !user_para->scaling_available_frequencies) ||
+ (user_para->gov_num && !user_para->scaling_available_governors) )
+ {
+ errno = EINVAL;
+ return -1;
+ }
+ if ( user_para->cpu_num )
{
- if ( (!user_para->affected_cpus) ||
- (!user_para->scaling_available_frequencies) ||
- (user_para->gov_num && !user_para->scaling_available_governors) )
- {
- errno = EINVAL;
- return -1;
- }
ret = xc_hypercall_bounce_pre(xch, affected_cpus);
if ( ret )
return ret;
+ }
+ if ( user_para->freq_num )
+ {
ret = xc_hypercall_bounce_pre(xch, scaling_available_frequencies);
if ( ret )
goto unlock_2;
- if ( user_para->gov_num )
- ret = xc_hypercall_bounce_pre(xch, scaling_available_governors);
+ }
+ if ( user_para->gov_num )
+ {
+ ret = xc_hypercall_bounce_pre(xch, scaling_available_governors);
if ( ret )
goto unlock_3;
+ }
+ if ( user_para->cpu_num )
set_xen_guest_handle(sys_para->affected_cpus, affected_cpus);
- set_xen_guest_handle(sys_para->scaling_available_frequencies,
scaling_available_frequencies);
- if ( user_para->gov_num )
- set_xen_guest_handle(sys_para->scaling_available_governors,
- scaling_available_governors);
- }
+ if ( user_para->freq_num )
+ set_xen_guest_handle(sys_para->scaling_available_frequencies,
+ scaling_available_frequencies);
+ if ( user_para->gov_num )
+ set_xen_guest_handle(sys_para->scaling_available_governors,
+ scaling_available_governors);
sysctl.cmd = XEN_SYSCTL_pm_op;
sysctl.u.pm_op.cmd = GET_CPUFREQ_PARA;
@@ -258,9 +265,7 @@ int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
user_para->gov_num = sys_para->gov_num;
}
- if ( has_num )
- goto unlock_4;
- return ret;
+ goto unlock_4;
}
else
{
--
2.34.1
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |