[Xen-devel] [PATCH v1 4/4] xl: enable xl to set per-VCPU parameters of a domain for the RTDS scheduler
Change main_sched_rtds and the related output functions to support per-VCPU settings
for the xl sched-rtds tool.
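For illustration, a possible invocation with the new options (the domain name and
parameter values here are hypothetical; each -v/-p/-b triple supplies one VCPU's
period and budget in microseconds, as parsed below) would be:

    xl sched-rtds -d vm1 -v 0 -p 10000 -b 4000 -v 1 -p 20000 -b 8000

Listing without -v/-p/-b prints one line per VCPU using the new
"Name ID VCPU Period Budget" columns.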
Signed-off-by: Chong Li <chong.li@xxxxxxxxx>
Signed-off-by: Meng Xu <mengxu@xxxxxxxxxxxxx>
Signed-off-by: Sisu Xi <xisisu@xxxxxxxxx>
---
tools/libxl/xl_cmdimpl.c | 131 +++++++++++++++++++++++++++++++++++++----------
1 file changed, 103 insertions(+), 28 deletions(-)
diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
index 648ca08..1e9b0d8 100644
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -5709,9 +5709,11 @@ static int sched_rtds_domain_output(
char *domname;
libxl_domain_sched_params scinfo;
int rc = 0;
+ int i;
- if (domid < 0) {
- printf("%-33s %4s %9s %9s\n", "Name", "ID", "Period", "Budget");
+ if (domid < 0) {
+ printf("%-33s %4s %4s %9s %9s\n", "Name", "ID",
+ "VCPU", "Period", "Budget");
return 0;
}
@@ -5721,11 +5723,14 @@ static int sched_rtds_domain_output(
goto out;
domname = libxl_domid_to_name(ctx, domid);
- printf("%-33s %4d %9d %9d\n",
- domname,
- domid,
- scinfo.period,
- scinfo.budget);
+ for (i = 0; i < scinfo.rtds.num_vcpus; i++) {
+ printf("%-33s %4d %4d %9"PRIu64" %9"PRIu64"\n",
+ domname,
+ domid,
+ scinfo.rtds.vcpus[i].index,
+ scinfo.rtds.vcpus[i].period,
+ scinfo.rtds.vcpus[i].budget);
+ }
free(domname);
out:
@@ -5744,6 +5749,7 @@ static int sched_rtds_pool_output(uint32_t poolid)
return 0;
}
+
static int sched_default_pool_output(uint32_t poolid)
{
char *poolname;
@@ -6120,38 +6126,87 @@ int main_sched_rtds(int argc, char **argv)
{
const char *dom = NULL;
const char *cpupool = NULL;
- int period = 0; /* period is in microsecond */
- int budget = 0; /* budget is in microsecond */
+
+ int vcpus[LIBXL_XEN_LEGACY_MAX_VCPUS]; /* indices of VCPUs that change */
+ int periods[LIBXL_XEN_LEGACY_MAX_VCPUS]; /* periods, in microseconds */
+ int budgets[LIBXL_XEN_LEGACY_MAX_VCPUS]; /* budgets, in microseconds */
+ int index = 0; /* index into the arrays above */
+ bool flag_b = false;
+ bool flag_p = false;
+ bool flag_v = false;
bool opt_p = false;
bool opt_b = false;
- int opt, rc;
+ bool opt_v = false;
+ int opt, rc, i;
static struct option opts[] = {
{"domain", 1, 0, 'd'},
{"period", 1, 0, 'p'},
{"budget", 1, 0, 'b'},
+ {"vcpu",1, 0, 'v'},
{"cpupool", 1, 0, 'c'},
COMMON_LONG_OPTS,
{0, 0, 0, 0}
};
- SWITCH_FOREACH_OPT(opt, "d:p:b:c:h", opts, "sched-rtds", 0) {
+ SWITCH_FOREACH_OPT(opt, "d:p:b:v:c:h", opts, "sched-rtds", 0) {
case 'd':
dom = optarg;
break;
case 'p':
- period = strtol(optarg, NULL, 10);
+ periods[index] = strtol(optarg, NULL, 10);
opt_p = 1;
+ if (flag_p == 1) { /* budget or vcpuID is missing */
+ fprintf(stderr, "Must specify period, budget and vcpuID\n");
+ return 1;
+ }
+ flag_p = 1;
+ if (flag_p && flag_b && flag_v) {
+ /*
+ * Get one complete set of per-VCPU parameters
+ * (period, budget, vcpuID).
+ */
+ flag_p = 0;
+ flag_b = 0;
+ flag_v = 0;
+ index++;
+ }
break;
case 'b':
- budget = strtol(optarg, NULL, 10);
+ budgets[index] = strtol(optarg, NULL, 10);
opt_b = 1;
+ if (flag_b == 1) { /* period or vcpuID is missing */
+ fprintf(stderr, "Must specify period, budget and vcpuID\n");
+ return 1;
+ }
+ flag_b = 1;
+ if (flag_p && flag_b && flag_v) {
+ flag_p = 0;
+ flag_b = 0;
+ flag_v = 0;
+ index++;
+ }
+ break;
+ case 'v':
+ vcpus[index] = strtol(optarg, NULL, 10);
+ opt_v = 1;
+ if (flag_v == 1) { /* period or budget is missing */
+ fprintf(stderr, "Must specify period, budget and vcpuID\n");
+ return 1;
+ }
+ flag_v = 1;
+ if (flag_p && flag_b && flag_v) {
+ flag_p = 0;
+ flag_b = 0;
+ flag_v = 0;
+ index++;
+ }
break;
case 'c':
cpupool = optarg;
break;
}
- if (cpupool && (dom || opt_p || opt_b)) {
+ if (cpupool && (dom || opt_p || opt_b || opt_v)) {
fprintf(stderr, "Specifying a cpupool is not allowed with "
"other options.\n");
return 1;
@@ -6164,29 +6219,49 @@ int main_sched_rtds(int argc, char **argv)
fprintf(stderr, "Must specify period and budget\n");
return 1;
}
+ if (opt_v && (flag_b|| flag_v || flag_p)) {
+ fprintf(stderr, "Must specify period and budget and vcpuID\n");
+ return 1;
+ }
- if (!dom) { /* list all domain's rt scheduler info */
+ if (!dom) { /* list all domains' rtds scheduler info */
return -sched_domain_output(LIBXL_SCHEDULER_RTDS,
sched_rtds_domain_output,
sched_rtds_pool_output,
cpupool);
} else {
uint32_t domid = find_domain(dom);
- if (!opt_p && !opt_b) { /* output rt scheduler info */
+ if (!opt_p && !opt_b && !opt_v) { /* output rtds scheduler info */
sched_rtds_domain_output(-1);
return -sched_rtds_domain_output(domid);
- } else { /* set rt scheduler paramaters */
- libxl_domain_sched_params scinfo;
- libxl_domain_sched_params_init(&scinfo);
- scinfo.sched = LIBXL_SCHEDULER_RTDS;
- scinfo.period = period;
- scinfo.budget = budget;
-
- rc = sched_domain_set(domid, &scinfo);
- libxl_domain_sched_params_dispose(&scinfo);
- if (rc)
- return -rc;
- }
+ } else if (opt_v) { /* set per-vcpu rtds scheduler parameters */
+ libxl_domain_sched_params scinfo;
+ libxl_domain_sched_params_init(&scinfo);
+ scinfo.sched = LIBXL_SCHEDULER_RTDS;
+ scinfo.rtds.num_vcpus = index;
+ scinfo.rtds.vcpus = malloc(sizeof(libxl_vcpu) * index);
+ for (i = 0; i < index; i++) {
+ scinfo.rtds.vcpus[i].index = vcpus[i];
+ scinfo.rtds.vcpus[i].period = periods[i];
+ scinfo.rtds.vcpus[i].budget = budgets[i];
+ }
+ rc = sched_domain_set(domid, &scinfo);
+ libxl_domain_sched_params_dispose(&scinfo);
+ if (rc)
+ return -rc;
+ } else { /* set per-dom rtds scheduler parameters */
+ libxl_domain_sched_params scinfo;
+ libxl_domain_sched_params_init(&scinfo);
+ scinfo.sched = LIBXL_SCHEDULER_RTDS;
+ scinfo.rtds.num_vcpus = 0;
+ scinfo.period = periods[0];
+ scinfo.budget = budgets[0];
+ rc = sched_domain_set(domid, &scinfo);
+ libxl_domain_sched_params_dispose(&scinfo);
+ if (rc)
+ return -rc;
+ }
}
return 0;
--
1.9.1