[Xen-devel] [PATCH 2/2] public/sysctl: drop unnecessary typedefs and handles
By virtue of the struct xen_sysctl container structure, most of these
per-operation typedefs and guest handles really just clutter the name space.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
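Why the typedefs are dispensable can be seen from how the sub-ops are reached
in practice: every caller already goes through the struct xen_sysctl container,
whose union members carry the full struct types. A minimal sketch (hypothetical
helper, not part of the patch, modelled on the xen/common/sysctl.c hunk further
down):

    #include <xen/string.h>    /* memset() */
    #include <xen/sched.h>     /* arch_do_physinfo() declaration */
    #include <public/sysctl.h> /* struct xen_sysctl and sub-op structs */

    /* Hypothetical helper: fill the physinfo sub-op of a sysctl request. */
    static void fill_physinfo(struct xen_sysctl *op)
    {
        /*
         * The union member is already fully typed, so no separate
         * xen_sysctl_physinfo_t typedef is needed to name it.
         */
        struct xen_sysctl_physinfo *pi = &op->u.physinfo;

        memset(pi, 0, sizeof(*pi));
        arch_do_physinfo(pi);
    }
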
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1212,11 +1212,11 @@ int xc_readconsolering(xc_interface *xch
int xc_send_debug_keys(xc_interface *xch, char *keys);
int xc_set_parameters(xc_interface *xch, char *params);
-typedef xen_sysctl_physinfo_t xc_physinfo_t;
-typedef xen_sysctl_cputopo_t xc_cputopo_t;
-typedef xen_sysctl_numainfo_t xc_numainfo_t;
-typedef xen_sysctl_meminfo_t xc_meminfo_t;
-typedef xen_sysctl_pcitopoinfo_t xc_pcitopoinfo_t;
+typedef struct xen_sysctl_physinfo xc_physinfo_t;
+typedef struct xen_sysctl_cputopo xc_cputopo_t;
+typedef struct xen_sysctl_numainfo xc_numainfo_t;
+typedef struct xen_sysctl_meminfo xc_meminfo_t;
+typedef struct xen_sysctl_pcitopoinfo xc_pcitopoinfo_t;
typedef uint32_t xc_cpu_to_node_t;
typedef uint32_t xc_cpu_to_socket_t;
@@ -1240,7 +1240,7 @@ int xc_machphys_mfn_list(xc_interface *x
unsigned long max_extents,
xen_pfn_t *extent_start);
-typedef xen_sysctl_cpuinfo_t xc_cpuinfo_t;
+typedef struct xen_sysctl_cpuinfo xc_cpuinfo_t;
int xc_getcpuinfo(xc_interface *xch, int max_cpus,
xc_cpuinfo_t *info, int *nr_cpus);
@@ -1853,8 +1853,8 @@ int xc_cpu_offline(xc_interface *xch, in
* cpufreq para name of this structure named
* same as sysfs file name of native linux
*/
-typedef xen_userspace_t xc_userspace_t;
-typedef xen_ondemand_t xc_ondemand_t;
+typedef struct xen_userspace xc_userspace_t;
+typedef struct xen_ondemand xc_ondemand_t;
struct xc_get_cpufreq_para {
/* IN/OUT variable */
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -547,7 +547,7 @@ int xc_livepatch_upload(xc_interface *xc
DECLARE_SYSCTL;
DECLARE_HYPERCALL_BUFFER(char, local);
DECLARE_HYPERCALL_BOUNCE(name, 0 /* later */,
XC_HYPERCALL_BUFFER_BOUNCE_IN);
- xen_livepatch_name_t def_name = { .pad = { 0, 0, 0 } };
+ struct xen_livepatch_name def_name = { };
if ( !name || !payload )
{
@@ -594,12 +594,12 @@ int xc_livepatch_upload(xc_interface *xc
int xc_livepatch_get(xc_interface *xch,
char *name,
- xen_livepatch_status_t *status)
+ struct xen_livepatch_status *status)
{
int rc;
DECLARE_SYSCTL;
DECLARE_HYPERCALL_BOUNCE(name, 0 /*adjust later */,
XC_HYPERCALL_BUFFER_BOUNCE_IN);
- xen_livepatch_name_t def_name = { .pad = { 0, 0, 0 } };
+ struct xen_livepatch_name def_name = { };
if ( !name )
{
@@ -677,7 +677,7 @@ int xc_livepatch_get(xc_interface *xch,
* retrieved (if any).
*/
int xc_livepatch_list(xc_interface *xch, unsigned int max, unsigned int start,
- xen_livepatch_status_t *info,
+ struct xen_livepatch_status *info,
char *name, uint32_t *len,
unsigned int *done,
unsigned int *left)
@@ -837,7 +837,7 @@ static int _xc_livepatch_action(xc_inter
DECLARE_SYSCTL;
/* The size is figured out when we strlen(name) */
DECLARE_HYPERCALL_BOUNCE(name, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);
- xen_livepatch_name_t def_name = { .pad = { 0, 0, 0 } };
+ struct xen_livepatch_name def_name = { };
def_name.size = strlen(name) + 1;
--- a/xen/arch/arm/sysctl.c
+++ b/xen/arch/arm/sysctl.c
@@ -12,7 +12,7 @@
#include <xen/hypercall.h>
#include <public/sysctl.h>
-void arch_do_physinfo(xen_sysctl_physinfo_t *pi) { }
+void arch_do_physinfo(struct xen_sysctl_physinfo *pi) { }
long arch_do_sysctl(struct xen_sysctl *sysctl,
XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -72,7 +72,7 @@ long cpu_down_helper(void *data)
return ret;
}
-void arch_do_physinfo(xen_sysctl_physinfo_t *pi)
+void arch_do_physinfo(struct xen_sysctl_physinfo *pi)
{
memcpy(pi->hw_cap, boot_cpu_data.x86_capability,
min(sizeof(pi->hw_cap), sizeof(boot_cpu_data.x86_capability)));
--- a/xen/common/gcov/gcov.c
+++ b/xen/common/gcov/gcov.c
@@ -209,7 +209,7 @@ static int gcov_dump_all(XEN_GUEST_HANDL
return ret;
}
-int sysctl_gcov_op(xen_sysctl_gcov_op_t *op)
+int sysctl_gcov_op(struct xen_sysctl_gcov_op *op)
{
int ret;
--- a/xen/common/livepatch.c
+++ b/xen/common/livepatch.c
@@ -104,7 +104,7 @@ static struct livepatch_work livepatch_w
*/
static DEFINE_PER_CPU(bool_t, work_to_do);
-static int get_name(const xen_livepatch_name_t *name, char *n)
+static int get_name(const struct xen_livepatch_name *name, char *n)
{
if ( !name->size || name->size > XEN_LIVEPATCH_NAME_SIZE )
return -EINVAL;
@@ -121,7 +121,7 @@ static int get_name(const xen_livepatch_
return 0;
}
-static int verify_payload(const xen_sysctl_livepatch_upload_t *upload, char *n)
+static int verify_payload(const struct xen_sysctl_livepatch_upload *upload,
char *n)
{
if ( get_name(&upload->name, n) )
return -EINVAL;
@@ -897,7 +897,7 @@ static int load_payload_data(struct payl
return rc;
}
-static int livepatch_upload(xen_sysctl_livepatch_upload_t *upload)
+static int livepatch_upload(struct xen_sysctl_livepatch_upload *upload)
{
struct payload *data, *found;
char n[XEN_LIVEPATCH_NAME_SIZE];
@@ -954,7 +954,7 @@ static int livepatch_upload(xen_sysctl_l
return rc;
}
-static int livepatch_get(xen_sysctl_livepatch_get_t *get)
+static int livepatch_get(struct xen_sysctl_livepatch_get *get)
{
struct payload *data;
int rc;
@@ -985,9 +985,9 @@ static int livepatch_get(xen_sysctl_live
return 0;
}
-static int livepatch_list(xen_sysctl_livepatch_list_t *list)
+static int livepatch_list(struct xen_sysctl_livepatch_list *list)
{
- xen_livepatch_status_t status;
+ struct xen_livepatch_status status;
struct payload *data;
unsigned int idx = 0, i = 0;
int rc = 0;
@@ -1451,7 +1451,7 @@ static int build_id_dep(struct payload *
return 0;
}
-static int livepatch_action(xen_sysctl_livepatch_action_t *action)
+static int livepatch_action(struct xen_sysctl_livepatch_action *action)
{
struct payload *data;
char n[XEN_LIVEPATCH_NAME_SIZE];
@@ -1560,7 +1560,7 @@ static int livepatch_action(xen_sysctl_l
return rc;
}
-int livepatch_op(xen_sysctl_livepatch_op_t *livepatch)
+int livepatch_op(struct xen_sysctl_livepatch_op *livepatch)
{
int rc;
--- a/xen/common/perfc.c
+++ b/xen/common/perfc.c
@@ -152,8 +152,8 @@ void perfc_reset(unsigned char key)
arch_perfc_reset();
}
-static xen_sysctl_perfc_desc_t perfc_d[NR_PERFCTRS];
-static xen_sysctl_perfc_val_t *perfc_vals;
+static struct xen_sysctl_perfc_desc perfc_d[NR_PERFCTRS];
+static struct xen_sysctl_perfc_val *perfc_vals;
static unsigned int perfc_nbr_vals;
static cpumask_t perfc_cpumap;
@@ -190,7 +190,7 @@ static int perfc_copy_info(XEN_GUEST_HAN
}
xfree(perfc_vals);
- perfc_vals = xmalloc_array(xen_sysctl_perfc_val_t, perfc_nbr_vals);
+ perfc_vals = xmalloc_array(struct xen_sysctl_perfc_val,
perfc_nbr_vals);
}
if ( guest_handle_is_null(desc) )
@@ -241,7 +241,7 @@ static int perfc_copy_info(XEN_GUEST_HAN
}
/* Dom0 control of perf counters */
-int perfc_control(xen_sysctl_perfc_op_t *pc)
+int perfc_control(struct xen_sysctl_perfc_op *pc)
{
static DEFINE_SPINLOCK(lock);
int rc;
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -694,7 +694,7 @@ static int
a653sched_adjust_global(const struct scheduler *ops,
struct xen_sysctl_scheduler_op *sc)
{
- xen_sysctl_arinc653_schedule_t local_sched;
+ struct xen_sysctl_arinc653_schedule local_sched;
int rc = -EINVAL;
switch ( sc->cmd )
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1240,7 +1240,7 @@ csched_sys_cntl(const struct scheduler *
struct xen_sysctl_scheduler_op *sc)
{
int rc = -EINVAL;
- xen_sysctl_credit_schedule_t *params = &sc->u.sched_credit;
+ struct xen_sysctl_credit_schedule *params = &sc->u.sched_credit;
struct csched_private *prv = CSCHED_PRIV(ops);
unsigned long flags;
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -2443,7 +2443,7 @@ csched2_dom_cntl(
static int csched2_sys_cntl(const struct scheduler *ops,
struct xen_sysctl_scheduler_op *sc)
{
- xen_sysctl_credit2_schedule_t *params = &sc->u.sched_credit2;
+ struct xen_sysctl_credit2_schedule *params = &sc->u.sched_credit2;
struct csched2_private *prv = csched2_priv(ops);
unsigned long flags;
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -380,7 +380,7 @@ void spinlock_profile_reset(unsigned cha
}
typedef struct {
- xen_sysctl_lockprof_op_t *pc;
+ struct xen_sysctl_lockprof_op *pc;
int rc;
} spinlock_profile_ucopy_t;
@@ -388,7 +388,7 @@ static void spinlock_profile_ucopy_elem(
int32_t type, int32_t idx, void *par)
{
spinlock_profile_ucopy_t *p = par;
- xen_sysctl_lockprof_data_t elem;
+ struct xen_sysctl_lockprof_data elem;
if ( p->rc )
return;
@@ -411,7 +411,7 @@ static void spinlock_profile_ucopy_elem(
}
/* Dom0 control of lock profiling */
-int spinlock_profile_control(xen_sysctl_lockprof_op_t *pc)
+int spinlock_profile_control(struct xen_sysctl_lockprof_op *pc)
{
int rc = 0;
spinlock_profile_ucopy_t par;
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -250,7 +250,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
case XEN_SYSCTL_physinfo:
{
- xen_sysctl_physinfo_t *pi = &op->u.physinfo;
+ struct xen_sysctl_physinfo *pi = &op->u.physinfo;
memset(pi, 0, sizeof(*pi));
pi->threads_per_core =
@@ -276,7 +276,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
case XEN_SYSCTL_numainfo:
{
unsigned int i, j, num_nodes;
- xen_sysctl_numainfo_t *ni = &op->u.numainfo;
+ struct xen_sysctl_numainfo *ni = &op->u.numainfo;
bool_t do_meminfo = !guest_handle_is_null(ni->meminfo);
bool_t do_distance = !guest_handle_is_null(ni->distance);
@@ -284,7 +284,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
if ( do_meminfo || do_distance )
{
- xen_sysctl_meminfo_t meminfo = { 0 };
+ struct xen_sysctl_meminfo meminfo = { };
if ( num_nodes > ni->num_nodes )
num_nodes = ni->num_nodes;
@@ -346,12 +346,12 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
case XEN_SYSCTL_cputopoinfo:
{
unsigned int i, num_cpus;
- xen_sysctl_cputopoinfo_t *ti = &op->u.cputopoinfo;
+ struct xen_sysctl_cputopoinfo *ti = &op->u.cputopoinfo;
num_cpus = cpumask_last(&cpu_online_map) + 1;
if ( !guest_handle_is_null(ti->cputopo) )
{
- xen_sysctl_cputopo_t cputopo = { 0 };
+ struct xen_sysctl_cputopo cputopo = { };
if ( num_cpus > ti->num_cpus )
num_cpus = ti->num_cpus;
@@ -405,7 +405,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
#ifdef CONFIG_HAS_PCI
case XEN_SYSCTL_pcitopoinfo:
{
- xen_sysctl_pcitopoinfo_t *ti = &op->u.pcitopoinfo;
+ struct xen_sysctl_pcitopoinfo *ti = &op->u.pcitopoinfo;
unsigned int i = 0;
if ( guest_handle_is_null(ti->devs) ||
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -367,9 +367,9 @@ void __init init_trace_bufs(void)
/**
* tb_control - sysctl operations on trace buffers.
- * @tbc: a pointer to a xen_sysctl_tbuf_op_t to be filled out
+ * @tbc: a pointer to a struct xen_sysctl_tbuf_op to be filled out
*/
-int tb_control(xen_sysctl_tbuf_op_t *tbc)
+int tb_control(struct xen_sysctl_tbuf_op *tbc)
{
static DEFINE_SPINLOCK(lock);
int rc = 0;
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -58,8 +58,6 @@ struct xen_sysctl_readconsole {
/* IN: Size of buffer; OUT: Bytes written to buffer. */
uint32_t count;
};
-typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t);
/* Get trace buffers machine base address */
/* XEN_SYSCTL_tbuf_op */
@@ -79,8 +77,6 @@ struct xen_sysctl_tbuf_op {
uint64_aligned_t buffer_mfn;
uint32_t size; /* Also an IN variable! */
};
-typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
/*
* Get physical information about the host machine
@@ -109,8 +105,6 @@ struct xen_sysctl_physinfo {
/* XEN_SYSCTL_PHYSCAP_??? */
uint32_t capabilities;
};
-typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
/*
* Get the ID of the current scheduler.
@@ -120,8 +114,6 @@ struct xen_sysctl_sched_id {
/* OUT variable */
uint32_t sched_id;
};
-typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t);
/* Interface for controlling Xen software performance counters. */
/* XEN_SYSCTL_perfc_op */
@@ -148,8 +140,6 @@ struct xen_sysctl_perfc_op {
/* counter values (or NULL) */
XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val;
};
-typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
/* XEN_SYSCTL_getdomaininfolist */
struct xen_sysctl_getdomaininfolist {
@@ -160,8 +150,6 @@ struct xen_sysctl_getdomaininfolist {
/* OUT variables. */
uint32_t num_domains;
};
-typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
/* Inject debug keys into Xen. */
/* XEN_SYSCTL_debug_keys */
@@ -170,8 +158,6 @@ struct xen_sysctl_debug_keys {
XEN_GUEST_HANDLE_64(char) keys;
uint32_t nr_keys;
};
-typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t);
/* Get physical CPU information. */
/* XEN_SYSCTL_getcpuinfo */
@@ -187,8 +173,6 @@ struct xen_sysctl_getcpuinfo {
/* OUT variables. */
uint32_t nr_cpus;
};
-typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t);
/* XEN_SYSCTL_availheap */
struct xen_sysctl_availheap {
@@ -199,8 +183,6 @@ struct xen_sysctl_availheap {
/* OUT variables. */
uint64_aligned_t avail_bytes;/* Bytes available in the specified region. */
};
-typedef struct xen_sysctl_availheap xen_sysctl_availheap_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t);
/* XEN_SYSCTL_get_pmstat */
struct pm_px_val {
@@ -219,8 +201,6 @@ struct pm_px_stat {
XEN_GUEST_HANDLE_64(uint64) trans_pt; /* Px transition table */
XEN_GUEST_HANDLE_64(pm_px_val_t) pt;
};
-typedef struct pm_px_stat pm_px_stat_t;
-DEFINE_XEN_GUEST_HANDLE(pm_px_stat_t);
struct pm_cx_stat {
uint32_t nr; /* entry nr in triggers & residencies, including C0 */
@@ -259,8 +239,6 @@ struct xen_sysctl_get_pmstat {
/* other struct for tx, etc */
} u;
};
-typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t);
/* XEN_SYSCTL_cpu_hotplug */
struct xen_sysctl_cpu_hotplug {
@@ -270,8 +248,6 @@ struct xen_sysctl_cpu_hotplug {
#define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1
uint32_t op; /* hotplug opcode */
};
-typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_hotplug_t);
/*
* Get/set xen power management, include
@@ -281,7 +257,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_h
struct xen_userspace {
uint32_t scaling_setspeed;
};
-typedef struct xen_userspace xen_userspace_t;
struct xen_ondemand {
uint32_t sampling_rate_max;
@@ -290,7 +265,6 @@ struct xen_ondemand {
uint32_t sampling_rate;
uint32_t up_threshold;
};
-typedef struct xen_ondemand xen_ondemand_t;
/*
* cpufreq para name of this structure named
@@ -461,8 +435,6 @@ struct xen_sysctl_lockprof_op {
/* profile information (or NULL) */
XEN_GUEST_HANDLE_64(xen_sysctl_lockprof_data_t) data;
};
-typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t);
/* XEN_SYSCTL_cputopoinfo */
#define XEN_INVALID_CORE_ID (~0U)
@@ -493,8 +465,6 @@ struct xen_sysctl_cputopoinfo {
uint32_t num_cpus;
XEN_GUEST_HANDLE_64(xen_sysctl_cputopo_t) cputopo;
};
-typedef struct xen_sysctl_cputopoinfo xen_sysctl_cputopoinfo_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopoinfo_t);
/* XEN_SYSCTL_numainfo */
#define XEN_INVALID_MEM_SZ (~0U)
@@ -535,8 +505,6 @@ struct xen_sysctl_numainfo {
*/
XEN_GUEST_HANDLE_64(uint32) distance;
};
-typedef struct xen_sysctl_numainfo xen_sysctl_numainfo_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_numainfo_t);
/* XEN_SYSCTL_cpupool_op */
#define XEN_SYSCTL_CPUPOOL_OP_CREATE 1 /* C */
@@ -556,8 +524,6 @@ struct xen_sysctl_cpupool_op {
uint32_t n_dom; /* OUT: I */
struct xenctl_bitmap cpumap; /* OUT: IF */
};
-typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t);
/*
* Error return values of cpupool operations:
@@ -637,14 +603,10 @@ struct xen_sysctl_credit_schedule {
unsigned tslice_ms;
unsigned ratelimit_us;
};
-typedef struct xen_sysctl_credit_schedule xen_sysctl_credit_schedule_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_credit_schedule_t);
struct xen_sysctl_credit2_schedule {
unsigned ratelimit_us;
};
-typedef struct xen_sysctl_credit2_schedule xen_sysctl_credit2_schedule_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_credit2_schedule_t);
/* XEN_SYSCTL_scheduler_op */
/* Set or get info? */
@@ -662,8 +624,6 @@ struct xen_sysctl_scheduler_op {
struct xen_sysctl_credit2_schedule sched_credit2;
} u;
};
-typedef struct xen_sysctl_scheduler_op xen_sysctl_scheduler_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_scheduler_op_t);
/*
* Output format of gcov data:
@@ -696,8 +656,6 @@ struct xen_sysctl_gcov_op {
uint32_t size; /* IN/OUT: size of the buffer */
XEN_GUEST_HANDLE_64(char) buffer; /* OUT */
};
-typedef struct xen_sysctl_gcov_op xen_sysctl_gcov_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_gcov_op_t);
#define XEN_SYSCTL_PSR_CMT_get_total_rmid 0
#define XEN_SYSCTL_PSR_CMT_get_l3_upscaling_factor 1
@@ -716,8 +674,6 @@ struct xen_sysctl_psr_cmt_op {
} l3_cache;
} u;
};
-typedef struct xen_sysctl_psr_cmt_op xen_sysctl_psr_cmt_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cmt_op_t);
/* XEN_SYSCTL_pcitopoinfo */
#define XEN_INVALID_DEV (XEN_INVALID_NODE_ID - 1)
@@ -740,8 +696,6 @@ struct xen_sysctl_pcitopoinfo {
*/
XEN_GUEST_HANDLE_64(uint32) nodes;
};
-typedef struct xen_sysctl_pcitopoinfo xen_sysctl_pcitopoinfo_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_pcitopoinfo_t);
#define XEN_SYSCTL_PSR_CAT_get_l3_info 0
#define XEN_SYSCTL_PSR_CAT_get_l2_info 1
@@ -757,8 +711,6 @@ struct xen_sysctl_psr_cat_op {
} cat_info;
} u;
};
-typedef struct xen_sysctl_psr_cat_op xen_sysctl_psr_cat_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
#define XEN_SYSCTL_TMEM_OP_ALL_CLIENTS 0xFFFFU
@@ -863,8 +815,6 @@ struct xen_sysctl_tmem_op {
/* of them. */
} u;
};
-typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tmem_op_t);
/*
* XEN_SYSCTL_get_cpu_levelling_caps (x86 specific)
@@ -884,8 +834,6 @@ struct xen_sysctl_cpu_levelling_caps {
#define XEN_SYSCTL_CPU_LEVELCAP_l7s0_ebx (1ul << 8) /* 0x00000007:0.ebx */
uint32_t caps;
};
-typedef struct xen_sysctl_cpu_levelling_caps xen_sysctl_cpu_levelling_caps_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_levelling_caps_t);
/*
* XEN_SYSCTL_get_cpu_featureset (x86 specific)
@@ -909,8 +857,6 @@ struct xen_sysctl_cpu_featureset {
* maximum length. */
XEN_GUEST_HANDLE_64(uint32) features; /* OUT: */
};
-typedef struct xen_sysctl_featureset xen_sysctl_featureset_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_featureset_t);
/*
* XEN_SYSCTL_LIVEPATCH_op
@@ -966,8 +912,6 @@ struct xen_livepatch_name {
XEN_LIVEPATCH_NAME_SIZE. */
uint16_t pad[3]; /* IN: MUST be zero. */
};
-typedef struct xen_livepatch_name xen_livepatch_name_t;
-DEFINE_XEN_GUEST_HANDLE(xen_livepatch_name_t);
/*
* Upload a payload to the hypervisor. The payload is verified
@@ -986,12 +930,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_livepatch_na
*/
#define XEN_SYSCTL_LIVEPATCH_UPLOAD 0
struct xen_sysctl_livepatch_upload {
- xen_livepatch_name_t name; /* IN, name of the patch. */
+ struct xen_livepatch_name name; /* IN, name of the patch. */
uint64_t size; /* IN, size of the ELF file. */
XEN_GUEST_HANDLE_64(uint8) payload; /* IN, the ELF file. */
};
-typedef struct xen_sysctl_livepatch_upload xen_sysctl_livepatch_upload_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_upload_t);
/*
* Retrieve an status of an specific payload.
@@ -1013,11 +955,9 @@ typedef struct xen_livepatch_status xen_
DEFINE_XEN_GUEST_HANDLE(xen_livepatch_status_t);
struct xen_sysctl_livepatch_get {
- xen_livepatch_name_t name; /* IN, name of the payload. */
- xen_livepatch_status_t status; /* IN/OUT, state of it. */
+ struct xen_livepatch_name name; /* IN, name of the payload. */
+ struct xen_livepatch_status status; /* IN/OUT, state of it. */
};
-typedef struct xen_sysctl_livepatch_get xen_sysctl_livepatch_get_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_get_t);
/*
* Retrieve an array of abbreviated status and names of payloads that are
@@ -1059,8 +999,6 @@ struct xen_sysctl_livepatch_list {
XEN_GUEST_HANDLE_64(uint32) len; /* OUT: Array of lengths of name's.
Must have nr of them. */
};
-typedef struct xen_sysctl_livepatch_list xen_sysctl_livepatch_list_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_list_t);
/*
* Perform an operation on the payload structure referenced by the `name`
field.
@@ -1069,7 +1007,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livep
*/
#define XEN_SYSCTL_LIVEPATCH_ACTION 3
struct xen_sysctl_livepatch_action {
- xen_livepatch_name_t name; /* IN, name of the patch. */
+ struct xen_livepatch_name name; /* IN, name of the patch. */
#define LIVEPATCH_ACTION_UNLOAD 1
#define LIVEPATCH_ACTION_REVERT 2
#define LIVEPATCH_ACTION_APPLY 3
@@ -1080,21 +1018,17 @@ struct xen_sysctl_livepatch_action {
/* Or upper bound of time (ns) */
/* for operation to take. */
};
-typedef struct xen_sysctl_livepatch_action xen_sysctl_livepatch_action_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_action_t);
struct xen_sysctl_livepatch_op {
uint32_t cmd; /* IN: XEN_SYSCTL_LIVEPATCH_*. */
uint32_t pad; /* IN: Always zero. */
union {
- xen_sysctl_livepatch_upload_t upload;
- xen_sysctl_livepatch_list_t list;
- xen_sysctl_livepatch_get_t get;
- xen_sysctl_livepatch_action_t action;
+ struct xen_sysctl_livepatch_upload upload;
+ struct xen_sysctl_livepatch_list list;
+ struct xen_sysctl_livepatch_get get;
+ struct xen_sysctl_livepatch_action action;
} u;
};
-typedef struct xen_sysctl_livepatch_op xen_sysctl_livepatch_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_op_t);
/*
* XEN_SYSCTL_set_parameter
@@ -1111,8 +1045,6 @@ struct xen_sysctl_set_parameter {
uint16_t size; /* IN: size of parameters. */
uint16_t pad[3]; /* IN: MUST be zero. */
};
-typedef struct xen_sysctl_set_parameter xen_sysctl_set_parameter_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_set_parameter_t);
struct xen_sysctl {
uint32_t cmd;
--- a/xen/include/xen/gcov.h
+++ b/xen/include/xen/gcov.h
@@ -3,7 +3,7 @@
#ifdef CONFIG_GCOV
#include <public/sysctl.h>
-int sysctl_gcov_op(xen_sysctl_gcov_op_t *op);
+int sysctl_gcov_op(struct xen_sysctl_gcov_op *op);
#endif
#endif /* _XEN_GCOV_H */
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -914,7 +914,7 @@ int cpupool_do_sysctl(struct xen_sysctl_
void schedule_dump(struct cpupool *c);
extern void dump_runq(unsigned char key);
-void arch_do_physinfo(xen_sysctl_physinfo_t *pi);
+void arch_do_physinfo(struct xen_sysctl_physinfo *pi);
#endif /* __SCHED_H__ */
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -110,7 +110,7 @@ void _lock_profile_deregister_struct(int
#define lock_profile_deregister_struct(type, ptr) \
_lock_profile_deregister_struct(type, &((ptr)->profile_head))
-extern int spinlock_profile_control(xen_sysctl_lockprof_op_t *pc);
+extern int spinlock_profile_control(struct xen_sysctl_lockprof_op *pc);
extern void spinlock_profile_printall(unsigned char key);
extern void spinlock_profile_reset(unsigned char key);