[Xen-devel] [RFC XEN PATCH v4 11/41] xen/pmem: add XEN_SYSCTL_nvdimm_pmem_get_regions
XEN_SYSCTL_nvdimm_pmem_get_regions, a command of the hypercall
XEN_SYSCTL_nvdimm_op, returns a list of PMEM regions of the specified
type (see PMEM_REGION_TYPE_*).
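
A minimal usage sketch (illustrative only, not part of this patch) of how a
toolstack caller might pair this command with the xc_nvdimm_pmem_get_regions_nr()
helper introduced earlier in this series; the function name
dump_raw_pmem_regions() is made up for the example and its error handling is
only a sketch:

    #include <errno.h>
    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    /* Query the number of raw PMEM regions, then fetch and print them. */
    static int dump_raw_pmem_regions(xc_interface *xch)
    {
        xen_sysctl_nvdimm_pmem_raw_region_t *regions;
        uint32_t nr = 0, i;
        int rc = xc_nvdimm_pmem_get_regions_nr(xch, PMEM_REGION_TYPE_RAW, &nr);

        if ( rc || !nr )
            return rc;

        regions = calloc(nr, sizeof(*regions));
        if ( !regions )
            return -ENOMEM;

        rc = xc_nvdimm_pmem_get_regions(xch, PMEM_REGION_TYPE_RAW, regions, &nr);
        if ( !rc )
            for ( i = 0; i < nr; i++ )
                printf("pmem: mfn 0x%"PRIx64" - 0x%"PRIx64", pxm %"PRIu32"\n",
                       regions[i].smfn, regions[i].emfn, regions[i].pxm);

        free(regions);
        return rc;
    }
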
Signed-off-by: Haozhong Zhang <haozhong.zhang@xxxxxxxxx>
---
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
---
tools/libxc/include/xenctrl.h | 18 ++++++++++++
tools/libxc/xc_misc.c | 62 +++++++++++++++++++++++++++++++++++++++
xen/common/pmem.c | 67 +++++++++++++++++++++++++++++++++++++++++++
xen/include/public/sysctl.h | 27 +++++++++++++++++
4 files changed, 174 insertions(+)
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 195ff69846..e0adad1cf8 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2622,6 +2622,24 @@ int xc_domain_cacheflush(xc_interface *xch, uint32_t domid,
int xc_nvdimm_pmem_get_regions_nr(xc_interface *xch,
uint8_t type, uint32_t *nr);
+/*
+ * Get an array of information of PMEM regions of the specified type.
+ *
+ * Parameters:
+ * xch: xc interface handle
+ * type: the type of PMEM regions, must be one of PMEM_REGION_TYPE_*
+ * buffer: the buffer to which the information of PMEM regions is returned;
+ * the caller should allocate enough memory for it
+ * nr: IN: the maximum number of PMEM regions that can be returned
+ * in @buffer
+ * OUT: the actual number of returned PMEM regions in @buffer
+ *
+ * Return:
+ * On success, return 0. Otherwise, return a non-zero error code.
+ */
+int xc_nvdimm_pmem_get_regions(xc_interface *xch, uint8_t type,
+ void *buffer, uint32_t *nr);
+
/* Compat shims */
#include "xenctrl_compat.h"
diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
index a3c6cfe2f6..11befa444f 100644
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -911,6 +911,68 @@ int xc_nvdimm_pmem_get_regions_nr(xc_interface *xch, uint8_t type, uint32_t *nr)
return rc;
}
+int xc_nvdimm_pmem_get_regions(xc_interface *xch, uint8_t type,
+ void *buffer, uint32_t *nr)
+{
+ DECLARE_SYSCTL;
+ DECLARE_HYPERCALL_BOUNCE(buffer, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+ struct xen_sysctl_nvdimm_op *nvdimm = &sysctl.u.nvdimm;
+ xen_sysctl_nvdimm_pmem_regions_t *regions = &nvdimm->u.pmem_regions;
+ unsigned int max;
+ unsigned long size;
+ int rc;
+
+ if ( !buffer || !nr )
+ return -EINVAL;
+
+ max = *nr;
+ if ( !max )
+ return 0;
+
+ switch ( type )
+ {
+ case PMEM_REGION_TYPE_RAW:
+ size = sizeof(xen_sysctl_nvdimm_pmem_raw_region_t) * max;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
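+ /* Bounce the caller's buffer for output; xc_hypercall_bounce_post() copies
+ * the entries written by Xen back into @buffer. */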
+ HYPERCALL_BOUNCE_SET_SIZE(buffer, size);
+ if ( xc_hypercall_bounce_pre(xch, buffer) )
+ return -EFAULT;
+
+ sysctl.cmd = XEN_SYSCTL_nvdimm_op;
+ nvdimm->cmd = XEN_SYSCTL_nvdimm_pmem_get_regions;
+ nvdimm->err = 0;
+ regions->type = type;
+ regions->num_regions = max;
+
+ switch ( type )
+ {
+ case PMEM_REGION_TYPE_RAW:
+ set_xen_guest_handle(regions->u_buffer.raw_regions, buffer);
+ break;
+
+ default:
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = do_sysctl(xch, &sysctl);
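+ /* On success, report how many entries Xen wrote; on failure, prefer the
+ * NVDIMM-specific error code if Xen provided one. */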
+ if ( !rc )
+ *nr = regions->num_regions;
+ else if ( nvdimm->err )
+ rc = -nvdimm->err;
+
+out:
+ xc_hypercall_bounce_post(xch, buffer);
+
+ return rc;
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/common/pmem.c b/xen/common/pmem.c
index b196b256bb..0afc1573c6 100644
--- a/xen/common/pmem.c
+++ b/xen/common/pmem.c
@@ -22,6 +22,8 @@
#include <xen/paging.h>
#include <xen/pmem.h>
+#include <asm/guest_access.h>
+
/*
* All PMEM regions presenting in NFIT SPA range structures are linked
* in this list.
@@ -114,6 +116,67 @@ static int pmem_get_regions_nr(xen_sysctl_nvdimm_pmem_regions_nr_t *regions_nr)
return rc;
}
+static int pmem_get_raw_regions(
+ XEN_GUEST_HANDLE_64(xen_sysctl_nvdimm_pmem_raw_region_t) regions,
+ unsigned int *num_regions)
+{
+ struct list_head *cur;
+ unsigned int nr = 0, max = *num_regions;
+ xen_sysctl_nvdimm_pmem_raw_region_t region;
+ int rc = 0;
+
+ if ( !guest_handle_okay(regions, max) )
+ return -EINVAL;
+
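+ /* Walk the list of raw PMEM regions and copy at most max entries to the
+ * guest buffer. */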
+ list_for_each(cur, &pmem_raw_regions)
+ {
+ struct pmem *pmem = list_entry(cur, struct pmem, link);
+
+ if ( nr >= max )
+ break;
+
+ region.smfn = pmem->smfn;
+ region.emfn = pmem->emfn;
+ region.pxm = pmem->u.raw.pxm;
+
+ if ( copy_to_guest_offset(regions, nr, &region, 1) )
+ {
+ rc = -EFAULT;
+ break;
+ }
+
+ nr++;
+ }
+
+ *num_regions = nr;
+
+ return rc;
+}
+
+static int pmem_get_regions(xen_sysctl_nvdimm_pmem_regions_t *regions)
+{
+ unsigned int type = regions->type, max = regions->num_regions;
+ int rc = 0;
+
+ if ( !max )
+ return 0;
+
+ switch ( type )
+ {
+ case PMEM_REGION_TYPE_RAW:
+ rc = pmem_get_raw_regions(regions->u_buffer.raw_regions, &max);
+ break;
+
+ default:
+ rc = -EINVAL;
+ }
+
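+ /* Only report the updated entry count to the caller on success. */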
+ if ( !rc )
+ regions->num_regions = max;
+
+ return rc;
+}
+
/**
* Register a pmem region to Xen.
*
@@ -159,6 +222,10 @@ int pmem_do_sysctl(struct xen_sysctl_nvdimm_op *nvdimm)
rc = pmem_get_regions_nr(&nvdimm->u.pmem_regions_nr);
break;
+ case XEN_SYSCTL_nvdimm_pmem_get_regions:
+ rc = pmem_get_regions(&nvdimm->u.pmem_regions);
+ break;
+
default:
rc = -ENOSYS;
}
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index c3c992225a..9b2a65fcb9 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -1052,6 +1052,15 @@ struct xen_sysctl_set_parameter {
/* Types of PMEM regions */
#define PMEM_REGION_TYPE_RAW 0 /* PMEM regions detected by Xen */
+/* PMEM_REGION_TYPE_RAW */
+struct xen_sysctl_nvdimm_pmem_raw_region {
+ uint64_t smfn;
+ uint64_t emfn;
+ uint32_t pxm;
+};
+typedef struct xen_sysctl_nvdimm_pmem_raw_region xen_sysctl_nvdimm_pmem_raw_region_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_nvdimm_pmem_raw_region_t);
+
/* XEN_SYSCTL_nvdimm_pmem_get_regions_nr */
struct xen_sysctl_nvdimm_pmem_regions_nr {
uint8_t type; /* IN: one of PMEM_REGION_TYPE_* */
@@ -1060,12 +1069,30 @@ struct xen_sysctl_nvdimm_pmem_regions_nr {
typedef struct xen_sysctl_nvdimm_pmem_regions_nr xen_sysctl_nvdimm_pmem_regions_nr_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_nvdimm_pmem_regions_nr_t);
+/* XEN_SYSCTL_nvdimm_pmem_get_regions */
+struct xen_sysctl_nvdimm_pmem_regions {
+ uint8_t type; /* IN: one of PMEM_REGION_TYPE_* */
+ uint32_t num_regions; /* IN: the maximum number of entries that can be
+ returned via the guest handle in @u_buffer
+ OUT: the actual number of entries returned via
+ the guest handle in @u_buffer */
+ union {
+ /* if type == PMEM_REGION_TYPE_RAW */
+ XEN_GUEST_HANDLE_64(xen_sysctl_nvdimm_pmem_raw_region_t) raw_regions;
+ } u_buffer; /* IN: the guest handle via which the entries of PMEM
+ regions of type @type are returned */
+};
+typedef struct xen_sysctl_nvdimm_pmem_regions xen_sysctl_nvdimm_pmem_regions_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_nvdimm_pmem_regions_t);
+
struct xen_sysctl_nvdimm_op {
uint32_t cmd; /* IN: XEN_SYSCTL_nvdimm_*; none is implemented yet. */
#define XEN_SYSCTL_nvdimm_pmem_get_regions_nr 0
+#define XEN_SYSCTL_nvdimm_pmem_get_regions 1
uint32_t err; /* OUT: error code */
union {
xen_sysctl_nvdimm_pmem_regions_nr_t pmem_regions_nr;
+ xen_sysctl_nvdimm_pmem_regions_t pmem_regions;
} u;
};
--
2.15.1