[Xen-devel] [RFC XEN PATCH v3 19/39] xen/pmem: support PMEM_REGION_TYPE_MGMT for XEN_SYSCTL_nvdimm_pmem_get_regions
Allow XEN_SYSCTL_nvdimm_pmem_get_regions to return a list of management
PMEM regions.

Signed-off-by: Haozhong Zhang <haozhong.zhang@xxxxxxxxx>
---
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
---
 tools/libxc/xc_misc.c       |  8 ++++++++
 xen/common/pmem.c           | 45 +++++++++++++++++++++++++++++++++++++++++++++
 xen/include/public/sysctl.h | 11 +++++++++++
 3 files changed, 64 insertions(+)

diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
index 4b5558aaa5..3ad254f5ae 100644
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -939,6 +939,10 @@ int xc_nvdimm_pmem_get_regions(xc_interface *xch, uint8_t type,
         size = sizeof(xen_sysctl_nvdimm_pmem_raw_region_t) * max;
         break;
 
+    case PMEM_REGION_TYPE_MGMT:
+        size = sizeof(xen_sysctl_nvdimm_pmem_mgmt_region_t) * max;
+        break;
+
     default:
         return -EINVAL;
     }
@@ -960,6 +964,10 @@ int xc_nvdimm_pmem_get_regions(xc_interface *xch, uint8_t type,
         set_xen_guest_handle(regions->u_buffer.raw_regions, buffer);
         break;
 
+    case PMEM_REGION_TYPE_MGMT:
+        set_xen_guest_handle(regions->u_buffer.mgmt_regions, buffer);
+        break;
+
     default:
         rc = -EINVAL;
         goto out;
diff --git a/xen/common/pmem.c b/xen/common/pmem.c
index 54b3e7119a..dcd8160407 100644
--- a/xen/common/pmem.c
+++ b/xen/common/pmem.c
@@ -190,6 +190,47 @@ static int pmem_get_raw_regions(
     return rc;
 }
 
+static int pmem_get_mgmt_regions(
+    XEN_GUEST_HANDLE_64(xen_sysctl_nvdimm_pmem_mgmt_region_t) regions,
+    unsigned int *num_regions)
+{
+    struct list_head *cur;
+    unsigned int nr = 0, max = *num_regions;
+    xen_sysctl_nvdimm_pmem_mgmt_region_t region;
+    int rc = 0;
+
+    if ( !guest_handle_okay(regions, max * sizeof(region)) )
+        return -EINVAL;
+
+    spin_lock(&pmem_mgmt_lock);
+
+    list_for_each(cur, &pmem_mgmt_regions)
+    {
+        struct pmem *pmem = list_entry(cur, struct pmem, link);
+
+        if ( nr >= max )
+            break;
+
+        region.smfn = pmem->smfn;
+        region.emfn = pmem->emfn;
+        region.used_mfns = pmem->u.mgmt.used;
+
+        if ( copy_to_guest_offset(regions, nr, &region, 1) )
+        {
+            rc = -EFAULT;
+            break;
+        }
+
+        nr++;
+    }
+
+    spin_unlock(&pmem_mgmt_lock);
+
+    *num_regions = nr;
+
+    return rc;
+}
+
 static int pmem_get_regions(xen_sysctl_nvdimm_pmem_regions_t *regions)
 {
     unsigned int type = regions->type, max = regions->num_regions;
@@ -204,6 +245,10 @@ static int pmem_get_regions(xen_sysctl_nvdimm_pmem_regions_t *regions)
         rc = pmem_get_raw_regions(regions->u_buffer.raw_regions, &max);
         break;
 
+    case PMEM_REGION_TYPE_MGMT:
+        rc = pmem_get_mgmt_regions(regions->u_buffer.mgmt_regions, &max);
+        break;
+
     default:
         rc = -EINVAL;
     }
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index 5d208033a0..f825716446 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -1131,6 +1131,15 @@ struct xen_sysctl_nvdimm_pmem_raw_region {
 typedef struct xen_sysctl_nvdimm_pmem_raw_region xen_sysctl_nvdimm_pmem_raw_region_t;
 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_nvdimm_pmem_raw_region_t);
 
+/* PMEM_REGION_TYPE_MGMT */
+struct xen_sysctl_nvdimm_pmem_mgmt_region {
+    uint64_t smfn;
+    uint64_t emfn;
+    uint64_t used_mfns;
+};
+typedef struct xen_sysctl_nvdimm_pmem_mgmt_region xen_sysctl_nvdimm_pmem_mgmt_region_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_nvdimm_pmem_mgmt_region_t);
+
 /* XEN_SYSCTL_nvdimm_pmem_get_regions_nr */
 struct xen_sysctl_nvdimm_pmem_regions_nr {
     uint8_t type;         /* IN: one of PMEM_REGION_TYPE_* */
@@ -1149,6 +1158,8 @@ struct xen_sysctl_nvdimm_pmem_regions {
     union {
         /* if type == PMEM_REGION_TYPE_RAW */
         XEN_GUEST_HANDLE_64(xen_sysctl_nvdimm_pmem_raw_region_t) raw_regions;
+        /* if type == PMEM_REGION_TYPE_MGMT */
+        XEN_GUEST_HANDLE_64(xen_sysctl_nvdimm_pmem_mgmt_region_t) mgmt_regions;
     } u_buffer;           /* IN: the guest handler where the entries of
                            *     PMEM regions of the type @type are
                            *     returned */
 };
-- 
2.14.1
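
For context, a rough sketch of how a toolstack consumer might exercise the
new region type once this series is applied. Only the first two parameters
of xc_nvdimm_pmem_get_regions() are visible in the hunk above; the trailing
buffer/count arguments, the xc_nvdimm_pmem_get_regions_nr() wrapper used to
size the buffer, and the dump_mgmt_regions() helper are assumptions based on
the usual libxc "query count, then fetch list" pattern, not something this
patch defines.

/* Sketch only: xc_nvdimm_pmem_get_regions_nr() and the trailing
 * (buffer, nr) arguments of xc_nvdimm_pmem_get_regions() are assumed
 * from the common libxc query-count/fetch-list pattern; this patch only
 * shows the first two parameters of the latter. */
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <xenctrl.h>

static int dump_mgmt_regions(xc_interface *xch)
{
    xen_sysctl_nvdimm_pmem_mgmt_region_t *regions;
    uint32_t nr = 0, i;
    int rc;

    /* Ask Xen how many management regions it currently tracks. */
    rc = xc_nvdimm_pmem_get_regions_nr(xch, PMEM_REGION_TYPE_MGMT, &nr);
    if ( rc || !nr )
        return rc;

    regions = calloc(nr, sizeof(*regions));
    if ( !regions )
        return -ENOMEM;

    /* Fetch the list; nr is updated to the number of entries copied back. */
    rc = xc_nvdimm_pmem_get_regions(xch, PMEM_REGION_TYPE_MGMT, regions, &nr);
    if ( !rc )
        for ( i = 0; i < nr; i++ )
            printf("mgmt region %u: mfns 0x%" PRIx64 "-0x%" PRIx64
                   ", used 0x%" PRIx64 "\n",
                   i, regions[i].smfn, regions[i].emfn, regions[i].used_mfns);

    free(regions);
    return rc;
}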