
[Xen-changelog] [xen-unstable] xen: move XEN_SYSCTL_physinfo, XEN_SYSCTL_numainfo and XEN_SYSCTL_topologyinfo to common code


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Tue, 19 Feb 2013 15:22:23 +0000
  • Delivery-date: Tue, 19 Feb 2013 15:22:50 +0000
  • List-id: "Change log for Mercurial (receive only)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
# Date 1360935140 0
# Node ID 1e168abb73b52d440f5528bdc27125df2c3e9669
# Parent  d030eb79a811360e1f1d0ee788bc47227c71b400
xen: move XEN_SYSCTL_physinfo, XEN_SYSCTL_numainfo and XEN_SYSCTL_topologyinfo to common code

Move XEN_SYSCTL_physinfo, XEN_SYSCTL_numainfo and
XEN_SYSCTL_topologyinfo from x86/sysctl.c to common/sysctl.c.

The implementation of XEN_SYSCTL_physinfo is mostly generic but needs to
fill in a few arch-specific details: introduce arch_do_physinfo to do that.
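
For a new architecture the hook can start out as an empty stub, exactly as
the ARM change below does; a minimal sketch:

    /* Nothing arch-specific to report (yet). */
    void arch_do_physinfo(xen_sysctl_physinfo_t *pi) { }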

The implementation of XEN_SYSCTL_physinfo relies on two global
variables: total_pages and cpu_khz. Make them available on ARM.
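
For reference (an editorial note, not part of the patch text): with
cpu_khz = CNTFRQ / 1000, the conversions in xen/arch/arm/time.c below stay
equivalent to the old cntfrq-based ones whenever the timer frequency is a
whole multiple of 1 kHz:

    /*
     * ticks_to_ns(t) = t * 1e9 / (1000 * cpu_khz)   (was: t * 1e9 / cntfrq)
     * ns_to_ticks(n) = n * (1000 * cpu_khz) / 1e9   (was: n * cntfrq / 1e9)
     * Any sub-kHz remainder of CNTFRQ is dropped by the integer division.
     */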

Implement node_spanned_pages and __node_distance on ARM, assuming a single
NUMA node for now.
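
As a consumer-side illustration only (not part of this patch): with these
pieces in place a toolstack linked against libxc can retrieve the same
information on ARM through xc_physinfo(), the existing wrapper around
XEN_SYSCTL_physinfo. A minimal sketch, assuming the libxc API of this era,
with error handling trimmed:

    #include <stdio.h>
    #include <string.h>
    #include <xenctrl.h>

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        xc_physinfo_t info;   /* typedef of xen_sysctl_physinfo_t */

        if ( !xch )
            return 1;

        memset(&info, 0, sizeof(info));
        if ( xc_physinfo(xch, &info) == 0 )
            printf("cpus=%u nodes=%u cpu_khz=%u total_pages=%llu\n",
                   info.nr_cpus, info.nr_nodes, info.cpu_khz,
                   (unsigned long long)info.total_pages);

        xc_interface_close(xch);
        return 0;
    }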

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Committed-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---


diff -r d030eb79a811 -r 1e168abb73b5 xen/arch/arm/mm.c
--- a/xen/arch/arm/mm.c Fri Feb 15 13:32:19 2013 +0000
+++ b/xen/arch/arm/mm.c Fri Feb 15 13:32:20 2013 +0000
@@ -62,6 +62,7 @@ unsigned long frametable_base_mfn __read
 unsigned long frametable_virt_end __read_mostly;
 
 unsigned long max_page;
+unsigned long total_pages;
 
 extern char __init_begin[], __init_end[];
 
diff -r d030eb79a811 -r 1e168abb73b5 xen/arch/arm/setup.c
--- a/xen/arch/arm/setup.c      Fri Feb 15 13:32:19 2013 +0000
+++ b/xen/arch/arm/setup.c      Fri Feb 15 13:32:20 2013 +0000
@@ -219,7 +219,7 @@ static void __init setup_mm(unsigned lon
     ram_start = early_info.mem.bank[0].start;
     ram_size  = early_info.mem.bank[0].size;
     ram_end = ram_start + ram_size;
-    ram_pages = ram_size >> PAGE_SHIFT;
+    total_pages = ram_pages = ram_size >> PAGE_SHIFT;
 
     /*
      * Locate the xenheap using these constraints:
diff -r d030eb79a811 -r 1e168abb73b5 xen/arch/arm/sysctl.c
--- a/xen/arch/arm/sysctl.c     Fri Feb 15 13:32:19 2013 +0000
+++ b/xen/arch/arm/sysctl.c     Fri Feb 15 13:32:20 2013 +0000
@@ -12,6 +12,8 @@
 #include <xen/errno.h>
 #include <public/sysctl.h>
 
+void arch_do_physinfo(xen_sysctl_physinfo_t *pi) { }
+
 long arch_do_sysctl(struct xen_sysctl *sysctl,
                     XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
 {
diff -r d030eb79a811 -r 1e168abb73b5 xen/arch/arm/time.c
--- a/xen/arch/arm/time.c       Fri Feb 15 13:32:19 2013 +0000
+++ b/xen/arch/arm/time.c       Fri Feb 15 13:32:20 2013 +0000
@@ -43,16 +43,16 @@ uint64_t __read_mostly boot_count;
 
 /* For fine-grained timekeeping, we use the ARM "Generic Timer", a
  * register-mapped time source in the SoC. */
-static uint32_t __read_mostly cntfrq;      /* Ticks per second */
+unsigned long __read_mostly cpu_khz;  /* CPU clock frequency in kHz. */
 
 /*static inline*/ s_time_t ticks_to_ns(uint64_t ticks)
 {
-    return muldiv64(ticks, SECONDS(1), cntfrq);
+    return muldiv64(ticks, SECONDS(1), 1000 * cpu_khz);
 }
 
 /*static inline*/ uint64_t ns_to_ticks(s_time_t ns)
 {
-    return muldiv64(ns, cntfrq, SECONDS(1));
+    return muldiv64(ns, 1000 * cpu_khz, SECONDS(1));
 }
 
 /* TODO: On a real system the firmware would have set the frequency in
@@ -93,9 +93,9 @@ int __init init_xen_time(void)
     if ( (READ_CP32(ID_PFR1) & ID_PFR1_GT_MASK) != ID_PFR1_GT_v1 )
         panic("CPU does not support the Generic Timer v1 interface.\n");
 
-    cntfrq = READ_CP32(CNTFRQ);
+    cpu_khz = READ_CP32(CNTFRQ) / 1000;
     boot_count = READ_CP64(CNTPCT);
-    printk("Using generic timer at %"PRIu32" Hz\n", cntfrq);
+    printk("Using generic timer at %lu KHz\n", cpu_khz);
 
     return 0;
 }
diff -r d030eb79a811 -r 1e168abb73b5 xen/arch/x86/sysctl.c
--- a/xen/arch/x86/sysctl.c     Fri Feb 15 13:32:19 2013 +0000
+++ b/xen/arch/x86/sysctl.c     Fri Feb 15 13:32:20 2013 +0000
@@ -57,6 +57,15 @@ long cpu_down_helper(void *data)
     return ret;
 }
 
+void arch_do_physinfo(xen_sysctl_physinfo_t *pi)
+{
+    memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
+    if ( hvm_enabled )
+        pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm;
+    if ( iommu_enabled )
+        pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
+}
+
 long arch_do_sysctl(
     struct xen_sysctl *sysctl, XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
 {
@@ -65,120 +74,6 @@ long arch_do_sysctl(
     switch ( sysctl->cmd )
     {
 
-    case XEN_SYSCTL_physinfo:
-    {
-        xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
-
-        memset(pi, 0, sizeof(*pi));
-        pi->threads_per_core =
-            cpumask_weight(per_cpu(cpu_sibling_mask, 0));
-        pi->cores_per_socket =
-            cpumask_weight(per_cpu(cpu_core_mask, 0)) / pi->threads_per_core;
-        pi->nr_cpus = num_online_cpus();
-        pi->nr_nodes = num_online_nodes();
-        pi->max_node_id = MAX_NUMNODES-1;
-        pi->max_cpu_id = nr_cpu_ids - 1;
-        pi->total_pages = total_pages;
-        pi->free_pages = avail_domheap_pages();
-        pi->scrub_pages = 0;
-        pi->cpu_khz = cpu_khz;
-        memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
-        if ( hvm_enabled )
-            pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm;
-        if ( iommu_enabled )
-            pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
-
-        if ( __copy_field_to_guest(u_sysctl, sysctl, u.physinfo) )
-            ret = -EFAULT;
-    }
-    break;
-        
-    case XEN_SYSCTL_topologyinfo:
-    {
-        uint32_t i, max_cpu_index, last_online_cpu;
-        xen_sysctl_topologyinfo_t *ti = &sysctl->u.topologyinfo;
-
-        last_online_cpu = cpumask_last(&cpu_online_map);
-        max_cpu_index = min_t(uint32_t, ti->max_cpu_index, last_online_cpu);
-        ti->max_cpu_index = last_online_cpu;
-
-        for ( i = 0; i <= max_cpu_index; i++ )
-        {
-            if ( !guest_handle_is_null(ti->cpu_to_core) )
-            {
-                uint32_t core = cpu_online(i) ? cpu_to_core(i) : ~0u;
-                if ( copy_to_guest_offset(ti->cpu_to_core, i, &core, 1) )
-                    break;
-            }
-            if ( !guest_handle_is_null(ti->cpu_to_socket) )
-            {
-                uint32_t socket = cpu_online(i) ? cpu_to_socket(i) : ~0u;
-                if ( copy_to_guest_offset(ti->cpu_to_socket, i, &socket, 1) )
-                    break;
-            }
-            if ( !guest_handle_is_null(ti->cpu_to_node) )
-            {
-                uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
-                if ( copy_to_guest_offset(ti->cpu_to_node, i, &node, 1) )
-                    break;
-            }
-        }
-
-        ret = ((i <= max_cpu_index) ||
-               __copy_field_to_guest(u_sysctl, sysctl, u.topologyinfo))
-            ? -EFAULT : 0;
-    }
-    break;
-
-    case XEN_SYSCTL_numainfo:
-    {
-        uint32_t i, j, max_node_index, last_online_node;
-        xen_sysctl_numainfo_t *ni = &sysctl->u.numainfo;
-
-        last_online_node = last_node(node_online_map);
-        max_node_index = min_t(uint32_t, ni->max_node_index, last_online_node);
-        ni->max_node_index = last_online_node;
-
-        for ( i = 0; i <= max_node_index; i++ )
-        {
-            if ( !guest_handle_is_null(ni->node_to_memsize) )
-            {
-                uint64_t memsize = node_online(i) ? 
-                                   node_spanned_pages(i) << PAGE_SHIFT : 0ul;
-                if ( copy_to_guest_offset(ni->node_to_memsize, i, &memsize, 1) )
-                    break;
-            }
-            if ( !guest_handle_is_null(ni->node_to_memfree) )
-            {
-                uint64_t memfree = node_online(i) ? 
-                                   avail_node_heap_pages(i) << PAGE_SHIFT : 0ul;
-                if ( copy_to_guest_offset(ni->node_to_memfree, i, &memfree, 1) )
-                    break;
-            }
-
-            if ( !guest_handle_is_null(ni->node_to_node_distance) )
-            {
-                for ( j = 0; j <= max_node_index; j++)
-                {
-                    uint32_t distance = ~0u;
-                    if ( node_online(i) && node_online(j) )
-                        distance = __node_distance(i, j);
-                    if ( copy_to_guest_offset(
-                        ni->node_to_node_distance, 
-                        i*(max_node_index+1) + j, &distance, 1) )
-                        break;
-                }
-                if ( j <= max_node_index )
-                    break;
-            }
-        }
-
-        ret = ((i <= max_node_index) ||
-               __copy_field_to_guest(u_sysctl, sysctl, u.numainfo))
-            ? -EFAULT : 0;
-    }
-    break;
-    
     case XEN_SYSCTL_cpu_hotplug:
     {
         unsigned int cpu = sysctl->u.cpu_hotplug.cpu;
diff -r d030eb79a811 -r 1e168abb73b5 xen/common/sysctl.c
--- a/xen/common/sysctl.c       Fri Feb 15 13:32:19 2013 +0000
+++ b/xen/common/sysctl.c       Fri Feb 15 13:32:20 2013 +0000
@@ -249,6 +249,115 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
         ret = sched_adjust_global(&op->u.scheduler_op);
         break;
 
+    case XEN_SYSCTL_physinfo:
+    {
+        xen_sysctl_physinfo_t *pi = &op->u.physinfo;
+
+        memset(pi, 0, sizeof(*pi));
+        pi->threads_per_core =
+            cpumask_weight(per_cpu(cpu_sibling_mask, 0));
+        pi->cores_per_socket =
+            cpumask_weight(per_cpu(cpu_core_mask, 0)) / pi->threads_per_core;
+        pi->nr_cpus = num_online_cpus();
+        pi->nr_nodes = num_online_nodes();
+        pi->max_node_id = MAX_NUMNODES-1;
+        pi->max_cpu_id = nr_cpu_ids - 1;
+        pi->total_pages = total_pages;
+        pi->free_pages = avail_domheap_pages();
+        pi->scrub_pages = 0;
+        pi->cpu_khz = cpu_khz;
+        arch_do_physinfo(pi);
+
+        if ( copy_to_guest(u_sysctl, op, 1) )
+            ret = -EFAULT;
+    }
+    break;
+
+    case XEN_SYSCTL_numainfo:
+    {
+        uint32_t i, j, max_node_index, last_online_node;
+        xen_sysctl_numainfo_t *ni = &op->u.numainfo;
+
+        last_online_node = last_node(node_online_map);
+        max_node_index = min_t(uint32_t, ni->max_node_index, last_online_node);
+        ni->max_node_index = last_online_node;
+
+        for ( i = 0; i <= max_node_index; i++ )
+        {
+            if ( !guest_handle_is_null(ni->node_to_memsize) )
+            {
+                uint64_t memsize = node_online(i) ?
+                                   node_spanned_pages(i) << PAGE_SHIFT : 0ul;
+                if ( copy_to_guest_offset(ni->node_to_memsize, i, &memsize, 1) )
+                    break;
+            }
+            if ( !guest_handle_is_null(ni->node_to_memfree) )
+            {
+                uint64_t memfree = node_online(i) ?
+                                   avail_node_heap_pages(i) << PAGE_SHIFT : 0ul;
+                if ( copy_to_guest_offset(ni->node_to_memfree, i, &memfree, 1) )
+                    break;
+            }
+
+            if ( !guest_handle_is_null(ni->node_to_node_distance) )
+            {
+                for ( j = 0; j <= max_node_index; j++)
+                {
+                    uint32_t distance = ~0u;
+                    if ( node_online(i) && node_online(j) )
+                        distance = __node_distance(i, j);
+                    if ( copy_to_guest_offset(
+                        ni->node_to_node_distance,
+                        i*(max_node_index+1) + j, &distance, 1) )
+                        break;
+                }
+                if ( j <= max_node_index )
+                    break;
+            }
+        }
+
+        ret = ((i <= max_node_index) || copy_to_guest(u_sysctl, op, 1))
+            ? -EFAULT : 0;
+    }
+    break;
+
+    case XEN_SYSCTL_topologyinfo:
+    {
+        uint32_t i, max_cpu_index, last_online_cpu;
+        xen_sysctl_topologyinfo_t *ti = &op->u.topologyinfo;
+
+        last_online_cpu = cpumask_last(&cpu_online_map);
+        max_cpu_index = min_t(uint32_t, ti->max_cpu_index, last_online_cpu);
+        ti->max_cpu_index = last_online_cpu;
+
+        for ( i = 0; i <= max_cpu_index; i++ )
+        {
+            if ( !guest_handle_is_null(ti->cpu_to_core) )
+            {
+                uint32_t core = cpu_online(i) ? cpu_to_core(i) : ~0u;
+                if ( copy_to_guest_offset(ti->cpu_to_core, i, &core, 1) )
+                    break;
+            }
+            if ( !guest_handle_is_null(ti->cpu_to_socket) )
+            {
+                uint32_t socket = cpu_online(i) ? cpu_to_socket(i) : ~0u;
+                if ( copy_to_guest_offset(ti->cpu_to_socket, i, &socket, 1) )
+                    break;
+            }
+            if ( !guest_handle_is_null(ti->cpu_to_node) )
+            {
+                uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
+                if ( copy_to_guest_offset(ti->cpu_to_node, i, &node, 1) )
+                    break;
+            }
+        }
+
+        ret = ((i <= max_cpu_index) || copy_to_guest(u_sysctl, op, 1))
+            ? -EFAULT : 0;
+    }
+    break;
+
+
     default:
         ret = arch_do_sysctl(op, u_sysctl);
         copyback = 0;
diff -r d030eb79a811 -r 1e168abb73b5 xen/include/asm-arm/numa.h
--- a/xen/include/asm-arm/numa.h        Fri Feb 15 13:32:19 2013 +0000
+++ b/xen/include/asm-arm/numa.h        Fri Feb 15 13:32:20 2013 +0000
@@ -10,6 +10,10 @@ static inline __attribute__((pure)) int 
     return 0;
 }
 
+/* XXX: implement NUMA support */
+#define node_spanned_pages(nid)        (total_pages)
+#define __node_distance(a, b) (20)
+
 #endif /* __ARCH_ARM_NUMA_H */
 /*
  * Local variables:
diff -r d030eb79a811 -r 1e168abb73b5 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Fri Feb 15 13:32:19 2013 +0000
+++ b/xen/include/xen/sched.h   Fri Feb 15 13:32:20 2013 +0000
@@ -748,6 +748,8 @@ extern void dump_runq(unsigned char key)
 
 #define num_cpupool_cpus(c) cpumask_weight((c)->cpu_valid)
 
+void arch_do_physinfo(xen_sysctl_physinfo_t *pi);
+
 #endif /* __SCHED_H__ */
 
 /*

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog