
[Xen-devel] [PATCH 2/3] x86/hvm: constify static data where possible


  • To: "xen-devel" <xen-devel@xxxxxxxxxxxxx>
  • From: "Jan Beulich" <JBeulich@xxxxxxxx>
  • Date: Tue, 11 Sep 2012 13:47:49 +0100
  • Delivery-date: Tue, 11 Sep 2012 12:48:12 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>

In a few cases this also extends to making them static in the first
place.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
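
(Illustration, not part of the patch: a minimal standalone C sketch of what
const-qualifying such a function-pointer table buys. The identifiers op_fn,
hypothetical_op and table are invented for this example.)

typedef long op_fn(long arg);

static long hypothetical_op(long arg) { return arg + 1; }

/*
 * With "*const" the array elements (the pointers) are unmodifiable: the
 * compiler rejects e.g. "table[1] = hypothetical_op;" at compile time and
 * may place the whole table in .rodata. Without the qualifier the table is
 * writable and ends up in .data instead.
 */
static op_fn *const table[2] = {
    [0] = hypothetical_op,
};

long call0(void)
{
    return table[0] ? table[0](41) : -1;   /* returns 42 */
}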

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3123,7 +3123,7 @@ typedef unsigned long hvm_hypercall_t(
 
 #if defined(__i386__)
 
-static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
+static hvm_hypercall_t *const hvm_hypercall32_table[NR_hypercalls] = {
     [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op,
     [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
     [ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op,
@@ -3208,7 +3208,7 @@ static long hvm_physdev_op_compat32(
     }
 }
 
-static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = {
+static hvm_hypercall_t *const hvm_hypercall64_table[NR_hypercalls] = {
     [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op,
     [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
     [ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op,
@@ -3225,7 +3225,7 @@ static hvm_hypercall_t *hvm_hypercall64_
 #define COMPAT_CALL(x)                                        \
     [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) compat_ ## x
 
-static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
+static hvm_hypercall_t *const hvm_hypercall32_table[NR_hypercalls] = {
     [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op_compat32,
     [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op_compat32,
     [ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op_compat32,
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -34,14 +34,14 @@ static uint32_t size_or_mask;
 #define pat_cr_2_paf(pat_cr,n)  ((((uint64_t)pat_cr) >> ((n)<<3)) & 0xff)
 
 /* PAT entry to PTE flags (PAT, PCD, PWT bits). */
-static uint8_t pat_entry_2_pte_flags[8] = {
+static const uint8_t pat_entry_2_pte_flags[8] = {
     0,           _PAGE_PWT,
     _PAGE_PCD,   _PAGE_PCD | _PAGE_PWT,
     _PAGE_PAT,   _PAGE_PAT | _PAGE_PWT,
     _PAGE_PAT | _PAGE_PCD, _PAGE_PAT | _PAGE_PCD | _PAGE_PWT };
 
 /* Effective mm type lookup table, according to MTRR and PAT. */
-static uint8_t mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] = {
+static const uint8_t mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] = {
 /********PAT(UC,WC,RS,RS,WT,WP,WB,UC-)*/
 /* RS means reserved type(2,3), and type is hardcoded here */
  /*MTRR(UC):(UC,WC,RS,RS,UC,UC,UC,UC)*/
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -59,7 +59,7 @@ static const uint32_t mask16[16] = {
 };
 
 /* force some bits to zero */
-const uint8_t sr_mask[8] = {
+static const uint8_t sr_mask[8] = {
     (uint8_t)~0xfc,
     (uint8_t)~0xc2,
     (uint8_t)~0xf0,
@@ -70,7 +70,7 @@ const uint8_t sr_mask[8] = {
     (uint8_t)~0x00,
 };
 
-const uint8_t gr_mask[9] = {
+static const uint8_t gr_mask[9] = {
     (uint8_t)~0xf0, /* 0x00 */
     (uint8_t)~0xf0, /* 0x01 */
     (uint8_t)~0xf0, /* 0x02 */
--- a/xen/arch/x86/hvm/svm/emulate.c
+++ b/xen/arch/x86/hvm/svm/emulate.c
@@ -109,7 +109,7 @@ MAKE_INSTR(STGI,   3, 0x0f, 0x01, 0xdc);
 MAKE_INSTR(CLGI,   3, 0x0f, 0x01, 0xdd);
 MAKE_INSTR(INVLPGA,3, 0x0f, 0x01, 0xdf);
 
-static const u8 *opc_bytes[INSTR_MAX_COUNT] = 
+static const u8 *const opc_bytes[INSTR_MAX_COUNT] =
 {
     [INSTR_INVD]   = OPCODE_INVD,
     [INSTR_WBINVD] = OPCODE_WBINVD,
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -44,13 +44,13 @@
 #define set_guest_mode(msr) (msr |= (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT))
 #define is_overflowed(msr) (!((msr) & (1ULL << (MSR_F10H_COUNTER_LENGTH-1))))
 
-static int __read_mostly num_counters = 0;
-static u32 __read_mostly *counters = NULL;
-static u32 __read_mostly *ctrls = NULL;
-static bool_t __read_mostly k7_counters_mirrored = 0;
+static unsigned int __read_mostly num_counters;
+static const u32 __read_mostly *counters;
+static const u32 __read_mostly *ctrls;
+static bool_t __read_mostly k7_counters_mirrored;
 
 /* PMU Counter MSRs. */
-u32 AMD_F10H_COUNTERS[] = {
+static const u32 AMD_F10H_COUNTERS[] = {
     MSR_K7_PERFCTR0,
     MSR_K7_PERFCTR1,
     MSR_K7_PERFCTR2,
@@ -58,14 +58,14 @@ u32 AMD_F10H_COUNTERS[] = {
 };
 
 /* PMU Control MSRs. */
-u32 AMD_F10H_CTRLS[] = {
+static const u32 AMD_F10H_CTRLS[] = {
     MSR_K7_EVNTSEL0,
     MSR_K7_EVNTSEL1,
     MSR_K7_EVNTSEL2,
     MSR_K7_EVNTSEL3
 };
 
-u32 AMD_F15H_COUNTERS[] = {
+static const u32 AMD_F15H_COUNTERS[] = {
     MSR_AMD_FAM15H_PERFCTR0,
     MSR_AMD_FAM15H_PERFCTR1,
     MSR_AMD_FAM15H_PERFCTR2,
@@ -74,7 +74,7 @@ u32 AMD_F15H_COUNTERS[] = {
     MSR_AMD_FAM15H_PERFCTR5
 };
 
-u32 AMD_F15H_CTRLS[] = {
+static const u32 AMD_F15H_CTRLS[] = {
     MSR_AMD_FAM15H_EVNTSEL0,
     MSR_AMD_FAM15H_EVNTSEL1,
     MSR_AMD_FAM15H_EVNTSEL2,
@@ -161,7 +161,7 @@ static int amd_vpmu_do_interrupt(struct 
 
 static inline void context_restore(struct vcpu *v)
 {
-    u64 i;
+    unsigned int i;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct amd_vpmu_context *ctxt = vpmu->context;
 
@@ -198,7 +198,7 @@ static void amd_vpmu_restore(struct vcpu
 
 static inline void context_save(struct vcpu *v)
 {
-    int i;
+    unsigned int i;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct amd_vpmu_context *ctxt = vpmu->context;
 
@@ -225,7 +225,7 @@ static void amd_vpmu_save(struct vcpu *v
 
 static void context_update(unsigned int msr, u64 msr_content)
 {
-    int i;
+    unsigned int i;
     struct vcpu *v = current;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct amd_vpmu_context *ctxt = vpmu->context;
@@ -294,7 +294,7 @@ static int amd_vpmu_do_rdmsr(unsigned in
 
 static int amd_vpmu_initialise(struct vcpu *v)
 {
-    struct amd_vpmu_context *ctxt = NULL;
+    struct amd_vpmu_context *ctxt;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     uint8_t family = current_cpu_data.x86;
 
@@ -323,7 +323,7 @@ static int amd_vpmu_initialise(struct vc
         }
     }
 
-    ctxt = xzalloc_bytes(sizeof(struct amd_vpmu_context));
+    ctxt = xzalloc(struct amd_vpmu_context);
     if ( !ctxt )
     {
         gdprintk(XENLOG_WARNING, "Insufficient memory for PMU, "
@@ -332,7 +332,7 @@ static int amd_vpmu_initialise(struct vc
         return -ENOMEM;
     }
 
-    vpmu->context = (void *)ctxt;
+    vpmu->context = ctxt;
     vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
     return 0;
 }
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -53,7 +53,7 @@
     LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY |\
     APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER
 
-static unsigned int vlapic_lvt_mask[VLAPIC_LVT_NUM] =
+static const unsigned int vlapic_lvt_mask[VLAPIC_LVT_NUM] =
 {
      /* LVTT */
      LVT_MASK | APIC_TIMER_MODE_MASK,
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -149,7 +149,7 @@ static void vmx_vcpu_destroy(struct vcpu
 
 static DEFINE_PER_CPU(struct vmx_msr_state, host_msr_state);
 
-static u32 msr_index[] =
+static const u32 msr_index[] =
 {
     MSR_LSTAR, MSR_STAR, MSR_SYSCALL_MASK
 };
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -101,28 +101,30 @@ static void handle_pmc_quirk(u64 msr_con
     }
 }
 
-u32 core2_counters_msr[] =   {
+static const u32 core2_counters_msr[] = {
     MSR_CORE_PERF_FIXED_CTR0,
     MSR_CORE_PERF_FIXED_CTR1,
-    MSR_CORE_PERF_FIXED_CTR2};
+    MSR_CORE_PERF_FIXED_CTR2
+};
 
 /* Core 2 Non-architectual Performance Control MSRs. */
-u32 core2_ctrls_msr[] = {
+static const u32 core2_ctrls_msr[] = {
     MSR_CORE_PERF_FIXED_CTR_CTRL,
     MSR_IA32_PEBS_ENABLE,
-    MSR_IA32_DS_AREA};
+    MSR_IA32_DS_AREA
+};
 
 struct pmumsr {
     unsigned int num;
-    u32 *msr;
+    const u32 *msr;
 };
 
-struct pmumsr core2_counters = {
+static const struct pmumsr core2_counters = {
     3,
     core2_counters_msr
 };
 
-struct pmumsr core2_ctrls = {
+static const struct pmumsr core2_ctrls = {
     3,
     core2_ctrls_msr
 };
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -107,7 +107,7 @@ uint32_t nvmx_vcpu_asid(struct vcpu *v)
     return 0;
 }
 
-enum x86_segment sreg_to_index[] = {
+static const enum x86_segment sreg_to_index[] = {
     [VMX_SREG_ES] = x86_seg_es,
     [VMX_SREG_CS] = x86_seg_cs,
     [VMX_SREG_SS] = x86_seg_ss,
@@ -633,7 +633,7 @@ u64 nvmx_get_tsc_offset(struct vcpu *v)
 /*
  * Context synchronized between shadow and virtual VMCS.
  */
-static unsigned long vmcs_gstate_field[] = {
+static const u16 vmcs_gstate_field[] = {
     /* 16 BITS */
     GUEST_ES_SELECTOR,
     GUEST_CS_SELECTOR,
@@ -698,7 +698,7 @@ static unsigned long vmcs_gstate_field[]
 /*
  * Context: shadow -> virtual VMCS
  */
-static unsigned long vmcs_ro_field[] = {
+static const u16 vmcs_ro_field[] = {
     GUEST_PHYSICAL_ADDRESS,
     VM_INSTRUCTION_ERROR,
     VM_EXIT_REASON,
@@ -713,9 +713,9 @@ static unsigned long vmcs_ro_field[] = {
 };
 
 static struct vmcs_host_to_guest {
-    unsigned long host_field;
-    unsigned long guest_field;
-} vmcs_h2g_field[] = {
+    u16 host_field;
+    u16 guest_field;
+} const vmcs_h2g_field[] = {
     {HOST_ES_SELECTOR, GUEST_ES_SELECTOR},
     {HOST_CS_SELECTOR, GUEST_CS_SELECTOR},
     {HOST_SS_SELECTOR, GUEST_SS_SELECTOR},
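
(Side note for readers comparing the hunks above: the patch applies const in
two distinct positions. A standalone sketch of the difference, with invented
identifiers and example MSR numbers:)

#include <stdint.h>

static const uint32_t amd_msrs[2] = { 0xc0010004, 0xc0010005 };

/* Pointer to const data (cf. "counters"/"ctrls" in svm/vpmu.c above): the
 * pointer itself may be retargeted at runtime, but the data it points at
 * may not be written through it. */
static const uint32_t *counters;

/* Const pointer(s) to const data (cf. opc_bytes[] in svm/emulate.c above):
 * neither the pointers nor the data may change, so the table itself can
 * live in .rodata as well. */
static const uint32_t *const msr_tables[1] = { amd_msrs };

void select_family(void)
{
    counters = msr_tables[0];   /* fine: only the pointer changes */
    /* counters[0] = 0;      -- error: assignment to const data */
    /* msr_tables[0] = NULL; -- error: assignment to const pointer */
}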


Attachment: hvm-arrays.patch
Description: Text document

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 

