
Re: [Xen-devel] [PATCH v2] x86: re-order struct arch_domain fields



On 10/02/15 11:34, Jan Beulich wrote:
> ... to reduce padding holes.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
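(As an aside for the archives: the "padding holes" above are the bytes the
compiler inserts to keep each field naturally aligned.  A minimal sketch of
the effect on an LP64 ABI, using made-up fields rather than anything from
struct arch_domain:

    typedef char bool_t;        /* 1-byte flag type, as in Xen */

    /* Small fields interleaved with pointer-sized ones each drag in
     * 7 bytes of padding. */
    struct before {
        void  *a;
        bool_t x;               /* 1 byte + 7 bytes of padding */
        void  *b;
        bool_t y;               /* 1 byte + 7 bytes of tail padding */
    };                          /* sizeof == 32 */

    /* Grouping the small fields lets them share one 8-byte slot. */
    struct after {
        void  *a;
        void  *b;
        bool_t x;
        bool_t y;               /* 2 bytes + 6 bytes of tail padding */
    };                          /* sizeof == 24 */

Same idea, at larger scale, for the reordering below.)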

> ---
> v2: Drop vtsc_usercount movement to struct pv_domain.
>
> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -255,13 +255,16 @@ struct arch_domain
>  
>      unsigned int hv_compat_vstart;
>  
> -    bool_t s3_integrity;
> +    /* Maximum physical-address bitwidth supported by this guest. */
> +    unsigned int physaddr_bitsize;
>  
>      /* I/O-port admin-specified access capabilities. */
>      struct rangeset *ioport_caps;
>      uint32_t pci_cf8;
>      uint8_t cmos_idx;
>  
> +    bool_t s3_integrity;
> +
>      struct list_head pdev_list;
>  
>      union {
> @@ -275,6 +278,18 @@ struct arch_domain
>       * page_alloc lock */
>      int page_alloc_unlock_level;
>  
> +    /* Continuable domain_relinquish_resources(). */
> +    enum {
> +        RELMEM_not_started,
> +        RELMEM_shared,
> +        RELMEM_xen,
> +        RELMEM_l4,
> +        RELMEM_l3,
> +        RELMEM_l2,
> +        RELMEM_done,
> +    } relmem;
> +    struct page_list_head relmem_list;
> +
>      /* nestedhvm: translate l2 guest physical to host physical */
>      struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
>      mm_lock_t nested_p2m_lock;
> @@ -282,27 +297,16 @@ struct arch_domain
>      /* NB. protected by d->event_lock and by irq_desc[irq].lock */
>      struct radix_tree_root irq_pirq;
>  
> -    /* Maximum physical-address bitwidth supported by this guest. */
> -    unsigned int physaddr_bitsize;
> -
>      /* Is a 32-bit PV (non-HVM) guest? */
>      bool_t is_32bit_pv;
>      /* Is shared-info page in 32-bit format? */
>      bool_t has_32bit_shinfo;
> +
>      /* Domain cannot handle spurious page faults? */
>      bool_t suppress_spurious_page_faults;
>  
> -    /* Continuable domain_relinquish_resources(). */
> -    enum {
> -        RELMEM_not_started,
> -        RELMEM_shared,
> -        RELMEM_xen,
> -        RELMEM_l4,
> -        RELMEM_l3,
> -        RELMEM_l2,
> -        RELMEM_done,
> -    } relmem;
> -    struct page_list_head relmem_list;
> +    /* Is PHYSDEVOP_eoi to automatically unmask the event channel? */
> +    bool_t auto_unmask;
>  
>      cpuid_input_t *cpuids;
>  
> @@ -329,15 +333,12 @@ struct arch_domain
>      struct e820entry *e820;
>      unsigned int nr_e820;
>  
> -    /* set auto_unmask to 1 if you want PHYSDEVOP_eoi to automatically
> -     * unmask the event channel */
> -    bool_t auto_unmask;
> +    unsigned int psr_rmid; /* RMID assigned to the domain for CMT */
> +
>      /* Shared page for notifying that explicit PIRQ EOI is required. */
>      unsigned long *pirq_eoi_map;
>      unsigned long pirq_eoi_map_mfn;
> -
> -    unsigned int psr_rmid; /* RMID assigned to the domain for CMT */
> -} __cacheline_aligned;
> +};
>  
>  #define has_arch_pdevs(d)    (!list_empty(&(d)->arch.pdev_list))
>  
> @@ -493,7 +494,7 @@ struct arch_vcpu
>          unsigned long eip;
>      } mem_event;
>  
> -} __cacheline_aligned;
> +};
>  
>  smap_check_policy_t smap_policy_change(struct vcpu *v,
>                                         smap_check_policy_t new_policy);
>
>
>


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

