Re: [Xen-devel] [PATCH 12/18] PVH xen: mapcache and show registers
>>> On 25.06.13 at 02:01, Mukesh Rathor <mukesh.rathor@xxxxxxxxxx> wrote:
> PVH doesn't use the mapcache. show_registers() for PVH takes the HVM path.
>
> Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
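
The pattern in all of these hunks is the same: once PVH exists as a third
guest type, "not HVM" is no longer equivalent to "PV", so checks that really
mean "is this a PV guest?" are rewritten with the positive is_pv_vcpu() /
is_pv_domain() predicates. A minimal standalone sketch of that reasoning
(the enum and helper names below are illustrative stand-ins, not the actual
Xen definitions):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative guest types; the real predicates live in Xen, not here. */
    enum guest_type { guest_pv, guest_hvm, guest_pvh };

    static bool is_pv(enum guest_type t)  { return t == guest_pv; }
    static bool is_hvm(enum guest_type t) { return t == guest_hvm; }

    int main(void)
    {
        /* The old check "!is_hvm" was meant to mean "PV only"...      */
        /* ...but once PVH exists it matches PVH guests as well.       */
        printf("PVH passes !is_hvm(): %d\n", !is_hvm(guest_pvh)); /* 1 - wrong   */
        printf("PVH passes is_pv():   %d\n",  is_pv(guest_pvh));  /* 0 - correct */
        return 0;
    }

That is why PVH now skips the PV-only mapcache paths (falling back to
mfn_to_virt()) and why show_registers() sends PVH down the HVM branch.
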
> diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
> index 9297ea0..5092fdb 100644
> --- a/xen/arch/x86/domain_page.c
> +++ b/xen/arch/x86/domain_page.c
> @@ -34,7 +34,7 @@ static inline struct vcpu *mapcache_current_vcpu(void)
> * then it means we are running on the idle domain's page table and must
> * therefore use its mapcache.
> */
> - if ( unlikely(pagetable_is_null(v->arch.guest_table)) && !is_hvm_vcpu(v) )
> + if ( unlikely(pagetable_is_null(v->arch.guest_table)) && is_pv_vcpu(v) )
> {
> /* If we really are idling, perform lazy context switch now. */
> if ( (v = idle_vcpu[smp_processor_id()]) == current )
> @@ -71,7 +71,7 @@ void *map_domain_page(unsigned long mfn)
> #endif
>
> v = mapcache_current_vcpu();
> - if ( !v || is_hvm_vcpu(v) )
> + if ( !v || !is_pv_vcpu(v) )
> return mfn_to_virt(mfn);
>
> dcache = &v->domain->arch.pv_domain.mapcache;
> @@ -176,7 +176,7 @@ void unmap_domain_page(const void *ptr)
> ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END);
>
> v = mapcache_current_vcpu();
> - ASSERT(v && !is_hvm_vcpu(v));
> + ASSERT(v && is_pv_vcpu(v));
>
> dcache = &v->domain->arch.pv_domain.mapcache;
> ASSERT(dcache->inuse);
> @@ -243,7 +243,7 @@ int mapcache_domain_init(struct domain *d)
> struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
> unsigned int bitmap_pages;
>
> - if ( is_hvm_domain(d) || is_idle_domain(d) )
> + if ( !is_pv_domain(d) || is_idle_domain(d) )
> return 0;
>
> #ifdef NDEBUG
> @@ -274,7 +274,7 @@ int mapcache_vcpu_init(struct vcpu *v)
> unsigned int ents = d->max_vcpus * MAPCACHE_VCPU_ENTRIES;
> unsigned int nr = PFN_UP(BITS_TO_LONGS(ents) * sizeof(long));
>
> - if ( is_hvm_vcpu(v) || !dcache->inuse )
> + if ( !is_pv_vcpu(v) || !dcache->inuse )
> return 0;
>
> if ( ents > dcache->entries )
> diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
> index d2f7209..bcfd740 100644
> --- a/xen/arch/x86/x86_64/traps.c
> +++ b/xen/arch/x86/x86_64/traps.c
> @@ -85,7 +85,7 @@ void show_registers(struct cpu_user_regs *regs)
> enum context context;
> struct vcpu *v = current;
>
> - if ( is_hvm_vcpu(v) && guest_mode(regs) )
> + if ( !is_pv_vcpu(v) && guest_mode(regs) )
> {
> struct segment_register sreg;
> context = CTXT_hvm_guest;
> @@ -146,8 +146,8 @@ void vcpu_show_registers(const struct vcpu *v)
> const struct cpu_user_regs *regs = &v->arch.user_regs;
> unsigned long crs[8];
>
> - /* No need to handle HVM for now. */
> - if ( is_hvm_vcpu(v) )
> + /* No need to handle HVM and PVH for now. */
> + if ( !is_pv_vcpu(v) )
> return;
>
> crs[0] = v->arch.pv_vcpu.ctrlreg[0];
> --
> 1.7.2.3
>
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel