Re: [Xen-devel] [PATCH 2/6] x86/mm: use optional cache in guest_walk_tables()
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
> Sent: 19 July 2018 11:47
> To: xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Paul Durrant <Paul.Durrant@xxxxxxxxxx>; George Dunlap <George.Dunlap@xxxxxxxxxx>
> Subject: [PATCH 2/6] x86/mm: use optional cache in guest_walk_tables()
>
> The caching isn't actually implemented here; this is just setting the stage.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>

> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -2572,6 +2572,18 @@ void hvm_dump_emulation_state(const char
>                      hvmemul_ctxt->insn_buf);
>  }
>  
> +bool hvmemul_read_cache(const struct hvmemul_cache *cache, paddr_t gpa,
> +                        unsigned int level, void *buffer, unsigned int size)
> +{
> +    return false;
> +}
> +
> +void hvmemul_write_cache(struct hvmemul_cache *cache, paddr_t gpa,
> +                         unsigned int level, const void *buffer,
> +                         unsigned int size)
> +{
> +}
> +
>  /*
>   * Local variables:
>   * mode: C
>
> --- a/xen/arch/x86/mm/guest_walk.c
> +++ b/xen/arch/x86/mm/guest_walk.c
> @@ -94,6 +94,7 @@ guest_walk_tables(struct vcpu *v, struct
>      guest_l4e_t *l4p;
>  #endif
>      uint32_t gflags, rc;
> +    paddr_t gpa;
>      unsigned int leaf_level;
>      p2m_query_t qt = P2M_ALLOC | P2M_UNSHARE;
>  
> @@ -134,7 +135,15 @@ guest_walk_tables(struct vcpu *v, struct
>      /* Get the l4e from the top level table and check its flags*/
>      gw->l4mfn = top_mfn;
>      l4p = (guest_l4e_t *) top_map;
> -    gw->l4e = l4p[guest_l4_table_offset(gla)];
> +    gpa = gfn_to_gaddr(top_gfn) +
> +          guest_l4_table_offset(gla) * sizeof(guest_l4e_t);
> +    if ( !cache ||
> +         !hvmemul_read_cache(cache, gpa, 4, &gw->l4e, sizeof(gw->l4e)) )
> +    {
> +        gw->l4e = l4p[guest_l4_table_offset(gla)];
> +        if ( cache )
> +            hvmemul_write_cache(cache, gpa, 4, &gw->l4e, sizeof(gw->l4e));
> +    }
>      gflags = guest_l4e_get_flags(gw->l4e);
>      if ( !(gflags & _PAGE_PRESENT) )
>          goto out;
> @@ -164,7 +173,15 @@ guest_walk_tables(struct vcpu *v, struct
>      }
>  
>      /* Get the l3e and check its flags*/
> -    gw->l3e = l3p[guest_l3_table_offset(gla)];
> +    gpa = gfn_to_gaddr(guest_l4e_get_gfn(gw->l4e)) +
> +          guest_l3_table_offset(gla) * sizeof(guest_l3e_t);
> +    if ( !cache ||
> +         !hvmemul_read_cache(cache, gpa, 3, &gw->l3e, sizeof(gw->l3e)) )
> +    {
> +        gw->l3e = l3p[guest_l3_table_offset(gla)];
> +        if ( cache )
> +            hvmemul_write_cache(cache, gpa, 3, &gw->l3e, sizeof(gw->l3e));
> +    }
>      gflags = guest_l3e_get_flags(gw->l3e);
>      if ( !(gflags & _PAGE_PRESENT) )
>          goto out;
> @@ -216,7 +233,16 @@ guest_walk_tables(struct vcpu *v, struct
>  #else /* PAE only... */
>  
>      /* Get the l3e and check its flag */
> -    gw->l3e = ((guest_l3e_t *)top_map)[guest_l3_table_offset(gla)];
> +    gpa = gfn_to_gaddr(top_gfn) + ((unsigned long)top_map & ~PAGE_MASK) +
> +          guest_l3_table_offset(gla) * sizeof(guest_l3e_t);
> +    if ( !cache ||
> +         !hvmemul_read_cache(cache, gpa, 3, &gw->l3e, sizeof(gw->l3e)) )
> +    {
> +        gw->l3e = ((guest_l3e_t *)top_map)[guest_l3_table_offset(gla)];
> +        if ( cache )
> +            hvmemul_write_cache(cache, gpa, 3, &gw->l3e, sizeof(gw->l3e));
> +    }
> +
>      gflags = guest_l3e_get_flags(gw->l3e);
>      if ( !(gflags & _PAGE_PRESENT) )
>          goto out;
> @@ -242,18 +268,24 @@ guest_walk_tables(struct vcpu *v, struct
>          goto out;
>      }
>  
> -    /* Get the l2e */
> -    gw->l2e = l2p[guest_l2_table_offset(gla)];
> -
>  #else /* 32-bit only... */
>  
> -    /* Get l2e from the top level table */
>      gw->l2mfn = top_mfn;
>      l2p = (guest_l2e_t *) top_map;
> -    gw->l2e = l2p[guest_l2_table_offset(gla)];
>  
>  #endif /* All levels... */
>  
> +    /* Get the l2e */
> +    gpa = gfn_to_gaddr(top_gfn) +
> +          guest_l2_table_offset(gla) * sizeof(guest_l2e_t);
> +    if ( !cache ||
> +         !hvmemul_read_cache(cache, gpa, 2, &gw->l2e, sizeof(gw->l2e)) )
> +    {
> +        gw->l2e = l2p[guest_l2_table_offset(gla)];
> +        if ( cache )
> +            hvmemul_write_cache(cache, gpa, 2, &gw->l2e, sizeof(gw->l2e));
> +    }
> +
>      /* Check the l2e flags. */
>      gflags = guest_l2e_get_flags(gw->l2e);
>      if ( !(gflags & _PAGE_PRESENT) )
> @@ -335,7 +367,17 @@ guest_walk_tables(struct vcpu *v, struct
>          gw->pfec |= rc & PFEC_synth_mask;
>          goto out;
>      }
> -    gw->l1e = l1p[guest_l1_table_offset(gla)];
> +
> +    gpa = gfn_to_gaddr(top_gfn) +
> +          guest_l1_table_offset(gla) * sizeof(guest_l1e_t);
> +    if ( !cache ||
> +         !hvmemul_read_cache(cache, gpa, 1, &gw->l1e, sizeof(gw->l1e)) )
> +    {
> +        gw->l1e = l1p[guest_l1_table_offset(gla)];
> +        if ( cache )
> +            hvmemul_write_cache(cache, gpa, 1, &gw->l1e, sizeof(gw->l1e));
> +    }
> +
>      gflags = guest_l1e_get_flags(gw->l1e);
>      if ( !(gflags & _PAGE_PRESENT) )
>          goto out;
> --- a/xen/include/asm-x86/hvm/emulate.h
> +++ b/xen/include/asm-x86/hvm/emulate.h
> @@ -98,6 +98,13 @@ int hvmemul_do_pio_buffer(uint16_t port,
>                            uint8_t dir,
>                            void *buffer);
>  
> +struct hvmemul_cache;
> +bool hvmemul_read_cache(const struct hvmemul_cache *, paddr_t gpa,
> +                        unsigned int level, void *buffer, unsigned int size);
> +void hvmemul_write_cache(struct hvmemul_cache *, paddr_t gpa,
> +                         unsigned int level, const void *buffer,
> +                         unsigned int size);
> +
>  void hvm_dump_emulation_state(const char *loglvl, const char *prefix,
>                                struct hvm_emulate_ctxt *hvmemul_ctxt, int rc);
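A note for readers following the series: as the description says, the two hooks are pure stubs at this point. hvmemul_read_cache() always misses and hvmemul_write_cache() discards its input, so guest_walk_tables() still reads every page-table entry from guest memory; behaviour only changes once a later patch supplies a real implementation. The contract the call sites rely on is simply that a (gpa, level) pair stored via hvmemul_write_cache() can afterwards be returned by hvmemul_read_cache(). Below is a minimal sketch of an implementation honouring that contract; the struct hvmemul_cache layout and field names are invented here for illustration and are not the ones the series later introduces.

#include <xen/lib.h>     /* ARRAY_SIZE() */
#include <xen/string.h>  /* memcpy() */
#include <xen/types.h>   /* bool, paddr_t, uint64_t */

/* Illustrative layout only. */
struct hvmemul_cache {
    struct {
        paddr_t  gpa;     /* guest physical address of the cached entry */
        bool     valid;
        uint64_t data;    /* wide enough for any guest_lNe_t */
    } slot[4 + 1];        /* one slot per level 1..4; slot 0 unused */
};

bool hvmemul_read_cache(const struct hvmemul_cache *cache, paddr_t gpa,
                        unsigned int level, void *buffer, unsigned int size)
{
    /* Call sites guard with "if ( cache )", so cache is non-NULL here. */
    if ( level >= ARRAY_SIZE(cache->slot) ||
         !cache->slot[level].valid ||
         cache->slot[level].gpa != gpa ||
         size > sizeof(cache->slot[level].data) )
        return false;

    memcpy(buffer, &cache->slot[level].data, size);
    return true;
}

void hvmemul_write_cache(struct hvmemul_cache *cache, paddr_t gpa,
                         unsigned int level, const void *buffer,
                         unsigned int size)
{
    if ( level >= ARRAY_SIZE(cache->slot) ||
         size > sizeof(cache->slot[level].data) )
        return;

    cache->slot[level].gpa = gpa;
    memcpy(&cache->slot[level].data, buffer, size);
    cache->slot[level].valid = true;
}

Indexing by level suffices because one walk reads at most one entry per level, which is exactly the access pattern of guest_walk_tables(); cache lifetime and invalidation across walks are deliberately left out of this sketch.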