[Xen-devel] [PATCH SpectreV1+L1TF v6 8/9] common/grant_table: block speculative out-of-bound accesses
Guests can issue grant table operations and provide guest-controlled data to
them. This data is also used for memory loads. To avoid speculative
out-of-bound accesses, we use the array_index_nospec macro where applicable.
However, there are also memory accesses that cannot be protected by clamping
a single array index, e.g. sequences of multiple accesses in a row. To
protect these, a nospec barrier is placed between the actual range check and
the access, via the block_speculation macro.

As different versions of grant tables use structures of different size, and
the status is encoded in a separate array for version 2, speculative
execution might touch zero-initialized version 2 structures while the table
is actually using version 1. As PV guests can have control over their NULL
page, these accesses are prevented by protecting the grant table version
evaluation as well.

This commit is part of the SpectreV1+L1TF mitigation patch series.

Signed-off-by: Norbert Manthey <nmanthey@xxxxxxxxx>

---
Notes:
    v6: Explain version 1 vs version 2 case in commit message
        Protect grant table version checks
        Use block_speculation in map_grant_ref instead of updating op->ref
        Move evaluate_nospec closer to the okay variable in gnttab_transfer
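For reviewers unfamiliar with the nospec primitives, the standalone sketch
below illustrates the pattern applied throughout the patch: an architectural
bounds check, followed by either clamping the index or serialising execution
before the dependent access. The demo_* helpers are simplified stand-ins
written only for this illustration; they are not Xen's implementations (the
real array_index_nospec(), block_speculation() and evaluate_nospec() come
from xen/nospec.h, which the first hunk includes).

/* Illustrative only -- simplified stand-ins for Xen's nospec helpers. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for array_index_nospec(): return an index that stays within
 * [0, size) even on speculative paths. */
static inline size_t demo_array_index_nospec(size_t idx, size_t size)
{
    return idx < size ? idx : 0;
}

/* Stand-in for block_speculation(): stop speculation past this point so
 * that preceding checks have architecturally completed. */
static inline void demo_block_speculation(void)
{
#if defined(__x86_64__) || defined(__i386__)
    __asm__ __volatile__ ( "lfence" ::: "memory" );
#endif
}

#define NR_ENTRIES 32
static uint32_t table[NR_ENTRIES];

static int read_entry(size_t ref, uint32_t *out)
{
    /* Architectural bounds check, as done before PIN_FAIL in the patch. */
    if ( ref >= NR_ENTRIES )
        return -1;

    /* Clamp the index so a mispredicted branch cannot load out of bounds. */
    ref = demo_array_index_nospec(ref, NR_ENTRIES);
    *out = table[ref];
    return 0;
}

int main(void)
{
    uint32_t val;

    table[3] = 0xabcd;
    if ( read_entry(3, &val) == 0 )
        printf("entry 3 = %#x\n", val);

    /* Where one check guards several subsequent accesses (cf.
     * gnttab_copy_buf), a single barrier is used instead of clamping
     * every index. */
    demo_block_speculation();
    return 0;
}

The design choice mirrors the patch: a single index is clamped with
array_index_nospec(), while a check guarding several subsequent accesses is
followed by one block_speculation() barrier instead.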
 xen/common/grant_table.c | 48 ++++++++++++++++++++++++++++++++++++------------
 1 file changed, 36 insertions(+), 12 deletions(-)

diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -37,6 +37,7 @@
 #include <xen/paging.h>
 #include <xen/keyhandler.h>
 #include <xen/vmap.h>
+#include <xen/nospec.h>
 #include <xsm/xsm.h>
 
 #include <asm/flushtlb.h>
@@ -203,8 +204,9 @@ static inline unsigned int nr_status_frames(const struct grant_table *gt)
 }
 
 #define MAPTRACK_PER_PAGE (PAGE_SIZE / sizeof(struct grant_mapping))
-#define maptrack_entry(t, e) \
-    ((t)->maptrack[(e)/MAPTRACK_PER_PAGE][(e)%MAPTRACK_PER_PAGE])
+#define maptrack_entry(t, e)                                              \
+    ((t)->maptrack[array_index_nospec(e, (t)->maptrack_limit)             \
+                   /MAPTRACK_PER_PAGE][(e)%MAPTRACK_PER_PAGE])
 
 static inline unsigned int
 nr_maptrack_frames(struct grant_table *t)
@@ -963,9 +965,13 @@ map_grant_ref(
         PIN_FAIL(unlock_out, GNTST_bad_gntref, "Bad ref %#x for d%d\n",
                  op->ref, rgt->domain->domain_id);
 
+    /* Make sure the above check is not bypassed speculatively */
+    block_speculation();
+
     act = active_entry_acquire(rgt, op->ref);
     shah = shared_entry_header(rgt, op->ref);
-    status = rgt->gt_version == 1 ? &shah->flags : &status_entry(rgt, op->ref);
+    status = evaluate_nospec(rgt->gt_version == 1) ? &shah->flags
+                                                   : &status_entry(rgt, op->ref);
 
     /* If already pinned, check the active domid and avoid refcnt overflow. */
     if ( act->pin &&
@@ -987,7 +993,7 @@ map_grant_ref(
 
     if ( !act->pin )
     {
-        unsigned long gfn = rgt->gt_version == 1 ?
+        unsigned long gfn = evaluate_nospec(rgt->gt_version == 1) ?
                             shared_entry_v1(rgt, op->ref).frame :
                             shared_entry_v2(rgt, op->ref).full_page.frame;
 
@@ -1321,7 +1327,8 @@ unmap_common(
         goto unlock_out;
     }
 
-    act = active_entry_acquire(rgt, op->ref);
+    act = active_entry_acquire(rgt, array_index_nospec(op->ref,
+                                                       nr_grant_entries(rgt)));
 
     /*
      * Note that we (ab)use the active entry lock here to protect against
@@ -1418,7 +1425,7 @@ unmap_common_complete(struct gnttab_unmap_common *op)
     struct page_info *pg;
     uint16_t *status;
 
-    if ( !op->done )
+    if ( evaluate_nospec(!op->done) )
     {
         /* unmap_common() didn't do anything - nothing to complete. */
         return;
@@ -2026,6 +2033,9 @@ gnttab_prepare_for_transfer(
         goto fail;
     }
 
+    /* Make sure the above check is not bypassed speculatively */
+    ref = array_index_nospec(ref, nr_grant_entries(rgt));
+
     sha = shared_entry_header(rgt, ref);
 
     scombo.word = *(u32 *)&sha->flags;
@@ -2223,7 +2233,11 @@ gnttab_transfer(
         okay = gnttab_prepare_for_transfer(e, d, gop.ref);
         spin_lock(&e->page_alloc_lock);
 
-        if ( unlikely(!okay) || unlikely(e->is_dying) )
+        /*
+         * Make sure the reference bound check in gnttab_prepare_for_transfer
+         * is respected and speculative execution is blocked accordingly
+         */
+        if ( unlikely(!evaluate_nospec(okay)) || unlikely(e->is_dying) )
         {
             bool_t drop_dom_ref = !domain_adjust_tot_pages(e, -1);
 
@@ -2253,7 +2267,7 @@ gnttab_transfer(
         grant_read_lock(e->grant_table);
 
         act = active_entry_acquire(e->grant_table, gop.ref);
-        if ( e->grant_table->gt_version == 1 )
+        if ( evaluate_nospec(e->grant_table->gt_version == 1) )
         {
             grant_entry_v1_t *sha = &shared_entry_v1(e->grant_table, gop.ref);
 
@@ -2408,9 +2422,12 @@ acquire_grant_for_copy(
         PIN_FAIL(gt_unlock_out, GNTST_bad_gntref,
                  "Bad grant reference %#x\n", gref);
 
+    /* Make sure the above check is not bypassed speculatively */
+    gref = array_index_nospec(gref, nr_grant_entries(rgt));
+
     act = active_entry_acquire(rgt, gref);
     shah = shared_entry_header(rgt, gref);
-    if ( rgt->gt_version == 1 )
+    if ( evaluate_nospec(rgt->gt_version == 1) )
     {
         sha2 = NULL;
         status = &shah->flags;
@@ -2826,6 +2843,9 @@ static int gnttab_copy_buf(const struct gnttab_copy *op,
                  op->dest.offset, dest->ptr.offset,
                  op->len, dest->len);
 
+    /* Make sure the above checks are not bypassed speculatively */
+    block_speculation();
+
     memcpy(dest->virt + op->dest.offset, src->virt + op->source.offset,
            op->len);
     gnttab_mark_dirty(dest->domain, dest->mfn);
@@ -3211,6 +3231,10 @@ swap_grant_ref(grant_ref_t ref_a, grant_ref_t ref_b)
     if ( unlikely(ref_b >= nr_grant_entries(d->grant_table)))
         PIN_FAIL(out, GNTST_bad_gntref, "Bad ref-b %#x\n", ref_b);
 
+    /* Make sure the above checks are not bypassed speculatively */
+    ref_a = array_index_nospec(ref_a, nr_grant_entries(d->grant_table));
+    ref_b = array_index_nospec(ref_b, nr_grant_entries(d->grant_table));
+
     /* Swapping the same ref is a no-op. */
     if ( ref_a == ref_b )
         goto out;
@@ -3223,7 +3247,7 @@ swap_grant_ref(grant_ref_t ref_a, grant_ref_t ref_b)
     if ( act_b->pin )
         PIN_FAIL(out, GNTST_eagain, "ref b %#x busy\n", ref_b);
 
-    if ( gt->gt_version == 1 )
+    if ( evaluate_nospec(gt->gt_version == 1) )
     {
         grant_entry_v1_t shared;
 
@@ -3771,7 +3795,7 @@ int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref,
         rc = -EINVAL;
     else if ( ref >= nr_grant_entries(gt) )
         rc = -ENOENT;
-    else if ( gt->gt_version == 1 )
+    else if ( evaluate_nospec(gt->gt_version == 1) )
     {
         const grant_entry_v1_t *sha1 = &shared_entry_v1(gt, ref);
 
@@ -3793,7 +3817,7 @@ int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref,
         rc = -ENXIO;
     else if ( !rc && status )
     {
-        if ( gt->gt_version == 1 )
+        if ( evaluate_nospec(gt->gt_version == 1) )
            *status = flags;
         else
            *status = status_entry(gt, ref);
-- 
2.7.4

Amazon Development Center Germany GmbH
Krausenstr. 38
10117 Berlin
Geschaeftsfuehrer: Christian Schlaeger, Ralf Herbrich
Ust-ID: DE 289 237 879
Eingetragen am Amtsgericht Charlottenburg HRB 149173 B