[PATCH] x86/shadow: replace stale literal numbers in hash_{vcpu,domain}_foreach()
15 apparently once used to be the last valid type to request a callback
for, and the dimension of the respective array. The arrays meanwhile
are larger than this (in a benign way, i.e. no caller ever sets a mask
bit higher than 15), dimensioned by SH_type_unused. Have the ASSERT()s
follow suit and add build time checks at the call sites.

Also adjust a comment naming the wrong of the two functions.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
The ASSERT()s being adjusted look redundant with the BUILD_BUG_ON()s
being added, so I wonder whether dropping them wouldn't be the better
route.

--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1623,6 +1623,9 @@ void shadow_hash_delete(struct domain *d
 typedef int (*hash_vcpu_callback_t)(struct vcpu *v, mfn_t smfn, mfn_t other_mfn);
 typedef int (*hash_domain_callback_t)(struct domain *d, mfn_t smfn, mfn_t other_mfn);
 
+#define HASH_CALLBACKS_CHECK(mask) \
+    BUILD_BUG_ON((mask) > (1U << ARRAY_SIZE(callbacks)) - 1)
+
 static void hash_vcpu_foreach(struct vcpu *v, unsigned int callback_mask,
                               const hash_vcpu_callback_t callbacks[],
                               mfn_t callback_mfn)
@@ -1658,7 +1661,7 @@ static void hash_vcpu_foreach(struct vcp
         {
             if ( callback_mask & (1 << x->u.sh.type) )
             {
-                ASSERT(x->u.sh.type <= 15);
+                ASSERT(x->u.sh.type < SH_type_unused);
                 ASSERT(callbacks[x->u.sh.type] != NULL);
                 done = callbacks[x->u.sh.type](v, page_to_mfn(x),
                                                callback_mfn);
@@ -1705,7 +1708,7 @@ static void hash_domain_foreach(struct d
         {
             if ( callback_mask & (1 << x->u.sh.type) )
             {
-                ASSERT(x->u.sh.type <= 15);
+                ASSERT(x->u.sh.type < SH_type_unused);
                 ASSERT(callbacks[x->u.sh.type] != NULL);
                 done = callbacks[x->u.sh.type](d, page_to_mfn(x),
                                                callback_mfn);
@@ -2009,6 +2012,7 @@ int sh_remove_write_access(struct domain
         perfc_incr(shadow_writeable_bf_1);
     else
         perfc_incr(shadow_writeable_bf);
+    HASH_CALLBACKS_CHECK(callback_mask);
     hash_domain_foreach(d, callback_mask, callbacks, gmfn);
 
     /* If that didn't catch the mapping, then there's some non-pagetable
@@ -2080,6 +2084,7 @@ int sh_remove_all_mappings(struct domain
 
     /* Brute-force search of all the shadows, by walking the hash */
     perfc_incr(shadow_mappings_bf);
+    HASH_CALLBACKS_CHECK(callback_mask);
     hash_domain_foreach(d, callback_mask, callbacks, gmfn);
 
     /* If that didn't catch the mapping, something is very wrong */
@@ -2246,10 +2251,12 @@ void sh_remove_shadows(struct domain *d,
     /* Search for this shadow in all appropriate shadows */
     perfc_incr(shadow_unshadow);
 
-    /* Lower-level shadows need to be excised from upper-level shadows.
-     * This call to hash_vcpu_foreach() looks dangerous but is in fact OK: each
+    /*
+     * Lower-level shadows need to be excised from upper-level shadows. This
+     * call to hash_domain_foreach() looks dangerous but is in fact OK: each
      * call will remove at most one shadow, and terminate immediately when
-     * it does remove it, so we never walk the hash after doing a deletion. */
+     * it does remove it, so we never walk the hash after doing a deletion.
+     */
 #define DO_UNSHADOW(_type) do {                                 \
     t = (_type);                                                \
     if( !(pg->count_info & PGC_page_table)                      \
@@ -2270,6 +2277,7 @@ void sh_remove_shadows(struct domain *d,
     if( !fast                                                   \
         && (pg->count_info & PGC_page_table)                    \
         && (pg->shadow_flags & (1 << t)) )                      \
+        HASH_CALLBACKS_CHECK(SHF_page_type_mask);               \
         hash_domain_foreach(d, masks[t], callbacks, smfn);      \
 } while (0)
 
@@ -2370,6 +2378,7 @@ void sh_reset_l3_up_pointers(struct vcpu
     };
     static const unsigned int callback_mask = SHF_L3_64;
 
+    HASH_CALLBACKS_CHECK(callback_mask);
     hash_vcpu_foreach(v, callback_mask, callbacks, INVALID_MFN);
 }
 
@@ -3420,6 +3429,7 @@ void shadow_audit_tables(struct vcpu *v)
         }
     }
 
+    HASH_CALLBACKS_CHECK(SHF_page_type_mask);
     hash_vcpu_foreach(v, mask, callbacks, INVALID_MFN);
 }
 
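[Editorial note: for readers unfamiliar with the pattern, below is a minimal,
compilable sketch of the kind of check the patch adds: a callback array
dimensioned by an enum sentinel, plus a build-time assertion that a constant
mask only sets bits that index into that array. The enum values, the noop()
callback, and the local BUILD_BUG_ON()/ARRAY_SIZE() definitions are simplified
stand-ins, not Xen's actual definitions.]

#include <stdio.h>

/* Stand-in for Xen's BUILD_BUG_ON(): refuses to compile if cond is true. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
#define ARRAY_SIZE(a)      (sizeof(a) / sizeof((a)[0]))

/*
 * Simplified stand-in for the shadow type enumeration; the sentinel
 * SH_type_unused dimensions the callback arrays, as in the patch.
 */
enum {
    SH_type_l1_shadow,
    SH_type_l2_shadow,
    SH_type_l3_shadow,
    SH_type_unused
};

typedef void (*callback_t)(void);

static void noop(void) { }

/* Mask built purely from constants, so it can be checked at build time. */
#define CALLBACK_MASK ((1u << SH_type_l1_shadow) | (1u << SH_type_l3_shadow))

int main(void)
{
    static const callback_t callbacks[SH_type_unused] = {
        [SH_type_l1_shadow] = noop,
        [SH_type_l3_shadow] = noop,
    };
    unsigned int t;

    /*
     * The same shape of check as HASH_CALLBACKS_CHECK(): no bit in the
     * constant mask may lie at or above ARRAY_SIZE(callbacks).  Changing
     * CALLBACK_MASK to include (1u << SH_type_unused) breaks the build.
     */
    BUILD_BUG_ON(CALLBACK_MASK > (1u << ARRAY_SIZE(callbacks)) - 1);

    /* Dispatch loop in the spirit of hash_{vcpu,domain}_foreach(). */
    for ( t = 0; t < SH_type_unused; ++t )
        if ( (CALLBACK_MASK & (1u << t)) && callbacks[t] )
            callbacks[t]();

    printf("mask %#x fits callbacks[%zu]\n",
           CALLBACK_MASK, ARRAY_SIZE(callbacks));
    return 0;
}

[Note that BUILD_BUG_ON() needs a compile-time constant, and the patch's
HASH_CALLBACKS_CHECK() additionally names `callbacks` directly; both points
appear to be why the checks sit at the individual call sites, where a constant
mask and a local array called `callbacks` are in scope, rather than inside
hash_{vcpu,domain}_foreach() themselves.]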