[PATCH 03/10] viridian: introduce a per-cpu hypercall_vpmask and accessor functions...
From: Paul Durrant <pdurrant@xxxxxxxxxx>

... and make use of them in hvcall_flush()/need_flush().

Subsequent patches will need to deal with virtual processor masks
potentially wider than 64 bits. Thus, to avoid using too much stack, this
patch introduces global per-cpu virtual processor masks and converts the
implementation of hvcall_flush() to use them.

Signed-off-by: Paul Durrant <pdurrant@xxxxxxxxxx>
---
Cc: Wei Liu <wl@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/hvm/viridian/viridian.c | 51 +++++++++++++++++++++++++---
 1 file changed, 47 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index c4f720f58d6d..4ab1f14b2248 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -507,15 +507,41 @@ void viridian_domain_deinit(struct domain *d)
     XFREE(d->arch.hvm.viridian);
 }
 
+struct hypercall_vpmask {
+    DECLARE_BITMAP(mask, HVM_MAX_VCPUS);
+};
+
+static DEFINE_PER_CPU(struct hypercall_vpmask, hypercall_vpmask);
+
+static void vpmask_empty(struct hypercall_vpmask *vpmask)
+{
+    bitmap_zero(vpmask->mask, HVM_MAX_VCPUS);
+}
+
+static void vpmask_set(struct hypercall_vpmask *vpmask, unsigned int vp)
+{
+    __set_bit(vp, vpmask->mask);
+}
+
+static void vpmask_fill(struct hypercall_vpmask *vpmask)
+{
+    bitmap_fill(vpmask->mask, HVM_MAX_VCPUS);
+}
+
+static bool vpmask_test(struct hypercall_vpmask *vpmask, unsigned int vp)
+{
+    return test_bit(vp, vpmask->mask);
+}
+
 /*
  * Windows should not issue the hypercalls requiring this callback in the
  * case where vcpu_id would exceed the size of the mask.
  */
 static bool need_flush(void *ctxt, struct vcpu *v)
 {
-    uint64_t vcpu_mask = *(uint64_t *)ctxt;
+    struct hypercall_vpmask *vpmask = ctxt;
 
-    return vcpu_mask & (1ul << v->vcpu_id);
+    return vpmask_test(vpmask, v->vcpu_id);
 }
 
 union hypercall_input {
@@ -546,6 +572,7 @@ static int hvcall_flush(union hypercall_input *input,
                         unsigned long input_params_gpa,
                         unsigned long output_params_gpa)
 {
+    struct hypercall_vpmask *vpmask = &this_cpu(hypercall_vpmask);
     struct {
         uint64_t address_space;
         uint64_t flags;
@@ -567,13 +594,29 @@ static int hvcall_flush(union hypercall_input *input,
      * so err on the safe side.
      */
     if ( input_params.flags & HV_FLUSH_ALL_PROCESSORS )
-        input_params.vcpu_mask = ~0ul;
+        vpmask_fill(vpmask);
+    else
+    {
+        unsigned int vp;
+
+        vpmask_empty(vpmask);
+        for ( vp = 0; vp < 64; vp++ )
+        {
+            if ( !input_params.vcpu_mask )
+                break;
+
+            if ( input_params.vcpu_mask & 1 )
+                vpmask_set(vpmask, vp);
+
+            input_params.vcpu_mask >>= 1;
+        }
+    }
 
     /*
      * A false return means that another vcpu is currently trying
      * a similar operation, so back off.
      */
-    if ( !paging_flush_tlb(need_flush, &input_params.vcpu_mask) )
+    if ( !paging_flush_tlb(need_flush, vpmask) )
         return -ERESTART;
 
     output->rep_complete = input->rep_count;
-- 
2.20.1
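
The conversion loop in hvcall_flush() is the part that subsequent patches
generalise. Below is a minimal standalone sketch (plain C, outside the Xen
tree) of the same 64-bit-mask-to-bitmap conversion; MAX_VPS, struct vpmask
and the helper bodies here are illustrative stand-ins for HVM_MAX_VCPUS,
struct hypercall_vpmask and Xen's bitmap primitives (bitmap_zero(),
__set_bit(), test_bit()), not code from this patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_VPS 128 /* stand-in for HVM_MAX_VCPUS */
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define VPMASK_LONGS ((MAX_VPS + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Stand-in for DECLARE_BITMAP(mask, HVM_MAX_VCPUS) */
struct vpmask {
    unsigned long mask[VPMASK_LONGS];
};

static void vpmask_empty(struct vpmask *v)
{
    memset(v->mask, 0, sizeof(v->mask));
}

static void vpmask_set(struct vpmask *v, unsigned int vp)
{
    v->mask[vp / BITS_PER_LONG] |= 1UL << (vp % BITS_PER_LONG);
}

static bool vpmask_test(const struct vpmask *v, unsigned int vp)
{
    return v->mask[vp / BITS_PER_LONG] & (1UL << (vp % BITS_PER_LONG));
}

int main(void)
{
    uint64_t vcpu_mask = 0x8000000000000005ULL; /* VPs 0, 2 and 63 */
    struct vpmask vpmask;
    unsigned int vp;

    vpmask_empty(&vpmask);

    /* Same shape as the hvcall_flush() loop: stop once no bits remain. */
    for ( vp = 0; vp < 64; vp++ )
    {
        if ( !vcpu_mask )
            break;

        if ( vcpu_mask & 1 )
            vpmask_set(&vpmask, vp);

        vcpu_mask >>= 1;
    }

    for ( vp = 0; vp < MAX_VPS; vp++ )
        if ( vpmask_test(&vpmask, vp) )
            printf("vp %u set\n", vp);

    return 0;
}

The motivation for the per-cpu object shows up in the sizes: with MAX_VPS
at 128 the bitmap is already two 64-bit words on a 64-bit build, and a
guest-visible processor mask wider than 64 bits would otherwise have to
live on the hypercall stack; keeping one mask per physical CPU sidesteps
that, as the commit message notes.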