[RFC PATCH 7/7] accel/kvm: Use CPU_FOREACH_KVM()
Only iterate over KVM vCPUs when running KVM specific code.

Signed-off-by: Philippe Mathieu-Daudé <philmd@xxxxxxxxxx>
---
 include/system/kvm_int.h         |  3 +++
 accel/kvm/kvm-all.c              | 14 +++++++-------
 hw/i386/kvm/clock.c              |  3 ++-
 hw/intc/spapr_xive_kvm.c         |  5 +++--
 hw/intc/xics_kvm.c               |  5 +++--
 target/i386/kvm/kvm.c            |  4 ++--
 target/i386/kvm/xen-emu.c        |  2 +-
 target/s390x/kvm/kvm.c           |  2 +-
 target/s390x/kvm/stsi-topology.c |  3 ++-
 9 files changed, 24 insertions(+), 17 deletions(-)

diff --git a/include/system/kvm_int.h b/include/system/kvm_int.h
index 4de6106869b..0ef4c336b18 100644
--- a/include/system/kvm_int.h
+++ b/include/system/kvm_int.h
@@ -13,6 +13,7 @@
 #include "qapi/qapi-types-common.h"
 #include "qemu/accel.h"
 #include "qemu/queue.h"
+#include "system/hw_accel.h"
 #include "system/kvm.h"
 #include "hw/boards.h"
 #include "hw/i386/topology.h"
@@ -168,6 +169,8 @@ struct KVMState
     char *device;
 };
 
+#define CPU_FOREACH_KVM(cpu) CPU_FOREACH_HWACCEL(cpu)
+
 void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
                                   AddressSpace *as, int as_id,
                                   const char *name);
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index c65b790433c..9b26b286865 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -872,7 +872,7 @@ static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState* cpu)
     if (cpu) {
         total = kvm_dirty_ring_reap_one(s, cpu);
     } else {
-        CPU_FOREACH(cpu) {
+        CPU_FOREACH_KVM(cpu) {
             total += kvm_dirty_ring_reap_one(s, cpu);
         }
     }
@@ -935,7 +935,7 @@ static void kvm_cpu_synchronize_kick_all(void)
 {
     CPUState *cpu;
 
-    CPU_FOREACH(cpu) {
+    CPU_FOREACH_KVM(cpu) {
         run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
     }
 }
@@ -3535,7 +3535,7 @@ int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
         }
     }
 
-    CPU_FOREACH(cpu) {
+    CPU_FOREACH_KVM(cpu) {
         err = kvm_update_guest_debug(cpu, 0);
         if (err) {
             return err;
@@ -3574,7 +3574,7 @@ int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
         }
     }
 
-    CPU_FOREACH(cpu) {
+    CPU_FOREACH_KVM(cpu) {
         err = kvm_update_guest_debug(cpu, 0);
         if (err) {
             return err;
@@ -3592,7 +3592,7 @@ void kvm_remove_all_breakpoints(CPUState *cpu)
     QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
         if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
             /* Try harder to find a CPU that currently sees the breakpoint. */
-            CPU_FOREACH(tmpcpu) {
+            CPU_FOREACH_KVM(tmpcpu) {
                 if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
                     break;
                 }
@@ -3603,7 +3603,7 @@ void kvm_remove_all_breakpoints(CPUState *cpu)
     }
     kvm_arch_remove_all_hw_breakpoints();
 
-    CPU_FOREACH(cpu) {
+    CPU_FOREACH_KVM(cpu) {
         kvm_update_guest_debug(cpu, 0);
     }
 }
@@ -4384,7 +4384,7 @@ static void query_stats_cb(StatsResultList **result, StatsTarget target,
     stats_args.result.stats = result;
     stats_args.names = names;
     stats_args.errp = errp;
-    CPU_FOREACH(cpu) {
+    CPU_FOREACH_KVM(cpu) {
         if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
             continue;
         }
diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c
index 63be5088420..f2638cf2c22 100644
--- a/hw/i386/kvm/clock.c
+++ b/hw/i386/kvm/clock.c
@@ -17,6 +17,7 @@
 #include "qemu/host-utils.h"
 #include "qemu/module.h"
 #include "system/kvm.h"
+#include "system/kvm_int.h"
 #include "system/runstate.h"
 #include "system/hw_accel.h"
 #include "kvm/kvm_i386.h"
@@ -196,7 +197,7 @@ static void kvmclock_vm_state_change(void *opaque, bool running,
         if (!cap_clock_ctrl) {
             return;
         }
-        CPU_FOREACH(cpu) {
+        CPU_FOREACH_KVM(cpu) {
             run_on_cpu(cpu, do_kvmclock_ctrl, RUN_ON_CPU_NULL);
         }
     } else {
diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c
index 26d30b41c15..08354f08512 100644
--- a/hw/intc/spapr_xive_kvm.c
+++ b/hw/intc/spapr_xive_kvm.c
@@ -14,6 +14,7 @@
 #include "target/ppc/cpu.h"
 #include "system/cpus.h"
 #include "system/kvm.h"
+#include "system/kvm_int.h"
 #include "system/runstate.h"
 #include "hw/ppc/spapr.h"
 #include "hw/ppc/spapr_cpu_core.h"
@@ -678,7 +679,7 @@ int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
      * 'post_load' handler of XiveTCTX because the machine is not
      * necessarily connected to the KVM device at that time.
      */
-    CPU_FOREACH(cs) {
+    CPU_FOREACH_KVM(cs) {
         PowerPCCPU *cpu = POWERPC_CPU(cs);
 
         ret = kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err);
@@ -795,7 +796,7 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
         kvmppc_xive_change_state_handler, xive);
 
     /* Connect the presenters to the initial VCPUs of the machine */
-    CPU_FOREACH(cs) {
+    CPU_FOREACH_KVM(cs) {
         PowerPCCPU *cpu = POWERPC_CPU(cs);
 
         ret = kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, errp);
diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c
index ee72969f5f1..aed2ad44363 100644
--- a/hw/intc/xics_kvm.c
+++ b/hw/intc/xics_kvm.c
@@ -29,6 +29,7 @@
 #include "qapi/error.h"
 #include "trace.h"
 #include "system/kvm.h"
+#include "system/kvm_int.h"
 #include "hw/ppc/spapr.h"
 #include "hw/ppc/spapr_cpu_core.h"
 #include "hw/ppc/xics.h"
@@ -418,7 +419,7 @@ int xics_kvm_connect(SpaprInterruptController *intc, uint32_t nr_servers,
     kvm_gsi_direct_mapping = true;
 
     /* Create the presenters */
-    CPU_FOREACH(cs) {
+    CPU_FOREACH_KVM(cs) {
         PowerPCCPU *cpu = POWERPC_CPU(cs);
 
         icp_kvm_realize(DEVICE(spapr_cpu_state(cpu)->icp), &local_err);
@@ -434,7 +435,7 @@ int xics_kvm_connect(SpaprInterruptController *intc, uint32_t nr_servers,
     }
 
     /* Connect the presenters to the initial VCPUs of the machine */
-    CPU_FOREACH(cs) {
+    CPU_FOREACH_KVM(cs) {
         PowerPCCPU *cpu = POWERPC_CPU(cs);
         icp_set_kvm_state(spapr_cpu_state(cpu)->icp, &local_err);
         if (local_err) {
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 2f66e63b880..437911d6c6a 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -329,7 +329,7 @@ void kvm_synchronize_all_tsc(void)
     CPUState *cpu;
 
     if (kvm_enabled()) {
-        CPU_FOREACH(cpu) {
+        CPU_FOREACH_KVM(cpu) {
             run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
         }
     }
@@ -2847,7 +2847,7 @@ static void *kvm_msr_energy_thread(void *data)
          * Identify the vcpu threads
          * Calculate the number of vcpu per package
          */
-        CPU_FOREACH(cpu) {
+        CPU_FOREACH_KVM(cpu) {
             for (int i = 0; i < num_threads; i++) {
                 if (cpu->thread_id == thd_stat[i].thread_id) {
                     thd_stat[i].is_vcpu = true;
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index e81a2458812..36ae9c11252 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -1422,7 +1422,7 @@ int kvm_xen_soft_reset(void)
         return err;
     }
 
-    CPU_FOREACH(cpu) {
+    CPU_FOREACH_KVM(cpu) {
         async_run_on_cpu(cpu, do_vcpu_soft_reset, RUN_ON_CPU_NULL);
     }
 
diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c
index db645a48133..a02e78ce807 100644
--- a/target/s390x/kvm/kvm.c
+++ b/target/s390x/kvm/kvm.c
@@ -1559,7 +1559,7 @@ static void handle_diag_318(S390CPU *cpu, struct kvm_run *run)
         return;
     }
 
-    CPU_FOREACH(t) {
+    CPU_FOREACH_KVM(t) {
         run_on_cpu(t, s390_do_cpu_set_diag318,
                    RUN_ON_CPU_HOST_ULONG(diag318_info));
     }
diff --git a/target/s390x/kvm/stsi-topology.c b/target/s390x/kvm/stsi-topology.c
index c8d6389cd87..cf1a9b5d218 100644
--- a/target/s390x/kvm/stsi-topology.c
+++ b/target/s390x/kvm/stsi-topology.c
@@ -10,6 +10,7 @@
 #include "cpu.h"
 #include "hw/s390x/sclp.h"
 #include "hw/s390x/cpu-topology.h"
+#include "system/kvm_int.h"
 
 QEMU_BUILD_BUG_ON(S390_CPU_ENTITLEMENT_LOW != 1);
 QEMU_BUILD_BUG_ON(S390_CPU_ENTITLEMENT_MEDIUM != 2);
@@ -256,7 +257,7 @@ static void s390_topology_fill_list_sorted(S390TopologyList *topology_list)
 
     QTAILQ_INSERT_HEAD(topology_list, &sentinel, next);
 
-    CPU_FOREACH(cs) {
+    CPU_FOREACH_KVM(cs) {
         S390TopologyId id = s390_topology_from_cpu(S390_CPU(cs));
         S390TopologyEntry *entry = NULL, *tmp;
 
-- 
2.47.1
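
For readers unfamiliar with the CPU_FOREACH() family, here is a minimal,
self-contained sketch of the filtered-iteration pattern the conversion above
relies on. It is not QEMU code: the DemoCPU type, its "accel" field and the
DEMO_* names are hypothetical stand-ins, and the real CPU_FOREACH_HWACCEL()
is whatever "system/hw_accel.h" (which this patch includes) provides. The
point is only that call sites keep the familiar foreach shape while vCPUs
owned by another accelerator are skipped.

#include <stdio.h>

typedef enum { DEMO_ACCEL_KVM, DEMO_ACCEL_TCG } DemoAccel;

typedef struct DemoCPU {
    int index;
    DemoAccel accel;              /* which accelerator owns this vCPU */
    struct DemoCPU *next;
} DemoCPU;

static DemoCPU *demo_first_cpu;

/* Plain iteration: visits every vCPU, whatever created it. */
#define DEMO_CPU_FOREACH(cpu) \
    for ((cpu) = demo_first_cpu; (cpu); (cpu) = (cpu)->next)

/* Filtered iteration: skip vCPUs that do not belong to KVM. */
#define DEMO_CPU_FOREACH_KVM(cpu)             \
    DEMO_CPU_FOREACH(cpu)                     \
        if ((cpu)->accel != DEMO_ACCEL_KVM) { \
            continue;                         \
        } else

int main(void)
{
    DemoCPU cpus[3] = {
        { .index = 0, .accel = DEMO_ACCEL_KVM },
        { .index = 1, .accel = DEMO_ACCEL_TCG },
        { .index = 2, .accel = DEMO_ACCEL_KVM },
    };
    DemoCPU *cpu;

    cpus[0].next = &cpus[1];
    cpus[1].next = &cpus[2];
    demo_first_cpu = &cpus[0];

    /* Only vCPUs 0 and 2 are visited; the TCG-owned one is skipped. */
    DEMO_CPU_FOREACH_KVM(cpu) {
        printf("KVM vCPU %d\n", cpu->index);
    }
    return 0;
}

With a macro of this shape the conversion in the patch stays purely
mechanical: each CPU_FOREACH() in KVM-only code becomes CPU_FOREACH_KVM()
and the loop body is left untouched.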