[Xen-devel] [PATCH 02/24] xen: allow global VIRQ handlers to be delegated to other domains
This patch sends global VIRQs to a domain designated as the VIRQ handler
instead of sending all global VIRQ events to dom0. This is required in
order to run xenstored in a stubdom, because VIRQ_DOM_EXC must be sent
to xenstored for domain destruction to work properly.

This patch was inspired by the xenstored stubdomain patch series sent
to xen-devel by Alex Zeffertt in 2009.

Signed-off-by: Diego Ongaro <diego.ongaro@xxxxxxxxxx>
Signed-off-by: Alex Zeffertt <alex.zeffertt@xxxxxxxxxxxxx>
Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
---
 tools/flask/policy/policy/flask/access_vectors |    1 +
 tools/libxc/xc_domain.c                        |   10 ++++
 tools/libxc/xenctrl.h                          |    9 +++
 xen/arch/x86/cpu/mcheck/amd_nonfatal.c         |    2 +-
 xen/arch/x86/cpu/mcheck/mce.c                  |    2 +-
 xen/arch/x86/cpu/mcheck/mce_intel.c            |    6 +-
 xen/arch/x86/cpu/mcheck/non-fatal.c            |    2 +-
 xen/common/cpu.c                               |    4 +-
 xen/common/domain.c                            |    8 ++--
 xen/common/domctl.c                            |   17 ++++++
 xen/common/event_channel.c                     |   66 +++++++++++++++++++++++-
 xen/common/trace.c                             |    2 +-
 xen/drivers/char/console.c                     |    4 +-
 xen/include/public/domctl.h                    |    8 +++
 xen/include/xen/event.h                        |   12 +++-
 xen/include/xsm/xsm.h                          |    6 ++
 xen/xsm/dummy.c                                |    6 ++
 xen/xsm/flask/hooks.c                          |    6 ++
 xen/xsm/flask/include/av_perm_to_string.h      |    1 +
 xen/xsm/flask/include/av_permissions.h         |    1 +
 20 files changed, 154 insertions(+), 19 deletions(-)

diff --git a/tools/flask/policy/policy/flask/access_vectors b/tools/flask/policy/policy/flask/access_vectors
index 644f2e1..5901911 100644
--- a/tools/flask/policy/policy/flask/access_vectors
+++ b/tools/flask/policy/policy/flask/access_vectors
@@ -85,6 +85,7 @@ class domain
     getpodtarget
     setpodtarget
     set_misc_info
+    set_virq_handler
 }
 
 class hvm
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index ab019b8..d98e68b 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1504,6 +1504,16 @@ int xc_domain_set_access_required(xc_interface *xch,
     return do_domctl(xch, &domctl);
 }
 
+int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_set_virq_handler;
+    domctl.domain = domid;
+    domctl.u.set_virq_handler.virq = virq;
+    return do_domctl(xch, &domctl);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 37b0fd6..2ffdd85 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -749,6 +749,15 @@ int xc_domain_p2m_audit(xc_interface *xch,
 int xc_domain_set_access_required(xc_interface *xch,
                                   uint32_t domid,
                                   unsigned int required);
+/**
+ * This function sets the handler of global VIRQs sent by the hypervisor
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id which will handle the VIRQ
+ * @parm virq the virq number (VIRQ_*)
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq);
 
 /*
  * CPUPOOL MANAGEMENT FUNCTIONS
diff --git a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
index 50288bd..9222098 100644
--- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
+++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
@@ -100,7 +100,7 @@ static void mce_amd_checkregs(void *info)
 
 	if (dom0_vmce_enabled()) {
 		mctelem_commit(mctc);
-		send_guest_global_virq(dom0, VIRQ_MCA);
+		send_global_virq(VIRQ_MCA);
 	} else if (++dumpcount >= 10) {
 		x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
 		mctelem_dismiss(mctc);
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index b592041..c4e4477 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -594,7 +594,7 @@ void mcheck_cmn_handler(struct cpu_user_regs *regs, long error_code,
         if (dom0_vmce_enabled()) {
             if (mctc != NULL)
                 mctelem_commit(mctc);
-            send_guest_global_virq(dom0, VIRQ_MCA);
+            send_global_virq(VIRQ_MCA);
         } else {
             x86_mcinfo_dump(mci);
             if (mctc != NULL)
diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c
index 0986025..0894080 100644
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -354,7 +354,7 @@ static void mce_softirq(void)
             /* Step2: Send Log to DOM0 through vIRQ */
             if (dom0_vmce_enabled()) {
                 mce_printk(MCE_VERBOSE, "MCE: send MCE# to DOM0 through virq\n");
-                send_guest_global_virq(dom0, VIRQ_MCA);
+                send_global_virq(VIRQ_MCA);
             }
         }
 
@@ -1085,7 +1085,7 @@ static void cmci_discover(void)
     if (bs.errcnt && mctc != NULL) {
         if (dom0_vmce_enabled()) {
             mctelem_commit(mctc);
-            send_guest_global_virq(dom0, VIRQ_MCA);
+            send_global_virq(VIRQ_MCA);
         } else {
             x86_mcinfo_dump(mctelem_dataptr(mctc));
             mctelem_dismiss(mctc);
@@ -1205,7 +1205,7 @@ fastcall void smp_cmci_interrupt(struct cpu_user_regs *regs)
     if (dom0_vmce_enabled()) {
         mctelem_commit(mctc);
         mce_printk(MCE_VERBOSE, "CMCI: send CMCI to DOM0 through virq\n");
-        send_guest_global_virq(dom0, VIRQ_MCA);
+        send_global_virq(VIRQ_MCA);
     } else {
         x86_mcinfo_dump(mctelem_dataptr(mctc));
         mctelem_dismiss(mctc);
diff --git a/xen/arch/x86/cpu/mcheck/non-fatal.c b/xen/arch/x86/cpu/mcheck/non-fatal.c
index c57688f..1dded9b 100644
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c
@@ -55,7 +55,7 @@ static void mce_checkregs (void *info)
 
 	if (dom0_vmce_enabled()) {
 		mctelem_commit(mctc);
-		send_guest_global_virq(dom0, VIRQ_MCA);
+		send_global_virq(VIRQ_MCA);
 	} else if (++dumpcount >= 10) {
 		x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
 		mctelem_dismiss(mctc);
diff --git a/xen/common/cpu.c b/xen/common/cpu.c
index 79abdb7..630881e 100644
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -108,7 +108,7 @@ int cpu_down(unsigned int cpu)
     notifier_rc = notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu, NULL);
     BUG_ON(notifier_rc != NOTIFY_DONE);
 
-    send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+    send_global_virq(VIRQ_PCPU_STATE);
     cpu_hotplug_done();
     return 0;
 
@@ -148,7 +148,7 @@ int cpu_up(unsigned int cpu)
     notifier_rc = notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu, NULL);
     BUG_ON(notifier_rc != NOTIFY_DONE);
 
-    send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+    send_global_virq(VIRQ_PCPU_STATE);
     cpu_hotplug_done();
     return 0;
 
diff --git a/xen/common/domain.c b/xen/common/domain.c
index fd20210..500c7a2 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -86,7 +86,7 @@ static void __domain_finalise_shutdown(struct domain *d)
     if ( (d->shutdown_code == SHUTDOWN_suspend) && d->suspend_evtchn )
         evtchn_send(d, d->suspend_evtchn);
     else
-        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+        send_global_virq(VIRQ_DOM_EXC);
 }
 
 static void vcpu_check_shutdown(struct vcpu *v)
@@ -480,7 +480,7 @@ int domain_kill(struct domain *d)
         }
         d->is_dying = DOMDYING_dead;
         put_domain(d);
-        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+        send_global_virq(VIRQ_DOM_EXC);
         /* fallthrough */
     case DOMDYING_dead:
         break;
@@ -621,7 +621,7 @@ void domain_pause_for_debugger(void)
     for_each_vcpu ( d, v )
         vcpu_sleep_nosync(v);
 
-    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
+    send_global_virq(VIRQ_DEBUGGER);
 }
 
 /* Complete domain destroy after RCU readers are not holding old references. */
@@ -680,7 +680,7 @@ static void complete_domain_destroy(struct rcu_head *head)
     free_cpumask_var(d->domain_dirty_cpumask);
     free_domain_struct(d);
 
-    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+    send_global_virq(VIRQ_DOM_EXC);
 }
 
 /* Release resources belonging to task @p. */
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 5b0fc4a..8001a91 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -995,6 +995,23 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
     }
     break;
 
+    case XEN_DOMCTL_set_virq_handler:
+    {
+        struct domain *d;
+        uint32_t virq = op->u.set_virq_handler.virq;
+
+        ret = -ESRCH;
+        d = rcu_lock_domain_by_id(op->domain);
+        if ( d != NULL )
+        {
+            ret = xsm_set_virq_handler(d, virq);
+            if ( !ret )
+                ret = set_global_virq_handler(d, virq);
+            rcu_unlock_domain(d);
+        }
+    }
+    break;
+
     default:
         ret = arch_do_domctl(op, u_domctl);
         break;
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 9212042..43bd167 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -689,7 +689,7 @@ void send_guest_vcpu_virq(struct vcpu *v, int virq)
     spin_unlock_irqrestore(&v->virq_lock, flags);
 }
 
-void send_guest_global_virq(struct domain *d, int virq)
+static void send_guest_global_virq(struct domain *d, int virq)
 {
     unsigned long flags;
     int port;
@@ -739,6 +739,68 @@ int send_guest_pirq(struct domain *d, const struct pirq *pirq)
     return evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
 }
 
+static struct domain *global_virq_handlers[NR_VIRQS] __read_mostly;
+
+static DEFINE_SPINLOCK(global_virq_handlers_lock);
+
+void send_global_virq(uint32_t virq)
+{
+    ASSERT(virq < NR_VIRQS);
+    ASSERT(virq_is_global(virq));
+
+    send_guest_global_virq(global_virq_handlers[virq] ?: dom0, virq);
+}
+
+int set_global_virq_handler(struct domain *d, uint32_t virq)
+{
+    struct domain *old;
+
+    if (virq >= NR_VIRQS)
+        return -EINVAL;
+    if (!virq_is_global(virq))
+        return -EINVAL;
+
+    if (global_virq_handlers[virq] == d)
+        return 0;
+
+    if (unlikely(!get_domain(d)))
+        return -EINVAL;
+
+    spin_lock(&global_virq_handlers_lock);
+    old = global_virq_handlers[virq];
+    global_virq_handlers[virq] = d;
+    spin_unlock(&global_virq_handlers_lock);
+
+    if (old != NULL)
+        put_domain(old);
+
+    return 0;
+}
+
+static void clear_global_virq_handlers(struct domain *d)
+{
+    uint32_t virq;
+    int put_count = 0;
+
+    spin_lock(&global_virq_handlers_lock);
+
+    for (virq = 0; virq < NR_VIRQS; virq++)
+    {
+        if (global_virq_handlers[virq] == d)
+        {
+            global_virq_handlers[virq] = NULL;
+            put_count++;
+        }
+    }
+
+    spin_unlock(&global_virq_handlers_lock);
+
+    while (put_count)
+    {
+        put_domain(d);
+        put_count--;
+    }
+}
 
 static long evtchn_status(evtchn_status_t *status)
 {
@@ -1160,6 +1222,8 @@ void evtchn_destroy(struct domain *d)
         d->evtchn[i] = NULL;
     }
     spin_unlock(&d->event_lock);
+
+    clear_global_virq_handlers(d);
 }
 
diff --git a/xen/common/trace.c b/xen/common/trace.c
index 5772f24..58cbf39 100644
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -661,7 +661,7 @@ static inline void insert_lost_records(struct t_buf *buf)
  */
 static void trace_notify_dom0(unsigned long unused)
 {
-    send_guest_global_virq(dom0, VIRQ_TBUF);
+    send_global_virq(VIRQ_TBUF);
 }
 static DECLARE_SOFTIRQ_TASKLET(trace_notify_dom0_tasklet,
                                trace_notify_dom0, 0);
diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
index 89cf4f8..6560182 100644
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -288,7 +288,7 @@ static void __serial_rx(char c, struct cpu_user_regs *regs)
     if ( (serial_rx_prod-serial_rx_cons) != SERIAL_RX_SIZE )
         serial_rx_ring[SERIAL_RX_MASK(serial_rx_prod++)] = c;
     /* Always notify the guest: prevents receive path from getting stuck. */
-    send_guest_global_virq(dom0, VIRQ_CONSOLE);
+    send_global_virq(VIRQ_CONSOLE);
 }
 
 static void serial_rx(char c, struct cpu_user_regs *regs)
@@ -315,7 +315,7 @@ static void serial_rx(char c, struct cpu_user_regs *regs)
 
 static void notify_dom0_con_ring(unsigned long unused)
 {
-    send_guest_global_virq(dom0, VIRQ_CON_RING);
+    send_global_virq(VIRQ_CON_RING);
 }
 static DECLARE_SOFTIRQ_TASKLET(notify_dom0_con_ring_tasklet,
                                notify_dom0_con_ring, 0);
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index c7640aa..75be370 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -813,6 +813,12 @@ struct xen_domctl_audit_p2m {
 typedef struct xen_domctl_audit_p2m xen_domctl_audit_p2m_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_audit_p2m_t);
 
+struct xen_domctl_set_virq_handler {
+    uint32_t virq; /* IN */
+};
+typedef struct xen_domctl_set_virq_handler xen_domctl_set_virq_handler_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_virq_handler_t);
+
 #if defined(__i386__) || defined(__x86_64__)
 /* XEN_DOMCTL_setvcpuextstate */
 /* XEN_DOMCTL_getvcpuextstate */
@@ -912,6 +918,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_getvcpuextstate               63
 #define XEN_DOMCTL_set_access_required           64
 #define XEN_DOMCTL_audit_p2m                     65
+#define XEN_DOMCTL_set_virq_handler              66
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -966,6 +973,7 @@ struct xen_domctl {
 #endif
         struct xen_domctl_set_access_required access_required;
         struct xen_domctl_audit_p2m           audit_p2m;
+        struct xen_domctl_set_virq_handler    set_virq_handler;
         struct xen_domctl_gdbsx_memio         gdbsx_guest_memio;
         struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
         struct xen_domctl_gdbsx_domstatus     gdbsx_domstatus;
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 232d50e..40b8a7a 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -23,11 +23,17 @@
 void send_guest_vcpu_virq(struct vcpu *v, int virq);
 
 /*
- * send_guest_global_virq: Notify guest via a global VIRQ.
- * @d:        Domain to which virtual IRQ should be sent
+ * send_global_virq: Notify the domain handling a global VIRQ.
  * @virq:     Virtual IRQ number (VIRQ_*)
  */
-void send_guest_global_virq(struct domain *d, int virq);
+void send_global_virq(uint32_t virq);
+
+/*
+ * set_global_virq_handler: Set a global VIRQ handler.
+ * @d:        New target domain for this VIRQ
+ * @virq:     Virtual IRQ number (VIRQ_*), must be global
+ */
+int set_global_virq_handler(struct domain *d, uint32_t virq);
 
 /*
  * send_guest_pirq:
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 566c808..e3cae60 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -64,6 +64,7 @@ struct xsm_operations {
     int (*domain_settime) (struct domain *d);
     int (*set_target) (struct domain *d, struct domain *e);
     int (*domctl) (struct domain *d, int cmd);
+    int (*set_virq_handler) (struct domain *d, uint32_t virq);
     int (*tbufcontrol) (void);
     int (*readconsole) (uint32_t clear);
     int (*sched_id) (void);
@@ -265,6 +266,11 @@ static inline int xsm_domctl (struct domain *d, int cmd)
     return xsm_call(domctl(d, cmd));
 }
 
+static inline int xsm_set_virq_handler (struct domain *d, uint32_t virq)
+{
+    return xsm_call(set_virq_handler(d, virq));
+}
+
 static inline int xsm_tbufcontrol (void)
 {
     return xsm_call(tbufcontrol());
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 65daa4e..acf9c8a 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -94,6 +94,11 @@ static int dummy_domctl(struct domain *d, int cmd)
     return 0;
 }
 
+static int dummy_set_virq_handler(struct domain *d, uint32_t virq)
+{
+    return 0;
+}
+
 static int dummy_tbufcontrol (void)
 {
     return 0;
@@ -596,6 +601,7 @@ void xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, domain_settime);
     set_to_dummy_if_null(ops, set_target);
     set_to_dummy_if_null(ops, domctl);
+    set_to_dummy_if_null(ops, set_virq_handler);
     set_to_dummy_if_null(ops, tbufcontrol);
     set_to_dummy_if_null(ops, readconsole);
     set_to_dummy_if_null(ops, sched_id);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index a2020a9..543dc77 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -597,6 +597,11 @@ static int flask_domctl(struct domain *d, int cmd)
     return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__SET_MISC_INFO);
 }
 
+static int flask_set_virq_handler(struct domain *d, uint32_t virq)
+{
+    return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__SET_VIRQ_HANDLER);
+}
+
 static int flask_tbufcontrol(void)
 {
     return domain_has_xen(current->domain, XEN__TBUFCONTROL);
@@ -1460,6 +1465,7 @@ static struct xsm_operations flask_ops = {
     .domain_settime = flask_domain_settime,
     .set_target = flask_set_target,
     .domctl = flask_domctl,
+    .set_virq_handler = flask_set_virq_handler,
     .tbufcontrol = flask_tbufcontrol,
     .readconsole = flask_readconsole,
     .sched_id = flask_sched_id,
diff --git a/xen/xsm/flask/include/av_perm_to_string.h b/xen/xsm/flask/include/av_perm_to_string.h
index 85cbffc..17a1c36 100644
--- a/xen/xsm/flask/include/av_perm_to_string.h
+++ b/xen/xsm/flask/include/av_perm_to_string.h
@@ -60,6 +60,7 @@
    S_(SECCLASS_DOMAIN, DOMAIN__GETPODTARGET, "getpodtarget")
    S_(SECCLASS_DOMAIN, DOMAIN__SETPODTARGET, "setpodtarget")
    S_(SECCLASS_DOMAIN, DOMAIN__SET_MISC_INFO, "set_misc_info")
+   S_(SECCLASS_DOMAIN, DOMAIN__SET_VIRQ_HANDLER, "set_virq_handler")
    S_(SECCLASS_HVM, HVM__SETHVMC, "sethvmc")
    S_(SECCLASS_HVM, HVM__GETHVMC, "gethvmc")
    S_(SECCLASS_HVM, HVM__SETPARAM, "setparam")
diff --git a/xen/xsm/flask/include/av_permissions.h b/xen/xsm/flask/include/av_permissions.h
index 9e55a86..42eaf81 100644
--- a/xen/xsm/flask/include/av_permissions.h
+++ b/xen/xsm/flask/include/av_permissions.h
@@ -61,6 +61,7 @@
 #define DOMAIN__GETPODTARGET          0x10000000UL
 #define DOMAIN__SETPODTARGET          0x20000000UL
 #define DOMAIN__SET_MISC_INFO         0x40000000UL
+#define DOMAIN__SET_VIRQ_HANDLER      0x80000000UL
 
 #define HVM__SETHVMC                  0x00000001UL
 #define HVM__GETHVMC                  0x00000002UL
-- 
1.7.7.6

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
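
[Editorial illustration, not part of the patch or the original mail.] A minimal sketch of how a toolstack might use the new libxc wrapper introduced above to delegate VIRQ_DOM_EXC to a xenstored stubdomain once that domain exists; the helper name, the way the stubdomain ID is obtained, and the error reporting are assumptions made for illustration only:

    /* Hypothetical toolstack helper -- assumes stubdom_id is the domid of
     * an already-built xenstored stubdomain. */
    #include <stdio.h>
    #include <xenctrl.h>

    int delegate_dom_exc_virq(uint32_t stubdom_id)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        int rc;

        if ( !xch )
            return -1;

        /* Route VIRQ_DOM_EXC to the stubdomain instead of dom0, so the
         * xenstored running there is notified of domain destruction. */
        rc = xc_domain_set_virq_handler(xch, stubdom_id, VIRQ_DOM_EXC);
        if ( rc )
            fprintf(stderr, "xc_domain_set_virq_handler failed (rc=%d)\n", rc);

        xc_interface_close(xch);
        return rc;
    }

Until XEN_DOMCTL_set_virq_handler is issued for a given VIRQ, send_global_virq() keeps delivering that VIRQ to dom0 (global_virq_handlers[virq] ?: dom0), so existing configurations behave as before.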