|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v6 10/24] xsplice: Implement support for applying/reverting/replacing patches.
On 07/04/16 04:49, Konrad Rzeszutek Wilk wrote:
> +void arch_xsplice_post_action(void)
> +{
/* Serialise the CPU pipeline. */
Otherwise it makes one wonder what a cpuid instruction has to do with
xsplicing.
> diff --git a/xen/common/xsplice.c b/xen/common/xsplice.c
> index 10c8166..2df879e 100644
> --- a/xen/common/xsplice.c
> +++ b/xen/common/xsplice.c
> @@ -11,17 +12,29 @@
> #include <xen/mm.h>
> #include <xen/sched.h>
> #include <xen/smp.h>
> +#include <xen/softirq.h>
> #include <xen/spinlock.h>
> #include <xen/vmap.h>
> +#include <xen/wait.h>
> #include <xen/xsplice_elf.h>
> #include <xen/xsplice.h>
> +#include <xen/xsplice_patch.h>
>
> #include <asm/event.h>
> #include <public/sysctl.h>
>
> +/*
> + * Protects against payload_list operations and also allows only one
> + * caller in schedule_work.
> + */
This comment looks like it should be part of a previous patch.
> static DEFINE_SPINLOCK(payload_lock);
> static LIST_HEAD(payload_list);
>
> +/*
> + * Patches which have been applied.
> + */
> +static LIST_HEAD(applied_list);
> +
> static unsigned int payload_cnt;
> static unsigned int payload_version = 1;
>
> @@ -37,9 +50,35 @@ struct payload {
> size_t ro_size; /* .. and its size (if any). */
> size_t pages; /* Total pages for
> [text,rw,ro]_addr */
> mfn_t *mfn; /* The MFNs backing these pages. */
> + struct list_head applied_list; /* Linked to 'applied_list'. */
> + struct xsplice_patch_func_internal *funcs; /* The array of functions
> to patch. */
> + unsigned int nfuncs; /* Nr of functions to patch. */
> char name[XEN_XSPLICE_NAME_SIZE]; /* Name of it. */
> };
>
> +/* Defines an outstanding patching action. */
> +struct xsplice_work
> +{
> + atomic_t semaphore; /* Used for rendezvous. */
> + atomic_t irq_semaphore; /* Used to signal all IRQs disabled. */
> + uint32_t timeout; /* Timeout to do the operation. */
> + struct payload *data; /* The payload on which to act. */
> + volatile bool_t do_work; /* Signals work to do. */
> + volatile bool_t ready; /* Signals all CPUs synchronized. */
> + unsigned int cmd; /* Action request: XSPLICE_ACTION_* */
Alignment.
> +static int schedule_work(struct payload *data, uint32_t cmd, uint32_t
> timeout)
> +{
> + unsigned int cpu;
> +
> + ASSERT(spin_is_locked(&payload_lock));
> +
> + /* Fail if an operation is already scheduled. */
> + if ( xsplice_work.do_work )
> + return -EBUSY;
> +
> + if ( !get_cpu_maps() )
> + {
> + printk(XENLOG_ERR XSPLICE "%s: unable to get cpu_maps lock!\n",
> + data->name);
> + return -EBUSY;
> + }
> +
> + xsplice_work.cmd = cmd;
> + xsplice_work.data = data;
> + xsplice_work.timeout = timeout ?: MILLISECS(30);
> +
> + dprintk(XENLOG_DEBUG, XSPLICE "%s: timeout is %"PRI_stime"ms\n",
> + data->name, xsplice_work.timeout / MILLISECS(1));
> +
> + atomic_set(&xsplice_work.semaphore, -1);
> + atomic_set(&xsplice_work.irq_semaphore, -1);
> +
> + xsplice_work.ready = 0;
> + smp_wmb();
> + xsplice_work.do_work = 1;
> + smp_wmb();
> + /*
> + * Above smp_wmb() gives us a compiler barrier, as we MUST do this
> + * after setting the global structure.
> + */
> + for_each_online_cpu ( cpu )
> + per_cpu(work_to_do, cpu) = 1;
This should be in reschedule_fn() to avoid dirtying the cachelines on
the current cpu and have them read by other cpus.
> +
> + put_cpu_maps();
> +
> + return 0;
> +}
> +
> +static void reschedule_fn(void *unused)
> +{
> + smp_mb(); /* Synchronize with setting do_work */
You don't need the barrier here, but you do need...
> + raise_softirq(SCHEDULE_SOFTIRQ);
> +}
> +
> +static int xsplice_spin(atomic_t *counter, s_time_t timeout,
> + unsigned int cpus, const char *s)
> +{
> + int rc = 0;
> +
> + while ( atomic_read(counter) != cpus && NOW() < timeout )
> + cpu_relax();
> +
> + /* Log & abort. */
> + if ( atomic_read(counter) != cpus )
> + {
> + printk(XENLOG_ERR XSPLICE "%s: Timed out on %s semaphore %u/%u\n",
> + xsplice_work.data->name, s, atomic_read(counter), cpus);
> + rc = -EBUSY;
> + xsplice_work.data->rc = rc;
> + xsplice_work.do_work = 0;
> + smp_wmb();
> + }
> +
> + return rc;
> +}
> +
> +/*
> + * The main function which manages the work of quiescing the system and
> + * patching code.
> + */
> +void check_for_xsplice_work(void)
> +{
> +#define ACTION(x) [XSPLICE_ACTION_##x] = #x
> + static const char *const names[] = {
> + ACTION(APPLY),
> + ACTION(REVERT),
> + ACTION(REPLACE),
> + };
> + unsigned int cpu = smp_processor_id();
> + s_time_t timeout;
> + unsigned long flags;
> +
> + /* Fast path: no work to do. */
> + if ( !per_cpu(work_to_do, cpu ) )
> + return;
> +
> + /* In case we aborted, other CPUs can skip right away. */
... an smp_rmb() here.
> + if ( !xsplice_work.do_work )
> + {
> + per_cpu(work_to_do, cpu) = 0;
> + return;
> + }
> +
> + ASSERT(local_irq_is_enabled());
> +
> + /* Set at -1, so will go up to num_online_cpus - 1. */
> + if ( atomic_inc_and_test(&xsplice_work.semaphore) )
> + {
> + struct payload *p;
> + unsigned int cpus;
> +
> + p = xsplice_work.data;
> + if ( !get_cpu_maps() )
> + {
> + printk(XENLOG_ERR XSPLICE "%s: CPU%u - unable to get cpu_maps
> lock!\n",
> + p->name, cpu);
> + per_cpu(work_to_do, cpu) = 0;
> + xsplice_work.data->rc = -EBUSY;
> + xsplice_work.do_work = 0;
> + /*
> + * Do NOT decrement semaphore down - as that may cause the other
> + * CPU (which may be at this ready to increment it)
> + * to assume the role of master and then needlessly time out
> + * (as do_work is zero).
> + */
smp_wmb();
> + return;
> + }
> + /* "Mask" NMIs. */
> + arch_xsplice_mask();
> +
> + barrier(); /* MUST do it after get_cpu_maps. */
> + cpus = num_online_cpus() - 1;
> +
> + if ( cpus )
> + {
> + dprintk(XENLOG_DEBUG, XSPLICE "%s: CPU%u - IPIing the other %u
> CPUs\n",
> + p->name, cpu, cpus);
> + smp_call_function(reschedule_fn, NULL, 0);
> + }
> +
> + timeout = xsplice_work.timeout + NOW();
> + if ( xsplice_spin(&xsplice_work.semaphore, timeout, cpus, "CPU") )
> + goto abort;
> +
> + /* All CPUs are waiting, now signal to disable IRQs. */
> + xsplice_work.ready = 1;
> + smp_wmb();
> +
> + atomic_inc(&xsplice_work.irq_semaphore);
> + if ( !xsplice_spin(&xsplice_work.irq_semaphore, timeout, cpus,
> "IRQ") )
> + {
> + local_irq_save(flags);
> + /* Do the patching. */
> + xsplice_do_action();
> + /* Flush the CPU i-cache via CPUID instruction (on x86). */
> + arch_xsplice_post_action();
> + local_irq_restore(flags);
> + }
> + arch_xsplice_unmask();
> +
> + abort:
> + per_cpu(work_to_do, cpu) = 0;
> + xsplice_work.do_work = 0;
> +
> + smp_wmb(); /* MUST complete writes before put_cpu_maps(). */
> +
> + put_cpu_maps();
> +
> + printk(XENLOG_INFO XSPLICE "%s finished %s with rc=%d\n",
> + p->name, names[xsplice_work.cmd], p->rc);
> + }
> + else
> + {
> + /* Wait for all CPUs to rendezvous. */
> + while ( xsplice_work.do_work && !xsplice_work.ready )
> + cpu_relax();
> +
> + /* Disable IRQs and signal. */
> + local_irq_save(flags);
> + atomic_inc(&xsplice_work.irq_semaphore);
> +
> + /* Wait for patching to complete. */
> + while ( xsplice_work.do_work )
> + cpu_relax();
> +
> + /* To flush out pipeline. */
> + arch_xsplice_post_action();
> + local_irq_restore(flags);
> +
> + per_cpu(work_to_do, cpu) = 0;
> + }
> +}
> +
> diff --git a/xen/include/xen/xsplice.h b/xen/include/xen/xsplice.h
> index b843b5f..71d7939 100644
> --- a/xen/include/xen/xsplice.h
> +++ b/xen/include/xen/xsplice.h
> @@ -11,12 +11,37 @@ struct xsplice_elf_sec;
> struct xsplice_elf_sym;
> struct xen_sysctl_xsplice_op;
>
> +#include <xen/elfstructs.h>
> #ifdef CONFIG_XSPLICE
>
> +/*
> + * The structure which defines the patching. This is what the hypervisor
> + * expects in the '.xsplice.func' section of the ELF file.
> + *
> + * This MUST be in sync with what the tools generate. We expose
> + * for the tools the 'struct xsplice_patch_func' which does not have
> + * platform specific entries.
Shouldn't this be somewhere in the public API then? Even if it is
explicitly declared as unstable due to xpatches needing to be rebuilt
from exact source?
> diff --git a/xen/include/xen/xsplice_patch.h b/xen/include/xen/xsplice_patch.h
> new file mode 100644
> index 0000000..f305826
> --- /dev/null
> +++ b/xen/include/xen/xsplice_patch.h
> @@ -0,0 +1,25 @@
> +/*
> + * Copyright (C) 2016 Citrix Systems R&D Ltd.
> + */
> +
> +#ifndef __XEN_XSPLICE_PATCH_H__
> +#define __XEN_XSPLICE_PATCH_H__
> +
> +#define XSPLICE_PAYLOAD_VERSION 1
> +/*
> + * .xsplice.funcs structure layout defined in the `Payload format`
> + * section in the xSplice design document.
> + *
> + * The size should be exactly 64 bytes.
> + */
Ditto, wrt the public API.
~Andrew
> +struct xsplice_patch_func {
> + const char *name; /* Name of function to be patched. */
> + uint64_t new_addr;
> + uint64_t old_addr; /* Can be zero and name will be looked up. */
> + uint32_t new_size;
> + uint32_t old_size;
> + uint8_t version; /* MUST be XSPLICE_PAYLOAD_VERSION. */
> + uint8_t pad[31]; /* MUST be zero filled. */
> +};
> +
> +#endif /* __XEN_XSPLICE_PATCH_H__ */
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace.