[PATCH v3 4/4] x86/mm: move mmio_ro_emulated_write() to PV only file
mmio_ro_emulated_write() is only used in pv/ro-page-fault.c, move the
function to that file and make it static.

No functional change intended.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/include/asm/mm.h   | 12 ------------
 xen/arch/x86/mm.c               | 26 --------------------------
 xen/arch/x86/pv/ro-page-fault.c | 34 ++++++++++++++++++++++++++++++++++
 3 files changed, 34 insertions(+), 38 deletions(-)

diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h
index aeb8ebcf2d56..2665daa6f74f 100644
--- a/xen/arch/x86/include/asm/mm.h
+++ b/xen/arch/x86/include/asm/mm.h
@@ -542,18 +542,6 @@ void memguard_unguard_stack(void *p);
 int subpage_mmio_ro_add(paddr_t start, size_t size);
 bool subpage_mmio_write_accept(mfn_t mfn, unsigned long gla);
 
-struct mmio_ro_emulate_ctxt {
-    unsigned long cr2;
-    /* Used only for mmcfg case */
-    unsigned int seg, bdf;
-    /* Used only for non-mmcfg case */
-    mfn_t mfn;
-};
-
-int cf_check mmio_ro_emulated_write(
-    enum x86_segment seg, unsigned long offset, void *p_data,
-    unsigned int bytes, struct x86_emulate_ctxt *ctxt);
-
 /* r/o MMIO subpage access handlers. */
 struct subpage_ro_range {
     struct list_head list;
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 0e29bab03a95..9ac855f998dc 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5144,32 +5144,6 @@ void subpage_mmio_write_emulate(
         goto write_ignored;
 }
 
-int cf_check mmio_ro_emulated_write(
-    enum x86_segment seg,
-    unsigned long offset,
-    void *p_data,
-    unsigned int bytes,
-    struct x86_emulate_ctxt *ctxt)
-{
-    struct mmio_ro_emulate_ctxt *mmio_ro_ctxt = ctxt->data;
-    unsigned long data = 0;
-
-    /* Only allow naturally-aligned stores at the original %cr2 address. */
-    if ( ((bytes | offset) & (bytes - 1)) || !bytes ||
-         offset != mmio_ro_ctxt->cr2 || bytes > sizeof(data) )
-    {
-        gdprintk(XENLOG_WARNING, "bad access (cr2=%lx, addr=%lx, bytes=%u)\n",
-                 mmio_ro_ctxt->cr2, offset, bytes);
-        return X86EMUL_UNHANDLEABLE;
-    }
-
-    memcpy(&data, p_data, bytes);
-    subpage_mmio_write_emulate(mmio_ro_ctxt->mfn, PAGE_OFFSET(offset),
-                               data, bytes);
-
-    return X86EMUL_OKAY;
-}
-
 /*
  * For these PTE APIs, the caller must follow the alloc-map-unmap-free
  * lifecycle, which means explicitly mapping the PTE pages before accessing
diff --git a/xen/arch/x86/pv/ro-page-fault.c b/xen/arch/x86/pv/ro-page-fault.c
index 11b01c479e43..3dd795288379 100644
--- a/xen/arch/x86/pv/ro-page-fault.c
+++ b/xen/arch/x86/pv/ro-page-fault.c
@@ -298,6 +298,14 @@ static int ptwr_do_page_fault(struct x86_emulate_ctxt *ctxt,
  * fault handling for read-only MMIO pages
  */
 
+struct mmio_ro_emulate_ctxt {
+    unsigned long cr2;
+    /* Used only for mmcfg case */
+    unsigned int seg, bdf;
+    /* Used only for non-mmcfg case */
+    mfn_t mfn;
+};
+
 static int cf_check mmcfg_intercept_write(
     enum x86_segment seg,
     unsigned long offset,
@@ -329,6 +337,32 @@ static int cf_check mmcfg_intercept_write(
     return X86EMUL_OKAY;
 }
 
+static int cf_check mmio_ro_emulated_write(
+    enum x86_segment seg,
+    unsigned long offset,
+    void *p_data,
+    unsigned int bytes,
+    struct x86_emulate_ctxt *ctxt)
+{
+    struct mmio_ro_emulate_ctxt *mmio_ro_ctxt = ctxt->data;
+    unsigned long data = 0;
+
+    /* Only allow naturally-aligned stores at the original %cr2 address. */
+    if ( ((bytes | offset) & (bytes - 1)) || !bytes ||
+         offset != mmio_ro_ctxt->cr2 || bytes > sizeof(data) )
+    {
+        gdprintk(XENLOG_WARNING, "bad access (cr2=%lx, addr=%lx, bytes=%u)\n",
+                 mmio_ro_ctxt->cr2, offset, bytes);
+        return X86EMUL_UNHANDLEABLE;
+    }
+
+    memcpy(&data, p_data, bytes);
+    subpage_mmio_write_emulate(mmio_ro_ctxt->mfn, PAGE_OFFSET(offset),
+                               data, bytes);
+
+    return X86EMUL_OKAY;
+}
+
 static const struct x86_emulate_ops mmio_ro_emulate_ops = {
     .read       = x86emul_unhandleable_rw,
     .insn_fetch = ptwr_emulated_insn_fetch,
-- 
2.48.1
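
Editorial note (not part of the patch): the alignment test in the moved handler,
((bytes | offset) & (bytes - 1)), rejects any access whose size is not a power of
two or whose offset is not a multiple of that size; the real handler additionally
requires offset == cr2 and bytes <= sizeof(unsigned long). Below is a minimal
standalone sketch, compilable outside the Xen tree, that illustrates only the
alignment part; the helper name is_naturally_aligned is made up for illustration.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical helper mirroring the alignment check in
 * mmio_ro_emulated_write(): reject zero-sized accesses, sizes that are
 * not a power of two, and offsets not aligned to the access size.
 */
static bool is_naturally_aligned(unsigned long offset, unsigned int bytes)
{
    return bytes && !((bytes | offset) & (bytes - 1));
}

int main(void)
{
    /* 4-byte store at a 4-byte aligned offset: accepted (prints 1). */
    printf("%d\n", is_naturally_aligned(0x1000, 4));
    /* 4-byte store at offset 0x1002: misaligned, rejected (prints 0). */
    printf("%d\n", is_naturally_aligned(0x1002, 4));
    /* 3-byte store: size is not a power of two, rejected (prints 0). */
    printf("%d\n", is_naturally_aligned(0x1000, 3));

    return 0;
}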