[PATCH v8 13/17] xen/riscv: add minimal stuff to mm.h to build full Xen
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Changes in V8:
 - Nothing changed, only rebase.
---
Changes in V7:
 - update the argument type of maddr_to_virt(): unsigned long -> paddr_t
 - rename the argument of PFN_ORDER(): pfn -> pg.
 - add Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Changes in V6:
 - drop __virt_to_maddr() (transform to a macro) and __maddr_to_virt() (rename to maddr_to_virt()).
 - parenthesize va in the definition of vmap_to_mfn().
 - Code style fixes.
---
Changes in V5:
 - update the comment around "struct domain *domain;": zero -> NULL
 - fix indentation of "unsigned long val;"
 - put page_to_virt() and virt_to_page() close to each other.
 - drop unnecessary leading underscore
 - drop a space before the comment: /* Count of uses of this frame as its current type. */
 - drop the comment about a page 'not as a shadow'; it is not necessary for RISC-V
---
Changes in V4:
 - update the argument name of the PFN_ORDER() macro.
 - drop the pad at the end of 'struct page_info'.
 - Change message -> subject in "Changes in V3"
 - delete duplicated macros from riscv/mm.h
 - fix indentation in struct page_info
 - align the comments for the PGC_ macros
 - update the definitions of domain_set_alloc_bitsize() and domain_clamp_alloc_bitsize()
 - drop unnecessary comments.
 - s/BUG/BUG_ON("...")
 - define __virt_to_maddr, __maddr_to_virt as stubs
 - add inclusion of xen/mm-frame.h for mfn_x and others
 - include "xen/mm.h" instead of "asm/mm.h" to fix compilation issues:
     In file included from arch/riscv/setup.c:7:
     ./arch/riscv/include/asm/mm.h:60:28: error: field 'list' has incomplete type
        60 |     struct page_list_entry list;
           |                            ^~~~
     ./arch/riscv/include/asm/mm.h:81:43: error: 'MAX_ORDER' undeclared here (not in a function)
        81 |         unsigned long first_dirty:MAX_ORDER + 1;
           |                                   ^~~~~~~~~
     ./arch/riscv/include/asm/mm.h:81:31: error: bit-field 'first_dirty' width not an integer constant
        81 |         unsigned long first_dirty:MAX_ORDER + 1;
 - Define __virt_to_mfn() and __mfn_to_virt() using maddr_to_mfn() and mfn_to_maddr().
---
Changes in V3:
 - update the commit title
 - introduce DIRECTMAP_VIRT_START.
 - drop the changes related to pfn_to_paddr() and paddr_to_pfn() as they were removed in
   [PATCH v2 32/39] xen/riscv: add minimal stuff to asm/page.h to build full Xen
 - code style fixes.
 - drop get_page_nr and put_page_nr as they aren't needed for the time being
 - drop CONFIG_STATIC_MEMORY related things
 - code style fixes
---
Changes in V2:
 - define a stub for arch_get_dma_bitsize(void)
---
 xen/arch/riscv/include/asm/mm.h | 240 ++++++++++++++++++++++++++++++++
 xen/arch/riscv/mm.c             |   2 +-
 xen/arch/riscv/setup.c          |   2 +-
 3 files changed, 242 insertions(+), 2 deletions(-)

diff --git a/xen/arch/riscv/include/asm/mm.h b/xen/arch/riscv/include/asm/mm.h
index 07c7a0abba..cc4a07a71c 100644
--- a/xen/arch/riscv/include/asm/mm.h
+++ b/xen/arch/riscv/include/asm/mm.h
@@ -3,11 +3,246 @@
 #ifndef _ASM_RISCV_MM_H
 #define _ASM_RISCV_MM_H
 
+#include <public/xen.h>
+#include <xen/bug.h>
+#include <xen/mm-frame.h>
+#include <xen/pdx.h>
+#include <xen/types.h>
+
 #include <asm/page-bits.h>
 
 #define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
 #define paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
 
+#define paddr_to_pdx(pa)    mfn_to_pdx(maddr_to_mfn(pa))
+#define gfn_to_gaddr(gfn)   pfn_to_paddr(gfn_x(gfn))
+#define gaddr_to_gfn(ga)    _gfn(paddr_to_pfn(ga))
+#define mfn_to_maddr(mfn)   pfn_to_paddr(mfn_x(mfn))
+#define maddr_to_mfn(ma)    _mfn(paddr_to_pfn(ma))
+#define vmap_to_mfn(va)     maddr_to_mfn(virt_to_maddr((vaddr_t)(va)))
+#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
+
+static inline void *maddr_to_virt(paddr_t ma)
+{
+    BUG_ON("unimplemented");
+    return NULL;
+}
+
+#define virt_to_maddr(va) ({ BUG_ON("unimplemented"); 0; })
+
+/* Convert between Xen-heap virtual addresses and machine frame numbers. */
+#define __virt_to_mfn(va)  mfn_x(maddr_to_mfn(virt_to_maddr(va)))
+#define __mfn_to_virt(mfn) maddr_to_virt(mfn_to_maddr(_mfn(mfn)))
+
+/*
+ * We define non-underscored wrappers for above conversion functions.
+ * These are overriden in various source files while underscored version
+ * remain intact.
+ */
+#define virt_to_mfn(va)     __virt_to_mfn(va)
+#define mfn_to_virt(mfn)    __mfn_to_virt(mfn)
+
+struct page_info
+{
+    /* Each frame can be threaded onto a doubly-linked list. */
+    struct page_list_entry list;
+
+    /* Reference count and various PGC_xxx flags and fields. */
+    unsigned long count_info;
+
+    /* Context-dependent fields follow... */
+    union {
+        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
+        struct {
+            /* Type reference count and various PGT_xxx flags and fields. */
+            unsigned long type_info;
+        } inuse;
+
+        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
+        union {
+            struct {
+                /*
+                 * Index of the first *possibly* unscrubbed page in the buddy.
+                 * One more bit than maximum possible order to accommodate
+                 * INVALID_DIRTY_IDX.
+                 */
+#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1)
+                unsigned long first_dirty:MAX_ORDER + 1;
+
+                /* Do TLBs need flushing for safety before next page use? */
+                bool need_tlbflush:1;
+
+#define BUDDY_NOT_SCRUBBING    0
+#define BUDDY_SCRUBBING        1
+#define BUDDY_SCRUB_ABORT      2
+                unsigned long scrub_state:2;
+            };
+
+            unsigned long val;
+        } free;
+    } u;
+
+    union {
+        /* Page is in use */
+        struct {
+            /* Owner of this page (NULL if page is anonymous). */
+            struct domain *domain;
+        } inuse;
+
+        /* Page is on a free list. */
+        struct {
+            /* Order-size of the free chunk this page is the head of. */
+            unsigned int order;
+        } free;
+    } v;
+
+    union {
+        /*
+         * Timestamp from 'TLB clock', used to avoid extra safety flushes.
+         * Only valid for: a) free pages, and b) pages with zero type count
+         */
+        uint32_t tlbflush_timestamp;
+    };
+};
+
+#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
+
+/* PDX of the first page in the frame table. */
+extern unsigned long frametable_base_pdx;
+
+/* Convert between machine frame numbers and page-info structures. */
+#define mfn_to_page(mfn)                                            \
+    (frame_table + (mfn_to_pdx(mfn) - frametable_base_pdx))
+#define page_to_mfn(pg)                                             \
+    pdx_to_mfn((unsigned long)((pg) - frame_table) + frametable_base_pdx)
+
+static inline void *page_to_virt(const struct page_info *pg)
+{
+    return mfn_to_virt(mfn_x(page_to_mfn(pg)));
+}
+
+/* Convert between Xen-heap virtual addresses and page-info structures. */
+static inline struct page_info *virt_to_page(const void *v)
+{
+    BUG_ON("unimplemented");
+    return NULL;
+}
+
+/*
+ * Common code requires get_page_type and put_page_type.
+ * We don't care about typecounts so we just do the minimum to make it
+ * happy.
+ */
+static inline int get_page_type(struct page_info *page, unsigned long type)
+{
+    return 1;
+}
+
+static inline void put_page_type(struct page_info *page)
+{
+}
+
+static inline void put_page_and_type(struct page_info *page)
+{
+    put_page_type(page);
+    put_page(page);
+}
+
+/*
+ * RISC-V does not have an M2P, but common code expects a handful of
+ * M2P-related defines and functions. Provide dummy versions of these.
+ */
+#define INVALID_M2P_ENTRY (~0UL)
+#define SHARED_M2P_ENTRY  (~0UL - 1UL)
+#define SHARED_M2P(_e)    ((_e) == SHARED_M2P_ENTRY)
+
+#define set_gpfn_from_mfn(mfn, pfn) do { (void)(mfn), (void)(pfn); } while (0)
+#define mfn_to_gfn(d, mfn) ((void)(d), _gfn(mfn_x(mfn)))
+
+#define PDX_GROUP_SHIFT (PAGE_SHIFT + VPN_BITS)
+
+static inline unsigned long domain_get_maximum_gpfn(struct domain *d)
+{
+    BUG_ON("unimplemented");
+    return 0;
+}
+
+static inline long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+    BUG_ON("unimplemented");
+    return 0;
+}
+
+/*
+ * On RISCV, all the RAM is currently direct mapped in Xen.
+ * Hence return always true.
+ */
+static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
+{
+    return true;
+}
+
+#define PG_shift(idx)   (BITS_PER_LONG - (idx))
+#define PG_mask(x, idx) (x ## UL << PG_shift(idx))
+
+#define PGT_none          PG_mask(0, 1)  /* no special uses of this page   */
+#define PGT_writable_page PG_mask(1, 1)  /* has writable mappings?         */
+#define PGT_type_mask     PG_mask(1, 1)  /* Bits 31 or 63.                 */
+
+/* Count of uses of this frame as its current type. */
+#define PGT_count_width   PG_shift(2)
+#define PGT_count_mask    ((1UL << PGT_count_width) - 1)
+
+/*
+ * Page needs to be scrubbed. Since this bit can only be set on a page that is
+ * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit.
+ */
+#define _PGC_need_scrub   _PGC_allocated
+#define PGC_need_scrub    PGC_allocated
+
+/* Cleared when the owning guest 'frees' this page. */
+#define _PGC_allocated    PG_shift(1)
+#define PGC_allocated     PG_mask(1, 1)
+/* Page is Xen heap? */
+#define _PGC_xen_heap     PG_shift(2)
+#define PGC_xen_heap      PG_mask(1, 2)
+/* Page is broken? */
+#define _PGC_broken       PG_shift(7)
+#define PGC_broken        PG_mask(1, 7)
+/* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
+#define PGC_state           PG_mask(3, 9)
+#define PGC_state_inuse     PG_mask(0, 9)
+#define PGC_state_offlining PG_mask(1, 9)
+#define PGC_state_offlined  PG_mask(2, 9)
+#define PGC_state_free      PG_mask(3, 9)
+#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
+
+/* Count of references to this frame. */
+#define PGC_count_width   PG_shift(9)
+#define PGC_count_mask    ((1UL << PGC_count_width) - 1)
+
+#define _PGC_extra        PG_shift(10)
+#define PGC_extra         PG_mask(1, 10)
+
+#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
+#define is_xen_heap_mfn(mfn) \
+    (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
+
+#define is_xen_fixed_mfn(mfn)                                   \
+    ((mfn_to_maddr(mfn) >= virt_to_maddr((vaddr_t)_start)) &&   \
+     (mfn_to_maddr(mfn) <= virt_to_maddr((vaddr_t)_end - 1)))
+
+#define page_get_owner(p)    (p)->v.inuse.domain
+#define page_set_owner(p, d) ((p)->v.inuse.domain = (d))
+
+/* TODO: implement */
+#define mfn_valid(mfn) ({ (void)(mfn); 0; })
+
+#define domain_set_alloc_bitsize(d) ((void)(d))
+#define domain_clamp_alloc_bitsize(d, b) ((void)(d), (b))
+
+#define PFN_ORDER(pg) ((pg)->v.free.order)
+
 extern unsigned char cpu0_boot_stack[];
 
 void setup_initial_pagetables(void);
@@ -20,4 +255,9 @@ unsigned long calc_phys_offset(void);
 
 void turn_on_mmu(unsigned long ra);
 
+static inline unsigned int arch_get_dma_bitsize(void)
+{
+    return 32; /* TODO */
+}
+
 #endif /* _ASM_RISCV_MM_H */
diff --git a/xen/arch/riscv/mm.c b/xen/arch/riscv/mm.c
index 053f043a3d..fe3a43be20 100644
--- a/xen/arch/riscv/mm.c
+++ b/xen/arch/riscv/mm.c
@@ -5,12 +5,12 @@
 #include <xen/init.h>
 #include <xen/kernel.h>
 #include <xen/macros.h>
+#include <xen/mm.h>
 #include <xen/pfn.h>
 
 #include <asm/early_printk.h>
 #include <asm/csr.h>
 #include <asm/current.h>
-#include <asm/mm.h>
 #include <asm/page.h>
 #include <asm/processor.h>
 
diff --git a/xen/arch/riscv/setup.c b/xen/arch/riscv/setup.c
index 6593f601c1..98a94c4c48 100644
--- a/xen/arch/riscv/setup.c
+++ b/xen/arch/riscv/setup.c
@@ -2,9 +2,9 @@
 
 #include <xen/compile.h>
 #include <xen/init.h>
+#include <xen/mm.h>
 
 #include <asm/early_printk.h>
-#include <asm/mm.h>
 
 /* Xen stack for bringing up the first CPU. */
 unsigned char __initdata cpu0_boot_stack[STACK_SIZE]
-- 
2.44.0
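
For readers unfamiliar with Xen's page-flag encoding, below is a standalone
sketch (not part of the patch) of how the PG_shift()/PG_mask() helpers added
by this header pack PGC_ flags into the top bits of count_info while the low
bits remain the reference count, and how page_state_is() tests the state
field. It re-states only the macro definitions from the patch; the simplified
struct page_info and the assumption BITS_PER_LONG == 64 (RV64) are mine.

/*
 * Illustrative only: mirrors the PG_shift()/PG_mask() encoding above.
 * Assumes a 64-bit build; struct page_info is reduced to count_info.
 */
#include <stdio.h>

#define BITS_PER_LONG 64

#define PG_shift(idx)   (BITS_PER_LONG - (idx))
#define PG_mask(x, idx) (x ## UL << PG_shift(idx))

#define PGC_allocated    PG_mask(1, 1)            /* bit 63 */
#define PGC_xen_heap     PG_mask(1, 2)            /* bit 62 */
#define PGC_state        PG_mask(3, 9)            /* bits 56-55 */
#define PGC_state_inuse  PG_mask(0, 9)
#define PGC_state_free   PG_mask(3, 9)
#define PGC_count_mask   ((1UL << PG_shift(9)) - 1)  /* low 55 bits */

struct page_info { unsigned long count_info; };

#define page_state_is(pg, st) \
    (((pg)->count_info & PGC_state) == PGC_state_##st)

int main(void)
{
    /* An allocated, in-use page with a reference count of 1. */
    struct page_info pg = { .count_info = PGC_allocated | PGC_state_inuse | 1 };

    printf("allocated: %d\n", !!(pg.count_info & PGC_allocated));  /* 1 */
    printf("inuse:     %d\n", page_state_is(&pg, inuse));          /* 1 */
    printf("free:      %d\n", page_state_is(&pg, free));           /* 0 */
    printf("refcount:  %lu\n", pg.count_info & PGC_count_mask);    /* 1 */

    return 0;
}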